This is the 5.10.68 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmFLB58ACgkQONu9yGCS
aT7uAhAAraX1qVdfkq3g4w9jaURkiR/Z1LbPqjMswIojApmcXV3e0mUtEWxBBEJT
o/uId9KUr/OrfAN++DO+9iLmPIjZHW+49I+CeHcDS95PdeWSKxZ3HBPUqK8uX8tU
QdPjh2PVL7Kkzbgi65RWeTOERHLlEj6qo21xu4W9QuwmZZojEB8xVP9BB/U6p84Q
KYPX+zyGUo9NgsaVTwOXxZzyT8JgcfEUKg0F4nHeNJxEh106dN2XgZpq+GvB7Hq7
koDy/dg2I4hS++Ds/Fjz9wQrgcvw3WSo3pUZzyTS2zfrcefLjqDVWzSY/1Ttd4b9
B7Lw7WiEgbX75EFXX8RgCrmNSsNW8pnFyR2URoOfFD6ckJNj/XCPVV+tfiSfAnH5
vlOQOicjtr/yFeOfhre8U4pTBWXk9BYscJyzNp/wScaExHXXkI+HYi92cbbTWKCU
/ig1RmIqTATdFAXjukHUqt6QzI1iqPtTQCGd99AhaBGq0Hb8OK2HponzBOpQvAHb
xaEMSL9YsJhoAux+n+R95FQKCk2KrjgX8Bczyuj2OAL5jeST10fWrYe6DflSta5K
9fNWmyjegpQEcmtDidQ7HH81Fy793S/34R8FQ4y1zPEi1A0yH//FO2lA8dS4Rdvo
ho7l7W+Hd/Ut67P0b7OFz2znw0T4OqMF6Il30q88pOfcis2TfNs=
=2XgB
-----END PGP SIGNATURE-----
Merge 5.10.68 into android12-5.10-lts
Changes in 5.10.68
drm/bridge: lt9611: Fix handling of 4k panels
btrfs: fix upper limit for max_inline for page size 64K
io_uring: ensure symmetry in handling iter types in loop_rw_iter()
xen: reset legacy rtc flag for PV domU
bnx2x: Fix enabling network interfaces without VFs
arm64/sve: Use correct size when reinitialising SVE state
PM: base: power: don't try to use non-existing RTC for storing data
PCI: Add AMD GPU multi-function power dependencies
drm/amd/amdgpu: Increase HWIP_MAX_INSTANCE to 10
drm/etnaviv: return context from etnaviv_iommu_context_get
drm/etnaviv: put submit prev MMU context when it exists
drm/etnaviv: stop abusing mmu_context as FE running marker
drm/etnaviv: keep MMU context across runtime suspend/resume
drm/etnaviv: exec and MMU state is lost when resetting the GPU
drm/etnaviv: fix MMU context leak on GPU reset
drm/etnaviv: reference MMU context when setting up hardware state
drm/etnaviv: add missing MMU context put when reaping MMU mapping
s390/sclp: fix Secure-IPL facility detection
x86/pat: Pass valid address to sanitize_phys()
x86/mm: Fix kern_addr_valid() to cope with existing but not present entries
tipc: fix an use-after-free issue in tipc_recvmsg
ethtool: Fix rxnfc copy to user buffer overflow
net/{mlx5|nfp|bnxt}: Remove unnecessary RTNL lock assert
net-caif: avoid user-triggerable WARN_ON(1)
ptp: dp83640: don't define PAGE0
dccp: don't duplicate ccid when cloning dccp sock
net/l2tp: Fix reference count leak in l2tp_udp_recv_core
r6040: Restore MDIO clock frequency after MAC reset
tipc: increase timeout in tipc_sk_enqueue()
drm/rockchip: cdn-dp-core: Make cdn_dp_core_resume __maybe_unused
perf machine: Initialize srcline string member in add_location struct
net/mlx5: FWTrace, cancel work on alloc pd error flow
net/mlx5: Fix potential sleeping in atomic context
nvme-tcp: fix io_work priority inversion
events: Reuse value read using READ_ONCE instead of re-reading it
net: ipa: initialize all filter table slots
gen_compile_commands: fix missing 'sys' package
vhost_net: fix OoB on sendmsg() failure.
net/af_unix: fix a data-race in unix_dgram_poll
net: dsa: destroy the phylink instance on any error in dsa_slave_phy_setup
x86/uaccess: Fix 32-bit __get_user_asm_u64() when CC_HAS_ASM_GOTO_OUTPUT=y
tcp: fix tp->undo_retrans accounting in tcp_sacktag_one()
selftest: net: fix typo in altname test
qed: Handle management FW error
udp_tunnel: Fix udp_tunnel_nic work-queue type
dt-bindings: arm: Fix Toradex compatible typo
ibmvnic: check failover_pending in login response
KVM: PPC: Book3S HV: Tolerate treclaim. in fake-suspend mode changing registers
bnxt_en: make bnxt_free_skbs() safe to call after bnxt_free_mem()
net: hns3: pad the short tunnel frame before sending to hardware
net: hns3: change affinity_mask to numa node range
net: hns3: disable mac in flr process
net: hns3: fix the timing issue of VF clearing interrupt sources
mm/memory_hotplug: use "unsigned long" for PFN in zone_for_pfn_range()
dt-bindings: mtd: gpmc: Fix the ECC bytes vs. OOB bytes equation
mfd: db8500-prcmu: Adjust map to reality
PCI: Add ACS quirks for NXP LX2xx0 and LX2xx2 platforms
fuse: fix use after free in fuse_read_interrupt()
PCI: tegra194: Fix handling BME_CHGED event
PCI: tegra194: Fix MSI-X programming
PCI: tegra: Fix OF node reference leak
mfd: Don't use irq_create_mapping() to resolve a mapping
PCI: rcar: Fix runtime PM imbalance in rcar_pcie_ep_probe()
tracing/probes: Reject events which have the same name of existing one
PCI: cadence: Use bitfield for *quirk_retrain_flag* instead of bool
PCI: cadence: Add quirk flag to set minimum delay in LTSSM Detect.Quiet state
PCI: j721e: Add PCIe support for J7200
PCI: j721e: Add PCIe support for AM64
PCI: Add ACS quirks for Cavium multi-function devices
watchdog: Start watchdog in watchdog_set_last_hw_keepalive only if appropriate
octeontx2-af: Add additional register check to rvu_poll_reg()
Set fc_nlinfo in nh_create_ipv4, nh_create_ipv6
net: usb: cdc_mbim: avoid altsetting toggling for Telit LN920
block, bfq: honor already-setup queue merges
PCI: ibmphp: Fix double unmap of io_mem
ethtool: Fix an error code in cxgb2.c
NTB: Fix an error code in ntb_msit_probe()
NTB: perf: Fix an error code in perf_setup_inbuf()
s390/bpf: Fix optimizing out zero-extensions
s390/bpf: Fix 64-bit subtraction of the -0x80000000 constant
s390/bpf: Fix branch shortening during codegen pass
mfd: axp20x: Update AXP288 volatile ranges
backlight: ktd253: Stabilize backlight
PCI: of: Don't fail devm_pci_alloc_host_bridge() on missing 'ranges'
PCI: iproc: Fix BCMA probe resource handling
netfilter: Fix fall-through warnings for Clang
netfilter: nft_ct: protect nft_ct_pcpu_template_refcnt with mutex
KVM: arm64: Restrict IPA size to maximum 48 bits on 4K and 16K page size
PCI: Fix pci_dev_str_match_path() alloc while atomic bug
mfd: tqmx86: Clear GPIO IRQ resource when no IRQ is set
tracing/boot: Fix a hist trigger dependency for boot time tracing
mtd: mtdconcat: Judge callback existence based on the master
mtd: mtdconcat: Check _read, _write callbacks existence before assignment
KVM: arm64: Fix read-side race on updates to vcpu reset state
KVM: arm64: Handle PSCI resets before userspace touches vCPU state
PCI: Sync __pci_register_driver() stub for CONFIG_PCI=n
mtd: rawnand: cafe: Fix a resource leak in the error handling path of 'cafe_nand_probe()'
ARC: export clear_user_page() for modules
perf unwind: Do not overwrite FEATURE_CHECK_LDFLAGS-libunwind-{x86,aarch64}
perf bench inject-buildid: Handle writen() errors
gpio: mpc8xxx: Fix a resources leak in the error handling path of 'mpc8xxx_probe()'
gpio: mpc8xxx: Use 'devm_gpiochip_add_data()' to simplify the code and avoid a leak
net: dsa: tag_rtl4_a: Fix egress tags
selftests: mptcp: clean tmp files in simult_flows
net: hso: add failure handler for add_net_device
net: dsa: b53: Fix calculating number of switch ports
net: dsa: b53: Set correct number of ports in the DSA struct
netfilter: socket: icmp6: fix use-after-scope
fq_codel: reject silly quantum parameters
qlcnic: Remove redundant unlock in qlcnic_pinit_from_rom
ip_gre: validate csum_start only on pull
net: dsa: b53: Fix IMP port setup on BCM5301x
bnxt_en: fix stored FW_PSID version masks
bnxt_en: Fix asic.rev in devlink dev info command
bnxt_en: log firmware debug notifications
bnxt_en: Consolidate firmware reset event logging.
bnxt_en: Convert to use netif_level() helpers.
bnxt_en: Improve logging of error recovery settings information.
bnxt_en: Fix possible unintended driver initiated error recovery
mfd: lpc_sch: Partially revert "Add support for Intel Quark X1000"
mfd: lpc_sch: Rename GPIOBASE to prevent build error
net: renesas: sh_eth: Fix freeing wrong tx descriptor
x86/mce: Avoid infinite loop for copy from user recovery
bnxt_en: Fix error recovery regression
net: dsa: bcm_sf2: Fix array overrun in bcm_sf2_num_active_ports()
Linux 5.10.68
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I542f48f8de516dcabce91d3d399583483aba0da7
commit beafee90ec

124 changed files with 903 additions and 418 deletions

@@ -54,7 +54,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30

@@ -122,7 +122,7 @@ on various other factors also like;
 	so the device should have enough free bytes available its OOB/Spare
 	area to accommodate ECC for entire page. In general following expression
 	helps in determining if given device can accommodate ECC syndrome:
-	"2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
+	"2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
 	where
 		OOBSIZE		number of bytes in OOB/spare area
 		PAGESIZE	number of bytes in main-area of device page

Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 67
+SUBLEVEL = 68
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -1123,7 +1123,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
 	clear_page(to);
 	clear_bit(PG_dc_clean, &page->flags);
 }
-
+EXPORT_SYMBOL(clear_user_page);
 
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall

@@ -510,7 +510,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
 	if (task->thread.sve_state) {
-		memset(task->thread.sve_state, 0, sve_state_size(current));
+		memset(task->thread.sve_state, 0, sve_state_size(task));
 		return;
 	}

@@ -1186,6 +1186,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (copy_from_user(&reg, argp, sizeof(reg)))
 			break;
 
+		/*
+		 * We could owe a reset due to PSCI. Handle the pending reset
+		 * here to ensure userspace register accesses are ordered after
+		 * the reset.
+		 */
+		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+			kvm_reset_vcpu(vcpu);
+
 		if (ioctl == KVM_SET_ONE_REG)
 			r = kvm_arm_set_reg(vcpu, &reg);
 		else

@@ -206,10 +206,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_reset_state reset_state;
 	int ret;
 	bool loaded;
 	u32 pstate;
 
+	mutex_lock(&vcpu->kvm->lock);
+	reset_state = vcpu->arch.reset_state;
+	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+	mutex_unlock(&vcpu->kvm->lock);
+
 	/* Reset PMU outside of the non-preemptible section */
 	kvm_pmu_vcpu_reset(vcpu);
 
@@ -272,8 +278,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	 * Additional reset state handling that PSCI may have imposed on us.
 	 * Must be done after all the sys_reg reset.
 	 */
-	if (vcpu->arch.reset_state.reset) {
-		unsigned long target_pc = vcpu->arch.reset_state.pc;
+	if (reset_state.reset) {
+		unsigned long target_pc = reset_state.pc;
 
 		/* Gracefully handle Thumb2 entry point */
 		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
@@ -282,13 +288,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		}
 
 		/* Propagate caller endianness */
-		if (vcpu->arch.reset_state.be)
+		if (reset_state.be)
 			kvm_vcpu_set_be(vcpu);
 
 		*vcpu_pc(vcpu) = target_pc;
-		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
-
-		vcpu->arch.reset_state.reset = false;
+		vcpu_set_reg(vcpu, 0, reset_state.r0);
 	}
 
 	/* Reset timer */
@@ -313,6 +317,14 @@ int kvm_set_ipa_limit(void)
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	parange = cpuid_feature_extract_unsigned_field(mmfr0,
 				ID_AA64MMFR0_PARANGE_SHIFT);
+	/*
+	 * IPA size beyond 48 bits could not be supported
+	 * on either 4K or 16K page size. Hence let's cap
+	 * it to 48 bits, in case it's reported as larger
+	 * on the system.
+	 */
+	if (PAGE_SIZE != SZ_64K)
+		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
 
 	/*
 	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at

@@ -3146,7 +3146,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
 	/* The following code handles the fake_suspend = 1 case */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -PPC_MIN_STKFRM(r1)
+	stdu	r1, -TM_FRAME_SIZE(r1)
 
 	/* Turn on TM. */
 	mfmsr	r8
@@ -3161,10 +3161,42 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	nop
 
+	/*
+	 * It's possible that treclaim. may modify registers, if we have lost
+	 * track of fake-suspend state in the guest due to it using rfscv.
+	 * Save and restore registers in case this occurs.
+	 */
+	mfspr	r3, SPRN_DSCR
+	mfspr	r4, SPRN_XER
+	mfspr	r5, SPRN_AMR
+	/* SPRN_TAR would need to be saved here if the kernel ever used it */
+	mfcr	r12
+	SAVE_NVGPRS(r1)
+	SAVE_GPR(2, r1)
+	SAVE_GPR(3, r1)
+	SAVE_GPR(4, r1)
+	SAVE_GPR(5, r1)
+	stw	r12, 8(r1)
+	std	r1, HSTATE_HOST_R1(r13)
+
 	/* We have to treclaim here because that's the only way to do S->N */
 	li	r3, TM_CAUSE_KVM_RESCHED
 	TRECLAIM(R3)
 
+	GET_PACA(r13)
+	ld	r1, HSTATE_HOST_R1(r13)
+	REST_GPR(2, r1)
+	REST_GPR(3, r1)
+	REST_GPR(4, r1)
+	REST_GPR(5, r1)
+	lwz	r12, 8(r1)
+	REST_NVGPRS(r1)
+	mtspr	SPRN_DSCR, r3
+	mtspr	SPRN_XER, r4
+	mtspr	SPRN_AMR, r5
+	mtcr	r12
+	HMT_MEDIUM
+
 	/*
 	 * We were in fake suspend, so we are not going to save the
 	 * register state as the guest checkpointed state (since
@@ -3192,7 +3224,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	std	r5, VCPU_TFHAR(r9)
 	std	r6, VCPU_TFIAR(r9)
 
-	addi	r1, r1, PPC_MIN_STKFRM
+	addi	r1, r1, TM_FRAME_SIZE
 	ld	r0, PPC_LR_STKOFF(r1)
 	mtlr	r0
 	blr
@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
|
|||
|
||||
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
|
||||
({ \
|
||||
/* Branch instruction needs 6 bytes */ \
|
||||
int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
|
||||
int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
|
||||
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
|
||||
REG_SET_SEEN(b1); \
|
||||
REG_SET_SEEN(b2); \
|
||||
|
|
@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
|||
EMIT4(0xb9080000, dst_reg, src_reg);
|
||||
break;
|
||||
case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
|
||||
if (!imm)
|
||||
break;
|
||||
/* alfi %dst,imm */
|
||||
EMIT6_IMM(0xc20b0000, dst_reg, imm);
|
||||
if (imm != 0) {
|
||||
/* alfi %dst,imm */
|
||||
EMIT6_IMM(0xc20b0000, dst_reg, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
|
||||
|
|
@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
|||
EMIT4(0xb9090000, dst_reg, src_reg);
|
||||
break;
|
||||
case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
|
||||
if (!imm)
|
||||
break;
|
||||
/* alfi %dst,-imm */
|
||||
EMIT6_IMM(0xc20b0000, dst_reg, -imm);
|
||||
if (imm != 0) {
|
||||
/* alfi %dst,-imm */
|
||||
EMIT6_IMM(0xc20b0000, dst_reg, -imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
|
||||
if (!imm)
|
||||
break;
|
||||
/* agfi %dst,-imm */
|
||||
EMIT6_IMM(0xc2080000, dst_reg, -imm);
|
||||
if (imm == -0x80000000) {
|
||||
/* algfi %dst,0x80000000 */
|
||||
EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
|
||||
} else {
|
||||
/* agfi %dst,-imm */
|
||||
EMIT6_IMM(0xc2080000, dst_reg, -imm);
|
||||
}
|
||||
break;
|
||||
/*
|
||||
* BPF_MUL
|
||||
|
|
@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
|||
EMIT4(0xb90c0000, dst_reg, src_reg);
|
||||
break;
|
||||
case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
|
||||
if (imm == 1)
|
||||
break;
|
||||
/* msfi %r5,imm */
|
||||
EMIT6_IMM(0xc2010000, dst_reg, imm);
|
||||
if (imm != 1) {
|
||||
/* msfi %r5,imm */
|
||||
EMIT6_IMM(0xc2010000, dst_reg, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
|
||||
|
|
@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
|||
if (BPF_OP(insn->code) == BPF_MOD)
|
||||
/* lhgi %dst,0 */
|
||||
EMIT4_IMM(0xa7090000, dst_reg, 0);
|
||||
else
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
}
|
||||
/* lhi %w0,0 */
|
||||
|
|
@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
|||
EMIT4(0xb9820000, dst_reg, src_reg);
|
||||
break;
|
||||
case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
|
||||
if (!imm)
|
||||
break;
|
||||
/* xilf %dst,imm */
|
||||
EMIT6_IMM(0xc0070000, dst_reg, imm);
|
||||
if (imm != 0) {
|
||||
/* xilf %dst,imm */
|
||||
EMIT6_IMM(0xc0070000, dst_reg, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
|
||||
|
|
@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
|||
EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
|
||||
break;
|
||||
case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
|
||||
if (imm == 0)
|
||||
break;
|
||||
/* sll %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
|
||||
if (imm != 0) {
|
||||
/* sll %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
|
||||
|
|
@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
|||
EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
|
||||
break;
|
||||
case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
|
||||
if (imm == 0)
|
||||
break;
|
||||
/* srl %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
|
||||
if (imm != 0) {
|
||||
/* srl %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
|
||||
|
|
@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
|||
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
|
||||
break;
|
||||
case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
|
||||
if (imm == 0)
|
||||
break;
|
||||
/* sra %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
|
||||
if (imm != 0) {
|
||||
/* sra %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
|
||||
|
|
|
|||
|
|

@@ -301,8 +301,8 @@ do {									\
 	unsigned int __gu_low, __gu_high;				\
 	const unsigned int __user *__gu_ptr;				\
 	__gu_ptr = (const void __user *)(ptr);				\
-	__get_user_asm(__gu_low, ptr, "l", "=r", label);		\
-	__get_user_asm(__gu_high, ptr+1, "l", "=r", label);		\
+	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
+	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
 	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
 } while (0)
 #else
@ -1241,6 +1241,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
|
|||
|
||||
static void kill_me_now(struct callback_head *ch)
|
||||
{
|
||||
struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
|
||||
|
||||
p->mce_count = 0;
|
||||
force_sig(SIGBUS);
|
||||
}
|
||||
|
||||
|
|
@ -1249,6 +1252,7 @@ static void kill_me_maybe(struct callback_head *cb)
|
|||
struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
|
||||
int flags = MF_ACTION_REQUIRED;
|
||||
|
||||
p->mce_count = 0;
|
||||
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
|
||||
|
||||
if (!p->mce_ripv)
|
||||
|
|
@ -1269,17 +1273,34 @@ static void kill_me_maybe(struct callback_head *cb)
|
|||
}
|
||||
}
|
||||
|
||||
static void queue_task_work(struct mce *m, int kill_it)
|
||||
static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
|
||||
{
|
||||
current->mce_addr = m->addr;
|
||||
current->mce_kflags = m->kflags;
|
||||
current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
|
||||
current->mce_whole_page = whole_page(m);
|
||||
int count = ++current->mce_count;
|
||||
|
||||
if (kill_it)
|
||||
current->mce_kill_me.func = kill_me_now;
|
||||
else
|
||||
current->mce_kill_me.func = kill_me_maybe;
|
||||
/* First call, save all the details */
|
||||
if (count == 1) {
|
||||
current->mce_addr = m->addr;
|
||||
current->mce_kflags = m->kflags;
|
||||
current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
|
||||
current->mce_whole_page = whole_page(m);
|
||||
|
||||
if (kill_current_task)
|
||||
current->mce_kill_me.func = kill_me_now;
|
||||
else
|
||||
current->mce_kill_me.func = kill_me_maybe;
|
||||
}
|
||||
|
||||
/* Ten is likely overkill. Don't expect more than two faults before task_work() */
|
||||
if (count > 10)
|
||||
mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
|
||||
|
||||
/* Second or later call, make sure page address matches the one from first call */
|
||||
if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
|
||||
mce_panic("Consecutive machine checks to different user pages", m, msg);
|
||||
|
||||
/* Do not call task_work_add() more than once */
|
||||
if (count > 1)
|
||||
return;
|
||||
|
||||
task_work_add(current, ¤t->mce_kill_me, TWA_RESUME);
|
||||
}
|
||||
|
|
@ -1427,7 +1448,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
|
|||
/* If this triggers there is no way to recover. Die hard. */
|
||||
BUG_ON(!on_thread_stack() || !user_mode(regs));
|
||||
|
||||
queue_task_work(&m, kill_it);
|
||||
queue_task_work(&m, msg, kill_it);
|
||||
|
||||
} else {
|
||||
/*
|
||||
|
|
@ -1445,7 +1466,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
|
|||
}
|
||||
|
||||
if (m.kflags & MCE_IN_KERNEL_COPYIN)
|
||||
queue_task_work(&m, kill_it);
|
||||
queue_task_work(&m, msg, kill_it);
|
||||
}
|
||||
out:
|
||||
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
|
||||
|
|
|
|||
|
|

@@ -1389,18 +1389,18 @@ int kern_addr_valid(unsigned long addr)
 		return 0;
 
 	p4d = p4d_offset(pgd, addr);
-	if (p4d_none(*p4d))
+	if (!p4d_present(*p4d))
 		return 0;
 
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+	if (!pud_present(*pud))
 		return 0;
 
 	if (pud_large(*pud))
 		return pfn_valid(pud_pfn(*pud));
 
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	if (!pmd_present(*pmd))
 		return 0;
 
 	if (pmd_large(*pmd))

@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
 	int err = 0;
 
 	start = sanitize_phys(start);
-	end = sanitize_phys(end);
+
+	/*
+	 * The end address passed into this function is exclusive, but
+	 * sanitize_phys() expects an inclusive address.
+	 */
+	end = sanitize_phys(end - 1) + 1;
 	if (start >= end) {
 		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
 			start, end - 1, cattr_name(req_type));

@@ -1204,6 +1204,11 @@ static void __init xen_dom0_set_legacy_features(void)
 	x86_platform.legacy.rtc = 1;
 }
 
+static void __init xen_domu_set_legacy_features(void)
+{
+	x86_platform.legacy.rtc = 0;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1356,6 +1361,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
 		add_preferred_console("xenboot", 0, NULL);
 		if (pci_xen)
 			x86_init.pci.arch_init = pci_xen_init;
+		x86_platform.set_legacy_features =
+				xen_domu_set_legacy_features;
 	} else {
 		const struct dom0_vga_console_info *info =
 			(void *)((char *)xen_start_info +

@@ -2526,6 +2526,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
+	/*
+	 * The above assignment schedules the following redirections:
+	 * each time some I/O for bfqq arrives, the process that
+	 * generated that I/O is disassociated from bfqq and
+	 * associated with new_bfqq. Here we increases new_bfqq->ref
+	 * in advance, adding the number of processes that are
+	 * expected to be associated with new_bfqq as they happen to
+	 * issue I/O.
+	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
@@ -2585,6 +2594,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
 
+	/* if a merge has already been setup, then proceed with that first */
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;
+
 	/*
 	 * Do not perform queue merging if the device is non
 	 * rotational and performs internal queueing. In fact, such a
@@ -2639,9 +2652,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
 
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
-
 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;
 

@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
+#include <linux/init.h>
 
 #include <linux/mc146818rtc.h>
 
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
 	const char *file = *(const char **)(tracedata + 2);
 	unsigned int user_hash_value, file_hash_value;
 
+	if (!x86_platform.legacy.rtc)
+		return;
+
 	user_hash_value = user % USERHASH;
 	file_hash_value = hash_string(lineno, file, FILEHASH);
 	set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
 
 static int __init early_resume_init(void)
 {
+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	hash_value_early_read = read_magic_time();
 	register_pm_notifier(&pm_trace_nb);
 	return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
 	unsigned int val = hash_value_early_read;
 	unsigned int user, file, dev;
 
+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	user = val % USERHASH;
 	val = val / USERHASH;
 	file = val % FILEHASH;

@@ -374,7 +374,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
 	    of_device_is_compatible(np, "fsl,ls1088a-gpio"))
 		gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
 
-	ret = gpiochip_add_data(gc, mpc8xxx_gc);
+	ret = devm_gpiochip_add_data(&pdev->dev, gc, mpc8xxx_gc);
 	if (ret) {
 		pr_err("%pOF: GPIO chip registration failed with status %d\n",
 		       np, ret);
@@ -406,6 +406,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
 
 	return 0;
 err:
+	if (mpc8xxx_gc->irq)
+		irq_domain_remove(mpc8xxx_gc->irq);
 	iounmap(mpc8xxx_gc->regs);
 	return ret;
 }
@@ -419,7 +421,6 @@ static int mpc8xxx_remove(struct platform_device *pdev)
 		irq_domain_remove(mpc8xxx_gc->irq);
 	}
 
-	gpiochip_remove(&mpc8xxx_gc->gc);
 	iounmap(mpc8xxx_gc->regs);
 
 	return 0;

@@ -717,7 +717,7 @@ enum amd_hw_ip_block_type {
 	MAX_HWIP
 };
 
-#define HWIP_MAX_INSTANCE	8
+#define HWIP_MAX_INSTANCE	10
 
 struct amd_powerplay {
 	void *pp_handle;

@@ -867,8 +867,14 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
 						      const struct drm_display_mode *mode)
 {
 	struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
+	struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
 
-	return lt9611_mode ? MODE_OK : MODE_BAD;
+	if (!lt9611_mode)
+		return MODE_BAD;
+	else if (lt9611_mode->intfs > 1 && !lt9611->dsi1)
+		return MODE_PANEL;
+	else
+		return MODE_OK;
 }
 
 static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)

@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	if (switch_mmu_context) {
 		struct etnaviv_iommu_context *old_context = gpu->mmu_context;
 
-		etnaviv_iommu_context_get(mmu_context);
-		gpu->mmu_context = mmu_context;
+		gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
 		etnaviv_iommu_context_put(old_context);
 	}
 

@@ -305,8 +305,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
 		list_del(&mapping->obj_node);
 	}
 
-	etnaviv_iommu_context_get(mmu_context);
-	mapping->context = mmu_context;
+	mapping->context = etnaviv_iommu_context_get(mmu_context);
 	mapping->use = 1;
 
 	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,

@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		goto err_submit_objects;
 
 	submit->ctx = file->driver_priv;
-	etnaviv_iommu_context_get(submit->ctx->mmu);
-	submit->mmu_context = submit->ctx->mmu;
+	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
 	submit->exec_state = args->exec_state;
 	submit->flags = args->flags;
 
@ -561,6 +561,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
|
|||
/* We rely on the GPU running, so program the clock */
|
||||
etnaviv_gpu_update_clock(gpu);
|
||||
|
||||
gpu->fe_running = false;
|
||||
gpu->exec_state = -1;
|
||||
if (gpu->mmu_context)
|
||||
etnaviv_iommu_context_put(gpu->mmu_context);
|
||||
gpu->mmu_context = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -623,19 +629,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
|
|||
VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
|
||||
VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
|
||||
}
|
||||
|
||||
gpu->fe_running = true;
|
||||
}
|
||||
|
||||
static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
|
||||
static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_iommu_context *context)
|
||||
{
|
||||
u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
|
||||
&gpu->mmu_context->cmdbuf_mapping);
|
||||
u16 prefetch;
|
||||
u32 address;
|
||||
|
||||
/* setup the MMU */
|
||||
etnaviv_iommu_restore(gpu, gpu->mmu_context);
|
||||
etnaviv_iommu_restore(gpu, context);
|
||||
|
||||
/* Start command processor */
|
||||
prefetch = etnaviv_buffer_init(gpu);
|
||||
address = etnaviv_cmdbuf_get_va(&gpu->buffer,
|
||||
&gpu->mmu_context->cmdbuf_mapping);
|
||||
|
||||
etnaviv_gpu_start_fe(gpu, address, prefetch);
|
||||
}
|
||||
|
|
@ -814,7 +824,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
|||
/* Now program the hardware */
|
||||
mutex_lock(&gpu->lock);
|
||||
etnaviv_gpu_hw_init(gpu);
|
||||
gpu->exec_state = -1;
|
||||
mutex_unlock(&gpu->lock);
|
||||
|
||||
pm_runtime_mark_last_busy(gpu->dev);
|
||||
|
|
@ -1039,8 +1048,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
|
|||
spin_unlock(&gpu->event_spinlock);
|
||||
|
||||
etnaviv_gpu_hw_init(gpu);
|
||||
gpu->exec_state = -1;
|
||||
gpu->mmu_context = NULL;
|
||||
|
||||
mutex_unlock(&gpu->lock);
|
||||
pm_runtime_mark_last_busy(gpu->dev);
|
||||
|
|
@ -1352,14 +1359,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
|
|||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (!gpu->mmu_context) {
|
||||
etnaviv_iommu_context_get(submit->mmu_context);
|
||||
gpu->mmu_context = submit->mmu_context;
|
||||
etnaviv_gpu_start_fe_idleloop(gpu);
|
||||
} else {
|
||||
etnaviv_iommu_context_get(gpu->mmu_context);
|
||||
submit->prev_mmu_context = gpu->mmu_context;
|
||||
}
|
||||
if (!gpu->fe_running)
|
||||
etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
|
||||
|
||||
if (submit->prev_mmu_context)
|
||||
etnaviv_iommu_context_put(submit->prev_mmu_context);
|
||||
submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
|
||||
|
||||
if (submit->nr_pmrs) {
|
||||
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
|
||||
|
|
@ -1561,7 +1566,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
|
|||
|
||||
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
|
||||
{
|
||||
if (gpu->initialized && gpu->mmu_context) {
|
||||
if (gpu->initialized && gpu->fe_running) {
|
||||
/* Replace the last WAIT with END */
|
||||
mutex_lock(&gpu->lock);
|
||||
etnaviv_buffer_end(gpu);
|
||||
|
|
@ -1574,8 +1579,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
|
|||
*/
|
||||
etnaviv_gpu_wait_idle(gpu, 100);
|
||||
|
||||
etnaviv_iommu_context_put(gpu->mmu_context);
|
||||
gpu->mmu_context = NULL;
|
||||
gpu->fe_running = false;
|
||||
}
|
||||
|
||||
gpu->exec_state = -1;
|
||||
|
|
@ -1723,6 +1727,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
|
|||
etnaviv_gpu_hw_suspend(gpu);
|
||||
#endif
|
||||
|
||||
if (gpu->mmu_context)
|
||||
etnaviv_iommu_context_put(gpu->mmu_context);
|
||||
|
||||
if (gpu->initialized) {
|
||||
etnaviv_cmdbuf_free(&gpu->buffer);
|
||||
etnaviv_iommu_global_fini(gpu);
|
||||
|
|
|
|||
|
|

@@ -101,6 +101,7 @@ struct etnaviv_gpu {
 	struct workqueue_struct *wq;
 	struct drm_gpu_scheduler sched;
 	bool initialized;
+	bool fe_running;
 
 	/* 'ring'-buffer: */
 	struct etnaviv_cmdbuf buffer;

@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
 	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 	u32 pgtable;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	/* set base addresses */
 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);

@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
 	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
 		return;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
 				(u32)v2_context->mtlb_dma,
 				(u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
 	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
 		return;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
 		  lower_32_bits(context->global->v2.pta_dma));
 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,

@@ -197,6 +197,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
 	 */
 	list_for_each_entry_safe(m, n, &list, scan_node) {
 		etnaviv_iommu_remove_mapping(context, m);
+		etnaviv_iommu_context_put(m->context);
 		m->context = NULL;
 		list_del_init(&m->mmu_node);
 		list_del_init(&m->scan_node);

@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
 struct etnaviv_iommu_context *
 etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
 			   struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
 {
 	kref_get(&ctx->refcount);
+	return ctx;
 }
 void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,

@@ -1122,7 +1122,7 @@ static int cdn_dp_suspend(struct device *dev)
 	return ret;
 }
 
-static int cdn_dp_resume(struct device *dev)
+static __maybe_unused int cdn_dp_resume(struct device *dev)
 {
 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
 

@@ -493,7 +493,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
 		if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
 			line += 1;
 
-		handle_nested_irq(irq_create_mapping(ab8500->domain, line));
+		handle_nested_irq(irq_find_mapping(ab8500->domain, line));
 	}
 
 	return 0;

@@ -125,12 +125,13 @@ static const struct regmap_range axp288_writeable_ranges[] = {
 
 static const struct regmap_range axp288_volatile_ranges[] = {
 	regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
+	regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT),
 	regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
 	regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
 	regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
 	regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
 	regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
-	regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
+	regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE),
 	regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L),
 	regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG),
 };

@@ -1622,22 +1622,20 @@ static long round_clock_rate(u8 clock, unsigned long rate)
 }
 
 static const unsigned long db8500_armss_freqs[] = {
-	200000000,
-	400000000,
-	800000000,
+	199680000,
+	399360000,
+	798720000,
 	998400000
 };
 
 /* The DB8520 has slightly higher ARMSS max frequency */
 static const unsigned long db8520_armss_freqs[] = {
-	200000000,
-	400000000,
-	800000000,
+	199680000,
+	399360000,
+	798720000,
 	1152000000
 };
 
-
-
 static long round_armss_rate(unsigned long rate)
 {
 	unsigned long freq = 0;
@ -22,13 +22,10 @@
|
|||
#define SMBASE 0x40
|
||||
#define SMBUS_IO_SIZE 64
|
||||
|
||||
#define GPIOBASE 0x44
|
||||
#define GPIO_BASE 0x44
|
||||
#define GPIO_IO_SIZE 64
|
||||
#define GPIO_IO_SIZE_CENTERTON 128
|
||||
|
||||
/* Intel Quark X1000 GPIO IRQ Number */
|
||||
#define GPIO_IRQ_QUARK_X1000 9
|
||||
|
||||
#define WDTBASE 0x84
|
||||
#define WDT_IO_SIZE 64
|
||||
|
||||
|
|
@ -43,30 +40,25 @@ struct lpc_sch_info {
|
|||
unsigned int io_size_smbus;
|
||||
unsigned int io_size_gpio;
|
||||
unsigned int io_size_wdt;
|
||||
int irq_gpio;
|
||||
};
|
||||
|
||||
static struct lpc_sch_info sch_chipset_info[] = {
|
||||
[LPC_SCH] = {
|
||||
.io_size_smbus = SMBUS_IO_SIZE,
|
||||
.io_size_gpio = GPIO_IO_SIZE,
|
||||
.irq_gpio = -1,
|
||||
},
|
||||
[LPC_ITC] = {
|
||||
.io_size_smbus = SMBUS_IO_SIZE,
|
||||
.io_size_gpio = GPIO_IO_SIZE,
|
||||
.io_size_wdt = WDT_IO_SIZE,
|
||||
.irq_gpio = -1,
|
||||
},
|
||||
[LPC_CENTERTON] = {
|
||||
.io_size_smbus = SMBUS_IO_SIZE,
|
||||
.io_size_gpio = GPIO_IO_SIZE_CENTERTON,
|
||||
.io_size_wdt = WDT_IO_SIZE,
|
||||
.irq_gpio = -1,
|
||||
},
|
||||
[LPC_QUARK_X1000] = {
|
||||
.io_size_gpio = GPIO_IO_SIZE,
|
||||
.irq_gpio = GPIO_IRQ_QUARK_X1000,
|
||||
.io_size_wdt = WDT_IO_SIZE,
|
||||
},
|
||||
};
|
||||
|
|
@ -113,13 +105,13 @@ static int lpc_sch_get_io(struct pci_dev *pdev, int where, const char *name,
|
|||
}
|
||||
|
||||
static int lpc_sch_populate_cell(struct pci_dev *pdev, int where,
|
||||
const char *name, int size, int irq,
|
||||
int id, struct mfd_cell *cell)
|
||||
const char *name, int size, int id,
|
||||
struct mfd_cell *cell)
|
||||
{
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
res = devm_kcalloc(&pdev->dev, 2, sizeof(*res), GFP_KERNEL);
|
||||
res = devm_kzalloc(&pdev->dev, sizeof(*res), GFP_KERNEL);
|
||||
if (!res)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
@ -135,18 +127,6 @@ static int lpc_sch_populate_cell(struct pci_dev *pdev, int where,
|
|||
cell->ignore_resource_conflicts = true;
|
||||
cell->id = id;
|
||||
|
||||
/* Check if we need to add an IRQ resource */
|
||||
if (irq < 0)
|
||||
return 0;
|
||||
|
||||
res++;
|
||||
|
||||
res->start = irq;
|
||||
res->end = irq;
|
||||
res->flags = IORESOURCE_IRQ;
|
||||
|
||||
cell->num_resources++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -158,15 +138,15 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
|||
int ret;
|
||||
|
||||
ret = lpc_sch_populate_cell(dev, SMBASE, "isch_smbus",
|
||||
info->io_size_smbus, -1,
|
||||
info->io_size_smbus,
|
||||
id->device, &lpc_sch_cells[cells]);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (ret == 0)
|
||||
cells++;
|
||||
|
||||
ret = lpc_sch_populate_cell(dev, GPIOBASE, "sch_gpio",
|
||||
info->io_size_gpio, info->irq_gpio,
|
||||
ret = lpc_sch_populate_cell(dev, GPIO_BASE, "sch_gpio",
|
||||
info->io_size_gpio,
|
||||
id->device, &lpc_sch_cells[cells]);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
|
@ -174,7 +154,7 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
|||
cells++;
|
||||
|
||||
ret = lpc_sch_populate_cell(dev, WDTBASE, "ie6xx_wdt",
|
||||
info->io_size_wdt, -1,
|
||||
info->io_size_wdt,
|
||||
id->device, &lpc_sch_cells[cells]);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
|
|
|||
|
|

@@ -1091,7 +1091,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
 
 	if (variant->id_val == STMPE801_ID ||
 	    variant->id_val == STMPE1600_ID) {
-		int base = irq_create_mapping(stmpe->domain, 0);
+		int base = irq_find_mapping(stmpe->domain, 0);
 
 		handle_nested_irq(base);
 		return IRQ_HANDLED;
@@ -1119,7 +1119,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
 	while (status) {
 		int bit = __ffs(status);
 		int line = bank * 8 + bit;
-		int nestedirq = irq_create_mapping(stmpe->domain, line);
+		int nestedirq = irq_find_mapping(stmpe->domain, line);
 
 		handle_nested_irq(nestedirq);
 		status &= ~(1 << bit);

@@ -187,7 +187,7 @@ again:
 
 	while (status) {
 		int bit = __ffs(status);
-		int virq = irq_create_mapping(tc3589x->domain, bit);
+		int virq = irq_find_mapping(tc3589x->domain, bit);
 
 		handle_nested_irq(virq);
 		status &= ~(1 << bit);

@@ -209,6 +209,8 @@ static int tqmx86_probe(struct platform_device *pdev)
 
 		/* Assumes the IRQ resource is first. */
 		tqmx_gpio_resources[0].start = gpio_irq;
+	} else {
+		tqmx_gpio_resources[0].flags = 0;
 	}
 
 	ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id);

@@ -154,7 +154,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data)
 	struct wm8994 *wm8994 = data;
 
 	while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio))
-		handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0));
+		handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0));
 
 	return IRQ_HANDLED;
 }
@ -641,6 +641,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
|
|||
int i;
|
||||
size_t size;
|
||||
struct mtd_concat *concat;
|
||||
struct mtd_info *subdev_master = NULL;
|
||||
uint32_t max_erasesize, curr_erasesize;
|
||||
int num_erase_region;
|
||||
int max_writebufsize = 0;
|
||||
|
|
@ -679,18 +680,24 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
|
|||
concat->mtd.subpage_sft = subdev[0]->subpage_sft;
|
||||
concat->mtd.oobsize = subdev[0]->oobsize;
|
||||
concat->mtd.oobavail = subdev[0]->oobavail;
|
||||
if (subdev[0]->_writev)
|
||||
|
||||
subdev_master = mtd_get_master(subdev[0]);
|
||||
if (subdev_master->_writev)
|
||||
concat->mtd._writev = concat_writev;
|
||||
if (subdev[0]->_read_oob)
|
||||
if (subdev_master->_read_oob)
|
||||
concat->mtd._read_oob = concat_read_oob;
|
||||
if (subdev[0]->_write_oob)
|
||||
if (subdev_master->_write_oob)
|
||||
concat->mtd._write_oob = concat_write_oob;
|
||||
if (subdev[0]->_block_isbad)
|
||||
if (subdev_master->_block_isbad)
|
||||
concat->mtd._block_isbad = concat_block_isbad;
|
||||
if (subdev[0]->_block_markbad)
|
||||
if (subdev_master->_block_markbad)
|
||||
concat->mtd._block_markbad = concat_block_markbad;
|
||||
if (subdev[0]->_panic_write)
|
||||
if (subdev_master->_panic_write)
|
||||
concat->mtd._panic_write = concat_panic_write;
|
||||
if (subdev_master->_read)
|
||||
concat->mtd._read = concat_read;
|
||||
if (subdev_master->_write)
|
||||
concat->mtd._write = concat_write;
|
||||
|
||||
concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
|
||||
|
||||
|
|
@ -721,14 +728,22 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
|
|||
subdev[i]->flags & MTD_WRITEABLE;
|
||||
}
|
||||
|
||||
subdev_master = mtd_get_master(subdev[i]);
|
||||
concat->mtd.size += subdev[i]->size;
|
||||
concat->mtd.ecc_stats.badblocks +=
|
||||
subdev[i]->ecc_stats.badblocks;
|
||||
if (concat->mtd.writesize != subdev[i]->writesize ||
|
||||
concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
|
||||
concat->mtd.oobsize != subdev[i]->oobsize ||
|
||||
!concat->mtd._read_oob != !subdev[i]->_read_oob ||
|
||||
!concat->mtd._write_oob != !subdev[i]->_write_oob) {
|
||||
!concat->mtd._read_oob != !subdev_master->_read_oob ||
|
||||
!concat->mtd._write_oob != !subdev_master->_write_oob) {
|
||||
/*
|
||||
* Check against subdev[i] for data members, because
|
||||
* subdev's attributes may be different from master
|
||||
* mtd device. Check against subdev's master mtd
|
||||
* device for callbacks, because the existence of
|
||||
* subdev's callbacks is decided by master mtd device.
|
||||
*/
|
||||
kfree(concat);
|
||||
printk("Incompatible OOB or ECC data on \"%s\"\n",
|
||||
subdev[i]->name);
|
||||
|
|
@ -744,8 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
|
|||
concat->mtd.name = name;
|
||||
|
||||
concat->mtd._erase = concat_erase;
|
||||
concat->mtd._read = concat_read;
|
||||
concat->mtd._write = concat_write;
|
||||
concat->mtd._sync = concat_sync;
|
||||
concat->mtd._lock = concat_lock;
|
||||
concat->mtd._unlock = concat_unlock;
|
||||
|
|
|
|||
|
|

@@ -751,7 +751,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
 			  "CAFE NAND", mtd);
 	if (err) {
 		dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
-		goto out_ior;
+		goto out_free_rs;
 	}
 
 	/* Disable master reset, enable NAND clock */
@@ -795,6 +795,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
 	/* Disable NAND IRQ in global IRQ mask register */
 	cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
 	free_irq(pdev->irq, mtd);
+ out_free_rs:
+	free_rs(cafe->rs);
  out_ior:
 	pci_iounmap(pdev, cafe->mmio);
  out_free_mtd:
@ -1083,7 +1083,7 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
|
|||
u8 reg, val, off;
|
||||
|
||||
/* Override the port settings */
|
||||
if (port == dev->cpu_port) {
|
||||
if (port == dev->imp_port) {
|
||||
off = B53_PORT_OVERRIDE_CTRL;
|
||||
val = PORT_OVERRIDE_EN;
|
||||
} else {
|
||||
|
|
@ -1107,7 +1107,7 @@ static void b53_force_port_config(struct b53_device *dev, int port,
|
|||
u8 reg, val, off;
|
||||
|
||||
/* Override the port settings */
|
||||
if (port == dev->cpu_port) {
|
||||
if (port == dev->imp_port) {
|
||||
off = B53_PORT_OVERRIDE_CTRL;
|
||||
val = PORT_OVERRIDE_EN;
|
||||
} else {
|
||||
|
|
@ -1175,7 +1175,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
|
|||
b53_force_link(dev, port, phydev->link);
|
||||
|
||||
if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
|
||||
if (port == 8)
|
||||
if (port == dev->imp_port)
|
||||
off = B53_RGMII_CTRL_IMP;
|
||||
else
|
||||
off = B53_RGMII_CTRL_P(port);
|
||||
|
|
@ -2238,6 +2238,7 @@ struct b53_chip_data {
|
|||
const char *dev_name;
|
||||
u16 vlans;
|
||||
u16 enabled_ports;
|
||||
u8 imp_port;
|
||||
u8 cpu_port;
|
||||
u8 vta_regs[3];
|
||||
u8 arl_bins;
|
||||
|
|
@ -2262,6 +2263,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0x1f,
|
||||
.arl_bins = 2,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 5,
|
||||
.cpu_port = B53_CPU_PORT_25,
|
||||
.duplex_reg = B53_DUPLEX_STAT_FE,
|
||||
},
|
||||
|
|
@ -2272,6 +2274,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0x1f,
|
||||
.arl_bins = 2,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 5,
|
||||
.cpu_port = B53_CPU_PORT_25,
|
||||
.duplex_reg = B53_DUPLEX_STAT_FE,
|
||||
},
|
||||
|
|
@ -2282,6 +2285,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0x1f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
|
|
@ -2295,6 +2299,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0x1f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
|
|
@ -2308,6 +2313,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0x1f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS_9798,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
|
|
@ -2321,6 +2327,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0x7f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS_9798,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
|
|
@ -2335,6 +2342,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
|
|
@ -2347,6 +2355,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0xff,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
|
|
@ -2360,6 +2369,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0x1ff,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
|
|
@ -2373,6 +2383,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0, /* pdata must provide them */
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS_63XX,
|
||||
.duplex_reg = B53_DUPLEX_STAT_63XX,
|
||||
|
|
@ -2386,6 +2397,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.enabled_ports = 0x1f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2399,6 +2411,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2412,6 +2425,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2425,6 +2439,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2438,6 +2453,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2451,6 +2467,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2464,6 +2481,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x103,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2477,6 +2495,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2490,6 +2509,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 256,
.imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2515,6 +2535,7 @@ static int b53_switch_init(struct b53_device *dev)
dev->vta_regs[1] = chip->vta_regs[1];
dev->vta_regs[2] = chip->vta_regs[2];
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
dev->imp_port = chip->imp_port;
dev->cpu_port = chip->cpu_port;
dev->num_vlans = chip->vlans;
dev->num_arl_bins = chip->arl_bins;
@@ -2556,9 +2577,10 @@ static int b53_switch_init(struct b53_device *dev)
dev->cpu_port = 5;
}

/* cpu port is always last */
dev->num_ports = dev->cpu_port + 1;
dev->enabled_ports |= BIT(dev->cpu_port);
dev->num_ports = fls(dev->enabled_ports);

dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);

/* Include non standard CPU port built-in PHYs to be probed */
if (is539x(dev) || is531x5(dev)) {
@@ -2604,7 +2626,6 @@ struct b53_device *b53_switch_alloc(struct device *base,
return NULL;

ds->dev = base;
ds->num_ports = DSA_MAX_PORTS;

dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
if (!dev)

@@ -122,6 +122,7 @@ struct b53_device {

/* used ports mask */
u16 enabled_ports;
unsigned int imp_port;
unsigned int cpu_port;

/* connect specific data */
@@ -38,7 +38,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port, count = 0;

for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_cpu_port(ds, port))
continue;
if (priv->port_sts[port].enabled)

@@ -1225,7 +1225,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,

/* SR-IOV capability was enabled but there are no VFs*/
if (iov->total == 0) {
err = -EINVAL;
err = 0;
goto failed;
}
@@ -272,6 +272,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
};

@@ -1304,8 +1305,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
} else {
tpa_info->hash_type = PKT_HASH_TYPE_NONE;
tpa_info->gso_type = 0;
if (netif_msg_rx_err(bp))
netdev_warn(bp->dev, "TPA packet without valid hash\n");
netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
@@ -2081,10 +2081,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
if (netif_msg_hw(bp))
netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
data1, data2);
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
char *fatal_str = "non-fatal";

if (!bp->fw_health)
goto async_event_process_exit;

@@ -2096,42 +2095,57 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
netdev_warn(bp->dev, "Firmware fatal reset event received\n");
fatal_str = "fatal";
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
} else {
netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
bp->fw_reset_max_dsecs * 100);
}
netif_warn(bp, hw, bp->dev,
"Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
fatal_str, data1, data2,
bp->fw_reset_min_dsecs * 100,
bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
break;
}
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;

if (!fw_health)
goto async_event_process_exit;

fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
if (!fw_health->enabled)
if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
fw_health->enabled = false;
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[0]\n");
break;

if (netif_msg_drv(bp))
netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
fw_health->enabled, fw_health->master,
bnxt_fw_health_readl(bp,
BNXT_FW_RESET_CNT_REG),
bnxt_fw_health_readl(bp,
BNXT_FW_HEALTH_REG));
}
fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
fw_health->tmr_counter = fw_health->tmr_multiplier;
fw_health->last_fw_heartbeat =
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
if (!fw_health->enabled)
fw_health->last_fw_heartbeat =
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
fw_health->master, fw_health->last_fw_reset_cnt,
bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
if (!fw_health->enabled) {
/* Make sure tmr_counter is set and visible to
* bnxt_health_check() before setting enabled to true.
*/
smp_wmb();
fw_health->enabled = true;
}
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
netif_notice(bp, hw, bp->dev,
"Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
data1, data2);
goto async_event_process_exit;
case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
struct bnxt_rx_ring_info *rxr;
u16 grp_idx;
@@ -2591,6 +2605,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
int j;

if (!txr->tx_buf_ring)
continue;

for (j = 0; j < max_idx;) {
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb;
@@ -2675,6 +2692,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
}

skip_rx_tpa_free:
if (!rxr->rx_buf_ring)
goto skip_rx_buf_free;

for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
dma_addr_t mapping = rx_buf->mapping;
@@ -2697,6 +2717,11 @@ skip_rx_tpa_free:
kfree(data);
}
}

skip_rx_buf_free:
if (!rxr->rx_agg_ring)
goto skip_rx_agg_free;

for (i = 0; i < max_agg_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page;
@@ -2713,6 +2738,8 @@ skip_rx_tpa_free:

__free_page(page);
}

skip_rx_agg_free:
if (rxr->rx_page) {
__free_page(rxr->rx_page);
rxr->rx_page = NULL;
@@ -10719,6 +10746,8 @@ static void bnxt_fw_health_check(struct bnxt *bp)
if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;

/* Make sure it is enabled before checking the tmr_counter. */
smp_rmb();
if (fw_health->tmr_counter) {
fw_health->tmr_counter--;
return;
@@ -11623,6 +11652,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
dev_close(bp->dev);
}

if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
bp->fw_health->enabled) {
bp->fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
}
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
@@ -452,7 +452,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
return rc;

ver_resp = &bp->ver_resp;
sprintf(buf, "%X", ver_resp->chip_rev);
sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
if (rc)
@@ -474,8 +474,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
u32 ver = nvm_cfg_ver.vu32;

sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
ver & 0xf);
sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
ver & 0xff);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
buf);

@@ -1870,9 +1870,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
{
struct bnxt_flower_indr_block_cb_priv *cb_priv;

/* All callback list access should be protected by RTNL. */
ASSERT_RTNL();

list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
if (cb_priv->tunnel_netdev == netdev)
return cb_priv;
@@ -1153,6 +1153,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!adapter->registered_device_map) {
pr_err("%s: could not register any net devices\n",
pci_name(pdev));
err = -EINVAL;
goto out_release_adapter_res;
}

@@ -60,6 +60,7 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
#define HNS3_OUTER_VLAN_TAG 2

#define HNS3_MIN_TX_LEN 33U
#define HNS3_MIN_TUN_PKT_LEN 65U

/* hns3_pci_tbl - PCI Device ID Table
 *
@@ -913,8 +914,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
l4.tcp->doff);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
return skb_checksum_help(skb);
if (hns3_tunnel_csum_bug(skb)) {
int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);

return ret ? ret : skb_checksum_help(skb);
}

hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -1463,9 +1463,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)

static int hclge_configure(struct hclge_dev *hdev)
{
const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
unsigned int i;
int ret;
int node, ret;

ret = hclge_get_cfg(hdev, &cfg);
if (ret)
@@ -1526,11 +1527,12 @@ static int hclge_configure(struct hclge_dev *hdev)

hclge_init_kdump_kernel_config(hdev);

/* Set the init affinity based on pci func number */
i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
&hdev->affinity_mask);
/* Set the affinity based on numa node */
node = dev_to_node(&hdev->pdev->dev);
if (node != NUMA_NO_NODE)
cpumask = cpumask_of_node(node);

cpumask_copy(&hdev->affinity_mask, cpumask);

return ret;
}
@@ -7003,11 +7005,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_clear_arfs_rules(handle);
spin_unlock_bh(&hdev->fd_rule_lock);

/* If it is not PF reset, the firmware will disable the MAC,
/* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
hdev->reset_type != HNAE3_FUNC_RESET) {
hdev->reset_type != HNAE3_FUNC_RESET &&
hdev->reset_type != HNAE3_FLR_RESET) {
hclge_mac_stop_phy(hdev);
hclge_update_link_status(hdev);
return;

@@ -2352,6 +2352,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)

hclgevf_enable_vector(&hdev->misc_vector, false);
event_cause = hclgevf_check_evt_cause(hdev, &clearval);
if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
hclgevf_clear_event_cause(hdev, clearval);

switch (event_cause) {
case HCLGEVF_VECTOR0_EVENT_RST:
@@ -2364,10 +2366,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}

if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
hclgevf_clear_event_cause(hdev, clearval);
if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
hclgevf_enable_vector(&hdev->misc_vector, true);
}

return IRQ_HANDLED;
}
@@ -4478,6 +4478,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}

if (adapter->failover_pending) {
adapter->init_done_rc = -EAGAIN;
netdev_dbg(netdev, "Failover pending, ignoring login response\n");
complete(&adapter->init_done);
/* login response buffer will be released on reset */
return 0;
}

netdev->mtu = adapter->req_mtu - ETH_HLEN;

netdev_dbg(adapter->netdev, "Login Response Buffer:\n");

@@ -84,7 +84,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
*/
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
unsigned long timeout = jiffies + usecs_to_jiffies(10000);
unsigned long timeout = jiffies + usecs_to_jiffies(20000);
bool twice = false;
void __iomem *reg;
u64 reg_val;

@@ -99,6 +100,15 @@ again:
usleep_range(1, 5);
goto again;
}
/* In scenarios where CPU is scheduled out before checking
* 'time_before' (above) and gets scheduled in such that
* jiffies are beyond timeout value, then check again if HW is
* done with the operation in the meantime.
*/
if (!twice) {
twice = true;
goto again;
}
return -EBUSY;
}
@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
return err;
goto err_cancel_work;
}

err = mlx5_fw_tracer_create_mkey(tracer);
@@ -1031,6 +1031,7 @@ err_notifier_unregister:
mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
cancel_work_sync(&tracer->read_fw_strings_work);
return err;
}

@@ -298,9 +298,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
{
struct mlx5e_rep_indr_block_priv *cb_priv;

/* All callback list access should be protected by RTNL. */
ASSERT_RTNL();

list_for_each_entry(cb_priv,
&rpriv->uplink_priv.tc_indr_block_priv_list,
list)

@@ -1675,14 +1675,13 @@ static int build_match_list(struct match_list *match_head,

curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
rcu_read_unlock();
free_match_list(match_head, ft_locked);
err = -ENOMEM;
goto out;
return -ENOMEM;
}
curr_match->g = g;
list_add_tail(&curr_match->list, &match_head->list);
}
out:
rcu_read_unlock();
return err;
}

@@ -1732,9 +1732,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;

/* All callback list access should be protected by RTNL. */
ASSERT_RTNL();

list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
if (cb_priv->netdev == netdev)
return cb_priv;
@@ -3376,6 +3376,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_nvm_image_att *p_image_att)
{
enum nvm_image_type type;
int rc;
u32 i;

/* Translate image_id into MFW definitions */
@@ -3404,7 +3405,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
return -EINVAL;
}

qed_mcp_nvm_info_populate(p_hwfn);
rc = qed_mcp_nvm_info_populate(p_hwfn);
if (rc)
return rc;

for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
if (type == p_hwfn->nvm_info.image_att[i].image_type)
break;

@@ -439,7 +439,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
msleep(20);

qlcnic_rom_unlock(adapter);
/* big hammer don't reset CAM block on reset */
QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);

@@ -119,6 +119,8 @@
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
#define MAC_SM_RST 0x0002 /* MAC status machine reset */
#define MD_CSC 0xb6 /* MDC speed control register */
#define MD_CSC_DEFAULT 0x0030
#define MAC_ID 0xBE /* Identifier register */

#define TX_DCNT 0x80 /* TX descriptor count */
@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
{
void __iomem *ioaddr = lp->base;
int limit = MAC_DEF_TIMEOUT;
u16 cmd;
u16 cmd, md_csc;

md_csc = ioread16(ioaddr + MD_CSC);
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);

/* Restore MDIO clock frequency */
if (md_csc != MD_CSC_DEFAULT)
iowrite16(md_csc, ioaddr + MD_CSC);
}

static void r6040_init_mac_regs(struct net_device *dev)
@@ -2533,6 +2533,7 @@ static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
else
txdesc->status |= cpu_to_le32(TD_TACT);

wmb(); /* cur_tx must be incremented after TACT bit was set */
mdp->cur_tx++;

if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))

@@ -451,7 +451,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
* table region determines the number of entries it has.
*/
if (filter) {
count = hweight32(ipa->filter_map);
/* Include one extra "slot" to hold the filter map itself */
count = 1 + hweight32(ipa->filter_map);
hash_count = hash_mem->size ? count : 0;
} else {
count = mem->size / IPA_TABLE_ENTRY_SIZE;

@@ -5,7 +5,7 @@
#ifndef HAVE_DP83640_REGISTERS
#define HAVE_DP83640_REGISTERS

#define PAGE0 0x0000
/* #define PAGE0 0x0000 */
#define PHYCR2 0x001c /* PHY Control Register 2 */

#define PAGE4 0x0004

@@ -653,6 +653,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},

/* Telit LN920 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1061, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},

/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
@@ -2537,13 +2537,17 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
if (!hso_net->mux_bulk_tx_buf)
goto err_free_tx_urb;

add_net_device(hso_dev);
result = add_net_device(hso_dev);
if (result) {
dev_err(&interface->dev, "Failed to add net device\n");
goto err_free_tx_buf;
}

/* registering our net device */
result = register_netdev(net);
if (result) {
dev_err(&interface->dev, "Failed to register device\n");
goto err_free_tx_buf;
goto err_rmv_ndev;
}

hso_log_port(hso_dev);
@@ -2552,8 +2556,9 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,

return hso_dev;

err_free_tx_buf:
err_rmv_ndev:
remove_net_device(hso_dev);
err_free_tx_buf:
kfree(hso_net->mux_bulk_tx_buf);
err_free_tx_urb:
usb_free_urb(hso_net->mux_bulk_tx_urb);

@@ -369,8 +369,10 @@ static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
if (ret)
goto remove_dbgfs;

if (!nm->isr_ctx)
if (!nm->isr_ctx) {
ret = -ENOMEM;
goto remove_dbgfs;
}

ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);

@@ -598,6 +598,7 @@ static int perf_setup_inbuf(struct perf_peer *peer)
return -ENOMEM;
}
if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
ret = -EINVAL;
dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
goto err_free_inbuf;
}
@@ -273,6 +273,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
} while (ret > 0);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
!llist_empty(&queue->req_list) || queue->more_requests;
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -293,9 +299,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
} else if (last) {
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

if (last && nvme_tcp_queue_more(queue))
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
@@ -890,12 +897,6 @@ done:
read_unlock_bh(&sk->sk_callback_lock);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
!llist_empty(&queue->req_list) || queue->more_requests;
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
queue->request = NULL;
@@ -1132,8 +1133,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
pending = true;
else if (unlikely(result < 0))
break;
} else
pending = !llist_empty(&queue->req_list);
}

result = nvme_tcp_try_recv(queue);
if (result > 0)
@@ -25,6 +25,7 @@
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
#define J7200_LINK_DOWN BIT(10)

#define J721E_PCIE_USER_CMD_STATUS 0x4
#define LINK_TRAINING_ENABLE BIT(0)
@@ -54,6 +55,7 @@ struct j721e_pcie {
struct cdns_pcie *cdns_pcie;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
};

enum j721e_pcie_mode {
@@ -63,7 +65,10 @@ enum j721e_pcie_mode {

struct j721e_pcie_data {
enum j721e_pcie_mode mode;
bool quirk_retrain_flag;
unsigned int quirk_retrain_flag:1;
unsigned int quirk_detect_quiet_flag:1;
u32 linkdown_irq_regfield;
unsigned int byte_access_allowed:1;
};

static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -95,12 +100,12 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
u32 reg;

reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
if (!(reg & LINK_DOWN))
if (!(reg & pcie->linkdown_irq_regfield))
return IRQ_NONE;

dev_err(dev, "LINK DOWN!\n");

j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
return IRQ_HANDLED;
}

@@ -109,7 +114,7 @@ static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
u32 reg;

reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
reg |= LINK_DOWN;
reg |= pcie->linkdown_irq_regfield;
j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
}

@@ -272,10 +277,36 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
static const struct j721e_pcie_data j721e_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
.byte_access_allowed = false,
.linkdown_irq_regfield = LINK_DOWN,
};

static const struct j721e_pcie_data j721e_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = LINK_DOWN,
};

static const struct j721e_pcie_data j7200_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_detect_quiet_flag = true,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.byte_access_allowed = true,
};

static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
};

static const struct j721e_pcie_data am64_pcie_rc_data = {
.mode = PCI_MODE_RC,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.byte_access_allowed = true,
};

static const struct j721e_pcie_data am64_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = J7200_LINK_DOWN,
};

static const struct of_device_id of_j721e_pcie_match[] = {
@@ -287,6 +318,22 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,j721e-pcie-ep",
.data = &j721e_pcie_ep_data,
},
{
.compatible = "ti,j7200-pcie-host",
.data = &j7200_pcie_rc_data,
},
{
.compatible = "ti,j7200-pcie-ep",
.data = &j7200_pcie_ep_data,
},
{
.compatible = "ti,am64-pcie-host",
.data = &am64_pcie_rc_data,
},
{
.compatible = "ti,am64-pcie-ep",
.data = &am64_pcie_ep_data,
},
{},
};

@@ -319,6 +366,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)

pcie->dev = dev;
pcie->mode = mode;
pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;

base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
if (IS_ERR(base))
@@ -378,9 +426,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}

bridge->ops = &cdns_ti_pcie_host_ops;
if (!data->byte_access_allowed)
bridge->ops = &cdns_ti_pcie_host_ops;
rc = pci_host_bridge_priv(bridge);
rc->quirk_retrain_flag = data->quirk_retrain_flag;
rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;

cdns_pcie = &rc->pcie;
cdns_pcie->dev = dev;
@@ -430,6 +480,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_get_sync;
}
ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;

cdns_pcie = &ep->pcie;
cdns_pcie->dev = dev;
@@ -578,6 +578,10 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
/* Reserve region 0 for IRQs */
set_bit(0, &ep->ob_region_map);

if (ep->quirk_detect_quiet_flag)
cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

spin_lock_init(&ep->lock);

return 0;

@@ -497,6 +497,9 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;

if (rc->quirk_detect_quiet_flag)
cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);

ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");

@@ -7,6 +7,22 @@

#include "pcie-cadence.h"

void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
{
u32 delay = 0x3;
u32 ltssm_control_cap;

/*
* Set the LTSSM Detect Quiet state min. delay to 2ms.
*/
ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
ltssm_control_cap = ((ltssm_control_cap &
~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));

cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}

void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size)

@@ -189,6 +189,14 @@
/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)

/* LTSSM Capabilities register */
#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
(((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)

enum cdns_pcie_rp_bar {
RP_BAR_UNDEFINED = -1,
RP_BAR0,
@@ -291,6 +299,7 @@ struct cdns_pcie {
* @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
* available
* @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
@@ -299,7 +308,8 @@ struct cdns_pcie_rc {
u32 vendor_id;
u32 device_id;
bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
bool quirk_retrain_flag;
unsigned int quirk_retrain_flag:1;
unsigned int quirk_detect_quiet_flag:1;
};

/**
@@ -330,6 +340,7 @@ struct cdns_pcie_epf {
* registers fields (RMW) accessible by both remote RC and EP to
* minimize time between read and write
* @epf: Structure to hold info about endpoint function
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
*/
struct cdns_pcie_ep {
struct cdns_pcie pcie;
@@ -344,6 +355,7 @@ struct cdns_pcie_ep {
/* protect writing to PCI_STATUS while raising legacy interrupts */
spinlock_t lock;
struct cdns_pcie_epf *epf;
unsigned int quirk_detect_quiet_flag:1;
};

@@ -504,6 +516,9 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return 0;
}
#endif

void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);

void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size);
@@ -515,19 +515,19 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
struct tegra_pcie_dw *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
u32 val, tmp;
u32 status_l0, status_l1, link_status;

val = appl_readl(pcie, APPL_INTR_STATUS_L0);
if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
pex_ep_event_hot_rst_done(pcie);

if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
tmp = appl_readl(pcie, APPL_LINK_STATUS);
if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
link_status = appl_readl(pcie, APPL_LINK_STATUS);
if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
dev_dbg(pcie->dev, "Link is up with Host\n");
dw_pcie_ep_linkup(ep);
}
@@ -536,11 +536,11 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
spurious = 0;
}

if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
return IRQ_WAKE_THREAD;

spurious = 0;
@@ -548,8 +548,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)

if (spurious) {
dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
val);
appl_writel(pcie, val, APPL_INTR_STATUS_L0);
status_l0);
appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
}

return IRQ_HANDLED;
@@ -1778,7 +1778,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

ret = dw_pcie_ep_init_complete(ep);
@@ -2160,13 +2160,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;

rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
if (IS_ERR(rp->base)) {
err = PTR_ERR(rp->base);
goto err_node_put;
}

label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
if (!label) {
dev_err(dev, "failed to create reset GPIO label\n");
return -ENOMEM;
err = -ENOMEM;
goto err_node_put;
}

/*
@@ -2184,7 +2186,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
} else {
dev_err(dev, "failed to get reset GPIO: %ld\n",
PTR_ERR(rp->reset_gpio));
return PTR_ERR(rp->reset_gpio);
err = PTR_ERR(rp->reset_gpio);
goto err_node_put;
}
}
@@ -35,7 +35,6 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
LIST_HEAD(resources);
struct pci_host_bridge *bridge;
int ret;

@@ -60,19 +59,16 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
pcie->mem.name = "PCIe MEM space";
pcie->mem.flags = IORESOURCE_MEM;
pci_add_resource(&resources, &pcie->mem);
pci_add_resource(&bridge->windows, &pcie->mem);
ret = devm_request_pci_bus_resources(dev, &bridge->windows);
if (ret)
return ret;

pcie->map_irq = iproc_pcie_bcma_map_irq;

ret = iproc_pcie_setup(pcie, &resources);
if (ret) {
dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&resources);
return ret;
}

bcma_set_drvdata(bdev, pcie);
return 0;

return iproc_pcie_setup(pcie, &bridge->windows);
}

static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
@@ -492,9 +492,9 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
pcie->dev = dev;

pm_runtime_enable(dev);
err = pm_runtime_get_sync(dev);
err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "pm_runtime_get_sync failed\n");
dev_err(dev, "pm_runtime_resume_and_get failed\n");
goto err_pm_disable;
}
@@ -40,9 +40,6 @@ ibmphp:

* The return value of pci_hp_register() is not checked.

* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
and once more in the error path of its caller ibmphp_access_ebda().

* The various slot data structures are difficult to follow and need to be
simplified. A lot of functions are too large and too complex, they need
to be broken up into smaller, manageable pieces. Negative examples are

@@ -714,8 +714,7 @@ static int __init ebda_rsrc_controller(void)
/* init hpc structure */
hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
if (!hpc_ptr) {
rc = -ENOMEM;
goto error_no_hpc;
return -ENOMEM;
}
hpc_ptr->ctlr_id = ctlr_id;
hpc_ptr->ctlr_relative_id = ctlr;
@@ -910,8 +909,6 @@ error:
kfree(tmp_slot);
error_no_slot:
free_ebda_hpc(hpc_ptr);
error_no_hpc:
iounmap(io_mem);
return rc;
}
@@ -295,7 +295,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
/* Check for ranges property */
err = of_pci_range_parser_init(&parser, dev_node);
if (err)
goto failed;
return 0;

dev_dbg(dev, "Parsing ranges property...\n");
for_each_of_pci_range(&parser, &range) {

@@ -272,7 +272,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,

*endptr = strchrnul(path, ';');

wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
if (!wpath)
return -ENOMEM;
@@ -4626,6 +4626,18 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}

/*
* Each of these NXP Root Ports is in a Root Complex with a unique segment
* number and does provide isolation features to disable peer transactions
* and validate bus numbers in requests, but does not provide an ACS
* capability.
*/
static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
return pci_acs_ctrl_enabled(acs_flags,
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}

static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
{
if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
@@ -4852,6 +4864,10 @@ static const struct pci_dev_acs_enabled {
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
/* Cavium ThunderX */
{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
/* Cavium multi-function devices */
{ PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
/* APM X-Gene */
{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
/* Ampere Computing */
@@ -4872,6 +4888,39 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
/* NXP root ports, xx=16, 12, or 08 cores */
/* LX2xx0A : without security features + CAN-FD */
{ PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
/* LX2xx0C : security features + CAN-FD */
{ PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
/* LX2xx0E : security features + CAN */
{ PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
/* LX2xx0N : without security features + CAN */
{ PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
/* LX2xx2A : without security features + CAN-FD */
{ PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
/* LX2xx2C : security features + CAN-FD */
{ PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
/* LX2xx2E : security features + CAN */
{ PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
/* LX2xx2N : without security features + CAN */
{ PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
{ PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
/* Zhaoxin Root/Downstream Ports */
{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
{ 0 }
@@ -5346,7 +5395,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);

/*
* Create device link for NVIDIA GPU with integrated USB xHCI Host
* Create device link for GPUs with integrated USB xHCI Host
* controller to VGA.
*/
static void quirk_gpu_usb(struct pci_dev *usb)
@@ -5355,9 +5404,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);

/*
* Create device link for NVIDIA GPU with integrated Type-C UCSI controller
* Create device link for GPUs with integrated Type-C UCSI controller
* to VGA. Currently there is no class code defined for UCSI device over PCI
* so using UNKNOWN class for now and it will be updated when UCSI
* over PCI gets a class code.
@@ -5370,6 +5421,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_UNKNOWN, 8,
quirk_gpu_usb_typec_ucsi);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_SERIAL_UNKNOWN, 8,
quirk_gpu_usb_typec_ucsi);

/*
* Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
@@ -40,13 +40,14 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
sclp.has_sipl = !!(sccb->cbl & 0x4000);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
if (sccb->cpuoff > 134)
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
if (sccb->cpuoff > 137)
sclp.has_sipl = !!(sccb->cbl & 0x4000);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
@@ -466,7 +466,7 @@ static void vhost_tx_batch(struct vhost_net *net,
.num = nvq->batched_xdp,
.ptr = nvq->xdp,
};
int err;
int i, err;

if (nvq->batched_xdp == 0)
goto signal_used;
@@ -475,6 +475,15 @@ static void vhost_tx_batch(struct vhost_net *net,
err = sock->ops->sendmsg(sock, msghdr, 0);
if (unlikely(err < 0)) {
vq_err(&nvq->vq, "Fail to batch sending packets\n");

/* free pages owned by XDP; since this is an unlikely error path,
* keep it simple and avoid more complex bulk update for the
* used pages
*/
for (i = 0; i < nvq->batched_xdp; ++i)
put_page(virt_to_head_page(nvq->xdp[i].data));
nvq->batched_xdp = 0;
nvq->done_idx = 0;
return;
}
@@ -25,6 +25,7 @@

#define KTD253_T_LOW_NS (200 + 10) /* Additional 10ns as safety factor */
#define KTD253_T_HIGH_NS (200 + 10) /* Additional 10ns as safety factor */
#define KTD253_T_OFF_CRIT_NS 100000 /* 100 us, now it doesn't look good */
#define KTD253_T_OFF_MS 3

struct ktd253_backlight {
@@ -34,13 +35,50 @@ struct ktd253_backlight {
u16 ratio;
};

static void ktd253_backlight_set_max_ratio(struct ktd253_backlight *ktd253)
{
gpiod_set_value_cansleep(ktd253->gpiod, 1);
ndelay(KTD253_T_HIGH_NS);
/* We always fall back to this when we power on */
}

static int ktd253_backlight_stepdown(struct ktd253_backlight *ktd253)
{
/*
* These GPIO operations absolutely can NOT sleep so no _cansleep
* suffixes, and no using GPIO expanders on slow buses for this!
*
* The maximum number of cycles of the loop is 32 so the time taken
* should nominally be:
* (T_LOW_NS + T_HIGH_NS + loop_time) * 32
*
* Architectures do not always support ndelay() and we will get a few us
* instead. If we get to a critical time limit an interrupt has likely
* occured in the low part of the loop and we need to restart from the
* top so we have the backlight in a known state.
*/
u64 ns;

ns = ktime_get_ns();
gpiod_set_value(ktd253->gpiod, 0);
ndelay(KTD253_T_LOW_NS);
gpiod_set_value(ktd253->gpiod, 1);
ns = ktime_get_ns() - ns;
if (ns >= KTD253_T_OFF_CRIT_NS) {
dev_err(ktd253->dev, "PCM on backlight took too long (%llu ns)\n", ns);
return -EAGAIN;
}
ndelay(KTD253_T_HIGH_NS);
return 0;
}

static int ktd253_backlight_update_status(struct backlight_device *bl)
{
struct ktd253_backlight *ktd253 = bl_get_data(bl);
int brightness = backlight_get_brightness(bl);
u16 target_ratio;
u16 current_ratio = ktd253->ratio;
unsigned long flags;
int ret;

dev_dbg(ktd253->dev, "new brightness/ratio: %d/32\n", brightness);

@@ -62,37 +100,34 @@ static int ktd253_backlight_update_status(struct backlight_device *bl)
}

if (current_ratio == 0) {
gpiod_set_value_cansleep(ktd253->gpiod, 1);
ndelay(KTD253_T_HIGH_NS);
/* We always fall back to this when we power on */
ktd253_backlight_set_max_ratio(ktd253);
current_ratio = KTD253_MAX_RATIO;
}

/*
* WARNING:
* The loop to set the correct current level is performed
* with interrupts disabled as it is timing critical.
* The maximum number of cycles of the loop is 32
* so the time taken will be (T_LOW_NS + T_HIGH_NS + loop_time) * 32,
*/
local_irq_save(flags);
while (current_ratio != target_ratio) {
/*
* These GPIO operations absolutely can NOT sleep so no
* _cansleep suffixes, and no using GPIO expanders on
* slow buses for this!
*/
gpiod_set_value(ktd253->gpiod, 0);
ndelay(KTD253_T_LOW_NS);
gpiod_set_value(ktd253->gpiod, 1);
ndelay(KTD253_T_HIGH_NS);
/* After 1/32 we loop back to 32/32 */
if (current_ratio == KTD253_MIN_RATIO)
ret = ktd253_backlight_stepdown(ktd253);
if (ret == -EAGAIN) {
/*
* Something disturbed the backlight setting code when
* running so we need to bring the PWM back to a known
* state. This shouldn't happen too much.
*/
gpiod_set_value_cansleep(ktd253->gpiod, 0);
msleep(KTD253_T_OFF_MS);
ktd253_backlight_set_max_ratio(ktd253);
current_ratio = KTD253_MAX_RATIO;
else
} else if (current_ratio == KTD253_MIN_RATIO) {
/* After 1/32 we loop back to 32/32 */
current_ratio = KTD253_MAX_RATIO;
} else {
current_ratio--;
}
}
local_irq_restore(flags);
ktd253->ratio = current_ratio;

dev_dbg(ktd253->dev, "new ratio set to %d/32\n", target_ratio);
@@ -1164,7 +1164,10 @@ int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,

wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms));

return __watchdog_ping(wdd);
if (watchdog_hw_running(wdd) && handle_boot_enabled)
return __watchdog_ping(wdd);

return 0;
}
EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
@@ -3019,6 +3019,29 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
*/
fs_info->compress_type = BTRFS_COMPRESS_ZLIB;

/*
* Flag our filesystem as having big metadata blocks if they are bigger
* than the page size
*/
if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
btrfs_info(fs_info,
"flagging fs with big metadata feature");
features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
}

/* Set up fs_info before parsing mount options */
nodesize = btrfs_super_nodesize(disk_super);
sectorsize = btrfs_super_sectorsize(disk_super);
stripesize = sectorsize;
fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

/* Cache block sizes */
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
fs_info->stripesize = stripesize;

ret = btrfs_parse_options(fs_info, options, sb->s_flags);
if (ret) {
err = ret;
@@ -3045,28 +3068,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
btrfs_info(fs_info, "has skinny extents");

/*
* flag our filesystem as having big metadata blocks if
* they are bigger than the page size
*/
if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
btrfs_info(fs_info,
"flagging fs with big metadata feature");
features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
}

nodesize = btrfs_super_nodesize(disk_super);
sectorsize = btrfs_super_sectorsize(disk_super);
stripesize = sectorsize;
fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

/* Cache block sizes */
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
fs_info->stripesize = stripesize;

/*
* mixed block groups end up with duplicate but slightly offset
* extent buffers for the same range. It leads to corruptions
@@ -289,10 +289,10 @@ void fuse_request_end(struct fuse_req *req)

/*
* test_and_set_bit() implies smp_mb() between bit
* changing and below intr_entry check. Pairs with
* changing and below FR_INTERRUPTED check. Pairs with
* smp_mb() from queue_interrupt().
*/
if (!list_empty(&req->intr_entry)) {
if (test_bit(FR_INTERRUPTED, &req->flags)) {
spin_lock(&fiq->lock);
list_del_init(&req->intr_entry);
spin_unlock(&fiq->lock);
@@ -3206,12 +3206,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
ret = nr;
break;
}
if (!iov_iter_is_bvec(iter)) {
iov_iter_advance(iter, nr);
} else {
req->rw.len -= nr;
req->rw.addr += nr;
}
ret += nr;
if (nr != iovec.iov_len)
break;
req->rw.len -= nr;
req->rw.addr += nr;
iov_iter_advance(iter, nr);
}

return ret;
@@ -361,8 +361,8 @@ extern void sparse_remove_section(struct mem_section *ms,
unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
unsigned long nr_pages);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
unsigned long start_pfn, unsigned long nr_pages);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */

@@ -1737,8 +1737,9 @@ static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
static inline int __pci_register_driver(struct pci_driver *drv,
struct module *owner)
static inline int __must_check __pci_register_driver(struct pci_driver *drv,
struct module *owner,
const char *mod_name)
{ return 0; }
static inline int pci_register_driver(struct pci_driver *drv)
{ return 0; }

@@ -2478,7 +2478,8 @@
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101

#define PCI_VENDOR_ID_FREESCALE 0x1957
#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */
#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */
#define PCI_DEVICE_ID_MPC8308 0xc006
#define PCI_DEVICE_ID_MPC8315E 0x00b4
#define PCI_DEVICE_ID_MPC8315 0x00b5
@@ -1375,6 +1375,7 @@ struct task_struct {
mce_whole_page : 1,
__mce_reserved : 62;
struct callback_head mce_kill_me;
int mce_count;
#endif
ANDROID_VENDOR_DATA_ARRAY(1, 64);
ANDROID_OEM_DATA_ARRAY(1, 32);

@@ -1915,7 +1915,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(next->prev, newsk);
WRITE_ONCE(prev->next, newsk);
list->qlen++;
WRITE_ONCE(list->qlen, list->qlen + 1);
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,

@@ -826,6 +826,8 @@ struct tc_codel_xstats {

/* FQ_CODEL */

#define FQ_CODEL_QUANTUM_MAX (1 << 20)

enum {
TCA_FQ_CODEL_UNSPEC,
TCA_FQ_CODEL_TARGET,
@@ -9974,7 +9974,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 		return;
 
 	if (ifh->nr_file_filters) {
-		mm = get_task_mm(event->ctx->task);
+		mm = get_task_mm(task);
 		if (!mm)
 			goto restart;
 
@@ -205,12 +205,15 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
 			pr_err("Failed to apply filter: %s\n", buf);
 	}
 
-	xbc_node_for_each_array_value(enode, "actions", anode, p) {
-		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
-			pr_err("action string is too long: %s\n", p);
-		else if (trigger_process_regex(file, buf) < 0)
-			pr_err("Failed to apply an action: %s\n", buf);
-	}
+	if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
+		xbc_node_for_each_array_value(enode, "actions", anode, p) {
+			if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+				pr_err("action string is too long: %s\n", p);
+			else if (trigger_process_regex(file, buf) < 0)
+				pr_err("Failed to apply an action: %s\n", buf);
+		}
+	} else if (xbc_node_find_value(enode, "actions", NULL))
+		pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");
 
 	if (xbc_node_find_value(enode, "enable", NULL)) {
 		if (trace_event_enable_disable(file, 1, 0) < 0)
@@ -647,7 +647,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
 	/* Register new event */
 	ret = register_kprobe_event(tk);
 	if (ret) {
-		pr_warn("Failed to register probe event(%d)\n", ret);
+		if (ret == -EEXIST) {
+			trace_probe_log_set_index(0);
+			trace_probe_log_err(0, EVENT_EXIST);
+		} else
+			pr_warn("Failed to register probe event(%d)\n", ret);
 		goto end;
 	}
 
@@ -1029,11 +1029,36 @@ error:
 	return ret;
 }
 
+static struct trace_event_call *
+find_trace_event_call(const char *system, const char *event_name)
+{
+	struct trace_event_call *tp_event;
+	const char *name;
+
+	list_for_each_entry(tp_event, &ftrace_events, list) {
+		if (!tp_event->class->system ||
+		    strcmp(system, tp_event->class->system))
+			continue;
+		name = trace_event_name(tp_event);
+		if (!name || strcmp(event_name, name))
+			continue;
+		return tp_event;
+	}
+
+	return NULL;
+}
+
 int trace_probe_register_event_call(struct trace_probe *tp)
 {
 	struct trace_event_call *call = trace_probe_event_call(tp);
 	int ret;
 
+	lockdep_assert_held(&event_mutex);
+
+	if (find_trace_event_call(trace_probe_group_name(tp),
+				  trace_probe_name(tp)))
+		return -EEXIST;
+
 	ret = register_trace_event(&call->event);
 	if (!ret)
 		return -ENODEV;
@@ -398,6 +398,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
 	C(NO_EVENT_NAME,	"Event name is not specified"),		\
 	C(EVENT_TOO_LONG,	"Event name is too long"),		\
 	C(BAD_EVENT_NAME,	"Event name must follow the same rules as C identifiers"), \
+	C(EVENT_EXIST,		"Given group/event name is already used by another event"), \
 	C(RETVAL_ON_PROBE,	"$retval is not available on probe"),	\
 	C(BAD_STACK_NUM,	"Invalid stack number"),		\
 	C(BAD_ARG_NUM,		"Invalid argument number"),		\
@@ -514,7 +514,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 
 	ret = register_uprobe_event(tu);
 	if (ret) {
-		pr_warn("Failed to register probe event(%d)\n", ret);
+		if (ret == -EEXIST) {
+			trace_probe_log_set_index(0);
+			trace_probe_log_err(0, EVENT_EXIST);
+		} else
+			pr_warn("Failed to register probe event(%d)\n", ret);
 		goto end;
 	}
 
Some files were not shown because too many files have changed in this diff.