This is the 4.19.11 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlwai8oACgkQONu9yGCS
aT7mEA//TNX+LqwK18576UwG/nnUmFNlcfFsTycY1cAOSa4PdYdA5yavO8+BvRuf
D8iLvHhaFM7YINvkWy8Yngb4H6MLCBqFYrpPcwIBTf5vPf4i7Ct31X9Jw7Kilv1B
j6sCgGvI7BUjkXAL/rqFLfnlS3qkUcaF3g1OOvyaCcg8A+mxP0mZ+8hWNC6GzVue
If7RzoeQFVSeG38Ji6acrwwfeIGcD4JS8nmHv0ATMRn9QDj/Sc1rHlv6kWxKzrlD
k1876ciCGSdo3LWxqhbNiyL6z1cNL+eYQiof7NCCb2BReVhteT2Wsp7SdwTiA/0V
tT2ZqC7z+qXKrO3O1KbKYO3OVUsV/Au3E9cj2RCripkO4UJdnYMZ43XYaHA4lHsq
NoV0THuaO+O1CqBV0hZC22gIwue1vJ+D5G+jeygOl9bBS5NGEeCentNguRKUVOQC
sybn0x8EQ1ldWUxIYateJ/9NCDDTXsbD/heEtXMGYL48KG3x4ibagysXwWobGspK
uoJKAXD3UtcsLCeJ7p6qlA+hhtUBcFm48m3ADvJ0SYDDFynzAK+BOER39XSUW8AF
u6LAFc0/XV+1Ci+GuIVXL1grIehZyRzqmamqfn+6c9kOnZ1DMEyVPMUdtKyi+c3G
4wvYKK+uf6RBGr2n8Fg9rMaL6ZWOSolj7SV/QBSducKhJS4quYo=
=dihN
-----END PGP SIGNATURE-----

Merge 4.19.11 into android-4.19

Changes in 4.19.11
    sched/pelt: Fix warning and clean up IRQ PELT config
    scsi: raid_attrs: fix unused variable warning
    staging: olpc_dcon: add a missing dependency
    slimbus: ngd: mark PM functions as __maybe_unused
    i2c: aspeed: fix build warning
    ARM: dts: qcom-apq8064-arrow-sd-600eval fix graph_endpoint warning
    drm/msm: fix address space warning
    pinctrl: sunxi: a83t: Fix IRQ offset typo for PH11
    aio: fix spectre gadget in lookup_ioctx
    scripts/spdxcheck.py: always open files in binary mode
    fs/iomap.c: get/put the page in iomap_page_create/release()
    userfaultfd: check VM_MAYWRITE was set after verifying the uffd is registered
    arm64: dma-mapping: Fix FORCE_CONTIGUOUS buffer clearing
    block/bio: Do not zero user pages
    ovl: fix decode of dir file handle with multi lower layers
    ovl: fix missing override creds in link of a metacopy upper
    MMC: OMAP: fix broken MMC on OMAP15XX/OMAP5910/OMAP310
    mmc: core: use mrq->sbc when sending CMD23 for RPMB
    mmc: sdhci-omap: Fix DCRC error handling during tuning
    mmc: sdhci: fix the timeout check window for clock and reset
    fuse: continue to send FUSE_RELEASEDIR when FUSE_OPEN returns ENOSYS
    ARM: mmp/mmp2: fix cpu_is_mmp2() on mmp2-dt
    ARM: dts: bcm2837: Fix polarity of wifi reset GPIOs
    dm thin: send event about thin-pool state change _after_ making it
    dm cache metadata: verify cache has blocks in blocks_are_clean_separate_dirty()
    dm: call blk_queue_split() to impose device limits on bios
    tracing: Fix memory leak in create_filter()
    tracing: Fix memory leak in set_trigger_filter()
    tracing: Fix memory leak of instance function hash filters
    media: vb2: don't call __vb2_queue_cancel if vb2_start_streaming failed
    powerpc/msi: Fix NULL pointer access in teardown code
    powerpc: Look for "stdout-path" when setting up legacy consoles
    drm/nouveau/kms: Fix memory leak in nv50_mstm_del()
    drm/nouveau/kms/nv50-: also flush fb writes when rewinding push buffer
    Revert "drm/rockchip: Allow driver to be shutdown on reboot/kexec"
    drm/i915/gvt: Fix tiled memory decoding bug on BDW
    drm/i915/execlists: Apply a full mb before execution for Braswell
    drm/amdgpu/powerplay: Apply avfs cks-off voltages on VI
    drm/amdkfd: add new vega10 pci ids
    drm/amdgpu: add some additional vega10 pci ids
    drm/amdgpu: update smu firmware images for VI variants (v2)
    drm/amdgpu: update SMC firmware image for polaris10 variants
    dm zoned: Fix target BIO completion handling
    x86/build: Fix compiler support check for CONFIG_RETPOLINE
    Linux 4.19.11

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
commit a87fb6b90d

53 changed files with 311 additions and 218 deletions

Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 10
+SUBLEVEL = 11
 EXTRAVERSION =
 NAME = "People's Front"

@@ -31,7 +31,7 @@
     wifi_pwrseq: wifi-pwrseq {
         compatible = "mmc-pwrseq-simple";
-        reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+        reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
     };
 };
@@ -26,7 +26,7 @@
     wifi_pwrseq: wifi-pwrseq {
         compatible = "mmc-pwrseq-simple";
-        reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+        reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
     };
 };

@@ -387,6 +387,11 @@
                 hpd-gpio = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>;
 
                 ports {
+                    port@0 {
+                        endpoint {
+                            remote-endpoint = <&mdp_dtv_out>;
+                        };
+                    };
                     port@1 {
                         endpoint {
                             remote-endpoint = <&hdmi_con>;

@@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
 #define cpu_is_pxa910()    (0)
 #endif
 
-#ifdef CONFIG_CPU_MMP2
+#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
 static inline int cpu_is_mmp2(void)
 {
-    return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+    return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
+        (((mmp_chip_id & 0xfff) == 0x410) ||
+         ((mmp_chip_id & 0xfff) == 0x610));
 }
 #else
 #define cpu_is_mmp2()    (0)

@@ -587,9 +587,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                         prot,
                         __builtin_return_address(0));
     if (addr) {
-        memset(addr, 0, size);
         if (!coherent)
             __dma_flush_area(page_to_virt(page), iosize);
+        memset(addr, 0, size);
     } else {
         iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
         dma_release_from_contiguous(dev, page,

@@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void)
 
     /* Now find out if one of these is out firmware console */
     path = of_get_property(of_chosen, "linux,stdout-path", NULL);
+    if (path == NULL)
+        path = of_get_property(of_chosen, "stdout-path", NULL);
     if (path != NULL) {
         stdout = of_find_node_by_path(path);
         if (stdout)
@@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void)
     /* We are getting a weird phandle from OF ... */
     /* ... So use the full path instead */
     name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+    if (name == NULL)
+        name = of_get_property(of_chosen, "stdout-path", NULL);
     if (name == NULL) {
-        DBG(" no linux,stdout-path !\n");
+        DBG(" no stdout-path !\n");
         return -ENODEV;
     }
     prom_stdout = of_find_node_by_path(name);

@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
 {
     struct pci_controller *phb = pci_bus_to_host(dev->bus);
 
-    phb->controller_ops.teardown_msi_irqs(dev);
+    /*
+     * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
+     * so check the pointer again.
+     */
+    if (phb->controller_ops.teardown_msi_irqs)
+        phb->controller_ops.teardown_msi_irqs(dev);
 }

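Note on the MSI teardown fix above: the guard is the standard "optional callback" pattern. A minimal user-space C sketch of the same idea (all names below are illustrative, nothing here is kernel code):

    #include <stdio.h>

    struct controller_ops {
        void (*teardown)(int dev);   /* optional hook, may legitimately be NULL */
    };

    static void teardown_all(struct controller_ops *ops, int dev)
    {
        if (ops->teardown)           /* check the pointer before calling it */
            ops->teardown(dev);
    }

    int main(void)
    {
        struct controller_ops none = { .teardown = NULL };
        teardown_all(&none, 0);      /* safe: setup bailed out, nothing to tear down */
        printf("NULL callback skipped\n");
        return 0;
    }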
@@ -223,9 +223,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
-ifeq ($(RETPOLINE_CFLAGS),)
-$(error You are building kernel with non-retpoline compiler, please update your compiler.)
-endif
 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
 endif
 
@@ -297,6 +294,14 @@ PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
 
+ifdef CONFIG_RETPOLINE
+ifeq ($(RETPOLINE_CFLAGS),)
+	@echo "You are building kernel with non-retpoline compiler." >&2
+	@echo "Please update your compiler." >&2
+	@false
+endif
+endif
+
 archclean:
 	$(Q)rm -rf $(objtree)/arch/i386
 	$(Q)rm -rf $(objtree)/arch/x86_64

@@ -1262,7 +1262,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         if (ret)
             goto cleanup;
     } else {
-        zero_fill_bio(bio);
+        if (bmd->is_our_pages)
+            zero_fill_bio(bio);
         iov_iter_advance(iter, bio->bi_iter.bi_size);
     }

@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
     case CHIP_TOPAZ:
         if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
             ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
-            ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+            ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+            ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+            ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
             info->is_kicker = true;
             strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
         } else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
         if (type == CGS_UCODE_ID_SMU) {
             if (((adev->pdev->device == 0x67ef) &&
                  ((adev->pdev->revision == 0xe0) ||
-                  (adev->pdev->revision == 0xe2) ||
                   (adev->pdev->revision == 0xe5))) ||
                 ((adev->pdev->device == 0x67ff) &&
                  ((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                   (adev->pdev->revision == 0xff)))) {
                 info->is_kicker = true;
                 strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
-            } else
+            } else if ((adev->pdev->device == 0x67ef) &&
+                       (adev->pdev->revision == 0xe2)) {
+                info->is_kicker = true;
+                strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+            } else {
                 strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+            }
         } else if (type == CGS_UCODE_ID_SMU_SK) {
             strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
         }
@@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                   (adev->pdev->revision == 0xe7) ||
                   (adev->pdev->revision == 0xef))) ||
                 ((adev->pdev->device == 0x6fdf) &&
-                 (adev->pdev->revision == 0xef))) {
+                 ((adev->pdev->revision == 0xef) ||
+                  (adev->pdev->revision == 0xff)))) {
                 info->is_kicker = true;
                 strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
-            } else
+            } else if ((adev->pdev->device == 0x67df) &&
+                       ((adev->pdev->revision == 0xe1) ||
+                        (adev->pdev->revision == 0xf7))) {
+                info->is_kicker = true;
+                strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+            } else {
                 strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+            }
         } else if (type == CGS_UCODE_ID_SMU_SK) {
             strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
         }
         break;
     case CHIP_POLARIS12:
-        strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+        if (((adev->pdev->device == 0x6987) &&
+             ((adev->pdev->revision == 0xc0) ||
+              (adev->pdev->revision == 0xc3))) ||
+            ((adev->pdev->device == 0x6981) &&
+             ((adev->pdev->revision == 0x00) ||
+              (adev->pdev->revision == 0x01) ||
+              (adev->pdev->revision == 0x10)))) {
+            info->is_kicker = true;
+            strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+        } else {
+            strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+        }
         break;
     case CHIP_VEGAM:
         strcpy(fw_name, "amdgpu/vegam_smc.bin");

@@ -761,7 +761,13 @@ static const struct pci_device_id pciidlist[] = {
     {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
     {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
     {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+    {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+    {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+    {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
     {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+    {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+    {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+    {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
     {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
     /* Vega 12 */
     {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},

@@ -326,7 +326,13 @@ static const struct kfd_deviceid supported_devices[] = {
     { 0x6864, &vega10_device_info },    /* Vega10 */
     { 0x6867, &vega10_device_info },    /* Vega10 */
     { 0x6868, &vega10_device_info },    /* Vega10 */
+    { 0x6869, &vega10_device_info },    /* Vega10 */
+    { 0x686A, &vega10_device_info },    /* Vega10 */
+    { 0x686B, &vega10_device_info },    /* Vega10 */
     { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
+    { 0x686D, &vega10_device_info },    /* Vega10 */
+    { 0x686E, &vega10_device_info },    /* Vega10 */
+    { 0x686F, &vega10_device_info },    /* Vega10 */
     { 0x687F, &vega10_device_info },    /* Vega10 */
 };

@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_AgmResetPsm                ((uint16_t) 0x403)
 #define PPSMC_MSG_ReadVftCell                ((uint16_t) 0x404)
 
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage     ((uint16_t) 0x415)
+
 #define PPSMC_MSG_GFX_CU_PG_ENABLE           ((uint16_t) 0x280)
 #define PPSMC_MSG_GFX_CU_PG_DISABLE          ((uint16_t) 0x281)
 #define PPSMC_MSG_GetCurrPkgPwr              ((uint16_t) 0x282)

@@ -1984,6 +1984,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
 
     smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
 
+    /* Apply avfs cks-off voltages to avoid the overshoot
+     * when switching to the highest sclk frequency
+     */
+    if (data->apply_avfs_cks_off_voltage)
+        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
     return 0;
 }

@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");

@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
         plane->bpp = skl_pixel_formats[fmt].bpp;
         plane->drm_format = skl_pixel_formats[fmt].drm_format;
     } else {
-        plane->tiled = !!(val & DISPPLANE_TILED);
+        plane->tiled = val & DISPPLANE_TILED;
         fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
         plane->bpp = bdw_pixel_formats[fmt].bpp;
         plane->drm_format = bdw_pixel_formats[fmt].drm_format;

@@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq)
      * may not be visible to the HW prior to the completion of the UC
      * register write and that we may begin execution from the context
      * before its image is complete leading to invalid PD chasing.
+     *
+     * Furthermore, Braswell, at least, wants a full mb to be sure that
+     * the writes are coherent in memory (visible to the GPU) prior to
+     * execution, and not just visible to other CPUs (as is the result of
+     * wmb).
      */
-    wmb();
+    mb();
     return ce->lrc_desc;
 }

@@ -1962,7 +1962,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
     u32 *dump_addr = NULL;
     u32 status = 0;
     struct dpu_debug_bus_entry *head;
-    phys_addr_t phys = 0;
+    dma_addr_t dma = 0;
     int list_size;
     int i;
     u32 offset;
@@ -2000,7 +2000,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
     if (in_mem) {
         if (!(*dump_mem))
             *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
-                list_size, &phys, GFP_KERNEL);
+                list_size, &dma, GFP_KERNEL);
 
         if (*dump_mem) {
             dump_addr = *dump_mem;
@@ -2101,7 +2101,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
     u32 value, d0, d1;
     unsigned long reg, reg1, reg2;
     struct vbif_debug_bus_entry *head;
-    phys_addr_t phys = 0;
+    dma_addr_t dma = 0;
     int i, list_size = 0;
     void __iomem *mem_base = NULL;
     struct vbif_debug_bus_entry *dbg_bus;
@@ -2151,7 +2151,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
     if (in_mem) {
         if (!(*dump_mem))
             *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
-                list_size, &phys, GFP_KERNEL);
+                list_size, &dma, GFP_KERNEL);
 
         if (*dump_mem) {
             dump_addr = *dump_mem;

@@ -197,6 +197,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 /******************************************************************************
  * EVO channel helpers
  *****************************************************************************/
+static void
+evo_flush(struct nv50_dmac *dmac)
+{
+    /* Push buffer fetches are not coherent with BAR1, we need to ensure
+     * writes have been flushed right through to VRAM before writing PUT.
+     */
+    if (dmac->push.type & NVIF_MEM_VRAM) {
+        struct nvif_device *device = dmac->base.device;
+        nvif_wr32(&device->object, 0x070000, 0x00000001);
+        nvif_msec(device, 2000,
+            if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+                break;
+        );
+    }
+}
+
 u32 *
 evo_wait(struct nv50_dmac *evoc, int nr)
 {
@@ -207,6 +223,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
     mutex_lock(&dmac->lock);
     if (put + nr >= (PAGE_SIZE / 4) - 8) {
         dmac->ptr[put] = 0x20000000;
+        evo_flush(dmac);
 
         nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
         if (nvif_msec(device, 2000,
@@ -229,17 +246,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
 {
     struct nv50_dmac *dmac = evoc;
 
-    /* Push buffer fetches are not coherent with BAR1, we need to ensure
-     * writes have been flushed right through to VRAM before writing PUT.
-     */
-    if (dmac->push.type & NVIF_MEM_VRAM) {
-        struct nvif_device *device = dmac->base.device;
-        nvif_wr32(&device->object, 0x070000, 0x00000001);
-        nvif_msec(device, 2000,
-            if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
-                break;
-        );
-    }
+    evo_flush(dmac);
 
     nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
     mutex_unlock(&dmac->lock);
@@ -1226,6 +1233,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
 {
     struct nv50_mstm *mstm = *pmstm;
     if (mstm) {
+        drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
         kfree(*pmstm);
         *pmstm = NULL;
     }

@@ -442,11 +442,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
     return 0;
 }
 
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
-    rockchip_drm_platform_remove(pdev);
-}
-
 static const struct of_device_id rockchip_drm_dt_ids[] = {
     { .compatible = "rockchip,display-subsystem", },
     { /* sentinel */ },
@@ -456,7 +451,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
 static struct platform_driver rockchip_drm_platform_driver = {
     .probe = rockchip_drm_platform_probe,
     .remove = rockchip_drm_platform_remove,
-    .shutdown = rockchip_drm_platform_shutdown,
     .driver = {
         .name = "rockchip-drm",
         .of_match_table = rockchip_drm_dt_ids,

@@ -555,7 +555,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
     spin_lock(&bus->lock);
 
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
-    if (aspeed_i2c_slave_irq(bus)) {
+    if (IS_ENABLED(CONFIG_I2C_SLAVE) && aspeed_i2c_slave_irq(bus)) {
         dev_dbg(bus->dev, "irq handled by slave.\n");
         ret = true;
         goto out;
@@ -564,7 +564,9 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
 
     ret = aspeed_i2c_master_irq(bus);
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 out:
+#endif
     spin_unlock(&bus->lock);
     return ret ? IRQ_HANDLED : IRQ_NONE;
 }

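Note on the i2c-aspeed warning fix above: IS_ENABLED() lets the compiler see and type-check a branch in every configuration and then discard it as dead code when the option is off, which avoids #ifdef-only warnings such as unused labels. A hedged user-space sketch of the pattern (the IS_ENABLED stand-in and CONFIG_FEATURE_X below are simplified illustrations, not the kernel's real macros):

    #include <stdio.h>

    #define CONFIG_FEATURE_X 1      /* hypothetical config switch */
    #define IS_ENABLED(opt) (opt)   /* simplified stand-in for the kernel macro */

    static int feature_x_irq(void)
    {
        return 1;                   /* pretend the feature handled the event */
    }

    int main(void)
    {
        /* the branch is always compiled, but folded away when the option is 0 */
        if (IS_ENABLED(CONFIG_FEATURE_X) && feature_x_irq())
            printf("handled by feature X\n");
        else
            printf("handled by default path\n");
        return 0;
    }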
@@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
     bool dirty_flag;
     *result = true;
 
+    if (from_cblock(cmd->cache_blocks) == 0)
+        /* Nothing to do */
+        return 0;
+
     r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
                    from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
     if (r) {

@@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
 struct dm_thin_new_mapping;
 
 /*
- * The pool runs in 4 modes.  Ordered in degraded order for comparisons.
+ * The pool runs in various modes.  Ordered in degraded order for comparisons.
 */
 enum pool_mode {
     PM_WRITE,        /* metadata may be changed */
@@ -282,9 +282,38 @@ struct pool {
     mempool_t mapping_pool;
 };
 
-static enum pool_mode get_pool_mode(struct pool *pool);
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
 
+static enum pool_mode get_pool_mode(struct pool *pool)
+{
+    return pool->pf.mode;
+}
+
+static void notify_of_pool_mode_change(struct pool *pool)
+{
+    const char *descs[] = {
+        "write",
+        "out-of-data-space",
+        "read-only",
+        "read-only",
+        "fail"
+    };
+    const char *extra_desc = NULL;
+    enum pool_mode mode = get_pool_mode(pool);
+
+    if (mode == PM_OUT_OF_DATA_SPACE) {
+        if (!pool->pf.error_if_no_space)
+            extra_desc = " (queue IO)";
+        else
+            extra_desc = " (error IO)";
+    }
+
+    dm_table_event(pool->ti->table);
+    DMINFO("%s: switching pool to %s%s mode",
+           dm_device_name(pool->pool_md),
+           descs[(int)mode], extra_desc ? : "");
+}
+
 /*
  * Target context for a pool.
  */
@@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws)
     queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
-static void notify_of_pool_mode_change_to_oods(struct pool *pool);
-
 /*
  * We're holding onto IO to allow userland time to react.  After the
  * timeout either the pool will have been resized (and thus back in
@@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws)
 
     if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
         pool->pf.error_if_no_space = true;
-        notify_of_pool_mode_change_to_oods(pool);
+        notify_of_pool_mode_change(pool);
         error_retry_list_with_code(pool, BLK_STS_NOSPC);
     }
 }
@@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 
 /*----------------------------------------------------------------*/
 
-static enum pool_mode get_pool_mode(struct pool *pool)
-{
-    return pool->pf.mode;
-}
-
-static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
-{
-    dm_table_event(pool->ti->table);
-    DMINFO("%s: switching pool to %s mode",
-           dm_device_name(pool->pool_md), new_mode);
-}
-
-static void notify_of_pool_mode_change_to_oods(struct pool *pool)
-{
-    if (!pool->pf.error_if_no_space)
-        notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
-    else
-        notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
-}
-
 static bool passdown_enabled(struct pool_c *pt)
 {
     return pt->adjusted_pf.discard_passdown;
@@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
     switch (new_mode) {
     case PM_FAIL:
-        if (old_mode != new_mode)
-            notify_of_pool_mode_change(pool, "failure");
         dm_pool_metadata_read_only(pool->pmd);
         pool->process_bio = process_bio_fail;
         pool->process_discard = process_bio_fail;
@@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
     case PM_OUT_OF_METADATA_SPACE:
     case PM_READ_ONLY:
-        if (!is_read_only_pool_mode(old_mode))
-            notify_of_pool_mode_change(pool, "read-only");
         dm_pool_metadata_read_only(pool->pmd);
         pool->process_bio = process_bio_read_only;
         pool->process_discard = process_bio_success;
@@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
          * alarming rate.  Adjust your low water mark if you're
          * frequently seeing this mode.
          */
-        if (old_mode != new_mode)
-            notify_of_pool_mode_change_to_oods(pool);
         pool->out_of_data_space = true;
         pool->process_bio = process_bio_read_only;
         pool->process_discard = process_discard_bio;
@@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
         break;
 
     case PM_WRITE:
-        if (old_mode != new_mode)
-            notify_of_pool_mode_change(pool, "write");
         if (old_mode == PM_OUT_OF_DATA_SPACE)
             cancel_delayed_work_sync(&pool->no_space_timeout);
         pool->out_of_data_space = false;
@@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
      * doesn't cause an unexpected mode transition on resume.
      */
     pt->adjusted_pf.mode = new_mode;
+
+    if (old_mode != new_mode)
+        notify_of_pool_mode_change(pool);
 }
 
 static void abort_transaction(struct pool *pool)

@@ -20,7 +20,6 @@ struct dmz_bioctx {
     struct dm_zone  *zone;
     struct bio      *bio;
     atomic_t        ref;
-    blk_status_t    status;
 };
 
 /*
@@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
 {
     struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 
-    if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
-        bioctx->status = status;
-    bio_endio(bio);
+    if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
+        bio->bi_status = status;
+
+    if (atomic_dec_and_test(&bioctx->ref)) {
+        struct dm_zone *zone = bioctx->zone;
+
+        if (zone) {
+            if (bio->bi_status != BLK_STS_OK &&
+                bio_op(bio) == REQ_OP_WRITE &&
+                dmz_is_seq(zone))
+                set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+            dmz_deactivate_zone(zone);
+        }
+        bio_endio(bio);
+    }
 }
 
 /*
- * Partial clone read BIO completion callback. This terminates the
+ * Completion callback for an internally cloned target BIO. This terminates the
  * target BIO when there are no more references to its context.
  */
-static void dmz_read_bio_end_io(struct bio *bio)
+static void dmz_clone_endio(struct bio *clone)
 {
-    struct dmz_bioctx *bioctx = bio->bi_private;
-    blk_status_t status = bio->bi_status;
+    struct dmz_bioctx *bioctx = clone->bi_private;
+    blk_status_t status = clone->bi_status;
 
-    bio_put(bio);
+    bio_put(clone);
     dmz_bio_endio(bioctx->bio, status);
 }
 
 /*
- * Issue a BIO to a zone. The BIO may only partially process the
+ * Issue a clone of a target BIO. The clone may only partially process the
  * original target BIO.
  */
-static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
-                   struct bio *bio, sector_t chunk_block,
-                   unsigned int nr_blocks)
+static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
+              struct bio *bio, sector_t chunk_block,
+              unsigned int nr_blocks)
 {
     struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-    sector_t sector;
     struct bio *clone;
 
-    /* BIO remap sector */
-    sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-
-    /* If the read is not partial, there is no need to clone the BIO */
-    if (nr_blocks == dmz_bio_blocks(bio)) {
-        /* Setup and submit the BIO */
-        bio->bi_iter.bi_sector = sector;
-        atomic_inc(&bioctx->ref);
-        generic_make_request(bio);
-        return 0;
-    }
-
-    /* Partial BIO: we need to clone the BIO */
     clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
     if (!clone)
        return -ENOMEM;
 
-    /* Setup the clone */
-    clone->bi_iter.bi_sector = sector;
+    bio_set_dev(clone, dmz->dev->bdev);
+    clone->bi_iter.bi_sector =
+        dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
     clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
-    clone->bi_end_io = dmz_read_bio_end_io;
+    clone->bi_end_io = dmz_clone_endio;
     clone->bi_private = bioctx;
 
     bio_advance(bio, clone->bi_iter.bi_size);
 
-    /* Submit the clone */
     atomic_inc(&bioctx->ref);
     generic_make_request(clone);
 
+    if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+        zone->wp_block += nr_blocks;
+
     return 0;
 }
 
@@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
     if (nr_blocks) {
         /* Valid blocks found: read them */
         nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
-        ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+        ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
         if (ret)
             return ret;
         chunk_block += nr_blocks;
@@ -228,25 +228,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
     return 0;
 }
 
-/*
- * Issue a write BIO to a zone.
- */
-static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
-                 struct bio *bio, sector_t chunk_block,
-                 unsigned int nr_blocks)
-{
-    struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-    /* Setup and submit the BIO */
-    bio_set_dev(bio, dmz->dev->bdev);
-    bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-    atomic_inc(&bioctx->ref);
-    generic_make_request(bio);
-
-    if (dmz_is_seq(zone))
-        zone->wp_block += nr_blocks;
-}
-
 /*
  * Write blocks directly in a data zone, at the write pointer.
  * If a buffer zone is assigned, invalidate the blocks written
@@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
         return -EROFS;
 
     /* Submit write */
-    dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
+    ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
+    if (ret)
+        return ret;
 
     /*
      * Validate the blocks in the data zone and invalidate
@@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
         return -EROFS;
 
     /* Submit write */
-    dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+    ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+    if (ret)
+        return ret;
 
     /*
      * Validate the blocks in the buffer zone
@@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
     bioctx->zone = NULL;
     bioctx->bio = bio;
     atomic_set(&bioctx->ref, 1);
-    bioctx->status = BLK_STS_OK;
 
     /* Set the BIO pending in the flush list */
     if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@@ -623,35 +607,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
     return DM_MAPIO_SUBMITTED;
 }
 
-/*
- * Completed target BIO processing.
- */
-static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
-{
-    struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-    if (bioctx->status == BLK_STS_OK && *error)
-        bioctx->status = *error;
-
-    if (!atomic_dec_and_test(&bioctx->ref))
-        return DM_ENDIO_INCOMPLETE;
-
-    /* Done */
-    bio->bi_status = bioctx->status;
-
-    if (bioctx->zone) {
-        struct dm_zone *zone = bioctx->zone;
-
-        if (*error && bio_op(bio) == REQ_OP_WRITE) {
-            if (dmz_is_seq(zone))
-                set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
-        }
-        dmz_deactivate_zone(zone);
-    }
-
-    return DM_ENDIO_DONE;
-}
-
 /*
  * Get zoned device information.
  */
@@ -947,7 +902,6 @@ static struct target_type dmz_type = {
     .ctr         = dmz_ctr,
     .dtr         = dmz_dtr,
     .map         = dmz_map,
-    .end_io      = dmz_end_io,
     .io_hints    = dmz_io_hints,
     .prepare_ioctl   = dmz_prepare_ioctl,
     .postsuspend     = dmz_suspend,

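Note on the dm-zoned completion rework above: the target moves to a reference-counted completion scheme, where every submitted clone holds a reference on the parent BIO's context, the first error is latched, and the parent completes exactly once when the last reference drops. A minimal single-threaded user-space sketch of that shape (names are illustrative; the kernel version latches into bio->bi_status and needs proper ordering between threads):

    #include <stdatomic.h>
    #include <stdio.h>

    struct parent_io {
        atomic_int ref;     /* one reference per outstanding clone, plus the submitter */
        int status;         /* 0 means OK; the first failure wins */
    };

    static void parent_io_endio(struct parent_io *p, int status)
    {
        if (status && !p->status)
            p->status = status;                 /* latch the first error */
        if (atomic_fetch_sub(&p->ref, 1) == 1)  /* last reference dropped */
            printf("parent completes, status=%d\n", p->status);
    }

    int main(void)
    {
        struct parent_io p = { .status = 0 };
        atomic_init(&p.ref, 3);    /* submitter plus two clones */
        parent_io_endio(&p, 0);    /* clone 1 OK */
        parent_io_endio(&p, -5);   /* clone 2 fails */
        parent_io_endio(&p, 0);    /* submitter drops its reference; parent completes */
        return 0;
    }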
@@ -1592,6 +1592,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
         return ret;
     }
 
+    blk_queue_split(md->queue, &bio);
+
     init_clone_info(&ci, md, map, bio);
 
     if (bio->bi_opf & REQ_PREFLUSH) {

@@ -1755,10 +1755,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
         if (ret)
             return ret;
         ret = vb2_start_streaming(q);
-        if (ret) {
-            __vb2_queue_cancel(q);
+        if (ret)
             return ret;
-        }
     }
 
     q->streaming = 1;

@@ -472,7 +472,7 @@ out:
 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                    struct mmc_blk_ioc_data *idata)
 {
-    struct mmc_command cmd = {};
+    struct mmc_command cmd = {}, sbc = {};
     struct mmc_data data = {};
     struct mmc_request mrq = {};
     struct scatterlist sg;
@@ -550,10 +550,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
     }
 
     if (idata->rpmb) {
-        err = mmc_set_blockcount(card, data.blocks,
-            idata->ic.write_flag & (1 << 31));
-        if (err)
-            return err;
+        sbc.opcode = MMC_SET_BLOCK_COUNT;
+        /*
+         * We don't do any blockcount validation because the max size
+         * may be increased by a future standard. We just copy the
+         * 'Reliable Write' bit here.
+         */
+        sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+        sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+        mrq.sbc = &sbc;
     }
 
     if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&

@@ -104,6 +104,7 @@ struct mmc_omap_slot {
     unsigned int        vdd;
     u16         saved_con;
     u16         bus_mode;
+    u16         power_mode;
     unsigned int        fclk_freq;
 
     struct tasklet_struct   cover_tasklet;
@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
     struct mmc_omap_slot *slot = mmc_priv(mmc);
     struct mmc_omap_host *host = slot->host;
     int i, dsor;
-    int clk_enabled;
+    int clk_enabled, init_stream;
 
     mmc_omap_select_slot(slot, 0);
 
@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
     slot->vdd = ios->vdd;
 
     clk_enabled = 0;
+    init_stream = 0;
     switch (ios->power_mode) {
     case MMC_POWER_OFF:
         mmc_omap_set_power(slot, 0, ios->vdd);
@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
     case MMC_POWER_UP:
         /* Cannot touch dsor yet, just power up MMC */
         mmc_omap_set_power(slot, 1, ios->vdd);
+        slot->power_mode = ios->power_mode;
         goto exit;
     case MMC_POWER_ON:
         mmc_omap_fclk_enable(host, 1);
         clk_enabled = 1;
         dsor |= 1 << 11;
+        if (slot->power_mode != MMC_POWER_ON)
+            init_stream = 1;
         break;
     }
+    slot->power_mode = ios->power_mode;
 
     if (slot->bus_mode != ios->bus_mode) {
         if (slot->pdata->set_bus_mode != NULL)
@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
     for (i = 0; i < 2; i++)
         OMAP_MMC_WRITE(host, CON, dsor);
     slot->saved_con = dsor;
-    if (ios->power_mode == MMC_POWER_ON) {
+    if (init_stream) {
         /* worst case at 400kHz, 80 cycles makes 200 microsecs */
         int usecs = 250;
 
@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
     slot->host = host;
     slot->mmc = mmc;
     slot->id = id;
+    slot->power_mode = MMC_POWER_UNDEFINED;
     slot->pdata = &host->pdata->slots[id];
 
     host->slots[id] = slot;

@@ -288,9 +288,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
     struct device *dev = omap_host->dev;
     struct mmc_ios *ios = &mmc->ios;
     u32 start_window = 0, max_window = 0;
+    bool dcrc_was_enabled = false;
     u8 cur_match, prev_match = 0;
     u32 length = 0, max_len = 0;
-    u32 ier = host->ier;
     u32 phase_delay = 0;
     int ret = 0;
     u32 reg;
@@ -317,9 +317,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
      * during the tuning procedure. So disable it during the
      * tuning procedure.
      */
-    ier &= ~SDHCI_INT_DATA_CRC;
-    sdhci_writel(host, ier, SDHCI_INT_ENABLE);
-    sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+    if (host->ier & SDHCI_INT_DATA_CRC) {
+        host->ier &= ~SDHCI_INT_DATA_CRC;
+        dcrc_was_enabled = true;
+    }
 
     while (phase_delay <= MAX_PHASE_DELAY) {
         sdhci_omap_set_dll(omap_host, phase_delay);
@@ -366,6 +367,9 @@ tuning_error:
 
 ret:
     sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+    /* Reenable forbidden interrupt */
+    if (dcrc_was_enabled)
+        host->ier |= SDHCI_INT_DATA_CRC;
     sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
     sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
     return ret;

@@ -193,8 +193,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
     timeout = ktime_add_ms(ktime_get(), 100);
 
     /* hw clears the bit when it's done */
-    while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
-        if (ktime_after(ktime_get(), timeout)) {
+    while (1) {
+        bool timedout = ktime_after(ktime_get(), timeout);
+
+        if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
+            break;
+        if (timedout) {
             pr_err("%s: Reset 0x%x never completed.\n",
                 mmc_hostname(host->mmc), (int)mask);
             sdhci_dumpregs(host);
@@ -1495,9 +1499,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
 
     /* Wait max 20 ms */
     timeout = ktime_add_ms(ktime_get(), 20);
-    while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
-        & SDHCI_CLOCK_INT_STABLE)) {
-        if (ktime_after(ktime_get(), timeout)) {
+    while (1) {
+        bool timedout = ktime_after(ktime_get(), timeout);
+
+        clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+        if (clk & SDHCI_CLOCK_INT_STABLE)
+            break;
+        if (timedout) {
             pr_err("%s: Internal clock never stabilised.\n",
                 mmc_hostname(host->mmc));
             sdhci_dumpregs(host);

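Note on the sdhci timeout fix above: both loops are rewritten so the deadline is sampled before the final register read, and failure is declared only if the condition was still false on that read. This closes the window where the loop could be preempted after reading the clock/reset state but before checking the timeout, and then time out spuriously even though the hardware had become ready. A user-space sketch of the pattern, assuming a pollable ready flag (all names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool hw_ready(void)              /* stand-in for a register read */
    {
        static int polls;
        return ++polls > 3;                 /* becomes ready after a few polls */
    }

    static int wait_for_ready(double timeout_s)
    {
        struct timespec start, now;
        clock_gettime(CLOCK_MONOTONIC, &start);

        while (1) {
            clock_gettime(CLOCK_MONOTONIC, &now);
            bool timedout = (now.tv_sec - start.tv_sec) +
                            (now.tv_nsec - start.tv_nsec) / 1e9 > timeout_s;

            if (hw_ready())                 /* check the condition AFTER sampling the deadline */
                return 0;
            if (timedout)
                return -1;                  /* still not ready on the final read */
        }
    }

    int main(void)
    {
        printf("wait_for_ready: %d\n", wait_for_ready(0.1));
        return 0;
    }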
@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
     SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
           SUNXI_FUNCTION(0x0, "gpio_in"),
           SUNXI_FUNCTION(0x1, "gpio_out"),
-          SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),  /* PH_EINT11 */
+          SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
 };
 
 static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {

@@ -63,8 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
      * emulated RAID devices, so start with SCSI */
     struct raid_internal *i = ac_to_raid_internal(cont);
 
-#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
-    if (scsi_is_sdev_device(dev)) {
+    if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) {
         struct scsi_device *sdev = to_scsi_device(dev);
 
         if (i->f->cookie != sdev->host->hostt)
@@ -72,7 +71,6 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
 
         return i->f->is_raid(dev);
     }
-#endif
     /* FIXME: look at other subsystems too */
     return 0;
 }

@@ -1467,7 +1467,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev)
     return 0;
 }
 
-static int qcom_slim_ngd_runtime_idle(struct device *dev)
+static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev)
 {
     struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
 
@@ -1477,8 +1477,7 @@ static int qcom_slim_ngd_runtime_idle(struct device *dev)
     return -EAGAIN;
 }
 
-#ifdef CONFIG_PM
-static int qcom_slim_ngd_runtime_suspend(struct device *dev)
+static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
 {
     struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
     int ret = 0;
@@ -1491,7 +1490,6 @@ static int qcom_slim_ngd_runtime_suspend(struct device *dev)
 
     return ret;
 }
-#endif
 
 static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
     SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,

@@ -2,6 +2,7 @@ config FB_OLPC_DCON
     tristate "One Laptop Per Child Display CONtroller support"
     depends on OLPC && FB
     depends on I2C
+    depends on BACKLIGHT_LCD_SUPPORT
     depends on (GPIO_CS5535 || GPIO_CS5535=n)
     select BACKLIGHT_CLASS_DEVICE
     help

fs/aio.c
@@ -45,6 +45,7 @@
 
 #include <asm/kmap_types.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 
 #include "internal.h"
 
@@ -1038,6 +1039,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
     if (!table || id >= table->nr)
         goto out;
 
+    id = array_index_nospec(id, table->nr);
     ctx = rcu_dereference(table->table[id]);
     if (ctx && ctx->user_id == ctx_id) {
         if (percpu_ref_tryget_live(&ctx->users))

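Note on the lookup_ioctx fix above: under speculative execution the "id >= table->nr" bounds check may be predicted not-taken, so the table index is additionally clamped with array_index_nospec() before the dereference. A hedged sketch of the underlying idea (this mask trick only shows the shape; the kernel's real array_index_nospec() is written so that the clamp itself stays off the speculative path):

    #include <stddef.h>

    static inline size_t index_nospec_sketch(size_t index, size_t size)
    {
        /* mask is all-ones when index < size, zero otherwise */
        size_t mask = (size_t)0 - (size_t)(index < size);
        return index & mask;    /* out-of-range indexes collapse to 0 */
    }

    int lookup(const int *table, size_t nr, size_t id)
    {
        if (id >= nr)
            return -1;
        id = index_nospec_sketch(id, nr);   /* clamp before the load */
        return table[id];
    }

    int main(void)
    {
        int table[4] = { 10, 20, 30, 40 };
        return lookup(table, 4, 2) == 30 ? 0 : 1;
    }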
@@ -1439,7 +1439,7 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
 
 static int fuse_dir_release(struct inode *inode, struct file *file)
 {
-    fuse_release_common(file, FUSE_RELEASEDIR);
+    fuse_release_common(file, true);
 
     return 0;
 }

|||
|
|
@ -87,12 +87,12 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
|
|||
iput(req->misc.release.inode);
|
||||
}
|
||||
|
||||
static void fuse_file_put(struct fuse_file *ff, bool sync)
|
||||
static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
|
||||
{
|
||||
if (refcount_dec_and_test(&ff->count)) {
|
||||
struct fuse_req *req = ff->reserved_req;
|
||||
|
||||
if (ff->fc->no_open) {
|
||||
if (ff->fc->no_open && !isdir) {
|
||||
/*
|
||||
* Drop the release request when client does not
|
||||
* implement 'open'
|
||||
|
|
@ -245,10 +245,11 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
|
|||
req->in.args[0].value = inarg;
|
||||
}
|
||||
|
||||
void fuse_release_common(struct file *file, int opcode)
|
||||
void fuse_release_common(struct file *file, bool isdir)
|
||||
{
|
||||
struct fuse_file *ff = file->private_data;
|
||||
struct fuse_req *req = ff->reserved_req;
|
||||
int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
|
||||
|
||||
fuse_prepare_release(ff, file->f_flags, opcode);
|
||||
|
||||
|
|
@ -270,7 +271,7 @@ void fuse_release_common(struct file *file, int opcode)
|
|||
* synchronous RELEASE is allowed (and desirable) in this case
|
||||
* because the server can be trusted not to screw up.
|
||||
*/
|
||||
fuse_file_put(ff, ff->fc->destroy_req != NULL);
|
||||
fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
|
||||
}
|
||||
|
||||
static int fuse_open(struct inode *inode, struct file *file)
|
||||
|
|
@ -286,7 +287,7 @@ static int fuse_release(struct inode *inode, struct file *file)
|
|||
if (fc->writeback_cache)
|
||||
write_inode_now(inode, 1);
|
||||
|
||||
fuse_release_common(file, FUSE_RELEASE);
|
||||
fuse_release_common(file, false);
|
||||
|
||||
/* return value is ignored by VFS */
|
||||
return 0;
|
||||
|
|
@ -300,7 +301,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
|
|||
* iput(NULL) is a no-op and since the refcount is 1 and everything's
|
||||
* synchronous, we are fine with not doing igrab() here"
|
||||
*/
|
||||
fuse_file_put(ff, true);
|
||||
fuse_file_put(ff, true, false);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fuse_sync_release);
|
||||
|
||||
|
|
@ -805,7 +806,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
|
|||
put_page(page);
|
||||
}
|
||||
if (req->ff)
|
||||
fuse_file_put(req->ff, false);
|
||||
fuse_file_put(req->ff, false, false);
|
||||
}
|
||||
|
||||
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
|
||||
|
|
@ -1459,7 +1460,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
|
|||
__free_page(req->pages[i]);
|
||||
|
||||
if (req->ff)
|
||||
fuse_file_put(req->ff, false);
|
||||
fuse_file_put(req->ff, false, false);
|
||||
}
|
||||
|
||||
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
|
||||
|
|
@ -1616,7 +1617,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
|
|||
ff = __fuse_write_file_get(fc, fi);
|
||||
err = fuse_flush_times(inode, ff);
|
||||
if (ff)
|
||||
fuse_file_put(ff, 0);
|
||||
fuse_file_put(ff, false, false);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
|
@ -1930,7 +1931,7 @@ static int fuse_writepages(struct address_space *mapping,
|
|||
err = 0;
|
||||
}
|
||||
if (data.ff)
|
||||
fuse_file_put(data.ff, false);
|
||||
fuse_file_put(data.ff, false, false);
|
||||
|
||||
kfree(data.orig_pages);
|
||||
out:
|
||||
|
|
|
|||
|
|
@@ -749,7 +749,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags);
 /**
  * Send RELEASE or RELEASEDIR request
  */
-void fuse_release_common(struct file *file, int opcode);
+void fuse_release_common(struct file *file, bool isdir);
 
 /**
  * Send FSYNC or FSYNCDIR request

@@ -117,6 +117,12 @@ iomap_page_create(struct inode *inode, struct page *page)
     atomic_set(&iop->read_count, 0);
     atomic_set(&iop->write_count, 0);
     bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+
+    /*
+     * migrate_page_move_mapping() assumes that pages with private data have
+     * their count elevated by 1.
+     */
+    get_page(page);
     set_page_private(page, (unsigned long)iop);
     SetPagePrivate(page);
     return iop;
@@ -133,6 +139,7 @@ iomap_page_release(struct page *page)
     WARN_ON_ONCE(atomic_read(&iop->write_count));
     ClearPagePrivate(page);
     set_page_private(page, 0);
+    put_page(page);
     kfree(iop);
 }
 

@@ -652,6 +652,18 @@ static int ovl_symlink(struct inode *dir, struct dentry *dentry,
     return ovl_create_object(dentry, S_IFLNK, 0, link);
 }
 
+static int ovl_set_link_redirect(struct dentry *dentry)
+{
+    const struct cred *old_cred;
+    int err;
+
+    old_cred = ovl_override_creds(dentry->d_sb);
+    err = ovl_set_redirect(dentry, false);
+    revert_creds(old_cred);
+
+    return err;
+}
+
 static int ovl_link(struct dentry *old, struct inode *newdir,
             struct dentry *new)
 {
@@ -672,7 +684,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
         goto out_drop_write;
 
     if (ovl_is_metacopy_dentry(old)) {
-        err = ovl_set_redirect(old, false);
+        err = ovl_set_link_redirect(old);
         if (err)
             goto out_drop_write;
     }

@@ -754,9 +754,8 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
         goto out;
     }
 
-    /* Otherwise, get a connected non-upper dir or disconnected non-dir */
-    if (d_is_dir(origin.dentry) &&
-        (origin.dentry->d_flags & DCACHE_DISCONNECTED)) {
+    /* Find origin.dentry again with ovl_acceptable() layer check */
+    if (d_is_dir(origin.dentry)) {
         dput(origin.dentry);
         origin.dentry = NULL;
         err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack);
@@ -769,6 +768,7 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
             goto out_err;
     }
 
+    /* Get a connected non-upper dir or disconnected non-dir */
     dentry = ovl_get_dentry(sb, NULL, &origin, index);
 
 out:

@@ -1568,7 +1568,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
         cond_resched();
 
         BUG_ON(!vma_can_userfault(vma));
-        WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
 
         /*
          * Nothing to do: this vma is already registered into this
@@ -1577,6 +1576,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
         if (!vma->vm_userfaultfd_ctx.ctx)
             goto skip;
 
+        WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+
         if (vma->vm_start > start)
             start = vma->vm_start;
         vma_end = min(end, vma->vm_end);

@@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING
 
       If in doubt, say N here.
 
+config HAVE_SCHED_AVG_IRQ
+    def_bool y
+    depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING
+    depends on SMP
+
 config BSD_PROCESS_ACCT
     bool "BSD Process Accounting"
     depends on MULTIUSER

@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
  * In theory, the compile should just see 0 here, and optimize out the call
  * to sched_rt_avg_update. But I don't trust it...
  */
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-    s64 steal = 0, irq_delta = 0;
-#endif
+    s64 __maybe_unused steal = 0, irq_delta = 0;
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
     irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
 
@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 
     rq->clock_task += delta;
 
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
     if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
         update_irq_load_avg(rq, irq_delta + steal);
 #endif

@@ -8168,7 +8168,7 @@ static inline bool others_have_blocked(struct rq *rq)
     if (READ_ONCE(rq->avg_dl.util_avg))
         return true;
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
     if (READ_ONCE(rq->avg_irq.util_avg))
         return true;
 #endif

@@ -372,7 +372,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
     return 0;
 }
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 /*
  * irq:
  *

@@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
 #else
 static inline int

@@ -895,8 +895,7 @@ struct rq {
 
     struct sched_avg    avg_rt;
     struct sched_avg    avg_dl;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-#define HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
     struct sched_avg    avg_irq;
 #endif
     u64         idle_stamp;
@@ -2296,7 +2295,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 }
 #endif
 
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
     return rq->avg_irq.util_avg;

@@ -5460,6 +5460,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
     if (ops->flags & FTRACE_OPS_FL_ENABLED)
         ftrace_shutdown(ops, 0);
     ops->flags |= FTRACE_OPS_FL_DELETED;
+    ftrace_free_filter(ops);
     mutex_unlock(&ftrace_lock);
 }
 

@@ -570,11 +570,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
         }
     }
 
+    kfree(op_stack);
+    kfree(inverts);
     return prog;
 out_free:
     kfree(op_stack);
-    kfree(prog_stack);
     kfree(inverts);
+    kfree(prog_stack);
     return ERR_PTR(ret);
 }
 
@@ -1718,6 +1720,7 @@ static int create_filter(struct trace_event_call *call,
     err = process_preds(call, filter_string, *filterp, pe);
     if (err && set_str)
         append_filter_err(pe, *filterp);
+    create_filter_finish(pe);
 
     return err;
 }

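Note on the predicate_parse leak fix above: the scratch arrays used while parsing are now freed on the success path as well, not only under out_free:. The discipline is that temporaries die on every exit path and only the returned object survives. A minimal C sketch under that assumption (names are illustrative, not the tracing code):

    #include <stdlib.h>

    struct prog { int n; };

    static struct prog *build_prog(int fail)
    {
        int *op_stack = malloc(16 * sizeof(int));   /* scratch */
        int *inverts  = malloc(16 * sizeof(int));   /* scratch */
        struct prog *prog = malloc(sizeof(*prog));  /* result */

        if (!op_stack || !inverts || !prog || fail)
            goto out_free;

        prog->n = 0;
        free(op_stack);     /* success: scratch freed, result returned */
        free(inverts);
        return prog;

    out_free:
        free(op_stack);     /* failure: scratch AND result freed */
        free(inverts);
        free(prog);
        return NULL;
    }

    int main(void)
    {
        struct prog *p = build_prog(0);
        int ok = (p != NULL);
        free(p);
        return ok ? 0 : 1;
    }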
@@ -732,8 +732,10 @@ int set_trigger_filter(char *filter_str,
 
     /* The filter is for the 'trigger' event, not the triggered event */
     ret = create_event_filter(file->event_call, filter_str, false, &filter);
-    if (ret)
-        goto out;
+    /*
+     * If create_event_filter() fails, filter still needs to be freed.
+     * Which the calling code will do with data->filter.
+     */
 assign:
     tmp = rcu_access_pointer(data->filter);
 

@@ -168,6 +168,7 @@ class id_parser(object):
         self.curline = 0
         try:
             for line in fd:
+                line = line.decode(locale.getpreferredencoding(False), errors='ignore')
                 self.curline += 1
                 if self.curline > maxlines:
                     break
@@ -249,12 +250,13 @@ if __name__ == '__main__':
 
     try:
         if len(args.path) and args.path[0] == '-':
-            parser.parse_lines(sys.stdin, args.maxlines, '-')
+            stdin = os.fdopen(sys.stdin.fileno(), 'rb')
+            parser.parse_lines(stdin, args.maxlines, '-')
         else:
             if args.path:
                 for p in args.path:
                     if os.path.isfile(p):
-                        parser.parse_lines(open(p), args.maxlines, p)
+                        parser.parse_lines(open(p, 'rb'), args.maxlines, p)
                     elif os.path.isdir(p):
                         scan_git_subtree(repo.head.reference.commit.tree, p)
                     else: