MALI: rockchip: upgrade bifrost DDK to g24p0-00eac0, from g22p0-01eac0

mali_csffw.bin from Valhall DDK g24 (r49) is included.

Change-Id: Ic48b63e744457163fbae3f41b477fc2827a1380e
Signed-off-by: Zhen Chen <chenzhen@rock-chips.com>

parent f8fff854d7
commit 4cedc115fd

147 changed files with 5342 additions and 2886 deletions
@@ -341,8 +341,7 @@ Description:
         device-driver that supports a CSF GPU.

         Used to enable firmware logs, logging levels valid values
-        are indicated using 'min and 'max' attribute values
-        values that are read-only.
+        are indicated using 'min' and 'max' attributes, which are read-only.

         Log level can be set using the 'cur' read, write attribute,
         we can use a valid log level value from min and max range values
@@ -19,7 +19,7 @@ Description:

 What:        /sys/bus/coresight/devices/mali-source-etm/is_enabled
 Description:
-        Attribute used to check if Coresight Source ITM is enabled.
+        Attribute used to check if Coresight Source ETM is enabled.

 What:        /sys/bus/coresight/devices/mali-source-etm/trcconfigr
 Description:
@@ -111,7 +111,10 @@ for details.
 - idvs-group-size : Override the IDVS group size value. Tasks are sent to
                     cores in groups of N + 1, so i.e. 0xF means 16 tasks.
                     Valid values are between 0 to 0x3F (including).
-- l2-size : Override L2 cache size on GPU that supports it
+- l2-size : Override L2 cache size on GPU that supports it. Value should be larger than the minimum
+            size 1KiB and smaller than the maximum size. Maximum size is Hardware integration dependent.
+            The value passed should be of log2(Cache Size in Bytes).
+            For example for a 1KiB of cache size, 0xa should be passed.
 - l2-hash : Override L2 hash function on GPU that supports it
 - l2-hash-values : Override L2 hash function using provided hash values, on GPUs that supports it.
                    It is mutually exclusive with 'l2-hash'. Only one or the other must be

@@ -237,7 +240,7 @@ gpu@0xfc010000 {
     ...
     pbha {
         int-id-override = <2 0x32>, <9 0x05>, <16 0x32>;
-        propagate-bits = /bits/ 4 <0x03>;
+        propagate-bits = /bits/ 8 <0x03>;
     };
     ...
 };
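Background sketch (not from the patch): the binding above encodes l2-size as log2 of the cache size in bytes, so 1 KiB becomes 0xa. A minimal C illustration of that encoding, with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: compute the 'l2-size' DT value, defined by the
 * binding as log2(cache size in bytes).
 */
static uint32_t l2_size_to_dt(uint64_t bytes)
{
    uint32_t lg = 0;

    while (((uint64_t)1 << (lg + 1)) <= bytes)
        lg++;
    return lg;
}

int main(void)
{
    printf("1 KiB -> 0x%x\n", l2_size_to_dt(1024)); /* prints 0xa */
    return 0;
}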
@@ -125,6 +125,8 @@ CFLAGS_MODULE += -Wno-sign-compare
 CFLAGS_MODULE += -Wno-shift-negative-value
+# This flag is needed to avoid build errors on older kernels
+CFLAGS_MODULE += $(call cc-option, -Wno-cast-function-type)
 # The following ensures the stack frame does not get larger than a page
 CFLAGS_MODULE += -Wframe-larger-than=4096

 KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -51,10 +51,6 @@ static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigne
 }
 #endif

-#define PTE_PBHA_SHIFT (59)
-#define PTE_PBHA_MASK ((uint64_t)0xf << PTE_PBHA_SHIFT)
-#define PTE_RES_BIT_MULTI_AS_SHIFT (63)
-
 #define IMPORTED_MEMORY_ID (MEMORY_GROUP_MANAGER_NR_GROUPS - 1)

 /**

@@ -263,7 +259,7 @@ static struct page *example_mgm_alloc_page(struct memory_group_manager_device *m
     } else {
         struct mgm_groups *data = mgm_dev->data;

-        dev_err(data->dev, "alloc_pages failed\n");
+        dev_dbg(data->dev, "alloc_pages failed\n");
     }

     return p;
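For reference only (an illustration; the constants are being deleted by this hunk, not documented by it): PTE_PBHA_SHIFT and PTE_PBHA_MASK located a 4-bit page-based hardware attribute field at PTE bits 59-62. Extracting such a field looks like:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the removed constants: PBHA lives in PTE bits 59..62. */
#define PTE_PBHA_SHIFT 59
#define PTE_PBHA_MASK ((uint64_t)0xf << PTE_PBHA_SHIFT)

int main(void)
{
    /* Example PTE with PBHA field set to 3 */
    uint64_t pte = (uint64_t)0x3 << PTE_PBHA_SHIFT;

    printf("PBHA value: %llu\n",
           (unsigned long long)((pte & PTE_PBHA_MASK) >> PTE_PBHA_SHIFT));
    return 0;
}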
@@ -69,7 +69,7 @@ endif
 #

 # Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= '"g22p0-01eac0"'
+MALI_RELEASE_NAME ?= '"g24p0-00eac0"'
 # Set up defaults if not defined by build system
 ifeq ($(CONFIG_MALI_BIFROST_DEBUG), y)
     MALI_UNIT_TEST = 1
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 #
-# (C) COPYRIGHT 2012-2023 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2012-2024 ARM Limited. All rights reserved.
 #
 # This program is free software and is provided to you under the terms of the
 # GNU General Public License version 2 as published by the Free Software

@@ -70,7 +70,6 @@ config MALI_NO_MALI_DEFAULT_GPU
     help
       This option sets the default GPU to identify as for No Mali builds.

-
 endchoice

 menu "Platform specific options"

@@ -214,16 +213,6 @@ config MALI_CORESTACK

       If unsure, say N.

-comment "Platform options"
-    depends on MALI_BIFROST && MALI_BIFROST_EXPERT
-
-config MALI_BIFROST_ERROR_INJECT
-    bool "Enable No Mali error injection"
-    depends on MALI_BIFROST && MALI_BIFROST_EXPERT && MALI_BIFROST_NO_MALI
-    default n
-    help
-      Enables insertion of errors to test module failure and recovery mechanisms.
-
 comment "Debug options"
     depends on MALI_BIFROST && MALI_BIFROST_EXPERT

@@ -304,7 +293,7 @@ endchoice

 config MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
     bool "Enable runtime selection of performance counters set via debugfs"
-    depends on MALI_BIFROST && MALI_BIFROST_EXPERT && DEBUG_FS
+    depends on MALI_BIFROST && MALI_BIFROST_EXPERT && DEBUG_FS && !MALI_CSF_SUPPORT
     default n
     help
       Select this option to make the secondary set of performance counters
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 #
-# (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
 #
 # This program is free software and is provided to you under the terms of the
 # GNU General Public License version 2 as published by the Free Software

@@ -41,11 +41,12 @@ ifeq ($(MALI_KCONFIG_EXT_PREFIX),)
     CONFIG_MALI_BIFROST_GATOR_SUPPORT ?= y
     CONFIG_MALI_ARBITRATION ?= n
     CONFIG_MALI_PARTITION_MANAGER ?= n
+    CONFIG_MALI_64BIT_HW_ACCESS ?= n

     ifneq ($(CONFIG_MALI_BIFROST_NO_MALI),y)
-        # Prevent misuse when CONFIG_MALI_BIFROST_NO_MALI=y
+        # Prevent misuse when CONFIG_MALI_BIFROST_NO_MALI!=y
         CONFIG_MALI_REAL_HW ?= y
     else
         CONFIG_MALI_CORESIGHT = n
     endif

@@ -76,7 +77,6 @@ ifeq ($(MALI_KCONFIG_EXT_PREFIX),)
     else
         # Prevent misuse when CONFIG_MALI_BIFROST_NO_MALI=n
         CONFIG_MALI_REAL_HW = y
-        CONFIG_MALI_BIFROST_ERROR_INJECT = n
     endif

@@ -108,7 +108,6 @@ ifeq ($(MALI_KCONFIG_EXT_PREFIX),)
         CONFIG_MALI_JOB_DUMP = n
         CONFIG_MALI_BIFROST_NO_MALI = n
         CONFIG_MALI_REAL_HW = y
-        CONFIG_MALI_BIFROST_ERROR_INJECT = n
         CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED = n
         CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE = n
         CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS = n

@@ -171,7 +170,6 @@ ifeq ($(MALI_KCONFIG_EXT_PREFIX),)
         CONFIG_MALI_PWRSOFT_765 \
         CONFIG_MALI_JOB_DUMP \
         CONFIG_MALI_BIFROST_NO_MALI \
-        CONFIG_MALI_BIFROST_ERROR_INJECT \
         CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED \
         CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE \
         CONFIG_MALI_PRFCNT_SET_PRIMARY \

@@ -272,6 +270,8 @@ CFLAGS_MODULE += -Wmissing-field-initializers
 CFLAGS_MODULE += -Wno-type-limits
+CFLAGS_MODULE += $(call cc-option, -Wmaybe-uninitialized)
+CFLAGS_MODULE += $(call cc-option, -Wunused-macros)
 # The following ensures the stack frame does not get larger than a page
 CFLAGS_MODULE += -Wframe-larger-than=4096

 KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN2
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 #
-# (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
 #
 # This program is free software and is provided to you under the terms of the
 # GNU General Public License version 2 as published by the Free Software

@@ -21,3 +21,4 @@
 bifrost_kbase-y += \
     arbiter/mali_kbase_arbif.o \
     arbiter/mali_kbase_arbiter_pm.o
+
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
 *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -160,28 +160,19 @@ static void on_gpu_lost(struct device *dev)
     kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_LOST_EVT);
 }

-/**
- * kbase_arbif_init() - Kbase Arbiter interface initialisation.
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- *
- * Initialise Kbase Arbiter interface and assign callback functions.
- *
- * Return:
- * * 0 - the interface was initialized or was not specified
- *       in the device tree.
- * * -EFAULT - the interface was specified but failed to initialize.
- * * -EPROBE_DEFER - module dependencies are not yet available.
- */
-int kbase_arbif_init(struct kbase_device *kbdev)
+static int kbase_arbif_of_init(struct kbase_device *kbdev)
 {
-#if IS_ENABLED(CONFIG_OF)
-    struct arbiter_if_arb_vm_ops ops;
     struct arbiter_if_dev *arb_if;
     struct device_node *arbiter_if_node;
     struct platform_device *pdev;
     int err;

-    dev_dbg(kbdev->dev, "%s\n", __func__);
+    if (!IS_ENABLED(CONFIG_OF)) {
+        /*
+         * Return -ENODEV in the event CONFIG_OF is not available and let the
+         * internal AW check for suitability for arbitration.
+         */
+        return -ENODEV;
+    }

     arbiter_if_node = of_parse_phandle(kbdev->dev->of_node, "arbiter-if", 0);
     if (!arbiter_if_node)

@@ -191,7 +182,7 @@ int kbase_arbif_init(struct kbase_device *kbdev)
         /* no arbiter interface defined in device tree */
         kbdev->arb.arb_dev = NULL;
         kbdev->arb.arb_if = NULL;
-        return 0;
+        return -ENODEV;
     }

     pdev = of_find_device_by_node(arbiter_if_node);

@@ -215,6 +206,47 @@ int kbase_arbif_init(struct kbase_device *kbdev)
     }

     kbdev->arb.arb_if = arb_if;
+    return 0;
+}
+
+static void kbase_arbif_of_term(struct kbase_device *kbdev)
+{
+    if (!IS_ENABLED(CONFIG_OF))
+        return;
+
+    if (kbdev->arb.arb_dev) {
+        module_put(kbdev->arb.arb_dev->driver->owner);
+        put_device(kbdev->arb.arb_dev);
+    }
+    kbdev->arb.arb_dev = NULL;
+}
+
+/**
+ * kbase_arbif_init() - Kbase Arbiter interface initialisation.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Initialise Kbase Arbiter interface and assign callback functions.
+ *
+ * Return:
+ * * 0 - the interface was initialized or was not specified
+ *       in the device tree.
+ * * -EFAULT - the interface was specified but failed to initialize.
+ * * -EPROBE_DEFER - module dependencies are not yet available.
+ */
+int kbase_arbif_init(struct kbase_device *kbdev)
+{
+    struct arbiter_if_arb_vm_ops ops;
+    struct arbiter_if_dev *arb_if;
+    int err = 0;
+
+    /* Tries to init with 'arbiter-if' if present in devicetree */
+    err = kbase_arbif_of_init(kbdev);
+    if (err)
+        return err;
+
     ops.arb_vm_gpu_stop = on_gpu_stop;
     ops.arb_vm_gpu_granted = on_gpu_granted;
     ops.arb_vm_gpu_lost = on_gpu_lost;

@@ -225,25 +257,35 @@ int kbase_arbif_init(struct kbase_device *kbdev)
     kbdev->arb.arb_freq.freq_updated = false;
     mutex_init(&kbdev->arb.arb_freq.arb_freq_lock);

-    /* register kbase arbiter_if callbacks */
-    if (arb_if->vm_ops.vm_arb_register_dev) {
-        err = arb_if->vm_ops.vm_arb_register_dev(arb_if, kbdev->dev, &ops);
-        if (err) {
-            dev_err(&pdev->dev, "Failed to register with arbiter. (err = %d)\n", err);
-            module_put(pdev->dev.driver->owner);
-            put_device(&pdev->dev);
-            if (err != -EPROBE_DEFER)
-                err = -EFAULT;
-            return err;
-        }
+    arb_if = kbdev->arb.arb_if;
+
+    if (arb_if == NULL) {
+        dev_err(kbdev->dev, "No arbiter interface present\n");
+        goto failure_term;
+    }
+
+    if (!arb_if->vm_ops.vm_arb_register_dev) {
+        dev_err(kbdev->dev, "arbiter_if registration callback not present\n");
+        goto failure_term;
+    }
+
+    /* register kbase arbiter_if callbacks */
+    err = arb_if->vm_ops.vm_arb_register_dev(arb_if, kbdev->dev, &ops);
+    if (err) {
+        dev_err(kbdev->dev, "Failed to register with arbiter. (err = %d)\n", err);
+        goto failure_term;
     }

-#else /* CONFIG_OF */
-    dev_dbg(kbdev->dev, "No arbiter without Device Tree support\n");
-    kbdev->arb.arb_dev = NULL;
-    kbdev->arb.arb_if = NULL;
-#endif
     return 0;
+
+failure_term:
+    {
+        kbase_arbif_of_term(kbdev);
+    }
+
+    if (err != -EPROBE_DEFER)
+        err = -EFAULT;
+    return err;
 }
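The reworked kbase_arbif_init() above adopts the kernel's single-exit-label cleanup idiom: every failure after kbase_arbif_of_init() succeeds funnels through failure_term, which undoes the OF setup exactly once. A hedged, self-contained sketch of the same shape (helper names are hypothetical stand-ins):

#include <errno.h>

static int acquire(void)  { return 0; }       /* stand-in for kbase_arbif_of_init() */
static int reg_cbs(void)  { return -EFAULT; } /* stand-in for vm_arb_register_dev() */
static void release(void) { }                 /* stand-in for kbase_arbif_of_term() */

static int example_init(void)
{
    int err = acquire();

    if (err)
        return err;

    err = reg_cbs();
    if (err)
        goto failure_term;

    return 0;

failure_term:
    release(); /* the single place that undoes acquire() */
    return err;
}

int main(void)
{
    return example_init() ? 1 : 0;
}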
@@ -256,16 +298,13 @@ void kbase_arbif_destroy(struct kbase_device *kbdev)
 {
     struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

-    if (arb_if && arb_if->vm_ops.vm_arb_unregister_dev) {
-        dev_dbg(kbdev->dev, "%s\n", __func__);
+    if (arb_if && arb_if->vm_ops.vm_arb_unregister_dev)
         arb_if->vm_ops.vm_arb_unregister_dev(kbdev->arb.arb_if);
-    }
+
+    {
+        kbase_arbif_of_term(kbdev);
+    }
     kbdev->arb.arb_if = NULL;
-    if (kbdev->arb.arb_dev) {
-        module_put(kbdev->arb.arb_dev->driver->owner);
-        put_device(kbdev->arb.arb_dev);
-    }
-    kbdev->arb.arb_dev = NULL;
 }

 /**

@@ -278,10 +317,8 @@ void kbase_arbif_get_max_config(struct kbase_device *kbdev)
 {
     struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

-    if (arb_if && arb_if->vm_ops.vm_arb_get_max_config) {
-        dev_dbg(kbdev->dev, "%s\n", __func__);
+    if (arb_if && arb_if->vm_ops.vm_arb_get_max_config)
         arb_if->vm_ops.vm_arb_get_max_config(arb_if);
-    }
 }

 /**

@@ -295,7 +332,6 @@ void kbase_arbif_gpu_request(struct kbase_device *kbdev)
     struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

     if (arb_if && arb_if->vm_ops.vm_arb_gpu_request) {
-        dev_dbg(kbdev->dev, "%s\n", __func__);
         KBASE_TLSTREAM_TL_ARBITER_REQUESTED(kbdev, kbdev);
         arb_if->vm_ops.vm_arb_gpu_request(arb_if);
     }

@@ -312,7 +348,6 @@ void kbase_arbif_gpu_stopped(struct kbase_device *kbdev, u8 gpu_required)
     struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

     if (arb_if && arb_if->vm_ops.vm_arb_gpu_stopped) {
-        dev_dbg(kbdev->dev, "%s\n", __func__);
         KBASE_TLSTREAM_TL_ARBITER_STOPPED(kbdev, kbdev);
         if (gpu_required)
             KBASE_TLSTREAM_TL_ARBITER_REQUESTED(kbdev, kbdev);

@@ -330,10 +365,8 @@ void kbase_arbif_gpu_active(struct kbase_device *kbdev)
 {
     struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

-    if (arb_if && arb_if->vm_ops.vm_arb_gpu_active) {
-        dev_dbg(kbdev->dev, "%s\n", __func__);
+    if (arb_if && arb_if->vm_ops.vm_arb_gpu_active)
         arb_if->vm_ops.vm_arb_gpu_active(arb_if);
-    }
 }

 /**

@@ -346,8 +379,6 @@ void kbase_arbif_gpu_idle(struct kbase_device *kbdev)
 {
     struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

-    if (arb_if && arb_if->vm_ops.vm_arb_gpu_idle) {
-        dev_dbg(kbdev->dev, "vm_arb_gpu_idle\n");
+    if (arb_if && arb_if->vm_ops.vm_arb_gpu_idle)
         arb_if->vm_ops.vm_arb_gpu_idle(arb_if);
-    }
 }
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -50,6 +50,7 @@ enum kbase_arbif_evt {
     KBASE_VM_OS_RESUME_EVENT,
 };

+
 /**
  * kbase_arbif_init() - Initialize the arbiter interface functionality.
  * @kbdev: The kbase device structure for the device (must be a valid pointer)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -48,7 +48,7 @@ MODULE_PARM_DESC(
     "On a virtualized platform, if the GPU is not granted within this time(ms) kbase will defer the probe");

 static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev);
-static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(struct kbase_device *kbdev);
+static inline bool kbase_arbiter_pm_vm_gpu_assigned_locked(struct kbase_device *kbdev);

 /**
  * kbase_arbiter_pm_vm_state_str() - Helper function to get string

@@ -85,7 +85,6 @@ static inline const char *kbase_arbiter_pm_vm_state_str(enum kbase_vm_state stat
     case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
         return "KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT";
     default:
-        KBASE_DEBUG_ASSERT(false);
         return "[UnknownState]";
     }
 }

@@ -117,14 +116,13 @@ static inline const char *kbase_arbiter_pm_vm_event_str(enum kbase_arbif_evt evt
     case KBASE_VM_REF_EVENT:
         return "KBASE_VM_REF_EVENT";
     default:
-        KBASE_DEBUG_ASSERT(false);
         return "[UnknownEvent]";
     }
 }

 /**
  * kbase_arbiter_pm_vm_set_state() - Sets new kbase_arbiter_vm_state
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  * @new_state: kbase VM new state
  *
  * This function sets the new state for the VM

@@ -229,7 +227,7 @@ static enum hrtimer_restart request_timer_callback(struct hrtimer *timer)

 /**
  * start_request_timer() - Start a timer after requesting GPU
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Start a timer to track when kbase is waiting for the GPU from the
  * Arbiter. If the timer expires before GPU is granted, a warning in

@@ -245,7 +243,7 @@ static void start_request_timer(struct kbase_device *kbdev)

 /**
  * cancel_request_timer() - Stop the request timer
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Stops the request timer once GPU has been granted. Safe to call
  * even if timer is no longer running.

@@ -260,7 +258,7 @@ static void cancel_request_timer(struct kbase_device *kbdev)
 /**
  * kbase_arbiter_pm_early_init() - Initialize arbiter for VM
  *                                 Paravirtualized use.
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Initialize the arbiter and other required resources during the runtime
  * and request the GPU for the VM for the first time.

@@ -272,7 +270,7 @@ int kbase_arbiter_pm_early_init(struct kbase_device *kbdev)
     int err;
     struct kbase_arbiter_vm_state *arb_vm_state = NULL;

-    arb_vm_state = kmalloc(sizeof(struct kbase_arbiter_vm_state), GFP_KERNEL);
+    arb_vm_state = kzalloc(sizeof(struct kbase_arbiter_vm_state), GFP_KERNEL);
     if (arb_vm_state == NULL)
         return -ENOMEM;

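kzalloc() differs from kmalloc() only in zeroing the allocation, which here gives the arbiter VM state deterministic defaults (e.g. flags start as false) before the state machine first reads them. A plain-C analogue of the two allocators, with a hypothetical state block:

#include <stdlib.h>

struct demo_state {            /* hypothetical state block */
    int interrupts_installed;
    int vm_state;
};

/* kmalloc-like: field contents are indeterminate until assigned */
static struct demo_state *alloc_uninit(void)
{
    return malloc(sizeof(struct demo_state));
}

/* kzalloc-like: every field starts at zero/false */
static struct demo_state *alloc_zeroed(void)
{
    return calloc(1, sizeof(struct demo_state));
}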
@@ -311,7 +309,7 @@ int kbase_arbiter_pm_early_init(struct kbase_device *kbdev)
             msecs_to_jiffies((unsigned int)gpu_req_timeout));

     if (!err) {
-        dev_dbg(kbdev->dev,
+        dev_err(kbdev->dev,
             "Kbase probe Deferred after waiting %d ms to receive GPU_GRANT\n",
             gpu_req_timeout);

@@ -336,7 +334,7 @@ arbif_init_fail:

 /**
  * kbase_arbiter_pm_early_term() - Shutdown arbiter and free resources
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Clean up all the resources
  */

@@ -344,6 +342,11 @@ void kbase_arbiter_pm_early_term(struct kbase_device *kbdev)
 {
     struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

+    if (arb_vm_state == NULL)
+        return;
+
+    kbase_arbiter_pm_release_interrupts(kbdev);
+
     cancel_request_timer(kbdev);
     mutex_lock(&arb_vm_state->vm_state_lock);
     if (arb_vm_state->vm_state > KBASE_VM_STATE_STOPPED_GPU_REQUESTED) {

@@ -358,12 +361,6 @@ void kbase_arbiter_pm_early_term(struct kbase_device *kbdev)
     kbdev->pm.arb_vm_state = NULL;
 }

-/**
- * kbase_arbiter_pm_release_interrupts() - Release the GPU interrupts
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- *
- * Releases interrupts and set the interrupt flag to false
- */
 void kbase_arbiter_pm_release_interrupts(struct kbase_device *kbdev)
 {
     struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

@@ -376,29 +373,25 @@ void kbase_arbiter_pm_release_interrupts(struct kbase_device *kbdev)
     mutex_unlock(&arb_vm_state->vm_state_lock);
 }

-/**
- * kbase_arbiter_pm_install_interrupts() - Install the GPU interrupts
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- *
- * Install interrupts and set the interrupt_install flag to true.
- *
- * Return: 0 if success, or a Linux error code
- */
 int kbase_arbiter_pm_install_interrupts(struct kbase_device *kbdev)
 {
     struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
-    int err;
+    int err = 0;

     mutex_lock(&arb_vm_state->vm_state_lock);
-    arb_vm_state->interrupts_installed = true;
-    err = kbase_install_interrupts(kbdev);
+    if (arb_vm_state->interrupts_installed == false) {
+        arb_vm_state->interrupts_installed = true;
+        err = kbase_install_interrupts(kbdev);
+    } else {
+        dev_dbg(kbdev->dev, "%s: interrupts installed already", __func__);
+    }
     mutex_unlock(&arb_vm_state->vm_state_lock);
     return err;
 }

 /**
  * kbase_arbiter_pm_vm_stopped() - Handle stop state for the VM
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Handles a stop state for the VM
  */

@@ -416,7 +409,13 @@ void kbase_arbiter_pm_vm_stopped(struct kbase_device *kbdev)
         dev_dbg(kbdev->dev, "%s %s\n", __func__,
             kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));

-    if (arb_vm_state->interrupts_installed) {
+    /*
+     * Release the interrupts on external arb_if to address Xen requirements.
+     * Interrupts are not released with internal arb_if as the IRQs are required
+     * to handle messaging to/from Arbiter/Resource Group.
+     */
+    if (arb_vm_state->interrupts_installed
+    ) {
         arb_vm_state->interrupts_installed = false;
         kbase_release_interrupts(kbdev);
     }

@@ -507,7 +506,7 @@ int kbase_arbiter_pm_gpu_assigned(struct kbase_device *kbdev)

 /**
  * kbase_arbiter_pm_vm_gpu_start() - Handles the start state of the VM
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Handles the start state of the VM
  */

@@ -532,7 +531,15 @@ static void kbase_arbiter_pm_vm_gpu_start(struct kbase_device *kbdev)
     case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
         kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_STARTING);
         arb_vm_state->interrupts_installed = true;
-        kbase_install_interrupts(kbdev);
+        /*
+         * Re-install interrupts that were released for external arb_if to
+         * address Xen requirements. Interrupts are not released with internal
+         * arb_if as the IRQs are required to handle messaging to/from
+         * Arbiter/Resource Group.
+         */
+        {
+            kbase_install_interrupts(kbdev);
+        }
         /*
          * GPU GRANTED received while in stop can be a result of a
          * repartitioning.

@@ -561,7 +568,7 @@ static void kbase_arbiter_pm_vm_gpu_start(struct kbase_device *kbdev)

 /**
  * kbase_arbiter_pm_vm_gpu_stop() - Handles the stop state of the VM
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Handles the start state of the VM
  */

@@ -603,7 +610,7 @@ static void kbase_arbiter_pm_vm_gpu_stop(struct kbase_device *kbdev)

 /**
  * kbase_gpu_lost() - Kbase signals GPU is lost on a lost event signal
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * On GPU lost event signals GPU_LOST to the aribiter
  */

@@ -658,7 +665,7 @@ static void kbase_gpu_lost(struct kbase_device *kbdev)
 /**
  * kbase_arbiter_pm_vm_os_suspend_ready_state() - checks if VM is ready
  *                                                to be moved to suspended state.
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Return: True if its ready to be suspended else False.
  */

@@ -678,7 +685,7 @@ static inline bool kbase_arbiter_pm_vm_os_suspend_ready_state(struct kbase_devic
 /**
  * kbase_arbiter_pm_vm_os_prepare_suspend() - Prepare OS to be in suspend state
  *                                            until it receives the grant message from arbiter
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Prepares OS to be in suspend state until it receives GRANT message
  * from Arbiter asynchronously.

@@ -745,7 +752,7 @@ static void kbase_arbiter_pm_vm_os_prepare_suspend(struct kbase_device *kbdev)
 /**
  * kbase_arbiter_pm_vm_os_resume() - Resume OS function once it receives
  *                                   a grant message from arbiter
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * Resume OS function once it receives GRANT message
  * from Arbiter asynchronously.

@@ -774,7 +781,7 @@ static void kbase_arbiter_pm_vm_os_resume(struct kbase_device *kbdev)

 /**
  * kbase_arbiter_pm_vm_event() - Dispatch VM event to the state machine.
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  * @evt: VM event
  *
  * The state machine function. Receives events and transitions states

@@ -853,7 +860,7 @@ void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev, enum kbase_arbif_evt
         break;

     default:
-        dev_alert(kbdev->dev, "Got Unknown Event!");
+        dev_err(kbdev->dev, "Got Unknown Event!");
         break;
     }
     mutex_unlock(&arb_vm_state->vm_state_lock);

@@ -863,7 +870,7 @@ KBASE_EXPORT_TEST_API(kbase_arbiter_pm_vm_event);

 /**
  * kbase_arbiter_pm_vm_wait_gpu_assignment() - VM wait for a GPU assignment.
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  *
  * VM waits for a GPU assignment.
  */

@@ -879,14 +886,14 @@ static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev)
 }

 /**
- * kbase_arbiter_pm_vm_gpu_assigned_lockheld() - Check if VM holds VM state lock
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * kbase_arbiter_pm_vm_gpu_assigned_locked() - Check if VM holds VM state lock
+ * @kbdev: The kbase device structure for the device
  *
  * Checks if the virtual machine holds VM state lock.
  *
  * Return: true if GPU is assigned, else false.
  */
-static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(struct kbase_device *kbdev)
+static inline bool kbase_arbiter_pm_vm_gpu_assigned_locked(struct kbase_device *kbdev)
 {
     struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

@@ -898,7 +905,7 @@ static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(struct kbase_device
 /**
  * kbase_arbiter_pm_ctx_active_handle_suspend() - Handle suspend operation for
  *                                                arbitration mode
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @kbdev: The kbase device structure for the device
  * @suspend_handler: The handler code for how to handle a suspend
  *                   that might occur
  *

@@ -916,7 +923,7 @@ int kbase_arbiter_pm_ctx_active_handle_suspend(struct kbase_device *kbdev,

     if (kbdev->arb.arb_if) {
         mutex_lock(&arb_vm_state->vm_state_lock);
-        while (!kbase_arbiter_pm_vm_gpu_assigned_lockheld(kbdev)) {
+        while (!kbase_arbiter_pm_vm_gpu_assigned_locked(kbdev)) {
             /* Update VM state since we have GPU work to do */
             if (arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_IDLE)
                 kbase_arbiter_pm_vm_set_state(kbdev,
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -102,7 +102,7 @@ void kbase_arbiter_pm_release_interrupts(struct kbase_device *kbdev);
  *
  * Install interrupts and set the interrupt_install flag to true.
  *
- * Return: 0 if success, or a Linux error code
+ * Return: 0 if success or already installed. Otherwise a Linux error code
  */
 int kbase_arbiter_pm_install_interrupts(struct kbase_device *kbdev);

@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 #
-# (C) COPYRIGHT 2014-2022 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
 #
 # This program is free software and is provided to you under the terms of the
 # GNU General Public License version 2 as published by the Free Software

@@ -47,12 +47,7 @@ endif
 bifrost_kbase-$(CONFIG_MALI_BIFROST_DEVFREQ) += \
     backend/gpu/mali_kbase_devfreq.o

-ifneq ($(CONFIG_MALI_REAL_HW),y)
-    bifrost_kbase-y += backend/gpu/mali_kbase_model_linux.o
-endif
+bifrost_kbase-$(CONFIG_MALI_BIFROST_NO_MALI) += backend/gpu/mali_kbase_model_linux.o

 # NO_MALI Dummy model interface
 bifrost_kbase-$(CONFIG_MALI_BIFROST_NO_MALI) += backend/gpu/mali_kbase_model_dummy.o
-# HW error simulation
-bifrost_kbase-$(CONFIG_MALI_BIFROST_NO_MALI) += backend/gpu/mali_kbase_model_error_generator.o
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -366,7 +366,7 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
         err = of_property_read_u64(node, "opp-hz-real", real_freqs);
 #endif
         if (err < 0) {
-            dev_warn(kbdev->dev, "Failed to read opp-hz-real property with error %d\n",
+            dev_warn(kbdev->dev, "Failed to read opp-hz-real property with error %d",
                      err);
             continue;
         }

@@ -374,8 +374,8 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
         err = of_property_read_u32_array(node, "opp-microvolt", opp_volts,
                                          kbdev->nr_regulators);
         if (err < 0) {
-            dev_warn(kbdev->dev,
-                     "Failed to read opp-microvolt property with error %d\n", err);
+            dev_warn(kbdev->dev, "Failed to read opp-microvolt property with error %d",
+                     err);
             continue;
         }
 #endif

@@ -386,11 +386,12 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)

             dev_warn(
                 kbdev->dev,
-                "Ignoring OPP %llu - Dynamic Core Scaling not supported on this GPU\n",
+                "Ignoring OPP %llu - Dynamic Core Scaling not supported on this GPU",
                 opp_freq);
             continue;
         }

+
         core_count_p = of_get_property(node, "opp-core-count", NULL);
         if (core_count_p) {
             u64 remaining_core_mask = kbdev->gpu_props.shader_present;
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -29,6 +29,8 @@
 #include <device/mali_kbase_device.h>
 #include <backend/gpu/mali_kbase_instr_internal.h>

+#define WAIT_FOR_DUMP_TIMEOUT_MS 5000
+
 static int wait_prfcnt_ready(struct kbase_device *kbdev)
 {
     u32 val;

@@ -163,6 +165,7 @@ int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
 {
     unsigned long flags, pm_flags;
     struct kbase_device *kbdev = kctx->kbdev;
+    const unsigned long timeout = msecs_to_jiffies(WAIT_FOR_DUMP_TIMEOUT_MS);

     while (1) {
         spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);

@@ -199,7 +202,8 @@ int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
         spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);

         /* Ongoing dump/setup - wait for its completion */
-        wait_event(kbdev->hwcnt.backend.wait, kbdev->hwcnt.backend.triggered != 0);
+        wait_event_timeout(kbdev->hwcnt.backend.wait, kbdev->hwcnt.backend.triggered != 0,
+                           timeout);
     }

     kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;

@@ -319,8 +323,19 @@ int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx)
     unsigned long flags;
     int err;

+    unsigned long remaining;
+    const unsigned long timeout = msecs_to_jiffies(WAIT_FOR_DUMP_TIMEOUT_MS);
+
     /* Wait for dump & cache clean to complete */
-    wait_event(kbdev->hwcnt.backend.wait, kbdev->hwcnt.backend.triggered != 0);
+    remaining = wait_event_timeout(kbdev->hwcnt.backend.wait,
+                                   kbdev->hwcnt.backend.triggered != 0, timeout);
+    if (remaining == 0) {
+        err = -ETIME;
+        /* Set the backend state so it's clear things have gone bad (could be a HW issue)
+         */
+        kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_UNRECOVERABLE_ERROR;
+        goto timed_out;
+    }

     spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

@@ -336,7 +351,7 @@ int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx)
     }

     spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-
+timed_out:
     return err;
 }
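Background on the API (kernel semantics, stated as an assumption about current kernels): wait_event_timeout() returns 0 when the timeout elapsed with the condition still false, and a positive remaining-jiffies count otherwise, which is why the remaining == 0 branch above maps to -ETIME. A user-space sketch of the same contract:

#include <stdio.h>

/* Hypothetical stand-in mirroring wait_event_timeout()'s contract:
 * returns 0 on timeout, a remaining count (>= 1) if the condition
 * became true in time.
 */
static long wait_for_flag(const int *flag, long timeout)
{
    while (timeout > 0 && !*flag)
        timeout--;              /* stand-in for sleeping one jiffy */
    return *flag ? (timeout ? timeout : 1) : 0;
}

int main(void)
{
    int triggered = 0;

    if (wait_for_flag(&triggered, 5000) == 0)
        printf("timed out -> the -ETIME path\n");
    return 0;
}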
@@ -74,7 +74,7 @@ void kbase_synchronize_irqs(struct kbase_device *kbdev);
  * Return: 0 on success. Error code (negative) on failure.
  */
 int kbase_validate_interrupts(struct kbase_device *const kbdev);
-#endif /* CONFIG_MALI_REAL_HW */
+#endif /* IS_ENABLED(CONFIG_MALI_REAL_HW) */
 #endif /* CONFIG_MALI_BIFROST_DEBUG */

 /**
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -23,6 +23,7 @@
 #include <device/mali_kbase_device.h>
 #include <backend/gpu/mali_kbase_irq_internal.h>

+
 #include <linux/interrupt.h>

 #if IS_ENABLED(CONFIG_MALI_REAL_HW)

@@ -163,13 +164,9 @@ static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
 static irqreturn_t kbase_combined_irq_handler(int irq, void *data)
 {
     irqreturn_t irq_state = IRQ_NONE;

-    if (kbase_job_irq_handler(irq, data) == IRQ_HANDLED)
-        irq_state = IRQ_HANDLED;
-    if (kbase_mmu_irq_handler(irq, data) == IRQ_HANDLED)
-        irq_state = IRQ_HANDLED;
-    if (kbase_gpu_irq_handler(irq, data) == IRQ_HANDLED)
-        irq_state = IRQ_HANDLED;
+    irq_state |= kbase_job_irq_handler(irq, data);
+    irq_state |= kbase_mmu_irq_handler(irq, data);
+    irq_state |= kbase_gpu_irq_handler(irq, data);

     return irq_state;
 }
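The OR-accumulation works because irqreturn_t is a small bitmask (IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 in current kernels, assumed here), so any sub-handler reporting IRQ_HANDLED leaves that bit set in the combined result. A standalone sketch of the accumulation:

#include <stdio.h>

/* Values mirror the kernel's irqreturn_t encoding (an assumption, see above). */
enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 << 0, IRQ_WAKE_THREAD = 1 << 1 };

static enum irqreturn job_irq(void) { return IRQ_NONE; }
static enum irqreturn mmu_irq(void) { return IRQ_HANDLED; }
static enum irqreturn gpu_irq(void) { return IRQ_NONE; }

int main(void)
{
    unsigned int state = IRQ_NONE;

    /* OR-accumulation replaces the three if/assign pairs of the old code */
    state |= job_irq();
    state |= mmu_irq();
    state |= gpu_irq();

    printf("combined: %s\n", (state & IRQ_HANDLED) ? "IRQ_HANDLED" : "IRQ_NONE");
    return 0;
}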
@@ -212,8 +209,7 @@ int kbase_set_custom_irq_handler(struct kbase_device *kbdev, irq_handler_t custo
     if (!handler)
         handler = kbase_get_interrupt_handler(kbdev, irq_tag);

-    if (request_irq(kbdev->irqs[irq].irq, handler,
-                    kbdev->irqs[irq].flags | ((kbdev->nr_irqs == 1) ? 0 : IRQF_SHARED),
+    if (request_irq(kbdev->irqs[irq].irq, handler, kbdev->irqs[irq].flags | IRQF_SHARED,
                     dev_name(kbdev->dev), kbase_tag(kbdev, irq)) != 0) {
         result = -EINVAL;
         dev_err(kbdev->dev, "Can't request interrupt %u (index %u)\n", kbdev->irqs[irq].irq,

@@ -396,8 +392,8 @@ static int validate_interrupt(struct kbase_device *const kbdev, u32 tag)

     /* restore original interrupt */
     if (request_irq(kbdev->irqs[irq].irq, kbase_get_interrupt_handler(kbdev, tag),
-                    kbdev->irqs[irq].flags | ((kbdev->nr_irqs == 1) ? 0 : IRQF_SHARED),
-                    dev_name(kbdev->dev), kbase_tag(kbdev, irq))) {
+                    kbdev->irqs[irq].flags | IRQF_SHARED, dev_name(kbdev->dev),
+                    kbase_tag(kbdev, irq))) {
         dev_err(kbdev->dev, "Can't restore original interrupt %u (index %u)\n",
                 kbdev->irqs[irq].irq, tag);
         err = -EINVAL;

@@ -449,10 +445,10 @@ int kbase_install_interrupts(struct kbase_device *kbdev)
     u32 i;

     for (i = 0; i < kbdev->nr_irqs; i++) {
-        const int result = request_irq(
-            kbdev->irqs[i].irq, kbase_get_interrupt_handler(kbdev, i),
-            kbdev->irqs[i].flags | ((kbdev->nr_irqs == 1) ? 0 : IRQF_SHARED),
-            dev_name(kbdev->dev), kbase_tag(kbdev, i));
+        const int result = request_irq(kbdev->irqs[i].irq,
+                                       kbase_get_interrupt_handler(kbdev, i),
+                                       kbdev->irqs[i].flags | IRQF_SHARED,
+                                       dev_name(kbdev->dev), kbase_tag(kbdev, i));
         if (result) {
             dev_err(kbdev->dev, "Can't request interrupt %u (index %u)\n",
                     kbdev->irqs[i].irq, i);
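With the nr_irqs special case gone, every line is now requested with IRQF_SHARED, so each handler must tolerate being invoked for interrupts raised by other devices sharing the line and report IRQ_NONE in that case. A hedged kernel-style sketch of that handler shape (the demo_dev type and fields are invented for illustration):

#include <linux/interrupt.h>

/* Hypothetical device state, for illustration only. */
struct demo_dev {
    u32 irq_status;
};

/* A handler on a shared line must detect "not my interrupt" and return
 * IRQ_NONE so the other sharers get a chance to run.
 */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
    struct demo_dev *dev = dev_id;

    if (!dev->irq_status)  /* stand-in for reading an IRQ RAWSTAT register */
        return IRQ_NONE;

    dev->irq_status = 0;   /* stand-in for acknowledging the interrupt */
    return IRQ_HANDLED;
}

A related constraint: with IRQF_SHARED, request_irq() requires a non-NULL dev_id so free_irq() can identify which handler to remove from the shared line.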
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -1328,7 +1328,7 @@ void kbase_reset_gpu(struct kbase_device *kbdev)

     if (!kbase_is_quick_reset_enabled(kbdev))
         dev_err(kbdev->dev,
-                "Preparing to soft-reset GPU: Waiting (upto %d ms) for all jobs to complete soft-stop\n",
+                "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
                 kbdev->reset_timeout_ms);

     hrtimer_start(&kbdev->hwaccess.backend.reset_timer,

@@ -1350,7 +1350,7 @@ void kbase_reset_gpu_locked(struct kbase_device *kbdev)

     if (!kbase_is_quick_reset_enabled(kbdev))
         dev_err(kbdev->dev,
-                "Preparing to soft-reset GPU: Waiting (upto %d ms) for all jobs to complete soft-stop\n",
+                "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
                 kbdev->reset_timeout_ms);
     hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
                   HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms), HRTIMER_MODE_REL);

@@ -1437,7 +1437,7 @@ void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
              * then leave it in the RB and next time we're kicked
              * it will be processed again from the starting state.
              */
-            if (keep_in_jm_rb) {
+            if (!kbase_is_gpu_removed(kbdev) && keep_in_jm_rb) {
                 katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
                 /* As the atom was not removed, increment the
                  * index so that we read the correct atom in the
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software

@@ -25,42 +25,8 @@
  * insmod'ing mali_kbase.ko with no arguments after a build with "scons
  * gpu=tXYZ" will yield the expected GPU ID for tXYZ. This can always be
  * overridden by passing the 'no_mali_gpu' argument to insmod.
- *
- * - if CONFIG_MALI_BIFROST_ERROR_INJECT is defined the error injection system is
- *   activated.
  */

-/* Implementation of failure injection system:
- *
- * Error conditions are generated by gpu_generate_error().
- * According to CONFIG_MALI_BIFROST_ERROR_INJECT definition gpu_generate_error() either
- * generates an error HW condition randomly (CONFIG_MALI_ERROR_INJECT_RANDOM) or
- * checks if there is (in error_track_list) an error configuration to be set for
- * the current job chain (CONFIG_MALI_ERROR_INJECT_RANDOM not defined).
- * Each error condition will trigger a specific "state" for a certain set of
- * registers as per Midgard Architecture Specifications doc.
- *
- * According to Midgard Architecture Specifications doc the following registers
- * are always affected by error conditions:
- *
- * JOB Exception:
- *   JOB_IRQ_RAWSTAT
- *   JOB<n> STATUS AREA
- *
- * MMU Exception:
- *   MMU_IRQ_RAWSTAT
- *   AS<n>_FAULTSTATUS
- *   AS<n>_FAULTADDRESS
- *
- * GPU Exception:
- *   GPU_IRQ_RAWSTAT
- *   GPU_FAULTSTATUS
- *   GPU_FAULTADDRESS
- *
- * For further clarification on the model behaviour upon specific error
- * conditions the user may refer to the Midgard Architecture Specification
- * document
- */
 #include <mali_kbase.h>
 #include <device/mali_kbase_device.h>
 #include <hw_access/mali_kbase_hw_access_regmap.h>

@@ -126,7 +92,7 @@ struct error_status_t hw_error_status;
  */
 struct control_reg_values_t {
     const char *name;
-    u32 gpu_id;
+    u64 gpu_id;
     u32 as_present;
     u32 thread_max_threads;
     u32 thread_max_workgroup_size;

@@ -524,7 +490,7 @@ MODULE_PARM_DESC(no_mali_gpu, "GPU to identify as");
 static u32 gpu_model_get_prfcnt_value(enum kbase_ipa_core_type core_type, u32 cnt_idx,
                                       bool is_low_word)
 {
-    u64 *counters_data;
+    u64 *counters_data = NULL;
     u32 core_count = 0;
     u32 event_index;
     u64 value = 0;

@@ -580,6 +546,9 @@ static u32 gpu_model_get_prfcnt_value(enum kbase_ipa_core_type core_type, u32 cn
         break;
     }

+    if (unlikely(counters_data == NULL))
+        return 0;
+
     for (core = 0; core < core_count; core++) {
         value += counters_data[event_index];
         event_index += KBASE_DUMMY_MODEL_COUNTER_PER_CORE;

@@ -1172,9 +1141,6 @@ static void midgard_model_update(void *h)

             /*this job is done assert IRQ lines */
             signal_int(dummy, i);
-#ifdef CONFIG_MALI_BIFROST_ERROR_INJECT
-            midgard_set_error(i);
-#endif /* CONFIG_MALI_BIFROST_ERROR_INJECT */
             update_register_statuses(dummy, i);
             /*if this job slot returned failures we cannot use it */
             if (hw_error_status.job_irq_rawstat & (1u << (i + 16))) {

@@ -1564,6 +1530,7 @@ void midgard_model_write_reg(void *h, u32 addr, u32 value)
         case L2_PWROFF_HI:
         case PWR_KEY:
         case PWR_OVERRIDE0:
         case PWR_OVERRIDE1:
 #if MALI_USE_CSF
+        case SHADER_PWRFEATURES:
         case CSF_CONFIG:

@@ -1607,8 +1574,7 @@ void midgard_model_read_reg(void *h, u32 addr, u32 *const value)
 #else /* !MALI_USE_CSF */
     if (addr == GPU_CONTROL_REG(GPU_ID)) {
 #endif /* !MALI_USE_CSF */
-
-        *value = dummy->control_reg_values->gpu_id;
+        *value = dummy->control_reg_values->gpu_id & U32_MAX;
     } else if (addr == JOB_CONTROL_REG(JOB_IRQ_RAWSTAT)) {
         *value = hw_error_status.job_irq_rawstat;
         pr_debug("%s", "JS_IRQ_RAWSTAT being read");
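Since gpu_id is now u64 while midgard_model_read_reg() hands back 32-bit register values, the read path masks with U32_MAX to return only the low word. A standalone illustration of that truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical 64-bit GPU_ID value; only the low word is visible
     * through a 32-bit register read, hence the & U32_MAX mask.
     */
    uint64_t gpu_id = 0xf00dbeef12345678ULL;
    uint32_t low = (uint32_t)(gpu_id & 0xffffffffu);
    uint32_t high = (uint32_t)(gpu_id >> 32);

    printf("GPU_ID lo=0x%08x hi=0x%08x\n", low, high);
    return 0;
}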
@@ -2166,9 +2132,3 @@ int gpu_model_control(void *model, struct kbase_model_control_params *params)

     return 0;
 }
-
-u64 midgard_model_arch_timer_get_cntfrq(void *h)
-{
-    CSTD_UNUSED(h);
-    return arch_timer_get_cntfrq();
-}
@@ -1,172 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
-/*
- *
- * (C) COPYRIGHT 2014-2015, 2018-2023 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU license.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- */
-
-#include <mali_kbase.h>
-#include <linux/random.h>
-#include "backend/gpu/mali_kbase_model_linux.h"
-
-static struct kbase_error_atom *error_track_list;
-
-#ifdef CONFIG_MALI_ERROR_INJECT_RANDOM
-
-/** Kernel 6.1.0 has dropped prandom_u32(), use get_random_u32() */
-#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
-#define prandom_u32 get_random_u32
-#endif
-
-/*following error probability are set quite high in order to stress the driver*/
-static unsigned int error_probability = 50; /* to be set between 0 and 100 */
-/* probability to have multiple error give that there is an error */
-static unsigned int multiple_error_probability = 50;
-
-/* all the error conditions supported by the model */
-#define TOTAL_FAULTS 27
-/* maximum number of levels in the MMU translation table tree */
-#define MAX_MMU_TABLE_LEVEL 4
-/* worst case scenario is <1 MMU fault + 1 job fault + 2 GPU faults> */
-#define MAX_CONCURRENT_FAULTS 3
-
-/**
- * gpu_generate_error - Generate GPU error
- */
-static void gpu_generate_error(void)
-{
-    unsigned int errors_num = 0;
-
-    /*is there at least one error? */
-    if ((prandom_u32() % 100) < error_probability) {
-        /* pick up a faulty mmu address space */
-        hw_error_status.faulty_mmu_as = prandom_u32() % NUM_MMU_AS;
-        /* pick up an mmu table level */
-        hw_error_status.mmu_table_level = 1 + (prandom_u32() % MAX_MMU_TABLE_LEVEL);
-        hw_error_status.errors_mask = (u32)(1 << (prandom_u32() % TOTAL_FAULTS));
-
-        /*is there also one or more errors? */
-        if ((prandom_u32() % 100) < multiple_error_probability) {
-            errors_num = 1 + (prandom_u32() % (MAX_CONCURRENT_FAULTS - 1));
-            while (errors_num-- > 0) {
-                u32 temp_mask;
-
-                temp_mask = (u32)(1 << (prandom_u32() % TOTAL_FAULTS));
-                /* below we check that no bit of the same error
-                 * type is set again in the error mask
-                 */
-                if ((temp_mask & IS_A_JOB_ERROR) &&
-                    (hw_error_status.errors_mask & IS_A_JOB_ERROR)) {
-                    errors_num++;
-                    continue;
-                }
-                if ((temp_mask & IS_A_MMU_ERROR) &&
-                    (hw_error_status.errors_mask & IS_A_MMU_ERROR)) {
-                    errors_num++;
-                    continue;
-                }
-                if ((temp_mask & IS_A_GPU_ERROR) &&
-                    (hw_error_status.errors_mask & IS_A_GPU_ERROR)) {
-                    errors_num++;
-                    continue;
-                }
-                /* this error mask is already set */
-                if ((hw_error_status.errors_mask | temp_mask) ==
-                    hw_error_status.errors_mask) {
-                    errors_num++;
-                    continue;
-                }
-                hw_error_status.errors_mask |= temp_mask;
-            }
-        }
-    }
-}
-#endif
-
-int job_atom_inject_error(struct kbase_error_params *params)
-{
-    struct kbase_error_atom *new_elem;
-
-    KBASE_DEBUG_ASSERT(params);
-
-    new_elem = kzalloc(sizeof(*new_elem), GFP_KERNEL);
-
-    if (!new_elem) {
-        model_error_log(KBASE_CORE,
-                        "\njob_atom_inject_error: kzalloc failed for new_elem\n");
-        return -ENOMEM;
-    }
-    new_elem->params.jc = params->jc;
-    new_elem->params.errors_mask = params->errors_mask;
-    new_elem->params.mmu_table_level = params->mmu_table_level;
-    new_elem->params.faulty_mmu_as = params->faulty_mmu_as;
-
-    /*circular list below */
-    if (error_track_list == NULL) { /*no elements */
-        error_track_list = new_elem;
-        new_elem->next = error_track_list;
-    } else {
-        struct kbase_error_atom *walker = error_track_list;
-
-        while (walker->next != error_track_list)
-            walker = walker->next;
-
-        new_elem->next = error_track_list;
-        walker->next = new_elem;
-    }
-    return 0;
-}
-
-void midgard_set_error(u32 job_slot)
-{
-#ifdef CONFIG_MALI_ERROR_INJECT_RANDOM
-    gpu_generate_error();
-#else
-    struct kbase_error_atom *walker, *auxiliar;
-
-    if (error_track_list != NULL) {
-        walker = error_track_list->next;
-        auxiliar = error_track_list;
-        do {
-            if (walker->params.jc == hw_error_status.current_jc) {
-                /* found a faulty atom matching with the
-                 * current one
-                 */
-                hw_error_status.errors_mask = walker->params.errors_mask;
-                hw_error_status.mmu_table_level = walker->params.mmu_table_level;
-                hw_error_status.faulty_mmu_as = walker->params.faulty_mmu_as;
-                hw_error_status.current_job_slot = job_slot;
-
-                if (walker->next == walker) {
-                    /* only one element */
-                    kfree(error_track_list);
-                    error_track_list = NULL;
-                } else {
-                    auxiliar->next = walker->next;
-                    if (walker == error_track_list)
-                        error_track_list = walker->next;
-
-                    kfree(walker);
-                }
-                break;
-            }
-            auxiliar = walker;
-            walker = walker->next;
-        } while (auxiliar->next != error_track_list);
-    }
-#endif /* CONFIG_MALI_ERROR_INJECT_RANDOM */
-}
@@ -48,12 +48,8 @@
 /*
  * Include Model definitions
  */

-#if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
 #include <backend/gpu/mali_kbase_model_dummy.h>
-#endif /* IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) */

-#if !IS_ENABLED(CONFIG_MALI_REAL_HW)
 /**
  * kbase_gpu_device_create() - Generic create function.
  *

@@ -116,15 +112,6 @@ void midgard_model_write_reg(void *h, u32 addr, u32 value);
  */
 void midgard_model_read_reg(void *h, u32 addr, u32 *const value);

-/**
- * midgard_model_arch_timer_get_cntfrq - Get Model specific System Timer Frequency
- *
- * @h: Model handle.
- *
- * Return: Frequency in Hz
- */
-u64 midgard_model_arch_timer_get_cntfrq(void *h);
-
 /**
  * gpu_device_raise_irq() - Private IRQ raise function.
  *

@@ -155,6 +142,5 @@ void gpu_device_set_data(void *model, void *data);
  * Return: Pointer to the data carried by model.
  */
 void *gpu_device_get_data(void *model);
-#endif /* !IS_ENABLED(CONFIG_MALI_REAL_HW) */

 #endif /* _KBASE_MODEL_LINUX_H_ */
@ -1,7 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
|
||||
/*
|
||||
*
|
||||
* (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
|
||||
* (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
|
||||
*
|
||||
* This program is free software and is provided to you under the terms of the
|
||||
* GNU General Public License version 2 as published by the Free Software
|
||||
|
|
@ -36,6 +36,9 @@
|
|||
#include <linux/version_compat_defs.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <mali_kbase_reset_gpu.h>
|
||||
#ifdef CONFIG_MALI_ARBITER_SUPPORT
|
||||
#include <csf/mali_kbase_csf_scheduler.h>
|
||||
#endif /* !CONFIG_MALI_ARBITER_SUPPORT */
|
||||
#endif /* !MALI_USE_CSF */
|
||||
#include <hwcnt/mali_kbase_hwcnt_context.h>
|
||||
#include <backend/gpu/mali_kbase_pm_internal.h>
|
||||
|
|
@ -393,7 +396,7 @@ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
|
|||
backend->poweron_required = false;
|
||||
kbdev->pm.backend.l2_desired = true;
|
||||
#if MALI_USE_CSF
|
||||
kbdev->pm.backend.mcu_desired = true;
|
||||
kbdev->pm.backend.mcu_desired = kbdev->pm.backend.mcu_poweron_required;
|
||||
#endif
|
||||
kbase_pm_update_state(kbdev);
|
||||
kbase_pm_update_cores_state_nolock(kbdev);
|
||||
|
|
@ -860,9 +863,11 @@ void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask)
|
|||
}
|
||||
KBASE_EXPORT_TEST_API(kbase_pm_set_debug_core_mask);
|
||||
#else
|
||||
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask_js0,
|
||||
u64 new_core_mask_js1, u64 new_core_mask_js2)
|
||||
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 *new_core_mask,
|
||||
size_t new_core_mask_size)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
lockdep_assert_held(&kbdev->hwaccess_lock);
|
||||
lockdep_assert_held(&kbdev->pm.lock);
|
||||
|
||||
|
|
@ -870,13 +875,14 @@ void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask_
|
|||
dev_warn_once(
|
||||
kbdev->dev,
|
||||
"Change of core mask not supported for slot 0 as dummy job WA is enabled");
|
||||
new_core_mask_js0 = kbdev->pm.debug_core_mask[0];
|
||||
new_core_mask[0] = kbdev->pm.debug_core_mask[0];
|
||||
}
|
||||
|
||||
kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
|
||||
kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
|
||||
kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
|
||||
kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 | new_core_mask_js2;
|
||||
kbdev->pm.debug_core_mask_all = 0;
|
||||
for (i = 0; i < new_core_mask_size; i++) {
|
||||
kbdev->pm.debug_core_mask[i] = new_core_mask[i];
|
||||
kbdev->pm.debug_core_mask_all |= new_core_mask[i];
|
||||
}
|
||||
|
||||
kbase_pm_update_dynamic_cores_onoff(kbdev);
|
||||
}
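
The hunk above generalises the debug core-mask API from three fixed job-slot arguments to an array plus length, OR-ing the per-slot masks into debug_core_mask_all. A minimal sketch of how a caller migrates (illustrative only; the real call sites and their locking are not part of this hunk):

    /* Before: one u64 per job slot, fixed at three slots. */
    kbase_pm_set_debug_core_mask(kbdev, mask_js0, mask_js1, mask_js2);

    /* After: an array of masks; the driver aggregates them itself. */
    u64 masks[3] = { mask_js0, mask_js1, mask_js2 };

    kbase_pm_set_debug_core_mask(kbdev, masks, ARRAY_SIZE(masks));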
@@ -962,7 +968,9 @@ void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
{
unsigned long flags;
#if !MALI_USE_CSF
#if MALI_USE_CSF
unsigned long flags_sched;
#else
ktime_t end_timestamp = ktime_get_raw();
#endif
struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
@@ -981,24 +989,44 @@ void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
*/
WARN(!kbase_is_gpu_removed(kbdev), "GPU is still available after GPU lost event\n");

/* Full GPU reset will have been done by hypervisor, so
* cancel
*/
#if MALI_USE_CSF
/* Full GPU reset will have been done by hypervisor, so cancel */
kbase_reset_gpu_prevent_and_wait(kbdev);

spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbase_csf_scheduler_spin_lock(kbdev, &flags_sched);
atomic_set(&kbdev->hwaccess.backend.reset_gpu, KBASE_RESET_GPU_NOT_PENDING);
kbase_csf_scheduler_spin_unlock(kbdev, flags_sched);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

kbase_synchronize_irqs(kbdev);

/* Scheduler reset happens outside of spinlock due to the mutex it acquires */
kbase_csf_scheduler_reset(kbdev);

/* Update kbase status */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbdev->protected_mode = false;
kbase_pm_update_state(kbdev);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

/* Cancel any pending HWC dumps */
kbase_hwcnt_backend_csf_on_unrecoverable_error(&kbdev->hwcnt_gpu_iface);
#else
/* Full GPU reset will have been done by hypervisor, so cancel */
atomic_set(&kbdev->hwaccess.backend.reset_gpu, KBASE_RESET_GPU_NOT_PENDING);
hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);

kbase_synchronize_irqs(kbdev);

/* Clear all jobs running on the GPU */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbdev->protected_mode = false;
#if !MALI_USE_CSF
kbase_backend_reset(kbdev, &end_timestamp);
kbase_pm_metrics_update(kbdev, NULL);
#endif
kbase_pm_update_state(kbdev);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

#if !MALI_USE_CSF
/* Cancel any pending HWC dumps */
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING ||
@@ -1008,12 +1036,11 @@ void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
wake_up(&kbdev->hwcnt.backend.wait);
}
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
#endif
#endif /* MALI_USE_CSF */
}
mutex_unlock(&arb_vm_state->vm_state_lock);
mutex_unlock(&kbdev->pm.lock);
}

#endif /* CONFIG_MALI_ARBITER_SUPPORT */

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
@@ -1063,26 +1090,15 @@ static int pm_handle_mcu_sleep_on_runtime_suspend(struct kbase_device *kbdev)
}

/* Check if a Doorbell mirror interrupt occurred meanwhile.
* Also check if GPU idle work item is pending. If FW had sent the GPU idle notification
* after the wake up of MCU then it can be assumed that Userspace submission didn't make
* GPU non-idle, so runtime suspend doesn't need to be aborted.
*/
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
if (kbdev->pm.backend.gpu_sleep_mode_active && kbdev->pm.backend.exit_gpu_sleep_mode &&
!work_pending(&kbdev->csf.scheduler.gpu_idle_work)) {
u32 glb_req =
kbase_csf_firmware_global_input_read(&kbdev->csf.global_iface, GLB_REQ);
u32 glb_ack = kbase_csf_firmware_global_output(&kbdev->csf.global_iface, GLB_ACK);

/* Only abort the runtime suspend if GPU idle event is not pending */
if (!((glb_req ^ glb_ack) & GLB_REQ_IDLE_EVENT_MASK)) {
dev_dbg(kbdev->dev,
"DB mirror interrupt occurred during runtime suspend after L2 power up");
kbdev->pm.backend.gpu_wakeup_override = false;
kbdev->pm.backend.runtime_suspend_abort_reason = ABORT_REASON_DB_MIRROR_IRQ;
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
return -EBUSY;
}
if (kbdev->pm.backend.gpu_sleep_mode_active && kbdev->pm.backend.exit_gpu_sleep_mode) {
dev_dbg(kbdev->dev,
"DB mirror interrupt occurred during runtime suspend after L2 power up");
kbdev->pm.backend.gpu_wakeup_override = false;
kbdev->pm.backend.runtime_suspend_abort_reason = ABORT_REASON_DB_MIRROR_IRQ;
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
return -EBUSY;
}
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
/* Need to release the kbdev->pm.lock to avoid lock ordering issue

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -332,7 +332,11 @@ union kbase_pm_policy_data {
* cores may be different, but there should be transitions in
* progress that will eventually achieve this state (assuming
* that the policy doesn't change its mind in the mean time).
* @mcu_desired: True if the micro-control unit should be powered on
* @mcu_desired: True if the micro-control unit should be powered on by the MCU state
* machine. Updated as per the value of @mcu_poweron_required.
* @mcu_poweron_required: Boolean flag updated mainly by the CSF Scheduler code,
* before updating the PM active count, to indicate to the
* PM code that micro-control unit needs to be powered up/down.
* @policy_change_clamp_state_to_off: Signaling the backend is in PM policy
* change transition, needs the mcu/L2 to be brought back to the
* off state and remain in that state until the flag is cleared.
@@ -485,6 +489,7 @@ struct kbase_pm_backend_data {
u64 shaders_desired_mask;
#if MALI_USE_CSF
bool mcu_desired;
bool mcu_poweron_required;
bool policy_change_clamp_state_to_off;
unsigned int csf_pm_sched_flags;
struct mutex policy_change_lock;

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -70,6 +70,19 @@ MODULE_PARM_DESC(corestack_driver_control,
"to the Mali GPU is known to be problematic.");
KBASE_EXPORT_TEST_API(corestack_driver_control);

/**
* enum kbase_gpu_state - The state of data in the GPU.
*
* @GPU_STATE_INTACT: The GPU state is intact
* @GPU_STATE_LOST: The GPU state is lost
* @GPU_STATE_IN_RESET: The GPU is in reset state
*
* This enumeration is private to the file. It is used as
* the return values of platform specific PM
* callback (*power_on_callback).
*/
enum kbase_gpu_state { GPU_STATE_INTACT = 0, GPU_STATE_LOST, GPU_STATE_IN_RESET };

/**
* enum kbasep_pm_action - Actions that can be performed on a core.
*
@@ -110,7 +123,15 @@ bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev)
if (kbdev->pm.backend.l2_force_off_after_mcu_halt)
return false;

if (kbdev->csf.scheduler.pm_active_count && kbdev->pm.backend.mcu_desired)
/* Check if policy changing transition needs MCU to be off. */
if (unlikely(kbdev->pm.backend.policy_change_clamp_state_to_off))
return false;

if (kbdev->pm.backend.mcu_desired)
return true;

/* For always_on policy, the MCU needs to be kept on */
if (kbase_pm_no_mcu_core_pwroff(kbdev))
return true;

#ifdef KBASE_PM_RUNTIME
@@ -119,13 +140,7 @@ bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev)
return true;
#endif

/* MCU is supposed to be ON, only when scheduler.pm_active_count is
* non zero. But for always_on policy, the MCU needs to be kept on,
* unless policy changing transition needs it off.
*/

return (kbdev->pm.backend.mcu_desired && kbase_pm_no_mcu_core_pwroff(kbdev) &&
!kbdev->pm.backend.policy_change_clamp_state_to_off);
return false;
}
#endif
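
The rewritten kbase_pm_is_mcu_desired() above replaces the single compound return expression with early-exit checks; both forms accept the same states, but the new ordering makes each veto condition explicit. A condensed sketch of the decision order (not the full function, which also contains the l2_force_off_after_mcu_halt and KBASE_PM_RUNTIME checks shown in the hunk):

    /* Condensed decision order; helper and field names as in the driver. */
    static bool mcu_desired_sketch(struct kbase_device *kbdev)
    {
        struct kbase_pm_backend_data *b = &kbdev->pm.backend;

        if (b->policy_change_clamp_state_to_off)
            return false; /* policy transition wants the MCU off */
        if (b->mcu_desired)
            return true; /* scheduler asked for the MCU */
        return kbase_pm_no_mcu_core_pwroff(kbdev); /* always_on keeps it on */
    }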
@@ -979,8 +994,8 @@ static int kbase_pm_mcu_update_state(struct kbase_device *kbdev)
kbase_hwcnt_backend_csf_set_hw_availability(
&kbdev->hwcnt_gpu_iface,
kbdev->gpu_props.curr_config.l2_slices,
kbdev->gpu_props.curr_config.shader_present &
kbdev->pm.debug_core_mask);
kbdev->gpu_props.curr_config.shader_present,
kbdev->pm.debug_core_mask);
kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
kbase_csf_scheduler_spin_unlock(kbdev, flags);
backend->hwcnt_disabled = false;
@@ -1342,6 +1357,8 @@ static void kbase_pm_l2_clear_backend_slot_submit_kctx(struct kbase_device *kbde

static bool can_power_down_l2(struct kbase_device *kbdev)
{
lockdep_assert_held(&kbdev->hwaccess_lock);

/* Defer the power-down if MMU is in process of page migration. */
return !kbdev->mmu_page_migrate_in_progress;
}
@@ -2797,7 +2814,7 @@ static void update_user_reg_page_mapping(struct kbase_device *kbdev)
void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
{
struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
bool reset_required = is_resume;
int ret = is_resume;
unsigned long flags;

KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -2836,7 +2853,7 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
backend->callback_power_resume(kbdev);
return;
} else if (backend->callback_power_on) {
reset_required = backend->callback_power_on(kbdev);
ret = backend->callback_power_on(kbdev);
}

spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
@@ -2849,7 +2866,12 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
#endif


if (reset_required) {
if (ret == GPU_STATE_IN_RESET) {
/* GPU is already in reset state after power on and no
* soft-reset needed. Just reconfiguration is needed.
*/
kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS | PM_NO_RESET);
} else if (ret == GPU_STATE_LOST) {
/* GPU state was lost, reset GPU to ensure it is in a
* consistent state
*/
@@ -2898,7 +2920,7 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
backend->l2_desired = true;
#if MALI_USE_CSF
{
if (reset_required) {
if (ret != GPU_STATE_INTACT) {
/* GPU reset was done after the power on, so send the post
* reset event instead. This is okay as GPU power off event
* is same as pre GPU reset event.
@@ -3139,6 +3161,7 @@ static int kbase_set_tiler_quirks(struct kbase_device *kbdev)
return 0;
}


static int kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
{
struct device_node *np = kbdev->dev->of_node;
@@ -3191,6 +3214,7 @@ static int kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
error = kbase_set_mmu_quirks(kbdev);
}


return error;
}

@@ -3210,6 +3234,7 @@ static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
#else
kbase_reg_write32(kbdev, GPU_CONTROL_ENUM(JM_CONFIG), kbdev->hw_quirks_gpu);
#endif

}

void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
@@ -3257,16 +3282,10 @@ static void reenable_protected_mode_hwcnt(struct kbase_device *kbdev)
}
#endif

static int kbase_pm_do_reset(struct kbase_device *kbdev)
static int kbase_pm_do_reset_soft(struct kbase_device *kbdev)
{
struct kbasep_reset_timeout_data rtdata;
u32 reg_offset, reg_val;
int ret;

KBASE_KTRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, 0);

KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev, kbdev);

if (kbdev->pm.backend.callback_soft_reset) {
ret = kbdev->pm.backend.callback_soft_reset(kbdev);
if (ret < 0)
@@ -3279,12 +3298,30 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
GPU_COMMAND_SOFT_RESET);
}
}
return 0;
}

reg_offset = GPU_CONTROL_ENUM(GPU_IRQ_MASK);
reg_val = RESET_COMPLETED;
static int kbase_pm_do_reset(struct kbase_device *kbdev)
{
struct kbasep_reset_timeout_data rtdata;
u32 reg_offset, reg_val;
int ret;

/* Unmask the reset complete interrupt only */
kbase_reg_write32(kbdev, reg_offset, reg_val);
KBASE_KTRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, 0);

KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev, kbdev);

{
ret = kbase_pm_do_reset_soft(kbdev);
if (ret)
return ret;

reg_offset = GPU_CONTROL_ENUM(GPU_IRQ_MASK);
reg_val = RESET_COMPLETED;

/* Unmask the reset complete interrupt only */
kbase_reg_write32(kbdev, reg_offset, reg_val);
}

/* Initialize a structure for tracking the status of the reset */
rtdata.kbdev = kbdev;
@@ -3335,7 +3372,7 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
*/
#ifdef CONFIG_MALI_ARBITER_SUPPORT
if (!kbdev->arb.arb_if) {
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
#endif
dev_err(kbdev->dev,
"Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
RESET_TIMEOUT);
@@ -3367,7 +3404,7 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
RESET_TIMEOUT);
#ifdef CONFIG_MALI_ARBITER_SUPPORT
}
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
#endif

return -EINVAL;
}
@@ -3418,9 +3455,7 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);

/* Soft reset the GPU */
#ifdef CONFIG_MALI_ARBITER_SUPPORT
if (!(flags & PM_NO_RESET))
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
err = kbdev->protected_ops->protected_mode_disable(kbdev->protected_dev);

spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
@@ -3441,7 +3476,6 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
if (err)
goto exit;


if (flags & PM_HW_ISSUES_DETECT) {
err = kbase_pm_hw_issues_detect(kbdev);
if (err)
@@ -3451,6 +3485,9 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
kbase_pm_hw_issues_apply(kbdev);
kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
kbase_amba_set_shareable_cache_support(kbdev);
#if MALI_USE_CSF
kbase_backend_update_gpu_timestamp_offset(kbdev);
#endif

/* Sanity check protected mode was left after reset */
WARN_ON(kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(GPU_STATUS)) &

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -127,7 +127,7 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
pm->backend.poweroff_wait_in_progress = false;
pm->backend.l2_desired = true;
#if MALI_USE_CSF
pm->backend.mcu_desired = true;
pm->backend.mcu_desired = pm->backend.mcu_poweron_required;
#endif

spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -30,10 +30,7 @@
#include <mali_kbase_config_defaults.h>
#include <linux/version_compat_defs.h>
#include <asm/arch_timer.h>

#if !IS_ENABLED(CONFIG_MALI_REAL_HW)
#include <backend/gpu/mali_kbase_model_linux.h>
#endif
#include <linux/mali_hw_access.h>

struct kbase_timeout_info {
char *selector_str;
@@ -41,12 +38,16 @@ struct kbase_timeout_info {
};

#if MALI_USE_CSF

#define GPU_TIMESTAMP_OFFSET_INVALID S64_MAX

static struct kbase_timeout_info timeout_info[KBASE_TIMEOUT_SELECTOR_COUNT] = {
[CSF_FIRMWARE_TIMEOUT] = { "CSF_FIRMWARE_TIMEOUT", MIN(CSF_FIRMWARE_TIMEOUT_CYCLES,
CSF_FIRMWARE_PING_TIMEOUT_CYCLES) },
[CSF_PM_TIMEOUT] = { "CSF_PM_TIMEOUT", CSF_PM_TIMEOUT_CYCLES },
[CSF_GPU_RESET_TIMEOUT] = { "CSF_GPU_RESET_TIMEOUT", CSF_GPU_RESET_TIMEOUT_CYCLES },
[CSF_CSG_SUSPEND_TIMEOUT] = { "CSF_CSG_SUSPEND_TIMEOUT", CSF_CSG_SUSPEND_TIMEOUT_CYCLES },
[CSF_CSG_TERM_TIMEOUT] = { "CSF_CSG_TERM_TIMEOUT", CSF_CSG_TERM_TIMEOUT_CYCLES },
[CSF_FIRMWARE_BOOT_TIMEOUT] = { "CSF_FIRMWARE_BOOT_TIMEOUT",
CSF_FIRMWARE_BOOT_TIMEOUT_CYCLES },
[CSF_FIRMWARE_PING_TIMEOUT] = { "CSF_FIRMWARE_PING_TIMEOUT",
@@ -82,6 +83,68 @@ static struct kbase_timeout_info timeout_info[KBASE_TIMEOUT_SELECTOR_COUNT] = {
};
#endif

#if MALI_USE_CSF
void kbase_backend_invalidate_gpu_timestamp_offset(struct kbase_device *kbdev)
{
kbdev->backend_time.gpu_timestamp_offset = GPU_TIMESTAMP_OFFSET_INVALID;
}
KBASE_EXPORT_TEST_API(kbase_backend_invalidate_gpu_timestamp_offset);

/**
* kbase_backend_compute_gpu_ts_offset() - Compute GPU TS offset.
*
* @kbdev: Kbase device.
*
* This function computes the offset between the GPU and CPU timestamps:
* - set to zero current TIMESTAMP_OFFSET register
* - read CPU TS and convert it to ticks
* - read GPU TS
* - calculate diff between CPU and GPU ticks
* - cache the diff as the GPU TS offset
*
* To reduce delays, preemption must be disabled during the reads of both the
* CPU and GPU timestamps. This function requires GPU register access to be
* enabled.
*/
static inline void kbase_backend_compute_gpu_ts_offset(struct kbase_device *kbdev)
{
s64 cpu_ts_ticks = 0;
s64 gpu_ts_ticks = 0;

if (kbdev->backend_time.gpu_timestamp_offset != GPU_TIMESTAMP_OFFSET_INVALID)
return;

kbase_reg_write64(kbdev, GPU_CONTROL_ENUM(TIMESTAMP_OFFSET), 0);

gpu_ts_ticks = kbase_reg_read64_coherent(kbdev, GPU_CONTROL_ENUM(TIMESTAMP));
cpu_ts_ticks = ktime_get_raw_ns();
cpu_ts_ticks = div64_u64(cpu_ts_ticks * kbdev->backend_time.divisor,
kbdev->backend_time.multiplier);
kbdev->backend_time.gpu_timestamp_offset = cpu_ts_ticks - gpu_ts_ticks;
}

void kbase_backend_update_gpu_timestamp_offset(struct kbase_device *kbdev)
{
lockdep_assert_held(&kbdev->pm.lock);

kbase_backend_compute_gpu_ts_offset(kbdev);

dev_dbg(kbdev->dev, "Setting GPU timestamp offset register to %lld (%lld ns)",
kbdev->backend_time.gpu_timestamp_offset,
div64_s64(kbdev->backend_time.gpu_timestamp_offset *
(s64)kbdev->backend_time.multiplier,
(s64)kbdev->backend_time.divisor));
kbase_reg_write64(kbdev, GPU_CONTROL_ENUM(TIMESTAMP_OFFSET),
kbdev->backend_time.gpu_timestamp_offset);
}
#if MALI_UNIT_TEST
u64 kbase_backend_read_gpu_timestamp_offset_reg(struct kbase_device *kbdev)
{
return kbase_reg_read64_coherent(kbdev, GPU_CONTROL_ENUM(TIMESTAMP_OFFSET));
}
KBASE_EXPORT_TEST_API(kbase_backend_read_gpu_timestamp_offset_reg);
#endif
#endif
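
For reference, the offset computed above relates CPU time in nanoseconds to GPU timer ticks through the rational factor multiplier/divisor (nanoseconds per tick). A standalone sketch of the same arithmetic, assuming the multiplier/divisor pair has already been derived from the timer frequency:

    #include <stdint.h>

    /* cpu_ticks = cpu_ns * divisor / multiplier; offset = cpu_ticks - gpu_ticks.
     * Writing this offset to TIMESTAMP_OFFSET makes the GPU timestamp read back
     * in the CPU's monotonic-raw timeline. (Sketch only; the driver uses
     * div64_u64 and caches the result in backend_time.gpu_timestamp_offset.)
     */
    static int64_t ts_offset(uint64_t cpu_ns, uint64_t gpu_ticks,
                             uint64_t multiplier, uint64_t divisor)
    {
        int64_t cpu_ticks = (int64_t)(cpu_ns * divisor / multiplier);

        return cpu_ticks - (int64_t)gpu_ticks;
    }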

void kbase_backend_get_gpu_time_norequest(struct kbase_device *kbdev, u64 *cycle_counter,
u64 *system_time, struct timespec64 *ts)
{
@@ -100,6 +163,7 @@ void kbase_backend_get_gpu_time_norequest(struct kbase_device *kbdev, u64 *cycle
ktime_get_raw_ts64(ts);
#endif
}
KBASE_EXPORT_TEST_API(kbase_backend_get_gpu_time_norequest);

#if !MALI_USE_CSF
/**
@@ -143,6 +207,7 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
kbase_pm_release_gpu_cycle_counter(kbdev);
#endif
}
KBASE_EXPORT_TEST_API(kbase_backend_get_gpu_time);

static u64 kbase_device_get_scaling_frequency(struct kbase_device *kbdev)
{
@@ -282,36 +347,14 @@ u64 __maybe_unused kbase_backend_time_convert_gpu_to_cpu(struct kbase_device *kb
if (WARN_ON(!kbdev))
return 0;

return div64_u64(gpu_ts * kbdev->backend_time.multiplier, kbdev->backend_time.divisor) +
kbdev->backend_time.offset;
}

/**
* get_cpu_gpu_time() - Get current CPU and GPU timestamps.
*
* @kbdev: Kbase device.
* @cpu_ts: Output CPU timestamp.
* @gpu_ts: Output GPU timestamp.
* @gpu_cycle: Output GPU cycle counts.
*/
static void get_cpu_gpu_time(struct kbase_device *kbdev, u64 *cpu_ts, u64 *gpu_ts, u64 *gpu_cycle)
{
struct timespec64 ts;

kbase_backend_get_gpu_time(kbdev, gpu_cycle, gpu_ts, &ts);

if (cpu_ts)
*cpu_ts = (u64)(ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec);
return div64_u64(gpu_ts * kbdev->backend_time.multiplier, kbdev->backend_time.divisor);
}
KBASE_EXPORT_TEST_API(kbase_backend_time_convert_gpu_to_cpu);
#endif
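
Note the behavioural change in kbase_backend_time_convert_gpu_to_cpu() above: the cached software offset is no longer added, which is consistent with the TIMESTAMP_OFFSET register (programmed in the block earlier in this file) already folding the offset into the GPU timestamp. A sketch of old versus new conversion, with the driver's multiplier/divisor semantics assumed:

    /* old: cpu_ns = gpu_ts * multiplier / divisor + backend_time.offset */
    /* new: cpu_ns = gpu_ts * multiplier / divisor                       */
    static uint64_t gpu_to_cpu_ns(uint64_t gpu_ts, uint64_t mult, uint64_t div)
    {
        return gpu_ts * mult / div; /* offset now applied by hardware */
    }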

u64 kbase_arch_timer_get_cntfrq(struct kbase_device *kbdev)
{
u64 freq = arch_timer_get_cntfrq();

#if !IS_ENABLED(CONFIG_MALI_REAL_HW)
freq = midgard_model_arch_timer_get_cntfrq(kbdev->model);
#endif
u64 freq = mali_arch_timer_get_cntfrq();

dev_dbg(kbdev->dev, "System Timer Freq = %lluHz", freq);

@@ -322,13 +365,10 @@ int kbase_backend_time_init(struct kbase_device *kbdev)
{
int err = 0;
#if MALI_USE_CSF
u64 cpu_ts = 0;
u64 gpu_ts = 0;
u64 freq;
u64 common_factor;

kbase_pm_register_access_enable(kbdev);
get_cpu_gpu_time(kbdev, &cpu_ts, &gpu_ts, NULL);
freq = kbase_arch_timer_get_cntfrq(kbdev);

if (!freq) {
@@ -348,9 +388,8 @@ int kbase_backend_time_init(struct kbase_device *kbdev)
goto disable_registers;
}

kbdev->backend_time.offset =
(s64)(cpu_ts - div64_u64(gpu_ts * kbdev->backend_time.multiplier,
kbdev->backend_time.divisor));
kbase_backend_invalidate_gpu_timestamp_offset(
kbdev); /* force computation of GPU Timestamp offset */
#endif

if (kbase_timeout_scaling_init(kbdev)) {

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2017-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2017-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -71,18 +71,6 @@ bob_defaults {
mali_real_hw: {
kbuild_options: ["CONFIG_MALI_REAL_HW=y"],
},
mali_error_inject_none: {
kbuild_options: ["CONFIG_MALI_ERROR_INJECT_NONE=y"],
},
mali_error_inject_track_list: {
kbuild_options: ["CONFIG_MALI_ERROR_INJECT_TRACK_LIST=y"],
},
mali_error_inject_random: {
kbuild_options: ["CONFIG_MALI_ERROR_INJECT_RANDOM=y"],
},
mali_error_inject: {
kbuild_options: ["CONFIG_MALI_BIFROST_ERROR_INJECT=y"],
},
mali_debug: {
kbuild_options: [
"CONFIG_MALI_BIFROST_DEBUG=y",
@@ -239,6 +227,7 @@ bob_kernel_module {
"jm/*.h",
"tl/backend/*_jm.c",
"mmu/backend/*_jm.c",
"mmu/backend/*_jm.h",
"ipa/backend/*_jm.c",
"ipa/backend/*_jm.h",
],
@@ -263,6 +252,7 @@ bob_kernel_module {
"hwcnt/backend/*_csf_*.h",
"tl/backend/*_csf.c",
"mmu/backend/*_csf.c",
"mmu/backend/*_csf.h",
"ipa/backend/*_csf.c",
"ipa/backend/*_csf.h",
],

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -116,8 +116,7 @@ static void kbase_context_term_partial(struct kbase_context *kctx, unsigned int

struct kbase_context *kbase_create_context(struct kbase_device *kbdev, bool is_compat,
base_context_create_flags const flags,
unsigned long const api_version,
struct kbase_file *const kfile)
unsigned long const api_version, struct file *const filp)
{
struct kbase_context *kctx;
unsigned int i = 0;
@@ -136,7 +135,7 @@ struct kbase_context *kbase_create_context(struct kbase_device *kbdev, bool is_c

kctx->kbdev = kbdev;
kctx->api_version = api_version;
kctx->kfile = kfile;
kctx->filp = filp;
kctx->create_flags = flags;

memcpy(kctx->comm, current->comm, sizeof(current->comm));
@@ -187,11 +186,17 @@ void kbase_destroy_context(struct kbase_context *kctx)
* Customer side that a hang could occur if context termination is
* not blocked until the resume of GPU device.
*/
#ifdef CONFIG_MALI_ARBITER_SUPPORT
atomic_inc(&kbdev->pm.gpu_users_waiting);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
while (kbase_pm_context_active_handle_suspend(kbdev,
KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
dev_info(kbdev->dev, "Suspend in progress when destroying context");
dev_dbg(kbdev->dev, "Suspend in progress when destroying context");
wait_event(kbdev->pm.resume_wait, !kbase_pm_is_suspending(kbdev));
}
#ifdef CONFIG_MALI_ARBITER_SUPPORT
atomic_dec(&kbdev->pm.gpu_users_waiting);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

/* Have synchronized against the System suspend and incremented the
* pm.active_count. So any subsequent invocation of System suspend

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -168,8 +168,7 @@ static void kbase_context_term_partial(struct kbase_context *kctx, unsigned int

struct kbase_context *kbase_create_context(struct kbase_device *kbdev, bool is_compat,
base_context_create_flags const flags,
unsigned long const api_version,
struct kbase_file *const kfile)
unsigned long const api_version, struct file *const filp)
{
struct kbase_context *kctx;
unsigned int i = 0;
@@ -188,7 +187,7 @@ struct kbase_context *kbase_create_context(struct kbase_device *kbdev, bool is_c

kctx->kbdev = kbdev;
kctx->api_version = api_version;
kctx->kfile = kfile;
kctx->filp = filp;
kctx->create_flags = flags;

if (is_compat)

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -141,7 +141,7 @@ int kbase_context_common_init(struct kbase_context *kctx)
kctx->pid = task_pid_vnr(current);

/* Check if this is a Userspace created context */
if (likely(kctx->kfile)) {
if (likely(kctx->filp)) {
struct pid *pid_struct;

rcu_read_lock();
@@ -184,6 +184,8 @@ int kbase_context_common_init(struct kbase_context *kctx)
spin_lock_init(&kctx->waiting_soft_jobs_lock);
INIT_LIST_HEAD(&kctx->waiting_soft_jobs);

init_waitqueue_head(&kctx->event_queue);

kbase_gpu_vm_lock(kctx);
bitmap_copy(kctx->cookies, &cookies_mask, BITS_PER_LONG);
kbase_gpu_vm_unlock(kctx);
@@ -195,7 +197,7 @@ int kbase_context_common_init(struct kbase_context *kctx)
mutex_unlock(&kctx->kbdev->kctx_list_lock);
if (err) {
dev_err(kctx->kbdev->dev, "(err:%d) failed to insert kctx to kbase_process", err);
if (likely(kctx->kfile)) {
if (likely(kctx->filp)) {
mmdrop(kctx->process_mm);
put_task_struct(kctx->task);
}
@@ -284,7 +286,7 @@ void kbase_context_common_term(struct kbase_context *kctx)
kbase_remove_kctx_from_process(kctx);
mutex_unlock(&kctx->kbdev->kctx_list_lock);

if (likely(kctx->kfile)) {
if (likely(kctx->filp)) {
mmdrop(kctx->process_mm);
put_task_struct(kctx->task);
}

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2011-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2011-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -56,9 +56,9 @@ void kbase_context_debugfs_term(struct kbase_context *const kctx);
* BASEP_CONTEXT_CREATE_KERNEL_FLAGS.
* @api_version: Application program interface version, as encoded in
* a single integer by the KBASE_API_VERSION macro.
* @kfile: Pointer to the object representing the /dev/malixx device
* file instance. Shall be passed as NULL for internally created
* contexts.
* @filp: Pointer to the struct file corresponding to device file
* /dev/malixx instance, passed to the file's open method.
* Shall be passed as NULL for internally created contexts.
*
* Up to one context can be created for each client that opens the device file
* /dev/malixx. Context creation is deferred until a special ioctl() system call
@@ -68,8 +68,7 @@ void kbase_context_debugfs_term(struct kbase_context *const kctx);
*/
struct kbase_context *kbase_create_context(struct kbase_device *kbdev, bool is_compat,
base_context_create_flags const flags,
unsigned long api_version,
struct kbase_file *const kfile);
unsigned long api_version, struct file *filp);

/**
* kbase_destroy_context - Destroy a kernel base context.

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -539,6 +539,8 @@ static int csf_queue_register_internal(struct kbase_context *kctx,

queue->blocked_reason = CS_STATUS_BLOCKED_REASON_REASON_UNBLOCKED;

queue->clear_faults = true;

INIT_LIST_HEAD(&queue->link);
atomic_set(&queue->pending_kick, 0);
INIT_LIST_HEAD(&queue->pending_kick_link);
@@ -589,11 +591,19 @@ int kbase_csf_queue_register_ex(struct kbase_context *kctx,
u32 const glb_version = iface->version;
u32 instr = iface->instr_features;
u8 max_size = GLB_INSTR_FEATURES_EVENT_SIZE_MAX_GET(instr);
u32 min_buf_size =
(1u << reg->ex_event_size) * GLB_INSTR_FEATURES_OFFSET_UPDATE_RATE_GET(instr);
const u8 event_size = reg->ex_event_size;
u64 min_buf_size;

/* If cs_trace_command not supported, the call fails */
if (glb_version < kbase_csf_interface_version(1, 1, 0))
return -EPERM;

/* Sanity check to avoid shift-out-of-bounds */
if (event_size >= 32)
return -EINVAL;

min_buf_size = ((u64)1 << event_size) * GLB_INSTR_FEATURES_OFFSET_UPDATE_RATE_GET(instr);
if (min_buf_size > UINT32_MAX)
return -EINVAL;

/* Validate the ring buffer configuration parameters */
@@ -605,8 +615,8 @@ int kbase_csf_queue_register_ex(struct kbase_context *kctx,

/* Validate the cs_trace configuration parameters */
if (reg->ex_buffer_size &&
((reg->ex_event_size > max_size) || (reg->ex_buffer_size & (reg->ex_buffer_size - 1)) ||
(reg->ex_buffer_size < min_buf_size)))
((event_size > max_size) || (reg->ex_buffer_size & (reg->ex_buffer_size - 1)) ||
(reg->ex_buffer_size < (u32)min_buf_size)))
return -EINVAL;

return csf_queue_register_internal(kctx, NULL, reg);
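
The added validation above closes two integer hazards in the cs_trace path: an undefined shift when ex_event_size is 32 or more, and a u32 overflow when the computed minimum buffer size exceeds UINT32_MAX. A self-contained sketch of the guard pattern (rate stands in for GLB_INSTR_FEATURES_OFFSET_UPDATE_RATE_GET(instr)):

    #include <stdint.h>

    static int min_buf_size_ok(uint8_t event_size, uint32_t rate, uint32_t buf_size)
    {
        uint64_t min_buf_size;

        if (event_size >= 32) /* 1u << 32 would be undefined behaviour */
            return 0;
        min_buf_size = ((uint64_t)1 << event_size) * rate; /* widen before shifting */
        if (min_buf_size > UINT32_MAX) /* would wrap if kept in u32 */
            return 0;
        return buf_size >= (uint32_t)min_buf_size;
    }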
@@ -734,7 +744,7 @@ out:
}

/**
* get_bound_queue_group - Get the group to which a queue was bound
* get_bound_queue_group() - Get the group to which a queue was bound
*
* @queue: Pointer to the queue for this group
*
@@ -847,6 +857,47 @@ void kbase_csf_ring_cs_kernel_doorbell(struct kbase_device *kbdev, int csi_index
kbase_csf_ring_csg_doorbell(kbdev, csg_nr);
}

int kbase_csf_queue_group_clear_faults(struct kbase_context *kctx,
struct kbase_ioctl_queue_group_clear_faults *faults)
{
void __user *user_bufs = u64_to_user_ptr(faults->addr);
u32 i;
struct kbase_device *kbdev = kctx->kbdev;
const u32 nr_queues = faults->nr_queues;

if (unlikely(nr_queues > kbdev->csf.global_iface.groups[0].stream_num)) {
dev_warn(kbdev->dev, "Invalid nr_queues %u", nr_queues);
return -EINVAL;
}

for (i = 0; i < nr_queues; ++i) {
u64 buf_gpu_addr;
struct kbase_va_region *region;

if (copy_from_user(&buf_gpu_addr, user_bufs, sizeof(buf_gpu_addr)))
return -EFAULT;
mutex_lock(&kctx->csf.lock);
kbase_gpu_vm_lock(kctx);
region = kbase_region_tracker_find_region_enclosing_address(kctx, buf_gpu_addr);
if (likely(!kbase_is_region_invalid_or_free(region))) {
struct kbase_queue *queue = region->user_data;

queue->clear_faults = true;
} else {
dev_warn(kbdev->dev, "GPU queue %u without a valid command buffer region",
i);
kbase_gpu_vm_unlock(kctx);
mutex_unlock(&kctx->csf.lock);
return -EFAULT;
}
kbase_gpu_vm_unlock(kctx);
mutex_unlock(&kctx->csf.lock);
user_bufs = (void __user *)((uintptr_t)user_bufs + sizeof(buf_gpu_addr));
}

return 0;
}
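
The new handler above takes an array of GPU virtual addresses from userspace and re-arms clear_faults on each queue whose ring buffer encloses the given address. A hedged sketch of the userspace side; the struct layout follows the handler, but the ioctl command macro shown here is hypothetical (the uapi header is not part of this hunk):

    /* Hypothetical caller: re-enable fault reporting on two queues. */
    uint64_t queue_buf_addrs[2] = { q0_gpu_va, q1_gpu_va };
    struct kbase_ioctl_queue_group_clear_faults faults = {
        .addr = (uint64_t)(uintptr_t)queue_buf_addrs, /* array of u64 GPU VAs */
        .nr_queues = 2,
    };

    if (ioctl(kbase_fd, KBASE_IOCTL_QUEUE_GROUP_CLEAR_FAULTS, &faults))
        perror("clear_faults");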

int kbase_csf_queue_kick(struct kbase_context *kctx, struct kbase_ioctl_cs_queue_kick *kick)
{
struct kbase_device *kbdev = kctx->kbdev;
@@ -868,7 +919,7 @@ int kbase_csf_queue_kick(struct kbase_context *kctx, struct kbase_ioctl_cs_queue
struct kbase_queue *queue = region->user_data;

if (queue && (queue->bind_state == KBASE_CSF_QUEUE_BOUND)) {
spin_lock(&kbdev->csf.pending_gpuq_kicks_lock);
spin_lock(&kbdev->csf.pending_gpuq_kick_queues_lock);
if (list_empty(&queue->pending_kick_link)) {
/* Queue termination shall block until this
* kick has been handled.
@@ -876,10 +927,12 @@ int kbase_csf_queue_kick(struct kbase_context *kctx, struct kbase_ioctl_cs_queue
atomic_inc(&queue->pending_kick);
list_add_tail(
&queue->pending_kick_link,
&kbdev->csf.pending_gpuq_kicks[queue->group_priority]);
complete(&kbdev->csf.scheduler.kthread_signal);
&kbdev->csf.pending_gpuq_kick_queues[queue->group_priority]);
if (atomic_cmpxchg(&kbdev->csf.pending_gpuq_kicks, false, true) ==
false)
complete(&kbdev->csf.scheduler.kthread_signal);
}
spin_unlock(&kbdev->csf.pending_gpuq_kicks_lock);
spin_unlock(&kbdev->csf.pending_gpuq_kick_queues_lock);
}
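
The kick path above now signals the scheduler kthread only on the false-to-true transition of pending_gpuq_kicks, so a burst of kicks produces one wakeup instead of one per kick. The pattern in isolation (a sketch, not driver code; the consumer side shows one workable arrangement):

    /* Producer: publish work, wake the consumer only once per batch. */
    if (atomic_cmpxchg(&pending, false, true) == false)
        complete(&kthread_signal);

    /* Consumer: clear the flag before draining, so a kick arriving
     * while draining re-arms the wakeup.
     */
    if (atomic_cmpxchg(&pending, true, false) == true)
        drain_pending_queues();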
} else {
dev_dbg(kbdev->dev,
@@ -1095,12 +1148,11 @@ static int create_normal_suspend_buffer(struct kbase_context *const kctx,
}

static void timer_event_worker(struct work_struct *data);
static void protm_event_worker(struct work_struct *data);
static void term_normal_suspend_buffer(struct kbase_context *const kctx,
struct kbase_normal_suspend_buffer *s_buf);

/**
* create_suspend_buffers - Setup normal and protected mode
* create_suspend_buffers() - Setup normal and protected mode
* suspend buffers.
*
* @kctx: Address of the kbase context within which the queue group
@@ -1199,6 +1251,8 @@ static int create_queue_group(struct kbase_context *const kctx,
group->deschedule_deferred_cnt = 0;
#endif

group->cs_fault_report_enable = create->in.cs_fault_report_enable;

group->group_uid = generate_group_uid();
create->out.group_uid = group->group_uid;

@@ -1206,7 +1260,8 @@ static int create_queue_group(struct kbase_context *const kctx,
INIT_LIST_HEAD(&group->link_to_schedule);
INIT_LIST_HEAD(&group->error_fatal.link);
INIT_WORK(&group->timer_event_work, timer_event_worker);
INIT_WORK(&group->protm_event_work, protm_event_worker);
INIT_LIST_HEAD(&group->protm_event_work);
atomic_set(&group->pending_protm_event_work, 0);
bitmap_zero(group->protm_pending_bitmap, MAX_SUPPORTED_STREAMS_PER_GROUP);

group->run_state = KBASE_CSF_GROUP_INACTIVE;
@@ -1254,10 +1309,8 @@ int kbase_csf_queue_group_create(struct kbase_context *const kctx,
size_t i;

for (i = 0; i < ARRAY_SIZE(create->in.padding); i++) {
if (create->in.padding[i] != 0) {
dev_warn(kctx->kbdev->dev, "Invalid padding not 0 in queue group create\n");
if (create->in.padding[i] != 0)
return -EINVAL;
}
}

mutex_lock(&kctx->csf.lock);
@@ -1379,7 +1432,7 @@ void kbase_csf_term_descheduled_queue_group(struct kbase_queue_group *group)
}

/**
* term_queue_group - Terminate a GPU command queue group.
* term_queue_group() - Terminate a GPU command queue group.
*
* @group: Pointer to GPU command queue group data.
*
@@ -1407,8 +1460,8 @@ static void term_queue_group(struct kbase_queue_group *group)
}

/**
* wait_group_deferred_deschedule_completion - Wait for refcount of the group to
* become 0 that was taken when the group deschedule had to be deferred.
* wait_group_deferred_deschedule_completion() - Wait for refcount of the group
* to become 0 that was taken when the group deschedule had to be deferred.
*
* @group: Pointer to GPU command queue group that is being deleted.
*
@@ -1437,7 +1490,10 @@ static void wait_group_deferred_deschedule_completion(struct kbase_queue_group *
static void cancel_queue_group_events(struct kbase_queue_group *group)
{
cancel_work_sync(&group->timer_event_work);
cancel_work_sync(&group->protm_event_work);

/* Drain a pending protected mode request if any */
kbase_csf_scheduler_wait_for_kthread_pending_work(group->kctx->kbdev,
&group->pending_protm_event_work);
}

static void remove_pending_group_fatal_error(struct kbase_queue_group *group)
@@ -1592,6 +1648,7 @@ int kbase_csf_ctx_init(struct kbase_context *kctx)

INIT_LIST_HEAD(&kctx->csf.queue_list);
INIT_LIST_HEAD(&kctx->csf.link);
atomic_set(&kctx->csf.pending_sync_update, 0);

kbase_csf_event_init(kctx);

@@ -1827,7 +1884,7 @@ void kbase_csf_ctx_term(struct kbase_context *kctx)
}

/**
* handle_oom_event - Handle the OoM event generated by the firmware for the
* handle_oom_event() - Handle the OoM event generated by the firmware for the
* CSI.
*
* @group: Pointer to the CSG group the oom-event belongs to.
@@ -1902,7 +1959,7 @@ static int handle_oom_event(struct kbase_queue_group *const group,
}

/**
* report_tiler_oom_error - Report a CSG error due to a tiler heap OOM event
* report_tiler_oom_error() - Report a CSG error due to a tiler heap OOM event
*
* @group: Pointer to the GPU command queue group that encountered the error
*/
@@ -1945,7 +2002,7 @@ static void flush_gpu_cache_on_fatal_error(struct kbase_device *kbdev)
}

/**
* kbase_queue_oom_event - Handle tiler out-of-memory for a GPU command queue.
* kbase_queue_oom_event() - Handle tiler out-of-memory for a GPU command queue.
*
* @queue: Pointer to queue for which out-of-memory event was received.
*
@@ -2033,7 +2090,7 @@ unlock:
}

/**
* oom_event_worker - Tiler out-of-memory handler called from a workqueue.
* oom_event_worker() - Tiler out-of-memory handler called from a workqueue.
*
* @data: Pointer to a work_struct embedded in GPU command queue data.
*
@@ -2061,7 +2118,8 @@ static void oom_event_worker(struct work_struct *data)
}

/**
* report_group_timeout_error - Report the timeout error for the group to userspace.
* report_group_timeout_error() - Report the timeout error for the group to
* userspace.
*
* @group: Pointer to the group for which timeout error occurred
*/
@@ -2085,7 +2143,7 @@ static void report_group_timeout_error(struct kbase_queue_group *const group)
}

/**
* timer_event_worker - Handle the progress timeout error for the group
* timer_event_worker() - Handle the progress timeout error for the group
*
* @data: Pointer to a work_struct embedded in GPU command queue group data.
*
@@ -2120,7 +2178,7 @@ static void timer_event_worker(struct work_struct *data)
}

/**
* handle_progress_timer_event - Progress timer timeout event handler.
* handle_progress_timer_event() - Progress timer timeout event handler.
*
* @group: Pointer to GPU queue group for which the timeout event is received.
*
@@ -2211,41 +2269,7 @@ static void report_group_fatal_error(struct kbase_queue_group *const group)
}

/**
* protm_event_worker - Protected mode switch request event handler
* called from a workqueue.
*
* @data: Pointer to a work_struct embedded in GPU command queue group data.
*
* Request to switch to protected mode.
*/
static void protm_event_worker(struct work_struct *data)
{
struct kbase_queue_group *const group =
container_of(data, struct kbase_queue_group, protm_event_work);
struct kbase_protected_suspend_buffer *sbuf = &group->protected_suspend_buf;
int err = 0;

KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, PROTM_EVENT_WORKER_START, group, 0u);

err = alloc_grp_protected_suspend_buffer_pages(group);
if (!err) {
kbase_csf_scheduler_group_protm_enter(group);
} else if (err == -ENOMEM && sbuf->alloc_retries <= PROTM_ALLOC_MAX_RETRIES) {
sbuf->alloc_retries++;
/* try again to allocate pages */
queue_work(group->kctx->csf.wq, &group->protm_event_work);
} else if (sbuf->alloc_retries >= PROTM_ALLOC_MAX_RETRIES || err != -ENOMEM) {
dev_err(group->kctx->kbdev->dev,
"Failed to allocate physical pages for Protected mode suspend buffer for the group %d of context %d_%d",
group->handle, group->kctx->tgid, group->kctx->id);
report_group_fatal_error(group);
}

KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, PROTM_EVENT_WORKER_END, group, 0u);
}

/**
* handle_fault_event - Handler for CS fault.
* handle_fault_event() - Handler for CS fault.
*
* @queue: Pointer to queue for which fault event was received.
* @cs_ack: Value of the CS_ACK register in the CS kernel input page used for
@@ -2286,47 +2310,32 @@ static void handle_fault_event(struct kbase_queue *const queue, const u32 cs_ack
cs_fault_info_exception_data);

#if IS_ENABLED(CONFIG_DEBUG_FS)
/* CS_RESOURCE_TERMINATED type fault event can be ignored from the
* standpoint of dump on error. It is used to report fault for the CSIs
* that are associated with the same CSG as the CSI for which the actual
* fault was reported by the Iterator.
* Dumping would be triggered when the actual fault is reported.
/* If dump-on-fault daemon is waiting for a fault, wake up the daemon.
* Acknowledging the fault is deferred to the bottom-half until the wait
* of the dump completion is done.
*
* CS_INHERIT_FAULT can also be ignored. It could happen due to the error
* in other types of queues (cpu/kcpu). If a fault had occurred in some
* other GPU queue then the dump would have been performed anyways when
* that fault was reported.
* Otherwise acknowledge the fault and ring the doorbell for the faulty queue
* to enter into recoverable state.
*/
if ((cs_fault_exception_type != CS_FAULT_EXCEPTION_TYPE_CS_INHERIT_FAULT) &&
(cs_fault_exception_type != CS_FAULT_EXCEPTION_TYPE_CS_RESOURCE_TERMINATED)) {
if (unlikely(kbase_debug_csf_fault_notify(kbdev, queue->kctx, DF_CS_FAULT))) {
queue->cs_error = cs_fault;
queue->cs_error_info = cs_fault_info;
queue->cs_error_fatal = false;
queue_work(queue->kctx->csf.wq, &queue->cs_error_work);
return;
}
}
#endif
if (likely(!kbase_debug_csf_fault_notify(kbdev, queue->kctx, DF_CS_FAULT))) {
kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack, CS_REQ_FAULT_MASK);
kbase_csf_ring_cs_kernel_doorbell(kbdev, queue->csi_index, queue->group->csg_nr,
true);
queue->cs_error_acked = true;
} else
queue->cs_error_acked = false;

kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack, CS_REQ_FAULT_MASK);
kbase_csf_ring_cs_kernel_doorbell(kbdev, queue->csi_index, queue->group->csg_nr, true);
queue->cs_error = cs_fault;
queue->cs_error_info = cs_fault_info;
queue->cs_error_fatal = false;
if (!queue_work(queue->kctx->csf.wq, &queue->cs_error_work))
dev_warn(kbdev->dev, "%s: failed to enqueue a work", __func__);
}
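
The CS_REQ/CS_ACK fault handshaking above works on a toggle bit: firmware flips the FAULT bit in CS_ACK to raise the event, and the host acknowledges by making the FAULT bit of CS_REQ match CS_ACK again. A sketch of the idiom used both here and in the bottom-half that follows (stream, kbdev, csi_index and csg_nr are assumed to be in scope):

    /* Fault still unacknowledged while the REQ and ACK toggle bits disagree. */
    bool fault_unacked = (cs_ack & CS_ACK_FAULT_MASK) !=
                         (cs_req & CS_REQ_FAULT_MASK);

    if (fault_unacked) {
        /* Copy the ACK toggle into REQ, then ring the doorbell so the
         * firmware sees the acknowledgement and the CS can recover.
         */
        kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack,
                                         CS_REQ_FAULT_MASK);
        kbase_csf_ring_cs_kernel_doorbell(kbdev, csi_index, csg_nr, true);
    }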
|
||||
|
||||
static void report_queue_fatal_error(struct kbase_queue *const queue, u32 cs_fatal,
|
||||
u64 cs_fatal_info, struct kbase_queue_group *group)
|
||||
static void report_queue_error(struct kbase_queue *const queue, u32 cs_error, u64 cs_error_info,
|
||||
struct kbase_queue_group *group, bool fatal)
|
||||
{
|
||||
struct base_csf_notification
|
||||
error = { .type = BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR,
|
||||
.payload = {
|
||||
.csg_error = {
|
||||
.error = { .error_type =
|
||||
BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL,
|
||||
.payload = { .fatal_queue = {
|
||||
.sideband = cs_fatal_info,
|
||||
.status = cs_fatal,
|
||||
} } } } } };
|
||||
struct base_csf_notification error = { .type = BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR };
|
||||
|
||||
if (!queue)
|
||||
return;
|
||||
|
|
@ -2335,17 +2344,30 @@ static void report_queue_fatal_error(struct kbase_queue *const queue, u32 cs_fat
|
|||
return;
|
||||
|
||||
error.payload.csg_error.handle = group->handle;
|
||||
error.payload.csg_error.error.payload.fatal_queue.csi_index = (__u8)queue->csi_index;
|
||||
if (fatal) {
|
||||
error.payload.csg_error.error.error_type = BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL;
|
||||
error.payload.csg_error.error.payload.fatal_queue.sideband = cs_error_info;
|
||||
error.payload.csg_error.error.payload.fatal_queue.status = cs_error;
|
||||
error.payload.csg_error.error.payload.fatal_queue.csi_index = queue->csi_index;
|
||||
} else {
|
||||
error.payload.csg_error.error.error_type = BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FAULT;
|
||||
error.payload.csg_error.error.payload.fault_queue.sideband = cs_error_info;
|
||||
error.payload.csg_error.error.payload.fault_queue.status = cs_error;
|
||||
error.payload.csg_error.error.payload.fault_queue.csi_index = queue->csi_index;
|
||||
}
|
||||
kbase_csf_event_add_error(queue->kctx, &group->error_fatal, &error);
|
||||
kbase_event_wakeup(queue->kctx);
|
||||
|
||||
if (!fatal)
|
||||
queue->clear_faults = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* cs_error_worker - Handle the CS_FATAL/CS_FAULT error for the GPU queue
|
||||
* cs_error_worker() - Handle the CS_FATAL/CS_FAULT error for the GPU queue
|
||||
*
|
||||
* @data: Pointer to a work_struct embedded in GPU command queue.
|
||||
*
|
||||
* Terminate the CSG and report the error to userspace.
|
||||
* Terminate the CSG for CS_FATAL and report the error to userspace.
|
||||
*/
|
||||
static void cs_error_worker(struct work_struct *const data)
|
||||
{
|
||||
|
|
@ -2356,6 +2378,7 @@ static void cs_error_worker(struct work_struct *const data)
|
|||
struct kbase_queue_group *group;
|
||||
bool reset_prevented = false;
|
||||
int err;
|
||||
const bool cs_fatal = queue->cs_error_fatal;
|
||||
|
||||
kbase_debug_csf_fault_wait_completion(kbdev);
|
||||
err = kbase_reset_gpu_prevent_and_wait(kbdev);
|
||||
|
|
@@ -2371,45 +2394,57 @@ static void cs_error_worker(struct work_struct *const data)

	group = get_bound_queue_group(queue);
	if (!group) {
		dev_warn(kbdev->dev, "queue not bound when handling fatal event");
		dev_warn(kbdev->dev, "queue not bound when handling an error event");
		goto unlock;
	}

#if IS_ENABLED(CONFIG_DEBUG_FS)
	if (!queue->cs_error_fatal) {
		unsigned long flags;
		int slot_num;
	if (!cs_fatal) {
		if (group->cs_fault_report_enable && queue->clear_faults)
			report_queue_error(queue, queue->cs_error, queue->cs_error_info, group,
					   false);
		if (unlikely(!queue->cs_error_acked)) {
			unsigned long flags;
			int slot_num;

		kbase_csf_scheduler_spin_lock(kbdev, &flags);
		slot_num = kbase_csf_scheduler_group_get_slot_locked(group);
		if (slot_num >= 0) {
			struct kbase_csf_cmd_stream_group_info const *ginfo =
				&kbdev->csf.global_iface.groups[slot_num];
			struct kbase_csf_cmd_stream_info const *stream =
				&ginfo->streams[queue->csi_index];
			u32 const cs_ack = kbase_csf_firmware_cs_output(stream, CS_ACK);
			kbase_csf_scheduler_spin_lock(kbdev, &flags);
			slot_num = kbase_csf_scheduler_group_get_slot_locked(group);
			if (likely(slot_num >= 0)) {
				struct kbase_csf_cmd_stream_group_info const *ginfo =
					&kbdev->csf.global_iface.groups[slot_num];
				struct kbase_csf_cmd_stream_info const *stream =
					&ginfo->streams[queue->csi_index];
				u32 const cs_ack = kbase_csf_firmware_cs_output(stream, CS_ACK);
				u32 const cs_req = kbase_csf_firmware_cs_input_read(stream, CS_REQ);

			kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack, CS_REQ_FAULT_MASK);
			kbase_csf_ring_cs_kernel_doorbell(kbdev, queue->csi_index, slot_num, true);
				/* Acknowledge the fault and ring the doorbell for the queue
				 * if it hasn't been done yet.
				 */
				if ((cs_ack & CS_ACK_FAULT_MASK) != (cs_req & CS_REQ_FAULT_MASK)) {
					kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack,
									 CS_REQ_FAULT_MASK);
					kbase_csf_ring_cs_kernel_doorbell(kbdev, queue->csi_index,
									  slot_num, true);
				}
			}
			kbase_csf_scheduler_spin_unlock(kbdev, flags);
		}
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
		goto unlock;
	}
#endif

	term_queue_group(group);
	flush_gpu_cache_on_fatal_error(kbdev);
	/* For an invalid GPU page fault, CS_BUS_FAULT fatal error is expected after the
	 * page fault handler disables the AS of faulty context. Need to skip reporting the
	 * CS_BUS_FAULT fatal error to the Userspace as it doesn't have the full fault info.
	 * Page fault handler will report the fatal error with full page fault info.
	 */
	if ((cs_fatal_exception_type == CS_FATAL_EXCEPTION_TYPE_CS_BUS_FAULT) && group->faulted) {
		dev_dbg(kbdev->dev,
			"Skipped reporting CS_BUS_FAULT for queue %d of group %d of ctx %d_%d",
			queue->csi_index, group->handle, kctx->tgid, kctx->id);
	} else {
		report_queue_fatal_error(queue, queue->cs_error, queue->cs_error_info, group);
	term_queue_group(group);
	flush_gpu_cache_on_fatal_error(kbdev);
	/* For an invalid GPU page fault, CS_BUS_FAULT fatal error is expected after the
	 * page fault handler disables the AS of faulty context. Need to skip reporting the
	 * CS_BUS_FAULT fatal error to the Userspace as it doesn't have the full fault info.
	 * Page fault handler will report the fatal error with full page fault info.
	 */
	if ((cs_fatal_exception_type == CS_FATAL_EXCEPTION_TYPE_CS_BUS_FAULT) &&
	    group->faulted) {
		dev_dbg(kbdev->dev,
			"Skipped reporting CS_BUS_FAULT for queue %d of group %d of ctx %d_%d",
			queue->csi_index, group->handle, kctx->tgid, kctx->id);
	} else {
		report_queue_error(queue, queue->cs_error, queue->cs_error_info, group,
				   true);
	}
	}

unlock:
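Aside for readers new to the CSF interface: the fault acknowledgement above follows the usual CSF request/acknowledge handshake, where an event is pending while a REQ-register bit disagrees with the firmware-owned ACK register, and the host acknowledges by copying the ACK bit back into REQ. A minimal self-contained sketch of that toggle protocol, with an illustrative register layout (not the driver's actual API):

#include <stdbool.h>
#include <stdint.h>

#define FAULT_MASK (1u << 0) /* illustrative event bit */

/* Hypothetical register pair modelling a CSF-style stream interface. */
struct csi_regs {
	uint32_t req; /* host-written request bits */
	uint32_t ack; /* firmware-written acknowledge bits */
};

/* An event is pending while REQ and ACK disagree in the event bit. */
static bool fault_needs_ack(const struct csi_regs *r)
{
	return ((r->req ^ r->ack) & FAULT_MASK) != 0;
}

/* Acknowledge by copying the ACK bit into REQ (a masked write), the
 * same shape as kbase_csf_firmware_cs_input_mask(stream, CS_REQ,
 * cs_ack, CS_REQ_FAULT_MASK) in the hunk above.
 */
static void fault_ack(struct csi_regs *r)
{
	r->req = (r->req & ~FAULT_MASK) | (r->ack & FAULT_MASK);
	/* A real driver would now ring the doorbell so firmware notices. */
}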
@@ -2419,7 +2454,7 @@ unlock:
}

/**
 * handle_fatal_event - Handler for CS fatal.
 * handle_fatal_event() - Handler for CS fatal.
 *
 * @queue: Pointer to queue for which fatal event was received.
 * @stream: Pointer to the structure containing info provided by the

@@ -2481,7 +2516,7 @@ static void handle_fatal_event(struct kbase_queue *const queue,
}

/**
 * process_cs_interrupts - Process interrupts for a CS.
 * process_cs_interrupts() - Process interrupts for a CS.
 *
 * @group: Pointer to GPU command queue group data.
 * @ginfo: The CSG interface provided by the firmware.

@@ -2595,7 +2630,7 @@ static void process_cs_interrupts(struct kbase_queue_group *const group,
}

			if (!group->protected_suspend_buf.pma)
				queue_work(group->kctx->csf.wq, &group->protm_event_work);
				kbase_csf_scheduler_enqueue_protm_event_work(group);

			if (test_bit(group->csg_nr, scheduler->csg_slots_idle_mask)) {
				clear_bit(group->csg_nr, scheduler->csg_slots_idle_mask);

@@ -2608,7 +2643,7 @@ static void process_cs_interrupts(struct kbase_queue_group *const group,
}

/**
 * process_csg_interrupts - Process interrupts for a CSG.
 * process_csg_interrupts() - Process interrupts for a CSG.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @csg_nr: CSG number.

@@ -2728,7 +2763,7 @@ static void process_csg_interrupts(struct kbase_device *const kbdev, u32 const c
}

/**
 * process_prfcnt_interrupts - Process performance counter interrupts.
 * process_prfcnt_interrupts() - Process performance counter interrupts.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @glb_req: Global request register value.

@@ -2800,7 +2835,7 @@ static void process_prfcnt_interrupts(struct kbase_device *kbdev, u32 glb_req, u
}

/**
 * check_protm_enter_req_complete - Check if PROTM_ENTER request completed
 * check_protm_enter_req_complete() - Check if PROTM_ENTER request completed
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @glb_req: Global request register value.

@@ -2834,7 +2869,7 @@ static inline void check_protm_enter_req_complete(struct kbase_device *kbdev, u3
}

/**
 * process_protm_exit - Handle the protected mode exit interrupt
 * process_protm_exit() - Handle the protected mode exit interrupt
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @glb_ack: Global acknowledge register value.

@@ -2923,7 +2958,7 @@ static inline void process_tracked_info_for_protm(struct kbase_device *kbdev,
		if (!tock_triggered) {
			dev_dbg(kbdev->dev, "Group-%d on slot-%d start protm work\n", group->handle,
				group->csg_nr);
			queue_work(group->kctx->csf.wq, &group->protm_event_work);
			kbase_csf_scheduler_enqueue_protm_event_work(group);
		}
	}
}
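The recurring replacement of queue_work() calls with kbase_csf_scheduler_enqueue_*_work() throughout this commit moves event handling off per-context workqueues and onto the scheduler's own kthread. The enqueue side of such a handoff typically just links the item and wakes the thread. A generic sketch follows; the struct, field names and locking discipline are illustrative, loosely modelled on the fields this commit adds (protm_event_work_grps, pending_protm_event_works, ...), not the driver's actual implementation:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/atomic.h>

struct sched_state {
	spinlock_t lock;
	struct list_head pending;  /* items awaiting the kthread */
	atomic_t has_pending;      /* cheap "anything to do?" flag */
	wait_queue_head_t wq;      /* the kthread sleeps here */
};

struct work_item {
	struct list_head link;     /* INIT_LIST_HEAD'd at creation */
};

static void sched_enqueue(struct sched_state *s, struct work_item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	/* list_empty() on the node itself guards against double-queueing,
	 * provided removal always uses list_del_init().
	 */
	if (list_empty(&it->link))
		list_add_tail(&it->link, &s->pending);
	spin_unlock_irqrestore(&s->lock, flags);

	atomic_set(&s->has_pending, 1);
	wake_up(&s->wq); /* nudge the scheduler kthread */
}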
@@ -2952,6 +2987,46 @@ static void order_job_irq_clear_with_iface_mem_read(void)
	dmb(osh);
}

static const char *const glb_fatal_status_errors[GLB_FATAL_STATUS_VALUE_COUNT] = {
	[GLB_FATAL_STATUS_VALUE_OK] = "OK",
	[GLB_FATAL_STATUS_VALUE_ASSERT] = "Firmware assert triggered",
	[GLB_FATAL_STATUS_VALUE_UNEXPECTED_EXCEPTION] =
		"Hardware raised an exception firmware did not expect",
	[GLB_FATAL_STATUS_VALUE_HANG] = "Firmware hangs and watchdog timer expired",
};

/**
 * handle_glb_fatal_event() - Handle the GLB fatal event
 *
 * @kbdev: Instance of GPU device.
 * @global_iface: CSF global interface
 */
static void handle_glb_fatal_event(struct kbase_device *kbdev,
				   const struct kbase_csf_global_iface *const global_iface)
{
	const char *error_string = NULL;
	const u32 fatal_status = kbase_csf_firmware_global_output(global_iface, GLB_FATAL_STATUS);

	lockdep_assert_held(&kbdev->hwaccess_lock);
	kbase_csf_scheduler_spin_lock_assert_held(kbdev);
	dev_warn(kbdev->dev, "MCU encountered unrecoverable error");

	if (fatal_status < GLB_FATAL_STATUS_VALUE_COUNT)
		error_string = glb_fatal_status_errors[fatal_status];
	else {
		dev_err(kbdev->dev, "Invalid GLB_FATAL_STATUS (%u)", fatal_status);
		return;
	}

	if (fatal_status == GLB_FATAL_STATUS_VALUE_OK)
		dev_err(kbdev->dev, "GLB_FATAL_STATUS(OK) must be set with proper reason");
	else {
		dev_warn(kbdev->dev, "GLB_FATAL_STATUS: %s", error_string);
		if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_NONE))
			kbase_reset_gpu_locked(kbdev);
	}
}

void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val)
{
	bool deferred_handling_glb_idle_irq = false;
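For context, GLB FATAL joins the other global-interrupt sources that kbase_csf_interrupt() fans out on: the handler is a chain of mask tests against the global acknowledge register. A condensed, runnable illustration of that dispatch shape (bit assignments and handler names are made up for the example):

#include <stdint.h>
#include <stdio.h>

#define GLB_ACK_IDLE_BIT  (1u << 0) /* illustrative bit assignments */
#define GLB_ACK_PING_BIT  (1u << 1)
#define GLB_ACK_FATAL_BIT (1u << 2)

static void on_idle(void)  { puts("GPU reports idle"); }
static void on_ping(void)  { puts("firmware answered ping"); }
static void on_fatal(void) { puts("firmware raised FATAL: schedule reset"); }

/* Fan a global acknowledge value out to per-event handlers, the same
 * shape as the glb_ack tests in kbase_csf_interrupt().
 */
static void dispatch_glb_ack(uint32_t glb_ack)
{
	if (glb_ack & GLB_ACK_IDLE_BIT)
		on_idle();
	if (glb_ack & GLB_ACK_PING_BIT)
		on_ping();
	if (glb_ack & GLB_ACK_FATAL_BIT)
		on_fatal();
}

int main(void)
{
	dispatch_glb_ack(GLB_ACK_FATAL_BIT | GLB_ACK_PING_BIT);
	return 0;
}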
@@ -3026,6 +3101,9 @@ void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val)
					deferred_handling_glb_idle_irq = true;
				}

				if (glb_ack & GLB_ACK_FATAL_MASK)
					handle_glb_fatal_event(kbdev, global_iface);

				process_prfcnt_interrupts(kbdev, glb_req, glb_ack);

				kbase_csf_scheduler_spin_unlock(kbdev, flags);

@@ -3050,13 +3128,10 @@ void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val)

	if (deferred_handling_glb_idle_irq) {
		unsigned long flags;
		bool invoke_pm_state_machine;

		kbase_csf_scheduler_spin_lock(kbdev, &flags);
		invoke_pm_state_machine = kbase_csf_scheduler_process_gpu_idle_event(kbdev);
		kbase_csf_scheduler_process_gpu_idle_event(kbdev);
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
		if (unlikely(invoke_pm_state_machine))
			kbase_pm_update_state(kbdev);
	}

	wake_up_all(&kbdev->csf.event_wait);

@@ -3087,6 +3162,11 @@ void kbase_csf_doorbell_mapping_term(struct kbase_device *kbdev)
	if (kbdev->csf.db_filp) {
		struct page *page = as_page(kbdev->csf.dummy_db_page);

		/* This is a shared dummy sink page for avoiding potential segmentation fault
		 * to user-side library when a csi is off slot. Additionally, the call is on
		 * module unload path, so the page can be left uncleared before returning it
		 * back to kbdev memory pool.
		 */
		kbase_mem_pool_free(&kbdev->mem_pools.small[KBASE_MEM_GROUP_CSF_FW], page, false);

		fput(kbdev->csf.db_filp);
@@ -3118,26 +3198,27 @@ int kbase_csf_doorbell_mapping_init(struct kbase_device *kbdev)
	return 0;
}

void kbase_csf_pending_gpuq_kicks_init(struct kbase_device *kbdev)
void kbase_csf_pending_gpuq_kick_queues_init(struct kbase_device *kbdev)
{
	size_t i;

	for (i = 0; i != ARRAY_SIZE(kbdev->csf.pending_gpuq_kicks); ++i)
		INIT_LIST_HEAD(&kbdev->csf.pending_gpuq_kicks[i]);
	spin_lock_init(&kbdev->csf.pending_gpuq_kicks_lock);
	atomic_set(&kbdev->csf.pending_gpuq_kicks, false);
	for (i = 0; i != ARRAY_SIZE(kbdev->csf.pending_gpuq_kick_queues); ++i)
		INIT_LIST_HEAD(&kbdev->csf.pending_gpuq_kick_queues[i]);
	spin_lock_init(&kbdev->csf.pending_gpuq_kick_queues_lock);
}

void kbase_csf_pending_gpuq_kicks_term(struct kbase_device *kbdev)
void kbase_csf_pending_gpuq_kick_queues_term(struct kbase_device *kbdev)
{
	size_t i;

	spin_lock(&kbdev->csf.pending_gpuq_kicks_lock);
	for (i = 0; i != ARRAY_SIZE(kbdev->csf.pending_gpuq_kicks); ++i) {
		if (!list_empty(&kbdev->csf.pending_gpuq_kicks[i]))
	spin_lock(&kbdev->csf.pending_gpuq_kick_queues_lock);
	for (i = 0; i != ARRAY_SIZE(kbdev->csf.pending_gpuq_kick_queues); ++i) {
		if (!list_empty(&kbdev->csf.pending_gpuq_kick_queues[i]))
			dev_warn(kbdev->dev,
				 "Some GPU queue kicks for priority %zu were not handled", i);
	}
	spin_unlock(&kbdev->csf.pending_gpuq_kicks_lock);
	spin_unlock(&kbdev->csf.pending_gpuq_kick_queues_lock);
}

void kbase_csf_free_dummy_user_reg_page(struct kbase_device *kbdev)
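The init/term pair above maintains one pending list per group priority. The consuming side (in the scheduler kthread, not shown in this hunk) would drain the highest-priority non-empty list first. A hedged, driver-independent sketch of that selection loop; names and the priority count are illustrative, and the real consumer also re-queues on -EBUSY and tracks per-queue pending counts:

#include <linux/list.h>
#include <linux/spinlock.h>

#define NUM_PRIORITIES 4 /* stand-in for KBASE_QUEUE_GROUP_PRIORITY_COUNT */

struct kick_state {
	spinlock_t lock;
	struct list_head pending[NUM_PRIORITIES]; /* index 0 = highest */
};

struct queue_item {
	struct list_head pending_link;
};

/* Pop the next queue to service, preferring higher priorities.
 * Returns NULL when every list is empty.
 */
static struct queue_item *next_kicked_queue(struct kick_state *ks)
{
	struct queue_item *it = NULL;
	size_t prio;

	spin_lock(&ks->lock);
	for (prio = 0; prio < NUM_PRIORITIES; ++prio) {
		if (!list_empty(&ks->pending[prio])) {
			it = list_first_entry(&ks->pending[prio],
					      struct queue_item, pending_link);
			list_del_init(&it->pending_link);
			break;
		}
	}
	spin_unlock(&ks->lock);
	return it;
}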
@@ -3145,6 +3226,11 @@ void kbase_csf_free_dummy_user_reg_page(struct kbase_device *kbdev)
	if (kbdev->csf.user_reg.filp) {
		struct page *page = as_page(kbdev->csf.user_reg.dummy_page);

		/* This is a shared dummy page in place of the real USER Register page just
		 * before the GPU is powered down. Additionally, the call is on module unload
		 * path, so the page can be left uncleared before returning it back to kbdev
		 * memory pool.
		 */
		kbase_mem_pool_free(&kbdev->mem_pools.small[KBASE_MEM_GROUP_CSF_FW], page, false);
		fput(kbdev->csf.user_reg.filp);
	}

@@ -3227,17 +3313,17 @@ void kbase_csf_process_queue_kick(struct kbase_queue *queue)
		if (err == -EBUSY) {
			retry_kick = true;

			spin_lock(&kbdev->csf.pending_gpuq_kicks_lock);
			spin_lock(&kbdev->csf.pending_gpuq_kick_queues_lock);
			if (list_empty(&queue->pending_kick_link)) {
				/* A failed queue kick shall be pushed to the
				 * back of the queue to avoid potential abuse.
				 */
				list_add_tail(
					&queue->pending_kick_link,
					&kbdev->csf.pending_gpuq_kicks[queue->group_priority]);
				spin_unlock(&kbdev->csf.pending_gpuq_kicks_lock);
					&kbdev->csf.pending_gpuq_kick_queues[queue->group_priority]);
				spin_unlock(&kbdev->csf.pending_gpuq_kick_queues_lock);
			} else {
				spin_unlock(&kbdev->csf.pending_gpuq_kicks_lock);
				spin_unlock(&kbdev->csf.pending_gpuq_kick_queues_lock);
				WARN_ON(atomic_read(&queue->pending_kick) == 0);
			}
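Worth calling out: list_empty() is applied here to the queue's own link node, not to a list head. That idiom only means "not currently on any list" if the node is kept self-pointing while unlinked, i.e. INIT_LIST_HEAD at creation and list_del_init() on removal. A tiny demonstration, independent of the driver:

#include <linux/list.h>

struct item {
	struct list_head link;
};

static LIST_HEAD(pending);

/* Queue an item exactly once; safe to call repeatedly. Relies on the
 * node being initialised (INIT_LIST_HEAD) and removed with
 * list_del_init(), so that list_empty(&it->link) means "not queued".
 */
static void queue_once(struct item *it)
{
	if (list_empty(&it->link))
		list_add_tail(&it->link, &pending);
}

static void dequeue(struct item *it)
{
	list_del_init(&it->link); /* leaves the node self-pointing */
}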
@@ -3260,3 +3346,27 @@ out_release_queue:
	WARN_ON(atomic_read(&queue->pending_kick) == 0);
	atomic_dec(&queue->pending_kick);
}

void kbase_csf_process_protm_event_request(struct kbase_queue_group *group)
{
	struct kbase_protected_suspend_buffer *sbuf = &group->protected_suspend_buf;
	int err = 0;

	KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, PROTM_EVENT_WORKER_START, group, 0u);

	err = alloc_grp_protected_suspend_buffer_pages(group);
	if (!err) {
		kbase_csf_scheduler_group_protm_enter(group);
	} else if (err == -ENOMEM && sbuf->alloc_retries <= PROTM_ALLOC_MAX_RETRIES) {
		sbuf->alloc_retries++;
		/* try again to allocate pages */
		kbase_csf_scheduler_enqueue_protm_event_work(group);
	} else if (sbuf->alloc_retries >= PROTM_ALLOC_MAX_RETRIES || err != -ENOMEM) {
		dev_err(group->kctx->kbdev->dev,
			"Failed to allocate physical pages for Protected mode suspend buffer for the group %d of context %d_%d",
			group->handle, group->kctx->tgid, group->kctx->id);
		report_group_fatal_error(group);
	}

	KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, PROTM_EVENT_WORKER_END, group, 0u);
}
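The function above retries a failed -ENOMEM allocation by re-enqueueing itself, up to PROTM_ALLOC_MAX_RETRIES times, before declaring the group fatally broken. The bounded-retry skeleton generalises well; a compact, runnable sketch with a stubbed allocator (all names and the retry limit are illustrative):

#include <errno.h>
#include <stdio.h>

#define MAX_RETRIES 2 /* stand-in for PROTM_ALLOC_MAX_RETRIES */

static int fail_first_n = 3; /* stub: fail the first three attempts */

static int try_allocate(void)
{
	if (fail_first_n > 0) {
		fail_first_n--;
		return -ENOMEM;
	}
	return 0;
}

/* One handler invocation; returns 1 if it should be re-queued. */
static int handle_request(int *retries)
{
	int err = try_allocate();

	if (!err) {
		puts("allocation succeeded");
		return 0;
	}
	if (err == -ENOMEM && *retries < MAX_RETRIES) {
		(*retries)++;
		puts("transient failure, re-queueing");
		return 1;
	}
	puts("giving up: reporting fatal error");
	return 0;
}

int main(void)
{
	int retries = 0;

	while (handle_request(&retries))
		; /* the driver would re-enqueue onto its kthread instead */
	return 0;
}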
@@ -243,6 +243,19 @@ struct kbase_queue_group *kbase_csf_find_queue_group(struct kbase_context *kctx,
 */
int kbase_csf_queue_group_handle_is_valid(struct kbase_context *kctx, u8 group_handle);

/**
 * kbase_csf_queue_group_clear_faults - Re-enable CS Fault reporting.
 *
 * @kctx: Pointer to the kbase context within which the
 *        CS Faults for the queues have to be re-enabled.
 * @clear_faults: Pointer to the structure which contains details of the
 *                queues for which the CS Fault reporting has to be re-enabled.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_queue_group_clear_faults(struct kbase_context *kctx,
				       struct kbase_ioctl_queue_group_clear_faults *clear_faults);

/**
 * kbase_csf_queue_group_create - Create a GPU command queue group.
 *
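For orientation, the new clear-faults entry point is reached from userspace through the kbase ioctl layer. A hypothetical usage sketch follows; the ioctl command number, struct layout, and device path are assumptions for illustration only and must be taken from the real mali_kbase_ioctl.h, not from this sketch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Assumed layout: the real definition lives in the kbase UAPI header. */
struct kbase_ioctl_queue_group_clear_faults {
	uint64_t addr;      /* user buffer of queue base addresses */
	uint32_t nr_queues; /* number of entries in that buffer */
};

/* Hypothetical command number: the real one is assigned in the UAPI header. */
#define KBASE_IOCTL_QUEUE_GROUP_CLEAR_FAULTS \
	_IOW(0x80, 0x40, struct kbase_ioctl_queue_group_clear_faults)

int main(void)
{
	uint64_t queues[1] = { 0x1000000ull }; /* illustrative GPU VA */
	struct kbase_ioctl_queue_group_clear_faults args = {
		.addr = (uint64_t)(uintptr_t)queues,
		.nr_queues = 1,
	};
	int fd = open("/dev/mali0", O_RDWR); /* typical device node */

	if (fd < 0 || ioctl(fd, KBASE_IOCTL_QUEUE_GROUP_CLEAR_FAULTS, &args))
		perror("clear_faults");
	if (fd >= 0)
		close(fd);
	return 0;
}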
@@ -379,20 +392,20 @@ int kbase_csf_setup_dummy_user_reg_page(struct kbase_device *kbdev);
void kbase_csf_free_dummy_user_reg_page(struct kbase_device *kbdev);

/**
 * kbase_csf_pending_gpuq_kicks_init - Initialize the data used for handling
 *                                     GPU queue kicks.
 * kbase_csf_pending_gpuq_kick_queues_init - Initialize the data used for handling
 *                                           GPU queue kicks.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
void kbase_csf_pending_gpuq_kicks_init(struct kbase_device *kbdev);
void kbase_csf_pending_gpuq_kick_queues_init(struct kbase_device *kbdev);

/**
 * kbase_csf_pending_gpuq_kicks_term - De-initialize the data used for handling
 *                                     GPU queue kicks.
 * kbase_csf_pending_gpuq_kick_queues_term - De-initialize the data used for handling
 *                                           GPU queue kicks.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
void kbase_csf_pending_gpuq_kicks_term(struct kbase_device *kbdev);
void kbase_csf_pending_gpuq_kick_queues_term(struct kbase_device *kbdev);

/**
 * kbase_csf_ring_csg_doorbell - ring the doorbell for a CSG interface.

@@ -546,4 +559,13 @@ static inline u64 kbase_csf_ktrace_gpu_cycle_cnt(struct kbase_device *kbdev)
 */
void kbase_csf_process_queue_kick(struct kbase_queue *queue);

/**
 * kbase_csf_process_protm_event_request - Handle protected mode switch request
 *
 * @group: The group to handle protected mode request
 *
 * Request to switch to protected mode.
 */
void kbase_csf_process_protm_event_request(struct kbase_queue_group *group);

#endif /* _KBASE_CSF_H_ */

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@@ -268,6 +268,7 @@ enum kbase_queue_group_priority {
 *                      Shader, L2 and MCU state.
 * @CSF_GPU_RESET_TIMEOUT: Waiting timeout for GPU reset to complete.
 * @CSF_CSG_SUSPEND_TIMEOUT: Timeout given for a CSG to be suspended.
 * @CSF_CSG_TERM_TIMEOUT: Timeout given for a CSG to be terminated.
 * @CSF_FIRMWARE_BOOT_TIMEOUT: Maximum time to wait for firmware to boot.
 * @CSF_FIRMWARE_PING_TIMEOUT: Maximum time to wait for firmware to respond
 *                             to a ping from KBase.

@@ -290,6 +291,7 @@ enum kbase_timeout_selector {
	CSF_PM_TIMEOUT,
	CSF_GPU_RESET_TIMEOUT,
	CSF_CSG_SUSPEND_TIMEOUT,
	CSF_CSG_TERM_TIMEOUT,
	CSF_FIRMWARE_BOOT_TIMEOUT,
	CSF_FIRMWARE_PING_TIMEOUT,
	CSF_SCHED_PROTM_PROGRESS_TIMEOUT,

@@ -398,6 +400,10 @@ struct kbase_csf_notification {
 * @cs_error: Records information about the CS fatal event or
 *            about CS fault event if dump on fault is enabled.
 * @cs_error_fatal: Flag to track if the CS fault or CS fatal event occurred.
 * @cs_error_acked: Flag to indicate that acknowledging the fault has been done
 *                  at top-half of fault handler.
 * @clear_faults: Flag to track if the CS fault reporting is enabled for this queue.
 *                It's protected by &kbase_context.csf.lock.
 * @extract_ofs: The current EXTRACT offset, this is only updated when handling
 *               the GLB IDLE IRQ if the idle timeout value is non-0 in order
 *               to help detect a queue's true idle status.

@@ -441,6 +447,8 @@ struct kbase_queue {
	u64 cs_error_info;
	u32 cs_error;
	bool cs_error_fatal;
	bool cs_error_acked;
	bool clear_faults;
	u64 extract_ofs;
	u64 saved_cmd_ptr;
};
@@ -501,6 +509,8 @@ struct kbase_protected_suspend_buffer {
 * @compute_max: Maximum number of compute endpoints the group is
 *               allowed to use.
 * @csi_handlers: Requested CSI exception handler flags for the group.
 * @cs_fault_report_enable: Indicates if reporting of CS_FAULTs to
 *                          userspace is enabled.
 * @tiler_mask: Mask of tiler endpoints the group is allowed to use.
 * @fragment_mask: Mask of fragment endpoints the group is allowed to use.
 * @compute_mask: Mask of compute endpoints the group is allowed to use.
@@ -531,8 +541,13 @@ struct kbase_protected_suspend_buffer {
 * @bound_queues: Array of registered queues bound to this queue group.
 * @doorbell_nr: Index of the hardware doorbell page assigned to the
 *               group.
 * @protm_event_work: Work item corresponding to the protected mode entry
 *                    event for this queue.
 * @protm_event_work: List item corresponding to the protected mode entry
 *                    event for this queue. This would be handled by
 *                    kbase_csf_scheduler_kthread().
 * @pending_protm_event_work: Indicates that kbase_csf_scheduler_kthread() should
 *                            handle PROTM request for this group. This would
 *                            be set to false when the work is done. This is used
 *                            mainly for synchronisation with group termination.
 * @protm_pending_bitmap: Bit array to keep a track of CSs that
 *                        have pending protected mode entry requests.
 * @error_fatal: An error of type BASE_GPU_QUEUE_GROUP_ERROR_FATAL to be

@@ -569,7 +584,7 @@ struct kbase_queue_group {
	u8 compute_max;
	u8 csi_handlers;

	__u8 cs_fault_report_enable;
	u64 tiler_mask;
	u64 fragment_mask;
	u64 compute_mask;

@@ -588,7 +603,8 @@ struct kbase_queue_group {
	struct kbase_queue *bound_queues[MAX_SUPPORTED_STREAMS_PER_GROUP];

	int doorbell_nr;
	struct work_struct protm_event_work;
	struct list_head protm_event_work;
	atomic_t pending_protm_event_work;
	DECLARE_BITMAP(protm_pending_bitmap, MAX_SUPPORTED_STREAMS_PER_GROUP);

	struct kbase_csf_notification error_fatal;

@@ -625,6 +641,9 @@ struct kbase_queue_group {
 * @cmd_seq_num: The sequence number assigned to an enqueued command,
 *               in incrementing order (older commands shall have a
 *               smaller number).
 * @kcpu_wq: Work queue to process KCPU commands for all queues in this
 *           context. This would be used if the context is not prioritised,
 *           otherwise it would be handled by kbase_csf_scheduler_kthread().
 * @jit_lock: Lock to serialise JIT operations.
 * @jit_cmds_head: A list of the just-in-time memory commands, both
 *                 allocate & free, in submission order, protected

@@ -640,6 +659,8 @@ struct kbase_csf_kcpu_queue_context {
	DECLARE_BITMAP(in_use, KBASEP_MAX_KCPU_QUEUES);
	atomic64_t cmd_seq_num;

	struct workqueue_struct *kcpu_wq;

	struct mutex jit_lock;
	struct list_head jit_cmds_head;
	struct list_head jit_blocked_queues;

@@ -747,15 +768,7 @@ struct kbase_csf_ctx_heap_reclaim_info {
 *                     GPU command queues are idle and at least one of them
 *                     is blocked on a sync wait operation.
 * @num_idle_wait_grps: Length of the @idle_wait_groups list.
 * @sync_update_wq_high_prio: high-priority work queue to process the
 *                            SYNC_UPDATE events by sync_set / sync_add
 *                            instruction execution on command streams bound to
 *                            groups of @idle_wait_groups list. This WQ would
 *                            be used if the context is prioritised.
 * @sync_update_wq_normal_prio: similar to sync_update_wq_high_prio, but this
 *                              WQ would be used if the context is not
 *                              prioritised.
 * @sync_update_work: Work item to process the SYNC_UPDATE events.
 * @sync_update_work: List item to process the SYNC_UPDATE event.
 * @ngrp_to_schedule: Number of groups added for the context to the
 *                    'groups_to_schedule' list of scheduler instance.
 * @heap_info: Heap reclaim information data of the kctx. As the

@@ -768,9 +781,7 @@ struct kbase_csf_scheduler_context {
	u32 num_runnable_grps;
	struct list_head idle_wait_groups;
	u32 num_idle_wait_grps;
	struct workqueue_struct *sync_update_wq_high_prio;
	struct workqueue_struct *sync_update_wq_normal_prio;
	struct work_struct sync_update_work;
	struct list_head sync_update_work;
	u32 ngrp_to_schedule;
	struct kbase_csf_ctx_heap_reclaim_info heap_info;
};

@@ -865,17 +876,16 @@ struct kbase_csf_user_reg_context {
 * @wq: Dedicated workqueue to process work items corresponding
 *      to the OoM events raised for chunked tiler heaps being
 *      used by GPU command queues, and progress timeout events.
 * @kcpu_wq_high_prio: High-priority work queue to process KCPU commands for
 *                     all queues in this context. This WQ would be used if
 *                     the context is prioritised.
 * @kcpu_wq_normal_prio: Similar to kcpu_wq_high_prio, but this WQ would be
 *                       used if the context is not prioritised.
 * @link: Link to this csf context in the 'runnable_kctxs' list of
 *        the scheduler instance
 * @sched: Object representing the scheduler's context
 * @cpu_queue: CPU queue information. Only be available when DEBUG_FS
 *             is enabled.
 * @user_reg: Collective information to support mapping to USER Register page.
 * @pending_sync_update: Indicates that kbase_csf_scheduler_kthread() should
 *                       handle SYNC_UPDATE event for this context. This would
 *                       be set to false when the work is done. This is used
 *                       mainly for synchronisation with context termination.
 */
struct kbase_csf_context {
	struct list_head event_pages_head;

@@ -888,12 +898,11 @@ struct kbase_csf_context {
	struct kbase_csf_event event;
	struct kbase_csf_tiler_heap_context tiler_heaps;
	struct workqueue_struct *wq;
	struct workqueue_struct *kcpu_wq_high_prio;
	struct workqueue_struct *kcpu_wq_normal_prio;
	struct list_head link;
	struct kbase_csf_scheduler_context sched;
	struct kbase_csf_cpu_queue_context cpu_queue;
	struct kbase_csf_user_reg_context user_reg;
	atomic_t pending_sync_update;
};

/**
@@ -936,14 +945,15 @@ struct kbase_csf_csg_slot {
 * struct kbase_csf_sched_heap_reclaim_mgr - Object for managing tiler heap reclaim
 *                                           kctx lists inside the CSF device's scheduler.
 *
 * @heap_reclaim: Tiler heap reclaim shrinker object.
 * @heap_reclaim: Defines Tiler heap reclaim shrinker object.
 * @ctx_lists: Array of kctx lists, size matching CSG defined priorities. The
 *             lists track the kctxs attached to the reclaim manager.
 * @unused_pages: Estimated number of unused pages from the @ctxlist array. The
 *                number is indicative for use with reclaim shrinker's count method.
 */
struct kbase_csf_sched_heap_reclaim_mgr {
	struct shrinker heap_reclaim;
	DEFINE_KBASE_SHRINKER heap_reclaim;

	struct list_head ctx_lists[KBASE_QUEUE_GROUP_PRIORITY_COUNT];
	atomic_t unused_pages;
};
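The switch from a bare struct shrinker member to DEFINE_KBASE_SHRINKER tracks the kernel 6.7 shrinker rework, where shrinkers became separately allocated objects. A plausible shape for such a compatibility macro is sketched below; this is an assumption about how kbase might define it, not a quote of the actual definition:

#include <linux/shrinker.h>
#include <linux/version.h>

/* Hypothetical compat definition: embed the shrinker on older kernels,
 * hold a pointer to a shrinker_alloc()'d object on 6.7+.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 7, 0)
#define DEFINE_KBASE_SHRINKER struct shrinker *
#define KBASE_GET_SHRINKER(s) (s)
#else
#define DEFINE_KBASE_SHRINKER struct shrinker
#define KBASE_GET_SHRINKER(s) (&(s))
#endif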
@@ -1042,10 +1052,29 @@ struct kbase_csf_mcu_shared_regions {
 *                      workqueue items (kernel-provided delayed_work
 *                      items do not use hrtimer and for some reason do
 *                      not provide sufficiently reliable periodicity).
 * @pending_tick_work: Indicates that kbase_csf_scheduler_kthread() should perform
 *                     a scheduling tick.
 * @pending_tock_work: Indicates that kbase_csf_scheduler_kthread() should perform
 *                     a scheduling tock.
 * @pending_sync_update_works: Indicates that kbase_csf_scheduler_kthread()
 *                             should handle SYNC_UPDATE events.
 * @sync_update_work_ctxs_lock: Lock protecting the list of contexts that
 *                              require handling SYNC_UPDATE events.
 * @sync_update_work_ctxs: The list of contexts that require handling
 *                         SYNC_UPDATE events.
 * @pending_protm_event_works: Indicates that kbase_csf_scheduler_kthread()
 *                             should handle PROTM requests.
 * @protm_event_work_grps_lock: Lock protecting the list of groups that
 *                              have requested protected mode.
 * @protm_event_work_grps: The list of groups that have requested
 *                         protected mode.
 * @pending_kcpuq_works: Indicates that kbase_csf_scheduler_kthread()
 *                       should process pending KCPU queue works.
 * @kcpuq_work_queues_lock: Lock protecting the list of KCPU queues that
 *                          need to be processed.
 * @kcpuq_work_queues: The list of KCPU queues that need to be processed
 * @pending_tick_work: Indicates that kbase_csf_scheduler_kthread() should
 *                     perform a scheduling tick.
 * @pending_tock_work: Indicates that kbase_csf_scheduler_kthread() should
 *                     perform a scheduling tock.
 * @pending_gpu_idle_work: Indicates that kbase_csf_scheduler_kthread() should
 *                         handle the GPU IDLE event.
 * @ping_work: Work item that would ping the firmware at regular
 *             intervals, only if there is a single active CSG
 *             slot, to check if firmware is alive and would

@@ -1063,10 +1092,6 @@ struct kbase_csf_mcu_shared_regions {
 *                    This pointer being set doesn't necessarily indicate
 *                    that GPU is in protected mode, kbdev->protected_mode
 *                    needs to be checked for that.
 * @idle_wq: Workqueue for executing GPU idle notification
 *           handler.
 * @gpu_idle_work: Work item for facilitating the scheduler to bring
 *                 the GPU to a low-power mode on becoming idle.
 * @fast_gpu_idle_handling: Indicates whether to relax many of the checks
 *                          normally done in the GPU idle worker. This is
 *                          set to true when handling the GLB IDLE IRQ if the

@@ -1109,7 +1134,8 @@ struct kbase_csf_mcu_shared_regions {
 *                  thread when a queue needs attention.
 * @kthread_running: Whether the GPU queue submission thread should keep
 *                   executing.
 * @gpuq_kthread: High-priority thread used to handle GPU queue
 * @gpuq_kthread: Dedicated thread primarily used to handle
 *                latency-sensitive tasks such as GPU queue
 *                submissions.
 */
struct kbase_csf_scheduler {

@@ -1134,14 +1160,22 @@ struct kbase_csf_scheduler {
	unsigned long last_schedule;
	atomic_t timer_enabled;
	struct hrtimer tick_timer;
	atomic_t pending_sync_update_works;
	spinlock_t sync_update_work_ctxs_lock;
	struct list_head sync_update_work_ctxs;
	atomic_t pending_protm_event_works;
	spinlock_t protm_event_work_grps_lock;
	struct list_head protm_event_work_grps;
	atomic_t pending_kcpuq_works;
	spinlock_t kcpuq_work_queues_lock;
	struct list_head kcpuq_work_queues;
	atomic_t pending_tick_work;
	atomic_t pending_tock_work;
	atomic_t pending_gpu_idle_work;
	struct delayed_work ping_work;
	struct kbase_context *top_kctx;
	struct kbase_queue_group *top_grp;
	struct kbase_queue_group *active_protm_grp;
	struct workqueue_struct *idle_wq;
	struct work_struct gpu_idle_work;
	bool fast_gpu_idle_handling;
	atomic_t gpu_no_longer_idle;
	atomic_t non_idle_offslot_grps;
@@ -1653,12 +1687,16 @@ struct kbase_csf_user_reg {
 * @dof: Structure for dump on fault.
 * @user_reg: Collective information to support the mapping to
 *            USER Register page for user processes.
 * @pending_gpuq_kicks: Lists of GPU queue that have been kicked but not
 *                      yet processed, categorised by queue group's priority.
 * @pending_gpuq_kicks_lock: Protect @pending_gpu_kicks and
 *                           kbase_queue.pending_kick_link.
 * @pending_gpuq_kicks: Indicates that kbase_csf_scheduler_kthread()
 *                      should handle GPU queue kicks.
 * @pending_gpuq_kick_queues: Lists of GPU queues that have been kicked but not
 *                            yet processed, categorised by queue group's priority.
 * @pending_gpuq_kick_queues_lock: Protect @pending_gpuq_kick_queues and
 *                                 kbase_queue.pending_kick_link.
 * @quirks_ext: Pointer to an allocated buffer containing the firmware
 *              workarounds configuration.
 * @pmode_sync_sem: RW Semaphore to prevent MMU operations during P.Mode entrance.
 * @gpu_idle_timer_enabled: Tracks whether the GPU idle timer is enabled or disabled.
 */
struct kbase_csf_device {
	struct kbase_mmu_table mcu_mmu;
@@ -1710,9 +1748,12 @@ struct kbase_csf_device {
	struct kbase_debug_coresight_device coresight;
#endif /* IS_ENABLED(CONFIG_MALI_CORESIGHT) */
	struct kbase_csf_user_reg user_reg;
	struct list_head pending_gpuq_kicks[KBASE_QUEUE_GROUP_PRIORITY_COUNT];
	spinlock_t pending_gpuq_kicks_lock;
	atomic_t pending_gpuq_kicks;
	struct list_head pending_gpuq_kick_queues[KBASE_QUEUE_GROUP_PRIORITY_COUNT];
	spinlock_t pending_gpuq_kick_queues_lock;
	u32 *quirks_ext;
	struct rw_semaphore pmode_sync_sem;
	bool gpu_idle_timer_enabled;
};

/**

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@@ -1552,7 +1552,6 @@ static bool global_request_complete(struct kbase_device *const kbdev, u32 const
	unsigned long flags;

	kbase_csf_scheduler_spin_lock(kbdev, &flags);

	if ((kbase_csf_firmware_global_output(global_iface, GLB_ACK) & req_mask) ==
	    (kbase_csf_firmware_global_input_read(global_iface, GLB_REQ) & req_mask))
		complete = true;

@@ -1644,6 +1643,23 @@ static void set_timeout_global(const struct kbase_csf_global_iface *const global
	set_global_request(global_iface, GLB_REQ_CFG_PROGRESS_TIMER_MASK);
}

static inline void set_gpu_idle_timer_glb_req(struct kbase_device *const kbdev, bool set)
{
	struct kbase_csf_global_iface *global_iface = &kbdev->csf.global_iface;

	kbase_csf_scheduler_spin_lock_assert_held(kbdev);

	if (set) {
		kbase_csf_firmware_global_input_mask(global_iface, GLB_REQ, GLB_REQ_REQ_IDLE_ENABLE,
						     GLB_REQ_IDLE_ENABLE_MASK);
	} else {
		kbase_csf_firmware_global_input_mask(
			global_iface, GLB_REQ, GLB_REQ_REQ_IDLE_DISABLE, GLB_REQ_IDLE_DISABLE_MASK);
	}

	kbdev->csf.gpu_idle_timer_enabled = set;
}

static void enable_gpu_idle_timer(struct kbase_device *const kbdev)
{
	struct kbase_csf_global_iface *global_iface = &kbdev->csf.global_iface;

@@ -1657,8 +1673,7 @@ static void enable_gpu_idle_timer(struct kbase_device *const kbdev)
					     kbdev->csf.gpu_idle_dur_count_no_modifier,
					     GLB_IDLE_TIMER_CONFIG_NO_MODIFIER_MASK);

	kbase_csf_firmware_global_input_mask(global_iface, GLB_REQ, GLB_REQ_REQ_IDLE_ENABLE,
					     GLB_REQ_IDLE_ENABLE_MASK);
	set_gpu_idle_timer_glb_req(kbdev, true);
	dev_dbg(kbdev->dev, "Enabling GPU idle timer with count-value: 0x%.8x",
		kbdev->csf.gpu_idle_dur_count);
}

@@ -1890,6 +1905,7 @@ static void kbase_csf_firmware_reload_worker(struct work_struct *work)
{
	struct kbase_device *kbdev =
		container_of(work, struct kbase_device, csf.firmware_reload_work);
	unsigned long flags;
	int err;

	dev_info(kbdev->dev, "reloading firmware");

@@ -1908,7 +1924,9 @@ static void kbase_csf_firmware_reload_worker(struct work_struct *work)
		return;

	/* Reboot the firmware */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_csf_firmware_enable_mcu(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

void kbase_csf_firmware_trigger_reload(struct kbase_device *kbdev)
@@ -2045,29 +2063,33 @@ u32 kbase_csf_firmware_set_gpu_idle_hysteresis_time(struct kbase_device *kbdev,
		return kbdev->csf.gpu_idle_dur_count;
	}

	/* The 'reg_lock' is also taken and is held until the update is
	/* The scheduler lock is also taken and is held until the update is
	 * complete, to ensure the update of idle timer value by multiple Users
	 * gets serialized.
	 */
	mutex_lock(&kbdev->csf.reg_lock);
	/* The firmware only reads the new idle timer value when the timer is
	 * disabled.
	 */
	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
	kbase_csf_scheduler_spin_unlock(kbdev, flags);
	/* Ensure that the request has taken effect */
	wait_for_global_request(kbdev, GLB_REQ_IDLE_DISABLE_MASK);

	kbase_csf_scheduler_lock(kbdev);
	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	kbdev->csf.gpu_idle_hysteresis_ns = dur_ns;
	kbdev->csf.gpu_idle_dur_count = hysteresis_val;
	kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
	kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
	kbase_csf_scheduler_spin_unlock(kbdev, flags);
	wait_for_global_request(kbdev, GLB_REQ_IDLE_ENABLE_MASK);
	mutex_unlock(&kbdev->csf.reg_lock);

	if (kbdev->csf.gpu_idle_timer_enabled) {
		/* Timer is already enabled. Disable the timer as FW only reads
		 * the new idle timer value when timer is re-enabled.
		 */
		kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
		/* Ensure that the request has taken effect */
		wait_for_global_request(kbdev, GLB_REQ_IDLE_DISABLE_MASK);
		kbase_csf_scheduler_spin_lock(kbdev, &flags);
		kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
		wait_for_global_request(kbdev, GLB_REQ_IDLE_ENABLE_MASK);
	} else {
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
	}

	kbase_csf_scheduler_unlock(kbdev);
	kbase_csf_scheduler_pm_idle(kbdev);
	kbase_reset_gpu_allow(kbdev);
end:
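The sequence above is the classic "latched configuration" dance: the firmware samples the idle-timer value only on the disable-to-enable transition, so a live update must disable, wait for the acknowledgement, write the new value, re-enable, and wait again. Stripped of driver detail, the control flow looks like the following sketch (every name here is illustrative, and the REQ/ACK waits are reduced to a comment):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative firmware-interface stub for the sketch. */
struct fw_iface {
	bool timer_enabled;
	uint32_t latched_count; /* value firmware actually uses */
	uint32_t staged_count;  /* value the host has written */
};

static void fw_set_enabled(struct fw_iface *fw, bool on)
{
	/* Firmware latches the staged value on the off->on edge. */
	if (on && !fw->timer_enabled)
		fw->latched_count = fw->staged_count;
	fw->timer_enabled = on;
	/* A real driver would also wait for the REQ/ACK handshake here. */
}

/* Update the timer while it may be running: disable, write, re-enable. */
static void update_idle_timer(struct fw_iface *fw, uint32_t new_count)
{
	bool was_enabled = fw->timer_enabled;

	if (was_enabled)
		fw_set_enabled(fw, false);
	fw->staged_count = new_count;
	if (was_enabled)
		fw_set_enabled(fw, true);
}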
@@ -2255,8 +2277,9 @@ int kbase_csf_firmware_early_init(struct kbase_device *kbdev)

	kbdev->csf.glb_init_request_pending = true;

	init_rwsem(&kbdev->csf.pmode_sync_sem);
	mutex_init(&kbdev->csf.reg_lock);
	kbase_csf_pending_gpuq_kicks_init(kbdev);
	kbase_csf_pending_gpuq_kick_queues_init(kbdev);

	kbdev->csf.fw = (struct kbase_csf_mcu_fw){ .data = NULL };

@@ -2265,7 +2288,7 @@ int kbase_csf_firmware_early_init(struct kbase_device *kbdev)

void kbase_csf_firmware_early_term(struct kbase_device *kbdev)
{
	kbase_csf_pending_gpuq_kicks_term(kbdev);
	kbase_csf_pending_gpuq_kick_queues_term(kbdev);
	mutex_destroy(&kbdev->csf.reg_lock);
}

@@ -2731,7 +2754,7 @@ int kbase_csf_firmware_mcu_register_poll(struct kbase_device *const kbdev, u32 c
	unsigned long remaining =
		kbase_csf_timeout_in_jiffies(kbase_get_timeout_ms(kbdev, CSF_FIRMWARE_TIMEOUT)) +
		jiffies;
	u32 read_val;
	u32 read_val = 0;

	dev_dbg(kbdev->dev, "p: reg %08x val %08x mask %08x", reg_addr, reg_val, val_mask);

@@ -2778,12 +2801,9 @@ void kbase_csf_firmware_enable_gpu_idle_timer(struct kbase_device *kbdev)

void kbase_csf_firmware_disable_gpu_idle_timer(struct kbase_device *kbdev)
{
	struct kbase_csf_global_iface *global_iface = &kbdev->csf.global_iface;

	kbase_csf_scheduler_spin_lock_assert_held(kbdev);

	kbase_csf_firmware_global_input_mask(global_iface, GLB_REQ, GLB_REQ_REQ_IDLE_DISABLE,
					     GLB_REQ_IDLE_DISABLE_MASK);
	set_gpu_idle_timer_glb_req(kbdev, false);
	dev_dbg(kbdev->dev, "Sending request to disable gpu idle timer");

	kbase_csf_ring_doorbell(kbdev, CSF_KERNEL_DOORBELL_NR);

@@ -2807,6 +2827,7 @@ int kbase_csf_firmware_ping_wait(struct kbase_device *const kbdev, unsigned int
	return wait_for_global_request_with_timeout(kbdev, GLB_REQ_PING_MASK, wait_timeout_ms);
}

int kbase_csf_firmware_set_timeout(struct kbase_device *const kbdev, u64 const timeout)
{
	const struct kbase_csf_global_iface *const global_iface = &kbdev->csf.global_iface;

@@ -2845,8 +2866,6 @@ int kbase_csf_wait_protected_mode_enter(struct kbase_device *kbdev)
{
	int err;

	lockdep_assert_held(&kbdev->mmu_hw_mutex);

	err = wait_for_global_request(kbdev, GLB_REQ_PROTM_ENTER_MASK);

	if (!err) {

@@ -2912,6 +2931,7 @@ void kbase_csf_firmware_enable_mcu(struct kbase_device *kbdev)
{
	struct kbase_csf_global_iface *iface = &kbdev->csf.global_iface;

	lockdep_assert_held(&kbdev->hwaccess_lock);
	/* Clear the HALT bit before triggering the boot of MCU firmware */
	kbase_csf_firmware_global_input_mask(iface, GLB_REQ, 0, GLB_REQ_HALT_MASK);

@@ -2927,6 +2947,7 @@ void kbase_csf_firmware_trigger_mcu_sleep(struct kbase_device *kbdev)
	KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP(kbdev, kbase_backend_get_cycle_cnt(kbdev));

	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	set_gpu_idle_timer_glb_req(kbdev, false);
	set_global_request(global_iface, GLB_REQ_SLEEP_MASK);
	dev_dbg(kbdev->dev, "Sending sleep request to MCU");
	kbase_csf_ring_doorbell(kbdev, CSF_KERNEL_DOORBELL_NR);

@@ -3191,6 +3212,9 @@ void kbase_csf_firmware_mcu_shared_mapping_term(struct kbase_device *kbdev,
	}

	if (csf_mapping->phys) {
		/* This is on module unload path, so the pages can be left uncleared before
		 * returning them back to kbdev memory pool.
		 */
		kbase_mem_pool_free_pages(&kbdev->mem_pools.small[KBASE_MEM_GROUP_CSF_FW],
					  csf_mapping->num_pages, csf_mapping->phys, false, false);
	}

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@@ -618,6 +618,7 @@ void kbase_csf_firmware_trigger_mcu_sleep(struct kbase_device *kbdev);
bool kbase_csf_firmware_is_mcu_in_sleep(struct kbase_device *kbdev);
#endif

/**
 * kbase_csf_firmware_trigger_reload() - Trigger the reboot of MCU firmware, for
 *                                       the cold boot case firmware image would

@@ -367,10 +367,10 @@ int kbase_csf_firmware_cfg_fw_wa_init(struct kbase_device *kbdev)
	 */
	entry_count = of_property_count_u32_elems(kbdev->dev->of_node, "quirks-ext");

	if (entry_count == -EINVAL)
	if (entry_count < 0)
		entry_count = of_property_count_u32_elems(kbdev->dev->of_node, "quirks_ext");

	if (entry_count == -EINVAL || entry_count == -ENODATA)
	if (entry_count < 0)
		return 0;

	entry_bytes = (size_t)entry_count * sizeof(u32);
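The quirks lookup above now falls back from the preferred "quirks-ext" property name to the legacy "quirks_ext" spelling on any error, not just -EINVAL. The general shape of a preferred-then-legacy device-tree read, as a hedged sketch (the property names and of_property_count_u32_elems() come from this hunk; the wrapper itself is illustrative):

#include <linux/of.h>

/* Count u32 elements under the preferred property name, falling back
 * to a legacy spelling when the preferred one is absent or malformed.
 * Returns a count >= 0; 0 when neither property is usable.
 */
static int count_quirk_words(const struct device_node *np)
{
	int n = of_property_count_u32_elems(np, "quirks-ext");

	if (n < 0)
		n = of_property_count_u32_elems(np, "quirks_ext");
	return n < 0 ? 0 : n;
}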
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@@ -670,6 +670,23 @@ static void set_timeout_global(const struct kbase_csf_global_iface *const global
	set_global_request(global_iface, GLB_REQ_CFG_PROGRESS_TIMER_MASK);
}

static inline void set_gpu_idle_timer_glb_req(struct kbase_device *const kbdev, bool set)
{
	struct kbase_csf_global_iface *global_iface = &kbdev->csf.global_iface;

	kbase_csf_scheduler_spin_lock_assert_held(kbdev);

	if (set) {
		kbase_csf_firmware_global_input_mask(global_iface, GLB_REQ, GLB_REQ_REQ_IDLE_ENABLE,
						     GLB_REQ_IDLE_ENABLE_MASK);
	} else {
		kbase_csf_firmware_global_input_mask(
			global_iface, GLB_REQ, GLB_REQ_REQ_IDLE_DISABLE, GLB_REQ_IDLE_DISABLE_MASK);
	}

	kbdev->csf.gpu_idle_timer_enabled = set;
}

static void enable_gpu_idle_timer(struct kbase_device *const kbdev)
{
	struct kbase_csf_global_iface *global_iface = &kbdev->csf.global_iface;

@@ -678,8 +695,11 @@ static void enable_gpu_idle_timer(struct kbase_device *const kbdev)

	kbase_csf_firmware_global_input(global_iface, GLB_IDLE_TIMER,
					kbdev->csf.gpu_idle_dur_count);
	kbase_csf_firmware_global_input_mask(global_iface, GLB_REQ, GLB_REQ_REQ_IDLE_ENABLE,
					     GLB_REQ_IDLE_ENABLE_MASK);
	kbase_csf_firmware_global_input_mask(global_iface, GLB_IDLE_TIMER_CONFIG,
					     kbdev->csf.gpu_idle_dur_count_no_modifier,
					     GLB_IDLE_TIMER_CONFIG_NO_MODIFIER_MASK);

	set_gpu_idle_timer_glb_req(kbdev, true);
	dev_dbg(kbdev->dev, "Enabling GPU idle timer with count-value: 0x%.8x",
		kbdev->csf.gpu_idle_dur_count);
}

@@ -857,11 +877,11 @@ static void kbase_csf_firmware_reload_worker(struct work_struct *work)
		container_of(work, struct kbase_device, csf.firmware_reload_work);
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	/* Reboot the firmware */
	kbase_csf_firmware_enable_mcu(kbdev);

	/* Tell MCU state machine to transit to next state */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->csf.firmware_reloaded = true;
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

@@ -900,7 +920,7 @@ static u32 convert_dur_to_idle_count(struct kbase_device *kbdev, const u32 dur_n
	/* Get the cntfreq_el0 value, which drives the SYSTEM_TIMESTAMP */
	u64 freq = kbase_arch_timer_get_cntfrq(kbdev);
	u64 dur_val = dur_ns;
	u32 cnt_val_u32, reg_val_u32;
	u32 cnt_val_u32, reg_val_u32, timer_src;
	bool src_system_timestamp = freq > 0;

	if (!src_system_timestamp) {

@@ -932,9 +952,9 @@ static u32 convert_dur_to_idle_count(struct kbase_device *kbdev, const u32 dur_n

	reg_val_u32 = GLB_IDLE_TIMER_TIMEOUT_SET(0, cnt_val_u32);
	/* add the source flag */
	reg_val_u32 = GLB_IDLE_TIMER_TIMER_SOURCE_SET(
		reg_val_u32, (src_system_timestamp ? GLB_IDLE_TIMER_TIMER_SOURCE_SYSTEM_TIMESTAMP :
						     GLB_IDLE_TIMER_TIMER_SOURCE_GPU_COUNTER));
	timer_src = src_system_timestamp ? GLB_IDLE_TIMER_TIMER_SOURCE_SYSTEM_TIMESTAMP :
					   GLB_IDLE_TIMER_TIMER_SOURCE_GPU_COUNTER;
	reg_val_u32 = GLB_IDLE_TIMER_TIMER_SOURCE_SET(reg_val_u32, timer_src);

	return reg_val_u32;
}
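convert_dur_to_idle_count() turns a hysteresis duration in nanoseconds into a timer count driven by cntfreq_el0 (or the GPU counter as a fallback). The arithmetic is the standard duration-to-ticks scaling with saturation to the register width; a self-contained sketch of that computation, without the modifier/source handling, and with an illustrative clamping threshold:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ull
#define COUNT_MAX    0x7FFFFFFFu /* illustrative register capacity */

/* Convert a duration in ns into timer ticks at 'freq' Hz, saturating
 * at the register capacity. ticks = dur_ns * freq / 1e9, ordered to
 * limit intermediate overflow by splitting off whole seconds.
 */
static uint32_t dur_ns_to_ticks(uint64_t dur_ns, uint64_t freq)
{
	uint64_t secs = dur_ns / NSEC_PER_SEC;
	uint64_t rem_ns = dur_ns % NSEC_PER_SEC;
	uint64_t ticks = secs * freq + (rem_ns * freq) / NSEC_PER_SEC;

	return ticks > COUNT_MAX ? COUNT_MAX : (uint32_t)ticks;
}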
@@ -989,29 +1009,33 @@ u32 kbase_csf_firmware_set_gpu_idle_hysteresis_time(struct kbase_device *kbdev,
		return kbdev->csf.gpu_idle_dur_count;
	}

	/* The 'reg_lock' is also taken and is held until the update is
	/* The scheduler lock is also taken and is held until the update is
	 * complete, to ensure the update of idle timer value by multiple Users
	 * gets serialized.
	 */
	mutex_lock(&kbdev->csf.reg_lock);
	/* The firmware only reads the new idle timer value when the timer is
	 * disabled.
	 */
	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
	kbase_csf_scheduler_spin_unlock(kbdev, flags);
	/* Ensure that the request has taken effect */
	wait_for_global_request(kbdev, GLB_REQ_IDLE_DISABLE_MASK);

	kbase_csf_scheduler_lock(kbdev);
	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	kbdev->csf.gpu_idle_hysteresis_ns = dur_ns;
	kbdev->csf.gpu_idle_dur_count = hysteresis_val;
	kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
	kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
	kbase_csf_scheduler_spin_unlock(kbdev, flags);
	wait_for_global_request(kbdev, GLB_REQ_IDLE_ENABLE_MASK);
	mutex_unlock(&kbdev->csf.reg_lock);

	if (kbdev->csf.gpu_idle_timer_enabled) {
		/* Timer is already enabled. Disable the timer as FW only reads
		 * the new idle timer value when timer is re-enabled.
		 */
		kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
		/* Ensure that the request has taken effect */
		wait_for_global_request(kbdev, GLB_REQ_IDLE_DISABLE_MASK);
		kbase_csf_scheduler_spin_lock(kbdev, &flags);
		kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
		wait_for_global_request(kbdev, GLB_REQ_IDLE_ENABLE_MASK);
	} else {
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
	}

	kbase_csf_scheduler_unlock(kbdev);
	kbase_csf_scheduler_pm_idle(kbdev);
	kbase_reset_gpu_allow(kbdev);
end:
@@ -1118,15 +1142,16 @@ int kbase_csf_firmware_early_init(struct kbase_device *kbdev)
	INIT_WORK(&kbdev->csf.firmware_reload_work, kbase_csf_firmware_reload_worker);
	INIT_WORK(&kbdev->csf.fw_error_work, firmware_error_worker);

	init_rwsem(&kbdev->csf.pmode_sync_sem);
	mutex_init(&kbdev->csf.reg_lock);
	kbase_csf_pending_gpuq_kicks_init(kbdev);
	kbase_csf_pending_gpuq_kick_queues_init(kbdev);

	return 0;
}

void kbase_csf_firmware_early_term(struct kbase_device *kbdev)
{
	kbase_csf_pending_gpuq_kicks_term(kbdev);
	kbase_csf_pending_gpuq_kick_queues_term(kbdev);
	mutex_destroy(&kbdev->csf.reg_lock);
}

@@ -1278,13 +1303,9 @@ void kbase_csf_firmware_enable_gpu_idle_timer(struct kbase_device *kbdev)

void kbase_csf_firmware_disable_gpu_idle_timer(struct kbase_device *kbdev)
{
	struct kbase_csf_global_iface *global_iface = &kbdev->csf.global_iface;

	kbase_csf_scheduler_spin_lock_assert_held(kbdev);

	kbase_csf_firmware_global_input_mask(global_iface, GLB_REQ, GLB_REQ_REQ_IDLE_DISABLE,
					     GLB_REQ_IDLE_DISABLE_MASK);

	set_gpu_idle_timer_glb_req(kbdev, false);
	dev_dbg(kbdev->dev, "Sending request to disable gpu idle timer");

	kbase_csf_ring_doorbell(kbdev, CSF_KERNEL_DOORBELL_NR);

@@ -1308,6 +1329,7 @@ int kbase_csf_firmware_ping_wait(struct kbase_device *const kbdev, unsigned int
	return wait_for_global_request(kbdev, GLB_REQ_PING_MASK);
}

int kbase_csf_firmware_set_timeout(struct kbase_device *const kbdev, u64 const timeout)
{
	const struct kbase_csf_global_iface *const global_iface = &kbdev->csf.global_iface;

@@ -1370,6 +1392,8 @@ void kbase_csf_firmware_trigger_mcu_halt(struct kbase_device *kbdev)

void kbase_csf_firmware_enable_mcu(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	/* Trigger the boot of MCU firmware, Use the AUTO mode as
	 * otherwise on fast reset, to exit protected mode, MCU will
	 * not reboot by itself to enter normal mode.

@@ -1384,6 +1408,7 @@ void kbase_csf_firmware_trigger_mcu_sleep(struct kbase_device *kbdev)
	unsigned long flags;

	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	set_gpu_idle_timer_glb_req(kbdev, false);
	set_global_request(global_iface, GLB_REQ_SLEEP_MASK);
	dev_dbg(kbdev->dev, "Sending sleep request to MCU");
	kbase_csf_ring_doorbell(kbdev, CSF_KERNEL_DOORBELL_NR);

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@@ -39,13 +39,7 @@
static DEFINE_SPINLOCK(kbase_csf_fence_lock);
#endif

#ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
#define FENCE_WAIT_TIMEOUT_MS 3000
#endif

static void kcpu_queue_process(struct kbase_kcpu_command_queue *kcpu_queue, bool drain_queue);

static void kcpu_queue_process_worker(struct work_struct *data);

static int kbase_kcpu_map_import_prepare(struct kbase_kcpu_command_queue *kcpu_queue,
					 struct base_kcpu_command_import_info *import_info,

@@ -445,6 +439,16 @@ static void kbase_kcpu_jit_allocate_finish(struct kbase_kcpu_command_queue *queu
	kfree(cmd->info.jit_alloc.info);
}

static void enqueue_kcpuq_work(struct kbase_kcpu_command_queue *queue)
{
	struct kbase_context *const kctx = queue->kctx;

	if (!atomic_read(&kctx->prioritized))
		queue_work(kctx->csf.kcpu_queues.kcpu_wq, &queue->work);
	else
		kbase_csf_scheduler_enqueue_kcpuq_work(queue);
}

/**
 * kbase_kcpu_jit_retry_pending_allocs() - Retry blocked JIT_ALLOC commands
 *
@@ -464,9 +468,7 @@ static void kbase_kcpu_jit_retry_pending_allocs(struct kbase_context *kctx)
	 * kbase_csf_kcpu_queue_context.jit_lock .
	 */
	list_for_each_entry(blocked_queue, &kctx->csf.kcpu_queues.jit_blocked_queues, jit_blocked)
		queue_work(atomic_read(&kctx->prioritized) ? kctx->csf.kcpu_wq_high_prio :
							     kctx->csf.kcpu_wq_normal_prio,
			   &blocked_queue->work);
		enqueue_kcpuq_work(blocked_queue);
}

@@ -717,11 +719,8 @@ static int kbase_csf_queue_group_suspend_process(struct kbase_context *kctx,
static enum kbase_csf_event_callback_action event_cqs_callback(void *param)
{
	struct kbase_kcpu_command_queue *kcpu_queue = (struct kbase_kcpu_command_queue *)param;
	struct kbase_context *kctx = kcpu_queue->kctx;

	queue_work(atomic_read(&kctx->prioritized) ? kctx->csf.kcpu_wq_high_prio :
						     kctx->csf.kcpu_wq_normal_prio,
		   &kcpu_queue->work);
	enqueue_kcpuq_work(kcpu_queue);

	return KBASE_CSF_EVENT_CALLBACK_KEEP;
}

@@ -1322,9 +1321,7 @@ static void kbase_csf_fence_wait_callback(struct dma_fence *fence, struct dma_fe
		fence->seqno);

	/* Resume kcpu command queue processing. */
	queue_work(atomic_read(&kctx->prioritized) ? kctx->csf.kcpu_wq_high_prio :
						     kctx->csf.kcpu_wq_normal_prio,
		   &kcpu_queue->work);
	enqueue_kcpuq_work(kcpu_queue);
}

static void kbasep_kcpu_fence_wait_cancel(struct kbase_kcpu_command_queue *kcpu_queue,

@@ -1360,7 +1357,6 @@ static void kbasep_kcpu_fence_wait_cancel(struct kbase_kcpu_command_queue *kcpu_
		fence_info->fence = NULL;
}

#ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
/**
 * fence_timeout_callback() - Timeout callback function for fence-wait
 *

@@ -1399,9 +1395,7 @@ static void fence_timeout_callback(struct timer_list *timer)
	kbase_sync_fence_info_get(fence, &info);

	if (info.status == 1) {
		queue_work(atomic_read(&kctx->prioritized) ? kctx->csf.kcpu_wq_high_prio :
							     kctx->csf.kcpu_wq_normal_prio,
			   &kcpu_queue->work);
		enqueue_kcpuq_work(kcpu_queue);
	} else if (info.status == 0) {
		dev_warn(kctx->kbdev->dev, "fence has not yet signalled in %ums",
			 FENCE_WAIT_TIMEOUT_MS);

@@ -1430,7 +1424,6 @@ static void fence_wait_timeout_start(struct kbase_kcpu_command_queue *cmd)
{
	mod_timer(&cmd->fence_timeout, jiffies + msecs_to_jiffies(FENCE_WAIT_TIMEOUT_MS));
}
#endif

/**
 * kbase_kcpu_fence_wait_process() - Process the kcpu fence wait command

@@ -1469,9 +1462,8 @@ static int kbase_kcpu_fence_wait_process(struct kbase_kcpu_command_queue *kcpu_q
	fence_status = cb_err;
	if (cb_err == 0) {
		kcpu_queue->fence_wait_processed = true;
#ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
		fence_wait_timeout_start(kcpu_queue);
#endif
		if (IS_ENABLED(CONFIG_MALI_BIFROST_FENCE_DEBUG))
			fence_wait_timeout_start(kcpu_queue);
	} else if (cb_err == -ENOENT) {
		fence_status = dma_fence_get_status(fence);
		if (!fence_status) {
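Replacing the #ifdef block around fence_wait_timeout_start() with IS_ENABLED() keeps the call compiled and type-checked in every configuration, while the optimiser still drops it when the option is off. The idiom in isolation (the config option name here is illustrative):

#include <linux/kconfig.h>

static void debug_only_path(void)
{
	/* ... */
}

static void do_work(void)
{
	/* Both branches always parse; the disabled one is dead code. */
	if (IS_ENABLED(CONFIG_EXAMPLE_DEBUG))
		debug_only_path();
}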
@ -1692,9 +1684,7 @@ static void fence_signal_timeout_cb(struct timer_list *timer)
|
|||
if (atomic_read(&kcpu_queue->fence_signal_pending_cnt) > 1)
|
||||
fence_signal_timeout_start(kcpu_queue);
|
||||
|
||||
queue_work(atomic_read(&kctx->prioritized) ? kctx->csf.kcpu_wq_high_prio :
|
||||
kctx->csf.kcpu_wq_normal_prio,
|
||||
&kcpu_queue->timeout_work);
|
||||
queue_work(kctx->csf.kcpu_queues.kcpu_wq, &kcpu_queue->timeout_work);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1973,7 +1963,7 @@ static void kcpu_queue_process_worker(struct work_struct *data)
|
|||
container_of(data, struct kbase_kcpu_command_queue, work);
|
||||
|
||||
mutex_lock(&queue->lock);
|
||||
kcpu_queue_process(queue, false);
|
||||
kbase_csf_kcpu_queue_process(queue, false);
|
||||
mutex_unlock(&queue->lock);
|
||||
}
|
||||
|
||||
|
|
@ -2006,7 +1996,7 @@ static int delete_queue(struct kbase_context *kctx, u32 id)
|
|||
/* Drain the remaining work for this queue first and go past
|
||||
* all the waits.
|
||||
*/
|
||||
kcpu_queue_process(queue, true);
|
||||
kbase_csf_kcpu_queue_process(queue, true);
|
||||
|
||||
/* All commands should have been processed */
|
||||
WARN_ON(queue->num_pending_cmds);
|
||||
|
|
@ -2022,11 +2012,20 @@ static int delete_queue(struct kbase_context *kctx, u32 id)
|
|||
mutex_unlock(&queue->lock);
|
||||
|
||||
cancel_work_sync(&queue->timeout_work);
|
||||
|
||||
/*
|
||||
* Drain a pending request to process this queue in
|
||||
* kbase_csf_scheduler_kthread() if any. By this point the
|
||||
* queue would be empty so this would be a no-op.
|
||||
*/
|
||||
kbase_csf_scheduler_wait_for_kthread_pending_work(kctx->kbdev,
|
||||
&queue->pending_kick);
|
||||
|
||||
cancel_work_sync(&queue->work);
|
||||
|
||||
mutex_destroy(&queue->lock);
|
||||
|
||||
kfree(queue);
|
||||
vfree(queue);
|
||||
} else {
|
||||
dev_dbg(kctx->kbdev->dev, "Attempt to delete a non-existent KCPU queue");
|
||||
mutex_unlock(&kctx->csf.kcpu_queues.lock);
|
||||
|
|
@ -2079,7 +2078,7 @@ KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_END(struct kbase_device *kbde
KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END(kbdev, queue);
}

static void kcpu_queue_process(struct kbase_kcpu_command_queue *queue, bool drain_queue)
void kbase_csf_kcpu_queue_process(struct kbase_kcpu_command_queue *queue, bool drain_queue)
{
struct kbase_device *kbdev = queue->kctx->kbdev;
bool process_next = true;

@ -2199,10 +2198,10 @@ static void kcpu_queue_process(struct kbase_kcpu_command_queue *queue, bool drai
KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START(kbdev,
queue);

kbase_gpu_vm_lock(queue->kctx);
kbase_gpu_vm_lock_with_pmode_sync(queue->kctx);
meta = kbase_sticky_resource_acquire(queue->kctx,
cmd->info.import.gpu_va);
kbase_gpu_vm_unlock(queue->kctx);
cmd->info.import.gpu_va, NULL);
kbase_gpu_vm_unlock_with_pmode_sync(queue->kctx);

if (meta == NULL) {
queue->has_error = true;

@ -2219,10 +2218,10 @@ static void kcpu_queue_process(struct kbase_kcpu_command_queue *queue, bool drai

KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START(kbdev, queue);

kbase_gpu_vm_lock(queue->kctx);
kbase_gpu_vm_lock_with_pmode_sync(queue->kctx);
ret = kbase_sticky_resource_release(queue->kctx, NULL,
cmd->info.import.gpu_va);
kbase_gpu_vm_unlock(queue->kctx);
kbase_gpu_vm_unlock_with_pmode_sync(queue->kctx);

if (!ret) {
queue->has_error = true;

@ -2240,10 +2239,10 @@ static void kcpu_queue_process(struct kbase_kcpu_command_queue *queue, bool drai
KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START(kbdev,
queue);

kbase_gpu_vm_lock(queue->kctx);
kbase_gpu_vm_lock_with_pmode_sync(queue->kctx);
ret = kbase_sticky_resource_release_force(queue->kctx, NULL,
cmd->info.import.gpu_va);
kbase_gpu_vm_unlock(queue->kctx);
kbase_gpu_vm_unlock_with_pmode_sync(queue->kctx);

if (!ret) {
queue->has_error = true;

@ -2642,7 +2641,7 @@ int kbase_csf_kcpu_queue_enqueue(struct kbase_context *kctx,
}

queue->num_pending_cmds += enq->nr_commands;
kcpu_queue_process(queue, false);
kbase_csf_kcpu_queue_process(queue, false);
}

out:

@ -2653,23 +2652,14 @@ out:

int kbase_csf_kcpu_queue_context_init(struct kbase_context *kctx)
{
kctx->csf.kcpu_wq_high_prio = alloc_workqueue("mali_kcpu_wq_%i_high_prio",
WQ_UNBOUND | WQ_HIGHPRI, 0, kctx->tgid);
if (kctx->csf.kcpu_wq_high_prio == NULL) {
kctx->csf.kcpu_queues.kcpu_wq =
alloc_workqueue("mali_kcpu_wq_%i_%i", 0, 0, kctx->tgid, kctx->id);
if (kctx->csf.kcpu_queues.kcpu_wq == NULL) {
dev_err(kctx->kbdev->dev,
"Failed to initialize KCPU queue workqueue");
return -ENOMEM;
}

kctx->csf.kcpu_wq_normal_prio =
alloc_workqueue("mali_kcpu_wq_%i_normal_prio", 0, 0, kctx->tgid);
if (kctx->csf.kcpu_wq_normal_prio == NULL) {
dev_err(kctx->kbdev->dev,
"Failed to initialize KCPU queue normal-priority workqueue");
destroy_workqueue(kctx->csf.kcpu_wq_high_prio);
return -ENOMEM;
}

mutex_init(&kctx->csf.kcpu_queues.lock);

return 0;
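The init path above collapses the high/normal-priority workqueue pair into a single per-context workqueue; prioritised work is instead routed to kbase_csf_scheduler_kthread(). alloc_workqueue() takes a printf-style name format, which is what makes the per-context naming work. A short sketch of the pattern (tgid and id stand in for the context fields):

    #include <linux/workqueue.h>

    struct workqueue_struct *wq;

    /* One workqueue per context, named after the owning process and
     * context id so it is identifiable in debugfs and traces. Flags 0
     * and max_active 0 select the defaults.
     */
    wq = alloc_workqueue("mali_kcpu_wq_%i_%i", 0, 0, tgid, id);
    if (!wq)
        return -ENOMEM;

    /* ... queue_work(wq, &work); ... and on teardown: */
    destroy_workqueue(wq);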
@ -2688,8 +2678,7 @@ void kbase_csf_kcpu_queue_context_term(struct kbase_context *kctx)

mutex_destroy(&kctx->csf.kcpu_queues.lock);

destroy_workqueue(kctx->csf.kcpu_wq_normal_prio);
destroy_workqueue(kctx->csf.kcpu_wq_high_prio);
destroy_workqueue(kctx->csf.kcpu_queues.kcpu_wq);
}
KBASE_EXPORT_TEST_API(kbase_csf_kcpu_queue_context_term);

@ -2699,15 +2688,42 @@ int kbase_csf_kcpu_queue_delete(struct kbase_context *kctx,
return delete_queue(kctx, (u32)del->id);
}

static struct kbase_kcpu_dma_fence_meta *
kbase_csf_kcpu_queue_metadata_new(struct kbase_context *kctx, u64 fence_context)
{
int n;
struct kbase_kcpu_dma_fence_meta *metadata = kzalloc(sizeof(*metadata), GFP_KERNEL);

if (!metadata)
goto early_ret;

*metadata = (struct kbase_kcpu_dma_fence_meta){
.kbdev = kctx->kbdev,
.kctx_id = kctx->id,
};

/* Please update MAX_TIMELINE_NAME macro when making changes to the string. */
n = scnprintf(metadata->timeline_name, MAX_TIMELINE_NAME, "%u-%d_%u-%llu-kcpu",
kctx->kbdev->id, kctx->tgid, kctx->id, fence_context);
if (WARN_ON(n >= MAX_TIMELINE_NAME)) {
kfree(metadata);
metadata = NULL;
goto early_ret;
}

kbase_refcount_set(&metadata->refcount, 1);

early_ret:
return metadata;
}
KBASE_ALLOW_ERROR_INJECTION_TEST_API(kbase_csf_kcpu_queue_metadata_new, ERRNO_NULL);

int kbase_csf_kcpu_queue_new(struct kbase_context *kctx, struct kbase_ioctl_kcpu_queue_new *newq)
{
struct kbase_kcpu_command_queue *queue;
int idx;
int n;
int ret = 0;
#if IS_ENABLED(CONFIG_SYNC_FILE)
struct kbase_kcpu_dma_fence_meta *metadata;
#endif
int idx;
int ret = 0;
/* The queue id is of u8 type and we use the index of the kcpu_queues
* array as an id, so the number of elements in the array can't be
* more than 256.
@ -2727,54 +2743,48 @@ int kbase_csf_kcpu_queue_new(struct kbase_context *kctx, struct kbase_ioctl_kcpu
goto out;
}

queue = kzalloc(sizeof(*queue), GFP_KERNEL);

queue = vzalloc(sizeof(*queue));
if (!queue) {
ret = -ENOMEM;
goto out;
}

*queue = (struct kbase_kcpu_command_queue)
{
.kctx = kctx, .start_offset = 0, .num_pending_cmds = 0, .enqueue_failed = false,
.command_started = false, .has_error = false, .id = idx,
#if IS_ENABLED(CONFIG_SYNC_FILE)
.fence_context = dma_fence_context_alloc(1), .fence_seqno = 0,
.fence_wait_processed = false,
#endif /* IS_ENABLED(CONFIG_SYNC_FILE) */
};

mutex_init(&queue->lock);
INIT_WORK(&queue->work, kcpu_queue_process_worker);
INIT_LIST_HEAD(&queue->high_prio_work);
atomic_set(&queue->pending_kick, 0);
INIT_WORK(&queue->timeout_work, kcpu_queue_timeout_worker);
INIT_LIST_HEAD(&queue->jit_blocked);

if (IS_ENABLED(CONFIG_SYNC_FILE)) {
metadata = kbase_csf_kcpu_queue_metadata_new(kctx, queue->fence_context);
if (!metadata) {
vfree(queue);
ret = -ENOMEM;
goto out;
}

queue->metadata = metadata;
atomic_inc(&kctx->kbdev->live_fence_metadata);
atomic_set(&queue->fence_signal_pending_cnt, 0);
kbase_timer_setup(&queue->fence_signal_timeout, fence_signal_timeout_cb);
}

if (IS_ENABLED(CONFIG_MALI_BIFROST_FENCE_DEBUG))
kbase_timer_setup(&queue->fence_timeout, fence_timeout_callback);

bitmap_set(kctx->csf.kcpu_queues.in_use, (unsigned int)idx, 1);
kctx->csf.kcpu_queues.array[idx] = queue;
mutex_init(&queue->lock);
queue->kctx = kctx;
queue->start_offset = 0;
queue->num_pending_cmds = 0;
#if IS_ENABLED(CONFIG_SYNC_FILE)
queue->fence_context = dma_fence_context_alloc(1);
queue->fence_seqno = 0;
queue->fence_wait_processed = false;

metadata = kzalloc(sizeof(*metadata), GFP_KERNEL);
if (!metadata) {
kfree(queue);
ret = -ENOMEM;
goto out;
}

metadata->kbdev = kctx->kbdev;
metadata->kctx_id = kctx->id;
n = snprintf(metadata->timeline_name, MAX_TIMELINE_NAME, "%u-%d_%u-%llu-kcpu",
kctx->kbdev->id, kctx->tgid, kctx->id, queue->fence_context);
if (WARN_ON(n >= MAX_TIMELINE_NAME)) {
kfree(queue);
kfree(metadata);
ret = -EINVAL;
goto out;
}

kbase_refcount_set(&metadata->refcount, 1);
queue->metadata = metadata;
atomic_inc(&kctx->kbdev->live_fence_metadata);
#endif /* CONFIG_SYNC_FILE */
queue->enqueue_failed = false;
queue->command_started = false;
INIT_LIST_HEAD(&queue->jit_blocked);
queue->has_error = false;
INIT_WORK(&queue->work, kcpu_queue_process_worker);
INIT_WORK(&queue->timeout_work, kcpu_queue_timeout_worker);
queue->id = idx;

newq->id = idx;

/* Fire the tracepoint with the mutex held to enforce correct ordering
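One allocation detail in the rewritten function deserves a note. The queue struct embeds a 256-entry command array, so the allocation switches from kzalloc() to vzalloc(): a large, long-lived object with no need for physical contiguity is better served by vmalloc space, where fragmentation cannot fail the request the way a high-order slab allocation can. The free side must match the allocator, hence the kfree()-to-vfree() changes in the error paths and in delete_queue(). A generic sketch (struct name illustrative):

    #include <linux/vmalloc.h>

    /* vzalloc() returns zeroed, virtually contiguous memory; pair it
     * with vfree(), never kfree().
     */
    struct big_object *obj = vzalloc(sizeof(*obj));
    if (!obj)
        return -ENOMEM;
    /* ... use obj ... */
    vfree(obj);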
@ -2784,14 +2794,6 @@ int kbase_csf_kcpu_queue_new(struct kbase_context *kctx, struct kbase_ioctl_kcpu
queue->num_pending_cmds);

KBASE_KTRACE_ADD_CSF_KCPU(kctx->kbdev, KCPU_QUEUE_CREATE, queue, queue->fence_context, 0);
#ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
kbase_timer_setup(&queue->fence_timeout, fence_timeout_callback);
#endif

#if IS_ENABLED(CONFIG_SYNC_FILE)
atomic_set(&queue->fence_signal_pending_cnt, 0);
kbase_timer_setup(&queue->fence_signal_timeout, fence_signal_timeout_cb);
#endif
out:
mutex_unlock(&kctx->csf.kcpu_queues.lock);
@ -243,7 +243,19 @@ struct kbase_kcpu_command {
* @work: struct work_struct which contains a pointer to
* the function which handles processing of kcpu
* commands enqueued into a kcpu command queue;
* part of kernel API for processing workqueues
* part of kernel API for processing workqueues.
* This would be used if the context is not
* prioritised, otherwise it would be handled by
* kbase_csf_scheduler_kthread().
* @high_prio_work: A counterpart to @work, this queue would be
* added to a list to be processed by
* kbase_csf_scheduler_kthread() if it is
* prioritised.
* @pending_kick: Indicates that kbase_csf_scheduler_kthread()
* should re-evaluate pending commands for this
* queue. This would be set to false when the work
* is done. This is used mainly for
* synchronisation with queue termination.
* @timeout_work: struct work_struct which contains a pointer to the
* function which handles post-timeout actions
* queue when a fence signal timeout occurs.

@ -287,6 +299,8 @@ struct kbase_kcpu_command_queue {
struct kbase_context *kctx;
struct kbase_kcpu_command commands[KBASEP_KCPU_QUEUE_SIZE];
struct work_struct work;
struct list_head high_prio_work;
atomic_t pending_kick;
struct work_struct timeout_work;
u8 start_offset;
u8 id;
@ -299,9 +313,7 @@ struct kbase_kcpu_command_queue {
bool command_started;
struct list_head jit_blocked;
bool has_error;
#ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
struct timer_list fence_timeout;
#endif /* CONFIG_MALI_BIFROST_FENCE_DEBUG */
#if IS_ENABLED(CONFIG_SYNC_FILE)
struct kbase_kcpu_dma_fence_meta *metadata;
#endif /* CONFIG_SYNC_FILE */
@ -334,6 +346,18 @@ int kbase_csf_kcpu_queue_new(struct kbase_context *kctx, struct kbase_ioctl_kcpu
int kbase_csf_kcpu_queue_delete(struct kbase_context *kctx,
struct kbase_ioctl_kcpu_queue_delete *del);

/**
* kbase_csf_kcpu_queue_process - Process pending KCPU queue commands
*
* @queue: The queue to process pending commands for
* @drain_queue: Whether to skip all blocking commands in the queue.
* This is expected to be set to true on queue
* termination.
*/
void kbase_csf_kcpu_queue_process(struct kbase_kcpu_command_queue *queue, bool drain_queue);

/**
* kbase_csf_kcpu_queue_enqueue - Enqueue a KCPU command into a KCPU command
* queue.
@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software

@ -250,7 +250,7 @@

#define GLB_ACK 0x0000 /* () Global acknowledge */
#define GLB_DB_ACK 0x0008 /* () Global doorbell acknowledge */
#define GLB_HALT_STATUS 0x0010 /* () Global halt status */
#define GLB_FATAL_STATUS 0x0010 /* () Global fatal error status */
#define GLB_PRFCNT_STATUS 0x0014 /* () Performance counter status */
#define GLB_PRFCNT_INSERT 0x0018 /* () Performance counter buffer insert index */
#define GLB_DEBUG_FWUTF_RESULT GLB_DEBUG_ARG_OUT0 /* () Firmware debug test result */
@ -1422,6 +1422,12 @@
#define GLB_REQ_PRFCNT_OVERFLOW_SET(reg_val, value) \
(((reg_val) & ~GLB_REQ_PRFCNT_OVERFLOW_MASK) | \
(((value) << GLB_REQ_PRFCNT_OVERFLOW_SHIFT) & GLB_REQ_PRFCNT_OVERFLOW_MASK))
#define GLB_ACK_FATAL_SHIFT GPU_U(27)
#define GLB_ACK_FATAL_MASK (GPU_U(0x1) << GLB_ACK_FATAL_SHIFT)
#define GLB_ACK_FATAL_GET(reg_val) (((reg_val)&GLB_ACK_FATAL_MASK) >> GLB_ACK_FATAL_SHIFT)
#define GLB_ACK_FATAL_SET(reg_val, value) \
(~(~(reg_val) | GLB_ACK_FATAL_MASK) | \
(((value) << GLB_ACK_FATAL_SHIFT) & GLB_ACK_FATAL_MASK))
#define GLB_REQ_DEBUG_CSF_REQ_SHIFT 30
#define GLB_REQ_DEBUG_CSF_REQ_MASK (0x1 << GLB_REQ_DEBUG_CSF_REQ_SHIFT)
#define GLB_REQ_DEBUG_CSF_REQ_GET(reg_val) \
@ -1822,6 +1828,20 @@
(((reg_val) & ~GLB_DEBUG_REQ_RUN_MODE_MASK) | \
(((value) << GLB_DEBUG_REQ_RUN_MODE_SHIFT) & GLB_DEBUG_REQ_RUN_MODE_MASK))

/* GLB_FATAL_STATUS register */
#define GLB_FATAL_STATUS_VALUE_SHIFT GPU_U(0)
#define GLB_FATAL_STATUS_VALUE_MASK (GPU_U(0xFFFFFFFF) << GLB_FATAL_STATUS_VALUE_SHIFT)
#define GLB_FATAL_STATUS_VALUE_GET(reg_val) \
(((reg_val)&GLB_FATAL_STATUS_VALUE_MASK) >> GLB_FATAL_STATUS_VALUE_SHIFT)

enum glb_fatal_status {
GLB_FATAL_STATUS_VALUE_OK,
GLB_FATAL_STATUS_VALUE_ASSERT,
GLB_FATAL_STATUS_VALUE_UNEXPECTED_EXCEPTION,
GLB_FATAL_STATUS_VALUE_HANG,
GLB_FATAL_STATUS_VALUE_COUNT
};

/* GLB_DEBUG_ACK register */
#define GLB_DEBUG_ACK_DEBUG_RUN_SHIFT GPU_U(23)
#define GLB_DEBUG_ACK_DEBUG_RUN_MASK (GPU_U(0x1) << GLB_DEBUG_ACK_DEBUG_RUN_SHIFT)
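The new GLB_FATAL_STATUS register reports why the firmware raised a fatal interrupt, with the enum giving the defined encodings. A hypothetical decode helper built on the macros above (not part of the diff) shows the intended use:

    /* Illustrative only: map a raw GLB_FATAL_STATUS read to a string
     * using the VALUE_GET accessor and enum glb_fatal_status.
     */
    static const char *glb_fatal_status_str(u32 reg_val)
    {
        switch (GLB_FATAL_STATUS_VALUE_GET(reg_val)) {
        case GLB_FATAL_STATUS_VALUE_OK:
            return "ok";
        case GLB_FATAL_STATUS_VALUE_ASSERT:
            return "firmware assert";
        case GLB_FATAL_STATUS_VALUE_UNEXPECTED_EXCEPTION:
            return "unexpected exception";
        case GLB_FATAL_STATUS_VALUE_HANG:
            return "hang";
        default:
            return "unknown";
        }
    }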
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@ -224,8 +224,11 @@ static void kbase_csf_reset_end_hw_access(struct kbase_device *kbdev, int err_du

static void kbase_csf_debug_dump_registers(struct kbase_device *kbdev)
{
unsigned long flags;

kbase_io_history_dump(kbdev);

spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
dev_err(kbdev->dev, "Register state:");
dev_err(kbdev->dev, " GPU_IRQ_RAWSTAT=0x%08x GPU_STATUS=0x%08x MCU_STATUS=0x%08x",
kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(GPU_IRQ_RAWSTAT)),

@ -251,6 +254,7 @@ static void kbase_csf_debug_dump_registers(struct kbase_device *kbdev)
kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(TILER_CONFIG)));
}

spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

/**

@ -396,6 +400,7 @@ static int kbase_csf_reset_gpu_now(struct kbase_device *kbdev, bool firmware_ini
*/
if (likely(firmware_inited))
kbase_csf_scheduler_reset(kbdev);

cancel_work_sync(&kbdev->csf.firmware_reload_work);

dev_dbg(kbdev->dev, "Disable GPU hardware counters.\n");

@ -403,6 +408,7 @@ static int kbase_csf_reset_gpu_now(struct kbase_device *kbdev, bool firmware_ini
kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);

ret = kbase_csf_reset_gpu_once(kbdev, firmware_inited, silent);

if (ret == SOFT_RESET_FAILED) {
dev_err(kbdev->dev, "Soft-reset failed");
goto err;

@ -490,6 +496,13 @@ static void kbase_csf_reset_gpu_worker(struct work_struct *data)

bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev, unsigned int flags)
{
#ifdef CONFIG_MALI_ARBITER_SUPPORT
if (kbase_pm_is_gpu_lost(kbdev)) {
/* GPU access has been removed, reset will be done by Arbiter instead */
return false;
}
#endif

if (flags & RESET_FLAGS_HWC_UNRECOVERABLE_ERROR)
kbase_hwcnt_backend_csf_on_unrecoverable_error(&kbdev->hwcnt_gpu_iface);

[File diff suppressed because it is too large]
@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@ -235,7 +235,8 @@ void kbase_csf_scheduler_early_term(struct kbase_device *kbdev);
* No explicit re-initialization is done for CSG & CS interface I/O pages;
* instead, that happens implicitly on firmware reload.
*
* Should be called only after initiating the GPU reset.
* Should be called either after initiating the GPU reset or when an MCU reset
* is expected to follow, such as in the GPU_LOST case.
*/
void kbase_csf_scheduler_reset(struct kbase_device *kbdev);
@ -487,6 +488,48 @@ static inline bool kbase_csf_scheduler_all_csgs_idle(struct kbase_device *kbdev)
kbdev->csf.global_iface.group_num);
}

/**
* kbase_csf_scheduler_enqueue_sync_update_work() - Add a context to the list
* of contexts to handle
* SYNC_UPDATE events.
*
* @kctx: The context to handle SYNC_UPDATE event
*
* This function wakes up kbase_csf_scheduler_kthread() to handle pending
* SYNC_UPDATE events for all contexts.
*/
void kbase_csf_scheduler_enqueue_sync_update_work(struct kbase_context *kctx);

/**
* kbase_csf_scheduler_enqueue_protm_event_work() - Add a group to the list
* of groups to handle
* PROTM requests.
*
* @group: The group to handle protected mode request
*
* This function wakes up kbase_csf_scheduler_kthread() to handle pending
* protected mode requests for all groups.
*/
void kbase_csf_scheduler_enqueue_protm_event_work(struct kbase_queue_group *group);

/**
* kbase_csf_scheduler_enqueue_kcpuq_work() - Wake up kbase_csf_scheduler_kthread() to process
* pending commands for a KCPU queue.
*
* @queue: The queue to process pending commands for
*/
void kbase_csf_scheduler_enqueue_kcpuq_work(struct kbase_kcpu_command_queue *queue);

/**
* kbase_csf_scheduler_wait_for_kthread_pending_work - Wait until a pending work has completed in
* kbase_csf_scheduler_kthread().
*
* @kbdev: Instance of a GPU platform device that implements a CSF interface
* @pending: The work to wait for
*/
void kbase_csf_scheduler_wait_for_kthread_pending_work(struct kbase_device *kbdev,
atomic_t *pending);

/**
* kbase_csf_scheduler_invoke_tick() - Invoke the scheduling tick
*
@ -591,11 +634,8 @@ int kbase_csf_scheduler_handle_runtime_suspend(struct kbase_device *kbdev);
* @kbdev: Pointer to the device
*
* This function is called when a GPU idle IRQ has been raised.
*
* Return: true if the PM state machine needs to be invoked after the processing
* of GPU idle irq, otherwise false.
*/
bool kbase_csf_scheduler_process_gpu_idle_event(struct kbase_device *kbdev);
void kbase_csf_scheduler_process_gpu_idle_event(struct kbase_device *kbdev);

/**
* kbase_csf_scheduler_get_nr_active_csgs() - Get the number of active CSGs
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@ -117,13 +117,13 @@ static void kbasep_csf_sync_print_kcpu_fence_wait_or_signal(char *buffer, int *l
timeline_name = fence->ops->get_timeline_name(fence);
is_signaled = info.status > 0;

*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:%s obj:0x%pK live_value:0x%.8x | ", cmd_name, fence, is_signaled);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:%s obj:0x%pK live_value:0x%.8x | ", cmd_name, fence, is_signaled);

/* Note: fence->seqno was u32 until 5.1 kernel, then u64 */
*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"timeline_name:%s timeline_context:0x%.16llx fence_seqno:0x%.16llx",
timeline_name, fence->context, (u64)fence->seqno);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"timeline_name:%s timeline_context:0x%.16llx fence_seqno:0x%.16llx",
timeline_name, fence->context, (u64)fence->seqno);

kbase_fence_put(fence);
}
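This file-wide sweep from snprintf() to scnprintf() is more than style. snprintf() returns the length the output would have had without truncation, so the accumulating `*length += snprintf(...)` pattern can push the offset past the end of the buffer once a line truncates, after which every later call computes a bogus remaining size. scnprintf() returns the number of characters actually written (excluding the NUL), keeping the running offset valid. A minimal sketch:

    char buf[32];
    int len = 0;

    /* With scnprintf() the return value can never exceed the space that
     * was available, so len remains a safe offset even after truncation.
     */
    len += scnprintf(buf + len, sizeof(buf) - len, "cmd:%s ", "FENCE_WAIT");
    len += scnprintf(buf + len, sizeof(buf) - len, "seqno:0x%llx", 1ULL);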
@ -149,19 +149,19 @@ static void kbasep_csf_sync_print_kcpu_cqs_wait(struct kbase_context *kctx, char
int ret = kbasep_csf_sync_get_cqs_live_u32(kctx, cqs_obj->addr, &live_val);
bool live_val_valid = (ret >= 0);

*length +=
snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:CQS_WAIT_OPERATION obj:0x%.16llx live_value:", cqs_obj->addr);
*length += scnprintf(
buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:CQS_WAIT_OPERATION obj:0x%.16llx live_value:", cqs_obj->addr);

if (live_val_valid)
*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"0x%.16llx", (u64)live_val);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"0x%.16llx", (u64)live_val);
else
*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
CQS_UNREADABLE_LIVE_VALUE);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
CQS_UNREADABLE_LIVE_VALUE);

*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
" | op:gt arg_value:0x%.8x", cqs_obj->val);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
" | op:gt arg_value:0x%.8x", cqs_obj->val);
}
}

@ -187,18 +187,18 @@ static void kbasep_csf_sync_print_kcpu_cqs_set(struct kbase_context *kctx, char
bool live_val_valid = (ret >= 0);

*length +=
snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:CQS_SET_OPERATION obj:0x%.16llx live_value:", cqs_obj->addr);
scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:CQS_SET_OPERATION obj:0x%.16llx live_value:", cqs_obj->addr);

if (live_val_valid)
*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"0x%.16llx", (u64)live_val);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"0x%.16llx", (u64)live_val);
else
*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
CQS_UNREADABLE_LIVE_VALUE);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
CQS_UNREADABLE_LIVE_VALUE);

*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
" | op:add arg_value:0x%.8x", 1);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
" | op:add arg_value:0x%.8x", 1);
}
}

@ -277,19 +277,19 @@ static void kbasep_csf_sync_print_kcpu_cqs_wait_op(struct kbase_context *kctx, c

bool live_val_valid = (ret >= 0);

*length +=
snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:CQS_WAIT_OPERATION obj:0x%.16llx live_value:", wait_op->addr);
*length += scnprintf(
buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:CQS_WAIT_OPERATION obj:0x%.16llx live_value:", wait_op->addr);

if (live_val_valid)
*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"0x%.16llx", live_val);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"0x%.16llx", live_val);
else
*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
CQS_UNREADABLE_LIVE_VALUE);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
CQS_UNREADABLE_LIVE_VALUE);

*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
" | op:%s arg_value:0x%.16llx", op_name, wait_op->val);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
" | op:%s arg_value:0x%.16llx", op_name, wait_op->val);
}
}

@ -319,18 +319,18 @@ static void kbasep_csf_sync_print_kcpu_cqs_set_op(struct kbase_context *kctx, ch
bool live_val_valid = (ret >= 0);

*length +=
snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:CQS_SET_OPERATION obj:0x%.16llx live_value:", set_op->addr);
scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"cmd:CQS_SET_OPERATION obj:0x%.16llx live_value:", set_op->addr);

if (live_val_valid)
*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"0x%.16llx", live_val);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
"0x%.16llx", live_val);
else
*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
CQS_UNREADABLE_LIVE_VALUE);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
CQS_UNREADABLE_LIVE_VALUE);

*length += snprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
" | op:%s arg_value:0x%.16llx", op_name, set_op->val);
*length += scnprintf(buffer + *length, CSF_SYNC_DUMP_SIZE - *length,
" | op:%s arg_value:0x%.16llx", op_name, set_op->val);
}
}

@ -360,8 +360,8 @@ static void kbasep_csf_sync_kcpu_print_queue(struct kbase_context *kctx,
int length = 0;

started_or_pending = ((i == 0) && queue->command_started) ? 'S' : 'P';
length += snprintf(buffer, CSF_SYNC_DUMP_SIZE, "queue:KCPU-%d-%d exec:%c ",
kctx->id, queue->id, started_or_pending);
length += scnprintf(buffer, CSF_SYNC_DUMP_SIZE, "queue:KCPU-%d-%d exec:%c ",
kctx->id, queue->id, started_or_pending);

cmd = &queue->commands[(u8)(queue->start_offset + i)];
switch (cmd->type) {

@ -388,12 +388,12 @@ static void kbasep_csf_sync_kcpu_print_queue(struct kbase_context *kctx,
kbasep_csf_sync_print_kcpu_cqs_set_op(kctx, buffer, &length, cmd);
break;
default:
length += snprintf(buffer + length, CSF_SYNC_DUMP_SIZE - length,
", U, Unknown blocking command");
length += scnprintf(buffer + length, CSF_SYNC_DUMP_SIZE - length,
", U, Unknown blocking command");
break;
}

length += snprintf(buffer + length, CSF_SYNC_DUMP_SIZE - length, "\n");
length += scnprintf(buffer + length, CSF_SYNC_DUMP_SIZE - length, "\n");
kbasep_print(kbpr, buffer);
}
@ -218,7 +218,7 @@ static void remove_unlinked_chunk(struct kbase_context *kctx,
if (WARN_ON(!list_empty(&chunk->link)))
return;

kbase_gpu_vm_lock(kctx);
kbase_gpu_vm_lock_with_pmode_sync(kctx);
kbase_vunmap(kctx, &chunk->map);
/* KBASE_REG_DONT_NEED regions will be confused with ephemeral regions (inc freed JIT
* regions), and so we must clear that flag too before freeing.

@ -231,7 +231,7 @@ static void remove_unlinked_chunk(struct kbase_context *kctx,
chunk->region->flags &= ~KBASE_REG_DONT_NEED;
#endif
kbase_mem_free_region(kctx, chunk->region);
kbase_gpu_vm_unlock(kctx);
kbase_gpu_vm_unlock_with_pmode_sync(kctx);

kfree(chunk);
}

@ -1058,6 +1058,7 @@ static bool delete_chunk_physical_pages(struct kbase_csf_tiler_heap *heap, u64 c
struct kbase_csf_tiler_heap_chunk *chunk = NULL;

lockdep_assert_held(&heap->kctx->csf.tiler_heaps.lock);
lockdep_assert_held(&kctx->kbdev->csf.scheduler.lock);

chunk = find_chunk(heap, chunk_gpu_va);
if (unlikely(!chunk)) {
@ -331,8 +331,8 @@ static unsigned long kbase_csf_tiler_heap_reclaim_scan_free_pages(struct kbase_d
static unsigned long kbase_csf_tiler_heap_reclaim_count_objects(struct shrinker *s,
struct shrink_control *sc)
{
struct kbase_device *kbdev =
container_of(s, struct kbase_device, csf.scheduler.reclaim_mgr.heap_reclaim);
struct kbase_device *kbdev = KBASE_GET_KBASE_DATA_FROM_SHRINKER(
s, struct kbase_device, csf.scheduler.reclaim_mgr.heap_reclaim);

return kbase_csf_tiler_heap_reclaim_count_free_pages(kbdev, sc);
}

@ -340,8 +340,8 @@ static unsigned long kbase_csf_tiler_heap_reclaim_count_objects(struct shrinker
static unsigned long kbase_csf_tiler_heap_reclaim_scan_objects(struct shrinker *s,
struct shrink_control *sc)
{
struct kbase_device *kbdev =
container_of(s, struct kbase_device, csf.scheduler.reclaim_mgr.heap_reclaim);
struct kbase_device *kbdev = KBASE_GET_KBASE_DATA_FROM_SHRINKER(
s, struct kbase_device, csf.scheduler.reclaim_mgr.heap_reclaim);

return kbase_csf_tiler_heap_reclaim_scan_free_pages(kbdev, sc);
}

@ -352,11 +352,17 @@ void kbase_csf_tiler_heap_reclaim_ctx_init(struct kbase_context *kctx)
INIT_LIST_HEAD(&kctx->csf.sched.heap_info.mgr_link);
}

void kbase_csf_tiler_heap_reclaim_mgr_init(struct kbase_device *kbdev)
int kbase_csf_tiler_heap_reclaim_mgr_init(struct kbase_device *kbdev)
{
struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
struct shrinker *reclaim = &scheduler->reclaim_mgr.heap_reclaim;
u8 prio;
struct shrinker *reclaim;

reclaim =
KBASE_INIT_RECLAIM(&(scheduler->reclaim_mgr), heap_reclaim, "mali-csf-tiler-heap");
if (!reclaim)
return -ENOMEM;
KBASE_SET_RECLAIM(&(scheduler->reclaim_mgr), heap_reclaim, reclaim);

for (prio = KBASE_QUEUE_GROUP_PRIORITY_REALTIME; prio < KBASE_QUEUE_GROUP_PRIORITY_COUNT;
prio++)

@ -366,6 +372,11 @@ void kbase_csf_tiler_heap_reclaim_mgr_init(struct kbase_device *kbdev)
reclaim->scan_objects = kbase_csf_tiler_heap_reclaim_scan_objects;
reclaim->seeks = HEAP_SHRINKER_SEEKS;
reclaim->batch = HEAP_SHRINKER_BATCH;

if (!IS_ENABLED(CONFIG_MALI_VECTOR_DUMP))
KBASE_REGISTER_SHRINKER(reclaim, "mali-csf-tiler-heap", kbdev);

return 0;
}

void kbase_csf_tiler_heap_reclaim_mgr_term(struct kbase_device *kbdev)

@ -373,6 +384,9 @@ void kbase_csf_tiler_heap_reclaim_mgr_term(struct kbase_device *kbdev)
struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
u8 prio;

if (!IS_ENABLED(CONFIG_MALI_VECTOR_DUMP))
KBASE_UNREGISTER_SHRINKER(scheduler->reclaim_mgr.heap_reclaim);

for (prio = KBASE_QUEUE_GROUP_PRIORITY_REALTIME; prio < KBASE_QUEUE_GROUP_PRIORITY_COUNT;
prio++)
WARN_ON(!list_empty(&scheduler->reclaim_mgr.ctx_lists[prio]));
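The container_of() lookups above give way to KBASE_GET_KBASE_DATA_FROM_SHRINKER, and registration moves behind KBASE_INIT_RECLAIM/KBASE_REGISTER_SHRINKER. Those wrappers are defined outside this diff; they most plausibly bridge the shrinker API change in Linux 6.7, where a shrinker became a separately allocated object carrying a private_data pointer. A hedged sketch of what the new-API side would expand to (assuming the >= 6.7 interface):

    /* Assumption: kernel >= 6.7. On older kernels the shrinker is
     * embedded in the owning struct and recovered via container_of().
     */
    struct shrinker *s = shrinker_alloc(0, "mali-csf-tiler-heap");
    if (!s)
        return -ENOMEM;
    s->count_objects = kbase_csf_tiler_heap_reclaim_count_objects;
    s->scan_objects = kbase_csf_tiler_heap_reclaim_scan_objects;
    s->private_data = kbdev;    /* what the GET wrapper reads back */
    shrinker_register(s);
    /* ... teardown: shrinker_free(s); */

This would also be why kbase_csf_tiler_heap_reclaim_mgr_init() now returns int: the shrinker object itself is an allocation that can fail.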
@ -66,8 +66,10 @@ void kbase_csf_tiler_heap_reclaim_ctx_init(struct kbase_context *kctx);
* @kbdev: Pointer to the device.
*
* This function must be called only when a kbase device is initialized.
*
* Return: 0 if issuing reclaim_mgr init was successful, otherwise an error code.
*/
void kbase_csf_tiler_heap_reclaim_mgr_init(struct kbase_device *kbdev);
int kbase_csf_tiler_heap_reclaim_mgr_init(struct kbase_device *kbdev);

/**
* kbase_csf_tiler_heap_reclaim_mgr_term - Termination call for the tiler heap reclaim manager.
@ -151,13 +151,22 @@ static bool tl_reader_overflow_check(struct kbase_csf_tl_reader *self, u16 event
*
* Reset the reader to the default state, i.e. set all the
* mutable fields to zero.
*
* NOTE: this function expects the irq spinlock to be held.
*/
static void tl_reader_reset(struct kbase_csf_tl_reader *self)
{
lockdep_assert_held(&self->read_lock);

self->got_first_event = false;
self->is_active = false;
self->expected_event_id = 0;
self->tl_header.btc = 0;

/* There might be data left in the trace buffer from the previous
* tracing session. We don't want it to leak into this session.
*/
kbase_csf_firmware_trace_buffer_discard_all(self->trace_buffer);
}

int kbase_csf_tl_reader_flush_buffer(struct kbase_csf_tl_reader *self)
@ -324,21 +333,16 @@ static int tl_reader_update_enable_bit(struct kbase_csf_tl_reader *self, bool va

void kbase_csf_tl_reader_init(struct kbase_csf_tl_reader *self, struct kbase_tlstream *stream)
{
self->timer_interval = KBASE_CSF_TL_READ_INTERVAL_DEFAULT;
*self = (struct kbase_csf_tl_reader){
.timer_interval = KBASE_CSF_TL_READ_INTERVAL_DEFAULT,
.stream = stream,
.kbdev = NULL, /* This will be initialized by tl_reader_init_late() */
.is_active = false,
};

kbase_timer_setup(&self->read_timer, kbasep_csf_tl_reader_read_callback);

self->stream = stream;

/* This will be initialized by tl_reader_init_late() */
self->kbdev = NULL;
self->trace_buffer = NULL;
self->tl_header.data = NULL;
self->tl_header.size = 0;

spin_lock_init(&self->read_lock);

tl_reader_reset(self);
}

void kbase_csf_tl_reader_term(struct kbase_csf_tl_reader *self)
@ -348,13 +352,19 @@ void kbase_csf_tl_reader_term(struct kbase_csf_tl_reader *self)

int kbase_csf_tl_reader_start(struct kbase_csf_tl_reader *self, struct kbase_device *kbdev)
{
unsigned long flags;
int rcode;

spin_lock_irqsave(&self->read_lock, flags);

/* If already running, early exit. */
if (self->is_active)
if (self->is_active) {
spin_unlock_irqrestore(&self->read_lock, flags);
return 0;
}

if (tl_reader_init_late(self, kbdev)) {
spin_unlock_irqrestore(&self->read_lock, flags);
#if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
dev_warn(kbdev->dev, "CSFFW timeline is not available for MALI_BIFROST_NO_MALI builds!");
return 0;

@ -366,6 +376,9 @@ int kbase_csf_tl_reader_start(struct kbase_csf_tl_reader *self, struct kbase_dev
tl_reader_reset(self);

self->is_active = true;

spin_unlock_irqrestore(&self->read_lock, flags);

/* Set bytes to copy to the header size. This is to trigger copying
* of the header to the user space.
*/
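The rewritten kbase_csf_tl_reader_start() now holds read_lock across the whole start sequence, which brings the rule the new braces enforce: once a spinlock is taken, every early return must drop it. The shape, reduced to its essentials:

    unsigned long flags;

    spin_lock_irqsave(&self->read_lock, flags);
    if (self->is_active) {
        /* unlock on every exit path, including early ones */
        spin_unlock_irqrestore(&self->read_lock, flags);
        return 0;
    }
    self->is_active = true;
    spin_unlock_irqrestore(&self->read_lock, flags);
    return 0;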
@ -519,6 +519,14 @@ void kbase_csf_firmware_trace_buffer_discard(struct firmware_trace_buffer *trace
}
EXPORT_SYMBOL(kbase_csf_firmware_trace_buffer_discard);

void kbase_csf_firmware_trace_buffer_discard_all(struct firmware_trace_buffer *trace_buffer)
{
if (WARN_ON(!trace_buffer))
return;

*(trace_buffer->cpu_va.extract_cpu_va) = *(trace_buffer->cpu_va.insert_cpu_va);
}

static void update_trace_buffer_active_mask64(struct firmware_trace_buffer *tb, u64 mask)
{
unsigned int i;
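discard_all empties the firmware trace buffer by snapping the extract (read) index forward to the insert (write) index; nothing is copied or zeroed, the pending bytes are simply declared consumed. The same idiom for a generic single-producer ring buffer:

    struct ring {
        u32 insert;   /* advanced by the producer */
        u32 extract;  /* advanced by the consumer */
    };

    /* Discarding all pending data is a single store: everything between
     * extract and insert is treated as already read.
     */
    static void ring_discard_all(struct ring *r)
    {
        r->extract = r->insert;
    }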
@ -179,6 +179,15 @@ unsigned int kbase_csf_firmware_trace_buffer_read_data(struct firmware_trace_buf
*/
void kbase_csf_firmware_trace_buffer_discard(struct firmware_trace_buffer *trace_buffer);

/**
* kbase_csf_firmware_trace_buffer_discard_all - Discard all data from a trace buffer
*
* @trace_buffer: Trace buffer handle
*
* Discard all the data in the trace buffer to make it empty.
*/
void kbase_csf_firmware_trace_buffer_discard_all(struct firmware_trace_buffer *trace_buffer);

/**
* kbase_csf_firmware_trace_buffer_get_active_mask64 - Get trace buffer active mask
*
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@ -115,7 +115,7 @@ struct kbasep_printer *kbasep_printer_buffer_init(struct kbase_device *kbdev,

if (kbpr) {
if (kfifo_alloc(&kbpr->fifo, KBASEP_PRINTER_BUFFER_MAX_SIZE, GFP_KERNEL)) {
kfree(kbpr);
vfree(kbpr);
return NULL;
}
kbpr->kbdev = kbdev;

@ -224,7 +224,7 @@ __attribute__((format(__printf__, 2, 3))) void kbasep_print(struct kbasep_printe
va_list arglist;

va_start(arglist, fmt);
len = vsnprintf(buffer, KBASEP_PRINT_FORMAT_BUFFER_MAX_SIZE, fmt, arglist);
len = vscnprintf(buffer, KBASEP_PRINT_FORMAT_BUFFER_MAX_SIZE, fmt, arglist);
if (len <= 0) {
pr_err("message write to the buffer failed");
goto exit;
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2020-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
|
@ -27,8 +27,8 @@

void kbasep_ktrace_backend_format_header(char *buffer, int sz, s32 *written)
{
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"group,slot,prio,csi,kcpu"),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"group,slot,prio,csi,kcpu"),
0);
}

@ -44,38 +44,39 @@ void kbasep_ktrace_backend_format_msg(struct kbase_ktrace_msg *trace_msg, char *
if (be_msg->gpu.flags & KBASE_KTRACE_FLAG_CSF_GROUP) {
const s8 slot = be_msg->gpu.csg_nr;
/* group,slot, */
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0), "%u,%d,",
be_msg->gpu.group_handle, slot),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"%u,%d,", be_msg->gpu.group_handle, slot),
0);

/* prio */
if (slot >= 0)
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"%u", be_msg->gpu.slot_prio),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"%u", be_msg->gpu.slot_prio),
0);

/* , */
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ","), 0);
*written +=
MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ","), 0);
} else {
/* No group,slot,prio fields, but ensure ending with "," */
*written +=
MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ",,,"), 0);
MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ",,,"), 0);
}

/* queue parts: csi */
if (trace_msg->backend.gpu.flags & KBASE_KTRACE_FLAG_CSF_QUEUE)
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0), "%d",
be_msg->gpu.csi_index),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0), "%d",
be_msg->gpu.csi_index),
0);

/* , */
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ","), 0);
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ","), 0);

if (be_msg->gpu.flags & KBASE_KTRACE_FLAG_CSF_KCPU) {
/* kcpu data */
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"kcpu %d (0x%llx)", be_msg->kcpu.id,
be_msg->kcpu.extra_info_val),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"kcpu %d (0x%llx)", be_msg->kcpu.id,
be_msg->kcpu.extra_info_val),
0);
}
|
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2020-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
|
@ -27,8 +27,8 @@

void kbasep_ktrace_backend_format_header(char *buffer, int sz, s32 *written)
{
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"katom,gpu_addr,jobslot,refcount"),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"katom,gpu_addr,jobslot,refcount"),
0);
}

@ -37,34 +37,34 @@ void kbasep_ktrace_backend_format_msg(struct kbase_ktrace_msg *trace_msg, char *
{
/* katom */
if (trace_msg->backend.gpu.flags & KBASE_KTRACE_FLAG_JM_ATOM)
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"atom %u (ud: 0x%llx 0x%llx)",
trace_msg->backend.gpu.atom_number,
trace_msg->backend.gpu.atom_udata[0],
trace_msg->backend.gpu.atom_udata[1]),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
"atom %u (ud: 0x%llx 0x%llx)",
trace_msg->backend.gpu.atom_number,
trace_msg->backend.gpu.atom_udata[0],
trace_msg->backend.gpu.atom_udata[1]),
0);

/* gpu_addr */
if (trace_msg->backend.gpu.flags & KBASE_KTRACE_FLAG_BACKEND)
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
",%.8llx,", trace_msg->backend.gpu.gpu_addr),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0),
",%.8llx,", trace_msg->backend.gpu.gpu_addr),
0);
else
*written +=
MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ",,"), 0);
MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ",,"), 0);

/* jobslot */
if (trace_msg->backend.gpu.flags & KBASE_KTRACE_FLAG_JM_JOBSLOT)
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0), "%d",
trace_msg->backend.gpu.jobslot),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0), "%d",
trace_msg->backend.gpu.jobslot),
0);

*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ","), 0);
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0), ","), 0);

/* refcount */
if (trace_msg->backend.gpu.flags & KBASE_KTRACE_FLAG_JM_REFCOUNT)
*written += MAX(snprintf(buffer + *written, (size_t)MAX(sz - *written, 0), "%d",
trace_msg->backend.gpu.refcount),
*written += MAX(scnprintf(buffer + *written, (size_t)MAX(sz - *written, 0), "%d",
trace_msg->backend.gpu.refcount),
0);
}
@ -1,7 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
|
||||
/*
|
||||
*
|
||||
* (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
|
||||
* (C) COPYRIGHT 2020-2024 ARM Limited. All rights reserved.
|
||||
*
|
||||
* This program is free software and is provided to you under the terms of the
|
||||
* GNU General Public License version 2 as published by the Free Software
|
||||
|
|
@ -71,15 +71,15 @@ static const char *const kbasep_ktrace_code_string[] = {

static void kbasep_ktrace_format_header(char *buffer, int sz, s32 written)
{
written += MAX(snprintf(buffer + written, (size_t)MAX(sz - written, 0),
"secs,thread_id,cpu,code,kctx,"),
written += MAX(scnprintf(buffer + written, (size_t)MAX(sz - written, 0),
"secs,thread_id,cpu,code,kctx,"),
0);

kbasep_ktrace_backend_format_header(buffer, sz, &written);

written += MAX(snprintf(buffer + written, (size_t)MAX(sz - written, 0),
",info_val,ktrace_version=%u.%u", KBASE_KTRACE_VERSION_MAJOR,
KBASE_KTRACE_VERSION_MINOR),
written += MAX(scnprintf(buffer + written, (size_t)MAX(sz - written, 0),
",info_val,ktrace_version=%u.%u", KBASE_KTRACE_VERSION_MAJOR,
KBASE_KTRACE_VERSION_MINOR),
0);

buffer[sz - 1] = 0;

@ -93,21 +93,21 @@ static void kbasep_ktrace_format_msg(struct kbase_ktrace_msg *trace_msg, char *b
*
* secs,thread_id,cpu,code,
*/
written += MAX(snprintf(buffer + written, (size_t)MAX(sz - written, 0), "%d.%.6d,%d,%d,%s,",
(int)trace_msg->timestamp.tv_sec,
(int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id,
trace_msg->cpu,
kbasep_ktrace_code_string[trace_msg->backend.gpu.code]),
written += MAX(scnprintf(buffer + written, (size_t)MAX(sz - written, 0),
"%d.%.6d,%d,%d,%s,", (int)trace_msg->timestamp.tv_sec,
(int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id,
trace_msg->cpu,
kbasep_ktrace_code_string[trace_msg->backend.gpu.code]),
0);

/* kctx part: */
if (trace_msg->kctx_tgid) {
written += MAX(snprintf(buffer + written, (size_t)MAX(sz - written, 0), "%d_%u",
trace_msg->kctx_tgid, trace_msg->kctx_id),
written += MAX(scnprintf(buffer + written, (size_t)MAX(sz - written, 0), "%d_%u",
trace_msg->kctx_tgid, trace_msg->kctx_id),
0);
}
/* Trailing comma */
written += MAX(snprintf(buffer + written, (size_t)MAX(sz - written, 0), ","), 0);
written += MAX(scnprintf(buffer + written, (size_t)MAX(sz - written, 0), ","), 0);

/* Backend parts */
kbasep_ktrace_backend_format_msg(trace_msg, buffer, sz, &written);

@ -119,8 +119,8 @@ static void kbasep_ktrace_format_msg(struct kbase_ktrace_msg *trace_msg, char *b
* Note that the last column is empty, it's simply to hold the ktrace
* version in the header
*/
written += MAX(snprintf(buffer + written, (size_t)MAX(sz - written, 0), ",0x%.16llx",
(unsigned long long)trace_msg->info_val),
written += MAX(scnprintf(buffer + written, (size_t)MAX(sz - written, 0), ",0x%.16llx",
(unsigned long long)trace_msg->info_val),
0);
buffer[sz - 1] = 0;
}
@ -1,7 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
|
||||
/*
|
||||
*
|
||||
* (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
|
||||
* (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
|
||||
*
|
||||
* This program is free software and is provided to you under the terms of the
|
||||
* GNU General Public License version 2 as published by the Free Software
|
||||
|
|
@ -159,9 +159,11 @@ fail_reset_gpu_init:
*/
static void kbase_backend_late_term(struct kbase_device *kbdev)
{
kbase_backend_devfreq_term(kbdev);
kbasep_pm_metrics_term(kbdev);
kbase_ipa_control_term(kbdev);
{
kbase_backend_devfreq_term(kbdev);
kbasep_pm_metrics_term(kbdev);
kbase_ipa_control_term(kbdev);
}
kbase_hwaccess_pm_halt(kbdev);
kbase_reset_gpu_term(kbdev);
kbase_hwaccess_pm_term(kbdev);

@ -279,10 +281,8 @@ static const struct kbase_device_init dev_init[] = {
{ kbase_gpu_device_create, kbase_gpu_device_destroy, "Dummy model initialization failed" },
#else /* !IS_ENABLED(CONFIG_MALI_REAL_HW) */
{ kbase_get_irqs, NULL, "IRQ search failed" },
#endif /* !IS_ENABLED(CONFIG_MALI_REAL_HW) */
#if !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
{ registers_map, registers_unmap, "Register map failed" },
#endif /* !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) */
#endif /* !IS_ENABLED(CONFIG_MALI_REAL_HW) */
#if IS_ENABLED(CONFIG_MALI_TRACE_POWER_GPU_WORK_PERIOD)
{ kbase_gpu_metrics_init, kbase_gpu_metrics_term, "GPU metrics initialization failed" },
#endif /* IS_ENABLED(CONFIG_MALI_TRACE_POWER_GPU_WORK_PERIOD) */
|
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2020-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
|
@ -28,6 +28,16 @@
#include <mali_kbase_reset_gpu.h>
#include <mmu/mali_kbase_mmu.h>
#include <mali_kbase_ctx_sched.h>
#include <mmu/mali_kbase_mmu_faults_decoder.h>

bool kbase_is_gpu_removed(struct kbase_device *kbdev)
{
if (!IS_ENABLED(CONFIG_MALI_ARBITER_SUPPORT))
return false;

return (KBASE_REG_READ(kbdev, GPU_CONTROL_ENUM(GPU_ID)) == 0);
}

/**
* kbase_report_gpu_fault - Report a GPU fault of the device.

@ -173,6 +183,9 @@ void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
kbase_pm_power_changed(kbdev);
}

if (val & MCU_STATUS_GPU_IRQ)
wake_up_all(&kbdev->csf.event_wait);

KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, val);
}
KBASE_EXPORT_TEST_API(kbase_gpu_interrupt);
|
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2020-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
|
@ -28,6 +28,14 @@
#include <mali_kbase_reset_gpu.h>
#include <mmu/mali_kbase_mmu.h>

bool kbase_is_gpu_removed(struct kbase_device *kbdev)
{
if (!IS_ENABLED(CONFIG_MALI_ARBITER_SUPPORT))
return false;

return (KBASE_REG_READ(kbdev, GPU_CONTROL_ENUM(GPU_ID)) == 0);
}

/**
* kbase_report_gpu_fault - Report a GPU fault.
* @kbdev: Kbase device pointer
|
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
|
@ -217,16 +217,14 @@ static const struct kbase_device_init dev_init[] = {
{ kbase_gpu_device_create, kbase_gpu_device_destroy, "Dummy model initialization failed" },
#else /* !IS_ENABLED(CONFIG_MALI_REAL_HW) */
{ kbase_get_irqs, NULL, "IRQ search failed" },
#endif /* !IS_ENABLED(CONFIG_MALI_REAL_HW) */
#if !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
{ registers_map, registers_unmap, "Register map failed" },
#endif /* !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) */
#endif /* !IS_ENABLED(CONFIG_MALI_REAL_HW) */
#if IS_ENABLED(CONFIG_MALI_TRACE_POWER_GPU_WORK_PERIOD)
{ kbase_gpu_metrics_init, kbase_gpu_metrics_term, "GPU metrics initialization failed" },
#endif /* IS_ENABLED(CONFIG_MALI_TRACE_POWER_GPU_WORK_PERIOD) */
{ power_control_init, power_control_term, "Power control initialization failed" },
{ kbase_device_io_history_init, kbase_device_io_history_term,
"Register access history initialization failed" },
{ kbase_device_pm_init, kbase_device_pm_term, "Power management initialization failed" },
{ kbase_device_early_init, kbase_device_early_term, "Early device initialization failed" },
{ kbase_backend_time_init, NULL, "Time backend initialization failed" },
{ kbase_device_misc_init, kbase_device_misc_term,
|
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
|
@ -556,14 +556,27 @@ int kbase_device_early_init(struct kbase_device *kbdev)
|
|||
/* Ensure we can access the GPU registers */
|
||||
kbase_pm_register_access_enable(kbdev);
|
||||
|
||||
/* Initialize GPU_ID props */
|
||||
kbase_gpuprops_parse_gpu_id(&kbdev->gpu_props.gpu_id, kbase_reg_get_gpu_id(kbdev));
|
||||
|
||||
/* Initialize register mapping LUTs */
|
||||
err = kbase_regmap_init(kbdev);
|
||||
if (err)
|
||||
/*
|
||||
* If -EPERM is returned, it means the device backend is not supported, but
|
||||
* device initialization can continue.
|
||||
*/
|
||||
err = kbase_device_backend_init(kbdev);
|
||||
if (err != 0 && err != -EPERM)
|
||||
goto pm_runtime_term;
|
||||
|
||||
/*
|
||||
* Initialize register mapping LUTs. This would have been initialized on HW
|
||||
* Arbitration but not on PV or non-arbitration devices.
|
||||
*/
|
||||
if (!kbase_reg_is_init(kbdev)) {
|
||||
/* Initialize GPU_ID props */
|
||||
kbase_gpuprops_parse_gpu_id(&kbdev->gpu_props.gpu_id, kbase_reg_get_gpu_id(kbdev));
|
||||
|
||||
err = kbase_regmap_init(kbdev);
|
||||
if (err)
|
||||
goto backend_term;
|
||||
}
|
||||
|
||||
/* Set the list of features available on the current HW
|
||||
* (identified by the GPU_ID register)
|
||||
*/
|
||||
|
|
@ -572,7 +585,7 @@ int kbase_device_early_init(struct kbase_device *kbdev)
|
|||
/* Find out GPU properties based on the GPU feature registers. */
|
||||
err = kbase_gpuprops_init(kbdev);
|
||||
if (err)
|
||||
goto regmap_term;
|
||||
goto backend_term;
|
||||
|
||||
/* Get the list of workarounds for issues on the current HW
|
||||
* (identified by the GPU_ID register and impl_tech in THREAD_FEATURES)
|
||||
|
|
@ -585,13 +598,16 @@ int kbase_device_early_init(struct kbase_device *kbdev)
|
|||
kbase_pm_register_access_disable(kbdev);
|
||||
|
||||
#ifdef CONFIG_MALI_ARBITER_SUPPORT
|
||||
if (kbdev->arb.arb_if)
|
||||
err = kbase_arbiter_pm_install_interrupts(kbdev);
|
||||
else
|
||||
if (kbdev->arb.arb_if) {
|
||||
if (kbdev->pm.arb_vm_state)
|
||||
err = kbase_arbiter_pm_install_interrupts(kbdev);
|
||||
} else {
|
||||
err = kbase_install_interrupts(kbdev);
|
||||
}
|
||||
#else
|
||||
err = kbase_install_interrupts(kbdev);
|
||||
#endif
|
||||
|
||||
if (err)
|
||||
goto gpuprops_term;
|
||||
|
||||
|
|
@ -599,9 +615,13 @@ int kbase_device_early_init(struct kbase_device *kbdev)

gpuprops_term:
	kbase_gpuprops_term(kbdev);
regmap_term:
backend_term:
	kbase_device_backend_term(kbdev);
	kbase_regmap_term(kbdev);
pm_runtime_term:
	if (kbdev->pm.backend.gpu_powered)
		kbase_pm_register_access_disable(kbdev);

	kbase_pm_runtime_term(kbdev);
platform_device_term:
	kbasep_platform_device_term(kbdev);
@ -620,8 +640,11 @@ void kbase_device_early_term(struct kbase_device *kbdev)
		kbase_release_interrupts(kbdev);
#else
	kbase_release_interrupts(kbdev);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
#endif

	kbase_gpuprops_term(kbdev);
	kbase_device_backend_term(kbdev);
	kbase_regmap_term(kbdev);
	kbase_pm_runtime_term(kbdev);
	kbasep_platform_device_term(kbdev);
	kbase_ktrace_term(kbdev);

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2019-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -58,6 +58,9 @@ void kbase_increment_device_id(void);
 * When a device file is opened for the first time,
 * load firmware and initialize hardware counter components.
 *
 * It is safe for this function to be called multiple times without ill
 * effects. Only the first call takes effect.
 *
 * Return: 0 on success. An error code on failure.
 */
int kbase_device_firmware_init_once(struct kbase_device *kbdev);
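The init-once contract documented above is worth seeing in miniature. A minimal sketch of the pattern, assuming only a guard flag protected by a lock (the names below are illustrative stand-ins, not the kbase implementation):

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-ins for the device object and its firmware loader. */
struct dev_state {
	pthread_mutex_t lock;
	bool fw_loaded;
};

static int load_firmware(struct dev_state *dev)
{
	(void)dev;
	return 0; /* pretend the firmware load succeeded */
}

/* Safe to call on every open of the device file; only the first
 * successful call does any work, later calls return immediately.
 */
static int firmware_init_once(struct dev_state *dev)
{
	int err = 0;

	pthread_mutex_lock(&dev->lock);
	if (!dev->fw_loaded) {
		err = load_firmware(dev);
		if (!err)
			dev->fw_loaded = true;
	}
	pthread_mutex_unlock(&dev->lock);
	return err;
}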

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -27,14 +27,6 @@
#include <mali_kbase_reset_gpu.h>
#include <mmu/mali_kbase_mmu.h>

bool kbase_is_gpu_removed(struct kbase_device *kbdev)
{
	if (!IS_ENABLED(CONFIG_MALI_ARBITER_SUPPORT))
		return false;

	return (kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(GPU_ID)) == 0);
}

/**
 * busy_wait_cache_operation - Wait for a pending cache flush to complete
 *

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -46,7 +46,7 @@ u32 kbase_reg_read32(struct kbase_device *kbdev, u32 reg_enum)
	u32 val = 0;
	u32 offset;

	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return 0;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_READ | KBASE_REGMAP_WIDTH_32_BIT)))

@ -68,7 +68,7 @@ u64 kbase_reg_read64(struct kbase_device *kbdev, u32 reg_enum)
	u32 val32[2] = { 0 };
	u32 offset;

	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return 0;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_READ | KBASE_REGMAP_WIDTH_64_BIT)))

@ -91,7 +91,7 @@ u64 kbase_reg_read64_coherent(struct kbase_device *kbdev, u32 reg_enum)
	u32 hi1 = 0, hi2 = 0, lo = 0;
	u32 offset;

	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return 0;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_READ | KBASE_REGMAP_WIDTH_64_BIT)))

@ -116,7 +116,7 @@ void kbase_reg_write32(struct kbase_device *kbdev, u32 reg_enum, u32 value)
	unsigned long flags;
	u32 offset;

	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_WRITE | KBASE_REGMAP_WIDTH_32_BIT)))

@ -135,7 +135,7 @@ void kbase_reg_write64(struct kbase_device *kbdev, u32 reg_enum, u64 value)
	unsigned long flags;
	u32 offset;

	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_WRITE | KBASE_REGMAP_WIDTH_64_BIT)))

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -24,12 +24,13 @@

#include <mali_kbase.h>
#include <hw_access/mali_kbase_hw_access.h>
#include <linux/mali_hw_access.h>

u64 kbase_reg_get_gpu_id(struct kbase_device *kbdev)
{
	u32 val[2] = { 0 };

	val[0] = readl(kbdev->reg);
	val[0] = mali_readl(kbdev->reg);

	return (u64)val[0] | ((u64)val[1] << 32);

@ -39,13 +40,13 @@ u32 kbase_reg_read32(struct kbase_device *kbdev, u32 reg_enum)
{
	u32 val;

	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return 0;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_READ | KBASE_REGMAP_WIDTH_32_BIT)))
		return 0;

	val = readl(kbdev->regmap.regs[reg_enum]);
	val = mali_readl(kbdev->regmap.regs[reg_enum]);

#if IS_ENABLED(CONFIG_DEBUG_FS)
	if (unlikely(kbdev->io_history.enabled))

@ -63,14 +64,13 @@ u64 kbase_reg_read64(struct kbase_device *kbdev, u32 reg_enum)
{
	u64 val;

	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return 0;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_READ | KBASE_REGMAP_WIDTH_64_BIT)))
		return 0;

	val = (u64)readl(kbdev->regmap.regs[reg_enum]) |
	      ((u64)readl(kbdev->regmap.regs[reg_enum] + 4) << 32);
	val = mali_readq(kbdev->regmap.regs[reg_enum]);

#if IS_ENABLED(CONFIG_DEBUG_FS)
	if (unlikely(kbdev->io_history.enabled)) {
@ -90,23 +90,14 @@ KBASE_EXPORT_TEST_API(kbase_reg_read64);
u64 kbase_reg_read64_coherent(struct kbase_device *kbdev, u32 reg_enum)
{
	u64 val;
#if !IS_ENABLED(CONFIG_MALI_64BIT_HW_ACCESS)
	u32 hi1, hi2, lo;
#endif

	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return 0;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_READ | KBASE_REGMAP_WIDTH_64_BIT)))
		return 0;

	do {
		hi1 = readl(kbdev->regmap.regs[reg_enum] + 4);
		lo = readl(kbdev->regmap.regs[reg_enum]);
		hi2 = readl(kbdev->regmap.regs[reg_enum] + 4);
	} while (hi1 != hi2);

	val = lo | (((u64)hi1) << 32);
	val = mali_readq_coherent(kbdev->regmap.regs[reg_enum]);

#if IS_ENABLED(CONFIG_DEBUG_FS)
	if (unlikely(kbdev->io_history.enabled)) {
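The hi1/lo/hi2 loop deleted above is the classic way to read a 64-bit register coherently through two 32-bit halves, and is presumably what mali_readq_coherent() falls back to when no native 64-bit access is available. A self-contained sketch of the technique (read_hi/read_lo are hypothetical accessors standing in for the two readl() calls):

#include <stdint.h>

/* Reread the high half until it is stable, so a carry propagating
 * between the two 32-bit halves cannot be observed mid-update.
 */
static uint64_t read64_coherent(uint32_t (*read_hi)(void), uint32_t (*read_lo)(void))
{
	uint32_t hi1, hi2, lo;

	do {
		hi1 = read_hi();
		lo = read_lo();
		hi2 = read_hi();
	} while (hi1 != hi2);

	return ((uint64_t)hi1 << 32) | lo;
}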
@ -125,13 +116,13 @@ KBASE_EXPORT_TEST_API(kbase_reg_read64_coherent);

void kbase_reg_write32(struct kbase_device *kbdev, u32 reg_enum, u32 value)
{
	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_WRITE | KBASE_REGMAP_WIDTH_32_BIT)))
		return;

	writel(value, kbdev->regmap.regs[reg_enum]);
	mali_writel(value, kbdev->regmap.regs[reg_enum]);

#if IS_ENABLED(CONFIG_DEBUG_FS)
	if (unlikely(kbdev->io_history.enabled))

@ -145,14 +136,13 @@ KBASE_EXPORT_TEST_API(kbase_reg_write32);

void kbase_reg_write64(struct kbase_device *kbdev, u32 reg_enum, u64 value)
{
	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
	if (WARN_ON(!kbase_reg_is_powered_access_allowed(kbdev, reg_enum)))
		return;
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum,
					      KBASE_REGMAP_PERM_WRITE | KBASE_REGMAP_WIDTH_64_BIT)))
		return;

	writel(value & 0xFFFFFFFF, kbdev->regmap.regs[reg_enum]);
	writel(value >> 32, kbdev->regmap.regs[reg_enum] + 4);
	mali_writeq(value, kbdev->regmap.regs[reg_enum]);

#if IS_ENABLED(CONFIG_DEBUG_FS)
	if (unlikely(kbdev->io_history.enabled)) {

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -24,9 +24,50 @@

#include <mali_kbase.h>
#include "mali_kbase_hw_access.h"
#include "mali_kbase_hw_access_regmap.h"

#include <uapi/gpu/arm/bifrost/gpu/mali_kbase_gpu_id.h>

#define KBASE_REGMAP_ACCESS_ALWAYS_POWERED (1U << 16)

static u32 always_powered_regs[] = {
#if MALI_USE_CSF
#else /* MALI_USE_CSF */
	PTM_AW_IRQ_CLEAR,
	PTM_AW_IRQ_INJECTION,
	PTM_AW_IRQ_MASK,
	PTM_AW_IRQ_RAWSTAT,
	PTM_AW_IRQ_STATUS,
	PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE0,
	PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE1,
	PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE0,
	PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE1,
	PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE_STATUS,
	PTM_ID,
#endif /* MALI_USE_CSF */
};

static void kbasep_reg_setup_always_powered_registers(struct kbase_device *kbdev)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(always_powered_regs); i++) {
		u32 reg_enum = always_powered_regs[i];

		if (!kbase_reg_is_valid(kbdev, reg_enum))
			continue;

		kbdev->regmap.flags[reg_enum] |= KBASE_REGMAP_ACCESS_ALWAYS_POWERED;
	}
}

bool kbase_reg_is_powered_access_allowed(struct kbase_device *kbdev, u32 reg_enum)
{
	if (kbdev->regmap.flags[reg_enum] & KBASE_REGMAP_ACCESS_ALWAYS_POWERED)
		return true;
	return kbdev->pm.backend.gpu_powered;
}
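Taken together, the table and the two helpers above implement a simple scheme: registers living in an always-on power domain are tagged once at init time, and each access check then reduces to a flag test plus the global power state. A standalone sketch of that idea, with illustrative types and sizes:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define REG_ALWAYS_POWERED (1u << 16) /* mirrors KBASE_REGMAP_ACCESS_ALWAYS_POWERED */

struct toy_regmap {
	uint32_t flags[32]; /* one flags word per register enum */
	bool gpu_powered;
};

/* Tag the registers that live in an always-on power domain. */
static void setup_always_powered(struct toy_regmap *map, const unsigned int *regs, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		map->flags[regs[i]] |= REG_ALWAYS_POWERED;
}

/* Access is allowed when the GPU is powered, or unconditionally for
 * registers tagged as always powered.
 */
static bool powered_access_allowed(const struct toy_regmap *map, unsigned int reg_enum)
{
	if (map->flags[reg_enum] & REG_ALWAYS_POWERED)
		return true;
	return map->gpu_powered;
}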

bool kbase_reg_is_size64(struct kbase_device *kbdev, u32 reg_enum)
{
	if (WARN_ON(reg_enum >= kbdev->regmap.size))

@ -67,6 +108,11 @@ bool kbase_reg_is_accessible(struct kbase_device *kbdev, u32 reg_enum, u32 flags
	return true;
}

bool kbase_reg_is_init(struct kbase_device *kbdev)
{
	return (kbdev->regmap.regs != NULL) && (kbdev->regmap.flags != NULL);
}

int kbase_reg_get_offset(struct kbase_device *kbdev, u32 reg_enum, u32 *offset)
{
	if (unlikely(!kbase_reg_is_accessible(kbdev, reg_enum, 0)))

@ -108,12 +154,12 @@ int kbase_regmap_init(struct kbase_device *kbdev)
		return -ENOMEM;
	}

	kbasep_reg_setup_always_powered_registers(kbdev);

	dev_info(kbdev->dev, "Register LUT %08x initialized for GPU arch 0x%08x\n", lut_arch_id,
		 kbdev->gpu_props.gpu_id.arch_id);

#if IS_ENABLED(CONFIG_MALI_64BIT_HW_ACCESS) && IS_ENABLED(CONFIG_MALI_REAL_HW)
	dev_info(kbdev->dev, "64-bit HW access enabled\n");
#endif

	return 0;
}

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -128,6 +128,25 @@ bool kbase_reg_is_valid(struct kbase_device *kbdev, u32 reg_enum);
 */
bool kbase_reg_is_accessible(struct kbase_device *kbdev, u32 reg_enum, u32 flags);

/**
 * kbase_reg_is_powered_access_allowed - check if a register is accessible given
 *                                       the current power state
 *
 * @kbdev: Kbase device pointer
 * @reg_enum: Register enum
 *
 * Return: true if the register is accessible
 */
bool kbase_reg_is_powered_access_allowed(struct kbase_device *kbdev, u32 reg_enum);

/**
 * kbase_reg_is_init - check if regmap is initialized
 *
 * @kbdev: Kbase device pointer
 * Return: true if the regmap is initialized
 */
bool kbase_reg_is_init(struct kbase_device *kbdev);

/**
 * kbase_reg_get_offset - get register offset from enum
 * @kbdev: Kbase device pointer

@ -308,6 +308,16 @@
#define TC_CLOCK_GATE_OVERRIDE (1ul << 0)
/* End TILER_CONFIG register */

/* L2_FEATURES register */
#define L2_FEATURES_CACHE_SIZE_SHIFT GPU_U(16)
#define L2_FEATURES_CACHE_SIZE_MASK (GPU_U(0xFF) << L2_FEATURES_CACHE_SIZE_SHIFT)
#define L2_FEATURES_CACHE_SIZE_GET(reg_val) \
	(((reg_val)&L2_FEATURES_CACHE_SIZE_MASK) >> L2_FEATURES_CACHE_SIZE_SHIFT)
#define L2_FEATURES_CACHE_SIZE_SET(reg_val, value) \
	(~(~(reg_val) | L2_FEATURES_CACHE_SIZE_MASK) | \
	 (((value) << L2_FEATURES_CACHE_SIZE_SHIFT) & L2_FEATURES_CACHE_SIZE_MASK))
/* End L2_FEATURES register */

/* L2_CONFIG register */
#define L2_CONFIG_SIZE_SHIFT 16
#define L2_CONFIG_SIZE_MASK (0xFFul << L2_CONFIG_SIZE_SHIFT)
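As a worked example of the new CACHE_SIZE accessors: the field is an 8-bit value at bit 16 of L2_FEATURES, and assuming the usual log2-of-bytes encoding for cache-size fields (an assumption here, not stated by the diff), a raw field value of 0x13 decodes to 512 KiB:

#include <stdint.h>
#include <stdio.h>

#define CACHE_SIZE_SHIFT 16
#define CACHE_SIZE_MASK (UINT32_C(0xFF) << CACHE_SIZE_SHIFT)
#define CACHE_SIZE_GET(reg_val) (((reg_val) & CACHE_SIZE_MASK) >> CACHE_SIZE_SHIFT)

int main(void)
{
	/* Hypothetical L2_FEATURES value whose CACHE_SIZE field is 0x13;
	 * under a log2 encoding that is 1 << 0x13 = 512 KiB.
	 */
	uint32_t l2_features = UINT32_C(0x13) << CACHE_SIZE_SHIFT;
	uint64_t size_bytes = UINT64_C(1) << CACHE_SIZE_GET(l2_features);

	printf("L2 cache size: %llu bytes\n", (unsigned long long)size_bytes);
	return 0;
}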

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -2240,6 +2240,56 @@ static void kbase_regmap_v9_2_init(struct kbase_device *kbdev)
	kbdev->regmap.regs[GPU_CONTROL__L2_CONFIG] = kbdev->reg + 0x48;
}

static void kbase_regmap_v9_14_init(struct kbase_device *kbdev)
{
	if (kbdev->regmap.regs == NULL && kbdev->regmap.flags == NULL) {
		kbdev->regmap.size = NR_V9_14_REGS;
		kbdev->regmap.regs =
			kcalloc(kbdev->regmap.size, sizeof(void __iomem *), GFP_KERNEL);
		kbdev->regmap.flags = kcalloc(kbdev->regmap.size, sizeof(u32), GFP_KERNEL);
	}

	if (WARN_ON(kbdev->regmap.regs == NULL))
		return;
	if (WARN_ON(kbdev->regmap.flags == NULL))
		return;

	kbase_regmap_v9_2_init(kbdev);

	kbdev->regmap.flags[PTM_AW_IRQ_CLEAR] = KBASE_REGMAP_WIDTH_32_BIT | KBASE_REGMAP_PERM_READ |
						KBASE_REGMAP_PERM_WRITE;
	kbdev->regmap.flags[PTM_AW_IRQ_INJECTION] =
		KBASE_REGMAP_WIDTH_32_BIT | KBASE_REGMAP_PERM_READ | KBASE_REGMAP_PERM_WRITE;
	kbdev->regmap.flags[PTM_AW_IRQ_MASK] = KBASE_REGMAP_WIDTH_32_BIT | KBASE_REGMAP_PERM_READ |
					       KBASE_REGMAP_PERM_WRITE;
	kbdev->regmap.flags[PTM_AW_IRQ_RAWSTAT] = KBASE_REGMAP_WIDTH_32_BIT |
						  KBASE_REGMAP_PERM_READ;
	kbdev->regmap.flags[PTM_AW_IRQ_STATUS] = KBASE_REGMAP_WIDTH_32_BIT | KBASE_REGMAP_PERM_READ;
	kbdev->regmap.flags[PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE0] = KBASE_REGMAP_WIDTH_32_BIT |
								     KBASE_REGMAP_PERM_READ;
	kbdev->regmap.flags[PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE1] = KBASE_REGMAP_WIDTH_32_BIT |
								     KBASE_REGMAP_PERM_READ;
	kbdev->regmap.flags[PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE0] =
		KBASE_REGMAP_WIDTH_32_BIT | KBASE_REGMAP_PERM_READ | KBASE_REGMAP_PERM_WRITE;
	kbdev->regmap.flags[PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE1] =
		KBASE_REGMAP_WIDTH_32_BIT | KBASE_REGMAP_PERM_READ | KBASE_REGMAP_PERM_WRITE;
	kbdev->regmap.flags[PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE_STATUS] =
		KBASE_REGMAP_WIDTH_32_BIT | KBASE_REGMAP_PERM_READ;
	kbdev->regmap.flags[PTM_ID] = KBASE_REGMAP_WIDTH_32_BIT | KBASE_REGMAP_PERM_READ;

	kbdev->regmap.regs[PTM_AW_IRQ_CLEAR] = kbdev->reg + 0x1ffc8;
	kbdev->regmap.regs[PTM_AW_IRQ_INJECTION] = kbdev->reg + 0x1ffd4;
	kbdev->regmap.regs[PTM_AW_IRQ_MASK] = kbdev->reg + 0x1ffcc;
	kbdev->regmap.regs[PTM_AW_IRQ_RAWSTAT] = kbdev->reg + 0x1ffc4;
	kbdev->regmap.regs[PTM_AW_IRQ_STATUS] = kbdev->reg + 0x1ffd0;
	kbdev->regmap.regs[PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE0] = kbdev->reg + 0x1ffd8;
	kbdev->regmap.regs[PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE1] = kbdev->reg + 0x1ffdc;
	kbdev->regmap.regs[PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE0] = kbdev->reg + 0x1ffe4;
	kbdev->regmap.regs[PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE1] = kbdev->reg + 0x1ffe8;
	kbdev->regmap.regs[PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE_STATUS] = kbdev->reg + 0x1ffe0;
	kbdev->regmap.regs[PTM_ID] = kbdev->reg + 0x1ffc0;
}

u32 kbase_regmap_backend_init(struct kbase_device *kbdev)
{
	int i = 0;

@ -2254,6 +2304,7 @@ u32 kbase_regmap_backend_init(struct kbase_device *kbdev)
		{ GPU_ID_ARCH_MAKE(7, 2, 0), kbase_regmap_v7_2_init },
		{ GPU_ID_ARCH_MAKE(9, 0, 0), kbase_regmap_v9_0_init },
		{ GPU_ID_ARCH_MAKE(9, 2, 0), kbase_regmap_v9_2_init },
		{ GPU_ID_ARCH_MAKE(9, 14, 0), kbase_regmap_v9_14_init },
	};

	for (i = 0; i < ARRAY_SIZE(init_array) - 1; i++) {

@ -2967,6 +3018,18 @@ static char *enum_strings[] = {
	[GPU_CONTROL__CORE_FEATURES] = "GPU_CONTROL__CORE_FEATURES",
	[GPU_CONTROL__THREAD_TLS_ALLOC] = "GPU_CONTROL__THREAD_TLS_ALLOC",
	[GPU_CONTROL__L2_CONFIG] = "GPU_CONTROL__L2_CONFIG",
	[PTM_AW_IRQ_CLEAR] = "PTM_AW_IRQ_CLEAR",
	[PTM_AW_IRQ_INJECTION] = "PTM_AW_IRQ_INJECTION",
	[PTM_AW_IRQ_MASK] = "PTM_AW_IRQ_MASK",
	[PTM_AW_IRQ_RAWSTAT] = "PTM_AW_IRQ_RAWSTAT",
	[PTM_AW_IRQ_STATUS] = "PTM_AW_IRQ_STATUS",
	[PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE0] = "PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE0",
	[PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE1] = "PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE1",
	[PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE0] = "PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE0",
	[PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE1] = "PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE1",
	[PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE_STATUS] =
		"PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE_STATUS",
	[PTM_ID] = "PTM_ID",
};

const char *kbase_reg_get_enum_string(u32 reg_enum)

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -759,4 +759,19 @@ enum kbase_regmap_enum_v9_2 {
	NR_V9_2_REGS,
};

enum kbase_regmap_enum_v9_14 {
	PTM_AW_IRQ_CLEAR = NR_V9_2_REGS, /* (RW) 32-bit 0x1FFC8 */
	PTM_AW_IRQ_INJECTION, /* (RW) 32-bit 0x1FFD4 */
	PTM_AW_IRQ_MASK, /* (RW) 32-bit 0x1FFCC */
	PTM_AW_IRQ_RAWSTAT, /* (RO) 32-bit 0x1FFC4 */
	PTM_AW_IRQ_STATUS, /* (RO) 32-bit 0x1FFD0 */
	PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE0, /* (RO) 32-bit 0x1FFD8 */
	PTM_AW_MESSAGE__PTM_INCOMING_MESSAGE1, /* (RO) 32-bit 0x1FFDC */
	PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE0, /* (RW) 32-bit 0x1FFE4 */
	PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE1, /* (RW) 32-bit 0x1FFE8 */
	PTM_AW_MESSAGE__PTM_OUTGOING_MESSAGE_STATUS, /* (RO) 32-bit 0x1FFE0 */
	PTM_ID, /* (RO) 32-bit 0x1FFC0 */
	NR_V9_14_REGS,
};

#endif /* _MALI_KBASE_REGMAP_JM_ENUMS_H_ */

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2023-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -47,6 +47,8 @@
#define MMU_AS_OFFSET(n, regname) ENUM_OFFSET(n, MMU_AS_ENUM(0, regname), MMU_AS_ENUM(1, regname))
#define MMU_AS_BASE_OFFSET(n) MMU_AS_OFFSET(n, TRANSTAB)

#define PTM_AW_MESSAGE_ENUM(regname) PTM_AW_MESSAGE__##regname

/* register value macros */
/* GPU_STATUS values */
#define GPU_STATUS_PRFCNT_ACTIVE (1 << 2) /* Set if the performance counters are active. */

@ -295,4 +297,11 @@
	(GPU_FAULT | MULTIPLE_GPU_FAULTS | RESET_COMPLETED | POWER_CHANGED_ALL | \
	 PRFCNT_SAMPLE_COMPLETED)

#define WINDOW_IRQ_MESSAGE (1U << 0)
#define WINDOW_IRQ_INVALID_ACCESS (1U << 1)
#define WINDOW_IRQ_GPU (1U << 2)
#define WINDOW_IRQ_JOB (1U << 3)
#define WINDOW_IRQ_MMU (1U << 4)
#define WINDOW_IRQ_EVENT (1U << 5)

#endif /* _MALI_KBASE_REGMAP_JM_MACROS_H_ */

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2021-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2021-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -21,7 +21,6 @@

#include "hwcnt/backend/mali_kbase_hwcnt_backend_csf.h"
#include "hwcnt/mali_kbase_hwcnt_gpu.h"
#include "hwcnt/mali_kbase_hwcnt_types.h"

#include <linux/log2.h>
#include <linux/kernel.h>

@ -31,6 +30,7 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/version_compat_defs.h>

#ifndef BASE_MAX_NR_CLOCKS_REGULATORS
#define BASE_MAX_NR_CLOCKS_REGULATORS 4

@ -255,7 +255,8 @@ struct kbase_hwcnt_csf_physical_layout {
 * @hwc_threshold_work:       Worker for consuming available samples when
 *                            threshold interrupt raised.
 * @num_l2_slices:            Current number of L2 slices allocated to the GPU.
 * @shader_present_bitmap:    Current shader-present bitmap that is allocated to the GPU.
 * @powered_shader_core_mask: The common mask between the debug_core_mask
 *                            and the shader_present_bitmap.
 */
struct kbase_hwcnt_backend_csf {
	struct kbase_hwcnt_backend_csf_info *info;

@ -283,7 +284,7 @@ struct kbase_hwcnt_backend_csf {
	struct work_struct hwc_dump_work;
	struct work_struct hwc_threshold_work;
	size_t num_l2_slices;
	u64 shader_present_bitmap;
	u64 powered_shader_core_mask;
};

static bool kbasep_hwcnt_backend_csf_backend_exists(struct kbase_hwcnt_backend_csf_info *csf_info)

@ -296,9 +297,11 @@ static bool kbasep_hwcnt_backend_csf_backend_exists(struct kbase_hwcnt_backend_c
}

void kbase_hwcnt_backend_csf_set_hw_availability(struct kbase_hwcnt_backend_interface *iface,
						 size_t num_l2_slices, u64 shader_present_bitmap)
						 size_t num_l2_slices, u64 shader_present,
						 u64 power_core_mask)
{
	struct kbase_hwcnt_backend_csf_info *csf_info;
	u64 norm_shader_present = power_core_mask & shader_present;

	if (!iface)
		return;

@ -309,16 +312,17 @@ void kbase_hwcnt_backend_csf_set_hw_availability(struct kbase_hwcnt_backend_inte
	if (!csf_info || !csf_info->backend)
		return;

	if (WARN_ON(csf_info->backend->enable_state != KBASE_HWCNT_BACKEND_CSF_DISABLED))
		return;

	if (WARN_ON(num_l2_slices > csf_info->backend->phys_layout.mmu_l2_cnt) ||
	    WARN_ON((shader_present_bitmap & csf_info->backend->phys_layout.shader_avail_mask) !=
		    shader_present_bitmap))
	    WARN_ON((norm_shader_present & csf_info->backend->phys_layout.shader_avail_mask) !=
		    norm_shader_present))
		return;

	csf_info->backend->num_l2_slices = num_l2_slices;
	csf_info->backend->shader_present_bitmap = shader_present_bitmap;
	csf_info->backend->powered_shader_core_mask = norm_shader_present;
}

/**
@ -424,7 +428,7 @@ static void kbasep_hwcnt_backend_csf_init_layout(
	WARN_ON(!prfcnt_info);
	WARN_ON(!phys_layout);

	shader_core_cnt = (size_t)fls64(prfcnt_info->core_mask);
	shader_core_cnt = (size_t)fls64(prfcnt_info->sc_core_mask);
	values_per_block = prfcnt_info->prfcnt_block_size / KBASE_HWCNT_VALUE_HW_BYTES;
	fw_block_cnt = div_u64(prfcnt_info->prfcnt_fw_size, prfcnt_info->prfcnt_block_size);
	hw_block_cnt = div_u64(prfcnt_info->prfcnt_hw_size, prfcnt_info->prfcnt_block_size);

@ -445,7 +449,7 @@ static void kbasep_hwcnt_backend_csf_init_layout(
		.fw_block_cnt = fw_block_cnt,
		.hw_block_cnt = hw_block_cnt,
		.block_cnt = fw_block_cnt + hw_block_cnt,
		.shader_avail_mask = prfcnt_info->core_mask,
		.shader_avail_mask = prfcnt_info->sc_core_mask,
		.headers_per_block = KBASE_HWCNT_V5_HEADERS_PER_BLOCK,
		.values_per_block = values_per_block,
		.counters_per_block = values_per_block - KBASE_HWCNT_V5_HEADERS_PER_BLOCK,

@ -454,17 +458,20 @@
}

static void
kbasep_hwcnt_backend_csf_reset_internal_buffers(struct kbase_hwcnt_backend_csf *backend_csf)
kbasep_hwcnt_backend_csf_reset_internal_buffers(struct kbase_hwcnt_backend_csf *backend_csf,
						bool user_bufs)
{
	size_t user_buf_bytes = backend_csf->info->metadata->dump_buf_bytes;
	size_t block_state_bytes = backend_csf->phys_layout.block_cnt *
				   KBASE_HWCNT_BLOCK_STATE_BYTES * KBASE_HWCNT_BLOCK_STATE_STRIDE;

	memset(backend_csf->to_user_buf, 0, user_buf_bytes);
	memset(backend_csf->accum_buf, 0, user_buf_bytes);
	memset(backend_csf->old_sample_buf, 0, backend_csf->info->prfcnt_info.dump_bytes);
	memset(backend_csf->block_states, 0, block_state_bytes);
	memset(backend_csf->to_user_block_states, 0, block_state_bytes);
	if (user_bufs) {
		memset(backend_csf->to_user_buf, 0, user_buf_bytes);
		memset(backend_csf->to_user_block_states, 0, block_state_bytes);
	}
}

static void
@ -517,34 +524,21 @@ static void kbasep_hwcnt_backend_csf_update_user_sample(struct kbase_hwcnt_backe
	memset(backend_csf->block_states, 0, block_state_bytes);
}

/**
 * kbasep_hwcnt_backend_csf_update_block_state - Update block state of a block instance with
 *                                               information from a sample.
 * @phys_layout:          Physical memory layout information of HWC
 *                        sample buffer.
 * @enable_mask:          Counter enable mask for the block whose state is being updated.
 * @enable_state:         The CSF backend internal enabled state.
 * @exiting_protm:        Whether or not the sample is taken when the GPU is exiting
 *                        protected mode.
 * @block_idx:            Index of block within the ringbuffer.
 * @block_state:          Pointer to existing block state of the block whose state is being
 *                        updated.
 * @fw_in_protected_mode: Whether or not GPU is in protected mode during sampling.
 */
static void kbasep_hwcnt_backend_csf_update_block_state(
	const struct kbase_hwcnt_csf_physical_layout *phys_layout, const u32 enable_mask,
	enum kbase_hwcnt_backend_csf_enable_state enable_state, bool exiting_protm,
	size_t block_idx, blk_stt_t *const block_state, bool fw_in_protected_mode)
void kbasep_hwcnt_backend_csf_update_block_state(struct kbase_hwcnt_backend_csf *backend,
						 const u32 enable_mask, bool exiting_protm,
						 size_t block_idx, blk_stt_t *const block_state,
						 bool fw_in_protected_mode)
{
	const struct kbase_hwcnt_csf_physical_layout *phys_layout = &backend->phys_layout;
	/* Offset of shader core blocks from the start of the HW blocks in the sample */
	size_t shader_core_block_offset =
		(size_t)(phys_layout->hw_block_cnt - phys_layout->shader_cnt);
		(size_t)(phys_layout->block_cnt - phys_layout->shader_cnt);
	bool is_shader_core_block;

	is_shader_core_block = block_idx >= shader_core_block_offset;
	is_shader_core_block = (block_idx >= shader_core_block_offset);

	/* Set power bits for the block state for the block, for the sample */
	switch (enable_state) {
	switch (backend->enable_state) {
	/* Disabled states */
	case KBASE_HWCNT_BACKEND_CSF_DISABLED:
	case KBASE_HWCNT_BACKEND_CSF_TRANSITIONING_TO_ENABLED:

@ -592,21 +586,45 @@ static void kbasep_hwcnt_backend_csf_update_block_state(
					       KBASE_HWCNT_STATE_NORMAL);
	else
		kbase_hwcnt_block_state_append(block_state, KBASE_HWCNT_STATE_NORMAL);

	/* powered_shader_core_mask stored in the backend is a combination of
	 * the shader present and the debug core mask, so explicit checking of the
	 * core mask is not required here.
	 */
	if (is_shader_core_block) {
		u64 current_shader_core = 1ULL << (block_idx - shader_core_block_offset);

		WARN_ON_ONCE(backend->phys_layout.shader_cnt > 64);

		if (current_shader_core & backend->info->backend->powered_shader_core_mask)
			kbase_hwcnt_block_state_append(block_state, KBASE_HWCNT_STATE_AVAILABLE);
		else if (current_shader_core & ~backend->info->backend->powered_shader_core_mask)
			kbase_hwcnt_block_state_append(block_state, KBASE_HWCNT_STATE_UNAVAILABLE);
		else
			WARN_ON_ONCE(true);
	}
	else
		kbase_hwcnt_block_state_append(block_state, KBASE_HWCNT_STATE_AVAILABLE);
}

static void kbasep_hwcnt_backend_csf_accumulate_sample(
	const struct kbase_hwcnt_csf_physical_layout *phys_layout, size_t dump_bytes,
	u64 *accum_buf, const u32 *old_sample_buf, const u32 *new_sample_buf,
	blk_stt_t *const block_states, bool clearing_samples,
	enum kbase_hwcnt_backend_csf_enable_state enable_state, bool fw_in_protected_mode)
static void kbasep_hwcnt_backend_csf_accumulate_sample(struct kbase_hwcnt_backend_csf *backend,
						       const u32 *old_sample_buf,
						       const u32 *new_sample_buf)
{
	const struct kbase_hwcnt_csf_physical_layout *phys_layout = &backend->phys_layout;
	const size_t dump_bytes = backend->info->prfcnt_info.dump_bytes;
	const size_t values_per_block = phys_layout->values_per_block;
	blk_stt_t *const block_states = backend->block_states;
	const bool fw_in_protected_mode = backend->info->fw_in_protected_mode;
	const bool clearing_samples = backend->info->prfcnt_info.clearing_samples;
	u64 *accum_buf = backend->accum_buf;

	size_t block_idx;
	const u32 *old_block = old_sample_buf;
	const u32 *new_block = new_sample_buf;
	u64 *acc_block = accum_buf;
	/* Flag to indicate whether current sample is exiting protected mode. */
	bool exiting_protm = false;
	const size_t values_per_block = phys_layout->values_per_block;

	/* The block pointers now point to the first HW block, which is always a CSHW/front-end
	 * block. The counter enable mask for this block can be checked to determine whether this

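The shader-core branch in the hunk above reduces to a single bit test: a block index is mapped to a core bit, and because powered_shader_core_mask was pre-masked with the debug core mask, that one bit decides availability. A standalone restatement of that test:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* The shader-core block at ring-buffer index block_idx corresponds to core
 * (block_idx - shader_core_block_offset); it is available for the sample
 * exactly when that bit is set in the powered-core mask.
 */
static bool shader_core_available(uint64_t powered_shader_core_mask,
				  size_t block_idx, size_t shader_core_block_offset)
{
	uint64_t core_bit = UINT64_C(1) << (block_idx - shader_core_block_offset);

	return (core_bit & powered_shader_core_mask) != 0;
}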
@ -620,9 +638,8 @@ static void kbasep_hwcnt_backend_csf_accumulate_sample(
		const u32 old_enable_mask = old_block[phys_layout->enable_mask_offset];
		const u32 new_enable_mask = new_block[phys_layout->enable_mask_offset];
		/* Update block state with information of the current sample */
		kbasep_hwcnt_backend_csf_update_block_state(phys_layout, new_enable_mask,
							    enable_state, exiting_protm, block_idx,
							    &block_states[block_idx],
		kbasep_hwcnt_backend_csf_update_block_state(backend, new_enable_mask, exiting_protm,
							    block_idx, &block_states[block_idx],
							    fw_in_protected_mode);

		if (!(new_enable_mask & HWCNT_BLOCK_EMPTY_SAMPLE)) {

@ -706,7 +723,6 @@ static void kbasep_hwcnt_backend_csf_accumulate_samples(struct kbase_hwcnt_backe
	u8 *cpu_dump_base = (u8 *)backend_csf->ring_buf_cpu_base;
	const size_t ring_buf_cnt = backend_csf->info->ring_buf_cnt;
	const size_t buf_dump_bytes = backend_csf->info->prfcnt_info.dump_bytes;
	bool clearing_samples = backend_csf->info->prfcnt_info.clearing_samples;
	u32 *old_sample_buf = backend_csf->old_sample_buf;
	u32 *new_sample_buf = old_sample_buf;
	const struct kbase_hwcnt_csf_physical_layout *phys_layout = &backend_csf->phys_layout;

@ -740,10 +756,8 @@ static void kbasep_hwcnt_backend_csf_accumulate_samples(struct kbase_hwcnt_backe
		const u32 buf_idx = raw_idx & (ring_buf_cnt - 1);

		new_sample_buf = (u32 *)&cpu_dump_base[buf_idx * buf_dump_bytes];
		kbasep_hwcnt_backend_csf_accumulate_sample(
			phys_layout, buf_dump_bytes, backend_csf->accum_buf, old_sample_buf,
			new_sample_buf, backend_csf->block_states, clearing_samples,
			backend_csf->enable_state, backend_csf->info->fw_in_protected_mode);
		kbasep_hwcnt_backend_csf_accumulate_sample(backend_csf, old_sample_buf,
							   new_sample_buf);

		old_sample_buf = new_sample_buf;
	}

@ -1215,11 +1229,6 @@ static void kbasep_hwcnt_backend_csf_dump_disable(struct kbase_hwcnt_backend *ba
							backend_csf->ring_buf, 0,
							backend_csf->info->ring_buf_cnt, false);

	/* Reset accumulator, old_sample_buf and user_sample to all-0 to prepare
	 * for next enable.
	 */
	kbasep_hwcnt_backend_csf_reset_internal_buffers(backend_csf);

	/* Disabling HWCNT is an indication that blocks have been powered off. This is important to
	 * know for L2, CSHW, and Tiler blocks, as this is currently the only way a backend can
	 * know if they are being powered off.

@ -1255,6 +1264,12 @@ static void kbasep_hwcnt_backend_csf_dump_disable(struct kbase_hwcnt_backend *ba
		kbase_hwcnt_block_state_set(&backend_csf->accum_all_blk_stt,
					    KBASE_HWCNT_STATE_UNKNOWN);
	}

	/* Reset accumulator, old_sample_buf and block_states to all-0 to prepare for next enable.
	 * Reset user buffers if ownership is transferred to the caller (i.e. dump_buffer
	 * is provided).
	 */
	kbasep_hwcnt_backend_csf_reset_internal_buffers(backend_csf, dump_buffer);
}

/* CSF backend implementation of kbase_hwcnt_backend_dump_request_fn */

@ -1279,6 +1294,11 @@ static int kbasep_hwcnt_backend_csf_dump_request(struct kbase_hwcnt_backend *bac
		backend_csf->dump_state = KBASE_HWCNT_BACKEND_CSF_DUMP_COMPLETED;
		*dump_time_ns = kbasep_hwcnt_backend_csf_timestamp_ns(backend);
		kbasep_hwcnt_backend_csf_cc_update(backend_csf);
		/* There is a possibility that the transition to enabled state will remain
		 * during multiple dumps, hence append the OFF state.
		 */
		kbase_hwcnt_block_state_append(&backend_csf->accum_all_blk_stt,
					       KBASE_HWCNT_STATE_OFF);
		backend_csf->user_requested = true;
		backend_csf->info->csf_if->unlock(backend_csf->info->csf_if->ctx, flags);
		return 0;

@ -1457,7 +1477,7 @@ static int kbasep_hwcnt_backend_csf_dump_get(struct kbase_hwcnt_backend *backend
	ret = kbase_hwcnt_csf_dump_get(dst, backend_csf->to_user_buf,
				       backend_csf->to_user_block_states, dst_enable_map,
				       backend_csf->num_l2_slices,
				       backend_csf->shader_present_bitmap, accumulate);
				       backend_csf->powered_shader_core_mask, accumulate);

	/* If no error occurred (zero ret value), then update block state for all blocks in the
	 * accumulation with the current sample's block state.

@ -1469,6 +1489,12 @@ static int kbasep_hwcnt_backend_csf_dump_get(struct kbase_hwcnt_backend *backend
					    KBASE_HWCNT_STATE_UNKNOWN);
	}

	/* Clear consumed user buffers. */
	memset(backend_csf->to_user_buf, 0, backend_csf->info->metadata->dump_buf_bytes);
	memset(backend_csf->to_user_block_states, 0,
	       backend_csf->phys_layout.block_cnt * KBASE_HWCNT_BLOCK_STATE_BYTES *
		       KBASE_HWCNT_BLOCK_STATE_STRIDE);

	return ret;
}

@ -2098,7 +2124,7 @@ int kbase_hwcnt_backend_csf_metadata_init(struct kbase_hwcnt_backend_interface *
	gpu_info.has_fw_counters = csf_info->prfcnt_info.prfcnt_fw_size > 0;
	gpu_info.l2_count = csf_info->prfcnt_info.l2_count;
	gpu_info.csg_cnt = csf_info->prfcnt_info.csg_count;
	gpu_info.core_mask = csf_info->prfcnt_info.core_mask;
	gpu_info.sc_core_mask = csf_info->prfcnt_info.sc_core_mask;
	gpu_info.clk_cnt = csf_info->prfcnt_info.clk_cnt;
	gpu_info.prfcnt_values_per_block =
		csf_info->prfcnt_info.prfcnt_block_size / KBASE_HWCNT_VALUE_HW_BYTES;

@ -2115,7 +2141,7 @@ void kbase_hwcnt_backend_csf_metadata_term(struct kbase_hwcnt_backend_interface

	csf_info = (struct kbase_hwcnt_backend_csf_info *)iface->info;
	if (csf_info->metadata) {
		kbase_hwcnt_csf_metadata_destroy(csf_info->metadata);
		kbase_hwcnt_metadata_destroy(csf_info->metadata);
		csf_info->metadata = NULL;
	}
}

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2021-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2021-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -30,8 +30,10 @@
#include "hwcnt/backend/mali_kbase_hwcnt_backend.h"
#include "hwcnt/backend/mali_kbase_hwcnt_backend_csf_if.h"
#include "hwcnt/mali_kbase_hwcnt_watchdog_if.h"
#include "hwcnt/mali_kbase_hwcnt_types.h"

struct kbase_hwcnt_physical_enable_map;
struct kbase_hwcnt_backend_csf;

/**
 * kbase_hwcnt_backend_csf_create() - Create a CSF hardware counter backend

@ -123,11 +125,12 @@ void kbase_hwcnt_backend_csf_on_before_reset(struct kbase_hwcnt_backend_interfac
 *                   this function is called.
 * @iface:           Non-NULL pointer to HWC backend interface.
 * @num_l2_slices:   Current number of L2 slices allocated to the GPU.
 * @shader_present_bitmap: Current shader-present bitmap that is allocated to the GPU.
 * @shader_present:  Shader-present mask of the current configuration.
 * @power_core_mask: Mask of the currently powered shader cores.
 */
void kbase_hwcnt_backend_csf_set_hw_availability(struct kbase_hwcnt_backend_interface *iface,
						 size_t num_l2_slices,
						 uint64_t shader_present_bitmap);
						 size_t num_l2_slices, u64 shader_present,
						 u64 power_core_mask);

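As the .c hunk earlier shows, the backend normalizes the two masks internally (norm_shader_present = power_core_mask & shader_present), so callers pass raw state. A small standalone example of the same normalization, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical configuration: cores 0-3 present, cores 0 and 2 powered. */
	uint64_t shader_present = 0xF;
	uint64_t power_core_mask = 0x5;

	/* Same normalization as the backend performs internally: only bits
	 * that are both present and powered survive, giving 0x5 here.
	 */
	uint64_t norm_shader_present = power_core_mask & shader_present;

	printf("powered_shader_core_mask = 0x%llx\n",
	       (unsigned long long)norm_shader_present);
	return 0;
}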
/** kbasep_hwcnt_backend_csf_process_enable_map() - Process the enable_map to
 *                                                  guarantee headers are

@ -174,4 +177,21 @@ void kbase_hwcnt_backend_csf_on_prfcnt_enable(struct kbase_hwcnt_backend_interfa
 */
void kbase_hwcnt_backend_csf_on_prfcnt_disable(struct kbase_hwcnt_backend_interface *iface);

/**
 * kbasep_hwcnt_backend_csf_update_block_state - Update block state of a block instance with
 *                                               information from a sample.
 * @backend:              CSF hardware counter backend.
 * @enable_mask:          Counter enable mask for the block whose state is being updated.
 * @exiting_protm:        Whether or not the sample is taken when the GPU is exiting
 *                        protected mode.
 * @block_idx:            Index of block within the ringbuffer.
 * @block_state:          Pointer to existing block state of the block whose state is being
 *                        updated.
 * @fw_in_protected_mode: Whether or not GPU is in protected mode during sampling.
 */
void kbasep_hwcnt_backend_csf_update_block_state(struct kbase_hwcnt_backend_csf *backend,
						 const u32 enable_mask, bool exiting_protm,
						 size_t block_idx, blk_stt_t *const block_state,
						 bool fw_in_protected_mode);

#endif /* _KBASE_HWCNT_BACKEND_CSF_H_ */

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2021-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2021-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -68,7 +68,7 @@ struct kbase_hwcnt_backend_csf_if_enable {
 * @prfcnt_block_size: Bytes of each performance counter block.
 * @l2_count:          The MMU L2 cache count.
 * @csg_count:         The total number of CSGs in the system
 * @core_mask:         Shader core mask.
 * @sc_core_mask:      Shader core mask.
 * @clk_cnt:           Clock domain count in the system.
 * @clearing_samples:  Indicates whether counters are cleared after each sample
 *                     is taken.

@ -80,7 +80,7 @@ struct kbase_hwcnt_backend_csf_if_prfcnt_info {
	size_t prfcnt_block_size;
	size_t l2_count;
	u32 csg_count;
	u64 core_mask;
	u64 sc_core_mask;
	u8 clk_cnt;
	bool clearing_samples;
};

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2021-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2021-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -229,7 +229,7 @@ static void kbasep_hwcnt_backend_csf_if_fw_get_prfcnt_info(

	*prfcnt_info = (struct kbase_hwcnt_backend_csf_if_prfcnt_info){
		.l2_count = KBASE_DUMMY_MODEL_MAX_MEMSYS_BLOCKS,
		.core_mask = (1ull << KBASE_DUMMY_MODEL_MAX_SHADER_CORES) - 1,
		.sc_core_mask = (1ull << KBASE_DUMMY_MODEL_MAX_SHADER_CORES) - 1,
		.prfcnt_hw_size =
			KBASE_DUMMY_MODEL_MAX_NUM_HARDWARE_BLOCKS * KBASE_DUMMY_MODEL_BLOCK_SIZE,
		.prfcnt_fw_size =

@ -290,12 +290,13 @@ static void kbasep_hwcnt_backend_csf_if_fw_get_prfcnt_info(
		.dump_bytes = fw_ctx->buf_bytes,
		.prfcnt_block_size = prfcnt_block_size,
		.l2_count = kbdev->gpu_props.num_l2_slices,
		.core_mask = kbasep_hwcnt_backend_csf_core_mask(&kbdev->gpu_props),
		.sc_core_mask = kbasep_hwcnt_backend_csf_core_mask(&kbdev->gpu_props),
		.csg_count = fw_block_count > 1 ? csg_count : 0,
		.clk_cnt = fw_ctx->clk_cnt,
		.clearing_samples = true,
	};

	/* Block size must be multiple of counter size. */
	WARN_ON((prfcnt_info->prfcnt_block_size % KBASE_HWCNT_VALUE_HW_BYTES) != 0);
	/* Total size must be multiple of block size. */

@ -513,10 +514,15 @@ kbasep_hwcnt_backend_csf_if_fw_ring_buf_free(struct kbase_hwcnt_backend_csf_if_c
			fw_ring_buf->phys, fw_ring_buf->num_pages, fw_ring_buf->num_pages,
			MCU_AS_NR));

	/* Clear the dump ring_buf content to zeros */
	memset(fw_ring_buf->cpu_dump_base, 0, fw_ring_buf->num_pages * PAGE_SIZE);
	vunmap(fw_ring_buf->cpu_dump_base);

	/* After zeroing, the ring_buf pages are dirty so need to pass the 'dirty' flag
	 * as true when freeing the pages to the Global pool.
	 */
	kbase_mem_pool_free_pages(&fw_ctx->kbdev->mem_pools.small[KBASE_MEM_GROUP_CSF_FW],
				  fw_ring_buf->num_pages, fw_ring_buf->phys, false, false);
				  fw_ring_buf->num_pages, fw_ring_buf->phys, true, false);

	kfree(fw_ring_buf->phys);

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -165,7 +165,7 @@ static int kbasep_hwcnt_backend_jm_gpu_info_init(struct kbase_device *kbdev,
#endif

	info->l2_count = l2_count;
	info->core_mask = core_mask;
	info->sc_core_mask = core_mask;
	info->prfcnt_values_per_block = KBASE_HWCNT_V5_DEFAULT_VALUES_PER_BLOCK;

	/* Determine the number of available clock domains. */

@ -186,7 +186,7 @@ static void kbasep_hwcnt_backend_jm_init_layout(const struct kbase_hwcnt_gpu_inf
	WARN_ON(!gpu_info);
	WARN_ON(!phys_layout);

	shader_core_cnt = fls64(gpu_info->core_mask);
	shader_core_cnt = fls64(gpu_info->sc_core_mask);

	*phys_layout = (struct kbase_hwcnt_jm_physical_layout){
		.fe_cnt = KBASE_HWCNT_V5_FE_BLOCK_COUNT,

@ -195,7 +195,7 @@ static void kbasep_hwcnt_backend_jm_init_layout(const struct kbase_hwcnt_gpu_inf
		.shader_cnt = shader_core_cnt,
		.block_cnt = KBASE_HWCNT_V5_FE_BLOCK_COUNT + KBASE_HWCNT_V5_TILER_BLOCK_COUNT +
			     gpu_info->l2_count + shader_core_cnt,
		.shader_avail_mask = gpu_info->core_mask,
		.shader_avail_mask = gpu_info->sc_core_mask,
		.headers_per_block = KBASE_HWCNT_V5_HEADERS_PER_BLOCK,
		.values_per_block = gpu_info->prfcnt_values_per_block,
		.counters_per_block =

@ -384,14 +384,12 @@ kbasep_hwcnt_backend_jm_dump_enable_nolock(struct kbase_hwcnt_backend *backend,

	enable = (struct kbase_instr_hwcnt_enable)
	{
		.fe_bm = phys_enable_map.fe_bm,
		.shader_bm = phys_enable_map.shader_bm,
		.tiler_bm = phys_enable_map.tiler_bm,
		.mmu_l2_bm = phys_enable_map.mmu_l2_bm,
		.fe_bm = phys_enable_map.fe_bm, .shader_bm = phys_enable_map.shader_bm,
		.tiler_bm = phys_enable_map.tiler_bm, .mmu_l2_bm = phys_enable_map.mmu_l2_bm,
		.counter_set = phys_counter_set,
#if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
		/* The dummy model needs the CPU mapping. */
		.dump_buffer = (uintptr_t)backend_jm->cpu_dump_va,
		.dump_buffer = (uintptr_t)backend_jm->cpu_dump_va,
#else
		.dump_buffer = backend_jm->gpu_dump_va,
#endif /* CONFIG_MALI_BIFROST_NO_MALI */

@ -411,7 +409,7 @@ kbasep_hwcnt_backend_jm_dump_enable_nolock(struct kbase_hwcnt_backend *backend,

	backend_jm->debug_core_mask = kbase_pm_ca_get_debug_core_mask(kbdev);
	backend_jm->max_l2_slices = backend_jm->info->hwcnt_gpu_info.l2_count;
	backend_jm->max_core_mask = backend_jm->info->hwcnt_gpu_info.core_mask;
	backend_jm->max_core_mask = backend_jm->info->hwcnt_gpu_info.sc_core_mask;

	backend_jm->pm_core_mask = kbase_pm_ca_get_instr_core_mask(kbdev);

@ -660,8 +658,8 @@ static int kbasep_hwcnt_backend_jm_dump_get(struct kbase_hwcnt_backend *backend,
#endif /* CONFIG_MALI_BIFROST_NO_MALI */
	errcode = kbase_hwcnt_jm_dump_get(dst, backend_jm->to_user_buf, dst_enable_map,
					  backend_jm->pm_core_mask, backend_jm->debug_core_mask,
					  backend_jm->max_core_mask, backend_jm->max_l2_slices,
					  &backend_jm->curr_config, accumulate);
					  backend_jm->max_l2_slices, &backend_jm->curr_config,
					  accumulate);

	if (errcode)
		return errcode;

@ -864,7 +862,7 @@ static void kbasep_hwcnt_backend_jm_info_destroy(const struct kbase_hwcnt_backen
	if (!info)
		return;

	kbase_hwcnt_jm_metadata_destroy(info->metadata);
	kbase_hwcnt_metadata_destroy(info->metadata);
	kfree(info);
}

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -169,7 +169,7 @@ static int kbasep_hwcnt_backend_gpu_metadata_create(const struct kbase_hwcnt_gpu
	/* Calculate number of block instances that aren't cores */
	non_core_block_count = 2 + gpu_info->l2_count;
	/* Calculate number of block instances that are shader cores */
	sc_block_count = (size_t)fls64(gpu_info->core_mask);
	sc_block_count = (size_t)fls64(gpu_info->sc_core_mask);
	/* Determine the total number of cores */
	core_block_count = sc_block_count;

@ -277,7 +277,7 @@ static int kbasep_hwcnt_backend_gpu_metadata_create(const struct kbase_hwcnt_gpu
	kbase_hwcnt_set_avail_mask(&desc.avail_mask, 0, 0);
	kbase_hwcnt_set_avail_mask_bits(&desc.avail_mask, 0, non_core_block_count, U64_MAX);
	kbase_hwcnt_set_avail_mask_bits(&desc.avail_mask, non_core_block_count, sc_block_count,
					gpu_info->core_mask);
					gpu_info->sc_core_mask);

	return kbase_hwcnt_metadata_create(&desc, metadata);

@ -294,7 +294,7 @@ static size_t kbasep_hwcnt_backend_jm_dump_bytes(const struct kbase_hwcnt_gpu_in
{
	WARN_ON(!gpu_info);

	return (2 + gpu_info->l2_count + (size_t)fls64(gpu_info->core_mask)) *
	return (2 + gpu_info->l2_count + (size_t)fls64(gpu_info->sc_core_mask)) *
	       gpu_info->prfcnt_values_per_block * KBASE_HWCNT_VALUE_HW_BYTES;
}

@ -338,14 +338,6 @@ int kbase_hwcnt_jm_metadata_create(const struct kbase_hwcnt_gpu_info *gpu_info,
	return 0;
}

void kbase_hwcnt_jm_metadata_destroy(const struct kbase_hwcnt_metadata *metadata)
{
	if (!metadata)
		return;

	kbase_hwcnt_metadata_destroy(metadata);
}

int kbase_hwcnt_csf_metadata_create(const struct kbase_hwcnt_gpu_info *gpu_info,
				    enum kbase_hwcnt_set counter_set,
				    const struct kbase_hwcnt_metadata **out_metadata)

@ -365,14 +357,6 @@ int kbase_hwcnt_csf_metadata_create(const struct kbase_hwcnt_gpu_info *gpu_info,
	return 0;
}

void kbase_hwcnt_csf_metadata_destroy(const struct kbase_hwcnt_metadata *metadata)
{
	if (!metadata)
		return;

	kbase_hwcnt_metadata_destroy(metadata);
}

bool kbase_hwcnt_is_block_type_shader(const enum kbase_hwcnt_gpu_v5_block_type blk_type)
{
	if (blk_type == KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC ||

@ -384,6 +368,7 @@ bool kbase_hwcnt_is_block_type_shader(const enum kbase_hwcnt_gpu_v5_block_type b
	return false;
}

bool kbase_hwcnt_is_block_type_memsys(const enum kbase_hwcnt_gpu_v5_block_type blk_type)
{
	if (blk_type == KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS ||

@ -416,7 +401,7 @@ bool kbase_hwcnt_is_block_type_fe(const enum kbase_hwcnt_gpu_v5_block_type blk_t

int kbase_hwcnt_jm_dump_get(struct kbase_hwcnt_dump_buffer *dst, u64 *src,
			    const struct kbase_hwcnt_enable_map *dst_enable_map, u64 pm_core_mask,
			    u64 debug_core_mask, u64 max_core_mask, size_t max_l2_slices,
			    u64 debug_core_mask, size_t max_l2_slices,
			    const struct kbase_hwcnt_curr_config *curr_config, bool accumulate)
{
	const struct kbase_hwcnt_metadata *metadata;

@ -466,9 +451,7 @@ int kbase_hwcnt_jm_dump_get(struct kbase_hwcnt_dump_buffer *dst, u64 *src,
		else
			hw_res_available = true;

		/*
		 * Skip block if no values in the destination block are enabled.
		 */
		/* Skip block if no values in the destination block are enabled. */
		if (kbase_hwcnt_enable_map_block_enabled(dst_enable_map, blk, blk_inst)) {
			u64 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(dst, blk, blk_inst);
			const u64 *src_blk = dump_src + src_offset;

@ -581,7 +564,6 @@ int kbase_hwcnt_jm_dump_get(struct kbase_hwcnt_dump_buffer *dst, u64 *src,
				/* Shift each core mask right by 1 */
				core_mask >>= 1;
				debug_core_mask >>= 1;
				max_core_mask >>= 1;
				shader_present >>= 1;
			}
		}

@ -592,7 +574,7 @@ int kbase_hwcnt_jm_dump_get(struct kbase_hwcnt_dump_buffer *dst, u64 *src,
int kbase_hwcnt_csf_dump_get(struct kbase_hwcnt_dump_buffer *dst, u64 *src,
			     blk_stt_t *src_block_stt,
			     const struct kbase_hwcnt_enable_map *dst_enable_map,
			     size_t num_l2_slices, u64 shader_present_bitmap, bool accumulate)
			     size_t num_l2_slices, u64 powered_shader_core_mask, bool accumulate)
{
	const struct kbase_hwcnt_metadata *metadata;
	const u64 *dump_src = src;

@ -614,9 +596,7 @@ int kbase_hwcnt_csf_dump_get(struct kbase_hwcnt_dump_buffer *dst, u64 *src,
		blk_stt_t *dst_blk_stt =
			kbase_hwcnt_dump_buffer_block_state_instance(dst, blk, blk_inst);

		/*
		 * Skip block if no values in the destination block are enabled.
		 */
		/* Skip block if no values in the destination block are enabled. */
		if (kbase_hwcnt_enable_map_block_enabled(dst_enable_map, blk, blk_inst)) {
			u64 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(dst, blk, blk_inst);
			const u64 *src_blk = dump_src + src_offset;

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software

@ -169,7 +169,7 @@ enum kbase_hwcnt_physical_set {
/**
 * struct kbase_hwcnt_gpu_info - Information about hwcnt blocks on the GPUs.
 * @l2_count:                L2 cache count.
 * @core_mask:               Shader core mask. May be sparse.
 * @sc_core_mask:            Shader core mask. May be sparse.
 * @clk_cnt:                 Number of clock domains available.
 * @csg_cnt:                 Number of CSGs available.
 * @prfcnt_values_per_block: Total entries (header + counters) of performance

@ -178,7 +178,7 @@ enum kbase_hwcnt_physical_set {
 */
struct kbase_hwcnt_gpu_info {
	size_t l2_count;
	u64 core_mask;
	u64 sc_core_mask;
	u8 clk_cnt;
	u8 csg_cnt;
	size_t prfcnt_values_per_block;

@ -261,13 +261,6 @@ int kbase_hwcnt_jm_metadata_create(const struct kbase_hwcnt_gpu_info *info,
				   const struct kbase_hwcnt_metadata **out_metadata,
				   size_t *out_dump_bytes);

/**
 * kbase_hwcnt_jm_metadata_destroy() - Destroy JM GPU hardware counter metadata.
 *
 * @metadata: Pointer to metadata to destroy.
 */
void kbase_hwcnt_jm_metadata_destroy(const struct kbase_hwcnt_metadata *metadata);

/**
 * kbase_hwcnt_csf_metadata_create() - Create hardware counter metadata for the
 *                                     CSF GPUs.

@ -282,13 +275,6 @@ int kbase_hwcnt_csf_metadata_create(const struct kbase_hwcnt_gpu_info *info,
				    enum kbase_hwcnt_set counter_set,
				    const struct kbase_hwcnt_metadata **out_metadata);

/**
 * kbase_hwcnt_csf_metadata_destroy() - Destroy CSF GPU hardware counter
 *                                      metadata.
 * @metadata: Pointer to metadata to destroy.
 */
void kbase_hwcnt_csf_metadata_destroy(const struct kbase_hwcnt_metadata *metadata);

/**
 * kbase_hwcnt_jm_dump_get() - Copy or accumulate enabled counters from the raw
 *                             dump buffer in src into the dump buffer

@ -300,9 +286,6 @@ void kbase_hwcnt_csf_metadata_destroy(const struct kbase_hwcnt_metadat
 * @dst_enable_map:  Non-NULL pointer to enable map specifying enabled values.
 * @pm_core_mask:    PM-state-synchronized shader core mask for the dump.
 * @debug_core_mask: User-set mask of cores to be used by the GPU.
 * @max_core_mask:   Core mask of all cores allocated to the GPU (non
 *                   virtualized platforms) or resource group (virtualized
 *                   platforms).
 * @max_l2_slices:   Maximum number of L2 slices allocated to the GPU (non
 *                   virtualised platforms) or resource group (virtualized
 *                   platforms).

@ -319,23 +302,23 @@ void kbase_hwcnt_csf_metadata_destroy(const struct kbase_hwcnt_metadat
 */
int kbase_hwcnt_jm_dump_get(struct kbase_hwcnt_dump_buffer *dst, u64 *src,
			    const struct kbase_hwcnt_enable_map *dst_enable_map,
			    const u64 pm_core_mask, u64 debug_core_mask, u64 max_core_mask,
			    size_t max_l2_slices, const struct kbase_hwcnt_curr_config *curr_config,
			    bool accumulate);
			    const u64 pm_core_mask, u64 debug_core_mask, size_t max_l2_slices,
			    const struct kbase_hwcnt_curr_config *curr_config, bool accumulate);

/**
 * kbase_hwcnt_csf_dump_get() - Copy or accumulate enabled counters from the raw
 *                              dump buffer in src into the dump buffer
 *                              abstraction in dst.
 * @dst:                   Non-NULL pointer to destination dump buffer.
 * @src:                   Non-NULL pointer to source raw dump buffer, of same length
 *                         as dump_buf_bytes in the metadata of dst dump buffer.
 * @src_block_stt:         Non-NULL pointer to source block state buffer.
 * @dst_enable_map:        Non-NULL pointer to enable map specifying enabled values.
 * @num_l2_slices:         Current number of L2 slices allocated to the GPU.
 * @shader_present_bitmap: Current shader-present bitmap that is allocated to the GPU.
 * @accumulate:            True if counters in src should be accumulated into
 *                         destination, rather than copied.
 * @dst:                   Non-NULL pointer to destination dump buffer.
 * @src:                   Non-NULL pointer to source raw dump buffer, of same length
 *                         as dump_buf_bytes in the metadata of dst dump buffer.
|
||||
* @src_block_stt: Non-NULL pointer to source block state buffer.
|
||||
* @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
|
||||
* @num_l2_slices: Current number of L2 slices allocated to the GPU.
|
||||
* @powered_shader_core_mask: The common mask between the debug_core_mask
|
||||
* and the shader_present_bitmap.
|
||||
* @accumulate: True if counters in src should be accumulated into
|
||||
* destination, rather than copied.
|
||||
*
|
||||
* The dst and dst_enable_map MUST have been created from the same metadata as
|
||||
* returned from the call to kbase_hwcnt_csf_metadata_create as was used to get
|
||||
|
|
@ -346,7 +329,7 @@ int kbase_hwcnt_jm_dump_get(struct kbase_hwcnt_dump_buffer *dst, u64 *src,
|
|||
int kbase_hwcnt_csf_dump_get(struct kbase_hwcnt_dump_buffer *dst, u64 *src,
|
||||
blk_stt_t *src_block_stt,
|
||||
const struct kbase_hwcnt_enable_map *dst_enable_map,
|
||||
size_t num_l2_slices, u64 shader_present_bitmap, bool accumulate);
|
||||
size_t num_l2_slices, u64 powered_shader_core_mask, bool accumulate);
|
||||
|
||||
/**
|
||||
* kbase_hwcnt_backend_gpu_block_map_to_physical() - Convert from a block
|
||||
|
|
@ -453,6 +436,7 @@ bool kbase_hwcnt_is_block_type_memsys(const enum kbase_hwcnt_gpu_v5_block_type b
|
|||
bool kbase_hwcnt_is_block_type_tiler(const enum kbase_hwcnt_gpu_v5_block_type blk_type);
|
||||
|
||||
bool kbase_hwcnt_is_block_type_fe(const enum kbase_hwcnt_gpu_v5_block_type blk_type);
|
||||
|
||||
/**
|
||||
* kbase_hwcnt_gpu_enable_map_from_cm() - Builds enable map abstraction from
|
||||
* counter selection bitmasks.
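
The kbase_hwcnt_csf_dump_get() parameter documented above replaces the raw shader-present bitmap with powered_shader_core_mask, described as the common mask between debug_core_mask and the shader_present_bitmap; the natural (assumed) caller-side derivation is a bitwise AND:

/* Hedged caller-side sketch; the two input variables are assumptions. */
const u64 powered_shader_core_mask = debug_core_mask & shader_present_bitmap;

errcode = kbase_hwcnt_csf_dump_get(dst, src, src_block_stt, dst_enable_map,
				   num_l2_slices, powered_shader_core_mask,
				   accumulate);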

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
* (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2018-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software

@@ -125,6 +125,9 @@ int kbase_hwcnt_metadata_create(const struct kbase_hwcnt_description *desc,

void kbase_hwcnt_metadata_destroy(const struct kbase_hwcnt_metadata *metadata)
{
if (!metadata)
return;

kfree(metadata);
}
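
kbase_hwcnt_metadata_destroy() returns early on NULL, so cleanup paths can call it unconditionally. A usage sketch; the create call's out-parameter form is an assumption based on the hunk header above:

const struct kbase_hwcnt_metadata *metadata = NULL;

if (!kbase_hwcnt_metadata_create(&desc, &metadata)) {	/* assumed signature */
	/* ... use the metadata ... */
}
kbase_hwcnt_metadata_destroy(metadata);	/* safe no-op when metadata is NULL */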

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software

@@ -27,6 +27,8 @@
#ifndef _BASE_HWCONFIG_FEATURES_H_
#define _BASE_HWCONFIG_FEATURES_H_

#include <linux/version_compat_defs.h>

enum base_hw_feature {
BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,

@@ -45,55 +47,55 @@ enum base_hw_feature {
BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_generic[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_generic[] = {
BASE_HW_FEATURE_END
};
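
From here on the tables switch from the spelled-out GCC attribute to the kernel's __maybe_unused shorthand. On mainline kernels the macro expands to the same attribute, as sketched below; the newly added version_compat_defs.h include presumably supplies a fallback on kernels that predate it.

/* Mainline definition, from include/linux/compiler_attributes.h: */
#define __maybe_unused __attribute__((__unused__))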

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tMIx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tMIx[] = {
BASE_HW_FEATURE_THREAD_GROUP_SPLIT, BASE_HW_FEATURE_FLUSH_REDUCTION, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tHEx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tHEx[] = {
BASE_HW_FEATURE_THREAD_GROUP_SPLIT, BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tSIx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tSIx[] = {
BASE_HW_FEATURE_THREAD_GROUP_SPLIT, BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tDVx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tDVx[] = {
BASE_HW_FEATURE_THREAD_GROUP_SPLIT, BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tNOx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tNOx[] = {
BASE_HW_FEATURE_THREAD_GROUP_SPLIT, BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, BASE_HW_FEATURE_TLS_HASHING,
BASE_HW_FEATURE_IDVS_GROUP_SIZE, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tGOx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tGOx[] = {
BASE_HW_FEATURE_THREAD_GROUP_SPLIT, BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, BASE_HW_FEATURE_TLS_HASHING,
BASE_HW_FEATURE_IDVS_GROUP_SIZE, BASE_HW_FEATURE_CORE_FEATURES,
BASE_HW_FEATURE_THREAD_TLS_ALLOC, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tTRx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tTRx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_IDVS_GROUP_SIZE, BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_FLUSH_INV_SHADER_OTHER, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tNAx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tNAx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_IDVS_GROUP_SIZE, BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_FLUSH_INV_SHADER_OTHER, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tBEx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tBEx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,

@@ -103,7 +105,7 @@ __attribute__((unused)) static const enum base_hw_feature base_hw_features_tBEx[
BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tBAx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tBAx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,

@@ -113,31 +115,31 @@ __attribute__((unused)) static const enum base_hw_feature base_hw_features_tBAx[
BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tODx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tODx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_L2_CONFIG, BASE_HW_FEATURE_CLEAN_ONLY_SAFE, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tGRx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tGRx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_L2_CONFIG, BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_CORE_FEATURES, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tVAx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tVAx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_L2_CONFIG, BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_CORE_FEATURES, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tTUx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tTUx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_L2_CONFIG, BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_L2_SLICE_HASH, BASE_HW_FEATURE_GPU_SLEEP,
BASE_HW_FEATURE_CORE_FEATURES, BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tTIx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tTIx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_L2_CONFIG,

@@ -149,7 +151,7 @@ __attribute__((unused)) static const enum base_hw_feature base_hw_features_tTIx[
BASE_HW_FEATURE_END
};

__attribute__((unused)) static const enum base_hw_feature base_hw_features_tKRx[] = {
__maybe_unused static const enum base_hw_feature base_hw_features_tKRx[] = {
BASE_HW_FEATURE_FLUSH_REDUCTION, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_L2_CONFIG, BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_L2_SLICE_HASH, BASE_HW_FEATURE_GPU_SLEEP,

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software

@@ -27,6 +27,8 @@
#ifndef _BASE_HWCONFIG_ISSUES_H_
#define _BASE_HWCONFIG_ISSUES_H_

#include <linux/version_compat_defs.h>

enum base_hw_issue {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,

@@ -72,13 +74,14 @@ enum base_hw_issue {
BASE_HW_ISSUE_KRAKEHW_2151,
BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_KRAKEHW_2269,
BASE_HW_ISSUE_TURSEHW_2934,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_generic[] = { BASE_HW_ISSUE_END };
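
Each issue list is terminated by BASE_HW_ISSUE_END rather than carrying an explicit length, so a membership test is a linear scan to the sentinel. A sketch with a hypothetical helper name:

/* Hypothetical helper: true if 'issue' appears in an END-terminated list. */
static bool hw_issues_list_contains(const enum base_hw_issue *issues,
				    enum base_hw_issue issue)
{
	for (; *issues != BASE_HW_ISSUE_END; ++issues) {
		if (*issues == issue)
			return true;
	}
	return false;
}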

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tMIx_r0p0_05dev0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tMIx_r0p0_05dev0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_10682, BASE_HW_ISSUE_11054,
BASE_HW_ISSUE_T76X_3953, BASE_HW_ISSUE_TMIX_7891, BASE_HW_ISSUE_TMIX_8042,
BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TMIX_8138, BASE_HW_ISSUE_TMIX_8206,

@@ -88,7 +91,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tMIx_r0p0
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tMIx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tMIx_r0p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_10682, BASE_HW_ISSUE_11054,
BASE_HW_ISSUE_TMIX_7891, BASE_HW_ISSUE_TMIX_7940, BASE_HW_ISSUE_TMIX_8042,
BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TMIX_8138, BASE_HW_ISSUE_TMIX_8206,

@@ -98,7 +101,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tMIx_r0p0
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tMIx_r0p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tMIx_r0p1[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_10682, BASE_HW_ISSUE_11054,
BASE_HW_ISSUE_TMIX_7891, BASE_HW_ISSUE_TMIX_7940, BASE_HW_ISSUE_TMIX_8042,
BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TMIX_8138, BASE_HW_ISSUE_TMIX_8206,

@@ -108,7 +111,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tMIx_r0p1
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tMIx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tMIx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_7891,
BASE_HW_ISSUE_TMIX_7940, BASE_HW_ISSUE_TMIX_8042, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TMIX_8138, BASE_HW_ISSUE_TMIX_8206, BASE_HW_ISSUE_TMIX_8343,

@@ -116,7 +119,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tMI
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tHEx_r0p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_10682, BASE_HW_ISSUE_11054,
BASE_HW_ISSUE_TMIX_7891, BASE_HW_ISSUE_TMIX_8042, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_GPU2017_1336,

@@ -124,7 +127,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p0
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tHEx_r0p1[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_10682, BASE_HW_ISSUE_11054,
BASE_HW_ISSUE_TMIX_7891, BASE_HW_ISSUE_TMIX_8042, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_GPU2017_1336,

@@ -132,7 +135,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p1
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p2[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tHEx_r0p2[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_10682, BASE_HW_ISSUE_11054,
BASE_HW_ISSUE_TMIX_7891, BASE_HW_ISSUE_TMIX_8042, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_GPU2017_1336,

@@ -140,21 +143,21 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p2
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p3[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tHEx_r0p3[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_10682, BASE_HW_ISSUE_TMIX_7891,
BASE_HW_ISSUE_TMIX_8042, BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_GPU2017_1336, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tHEx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tHEx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_7891,
BASE_HW_ISSUE_TMIX_8042, BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tSIx_r0p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_11054, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116, BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TSIX_1792,
BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_GPU2017_1336, BASE_HW_ISSUE_TTRX_3464,

@@ -162,7 +165,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r0p0
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r0p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tSIx_r0p1[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_11054, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116, BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TSIX_1792,
BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_GPU2017_1336, BASE_HW_ISSUE_TTRX_3464,

@@ -170,77 +173,77 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r0p1
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r1p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tSIx_r1p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_11054, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116, BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_921,
BASE_HW_ISSUE_GPU2017_1336, BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r1p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tSIx_r1p1[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tSIx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tSIx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116, BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tDVx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tDVx_r0p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tDVx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tDVx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116, BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tNOx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tNOx_r0p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TNOX_1194, BASE_HW_ISSUE_TTRX_921,
BASE_HW_ISSUE_GPU2017_1336, BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tNOx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tNOx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116, BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tGOx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tGOx_r0p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TNOX_1194, BASE_HW_ISSUE_TTRX_921,
BASE_HW_ISSUE_GPU2017_1336, BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tGOx_r1p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tGOx_r1p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_8133, BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TGOX_R1_1234, BASE_HW_ISSUE_TTRX_921,
BASE_HW_ISSUE_GPU2017_1336, BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tGOx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tGOx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116, BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076, BASE_HW_ISSUE_TTRX_921,

@@ -251,7 +254,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTRx_r0p0
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTRx_r0p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tTRx_r0p1[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076, BASE_HW_ISSUE_TTRX_921,

@@ -262,7 +265,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTRx_r0p1
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTRx_r0p2[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tTRx_r0p2[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,

@@ -280,14 +283,14 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTRx_r0p2
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tTRx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tTRx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TTRX_3414, BASE_HW_ISSUE_TTRX_3083,
BASE_HW_ISSUE_TTRX_3470, BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tNAx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tNAx_r0p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076, BASE_HW_ISSUE_TTRX_921,

@@ -298,7 +301,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tNAx_r0p0
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tNAx_r0p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tNAx_r0p1[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,

@@ -316,14 +319,14 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tNAx_r0p1
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tNAx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tNAx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TTRX_3414, BASE_HW_ISSUE_TTRX_3083,
BASE_HW_ISSUE_TTRX_3470, BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tBEx_r0p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_TTRX_3414,

@@ -333,7 +336,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r0p0
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r0p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tBEx_r0p1[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,

@@ -349,7 +352,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r0p1
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r1p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tBEx_r1p0[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,

@@ -365,7 +368,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r1p0
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r1p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tBEx_r1p1[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,

@@ -381,14 +384,14 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r1p1
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tBEx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tBEx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TTRX_3414, BASE_HW_ISSUE_TTRX_3083,
BASE_HW_ISSUE_TTRX_3470, BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_lBEx_r1p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_lBEx_r1p0[] = {
BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_921, BASE_HW_ISSUE_TTRX_3414,

@@ -398,7 +401,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_lBEx_r1p0
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_lBEx_r1p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_lBEx_r1p1[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,

@@ -414,7 +417,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_lBEx_r1p1
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBAx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tBAx_r0p0[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,

@@ -430,7 +433,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBAx_r0p0
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBAx_r0p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tBAx_r0p1[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,

@@ -446,7 +449,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBAx_r0p1
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBAx_r0p2[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tBAx_r0p2[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,

@@ -462,73 +465,56 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBAx_r0p2
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBAx_r1p0[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_921,
BASE_HW_ISSUE_TTRX_3414,
BASE_HW_ISSUE_TTRX_3083,
BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tBAx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tBAx[] = {
BASE_HW_ISSUE_5736, BASE_HW_ISSUE_9435, BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TTRX_3414, BASE_HW_ISSUE_TTRX_3083,
BASE_HW_ISSUE_TTRX_3470, BASE_HW_ISSUE_TTRX_3464, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tODx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tODx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3212,
BASE_HW_ISSUE_GPU2019_3878, BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tODx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tODx[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3212,
BASE_HW_ISSUE_GPU2019_3878, BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tGRx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tGRx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tGRx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tGRx[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tVAx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tVAx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tVAx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TURSEHW_1997,
BASE_HW_ISSUE_GPU2019_3878, BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2019_3901,
BASE_HW_ISSUE_GPU2021PRO_290, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
__maybe_unused static const enum base_hw_issue base_hw_issues_tVAx_r0p1[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r0p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tVAx[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
};

__maybe_unused static const enum base_hw_issue base_hw_issues_tTUx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TURSEHW_1997,

@@ -539,79 +525,96 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r0p1
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2922,
BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_TURSEHW_2934,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tTUx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tTUx_r0p1[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TURSEHW_1997,
BASE_HW_ISSUE_GPU2019_3878, BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2019_3901,
BASE_HW_ISSUE_GPU2021PRO_290, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_TURSEHW_2934, BASE_HW_ISSUE_END
};

__maybe_unused static const enum base_hw_issue base_hw_issues_model_tTUx[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_GPU2021PRO_290,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_TURSEHW_2934,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r1p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tTUx_r1p0[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_GPU2021PRO_290,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_TURSEHW_2934,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r1p1[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tTUx_r1p1[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_GPU2021PRO_290,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_TURSEHW_2934,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r1p2[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tTUx_r1p2[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_GPU2021PRO_290,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_TURSEHW_2934,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r1p3[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tTUx_r1p3[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_GPU2019_3878,
BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2019_3901, BASE_HW_ISSUE_GPU2021PRO_290,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_TURSEHW_2934,
BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tTIx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_model_tTIx[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TURSEHW_2716,
BASE_HW_ISSUE_GPU2021PRO_290, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2952,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_TURSEHW_2934, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTIx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tTIx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TURSEHW_2716,
BASE_HW_ISSUE_GPU2021PRO_290, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2922, BASE_HW_ISSUE_TITANHW_2952,
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_TURSEHW_2934, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTIx_r0p1[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TURSEHW_2716,
BASE_HW_ISSUE_GPU2021PRO_290, BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938, BASE_HW_ISSUE_END
__maybe_unused static const enum base_hw_issue base_hw_issues_tTIx_r0p1[] = {
BASE_HW_ISSUE_TSIX_2033, BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2021PRO_290,
BASE_HW_ISSUE_TITANHW_2710, BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148, BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_TURSEHW_2934, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tKRx_r0p0[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tKRx_r0p0[] = {
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_KRAKEHW_2151, BASE_HW_ISSUE_KRAKEHW_2269, BASE_HW_ISSUE_END
BASE_HW_ISSUE_KRAKEHW_2151, BASE_HW_ISSUE_KRAKEHW_2269, BASE_HW_ISSUE_TITANHW_2922,
BASE_HW_ISSUE_TURSEHW_2934, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_tKRx_r0p1[] = {
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_KRAKEHW_2269, BASE_HW_ISSUE_END
};

__attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tKRx[] = {
__maybe_unused static const enum base_hw_issue base_hw_issues_tKRx_r0p1[] = {
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_KRAKEHW_2151, BASE_HW_ISSUE_KRAKEHW_2269, BASE_HW_ISSUE_END
BASE_HW_ISSUE_KRAKEHW_2269, BASE_HW_ISSUE_TURSEHW_2934, BASE_HW_ISSUE_END
};

__maybe_unused static const enum base_hw_issue base_hw_issues_model_tKRx[] = {
BASE_HW_ISSUE_TTRX_1337, BASE_HW_ISSUE_TURSEHW_2716, BASE_HW_ISSUE_GPU2022PRO_148,
BASE_HW_ISSUE_KRAKEHW_2151, BASE_HW_ISSUE_KRAKEHW_2269, BASE_HW_ISSUE_TURSEHW_2934,
BASE_HW_ISSUE_END
};
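
Each GPU keeps one table per silicon revision plus a model_* table for the software model (for tKRx: r0p0, r0p1, and model). The selection logic is outside this excerpt; a purely hypothetical sketch of how such tables are typically chosen:

/* Hypothetical selection; the real driver keys this off the GPU ID register. */
const enum base_hw_issue *issues;

if (running_on_model)			/* assumption */
	issues = base_hw_issues_model_tKRx;
else if (gpu_minor_revision == 0)	/* assumption */
	issues = base_hw_issues_tKRx_r0p0;
else
	issues = base_hw_issues_tKRx_r0p1;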

Binary file not shown.

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software

@@ -204,22 +204,24 @@ int kbase_protected_mode_init(struct kbase_device *kbdev);
void kbase_protected_mode_term(struct kbase_device *kbdev);

/**
* kbase_device_pm_init() - Performs power management initialization and
* Verifies device tree configurations.
* kbase_device_backend_init() - Performs backend initialization and performs
* devicetree validation.
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Return: 0 if successful, otherwise a standard Linux error code
* If -EPERM is returned, it means the device backend is not supported, but
* device initialization can continue.
*/
int kbase_device_pm_init(struct kbase_device *kbdev);
int kbase_device_backend_init(struct kbase_device *kbdev);

/**
* kbase_device_pm_term() - Performs power management deinitialization and
* Free resources.
* kbase_device_backend_term() - Performs backend deinitialization and free
* resources.
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Clean up all the resources
*/
void kbase_device_pm_term(struct kbase_device *kbdev);
void kbase_device_backend_term(struct kbase_device *kbdev);

int power_control_init(struct kbase_device *kbdev);
void power_control_term(struct kbase_device *kbdev);
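
The renamed kbase_device_backend_init() documents -EPERM as a non-fatal "backend unsupported" result, which implies probe code of roughly this shape (a sketch under that documented contract, not the driver's actual probe path):

int err = kbase_device_backend_init(kbdev);

if (err && err != -EPERM)
	return err;	/* genuine failure */
/* -EPERM: backend not supported; device initialization continues. */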

@@ -812,108 +814,8 @@ bool kbasep_adjust_prioritized_process(struct kbase_device *kbdev, bool add, uin
#define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#endif

/**
* kbase_file_fops_count() - Get the kfile::fops_count value
*
* @kfile: Pointer to the object representing the mali device file.
*
* The value is read with kfile::lock held.
*
* Return: sampled value of kfile::fops_count.
*/
static inline int kbase_file_fops_count(struct kbase_file *kfile)
{
int fops_count;

spin_lock(&kfile->lock);
fops_count = kfile->fops_count;
spin_unlock(&kfile->lock);

return fops_count;
}

/**
* kbase_file_inc_fops_count_unless_closed() - Increment the kfile::fops_count value if the
* kfile::owner is still set.
*
* @kfile: Pointer to the object representing the /dev/malixx device file instance.
*
* Return: true if the increment was done otherwise false.
*/
static inline bool kbase_file_inc_fops_count_unless_closed(struct kbase_file *kfile)
{
bool count_incremented = false;

spin_lock(&kfile->lock);
if (kfile->owner) {
kfile->fops_count++;
count_incremented = true;
}
spin_unlock(&kfile->lock);

return count_incremented;
}

/**
* kbase_file_dec_fops_count() - Decrement the kfile::fops_count value
*
* @kfile: Pointer to the object representing the /dev/malixx device file instance.
*
* This function shall only be called to decrement kfile::fops_count if a successful call
* to kbase_file_inc_fops_count_unless_closed() was made previously by the current thread.
*
* The function would enqueue the kfile::destroy_kctx_work if the process that originally
* created the file instance has closed its copy and no Kbase handled file operations are
* in progress and no memory mappings are present for the file instance.
*/
static inline void kbase_file_dec_fops_count(struct kbase_file *kfile)
{
spin_lock(&kfile->lock);
WARN_ON_ONCE(kfile->fops_count <= 0);
kfile->fops_count--;
if (unlikely(!kfile->fops_count && !kfile->owner && !kfile->map_count)) {
queue_work(system_wq, &kfile->destroy_kctx_work);
#if IS_ENABLED(CONFIG_DEBUG_FS)
wake_up(&kfile->zero_fops_count_wait);
#if !defined(UINT32_MAX)
#define UINT32_MAX ((uint32_t)0xFFFFFFFFU)
#endif
}
spin_unlock(&kfile->lock);
}

/**
* kbase_file_inc_cpu_mapping_count() - Increment the kfile::map_count value.
*
* @kfile: Pointer to the object representing the /dev/malixx device file instance.
*
* This function shall be called when the memory mapping on /dev/malixx device file
* instance is created. The kbase_file::setup_state shall be KBASE_FILE_COMPLETE.
*/
static inline void kbase_file_inc_cpu_mapping_count(struct kbase_file *kfile)
{
spin_lock(&kfile->lock);
kfile->map_count++;
spin_unlock(&kfile->lock);
}

/**
* kbase_file_dec_cpu_mapping_count() - Decrement the kfile::map_count value
*
* @kfile: Pointer to the object representing the /dev/malixx device file instance.
*
* This function is called to decrement kfile::map_count value when the memory mapping
* on /dev/malixx device file is closed.
* The function would enqueue the kfile::destroy_kctx_work if the process that originally
* created the file instance has closed its copy and there are no mappings present and no
* Kbase handled file operations are in progress for the file instance.
*/
static inline void kbase_file_dec_cpu_mapping_count(struct kbase_file *kfile)
{
spin_lock(&kfile->lock);
WARN_ON_ONCE(kfile->map_count <= 0);
kfile->map_count--;
if (unlikely(!kfile->map_count && !kfile->owner && !kfile->fops_count))
queue_work(system_wq, &kfile->destroy_kctx_work);
spin_unlock(&kfile->lock);
}
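
The helpers removed above enforced a strict pairing, per their comments: a file operation first takes a reference with kbase_file_inc_fops_count_unless_closed() and, only if that succeeded, later drops it with kbase_file_dec_fops_count(), which may queue kfile::destroy_kctx_work once all counts reach zero. A sketch of that (now retired) pattern, with the actual work factored into a hypothetical helper:

static int example_fop(struct kbase_file *kfile)
{
	int ret;

	if (!kbase_file_inc_fops_count_unless_closed(kfile))
		return -ENODEV;	/* the owner already closed the file */

	ret = do_fop_work(kfile);	/* hypothetical helper */

	kbase_file_dec_fops_count(kfile);	/* may queue destroy_kctx_work */
	return ret;
}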

#endif

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
* (C) COPYRIGHT 2020-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software

@@ -33,15 +33,26 @@
*
* @MALI_KBASE_CAP_SYSTEM_MONITOR: System Monitor
* @MALI_KBASE_CAP_JIT_PRESSURE_LIMIT: JIT Pressure limit
* @MALI_KBASE_CAP_MEM_DONT_NEED: Not needed physical memory
* @MALI_KBASE_CAP_MEM_GROW_ON_GPF: Memory grow on page fault
* @MALI_KBASE_CAP_MEM_PROTECTED: Protected memory
* @MALI_KBASE_CAP_MEM_IMPORT_SYNC_ON_MAP_UNMAP: CPU cache maintenance required when
* imported GPU memory is mapped/unmapped
* @MALI_KBASE_CAP_MEM_KERNEL_SYNC: Kernel side cache sync ops required
* @MALI_KBASE_CAP_MEM_SAME_VA: Same VA on CPU and GPU
* @MALI_KBASE_NUM_CAPS: Delimiter
*
* New enumerator must not be negative and smaller than @MALI_KBASE_NUM_CAPS.
*/
enum mali_kbase_cap {
MALI_KBASE_CAP_SYSTEM_MONITOR = 0,
MALI_KBASE_CAP_JIT_PRESSURE_LIMIT,
MALI_KBASE_CAP_MEM_DONT_NEED,
MALI_KBASE_CAP_MEM_GROW_ON_GPF,
MALI_KBASE_CAP_MEM_PROTECTED,
MALI_KBASE_CAP_MEM_IMPORT_SYNC_ON_MAP_UNMAP,
MALI_KBASE_CAP_MEM_KERNEL_SYNC,
MALI_KBASE_CAP_MEM_SAME_VA,
MALI_KBASE_NUM_CAPS
};

@@ -57,6 +68,11 @@ static inline bool mali_kbase_supports_jit_pressure_limit(unsigned long api_vers
return mali_kbase_supports_cap(api_version, MALI_KBASE_CAP_JIT_PRESSURE_LIMIT);
}

static inline bool mali_kbase_supports_mem_dont_need(unsigned long api_version)
{
return mali_kbase_supports_cap(api_version, MALI_KBASE_CAP_MEM_DONT_NEED);
}

static inline bool mali_kbase_supports_mem_grow_on_gpf(unsigned long api_version)
{
return mali_kbase_supports_cap(api_version, MALI_KBASE_CAP_MEM_GROW_ON_GPF);

@@ -67,4 +83,19 @@ static inline bool mali_kbase_supports_mem_protected(unsigned long api_version)
return mali_kbase_supports_cap(api_version, MALI_KBASE_CAP_MEM_PROTECTED);
}

static inline bool mali_kbase_supports_mem_import_sync_on_map_unmap(unsigned long api_version)
{
return mali_kbase_supports_cap(api_version, MALI_KBASE_CAP_MEM_IMPORT_SYNC_ON_MAP_UNMAP);
}

static inline bool mali_kbase_supports_mem_kernel_sync(unsigned long api_version)
{
return mali_kbase_supports_cap(api_version, MALI_KBASE_CAP_MEM_KERNEL_SYNC);
}

static inline bool mali_kbase_supports_mem_same_va(unsigned long api_version)
{
return mali_kbase_supports_cap(api_version, MALI_KBASE_CAP_MEM_SAME_VA);
}
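
Callers gate features on the client's negotiated API version through these predicates, for example (the api_version field name is an assumption):

if (mali_kbase_supports_mem_same_va(kctx->api_version))
	/* ... take the SAME_VA allocation path ... */;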
|
||||
|
||||
#endif /* __KBASE_CAPS_H_ */
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
|
|
@ -166,8 +166,9 @@ struct kbase_pm_callback_conf {
 *
 * The system integrator can decide whether to either do nothing, just switch off
 * the clocks to the GPU, or to completely power down the GPU.
 * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
 * platform \em callbacks responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
 * The platform specific private pointer kbase_device::platform_context can be
 * accessed and modified in here. It is the platform \em callbacks responsibility
 * to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
 *
 * If runtime PM is enabled and @power_runtime_gpu_idle_callback is used
 * then this callback should power off the GPU (or switch off the clocks
|
|
@ -179,15 +180,18 @@ struct kbase_pm_callback_conf {

/** Callback for when the GPU is about to become active and power must be supplied.
 *
 * This function must not return until the GPU is powered and clocked sufficiently for register access to
 * succeed. The return value specifies whether the GPU was powered down since the call to power_off_callback.
 * If the GPU state has been lost then this function must return 1, otherwise it should return 0.
 * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
 * platform \em callbacks responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
 * This function must not return until the GPU is powered and clocked sufficiently
 * for register access to succeed. The return value specifies whether the GPU was
 * powered down since the call to power_off_callback.
 * If the GPU is in reset state it should return 2, if the GPU state has been lost
 * then this function must return 1, otherwise it should return 0.
 * The platform specific private pointer kbase_device::platform_context can be
 * accessed and modified in here. It is the platform \em callbacks responsibility
 * to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
 *
 * The return value of the first call to this function is ignored.
 *
 * @return 1 if the GPU state may have been lost, 0 otherwise.
 * @return 2 if GPU in reset state, 1 if the GPU state may have been lost, 0 otherwise.
 */
int (*power_on_callback)(struct kbase_device *kbdev);
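Editor's aside: the updated contract distinguishes three outcomes. A minimal sketch of a platform callback honouring it, assuming hypothetical helpers platform_gpu_in_reset() and platform_gpu_state_lost(); only the 0/1/2 return convention comes from the documentation above:

static int example_power_on_callback(struct kbase_device *kbdev)
{
	/* Power up clocks/regulators here, then report what the GPU kept. */
	if (platform_gpu_in_reset(kbdev))
		return 2; /* GPU is in reset state */
	if (platform_gpu_state_lost(kbdev))
		return 1; /* GPU state may have been lost */
	return 0; /* state retained since power_off_callback */
}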

|
|
@ -223,9 +227,11 @@ struct kbase_pm_callback_conf {

/** Callback for handling runtime power management initialization.
 *
 * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
 * will become active from calls made to the OS from within this function.
 * The runtime calls can be triggered by calls from @ref power_off_callback and @ref power_on_callback.
 * The runtime power management callbacks @ref power_runtime_off_callback
 * and @ref power_runtime_on_callback will become active from calls made
 * to the OS from within this function.
 * The runtime calls can be triggered by calls from @ref power_off_callback
 * and @ref power_on_callback.
 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
 *
 * @return 0 on success, else int error code.
|
|
@ -234,8 +240,9 @@ struct kbase_pm_callback_conf {

/** Callback for handling runtime power management termination.
 *
 * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
 * should no longer be called by the OS on completion of this function.
 * The runtime power management callbacks @ref power_runtime_off_callback
 * and @ref power_runtime_on_callback should no longer be called by the
 * OS on completion of this function.
 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
 */
void (*power_runtime_term_callback)(struct kbase_device *kbdev);
|
|||
|
|
@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2013-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2013-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
|
|
@ -194,9 +194,22 @@ enum {
 */
#define CSF_CSG_SUSPEND_TIMEOUT_CYCLES (3100000000ull)

/* Waiting timeout in clock cycles for GPU suspend to complete. */
#define CSF_GPU_SUSPEND_TIMEOUT_CYCLES (CSF_CSG_SUSPEND_TIMEOUT_CYCLES)

/* Waiting timeout in clock cycles for GPU reset to complete. */
#define CSF_GPU_RESET_TIMEOUT_CYCLES (CSF_CSG_SUSPEND_TIMEOUT_CYCLES * 2)

/* Waiting timeout in clock cycles for a CSG to be terminated.
 *
 * Based on a 0.6s timeout at 100MHz, scaled from the 0.1s at 600MHz GPU frequency
 * which is the timeout defined in FW to wait for the iterator to complete the
 * transition to the DISABLED state.
 * More cycles (0.4s @ 100MHz = 40000000) are added to ensure that the
 * host timeout is always bigger than the FW timeout.
 */
#define CSF_CSG_TERM_TIMEOUT_CYCLES (100000000)

/* Waiting timeout in clock cycles for GPU firmware to boot.
 *
 * Based on 250ms timeout at 100MHz, scaled from a 50MHz GPU system.
|
|
@ -213,7 +226,10 @@ enum {
 *
 * Based on 10s timeout at 100MHz, scaled from a 50MHz GPU system.
 */
#if IS_ENABLED(CONFIG_MALI_IS_FPGA)
#if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP)
/* Set a large value to avoid timing out while vector dumping */
#define KCPU_FENCE_SIGNAL_TIMEOUT_CYCLES (250000000000ull)
#elif IS_ENABLED(CONFIG_MALI_IS_FPGA)
#define KCPU_FENCE_SIGNAL_TIMEOUT_CYCLES (2500000000ull)
#else
#define KCPU_FENCE_SIGNAL_TIMEOUT_CYCLES (1000000000ull)
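For reference, the arithmetic behind the CSF_CSG_TERM_TIMEOUT_CYCLES value above, derived only from the comment in this hunk: the FW-side limit is 0.1 s x 600 MHz = 60000000 cycles (equivalently 0.6 s when counted at 100 MHz), and adding the 0.4 s @ 100 MHz margin gives 60000000 + 40000000 = 100000000 cycles.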
|
|||
File diff suppressed because it is too large
|
|
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2013-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2013-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
|
|
@ -237,7 +237,11 @@ static int debug_mem_open(struct inode *i, struct file *file)
	int ret;
	enum kbase_memory_zone idx;

	if (!kbase_file_inc_fops_count_unless_closed(kctx->kfile))
#if (KERNEL_VERSION(6, 7, 0) > LINUX_VERSION_CODE)
	if (get_file_rcu(kctx->filp) == 0)
#else
	if (get_file_rcu(&kctx->filp) == 0)
#endif
		return -ENOENT;

	/* Check if file was opened in write mode. GPU memory contents
|
|
@ -297,7 +301,7 @@ out:
	}
	seq_release(i, file);
open_fail:
	kbase_file_dec_fops_count(kctx->kfile);
	fput(kctx->filp);

	return ret;
}
|
|
@ -327,7 +331,7 @@ static int debug_mem_release(struct inode *inode, struct file *file)
		kfree(mem_data);
	}

	kbase_file_dec_fops_count(kctx->kfile);
	fput(kctx->filp);

	return 0;
}
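Editor's note: the version split in debug_mem_open() above tracks a kernel API change. From v6.7 get_file_rcu() takes a struct file ** and returns the pinned file pointer, where older kernels took the file pointer itself and returned a non-zero count on success. A hedged compat sketch of the same pattern (kbase_debugfs_file_get() is a hypothetical name):

static inline bool kbase_debugfs_file_get(struct file **filp)
{
#if (KERNEL_VERSION(6, 7, 0) > LINUX_VERSION_CODE)
	/* Pre-6.7: returns non-zero if a reference was taken */
	return get_file_rcu(*filp) != 0;
#else
	/* 6.7+: returns the (possibly NULL) file that was pinned */
	return get_file_rcu(filp) != NULL;
#endif
}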
|
|||
|
|
@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2011-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2011-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
|
|
@ -178,16 +178,11 @@ struct kbase_gpu_metrics {
 *
 * @link: Links the object in kbase_device::gpu_metrics::active_list
 * or kbase_device::gpu_metrics::inactive_list.
 * @first_active_start_time: Records the time at which the application first became
 * @active_start_time: Records the time at which the application first became
 * active in the current work period.
 * @last_active_start_time: Records the time at which the application last became
 * active in the current work period.
 * @last_active_end_time: Records the time at which the application last became
 * inactive in the current work period.
 * @total_active: Tracks the time for which application has been active
 * in the current work period.
 * @prev_wp_active_end_time: Records the time at which the application last became
 * inactive in the previous work period.
 * @active_end_time: Records the time at which the application last became
 * inactive in the current work period, or the time of the end of
 * previous work period if the application remained active.
 * @aid: Unique identifier for an application.
 * @kctx_count: Counter to keep track of the number of Kbase contexts
 * created for an application. There may be multiple Kbase
|
|
@ -195,19 +190,14 @@ struct kbase_gpu_metrics {
 * metrics context.
 * @active_cnt: Counter that is updated every time the GPU activity starts
 * and ends in the current work period for an application.
 * @flags: Flags to track the state of GPU metrics context.
 */
struct kbase_gpu_metrics_ctx {
	struct list_head link;
	u64 first_active_start_time;
	u64 last_active_start_time;
	u64 last_active_end_time;
	u64 total_active;
	u64 prev_wp_active_end_time;
	u64 active_start_time;
	u64 active_end_time;
	unsigned int aid;
	unsigned int kctx_count;
	u8 active_cnt;
	u8 flags;
};
#endif

|
|
@ -555,7 +545,7 @@ struct kbase_mem_pool {
	u8 group_id;
	spinlock_t pool_lock;
	struct list_head page_list;
	struct shrinker reclaim;
	DEFINE_KBASE_SHRINKER reclaim;
	atomic_t isolation_in_progress_cnt;

	struct kbase_mem_pool *next_pool;
|
|
@ -847,8 +837,6 @@ struct kbase_mem_migrate {
 * @as_free: Bitpattern of free/available GPU address spaces.
 * @mmu_mask_change: Lock to serialize the access to MMU interrupt mask
 * register used in the handling of Bus & Page faults.
 * @pagesize_2mb: Boolean to determine whether 2MiB page sizes are
 * supported and used where possible.
 * @gpu_props: Object containing complete information about the
 * configuration/properties of GPU HW device in use.
 * @hw_issues_mask: List of SW workarounds for HW issues
|
|
@ -1144,8 +1132,6 @@ struct kbase_device {

	spinlock_t mmu_mask_change;

	bool pagesize_2mb;

	struct kbase_gpu_props gpu_props;

	unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
|
|
@ -1424,9 +1410,6 @@ struct kbase_device {
 * @KBASE_FILE_COMPLETE: Indicates if the setup for context has
 * completed, i.e. flags have been set for the
 * context.
 * @KBASE_FILE_DESTROY_CTX: Indicates that destroying of context has begun or
 * is complete. This state can only be reached after
 * @KBASE_FILE_COMPLETE.
 *
 * The driver allows only limited interaction with user-space until setup
 * is complete.
|
|
@ -1436,8 +1419,7 @@ enum kbase_file_state {
	KBASE_FILE_VSN_IN_PROGRESS,
	KBASE_FILE_NEED_CTX,
	KBASE_FILE_CTX_IN_PROGRESS,
	KBASE_FILE_COMPLETE,
	KBASE_FILE_DESTROY_CTX
	KBASE_FILE_COMPLETE
};

/**
|
|
@ -1447,12 +1429,6 @@ enum kbase_file_state {
 * allocated from the probe method of the Mali driver.
 * @filp: Pointer to the struct file corresponding to device file
 * /dev/malixx instance, passed to the file's open method.
 * @owner: Pointer to the file table structure of a process that
 * created the instance of /dev/malixx device file. Set to
 * NULL when that process closes the file instance. No more
 * file operations would be allowed once set to NULL.
 * It would be updated only in the Userspace context, i.e.
 * when @kbase_open or @kbase_flush is called.
 * @kctx: Object representing an entity, among which GPU is
 * scheduled and which gets its own GPU address space.
 * Invalid until @setup_state is KBASE_FILE_COMPLETE.
|
|
@ -1461,44 +1437,13 @@ enum kbase_file_state {
 * @setup_state is KBASE_FILE_NEED_CTX.
 * @setup_state: Initialization state of the file. Values come from
 * the kbase_file_state enumeration.
 * @destroy_kctx_work: Work item for destroying the @kctx, enqueued only when
 * @fops_count and @map_count becomes zero after /dev/malixx
 * file was previously closed by the @owner.
 * @lock: Lock to serialize the access to members like @owner, @fops_count,
 * @map_count.
 * @fops_count: Counter that is incremented at the beginning of a method
 * defined for @kbase_fops and is decremented at the end.
 * So the counter keeps track of the file operations in progress
 * for /dev/malixx file, that are being handled by the Kbase.
 * The counter is needed to defer the context termination as
 * Userspace can close the /dev/malixx file and flush() method
 * can get called when some other file operation is in progress.
 * @map_count: Counter to keep track of the memory mappings present on
 * /dev/malixx file instance. The counter is needed to defer the
 * context termination as Userspace can close the /dev/malixx
 * file and flush() method can get called when mappings are still
 * present.
 * @zero_fops_count_wait: Waitqueue used to wait for the @fops_count to become 0.
 * Currently needed only for the "mem_view" debugfs file.
 * @event_queue: Wait queue used for blocking the thread, which consumes
 * the base_jd_event corresponding to an atom, when there
 * are no more posted events.
 */
struct kbase_file {
	struct kbase_device *kbdev;
	struct file *filp;
	fl_owner_t owner;
	struct kbase_context *kctx;
	unsigned long api_version;
	atomic_t setup_state;
	struct work_struct destroy_kctx_work;
	spinlock_t lock;
	int fops_count;
	int map_count;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	wait_queue_head_t zero_fops_count_wait;
#endif
	wait_queue_head_t event_queue;
};
#if MALI_JIT_PRESSURE_LIMIT_BASE
/**
|
|
@ -1680,8 +1625,8 @@ struct kbase_sub_alloc {
/**
 * struct kbase_context - Kernel base context
 *
 * @kfile: Pointer to the object representing the /dev/malixx device
 * file instance.
 * @filp: Pointer to the struct file corresponding to device file
 * /dev/malixx instance, passed to the file's open method.
 * @kbdev: Pointer to the Kbase device for which the context is created.
 * @kctx_list_link: Node into Kbase device list of contexts.
 * @mmu: Structure holding details of the MMU tables for this
|
|
@ -1734,6 +1679,9 @@ struct kbase_sub_alloc {
 * used in conjunction with @cookies bitmask mainly for
 * providing a mechanism to have the same value for CPU &
 * GPU virtual address.
 * @event_queue: Wait queue used for blocking the thread, which consumes
 * the base_jd_event corresponding to an atom, when there
 * are no more posted events.
 * @tgid: Thread group ID of the process whose thread created
 * the context (by calling KBASE_IOCTL_VERSION_CHECK or
 * KBASE_IOCTL_SET_FLAGS, depending on the @api_version).
|
|
@ -1945,7 +1893,7 @@ struct kbase_sub_alloc {
 * is made on the device file.
 */
struct kbase_context {
	struct kbase_file *kfile;
	struct file *filp;
	struct kbase_device *kbdev;
	struct list_head kctx_list_link;
	struct kbase_mmu_table mmu;
|
|
@ -1997,6 +1945,7 @@ struct kbase_context {
	DECLARE_BITMAP(cookies, BITS_PER_LONG);
	struct kbase_va_region *pending_regions[BITS_PER_LONG];

	wait_queue_head_t event_queue;
	pid_t tgid;
	pid_t pid;
	atomic_t prioritized;
|
|
@ -2006,7 +1955,8 @@ struct kbase_context {

	struct kbase_mem_pool_group mem_pools;

	struct shrinker reclaim;
	DEFINE_KBASE_SHRINKER reclaim;

	struct list_head evict_list;
	atomic_t evict_nents;

|
|||
|
|
@ -35,8 +35,37 @@
#include <linux/version_compat_defs.h>

#if MALI_USE_CSF
/* Number of digits needed to express the max value of given unsigned type.
 *
 * Details: The number of digits needed to express the max value of given type is log10(t_max) + 1
 * sizeof(t) == log2(t_max)/8
 * log10(t_max) == log2(t_max) / log2(10)
 * log2(t_max) == sizeof(type) * 8
 * 1/log2(10) is approx (1233 >> 12)
 * Hence, number of digits for given type == log10(t_max) + 1 == sizeof(type) * 8 * (1233 >> 12) + 1
 */
#define MAX_DIGITS_FOR_UNSIGNED_TYPE(t) ((((sizeof(t) * BITS_PER_BYTE) * 1233) >> 12) + 1)

/* Number of digits needed to express the max value of given signed type,
 * including the sign character.
 */
#define MAX_DIGITS_FOR_SIGNED_TYPE(t) (MAX_DIGITS_FOR_UNSIGNED_TYPE(t) + 1)

/* Max number of characters for id member of kbase_device struct. */
#define MAX_KBDEV_ID_LEN MAX_DIGITS_FOR_UNSIGNED_TYPE(u32)
/* Max number of characters for tgid member of kbase_context struct. */
#define MAX_KCTX_TGID_LEN MAX_DIGITS_FOR_SIGNED_TYPE(pid_t)
/* Max number of characters for id member of kbase_context struct. */
#define MAX_KCTX_ID_LEN MAX_DIGITS_FOR_UNSIGNED_TYPE(u32)
/* Max number of characters for fence_context member of kbase_kcpu_command_queue struct. */
#define MAX_KCTX_QUEUE_FENCE_CTX_LEN MAX_DIGITS_FOR_UNSIGNED_TYPE(u64)
/* Max number of characters for timeline name fixed format, including null character. */
#define FIXED_FORMAT_LEN (9)

/* Maximum number of characters in DMA fence timeline name. */
#define MAX_TIMELINE_NAME (32)
#define MAX_TIMELINE_NAME \
	(MAX_KBDEV_ID_LEN + MAX_KCTX_TGID_LEN + MAX_KCTX_ID_LEN + MAX_KCTX_QUEUE_FENCE_CTX_LEN + \
	 FIXED_FORMAT_LEN)
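A quick sanity check of the digit-count arithmetic above (an editor's sketch, not part of the driver): for u32, sizeof == 4, so ((4 * 8 * 1233) >> 12) + 1 == (39456 >> 12) + 1 == 9 + 1 == 10, which matches the 10 digits of U32_MAX (4294967295). The same could be asserted at compile time:

_Static_assert(MAX_DIGITS_FOR_UNSIGNED_TYPE(u32) == 10, "u32 max value needs 10 digits");
_Static_assert(MAX_DIGITS_FOR_UNSIGNED_TYPE(u64) == 20, "u64 max value needs 20 digits");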

/**
 * struct kbase_kcpu_dma_fence_meta - Metadata structure for dma fence objects containing
|
|||
|
|
@ -29,46 +29,12 @@
#include <linux/module.h>
#include <linux/slab.h>

/**
 * enum gpu_metrics_ctx_flags - Flags for the GPU metrics context
 *
 * @ACTIVE_INTERVAL_IN_WP: Flag set when the application first becomes active in
 * the current work period.
 *
 * @INSIDE_ACTIVE_LIST: Flag to track if object is in kbase_device::gpu_metrics::active_list
 *
 * All members need to be separate bits. This enum is intended for use in a
 * bitmask where multiple values get OR-ed together.
 */
enum gpu_metrics_ctx_flags {
	ACTIVE_INTERVAL_IN_WP = 1 << 0,
	INSIDE_ACTIVE_LIST = 1 << 1,
};

static unsigned long gpu_metrics_tp_emit_interval_ns = DEFAULT_GPU_METRICS_TP_EMIT_INTERVAL_NS;

module_param(gpu_metrics_tp_emit_interval_ns, ulong, 0444);
MODULE_PARM_DESC(gpu_metrics_tp_emit_interval_ns,
		 "Time interval in nano seconds at which GPU metrics tracepoints are emitted");

static inline bool gpu_metrics_ctx_flag(struct kbase_gpu_metrics_ctx *gpu_metrics_ctx,
					enum gpu_metrics_ctx_flags flag)
{
	return (gpu_metrics_ctx->flags & flag);
}

static inline void gpu_metrics_ctx_flag_set(struct kbase_gpu_metrics_ctx *gpu_metrics_ctx,
					    enum gpu_metrics_ctx_flags flag)
{
	gpu_metrics_ctx->flags |= flag;
}

static inline void gpu_metrics_ctx_flag_clear(struct kbase_gpu_metrics_ctx *gpu_metrics_ctx,
					      enum gpu_metrics_ctx_flags flag)
{
	gpu_metrics_ctx->flags &= ~flag;
}

static inline void validate_tracepoint_data(struct kbase_gpu_metrics_ctx *gpu_metrics_ctx,
					    u64 start_time, u64 end_time, u64 total_active)
{
|
|
@ -82,43 +48,30 @@ static inline void validate_tracepoint_data(struct kbase_gpu_metrics_ctx *gpu_me
	WARN(total_active > (end_time - start_time),
	     "total_active %llu > end_time %llu - start_time %llu for aid %u active_cnt %u",
	     total_active, end_time, start_time, gpu_metrics_ctx->aid, gpu_metrics_ctx->active_cnt);

	WARN(gpu_metrics_ctx->prev_wp_active_end_time > start_time,
	     "prev_wp_active_end_time %llu > start_time %llu for aid %u active_cnt %u",
	     gpu_metrics_ctx->prev_wp_active_end_time, start_time, gpu_metrics_ctx->aid,
	     gpu_metrics_ctx->active_cnt);
#endif
}

static void emit_tracepoint_for_active_gpu_metrics_ctx(
	struct kbase_device *kbdev, struct kbase_gpu_metrics_ctx *gpu_metrics_ctx, u64 current_time)
{
	const u64 start_time = gpu_metrics_ctx->first_active_start_time;
	u64 total_active = gpu_metrics_ctx->total_active;
	u64 end_time;
	const u64 start_time = gpu_metrics_ctx->active_start_time;
	u64 total_active, end_time = current_time;

	/* Check if the GPU activity is currently ongoing */
	if (gpu_metrics_ctx->active_cnt) {
		/* The following check is to handle the race on CSF GPUs that can happen between
		 * the draining of trace buffer and FW emitting the ACT=1 event.
		 */
		if (unlikely(current_time == gpu_metrics_ctx->last_active_start_time))
			current_time++;
		end_time = current_time;
		total_active += end_time - gpu_metrics_ctx->last_active_start_time;

		gpu_metrics_ctx->first_active_start_time = current_time;
		gpu_metrics_ctx->last_active_start_time = current_time;
	} else {
		end_time = gpu_metrics_ctx->last_active_end_time;
		gpu_metrics_ctx_flag_clear(gpu_metrics_ctx, ACTIVE_INTERVAL_IN_WP);
		if (unlikely(end_time == start_time))
			end_time++;
		gpu_metrics_ctx->active_start_time = end_time;
	}

	total_active = end_time - start_time;
	trace_gpu_work_period(kbdev->id, gpu_metrics_ctx->aid, start_time, end_time, total_active);

	validate_tracepoint_data(gpu_metrics_ctx, start_time, end_time, total_active);
	gpu_metrics_ctx->prev_wp_active_end_time = end_time;
	gpu_metrics_ctx->total_active = 0;
	gpu_metrics_ctx->active_end_time = end_time;
}

void kbase_gpu_metrics_ctx_put(struct kbase_device *kbdev,
|
|
@ -131,7 +84,8 @@ void kbase_gpu_metrics_ctx_put(struct kbase_device *kbdev,
	if (gpu_metrics_ctx->kctx_count)
		return;

	if (gpu_metrics_ctx_flag(gpu_metrics_ctx, ACTIVE_INTERVAL_IN_WP))
	/* Generate a tracepoint if there's still activity */
	if (gpu_metrics_ctx->active_cnt)
		emit_tracepoint_for_active_gpu_metrics_ctx(kbdev, gpu_metrics_ctx,
							   ktime_get_raw_ns());

|
|
@ -166,12 +120,11 @@ struct kbase_gpu_metrics_ctx *kbase_gpu_metrics_ctx_get(struct kbase_device *kbd
void kbase_gpu_metrics_ctx_init(struct kbase_device *kbdev,
				struct kbase_gpu_metrics_ctx *gpu_metrics_ctx, unsigned int aid)
{
	gpu_metrics_ctx->active_start_time = 0;
	gpu_metrics_ctx->active_end_time = 0;
	gpu_metrics_ctx->aid = aid;
	gpu_metrics_ctx->total_active = 0;
	gpu_metrics_ctx->kctx_count = 1;
	gpu_metrics_ctx->active_cnt = 0;
	gpu_metrics_ctx->prev_wp_active_end_time = 0;
	gpu_metrics_ctx->flags = 0;
	list_add_tail(&gpu_metrics_ctx->link, &kbdev->gpu_metrics.inactive_list);
}

|
|
@ -180,17 +133,9 @@ void kbase_gpu_metrics_ctx_start_activity(struct kbase_context *kctx, u64 timest
	struct kbase_gpu_metrics_ctx *gpu_metrics_ctx = kctx->gpu_metrics_ctx;

	gpu_metrics_ctx->active_cnt++;
	if (gpu_metrics_ctx->active_cnt == 1)
		gpu_metrics_ctx->last_active_start_time = timestamp_ns;

	if (!gpu_metrics_ctx_flag(gpu_metrics_ctx, ACTIVE_INTERVAL_IN_WP)) {
		gpu_metrics_ctx->first_active_start_time = timestamp_ns;
		gpu_metrics_ctx_flag_set(gpu_metrics_ctx, ACTIVE_INTERVAL_IN_WP);
	}

	if (!gpu_metrics_ctx_flag(gpu_metrics_ctx, INSIDE_ACTIVE_LIST)) {
	if (gpu_metrics_ctx->active_cnt == 1) {
		gpu_metrics_ctx->active_start_time = timestamp_ns;
		list_move_tail(&gpu_metrics_ctx->link, &kctx->kbdev->gpu_metrics.active_list);
		gpu_metrics_ctx_flag_set(gpu_metrics_ctx, INSIDE_ACTIVE_LIST);
	}
	}

|
|
@ -201,22 +146,22 @@ void kbase_gpu_metrics_ctx_end_activity(struct kbase_context *kctx, u64 timestam
	if (WARN_ON_ONCE(!gpu_metrics_ctx->active_cnt))
		return;

	/* Do not emit tracepoint if GPU activity still continues. */
	if (--gpu_metrics_ctx->active_cnt)
		return;

	if (likely(timestamp_ns > gpu_metrics_ctx->last_active_start_time)) {
		gpu_metrics_ctx->last_active_end_time = timestamp_ns;
		gpu_metrics_ctx->total_active +=
			timestamp_ns - gpu_metrics_ctx->last_active_start_time;
	if (likely(timestamp_ns > gpu_metrics_ctx->active_start_time)) {
		emit_tracepoint_for_active_gpu_metrics_ctx(kctx->kbdev, gpu_metrics_ctx,
							   timestamp_ns);
		return;
	}

	/* Due to conversion from system timestamp to CPU timestamp (which involves rounding)
	 * the value for start and end timestamp could come as same on CSF GPUs.
	 */
	if (timestamp_ns == gpu_metrics_ctx->last_active_start_time) {
		gpu_metrics_ctx->last_active_end_time = timestamp_ns + 1;
		gpu_metrics_ctx->total_active += 1;
	if (timestamp_ns == gpu_metrics_ctx->active_start_time) {
		emit_tracepoint_for_active_gpu_metrics_ctx(kctx->kbdev, gpu_metrics_ctx,
							   timestamp_ns + 1);
		return;
	}

|
|
@ -224,12 +169,9 @@ void kbase_gpu_metrics_ctx_end_activity(struct kbase_context *kctx, u64 timestam
	 * visible to the Kbase even though the system timestamp value sampled by FW was less than
	 * the system timestamp value sampled by Kbase just before the draining of trace buffer.
	 */
	if (gpu_metrics_ctx->last_active_start_time == gpu_metrics_ctx->first_active_start_time &&
	    gpu_metrics_ctx->prev_wp_active_end_time == gpu_metrics_ctx->first_active_start_time) {
		WARN_ON_ONCE(gpu_metrics_ctx->total_active);
		gpu_metrics_ctx->last_active_end_time =
			gpu_metrics_ctx->prev_wp_active_end_time + 1;
		gpu_metrics_ctx->total_active = 1;
	if (gpu_metrics_ctx->active_end_time == gpu_metrics_ctx->active_start_time) {
		emit_tracepoint_for_active_gpu_metrics_ctx(kctx->kbdev, gpu_metrics_ctx,
							   gpu_metrics_ctx->active_end_time + 1);
		return;
	}

|
|
@ -242,15 +184,12 @@ void kbase_gpu_metrics_emit_tracepoint(struct kbase_device *kbdev, u64 ts)
	struct kbase_gpu_metrics_ctx *gpu_metrics_ctx, *tmp;

	list_for_each_entry_safe(gpu_metrics_ctx, tmp, &gpu_metrics->active_list, link) {
		if (!gpu_metrics_ctx_flag(gpu_metrics_ctx, ACTIVE_INTERVAL_IN_WP)) {
			WARN_ON(!gpu_metrics_ctx_flag(gpu_metrics_ctx, INSIDE_ACTIVE_LIST));
			WARN_ON(gpu_metrics_ctx->active_cnt);
			list_move_tail(&gpu_metrics_ctx->link, &gpu_metrics->inactive_list);
			gpu_metrics_ctx_flag_clear(gpu_metrics_ctx, INSIDE_ACTIVE_LIST);
		if (gpu_metrics_ctx->active_cnt) {
			emit_tracepoint_for_active_gpu_metrics_ctx(kbdev, gpu_metrics_ctx, ts);
			continue;
		}

		emit_tracepoint_for_active_gpu_metrics_ctx(kbdev, gpu_metrics_ctx, ts);
		list_move_tail(&gpu_metrics_ctx->link, &gpu_metrics->inactive_list);
	}
}
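Editor's note: after the refactor the metrics API surface is small. A sketch of the expected call sequence (example_report_gpu_work() is hypothetical; the serialization rules are documented in mali_kbase_gpu_metrics.h):

static void example_report_gpu_work(struct kbase_context *kctx, u64 start_ns, u64 end_ns)
{
	kbase_gpu_metrics_ctx_start_activity(kctx, start_ns); /* active_cnt 0 -> 1, records start */
	kbase_gpu_metrics_ctx_end_activity(kctx, end_ns);     /* active_cnt 1 -> 0, emits tracepoint */
	kbase_gpu_metrics_emit_tracepoint(kctx->kbdev, ktime_get_raw_ns()); /* closes the work period */
}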

|
|||
|
|
@ -106,7 +106,7 @@ void kbase_gpu_metrics_ctx_init(struct kbase_device *kbdev,
 * @kctx: Pointer to the Kbase context contributing data to the GPU metrics context.
 * @timestamp_ns: CPU timestamp at which the GPU activity started.
 *
 * The provided timestamp would be later used as the "start_time_ns" for the
 * The provided timestamp is used as the "start_time_ns" for the
 * power/gpu_work_period tracepoint if this is the first GPU activity for the GPU
 * metrics context in the current work period.
 *
|
|
@ -122,9 +122,9 @@ void kbase_gpu_metrics_ctx_start_activity(struct kbase_context *kctx, u64 timest
 * @kctx: Pointer to the Kbase context contributing data to the GPU metrics context.
 * @timestamp_ns: CPU timestamp at which the GPU activity ended.
 *
 * The provided timestamp would be later used as the "end_time_ns" for the
 * power/gpu_work_period tracepoint if this is the last GPU activity for the GPU
 * metrics context in the current work period.
 * The provided timestamp is used as the "end_time_ns" for the power/gpu_work_period
 * tracepoint if this is the last GPU activity for the GPU metrics context
 * in the current work period.
 *
 * Note: The caller must appropriately serialize the call to this function with the
 * call to other GPU metrics functions declared in this file.
|
|
@ -138,8 +138,8 @@ void kbase_gpu_metrics_ctx_end_activity(struct kbase_context *kctx, u64 timestam
 * @kbdev: Pointer to the GPU device.
 * @ts: Timestamp at which the tracepoint is being emitted.
 *
 * This function would loop through all the active GPU metrics contexts and emit a
 * power/gpu_work_period tracepoint for them.
 * This function would loop through all GPU metrics contexts in the active list and
 * emit a power/gpu_work_period tracepoint if the GPU work in the context is still active.
 * The GPU metrics context that is found to be inactive since the last tracepoint
 * was emitted would be moved to the inactive list.
 * The current work period would be considered as over and a new work period would
|
|||
|
|
@ -357,6 +357,7 @@ enum l2_config_override_result {
/**
 * kbase_read_l2_config_from_dt - Read L2 configuration
 * @kbdev: The kbase device for which to get the L2 configuration.
 * @regdump: Pointer to struct kbase_gpuprops_regdump structure.
 *
 * Check for L2 configuration overrides in module parameters and device tree.
 * Override values in module parameters take priority over override values in
|
|
@ -366,9 +367,16 @@ enum l2_config_override_result {
 * overridden, L2_CONFIG_OVERRIDE_NONE if no overrides are provided.
 * L2_CONFIG_OVERRIDE_FAIL otherwise.
 */
static enum l2_config_override_result kbase_read_l2_config_from_dt(struct kbase_device *const kbdev)
static enum l2_config_override_result
kbase_read_l2_config_from_dt(struct kbase_device *const kbdev,
			     struct kbasep_gpuprops_regdump *regdump)
{
	struct device_node *np = kbdev->dev->of_node;
	/*
	 * CACHE_SIZE bit fields in L2_FEATURES register, default value after the reset/powerup
	 * holds the maximum size of the cache that can be programmed in L2_CONFIG register.
	 */
	const u8 l2_size_max = L2_FEATURES_CACHE_SIZE_GET(regdump->l2_features);

	if (!np)
		return L2_CONFIG_OVERRIDE_NONE;
|
|
@ -378,8 +386,12 @@ static enum l2_config_override_result kbase_read_l2_config_from_dt(struct kbase_
	else if (of_property_read_u8(np, "l2-size", &kbdev->l2_size_override))
		kbdev->l2_size_override = 0;

	if (kbdev->l2_size_override != 0 && kbdev->l2_size_override < OVERRIDE_L2_SIZE_MIN_LOG2)
	if (kbdev->l2_size_override != 0 && (kbdev->l2_size_override < OVERRIDE_L2_SIZE_MIN_LOG2 ||
					     kbdev->l2_size_override > l2_size_max)) {
		dev_err(kbdev->dev, "Invalid Cache Size in %s",
			override_l2_size ? "Module parameters" : "Device tree node");
		return L2_CONFIG_OVERRIDE_FAIL;
	}

	/* Check that the overriding value is supported; if not, it will result in
	 * undefined behavior.
|
|
@ -429,7 +441,7 @@ int kbase_gpuprops_update_l2_features(struct kbase_device *kbdev)
	struct kbasep_gpuprops_regdump *regdump = &PRIV_DATA_REGDUMP(kbdev);

	/* Check for L2 cache size & hash overrides */
	switch (kbase_read_l2_config_from_dt(kbdev)) {
	switch (kbase_read_l2_config_from_dt(kbdev, regdump)) {
	case L2_CONFIG_OVERRIDE_FAIL:
		err = -EIO;
		goto exit;
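Editor's note on the encoding being validated above: the l2-size override holds log2 of the cache size in bytes, so the new bounds check compares exponents, not byte counts. A small sketch (encode_l2_size() is hypothetical; ilog2() is the kernel helper from <linux/log2.h>):

static u8 encode_l2_size(u64 cache_size_bytes)
{
	/* e.g. ilog2(1024) == 10 == 0xa for a 1 KiB cache,
	 * ilog2(524288) == 19 == 0x13 for a 512 KiB cache.
	 */
	return (u8)ilog2(cache_size_bytes);
}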
|
|||
|
|
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
|
|
@ -30,9 +30,10 @@
#include <linux/module.h>

static inline void kbase_gpu_gwt_setup_page_permission(struct kbase_context *kctx,
						       unsigned long flag, struct rb_node *node)
						       unsigned long flag,
						       struct kbase_reg_zone *zone)
{
	struct rb_node *rbnode = node;
	struct rb_node *rbnode = rb_first(&zone->reg_rbtree);

	while (rbnode) {
		struct kbase_va_region *reg;
|
|
@ -55,17 +56,15 @@ static inline void kbase_gpu_gwt_setup_page_permission(struct kbase_context *kct

static void kbase_gpu_gwt_setup_pages(struct kbase_context *kctx, unsigned long flag)
{
	kbase_gpu_gwt_setup_page_permission(kctx, flag,
					    rb_first(&kctx->reg_zone[SAME_VA_ZONE].reg_rbtree));
	kbase_gpu_gwt_setup_page_permission(kctx, flag,
					    rb_first(&kctx->reg_zone[CUSTOM_VA_ZONE].reg_rbtree));
	kbase_gpu_gwt_setup_page_permission(kctx, flag, &kctx->reg_zone[SAME_VA_ZONE]);
	kbase_gpu_gwt_setup_page_permission(kctx, flag, &kctx->reg_zone[CUSTOM_VA_ZONE]);
}

int kbase_gpu_gwt_start(struct kbase_context *kctx)
{
	kbase_gpu_vm_lock(kctx);
	kbase_gpu_vm_lock_with_pmode_sync(kctx);
	if (kctx->gwt_enabled) {
		kbase_gpu_vm_unlock(kctx);
		kbase_gpu_vm_unlock_with_pmode_sync(kctx);
		return -EBUSY;
	}

|
|
@ -91,7 +90,7 @@ int kbase_gpu_gwt_start(struct kbase_context *kctx)

	kbase_gpu_gwt_setup_pages(kctx, ~KBASE_REG_GPU_WR);

	kbase_gpu_vm_unlock(kctx);
	kbase_gpu_vm_unlock_with_pmode_sync(kctx);
	return 0;
}

|
|||
|
|
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2012-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2012-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
|
|
@ -225,6 +225,8 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(struct kbase_dev

	{ GPU_ID_PRODUCT_TVAX,
	  { { GPU_ID_VERSION_MAKE(0, 0, 0), base_hw_issues_tVAx_r0p0 },
	    { GPU_ID_VERSION_MAKE(0, 0, 5), base_hw_issues_tVAx_r0p0 },
	    { GPU_ID_VERSION_MAKE(0, 1, 0), base_hw_issues_tVAx_r0p1 },
	    { U32_MAX, NULL } } },

	{ GPU_ID_PRODUCT_TTUX,
|
|
@ -334,6 +336,8 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(struct kbase_dev
			gpu_id->version_id = fallback_version;
		}
	}


	return issues;
}

|
|||
|
|
@ -129,14 +129,14 @@ void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask)
 * kbase_pm_set_debug_core_mask - Set the debug core mask.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @new_core_mask_js0: The core mask to use for job slot 0
 * @new_core_mask_js1: The core mask to use for job slot 1
 * @new_core_mask_js2: The core mask to use for job slot 2
 * @new_core_mask: The core mask to use, as an array where each element refers
 * to a job slot.
 * @new_core_mask_size: Number of elements in the core mask array.
 *
 * This determines which cores the power manager is allowed to use.
 */
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask_js0,
				  u64 new_core_mask_js1, u64 new_core_mask_js2);
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 *new_core_mask,
				  size_t new_core_mask_size);
#endif /* MALI_USE_CSF */
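Editor's sketch of a call under the new array-based signature (the mask values are illustrative only, and kbdev is assumed to be a valid device pointer):

u64 masks[3] = { 0xF, 0xF, 0x3 }; /* one mask per job slot */
kbase_pm_set_debug_core_mask(kbdev, masks, ARRAY_SIZE(masks));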

/**
|
|||
|
|
@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2014, 2018-2021, 2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2014, 2018-2021, 2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
|
|
@ -27,7 +27,8 @@
 *
 * @multiplier: Numerator of the converter's fraction.
 * @divisor: Denominator of the converter's fraction.
 * @offset: Converter's offset term.
 * @gpu_timestamp_offset: Cached CPU to GPU TS offset, computed whenever the whole
 * system enters standby mode, where CPU Monotonic time is suspended.
 * @device_scaled_timeouts: Timeouts in milliseconds that were scaled to be
 * consistent with the minimum MCU frequency. This
 * array caches the results of all of the conversions
|
|
@ -55,7 +56,7 @@ struct kbase_backend_time {
#if MALI_USE_CSF
	u64 multiplier;
	u64 divisor;
	s64 offset;
	s64 gpu_timestamp_offset;
#endif
	unsigned int device_scaled_timeouts[KBASE_TIMEOUT_SELECTOR_COUNT];
};
|
|
@ -70,6 +71,40 @@ struct kbase_backend_time {
 * Return: The CPU timestamp.
 */
u64 __maybe_unused kbase_backend_time_convert_gpu_to_cpu(struct kbase_device *kbdev, u64 gpu_ts);

/**
 * kbase_backend_update_gpu_timestamp_offset() - Updates GPU timestamp offset register with the
 *                                               cached value.
 *
 * @kbdev: Kbase device pointer
 *
 * Compute the new cached value for GPU timestamp offset if the previously cached value has been
 * invalidated, and update the GPU timestamp offset register with the cached value.
 */
void kbase_backend_update_gpu_timestamp_offset(struct kbase_device *kbdev);

/**
 * kbase_backend_invalidate_gpu_timestamp_offset() - Invalidate cached GPU timestamp offset value
 *
 * @kbdev: Kbase device pointer
 *
 * This function invalidates the cached GPU timestamp offset value whenever a system suspend
 * is about to happen, as the CPU TS counter will be stopped.
 */
void kbase_backend_invalidate_gpu_timestamp_offset(struct kbase_device *kbdev);

#if MALI_UNIT_TEST
/**
 * kbase_backend_read_gpu_timestamp_offset_reg() - Read GPU TIMESTAMP OFFSET register
 *
 * @kbdev: Kbase device pointer
 *
 * This function reads the GPU TIMESTAMP OFFSET register with proper register access.
 *
 * Return: GPU TIMESTAMP OFFSET register value, as an unsigned 64-bit value.
 */
u64 kbase_backend_read_gpu_timestamp_offset_reg(struct kbase_device *kbdev);
#endif
#endif
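Editor's sketch of how the invalidate/update pair might bracket a system suspend cycle (the pm_suspend_hook/pm_resume_hook names are hypothetical; only the kbase_backend_* calls come from this header):

static void pm_suspend_hook(struct kbase_device *kbdev)
{
	/* CPU monotonic time is about to stop: drop the cached offset. */
	kbase_backend_invalidate_gpu_timestamp_offset(kbdev);
}

static void pm_resume_hook(struct kbase_device *kbdev)
{
	/* Recompute the offset and write it back to the GPU register. */
	kbase_backend_update_gpu_timestamp_offset(kbdev);
}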

/**
|
|||
|
|
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2011-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2011-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
|
|
@ -161,7 +161,7 @@ static inline int gpu_metrics_ctx_init(struct kbase_context *kctx)
	put_cred(cred);

	/* Return early if this is not a Userspace created context */
	if (unlikely(!kctx->kfile))
	if (unlikely(!kctx->filp))
		return 0;

	/* Serialize against the other threads trying to create/destroy Kbase contexts. */
|
|
@ -200,7 +200,7 @@ static inline void gpu_metrics_ctx_term(struct kbase_context *kctx)
	unsigned long flags;

	/* Return early if this is not a Userspace created context */
	if (unlikely(!kctx->kfile))
	if (unlikely(!kctx->filp))
		return;

	/* Serialize against the other threads trying to create/destroy Kbase contexts. */
|
|
@ -2615,7 +2615,7 @@ static void kbase_js_move_to_tree(struct kbase_jd_atom *katom)
 *
 * Remove all post dependencies of an atom from the context ringbuffers.
 *
 * The original atom's event_code will be propogated to all dependent atoms.
 * The original atom's event_code will be propagated to all dependent atoms.
 *
 * Context: Caller must hold the HW access lock
 */
|
|||
Some files were not shown because too many files have changed in this diff