From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] disable FB
---
kernel/drivers/gpu/arm/bifrost/mali_kbase_defs.h | 567 +++++++++++++++++++++++++++++++++++++------------------
1 file changed, 378 insertions(+), 189 deletions(-)
diff --git a/kernel/drivers/gpu/arm/bifrost/mali_kbase_defs.h b/kernel/drivers/gpu/arm/bifrost/mali_kbase_defs.h
index ce0a247..809e730 100755
--- a/kernel/drivers/gpu/arm/bifrost/mali_kbase_defs.h
+++ b/kernel/drivers/gpu/arm/bifrost/mali_kbase_defs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2011-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -35,11 +35,15 @@
#include <backend/gpu/mali_kbase_instr_defs.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_gpuprops_types.h>
+#include <hwcnt/mali_kbase_hwcnt_watchdog_if.h>
+
#if MALI_USE_CSF
-#include <mali_kbase_hwcnt_backend_csf.h>
+#include <hwcnt/backend/mali_kbase_hwcnt_backend_csf.h>
#else
-#include <mali_kbase_hwcnt_backend_jm.h>
+#include <hwcnt/backend/mali_kbase_hwcnt_backend_jm.h>
+#include <hwcnt/backend/mali_kbase_hwcnt_backend_jm_watchdog.h>
#endif
+
#include <protected_mode_switcher.h>
#include <linux/atomic.h>
@@ -49,11 +53,7 @@
#include <linux/sizes.h>
-#if defined(CONFIG_SYNC)
-#include <sync.h>
-#else
#include "mali_kbase_fence_defs.h"
-#endif
#if IS_ENABLED(CONFIG_DEBUG_FS)
#include <linux/debugfs.h>
@@ -63,6 +63,10 @@
#include <linux/devfreq.h>
#endif /* CONFIG_MALI_BIFROST_DEVFREQ */
+#if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
+#include <linux/devfreq_cooling.h>
+#endif
+
#ifdef CONFIG_MALI_ARBITER_SUPPORT
#include <arbiter/mali_kbase_arbiter_defs.h>
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
@@ -70,10 +74,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/memory_group_manager.h>
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM)
-#define KBASE_PM_RUNTIME 1
-#endif
+#include <soc/rockchip/rockchip_opp_select.h>
#include "debug/mali_kbase_debug_ktrace_defs.h"
@@ -81,7 +82,7 @@
#define RESET_TIMEOUT 500
/**
- * The maximum number of Job Slots to support in the Hardware.
+ * BASE_JM_MAX_NR_SLOTS - The maximum number of Job Slots to support in the Hardware.
*
* You can optimize this down if your target devices will only ever support a
* small number of job slots.
@@ -89,7 +90,7 @@
#define BASE_JM_MAX_NR_SLOTS 3
/**
- * The maximum number of Address Spaces to support in the Hardware.
+ * BASE_MAX_NR_AS - The maximum number of Address Spaces to support in the Hardware.
*
* You can optimize this down if your target devices will only ever support a
* small number of Address Spaces
@@ -109,27 +110,26 @@
#define KBASEP_AS_NR_INVALID (-1)
/**
- * Maximum size in bytes of a MMU lock region, as a logarithm
+ * KBASE_LOCK_REGION_MAX_SIZE_LOG2 - Maximum size in bytes of a MMU lock region,
+ * as a logarithm
*/
-#define KBASE_LOCK_REGION_MAX_SIZE_LOG2 (64)
+#define KBASE_LOCK_REGION_MAX_SIZE_LOG2 (48) /* 256 TB */
/**
- * Minimum size in bytes of a MMU lock region, as a logarithm
+ * KBASE_REG_ZONE_MAX - Maximum number of GPU memory region zones
*/
-#define KBASE_LOCK_REGION_MIN_SIZE_LOG2 (15)
-
-/**
- * Maximum number of GPU memory region zones
- */
+#if MALI_USE_CSF
+#define KBASE_REG_ZONE_MAX 6ul
+#else
#define KBASE_REG_ZONE_MAX 4ul
+#endif
#include "mali_kbase_hwaccess_defs.h"
/* Maximum number of pages of memory that require a permanent mapping, per
* kbase_context
*/
-#define KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES ((32 * 1024ul * 1024ul) >> \
- PAGE_SHIFT)
+#define KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES ((64 * 1024ul * 1024ul) >> PAGE_SHIFT)
/* Minimum threshold period for hwcnt dumps between different hwcnt virtualizer
* clients, to reduce undesired system load.
* If a virtualizer client requests a dump within this threshold period after
@@ -152,8 +152,10 @@
* the device node.
* This is dependent on support for of_property_read_u64_array() in the
* kernel.
+ * Note that the number of clocks can exceed the number of regulators,
+ * as mentioned in power_control_init().
*/
-#define BASE_MAX_NR_CLOCKS_REGULATORS (2)
+#define BASE_MAX_NR_CLOCKS_REGULATORS (4)
/* Forward declarations */
struct kbase_context;
@@ -243,11 +245,25 @@
bool protected_mode;
};
+/** Maximum number of memory pages that should be allocated for the array
+ * of pointers to free PGDs.
+ *
+ * This number has been pre-calculated to deal with the maximum allocation
+ * size expressed by the default value of KBASE_MEM_ALLOC_MAX_SIZE.
+ * This is expected to be sufficient for almost all MMU operations.
+ * Any size greater than KBASE_MEM_ALLOC_MAX_SIZE must be broken down
+ * into multiple iterations, each dealing with at most KBASE_MEM_ALLOC_MAX_SIZE
+ * bytes.
+ *
+ * Please update this value if KBASE_MEM_ALLOC_MAX_SIZE changes.
+ */
+#define MAX_PAGES_FOR_FREE_PGDS ((size_t)9)
+
+/* Maximum number of pointers to free PGDs */
+#define MAX_FREE_PGDS ((PAGE_SIZE / sizeof(struct page *)) * MAX_PAGES_FOR_FREE_PGDS)
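A minimal sketch of the arithmetic behind MAX_FREE_PGDS, assuming 4 KiB pages
and 8-byte pointers (typical for a 64-bit kernel, but an assumption here):

    /* One scratch page stores 4096 / 8 = 512 pointers, so 9 pages give
     * room for 512 * 9 = 4608 PGD pointers per MMU operation.
     */
    _Static_assert((4096u / sizeof(void *)) * 9 == 4608,
                   "free-PGD scratch capacity (assumes 64-bit pointers)");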
+
/**
* struct kbase_mmu_table - object representing a set of GPU page tables
- * @mmu_teardown_pages: Buffer of 4 Pages in size, used to cache the entries
- * of top & intermediate level page tables to avoid
- * repeated calls to kmap_atomic during the MMU teardown.
* @mmu_lock: Lock to serialize the accesses made to multi level GPU
* page tables
* @pgd: Physical address of the page allocated for the top
@@ -259,14 +275,55 @@
* Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
* @kctx: If this set of MMU tables belongs to a context then
* this is a back-reference to the context, otherwise
- * it is NULL
+ * it is NULL.
+ * @scratch_mem: Scratch memory used for MMU operations, which are
+ * serialized by the @mmu_lock.
*/
struct kbase_mmu_table {
- u64 *mmu_teardown_pages;
struct mutex mmu_lock;
phys_addr_t pgd;
u8 group_id;
struct kbase_context *kctx;
+ union {
+ /**
+ * @teardown_pages: Scratch memory used for backup copies of whole
+ * PGD pages when tearing down levels upon
+ * termination of the MMU table.
+ */
+ struct {
+ /**
+ * @levels: Array of PGD pages, large enough to copy one PGD
+ * for each level of the MMU table.
+ */
+ u64 levels[MIDGARD_MMU_BOTTOMLEVEL][PAGE_SIZE / sizeof(u64)];
+ } teardown_pages;
+ /**
+	 * @free_pgds: Scratch memory used for insertion, update and teardown
+ * operations to store a temporary list of PGDs to be freed
+ * at the end of the operation.
+ */
+ struct {
+ /** @pgds: Array of pointers to PGDs to free. */
+ struct page *pgds[MAX_FREE_PGDS];
+ /** @head_index: Index of first free element in the PGDs array. */
+ size_t head_index;
+ } free_pgds;
+ } scratch_mem;
+};
+
+/**
+ * struct kbase_reg_zone - Information about GPU memory region zones
+ * @base_pfn: Page Frame Number in GPU virtual address space for the start of
+ * the Zone
+ * @va_size_pages: Size of the Zone in pages
+ *
+ * Tracks information about a zone; see KBASE_REG_ZONE() and related macros.
+ * In future, this could also store the &rb_root that are currently in
+ * &kbase_context and &kbase_csf_device.
+ */
+struct kbase_reg_zone {
+ u64 base_pfn;
+ u64 va_size_pages;
};
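A minimal sketch of how the two fields describe a zone's GPU VA range; the
helper name is hypothetical and 4 KiB page frames are assumed:

    /* End of the zone (exclusive), as a 4 KiB page frame number. */
    static inline u64 ex_reg_zone_end_pfn(const struct kbase_reg_zone *zone)
    {
        return zone->base_pfn + zone->va_size_pages;
    }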
#if MALI_USE_CSF
@@ -274,6 +331,8 @@
#else
#include "jm/mali_kbase_jm_defs.h"
#endif
+
+#include "mali_kbase_hwaccess_time.h"
static inline int kbase_as_has_bus_fault(struct kbase_as *as,
struct kbase_fault *fault)
@@ -339,8 +398,6 @@
* enumerated GPU clock.
* @clk_rate_trace_ops: Pointer to the platform specific GPU clock rate trace
* operations.
- * @gpu_clk_rate_trace_write: Pointer to the function that would emit the
- * tracepoint for the clock rate change.
* @listeners: List of listener attached.
* @lock: Lock to serialize the actions of GPU clock rate trace
* manager.
@@ -355,39 +412,49 @@
/**
* struct kbase_pm_device_data - Data stored per device for power management.
- * @lock: The lock protecting Power Management structures accessed outside of
- * IRQ.
- * This lock must also be held whenever the GPU is being powered on or
- * off.
- * @active_count: The reference count of active contexts on this device. Note
- * that some code paths keep shaders/the tiler powered whilst this is 0.
- * Use kbase_pm_is_active() instead to check for such cases.
+ * @lock: The lock protecting Power Management structures accessed
+ * outside of IRQ.
+ * This lock must also be held whenever the GPU is being
+ * powered on or off.
+ * @active_count: The reference count of active contexts on this device.
+ * Note that some code paths keep shaders/the tiler
+ * powered whilst this is 0.
+ * Use kbase_pm_is_active() instead to check for such cases.
* @suspending: Flag indicating suspending/suspended
+ * @runtime_active: Flag to track if the GPU is in runtime suspended or active
+ * state. This ensures that runtime_put and runtime_get
+ * functions are called in pairs. For example if runtime_get
+ * has already been called from the power_on callback, then
+ * the call to it from runtime_gpu_active callback can be
+ * skipped.
* @gpu_lost: Flag indicating gpu lost
- * This structure contains data for the power management framework. There
- * is one instance of this structure per device in the system.
+ * This structure contains data for the power management framework.
+ * There is one instance of this structure per device in the system.
* @zero_active_count_wait: Wait queue set when active_count == 0
* @resume_wait: Wait queue to wait for the system resume of the GPU device.
* @debug_core_mask: Bit masks identifying the available shader cores that are
- * specified via sysfs. One mask per job slot.
+ * specified via sysfs. One mask per job slot.
* @debug_core_mask_all: Bit masks identifying the available shader cores that
- * are specified via sysfs.
+ * are specified via sysfs.
* @callback_power_runtime_init: Callback for initializing the runtime power
- * management. Return 0 on success, else error code
+ * management. Return 0 on success, else error code
* @callback_power_runtime_term: Callback for terminating the runtime power
- * management.
+ * management.
* @dvfs_period: Time in milliseconds between each dvfs sample
* @backend: KBase PM backend data
* @arb_vm_state: The state of the arbiter VM machine
* @gpu_users_waiting: Used by virtualization to notify the arbiter that there
- * are users waiting for the GPU so that it can request and resume the
- * driver.
+ * are users waiting for the GPU so that it can request
+ * and resume the driver.
* @clk_rtm: The state of the GPU clock rate trace manager
*/
struct kbase_pm_device_data {
struct mutex lock;
int active_count;
bool suspending;
+#if MALI_USE_CSF
+ bool runtime_active;
+#endif
#ifdef CONFIG_MALI_ARBITER_SUPPORT
atomic_t gpu_lost;
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
@@ -415,36 +482,40 @@
/**
* struct kbase_mem_pool - Page based memory pool for kctx/kbdev
- * @kbdev: Kbase device where memory is used
- * @cur_size: Number of free pages currently in the pool (may exceed
- * @max_size in some corner cases)
- * @max_size: Maximum number of free pages in the pool
- * @order: order = 0 refers to a pool of 4 KB pages
- * order = 9 refers to a pool of 2 MB pages (2^9 * 4KB = 2 MB)
- * @group_id: A memory group ID to be passed to a platform-specific
- * memory group manager, if present. Immutable.
- * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
- * @pool_lock: Lock protecting the pool - must be held when modifying
- * @cur_size and @page_list
- * @page_list: List of free pages in the pool
- * @reclaim: Shrinker for kernel reclaim of free pages
- * @next_pool: Pointer to next pool where pages can be allocated when this
- * pool is empty. Pages will spill over to the next pool when
- * this pool is full. Can be NULL if there is no next pool.
- * @dying: true if the pool is being terminated, and any ongoing
- * operations should be abandoned
- * @dont_reclaim: true if the shrinker is forbidden from reclaiming memory from
- * this pool, eg during a grow operation
+ * @kbdev: Kbase device where memory is used
+ * @cur_size: Number of free pages currently in the pool (may exceed
+ * @max_size in some corner cases)
+ * @max_size: Maximum number of free pages in the pool
+ * @order: order = 0 refers to a pool of 4 KB pages
+ * order = 9 refers to a pool of 2 MB pages (2^9 * 4KB = 2 MB)
+ * @group_id: A memory group ID to be passed to a platform-specific
+ * memory group manager, if present. Immutable.
+ * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ * @pool_lock: Lock protecting the pool - must be held when modifying
+ * @cur_size and @page_list
+ * @page_list: List of free pages in the pool
+ * @reclaim: Shrinker for kernel reclaim of free pages
+ * @isolation_in_progress_cnt: Number of pages in pool undergoing page isolation.
+ *                             This is used to avoid a race condition between pool termination
+ * and page isolation for page migration.
+ * @next_pool: Pointer to next pool where pages can be allocated when this
+ * pool is empty. Pages will spill over to the next pool when
+ * this pool is full. Can be NULL if there is no next pool.
+ * @dying: true if the pool is being terminated, and any ongoing
+ * operations should be abandoned
+ * @dont_reclaim: true if the shrinker is forbidden from reclaiming memory from
+ *                this pool, e.g. during a grow operation
*/
struct kbase_mem_pool {
struct kbase_device *kbdev;
- size_t cur_size;
- size_t max_size;
- u8 order;
- u8 group_id;
- spinlock_t pool_lock;
- struct list_head page_list;
- struct shrinker reclaim;
+ size_t cur_size;
+ size_t max_size;
+ u8 order;
+ u8 group_id;
+ spinlock_t pool_lock;
+ struct list_head page_list;
+ struct shrinker reclaim;
+ atomic_t isolation_in_progress_cnt;
struct kbase_mem_pool *next_pool;
@@ -455,16 +526,16 @@
/**
* struct kbase_mem_pool_group - a complete set of physical memory pools.
*
+ * @small: Array of objects containing the state for pools of 4 KiB size
+ * physical pages.
+ * @large: Array of objects containing the state for pools of 2 MiB size
+ * physical pages.
+ *
* Memory pools are used to allow efficient reallocation of previously-freed
* physical pages. A pair of memory pools is initialized for each physical
* memory group: one for 4 KiB pages and one for 2 MiB pages. These arrays
* should be indexed by physical memory group ID, the meaning of which is
* defined by the systems integrator.
- *
- * @small: Array of objects containing the state for pools of 4 KiB size
- * physical pages.
- * @large: Array of objects containing the state for pools of 2 MiB size
- * physical pages.
*/
struct kbase_mem_pool_group {
struct kbase_mem_pool small[MEMORY_GROUP_MANAGER_NR_GROUPS];
@@ -485,11 +556,11 @@
* struct kbase_mem_pool_group_config - Initial configuration for a complete
* set of physical memory pools
*
- * This array should be indexed by physical memory group ID, the meaning
- * of which is defined by the systems integrator.
- *
* @small: Array of initial configuration for pools of 4 KiB pages.
* @large: Array of initial configuration for pools of 2 MiB pages.
+ *
+ * This array should be indexed by physical memory group ID, the meaning
+ * of which is defined by the systems integrator.
*/
struct kbase_mem_pool_group_config {
struct kbase_mem_pool_config small[MEMORY_GROUP_MANAGER_NR_GROUPS];
@@ -529,8 +600,11 @@
* @entry_set_ate: program the pte to be a valid address translation entry to
* encode the physical address of the actual page being mapped.
* @entry_set_pte: program the pte to be a valid entry to encode the physical
- * address of the next lower level page table.
- * @entry_invalidate: clear out or invalidate the pte.
+ * address of the next lower level page table and also update
+ * the number of valid entries.
+ * @entries_invalidate: clear out or invalidate a range of ptes.
+ * @get_num_valid_entries: returns the number of valid entries for a specific pgd.
+ * @set_num_valid_entries: sets the number of valid entries for a specific pgd.
* @flags: bitmask of MMU mode flags. Refer to KBASE_MMU_MODE_ constants.
*/
struct kbase_mmu_mode {
@@ -546,7 +620,10 @@
void (*entry_set_ate)(u64 *entry, struct tagged_addr phy,
unsigned long flags, int level);
void (*entry_set_pte)(u64 *entry, phys_addr_t phy);
- void (*entry_invalidate)(u64 *entry);
+ void (*entries_invalidate)(u64 *entry, u32 count);
+ unsigned int (*get_num_valid_entries)(u64 *pgd);
+ void (*set_num_valid_entries)(u64 *pgd,
+ unsigned int num_of_valid_entries);
unsigned long flags;
};
@@ -611,6 +688,33 @@
};
/**
+ * struct kbase_mem_migrate - Object representing an instance for managing
+ * page migration.
+ *
+ * @free_pages_list: List of deferred pages to free, mostly used when page migration
+ *                   is enabled. Pages in a memory pool that require migrating
+ *                   are freed via this list instead: a page cannot be freed
+ *                   right away, as Linux first needs to release the page lock,
+ *                   so the page is added to this list and freed later.
+ * @free_pages_lock: This lock should be held when adding or removing pages
+ * from @free_pages_list.
+ * @free_pages_workq: Work queue to process the work items queued to free
+ * pages in @free_pages_list.
+ * @free_pages_work: Work item to free pages in @free_pages_list.
+ * @inode: Pointer to inode whose address space operations are used
+ * for page migration purposes.
+ */
+struct kbase_mem_migrate {
+ struct list_head free_pages_list;
+ spinlock_t free_pages_lock;
+ struct workqueue_struct *free_pages_workq;
+ struct work_struct free_pages_work;
+#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
+ struct inode *inode;
+#endif
+};
+
+/**
* struct kbase_device - Object representing an instance of GPU platform device,
* allocated from the probe method of mali driver.
* @hw_quirks_sc: Configuration to be used for the shader cores as per
@@ -641,6 +745,7 @@
* @irqs.flags: irq flags
* @clocks: Pointer to the input clock resources referenced by
* the GPU device node.
+ * @scmi_clk: Pointer to the input scmi clock resources
* @nr_clocks: Number of clocks set in the clocks array.
* @regulators: Pointer to the structs corresponding to the
* regulators referenced by the GPU device node.
@@ -648,6 +753,10 @@
* @opp_table: Pointer to the device OPP structure maintaining the
* link to OPPs attached to a device. This is obtained
* after setting regulator names for the device.
+ * @token: Integer replacement for opp_table in kernel versions
+ *         6 and greater. Value is a token id number when 0 or greater,
+ *         and a Linux errno when negative. Must be initialised
+ *         to a non-zero value, as 0 is a valid token id.
* @devname: string containing the name used for GPU device instance,
* miscellaneous device is registered using the same name.
* @id: Unique identifier for the device, indicates the number of
@@ -694,6 +803,8 @@
* GPU adrress spaces assigned to them.
* @mmu_mask_change: Lock to serialize the access to MMU interrupt mask
* register used in the handling of Bus & Page faults.
+ * @pagesize_2mb: Boolean to determine whether 2MiB page sizes are
+ * supported and used where possible.
* @gpu_props: Object containing complete information about the
* configuration/properties of GPU HW device in use.
* @hw_issues_mask: List of SW workarounds for HW issues
@@ -716,33 +827,35 @@
* @hwcnt.addr: HW counter address
* @hwcnt.addr_bytes: HW counter size in bytes
* @hwcnt.backend: Kbase instrumentation backend
+ * @hwcnt_gpu_jm_backend: Job manager GPU backend interface, used as superclass reference
+ * pointer by hwcnt_gpu_iface, which wraps this implementation in
+ * order to extend it with periodic dumping functionality.
* @hwcnt_gpu_iface: Backend interface for GPU hardware counter access.
+ * @hwcnt_watchdog_timer: Watchdog interface, used by the GPU backend hwcnt_gpu_iface to
+ * perform periodic dumps in order to prevent hardware counter value
+ * overflow or saturation.
* @hwcnt_gpu_ctx: Context for GPU hardware counter access.
* @hwaccess_lock must be held when calling
* kbase_hwcnt_context_enable() with @hwcnt_gpu_ctx.
* @hwcnt_gpu_virt: Virtualizer for GPU hardware counters.
* @vinstr_ctx: vinstr context created per device.
+ * @kinstr_prfcnt_ctx: kinstr_prfcnt context created per device.
* @timeline_flags: Bitmask defining which sets of timeline tracepoints
* are enabled. If zero, there is no timeline client and
* therefore timeline is disabled.
* @timeline: Timeline context created per device.
* @ktrace: kbase device's ktrace
- * @trace_lock: Lock to serialize the access to trace buffer.
- * @trace_first_out: Index/offset in the trace buffer at which the first
- * unread message is present.
- * @trace_next_in: Index/offset in the trace buffer at which the new
- * message will be written.
- * @trace_rbuf: Pointer to the buffer storing debug messages/prints
- * tracing the various events in Driver.
- * The buffer is filled in circular fashion.
* @reset_timeout_ms: Number of milliseconds to wait for the soft stop to
* complete for the GPU jobs before proceeding with the
* GPU reset.
+ * @lowest_gpu_freq_khz: Lowest frequency in KHz that the GPU can run at. Used
+ * to calculate suitable timeouts for wait operations.
+ * @backend_time: Kbase backend time related attributes.
* @cache_clean_in_progress: Set when a cache clean has been started, and
* cleared when it has finished. This prevents multiple
* cache cleans being done simultaneously.
- * @cache_clean_queued: Set if a cache clean is invoked while another is in
- * progress. If this happens, another cache clean needs
+ * @cache_clean_queued: Count of cache clean operations invoked while another is
+ *                      in progress. If this is not 0, another cache clean needs
* to be triggered immediately after completion of the
* current one.
* @cache_clean_wait: Signalled when a cache clean has finished.
@@ -752,8 +865,6 @@
* including any contexts that might be created for
* hardware counters.
* @kctx_list_lock: Lock protecting concurrent accesses to @kctx_list.
- * @group_max_uid_in_devices: Max value of any queue group UID in any kernel
- * context in the kbase device.
* @devfreq_profile: Describes devfreq profile for the Mali GPU device, passed
* to devfreq_add_device() to add devfreq feature to Mali
* GPU device.
@@ -839,6 +950,17 @@
* backend specific data for HW access layer.
* @faults_pending: Count of page/bus faults waiting for bottom half processing
* via workqueues.
+ * @mmu_hw_operation_in_progress: Set before sending the MMU command and is
+ * cleared after the command is complete. Whilst this
+ * flag is set, the write to L2_PWROFF register will be
+ * skipped which is needed to workaround the HW issue
+ *                             skipped, which is needed to work around the HW issue
+ * clearing this flag and @hwaccess_lock is used to
+ * serialize the access.
+ * @mmu_page_migrate_in_progress: Set before starting an MMU page migration transaction
+ * and cleared after the transaction completes. PM L2 state is
+ * prevented from entering powering up/down transitions when the
+ * flag is set, @hwaccess_lock is used to serialize the access.
* @poweroff_pending: Set when power off operation for GPU is started, reset when
* power on for GPU is started.
* @infinite_cache_active_default: Set to enable using infinite cache for all the
@@ -868,9 +990,6 @@
* enabled.
* @protected_mode_hwcnt_disable_work: Work item to disable GPU hardware
* counters, used if atomic disable is not possible.
- * @buslogger: Pointer to the structure required for interfacing
- * with the bus logger module to set the size of buffer
- * used by the module for capturing bus logs.
* @irq_reset_flush: Flag to indicate that GPU reset is in-flight and flush of
* IRQ + bottom half is being done, to prevent the writes
* to MMU_IRQ_CLEAR & MMU_IRQ_MASK registers.
@@ -891,6 +1010,10 @@
* @l2_hash_override: Used to set L2 cache hash via device tree blob
* @l2_hash_values_override: true if @l2_hash_values is valid.
* @l2_hash_values: Used to set L2 asn_hash via device tree blob
+ * @sysc_alloc: Array containing values to be programmed into
+ * SYSC_ALLOC[0..7] GPU registers on L2 cache
+ * power down. These come from either DTB or
+ * via DebugFS (if it is available in kernel).
* @process_root: rb_tree root node for maintaining a rb_tree of
* kbase_process based on key tgid(thread group ID).
* @dma_buf_root: rb_tree root node for maintaining a rb_tree of
@@ -917,6 +1040,14 @@
* @pcm_dev: The priority control manager device.
* @oom_notifier_block: notifier_block containing kernel-registered out-of-
* memory handler.
+ * @mem_migrate: Per device object for managing page migration.
+ * @live_fence_metadata: Count of live fence metadata structures created by
+ *                       KCPU queues. These structures may outlive the kbase
+ *                       module itself; in that case, a warning should be
+ *                       produced.
+ * @mmu_as_inactive_wait_time_ms: Maximum waiting time in ms for the completion of
+ *                                an MMU operation.
+ * @va_region_slab: kmem_cache (slab) for allocated kbase_va_region structures.
*/
struct kbase_device {
u32 hw_quirks_sc;
@@ -941,13 +1072,24 @@
#if IS_ENABLED(CONFIG_REGULATOR)
struct regulator *regulators[BASE_MAX_NR_CLOCKS_REGULATORS];
unsigned int nr_regulators;
-#if (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE)
+#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
+ int token;
+#elif (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE)
struct opp_table *opp_table;
-#endif /* (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE */
+#endif /* (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE) */
#endif /* CONFIG_REGULATOR */
char devname[DEVNAME_SIZE];
u32 id;
+#if !IS_ENABLED(CONFIG_MALI_REAL_HW)
+ void *model;
+ struct kmem_cache *irq_slab;
+ struct workqueue_struct *irq_workq;
+ atomic_t serving_job_irq;
+ atomic_t serving_gpu_irq;
+ atomic_t serving_mmu_irq;
+ spinlock_t reg_op_lock;
+#endif /* !IS_ENABLED(CONFIG_MALI_REAL_HW) */
struct kbase_pm_device_data pm;
struct kbase_mem_pool_group mem_pools;
@@ -957,10 +1099,12 @@
struct memory_group_manager_device *mgm_dev;
struct kbase_as as[BASE_MAX_NR_AS];
- u16 as_free; /* Bitpattern of free Address Spaces */
+ u16 as_free;
struct kbase_context *as_to_kctx[BASE_MAX_NR_AS];
spinlock_t mmu_mask_change;
+
+ bool pagesize_2mb;
struct kbase_gpu_props gpu_props;
@@ -975,6 +1119,12 @@
s8 nr_hw_address_spaces;
s8 nr_user_address_spaces;
+ /**
+ * @pbha_propagate_bits: Record of Page-Based Hardware Attribute Propagate bits to
+ * restore to L2_CONFIG upon GPU reset.
+ */
+ u8 pbha_propagate_bits;
+
#if MALI_USE_CSF
struct kbase_hwcnt_backend_csf_if hwcnt_backend_csf_if_fw;
#else
@@ -987,12 +1137,17 @@
struct kbase_instr_backend backend;
} hwcnt;
+
+ struct kbase_hwcnt_backend_interface hwcnt_gpu_jm_backend;
#endif
struct kbase_hwcnt_backend_interface hwcnt_gpu_iface;
+ struct kbase_hwcnt_watchdog_interface hwcnt_watchdog_timer;
+
struct kbase_hwcnt_context *hwcnt_gpu_ctx;
struct kbase_hwcnt_virtualizer *hwcnt_gpu_virt;
struct kbase_vinstr_context *vinstr_ctx;
+ struct kbase_kinstr_prfcnt_context *kinstr_prfcnt_ctx;
atomic_t timeline_flags;
struct kbase_timeline *timeline;
@@ -1002,31 +1157,39 @@
#endif
u32 reset_timeout_ms;
+ u64 lowest_gpu_freq_khz;
+
+#if MALI_USE_CSF
+ struct kbase_backend_time backend_time;
+#endif
+
bool cache_clean_in_progress;
- bool cache_clean_queued;
+ u32 cache_clean_queued;
wait_queue_head_t cache_clean_wait;
void *platform_context;
struct list_head kctx_list;
struct mutex kctx_list_lock;
- atomic_t group_max_uid_in_devices;
+ struct rockchip_opp_info opp_info;
+ bool is_runtime_resumed;
+ unsigned long current_nominal_freq;
+ struct monitor_dev_info *mdev_info;
#ifdef CONFIG_MALI_BIFROST_DEVFREQ
struct devfreq_dev_profile devfreq_profile;
struct devfreq *devfreq;
unsigned long current_freqs[BASE_MAX_NR_CLOCKS_REGULATORS];
- unsigned long current_nominal_freq;
unsigned long current_voltages[BASE_MAX_NR_CLOCKS_REGULATORS];
u64 current_core_mask;
struct kbase_devfreq_opp *devfreq_table;
int num_opps;
struct kbasep_pm_metrics last_devfreq_metrics;
- struct monitor_dev_info *mdev_info;
struct ipa_power_model_data *model_data;
struct kbase_devfreq_queue_info devfreq_queue;
#if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
+ struct devfreq_cooling_power dfc_power;
struct thermal_cooling_device *devfreq_cooling;
bool ipa_protection_mode_switched;
struct {
@@ -1052,7 +1215,9 @@
#endif /* CONFIG_MALI_BIFROST_DEVFREQ */
unsigned long previous_frequency;
+#if !MALI_USE_CSF
atomic_t job_fault_debug;
+#endif /* !MALI_USE_CSF */
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *mali_debugfs_directory;
@@ -1063,11 +1228,13 @@
u64 debugfs_as_read_bitmap;
#endif /* CONFIG_MALI_BIFROST_DEBUG */
+#if !MALI_USE_CSF
wait_queue_head_t job_fault_wq;
wait_queue_head_t job_fault_resume_wq;
struct workqueue_struct *job_fault_resume_workq;
struct list_head job_fault_event_list;
spinlock_t job_fault_event_lock;
+#endif /* !MALI_USE_CSF */
#if !MALI_CUSTOMER_RELEASE
struct {
@@ -1086,13 +1253,14 @@
atomic_t faults_pending;
+#if MALI_USE_CSF
+ bool mmu_hw_operation_in_progress;
+#endif
+ bool mmu_page_migrate_in_progress;
bool poweroff_pending;
-#if (KERNEL_VERSION(4, 4, 0) <= LINUX_VERSION_CODE)
bool infinite_cache_active_default;
-#else
- u32 infinite_cache_active_default;
-#endif
+
struct kbase_mem_pool_group_config mem_pool_defaults;
u32 current_gpu_coherency_mode;
@@ -1130,6 +1298,8 @@
u8 l2_hash_override;
bool l2_hash_values_override;
u32 l2_hash_values[ASN_HASH_COUNT];
+
+ u32 sysc_alloc[SYSC_ALLOC_COUNT];
struct mutex fw_load_lock;
#if MALI_USE_CSF
@@ -1172,6 +1342,27 @@
struct priority_control_manager_device *pcm_dev;
struct notifier_block oom_notifier_block;
+
+#if !MALI_USE_CSF
+ spinlock_t quick_reset_lock;
+ bool quick_reset_enabled;
+ /*
+	 * Once quick_reset_mode has been entered (quick_reset_enabled is
+	 * true), this counts the atoms that have already reached the
+	 * KBASE_JD_ATOM_STATE_HW_COMPLETED state.
+	 *
+	 * When num_of_atoms_hw_completed reaches a certain threshold,
+	 * quick_reset_mode is exited; see the references to
+	 * num_of_atoms_hw_completed in kbase_js_complete_atom().
+ */
+ u32 num_of_atoms_hw_completed;
+#endif
+
+ struct kbase_mem_migrate mem_migrate;
+
+#if MALI_USE_CSF && IS_ENABLED(CONFIG_SYNC_FILE)
+ atomic_t live_fence_metadata;
+#endif
+ u32 mmu_as_inactive_wait_time_ms;
+ struct kmem_cache *va_region_slab;
};
/**
@@ -1254,10 +1445,6 @@
*
* @KCTX_DYING: Set when the context process is in the process of being evicted.
*
- * @KCTX_NO_IMPLICIT_SYNC: Set when explicit Android fences are in use on this
- * context, to disable use of implicit dma-buf fences. This is used to avoid
- * potential synchronization deadlocks.
- *
* @KCTX_FORCE_SAME_VA: Set when BASE_MEM_SAME_VA should be forced on memory
* allocations. For 64-bit clients it is enabled by default, and disabled by
* default on 32-bit clients. Being able to clear this flag is only used for
@@ -1300,7 +1487,6 @@
KCTX_PRIVILEGED = 1U << 7,
KCTX_SCHEDULED = 1U << 8,
KCTX_DYING = 1U << 9,
- KCTX_NO_IMPLICIT_SYNC = 1U << 10,
KCTX_FORCE_SAME_VA = 1U << 11,
KCTX_PULLED_SINCE_ACTIVE_JS0 = 1U << 12,
KCTX_PULLED_SINCE_ACTIVE_JS1 = 1U << 13,
@@ -1339,9 +1525,6 @@
*
* @KCTX_DYING: Set when the context process is in the process of being evicted.
*
- * @KCTX_NO_IMPLICIT_SYNC: Set when explicit Android fences are in use on this
- * context, to disable use of implicit dma-buf fences. This is used to avoid
- * potential synchronization deadlocks.
*
* @KCTX_FORCE_SAME_VA: Set when BASE_MEM_SAME_VA should be forced on memory
* allocations. For 64-bit clients it is enabled by default, and disabled by
@@ -1382,7 +1565,6 @@
KCTX_PRIVILEGED = 1U << 7,
KCTX_SCHEDULED = 1U << 8,
KCTX_DYING = 1U << 9,
- KCTX_NO_IMPLICIT_SYNC = 1U << 10,
KCTX_FORCE_SAME_VA = 1U << 11,
KCTX_PULLED_SINCE_ACTIVE_JS0 = 1U << 12,
KCTX_PULLED_SINCE_ACTIVE_JS1 = 1U << 13,
@@ -1395,21 +1577,6 @@
struct list_head link;
struct page *page;
DECLARE_BITMAP(sub_pages, SZ_2M / SZ_4K);
-};
-
-/**
- * struct kbase_reg_zone - Information about GPU memory region zones
- * @base_pfn: Page Frame Number in GPU virtual address space for the start of
- * the Zone
- * @va_size_pages: Size of the Zone in pages
- *
- * Track information about a zone KBASE_REG_ZONE() and related macros.
- * In future, this could also store the &rb_root that are currently in
- * &kbase_context
- */
-struct kbase_reg_zone {
- u64 base_pfn;
- u64 va_size_pages;
};
/**
@@ -1449,8 +1616,8 @@
* @mem_partials_lock: Lock for protecting the operations done on the elements
* added to @mem_partials list.
* @mem_partials: List head for the list of large pages, 2MB in size, which
- * which have been split into 4 KB pages and are used
- * partially for the allocations >= 2 MB in size.
+ * have been split into 4 KB pages and are used partially
+ * for the allocations >= 2 MB in size.
* @reg_lock: Lock used for GPU virtual address space management operations,
* like adding/freeing a memory region in the address space.
* Can be converted to a rwlock?
@@ -1462,6 +1629,17 @@
* @reg_rbtree_exec: RB tree of the memory regions allocated from the EXEC_VA
* zone of the GPU virtual address space. Used for GPU-executable
* allocations which don't need the SAME_VA property.
+ * @reg_rbtree_exec_fixed: RB tree of the memory regions allocated from the
+ * EXEC_FIXED_VA zone of the GPU virtual address space. Used for
+ * GPU-executable allocations with FIXED/FIXABLE GPU virtual
+ * addresses.
+ * @reg_rbtree_fixed: RB tree of the memory regions allocated from the FIXED_VA zone
+ * of the GPU virtual address space. Used for allocations with
+ * FIXED/FIXABLE GPU virtual addresses.
+ * @num_fixable_allocs: A count for the number of memory allocations with the
+ * BASE_MEM_FIXABLE property.
+ * @num_fixed_allocs: A count for the number of memory allocations with the
+ * BASE_MEM_FIXED property.
* @reg_zone: Zone information for the reg_rbtree_<...> members.
* @cookies: Bitmask containing of BITS_PER_LONG bits, used mainly for
* SAME_VA allocations to defer the reservation of memory region
@@ -1538,12 +1716,20 @@
* is scheduled in and an atom is pulled from the context's per
* slot runnable tree in JM GPU or GPU command queue
* group is programmed on CSG slot in CSF GPU.
- * @mm_update_lock: lock used for handling of special tracking page.
* @process_mm: Pointer to the memory descriptor of the process which
* created the context. Used for accounting the physical
* pages used for GPU allocations, done for the context,
- * to the memory consumed by the process.
+ * to the memory consumed by the process. A reference is taken
+ * on this descriptor for Userspace-created contexts so that
+ * Kbase can safely access it to update the memory usage counters.
+ * The reference is dropped on context termination.
* @gpu_va_end: End address of the GPU va space (in 4KB page units)
+ * @running_total_tiler_heap_nr_chunks: Running total of number of chunks in all
+ * tiler heaps of the kbase context.
+ * @running_total_tiler_heap_memory: Running total of the tiler heap memory in the
+ * kbase context.
+ * @peak_total_tiler_heap_memory: Peak value of the total tiler heap memory in the
+ * kbase context.
* @jit_va: Indicates if a JIT_VA zone has been created.
* @mem_profile_data: Buffer containing the profiling information provided by
* Userspace, can be read through the mem_profile debugfs file.
@@ -1559,29 +1745,19 @@
* dumping of its debug info is in progress.
* @job_fault_resume_event_list: List containing atoms completed after the faulty
* atom but before the debug data for faulty atom was dumped.
+ * @mem_view_column_width: Controls the number of bytes shown in every column of the
+ * output of "mem_view" debugfs file.
* @jsctx_queue: Per slot & priority arrays of object containing the root
* of RB-tree holding currently runnable atoms on the job slot
* and the head item of the linked list of atoms blocked on
* cross-slot dependencies.
- * @atoms_pulled: Total number of atoms currently pulled from the context.
- * @atoms_pulled_slot: Per slot count of the number of atoms currently pulled
- * from the context.
- * @atoms_pulled_slot_pri: Per slot & priority count of the number of atoms currently
- * pulled from the context. hwaccess_lock shall be held when
- * accessing it.
- * @blocked_js: Indicates if the context is blocked from submitting atoms
- * on a slot at a given priority. This is set to true, when
- * the atom corresponding to context is soft/hard stopped or
- * removed from the HEAD_NEXT register in response to
- * soft/hard stop.
+ * @slot_tracking: Tracking and control of this context's use of all job
+ * slots
+ * @atoms_pulled_all_slots: Total number of atoms currently pulled from the
+ * context, across all slots.
* @slots_pullable: Bitmask of slots, indicating the slots for which the
* context has pullable atoms in the runnable tree.
* @work: Work structure used for deferred ASID assignment.
- * @legacy_hwcnt_cli: Pointer to the legacy userspace hardware counters
- * client, there can be only such client per kbase
- * context.
- * @legacy_hwcnt_lock: Lock used to prevent concurrent access to
- * @legacy_hwcnt_cli.
* @completed_jobs: List containing completed atoms for which base_jd_event is
* to be posted.
* @work_count: Number of work items, corresponding to atoms, currently
@@ -1597,12 +1773,6 @@
* memory allocations.
* @jit_current_allocations_per_bin: Current number of in-flight just-in-time
* memory allocations per bin.
- * @jit_version: Version number indicating whether userspace is using
- * old or new version of interface for just-in-time
- * memory allocations.
- * 1 -> client used KBASE_IOCTL_MEM_JIT_INIT_10_2
- * 2 -> client used KBASE_IOCTL_MEM_JIT_INIT_11_5
- * 3 -> client used KBASE_IOCTL_MEM_JIT_INIT
* @jit_group_id: A memory group ID to be passed to a platform-specific
* memory group manager.
* Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
@@ -1674,6 +1844,10 @@
* @limited_core_mask: The mask that is applied to the affinity in case of atoms
* marked with BASE_JD_REQ_LIMITED_CORE_MASK.
* @platform_data: Pointer to platform specific per-context data.
+ * @task: Pointer to the task structure of the main thread of the process
+ * that created the Kbase context. It would be set only for the
+ * contexts created by the Userspace and not for the contexts
+ * created internally by the Kbase.
*
* A kernel base context is an entity among which the GPU is scheduled.
* Each context has its own GPU address space.
@@ -1711,6 +1885,12 @@
struct rb_root reg_rbtree_same;
struct rb_root reg_rbtree_custom;
struct rb_root reg_rbtree_exec;
+#if MALI_USE_CSF
+ struct rb_root reg_rbtree_exec_fixed;
+ struct rb_root reg_rbtree_fixed;
+ atomic64_t num_fixable_allocs;
+ atomic64_t num_fixed_allocs;
+#endif
struct kbase_reg_zone reg_zone[KBASE_REG_ZONE_MAX];
#if MALI_USE_CSF
@@ -1719,17 +1899,14 @@
struct kbase_jd_context jctx;
struct jsctx_queue jsctx_queue
[KBASE_JS_ATOM_SCHED_PRIO_COUNT][BASE_JM_MAX_NR_SLOTS];
+ struct kbase_jsctx_slot_tracking slot_tracking[BASE_JM_MAX_NR_SLOTS];
+ atomic_t atoms_pulled_all_slots;
struct list_head completed_jobs;
atomic_t work_count;
struct timer_list soft_job_timeout;
- atomic_t atoms_pulled;
- atomic_t atoms_pulled_slot[BASE_JM_MAX_NR_SLOTS];
- int atoms_pulled_slot_pri[BASE_JM_MAX_NR_SLOTS][
- KBASE_JS_ATOM_SCHED_PRIO_COUNT];
int priority;
- bool blocked_js[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
s16 atoms_count[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
u32 slots_pullable;
u32 age_count;
@@ -1753,20 +1930,18 @@
struct list_head waiting_soft_jobs;
spinlock_t waiting_soft_jobs_lock;
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
- struct {
- struct list_head waiting_resource;
- struct workqueue_struct *wq;
- } dma_fence;
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
int as_nr;
atomic_t refcount;
- spinlock_t mm_update_lock;
- struct mm_struct __rcu *process_mm;
+ struct mm_struct *process_mm;
u64 gpu_va_end;
+#if MALI_USE_CSF
+ u32 running_total_tiler_heap_nr_chunks;
+ u64 running_total_tiler_heap_memory;
+ u64 peak_total_tiler_heap_memory;
+#endif
bool jit_va;
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -1778,17 +1953,13 @@
unsigned int *reg_dump;
atomic_t job_fault_count;
struct list_head job_fault_resume_event_list;
+ unsigned int mem_view_column_width;
#endif /* CONFIG_DEBUG_FS */
-
- struct kbase_hwcnt_legacy_client *legacy_hwcnt_cli;
- struct mutex legacy_hwcnt_lock;
-
struct kbase_va_region *jit_alloc[1 + BASE_JIT_ALLOC_COUNT];
u8 jit_max_allocations;
u8 jit_current_allocations;
u8 jit_current_allocations_per_bin[256];
- u8 jit_version;
u8 jit_group_id;
#if MALI_JIT_PRESSURE_LIMIT_BASE
u64 jit_phys_pages_limit;
@@ -1827,6 +1998,8 @@
#if !MALI_USE_CSF
void *platform_data;
#endif
+
+ struct task_struct *task;
};
#ifdef CONFIG_MALI_CINSTR_GWT
@@ -1855,17 +2028,15 @@
* to a @kbase_context.
* @ext_res_node: List head for adding the metadata to a
* @kbase_context.
- * @alloc: The physical memory allocation structure
- * which is mapped.
- * @gpu_addr: The GPU virtual address the resource is
- * mapped to.
+ * @reg: External resource information, containing
+ * the corresponding VA region
* @ref: Reference count.
*
* External resources can be mapped into multiple contexts as well as the same
* context multiple times.
- * As kbase_va_region itself isn't refcounted we can't attach our extra
- * information to it as it could be removed under our feet leaving external
- * resources pinned.
+ * As kbase_va_region is refcounted, we guarantee that it will be available
+ * for the duration of the external resource, meaning it is sufficient to use
+ * it to rederive any additional data, like the GPU address.
* This metadata structure binds a single external resource to a single
* context, ensuring that per context mapping is tracked separately so it can
* be overridden when needed and abuses by the application (freeing the resource
@@ -1873,8 +2044,7 @@
*/
struct kbase_ctx_ext_res_meta {
struct list_head ext_res_node;
- struct kbase_mem_phy_alloc *alloc;
- u64 gpu_addr;
+ struct kbase_va_region *reg;
u32 ref;
};
@@ -1904,6 +2074,24 @@
return false;
}
+/**
+ * kbase_get_lock_region_min_size_log2 - Returns the minimum size of the MMU lock
+ * region, as a logarithm
+ *
+ * @gpu_props: GPU properties
+ *
+ * Return: the minimum size of the MMU lock region as dictated by the corresponding
+ * arch spec.
+ */
+static inline u64 kbase_get_lock_region_min_size_log2(struct kbase_gpu_props const *gpu_props)
+{
+ if (GPU_ID2_MODEL_MATCH_VALUE(gpu_props->props.core_props.product_id) >=
+ GPU_ID2_MODEL_MAKE(12, 0))
+ return 12; /* 4 kB */
+
+ return 15; /* 32 kB */
+}
+
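A minimal usage sketch: the returned logarithm converts to a byte size with a
shift (the wrapper name here is hypothetical):

    /* Minimum MMU lock-region size in bytes: 4 kB on v12+ GPUs, else 32 kB. */
    static inline u64 ex_min_lock_region_bytes(struct kbase_gpu_props const *gpu_props)
    {
        return 1ULL << kbase_get_lock_region_min_size_log2(gpu_props);
    }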
/* Conversion helpers for setting up high resolution timers */
#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime(((u64)(x))*1000000U))
#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
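A minimal usage sketch for these helpers, assuming timer is an already
initialised struct hrtimer:

    /* Arm the timer to fire 500 ms from now, relative to the current time. */
    hrtimer_start(&timer, HR_TIMER_DELAY_MSEC(500), HRTIMER_MODE_REL);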
@@ -1912,5 +2100,6 @@
#define KBASE_CLEAN_CACHE_MAX_LOOPS 100000
/* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */
#define KBASE_AS_INACTIVE_MAX_LOOPS 100000000
-
-#endif /* _KBASE_DEFS_H_ */
+/* Maximum number of loops polling the GPU PRFCNT_ACTIVE bit before we assume the GPU has hung */
+#define KBASE_PRFCNT_ACTIVE_MAX_LOOPS 100000000
+#endif /* _KBASE_DEFS_H_ */
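A minimal sketch of how such MAX_LOOPS constants bound a register polling loop;
the register accessor and flag names below are hypothetical:

    /* Poll until PRFCNT_ACTIVE clears, or give up and declare a GPU hang. */
    u32 loops = 0;
    while ((ex_read_gpu_status() & EX_PRFCNT_ACTIVE) &&
           ++loops < KBASE_PRFCNT_ACTIVE_MAX_LOOPS)
        ;
    if (loops == KBASE_PRFCNT_ACTIVE_MAX_LOOPS)
        ex_report_gpu_hang();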
--
Gitblit v1.6.2