author     Akhil P Oommen <akhilpo@oss.qualcomm.com>   2025-11-18 14:20:41 +0530
committer  Rob Clark <robin.clark@oss.qualcomm.com>    2025-11-18 09:04:01 -0800
commit     288a932008925644d8d0ca69bf7a69a0dce82dc5 (patch)
tree       e8167abf3cc72d92fef880b841a83418a4254e49 /drivers
parent     06cfbca0e1c6e26855564fd48bb3babb9c66f446 (diff)
drm/msm/adreno: Introduce A8x GPU Support
A8x is the next generation of Adreno GPUs, featuring a significant hardware design change. The major update is the introduction of the Slice architecture: slices are, in essence, mini-GPUs within the GPU that process graphics and compute workloads more independently. Also, in addition to the BV and BR pipes we saw in A7x, the CP gains more concurrency through additional pipes.

From a software interface perspective, these changes have a significant impact on the KMD side. First, the GPU register space has been extensively reorganized. Second, to avoid a register space explosion caused by the new slice architecture and the additional pipes, many registers are now virtualized instead of duplicated as in A7x. The KMD must configure an aperture register with the appropriate slice and pipe ID before accessing these virtualized registers.

Signed-off-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
Patchwork: https://patchwork.freedesktop.org/patch/689019/
Message-ID: <20251118-kaana-gpu-support-v4-14-86eeb8e93fb6@oss.qualcomm.com>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
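To illustrate the aperture mechanism described above, here is a minimal, self-contained C sketch (not part of the patch) of the access pattern the KMD follows: program the aperture with a pipe and slice ID, then access the virtualized register. The field shifts, pipe IDs, register offset and helper names used here are placeholders for illustration only; the real helpers are a8xx_aperture_slice_set(), a8xx_write_pipe() and a8xx_read_pipe_slice() in the new a8xx_gpu.c further down, which perform MMIO accesses under a spinlock.

#include <stdint.h>
#include <stdio.h>

/* Placeholder encodings for the aperture fields (illustrative only) */
#define APERTURE_PIPEID(p)   ((uint32_t)(p) << 0)
#define APERTURE_SLICEID(s)  ((uint32_t)(s) << 4)

#define HYPOTHETICAL_VIRT_REG 0x100   /* stand-in for a virtualized register offset */

static uint32_t aperture_reg;         /* models the CP aperture control register */
static uint32_t virt_reg[8][4];       /* models one register instance per pipe/slice */

/* Select which pipe/slice instance subsequent accesses are routed to */
static void aperture_set(uint32_t pipe, uint32_t slice)
{
	aperture_reg = APERTURE_PIPEID(pipe) | APERTURE_SLICEID(slice);
}

/* Write a virtualized register: program the aperture first, then write */
static void write_pipe_reg(uint32_t pipe, uint32_t slice, uint32_t offset, uint32_t val)
{
	aperture_set(pipe, slice);
	if (offset == HYPOTHETICAL_VIRT_REG)
		virt_reg[pipe][slice] = val;
}

int main(void)
{
	/* Same register offset, but different pipe/slice instances behind the aperture */
	write_pipe_reg(1 /* e.g. BR */, 0, HYPOTHETICAL_VIRT_REG, 0xaa);
	write_pipe_reg(2 /* e.g. BV */, 0, HYPOTHETICAL_VIRT_REG, 0xbb);

	printf("BR copy: 0x%x, BV copy: 0x%x\n", virt_reg[1][0], virt_reg[2][0]);
	return 0;
}

In the actual driver the last programmed aperture value is cached and the pipe/slice selection plus the register access are done under a spinlock, so the two steps cannot be interleaved by another CPU.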
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/msm/Makefile                        |    1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c               |    3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c               |  118
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h               |   23
-rw-r--r--  drivers/gpu/drm/msm/adreno/a8xx_gpu.c               | 1202
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h             |    7
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml   |    1
7 files changed, 1321 insertions, 34 deletions
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 7acf2cc13cd0..8aa7d07303fb 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -24,6 +24,7 @@ adreno-y := \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
adreno/a6xx_preempt.o \
+ adreno/a8xx_gpu.o \
adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index cede210a0a78..5c30b3258165 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1174,6 +1174,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
return ret;
}
+ /* Read the slice info on A8x GPUs */
+ a8xx_gpu_get_slice_info(gpu);
+
/* Set the bus quota to a reasonable value for boot */
a6xx_gmu_set_initial_bw(gpu, gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 2f06e7e7d87f..6cd0e8fb91b4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -157,7 +157,7 @@ static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
}
}
-static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -245,14 +245,21 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
}
if (!sysprof) {
- if (!adreno_is_a7xx(adreno_gpu)) {
+ if (!(adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))) {
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
}
- OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
- OUT_RING(ring, 1);
+ if (adreno_is_a8xx(adreno_gpu)) {
+ OUT_PKT4(ring, REG_A8XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ OUT_PKT4(ring, REG_A8XX_RBBM_SLICE_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ } else {
+ OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ }
}
/* Execute the table update */
@@ -281,7 +288,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
* to make sure BV doesn't race ahead while BR is still switching
* pagetables.
*/
- if (adreno_is_a7xx(&a6xx_gpu->base)) {
+ if (adreno_is_a7xx(&a6xx_gpu->base) || adreno_is_a8xx(&a6xx_gpu->base)) {
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
}
@@ -295,20 +302,22 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, CACHE_INVALIDATE);
if (!sysprof) {
+ u32 reg_status = adreno_is_a8xx(adreno_gpu) ?
+ REG_A8XX_RBBM_PERFCTR_SRAM_INIT_STATUS :
+ REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS;
/*
* Wait for SRAM clear after the pgtable update, so the
* two can happen in parallel:
*/
OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
- OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO(
- REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
+ OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO(reg_status));
OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_HI(0));
OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
- if (!adreno_is_a7xx(adreno_gpu)) {
+ if (!(adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))) {
/* Re-enable protected mode: */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
@@ -446,6 +455,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
+ u32 rbbm_perfctr_cp0, cp_always_on_counter;
unsigned int i, ibs = 0;
adreno_check_and_reenable_stall(adreno_gpu);
@@ -466,10 +476,16 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
if (gpu->nr_rings > 1)
a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue);
- get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
- rbmemptr_stats(ring, index, cpcycles_start));
- get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
- rbmemptr_stats(ring, index, alwayson_start));
+ if (adreno_is_a8xx(adreno_gpu)) {
+ rbbm_perfctr_cp0 = REG_A8XX_RBBM_PERFCTR_CP(0);
+ cp_always_on_counter = REG_A8XX_CP_ALWAYS_ON_COUNTER;
+ } else {
+ rbbm_perfctr_cp0 = REG_A7XX_RBBM_PERFCTR_CP(0);
+ cp_always_on_counter = REG_A6XX_CP_ALWAYS_ON_COUNTER;
+ }
+
+ get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_start));
+ get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_start));
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BOTH);
@@ -516,14 +532,17 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, 0x00e); /* IB1LIST end */
}
- get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
- rbmemptr_stats(ring, index, cpcycles_end));
- get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
- rbmemptr_stats(ring, index, alwayson_end));
+ get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
- OUT_PKT4(ring, REG_A6XX_CP_SCRATCH(2), 1);
- OUT_RING(ring, submit->seqno);
+ if (adreno_is_a8xx(adreno_gpu)) {
+ OUT_PKT4(ring, REG_A8XX_CP_SCRATCH_GLOBAL(2), 1);
+ OUT_RING(ring, submit->seqno);
+ } else {
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH(2), 1);
+ OUT_RING(ring, submit->seqno);
+ }
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BR);
@@ -723,8 +742,11 @@ static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
/* Copy the data into the internal struct to drop the const qualifier (temporarily) */
*cfg = *common_cfg;
- cfg->ubwc_swizzle = 0x6;
- cfg->highest_bank_bit = 15;
+ /* Use common config as is for A8x */
+ if (!adreno_is_a8xx(gpu)) {
+ cfg->ubwc_swizzle = 0x6;
+ cfg->highest_bank_bit = 15;
+ }
if (adreno_is_a610(gpu)) {
cfg->highest_bank_bit = 13;
@@ -1013,7 +1035,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
return false;
/* A7xx is safe! */
- if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
return true;
/*
@@ -1127,7 +1149,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
return 0;
}
-static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
int ret;
@@ -2089,7 +2111,7 @@ static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu)
u32 fuse_val;
int ret;
- if (adreno_is_a750(adreno_gpu)) {
+ if (adreno_is_a750(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
/*
* Assume that if qcom scm isn't available, that whatever
* replacement allows writing the fuse register ourselves.
@@ -2115,9 +2137,9 @@ static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu)
return ret;
/*
- * On a750 raytracing may be disabled by the firmware, find out
- * whether that's the case. The scm call above sets the fuse
- * register.
+ * On A7XX_GEN3 and newer, raytracing may be disabled by the
+ * firmware, find out whether that's the case. The scm call
+ * above sets the fuse register.
*/
fuse_val = a6xx_llc_read(a6xx_gpu,
REG_A7XX_CX_MISC_SW_FUSE_VALUE);
@@ -2178,7 +2200,7 @@ void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert)
{
/* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */
- if (adreno_is_a610(to_adreno_gpu(gpu)))
+ if (adreno_is_a610(to_adreno_gpu(gpu)) || adreno_is_a8xx(to_adreno_gpu(gpu)))
return;
gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
@@ -2209,7 +2231,12 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
msm_devfreq_resume(gpu);
- adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
+ if (adreno_is_a8xx(adreno_gpu))
+ a8xx_llc_activate(a6xx_gpu);
+ else if (adreno_is_a7xx(adreno_gpu))
+ a7xx_llc_activate(a6xx_gpu);
+ else
+ a6xx_llc_activate(a6xx_gpu);
return ret;
}
@@ -2589,10 +2616,8 @@ static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->base.hw_apriv =
!!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
- /* gpu->info only gets assigned in adreno_gpu_init() */
- is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
- config->info->family == ADRENO_7XX_GEN2 ||
- config->info->family == ADRENO_7XX_GEN3;
+ /* gpu->info only gets assigned in adreno_gpu_init(). A8x is included intentionally */
+ is_a7xx = config->info->family >= ADRENO_7XX_GEN1;
a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
@@ -2630,7 +2655,7 @@ static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
- if (adreno_is_a7xx(adreno_gpu)) {
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
ret = a7xx_cx_mem_init(a6xx_gpu);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
@@ -2754,3 +2779,30 @@ const struct adreno_gpu_funcs a7xx_gpu_funcs = {
.bus_halt = a6xx_bus_clear_pending_transactions,
.mmu_fault_handler = a6xx_fault_handler,
};
+
+const struct adreno_gpu_funcs a8xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a8xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_gmu_pm_suspend,
+ .pm_resume = a6xx_gmu_pm_resume,
+ .recover = a8xx_recover,
+ .submit = a7xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a8xx_irq,
+ .destroy = a6xx_destroy,
+ .gpu_busy = a8xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a8xx_progress,
+ },
+ .init = a6xx_gpu_init,
+ .get_timestamp = a8xx_gmu_get_timestamp,
+ .bus_halt = a8xx_bus_clear_pending_transactions,
+ .mmu_fault_handler = a8xx_fault_handler,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index ef66e1eb9152..e6218b0b9732 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -46,6 +46,8 @@ struct a6xx_info {
const struct adreno_protect *protect;
const struct adreno_reglist_list *pwrup_reglist;
const struct adreno_reglist_list *ifpc_reglist;
+ const struct adreno_reglist_pipe *nonctxt_reglist;
+ u32 max_slices;
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
@@ -101,6 +103,11 @@ struct a6xx_gpu {
void *htw_llc_slice;
bool have_mmu500;
bool hung;
+
+ u32 cached_aperture;
+ spinlock_t aperture_lock;
+
+ u32 slice_mask;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
@@ -302,5 +309,19 @@ int a6xx_gpu_state_put(struct msm_gpu_state *state);
void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
int a6xx_fenced_write(struct a6xx_gpu *gpu, u32 offset, u64 value, u32 mask, bool is_64b);
-
+void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+int a6xx_zap_shader_init(struct msm_gpu *gpu);
+
+void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
+int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data);
+void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value);
+u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate);
+int a8xx_gpu_feature_probe(struct msm_gpu *gpu);
+void a8xx_gpu_get_slice_info(struct msm_gpu *gpu);
+int a8xx_hw_init(struct msm_gpu *gpu);
+irqreturn_t a8xx_irq(struct msm_gpu *gpu);
+void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu);
+bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+void a8xx_recover(struct msm_gpu *gpu);
#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a8xx_gpu.c b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c
new file mode 100644
index 000000000000..c9cd7546024a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c
@@ -0,0 +1,1202 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */
+
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+#include <linux/bitfield.h>
+#include <linux/devfreq.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define GPU_PAS_ID 13
+
+static void a8xx_aperture_slice_set(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 val;
+
+ val = A8XX_CP_APERTURE_CNTL_HOST_PIPEID(pipe) | A8XX_CP_APERTURE_CNTL_HOST_SLICEID(slice);
+
+ if (a6xx_gpu->cached_aperture == val)
+ return;
+
+ gpu_write(gpu, REG_A8XX_CP_APERTURE_CNTL_HOST, val);
+
+ a6xx_gpu->cached_aperture = val;
+}
+
+static void a8xx_aperture_acquire(struct msm_gpu *gpu, enum adreno_pipe pipe, unsigned long *flags)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ spin_lock_irqsave(&a6xx_gpu->aperture_lock, *flags);
+
+ a8xx_aperture_slice_set(gpu, pipe, 0);
+}
+
+static void a8xx_aperture_release(struct msm_gpu *gpu, unsigned long flags)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ spin_unlock_irqrestore(&a6xx_gpu->aperture_lock, flags);
+}
+
+static void a8xx_aperture_clear(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+
+ a8xx_aperture_acquire(gpu, PIPE_NONE, &flags);
+ a8xx_aperture_release(gpu, flags);
+}
+
+static void a8xx_write_pipe(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 offset, u32 data)
+{
+ unsigned long flags;
+
+ a8xx_aperture_acquire(gpu, pipe, &flags);
+ gpu_write(gpu, offset, data);
+ a8xx_aperture_release(gpu, flags);
+}
+
+static u32 a8xx_read_pipe_slice(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice, u32 offset)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&a6xx_gpu->aperture_lock, flags);
+ a8xx_aperture_slice_set(gpu, pipe, slice);
+ val = gpu_read(gpu, offset);
+ spin_unlock_irqrestore(&a6xx_gpu->aperture_lock, flags);
+
+ return val;
+}
+
+void a8xx_gpu_get_slice_info(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
+ u32 slice_mask;
+
+ if (adreno_gpu->info->family < ADRENO_8XX_GEN1)
+ return;
+
+ if (a6xx_gpu->slice_mask)
+ return;
+
+ slice_mask = GENMASK(info->max_slices - 1, 0);
+
+ /* GEN1 doesn't support partial slice configurations */
+ if (adreno_gpu->info->family == ADRENO_8XX_GEN1) {
+ a6xx_gpu->slice_mask = slice_mask;
+ return;
+ }
+
+ slice_mask &= a6xx_llc_read(a6xx_gpu,
+ REG_A8XX_CX_MISC_SLICE_ENABLE_FINAL);
+
+ a6xx_gpu->slice_mask = slice_mask;
+
+ /* Chip ID depends on the number of slices available. So update it */
+ adreno_gpu->chip_id |= FIELD_PREP(GENMASK(7, 4), hweight32(slice_mask));
+}
+
+static u32 a8xx_get_first_slice(struct a6xx_gpu *a6xx_gpu)
+{
+ return ffs(a6xx_gpu->slice_mask) - 1;
+}
+
+static inline bool _a8xx_check_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Check that the GMU is idle */
+ if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ return false;
+
+ /* Check that the CX master is idle */
+ if (gpu_read(gpu, REG_A8XX_RBBM_STATUS) &
+ ~A8XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+ return false;
+
+ return !(gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS) &
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+static bool a8xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a8xx_check_idle(gpu))) {
+ DRM_ERROR(
+ "%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A8XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (!a6xx_in_preempt(a6xx_gpu)) {
+ if (a6xx_gpu->cur_ring == ring)
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ else
+ ring->restore_wptr = true;
+ } else {
+ ring->restore_wptr = true;
+ }
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+}
+
+static void a8xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+ state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+ state ? 0x110111 : 0);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+ state ? 0x55555 : 0);
+
+ gpu_write(gpu, REG_A8XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
+ gpu_write(gpu, REG_A8XX_RBBM_CGC_GLOBAL_LOAD_CMD, !!state);
+
+ if (state) {
+ gpu_write(gpu, REG_A8XX_RBBM_CGC_P2S_TRIG_CMD, 1);
+
+ if (gpu_poll_timeout(gpu, REG_A8XX_RBBM_CGC_P2S_STATUS, val,
+ val & A8XX_RBBM_CGC_P2S_STATUS_TXDONE, 1, 10)) {
+ dev_err(&gpu->pdev->dev, "RBBM_CGC_P2S_STATUS TXDONE Poll failed\n");
+ return;
+ }
+
+ gpu_write(gpu, REG_A8XX_RBBM_CLOCK_CNTL_GLOBAL, 0);
+ } else {
+ /*
+ * The GMU enables clock gating in GBIF during boot. So,
+ * override that here when the hwcg feature is disabled.
+ */
+ gpu_rmw(gpu, REG_A8XX_GBIF_CX_CONFIG, BIT(0), 0);
+ }
+}
+
+static void a8xx_set_cp_protect(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct adreno_protect *protect = adreno_gpu->info->a6xx->protect;
+ u32 cntl, final_cfg;
+ unsigned int i;
+
+ cntl = A8XX_CP_PROTECT_CNTL_PIPE_ACCESS_PROT_EN |
+ A8XX_CP_PROTECT_CNTL_PIPE_ACCESS_FAULT_ON_VIOL_EN |
+ A8XX_CP_PROTECT_CNTL_PIPE_LAST_SPAN_INF_RANGE |
+ A8XX_CP_PROTECT_CNTL_PIPE_HALT_SQE_RANGE__MASK;
+ /*
+ * Enable access protection to privileged registers, fault on an access
+ * protect violation and select the last span to protect from the start
+ * address all the way to the end of the register address space
+ */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_CP_PROTECT_CNTL_PIPE, cntl);
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_CP_PROTECT_CNTL_PIPE, cntl);
+
+ a8xx_aperture_clear(gpu);
+
+ for (i = 0; i < protect->count; i++) {
+ /* Intentionally skip writing to some registers */
+ if (protect->regs[i]) {
+ gpu_write(gpu, REG_A8XX_CP_PROTECT_GLOBAL(i), protect->regs[i]);
+ final_cfg = protect->regs[i];
+ }
+ }
+
+ /*
+ * The last span feature is only supported on pipe-specific registers.
+ * So update those here.
+ */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_CP_PROTECT_PIPE(protect->count_max), final_cfg);
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_CP_PROTECT_PIPE(protect->count_max), final_cfg);
+
+ a8xx_aperture_clear(gpu);
+}
+
+static void a8xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct qcom_ubwc_cfg_data *cfg = adreno_gpu->ubwc_config;
+ u32 level2_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2);
+ u32 level3_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL3);
+ bool rgba8888_lossless = false, fp16compoptdis = false;
+ bool yuvnotcomptofc = false, min_acc_len_64b = false;
+ bool rgb565_predicator = false, amsbc = false;
+ bool ubwc_mode = qcom_ubwc_get_ubwc_mode(cfg);
+ u32 ubwc_version = cfg->ubwc_enc_version;
+ u32 hbb, hbb_hi, hbb_lo, mode = 1;
+ u8 uavflagprd_inv = 2;
+
+ switch (ubwc_version) {
+ case UBWC_5_0:
+ amsbc = true;
+ rgb565_predicator = true;
+ mode = 4;
+ break;
+ case UBWC_4_0:
+ amsbc = true;
+ rgb565_predicator = true;
+ fp16compoptdis = true;
+ rgba8888_lossless = true;
+ mode = 2;
+ break;
+ case UBWC_3_0:
+ amsbc = true;
+ mode = 1;
+ break;
+ default:
+ dev_err(&gpu->pdev->dev, "Unknown UBWC version: 0x%x\n", ubwc_version);
+ break;
+ }
+
+ /*
+ * We subtract 13 from the highest bank bit (13 is the minimum value
+ * allowed by hw) and write the lowest two bits of the remaining value
+ * as hbb_lo and the one above it as hbb_hi to the hardware.
+ */
+ WARN_ON(cfg->highest_bank_bit < 13);
+ hbb = cfg->highest_bank_bit - 13;
+ hbb_hi = hbb >> 2;
+ hbb_lo = hbb & 3;
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5);
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5);
+
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CCU_NC_MODE_CNTL,
+ yuvnotcomptofc << 6 |
+ hbb_hi << 3 |
+ hbb_lo << 1);
+
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CMP_NC_MODE_CNTL,
+ mode << 15 |
+ yuvnotcomptofc << 6 |
+ rgba8888_lossless << 4 |
+ fp16compoptdis << 3 |
+ rgb565_predicator << 2 |
+ amsbc << 1 |
+ min_acc_len_64b);
+
+ a8xx_aperture_clear(gpu);
+
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+ level3_swizzling_dis << 13 |
+ level2_swizzling_dis << 12 |
+ hbb_hi << 10 |
+ uavflagprd_inv << 4 |
+ min_acc_len_64b << 3 |
+ hbb_lo << 1 | ubwc_mode);
+
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
+ level3_swizzling_dis << 7 |
+ level2_swizzling_dis << 6 |
+ hbb_hi << 4 |
+ min_acc_len_64b << 3 |
+ hbb_lo << 1 | ubwc_mode);
+}
+
+static void a8xx_nonctxt_config(struct msm_gpu *gpu, u32 *gmem_protect)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
+ const struct adreno_reglist_pipe *regs = info->nonctxt_reglist;
+ unsigned int pipe_id, i;
+ unsigned long flags;
+
+ for (pipe_id = PIPE_NONE; pipe_id <= PIPE_DDE_BV; pipe_id++) {
+ /* We don't have support for LPAC yet */
+ if (pipe_id == PIPE_LPAC)
+ continue;
+
+ a8xx_aperture_acquire(gpu, pipe_id, &flags);
+
+ for (i = 0; regs[i].offset; i++) {
+ if (!(BIT(pipe_id) & regs[i].pipe))
+ continue;
+
+ if (regs[i].offset == REG_A8XX_RB_GC_GMEM_PROTECT)
+ *gmem_protect = regs[i].value;
+
+ gpu_write(gpu, regs[i].offset, regs[i].value);
+ }
+
+ a8xx_aperture_release(gpu, flags);
+ }
+
+ a8xx_aperture_clear(gpu);
+}
+
+static int a8xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+ u32 mask;
+
+ /* Disable concurrent binning before sending CP init */
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, BIT(27));
+
+ OUT_PKT7(ring, CP_ME_INIT, 4);
+
+ /* Use multiple HW contexts */
+ mask = BIT(0);
+
+ /* Enable error detection */
+ mask |= BIT(1);
+
+ /* Set default reset state */
+ mask |= BIT(3);
+
+ /* Disable save/restore of performance counters across preemption */
+ mask |= BIT(6);
+
+ OUT_RING(ring, mask);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Operation mode mask */
+ OUT_RING(ring, 0x00000002);
+
+ a6xx_flush(gpu, ring);
+ return a8xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+#define A8XX_INT_MASK \
+ (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_SW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
+ A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
+ A6XX_RBBM_INT_0_MASK_TSBWRITEERROR | \
+ A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)
+
+#define A8XX_APRIV_MASK \
+ (A8XX_CP_APRIV_CNTL_PIPE_ICACHE | \
+ A8XX_CP_APRIV_CNTL_PIPE_RBFETCH | \
+ A8XX_CP_APRIV_CNTL_PIPE_RBPRIVLEVEL | \
+ A8XX_CP_APRIV_CNTL_PIPE_RBRPWB)
+
+#define A8XX_BR_APRIV_MASK \
+ (A8XX_APRIV_MASK | \
+ A8XX_CP_APRIV_CNTL_PIPE_CDREAD | \
+ A8XX_CP_APRIV_CNTL_PIPE_CDWRITE)
+
+#define A8XX_CP_GLOBAL_INT_MASK \
+ (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBR | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTBV | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTLPAC | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTAQE0 | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTAQE1 | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTDDEBR | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTDDEBV | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBR | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBV | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTLPAC | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTAQE0 | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTAQE1 | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTDDEBR | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTDDEBV)
+
+#define A8XX_CP_INTERRUPT_STATUS_MASK_PIPE \
+ (A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFRBWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB1WRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB2WRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB3WRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFSDSWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFMRBWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFVSDWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_OPCODEERROR | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VSDPARITYERROR | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_REGISTERPROTECTIONERROR | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_ILLEGALINSTRUCTION | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_SMMUFAULT | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPCLIENT | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPTYPE | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPREAD | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_RTWROVF | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTWROVF | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTREFCNTOVF | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTCLRRESMISS)
+
+#define A8XX_CP_HW_FAULT_STATUS_MASK_PIPE \
+ (A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFRBFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB1FAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB2FAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB3FAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFSDSFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFMRBFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFVSDFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_SQEREADBURSTOVF | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_EVENTENGINEOVF | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_UCODEERROR)
+
+static int hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int pipe_id, i;
+ u32 gmem_protect = 0;
+ u64 gmem_range_min;
+ int ret;
+
+ ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ if (ret)
+ return ret;
+
+ /* Clear the cached value to force aperture configuration next time */
+ a6xx_gpu->cached_aperture = UINT_MAX;
+ a8xx_aperture_clear(gpu);
+
+ /* Clear GBIF halt in case GX domain was not collapsed */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+ gpu_read(gpu, REG_A6XX_GBIF_HALT);
+
+ gpu_write(gpu, REG_A8XX_RBBM_GBIF_HALT, 0);
+ gpu_read(gpu, REG_A8XX_RBBM_GBIF_HALT);
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+ gpu_write(gpu, REG_A8XX_GBIF_CX_CONFIG, 0x20023000);
+ gmu_write(gmu, REG_A6XX_GMU_MRC_GBIF_QOS_CTRL, 0x33);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A8XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Setup GMEM Range in UCHE */
+ gmem_range_min = SZ_64M;
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A8XX_UCHE_CCHE_GC_GMEM_RANGE_MIN, gmem_range_min);
+ gpu_write64(gpu, REG_A8XX_SP_HLSQ_GC_GMEM_RANGE_MIN, gmem_range_min);
+
+ /* Setup UCHE Trap region */
+ gpu_write64(gpu, REG_A8XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A8XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A8XX_UCHE_CCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A8XX_UCHE_CCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A8XX_RBBM_PERFCTR_CNTL, 0x1);
+ gpu_write(gpu, REG_A8XX_RBBM_SLICE_PERFCTR_CNTL, 0x1);
+
+ /* Turn on the IFPC counter (countable 4 on XOCLK1) */
+ gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_1,
+ FIELD_PREP(GENMASK(7, 0), 0x4));
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A8XX_CP_PERFCTR_CP_SEL(0), 1);
+
+ a8xx_set_ubwc_config(gpu);
+
+ /* Set weights for bicubic filtering */
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), 0);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(1), 0x3fe05ff4);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(2), 0x3fa0ebee);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(3), 0x3f5193ed);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(4), 0x3f0243f0);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(5), 0x00000000);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(6), 0x3fd093e8);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(7), 0x3f4133dc);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(8), 0x3ea1dfdb);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(9), 0x3e0283e0);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(10), 0x0000ac2b);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(11), 0x0000f01d);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(12), 0x00114412);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(13), 0x0021980a);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(14), 0x0051ec05);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(15), 0x0000380e);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(16), 0x3ff09001);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(17), 0x3fc10bfa);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(18), 0x3f9193f7);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(19), 0x3f7227f7);
+
+ gpu_write(gpu, REG_A8XX_UCHE_CLIENT_PF, BIT(7) | 0x1);
+
+ a8xx_nonctxt_config(gpu, &gmem_protect);
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, BIT(30) | 0xcfffff);
+ gpu_write(gpu, REG_A8XX_RBBM_SLICE_INTERFACE_HANG_INT_CNTL, BIT(30));
+
+ /* Set up the CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+
+ /* Enable the power counter */
+ gmu_rmw(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_0, 0xff, BIT(5));
+ gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+
+ /* Protect registers from the CP */
+ a8xx_set_cp_protect(gpu);
+
+ /* Enable the GMEM save/restore feature for preemption */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 1);