aboutsummaryrefslogtreecommitdiff
path: root/drivers/gpu
diff options
context:
space:
mode:
authorMatthew Brost <matthew.brost@intel.com>2026-03-26 14:01:16 -0700
committerRodrigo Vivi <rodrigo.vivi@intel.com>2026-04-29 11:27:38 -0400
commita0fc362f095330f7b3f68ac0c55ef8da18290c87 (patch)
treee5b41d74df974e6cb22906c36f2e1bb4d67ad010 /drivers/gpu
parent254f49634ee16a731174d2ae34bc50bd5f45e731 (diff)
drm/xe: Drop registration of guc_submit_wedged_fini from xe_guc_submit_wedge()
xe_guc_submit_wedge() runs in the DMA-fence signaling path, where GFP_KERNEL memory allocations are not permitted. However, registering guc_submit_wedged_fini via drmm_add_action_or_reset() triggers such an allocation. Avoid this by moving the logic from guc_submit_wedged_fini() into guc_submit_fini(), where wedged exec queue references are dropped during normal teardown.

Fixes: 8ed9aaae39f3 ("drm/xe: Force wedged state and block GT reset upon any GPU hang")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patch.msgid.link/20260326210116.202585-3-matthew.brost@intel.com
(cherry picked from commit 4a706bd93c4fb156a13477e26ffdf2e633edeb10)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c33
1 file changed, 9 insertions, 24 deletions
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index a145234f662b..10556156eaad 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -261,22 +261,10 @@ static void guc_submit_sw_fini(struct drm_device *drm, void *arg)
static void guc_submit_fini(void *arg)
{
struct xe_guc *guc = arg;
-
- /* Forcefully kill any remaining exec queues */
- xe_guc_ct_stop(&guc->ct);
- guc_submit_reset_prepare(guc);
- xe_guc_softreset(guc);
- xe_guc_submit_stop(guc);
- xe_uc_fw_sanitize(&guc->fw);
- xe_guc_submit_pause_abort(guc);
-}
-
-static void guc_submit_wedged_fini(void *arg)
-{
- struct xe_guc *guc = arg;
struct xe_exec_queue *q;
unsigned long index;
+ /* Drop any wedged queue refs */
mutex_lock(&guc->submission_state.lock);
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
if (exec_queue_wedged(q)) {
@@ -286,6 +274,14 @@ static void guc_submit_wedged_fini(void *arg)
}
}
mutex_unlock(&guc->submission_state.lock);
+
+ /* Forcefully kill any remaining exec queues */
+ xe_guc_ct_stop(&guc->ct);
+ guc_submit_reset_prepare(guc);
+ xe_guc_softreset(guc);
+ xe_guc_submit_stop(guc);
+ xe_uc_fw_sanitize(&guc->fw);
+ xe_guc_submit_pause_abort(guc);
}
static const struct xe_exec_queue_ops guc_exec_queue_ops;
@@ -1320,10 +1316,8 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
void xe_guc_submit_wedge(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
- struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
unsigned long index;
- int err;
xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
@@ -1335,15 +1329,6 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
return;
if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET) {
- err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
- guc_submit_wedged_fini, guc);
- if (err) {
- xe_gt_err(gt, "Failed to register clean-up on wedged.mode=%s; "
- "Although device is wedged.\n",
- xe_wedged_mode_to_string(XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET));
- return;
- }
-
mutex_lock(&guc->submission_state.lock);
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
if (xe_exec_queue_get_unless_zero(q))