// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <uapi/drm/xe_drm.h>

#include "xe_dep_scheduler.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
#include "xe_pxp.h"

/**
* DOC: Execution Queue
*
 * An execution queue is the interface to a HW context of execution.
 * The user creates an execution queue, submits GPU jobs through it and
 * finally destroys it.
*
 * Execution queues can also be created by XeKMD itself for driver-internal
 * operations such as object migration.
*
 * An execution queue is associated with a specific HW engine or a group of
 * engines (belonging to the same tile and engine class), and any GPU job
 * submitted on the queue runs on one of these engines.
*
 * An execution queue is tied to an address space (VM). It holds references
 * to the associated VM and the underlying Logical Ring Context(s) (LRCs)
 * until the queue is destroyed.
*
 * The execution queue sits on top of the submission backend. It transparently
 * handles whichever backend the platform uses (GuC or Execlists), as well as
 * the ring operations supported by the different engine classes.
*/
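
/*
 * A minimal userspace sketch of the create/destroy flow described above,
 * per the uAPI in uapi/drm/xe_drm.h. It assumes an open DRM fd and an
 * already created VM (vm_id); error handling and job submission details
 * are omitted, so treat it as illustrative rather than definitive:
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&instance,
 *	};
 *	struct drm_xe_exec_queue_destroy destroy = {};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *	... submit jobs with DRM_IOCTL_XE_EXEC using create.exec_queue_id ...
 *	destroy.exec_queue_id = create.exec_queue_id;
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
 */
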
enum xe_exec_queue_sched_prop {
XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
XE_EXEC_QUEUE_TIMESLICE = 1,
XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i)
		if (q->tlb_inval[i].dep_scheduler)
			xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);

	if (xe_exec_queue_uses_pxp(q))
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
if (q->vm)
xe_vm_put(q->vm);
if (q->xef)
xe_file_put(q->xef);
kfree(q);
}

static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
{
struct xe_tile *tile = gt_to_tile(q->gt);
	int i;

	for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) {
		struct xe_dep_scheduler *dep_scheduler;
		struct xe_gt *gt;
		struct workqueue_struct *wq;

		if (i == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT)
			gt = tile->primary_gt;