author     Dave Airlie <airlied@redhat.com>  2026-01-01 16:39:19 +1000
committer  Dave Airlie <airlied@redhat.com>  2026-01-01 16:39:54 +1000
commit     1054f19572acbbec80e2339dbf61f2b40ffb918c (patch)
tree       33945258ded8a2050db27c1d4e0e0a8381c1399a /include
parent     f8f9c1f4d0c7a64600e2ca312dec824a0bc2f1da (diff)
parent     bed2a6bd20681aacfb063015c1edfab6f58a333e (diff)
Merge tag 'drm-xe-fixes-2025-12-30' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes
Core Changes:
- Ensure a SVM device memory allocation is idle before migration complete (Thomas)

Driver Changes:
- Fix a SVM debug printout (Thomas)
- Use READ_ONCE() / WRITE_ONCE() for g2h_fence (Jonathan)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aVOTf6-whmkgrUuq@fedora
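As a rough illustration of the interface change this merge pulls in, the sketch below shows how a driver's copy_to_devmem() hook could honour the new pre_migrate_fence argument by waiting for the allocation to go idle before starting the copy. The name my_copy_to_devmem() and the placeholder copy step are assumptions for illustration, not code from this series.

#include <linux/dma-fence.h>
#include <drm/drm_pagemap.h>

/* Hypothetical ->copy_to_devmem() implementation (illustration only). */
static int my_copy_to_devmem(struct page **pages,
			     struct drm_pagemap_addr *pagemap_addr,
			     unsigned long npages,
			     struct dma_fence *pre_migrate_fence)
{
	long err;

	/* The fence may be NULL; otherwise wait for it before migration starts. */
	if (pre_migrate_fence) {
		err = dma_fence_wait(pre_migrate_fence, false);
		if (err)
			return err;
	}

	/* ... the driver's actual copy of @npages pages to device memory ... */
	return 0;
}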
Diffstat (limited to 'include')
-rw-r--r--  include/drm/drm_pagemap.h | 17
1 file changed, 14 insertions(+), 3 deletions(-)
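The same fence is threaded through drm_pagemap_devmem_init() in the diff below. A minimal call-site sketch follows, assuming a driver-side ops table my_devmem_ops and helper my_devmem_setup() that are not part of this patch.

#include <linux/dma-fence.h>
#include <drm/drm_pagemap.h>

/* Assumed driver ops table; a real one would populate copy_to_devmem etc. */
static const struct drm_pagemap_devmem_ops my_devmem_ops;

/* Hypothetical setup helper showing the extra pre_migrate_fence argument. */
static void my_devmem_setup(struct drm_pagemap_devmem *devmem,
			    struct device *dev, struct mm_struct *mm,
			    struct drm_pagemap *dpagemap, size_t size,
			    struct dma_fence *pre_migrate_fence)
{
	/*
	 * The fence (possibly NULL) ends up in devmem->pre_migrate_fence and
	 * is waited on, or pipelined behind, before migration starts.
	 */
	drm_pagemap_devmem_init(devmem, dev, mm, &my_devmem_ops,
				dpagemap, size, pre_migrate_fence);
}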
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index f6e7e234c089..70a7991f784f 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -8,6 +8,7 @@
#define NR_PAGES(order) (1U << (order))
+struct dma_fence;
struct drm_pagemap;
struct drm_pagemap_zdd;
struct device;
@@ -174,6 +175,8 @@ struct drm_pagemap_devmem_ops {
* @pages: Pointer to array of device memory pages (destination)
* @pagemap_addr: Pointer to array of DMA information (source)
* @npages: Number of pages to copy
+ * @pre_migrate_fence: dma-fence to wait for before migration start.
+ * May be NULL.
*
* Copy pages to device memory. If the order of a @pagemap_addr entry
* is greater than 0, the entry is populated but subsequent entries
@@ -183,13 +186,16 @@ struct drm_pagemap_devmem_ops {
*/
int (*copy_to_devmem)(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages);
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence);
/**
* @copy_to_ram: Copy to system RAM (required for migration)
* @pages: Pointer to array of device memory pages (source)
* @pagemap_addr: Pointer to array of DMA information (destination)
* @npages: Number of pages to copy
+ * @pre_migrate_fence: dma-fence to wait for before migration start.
+ * May be NULL.
*
* Copy pages to system RAM. If the order of a @pagemap_addr entry
* is greater than 0, the entry is populated but subsequent entries
@@ -199,7 +205,8 @@ struct drm_pagemap_devmem_ops {
*/
int (*copy_to_ram)(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages);
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence);
};
/**
@@ -212,6 +219,8 @@ struct drm_pagemap_devmem_ops {
* @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
* @size: Size of device memory allocation
* @timeslice_expiration: Timeslice expiration in jiffies
+ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
+ * (May be NULL).
*/
struct drm_pagemap_devmem {
struct device *dev;
@@ -221,6 +230,7 @@ struct drm_pagemap_devmem {
struct drm_pagemap *dpagemap;
size_t size;
u64 timeslice_expiration;
+ struct dma_fence *pre_migrate_fence;
};
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
@@ -238,7 +248,8 @@ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size);
+ struct drm_pagemap *dpagemap, size_t size,
+ struct dma_fence *pre_migrate_fence);
int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,