// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/iosys-map.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
#include "amdxdna_ubuf.h"

#define XDNA_MAX_CMD_BO_SIZE	SZ_32K

MODULE_IMPORT_NS("DMA_BUF");
static int
amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_client *client = abo->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_mem *mem = &abo->mem;
	struct amdxdna_gem_obj *heap;
	u64 offset;
	u32 align;
	int ret;

	mutex_lock(&client->mm_lock);

	heap = client->dev_heap;
	if (!heap) {
		ret = -EINVAL;
		goto unlock_out;
	}

	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
		XDNA_ERR(xdna, "Invalid dev heap userptr");
		ret = -EINVAL;
		goto unlock_out;
	}

	if (mem->size == 0 || mem->size > heap->mem.size) {
		XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
			 mem->size, heap->mem.size);
		ret = -EINVAL;
		goto unlock_out;
	}

	align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
	ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
					 mem->size, align,
					 0, DRM_MM_INSERT_BEST);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		goto unlock_out;
	}

	mem->dev_addr = abo->mm_node.start;
	offset = mem->dev_addr - heap->mem.dev_addr;
	mem->userptr = heap->mem.userptr + offset;
	mem->kva = heap->mem.kva + offset;

	drm_gem_object_get(to_gobj(heap));

unlock_out:
	mutex_unlock(&client->mm_lock);

	return ret;
}
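
/* Release the driver-side BO bookkeeping: destroy its lock and free the object. */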
static void
amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
{
	mutex_destroy(&abo->lock);
	kfree(abo);
}
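
/*
 * Return a dev BO's range to the heap's drm_mm and drop the heap BO
 * reference taken in amdxdna_gem_heap_alloc().
 */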
static void
amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_gem_obj *heap;

	mutex_lock(&abo->client->mm_lock);

	drm_mm_remove_node(&abo->mm_node);

	heap = abo->client->dev_heap;
	drm_gem_object_put(to_gobj(heap));

	mutex_unlock(&abo->client->mm_lock);
}
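
/*
 * MMU interval notifier callback: mark the mapping invalid under
 * notifier_lock, bump the notifier sequence, let the device-specific
 * hmm_invalidate() hook react, and defer unregistration to notifier_wq
 * when the range is being unmapped.
 */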
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
	struct amdxdna_gem_obj *abo = mapp->abo;
	struct amdxdna_dev *xdna;

	xdna = to_xdna_dev(to_gobj(abo)->dev);
	XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
		 mapp->vma->vm_start, mapp->vma->vm_end, abo->type);

	if (!mmu_notifier_range_blockable(range))
		return false;

	down_write(&xdna->notifier_lock);
	abo->mem.map_invalid = true;
	mapp->invalid = true;
	mmu_interval_set_seq(&mapp->notifier, cur_seq);
	up_write(&xdna->notifier_lock);

	xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);

	if (range->event == MMU_NOTIFY_UNMAP) {
		down_write(&xdna->notifier_lock);
		if (!mapp->unmapped) {
			queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
			mapp->unmapped = true;
		}
		up_write(&xdna->notifier_lock);
	}

	return true;
}
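
/*
 * MMU interval notifier callbacks for the per-VMA mappings tracked by
 * struct amdxdna_umap.
 */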
static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
	.invalidate = amdxdna_hmm_invalidate,
};

static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
				   struct