// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020-2021 NXP
*/
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/io.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_core.h"
#include "vpu_mbox.h"
#include "vpu_msgs.h"
#include "vpu_rpc.h"
#include "vpu_cmds.h"
void csr_writel(struct vpu_core *core, u32 reg, u32 val)
{
writel(val, core->base + reg);
}
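
/* Read a 32-bit VPU control/status register (CSR). */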
u32 csr_readl(struct vpu_core *core, u32 reg)
{
return readl(core->base + reg);
}
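
/*
 * Copy the firmware image named by the core resources into the
 * preallocated firmware DMA buffer and hand it to the interface layer.
 * The buffer must already be set up and the image must fit within it.
 */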
static int vpu_core_load_firmware(struct vpu_core *core)
{
	const struct firmware *pfw = NULL;
	int ret = 0;

	if (!core->fw.virt) {
		dev_err(core->dev, "firmware buffer is not ready\n");
		return -EINVAL;
	}

	ret = request_firmware(&pfw, core->res->fwname, core->dev);
	dev_dbg(core->dev, "request_firmware %s : %d\n", core->res->fwname, ret);
	if (ret) {
		dev_err(core->dev, "request firmware %s failed, ret = %d\n",
			core->res->fwname, ret);
		return ret;
	}

	if (core->fw.length < pfw->size) {
		dev_err(core->dev, "firmware buffer too small: need %zu, have %u\n",
			pfw->size, core->fw.length);
		ret = -EINVAL;
		goto exit;
	}

	memset(core->fw.virt, 0, core->fw.length);
	memcpy(core->fw.virt, pfw->data, pfw->size);
	core->fw.bytesused = pfw->size;
	ret = vpu_iface_on_firmware_loaded(core);
exit:
	release_firmware(pfw);

	return ret;
}
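
/*
 * Finish bringing the core up once the firmware is running: log the
 * firmware version, derive the supported instance count (bounded by the
 * per-instance activity buffer size and the width of instance_mask) and
 * mark the core active.
 */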
static int vpu_core_boot_done(struct vpu_core *core)
{
	u32 fw_version;

	fw_version = vpu_iface_get_version(core);
	dev_info(core->dev, "%s firmware version : %d.%d.%d\n",
		 vpu_core_type_desc(core->type),
		 (fw_version >> 16) & 0xff,
		 (fw_version >> 8) & 0xff,
		 fw_version & 0xff);

	core->supported_instance_count = vpu_iface_get_max_instance_count(core);
	if (core->res->act_size) {
		u32 count = core->act.length / core->res->act_size;

		core->supported_instance_count = min(core->supported_instance_count, count);
	}
	if (core->supported_instance_count >= BITS_PER_TYPE(core->instance_mask))
		core->supported_instance_count = BITS_PER_TYPE(core->instance_mask);

	core->fw_version = fw_version;
	vpu_core_set_state(core, VPU_CORE_ACTIVE);

	return 0;
}
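
/*
 * Wait up to VPU_TIMEOUT for the firmware to signal boot completion,
 * then finalize the core state.
 */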
static int vpu_core_wait_boot_done(struct vpu_core *core)
{
	int ret;

	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "boot timeout\n");
		return -ETIMEDOUT;
	}

	return vpu_core_boot_done(core);
}
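
/* Boot the core, optionally (re)loading the firmware image first. */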
static int vpu_core_boot(struct vpu_core *core, bool load)
{
	int ret;

	reinit_completion(&core->cmp);

	if (load) {
		ret = vpu_core_load_firmware(core);
		if (ret)
			return ret;
	}

	vpu_iface_boot_core(core);

	return vpu_core_wait_boot_done(core);
}
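
/* Thin wrapper: let the interface layer shut the core down. */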
static int vpu_core_shutdown(struct vpu_core *core)
{
return vpu_iface_shutdown_core(core);
}
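
/*
 * Restore an already-booted core without reloading firmware: reset the
 * software state, re-derive the boot-done state and let the interface
 * layer restore its side.
 */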
static int vpu_core_restore(struct vpu_core *core)
{
	int ret;

	ret = vpu_core_sw_reset(core);
	if (ret)
		return ret;

	vpu_core_boot_done(core);

	return vpu_iface_restore_core(core);
}
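
/*
 * Allocate a coherent DMA buffer of buf->length bytes for @buf.
 * GFP_DMA32 keeps the buffer within 32-bit addressable memory; a zero
 * length is treated as a successful no-op.
 */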
static int __vpu_alloc_dma(struct device *dev, struct vpu_buffer *buf)
{
	gfp_t gfp = GFP_KERNEL | GFP_DMA32;

	if (!buf->length)
		return 0;

	buf->virt = dma_alloc_coherent(dev, buf->length, &buf->phys, gfp);
	if (!buf->virt)
		return -ENOMEM;

	buf->dev = dev;

	return 0;
}
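
/* Release a buffer allocated by __vpu_alloc_dma() and owned by buf->dev. */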
void vpu_free_dma(struct vpu_buffer *buf)
{
if (!buf->virt || !buf->dev)
		return;

dma_free_coherent(buf->dev,