// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
* Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include "eip93-cipher.h"
#include "eip93-hash.h"
#include "eip93-common.h"
#include "eip93-main.h"
#include "eip93-regs.h"
int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err)
{
u32 ext_err;
if (!err)
return 0;
switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) {
case EIP93_PE_CTRL_PE_AUTH_ERR:
case EIP93_PE_CTRL_PE_PAD_ERR:
return -EBADMSG;
/* let software handle anti-replay errors */
case EIP93_PE_CTRL_PE_SEQNUM_ERR:
return 0;
case EIP93_PE_CTRL_PE_EXT_ERR:
break;
default:
dev_err(eip93->dev, "Unhandled error 0x%08x\n", err);
return -EINVAL;
}
/* Parse additional ext errors */
ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err);
switch (ext_err) {
case EIP93_PE_CTRL_PE_EXT_ERR_BUS:
case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING:
return -EIO;
case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER:
return -EACCES;
case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP:
case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO:
case EIP93_PE_CTRL_PE_EXT_ERR_SPI:
return -EINVAL;
case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH:
case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH:
case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR:
return -EBADMSG;
default:
dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err);
return -EINVAL;
}
}
static void *eip93_ring_next_wptr(struct eip93_device *eip93,
struct eip93_desc_ring *ring)
{
void *ptr = ring->write;
if ((ring->write == ring->read - ring->offset) ||
(ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
if (ring->write == ring->base_end)
ring->write = ring->base;
else
ring->write += ring->offset;
return ptr;
}
/*
 * eip93_ring_next_rptr - consume the next pending slot in a descriptor ring
 * @eip93: device (unused here, kept for symmetry with the wptr helper)
 * @ring: the ring to advance
 *
 * Returns the current read slot and advances the read pointer (wrapping
 * at base_end), or ERR_PTR(-ENOENT) when the ring is empty.
 */
static void *eip93_ring_next_rptr(struct eip93_device *eip93,
				  struct eip93_desc_ring *ring)
{
	void *slot = ring->read;

	/* read == write means nothing is pending */
	if (ring->read == ring->write)
		return ERR_PTR(-ENOENT);

	/* Advance, wrapping back to the start of the ring at the end */
	ring->read = ring->read == ring->base_end ?
		     ring->base : ring->read + ring->offset;

	return slot;
}
/*
 * eip93_put_descriptor - queue a command descriptor and its result slot
 * @eip93: device owning the rings
 * @desc: command descriptor to copy into the command ring
 *
 * Claims one slot in the result ring (zeroed) and one in the command ring
 * (filled from @desc). Returns 0 on success or the negative errno
 * reported by the ring (-ENOMEM when a ring is full).
 *
 * Fix: propagate the actual error from eip93_ring_next_wptr() instead of
 * overwriting it with a hard-coded -ENOENT, which masked the ring-full
 * -ENOMEM condition.
 */
int eip93_put_descriptor(struct eip93_device *eip93,
			 struct eip93_descriptor *desc)
{
	struct eip93_descriptor *cdesc;
	struct eip93_descriptor *rdesc;

	rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr);
	if (IS_ERR(rdesc))
		return PTR_ERR(rdesc);

	cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc)) {
		/*
		 * NOTE(review): the rdr write pointer has already been
		 * advanced here, so a full cdr leaks one rdr slot — TODO
		 * confirm whether both rings are sized identically so
		 * this cannot happen in practice.
		 */
		return PTR_ERR(cdesc);
	}

	memset(rdesc, 0, sizeof(struct eip93_descriptor));
	memcpy(cdesc, desc, sizeof(struct eip93_descriptor));

	return 0;
}
/*
 * eip93_get_descriptor - retire one completed descriptor pair
 * @eip93: device owning the rings
 *
 * Consumes (and zeroes) the next command-ring entry, then returns the
 * matching result-ring entry, or ERR_PTR(-ENOENT) when either ring is
 * empty.
 */
void *eip93_get_descriptor(struct eip93_device *eip93)
{
	struct eip93_descriptor *cdesc;

	/* Retire the command descriptor first and scrub it for reuse */
	cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc))
		return ERR_PTR(-ENOENT);

	memset(cdesc, 0, sizeof(*cdesc));

	/* next_rptr already yields ERR_PTR(-ENOENT) on an empty ring */
	return eip93_ring_next_rptr(eip93, &eip93->ring->rdr);
}
/*
 * eip93_free_sg_copy - release a bounce scatterlist made by
 * eip93_make_sg_copy()
 * @len: length of the backing page allocation (used to recompute the order)
 * @sg: pointer to the single-entry scatterlist; set to NULL on return
 */
static void eip93_free_sg_copy(const int len, struct scatterlist **sg)
{
	struct scatterlist *entry = *sg;

	if (!entry || !len)
		return;

	/* Backing buffer was allocated with __get_free_pages() */
	free_pages((unsigned long)sg_virt(entry), get_order(len));
	kfree(entry);
	*sg = NULL;
}
static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst,
const u32 len, const bool copy)
{
void *pages;
*dst = kmalloc_obj(**dst);
if (!*dst)
return -ENOMEM;
pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
get_order(len));
if (!pages) {
kfree(*dst);
*dst = NULL;
return -ENOMEM;
}
sg_init_table(*dst, 1);
sg_set_buf(*dst, pages, len);
/* copy only as requested */
if (copy)
sg_copy_to_buffer(src, sg_nents(src), pages, len);
return 0;
}
static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len,
const int blksize)
{
int nents