// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024
*
* Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <crypto/hmac.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include "eip93-cipher.h"
#include "eip93-hash.h"
#include "eip93-main.h"
#include "eip93-common.h"
#include "eip93-regs.h"
/*
 * Free all hash data blocks queued on this request.
 *
 * Each block's DMA mapping is released, the entry is unlinked and freed,
 * leaving rctx->blocks as a valid empty list for reuse. The trailing data
 * buffer (rctx->data_dma) is only DMA-mapped when the request is being
 * finalized, so it is unmapped only in that case.
 *
 * Fix vs. original: entries were freed while still linked, and the list
 * head was only re-initialized behind a list_empty() check that inspected
 * a now-stale next pointer. Unlink before free and re-init unconditionally.
 */
static void eip93_hash_free_data_blocks(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;
	struct mkt_hash_block *block, *tmp;

	list_for_each_entry_safe(block, tmp, &rctx->blocks, list) {
		dma_unmap_single(eip93->dev, block->data_dma,
				 SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
		/* Unlink before kfree so the list never points at freed memory */
		list_del(&block->list);
		kfree(block);
	}
	/* List is empty now; reset the head unconditionally */
	INIT_LIST_HEAD(&rctx->blocks);

	if (rctx->finalize)
		dma_unmap_single(eip93->dev, rctx->data_dma,
				 rctx->data_used, DMA_TO_DEVICE);
}
/*
 * Release the DMA mappings of the SA records attached to this request.
 * The HMAC SA record is only mapped for HMAC transforms, so it is only
 * unmapped in that case. The two unmaps are independent of each other.
 */
static void eip93_hash_free_sa_record(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct device *dev = ctx->eip93->dev;

	dma_unmap_single(dev, rctx->sa_record_base,
			 sizeof(rctx->sa_record), DMA_TO_DEVICE);

	if (IS_HMAC(ctx->flags))
		dma_unmap_single(dev, rctx->sa_record_hmac_base,
				 sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE);
}
/*
 * Completion handler for a hash request processed by the EIP93 engine.
 *
 * Unmaps the sa_state DMA buffer (so the CPU sees the device-written
 * digest), copies the digest out to req->result when the request is a
 * final/partial hash, frees all per-request DMA resources, and completes
 * the request toward the crypto API with @err.
 */
void eip93_hash_handle_result(struct crypto_async_request *async, int err)
{
struct ahash_request *req = ahash_request_cast(async);
struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct sa_state *sa_state = &rctx->sa_state;
struct eip93_device *eip93 = ctx->eip93;
int i;
/* Must unmap before reading sa_state: the device wrote the digest here */
dma_unmap_single(eip93->dev, rctx->sa_state_base,
sizeof(*sa_state), DMA_FROM_DEVICE);
/*
 * With partial_hash assume SHA256_DIGEST_SIZE buffer is passed.
 * This is to handle SHA224 that have a 32 byte intermediate digest.
 */
if (rctx->partial_hash)
digestsize = SHA256_DIGEST_SIZE;
if (rctx->finalize || rctx->partial_hash) {
/* bytes needs to be swapped for req->result */
if (!IS_HASH_MD5(ctx->flags)) {
/* In-place big-endian -> CPU conversion, one 32-bit word at a time */
for (i = 0; i < digestsize / sizeof(u32); i++) {
u32 *digest = (u32 *)sa_state->state_i_digest;
digest[i] = be32_to_cpu((__be32 __force)digest[i]);
}
}
memcpy(req->result, sa_state->state_i_digest, digestsize);
}
/* Free DMA resources before signalling completion to the caller */
eip93_hash_free_sa_record(req);
eip93_hash_free_data_blocks(req);
ahash_request_complete(req, err);
}
/*
 * Load the standard initial hash value (IV) for the selected algorithm
 * into @digest. Supported algorithms are SHA-256, SHA-224, SHA-1 and MD5;
 * any other value leaves @digest untouched (cannot happen for valid
 * transforms).
 */
static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
{
	static const u32 md5_iv[] = {
		MD5_H0, MD5_H1, MD5_H2, MD5_H3
	};
	static const u32 sha1_iv[] = {
		SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4
	};
	static const u32 sha224_iv[] = {
		SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
		SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7
	};
	static const u32 sha256_iv[] = {
		SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
		SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
	};
	const u32 *iv;
	size_t iv_len;

	/* Pick the IV table for the requested algorithm */
	switch (hash) {
	case EIP93_HASH_SHA256:
		iv = sha256_iv;
		iv_len = sizeof(sha256_iv);
		break;
	case EIP93_HASH_SHA224:
		iv = sha224_iv;
		iv_len = sizeof(sha224_iv);
		break;
	case EIP93_HASH_SHA1:
		iv = sha1_iv;
		iv_len = sizeof(sha1_iv);
		break;
	case EIP93_HASH_MD5:
		iv = md5_iv;
		iv_len = sizeof(md5_iv);
		break;
	default: /* Impossible */
		return;
	}

	memcpy(digest, iv, iv_len);
}
static void eip93_hash_export_sa_state(struct ahash_request *req,
struct eip93_hash_export_state *state)
{
struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
struct sa_state *sa_state = &rctx->sa_state;
/*
* EIP93 have special handling for state_byte_cnt in sa_state.
* Even if a zero packet is passed (and a BADMSG is returned),
* state_byte_cnt is incremented to the digest handled (with the hash
* primitive). This is problematic with export/import as EIP93
* expect 0 state_byte_cnt for the very