// SPDX-License-Identifier: GPL-2.0
/*
* Check for KVM_GET_REG_LIST regressions.
*
* Copyright (C) 2020, Red Hat, Inc.
*
* While the blessed list should be created from the oldest possible
* kernel, we can't go older than v5.2, though, because that's the first
* release which includes df205b5c6328 ("KVM: arm64: Filter out invalid
* core register IDs in KVM_GET_REG_LIST"). Without that commit the core
* registers won't match expectations.
*/
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
/*
 * Build the KVM_REG_ARM64 register ID for system register SYS_<r> from
 * its Op0/Op1/CRn/CRm/Op2 encoding fields (the SYS_* definitions come
 * from the arm64 sysreg headers).
 */
#define SYS_REG(r) ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r), \
sys_reg_Op1(SYS_ ## r), \
sys_reg_CRn(SYS_ ## r), \
sys_reg_CRm(SYS_ ## r), \
sys_reg_Op2(SYS_ ## r))
/*
 * Describes a register whose presence in KVM_GET_REG_LIST is gated by a
 * feature field of an ID register: the register exists only when the
 * field's value meets a minimum.
 */
struct feature_id_reg {
	__u64 reg;		/* KVM ID of the feature-gated register */
	__u64 id_reg;		/* KVM ID of the gating feature ID register */
	__u64 feat_shift;	/* bit offset of the feature field in id_reg */
	__u64 feat_min;		/* minimum field value for reg to be present */
};
/*
 * Fill the gating part of a struct feature_id_reg initializer: ID
 * register <id>, feature field <f>, minimum value <v> (names follow the
 * generated sysreg field definitions, e.g. ID_AA64MMFR3_EL1_TCRX_IMP).
 */
#define FEAT(id, f, v) \
.id_reg = SYS_REG(id), \
.feat_shift = id ## _ ## f ## _SHIFT, \
.feat_min = id ## _ ## f ## _ ## v
/*
 * Full struct feature_id_reg initializer: register <r> is present only
 * when field <f> of ID register <id> is at least <v>.
 */
#define REG_FEAT(r, id, f, v) \
{ \
.reg = SYS_REG(r), \
FEAT(id, f, v) \
}
/*
 * Table of feature-gated registers. The lookup in
 * check_supported_feat_reg() stops at the first matching entry, so each
 * register must appear exactly once. (The previous version listed
 * ZCR_EL2 twice; the duplicate was dead and has been dropped.)
 */
static struct feature_id_reg feat_id_regs[] = {
	REG_FEAT(TCR2_EL1, ID_AA64MMFR3_EL1, TCRX, IMP),
	REG_FEAT(TCR2_EL2, ID_AA64MMFR3_EL1, TCRX, IMP),
	REG_FEAT(PIRE0_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(PIRE0_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(PIR_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(PIR_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(POR_EL1, ID_AA64MMFR3_EL1, S1POE, IMP),
	REG_FEAT(POR_EL0, ID_AA64MMFR3_EL1, S1POE, IMP),
	REG_FEAT(POR_EL2, ID_AA64MMFR3_EL1, S1POE, IMP),
	REG_FEAT(HCRX_EL2, ID_AA64MMFR1_EL1, HCX, IMP),
	REG_FEAT(HFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HFGITR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HDFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HDFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HAFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HFGITR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HDFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HDFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(ZCR_EL2, ID_AA64PFR0_EL1, SVE, IMP),
	REG_FEAT(SCTLR2_EL1, ID_AA64MMFR3_EL1, SCTLRX, IMP),
	REG_FEAT(SCTLR2_EL2, ID_AA64MMFR3_EL1, SCTLRX, IMP),
	REG_FEAT(VDISR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
	REG_FEAT(VSESR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
	REG_FEAT(VNCR_EL2, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY),
	REG_FEAT(CNTHV_CTL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
	REG_FEAT(CNTHV_CVAL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
};
/*
 * DEMUX register presence depends on the host's CLIDR_EL1, so there is
 * no fixed set of them we can bless; filter them out of the comparison.
 */
bool filter_reg(__u64 reg)
{
	return (reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX;
}
/*
 * Return true when @reg is either not feature-gated or its gating
 * feature field meets the required minimum on this vCPU; false when the
 * gating ID register cannot be read or the feature level is too low.
 */
static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
	const struct feature_id_reg *frp;
	__u64 id_val, field;

	for (frp = feat_id_regs; frp < &feat_id_regs[ARRAY_SIZE(feat_id_regs)]; frp++) {
		if (frp->reg != reg)
			continue;

		if (__vcpu_get_reg(vcpu, frp->id_reg, &id_val) < 0)
			return false;

		/* ID register fields are 4 bits wide. */
		field = (id_val >> frp->feat_shift) & 0xf;
		return field >= frp->feat_min;
	}

	/* Not in the table: the register is unconditionally present. */
	return true;
}
/*
 * A register is supported unless it is gated by a CPU feature the vCPU
 * lacks; delegate the feature check.
 */
bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
	bool supported = check_supported_feat_reg(vcpu, reg);

	return supported;
}
/*
 * KVM is expected to reject writes to blocked registers with EPERM;
 * report whether @err matches that expectation.
 */
bool check_reject_set(int err)
{
	if (err == EPERM)
		return true;

	return false;
}
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
struct vcpu_reg_sublist *s;
int feature;
<