// SPDX-License-Identifier: GPL-2.0
/*
* Hosting Protected Virtual Machines
*
* Copyright IBM Corp. 2019, 2020
* Author(s): Janosch Frank <frankja@linux.ibm.com>
*/
#include <linux/export.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"
/**
 * kvm_s390_pv_is_protected() - check if a VM is protected
 * @kvm: the VM to check
 *
 * A VM counts as protected while it holds an Ultravisor handle.
 *
 * Context: needs to be called with kvm->lock held.
 * Return: true if the VM is protected, false otherwise.
 */
bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->lock);
	return kvm_s390_pv_get_handle(kvm) != 0;
}
EXPORT_SYMBOL_GPL(kvm_s390_pv_is_protected);
/**
 * kvm_s390_pv_cpu_is_protected() - check if a vCPU is protected
 * @vcpu: the vCPU to check
 *
 * A vCPU counts as protected while it holds an Ultravisor CPU handle.
 *
 * Context: needs to be called with vcpu->mutex held.
 * Return: true if the vCPU is protected, false otherwise.
 */
bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
	lockdep_assert_held(&vcpu->mutex);
	return kvm_s390_pv_cpu_get_handle(vcpu) != 0;
}
EXPORT_SYMBOL_GPL(kvm_s390_pv_cpu_is_protected);
/**
* kvm_s390_pv_make_secure() - make one guest page secure
* @kvm: the guest
* @gaddr: the guest address that needs to be made secure
* @uvcb: the UVCB specifying which operation needs to be performed
*
* Context: needs to be called with kvm->srcu held.
* Return: 0 on success, < 0 in case of error.
*/
int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb)
{
	unsigned long hva;

	lockdep_assert_held(&kvm->srcu);

	/* Resolve the guest address to a host virtual address first. */
	hva = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(hva))
		return -EFAULT;
	/* Let the UV perform the requested operation on the backing page. */
	return make_hva_secure(kvm->mm, hva, uvcb);
}
/**
 * kvm_s390_pv_convert_to_secure() - convert one guest page to secure storage
 * @kvm: the guest
 * @gaddr: the guest address of the page to convert
 *
 * Builds a convert-to-secure-storage UVCB for the given guest address and
 * hands it to kvm_s390_pv_make_secure().
 *
 * Context: needs to be called with kvm->srcu held.
 * Return: 0 on success, < 0 in case of error.
 */
int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.gaddr = gaddr,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};

	return kvm_s390_pv_make_secure(kvm, gaddr, &uvcb);
}
/**
 * kvm_s390_pv_destroy_page() - Destroy a guest page.
 * @kvm: the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: may sleep.
 * Return: 0 on success (also when the page cannot be resolved), otherwise
 *         the error code from __kvm_s390_pv_destroy_page().
 */
int kvm_s390_pv_destroy_page(struct kvm *kvm, unsigned long gaddr)
{
	struct page *page;
	int ret = 0;

	mmap_read_lock(kvm->mm);
	/* Only pages that are actually mapped can be destroyed. */
	page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
	if (page)
		ret = __kvm_s390_pv_destroy_page(page);
	/* Drops the reference taken by gfn_to_page(); tolerates NULL. */
	kvm_release_page_clean(page);
	mmap_read_unlock(kvm->mm);
	return ret;
}
/**
 * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
 * be destroyed
 *
 * @list: list head for the list of leftover VMs
 * @old_gmap_table: the gmap table of the leftover protected VM
 * @handle: the handle of the leftover protected VM
 * @stor_var: pointer to the variable storage of the leftover protected VM
 * @stor_base: address of the base storage of the leftover protected VM
 *
 * Represents a protected VM that is still registered with the Ultravisor,
 * but which does not correspond any longer to an active KVM VM. It should
 * be destroyed at some point later, either asynchronously or when the
 * process terminates.
 *
 * NOTE(review): @handle, @stor_var and @stor_base appear to mirror the
 * like-named fields of kvm->arch.pv (see kvm_s390_clear_pv_state()) —
 * confirm against the code that populates this struct.
 */
struct pv_vm_to_be_destroyed {
	struct list_head list;
	unsigned long old_gmap_table;
	u64 handle;
	void *stor_var;
	unsigned long stor_base;
};
/* Reset all protected-VM bookkeeping in kvm->arch.pv to its empty state. */
static void kvm_s390_clear_pv_state(struct kvm *kvm)
{
	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = 0;
	kvm->arch.pv.guest_len = 0;
	kvm->arch.pv.handle = 0;
}
/**
 * kvm_s390_pv_destroy_cpu() - Tear down the Ultravisor state of one vCPU.
 * @vcpu: the vCPU whose secure CPU should be destroyed
 * @rc: out: return code of the Ultravisor call
 * @rrc: out: return reason code of the Ultravisor call
 *
 * Asks the Ultravisor to destroy the secure CPU, then frees the associated
 * storage and clears all protected-virtualization state from the SIE block.
 *
 * Return: 0 on success (or if the vCPU was never protected), EIO if the
 *         Ultravisor call failed.
 */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc;

	/* Nothing to do if this vCPU was never made protected. */
	if (!kvm_s390_pv_cpu_get_handle(vcpu))
		return 0;

	cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
	vcpu->vcpu_id, *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

	/*
	 * Intended memory leak for something that should never happen:
	 * the CPU base storage is only freed if the UV call succeeded.
	 */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
	get_order(uv_info.guest_cpu_stor_len));

	/* The SIDA is freed unconditionally, then all PV state is wiped. */
	free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	/*
	 * NOTE(review): returns positive EIO on failure, unlike the usual
	 * negative errno convention — confirm callers expect this.
	 */
	return cc ? EIO : 0;
}
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
struct uv_cb_csc uvcb =