// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_net.h>
#include <linux/virtio_pci_admin.h>
#include <linux/anon_inodes.h>

#include "common.h"

/* Device specification max parts size */
#define MAX_LOAD_SIZE (BIT_ULL(BITS_PER_TYPE \
		(((struct virtio_admin_cmd_dev_parts_metadata_result *)0)->parts_size.size)) - 1)

/* Initial target buffer size */
#define VIRTIOVF_TARGET_INITIAL_BUF_SIZE SZ_1M

static int
virtiovf_read_device_context_chunk(struct virtiovf_migration_file *migf,
				   u32 ctx_size);
static struct page *
virtiovf_get_migration_page(struct virtiovf_data_buffer *buf,
			    unsigned long offset)
{
	unsigned long cur_offset = 0;
	struct scatterlist *sg;
	unsigned int i;

	/* All accesses are sequential */
	if (offset < buf->last_offset || !buf->last_offset_sg) {
		buf->last_offset = 0;
		buf->last_offset_sg = buf->table.sgt.sgl;
		buf->sg_last_entry = 0;
	}

	cur_offset = buf->last_offset;

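	/* Walk forward from the cached entry until the range covering @offset is found */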
	for_each_sg(buf->last_offset_sg, sg,
		    buf->table.sgt.orig_nents - buf->sg_last_entry, i) {
		if (offset < sg->length + cur_offset) {
			buf->last_offset_sg = sg;
			buf->sg_last_entry += i;
			buf->last_offset = cur_offset;
			return sg_page(sg) + (offset - cur_offset) / PAGE_SIZE;
		}
		cur_offset += sg->length;
	}
	return NULL;
}
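
/*
 * Extend the migration data buffer with @npages additional pages of backing
 * memory.
 */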
static int virtiovf_add_migration_pages(struct virtiovf_data_buffer *buf,
					unsigned int npages)
{
	unsigned int to_alloc = npages;
	struct page **page_list;