/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__

struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;

enum kvm_mr_change;

#include <linux/types.h>

#include <asm/kvm_types.h>

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */
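/*
 * Illustrative sketch, not part of this header: a gpa and the gfn that
 * contains it differ only by the offset within the page, so conversions
 * are shifts by PAGE_SHIFT, as done by the gpa_to_gfn() and gfn_to_gpa()
 * helpers in <linux/kvm_host.h> (base is just a placeholder name here):
 *
 *	gfn_t gfn  = gpa >> PAGE_SHIFT;
 *	gpa_t base = (gpa_t)gfn << PAGE_SHIFT;
 */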

typedef unsigned long gva_t;
typedef u64           gpa_t;
typedef u64           gfn_t;

#define GPA_INVALID (~(gpa_t)0)

typedef unsigned long hva_t;
typedef u64           hpa_t;
typedef u64           hfn_t;

typedef hfn_t kvm_pfn_t;
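
/*
 * Caches the gfn-to-hva translation for a range of guest memory so that
 * repeated accesses through helpers such as kvm_read_guest_cached() and
 * kvm_write_guest_cached() can skip the memslot lookup; generation is
 * checked against the memslots generation to invalidate stale entries.
 */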
struct gfn_to_hva_cache {
	u64 generation;
	gpa_t gpa;
	unsigned long hva;
	unsigned long len;
	struct kvm_memory_slot *memslot;
};
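
/*
 * Caches a gfn-to-pfn translation; generation is compared against the
 * memslots generation to detect stale entries, and dirty tracks whether
 * the cached page needs to be marked dirty when the cache is released.
 */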
struct gfn_to_pfn_cache {
	u64 generation;
	gfn_t gfn;
	kvm_pfn_t pfn;
	bool dirty;
};

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers.  Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks.  Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	gfp_t gfp_zero;
	struct kmem_cache *kmem_cache;
	void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
};
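
/*
 * Illustrative usage sketch, not part of this header, assuming the cache
 * helpers declared in <linux/kvm_host.h> (kvm_mmu_topup_memory_cache() and
 * kvm_mmu_memory_cache_alloc()); mc and min_objects_needed are placeholders.
 * An architecture tops up the cache before taking the MMU lock, then
 * allocates from it with the lock held:
 *
 *	r = kvm_mmu_topup_memory_cache(mc, min_objects_needed);
 *	if (r)
 *		return r;
 *	... take mmu_lock ...
 *	obj = kvm_mmu_memory_cache_alloc(mc);
 *
 * The topup may sleep and trigger reclaim; the subsequent allocation is
 * expected to succeed without doing either, which is the point of
 * preallocating.
 */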
#endif

#endif /* __KVM_TYPES_H__ */