#ifndef KVM_DIRTY_RING_H
#define KVM_DIRTY_RING_H

#include <linux/kvm.h>

/**
 * kvm_dirty_ring: KVM internal dirty ring structure
 *
 * @dirty_index: free running counter that points to the next slot in
 *               dirty_ring->dirty_gfns, where a new dirty page should go
 * @reset_index: free running counter that points to the next dirty page
 *               in dirty_ring->dirty_gfns for which dirty trap needs to
 *               be reenabled
 * @size:        size of the compact list, dirty_ring->dirty_gfns
 * @soft_limit:  when the number of dirty pages in the list reaches this
 *               limit, vcpu that owns this ring should exit to userspace
 *               to allow userspace to harvest all the dirty pages
 * @dirty_gfns:  the array to keep the dirty gfns
 * @index:       index of this dirty ring
 */
struct kvm_dirty_ring {
	u32 dirty_index;
	u32 reset_index;
	u32 size;
	u32 soft_limit;
	struct kvm_dirty_gfn *dirty_gfns;
	int index;
};
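
/*
 * Illustrative sketch (not part of the API): since @dirty_index and
 * @reset_index are free-running u32 counters and @size is a power of 2,
 * entries are addressed by masking with (size - 1) and the occupancy is
 * the unsigned difference of the two counters.  A reader could compute:
 *
 *	u32 used = ring->dirty_index - ring->reset_index;
 *	struct kvm_dirty_gfn *next =
 *		&ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];
 *	bool soft_full = (used >= ring->soft_limit);
 *
 * The authoritative versions of these checks live in virt/kvm/dirty_ring.c.
 */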

#if (KVM_DIRTY_LOG_PAGE_OFFSET == 0)
/*
 * If KVM_DIRTY_LOG_PAGE_OFFSET is not defined by the arch, kvm_dirty_ring.o
 * is not built either, so provide these nop stubs for such architectures.
 */
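/*
 * For reference, an architecture opts in by defining the page offset in
 * its uapi header; e.g. x86 (arch/x86/include/uapi/asm/kvm.h) uses:
 *
 *	#define KVM_DIRTY_LOG_PAGE_OFFSET 64
 *
 * Any non-zero value selects the real declarations in the #else branch.
 */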
static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return 0;
}

static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
				       int index, u32 size)
{
	return 0;
}

static inline struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm)
{
	return NULL;
}

static inline int kvm_dirty_ring_reset(struct kvm *kvm,
				       struct kvm_dirty_ring *ring)
{
	return 0;
}

static inline void kvm_dirty_ring_push(struct kvm_dirty_ring *ring,
				       u32 slot, u64 offset)
{
}

static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
						   u32 offset)
{
	return NULL;
}

static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
}

static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return true;
}

#else /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */

u32 kvm_dirty_ring_get_rsvd_entries(void);
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm);

/*
 * Called with kvm->slots_lock held; returns the number of
 * processed pages.
 */
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
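
/*
 * Sketch of a caller (illustrative, simplified from the handler of the
 * KVM_RESET_DIRTY_RINGS ioctl in virt/kvm/kvm_main.c): take the lock,
 * reset each vcpu's ring, and accumulate the count for userspace:
 *
 *	mutex_lock(&kvm->slots_lock);
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		cleared += kvm_dirty_ring_reset(kvm, &vcpu->dirty_ring);
 *	mutex_unlock(&kvm->slots_lock);
 *
 * where @cleared is the number of pages whose dirty traps were re-armed.
 */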

/*
 * Pushes a dirty record for @slot/@offset into the ring.  The push itself
 * cannot fail or wait; the vcpu is expected to check
 * kvm_dirty_ring_soft_full() and exit to userspace to have the ring
 * harvested before the ring can get completely full.
 */
void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset);
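
/*
 * Illustrative use (simplified from mark_page_dirty_in_slot() in
 * virt/kvm/kvm_main.c): when a guest page is dirtied with the ring
 * enabled, the vcpu pushes a compound slot id and the gfn's offset
 * within that memslot:
 *
 *	u32 slot_id = (memslot->as_id << 16) | memslot->id;
 *
 *	kvm_dirty_ring_push(kvm_dirty_ring_get(kvm),
 *			    slot_id, gfn - memslot->base_gfn);
 */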

/* for use in vm_operations_struct */
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
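
/*
 * Sketch of the intended caller (simplified from the vcpu mmap fault
 * handler in virt/kvm/kvm_main.c): ring pages are mapped to userspace at
 * KVM_DIRTY_LOG_PAGE_OFFSET so the dirty gfns can be harvested directly:
 *
 *	page = kvm_dirty_ring_get_page(&vcpu->dirty_ring,
 *				       vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
 *	get_page(page);
 *	vmf->page = page;
 */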

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring);

#endif /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */

#endif	/* KVM_DIRTY_RING_H */