// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "mmu_lock.h"

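/*
 * Weak default: no extra per-vcpu hardware dirty buffer.  Architectures
 * that log dirty pages in hardware (x86's PML, for instance) override
 * this to report how many entries their hardware buffer can hold, so
 * the ring always has room to absorb a final flush of that buffer.
 */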
int __weak kvm_cpu_dirty_log_size(void)
{
        return 0;
}

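/*
 * Number of entries kept in reserve.  The soft-full threshold is
 * lowered by this much so that a vcpu can always push the entries it
 * already owes (including any arch hardware buffer) before it exits
 * to userspace for a ring reset.
 */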
u32 kvm_dirty_ring_get_rsvd_entries(void)
{
        return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}

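/*
 * Both indices are free-running u32 counters; unsigned subtraction
 * yields the number of entries pushed but not yet reset, even across
 * index wraparound.
 */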
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
        return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

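/*
 * "Soft full" leaves the reserved entries free.  Once this returns
 * true the vcpu should stop entering the guest and exit to userspace
 * with KVM_EXIT_DIRTY_RING_FULL so the ring can be harvested and reset.
 */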
bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
        return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

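/*
 * Hard full.  This should never be observed: the soft-full exit above
 * is meant to trigger a reset before the reserved slack is consumed.
 */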
static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
        return kvm_dirty_ring_used(ring) >= ring->size;
}

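/*
 * Dirty pages are only tracked in vcpu context here, so the ring to
 * push to is always the one belonging to the vcpu running on this CPU.
 * The WARN catches a caller handing in a mismatched VM.
 */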
struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

        WARN_ON_ONCE(vcpu->kvm != kvm);

        return &vcpu->dirty_ring;
}

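/*
 * Re-enable dirty logging protection for up to BITS_PER_LONG pages in
 * one go.  @slot encodes the address space id in its top 16 bits and
 * the memslot id in the bottom 16; @mask selects pages starting at
 * @offset within the memslot.  Bogus values fed back from userspace
 * are silently ignored.
 */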
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
        struct kvm_memory_slot *memslot;
        int as_id, id;

        as_id = slot >> 16;
        id = (u16)slot;

        if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
                return;

        memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

        if (!memslot || (offset + __fls(mask)) >= memslot->npages)
                return;

        KVM_MMU_LOCK(kvm);
        kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
        KVM_MMU_UNLOCK(kvm);
}

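/*
 * @size is in bytes and must cover a power-of-two number of entries,
 * which the ioctl handler is expected to have validated already: the
 * push and reset paths index the ring with "& (ring->size - 1)".
 */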
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
        ring->dirty_gfns = vzalloc(size);
        if (!ring->dirty_gfns)
                return -ENOMEM;

        ring->size = size / sizeof(struct kvm_dirty_gfn);
        ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
        ring->dirty_index = 0;
        ring->reset_index = 0;
        ring->index = index;

        return 0;
}

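/*
 * Each ring entry cycles through three states:
 *
 *   invalid (flags == 0)              -- owned by the kernel, reusable
 *   dirtied (KVM_DIRTY_GFN_F_DIRTY)   -- published, awaiting userspace
 *   harvested (KVM_DIRTY_GFN_F_RESET) -- collected by userspace,
 *                                        awaiting KVM_RESET_DIRTY_RINGS
 *
 * The helpers below implement the transitions and tests.
 */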
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
        gfn->flags = 0;
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
        gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_invalid(struct kvm_dirty_gfn *gfn)
{
        return gfn->flags == 0;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
        return gfn->flags & KVM_DIRTY_GFN_F_RESET;
}

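/*
 * Called (with kvm->slots_lock held) when userspace issues
 * KVM_RESET_DIRTY_RINGS.  Walk forward from reset_index, consuming
 * every entry userspace has marked harvested, and re-protect the pages
 * they name.  Nearby offsets within one slot are coalesced into a
 * single (offset, mask) pair: e.g. offsets 5, 6, 7 in the same slot
 * collapse into one kvm_reset_dirty_gfn() call with cur_offset == 5
 * and mask == 0b111.  Returns the number of entries reset.
 */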
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
        u32 cur_slot, next_slot;
        u64 cur_offset, next_offset;
        unsigned long mask;
        int count = 0;
        struct kvm_dirty_gfn *entry;
        bool first_round = true;

        /* This is only needed to make compilers happy */
        cur_slot = cur_offset = mask = 0;

        while (true) {
                entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

                if (!kvm_dirty_gfn_harvested(entry))
                        break;

                next_slot = READ_ONCE(entry->slot);
                next_offset = READ_ONCE(entry->offset);

                /* Update the flags to reflect that this GFN is reset */
                kvm_dirty_gfn_set_invalid(entry);

                ring->reset_index++;
                count++;
                /*
                 * Try to coalesce the reset operations when the guest is
                 * scanning pages in the same memslot.
                 */
                if (!first_round && next_slot == cur_slot) {
                        s64 delta = next_offset - cur_offset;

                        /* Forward visit: set the bit within the current mask */
                        if (delta >= 0 && delta < BITS_PER_LONG) {
                                mask |= 1ull << delta;
                                continue;
                        }

                        /* Backwards visit, careful about overflows! */
                        if (delta > -BITS_PER_LONG && delta < 0 &&
                            (mask << -delta >> -delta) == mask) {
                                cur_offset = next_offset;
                                mask = (mask << -delta) | 1;
                                continue;
                        }
                }
                /* Flush the accumulated batch and start a new one */
                kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
                cur_slot = next_slot;
                cur_offset = next_offset;
                mask = 1;
                first_round = false;
        }

        /* Flush the final batch */
        kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

        trace_kvm_dirty_ring_reset(ring);

        return count;
}

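/*
 * Push one (slot, offset) pair into the ring.  This runs in vcpu
 * context only, so there is no concurrent kernel producer; the
 * smp_wmb() pairs with the read barrier userspace is expected to issue
 * between reading the DIRTY flag and reading slot/offset.
 */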
void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
{
        struct kvm_dirty_gfn *entry;

        /* It should never get full */
        WARN_ON_ONCE(kvm_dirty_ring_full(ring));

        entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

        entry->slot = slot;
        entry->offset = offset;
        /*
         * Make sure the data is filled in before we publish this to
         * the userspace program.  There's no paired kernel-side reader.
         */
        smp_wmb();
        kvm_dirty_gfn_set_dirtied(entry);
        ring->dirty_index++;
        trace_kvm_dirty_ring_push(ring, slot, offset);
}

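/*
 * Back the mmap() of the vcpu fd's dirty ring region (at
 * KVM_DIRTY_LOG_PAGE_OFFSET) with the vmalloc'ed ring pages.
 * @offset is in pages, relative to the start of the ring.
 */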
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
        return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
        vfree(ring->dirty_gfns);
        ring->dirty_gfns = NULL;
}