// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <iterator>
#include "common/assert.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
#include "core/mmio.h"

namespace Kernel {

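// Returns a human-readable name for a MemoryState value, used when logging the memory layout.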
static const char* GetMemoryStateName(MemoryState state) {
    static const char* names[] = {
        "Free",   "Reserved",   "IO",      "Static", "Code",      "Private",
        "Shared", "Continuous", "Aliased", "Alias",  "AliasCode", "Locked",
    };

    return names[(int)state];
}

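// Two adjacent VMAs can be merged only when their permissions, memory state and type match and,
// for backed or MMIO regions, when their backing pointers/physical addresses are contiguous.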
bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
    ASSERT(base + size == next.base);
    if (permissions != next.permissions || meminfo_state != next.meminfo_state ||
        type != next.type) {
        return false;
    }
    if (type == VMAType::BackingMemory &&
        backing_memory.GetPtr() + size != next.backing_memory.GetPtr()) {
        return false;
    }
    if (type == VMAType::MMIO && paddr + size != next.paddr) {
        return false;
    }
    return true;
}

VMManager::VMManager(Memory::MemorySystem& memory)
    : page_table(std::make_shared<Memory::PageTable>()), memory(memory) {
    Reset();
}

VMManager::~VMManager() = default;

void VMManager::Reset() {
    ASSERT(!is_locked);

    vma_map.clear();

    // Initialize the map with a single free region covering the entire managed space.
    VirtualMemoryArea initial_vma;
    initial_vma.size = MAX_ADDRESS;
    vma_map.emplace(initial_vma.base, initial_vma);

    page_table->Clear();

    UpdatePageTableForVMA(initial_vma);
}

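// Returns the VMA containing the given address, or vma_map.end() if the address lies outside the
// managed range.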
VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
    if (target >= MAX_ADDRESS) {
        return vma_map.end();
    } else {
        return std::prev(vma_map.upper_bound(target));
    }
}

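// Maps the block at a free address at or above `base`, failing if it cannot be placed within
// [base, base + region_size). Returns the address the block was mapped at.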
ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size, MemoryRef memory,
                                                   u32 size, MemoryState state) {
    ASSERT(!is_locked);

    // Find the first Free VMA.
    VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
        if (vma.second.type != VMAType::Free)
            return false;

        VAddr vma_end = vma.second.base + vma.second.size;
        return vma_end > base && vma_end >= base + size;
    });

    // Do not try to allocate the block if there are no available addresses within the desired
    // region.
    if (vma_handle == vma_map.end()) {
        return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
                          ErrorSummary::OutOfResource, ErrorLevel::Permanent);
    }

    VAddr target = std::max(base, vma_handle->second.base);
    if (target + size > base + region_size) {
        return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
                          ErrorSummary::OutOfResource, ErrorLevel::Permanent);
    }

    auto result = MapBackingMemory(target, memory, size, state);

    if (result.Failed())
        return result.Code();

    return MakeResult<VAddr>(target);
}

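// Maps the given backing memory block at the exact target address; the target range must
// currently be free.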
ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, MemoryRef memory,
                                                            u32 size, MemoryState state) {
    ASSERT(!is_locked);
    ASSERT(memory.GetPtr() != nullptr);

    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    final_vma.type = VMAType::BackingMemory;
    final_vma.permissions = VMAPermission::ReadWrite;
    final_vma.meminfo_state = state;
    final_vma.backing_memory = memory;
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size,
                                                   MemoryState state,
                                                   Memory::MMIORegionPointer mmio_handler) {
    ASSERT(!is_locked);

    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    final_vma.type = VMAType::MMIO;
    final_vma.permissions = VMAPermission::ReadWrite;
    final_vma.meminfo_state = state;
    final_vma.paddr = paddr;
    final_vma.mmio_handler = mmio_handler;
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

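// Changes the state and permissions of the given range. Every VMA in the range must already have
// the expected state and at least the expected permissions; the change is applied in a second
// pass once validation succeeds. While the manager is locked the request is silently ignored.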
ResultCode VMManager::ChangeMemoryState(VAddr target, u32 size, MemoryState expected_state,
                                        VMAPermission expected_perms, MemoryState new_state,
                                        VMAPermission new_perms) {
    if (is_locked) {
        return RESULT_SUCCESS;
    }

    VAddr target_end = target + size;
    VMAIter begin_vma = StripIterConstness(FindVMA(target));
    VMAIter i_end = vma_map.lower_bound(target_end);

    if (begin_vma == vma_map.end())
        return ERR_INVALID_ADDRESS;

    for (auto i = begin_vma; i != i_end; ++i) {
        auto& vma = i->second;
        if (vma.meminfo_state != expected_state) {
            return ERR_INVALID_ADDRESS_STATE;
        }
        u32 perms = static_cast<u32>(expected_perms);
        if ((static_cast<u32>(vma.permissions) & perms) != perms) {
            return ERR_INVALID_ADDRESS_STATE;
        }
    }

    CASCADE_RESULT(auto vma, CarveVMARange(target, size));

    const VMAIter end = vma_map.end();
    // The comparison against the end of the range must be done using addresses since VMAs can be
    // merged during this process, causing invalidation of the iterators.
    while (vma != end && vma->second.base < target_end) {
        vma->second.permissions = new_perms;
        vma->second.meminfo_state = new_state;
        UpdatePageTableForVMA(vma->second);
        vma = std::next(MergeAdjacent(vma));
    }

    return RESULT_SUCCESS;
}

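// Returns the given VMA to the Free state, drops its backing, updates the page table and merges
// it with any adjacent free VMAs.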
VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
    ASSERT(!is_locked);

    VirtualMemoryArea& vma = vma_handle->second;
    vma.type = VMAType::Free;
    vma.permissions = VMAPermission::None;
    vma.meminfo_state = MemoryState::Free;

    vma.backing_memory = nullptr;
    vma.paddr = 0;

    UpdatePageTableForVMA(vma);

    return MergeAdjacent(vma_handle);
}

ResultCode VMManager::UnmapRange(VAddr target, u32 size) {
    ASSERT(!is_locked);

    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
    const VAddr target_end = target + size;

    const VMAIter end = vma_map.end();
    // The comparison against the end of the range must be done using addresses since VMAs can be
    // merged during this process, causing invalidation of the iterators.
    while (vma != end && vma->second.base < target_end) {
        vma = std::next(Unmap(vma));
    }

    ASSERT(FindVMA(target)->second.size >= size);
    return RESULT_SUCCESS;
}

VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
    ASSERT(!is_locked);

    VMAIter iter = StripIterConstness(vma_handle);

    VirtualMemoryArea& vma = iter->second;
    vma.permissions = new_perms;
    UpdatePageTableForVMA(vma);

    return MergeAdjacent(iter);
}

ResultCode VMManager::ReprotectRange(VAddr target, u32 size, VMAPermission new_perms) {
    ASSERT(!is_locked);

    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
    const VAddr target_end = target + size;

    const VMAIter end = vma_map.end();
    // The comparison against the end of the range must be done using addresses since VMAs can be
    // merged during this process, causing invalidation of the iterators.
    while (vma != end && vma->second.base < target_end) {
        vma = std::next(StripIterConstness(Reprotect(vma, new_perms)));
    }

    return RESULT_SUCCESS;
}

void VMManager::LogLayout(Log::Level log_level) const {
    for (const auto& p : vma_map) {
        const VirtualMemoryArea& vma = p.second;
        LOG_GENERIC(::Log::Class::Kernel, log_level, "{:08X} - {:08X}  size: {:8X} {}{}{} {}",
                    vma.base, vma.base + vma.size, vma.size,
                    (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-',
                    (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-',
                    (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-',
                    GetMemoryStateName(vma.meminfo_state));
    }
}

void VMManager::Unlock() {
    is_locked = false;
}

VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
    // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
    // non-const access to its container.
    return vma_map.erase(iter, iter); // Erases an empty range of elements
}

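// Carves a VMA of exactly `size` bytes starting at `base` out of a single free VMA, splitting it
// at the start and/or end of the requested range as needed, and returns an iterator to it.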
ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: {:#010X}", base);

    VMAIter vma_handle = StripIterConstness(FindVMA(base));
    if (vma_handle == vma_map.end()) {
        // Target address is outside the range managed by the kernel
        return ERR_INVALID_ADDRESS;
    }

    const VirtualMemoryArea& vma = vma_handle->second;
    if (vma.type != VMAType::Free) {
        // Region is already allocated
        return ERR_INVALID_ADDRESS_STATE;
    }

    const VAddr start_in_vma = base - vma.base;
    const VAddr end_in_vma = start_in_vma + size;

    if (end_in_vma > vma.size) {
        // Requested allocation doesn't fit inside VMA
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (end_in_vma != vma.size) {
        // Split VMA at the end of the allocated region
        SplitVMA(vma_handle, end_in_vma);
    }
    if (start_in_vma != 0) {
        // Split VMA at the start of the allocated region
        vma_handle = SplitVMA(vma_handle, start_in_vma);
    }

    return MakeResult<VMAIter>(vma_handle);
}

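// Splits VMAs at the boundaries of [target, target + size) so the range is covered by whole VMAs
// and returns an iterator to the first of them. Every VMA in the range must already be mapped.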
ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: {:#010X}", target);

    const VAddr target_end = target + size;
    ASSERT(target_end >= target);
    ASSERT(target_end <= MAX_ADDRESS);
    ASSERT(size > 0);

    VMAIter begin_vma = StripIterConstness(FindVMA(target));
    const VMAIter i_end = vma_map.lower_bound(target_end);
    if (std::any_of(begin_vma, i_end,
                    [](const auto& entry) { return entry.second.type == VMAType::Free; })) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (target != begin_vma->second.base) {
        begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
    }

    VMAIter end_vma = StripIterConstness(FindVMA(target_end));
    if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
        end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
    }

    return MakeResult<VMAIter>(begin_vma);
}

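// Splits the VMA at `offset_in_vma` bytes from its start into two adjacent VMAs and returns an
// iterator to the new, higher half.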
VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
    VirtualMemoryArea& old_vma = vma_handle->second;
    VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA

    // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
    // a bug. This restriction might be removed later.
    ASSERT(offset_in_vma < old_vma.size);
    ASSERT(offset_in_vma > 0);

    old_vma.size = offset_in_vma;
    new_vma.base += offset_in_vma;
    new_vma.size -= offset_in_vma;

    switch (new_vma.type) {
    case VMAType::Free:
        break;
    case VMAType::BackingMemory:
        new_vma.backing_memory += offset_in_vma;
        break;
    case VMAType::MMIO:
        new_vma.paddr += offset_in_vma;
        break;
    }

    ASSERT(old_vma.CanBeMergedWith(new_vma));

    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
}

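// Merges the VMA at `iter` with its immediate neighbours where CanBeMergedWith allows it and
// returns an iterator to the (possibly enlarged) VMA.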
VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
    const VMAIter next_vma = std::next(iter);
    if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
        iter->second.size += next_vma->second.size;
        vma_map.erase(next_vma);
    }

    if (iter != vma_map.begin()) {
        VMAIter prev_vma = std::prev(iter);
        if (prev_vma->second.CanBeMergedWith(iter->second)) {
            prev_vma->second.size += iter->second.size;
            vma_map.erase(iter);
            iter = prev_vma;
        }
    }

    return iter;
}

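// Rebuilds the page table entries covering the VMA so that CPU accesses reflect its current type.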
void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
    switch (vma.type) {
    case VMAType::Free:
        memory.UnmapRegion(*page_table, vma.base, vma.size);
        break;
    case VMAType::BackingMemory:
        memory.MapMemoryRegion(*page_table, vma.base, vma.size, vma.backing_memory);
        break;
    case VMAType::MMIO:
        memory.MapIoRegion(*page_table, vma.base, vma.size, vma.mmio_handler);
        break;
    }
}

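// Collects the backing memory blocks that cover [address, address + size) as (pointer, size)
// pairs; fails if any part of the range is not backed by memory.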
ResultVal<std::vector<std::pair<MemoryRef, u32>>> VMManager::GetBackingBlocksForRange(VAddr address,
                                                                                      u32 size) {
    std::vector<std::pair<MemoryRef, u32>> backing_blocks;
    VAddr interval_target = address;
    while (interval_target != address + size) {
        auto vma = FindVMA(interval_target);
        if (vma->second.type != VMAType::BackingMemory) {
            LOG_ERROR(Kernel, "Trying to use already freed memory");
            return ERR_INVALID_ADDRESS_STATE;
        }

        VAddr interval_end = std::min(address + size, vma->second.base + vma->second.size);
        u32 interval_size = interval_end - interval_target;
        auto backing_memory = vma->second.backing_memory + (interval_target - vma->second.base);
        backing_blocks.push_back({backing_memory, interval_size});

        interval_target += interval_size;
    }
    return MakeResult(backing_blocks);
}

} // namespace Kernel