// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <memory>
#include <boost/serialization/array.hpp>
#include <boost/serialization/bitset.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/logging/log.h"
#include "common/serialization/boost_vector.hpp"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"

SERIALIZE_EXPORT_IMPL(Kernel::Process)
SERIALIZE_EXPORT_IMPL(Kernel::CodeSet)

namespace Kernel {

template <class Archive>
void Process::serialize(Archive& ar, const unsigned int file_version) {
    ar& boost::serialization::base_object<Object>(*this);
    ar& handle_table;
    ar& codeset; // TODO: Replace with apploader reference
    ar& resource_limit;
    ar& svc_access_mask;
    ar& handle_table_size;
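    // Note: address_mappings is a boost::container::static_vector, which Boost.Serialization does
    // not handle directly; the cast below treats it as the underlying boost::container::vector so
    // the support pulled in from common/serialization/boost_vector.hpp applies.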
    ar&(boost::container::vector<AddressMapping, boost::container::dtl::static_storage_allocator<
            AddressMapping, 8, 0, true>>&)address_mappings;
    ar& flags.raw;
    ar& kernel_version;
    ar& ideal_processor;
    ar& status;
    ar& process_id;
    ar& vm_manager;
    ar& memory_used;
    ar& memory_region;
    ar& tls_slots;
}

SERIALIZE_IMPL(Process)

std::shared_ptr<CodeSet> KernelSystem::CreateCodeSet(std::string name, u64 program_id) {
    auto codeset{std::make_shared<CodeSet>(*this)};

    codeset->name = std::move(name);
    codeset->program_id = program_id;

    return codeset;
}

CodeSet::CodeSet(KernelSystem& kernel) : Object(kernel) {}
CodeSet::~CodeSet() {}

std::shared_ptr<Process> KernelSystem::CreateProcess(std::shared_ptr<CodeSet> code_set) {
    auto process{std::make_shared<Process>(*this)};

    process->codeset = std::move(code_set);
    process->flags.raw = 0;
    process->flags.memory_region.Assign(MemoryRegion::APPLICATION);
    process->status = ProcessStatus::Created;
    process->process_id = ++next_process_id;

    process_list.push_back(process);
    return process;
}

void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
    for (std::size_t i = 0; i < len; ++i) {
        u32 descriptor = kernel_caps[i];
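        // The descriptor kind is encoded in the upper bits: the number of leading 1 bits selects
        // the descriptor type, and the hex comments below give the mask of the remaining value
        // bits.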
        u32 type = descriptor >> 20;

        if (descriptor == 0xFFFFFFFF) {
            // Unused descriptor entry
            continue;
        } else if ((type & 0xF00) == 0xE00) { // 0x0FFF
            // Allowed interrupts list
            LOG_WARNING(Loader, "ExHeader allowed interrupts list ignored");
        } else if ((type & 0xF80) == 0xF00) { // 0x07FF
            // Allowed syscalls mask
            unsigned int index = ((descriptor >> 24) & 7) * 24;
            u32 bits = descriptor & 0xFFFFFF;

            while (bits && index < svc_access_mask.size()) {
                svc_access_mask.set(index, bits & 1);
                ++index;
                bits >>= 1;
            }
        } else if ((type & 0xFF0) == 0xFE0) { // 0x00FF
            // Handle table size
            handle_table_size = descriptor & 0x3FF;
        } else if ((type & 0xFF8) == 0xFF0) { // 0x007F
            // Misc. flags
            flags.raw = descriptor & 0xFFFF;
        } else if ((type & 0xFFE) == 0xFF8) { // 0x001F
            // Mapped memory range
            if (i + 1 >= len || ((kernel_caps[i + 1] >> 20) & 0xFFE) != 0xFF8) {
                LOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored.");
                continue;
            }
            u32 end_desc = kernel_caps[i + 1];
            ++i; // Skip over the second descriptor on the next iteration

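            // The descriptors store page indices; shift by 12 (4 KiB pages) to get addresses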
            AddressMapping mapping;
            mapping.address = descriptor << 12;
            VAddr end_address = end_desc << 12;

            if (mapping.address < end_address) {
                mapping.size = end_address - mapping.address;
            } else {
                mapping.size = 0;
            }

            mapping.read_only = (descriptor & (1 << 20)) != 0;
            mapping.unk_flag = (end_desc & (1 << 20)) != 0;

            address_mappings.push_back(mapping);
        } else if ((type & 0xFFF) == 0xFFE) { // 0x000F
            // Mapped memory page
            AddressMapping mapping;
            mapping.address = descriptor << 12;
            mapping.size = Memory::PAGE_SIZE;
            mapping.read_only = false;
            mapping.unk_flag = false;

            address_mappings.push_back(mapping);
        } else if ((type & 0xFE0) == 0xFC0) { // 0x01FF
            // Kernel version
            kernel_version = descriptor & 0xFFFF;

            int minor = kernel_version & 0xFF;
            int major = (kernel_version >> 8) & 0xFF;
            LOG_INFO(Loader, "ExHeader kernel version: {}.{}", major, minor);
        } else {
            LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x{:08X}", descriptor);
        }
    }
}

void Process::Set3dsxKernelCaps() {
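    // 3DSX homebrew has no exheader to restrict it, so grant access to every SVC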
    svc_access_mask.set();

    address_mappings = {
        {0x1FF50000, 0x8000, true},    // part of DSP RAM
        {0x1FF70000, 0x8000, true},    // part of DSP RAM
        {0x1F000000, 0x600000, false}, // entire VRAM
    };

    // Similar to Rosalina, we set kernel version to a recent one.
    // This is 11.2.0, to be consistent with core/hle/kernel/config_mem.cpp
    // TODO: refactor kernel version out so it is configurable and consistent
    // among all relevant places.
    kernel_version = 0x234;
}

void Process::Run(s32 main_thread_priority, u32 stack_size) {
    memory_region = kernel.GetMemoryRegion(flags.memory_region);

    auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
                          MemoryState memory_state) {
        HeapAllocate(segment.addr, segment.size, permissions, memory_state, true);
        kernel.memory.WriteBlock(*this, segment.addr, codeset->memory.data() + segment.offset,
                                 segment.size);
    };

    // Map CodeSet segments
    MapSegment(codeset->CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
    MapSegment(codeset->RODataSegment(), VMAPermission::Read, MemoryState::Code);
    MapSegment(codeset->DataSegment(), VMAPermission::ReadWrite, MemoryState::Private);

    // Allocate and map stack
    HeapAllocate(Memory::HEAP_VADDR_END - stack_size, stack_size, VMAPermission::ReadWrite,
                 MemoryState::Locked, true);

    // Map special address mappings
    kernel.MapSharedPages(vm_manager);
    for (const auto& mapping : address_mappings) {
        kernel.HandleSpecialMapping(vm_manager, mapping);
    }

    status = ProcessStatus::Running;

    vm_manager.LogLayout(Log::Level::Debug);
    Kernel::SetupMainThread(kernel, codeset->entrypoint, main_thread_priority, SharedFrom(this));
}

VAddr Process::GetLinearHeapAreaAddress() const {
    // Starting from system version 8.0.0 a new linear heap layout is supported to allow usage of
    // the extra RAM in the n3DS.
    return kernel_version < 0x22C ? Memory::LINEAR_HEAP_VADDR : Memory::NEW_LINEAR_HEAP_VADDR;
}

VAddr Process::GetLinearHeapBase() const {
    return GetLinearHeapAreaAddress() + memory_region->base;
}

VAddr Process::GetLinearHeapLimit() const {
    return GetLinearHeapBase() + memory_region->size;
}

ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms,
                                       MemoryState memory_state, bool skip_range_check) {
    LOG_DEBUG(Kernel, "Allocate heap target={:08X}, size={:08X}", target, size);
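    // Process::Run passes skip_range_check=true when mapping CodeSet segments and the stack,
    // since code segments lie outside the [HEAP_VADDR, HEAP_VADDR_END) range checked below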
    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
        target + size < target) {
        if (!skip_range_check) {
            LOG_ERROR(Kernel, "Invalid heap address");
            return ERR_INVALID_ADDRESS;
        }
    }

    auto vma = vm_manager.FindVMA(target);
    if (vma->second.type != VMAType::Free || vma->second.base + vma->second.size < target + size) {
        LOG_ERROR(Kernel, "Trying to allocate already allocated memory");
        return ERR_INVALID_ADDRESS_STATE;
    }

    auto allocated_fcram = memory_region->HeapAllocate(size);
    if (allocated_fcram.empty()) {
        LOG_ERROR(Kernel, "Not enough space");
        return ERR_OUT_OF_HEAP_MEMORY;
    }

    // Maps heap block by block
    VAddr interval_target = target;
    for (const auto& interval : allocated_fcram) {
        u32 interval_size = interval.upper() - interval.lower();
        LOG_DEBUG(Kernel, "Allocated FCRAM region lower={:08X}, upper={:08X}", interval.lower(),
                  interval.upper());
        std::fill(kernel.memory.GetFCRAMPointer(interval.lower()),
                  kernel.memory.GetFCRAMPointer(interval.upper()), 0);
        auto vma = vm_manager.MapBackingMemory(interval_target,
                                               kernel.memory.GetFCRAMRef(interval.lower()),
                                               interval_size, memory_state);
        ASSERT(vma.Succeeded());
        vm_manager.Reprotect(vma.Unwrap(), perms);
        interval_target += interval_size;
    }

    memory_used += size;
    resource_limit->current_commit += size;

    return MakeResult<VAddr>(target);
}

ResultCode Process::HeapFree(VAddr target, u32 size) {
    LOG_DEBUG(Kernel, "Free heap target={:08X}, size={:08X}", target, size);
    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
        target + size < target) {
        LOG_ERROR(Kernel, "Invalid heap address");
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0) {
        return RESULT_SUCCESS;
    }

    // Free heaps block by block
    CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(target, size));
    for (const auto [backing_memory, block_size] : backing_blocks) {
        memory_region->Free(kernel.memory.GetFCRAMOffset(backing_memory.GetPtr()), block_size);
    }

    ResultCode result = vm_manager.UnmapRange(target, size);
    ASSERT(result.IsSuccess());

    memory_used -= size;
    resource_limit->current_commit -= size;

    return RESULT_SUCCESS;
}

ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission perms) {
    LOG_DEBUG(Kernel, "Allocate linear heap target={:08X}, size={:08X}", target, size);
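    // physical_offset is the position of the block within FCRAM; the linear heap mirrors FCRAM
    // starting at GetLinearHeapAreaAddress(), so virtual and physical offsets correspond 1:1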
    u32 physical_offset;
    if (target == 0) {
        auto offset = memory_region->LinearAllocate(size);
        if (!offset) {
            LOG_ERROR(Kernel, "Not enough space");
            return ERR_OUT_OF_HEAP_MEMORY;
        }
        physical_offset = *offset;
        target = physical_offset + GetLinearHeapAreaAddress();
    } else {
        if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
            target + size < target) {
            LOG_ERROR(Kernel, "Invalid linear heap address");
            return ERR_INVALID_ADDRESS;
        }

        // The kernel would crash or return an error when target doesn't meet some requirement.
        // It seems that target is required to follow immediately after the allocated linear heap,
        // or cover the entire hole if there is any.
        // Right now we just ignore these checks because they are still unclear. Furthermore,
        // games and homebrew only ever seem to pass target = 0 here (which lets the kernel decide
        // the address), so this is not important.
        physical_offset = target - GetLinearHeapAreaAddress(); // relative to FCRAM
        if (!memory_region->LinearAllocate(physical_offset, size)) {
            LOG_ERROR(Kernel, "Trying to allocate already allocated memory");
            return ERR_INVALID_ADDRESS_STATE;
        }
    }

    auto backing_memory = kernel.memory.GetFCRAMRef(physical_offset);

    std::fill(backing_memory.GetPtr(), backing_memory.GetPtr() + size, 0);
    auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous);
    ASSERT(vma.Succeeded());
    vm_manager.Reprotect(vma.Unwrap(), perms);

    memory_used += size;
    resource_limit->current_commit += size;

    LOG_DEBUG(Kernel, "Allocated at target={:08X}", target);
    return MakeResult<VAddr>(target);
}

ResultCode Process::LinearFree(VAddr target, u32 size) {
    LOG_DEBUG(Kernel, "Free linear heap target={:08X}, size={:08X}", target, size);
    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
        target + size < target) {
        LOG_ERROR(Kernel, "Invalid linear heap address");
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0) {
        return RESULT_SUCCESS;
    }

    ResultCode result = vm_manager.UnmapRange(target, size);
    if (result.IsError()) {
        LOG_ERROR(Kernel, "Trying to free already freed memory");
        return result;
    }

    memory_used -= size;
    resource_limit->current_commit -= size;

    u32 physical_offset = target - GetLinearHeapAreaAddress(); // relative to FCRAM
    memory_region->Free(physical_offset, size);

    return RESULT_SUCCESS;
}

ResultCode Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perms,
                        bool privileged) {
    LOG_DEBUG(Kernel, "Map memory target={:08X}, source={:08X}, size={:08X}, perms={:08X}", target,
              source, size, perms);
    if (source < Memory::HEAP_VADDR || source + size > Memory::HEAP_VADDR_END ||
        source + size < source) {
        LOG_ERROR(Kernel, "Invalid source address");
        return ERR_INVALID_ADDRESS;
    }

    // TODO(wwylele): check target address range. Is it also restricted to heap region?

    auto vma = vm_manager.FindVMA(target);
    if (vma->second.type != VMAType::Free || vma->second.base + vma->second.size < target + size) {
        LOG_ERROR(Kernel, "Trying to map to already allocated memory");
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Check range overlapping
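    // (with unsigned arithmetic, one of the two differences below equals |source - target|, so
    // the condition triggers when the source and target ranges overlap)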
    if (source - target < size || target - source < size) {
        if (privileged) {
            if (source == target) {
                // privileged Map allows identical source and target address, which simply changes
                // the state and the permission of the memory
                return vm_manager.ChangeMemoryState(source, size, MemoryState::Private,
                                                    VMAPermission::ReadWrite,
                                                    MemoryState::AliasCode, perms);
            } else {
                return ERR_INVALID_ADDRESS;
            }
        } else {
            return ERR_INVALID_ADDRESS_STATE;
        }
    }

    MemoryState source_state = privileged ? MemoryState::Locked : MemoryState::Aliased;
    MemoryState target_state = privileged ? MemoryState::AliasCode : MemoryState::Alias;
    VMAPermission source_perm = privileged ? VMAPermission::None : VMAPermission::ReadWrite;

    // Mark source region as Aliased
    CASCADE_CODE(vm_manager.ChangeMemoryState(source, size, MemoryState::Private,
                                              VMAPermission::ReadWrite, source_state, source_perm));

    CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(source, size));
    VAddr interval_target = target;
    for (const auto [backing_memory, block_size] : backing_blocks) {
        auto target_vma =
            vm_manager.MapBackingMemory(interval_target, backing_memory, block_size, target_state);
        ASSERT(target_vma.Succeeded());
        vm_manager.Reprotect(target_vma.Unwrap(), perms);
        interval_target += block_size;
    }

    return RESULT_SUCCESS;
}

ResultCode Process::Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms,
                          bool privileged) {
    LOG_DEBUG(Kernel, "Unmap memory target={:08X}, source={:08X}, size={:08X}, perms={:08X}",
              target, source, size, perms);
    if (source < Memory::HEAP_VADDR || source + size > Memory::HEAP_VADDR_END ||
        source + size < source) {
        LOG_ERROR(Kernel, "Invalid source address");
        return ERR_INVALID_ADDRESS;
    }

    // TODO(wwylele): check target address range. Is it also restricted to heap region?

    // TODO(wwylele): check that the source and the target are actually a pair created by Map
    // Should return error 0xD8E007F5 in this case

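    // Check range overlapping, as in Map above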
    if (source - target < size || target - source < size) {
        if (privileged) {
            if (source == target) {
                // privileged Unmap allows identical source and target address, which simply
                // changes the state and the permission of the memory
                return vm_manager.ChangeMemoryState(source, size, MemoryState::AliasCode,
                                                    VMAPermission::None, MemoryState::Private,
                                                    perms);
            } else {
                return ERR_INVALID_ADDRESS;
            }
        } else {
            return ERR_INVALID_ADDRESS_STATE;
        }
    }

    MemoryState source_state = privileged ? MemoryState::Locked : MemoryState::Aliased;

    CASCADE_CODE(vm_manager.UnmapRange(target, size));

    // Change back source region state. Note that the permission is reprotected according to param
    CASCADE_CODE(vm_manager.ChangeMemoryState(source, size, source_state, VMAPermission::None,
                                              MemoryState::Private, perms));

    return RESULT_SUCCESS;
}

Kernel::Process::Process(KernelSystem& kernel)
    : Object(kernel), handle_table(kernel), vm_manager(kernel.memory), kernel(kernel) {
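    // Register this process' page table with the memory system so that the usual read/write
    // helpers can operate on this process' address space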
    kernel.memory.RegisterPageTable(vm_manager.page_table);
}
Kernel::Process::~Process() {
    // Release all objects this process owns first, so that their destructors can still perform
    // cleanup involving this process before it is torn down further.
    // TODO(wwylele): explicitly destroy or invalidate objects this process owns (threads, shared
    // memory etc.) even if they are still referenced by other processes.
    handle_table.Clear();

    kernel.memory.UnregisterPageTable(vm_manager.page_table);
}

std::shared_ptr<Process> KernelSystem::GetProcessById(u32 process_id) const {
    auto itr = std::find_if(
        process_list.begin(), process_list.end(),
        [&](const std::shared_ptr<Process>& process) { return process->process_id == process_id; });

    if (itr == process_list.end())
        return nullptr;

    return *itr;
}

} // namespace Kernel