Core: Properly configure address space when loading a binary

The code now properly configures the process image to match the loaded
binary segments (code, rodata, data) instead of just blindly allocating
a large chunk of dummy memory.
Yuri Kunde Schlesner 2015-07-09 22:52:15 -03:00
parent 51820691e7
commit 5c5cf2f8e0
11 changed files with 223 additions and 52 deletions
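To illustrate what "configure the process image to match the loaded binary segments" means in practice, here is a minimal, hypothetical sketch of how a loader could map code/rodata/data with matching permissions through the VMManager calls that appear in the diff below (MapMemoryBlock, Reprotect, MoveFrom). The SegmentInfo struct, the MapSegments helper, and the chosen MemoryState/VMAPermission values are illustrative assumptions, not the loader code from this commit; header paths follow the Citra tree of that era.

// Hypothetical sketch -- not the commit's loader code. It only reuses the
// VMManager calls visible in the diff below (MapMemoryBlock, Reprotect).
#include <memory>
#include <vector>

#include "common/common_types.h"         // VAddr, u8, u32
#include "core/hle/kernel/vm_manager.h"  // Kernel::VMManager, VMAPermission, MemoryState

struct SegmentInfo {
    VAddr addr;                             // load address of the segment
    std::shared_ptr<std::vector<u8>> data;  // bytes read from the binary
    Kernel::VMAPermission perms;            // e.g. ReadExecute (.code), Read (.rodata), ReadWrite (.data)
};

static void MapSegments(Kernel::VMManager& address_space,
                        const std::vector<SegmentInfo>& segments) {
    using Kernel::MemoryState;
    for (const auto& seg : segments) {
        // Back exactly the bytes of this segment, instead of one big dummy block.
        auto vma = address_space.MapMemoryBlock(seg.addr, seg.data, 0,
                static_cast<u32>(seg.data->size()),
                MemoryState::Private /* state value assumed for illustration */).MoveFrom();
        // Then restrict the mapping to the permissions this segment type needs.
        address_space.Reprotect(vma, seg.perms);
    }
}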


@@ -32,7 +32,6 @@ struct MemoryArea {
// We don't declare the IO regions in here since its handled by other means.
static MemoryArea memory_areas[] = {
    {PROCESS_IMAGE_VADDR, PROCESS_IMAGE_MAX_SIZE, "Process Image"}, // ExeFS:/.code is loaded here
    {HEAP_VADDR, HEAP_SIZE, "Heap"}, // Application heap (main memory)
    {SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE, "Shared Memory"}, // Shared memory
    {LINEAR_HEAP_VADDR, LINEAR_HEAP_SIZE, "Linear Heap"}, // Linear heap (main memory)
@@ -132,13 +131,13 @@ VAddr PhysicalToVirtualAddress(const PAddr addr) {
    return addr | 0x80000000;
}
// TODO(yuriks): Move this into Process
static Kernel::VMManager address_space;
void Init() {
    using namespace Kernel;
    InitMemoryMap();
    LOG_DEBUG(HW_Memory, "initialized OK");
}
void InitLegacyAddressSpace(Kernel::VMManager& address_space) {
    using namespace Kernel;
    for (MemoryArea& area : memory_areas) {
        auto block = std::make_shared<std::vector<u8>>(area.size);
@@ -152,14 +151,11 @@ void Init() {
    auto shared_page_vma = address_space.MapBackingMemory(SHARED_PAGE_VADDR,
            (u8*)&SharedPage::shared_page, SHARED_PAGE_SIZE, MemoryState::Shared).MoveFrom();
    address_space.Reprotect(shared_page_vma, VMAPermission::Read);
    LOG_DEBUG(HW_Memory, "initialized OK");
}
void Shutdown() {
    heap_map.clear();
    heap_linear_map.clear();
    address_space.Reset();
    LOG_DEBUG(HW_Memory, "shutdown OK");
}
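
After this split, Init() only builds the memory map, while the legacy fixed regions (Heap, Shared Memory, Linear Heap, shared page) are mapped into a caller-supplied VMManager. Below is a rough, hypothetical sketch of the caller side under that assumption; everything other than Init() and InitLegacyAddressSpace() is made up for illustration and is not taken from this diff.

// Hypothetical caller-side sketch; SetUpEmulatedProcess and its argument are
// assumptions. Only Init() and InitLegacyAddressSpace() come from the diff
// above, and this is assumed to live in the same namespace as those functions.
void SetUpEmulatedProcess(Kernel::VMManager& process_address_space) {
    Init();                                         // global setup: memory map only
    InitLegacyAddressSpace(process_address_space);  // per-process: map Heap, Shared Memory,
                                                    // Linear Heap and the shared page
}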