Core: Properly configure address space when loading a binary

The code now properly configures the process image to match the loaded
binary segments (code, rodata, data) instead of just blindly allocating
a large chunk of dummy memory.
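
For illustration only, a minimal sketch of the idea (the names ProgramSegment, SegmentPermission, and MapSegment are hypothetical, not the actual Citra API): each segment parsed from the binary is mapped at its own address with permissions matching its contents, instead of reserving one large writable block.

#include <cstdint>
#include <vector>

// Hypothetical permission set for a mapped segment.
enum class SegmentPermission { ReadExecute, Read, ReadWrite };

// Hypothetical descriptor for one segment parsed out of the binary.
struct ProgramSegment {
    uint32_t target_addr;          // virtual address the segment loads at
    std::vector<uint8_t> contents; // bytes read from the file
    SegmentPermission perms;       // code = R+X, rodata = R, data = R+W
};

// Stand-in for whatever call actually maps memory into the process image.
void MapSegment(uint32_t addr, const std::vector<uint8_t>& contents,
                SegmentPermission perms) {
    // ... map `contents` at `addr` and apply `perms` ...
}

// Configure the address space from the parsed segments instead of
// allocating one large block of dummy memory.
void ConfigureProcessImage(const std::vector<ProgramSegment>& segments) {
    for (const ProgramSegment& seg : segments) {
        MapSegment(seg.target_addr, seg.contents, seg.perms);
    }
}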
Author: Yuri Kunde Schlesner
Date: 2015-07-09 22:52:15 -03:00
Parent: 51820691e7
Commit: 5c5cf2f8e0
11 changed files with 223 additions and 52 deletions


@@ -101,7 +101,7 @@ struct VirtualMemoryArea {
  * - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/
  * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
  */
-class VMManager {
+class VMManager final {
     // TODO(yuriks): Make page tables switchable to support multiple VMManagers
 public:
     /**
@@ -121,6 +121,7 @@ public:
     using VMAHandle = decltype(vma_map)::const_iterator;
     VMManager();
     ~VMManager();
+    /// Clears the address space map, re-initializing with a single free area.
     void Reset();
@@ -168,6 +169,9 @@ public:
     /// Changes the permissions of the given VMA.
     void Reprotect(VMAHandle vma, VMAPermission new_perms);
+    /// Dumps the address space layout to the log, for debugging
+    void LogLayout() const;
 private:
     using VMAIter = decltype(vma_map)::iterator;
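
As a usage sketch only, a loader might drive the members visible in these hunks roughly as follows; the Kernel namespace, the header path, and the VMAPermission enumerator name are assumptions, since only Reset(), Reprotect(), and LogLayout() appear above.

#include "core/hle/kernel/vm_manager.h" // assumed header path

void RebuildAddressSpace(Kernel::VMManager& vm) {
    // Start over with a single free region covering the whole address space.
    vm.Reset();

    // ... map the binary's code/rodata/data segments here, keeping the
    // VMAHandle for each mapped region ...

    // Tighten permissions on an already-mapped region, e.g. make code
    // read+execute (enumerator name assumed):
    // vm.Reprotect(code_vma, Kernel::VMAPermission::ReadExecute);

    // Dump the resulting layout to the log for debugging.
    vm.LogLayout();
}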