2019-06-07 11:43:58 +02:00
# include "CMOS.h"
# include "Process.h"
# include "StdLib.h"
2018-10-17 23:13:55 +02:00
# include <AK/Assertions.h>
# include <AK/kstdio.h>
2019-06-07 20:02:01 +02:00
# include <Kernel/Arch/i386/CPU.h>
2019-06-07 11:43:58 +02:00
# include <Kernel/FileSystem/Inode.h>
2019-06-09 11:48:58 +02:00
# include <Kernel/Multiboot.h>
2019-08-07 18:06:17 +02:00
# include <Kernel/VM/AnonymousVMObject.h>
# include <Kernel/VM/InodeVMObject.h>
2019-06-07 11:43:58 +02:00
# include <Kernel/VM/MemoryManager.h>
2018-10-17 23:13:55 +02:00
2018-11-01 11:30:48 +01:00
//#define MM_DEBUG
2018-11-05 13:48:07 +01:00
//#define PAGE_FAULT_DEBUG
2018-11-01 11:30:48 +01:00
2018-10-17 23:13:55 +02:00
// The one and only MemoryManager instance; created by MemoryManager::initialize().
static MemoryManager* s_the;

// Global accessor for the MemoryManager singleton.
// NOTE(review): as extracted, this definition carries no parameter list
// ("MemoryManager& MM { ... }"), which is not a valid C++ function
// definition — presumably tokens were lost in extraction. Verify against
// the MM declaration in MemoryManager.h before relying on this shape.
MemoryManager& MM
{
    return *s_the;
}
2019-11-08 16:37:33 +02:00
// Constructs the MemoryManager and brings up the initial kernel paging state.
//
// The caller hands us a physical address where consecutive pages have been
// reserved: the kernel page directory itself, followed by three page tables
// used for PDE slots 0, 768 and 769.
MemoryManager::MemoryManager(u32 physical_address_for_kernel_page_tables)
{
    // Page `index` within the reserved block handed to us by the boot code.
    auto page_at = [physical_address_for_kernel_page_tables](u32 index) {
        return physical_address_for_kernel_page_tables + PAGE_SIZE * index;
    };

    m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(physical_address_for_kernel_page_tables));
    m_page_table_zero = (PageTableEntry*)page_at(1);
    m_page_table_768 = (PageTableEntry*)page_at(2);
    m_page_table_769 = (PageTableEntry*)page_at(3);

    initialize_paging();

    kprintf("MM initialized.\n");
}
// Nothing to tear down; the MemoryManager lives for the lifetime of the kernel.
MemoryManager::~MemoryManager()
{
}
2018-12-31 14:58:03 +01:00
void MemoryManager : : populate_page_directory ( PageDirectory & page_directory )
{
2019-01-01 02:09:43 +01:00
page_directory . m_directory_page = allocate_supervisor_physical_page ( ) ;
2019-06-26 21:45:56 +02:00
page_directory . entries ( ) [ 0 ] . copy_from ( { } , kernel_page_directory ( ) . entries ( ) [ 0 ] ) ;
2019-03-11 11:11:29 +01:00
// Defer to the kernel page tables for 0xC0000000-0xFFFFFFFF
for ( int i = 768 ; i < 1024 ; + + i )
2019-06-26 21:45:56 +02:00
page_directory . entries ( ) [ i ] . copy_from ( { } , kernel_page_directory ( ) . entries ( ) [ i ] ) ;
2018-11-01 09:01:51 +01:00
}
2018-12-03 01:38:22 +01:00
void MemoryManager : : initialize_paging ( )
2018-10-17 23:13:55 +02:00
{
2018-12-03 01:38:22 +01:00
memset ( m_page_table_zero , 0 , PAGE_SIZE ) ;
2018-10-17 23:13:55 +02:00
2018-10-31 20:10:39 +01:00
# ifdef MM_DEBUG
2018-12-31 14:58:03 +01:00
dbgprintf ( " MM: Kernel page directory @ %p \n " , kernel_page_directory ( ) . cr3 ( ) ) ;
2018-10-31 20:10:39 +01:00
# endif
2018-10-18 13:05:00 +02:00
2018-12-31 14:58:03 +01:00
# ifdef MM_DEBUG
dbgprintf ( " MM: Protect against null dereferences \n " ) ;
# endif
2018-10-21 21:57:59 +02:00
// Make null dereferences crash.
2019-06-07 12:56:50 +02:00
map_protected ( VirtualAddress ( 0 ) , PAGE_SIZE ) ;
2018-10-21 21:57:59 +02:00
2018-12-31 14:58:03 +01:00
# ifdef MM_DEBUG
2019-11-21 16:08:11 +11:00
dbgprintf ( " MM: Identity map bottom 1MiB \n " , kernel_virtual_base ) ;
2018-12-31 14:58:03 +01:00
# endif
2019-11-21 16:08:11 +11:00
create_identity_mapping ( kernel_page_directory ( ) , VirtualAddress ( PAGE_SIZE ) , ( 1 * MB ) - PAGE_SIZE ) ;
2019-11-04 12:00:29 +01:00
// Basic physical memory map:
2019-11-21 16:08:11 +11:00
// 0 -> 1 MB Page table/directory / I/O memory region
2019-11-04 12:00:29 +01:00
// 1 -> 3 MB Kernel image.
// (last page before 2MB) Used by quickmap_page().
// 2 MB -> 4 MB kmalloc_eternal() space.
// 4 MB -> 7 MB kmalloc() space.
// 7 MB -> 8 MB Supervisor physical pages (available for allocation!)
// 8 MB -> MAX Userspace physical pages (available for allocation!)
// Basic virtual memory map:
2019-11-21 16:08:11 +11:00
// 0x00000000-0x00100000 Identity mapped for Kernel Physical pages handed out by allocate_supervisor_physical_page (for I/O, page tables etc).
// 0x00800000-0xbfffffff Userspace program virtual address space.
// 0xc0001000-0xc0800000 Kernel-only virtual address space. This area is mapped to the first 8 MB of physical memory and includes areas for kmalloc, etc.
// 0xc0800000-0xffffffff Kernel virtual address space for kernel Page Directory.
2019-05-14 11:51:00 +02:00
2019-06-11 21:13:02 +10:00
# ifdef MM_DEBUG
dbgprintf ( " MM: Quickmap will use %p \n " , m_quickmap_addr . get ( ) ) ;
# endif
2019-11-04 12:00:29 +01:00
m_quickmap_addr = VirtualAddress ( ( 2 * MB ) - PAGE_SIZE ) ;
2019-06-11 21:13:02 +10:00
2019-06-27 13:34:28 +02:00
RefPtr < PhysicalRegion > region ;
2019-06-11 21:13:02 +10:00
bool region_is_super = false ;
2019-06-09 11:48:58 +02:00
for ( auto * mmap = ( multiboot_memory_map_t * ) multiboot_info_ptr - > mmap_addr ; ( unsigned long ) mmap < multiboot_info_ptr - > mmap_addr + multiboot_info_ptr - > mmap_length ; mmap = ( multiboot_memory_map_t * ) ( ( unsigned long ) mmap + mmap - > size + sizeof ( mmap - > size ) ) ) {
kprintf ( " MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x \n " ,
2019-07-03 21:17:35 +02:00
( u32 ) ( mmap - > addr > > 32 ) ,
( u32 ) ( mmap - > addr & 0xffffffff ) ,
( u32 ) ( mmap - > len > > 32 ) ,
( u32 ) ( mmap - > len & 0xffffffff ) ,
( u32 ) mmap - > type ) ;
2019-06-09 11:48:58 +02:00
if ( mmap - > type ! = MULTIBOOT_MEMORY_AVAILABLE )
continue ;
2019-06-11 21:13:02 +10:00
2019-09-17 09:58:38 +10:00
if ( ( mmap - > addr + mmap - > len ) > 0xffffffff )
continue ;
2019-09-28 16:22:50 +10:00
auto diff = ( u32 ) mmap - > addr % PAGE_SIZE ;
if ( diff ! = 0 ) {
kprintf ( " MM: got an unaligned region base from the bootloader; correcting %p by %d bytes \n " , mmap - > addr , diff ) ;
diff = PAGE_SIZE - diff ;
mmap - > addr + = diff ;
mmap - > len - = diff ;
}
if ( ( mmap - > len % PAGE_SIZE ) ! = 0 ) {
kprintf ( " MM: got an unaligned region length from the bootloader; correcting %d by %d bytes \n " , mmap - > len , mmap - > len % PAGE_SIZE ) ;
mmap - > len - = mmap - > len % PAGE_SIZE ;
}
if ( mmap - > len < PAGE_SIZE ) {
kprintf ( " MM: memory region from bootloader is too small; we want >= %d bytes, but got %d bytes \n " , PAGE_SIZE , mmap - > len ) ;
continue ;
}
2019-06-11 21:13:02 +10:00
# ifdef MM_DEBUG
kprintf ( " MM: considering memory at %p - %p \n " ,
2019-07-03 21:17:35 +02:00
( u32 ) mmap - > addr , ( u32 ) ( mmap - > addr + mmap - > len ) ) ;
2019-06-11 21:13:02 +10:00
# endif
2019-06-09 11:48:58 +02:00
for ( size_t page_base = mmap - > addr ; page_base < ( mmap - > addr + mmap - > len ) ; page_base + = PAGE_SIZE ) {
2019-06-11 21:13:02 +10:00
auto addr = PhysicalAddress ( page_base ) ;
2019-11-21 16:08:11 +11:00
// Anything below 1 * MB is a Kernel Physical region
if ( page_base > PAGE_SIZE & & page_base < 1 * MB ) {
2019-06-11 21:13:02 +10:00
if ( region . is_null ( ) | | ! region_is_super | | region - > upper ( ) . offset ( PAGE_SIZE ) ! = addr ) {
m_super_physical_regions . append ( PhysicalRegion : : create ( addr , addr ) ) ;
region = m_super_physical_regions . last ( ) ;
region_is_super = true ;
} else {
region - > expand ( region - > lower ( ) , addr ) ;
}
2019-11-21 16:08:11 +11:00
} else if ( page_base > 8 * MB ) {
2019-06-11 21:13:02 +10:00
if ( region . is_null ( ) | | region_is_super | | region - > upper ( ) . offset ( PAGE_SIZE ) ! = addr ) {
m_user_physical_regions . append ( PhysicalRegion : : create ( addr , addr ) ) ;
2019-11-08 23:39:29 +02:00
region = m_user_physical_regions . last ( ) ;
2019-06-11 21:13:02 +10:00
region_is_super = false ;
} else {
region - > expand ( region - > lower ( ) , addr ) ;
}
2019-06-09 11:48:58 +02:00
}
}
}
2018-12-31 14:58:03 +01:00
2019-06-11 21:13:02 +10:00
for ( auto & region : m_super_physical_regions )
2019-06-27 13:34:28 +02:00
m_super_physical_pages + = region . finalize_capacity ( ) ;
2019-06-11 21:13:02 +10:00
for ( auto & region : m_user_physical_regions )
2019-06-27 13:34:28 +02:00
m_user_physical_pages + = region . finalize_capacity ( ) ;
2019-06-11 21:13:02 +10:00
2018-12-31 14:58:03 +01:00
# ifdef MM_DEBUG
dbgprintf ( " MM: Installing page directory \n " ) ;
# endif
2019-11-03 23:40:10 +01:00
// Turn on CR4.PGE so the CPU will respect the G bit in page tables.
asm volatile (
" mov %cr4, %eax \n "
" orl $0x10, %eax \n "
" mov %eax, %cr4 \n " ) ;
2019-06-07 11:43:58 +02:00
asm volatile ( " movl %%eax, %%cr3 " : : " a " ( kernel_page_directory ( ) . cr3 ( ) ) ) ;
2018-10-17 23:13:55 +02:00
asm volatile (
2019-01-31 03:57:06 +01:00
" movl %%cr0, %%eax \n "
" orl $0x80000001, %%eax \n "
2019-06-07 11:43:58 +02:00
" movl %%eax, %%cr0 \n " : :
: " %eax " , " memory " ) ;
2018-10-17 23:13:55 +02:00
}
2019-06-26 21:45:56 +02:00
// Returns the page table entry covering `vaddr` in `page_directory`,
// allocating and installing the covering page table if the PDE is not
// yet present.
PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
    ASSERT_INTERRUPTS_DISABLED();
    u32 page_directory_index = (vaddr.get() >> 22) & 0x3ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x3ff;

    PageDirectoryEntry& pde = page_directory.entries()[page_directory_index];
    if (!pde.is_present()) {
#ifdef MM_DEBUG
        dbgprintf("MM: PDE %u not present (requested for V%p), allocating\n", page_directory_index, vaddr.get());
#endif
        // The statically-allocated kernel page tables (PDE slots 0, 768 and
        // 769) are wired up identically; factored into one helper instead of
        // the triplicated setter sequence the old code carried.
        auto install_kernel_page_table = [&](PageTableEntry* page_table) {
            ASSERT(&page_directory == m_kernel_page_directory);
            pde.set_page_table_base((u32)page_table);
            pde.set_user_allowed(false);
            pde.set_present(true);
            pde.set_writable(true);
            pde.set_global(true);
        };
        if (page_directory_index == 0) {
            install_kernel_page_table(m_page_table_zero);
        } else if (page_directory_index == 768) {
            install_kernel_page_table(m_page_table_768);
        } else if (page_directory_index == 769) {
            install_kernel_page_table(m_page_table_769);
        } else {
            //ASSERT(&page_directory != m_kernel_page_directory.ptr());
            auto page_table = allocate_supervisor_physical_page();
#ifdef MM_DEBUG
            dbgprintf("MM: PD K%p (%s) at P%p allocated page table #%u (for V%p) at P%p\n",
                &page_directory,
                &page_directory == m_kernel_page_directory ? "Kernel" : "User",
                page_directory.cr3(),
                page_directory_index,
                vaddr.get(),
                page_table->paddr().get());
#endif
            pde.set_page_table_base(page_table->paddr().get());
            pde.set_user_allowed(true);
            pde.set_present(true);
            pde.set_writable(true);
            // Only kernel-directory mappings are global (survive CR3 reloads).
            pde.set_global(&page_directory == m_kernel_page_directory.ptr());
            // The directory keeps the page table alive.
            page_directory.m_physical_pages.set(page_directory_index, move(page_table));
        }
    }
    return pde.page_table_virtual_base()[page_table_index];
}
2019-06-07 12:56:50 +02:00
void MemoryManager : : map_protected ( VirtualAddress vaddr , size_t length )
2018-10-21 21:57:59 +02:00
{
2018-10-25 10:15:28 +02:00
InterruptDisabler disabler ;
2019-06-13 21:42:12 +02:00
ASSERT ( vaddr . is_page_aligned ( ) ) ;
2019-07-03 21:17:35 +02:00
for ( u32 offset = 0 ; offset < length ; offset + = PAGE_SIZE ) {
2019-06-07 12:56:50 +02:00
auto pte_address = vaddr . offset ( offset ) ;
2019-06-26 21:45:56 +02:00
auto & pte = ensure_pte ( kernel_page_directory ( ) , pte_address ) ;
2019-01-31 17:31:23 +01:00
pte . set_physical_page_base ( pte_address . get ( ) ) ;
2018-12-03 01:38:22 +01:00
pte . set_user_allowed ( false ) ;
pte . set_present ( false ) ;
pte . set_writable ( false ) ;
2019-01-31 17:31:23 +01:00
flush_tlb ( pte_address ) ;
2018-10-21 21:57:59 +02:00
}
}
2019-06-07 12:56:50 +02:00
void MemoryManager : : create_identity_mapping ( PageDirectory & page_directory , VirtualAddress vaddr , size_t size )
2018-10-17 23:13:55 +02:00
{
2018-10-25 10:15:28 +02:00
InterruptDisabler disabler ;
2019-06-07 12:56:50 +02:00
ASSERT ( ( vaddr . get ( ) & ~ PAGE_MASK ) = = 0 ) ;
2019-07-03 21:17:35 +02:00
for ( u32 offset = 0 ; offset < size ; offset + = PAGE_SIZE ) {
2019-06-07 12:56:50 +02:00
auto pte_address = vaddr . offset ( offset ) ;
2019-06-26 21:45:56 +02:00
auto & pte = ensure_pte ( page_directory , pte_address ) ;
2019-01-31 17:31:23 +01:00
pte . set_physical_page_base ( pte_address . get ( ) ) ;
2018-12-03 01:38:22 +01:00
pte . set_user_allowed ( false ) ;
pte . set_present ( true ) ;
pte . set_writable ( true ) ;
2019-01-31 17:31:23 +01:00
page_directory . flush ( pte_address ) ;
2018-10-17 23:13:55 +02:00
}
}
2019-11-08 16:37:33 +02:00
// Creates the global MemoryManager singleton. Called once, early during
// boot, before anything dereferences MM.
void MemoryManager::initialize(u32 physical_address_for_kernel_page_tables)
{
    s_the = new MemoryManager(physical_address_for_kernel_page_tables);
}
2019-08-06 10:31:20 +02:00
// Finds the kernel Region containing vaddr, or nullptr if vaddr is below
// kernel space or inside no registered kernel region.
Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
{
    // Kernel space starts at 0xc0000000; anything below cannot match.
    if (vaddr.get() < 0xc0000000)
        return nullptr;
    for (auto& region : MM.m_kernel_regions) {
        if (!region.contains(vaddr))
            continue;
        return &region;
    }
    return nullptr;
}
2019-05-14 11:51:00 +02:00
2019-08-06 10:31:20 +02:00
// Finds the Region of `process` containing vaddr, or nullptr (with a debug
// log line) if no region matches.
Region* MemoryManager::user_region_from_vaddr(Process& process, VirtualAddress vaddr)
{
    // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
    for (auto& region : process.m_regions) {
        if (!region.contains(vaddr))
            continue;
        return &region;
    }
    dbg() << process << " Couldn't find user region for " << vaddr;
    return nullptr;
}
2019-08-06 10:31:20 +02:00
// Resolves vaddr to a Region: kernel regions take priority, then the
// given process's regions.
Region* MemoryManager::region_from_vaddr(Process& process, VirtualAddress vaddr)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto* kernel_region = kernel_region_from_vaddr(vaddr);
    if (kernel_region)
        return kernel_region;
    return user_region_from_vaddr(process, vaddr);
}
2019-06-07 12:56:50 +02:00
// Const flavor of the lookup above; the const_cast is confined here so the
// non-const helper can be reused.
const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAddress vaddr)
{
    auto* kernel_region = kernel_region_from_vaddr(vaddr);
    if (kernel_region)
        return kernel_region;
    return user_region_from_vaddr(const_cast<Process&>(process), vaddr);
}
2019-08-06 11:19:16 +02:00
// Resolves vaddr against the *current* address space: kernel regions first,
// then the process owning the page directory currently loaded in CR3.
Region* MemoryManager::region_from_vaddr(VirtualAddress vaddr)
{
    auto* kernel_region = kernel_region_from_vaddr(vaddr);
    if (kernel_region)
        return kernel_region;
    auto page_directory = PageDirectory::find_by_pdb(cpu_cr3());
    if (!page_directory)
        return nullptr;
    ASSERT(page_directory->process());
    return user_region_from_vaddr(*page_directory->process(), vaddr);
}
2018-11-05 10:29:19 +01:00
// Central page-fault handler, invoked from the CPU exception path.
// Returns Continue to retry the faulting access, or ShouldCrash.
PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(current);
#ifdef PAGE_FAULT_DEBUG
    dbgprintf("MM: handle_page_fault(%w) at V%p\n", fault.code(), fault.vaddr().get());
#endif
    // The quickmap slot is managed manually and must never fault.
    ASSERT(fault.vaddr() != m_quickmap_addr);
    if (fault.type() == PageFault::Type::PageNotPresent && fault.vaddr().get() >= 0xc0000000) {
        // Kernel-space not-present fault: the kernel page directory may have
        // gained a PDE after this process's directory was populated. If so,
        // lazily copy the kernel PDE into the current directory and retry.
        // NOTE(review): treats cpu_cr3() as directly dereferenceable —
        // presumably relying on the identity-mapped low region; confirm.
        auto* current_page_directory = reinterpret_cast<PageDirectoryEntry*>(cpu_cr3());
        u32 page_directory_index = (fault.vaddr().get() >> 22) & 0x3ff;
        auto& kernel_pde = kernel_page_directory().entries()[page_directory_index];
        auto& current_pde = current_page_directory[page_directory_index];
        if (kernel_pde.is_present() && !current_pde.is_present()) {
            dbg() << "NP(kernel): Copying new kernel mapping for " << fault.vaddr() << " into current page directory";
            current_pde.copy_from({}, kernel_pde);
            flush_tlb(fault.vaddr().page_base());
            return PageFaultResponse::Continue;
        }
    }
    auto* region = region_from_vaddr(fault.vaddr());
    if (!region) {
        kprintf("NP(error) fault at invalid address V%p\n", fault.vaddr().get());
        return PageFaultResponse::ShouldCrash;
    }
    // Let the owning region decide how to satisfy the fault.
    return region->handle_fault(fault);
}
2019-09-27 14:19:07 +02:00
// Allocates a new kernel Region of `size` bytes (must be a page multiple),
// maps it into the kernel page directory and, unless should_commit is
// false, commits backing physical pages immediately.
OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, bool user_accessible, bool should_commit)
{
    InterruptDisabler disabler;
    ASSERT(!(size % PAGE_SIZE));
    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
    ASSERT(range.is_valid());
    OwnPtr<Region> region;
    if (user_accessible)
        region = Region::create_user_accessible(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
    else
        region = Region::create_kernel_only(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
    region->map(kernel_page_directory());
    // FIXME: It would be cool if these could zero-fill on demand instead.
    if (should_commit)
        region->commit();
    return region;
}
2019-09-27 14:19:07 +02:00
// Convenience wrapper: kernel region that userspace may also access.
OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name)
{
    return allocate_kernel_region(size, name, true);
}
2019-06-14 14:56:21 +03:00
void MemoryManager : : deallocate_user_physical_page ( PhysicalPage & & page )
2019-06-11 21:13:02 +10:00
{
for ( auto & region : m_user_physical_regions ) {
2019-06-27 13:34:28 +02:00
if ( ! region . contains ( page ) ) {
2019-06-11 21:13:02 +10:00
kprintf (
" MM: deallocate_user_physical_page: %p not in %p -> %p \n " ,
2019-06-27 13:34:28 +02:00
page . paddr ( ) , region . lower ( ) . get ( ) , region . upper ( ) . get ( ) ) ;
2019-06-11 21:13:02 +10:00
continue ;
}
2019-06-27 13:34:28 +02:00
region . return_page ( move ( page ) ) ;
- - m_user_physical_pages_used ;
2019-06-11 21:13:02 +10:00
return ;
}
kprintf ( " MM: deallocate_user_physical_page couldn't figure out region for user page @ %p \n " , page . paddr ( ) ) ;
ASSERT_NOT_REACHED ( ) ;
}
2019-11-08 23:39:29 +02:00
// Scans the user physical regions for a free page; returns a null RefPtr
// if every region is exhausted.
RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page()
{
    for (auto& region : m_user_physical_regions) {
        auto page = region.take_free_page(false);
        if (!page.is_null())
            return page;
    }
    return {};
}
// Allocates one physical page for userspace use, optionally zero-filling
// it through the quickmap slot. Panics if no user pages remain.
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
{
    InterruptDisabler disabler;
    auto page = find_free_user_physical_page();
    if (page.is_null()) {
        if (m_user_physical_regions.is_empty())
            kprintf("MM: no user physical regions available (?)\n");
        kprintf("MM: no user physical pages available\n");
        ASSERT_NOT_REACHED();
        return {};
    }
#ifdef MM_DEBUG
    dbgprintf("MM: allocate_user_physical_page vending P%p\n", page->paddr().get());
#endif
    if (should_zero_fill == ShouldZeroFill::Yes) {
        // Map the page temporarily so we can write to it.
        auto* ptr = (u32*)quickmap_page(*page);
        fast_u32_fill(ptr, 0, PAGE_SIZE / sizeof(u32));
        unquickmap_page();
    }
    ++m_user_physical_pages_used;
    return page;
}
2019-06-14 14:56:21 +03:00
void MemoryManager : : deallocate_supervisor_physical_page ( PhysicalPage & & page )
2019-06-11 21:13:02 +10:00
{
for ( auto & region : m_super_physical_regions ) {
2019-06-27 13:34:28 +02:00
if ( ! region . contains ( page ) ) {
2019-06-11 21:13:02 +10:00
kprintf (
" MM: deallocate_supervisor_physical_page: %p not in %p -> %p \n " ,
2019-06-27 13:34:28 +02:00
page . paddr ( ) , region . lower ( ) . get ( ) , region . upper ( ) . get ( ) ) ;
2019-06-11 21:13:02 +10:00
continue ;
}
2019-06-27 13:34:28 +02:00
region . return_page ( move ( page ) ) ;
- - m_super_physical_pages_used ;
2019-06-11 21:13:02 +10:00
return ;
}
kprintf ( " MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ %p \n " , page . paddr ( ) ) ;
ASSERT_NOT_REACHED ( ) ;
2018-11-08 12:59:16 +01:00
}
2019-06-21 18:37:47 +02:00
// Allocates one supervisor (kernel) physical page, zero-filled. These pages
// are identity-mapped, so paddr() is directly writable. Panics if none are
// available.
RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{
    InterruptDisabler disabler;
    RefPtr<PhysicalPage> page;
    for (auto& region : m_super_physical_regions) {
        page = region.take_free_page(true);
        // FIX: stop at the first region that yields a page. The old loop
        // `continue`d on null but also fell through to the next iteration on
        // success, overwriting a successfully taken page with the (possibly
        // null) result from the next region and leaking it.
        if (!page.is_null())
            break;
    }
    if (!page) {
        if (m_super_physical_regions.is_empty()) {
            kprintf("MM: no super physical regions available (?)\n");
        }
        kprintf("MM: no super physical pages available\n");
        ASSERT_NOT_REACHED();
        return {};
    }
#ifdef MM_DEBUG
    dbgprintf("MM: allocate_supervisor_physical_page vending P%p\n", page->paddr().get());
#endif
    // Supervisor pages are identity-mapped, so we can zero them directly.
    fast_u32_fill((u32*)page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(u32));
    ++m_super_physical_pages_used;
    return page;
}
2018-11-01 13:15:46 +01:00
// Switches the CPU into `process`'s address space: records the new CR3 in
// the current thread's TSS and loads it into CR3 immediately.
void MemoryManager::enter_process_paging_scope(Process& process)
{
    ASSERT(current);
    InterruptDisabler disabler;
    // Keep the TSS in sync so a task switch restores the same directory.
    current->tss().cr3 = process.page_directory().cr3();
    asm volatile("movl %%eax, %%cr3" ::"a"(process.page_directory().cr3())
                 : "memory");
}
2018-12-03 01:38:22 +01:00
// Flushes the TLB by reloading CR3 (global pages, if enabled via CR4.PGE,
// survive this — see initialize_paging()).
void MemoryManager::flush_entire_tlb()
{
    asm volatile(
        "mov %%cr3, %%eax\n"
        "mov %%eax, %%cr3\n" ::
            : "%eax", "memory");
}
2019-06-07 12:56:50 +02:00
// Invalidates the single TLB entry covering vaddr via `invlpg`.
void MemoryManager::flush_tlb(VirtualAddress vaddr)
{
    asm volatile("invlpg %0"
                 :
                 : "m"(*(char*)vaddr.get())
                 : "memory");
}
2019-10-16 10:27:00 -06:00
void MemoryManager : : map_for_kernel ( VirtualAddress vaddr , PhysicalAddress paddr , bool cache_disabled )
2019-03-10 15:25:33 +01:00
{
2019-06-26 21:45:56 +02:00
auto & pte = ensure_pte ( kernel_page_directory ( ) , vaddr ) ;
2019-03-10 15:25:33 +01:00
pte . set_physical_page_base ( paddr . get ( ) ) ;
pte . set_present ( true ) ;
pte . set_writable ( true ) ;
pte . set_user_allowed ( false ) ;
2019-10-16 10:27:00 -06:00
pte . set_cache_disabled ( cache_disabled ) ;
2019-06-07 12:56:50 +02:00
flush_tlb ( vaddr ) ;
2019-03-10 15:25:33 +01:00
}
2019-07-03 21:17:35 +02:00
// Temporarily maps `physical_page` at the dedicated quickmap virtual
// address and returns a pointer to it. Only one quickmap may be active at
// a time; release it with unquickmap_page().
u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(!m_quickmap_in_use);
    m_quickmap_in_use = true;
    auto vaddr = m_quickmap_addr;
    auto& pte = ensure_pte(kernel_page_directory(), vaddr);
    pte.set_physical_page_base(physical_page.paddr().get());
    pte.set_user_allowed(false);
    pte.set_writable(true);
    pte.set_present(true);
    flush_tlb(vaddr);
    ASSERT((u32)pte.physical_page_base() == physical_page.paddr().get());
#ifdef MM_DEBUG
    dbg() << "MM: >> quickmap_page " << vaddr << " => " << physical_page.paddr() << " @ PTE=" << (void*)pte.raw() << " {" << &pte << "}";
#endif
    return vaddr.as_ptr();
}
void MemoryManager : : unquickmap_page ( )
{
ASSERT_INTERRUPTS_DISABLED ( ) ;
2019-01-31 03:57:06 +01:00
ASSERT ( m_quickmap_in_use ) ;
2019-06-07 12:56:50 +02:00
auto page_vaddr = m_quickmap_addr ;
2019-06-26 21:45:56 +02:00
auto & pte = ensure_pte ( kernel_page_directory ( ) , page_vaddr ) ;
2018-11-05 13:48:07 +01:00
# ifdef MM_DEBUG
2018-12-29 03:34:24 +01:00
auto old_physical_address = pte . physical_page_base ( ) ;
2018-11-05 13:48:07 +01:00
# endif
2018-12-03 01:38:22 +01:00
pte . set_physical_page_base ( 0 ) ;
pte . set_present ( false ) ;
pte . set_writable ( false ) ;
2019-06-07 12:56:50 +02:00
flush_tlb ( page_vaddr ) ;
2018-11-05 13:48:07 +01:00
# ifdef MM_DEBUG
2019-09-16 14:45:44 +02:00
dbg ( ) < < " MM: >> unquickmap_page " < < page_vaddr < < " =/> " < < old_physical_address ;
2018-11-05 13:48:07 +01:00
# endif
2019-01-31 03:57:06 +01:00
m_quickmap_in_use = false ;
2018-11-05 13:48:07 +01:00
}
2019-11-17 12:11:43 +01:00
bool MemoryManager : : validate_user_stack ( const Process & process , VirtualAddress vaddr ) const
{
auto * region = region_from_vaddr ( process , vaddr ) ;
return region & & region - > is_stack ( ) ;
}
2019-06-07 12:56:50 +02:00
bool MemoryManager : : validate_user_read ( const Process & process , VirtualAddress vaddr ) const
2018-11-01 12:45:51 +01:00
{
2019-06-07 12:56:50 +02:00
auto * region = region_from_vaddr ( process , vaddr ) ;
2019-01-25 01:39:15 +01:00
return region & & region - > is_readable ( ) ;
2018-11-01 12:45:51 +01:00
}
2019-06-07 12:56:50 +02:00
bool MemoryManager : : validate_user_write ( const Process & process , VirtualAddress vaddr ) const
2018-11-01 12:45:51 +01:00
{
2019-06-07 12:56:50 +02:00
auto * region = region_from_vaddr ( process , vaddr ) ;
2019-01-25 01:39:15 +01:00
return region & & region - > is_writable ( ) ;
2018-10-22 15:42:39 +02:00
}
2018-11-02 20:41:58 +01:00
2018-11-08 21:20:09 +01:00
// Adds a VMObject to the global tracking list.
void MemoryManager::register_vmo(VMObject& vmo)
{
    InterruptDisabler disabler;
    m_vmobjects.append(&vmo);
}
// Removes a VMObject from the global tracking list.
void MemoryManager::unregister_vmo(VMObject& vmo)
{
    InterruptDisabler disabler;
    m_vmobjects.remove(&vmo);
}
2018-11-08 22:24:02 +01:00
void MemoryManager : : register_region ( Region & region )
{
InterruptDisabler disabler ;
2019-06-07 12:56:50 +02:00
if ( region . vaddr ( ) . get ( ) > = 0xc0000000 )
2019-08-08 10:53:24 +02:00
m_kernel_regions . append ( & region ) ;
2019-05-14 11:51:00 +02:00
else
2019-08-08 10:53:24 +02:00
m_user_regions . append ( & region ) ;
2018-11-08 22:24:02 +01:00
}
void MemoryManager : : unregister_region ( Region & region )
{
InterruptDisabler disabler ;
2019-06-07 12:56:50 +02:00
if ( region . vaddr ( ) . get ( ) > = 0xc0000000 )
2019-05-14 11:51:00 +02:00
m_kernel_regions . remove ( & region ) ;
else
m_user_regions . remove ( & region ) ;
2018-11-08 22:24:02 +01:00
}
2018-11-09 01:25:31 +01:00
2019-03-23 22:03:17 +01:00
// RAII helper: enters `process`'s paging scope on construction and returns
// to the current thread's own process address space on destruction.
ProcessPagingScope::ProcessPagingScope(Process& process)
{
    ASSERT(current);
    MM.enter_process_paging_scope(process);
}

ProcessPagingScope::~ProcessPagingScope()
{
    MM.enter_process_paging_scope(current->process());
}