/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/SharedInodeVMObject.h>

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

namespace Kernel {

Region::Region(const Range& range, const String& name, u8 access, bool cacheable)
    : m_range(range)
    , m_vmobject(AnonymousVMObject::create_with_size(size()))
    , m_name(name)
    , m_access(access)
    , m_cacheable(cacheable)
{
    MM.register_region(*this);
}

Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable)
    : m_range(range)
    , m_offset_in_vmobject(offset_in_vmobject)
    , m_vmobject(move(vmobject))
    , m_name(name)
    , m_access(access)
    , m_cacheable(cacheable)
{
    MM.register_region(*this);
}

Region::~Region()
{
    // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
    // Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
    // find the address<->region mappings in an invalid state there.
    InterruptDisabler disabler;
    if (m_page_directory) {
        unmap(ShouldDeallocateVirtualMemoryRange::Yes);
        ASSERT(!m_page_directory);
    }
    MM.unregister_region(*this);
}
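
// Makes a copy of this region for a new address space (typically when a process forks).
// Shared regions and inode-backed regions keep referring to the same VMObject; a private
// anonymous region is instead marked copy-on-write in both the parent and the clone.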
NonnullOwnPtr<Region> Region::clone()
{
    ASSERT(Process::current);

    // FIXME: What should we do for privately mapped SharedInodeVMObjects?
    if (m_shared || vmobject().is_inode()) {
        ASSERT(!m_stack);
#ifdef MM_DEBUG
        dbg() << Process::current->name().characters() << "<" << Process::current->pid() << "> Region::clone(): sharing " << m_name.characters() << " (V" << String::format("%p", vaddr().get()) << ")";
#endif
        // Create a new region backed by the same VMObject.
        auto region = Region::create_user_accessible(m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access);
        region->set_mmap(m_mmap);
        region->set_shared(m_shared);
        return region;
    }

#ifdef MM_DEBUG
    // FIXME: If m_name.characters() returns a null pointer, this debug message would crash.
    //        For now we use String::format() to mitigate that.
    dbg() << Process::current->name().characters() << "<" << Process::current->pid() << "> Region::clone(): cowing " << String::format("%s", m_name.characters()) << " (V" << String::format("%p", vaddr().get()) << ")";
#endif
    // Set up a COW region. The parent (this) region becomes COW as well!
    ensure_cow_map().fill(true);
    remap();

    auto clone_region = Region::create_user_accessible(m_range, m_vmobject->clone(), m_offset_in_vmobject, m_name, m_access);
    clone_region->ensure_cow_map();
    if (m_stack) {
        ASSERT(is_readable());
        ASSERT(is_writable());
        ASSERT(!is_shared());
        ASSERT(vmobject().is_anonymous());
        clone_region->set_stack(true);
    }
    clone_region->set_mmap(m_mmap);
    return clone_region;
}
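
// Commits physical memory for the region: any page that is not yet backed by a physical
// page gets a freshly allocated, zero-filled one. The single-page overload below does the
// work for one page at a time.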
bool Region::commit()
{
    InterruptDisabler disabler;
#ifdef MM_DEBUG
    dbg() << "MM: commit " << vmobject().page_count() << " pages in Region " << this << " (VMO=" << &vmobject() << ") at V" << String::format("%p", vaddr().get());
#endif
    for (size_t i = 0; i < page_count(); ++i) {
        if (!commit(i))
            return false;
    }
    return true;
}

bool Region::commit(size_t page_index)
{
    ASSERT(vmobject().is_anonymous() || vmobject().is_purgeable());
    InterruptDisabler disabler;
#ifdef MM_DEBUG
    dbg() << "MM: commit single page " << page_index << "/" << vmobject().page_count() << " in Region " << this << " (VMO=" << &vmobject() << ") at V" << String::format("%p", vaddr().get());
#endif
    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index];
    if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page())
        return true;
    auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
    if (!physical_page) {
        kprintf("MM: commit was unable to allocate a physical page\n");
        return false;
    }
    vmobject_physical_page_entry = move(physical_page);
    remap_page(page_index);
    return true;
}
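
// Returns how many pages of this region are currently marked copy-on-write.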
u32 Region::cow_pages() const
{
    if (!m_cow_map)
        return 0;
    u32 count = 0;
    for (size_t i = 0; i < m_cow_map->size(); ++i)
        count += m_cow_map->get(i);
    return count;
}
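
// Accounting helpers:
//   amount_dirty()    - for inode-backed regions, the dirty byte count reported by the
//                       VMObject; otherwise the same as amount_resident().
//   amount_resident() - bytes currently backed by a real physical page.
//   amount_shared()   - bytes backed by physical pages that are shared with someone else.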
size_t Region::amount_dirty() const
{
    if (!vmobject().is_inode())
        return amount_resident();
    return static_cast<const SharedInodeVMObject&>(vmobject()).amount_dirty();
}

size_t Region::amount_resident() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
        if (physical_page && !physical_page->is_shared_zero_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}

size_t Region::amount_shared() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
        if (physical_page && physical_page->ref_count() > 1 && !physical_page->is_shared_zero_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}
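
// Factory helpers: construct a Region and tag it as user-accessible or kernel-only.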
NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, const StringView& name, u8 access, bool cacheable)
{
    auto region = make<Region>(range, name, access, cacheable);
    region->m_user_accessible = true;
    return region;
}

NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
{
    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
    region->m_user_accessible = true;
    return region;
}

NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
{
    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
    region->m_user_accessible = false;
    return region;
}

NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, const StringView& name, u8 access, bool cacheable)
{
    auto region = make<Region>(range, name, access, cacheable);
    region->m_user_accessible = false;
    return region;
}
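
// A page should be copied on write if it currently maps the shared zero page, or if this
// is a non-shared region whose CoW bitmap has the page marked.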
bool Region::should_cow(size_t page_index) const
{
    auto& slot = vmobject().physical_pages()[page_index];
    if (slot && slot->is_shared_zero_page())
        return true;
    if (m_shared)
        return false;
    return m_cow_map && m_cow_map->get(page_index);
}

void Region::set_should_cow(size_t page_index, bool cow)
{
    ASSERT(!m_shared);
    ensure_cow_map().set(page_index, cow);
}

Bitmap& Region::ensure_cow_map() const
{
    if (!m_cow_map)
        m_cow_map = make<Bitmap>(page_count(), true);
    return *m_cow_map;
}
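
// Updates the page table entry for a single page of this region. Pages without a backing
// physical page are unmapped; otherwise the PTE's present/writable/NX/user bits are set
// from the region's access flags and CoW state.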
void Region::map_individual_page_impl(size_t page_index)
{
    auto page_vaddr = vaddr().offset(page_index * PAGE_SIZE);
    auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
    auto& physical_page = vmobject().physical_pages()[first_page_index() + page_index];
    if (!physical_page) {
        pte.clear();
    } else {
        pte.set_cache_disabled(!m_cacheable);
        pte.set_physical_page_base(physical_page->paddr().get());
        pte.set_present(true);
        if (should_cow(page_index))
            pte.set_writable(false);
        else
            pte.set_writable(is_writable());
        if (g_cpu_supports_nx)
            pte.set_execute_disabled(!is_executable());
        pte.set_user_allowed(is_user_accessible());
#ifdef MM_DEBUG
        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
#endif
    }
    MM.flush_tlb(page_vaddr);
}
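
// Re-establishes the mapping of one page that already has a backing physical page.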
void Region::remap_page(size_t page_index)
{
    ASSERT(m_page_directory);
    InterruptDisabler disabler;
    ASSERT(vmobject().physical_pages()[first_page_index() + page_index]);
    map_individual_page_impl(page_index);
}
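
// Tears down every page table entry of this region and optionally gives the virtual
// range back to the range allocator.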
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
{
    InterruptDisabler disabler;
    ASSERT(m_page_directory);
    for (size_t i = 0; i < page_count(); ++i) {
        auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
        auto& pte = MM.ensure_pte(*m_page_directory, vaddr);
        pte.clear();
        MM.flush_tlb(vaddr);
#ifdef MM_DEBUG
        auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
        dbg() << "MM: >> Unmapped V" << String::format("%p", vaddr.get()) << " => P" << String::format("%p", physical_page ? physical_page->paddr().get() : 0) << " <<";
#endif
    }
    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
        m_page_directory->range_allocator().deallocate(range());
    m_page_directory = nullptr;
}
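
// Remembers which page directory this region is mapped into. A region may only ever
// belong to one page directory at a time.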
void Region::set_page_directory(PageDirectory& page_directory)
{
    ASSERT(!m_page_directory || m_page_directory == &page_directory);
    InterruptDisabler disabler;
    m_page_directory = page_directory;
}
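
// Maps every page of this region into the given page directory.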
void Region::map(PageDirectory& page_directory)
{
    set_page_directory(page_directory);
    InterruptDisabler disabler;
#ifdef MM_DEBUG
    dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
#endif
    for (size_t page_index = 0; page_index < page_count(); ++page_index)
        map_individual_page_impl(page_index);
}

void Region::remap()
{
    ASSERT(m_page_directory);
    map(*m_page_directory);
}
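
// Central page fault handler for this region. Not-present faults are dispatched to the
// inode, zero-page, or CoW handlers below depending on what backs the faulting page;
// protection violations are only tolerated for writes to CoW pages, and anything else
// returns PageFaultResponse::ShouldCrash.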
PageFaultResponse Region::handle_fault(const PageFault& fault)
{
    auto page_index_in_region = page_index_from_address(fault.vaddr());
    if (fault.type() == PageFault::Type::PageNotPresent) {
        if (fault.is_read() && !is_readable()) {
            dbg() << "NP(non-readable) fault in Region{" << this << "}[" << page_index_in_region << "]";
            return PageFaultResponse::ShouldCrash;
        }
        if (vmobject().is_inode()) {
#ifdef PAGE_FAULT_DEBUG
            dbg() << "NP(inode) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
            return handle_inode_fault(page_index_in_region);
        }
#ifdef MAP_SHARED_ZERO_PAGE_LAZILY
        if (fault.is_read()) {
            vmobject().physical_pages()[first_page_index() + page_index_in_region] = MM.shared_zero_page();
            remap_page(page_index_in_region);
            return PageFaultResponse::Continue;
        }
        return handle_zero_fault(page_index_in_region);
#else
        ASSERT_NOT_REACHED();
#endif
    }
    ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
    if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "PV(cow) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
        if (vmobject().physical_pages()[first_page_index() + page_index_in_region]->is_shared_zero_page()) {
#ifdef PAGE_FAULT_DEBUG
            dbg() << "NP(zero) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
            return handle_zero_fault(page_index_in_region);
        }
        return handle_cow_fault(page_index_in_region);
    }
    kprintf("PV(error) fault in Region{%p}[%u] at V%p\n", this, page_index_in_region, fault.vaddr().get());
    return PageFaultResponse::ShouldCrash;
}
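
// Lazily allocates a zero-filled physical page for an anonymous region the first time
// the page is touched.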
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_anonymous());

    sti();
    LOCKER(vmobject().m_paging_lock);
    cli();

    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];

    if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page()) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "MM: zero_page() but page already present. Fine with me!";
#endif
        remap_page(page_index_in_region);
        return PageFaultResponse::Continue;
    }

    if (Thread::current)
        Thread::current->did_zero_fault();

    auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
    if (physical_page.is_null()) {
        kprintf("MM: handle_zero_fault was unable to allocate a physical page\n");
        return PageFaultResponse::ShouldCrash;
    }
#ifdef PAGE_FAULT_DEBUG
    dbg() << ">> ZERO P" << String::format("%p", physical_page->paddr().get());
#endif
    vmobject_physical_page_entry = move(physical_page);
    remap_page(page_index_in_region);
    return PageFaultResponse::Continue;
}
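
// Handles a write to a copy-on-write page. If nobody else holds a reference to the
// physical page, it is simply remapped read/write; otherwise its contents are copied
// into a freshly allocated page that this region then owns exclusively.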
PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
    if (vmobject_physical_page_entry->ref_count() == 1) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << ">> It's a COW page but nobody is sharing it anymore. Remap r/w";
#endif
        set_should_cow(page_index_in_region, false);
        remap_page(page_index_in_region);
        return PageFaultResponse::Continue;
    }

    if (Thread::current)
        Thread::current->did_cow_fault();

#ifdef PAGE_FAULT_DEBUG
    dbg() << ">> It's a COW page and it's time to COW!";
#endif
    auto physical_page_to_copy = move(vmobject_physical_page_entry);
    auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
    if (physical_page.is_null()) {
        kprintf("MM: handle_cow_fault was unable to allocate a physical page\n");
        return PageFaultResponse::ShouldCrash;
    }
    u8* dest_ptr = MM.quickmap_page(*physical_page);
    const u8* src_ptr = vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
#ifdef PAGE_FAULT_DEBUG
    dbg() << ">> COW P" << String::format("%p", physical_page->paddr().get()) << " <- P" << String::format("%p", physical_page_to_copy->paddr().get());
#endif
    copy_from_user(dest_ptr, src_ptr, PAGE_SIZE);
    vmobject_physical_page_entry = move(physical_page);
    MM.unquickmap_page();
    set_should_cow(page_index_in_region, false);
    remap_page(page_index_in_region);
    return PageFaultResponse::Continue;
}
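
// Pages in data from the backing inode: the page is read into a temporary buffer
// (zero-padded if the read came up short), copied into a newly allocated physical page
// via the quickmap slot, and then mapped into the region.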
PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_inode());
    sti();
    LOCKER(vmobject().m_paging_lock);
    cli();

    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];

#ifdef PAGE_FAULT_DEBUG
    dbg() << "Inode fault in " << name() << " page index: " << page_index_in_region;
#endif

    if (!vmobject_physical_page_entry.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "MM: page_in_from_inode() but page already present. Fine with me!";
#endif
        remap_page(page_index_in_region);
        return PageFaultResponse::Continue;
    }

    if (Thread::current)
        Thread::current->did_inode_fault();

#ifdef MM_DEBUG
    dbg() << "MM: page_in_from_inode ready to read from inode";
#endif
    sti();
    u8 page_buffer[PAGE_SIZE];
    auto& inode = inode_vmobject.inode();
    auto nread = inode.read_bytes((first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, page_buffer, nullptr);
    if (nread < 0) {
        kprintf("MM: handle_inode_fault had error (%d) while reading!\n", nread);
        return PageFaultResponse::ShouldCrash;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }
    cli();
    vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
    if (vmobject_physical_page_entry.is_null()) {
        kprintf("MM: handle_inode_fault was unable to allocate a physical page\n");
        return PageFaultResponse::ShouldCrash;
    }
    u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
    MM.unquickmap_page();
    remap_page(page_index_in_region);
    return PageFaultResponse::Continue;
}
}