/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Memory.h>
#include <AK/StringView.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/SharedInodeVMObject.h>

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

namespace Kernel {

Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable, bool kernel, bool shared)
    : PurgeablePageRanges(vmobject)
    , m_range(range)
    , m_offset_in_vmobject(offset_in_vmobject)
    , m_vmobject(move(vmobject))
    , m_name(name)
    , m_access(access)
    , m_shared(shared)
    , m_cacheable(cacheable)
    , m_kernel(kernel)
{
    m_vmobject->ref_region();
    register_purgeable_page_ranges();
    MM.register_region(*this);
}

Region::~Region()
{
    m_vmobject->unref_region();
    unregister_purgeable_page_ranges();

    // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
    // Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
    // find the address<->region mappings in an invalid state there.
    ScopedSpinLock lock(s_mm_lock);
    if (m_page_directory) {
        unmap(ShouldDeallocateVirtualMemoryRange::Yes);
        ASSERT(!m_page_directory);
    }

    MM.unregister_region(*this);
}

void Region::register_purgeable_page_ranges()
{
    if (m_vmobject->is_anonymous()) {
        auto& vmobject = static_cast<AnonymousVMObject&>(*m_vmobject);
        vmobject.register_purgeable_page_ranges(*this);
    }
}

void Region::unregister_purgeable_page_ranges()
{
    if (m_vmobject->is_anonymous()) {
        auto& vmobject = static_cast<AnonymousVMObject&>(*m_vmobject);
        vmobject.unregister_purgeable_page_ranges(*this);
    }
}

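// Creates a copy of this region for a forked process (new_owner). Shared regions
// keep the same VMObject, zeroed-on-fork regions get a fresh anonymous VMObject,
// and all other regions are cloned copy-on-write, which also remaps the parent
// region so both sides take a fault on the next write.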
OwnPtr<Region> Region::clone(Process& new_owner)
{
    ASSERT(Process::current());

    ScopedSpinLock lock(s_mm_lock);
    if (m_inherit_mode == InheritMode::ZeroedOnFork) {
        ASSERT(m_mmap);
        ASSERT(!m_shared);
        ASSERT(vmobject().is_anonymous());
        auto new_vmobject = AnonymousVMObject::create_with_size(size(), AllocationStrategy::Reserve); // TODO: inherit committed non-volatile areas?
        if (!new_vmobject)
            return {};
        auto zeroed_region = Region::create_user_accessible(&new_owner, m_range, new_vmobject.release_nonnull(), 0, m_name, m_access);
        zeroed_region->copy_purgeable_page_ranges(*this);
        zeroed_region->set_mmap(m_mmap);
        zeroed_region->set_inherit_mode(m_inherit_mode);
        return zeroed_region;
    }

    if (m_shared) {
        ASSERT(!m_stack);
#ifdef MM_DEBUG
        dbg() << "Region::clone(): Sharing " << name() << " (" << vaddr() << ")";
#endif
        if (vmobject().is_inode())
            ASSERT(vmobject().is_shared_inode());

        // Create a new region backed by the same VMObject.
        auto region = Region::create_user_accessible(&new_owner, m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access);
        if (m_vmobject->is_anonymous())
            region->copy_purgeable_page_ranges(*this);
        region->set_mmap(m_mmap);
        region->set_shared(m_shared);
        return region;
    }

    if (vmobject().is_inode())
        ASSERT(vmobject().is_private_inode());

    auto vmobject_clone = vmobject().clone();
    if (!vmobject_clone)
        return {};

#ifdef MM_DEBUG
    dbg() << "Region::clone(): CoWing " << name() << " (" << vaddr() << ")";
#endif

    // Set up a COW region. The parent (this) region becomes COW as well!
    remap();
    auto clone_region = Region::create_user_accessible(&new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access);
    if (m_vmobject->is_anonymous())
        clone_region->copy_purgeable_page_ranges(*this);
    if (m_stack) {
        ASSERT(is_readable());
        ASSERT(is_writable());
        ASSERT(vmobject().is_anonymous());
        clone_region->set_stack(true);
    }
    clone_region->set_mmap(m_mmap);
    return clone_region;
}

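// Swaps out the backing VMObject, moving the per-region reference count and the
// purgeable page range registration from the old object to the new one.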
void Region::set_vmobject(NonnullRefPtr<VMObject>&& obj)
{
    if (m_vmobject.ptr() == obj.ptr())
        return;
    unregister_purgeable_page_ranges();
    m_vmobject->unref_region();
    m_vmobject = move(obj);
    m_vmobject->ref_region();
    register_purgeable_page_ranges();
}

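// Returns true if all pages overlapping [vaddr, vaddr + size) are currently
// marked volatile; non-anonymous regions are never volatile.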
bool Region::is_volatile(VirtualAddress vaddr, size_t size) const
{
    if (!m_vmobject->is_anonymous())
        return false;

    auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);
    size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
    size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
    return is_volatile_range({ first_page_index, last_page_index - first_page_index });
}

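// Marks the pages covering [vaddr, vaddr + size) volatile or non-volatile. Partial
// pages are rounded conservatively (see the comments below) so data outside the
// requested range is never put at risk of being purged.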
auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, bool& was_purged) -> SetVolatileError
{
    was_purged = false;
    if (!m_vmobject->is_anonymous())
        return SetVolatileError::NotPurgeable;

    auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);
    if (is_volatile) {
        // If marking pages as volatile, be prudent by not marking
        // partial pages volatile, to prevent potentially non-volatile
        // data from being discarded. So round up the first page and
        // round down the last page.
        size_t first_page_index = PAGE_ROUND_UP(offset_in_vmobject) / PAGE_SIZE;
        size_t last_page_index = PAGE_ROUND_DOWN(offset_in_vmobject + size) / PAGE_SIZE;
        if (first_page_index != last_page_index)
            add_volatile_range({ first_page_index, last_page_index - first_page_index });
    } else {
        // If marking pages as non-volatile, round down the first page
        // and round up the last page to make sure the beginning and
        // end of the range don't inadvertently get discarded.
        size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
        size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
        switch (remove_volatile_range({ first_page_index, last_page_index - first_page_index }, was_purged)) {
        case PurgeablePageRanges::RemoveVolatileError::Success:
        case PurgeablePageRanges::RemoveVolatileError::SuccessNoChange:
            break;
        case PurgeablePageRanges::RemoveVolatileError::OutOfMemory:
            return SetVolatileError::OutOfMemory;
        }
    }
    return SetVolatileError::Success;
}

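// Accounting helpers: cow_pages() counts pages still shared copy-on-write,
// amount_dirty() reports bytes that differ from the backing inode (for non-inode
// regions every resident byte counts as dirty), and amount_resident()/amount_shared()
// report bytes backed by physical pages, respectively by pages shared with at least
// one other user.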
size_t Region::cow_pages() const
{
    if (!vmobject().is_anonymous())
        return 0;
    return static_cast<const AnonymousVMObject&>(vmobject()).cow_pages();
}

size_t Region::amount_dirty() const
{
    if (!vmobject().is_inode())
        return amount_resident();
    return static_cast<const InodeVMObject&>(vmobject()).amount_dirty();
}

size_t Region::amount_resident() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto* page = physical_page(i);
        if (page && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}

size_t Region::amount_shared() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto* page = physical_page(i);
        if (page && page->ref_count() > 1 && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}

NonnullOwnPtr<Region> Region::create_user_accessible(Process* owner, const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable, bool shared)
{
    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, false, shared);
    if (owner)
        region->m_owner = owner->make_weak_ptr();
    region->m_user_accessible = true;
    return region;
}

NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
{
    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, true, false);
    region->m_user_accessible = false;
    return region;
}

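// Copy-on-write state is tracked per page by the anonymous VMObject; regions backed
// by any other kind of VMObject are never CoW.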
bool Region::should_cow(size_t page_index) const
{
    if (!vmobject().is_anonymous())
        return false;
    return static_cast<const AnonymousVMObject&>(vmobject()).should_cow(first_page_index() + page_index, m_shared);
}

void Region::set_should_cow(size_t page_index, bool cow)
{
    ASSERT(!m_shared);
    if (vmobject().is_anonymous())
        static_cast<AnonymousVMObject&>(vmobject()).set_should_cow(first_page_index() + page_index, cow);
}

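// Writes the page table entry for a single page of this region: cleared if there is
// no physical page (or no access at all), otherwise present with access bits derived
// from the region. Zero-fill, lazily committed and CoW pages are mapped read-only so
// the first write faults. Returns false if the PTE itself could not be allocated.
// The caller must hold the page directory lock and is responsible for TLB flushing.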
bool Region::map_individual_page_impl(size_t page_index)
{
    ASSERT(m_page_directory->get_lock().own_lock());
    auto page_vaddr = vaddr_from_page_index(page_index);
    auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
    if (!pte) {
#ifdef MM_DEBUG
        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ") " << name() << " cannot create PTE for " << page_vaddr;
#endif
        return false;
    }
    auto* page = physical_page(page_index);
    if (!page || (!is_readable() && !is_writable())) {
        pte->clear();
    } else {
        pte->set_cache_disabled(!m_cacheable);
        pte->set_physical_page_base(page->paddr().get());
        pte->set_present(true);
        if (page->is_shared_zero_page() || page->is_lazy_committed_page() || should_cow(page_index))
            pte->set_writable(false);
        else
            pte->set_writable(is_writable());
        if (Processor::current().has_feature(CPUFeature::NX))
            pte->set_execute_disabled(!is_executable());
        pte->set_user_allowed(is_user_accessible());
#ifdef MM_DEBUG
        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte->raw() << "{" << pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
#endif
    }
    return true;
}

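// The remap_vmobject_page*() helpers rewrite the PTEs for one page (or a range of
// pages) of the backing VMObject. If the VMObject is shared by multiple regions,
// every region mapping the affected pages is updated so their mappings stay
// consistent; the do_* variants operate on this region only.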
bool Region::do_remap_vmobject_page_range(size_t page_index, size_t page_count)
{
    bool success = true;
    ASSERT(s_mm_lock.own_lock());
    ASSERT(m_page_directory);
    if (!translate_vmobject_page_range(page_index, page_count))
        return success; // not an error, region doesn't map this page range
    ScopedSpinLock page_lock(m_page_directory->get_lock());
    size_t index = page_index;
    while (index < page_index + page_count) {
        if (!map_individual_page_impl(index)) {
            success = false;
            break;
        }
        index++;
    }
    if (index > page_index)
        MM.flush_tlb(vaddr_from_page_index(page_index), index - page_index);
    return success;
}

bool Region::remap_vmobject_page_range(size_t page_index, size_t page_count)
{
    bool success = true;
    ScopedSpinLock lock(s_mm_lock);
    auto& vmobject = this->vmobject();
    if (vmobject.is_shared_by_multiple_regions()) {
        vmobject.for_each_region([&](auto& region) {
            if (!region.do_remap_vmobject_page_range(page_index, page_count))
                success = false;
        });
    } else {
        if (!do_remap_vmobject_page_range(page_index, page_count))
            success = false;
    }
    return success;
}

bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
{
    ScopedSpinLock lock(s_mm_lock);
    ASSERT(m_page_directory);
    if (!translate_vmobject_page(page_index))
        return true; // not an error, region doesn't map this page
    ScopedSpinLock page_lock(m_page_directory->get_lock());
    ASSERT(physical_page(page_index));
    bool success = map_individual_page_impl(page_index);
    if (with_flush)
        MM.flush_tlb(vaddr_from_page_index(page_index));
    return success;
}

bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
{
    bool success = true;
    ScopedSpinLock lock(s_mm_lock);
    auto& vmobject = this->vmobject();
    if (vmobject.is_shared_by_multiple_regions()) {
        vmobject.for_each_region([&](auto& region) {
            if (!region.do_remap_vmobject_page(page_index, with_flush))
                success = false;
        });
    } else {
        if (!do_remap_vmobject_page(page_index, with_flush))
            success = false;
    }
    return success;
}

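// Removes all of this region's page table entries, flushes the TLB, and optionally
// returns the virtual address range to the owning range allocator.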
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
{
    ScopedSpinLock lock(s_mm_lock);
    if (!m_page_directory)
        return;
    ScopedSpinLock page_lock(m_page_directory->get_lock());
    size_t count = page_count();
    for (size_t i = 0; i < count; ++i) {
        auto vaddr = vaddr_from_page_index(i);
        MM.release_pte(*m_page_directory, vaddr, i == count - 1);
#ifdef MM_DEBUG
        auto* page = physical_page(i);
        dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
#endif
    }
    MM.flush_tlb(vaddr(), page_count());
    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
        if (m_page_directory->range_allocator().contains(range()))
            m_page_directory->range_allocator().deallocate(range());
        else
            m_page_directory->identity_range_allocator().deallocate(range());
    }
    m_page_directory = nullptr;
}

void Region::set_page_directory(PageDirectory& page_directory)
{
    ASSERT(!m_page_directory || m_page_directory == &page_directory);
    ASSERT(s_mm_lock.own_lock());
    m_page_directory = page_directory;
}

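// Maps the entire region into the given page directory, one page at a time. Stops
// at the first page whose PTE cannot be allocated; returns true only if every page
// was mapped. TLB entries are flushed for whatever prefix was mapped.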
bool Region::map(PageDirectory& page_directory)
{
    ScopedSpinLock lock(s_mm_lock);
    ScopedSpinLock page_lock(page_directory.get_lock());
    set_page_directory(page_directory);
#ifdef MM_DEBUG
    dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
#endif
    size_t page_index = 0;
    while (page_index < page_count()) {
        if (!map_individual_page_impl(page_index))
            break;
        ++page_index;
    }
    if (page_index > 0) {
        MM.flush_tlb(vaddr(), page_index);
        return page_index == page_count();
    }
    return false;
}

void Region::remap()
{
    ASSERT(m_page_directory);
    map(*m_page_directory);
}

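// Central page fault handler for this region. Not-present faults are routed to the
// inode, lazily-committed or zero-fill paths; a write protection violation on a CoW
// page goes to the copy-on-write path; anything else crashes the faulting process.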
PageFaultResponse Region::handle_fault(const PageFault& fault)
{
    ScopedSpinLock lock(s_mm_lock);
    auto page_index_in_region = page_index_from_address(fault.vaddr());
    if (fault.type() == PageFault::Type::PageNotPresent) {
        if (fault.is_read() && !is_readable()) {
            dbg() << "NP(non-readable) fault in Region{" << this << "}[" << page_index_in_region << "]";
            return PageFaultResponse::ShouldCrash;
        }
        if (fault.is_write() && !is_writable()) {
            dbg() << "NP(non-writable) write fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
            return PageFaultResponse::ShouldCrash;
        }
        if (vmobject().is_inode()) {
#ifdef PAGE_FAULT_DEBUG
            dbg() << "NP(inode) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
            return handle_inode_fault(page_index_in_region);
        }

        auto& page_slot = physical_page_slot(page_index_in_region);
        if (page_slot->is_lazy_committed_page()) {
            auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
            page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_vmobject);
            remap_vmobject_page(page_index_in_vmobject);
            return PageFaultResponse::Continue;
        }
#ifdef MAP_SHARED_ZERO_PAGE_LAZILY
        if (fault.is_read()) {
            page_slot = MM.shared_zero_page();
            remap_vmobject_page(translate_to_vmobject_page(page_index_in_region));
            return PageFaultResponse::Continue;
        }
        return handle_zero_fault(page_index_in_region);
#else
        dbg() << "BUG! Unexpected NP fault at " << fault.vaddr();
        return PageFaultResponse::ShouldCrash;
#endif
    }
    ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
    if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "PV(cow) fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
#endif
        auto* phys_page = physical_page(page_index_in_region);
        if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
#ifdef PAGE_FAULT_DEBUG
            dbg() << "NP(zero) fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
#endif
            return handle_zero_fault(page_index_in_region);
        }
        return handle_cow_fault(page_index_in_region);
    }
    dbg() << "PV(error) fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
    return PageFaultResponse::ShouldCrash;
}

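// Handles the first touch of an anonymous page: hands out the committed page
// reserved for this slot if there is one, otherwise allocates a fresh zero-filled
// user page, then remaps the page in every region sharing the VMObject.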
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_anonymous());

    LOCKER(vmobject().m_paging_lock);

    auto& page_slot = physical_page_slot(page_index_in_region);
    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);

    if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "MM: zero_page() but page already present. Fine with me!";
#endif
        if (!remap_vmobject_page(page_index_in_vmobject))
            return PageFaultResponse::OutOfMemory;
        return PageFaultResponse::Continue;
    }

    auto current_thread = Thread::current();
    if (current_thread != nullptr)
        current_thread->did_zero_fault();

    if (page_slot->is_lazy_committed_page()) {
        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_vmobject);
#ifdef PAGE_FAULT_DEBUG
        dbg() << ">> ALLOCATED COMMITTED " << page_slot->paddr();
#endif
    } else {
        page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
        if (page_slot.is_null()) {
            klog() << "MM: handle_zero_fault was unable to allocate a physical page";
            return PageFaultResponse::OutOfMemory;
        }
#ifdef PAGE_FAULT_DEBUG
        dbg() << ">> ALLOCATED " << page_slot->paddr();
#endif
    }

    if (!remap_vmobject_page(page_index_in_vmobject)) {
        klog() << "MM: handle_zero_fault was unable to allocate a page table to map " << page_slot;
        return PageFaultResponse::OutOfMemory;
    }
    return PageFaultResponse::Continue;
}

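// Handles a write to a copy-on-write page: the anonymous VMObject resolves the
// fault (breaking the page sharing), and the page is then remapped with its new
// backing page.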
PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->did_cow_fault();

    if (!vmobject().is_anonymous())
        return PageFaultResponse::ShouldCrash;

    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
    auto response = reinterpret_cast<AnonymousVMObject&>(vmobject()).handle_cow_fault(page_index_in_vmobject, vaddr().offset(page_index_in_region * PAGE_SIZE));
    if (!remap_vmobject_page(page_index_in_vmobject))
        return PageFaultResponse::OutOfMemory;
    return response;
}

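// Handles a not-present fault on an inode-backed page: reads the page from the
// inode into a stack buffer, zero-fills any short read, copies the data into a
// newly allocated physical page through a quickmap, and remaps the page.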
PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_inode());

    LOCKER(vmobject().m_paging_lock);

    ASSERT_INTERRUPTS_DISABLED();
    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];

#ifdef PAGE_FAULT_DEBUG
    dbg() << "Inode fault in " << name() << " page index: " << page_index_in_region;
#endif

    if (!vmobject_physical_page_entry.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "MM: page_in_from_inode() but page already present. Fine with me!";
#endif
        if (!remap_vmobject_page(page_index_in_vmobject))
            return PageFaultResponse::OutOfMemory;
        return PageFaultResponse::Continue;
    }

    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->did_inode_fault();

#ifdef MM_DEBUG
    dbg() << "MM: page_in_from_inode ready to read from inode";
#endif

    u8 page_buffer[PAGE_SIZE];
    auto& inode = inode_vmobject.inode();
    auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
    auto nread = inode.read_bytes(page_index_in_vmobject * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
    if (nread < 0) {
        klog() << "MM: handle_inode_fault had error (" << nread << ") while reading!";
        return PageFaultResponse::ShouldCrash;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }

    vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
    if (vmobject_physical_page_entry.is_null()) {
        klog() << "MM: handle_inode_fault was unable to allocate a physical page";
        return PageFaultResponse::OutOfMemory;
    }

    u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
    {
        void* fault_at;
        if (!safe_memcpy(dest_ptr, page_buffer, PAGE_SIZE, fault_at)) {
            if ((u8*)fault_at >= dest_ptr && (u8*)fault_at <= dest_ptr + PAGE_SIZE)
                dbg() << ">> inode fault: error copying data to " << vmobject_physical_page_entry->paddr() << "/" << VirtualAddress(dest_ptr) << ", failed at " << VirtualAddress(fault_at);
            else
                ASSERT_NOT_REACHED();
        }
    }
    MM.unquickmap_page();

    remap_vmobject_page(page_index_in_vmobject);
    return PageFaultResponse::Continue;
}

RefPtr<Process> Region::get_owner()
{
    return m_owner.strong_ref();
}

}