2020-01-18 09:38:21 +01:00
/*
* Copyright ( c ) 2018 - 2020 , Andreas Kling < kling @ serenityos . org >
* All rights reserved .
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions are met :
*
* 1. Redistributions of source code must retain the above copyright notice , this
* list of conditions and the following disclaimer .
*
* 2. Redistributions in binary form must reproduce the above copyright notice ,
* this list of conditions and the following disclaimer in the documentation
* and / or other materials provided with the distribution .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS "
* AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL
* DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY ,
* OR TORT ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*/
2020-02-22 20:38:17 +02:00
# include <AK/Assertions.h>
2020-03-01 21:45:39 +02:00
# include <AK/String.h>
2020-07-03 10:23:09 -06:00
# include <AK/StringBuilder.h>
2019-06-07 11:43:58 +02:00
# include <AK/Types.h>
2019-06-07 20:02:01 +02:00
# include <Kernel/Arch/i386/CPU.h>
2020-06-27 17:06:33 -06:00
# include <Kernel/Arch/i386/ProcessorInfo.h>
2020-02-22 20:38:17 +02:00
# include <Kernel/Arch/i386/ISRStubs.h>
# include <Kernel/Interrupts/APIC.h>
# include <Kernel/Interrupts/GenericInterruptHandler.h>
# include <Kernel/Interrupts/IRQHandler.h>
# include <Kernel/Interrupts/InterruptManagement.h>
# include <Kernel/Interrupts/SharedIRQHandler.h>
2020-02-28 18:25:19 +02:00
# include <Kernel/Interrupts/SpuriousInterruptHandler.h>
2020-02-22 20:38:17 +02:00
# include <Kernel/Interrupts/UnhandledInterruptHandler.h>
2019-04-28 22:05:13 +02:00
# include <Kernel/KSyms.h>
2020-02-22 20:38:17 +02:00
# include <Kernel/Process.h>
2020-06-27 13:42:28 -06:00
# include <Kernel/SpinLock.h>
# include <Kernel/Thread.h>
2019-06-07 11:43:58 +02:00
# include <Kernel/VM/MemoryManager.h>
2020-06-27 13:42:28 -06:00
# include <Kernel/VM/PageDirectory.h>
2020-05-16 12:00:04 +02:00
# include <Kernel/IO.h>
2019-06-19 20:52:12 +02:00
# include <LibC/mallocdefs.h>
2018-10-16 11:01:38 +02:00
2019-01-15 06:30:19 +01:00
//#define PAGE_FAULT_DEBUG
2020-06-27 13:42:28 -06:00
//#define CONTEXT_SWITCH_DEBUG
2018-11-05 14:10:18 +01:00
2020-02-16 01:27:42 +01:00
namespace Kernel {
2018-10-16 11:01:38 +02:00
// IDTR (base + limit) value loaded via `lidt` by flush_idt(); points at s_idt below.
static DescriptorTablePointer s_idtr ;
2019-05-17 18:25:50 +02:00
// The 256-entry Interrupt Descriptor Table itself.
static Descriptor s_idt [ 256 ] ;
2018-10-16 11:01:38 +02:00

2020-02-22 20:38:17 +02:00
// One handler slot per generic interrupt vector. idt_init() seeds every slot with an
// UnhandledInterruptHandler; register_generic_interrupt_handler() swaps in real handlers.
static GenericInterruptHandler * s_interrupt_handler [ GENERIC_INTERRUPT_HANDLERS_COUNT ] ;
2018-10-22 12:58:29 +02:00
2020-06-27 13:42:28 -06:00
extern " C " void handle_interrupt ( TrapFrame * ) ;
2019-06-07 11:43:58 +02:00
2020-02-16 00:15:37 +01:00
// EH_ENTRY(ec, title): emits the asm entry stub for an exception where the CPU pushes
// an error code. The stub saves all GPRs (pusha) and segment registers, loads the
// kernel data selector into ds/es and the per-processor selector into fs, builds a
// TrapFrame on the stack (regs pointer + reserved space), calls enter_trap_no_irq(),
// dispatches to <title>_handler(TrapFrame*), and exits through common_trap_exit.
# define EH_ENTRY(ec, title) \
extern " C " void title # # _asm_entry ( ) ; \
2020-06-27 13:42:28 -06:00
extern " C " void title # # _handler ( TrapFrame * ) ; \
2020-02-16 00:15:37 +01:00
asm ( \
" .globl " # title " _asm_entry \n " \
" " # title " _asm_entry: \n " \
" pusha \n " \
" pushl %ds \n " \
" pushl %es \n " \
" pushl %fs \n " \
" pushl %gs \n " \
" pushl %ss \n " \
2020-06-27 13:42:28 -06:00
" mov $ " __STRINGIFY ( GDT_SELECTOR_DATA0 ) " , %ax \n " \
2020-02-16 00:15:37 +01:00
" mov %ax, %ds \n " \
" mov %ax, %es \n " \
2020-06-27 13:42:28 -06:00
" mov $ " __STRINGIFY ( GDT_SELECTOR_PROC ) " , %ax \n " \
" mov %ax, %fs \n " \
" pushl %esp \n " /* set TrapFrame::regs */ \
" subl $ " __STRINGIFY ( TRAP_FRAME_SIZE - 4 ) " , %esp \n " \
" pushl %esp \n " \
2020-02-16 00:15:37 +01:00
" cld \n " \
2020-06-27 13:42:28 -06:00
" call enter_trap_no_irq \n " \
2020-02-16 00:15:37 +01:00
" call " # title " _handler \n " \
2020-06-27 13:42:28 -06:00
" jmp common_trap_exit \n " ) ;
2019-06-07 11:43:58 +02:00
2020-02-16 00:15:37 +01:00
// EH_ENTRY_NO_CODE(ec, title): same as EH_ENTRY, but for exceptions where the CPU does
// NOT push an error code — the stub pushes a fake 0x0 first so the resulting stack
// frame layout matches the error-code variant exactly.
# define EH_ENTRY_NO_CODE(ec, title) \
2020-06-27 13:42:28 -06:00
extern " C " void title # # _handler ( TrapFrame * ) ; \
2020-02-16 00:15:37 +01:00
extern " C " void title # # _asm_entry ( ) ; \
asm ( \
" .globl " # title " _asm_entry \n " \
" " # title " _asm_entry: \n " \
" pushl $0x0 \n " \
" pusha \n " \
" pushl %ds \n " \
" pushl %es \n " \
" pushl %fs \n " \
" pushl %gs \n " \
" pushl %ss \n " \
2020-06-27 13:42:28 -06:00
" mov $ " __STRINGIFY ( GDT_SELECTOR_DATA0 ) " , %ax \n " \
2020-02-16 00:15:37 +01:00
" mov %ax, %ds \n " \
" mov %ax, %es \n " \
2020-06-27 13:42:28 -06:00
" mov $ " __STRINGIFY ( GDT_SELECTOR_PROC ) " , %ax \n " \
" mov %ax, %fs \n " \
" pushl %esp \n " /* set TrapFrame::regs */ \
" subl $ " __STRINGIFY ( TRAP_FRAME_SIZE - 4 ) " , %esp \n " \
" pushl %esp \n " \
2020-02-16 00:15:37 +01:00
" cld \n " \
2020-06-27 13:42:28 -06:00
" call enter_trap_no_irq \n " \
2020-02-16 00:15:37 +01:00
" call " # title " _handler \n " \
2020-06-27 13:42:28 -06:00
" jmp common_trap_exit \n " ) ;
2018-10-19 11:28:43 +02:00
2020-02-16 00:15:37 +01:00
// Dump a RegisterState to the kernel log for crash diagnostics: exception/isr numbers,
// cs:eip, flags, stack, segment registers, GPRs, control registers, and (when readable)
// the 8 code bytes at EIP.
static void dump ( const RegisterState & regs )
2018-10-19 11:28:43 +02:00
{
2019-07-03 21:17:35 +02:00
u16 ss ;
u32 esp ;
2020-06-28 15:34:31 -06:00
auto process = Process : : current ( ) ;
// In ring 0 the CPU did not push userspace ss:esp, so use the kernel-side values.
if ( ! process | | process - > is_ring0 ( ) ) {
2019-11-17 12:11:43 +01:00
ss = regs . ss ;
2018-10-19 11:28:43 +02:00
esp = regs . esp ;
} else {
2020-01-09 18:02:01 +01:00
ss = regs . userspace_ss ;
esp = regs . userspace_esp ;
2018-10-19 11:28:43 +02:00
}
2020-03-01 21:45:39 +02:00
klog ( ) < < " exception code: " < < String : : format ( " %04x " , regs . exception_code ) < < " (isr: " < < String : : format ( " %04x " , regs . isr_number ) ;
klog ( ) < < " pc= " < < String : : format ( " %04x " , ( u16 ) regs . cs ) < < " : " < < String : : format ( " %08x " , regs . eip ) < < " flags= " < < String : : format ( " %04x " , ( u16 ) regs . eflags ) ;
klog ( ) < < " stk= " < < String : : format ( " %04x " , ss ) < < " : " < < String : : format ( " %08x " , esp ) ;
klog ( ) < < " ds= " < < String : : format ( " %04x " , ( u16 ) regs . ds ) < < " es= " < < String : : format ( " %04x " , ( u16 ) regs . es ) < < " fs= " < < String : : format ( " %04x " , ( u16 ) regs . fs ) < < " gs= " < < String : : format ( " %04x " , ( u16 ) regs . gs ) ;
klog ( ) < < " eax= " < < String : : format ( " %08x " , regs . eax ) < < " ebx= " < < String : : format ( " %08x " , regs . ebx ) < < " ecx= " < < String : : format ( " %08x " , regs . ecx ) < < " edx= " < < String : : format ( " %08x " , regs . edx ) ;
klog ( ) < < " ebp= " < < String : : format ( " %08x " , regs . ebp ) < < " esp= " < < String : : format ( " %08x " , regs . esp ) < < " esi= " < < String : : format ( " %08x " , regs . esi ) < < " edi= " < < String : : format ( " %08x " , regs . edi ) ;
2020-01-17 19:59:20 +01:00
// Read the control registers directly; cr3 goes through the read_cr3() helper.
u32 cr0 ;
asm ( " movl %%cr0, %%eax "
: " =a " ( cr0 ) ) ;
u32 cr2 ;
asm ( " movl %%cr2, %%eax "
: " =a " ( cr2 ) ) ;
2020-02-10 20:00:32 +01:00
u32 cr3 = read_cr3 ( ) ;
2020-01-17 19:59:20 +01:00
u32 cr4 ;
asm ( " movl %%cr4, %%eax "
: " =a " ( cr4 ) ) ;
2020-03-01 21:45:39 +02:00
klog ( ) < < " cr0= " < < String : : format ( " %08x " , cr0 ) < < " cr2= " < < String : : format ( " %08x " , cr2 ) < < " cr3= " < < String : : format ( " %08x " , cr3 ) < < " cr4= " < < String : : format ( " %08x " , cr4 ) ;
2020-01-17 19:59:20 +01:00
2020-06-28 15:34:31 -06:00
// Only touch the faulting instruction bytes if the process can actually read them.
if ( process & & process - > validate_read ( ( void * ) regs . eip , 8 ) ) {
2020-01-06 14:26:47 +01:00
// Temporarily allow supervisor access to user pages while copying the bytes.
SmapDisabler disabler ;
2019-07-03 21:17:35 +02:00
u8 * codeptr = ( u8 * ) regs . eip ;
2020-03-01 21:45:39 +02:00
klog ( ) < < " code: " < < String : : format ( " %02x " , codeptr [ 0 ] ) < < " " < < String : : format ( " %02x " , codeptr [ 1 ] ) < < " " < < String : : format ( " %02x " , codeptr [ 2 ] ) < < " " < < String : : format ( " %02x " , codeptr [ 3 ] ) < < " " < < String : : format ( " %02x " , codeptr [ 4 ] ) < < " " < < String : : format ( " %02x " , codeptr [ 5 ] ) < < " " < < String : : format ( " %02x " , codeptr [ 6 ] ) < < " " < < String : : format ( " %02x " , codeptr [ 7 ] ) ;
2019-02-20 12:28:41 +01:00
}
}
2020-05-06 21:11:38 +02:00
// Common crash path for CPU exceptions: logs a register dump, then either hangs the
// machine (ring-0 crash, or no current process) or delivers `signal` to the faulting
// process. `out_of_memory` is forwarded so OOM crashes can be treated specially.
void handle_crash ( RegisterState & regs , const char * description , int signal , bool out_of_memory )
2019-02-20 12:28:41 +01:00
{
2020-06-28 15:34:31 -06:00
auto process = Process : : current ( ) ;
if ( ! process ) {
2020-03-01 21:45:39 +02:00
klog ( ) < < description < < " with !current " ;
2019-03-23 22:03:17 +01:00
hang ( ) ;
}
2019-02-20 12:28:41 +01:00
2020-01-19 09:54:58 +01:00
// If a process crashed while inspecting another process,
// make sure we switch back to the right page tables.
2020-06-28 15:34:31 -06:00
MM . enter_process_paging_scope ( * process ) ;
2020-01-19 09:54:58 +01:00
2020-06-28 15:34:31 -06:00
klog ( ) < < " CRASH: CPU # " < < Processor : : current ( ) . id ( ) < < " " < < description < < " . Ring " < < ( process - > is_ring0 ( ) ? 0 : 3 ) < < " . " ;
2019-05-26 02:08:51 +02:00
dump ( regs ) ;
2019-05-22 13:22:27 +02:00
2020-06-28 15:34:31 -06:00
// A kernel-mode crash is unrecoverable: print a backtrace and halt.
if ( process - > is_ring0 ( ) ) {
2020-06-17 18:20:28 +02:00
klog ( ) < < " Crash in ring 0 :( " ;
2019-06-19 18:50:02 +02:00
dump_backtrace ( ) ;
2019-02-15 12:30:48 +01:00
hang ( ) ;
2018-10-19 11:28:43 +02:00
}
2019-11-17 12:11:43 +01:00
// Disable interrupts while tearing the process down with the crash signal.
cli ( ) ;
2020-06-28 15:34:31 -06:00
process - > crash ( signal , regs . eip , out_of_memory ) ;
2019-06-25 05:55:18 +02:00
}
2019-12-14 16:09:07 +01:00
// 6: Invalid opcode (#UD) — no error code pushed by the CPU.
EH_ENTRY_NO_CODE ( 6 , illegal_instruction ) ;
2020-06-27 13:42:28 -06:00
void illegal_instruction_handler ( TrapFrame * trap )
2019-06-25 05:55:18 +02:00
{
2020-01-08 07:27:37 +01:00
clac ( ) ;
2020-06-27 13:42:28 -06:00
handle_crash ( * trap - > regs , " Illegal instruction " , SIGILL ) ;
2019-06-25 05:55:18 +02:00
}
2019-12-14 16:09:07 +01:00
// 0: Divide error (#DE) — delivered as SIGFPE to the faulting process.
EH_ENTRY_NO_CODE ( 0 , divide_error ) ;
2020-06-27 13:42:28 -06:00
void divide_error_handler ( TrapFrame * trap )
2019-06-25 05:55:18 +02:00
{
2020-01-08 07:27:37 +01:00
clac ( ) ;
2020-06-27 13:42:28 -06:00
handle_crash ( * trap - > regs , " Divide error " , SIGFPE ) ;
2019-06-25 05:55:18 +02:00
}
2019-12-14 16:09:07 +01:00
// 13: General protection fault (#GP) — the CPU pushes an error code for this one.
EH_ENTRY ( 13 , general_protection_fault ) ;
2020-06-27 13:42:28 -06:00
void general_protection_fault_handler ( TrapFrame * trap )
2019-06-25 05:55:18 +02:00
{
2020-01-08 07:27:37 +01:00
clac ( ) ;
2020-06-27 13:42:28 -06:00
handle_crash ( * trap - > regs , " General protection fault " , SIGSEGV ) ;
2018-10-19 11:28:43 +02:00
}
2019-01-25 07:52:44 +01:00
// 7: FPU not available exception
2019-12-14 16:09:07 +01:00
EH_ENTRY_NO_CODE ( 7 , fpu_exception ) ;
2020-06-27 13:42:28 -06:00
void fpu_exception_handler ( TrapFrame * )
2019-01-25 05:01:27 +01:00
{
2020-01-01 16:49:08 +01:00
// Just clear the TS flag. We've already restored the FPU state eagerly.
// FIXME: It would be nice if we didn't have to do this at all.
2019-01-25 07:52:44 +01:00
asm volatile ( " clts " ) ;
2019-01-25 05:01:27 +01:00
}
2018-10-18 13:05:00 +02:00
// 14: Page Fault
2019-12-14 16:09:07 +01:00
EH_ENTRY ( 14 , page_fault ) ;
2020-06-27 13:42:28 -06:00
// Handles #PF: reads the faulting address from CR2, validates the user stack,
// asks the MemoryManager to resolve the fault (e.g. CoW / lazy commit), and on
// failure logs diagnostics (including scrub-pattern hints) and crashes the process.
void page_fault_handler ( TrapFrame * trap )
2018-10-18 13:05:00 +02:00
{
2020-01-08 07:27:37 +01:00
clac ( ) ;
2018-10-30 15:33:37 +01:00
2020-06-27 13:42:28 -06:00
auto & regs = * trap - > regs ;
2019-07-03 21:17:35 +02:00
u32 fault_address ;
2019-06-07 11:43:58 +02:00
// CR2 holds the linear address that caused the fault.
asm ( " movl %%cr2, %%eax "
2019-06-19 20:52:12 +02:00
: " =a " ( fault_address ) ) ;
2018-10-18 13:05:00 +02:00
2019-02-10 11:37:59 +01:00
# ifdef PAGE_FAULT_DEBUG
2020-02-10 20:00:32 +01:00
u32 fault_page_directory = read_cr3 ( ) ;
2020-06-28 16:04:35 -06:00
dbg ( ) < < " CPU # " < < ( Processor : : is_initialized ( ) ? Processor : : current ( ) . id ( ) : 0 ) < < " ring " < < ( regs . cs & 3 )
2020-03-01 13:23:26 +01:00
< < " " < < ( regs . exception_code & 1 ? " PV " : " NP " )
< < " page fault in PD= " < < String : : format ( " %x " , fault_page_directory ) < < " , "
< < ( regs . exception_code & 8 ? " reserved-bit " : " " )
< < ( regs . exception_code & 2 ? " write " : " read " )
< < " " < < VirtualAddress ( fault_address ) ;
2019-02-10 11:37:59 +01:00
# endif
2018-10-18 13:05:00 +02:00
2019-01-25 00:32:44 +01:00
# ifdef PAGE_FAULT_DEBUG
2019-02-20 12:28:41 +01:00
dump ( regs ) ;
2019-01-25 00:32:44 +01:00
# endif
2019-11-17 12:11:43 +01:00
// Low two bits of CS are the CPL; 3 means the fault came from userspace.
bool faulted_in_userspace = ( regs . cs & 3 ) = = 3 ;
2020-06-28 15:34:31 -06:00
auto current_thread = Thread : : current ( ) ;
if ( faulted_in_userspace & & ! MM . validate_user_stack ( current_thread - > process ( ) , VirtualAddress ( regs . userspace_esp ) ) ) {
2020-03-06 15:00:44 +01:00
dbg ( ) < < " Invalid stack pointer: " < < VirtualAddress ( regs . userspace_esp ) ;
2019-11-17 12:11:43 +01:00
handle_crash ( regs , " Bad stack on page fault " , SIGSTKFLT ) ;
ASSERT_NOT_REACHED ( ) ;
}
2019-06-19 20:52:12 +02:00
auto response = MM . handle_page_fault ( PageFault ( regs . exception_code , VirtualAddress ( fault_address ) ) ) ;
2018-10-18 13:05:00 +02:00
2020-05-06 21:11:38 +02:00
if ( response = = PageFaultResponse : : ShouldCrash | | response = = PageFaultResponse : : OutOfMemory ) {
// Give a registered SIGSEGV handler first shot — but not for OOM, which
// the process cannot meaningfully handle.
if ( response ! = PageFaultResponse : : OutOfMemory ) {
2020-06-28 15:34:31 -06:00
if ( current_thread - > has_signal_handler ( SIGSEGV ) ) {
current_thread - > send_urgent_signal_to_self ( SIGSEGV ) ;
2020-05-06 21:11:38 +02:00
return ;
}
2019-11-06 13:03:45 +01:00
}
2019-10-07 22:22:50 +13:00
2020-03-06 16:45:53 +02:00
dbg ( ) < < " Unrecoverable page fault, "
2020-03-22 13:12:45 +13:00
< < ( regs . exception_code & PageFaultFlags : : ReservedBitViolation ? " reserved bit violation / " : " " )
< < ( regs . exception_code & PageFaultFlags : : InstructionFetch ? " instruction fetch / " : " " )
< < ( regs . exception_code & PageFaultFlags : : Write ? " write to " : " read from " )
< < " address " < < VirtualAddress ( fault_address ) ;
2019-07-03 21:17:35 +02:00
// Compare the fault address against the allocator scrub byte patterns to
// hint at use-of-uninitialized / use-after-free bugs in the log output.
u32 malloc_scrub_pattern = explode_byte ( MALLOC_SCRUB_BYTE ) ;
u32 free_scrub_pattern = explode_byte ( FREE_SCRUB_BYTE ) ;
2020-02-01 10:26:05 +01:00
u32 kmalloc_scrub_pattern = explode_byte ( KMALLOC_SCRUB_BYTE ) ;
u32 kfree_scrub_pattern = explode_byte ( KFREE_SCRUB_BYTE ) ;
2020-02-01 10:36:25 +01:00
u32 slab_alloc_scrub_pattern = explode_byte ( SLAB_ALLOC_SCRUB_BYTE ) ;
u32 slab_dealloc_scrub_pattern = explode_byte ( SLAB_DEALLOC_SCRUB_BYTE ) ;
2019-06-19 20:52:12 +02:00
if ( ( fault_address & 0xffff0000 ) = = ( malloc_scrub_pattern & 0xffff0000 ) ) {
2020-03-06 16:45:53 +02:00
dbg ( ) < < " Note: Address " < < VirtualAddress ( fault_address ) < < " looks like it may be uninitialized malloc() memory " ;
2019-06-19 20:52:12 +02:00
} else if ( ( fault_address & 0xffff0000 ) = = ( free_scrub_pattern & 0xffff0000 ) ) {
2020-03-06 16:45:53 +02:00
dbg ( ) < < " Note: Address " < < VirtualAddress ( fault_address ) < < " looks like it may be recently free()'d memory " ;
2020-02-01 10:26:05 +01:00
} else if ( ( fault_address & 0xffff0000 ) = = ( kmalloc_scrub_pattern & 0xffff0000 ) ) {
2020-03-06 16:45:53 +02:00
dbg ( ) < < " Note: Address " < < VirtualAddress ( fault_address ) < < " looks like it may be uninitialized kmalloc() memory " ;
2020-02-01 10:26:05 +01:00
} else if ( ( fault_address & 0xffff0000 ) = = ( kfree_scrub_pattern & 0xffff0000 ) ) {
2020-03-06 16:45:53 +02:00
dbg ( ) < < " Note: Address " < < VirtualAddress ( fault_address ) < < " looks like it may be recently kfree()'d memory " ;
2020-02-01 10:36:25 +01:00
} else if ( ( fault_address & 0xffff0000 ) = = ( slab_alloc_scrub_pattern & 0xffff0000 ) ) {
2020-03-06 16:45:53 +02:00
dbg ( ) < < " Note: Address " < < VirtualAddress ( fault_address ) < < " looks like it may be uninitialized slab_alloc() memory " ;
2020-02-01 10:36:25 +01:00
} else if ( ( fault_address & 0xffff0000 ) = = ( slab_dealloc_scrub_pattern & 0xffff0000 ) ) {
2020-03-06 16:45:53 +02:00
dbg ( ) < < " Note: Address " < < VirtualAddress ( fault_address ) < < " looks like it may be recently slab_dealloc()'d memory " ;
2019-06-19 20:52:12 +02:00
} else if ( fault_address < 4096 ) {
2020-03-06 16:45:53 +02:00
dbg ( ) < < " Note: Address " < < VirtualAddress ( fault_address ) < < " looks like a possible nullptr dereference " ;
2019-06-19 20:52:12 +02:00
}
2020-05-06 21:11:38 +02:00
handle_crash ( regs , " Page Fault " , SIGSEGV , response = = PageFaultResponse : : OutOfMemory ) ;
2018-10-18 13:05:00 +02:00
} else if ( response = = PageFaultResponse : : Continue ) {
2018-11-05 13:48:07 +01:00
# ifdef PAGE_FAULT_DEBUG
2020-02-24 19:03:18 +02:00
dbg ( ) < < " Continuing after resolved page fault " ;
2018-11-05 13:48:07 +01:00
# endif
2018-10-18 13:05:00 +02:00
} else {
ASSERT_NOT_REACHED ( ) ;
}
}
2020-04-13 16:37:47 +03:00
// 1: Debug exception (#DB) — used for single-step tracing (ptrace-style).
EH_ENTRY_NO_CODE ( 1 , debug ) ;
2020-06-27 13:42:28 -06:00
void debug_handler ( TrapFrame * trap )
2020-04-13 16:37:47 +03:00
{
clac ( ) ;
2020-06-27 13:42:28 -06:00
auto & regs = * trap - > regs ;
2020-06-28 15:34:31 -06:00
auto current_thread = Thread : : current ( ) ;
// NOTE(review): process() appears to return a reference (its address is taken), so
// `&current_thread->process() == nullptr` is always false — only the ring check
// below has any effect. Confirm whether `!current_thread` was the intended check.
if ( & current_thread - > process ( ) = = nullptr | | ( regs . cs & 3 ) = = 0 ) {
2020-04-13 16:37:47 +03:00
klog ( ) < < " Debug Exception in Ring0 " ;
hang ( ) ;
return ;
}
// DR6 bit 14 (BS) is set when the exception was caused by single-stepping.
constexpr u8 REASON_SINGLESTEP = 14 ;
bool is_reason_singlestep = ( read_dr6 ( ) & ( 1 < < REASON_SINGLESTEP ) ) ;
if ( ! is_reason_singlestep )
return ;
2020-06-28 15:34:31 -06:00
// Hand the register snapshot to the tracer, then stop via SIGTRAP.
if ( current_thread - > tracer ( ) ) {
current_thread - > tracer ( ) - > set_regs ( regs ) ;
2020-04-13 16:37:47 +03:00
}
2020-06-28 15:34:31 -06:00
current_thread - > send_urgent_signal_to_self ( SIGTRAP ) ;
2020-04-13 16:37:47 +03:00
}
2020-04-03 14:50:17 +03:00
// 3: Breakpoint trap (#BP, int3) — reported to the tracer via SIGTRAP.
EH_ENTRY_NO_CODE ( 3 , breakpoint ) ;
2020-06-27 13:42:28 -06:00
void breakpoint_handler ( TrapFrame * trap )
2020-04-03 14:50:17 +03:00
{
clac ( ) ;
2020-06-27 13:42:28 -06:00
auto & regs = * trap - > regs ;
2020-06-28 15:34:31 -06:00
auto current_thread = Thread : : current ( ) ;
// NOTE(review): same issue as debug_handler() — comparing the address of a
// reference to nullptr is always false; only the ring-0 check is effective.
if ( & current_thread - > process ( ) = = nullptr | | ( regs . cs & 3 ) = = 0 ) {
2020-04-03 14:50:17 +03:00
klog ( ) < < " Breakpoint Trap in Ring0 " ;
hang ( ) ;
return ;
}
2020-06-28 15:34:31 -06:00
if ( current_thread - > tracer ( ) ) {
current_thread - > tracer ( ) - > set_regs ( regs ) ;
2020-04-03 14:50:17 +03:00
}
2020-06-28 15:34:31 -06:00
current_thread - > send_urgent_signal_to_self ( SIGTRAP ) ;
2020-04-03 14:50:17 +03:00
}
2020-03-01 21:45:39 +02:00
// EH(i, msg): defines _exception<i>(), a terminal handler for exceptions we never
// expect to recover from — logs `msg` plus CR0/CR2/CR3/CR4, then hangs the machine.
# define EH(i, msg) \
static void _exception # # i ( ) \
{ \
klog ( ) < < msg ; \
u32 cr0 , cr2 , cr3 , cr4 ; \
asm ( " movl %%cr0, %%eax " \
: " =a " ( cr0 ) ) ; \
asm ( " movl %%cr2, %%eax " \
: " =a " ( cr2 ) ) ; \
asm ( " movl %%cr3, %%eax " \
: " =a " ( cr3 ) ) ; \
asm ( " movl %%cr4, %%eax " \
: " =a " ( cr4 ) ) ; \
klog ( ) < < " CR0= " < < String : : format ( " %x " , cr0 ) < < " CR2= " < < String : : format ( " %x " , cr2 ) < < " CR3= " < < String : : format ( " %x " , cr3 ) < < " CR4= " < < String : : format ( " %x " , cr4 ) ; \
hang ( ) ; \
2018-10-16 11:01:38 +02:00
}
EH ( 2 , " Unknown error " )
EH ( 4 , " Overflow " )
EH ( 5 , " Bounds check " )
EH ( 8 , " Double fault " )
EH ( 9 , " Coprocessor segment overrun " )
EH ( 10 , " Invalid TSS " )
EH ( 11 , " Segment not present " )
EH ( 12 , " Stack exception " )
EH ( 15 , " Unknown error " )
EH ( 16 , " Coprocessor error " )
2020-06-04 09:10:16 -06:00
// Read-only accessor for the IDTR value installed by idt_init()/flush_idt().
const DescriptorTablePointer & get_idtr ( )
{
return s_idtr ;
}
2018-10-16 11:01:38 +02:00
// Default gate target for all vectors not explicitly set up in idt_init():
// log and halt, since we have no way to handle or acknowledge the interrupt.
static void unimp_trap ( )
{
2020-03-01 21:45:39 +02:00
klog ( ) < < " Unhandled IRQ. " ;
2019-02-15 12:30:48 +01:00
hang ( ) ;
2018-10-16 11:01:38 +02:00
}
2020-02-22 20:38:17 +02:00
// Look up the registered handler for a generic interrupt vector.
// Asserts that a handler exists (idt_init() seeds every slot).
GenericInterruptHandler & get_interrupt_handler ( u8 interrupt_number )
2018-10-22 12:58:29 +02:00
{
2020-02-22 20:38:17 +02:00
ASSERT ( s_interrupt_handler [ interrupt_number ] ! = nullptr ) ;
return * s_interrupt_handler [ interrupt_number ] ;
2018-10-22 12:58:29 +02:00
}
2020-02-22 20:38:17 +02:00
// Put an UnhandledInterruptHandler back into the slot for `interrupt_number`.
// NOTE(review): the new object presumably registers itself from its constructor
// (the pointer is not stored here), and the displaced handler is not deleted —
// confirm ownership semantics against GenericInterruptHandler.
static void revert_to_unused_handler ( u8 interrupt_number )
2018-10-22 12:58:29 +02:00
{
2020-02-22 20:38:17 +02:00
new UnhandledInterruptHandler ( interrupt_number ) ;
}
// Install `handler` for `interrupt_number`. Handles the three occupancy cases:
// placeholder (replace it), exclusive handler (promote the slot to a SharedIRQHandler
// holding both), or an existing shared handler (join it).
void register_generic_interrupt_handler ( u8 interrupt_number , GenericInterruptHandler & handler )
{
if ( s_interrupt_handler [ interrupt_number ] ! = nullptr ) {
2020-03-05 19:13:55 +02:00
// Slot only holds the UnhandledInterruptHandler placeholder: just replace it.
if ( s_interrupt_handler [ interrupt_number ] - > type ( ) = = HandlerType : : UnhandledInterruptHandler ) {
2020-02-22 20:38:17 +02:00
s_interrupt_handler [ interrupt_number ] = & handler ;
return ;
}
// Slot already holds a SharedIRQHandler: add the new handler to it.
if ( s_interrupt_handler [ interrupt_number ] - > is_shared_handler ( ) & & ! s_interrupt_handler [ interrupt_number ] - > is_sharing_with_others ( ) ) {
2020-03-05 19:13:55 +02:00
ASSERT ( s_interrupt_handler [ interrupt_number ] - > type ( ) = = HandlerType : : SharedIRQHandler ) ;
2020-02-22 20:38:17 +02:00
static_cast < SharedIRQHandler * > ( s_interrupt_handler [ interrupt_number ] ) - > register_handler ( handler ) ;
return ;
2020-02-23 14:14:01 +02:00
}
2020-02-22 20:38:17 +02:00
// Slot holds a plain IRQHandler: promote to a SharedIRQHandler containing
// the previous handler plus the new one.
if ( ! s_interrupt_handler [ interrupt_number ] - > is_shared_handler ( ) ) {
2020-03-05 19:13:55 +02:00
ASSERT ( s_interrupt_handler [ interrupt_number ] - > type ( ) = = HandlerType : : IRQHandler ) ;
2020-02-22 20:38:17 +02:00
auto & previous_handler = * s_interrupt_handler [ interrupt_number ] ;
s_interrupt_handler [ interrupt_number ] = nullptr ;
SharedIRQHandler : : initialize ( interrupt_number ) ;
static_cast < SharedIRQHandler * > ( s_interrupt_handler [ interrupt_number ] ) - > register_handler ( previous_handler ) ;
static_cast < SharedIRQHandler * > ( s_interrupt_handler [ interrupt_number ] ) - > register_handler ( handler ) ;
return ;
}
ASSERT_NOT_REACHED ( ) ;
} else {
s_interrupt_handler [ interrupt_number ] = & handler ;
}
}
// Remove `handler` from `interrupt_number`'s slot, reverting the slot to an
// UnhandledInterruptHandler placeholder when it becomes empty.
void unregister_generic_interrupt_handler ( u8 interrupt_number , GenericInterruptHandler & handler )
{
ASSERT ( s_interrupt_handler [ interrupt_number ] ! = nullptr ) ;
2020-03-05 19:13:55 +02:00
if ( s_interrupt_handler [ interrupt_number ] - > type ( ) = = HandlerType : : UnhandledInterruptHandler ) {
2020-02-22 20:38:17 +02:00
dbg ( ) < < " Trying to unregister unused handler (?) " ;
return ;
}
// Shared slot: remove this member; if nothing is left sharing, revert to unused.
if ( s_interrupt_handler [ interrupt_number ] - > is_shared_handler ( ) & & ! s_interrupt_handler [ interrupt_number ] - > is_sharing_with_others ( ) ) {
2020-03-05 19:13:55 +02:00
ASSERT ( s_interrupt_handler [ interrupt_number ] - > type ( ) = = HandlerType : : SharedIRQHandler ) ;
2020-02-22 20:38:17 +02:00
static_cast < SharedIRQHandler * > ( s_interrupt_handler [ interrupt_number ] ) - > unregister_handler ( handler ) ;
2020-02-23 14:14:01 +02:00
if ( ! static_cast < SharedIRQHandler * > ( s_interrupt_handler [ interrupt_number ] ) - > sharing_devices_count ( ) ) {
2020-02-22 20:38:17 +02:00
revert_to_unused_handler ( interrupt_number ) ;
}
return ;
}
// Exclusive slot: simply revert it to the placeholder.
if ( ! s_interrupt_handler [ interrupt_number ] - > is_shared_handler ( ) ) {
2020-03-05 19:13:55 +02:00
ASSERT ( s_interrupt_handler [ interrupt_number ] - > type ( ) = = HandlerType : : IRQHandler ) ;
2020-02-22 20:38:17 +02:00
revert_to_unused_handler ( interrupt_number ) ;
return ;
}
ASSERT_NOT_REACHED ( ) ;
2018-10-22 12:58:29 +02:00
}
2019-07-03 21:17:35 +02:00
// Write an IDT gate for vector `index` pointing at `f`.
// low word layout: selector 0x0008 (kernel code) in the high half, offset bits 0..15;
// high word: offset bits 16..31 | 0x8e00 (present, DPL 0, 32-bit interrupt gate).
void register_interrupt_handler ( u8 index , void ( * f ) ( ) )
2018-10-16 11:01:38 +02:00
{
s_idt [ index ] . low = 0x00080000 | LSW ( ( f ) ) ;
2019-07-03 21:17:35 +02:00
s_idt [ index ] . high = ( ( u32 ) ( f ) & 0xffff0000 ) | 0x8e00 ;
2018-10-16 11:01:38 +02:00
}
2019-07-03 21:17:35 +02:00
// Same as register_interrupt_handler(), but with gate flags 0xef00
// (present, DPL 3, 32-bit trap gate) so userspace code may invoke the vector
// directly (e.g. int3 breakpoints and the debug exception).
void register_user_callable_interrupt_handler ( u8 index , void ( * f ) ( ) )
2018-10-16 11:01:38 +02:00
{
s_idt [ index ] . low = 0x00080000 | LSW ( ( f ) ) ;
2019-07-03 21:17:35 +02:00
s_idt [ index ] . high = ( ( u32 ) ( f ) & 0xffff0000 ) | 0xef00 ;
2018-10-16 11:01:38 +02:00
}
2018-12-03 00:39:25 +01:00
// Load s_idtr into the CPU's IDTR so the table in s_idt takes effect.
void flush_idt ( )
2018-10-16 11:01:38 +02:00
{
2019-06-07 11:43:58 +02:00
asm ( " lidt %0 " : : " m " ( s_idtr ) ) ;
2018-10-16 11:01:38 +02:00
}
2020-06-27 13:42:28 -06:00
// Build and install the IDT: fill all vectors with unimp_trap, install the CPU
// exception handlers (vectors 0x00-0x10), map generic interrupt vectors 0x50-0xcf
// to the generated interrupt_<n>_asm_entry stubs, seed every generic handler slot
// with an UnhandledInterruptHandler, then load the IDTR.
static void idt_init ( )
2018-10-16 11:01:38 +02:00
{
s_idtr . address = s_idt ;
2019-05-17 18:16:22 +02:00
// 256 descriptors of 8 bytes each; limit is size - 1 per the lidt convention.
s_idtr . limit = 0x100 * 8 - 1 ;
2018-10-16 11:01:38 +02:00
2019-07-03 21:17:35 +02:00
for ( u8 i = 0xff ; i > 0x10 ; - - i )
2018-12-03 00:39:25 +01:00
register_interrupt_handler ( i , unimp_trap ) ;
2019-12-14 16:09:07 +01:00
register_interrupt_handler ( 0x00 , divide_error_asm_entry ) ;
2020-04-13 16:37:47 +03:00
register_user_callable_interrupt_handler ( 0x01 , debug_asm_entry ) ;
2018-12-03 00:39:25 +01:00
register_interrupt_handler ( 0x02 , _exception2 ) ;
2020-04-03 14:50:17 +03:00
register_user_callable_interrupt_handler ( 0x03 , breakpoint_asm_entry ) ;
2018-12-03 00:39:25 +01:00
register_interrupt_handler ( 0x04 , _exception4 ) ;
register_interrupt_handler ( 0x05 , _exception5 ) ;
2019-12-14 16:09:07 +01:00
register_interrupt_handler ( 0x06 , illegal_instruction_asm_entry ) ;
register_interrupt_handler ( 0x07 , fpu_exception_asm_entry ) ;
2018-12-03 00:39:25 +01:00
register_interrupt_handler ( 0x08 , _exception8 ) ;
register_interrupt_handler ( 0x09 , _exception9 ) ;
register_interrupt_handler ( 0x0a , _exception10 ) ;
register_interrupt_handler ( 0x0b , _exception11 ) ;
register_interrupt_handler ( 0x0c , _exception12 ) ;
2019-12-14 16:09:07 +01:00
register_interrupt_handler ( 0x0d , general_protection_fault_asm_entry ) ;
register_interrupt_handler ( 0x0e , page_fault_asm_entry ) ;
2018-12-03 00:39:25 +01:00
register_interrupt_handler ( 0x0f , _exception15 ) ;
register_interrupt_handler ( 0x10 , _exception16 ) ;
2020-02-22 20:38:17 +02:00
// Generic interrupt vectors: vector 0x50 + n dispatches to interrupt_<n>_asm_entry.
register_interrupt_handler ( 0x50 , interrupt_0_asm_entry ) ;
register_interrupt_handler ( 0x51 , interrupt_1_asm_entry ) ;
register_interrupt_handler ( 0x52 , interrupt_2_asm_entry ) ;
register_interrupt_handler ( 0x53 , interrupt_3_asm_entry ) ;
register_interrupt_handler ( 0x54 , interrupt_4_asm_entry ) ;
register_interrupt_handler ( 0x55 , interrupt_5_asm_entry ) ;
register_interrupt_handler ( 0x56 , interrupt_6_asm_entry ) ;
register_interrupt_handler ( 0x57 , interrupt_7_asm_entry ) ;
register_interrupt_handler ( 0x58 , interrupt_8_asm_entry ) ;
register_interrupt_handler ( 0x59 , interrupt_9_asm_entry ) ;
register_interrupt_handler ( 0x5a , interrupt_10_asm_entry ) ;
register_interrupt_handler ( 0x5b , interrupt_11_asm_entry ) ;
register_interrupt_handler ( 0x5c , interrupt_12_asm_entry ) ;
register_interrupt_handler ( 0x5d , interrupt_13_asm_entry ) ;
register_interrupt_handler ( 0x5e , interrupt_14_asm_entry ) ;
register_interrupt_handler ( 0x5f , interrupt_15_asm_entry ) ;
register_interrupt_handler ( 0x60 , interrupt_16_asm_entry ) ;
register_interrupt_handler ( 0x61 , interrupt_17_asm_entry ) ;
register_interrupt_handler ( 0x62 , interrupt_18_asm_entry ) ;
register_interrupt_handler ( 0x63 , interrupt_19_asm_entry ) ;
register_interrupt_handler ( 0x64 , interrupt_20_asm_entry ) ;
register_interrupt_handler ( 0x65 , interrupt_21_asm_entry ) ;
register_interrupt_handler ( 0x66 , interrupt_22_asm_entry ) ;
register_interrupt_handler ( 0x67 , interrupt_23_asm_entry ) ;
register_interrupt_handler ( 0x68 , interrupt_24_asm_entry ) ;
register_interrupt_handler ( 0x69 , interrupt_25_asm_entry ) ;
register_interrupt_handler ( 0x6a , interrupt_26_asm_entry ) ;
register_interrupt_handler ( 0x6b , interrupt_27_asm_entry ) ;
register_interrupt_handler ( 0x6c , interrupt_28_asm_entry ) ;
register_interrupt_handler ( 0x6d , interrupt_29_asm_entry ) ;
register_interrupt_handler ( 0x6e , interrupt_30_asm_entry ) ;
register_interrupt_handler ( 0x6f , interrupt_31_asm_entry ) ;
register_interrupt_handler ( 0x70 , interrupt_32_asm_entry ) ;
register_interrupt_handler ( 0x71 , interrupt_33_asm_entry ) ;
register_interrupt_handler ( 0x72 , interrupt_34_asm_entry ) ;
register_interrupt_handler ( 0x73 , interrupt_35_asm_entry ) ;
register_interrupt_handler ( 0x74 , interrupt_36_asm_entry ) ;
register_interrupt_handler ( 0x75 , interrupt_37_asm_entry ) ;
register_interrupt_handler ( 0x76 , interrupt_38_asm_entry ) ;
register_interrupt_handler ( 0x77 , interrupt_39_asm_entry ) ;
register_interrupt_handler ( 0x78 , interrupt_40_asm_entry ) ;
register_interrupt_handler ( 0x79 , interrupt_41_asm_entry ) ;
register_interrupt_handler ( 0x7a , interrupt_42_asm_entry ) ;
register_interrupt_handler ( 0x7b , interrupt_43_asm_entry ) ;
register_interrupt_handler ( 0x7c , interrupt_44_asm_entry ) ;
register_interrupt_handler ( 0x7d , interrupt_45_asm_entry ) ;
register_interrupt_handler ( 0x7e , interrupt_46_asm_entry ) ;
register_interrupt_handler ( 0x7f , interrupt_47_asm_entry ) ;
register_interrupt_handler ( 0x80 , interrupt_48_asm_entry ) ;
register_interrupt_handler ( 0x81 , interrupt_49_asm_entry ) ;
register_interrupt_handler ( 0x82 , interrupt_50_asm_entry ) ;
register_interrupt_handler ( 0x83 , interrupt_51_asm_entry ) ;
register_interrupt_handler ( 0x84 , interrupt_52_asm_entry ) ;
register_interrupt_handler ( 0x85 , interrupt_53_asm_entry ) ;
register_interrupt_handler ( 0x86 , interrupt_54_asm_entry ) ;
register_interrupt_handler ( 0x87 , interrupt_55_asm_entry ) ;
register_interrupt_handler ( 0x88 , interrupt_56_asm_entry ) ;
register_interrupt_handler ( 0x89 , interrupt_57_asm_entry ) ;
register_interrupt_handler ( 0x8a , interrupt_58_asm_entry ) ;
register_interrupt_handler ( 0x8b , interrupt_59_asm_entry ) ;
register_interrupt_handler ( 0x8c , interrupt_60_asm_entry ) ;
register_interrupt_handler ( 0x8d , interrupt_61_asm_entry ) ;
register_interrupt_handler ( 0x8e , interrupt_62_asm_entry ) ;
register_interrupt_handler ( 0x8f , interrupt_63_asm_entry ) ;
register_interrupt_handler ( 0x90 , interrupt_64_asm_entry ) ;
register_interrupt_handler ( 0x91 , interrupt_65_asm_entry ) ;
register_interrupt_handler ( 0x92 , interrupt_66_asm_entry ) ;
register_interrupt_handler ( 0x93 , interrupt_67_asm_entry ) ;
register_interrupt_handler ( 0x94 , interrupt_68_asm_entry ) ;
register_interrupt_handler ( 0x95 , interrupt_69_asm_entry ) ;
register_interrupt_handler ( 0x96 , interrupt_70_asm_entry ) ;
register_interrupt_handler ( 0x97 , interrupt_71_asm_entry ) ;
register_interrupt_handler ( 0x98 , interrupt_72_asm_entry ) ;
register_interrupt_handler ( 0x99 , interrupt_73_asm_entry ) ;
register_interrupt_handler ( 0x9a , interrupt_74_asm_entry ) ;
register_interrupt_handler ( 0x9b , interrupt_75_asm_entry ) ;
register_interrupt_handler ( 0x9c , interrupt_76_asm_entry ) ;
register_interrupt_handler ( 0x9d , interrupt_77_asm_entry ) ;
register_interrupt_handler ( 0x9e , interrupt_78_asm_entry ) ;
register_interrupt_handler ( 0x9f , interrupt_79_asm_entry ) ;
register_interrupt_handler ( 0xa0 , interrupt_80_asm_entry ) ;
register_interrupt_handler ( 0xa1 , interrupt_81_asm_entry ) ;
register_interrupt_handler ( 0xa2 , interrupt_82_asm_entry ) ;
register_interrupt_handler ( 0xa3 , interrupt_83_asm_entry ) ;
register_interrupt_handler ( 0xa4 , interrupt_84_asm_entry ) ;
register_interrupt_handler ( 0xa5 , interrupt_85_asm_entry ) ;
register_interrupt_handler ( 0xa6 , interrupt_86_asm_entry ) ;
register_interrupt_handler ( 0xa7 , interrupt_87_asm_entry ) ;
register_interrupt_handler ( 0xa8 , interrupt_88_asm_entry ) ;
register_interrupt_handler ( 0xa9 , interrupt_89_asm_entry ) ;
register_interrupt_handler ( 0xaa , interrupt_90_asm_entry ) ;
register_interrupt_handler ( 0xab , interrupt_91_asm_entry ) ;
register_interrupt_handler ( 0xac , interrupt_92_asm_entry ) ;
register_interrupt_handler ( 0xad , interrupt_93_asm_entry ) ;
register_interrupt_handler ( 0xae , interrupt_94_asm_entry ) ;
register_interrupt_handler ( 0xaf , interrupt_95_asm_entry ) ;
register_interrupt_handler ( 0xb0 , interrupt_96_asm_entry ) ;
register_interrupt_handler ( 0xb1 , interrupt_97_asm_entry ) ;
register_interrupt_handler ( 0xb2 , interrupt_98_asm_entry ) ;
register_interrupt_handler ( 0xb3 , interrupt_99_asm_entry ) ;
register_interrupt_handler ( 0xb4 , interrupt_100_asm_entry ) ;
register_interrupt_handler ( 0xb5 , interrupt_101_asm_entry ) ;
register_interrupt_handler ( 0xb6 , interrupt_102_asm_entry ) ;
register_interrupt_handler ( 0xb7 , interrupt_103_asm_entry ) ;
register_interrupt_handler ( 0xb8 , interrupt_104_asm_entry ) ;
register_interrupt_handler ( 0xb9 , interrupt_105_asm_entry ) ;
register_interrupt_handler ( 0xba , interrupt_106_asm_entry ) ;
register_interrupt_handler ( 0xbb , interrupt_107_asm_entry ) ;
register_interrupt_handler ( 0xbc , interrupt_108_asm_entry ) ;
register_interrupt_handler ( 0xbd , interrupt_109_asm_entry ) ;
register_interrupt_handler ( 0xbe , interrupt_110_asm_entry ) ;
register_interrupt_handler ( 0xbf , interrupt_111_asm_entry ) ;
register_interrupt_handler ( 0xc0 , interrupt_112_asm_entry ) ;
register_interrupt_handler ( 0xc1 , interrupt_113_asm_entry ) ;
register_interrupt_handler ( 0xc2 , interrupt_114_asm_entry ) ;
register_interrupt_handler ( 0xc3 , interrupt_115_asm_entry ) ;
register_interrupt_handler ( 0xc4 , interrupt_116_asm_entry ) ;
register_interrupt_handler ( 0xc5 , interrupt_117_asm_entry ) ;
register_interrupt_handler ( 0xc6 , interrupt_118_asm_entry ) ;
register_interrupt_handler ( 0xc7 , interrupt_119_asm_entry ) ;
register_interrupt_handler ( 0xc8 , interrupt_120_asm_entry ) ;
register_interrupt_handler ( 0xc9 , interrupt_121_asm_entry ) ;
register_interrupt_handler ( 0xca , interrupt_122_asm_entry ) ;
register_interrupt_handler ( 0xcb , interrupt_123_asm_entry ) ;
register_interrupt_handler ( 0xcc , interrupt_124_asm_entry ) ;
register_interrupt_handler ( 0xcd , interrupt_125_asm_entry ) ;
register_interrupt_handler ( 0xce , interrupt_126_asm_entry ) ;
register_interrupt_handler ( 0xcf , interrupt_127_asm_entry ) ;
2020-03-06 03:20:51 +02:00
dbg ( ) < < " Installing Unhandled Handlers " ;
2020-02-22 20:38:17 +02:00
for ( u8 i = 0 ; i < GENERIC_INTERRUPT_HANDLERS_COUNT ; + + i ) {
new UnhandledInterruptHandler ( i ) ;
2018-10-22 12:58:29 +02:00
}
2018-12-03 00:39:25 +01:00
flush_idt ( ) ;
2018-10-16 11:01:38 +02:00
}
2019-07-03 21:17:35 +02:00
// Load the task register with the given TSS selector. The CPU uses the
// referenced TSS for e.g. the ring 0 stack (esp0/ss0) on privilege
// transitions. (Also removes stray VCS timestamp lines that had been
// interleaved into the body.)
void load_task_register(u16 selector)
{
    asm("ltr %0" ::"r"(selector));
}
// Common C++ entry point for all hardware interrupt stubs: dispatch to the
// registered GenericInterruptHandler for this vector and signal EOI.
void handle_interrupt(TrapFrame* trap)
{
    clac(); // re-assert SMAP protection in case the interrupted code had it open
    auto& regs = *trap->regs;

    ASSERT(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number <= (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
    // Was a magic 0x50; use IRQ_VECTOR_BASE to stay in sync with the assertion above.
    u8 irq = (u8)(regs.isr_number - IRQ_VECTOR_BASE);
    ASSERT(s_interrupt_handler[irq]);
    s_interrupt_handler[irq]->handle_interrupt(regs);
    s_interrupt_handler[irq]->eoi();
}
// Thin C ABI wrappers so assembly stubs can call into the per-CPU
// Processor object's trap bookkeeping.
void enter_trap_no_irq(TrapFrame* trap)
{
    auto& processor = Processor::current();
    processor.enter_trap(*trap, false);
}
void enter_trap(TrapFrame* trap)
{
    auto& processor = Processor::current();
    processor.enter_trap(*trap, true);
}
void exit_trap(TrapFrame* trap)
{
    Processor::current().exit_trap(*trap);
}
// Enable SSE on the current CPU:
//   CR0: clear EM (bit 2, no x87 emulation), set MP (bit 1, monitor co-processor)
//   CR4: set OSFXSR | OSXMMEXCPT (bits 9 and 10, i.e. 0x600)
static void sse_init()
{
    asm volatile(
        "mov %cr0, %eax\n"
        "andl $0xfffffffb, %eax\n"
        "orl $0x2, %eax\n"
        "mov %eax, %cr0\n"
        "mov %cr4, %eax\n"
        "orl $0x600, %eax\n"
        "mov %eax, %cr4\n");
}
// Accessors for x86 control/debug registers. These instructions require CPL 0.
u32 read_cr0()
{
    u32 value;
    asm("movl %%cr0, %%eax"
        : "=a"(value));
    return value;
}
u32 read_cr3()
{
    u32 value;
    asm("movl %%cr3, %%eax"
        : "=a"(value));
    return value;
}
void write_cr3(u32 cr3)
{
    // Reloading CR3 flushes non-global TLB entries, hence the memory clobber.
    asm volatile("movl %%eax, %%cr3" ::"a"(cr3)
                 : "memory");
}
u32 read_cr4()
{
    u32 value;
    asm("movl %%cr4, %%eax"
        : "=a"(value));
    return value;
}
u32 read_dr6()
{
    u32 value;
    asm("movl %%dr6, %%eax"
        : "=a"(value));
    return value;
}
// A pristine FPU state captured once on the BSP (see Processor::initialize)
// and used as the initial FPU state for new threads.
FPUState Processor::s_clean_fpu_state;

// All known Processor objects, indexed by CPU id. Populated during boot
// under s_processor_lock; effectively read-only afterwards (see by_id()).
static Vector<Processor*>* s_processors;
static SpinLock s_processor_lock;

// Returns the global processor list; must not be called before the BSP
// has registered itself in Processor::initialize().
Vector<Processor*>& Processor::processors()
{
    ASSERT(s_processors);
    return *s_processors;
}
// Look up the Processor object for the given CPU id.
Processor& Processor::by_id(u32 cpu)
{
    // s_processors does not need to be protected by a lock of any kind.
    // It is populated early in the boot process, and the BSP is waiting
    // for all APs to finish, after which this array never gets modified
    // again, so it's safe to not protect access to it here
    auto& procs = processors();
    // Check bounds *before* indexing (the original asserted procs[cpu] first).
    ASSERT(procs.size() > cpu);
    ASSERT(procs[cpu] != nullptr);
    return *procs[cpu];
}
void Processor : : cpu_detect ( )
{
// NOTE: This is called during Processor::early_initialize, we cannot
// safely log at this point because we don't have kmalloc
// initialized yet!
auto set_feature =
[ & ] ( CPUFeature f ) {
m_features = static_cast < CPUFeature > ( static_cast < u32 > ( m_features ) | static_cast < u32 > ( f ) ) ;
} ;
m_features = static_cast < CPUFeature > ( 0 ) ;
2020-01-01 12:56:21 +01:00
CPUID processor_info ( 0x1 ) ;
2020-07-03 10:23:09 -06:00
if ( processor_info . edx ( ) & ( 1 < < 6 ) )
set_feature ( CPUFeature : : PAE ) ;
if ( processor_info . edx ( ) & ( 1 < < 13 ) )
set_feature ( CPUFeature : : PGE ) ;
if ( processor_info . edx ( ) & ( 1 < < 25 ) )
set_feature ( CPUFeature : : SSE ) ;
if ( processor_info . edx ( ) & ( 1 < < 4 ) )
set_feature ( CPUFeature : : TSC ) ;
if ( processor_info . ecx ( ) & ( 1 < < 30 ) )
set_feature ( CPUFeature : : RDRAND ) ;
2020-01-01 12:56:21 +01:00
CPUID extended_processor_info ( 0x80000001 ) ;
2020-07-03 10:23:09 -06:00
if ( extended_processor_info . edx ( ) & ( 1 < < 20 ) )
set_feature ( CPUFeature : : NX ) ;
2020-01-01 12:56:21 +01:00
CPUID extended_features ( 0x7 ) ;
2020-07-03 10:23:09 -06:00
if ( extended_features . ebx ( ) & ( 1 < < 20 ) )
set_feature ( CPUFeature : : SMAP ) ;
if ( extended_features . ebx ( ) & ( 1 < < 7 ) )
set_feature ( CPUFeature : : SMEP ) ;
if ( extended_features . ecx ( ) & ( 1 < < 2 ) )
set_feature ( CPUFeature : : UMIP ) ;
if ( extended_features . ebx ( ) & ( 1 < < 18 ) )
set_feature ( CPUFeature : : RDSEED ) ;
2020-01-01 12:56:21 +01:00
}
void Processor : : cpu_setup ( )
2020-01-17 23:56:13 +01:00
{
2020-07-03 10:23:09 -06:00
// NOTE: This is called during Processor::early_initialize, we cannot
// safely log at this point because we don't have kmalloc
// initialized yet!
cpu_detect ( ) ;
2020-01-18 10:11:29 +01:00
2020-07-03 10:23:09 -06:00
if ( has_feature ( CPUFeature : : SSE ) )
2020-01-18 10:11:29 +01:00
sse_init ( ) ;
2020-01-17 23:56:13 +01:00
asm volatile (
2020-01-18 10:11:29 +01:00
" movl %%cr0, %%eax \n "
" orl $0x00010000, %%eax \n "
" movl %%eax, %%cr0 \n " : :
: " %eax " , " memory " ) ;
2020-01-17 23:56:13 +01:00
2020-07-03 10:23:09 -06:00
if ( has_feature ( CPUFeature : : PGE ) ) {
2020-01-17 23:56:13 +01:00
// Turn on CR4.PGE so the CPU will respect the G bit in page tables.
asm volatile (
" mov %cr4, %eax \n "
" orl $0x80, %eax \n "
" mov %eax, %cr4 \n " ) ;
}
2020-07-03 10:23:09 -06:00
if ( has_feature ( CPUFeature : : NX ) ) {
2020-01-18 10:11:29 +01:00
// Turn on IA32_EFER.NXE
asm volatile (
" movl $0xc0000080, %ecx \n "
" rdmsr \n "
" orl $0x800, %eax \n "
" wrmsr \n " ) ;
}
2020-07-03 10:23:09 -06:00
if ( has_feature ( CPUFeature : : SMEP ) ) {
2020-01-17 23:56:13 +01:00
// Turn on CR4.SMEP
asm volatile (
" mov %cr4, %eax \n "
" orl $0x100000, %eax \n "
" mov %eax, %cr4 \n " ) ;
}
2020-07-03 10:23:09 -06:00
if ( has_feature ( CPUFeature : : SMAP ) ) {
2020-01-17 23:56:13 +01:00
// Turn on CR4.SMAP
asm volatile (
" mov %cr4, %eax \n "
" orl $0x200000, %eax \n "
" mov %eax, %cr4 \n " ) ;
}
2020-07-03 10:23:09 -06:00
if ( has_feature ( CPUFeature : : UMIP ) ) {
2020-01-17 23:56:13 +01:00
asm volatile (
2020-01-18 10:11:29 +01:00
" mov %cr4, %eax \n "
2020-01-17 23:56:13 +01:00
" orl $0x800, %eax \n "
2020-01-18 10:11:29 +01:00
" mov %eax, %cr4 \n " ) ;
2020-01-17 23:56:13 +01:00
}
2020-07-03 10:23:09 -06:00
if ( has_feature ( CPUFeature : : TSC ) ) {
2020-01-18 10:11:29 +01:00
asm volatile (
" mov %cr4, %eax \n "
" orl $0x4, %eax \n "
" mov %eax, %cr4 \n " ) ;
}
2020-06-04 09:10:16 -06:00
}
// Build a space-separated, human-readable list of this CPU's detected
// features, e.g. "pae pge sse".
String Processor::features_string() const
{
    StringBuilder builder;
    auto feature_to_str =
        [](CPUFeature f) -> const char* {
            switch (f) {
            case CPUFeature::NX:
                return "nx";
            case CPUFeature::PAE:
                return "pae";
            case CPUFeature::PGE:
                return "pge";
            case CPUFeature::RDRAND:
                return "rdrand";
            case CPUFeature::RDSEED:
                return "rdseed";
            case CPUFeature::SMAP:
                return "smap";
            case CPUFeature::SMEP:
                return "smep";
            case CPUFeature::SSE:
                return "sse";
            case CPUFeature::TSC:
                return "tsc";
            case CPUFeature::UMIP:
                return "umip";
                // no default statement here intentionally so that we get
                // a warning if a new feature is forgotten to be added here
            }
            // Shouldn't ever happen
            return "???";
        };
    bool first = true;
    // Iterate over every bit of the 32-bit feature mask. The previous
    // condition (flag < sizeof(m_features) * 8) compared the flag *value*
    // against the bit count (32), so only features in the five lowest bits
    // were ever reported.
    for (u32 flag = 1; flag != 0; flag <<= 1) {
        if ((static_cast<u32>(m_features) & flag) != 0) {
            if (first)
                first = false;
            else
                builder.append(' ');
            auto str = feature_to_str(static_cast<CPUFeature>(flag));
            builder.append(str, strlen(str));
        }
    }
    return builder.build();
}
void Processor : : early_initialize ( u32 cpu )
2020-06-27 13:42:28 -06:00
{
m_self = this ;
m_cpu = cpu ;
m_in_irq = 0 ;
2020-07-03 05:19:50 -06:00
m_in_critical = 0 ;
m_invoke_scheduler_async = false ;
m_scheduler_initialized = false ;
2020-06-27 13:42:28 -06:00
2020-06-28 15:34:31 -06:00
m_idle_thread = nullptr ;
m_current_thread = nullptr ;
2020-06-28 16:04:35 -06:00
m_mm_data = nullptr ;
2020-07-02 08:34:00 -06:00
m_info = nullptr ;
2020-06-28 15:34:31 -06:00
2020-07-03 10:23:09 -06:00
cpu_setup ( ) ;
2020-06-27 13:42:28 -06:00
gdt_init ( ) ;
2020-07-02 08:34:00 -06:00
ASSERT ( & current ( ) = = this ) ; // sanity check
}
void Processor : : initialize ( u32 cpu )
{
ASSERT ( m_self = = this ) ;
ASSERT ( & current ( ) = = this ) ; // sanity check
2020-07-03 10:23:09 -06:00
klog ( ) < < " CPU[ " < < id ( ) < < " ]: Supported features: " < < features_string ( ) ;
if ( ! has_feature ( CPUFeature : : RDRAND ) )
klog ( ) < < " CPU[ " < < id ( ) < < " ]: No RDRAND support detected, randomness will be poor " ;
2020-07-02 08:34:00 -06:00
2020-06-27 13:42:28 -06:00
if ( cpu = = 0 )
idt_init ( ) ;
else
flush_idt ( ) ;
if ( cpu = = 0 ) {
ASSERT ( ( FlatPtr ( & s_clean_fpu_state ) & 0xF ) = = 0 ) ;
asm volatile ( " fninit " ) ;
asm volatile ( " fxsave %0 "
: " =m " ( s_clean_fpu_state ) ) ;
}
2020-06-27 17:06:33 -06:00
m_info = new ProcessorInfo ( * this ) ;
2020-06-28 15:34:31 -06:00
{
ScopedSpinLock lock ( s_processor_lock ) ;
// We need to prevent races between APs starting up at the same time
if ( ! s_processors )
s_processors = new Vector < Processor * > ( ) ;
if ( cpu > = s_processors - > size ( ) )
s_processors - > resize ( cpu + 1 ) ;
( * s_processors ) [ cpu ] = this ;
}
2020-06-27 17:06:33 -06:00
2020-06-28 16:04:35 -06:00
klog ( ) < < " CPU[ " < < cpu < < " ]: initialized Processor at " < < VirtualAddress ( FlatPtr ( this ) ) ;
2020-06-27 13:42:28 -06:00
}
// Write a raw 8-byte descriptor into this CPU's GDT, growing the recorded
// table length/limit as needed.
// NOTE(review): the bookkeeping here is subtle and depends on the order in
// which gdt_init() writes selectors: the growth check uses `i > m_gdt_length`
// (not >=), the limit is derived from m_gdt_length + 1, and the trailing
// "clear" loop runs from the just-written index up to the previous length.
// Confirm against gdt_init()/flush_gdt() before changing any of it.
void Processor::write_raw_gdt_entry(u16 selector, u32 low, u32 high)
{
    u16 i = (selector & 0xfffc) >> 3; // descriptor index: strip RPL/TI bits
    u32 prev_gdt_length = m_gdt_length;
    if (i > m_gdt_length) {
        m_gdt_length = i + 1;
        ASSERT(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
        m_gdtr.limit = (m_gdt_length + 1) * 8 - 1;
    }
    m_gdt[i].low = low;
    m_gdt[i].high = high;

    // clear selectors we may have skipped
    while (i < prev_gdt_length) {
        m_gdt[i].low = 0;
        m_gdt[i].high = 0;
        i++;
    }
}
void Processor : : write_gdt_entry ( u16 selector , Descriptor & descriptor )
{
write_raw_gdt_entry ( selector , descriptor . low , descriptor . high ) ;
}
Descriptor & Processor : : get_gdt_entry ( u16 selector )
{
u16 i = ( selector & 0xfffc ) > > 3 ;
return * ( Descriptor * ) ( & m_gdt [ i ] ) ;
}
// Reload this CPU's GDTR from m_gdt/m_gdt_length.
void Processor::flush_gdt()
{
    m_gdtr.address = m_gdt;
    m_gdtr.limit = (m_gdt_length * 8) - 1;
    asm volatile("lgdt %0" ::"m"(m_gdtr)
                 : "memory");
}

// Expose the current GDTR (used e.g. when starting APs).
const DescriptorTablePointer& Processor::get_gdtr()
{
    return m_gdtr;
}
// Runs on the new thread's stack during a context switch (jumped to from
// switch_context()/do_assume_context()): saves from_thread's FPU and
// fs/gs state, then installs to_thread's segments, TLS descriptor,
// address space and FPU state.
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
{
    ASSERT(from_thread == to_thread || from_thread->state() != Thread::Running);
    ASSERT(to_thread->state() == Thread::Running);

    auto& processor = Processor::current();
    processor.set_current_thread(*to_thread);

    auto& from_tss = from_thread->tss();
    auto& to_tss = to_thread->tss();
    asm volatile("fxsave %0"
                 : "=m"(from_thread->fpu_state()));

    from_tss.fs = get_fs();
    from_tss.gs = get_gs();
    set_fs(to_tss.fs);
    set_gs(to_tss.gs);

    auto& tls_descriptor = processor.get_gdt_entry(GDT_SELECTOR_TLS);
    tls_descriptor.set_base(to_thread->thread_specific_data().as_ptr());
    tls_descriptor.set_limit(to_thread->thread_specific_region_size());

    // Only reload CR3 (flushing the TLB) when actually changing address spaces.
    if (from_tss.cr3 != to_tss.cr3)
        write_cr3(to_tss.cr3);

    to_thread->set_cpu(processor.id());

    asm volatile("fxrstor %0" ::"m"(to_thread->fpu_state()));

    // TODO: debug registers
    // TODO: ioperm?
}
# define ENTER_THREAD_CONTEXT_ARGS_SIZE (2 * 4) // to_thread, from_thread
void Processor : : switch_context ( Thread * from_thread , Thread * to_thread )
{
ASSERT ( ! in_irq ( ) ) ;
2020-07-03 05:19:50 -06:00
ASSERT ( ! m_in_critical ) ;
2020-06-27 13:42:28 -06:00
ASSERT ( is_kernel_mode ( ) ) ;
# ifdef CONTEXT_SWITCH_DEBUG
2020-07-03 05:19:50 -06:00
dbg ( ) < < " switch_context --> switching out of: " < < VirtualAddress ( from_thread ) < < " " < < * from_thread ;
2020-06-27 13:42:28 -06:00
# endif
// Switch to new thread context, passing from_thread and to_thread
// through to the new context using registers edx and eax
asm volatile (
// NOTE: changing how much we push to the stack affects
// SWITCH_CONTEXT_TO_STACK_SIZE and thread_context_first_enter()!
" pushfl \n "
" pushl %%ebx \n "
" pushl %%esi \n "
" pushl %%edi \n "
" pushl %%ebp \n "
" movl %%esp, %[from_esp] \n "
" movl $1f, %[from_eip] \n "
" movl %[to_esp0], %%ebx \n "
" movl %%ebx, %[tss_esp0] \n "
" movl %[to_esp], %%esp \n "
" pushl %[to_thread] \n "
" pushl %[from_thread] \n "
" pushl %[to_eip] \n "
" cld \n "
" jmp enter_thread_context \n "
" 1: \n "
" popl %%edx \n "
" popl %%eax \n "
" popl %%ebp \n "
" popl %%edi \n "
" popl %%esi \n "
" popl %%ebx \n "
" popfl \n "
: [ from_esp ] " =m " ( from_thread - > tss ( ) . esp ) ,
[ from_eip ] " =m " ( from_thread - > tss ( ) . eip ) ,
[ tss_esp0 ] " =m " ( m_tss . esp0 ) ,
" =d " ( from_thread ) , // needed so that from_thread retains the correct value
" =a " ( to_thread ) // needed so that to_thread retains the correct value
: [ to_esp ] " g " ( to_thread - > tss ( ) . esp ) ,
[ to_esp0 ] " g " ( to_thread - > tss ( ) . esp0 ) ,
[ to_eip ] " c " ( to_thread - > tss ( ) . eip ) ,
[ from_thread ] " d " ( from_thread ) ,
[ to_thread ] " a " ( to_thread )
) ;
# ifdef CONTEXT_SWITCH_DEBUG
2020-07-03 05:19:50 -06:00
dbg ( ) < < " switch_context <-- from " < < VirtualAddress ( from_thread ) < < " " < < * from_thread < < " to " < < VirtualAddress ( to_thread ) < < " " < < * to_thread ;
2020-06-27 13:42:28 -06:00
# endif
}
// Called the very first time a newly-created thread's context is entered
// (from thread_context_first_enter), before exiting through the normal
// trap-return path.
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap)
{
    ASSERT(!are_interrupts_enabled());
    ASSERT(is_kernel_mode());
    (void)from_thread;
    (void)to_thread;
    (void)trap;

    ASSERT(to_thread == Thread::current());
#ifdef CONTEXT_SWITCH_DEBUG
    dbg() << "switch_context <-- from " << VirtualAddress(from_thread) << " " << *from_thread << " to " << VirtualAddress(to_thread) << " " << *to_thread << " (context_first_init)";
#endif

    // If a tracer asked to stop this process at its next execve, deliver
    // SIGSTOP before the thread runs any of its own code.
    if (to_thread->process().wait_for_tracer_at_next_execve()) {
        to_thread->send_urgent_signal_to_self(SIGSTOP);
    }
}
extern "C" void thread_context_first_enter(void);

// Trampoline a brand-new thread starts on (init_context() points tss.eip
// here): recover the TrapFrame pointer that init_context() left on the
// stack, run context_first_init(), then leave via the regular trap exit.
asm(
    // enter_thread_context returns to here first time a thread is executing
    ".globl thread_context_first_enter \n"
    "thread_context_first_enter: \n"
    // switch_context will have pushed from_thread and to_thread to our new
    // stack prior to thread_context_first_enter() being called, and the
    // pointer to TrapFrame was the top of the stack before that
    "    movl 8(%esp), %ebx \n" // save pointer to TrapFrame
    "    cld \n"
    "    call context_first_init \n"
    "    addl $" __STRINGIFY(ENTER_THREAD_CONTEXT_ARGS_SIZE) ", %esp \n"
    "    movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
    "    jmp common_trap_exit \n");
// Build an initial kernel stack for `thread` so that the first
// switch_context() to it lands in thread_context_first_enter() and then
// "returns" into the thread through an iret frame constructed from its TSS.
// Returns the new tss.esp value. If leave_crit is set, the critical section
// the caller entered is left here without triggering deferred work, since
// we are preparing a different context.
u32 Processor::init_context(Thread& thread, bool leave_crit)
{
    ASSERT(is_kernel_mode());
    if (leave_crit) {
        ASSERT(in_critical());
        m_in_critical--; // leave it without triggering anything
        ASSERT(!in_critical());
    }

    const u32 kernel_stack_top = thread.kernel_stack_top();
    u32 stack_top = kernel_stack_top;

    // TODO: handle NT?
    ASSERT((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)

    auto& tss = thread.tss();
    bool return_to_user = (tss.cs & 3) != 0;

    // make room for an interrupt frame
    if (!return_to_user) {
        // userspace_esp and userspace_ss are not popped off by iret
        // unless we're switching back to user mode
        stack_top -= sizeof(RegisterState) - 2 * sizeof(u32);
    } else {
        stack_top -= sizeof(RegisterState);
    }

    // we want to end up 16-byte aligned, %esp + 4 should be aligned
    stack_top -= sizeof(u32);
    *reinterpret_cast<u32*>(kernel_stack_top - 4) = 0;

    // set up the stack so that after returning from thread_context_first_enter()
    // we will end up either in kernel mode or user mode, depending on how the thread is set up
    // However, the first step is to always start in kernel mode with thread_context_first_enter
    RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
    iretframe.ss = tss.ss;
    iretframe.gs = tss.gs;
    iretframe.fs = tss.fs;
    iretframe.es = tss.es;
    iretframe.ds = tss.ds;
    iretframe.edi = tss.edi;
    iretframe.esi = tss.esi;
    iretframe.ebp = tss.ebp;
    iretframe.esp = 0;
    iretframe.ebx = tss.ebx;
    iretframe.edx = tss.edx;
    iretframe.ecx = tss.ecx;
    iretframe.eax = tss.eax;
    iretframe.eflags = tss.eflags;
    iretframe.eip = tss.eip;
    iretframe.cs = tss.cs;
    if (return_to_user) {
        iretframe.userspace_esp = tss.esp;
        iretframe.userspace_ss = tss.ss;
    }

    // make space for a trap frame
    stack_top -= sizeof(TrapFrame);
    TrapFrame& trap = *reinterpret_cast<TrapFrame*>(stack_top);
    trap.regs = &iretframe;
    trap.prev_irq_level = 0;

    stack_top -= sizeof(u32); // pointer to TrapFrame
    *reinterpret_cast<u32*>(stack_top) = stack_top + 4;

#ifdef CONTEXT_SWITCH_DEBUG
    if (return_to_user)
        dbg() << "init_context " << thread << " (" << VirtualAddress(&thread) << ") set up to execute at eip: " << String::format("%02x:%08x", iretframe.cs, (u32)tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top) << " user esp: " << String::format("%02x:%08x", iretframe.userspace_ss, (u32)iretframe.userspace_esp);
    else
        dbg() << "init_context " << thread << " (" << VirtualAddress(&thread) << ") set up to execute at eip: " << String::format("%02x:%08x", iretframe.cs, (u32)tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top);
#endif

    // make switch_context() always first return to thread_context_first_enter()
    // in kernel mode, so set up these values so that we end up popping iretframe
    // off the stack right after the context switch completed, at which point
    // control is transferred to what iretframe is pointing to.
    tss.eip = FlatPtr(&thread_context_first_enter);
    tss.esp0 = kernel_stack_top;
    tss.esp = stack_top;
    tss.cs = GDT_SELECTOR_CODE0;
    tss.ds = GDT_SELECTOR_DATA0;
    tss.es = GDT_SELECTOR_DATA0;
    tss.gs = GDT_SELECTOR_DATA0;
    tss.ss = GDT_SELECTOR_DATA0;
    tss.fs = GDT_SELECTOR_PROC;
    return stack_top;
}
// C ABI helper used by do_assume_context: primes the thread's eflags and
// builds its initial stack. Must run with interrupts disabled and inside a
// critical section, which init_context(..., true) leaves.
extern "C" u32 do_init_context(Thread* thread, u32 flags)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(Processor::current().in_critical());
    thread->tss().eflags = flags;
    return Processor::current().init_context(*thread, true);
}
extern " C " void do_assume_context ( Thread * thread , u32 flags ) ;
2020-06-27 13:42:28 -06:00
asm (
" .global do_assume_context \n "
" do_assume_context: \n "
" movl 4(%esp), %ebx \n "
2020-07-03 05:19:50 -06:00
" movl 8(%esp), %esi \n "
2020-06-27 13:42:28 -06:00
// We're going to call Processor::init_context, so just make sure
// we have enough stack space so we don't stomp over it
" subl $( " __STRINGIFY ( 4 + REGISTER_STATE_SIZE + TRAP_FRAME_SIZE + 4 ) " ), %esp \n "
2020-07-03 05:19:50 -06:00
" pushl %esi \n "
2020-06-27 13:42:28 -06:00
" pushl %ebx \n "
" cld \n "
" call do_init_context \n "
2020-07-03 05:19:50 -06:00
" addl $8, %esp \n "
2020-06-27 13:42:28 -06:00
" movl %eax, %esp \n " // move stack pointer to what Processor::init_context set up for us
" pushl %ebx \n " // push to_thread
" pushl %ebx \n " // push from_thread
" pushl $thread_context_first_enter \n " // should be same as tss.eip
" jmp enter_thread_context \n "
) ;
void Processor : : assume_context ( Thread & thread , u32 flags )
2020-06-27 13:42:28 -06:00
{
2020-07-03 05:19:50 -06:00
# ifdef CONTEXT_SWITCH_DEBUG
dbg ( ) < < " Assume context for thread " < < VirtualAddress ( & thread ) < < " " < < thread ;
# endif
ASSERT_INTERRUPTS_DISABLED ( ) ;
do_assume_context ( & thread , flags ) ;
2020-06-27 13:42:28 -06:00
ASSERT_NOT_REACHED ( ) ;
}
void Processor : : initialize_context_switching ( Thread & initial_thread )
{
ASSERT ( initial_thread . process ( ) . is_ring0 ( ) ) ;
auto & tss = initial_thread . tss ( ) ;
m_tss = tss ;
m_tss . esp0 = tss . esp0 ;
m_tss . ss0 = GDT_SELECTOR_DATA0 ;
// user mode needs to be able to switch to kernel mode:
m_tss . cs = m_tss . ds = m_tss . es = m_tss . gs = m_tss . ss = GDT_SELECTOR_CODE0 | 3 ;
m_tss . fs = GDT_SELECTOR_PROC | 3 ;
2020-07-03 05:19:50 -06:00
m_scheduler_initialized = true ;
2020-06-27 13:42:28 -06:00
asm volatile (
" movl %[new_esp], %%esp \n " // swich to new stack
" pushl %[from_to_thread] \n " // to_thread
" pushl %[from_to_thread] \n " // from_thread
" pushl $ " __STRINGIFY ( GDT_SELECTOR_CODE0 ) " \n "
" pushl %[new_eip] \n " // save the entry eip to the stack
" movl %%esp, %%ebx \n "
" addl $20, %%ebx \n " // calculate pointer to TrapFrame
" pushl %%ebx \n "
" cld \n "
" call enter_trap_no_irq \n "
" addl $4, %%esp \n "
" lret \n "
: : [ new_esp ] " g " ( tss . esp ) ,
[ new_eip ] " a " ( tss . eip ) ,
[ from_to_thread ] " b " ( & initial_thread )
) ;
ASSERT_NOT_REACHED ( ) ;
}
void Processor : : enter_trap ( TrapFrame & trap , bool raise_irq )
{
InterruptDisabler disabler ;
trap . prev_irq_level = m_in_irq ;
if ( raise_irq )
m_in_irq + + ;
}
void Processor : : exit_trap ( TrapFrame & trap )
{
InterruptDisabler disabler ;
ASSERT ( m_in_irq > = trap . prev_irq_level ) ;
m_in_irq = trap . prev_irq_level ;
2020-07-03 05:19:50 -06:00
if ( ! m_in_irq & & ! m_in_critical )
check_invoke_scheduler ( ) ;
}
void Processor : : check_invoke_scheduler ( )
{
ASSERT ( ! m_in_irq ) ;
ASSERT ( ! m_in_critical ) ;
if ( m_invoke_scheduler_async & & m_scheduler_initialized ) {
2020-06-27 13:42:28 -06:00
m_invoke_scheduler_async = false ;
Scheduler : : invoke_async ( ) ;
}
}
// Build this CPU's GDT from scratch: null descriptor, kernel/user code and
// data segments, the userspace TLS segment, a per-CPU segment (%fs)
// covering this Processor object, and the TSS. Then loads GDTR, the data
// segment registers, the task register, and finally reloads CS.
void Processor::gdt_init()
{
    m_gdt_length = 0;
    m_gdtr.address = nullptr;
    m_gdtr.limit = 0;

    write_raw_gdt_entry(0x0000, 0x00000000, 0x00000000);
    write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00cf9a00); // code0
    write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00cf9200); // data0
    write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00cffa00); // code3
    write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x00cff200); // data3

    // Userspace TLS segment; base/limit are filled in per-thread by
    // set_thread_specific()/enter_thread_context().
    Descriptor tls_descriptor;
    tls_descriptor.low = tls_descriptor.high = 0;
    tls_descriptor.dpl = 3;
    tls_descriptor.segment_present = 1;
    tls_descriptor.granularity = 0;
    tls_descriptor.zero = 0;
    tls_descriptor.operation_size = 1;
    tls_descriptor.descriptor_type = 1;
    tls_descriptor.type = 2;
    write_gdt_entry(GDT_SELECTOR_TLS, tls_descriptor); // tls3

    // Per-CPU segment: %fs covers this Processor object so that
    // Processor::current() can be reached through it.
    Descriptor fs_descriptor;
    fs_descriptor.set_base(this);
    fs_descriptor.set_limit(sizeof(Processor));
    fs_descriptor.dpl = 0;
    fs_descriptor.segment_present = 1;
    fs_descriptor.granularity = 0;
    fs_descriptor.zero = 0;
    fs_descriptor.operation_size = 1;
    fs_descriptor.descriptor_type = 1;
    fs_descriptor.type = 2;
    write_gdt_entry(GDT_SELECTOR_PROC, fs_descriptor); // fs0

    // 32-bit available TSS (system descriptor, type 9).
    Descriptor tss_descriptor;
    tss_descriptor.set_base(&m_tss);
    tss_descriptor.set_limit(sizeof(TSS32));
    tss_descriptor.dpl = 0;
    tss_descriptor.segment_present = 1;
    tss_descriptor.granularity = 0;
    tss_descriptor.zero = 0;
    tss_descriptor.operation_size = 1;
    tss_descriptor.descriptor_type = 0;
    tss_descriptor.type = 9;
    write_gdt_entry(GDT_SELECTOR_TSS, tss_descriptor); // tss

    flush_gdt();
    load_task_register(GDT_SELECTOR_TSS);

    asm volatile(
        "mov %%ax, %%ds\n"
        "mov %%ax, %%es\n"
        "mov %%ax, %%gs\n"
        "mov %%ax, %%ss\n" ::"a"(GDT_SELECTOR_DATA0)
        : "memory");
    set_fs(GDT_SELECTOR_PROC);

    // Make sure CS points to the kernel code descriptor.
    asm volatile(
        "ljmpl $" __STRINGIFY(GDT_SELECTOR_CODE0) ", $sanity\n"
        "sanity:\n");
}
void Processor : : set_thread_specific ( u8 * data , size_t len )
{
auto & descriptor = get_gdt_entry ( GDT_SELECTOR_TLS ) ;
descriptor . set_base ( data ) ;
descriptor . set_limit ( len ) ;
}
}
# ifdef DEBUG
void __assertion_failed ( const char * msg , const char * file , unsigned line , const char * func )
{
asm volatile ( " cli " ) ;
2020-03-01 21:45:39 +02:00
klog ( ) < < " ASSERTION FAILED: " < < msg < < " \n "
< < file < < " : " < < line < < " in " < < func ;
2020-02-16 01:27:42 +01:00
// Switch back to the current process's page tables if there are any.
// Otherwise stack walking will be a disaster.
2020-06-28 15:34:31 -06:00
auto process = Process : : current ( ) ;
if ( process )
MM . enter_process_paging_scope ( * process ) ;
2020-02-16 01:27:42 +01:00
Kernel : : dump_backtrace ( ) ;
asm volatile ( " hlt " ) ;
for ( ; ; )
;
}
# endif
// RAII-style NMI gate: setting bit 7 of CMOS index port 0x70 masks NMIs.
NonMaskableInterruptDisabler::NonMaskableInterruptDisabler()
{
    u8 value = IO::in8(0x70);
    IO::out8(0x70, value | 0x80);
}

// Clearing bit 7 of CMOS index port 0x70 unmasks NMIs again.
NonMaskableInterruptDisabler::~NonMaskableInterruptDisabler()
{
    u8 value = IO::in8(0x70);
    IO::out8(0x70, value & 0x7F);
}