From 9ed051fe2584d4621e284e7ad6848d1d7801f257 Mon Sep 17 00:00:00 2001
From: Gunnar Beutner
Date: Sun, 27 Jun 2021 13:59:41 +0200
Subject: [PATCH] Kernel: Implement initializing threads on x86_64

---
 Kernel/Arch/x86/common/Processor.cpp | 31 ++++++++++++++++++++++
 Kernel/Arch/x86/i386/CPU.cpp         | 21 ---------------
 Kernel/Arch/x86/x86_64/CPU.cpp       |  6 -----
 Kernel/Arch/x86/x86_64/Processor.cpp | 39 ++++++++++++++++------------
 Kernel/Thread.cpp                    |  6 +++++
 5 files changed, 59 insertions(+), 44 deletions(-)

diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index b76380a6e50..a4d3af0cfb4 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -40,6 +40,10 @@ Atomic<u32> Processor::s_idle_cpu_mask { 0 };
 extern "C" void thread_context_first_enter(void);
 extern "C" void exit_kernel_thread(void);
 
+// The compiler can't see the calls to this function inside assembly.
+// Declare it, to avoid dead code warnings.
+extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
+
 UNMAP_AFTER_INIT static void sse_init()
 {
     write_cr0((read_cr0() & 0xfffffffbu) | 0x2);
@@ -1134,4 +1138,31 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
     // clang-format on
 #endif
 }
+
+extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
+{
+    VERIFY(!are_interrupts_enabled());
+    VERIFY(is_kernel_mode());
+
+    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
+
+    VERIFY(to_thread == Thread::current());
+
+    Scheduler::enter_current(*from_thread, true);
+
+    // Since we got here and don't have Scheduler::context_switch in the
+    // call stack (because this is the first time we switched into this
+    // context), we need to notify the scheduler so that it can release
+    // the scheduler lock. We don't want to enable interrupts at this point
+    // as we're still in the middle of a context switch. Doing so could
+    // trigger a context switch within a context switch, leading to a crash.
+    FlatPtr flags;
+#if ARCH(I386)
+    flags = trap->regs->eflags;
+#else
+    flags = trap->regs->rflags;
+#endif
+    Scheduler::leave_on_first_switch(flags & ~0x200);
+}
+
 }
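
Note: the ~0x200 mask above clears bit 9 of EFLAGS/RFLAGS, the interrupt-enable
flag (IF). A minimal, self-contained sketch of the same masking, for
illustration only (the constant name IF_BIT is made up; the kernel just uses
the raw literal):

    #include <cstdint>
    #include <cstdio>

    // Bit 9 of EFLAGS/RFLAGS is IF, the interrupt-enable flag.
    // Bit 1 is reserved and always reads back as 1.
    constexpr uint64_t IF_BIT = 0x200;

    int main()
    {
        uint64_t rflags = 0x0202; // typical fresh-thread flags: reserved bit + IF

        // Scheduler::leave_on_first_switch() receives the saved flags with
        // IF cleared, so releasing the scheduler lock cannot re-enable
        // interrupts mid-switch; IF only comes back when the iret frame is
        // eventually restored.
        uint64_t masked = rflags & ~IF_BIT;

        std::printf("rflags=%#lx, without IF=%#lx\n",
            static_cast<unsigned long>(rflags),
            static_cast<unsigned long>(masked));
    }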
diff --git a/Kernel/Arch/x86/i386/CPU.cpp b/Kernel/Arch/x86/i386/CPU.cpp
index 8d25ca3d97b..27636b9ad9e 100644
--- a/Kernel/Arch/x86/i386/CPU.cpp
+++ b/Kernel/Arch/x86/i386/CPU.cpp
@@ -19,7 +19,6 @@ namespace Kernel {
 // The compiler can't see the calls to these functions inside assembly.
 // Declare them, to avoid dead code warnings.
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
-extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
 extern "C" u32 do_init_context(Thread* thread, u32 flags) __attribute__((used));
 
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
@@ -73,26 +72,6 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     // TODO: ioperm?
 }
 
-extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
-{
-    VERIFY(!are_interrupts_enabled());
-    VERIFY(is_kernel_mode());
-
-    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
-
-    VERIFY(to_thread == Thread::current());
-
-    Scheduler::enter_current(*from_thread, true);
-
-    // Since we got here and don't have Scheduler::context_switch in the
-    // call stack (because this is the first time we switched into this
-    // context), we need to notify the scheduler so that it can release
-    // the scheduler lock. We don't want to enable interrupts at this point
-    // as we're still in the middle of a context switch. Doing so could
-    // trigger a context switch within a context switch, leading to a crash.
-    Scheduler::leave_on_first_switch(trap->regs->eflags & ~0x200);
-}
-
 extern "C" u32 do_init_context(Thread* thread, u32 flags)
 {
     VERIFY_INTERRUPTS_DISABLED();
diff --git a/Kernel/Arch/x86/x86_64/CPU.cpp b/Kernel/Arch/x86/x86_64/CPU.cpp
index 8d9c0954d3b..4c02848a522 100644
--- a/Kernel/Arch/x86/x86_64/CPU.cpp
+++ b/Kernel/Arch/x86/x86_64/CPU.cpp
@@ -19,7 +19,6 @@ namespace Kernel {
 // The compiler can't see the calls to these functions inside assembly.
 // Declare them, to avoid dead code warnings.
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
-extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
 extern "C" u32 do_init_context(Thread* thread, u32 flags) __attribute__((used));
 
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
@@ -29,11 +28,6 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     TODO();
 }
 
-extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
-{
-    TODO();
-}
-
 extern "C" u32 do_init_context(Thread* thread, u32 flags)
 {
     (void)thread;
diff --git a/Kernel/Arch/x86/x86_64/Processor.cpp b/Kernel/Arch/x86/x86_64/Processor.cpp
index 6ed1bb4044d..f45c95cb4e1 100644
--- a/Kernel/Arch/x86/x86_64/Processor.cpp
+++ b/Kernel/Arch/x86/x86_64/Processor.cpp
@@ -15,7 +15,6 @@
 
 namespace Kernel {
 
-#define ENTER_THREAD_CONTEXT_ARGS_SIZE (2 * 4) //  to_thread, from_thread
 extern "C" void thread_context_first_enter(void);
 extern "C" void do_assume_context(Thread* thread, u32 flags);
 extern "C" void exit_kernel_thread(void);
@@ -28,11 +27,11 @@ asm(
 // switch_context will have pushed from_thread and to_thread to our new
 // stack prior to thread_context_first_enter() being called, and the
 // pointer to TrapFrame was the top of the stack before that
-"    movl 8(%esp), %ebx \n" // save pointer to TrapFrame
+"    popq %rdi \n" // from_thread (argument 0)
+"    popq %rsi \n" // to_thread (argument 1)
+"    popq %rdx \n" // pointer to TrapFrame (argument 2)
 "    cld \n"
 "    call context_first_init \n"
-"    addl $" __STRINGIFY(ENTER_THREAD_CONTEXT_ARGS_SIZE) ", %esp \n"
-"    movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
 "    jmp common_trap_exit \n"
 );
 // clang-format on
@@ -79,12 +78,12 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
         VERIFY(in_critical() == 1);
     }
 
-    u32 kernel_stack_top = thread.kernel_stack_top();
+    u64 kernel_stack_top = thread.kernel_stack_top();
 
     // Add a random offset between 0-256 (16-byte aligned)
     kernel_stack_top -= round_up_to_power_of_two(get_fast_random<u8>(), 16);
 
-    u32 stack_top = kernel_stack_top;
+    u64 stack_top = kernel_stack_top;
 
     // TODO: handle NT?
     VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
@@ -102,13 +101,13 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
         // which should be in regs.rsp and exit_kernel_thread as return
         // address.
         stack_top -= 2 * sizeof(u64);
-        *reinterpret_cast<u32*>(kernel_stack_top - 2 * sizeof(u32)) = regs.rsp;
-        *reinterpret_cast<u32*>(kernel_stack_top - 3 * sizeof(u32)) = FlatPtr(&exit_kernel_thread);
+        *reinterpret_cast<u64*>(kernel_stack_top - 2 * sizeof(u64)) = regs.rsp;
+        *reinterpret_cast<u64*>(kernel_stack_top - 3 * sizeof(u64)) = FlatPtr(&exit_kernel_thread);
     } else {
         stack_top -= sizeof(RegisterState);
     }
 
-    // we want to end up 16-byte aligned, %esp + 4 should be aligned
+    // we want to end up 16-byte aligned, %rsp + 8 should be aligned
     stack_top -= sizeof(u64);
     *reinterpret_cast<u64*>(kernel_stack_top - sizeof(u64)) = 0;
@@ -116,7 +115,19 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // we will end up either in kernel mode or user mode, depending on how the thread is set up
     // However, the first step is to always start in kernel mode with thread_context_first_enter
     RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
-    // FIXME: copy state to be recovered through TSS
+    iretframe.rdi = regs.rdi;
+    iretframe.rsi = regs.rsi;
+    iretframe.rbp = regs.rbp;
+    iretframe.rsp = 0;
+    iretframe.rbx = regs.rbx;
+    iretframe.rdx = regs.rdx;
+    iretframe.rcx = regs.rcx;
+    iretframe.rax = regs.rax;
+    iretframe.rflags = regs.rflags;
+    iretframe.rip = regs.rip;
+    iretframe.cs = regs.cs;
+    if (return_to_user)
+        iretframe.userspace_rsp = regs.rsp;
 
     // make space for a trap frame
     stack_top -= sizeof(TrapFrame);
@@ -205,21 +216,15 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
         "movq %[new_rsp], %%rsp \n" // switch to new stack
         "pushq %[from_to_thread] \n" // to_thread
         "pushq %[from_to_thread] \n" // from_thread
-        "pushq $" __STRINGIFY(GDT_SELECTOR_CODE0) " \n"
         "pushq %[new_rip] \n" // save the entry rip to the stack
-        "movq %%rsp, %%rbx \n"
-        "addq $40, %%rbx \n" // calculate pointer to TrapFrame
-        "pushq %%rbx \n"
         "cld \n"
         "pushq %[cpu] \n" // push argument for init_finished before register is clobbered
        "call pre_init_finished \n"
         "pop %%rdi \n" // move argument for init_finished into place
         "call init_finished \n"
-        "addq $8, %%rsp \n"
         "call post_init_finished \n"
-        "pop %%rdi \n" // move pointer to TrapFrame into place
+        "movq 24(%%rsp), %%rdi \n" // move pointer to TrapFrame into place
         "call enter_trap_no_irq \n"
-        "addq $8, %%rsp \n"
         "retq \n"
         :: [new_rsp] "g" (regs.rsp),
         [new_rip] "a" (regs.rip),
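
Note: the rewritten thread_context_first_enter above leans on the System V
AMD64 calling convention, where the first three integer arguments travel in
%rdi, %rsi and %rdx rather than on the stack as on i386, which is why the
TrapFrame pointer no longer needs to be parked in %ebx. A standalone sketch of
the same pop-into-argument-registers trick (the pushed values and variable
names are made up; this is not the kernel's code):

    #include <cstdio>

    int main()
    {
        long arg0, arg1, arg2;

        // Push three values, then pop them into the registers the SysV
        // AMD64 ABI uses for the first three integer arguments -- the same
        // way thread_context_first_enter turns the stack slots prepared by
        // switch_context into arguments for context_first_init.
        asm volatile(
            "pushq $33 \n" // deepest slot, e.g. the TrapFrame pointer (argument 2)
            "pushq $22 \n" // e.g. to_thread (argument 1)
            "pushq $11 \n" // top of stack, e.g. from_thread (argument 0)
            "popq %%rdi \n"
            "popq %%rsi \n"
            "popq %%rdx \n"
            "movq %%rdi, %0 \n"
            "movq %%rsi, %1 \n"
            "movq %%rdx, %2 \n"
            : "=r"(arg0), "=r"(arg1), "=r"(arg2)
            :
            : "rdi", "rsi", "rdx");

        std::printf("arg0=%ld arg1=%ld arg2=%ld\n", arg0, arg1, arg2); // 11 22 33
    }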
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 8a2e14fc6e1..7e5cb0fe507 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -105,7 +105,13 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stack_region)
         m_regs.gs = GDT_SELECTOR_TLS | 3;
     }
 #else
+    // Only IF is set when a process boots.
     m_regs.rflags = 0x0202;
+
+    if (m_process->is_kernel_process())
+        m_regs.cs = GDT_SELECTOR_CODE0;
+    else
+        m_regs.cs = GDT_SELECTOR_CODE3 | 3;
 #endif
     m_regs.cr3 = m_process->space().page_directory().cr3();
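
Note: the cs selection above determines which ring iretq lands in. An x86
segment selector packs the GDT index into bits 3 and up and the requested
privilege level (RPL) into its low two bits, which is why the ring-3 code
selector is or'd with 3. A small sketch of that layout (the selector values
below are illustrative, not SerenityOS's actual GDT layout):

    #include <cstdint>
    #include <cstdio>

    // An x86 segment selector: | index (13 bits) | TI (1 bit) | RPL (2 bits) |
    struct Selector {
        uint16_t raw;
        uint16_t index() const { return raw >> 3; }
        bool uses_ldt() const { return raw & 0x4; }
        uint8_t rpl() const { return raw & 0x3; }
    };

    int main()
    {
        // Hypothetical values; SerenityOS defines GDT_SELECTOR_CODE0 and
        // GDT_SELECTOR_CODE3 with its own GDT indices.
        constexpr uint16_t kernel_code = 0x08;   // ring 0 code, RPL 0
        constexpr uint16_t user_code = 0x18 | 3; // ring 3 code, RPL 3

        for (Selector s : { Selector { kernel_code }, Selector { user_code } })
            std::printf("selector %#06x -> GDT index %u, RPL %u\n",
                s.raw, s.index(), s.rpl());
    }

With cs chosen per process kind, the iret frame built in init_context sends
kernel threads back to ring 0 and user threads out to ring 3.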