diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index 2251534de0c..b76380a6e50 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -489,14 +489,10 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         // pushed the callee-saved registers, and the last of them happens
         // to be ebp.
         ProcessPagingScope paging_scope(thread.process());
-        auto& tss = thread.tss();
-        u32* stack_top;
 #if ARCH(I386)
-        stack_top = reinterpret_cast<u32*>(tss.esp);
-#else
-        (void)tss;
-        TODO();
-#endif
+        auto& regs = thread.regs();
+        u32* stack_top;
+        stack_top = reinterpret_cast<u32*>(regs.esp);
         if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
             if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
                 frame_ptr = 0;
@@ -505,8 +501,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
             if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
                 frame_ptr = 0;
         }
-#if ARCH(I386)
-        eip = tss.eip;
+        eip = regs.eip;
 #else
         TODO();
 #endif
diff --git a/Kernel/Arch/x86/i386/CPU.cpp b/Kernel/Arch/x86/i386/CPU.cpp
index 99296b7deea..8d25ca3d97b 100644
--- a/Kernel/Arch/x86/i386/CPU.cpp
+++ b/Kernel/Arch/x86/i386/CPU.cpp
@@ -30,8 +30,8 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     bool has_fxsr = Processor::current().has_feature(CPUFeature::FXSR);
     Processor::set_current_thread(*to_thread);
 
-    auto& from_tss = from_thread->tss();
-    auto& to_tss = to_thread->tss();
+    auto& from_regs = from_thread->regs();
+    auto& to_regs = to_thread->regs();
 
     if (has_fxsr)
         asm volatile("fxsave %0"
@@ -40,10 +40,10 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
         asm volatile("fnsave %0"
            : "=m"(from_thread->fpu_state()));
 
-    from_tss.fs = get_fs();
-    from_tss.gs = get_gs();
-    set_fs(to_tss.fs);
-    set_gs(to_tss.gs);
+    from_regs.fs = get_fs();
+    from_regs.gs = get_gs();
+    set_fs(to_regs.fs);
+    set_gs(to_regs.gs);
 
     if (from_thread->process().is_traced())
         read_debug_registers_into(from_thread->debug_register_state());
@@ -59,8 +59,8 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     tls_descriptor.set_base(to_thread->thread_specific_data());
     tls_descriptor.set_limit(to_thread->thread_specific_region_size());
 
-    if (from_tss.cr3 != to_tss.cr3)
-        write_cr3(to_tss.cr3);
+    if (from_regs.cr3 != to_regs.cr3)
+        write_cr3(to_regs.cr3);
 
     to_thread->set_cpu(processor.get_id());
     processor.restore_in_critical(to_thread->saved_critical());
@@ -96,7 +96,7 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
 extern "C" u32 do_init_context(Thread* thread, u32 flags)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    thread->tss().eflags = flags;
+    thread->regs().eflags = flags;
     return Processor::current().init_context(*thread, true);
 }
 
diff --git a/Kernel/Arch/x86/i386/Processor.cpp b/Kernel/Arch/x86/i386/Processor.cpp
index 1432a94b02f..2719c52cb3a 100644
--- a/Kernel/Arch/x86/i386/Processor.cpp
+++ b/Kernel/Arch/x86/i386/Processor.cpp
@@ -54,7 +54,7 @@ asm(
 "    movl %eax, %esp \n" // move stack pointer to what Processor::init_context set up for us
 "    pushl %ebx \n" // push to_thread
 "    pushl %ebx \n" // push from_thread
-"    pushl $thread_context_first_enter \n" // should be same as tss.eip
+"    pushl $thread_context_first_enter \n" // should be same as regs.eip
 "    jmp enter_thread_context \n"
 );
 // clang-format on
@@ -86,8 +86,8 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // TODO: handle NT?
     VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
 
-    auto& tss = thread.tss();
-    bool return_to_user = (tss.cs & 3) != 0;
+    auto& regs = thread.regs();
+    bool return_to_user = (regs.cs & 3) != 0;
 
     // make room for an interrupt frame
     if (!return_to_user) {
@@ -96,10 +96,10 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
         stack_top -= sizeof(RegisterState) - 2 * sizeof(u32);
         // For kernel threads we'll push the thread function argument
-        // which should be in tss.esp and exit_kernel_thread as return
+        // which should be in regs.esp and exit_kernel_thread as return
         // address.
         stack_top -= 2 * sizeof(u32);
-        *reinterpret_cast<FlatPtr*>(kernel_stack_top - 2 * sizeof(u32)) = tss.esp;
+        *reinterpret_cast<FlatPtr*>(kernel_stack_top - 2 * sizeof(u32)) = regs.esp;
         *reinterpret_cast<FlatPtr*>(kernel_stack_top - 3 * sizeof(u32)) = FlatPtr(&exit_kernel_thread);
     } else {
         stack_top -= sizeof(RegisterState);
     }
@@ -113,25 +113,25 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // we will end up either in kernel mode or user mode, depending on how the thread is set up
     // However, the first step is to always start in kernel mode with thread_context_first_enter
     RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
-    iretframe.ss = tss.ss;
-    iretframe.gs = tss.gs;
-    iretframe.fs = tss.fs;
-    iretframe.es = tss.es;
-    iretframe.ds = tss.ds;
-    iretframe.edi = tss.edi;
-    iretframe.esi = tss.esi;
-    iretframe.ebp = tss.ebp;
+    iretframe.ss = regs.ss;
+    iretframe.gs = regs.gs;
+    iretframe.fs = regs.fs;
+    iretframe.es = regs.es;
+    iretframe.ds = regs.ds;
+    iretframe.edi = regs.edi;
+    iretframe.esi = regs.esi;
+    iretframe.ebp = regs.ebp;
     iretframe.esp = 0;
-    iretframe.ebx = tss.ebx;
-    iretframe.edx = tss.edx;
-    iretframe.ecx = tss.ecx;
-    iretframe.eax = tss.eax;
-    iretframe.eflags = tss.eflags;
-    iretframe.eip = tss.eip;
-    iretframe.cs = tss.cs;
+    iretframe.ebx = regs.ebx;
+    iretframe.edx = regs.edx;
+    iretframe.ecx = regs.ecx;
+    iretframe.eax = regs.eax;
+    iretframe.eflags = regs.eflags;
+    iretframe.eip = regs.eip;
+    iretframe.cs = regs.cs;
     if (return_to_user) {
-        iretframe.userspace_esp = tss.esp;
-        iretframe.userspace_ss = tss.ss;
+        iretframe.userspace_esp = regs.esp;
+        iretframe.userspace_ss = regs.ss;
     }
 
     // make space for a trap frame
@@ -149,8 +149,8 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
             dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}, user_top={}:{}",
                 thread,
                 VirtualAddress(&thread),
-                iretframe.cs, tss.eip,
-                VirtualAddress(tss.esp),
+                iretframe.cs, regs.eip,
+                VirtualAddress(regs.esp),
                 VirtualAddress(stack_top),
                 iretframe.userspace_ss,
                 iretframe.userspace_esp);
@@ -158,8 +158,8 @@
             dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}",
                 thread,
                 VirtualAddress(&thread),
-                iretframe.cs, tss.eip,
-                VirtualAddress(tss.esp),
+                iretframe.cs, regs.eip,
+                VirtualAddress(regs.esp),
                 VirtualAddress(stack_top));
         }
     }
@@ -168,15 +168,15 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // in kernel mode, so set up these values so that we end up popping iretframe
     // off the stack right after the context switch completed, at which point
     // control is transferred to what iretframe is pointing to.
-    tss.eip = FlatPtr(&thread_context_first_enter);
-    tss.esp0 = kernel_stack_top;
-    tss.esp = stack_top;
-    tss.cs = GDT_SELECTOR_CODE0;
-    tss.ds = GDT_SELECTOR_DATA0;
-    tss.es = GDT_SELECTOR_DATA0;
-    tss.gs = GDT_SELECTOR_DATA0;
-    tss.ss = GDT_SELECTOR_DATA0;
-    tss.fs = GDT_SELECTOR_PROC;
+    regs.eip = FlatPtr(&thread_context_first_enter);
+    regs.esp0 = kernel_stack_top;
+    regs.esp = stack_top;
+    regs.cs = GDT_SELECTOR_CODE0;
+    regs.ds = GDT_SELECTOR_DATA0;
+    regs.es = GDT_SELECTOR_DATA0;
+    regs.gs = GDT_SELECTOR_DATA0;
+    regs.ss = GDT_SELECTOR_DATA0;
+    regs.fs = GDT_SELECTOR_PROC;
 
     return stack_top;
 }
@@ -218,14 +218,14 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
         "popl %%esi \n"
         "popl %%ebx \n"
         "popfl \n"
-        : [from_esp] "=m" (from_thread->tss().esp),
-        [from_eip] "=m" (from_thread->tss().eip),
+        : [from_esp] "=m" (from_thread->regs().esp),
+        [from_eip] "=m" (from_thread->regs().eip),
         [tss_esp0] "=m" (m_tss.esp0),
         "=d" (from_thread), // needed so that from_thread retains the correct value
         "=a" (to_thread) // needed so that to_thread retains the correct value
-        : [to_esp] "g" (to_thread->tss().esp),
-        [to_esp0] "g" (to_thread->tss().esp0),
-        [to_eip] "c" (to_thread->tss().eip),
+        : [to_esp] "g" (to_thread->regs().esp),
+        [to_esp0] "g" (to_thread->regs().esp0),
+        [to_eip] "c" (to_thread->regs().eip),
         [from_thread] "d" (from_thread),
         [to_thread] "a" (to_thread)
         : "memory"
@@ -256,13 +256,10 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
 {
     VERIFY(initial_thread.process().is_kernel_process());
 
-    auto& tss = initial_thread.tss();
-    m_tss = tss;
-    m_tss.esp0 = tss.esp0;
+    auto& regs = initial_thread.regs();
+    m_tss.iomapbase = sizeof(m_tss);
+    m_tss.esp0 = regs.esp0;
     m_tss.ss0 = GDT_SELECTOR_DATA0;
-    // user mode needs to be able to switch to kernel mode:
-    m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
-    m_tss.fs = GDT_SELECTOR_PROC | 3;
 
     m_scheduler_initialized = true;
 
@@ -285,8 +282,8 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
         "call enter_trap_no_irq \n"
         "addl $4, %%esp \n"
         "lret \n"
-        :: [new_esp] "g" (tss.esp),
-        [new_eip] "a" (tss.eip),
+        :: [new_esp] "g" (regs.esp),
+        [new_eip] "a" (regs.eip),
         [from_to_thread] "b" (&initial_thread),
         [cpu] "c" (id())
     );
diff --git a/Kernel/Arch/x86/x86_64/Processor.cpp b/Kernel/Arch/x86/x86_64/Processor.cpp
index 01c41a0481d..6ed1bb4044d 100644
--- a/Kernel/Arch/x86/x86_64/Processor.cpp
+++ b/Kernel/Arch/x86/x86_64/Processor.cpp
@@ -67,6 +67,7 @@ String Processor::platform_string() const
     return "x86_64";
 }
 
+// FIXME: For the most part this is a copy of the i386-specific function, get rid of the code duplication
 u32 Processor::init_context(Thread& thread, bool leave_crit)
 {
     VERIFY(is_kernel_mode());
@@ -88,29 +89,28 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // TODO: handle NT?
     VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
 
-#if 0
-    auto& tss = thread.tss();
-    bool return_to_user = (tss.cs & 3) != 0;
+    auto& regs = thread.regs();
+    bool return_to_user = (regs.cs & 3) != 0;
 
     // make room for an interrupt frame
     if (!return_to_user) {
-        // userspace_esp and userspace_ss are not popped off by iret
+        // userspace_rsp is not popped off by iretq
         // unless we're switching back to user mode
-        stack_top -= sizeof(RegisterState) - 2 * sizeof(u32);
+        stack_top -= sizeof(RegisterState) - 2 * sizeof(FlatPtr);
 
         // For kernel threads we'll push the thread function argument
-        // which should be in tss.esp and exit_kernel_thread as return
+        // which should be in regs.rsp and exit_kernel_thread as return
         // address.
-        stack_top -= 2 * sizeof(u32);
-        *reinterpret_cast<FlatPtr*>(kernel_stack_top - 2 * sizeof(u32)) = tss.esp;
+        stack_top -= 2 * sizeof(u64);
+        *reinterpret_cast<FlatPtr*>(kernel_stack_top - 2 * sizeof(u32)) = regs.rsp;
         *reinterpret_cast<FlatPtr*>(kernel_stack_top - 3 * sizeof(u32)) = FlatPtr(&exit_kernel_thread);
     } else {
         stack_top -= sizeof(RegisterState);
     }
 
     // we want to end up 16-byte aligned, %esp + 4 should be aligned
-    stack_top -= sizeof(u32);
-    *reinterpret_cast<FlatPtr*>(kernel_stack_top - sizeof(u32)) = 0;
+    stack_top -= sizeof(u64);
+    *reinterpret_cast<FlatPtr*>(kernel_stack_top - sizeof(u64)) = 0;
 
     // set up the stack so that after returning from thread_context_first_enter()
     // we will end up either in kernel mode or user mode, depending on how the thread is set up
@@ -125,25 +125,24 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     trap.prev_irq_level = 0;
     trap.next_trap = nullptr;
 
-    stack_top -= sizeof(u32); // pointer to TrapFrame
-    *reinterpret_cast<FlatPtr*>(stack_top) = stack_top + 4;
+    stack_top -= sizeof(u64); // pointer to TrapFrame
+    *reinterpret_cast<FlatPtr*>(stack_top) = stack_top + 8;
 
     if constexpr (CONTEXT_SWITCH_DEBUG) {
         if (return_to_user) {
-            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}, user_top={}:{}",
+            dbgln("init_context {} ({}) set up to execute at rip={}:{}, rsp={}, stack_top={}, user_top={}",
                 thread,
                 VirtualAddress(&thread),
-                iretframe.cs, tss.eip,
-                VirtualAddress(tss.esp),
+                iretframe.cs, regs.rip,
+                VirtualAddress(regs.rsp),
                 VirtualAddress(stack_top),
-                iretframe.userspace_ss,
-                iretframe.userspace_esp);
+                iretframe.userspace_rsp);
         } else {
-            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}",
+            dbgln("init_context {} ({}) set up to execute at rip={}:{}, rsp={}, stack_top={}",
                 thread,
                 VirtualAddress(&thread),
-                iretframe.cs, tss.eip,
-                VirtualAddress(tss.esp),
+                iretframe.cs, regs.rip,
+                VirtualAddress(regs.rsp),
                 VirtualAddress(stack_top));
         }
     }
@@ -152,18 +151,9 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // in kernel mode, so set up these values so that we end up popping iretframe
     // off the stack right after the context switch completed, at which point
     // control is transferred to what iretframe is pointing to.
-    tss.eip = FlatPtr(&thread_context_first_enter);
-    tss.esp0 = kernel_stack_top;
-    tss.esp = stack_top;
-    tss.cs = GDT_SELECTOR_CODE0;
-    tss.ds = GDT_SELECTOR_DATA0;
-    tss.es = GDT_SELECTOR_DATA0;
-    tss.gs = GDT_SELECTOR_DATA0;
-    tss.ss = GDT_SELECTOR_DATA0;
-    tss.fs = GDT_SELECTOR_PROC;
-#else
-    TODO();
-#endif
+    regs.rip = FlatPtr(&thread_context_first_enter);
+    regs.rsp0 = kernel_stack_top;
+    regs.rsp = stack_top;
 
     return stack_top;
 }
@@ -203,21 +193,40 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
 {
     VERIFY(initial_thread.process().is_kernel_process());
 
-    auto& tss = initial_thread.tss();
-    m_tss = tss;
-#if 0
-    m_tss.esp0 = tss.esp0;
-    m_tss.ss0 = GDT_SELECTOR_DATA0;
-    // user mode needs to be able to switch to kernel mode:
-    m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
-    m_tss.fs = GDT_SELECTOR_PROC | 3;
-#else
-    TODO();
-#endif
+    auto& regs = initial_thread.regs();
+    m_tss.iomapbase = sizeof(m_tss);
+    m_tss.rsp0l = regs.rsp0 & 0xffffffff;
+    m_tss.rsp0h = regs.rsp0 >> 32;
 
     m_scheduler_initialized = true;
 
-    // FIXME: Context switching (see i386 impl)
+    // clang-format off
+    asm volatile(
+        "movq %[new_rsp], %%rsp \n" // switch to new stack
+        "pushq %[from_to_thread] \n" // to_thread
+        "pushq %[from_to_thread] \n" // from_thread
+        "pushq $" __STRINGIFY(GDT_SELECTOR_CODE0) " \n"
+        "pushq %[new_rip] \n" // save the entry rip to the stack
+        "movq %%rsp, %%rbx \n"
+        "addq $40, %%rbx \n" // calculate pointer to TrapFrame
+        "pushq %%rbx \n"
+        "cld \n"
+        "pushq %[cpu] \n" // push argument for init_finished before register is clobbered
+        "call pre_init_finished \n"
+        "pop %%rdi \n" // move argument for init_finished into place
+        "call init_finished \n"
+        "addq $8, %%rsp \n"
+        "call post_init_finished \n"
+        "pop %%rdi \n" // move pointer to TrapFrame into place
+        "call enter_trap_no_irq \n"
+        "addq $8, %%rsp \n"
+        "retq \n"
+        :: [new_rsp] "g" (regs.rsp),
+        [new_rip] "a" (regs.rip),
+        [from_to_thread] "b" (&initial_thread),
+        [cpu] "c" ((u64)id())
+    );
+    // clang-format on
     VERIFY_NOT_REACHED();
 }
 
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index f4a6befab8e..427606bf7b5 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -181,12 +181,11 @@ RefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thread, Str
     if (!first_thread || !process)
         return {};
 #if ARCH(I386)
-    first_thread->tss().eip = (FlatPtr)entry;
-    first_thread->tss().esp = FlatPtr(entry_data); // entry function argument is expected to be in tss.esp
+    first_thread->regs().eip = (FlatPtr)entry;
+    first_thread->regs().esp = FlatPtr(entry_data); // entry function argument is expected to be in regs.esp
 #else
-    (void)entry;
-    (void)entry_data;
-    PANIC("Process::create_kernel_process() not implemented");
+    first_thread->regs().rip = (FlatPtr)entry;
+    first_thread->regs().rsp = FlatPtr(entry_data); // entry function argument is expected to be in regs.rsp
 #endif
 
     if (process->pid() != 0) {
@@ -645,14 +644,13 @@ RefPtr<Thread> Process::create_kernel_thread(void (*entry)(void*), void* entry_d
     if (!joinable)
         thread->detach();
 
+    auto& regs = thread->regs();
 #if ARCH(I386)
-    auto& tss = thread->tss();
-    tss.eip = (FlatPtr)entry;
-    tss.esp = FlatPtr(entry_data); // entry function argument is expected to be in tss.esp
+    regs.eip = (FlatPtr)entry;
+    regs.esp = FlatPtr(entry_data); // entry function argument is expected to be in regs.esp
 #else
-    (void)entry;
-    (void)entry_data;
-    PANIC("Process::create_kernel_thread() not implemented");
+    regs.rip = (FlatPtr)entry;
+    regs.rsp = FlatPtr(entry_data); // entry function argument is expected to be in regs.rsp
 #endif
 
     ScopedSpinLock lock(g_scheduler_lock);
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 1aca27dd2d0..b972bd8a00b 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -232,7 +232,7 @@ bool Scheduler::pick_next()
         dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:08x}",
             Processor::id(),
             thread_to_schedule,
-            thread_to_schedule.tss().cs, thread_to_schedule.tss().eip);
+            thread_to_schedule.regs().cs, thread_to_schedule.regs().eip);
 #else
         PANIC("Scheduler::pick_next() not implemented");
 #endif
@@ -354,7 +354,8 @@ bool Scheduler::context_switch(Thread* thread)
         from_thread->set_state(Thread::Runnable);
 
 #ifdef LOG_EVERY_CONTEXT_SWITCH
-        dbgln("Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:08x}", Processor::id(), from_thread->tid().value(), thread->tid().value(), thread->priority(), thread->tss().cs, thread->tss().eip);
+        dbgln("Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:08x}", Processor::id(), from_thread->tid().value(),
+            thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().eip);
 #endif
     }
 
@@ -583,7 +584,7 @@ void dump_thread_list()
     auto get_cs = [](Thread& thread) -> u16 {
 #if ARCH(I386)
         if (!thread.current_trap())
-            return thread.tss().cs;
+            return thread.regs().cs;
 #else
         PANIC("get_cs() not implemented");
 #endif
@@ -593,7 +594,7 @@ void dump_thread_list()
     auto get_eip = [](Thread& thread) -> u32 {
 #if ARCH(I386)
         if (!thread.current_trap())
-            return thread.tss().eip;
+            return thread.regs().eip;
         return thread.get_register_dump_from_stack().eip;
 #else
         PANIC("get_eip() not implemented");
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 2485b95d4a6..ad79715ee78 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -597,7 +597,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     auto make_stack_result = make_userspace_stack_for_main_thread(*load_result.stack_region.unsafe_ptr(), move(arguments), move(environment), move(auxv));
     if (make_stack_result.is_error())
         return make_stack_result.error();
-    u32 new_userspace_esp = make_stack_result.value();
+    FlatPtr new_userspace_esp = make_stack_result.value();
 
     if (wait_for_tracer_at_next_execve()) {
         // Make sure we release the ptrace lock here or the tracer will block forever.
@@ -636,22 +636,21 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     }
 
     new_main_thread->reset_fpu_state();
+    auto& regs = new_main_thread->m_regs;
 #if ARCH(I386)
-    auto& tss = new_main_thread->m_tss;
-    tss.cs = GDT_SELECTOR_CODE3 | 3;
-    tss.ds = GDT_SELECTOR_DATA3 | 3;
-    tss.es = GDT_SELECTOR_DATA3 | 3;
-    tss.ss = GDT_SELECTOR_DATA3 | 3;
-    tss.fs = GDT_SELECTOR_DATA3 | 3;
-    tss.gs = GDT_SELECTOR_TLS | 3;
-    tss.eip = load_result.entry_eip;
-    tss.esp = new_userspace_esp;
-    tss.cr3 = space().page_directory().cr3();
-    tss.ss2 = pid().value();
+    regs.cs = GDT_SELECTOR_CODE3 | 3;
+    regs.ds = GDT_SELECTOR_DATA3 | 3;
+    regs.es = GDT_SELECTOR_DATA3 | 3;
+    regs.ss = GDT_SELECTOR_DATA3 | 3;
+    regs.fs = GDT_SELECTOR_DATA3 | 3;
+    regs.gs = GDT_SELECTOR_TLS | 3;
+    regs.eip = load_result.entry_eip;
+    regs.esp = new_userspace_esp;
 #else
-    (void)new_userspace_esp;
-    PANIC("Process::do_exec() not implemented");
+    regs.rip = load_result.entry_eip;
+    regs.rsp = new_userspace_esp;
 #endif
+    regs.cr3 = space().page_directory().cr3();
 
     {
         TemporaryChange profiling_disabler(m_profiling, was_profiling);
diff --git a/Kernel/Syscalls/fork.cpp b/Kernel/Syscalls/fork.cpp
index 2161506da63..dab1ca807d0 100644
--- a/Kernel/Syscalls/fork.cpp
+++ b/Kernel/Syscalls/fork.cpp
@@ -45,25 +45,26 @@ KResultOr<pid_t> Process::sys$fork(RegisterState& regs)
     child->space().set_enforces_syscall_regions(space().enforces_syscall_regions());
 
 #if ARCH(I386)
-    auto& child_tss = child_first_thread->m_tss;
-    child_tss.eax = 0; // fork() returns 0 in the child :^)
-    child_tss.ebx = regs.ebx;
-    child_tss.ecx = regs.ecx;
-    child_tss.edx = regs.edx;
-    child_tss.ebp = regs.ebp;
-    child_tss.esp = regs.userspace_esp;
-    child_tss.esi = regs.esi;
-    child_tss.edi = regs.edi;
-    child_tss.eflags = regs.eflags;
-    child_tss.eip = regs.eip;
-    child_tss.cs = regs.cs;
-    child_tss.ds = regs.ds;
-    child_tss.es = regs.es;
-    child_tss.fs = regs.fs;
-    child_tss.gs = regs.gs;
-    child_tss.ss = regs.userspace_ss;
+    auto& child_regs = child_first_thread->m_regs;
+    child_regs.eax = 0; // fork() returns 0 in the child :^)
+    child_regs.ebx = regs.ebx;
+    child_regs.ecx = regs.ecx;
+    child_regs.edx = regs.edx;
+    child_regs.ebp = regs.ebp;
+    child_regs.esp = regs.userspace_esp;
+    child_regs.esi = regs.esi;
+    child_regs.edi = regs.edi;
+    child_regs.eflags = regs.eflags;
+    child_regs.eip = regs.eip;
+    child_regs.cs = regs.cs;
+    child_regs.ds = regs.ds;
+    child_regs.es = regs.es;
+    child_regs.fs = regs.fs;
+    child_regs.gs = regs.gs;
+    child_regs.ss = regs.userspace_ss;
 
-    dbgln_if(FORK_DEBUG, "fork: child will begin executing at {:04x}:{:08x} with stack {:04x}:{:08x}, kstack {:04x}:{:08x}", child_tss.cs, child_tss.eip, child_tss.ss, child_tss.esp, child_tss.ss0, child_tss.esp0);
+    dbgln_if(FORK_DEBUG, "fork: child will begin executing at {:04x}:{:08x} with stack {:04x}:{:08x}, kstack {:04x}:{:08x}",
+        child_regs.cs, child_regs.eip, child_regs.ss, child_regs.esp, child_regs.ss0, child_regs.esp0);
 #else
     (void)regs;
     PANIC("Process::sys$fork() not implemented.");
diff --git a/Kernel/Syscalls/thread.cpp b/Kernel/Syscalls/thread.cpp
index ee934a0b2e5..995254545b4 100644
--- a/Kernel/Syscalls/thread.cpp
+++ b/Kernel/Syscalls/thread.cpp
@@ -61,16 +61,17 @@ KResultOr<int> Process::sys$create_thread(void* (*entry)(void*), Userspace<const
     if (!is_thread_joinable)
         thread->detach();
 
+    auto& regs = thread->regs();
 #if ARCH(I386)
-    auto& tss = thread->tss();
-    tss.eip = (FlatPtr)entry;
-    tss.eflags = 0x0202;
-    tss.cr3 = space().page_directory().cr3();
-    tss.esp = user_esp.value();
+    regs.eip = (FlatPtr)entry;
+    regs.eflags = 0x0202;
+    regs.esp = user_esp.value();
 #else
-    (void)entry;
-    PANIC("Process::sys$create_thread() not implemented");
+    regs.rip = (FlatPtr)entry;
+    regs.rflags = 0x0202;
+    regs.rsp = user_esp.value();
 #endif
+    regs.cr3 = space().page_directory().cr3();
 
     auto tsr_result = thread->make_thread_specific_region({});
     if (tsr_result.is_error())
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 5af5c1b35c2..8a2e14fc6e1 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -84,48 +84,51 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stac
     m_fpu_state = (FPUState*)kmalloc_aligned<16>(sizeof(FPUState));
     reset_fpu_state();
 
-    m_tss.iomapbase = sizeof(TSS32);
 
 #if ARCH(I386)
     // Only IF is set when a process boots.
-    m_tss.eflags = 0x0202;
+    m_regs.eflags = 0x0202;
 
     if (m_process->is_kernel_process()) {
-        m_tss.cs = GDT_SELECTOR_CODE0;
-        m_tss.ds = GDT_SELECTOR_DATA0;
-        m_tss.es = GDT_SELECTOR_DATA0;
-        m_tss.fs = GDT_SELECTOR_PROC;
-        m_tss.ss = GDT_SELECTOR_DATA0;
-        m_tss.gs = 0;
+        m_regs.cs = GDT_SELECTOR_CODE0;
+        m_regs.ds = GDT_SELECTOR_DATA0;
+        m_regs.es = GDT_SELECTOR_DATA0;
+        m_regs.fs = GDT_SELECTOR_PROC;
+        m_regs.ss = GDT_SELECTOR_DATA0;
+        m_regs.gs = 0;
     } else {
-        m_tss.cs = GDT_SELECTOR_CODE3 | 3;
-        m_tss.ds = GDT_SELECTOR_DATA3 | 3;
-        m_tss.es = GDT_SELECTOR_DATA3 | 3;
-        m_tss.fs = GDT_SELECTOR_DATA3 | 3;
-        m_tss.ss = GDT_SELECTOR_DATA3 | 3;
-        m_tss.gs = GDT_SELECTOR_TLS | 3;
+        m_regs.cs = GDT_SELECTOR_CODE3 | 3;
+        m_regs.ds = GDT_SELECTOR_DATA3 | 3;
+        m_regs.es = GDT_SELECTOR_DATA3 | 3;
+        m_regs.fs = GDT_SELECTOR_DATA3 | 3;
+        m_regs.ss = GDT_SELECTOR_DATA3 | 3;
+        m_regs.gs = GDT_SELECTOR_TLS | 3;
     }
-
-    m_tss.cr3 = m_process->space().page_directory().cr3();
 #else
-    PANIC("Thread::Thread() not implemented");
+    m_regs.rflags = 0x0202;
 #endif
+    m_regs.cr3 = m_process->space().page_directory().cr3();
+
 
     m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
     m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
 
-#if ARCH(I386)
     if (m_process->is_kernel_process()) {
-        m_tss.esp = m_tss.esp0 = m_kernel_stack_top;
+#if ARCH(I386)
+        m_regs.esp = m_regs.esp0 = m_kernel_stack_top;
+#else
+        m_regs.rsp = m_regs.rsp0 = m_kernel_stack_top;
+#endif
     } else {
         // Ring 3 processes get a separate stack for ring 0.
         // The ring 3 stack will be assigned by exec().
-        m_tss.ss0 = GDT_SELECTOR_DATA0;
-        m_tss.esp0 = m_kernel_stack_top;
-    }
+#if ARCH(I386)
+        m_regs.ss0 = GDT_SELECTOR_DATA0;
+        m_regs.esp0 = m_kernel_stack_top;
 #else
-    PANIC("Thread::Thread() not implemented");
+        m_regs.rsp0 = m_kernel_stack_top;
 #endif
+    }
 
     // We need to add another reference if we could successfully create
     // all the resources needed for this thread. The reason for this is that
@@ -864,9 +867,9 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
 #endif
 
 #if ARCH(I386)
-    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_tss.cs, m_tss.eip, signal);
+    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_regs.cs, m_regs.eip, signal);
 #else
-    PANIC("Thread:dispatch_signal() not implemented");
+    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:16x} to deliver {}", state_string(), m_regs.cs, m_regs.rip, signal);
 #endif
 
     return DispatchSignalResult::Continue;
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index b7f2bf541e7..f71d51d22f6 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -62,6 +62,53 @@ struct ThreadSpecificData {
 
 #define THREAD_AFFINITY_DEFAULT 0xffffffff
 
+struct ThreadRegisters {
+#if ARCH(I386)
+    FlatPtr ss;
+    FlatPtr gs;
+    FlatPtr fs;
+    FlatPtr es;
+    FlatPtr ds;
+    FlatPtr edi;
+    FlatPtr esi;
+    FlatPtr ebp;
+    FlatPtr esp;
+    FlatPtr ebx;
+    FlatPtr edx;
+    FlatPtr ecx;
+    FlatPtr eax;
+    FlatPtr eip;
+    FlatPtr esp0;
+    FlatPtr ss0;
+#else
+    FlatPtr rdi;
+    FlatPtr rsi;
+    FlatPtr rbp;
+    FlatPtr rsp;
+    FlatPtr rbx;
+    FlatPtr rdx;
+    FlatPtr rcx;
+    FlatPtr rax;
+    FlatPtr r8;
+    FlatPtr r9;
+    FlatPtr r10;
+    FlatPtr r11;
+    FlatPtr r12;
+    FlatPtr r13;
+    FlatPtr r14;
+    FlatPtr r15;
+    FlatPtr rip;
+    FlatPtr rsp0;
+#endif
+    FlatPtr cs;
+#if ARCH(I386)
+    FlatPtr eflags;
+#else
+    FlatPtr rflags;
+#endif
+    FlatPtr cr3;
+};
+
 class Thread
     : public RefCounted<Thread>
     , public Weakable<Thread> {
@@ -748,8 +795,9 @@ public:
     DebugRegisterState& debug_register_state() { return m_debug_register_state; }
     const DebugRegisterState& debug_register_state() const { return m_debug_register_state; }
 
-    TSS& tss() { return m_tss; }
-    const TSS& tss() const { return m_tss; }
+    ThreadRegisters& regs() { return m_regs; }
+    ThreadRegisters const& regs() const { return m_regs; }
+
     State state() const { return m_state; }
     const char* state_string() const;
 
@@ -1210,7 +1258,7 @@ private:
     mutable RecursiveSpinLock m_block_lock;
     NonnullRefPtr<Process> m_process;
     ThreadID m_tid { -1 };
-    TSS m_tss {};
+    ThreadRegisters m_regs;
     DebugRegisterState m_debug_register_state {};
     TrapFrame* m_current_trap { nullptr };
     u32 m_saved_critical { 1 };
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 34eea25c01b..58b337a9b5d 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -733,13 +733,8 @@ void MemoryManager::enter_space(Space& space)
     VERIFY(current_thread != nullptr);
     ScopedSpinLock lock(s_mm_lock);
 
-#if ARCH(I386)
-    current_thread->tss().cr3 = space.page_directory().cr3();
+    current_thread->regs().cr3 = space.page_directory().cr3();
     write_cr3(space.page_directory().cr3());
-#else
-    (void)space;
-    PANIC("MemoryManager::enter_space not implemented");
-#endif
 }
 
 void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
diff --git a/Kernel/VM/ProcessPagingScope.cpp b/Kernel/VM/ProcessPagingScope.cpp
index db3bee8026d..d3d48956275 100644
--- a/Kernel/VM/ProcessPagingScope.cpp
+++ b/Kernel/VM/ProcessPagingScope.cpp
@@ -21,11 +21,7 @@ ProcessPagingScope::ProcessPagingScope(Process& process)
 ProcessPagingScope::~ProcessPagingScope()
 {
     InterruptDisabler disabler;
-#if ARCH(I386)
-    Thread::current()->tss().cr3 = m_previous_cr3;
-#else
-    PANIC("ProcessPagingScope::~ProcessPagingScope() not implemented");
-#endif
+    Thread::current()->regs().cr3 = m_previous_cr3;
     write_cr3(m_previous_cr3);
 }
 
diff --git a/Userland/Utilities/strace.cpp b/Userland/Utilities/strace.cpp
index a8709e2bb50..82b2ed9ecf8 100644
--- a/Userland/Utilities/strace.cpp
+++ b/Userland/Utilities/strace.cpp
@@ -140,7 +140,6 @@ int main(int argc, char** argv)
         u64 arg3 = regs.rbx;
 #endif
 
-
         if (ptrace(PT_SYSCALL, g_pid, 0, 0) == -1) {
             perror("syscall");
             return 1;