Kernel: Turn Thread::current and Process::current into functions

This allows us to query the current thread and process on a per-processor basis.
Tom 2020-06-28 15:34:31 -06:00 committed by Andreas Kling
parent cdc78515b6
commit 16783bd14d
39 changed files with 518 additions and 369 deletions
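For context, the pattern this commit introduces is that each Processor object now tracks its own current thread (see the m_current_thread member, current_thread() and set_current_thread() added to the Processor class below), and the old global Thread::current / Process::current pointers become per-processor accessor functions. The definitions of Thread::current() and Process::current() themselves are not part of the hunks shown here, so the following is only a minimal sketch of the likely shape, reusing only names that do appear in this diff (Processor::current(), Processor::current_thread(), Thread::process()):

// Sketch only, not the literal definitions from this commit.
ALWAYS_INLINE Thread* Thread::current()
{
    // Read the per-CPU Processor (located via the fs-based Processor::current())
    // and return the thread it last switched to in enter_thread_context().
    return Processor::current().current_thread();
}

ALWAYS_INLINE Process* Process::current()
{
    // Very early during boot there may be no current thread yet.
    auto current_thread = Processor::current().current_thread();
    return current_thread ? &current_thread->process() : nullptr;
}

Call sites accordingly change from Thread::current-> / Process::current-> to Thread::current()-> / Process::current()->, which is what most of the hunks below do.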


@ -125,8 +125,8 @@ DebugLogStream dbg()
stream << "\033[33;1m" << process_name_buffer << '(' << getpid() << ")\033[0m: ";
#endif
#if defined(__serenity__) && defined(KERNEL)
if (Kernel::Thread::current)
stream << "\033[34;1m[" << *Kernel::Thread::current << "]\033[0m: ";
if (Kernel::Processor::is_initialized() && Kernel::Thread::current())
stream << "\033[34;1m[" << *Kernel::Thread::current() << "]\033[0m: ";
else
stream << "\033[36;1m[Kernel]\033[0m: ";
#endif
@ -137,8 +137,8 @@ DebugLogStream dbg()
KernelLogStream klog()
{
KernelLogStream stream;
if (Kernel::Thread::current)
stream << "\033[34;1m[" << *Kernel::Thread::current << "]\033[0m: ";
if (Kernel::Processor::is_initialized() && Kernel::Thread::current())
stream << "\033[34;1m[" << *Kernel::Thread::current() << "]\033[0m: ";
else
stream << "\033[36;1m[Kernel]\033[0m: ";
return stream;


@ -113,7 +113,8 @@ static void dump(const RegisterState& regs)
{
u16 ss;
u32 esp;
if (!Process::current || Process::current->is_ring0()) {
auto process = Process::current();
if (!process || process->is_ring0()) {
ss = regs.ss;
esp = regs.esp;
} else {
@ -139,7 +140,7 @@ static void dump(const RegisterState& regs)
: "=a"(cr4));
klog() << "cr0=" << String::format("%08x", cr0) << " cr2=" << String::format("%08x", cr2) << " cr3=" << String::format("%08x", cr3) << " cr4=" << String::format("%08x", cr4);
if (Process::current && Process::current->validate_read((void*)regs.eip, 8)) {
if (process && process->validate_read((void*)regs.eip, 8)) {
SmapDisabler disabler;
u8* codeptr = (u8*)regs.eip;
klog() << "code: " << String::format("%02x", codeptr[0]) << " " << String::format("%02x", codeptr[1]) << " " << String::format("%02x", codeptr[2]) << " " << String::format("%02x", codeptr[3]) << " " << String::format("%02x", codeptr[4]) << " " << String::format("%02x", codeptr[5]) << " " << String::format("%02x", codeptr[6]) << " " << String::format("%02x", codeptr[7]);
@ -148,26 +149,27 @@ static void dump(const RegisterState& regs)
void handle_crash(RegisterState& regs, const char* description, int signal, bool out_of_memory)
{
if (!Process::current) {
auto process = Process::current();
if (!process) {
klog() << description << " with !current";
hang();
}
// If a process crashed while inspecting another process,
// make sure we switch back to the right page tables.
MM.enter_process_paging_scope(*Process::current);
MM.enter_process_paging_scope(*process);
klog() << "CRASH: CPU #" << Processor::current().id() << " " << description << ". Ring " << (Process::current->is_ring0() ? 0 : 3) << ".";
klog() << "CRASH: CPU #" << Processor::current().id() << " " << description << ". Ring " << (process->is_ring0() ? 0 : 3) << ".";
dump(regs);
if (Process::current->is_ring0()) {
if (process->is_ring0()) {
klog() << "Crash in ring 0 :(";
dump_backtrace();
hang();
}
cli();
Process::current->crash(signal, regs.eip, out_of_memory);
process->crash(signal, regs.eip, out_of_memory);
}
EH_ENTRY_NO_CODE(6, illegal_instruction);
@ -226,7 +228,8 @@ void page_fault_handler(TrapFrame* trap)
#endif
bool faulted_in_userspace = (regs.cs & 3) == 3;
if (faulted_in_userspace && !MM.validate_user_stack(*Process::current, VirtualAddress(regs.userspace_esp))) {
auto current_thread = Thread::current();
if (faulted_in_userspace && !MM.validate_user_stack(current_thread->process(), VirtualAddress(regs.userspace_esp))) {
dbg() << "Invalid stack pointer: " << VirtualAddress(regs.userspace_esp);
handle_crash(regs, "Bad stack on page fault", SIGSTKFLT);
ASSERT_NOT_REACHED();
@ -236,8 +239,8 @@ void page_fault_handler(TrapFrame* trap)
if (response == PageFaultResponse::ShouldCrash || response == PageFaultResponse::OutOfMemory) {
if (response != PageFaultResponse::OutOfMemory) {
if (Thread::current->has_signal_handler(SIGSEGV)) {
Thread::current->send_urgent_signal_to_self(SIGSEGV);
if (current_thread->has_signal_handler(SIGSEGV)) {
current_thread->send_urgent_signal_to_self(SIGSEGV);
return;
}
}
@ -284,7 +287,8 @@ void debug_handler(TrapFrame* trap)
{
clac();
auto& regs = *trap->regs;
if (!Process::current || (regs.cs & 3) == 0) {
auto current_thread = Thread::current();
if (&current_thread->process() == nullptr || (regs.cs & 3) == 0) {
klog() << "Debug Exception in Ring0";
hang();
return;
@ -294,10 +298,10 @@ void debug_handler(TrapFrame* trap)
if (!is_reason_singlestep)
return;
if (Thread::current->tracer()) {
Thread::current->tracer()->set_regs(regs);
if (current_thread->tracer()) {
current_thread->tracer()->set_regs(regs);
}
Thread::current->send_urgent_signal_to_self(SIGTRAP);
current_thread->send_urgent_signal_to_self(SIGTRAP);
}
EH_ENTRY_NO_CODE(3, breakpoint);
@ -305,15 +309,16 @@ void breakpoint_handler(TrapFrame* trap)
{
clac();
auto& regs = *trap->regs;
if (!Process::current || (regs.cs & 3) == 0) {
auto current_thread = Thread::current();
if (&current_thread->process() == nullptr || (regs.cs & 3) == 0) {
klog() << "Breakpoint Trap in Ring0";
hang();
return;
}
if (Thread::current->tracer()) {
Thread::current->tracer()->set_regs(regs);
if (current_thread->tracer()) {
current_thread->tracer()->set_regs(regs);
}
Thread::current->send_urgent_signal_to_self(SIGTRAP);
current_thread->send_urgent_signal_to_self(SIGTRAP);
}
#define EH(i, msg) \
@ -797,6 +802,7 @@ u32 read_dr6()
FPUState Processor::s_clean_fpu_state;
static Vector<Processor*>* s_processors;
static SpinLock s_processor_lock;
Vector<Processor*>& Processor::processors()
{
@ -806,6 +812,10 @@ Vector<Processor*>& Processor::processors()
Processor& Processor::by_id(u32 cpu)
{
// s_processors does not need to be protected by a lock of any kind.
// It is populated early in the boot process, and the BSP is waiting
// for all APs to finish, after which this array never gets modified
// again, so it's safe to not protect access to it here
auto& procs = processors();
ASSERT(procs[cpu] != nullptr);
ASSERT(procs.size() > cpu);
@ -819,6 +829,9 @@ void Processor::initialize(u32 cpu)
m_cpu = cpu;
m_in_irq = 0;
m_idle_thread = nullptr;
m_current_thread = nullptr;
gdt_init();
if (cpu == 0)
idt_init();
@ -836,11 +849,15 @@ void Processor::initialize(u32 cpu)
m_info = new ProcessorInfo(*this);
if (!s_processors)
s_processors = new Vector<Processor*>();
if (cpu >= s_processors->size())
s_processors->resize(cpu + 1);
(*s_processors)[cpu] = this;
{
ScopedSpinLock lock(s_processor_lock);
// We need to prevent races between APs starting up at the same time
if (!s_processors)
s_processors = new Vector<Processor*>();
if (cpu >= s_processors->size())
s_processors->resize(cpu + 1);
(*s_processors)[cpu] = this;
}
klog() << "CPU #" << cpu << " using Processor at " << VirtualAddress(FlatPtr(this));
}
@ -894,6 +911,10 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
{
ASSERT(from_thread == to_thread || from_thread->state() != Thread::Running);
ASSERT(to_thread->state() == Thread::Running);
auto& processor = Processor::current();
processor.set_current_thread(*to_thread);
auto& from_tss = from_thread->tss();
auto& to_tss = to_thread->tss();
asm volatile("fxsave %0"
@ -904,7 +925,6 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
set_fs(to_tss.fs);
set_gs(to_tss.gs);
auto& processor = Processor::current();
auto& tls_descriptor = processor.get_gdt_entry(GDT_SELECTOR_TLS);
tls_descriptor.set_base(to_thread->thread_specific_data().as_ptr());
tls_descriptor.set_limit(to_thread->thread_specific_region_size());
@ -1250,8 +1270,9 @@ void __assertion_failed(const char* msg, const char* file, unsigned line, const
// Switch back to the current process's page tables if there are any.
// Otherwise stack walking will be a disaster.
if (Process::current)
MM.enter_process_paging_scope(*Process::current);
auto process = Process::current();
if (process)
MM.enter_process_paging_scope(*process);
Kernel::dump_backtrace();
asm volatile("hlt");


@ -626,6 +626,8 @@ class Processor {
static FPUState s_clean_fpu_state;
ProcessorInfo* m_info;
Thread* m_current_thread;
Thread* m_idle_thread;
bool m_invoke_scheduler_async;
@ -661,6 +663,33 @@ public:
return *(Processor*)read_fs_u32(0);
}
ALWAYS_INLINE static bool is_initialized()
{
return get_fs() == GDT_SELECTOR_PROC && read_fs_u32(0) != 0;
}
ALWAYS_INLINE Thread* idle_thread() const
{
return m_idle_thread;
}
ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
{
m_idle_thread = &idle_thread;
}
ALWAYS_INLINE Thread* current_thread() const
{
// NOTE: NOT safe to call from another processor!
ASSERT(&Processor::current() == this);
return m_current_thread;
}
ALWAYS_INLINE void set_current_thread(Thread& current_thread)
{
m_current_thread = &current_thread;
}
ALWAYS_INLINE u32 id()
{
return m_cpu;


@ -198,14 +198,14 @@ int BXVGADevice::ioctl(FileDescription&, unsigned request, FlatPtr arg)
switch (request) {
case FB_IOCTL_GET_SIZE_IN_BYTES: {
auto* out = (size_t*)arg;
if (!Process::current->validate_write_typed(out))
if (!Process::current()->validate_write_typed(out))
return -EFAULT;
*out = framebuffer_size_in_bytes();
return 0;
}
case FB_IOCTL_GET_BUFFER: {
auto* index = (int*)arg;
if (!Process::current->validate_write_typed(index))
if (!Process::current()->validate_write_typed(index))
return -EFAULT;
*index = m_y_offset == 0 ? 0 : 1;
return 0;
@ -218,7 +218,7 @@ int BXVGADevice::ioctl(FileDescription&, unsigned request, FlatPtr arg)
}
case FB_IOCTL_GET_RESOLUTION: {
auto* resolution = (FBResolution*)arg;
if (!Process::current->validate_write_typed(resolution))
if (!Process::current()->validate_write_typed(resolution))
return -EFAULT;
resolution->pitch = m_framebuffer_pitch;
resolution->width = m_framebuffer_width;
@ -227,7 +227,7 @@ int BXVGADevice::ioctl(FileDescription&, unsigned request, FlatPtr arg)
}
case FB_IOCTL_SET_RESOLUTION: {
auto* resolution = (FBResolution*)arg;
if (!Process::current->validate_read_typed(resolution) || !Process::current->validate_write_typed(resolution))
if (!Process::current()->validate_read_typed(resolution) || !Process::current()->validate_write_typed(resolution))
return -EFAULT;
if (resolution->width > MAX_RESOLUTION_WIDTH || resolution->height > MAX_RESOLUTION_HEIGHT)
return -EINVAL;


@ -79,21 +79,21 @@ int MBVGADevice::ioctl(FileDescription&, unsigned request, FlatPtr arg)
switch (request) {
case FB_IOCTL_GET_SIZE_IN_BYTES: {
auto* out = (size_t*)arg;
if (!Process::current->validate_write_typed(out))
if (!Process::current()->validate_write_typed(out))
return -EFAULT;
*out = framebuffer_size_in_bytes();
return 0;
}
case FB_IOCTL_GET_BUFFER: {
auto* index = (int*)arg;
if (!Process::current->validate_write_typed(index))
if (!Process::current()->validate_write_typed(index))
return -EFAULT;
*index = 0;
return 0;
}
case FB_IOCTL_GET_RESOLUTION: {
auto* resolution = (FBResolution*)arg;
if (!Process::current->validate_write_typed(resolution))
if (!Process::current()->validate_write_typed(resolution))
return -EFAULT;
resolution->pitch = m_framebuffer_pitch;
resolution->width = m_framebuffer_width;
@ -102,7 +102,7 @@ int MBVGADevice::ioctl(FileDescription&, unsigned request, FlatPtr arg)
}
case FB_IOCTL_SET_RESOLUTION: {
auto* resolution = (FBResolution*)arg;
if (!Process::current->validate_read_typed(resolution) || !Process::current->validate_write_typed(resolution))
if (!Process::current()->validate_read_typed(resolution) || !Process::current()->validate_write_typed(resolution))
return -EFAULT;
resolution->pitch = m_framebuffer_pitch;
resolution->width = m_framebuffer_width;


@ -177,7 +177,7 @@ static void print_ide_status(u8 status)
void PATAChannel::wait_for_irq()
{
Thread::current->wait_on(m_irq_queue);
Thread::current()->wait_on(m_irq_queue);
disable_irq();
}


@ -222,7 +222,7 @@ void SB16::handle_irq(const RegisterState&)
void SB16::wait_for_irq()
{
Thread::current->wait_on(m_irq_queue);
Thread::current()->wait_on(m_irq_queue);
disable_irq();
}


@ -133,7 +133,7 @@ ssize_t FIFO::read(FileDescription&, size_t, u8* buffer, ssize_t size)
ssize_t FIFO::write(FileDescription&, size_t, const u8* buffer, ssize_t size)
{
if (!m_readers) {
Thread::current->send_signal(SIGPIPE, Process::current);
Thread::current()->send_signal(SIGPIPE, Process::current());
return -EPIPE;
}
#ifdef FIFO_DEBUG


@ -48,7 +48,7 @@ ssize_t InodeFile::read(FileDescription& description, size_t offset, u8* buffer,
{
ssize_t nread = m_inode->read_bytes(offset, count, buffer, &description);
if (nread > 0)
Thread::current->did_file_read(nread);
Thread::current()->did_file_read(nread);
return nread;
}
@ -57,7 +57,7 @@ ssize_t InodeFile::write(FileDescription& description, size_t offset, const u8*
ssize_t nwritten = m_inode->write_bytes(offset, count, data, &description);
if (nwritten > 0) {
m_inode->set_mtime(kgettimeofday().tv_sec);
Thread::current->did_file_write(nwritten);
Thread::current()->did_file_write(nwritten);
}
return nwritten;
}


@ -296,7 +296,7 @@ Optional<KBuffer> procfs$pid_vm(InodeIdentifier identifier)
KBufferBuilder builder;
JsonArraySerializer array { builder };
for (auto& region : process.regions()) {
if (!region.is_user_accessible() && !Process::current->is_superuser())
if (!region.is_user_accessible() && !Process::current()->is_superuser())
continue;
auto region_object = array.add_object();
region_object.add("readable", region.is_readable());
@ -439,7 +439,7 @@ Optional<KBuffer> procfs$profile(InodeIdentifier)
object.add("executable", Profiling::executable_path());
auto array = object.add_array("events");
bool mask_kernel_addresses = !Process::current->is_superuser();
bool mask_kernel_addresses = !Process::current()->is_superuser();
Profiling::for_each_sample([&](auto& sample) {
auto object = array.add_object();
object.add("type", "sample");
@ -677,7 +677,7 @@ Optional<KBuffer> procfs$pid_root(InodeIdentifier identifier)
Optional<KBuffer> procfs$self(InodeIdentifier)
{
char buffer[16];
sprintf(buffer, "%u", Process::current->pid());
sprintf(buffer, "%u", Process::current()->pid());
return KBuffer::copy((const u8*)buffer, strlen(buffer));
}
@ -807,7 +807,7 @@ Optional<KBuffer> procfs$memstat(InodeIdentifier)
Optional<KBuffer> procfs$all(InodeIdentifier)
{
InterruptDisabler disabler;
ScopedSpinLock lock(g_scheduler_lock);
auto processes = Process::all_processes();
KBufferBuilder builder;
JsonArraySerializer array { builder };


@ -220,7 +220,8 @@ KResult VFS::utime(StringView path, Custody& base, time_t atime, time_t mtime)
return custody_or_error.error();
auto& custody = *custody_or_error.value();
auto& inode = custody.inode();
if (!Process::current->is_superuser() && inode.metadata().uid != Process::current->euid())
auto current_process = Process::current();
if (!current_process->is_superuser() && inode.metadata().uid != current_process->euid())
return KResult(-EACCES);
if (custody.is_readonly())
return KResult(-EROFS);
@ -272,18 +273,19 @@ KResultOr<NonnullRefPtr<FileDescription>> VFS::open(StringView path, int options
bool should_truncate_file = false;
if ((options & O_RDONLY) && !metadata.may_read(*Process::current))
auto current_process = Process::current();
if ((options & O_RDONLY) && !metadata.may_read(*current_process))
return KResult(-EACCES);
if (options & O_WRONLY) {
if (!metadata.may_write(*Process::current))
if (!metadata.may_write(*current_process))
return KResult(-EACCES);
if (metadata.is_directory())
return KResult(-EISDIR);
should_truncate_file = options & O_TRUNC;
}
if (options & O_EXEC) {
if (!metadata.may_execute(*Process::current) || (custody.mount_flags() & MS_NOEXEC))
if (!metadata.may_execute(*current_process) || (custody.mount_flags() & MS_NOEXEC))
return KResult(-EACCES);
}
@ -333,14 +335,15 @@ KResult VFS::mknod(StringView path, mode_t mode, dev_t dev, Custody& base)
if (existing_file_or_error.error() != -ENOENT)
return existing_file_or_error.error();
auto& parent_inode = parent_custody->inode();
if (!parent_inode.metadata().may_write(*Process::current))
auto current_process = Process::current();
if (!parent_inode.metadata().may_write(*current_process))
return KResult(-EACCES);
if (parent_custody->is_readonly())
return KResult(-EROFS);
LexicalPath p(path);
dbg() << "VFS::mknod: '" << p.basename() << "' mode=" << mode << " dev=" << dev << " in " << parent_inode.identifier();
return parent_inode.create_child(p.basename(), mode, dev, Process::current->uid(), Process::current->gid()).result();
return parent_inode.create_child(p.basename(), mode, dev, current_process->uid(), current_process->gid()).result();
}
KResultOr<NonnullRefPtr<FileDescription>> VFS::create(StringView path, int options, mode_t mode, Custody& parent_custody, Optional<UidAndGid> owner)
@ -355,7 +358,8 @@ KResultOr<NonnullRefPtr<FileDescription>> VFS::create(StringView path, int optio
}
auto& parent_inode = parent_custody.inode();
if (!parent_inode.metadata().may_write(*Process::current))
auto current_process = Process::current();
if (!parent_inode.metadata().may_write(*current_process))
return KResult(-EACCES);
if (parent_custody.is_readonly())
return KResult(-EROFS);
@ -364,8 +368,8 @@ KResultOr<NonnullRefPtr<FileDescription>> VFS::create(StringView path, int optio
#ifdef VFS_DEBUG
dbg() << "VFS::create: '" << p.basename() << "' in " << parent_inode.identifier();
#endif
uid_t uid = owner.has_value() ? owner.value().uid : Process::current->uid();
gid_t gid = owner.has_value() ? owner.value().gid : Process::current->gid();
uid_t uid = owner.has_value() ? owner.value().uid : current_process->uid();
gid_t gid = owner.has_value() ? owner.value().gid : current_process->gid();
auto inode_or_error = parent_inode.create_child(p.basename(), mode, 0, uid, gid);
if (inode_or_error.is_error())
return inode_or_error.error();
@ -396,7 +400,8 @@ KResult VFS::mkdir(StringView path, mode_t mode, Custody& base)
return result.error();
auto& parent_inode = parent_custody->inode();
if (!parent_inode.metadata().may_write(*Process::current))
auto current_process = Process::current();
if (!parent_inode.metadata().may_write(*current_process))
return KResult(-EACCES);
if (parent_custody->is_readonly())
return KResult(-EROFS);
@ -405,7 +410,7 @@ KResult VFS::mkdir(StringView path, mode_t mode, Custody& base)
#ifdef VFS_DEBUG
dbg() << "VFS::mkdir: '" << p.basename() << "' in " << parent_inode.identifier();
#endif
return parent_inode.create_child(p.basename(), S_IFDIR | mode, 0, Process::current->uid(), Process::current->gid()).result();
return parent_inode.create_child(p.basename(), S_IFDIR | mode, 0, current_process->uid(), current_process->gid()).result();
}
KResult VFS::access(StringView path, int mode, Custody& base)
@ -416,18 +421,19 @@ KResult VFS::access(StringView path, int mode, Custody& base)
auto& custody = *custody_or_error.value();
auto& inode = custody.inode();
auto metadata = inode.metadata();
auto current_process = Process::current();
if (mode & R_OK) {
if (!metadata.may_read(*Process::current))
if (!metadata.may_read(*current_process))
return KResult(-EACCES);
}
if (mode & W_OK) {
if (!metadata.may_write(*Process::current))
if (!metadata.may_write(*current_process))
return KResult(-EACCES);
if (custody.is_readonly())
return KResult(-EROFS);
}
if (mode & X_OK) {
if (!metadata.may_execute(*Process::current))
if (!metadata.may_execute(*current_process))
return KResult(-EACCES);
}
return KSuccess;
@ -442,7 +448,7 @@ KResultOr<NonnullRefPtr<Custody>> VFS::open_directory(StringView path, Custody&
auto& inode = custody.inode();
if (!inode.is_directory())
return KResult(-ENOTDIR);
if (!inode.metadata().may_execute(*Process::current))
if (!inode.metadata().may_execute(*Process::current()))
return KResult(-EACCES);
return custody;
}
@ -451,7 +457,8 @@ KResult VFS::chmod(Custody& custody, mode_t mode)
{
auto& inode = custody.inode();
if (Process::current->euid() != inode.metadata().uid && !Process::current->is_superuser())
auto current_process = Process::current();
if (current_process->euid() != inode.metadata().uid && !current_process->is_superuser())
return KResult(-EPERM);
if (custody.is_readonly())
return KResult(-EROFS);
@ -492,14 +499,15 @@ KResult VFS::rename(StringView old_path, StringView new_path, Custody& base)
if (&old_parent_inode.fs() != &new_parent_inode.fs())
return KResult(-EXDEV);
if (!new_parent_inode.metadata().may_write(*Process::current))
auto current_process = Process::current();
if (!new_parent_inode.metadata().may_write(*current_process))
return KResult(-EACCES);
if (!old_parent_inode.metadata().may_write(*Process::current))
if (!old_parent_inode.metadata().may_write(*current_process))
return KResult(-EACCES);
if (old_parent_inode.metadata().is_sticky()) {
if (!Process::current->is_superuser() && old_inode.metadata().uid != Process::current->euid())
if (!current_process->is_superuser() && old_inode.metadata().uid != current_process->euid())
return KResult(-EACCES);
}
@ -515,7 +523,7 @@ KResult VFS::rename(StringView old_path, StringView new_path, Custody& base)
if (&new_inode == &old_inode)
return KSuccess;
if (new_parent_inode.metadata().is_sticky()) {
if (!Process::current->is_superuser() && new_inode.metadata().uid != Process::current->euid())
if (!current_process->is_superuser() && new_inode.metadata().uid != current_process->euid())
return KResult(-EACCES);
}
if (new_inode.is_directory() && !old_inode.is_directory())
@ -541,19 +549,20 @@ KResult VFS::chown(Custody& custody, uid_t a_uid, gid_t a_gid)
auto& inode = custody.inode();
auto metadata = inode.metadata();
if (Process::current->euid() != metadata.uid && !Process::current->is_superuser())
auto current_process = Process::current();
if (current_process->euid() != metadata.uid && !current_process->is_superuser())
return KResult(-EPERM);
uid_t new_uid = metadata.uid;
gid_t new_gid = metadata.gid;
if (a_uid != (uid_t)-1) {
if (Process::current->euid() != a_uid && !Process::current->is_superuser())
if (current_process->euid() != a_uid && !current_process->is_superuser())
return KResult(-EPERM);
new_uid = a_uid;
}
if (a_gid != (gid_t)-1) {
if (!Process::current->in_group(a_gid) && !Process::current->is_superuser())
if (!current_process->in_group(a_gid) && !current_process->is_superuser())
return KResult(-EPERM);
new_gid = a_gid;
}
@ -603,7 +612,7 @@ KResult VFS::link(StringView old_path, StringView new_path, Custody& base)
if (parent_inode.fsid() != old_inode.fsid())
return KResult(-EXDEV);
if (!parent_inode.metadata().may_write(*Process::current))
if (!parent_inode.metadata().may_write(*Process::current()))
return KResult(-EACCES);
if (old_inode.is_directory())
@ -633,11 +642,12 @@ KResult VFS::unlink(StringView path, Custody& base)
ASSERT(parent_custody);
auto& parent_inode = parent_custody->inode();
if (!parent_inode.metadata().may_write(*Process::current))
auto current_process = Process::current();
if (!parent_inode.metadata().may_write(*current_process))
return KResult(-EACCES);
if (parent_inode.metadata().is_sticky()) {
if (!Process::current->is_superuser() && inode.metadata().uid != Process::current->euid())
if (!current_process->is_superuser() && inode.metadata().uid != current_process->euid())
return KResult(-EACCES);
}
@ -662,14 +672,15 @@ KResult VFS::symlink(StringView target, StringView linkpath, Custody& base)
if (existing_custody_or_error.error() != -ENOENT)
return existing_custody_or_error.error();
auto& parent_inode = parent_custody->inode();
if (!parent_inode.metadata().may_write(*Process::current))
auto current_process = Process::current();
if (!parent_inode.metadata().may_write(*current_process))
return KResult(-EACCES);
if (parent_custody->is_readonly())
return KResult(-EROFS);
LexicalPath p(linkpath);
dbg() << "VFS::symlink: '" << p.basename() << "' (-> '" << target << "') in " << parent_inode.identifier();
auto inode_or_error = parent_inode.create_child(p.basename(), S_IFLNK | 0644, 0, Process::current->uid(), Process::current->gid());
auto inode_or_error = parent_inode.create_child(p.basename(), S_IFLNK | 0644, 0, current_process->uid(), current_process->gid());
if (inode_or_error.is_error())
return inode_or_error.error();
auto& inode = inode_or_error.value();
@ -700,7 +711,7 @@ KResult VFS::rmdir(StringView path, Custody& base)
auto& parent_inode = parent_custody->inode();
if (!parent_inode.metadata().may_write(*Process::current))
if (!parent_inode.metadata().may_write(*Process::current()))
return KResult(-EACCES);
if (inode.directory_entry_count() != 2)
@ -778,7 +789,7 @@ Custody& VFS::root_custody()
const UnveiledPath* VFS::find_matching_unveiled_path(StringView path)
{
for (auto& unveiled_path : Process::current->unveiled_paths()) {
for (auto& unveiled_path : Process::current()->unveiled_paths()) {
if (path == unveiled_path.path)
return &unveiled_path;
if (path.starts_with(unveiled_path.path) && path.length() > unveiled_path.path.length() && path[unveiled_path.path.length()] == '/')
@ -789,7 +800,7 @@ const UnveiledPath* VFS::find_matching_unveiled_path(StringView path)
KResult VFS::validate_path_against_process_veil(StringView path, int options)
{
if (Process::current->veil_state() == VeilState::None)
if (Process::current()->veil_state() == VeilState::None)
return KSuccess;
// FIXME: Figure out a nicer way to do this.
@ -865,7 +876,8 @@ KResultOr<NonnullRefPtr<Custody>> VFS::resolve_path_without_veil(StringView path
return KResult(-EINVAL);
auto parts = path.split_view('/', true);
auto& current_root = Process::current->root_directory();
auto current_process = Process::current();
auto& current_root = current_process->root_directory();
NonnullRefPtr<Custody> custody = path[0] == '/' ? current_root : base;
@ -875,7 +887,7 @@ KResultOr<NonnullRefPtr<Custody>> VFS::resolve_path_without_veil(StringView path
if (!parent_metadata.is_directory())
return KResult(-ENOTDIR);
// Ensure the current user is allowed to resolve paths inside this directory.
if (!parent_metadata.may_execute(*Process::current))
if (!parent_metadata.may_execute(*current_process))
return KResult(-EACCES);
auto& part = parts[i];


@ -130,8 +130,9 @@ NEVER_INLINE void dump_backtrace_impl(FlatPtr base_pointer, bool use_ksyms)
}
OwnPtr<Process::ELFBundle> elf_bundle;
if (Process::current)
elf_bundle = Process::current->elf_bundle();
auto current_process = Process::current();
if (current_process)
elf_bundle = current_process->elf_bundle();
struct RecognizedSymbol {
FlatPtr address;
@ -142,13 +143,13 @@ NEVER_INLINE void dump_backtrace_impl(FlatPtr base_pointer, bool use_ksyms)
size_t recognized_symbol_count = 0;
if (use_ksyms) {
for (FlatPtr* stack_ptr = (FlatPtr*)base_pointer;
(Process::current ? Process::current->validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2) : 1) && recognized_symbol_count < max_recognized_symbol_count; stack_ptr = (FlatPtr*)*stack_ptr) {
(current_process ? current_process->validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2) : 1) && recognized_symbol_count < max_recognized_symbol_count; stack_ptr = (FlatPtr*)*stack_ptr) {
FlatPtr retaddr = stack_ptr[1];
recognized_symbols[recognized_symbol_count++] = { retaddr, symbolicate_kernel_address(retaddr) };
}
} else {
for (FlatPtr* stack_ptr = (FlatPtr*)base_pointer;
(Process::current ? Process::current->validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2) : 1); stack_ptr = (FlatPtr*)*stack_ptr) {
(current_process ? current_process->validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2) : 1); stack_ptr = (FlatPtr*)*stack_ptr) {
FlatPtr retaddr = stack_ptr[1];
dbg() << String::format("%x", retaddr) << " (next: " << String::format("%x", (stack_ptr ? (u32*)*stack_ptr : 0)) << ")";
}


@ -49,29 +49,31 @@ void Lock::lock(Mode mode)
dump_backtrace();
hang();
}
auto current_thread = Thread::current();
for (;;) {
bool expected = false;
if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
// FIXME: Do not add new readers if writers are queued.
bool modes_dont_conflict = !modes_conflict(m_mode, mode);
bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == Thread::current;
bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
if (modes_dont_conflict || already_hold_exclusive_lock) {
// We got the lock!
if (!already_hold_exclusive_lock)
m_mode = mode;
m_holder = Thread::current;
m_holder = current_thread;
m_times_locked++;
m_lock.store(false, AK::memory_order_release);
return;
}
timeval* timeout = nullptr;
Thread::current->wait_on(m_queue, timeout, &m_lock, m_holder, m_name);
current_thread->wait_on(m_queue, timeout, &m_lock, m_holder, m_name);
}
}
}
void Lock::unlock()
{
auto current_thread = Thread::current();
for (;;) {
bool expected = false;
if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
@ -80,8 +82,8 @@ void Lock::unlock()
ASSERT(m_mode != Mode::Unlocked);
if (m_mode == Mode::Exclusive)
ASSERT(m_holder == Thread::current);
if (m_holder == Thread::current && (m_mode == Mode::Shared || m_times_locked == 0))
ASSERT(m_holder == current_thread);
if (m_holder == current_thread && (m_mode == Mode::Shared || m_times_locked == 0))
m_holder = nullptr;
if (m_times_locked > 0) {
@ -101,7 +103,7 @@ bool Lock::force_unlock_if_locked()
{
ASSERT(m_mode != Mode::Shared);
InterruptDisabler disabler;
if (m_holder != Thread::current)
if (m_holder != Thread::current())
return false;
ASSERT(m_times_locked == 1);
m_holder = nullptr;


@ -416,7 +416,7 @@ void E1000NetworkAdapter::send_raw(const u8* data, size_t length)
sti();
break;
}
Thread::current->wait_on(m_wait_queue);
Thread::current()->wait_on(m_wait_queue);
}
#ifdef E1000_DEBUG
klog() << "E1000: Sent packet, status is now " << String::format("%b", descriptor.status) << "!";


@ -111,9 +111,9 @@ KResult IPv4Socket::bind(const sockaddr* user_address, socklen_t address_size)
return KResult(-EINVAL);
auto requested_local_port = ntohs(address.sin_port);
if (!Process::current->is_superuser()) {
if (!Process::current()->is_superuser()) {
if (requested_local_port < 1024) {
dbg() << "UID " << Process::current->uid() << " attempted to bind " << class_name() << " to port " << requested_local_port;
dbg() << "UID " << Process::current()->uid() << " attempted to bind " << class_name() << " to port " << requested_local_port;
return KResult(-EACCES);
}
}
@ -232,7 +232,7 @@ ssize_t IPv4Socket::sendto(FileDescription&, const void* data, size_t data_lengt
int nsent = protocol_send(data, data_length);
if (nsent > 0)
Thread::current->did_ipv4_socket_write(nsent);
Thread::current()->did_ipv4_socket_write(nsent);
return nsent;
}
@ -246,7 +246,7 @@ ssize_t IPv4Socket::receive_byte_buffered(FileDescription& description, void* bu
return -EAGAIN;
locker.unlock();
auto res = Thread::current->block<Thread::ReadBlocker>(description);
auto res = Thread::current()->block<Thread::ReadBlocker>(description);
locker.lock();
if (!m_can_read) {
@ -261,7 +261,7 @@ ssize_t IPv4Socket::receive_byte_buffered(FileDescription& description, void* bu
ASSERT(!m_receive_buffer.is_empty());
int nreceived = m_receive_buffer.read((u8*)buffer, buffer_length);
if (nreceived > 0)
Thread::current->did_ipv4_socket_read((size_t)nreceived);
Thread::current()->did_ipv4_socket_read((size_t)nreceived);
m_can_read = !m_receive_buffer.is_empty();
return nreceived;
@ -296,7 +296,7 @@ ssize_t IPv4Socket::receive_packet_buffered(FileDescription& description, void*
}
locker.unlock();
auto res = Thread::current->block<Thread::ReadBlocker>(description);
auto res = Thread::current()->block<Thread::ReadBlocker>(description);
locker.lock();
if (!m_can_read) {
@ -354,7 +354,7 @@ ssize_t IPv4Socket::recvfrom(FileDescription& description, void* buffer, size_t
nreceived = receive_packet_buffered(description, buffer, buffer_length, flags, addr, addr_length);
if (nreceived > 0)
Thread::current->did_ipv4_socket_read(nreceived);
Thread::current()->did_ipv4_socket_read(nreceived);
return nreceived;
}
@ -468,7 +468,7 @@ int IPv4Socket::ioctl(FileDescription&, unsigned request, FlatPtr arg)
auto ioctl_route = [request, arg]() {
auto* route = (rtentry*)arg;
if (!Process::current->validate_read_typed(route))
if (!Process::current()->validate_read_typed(route))
return -EFAULT;
char namebuf[IFNAMSIZ + 1];
@ -481,7 +481,7 @@ int IPv4Socket::ioctl(FileDescription&, unsigned request, FlatPtr arg)
switch (request) {
case SIOCADDRT:
if (!Process::current->is_superuser())
if (!Process::current()->is_superuser())
return -EPERM;
if (route->rt_gateway.sa_family != AF_INET)
return -EAFNOSUPPORT;
@ -500,7 +500,7 @@ int IPv4Socket::ioctl(FileDescription&, unsigned request, FlatPtr arg)
auto ioctl_interface = [request, arg]() {
auto* ifr = (ifreq*)arg;
if (!Process::current->validate_read_typed(ifr))
if (!Process::current()->validate_read_typed(ifr))
return -EFAULT;
char namebuf[IFNAMSIZ + 1];
@ -513,7 +513,7 @@ int IPv4Socket::ioctl(FileDescription&, unsigned request, FlatPtr arg)
switch (request) {
case SIOCSIFADDR:
if (!Process::current->is_superuser())
if (!Process::current()->is_superuser())
return -EPERM;
if (ifr->ifr_addr.sa_family != AF_INET)
return -EAFNOSUPPORT;
@ -521,7 +521,7 @@ int IPv4Socket::ioctl(FileDescription&, unsigned request, FlatPtr arg)
return 0;
case SIOCSIFNETMASK:
if (!Process::current->is_superuser())
if (!Process::current()->is_superuser())
return -EPERM;
if (ifr->ifr_addr.sa_family != AF_INET)
return -EAFNOSUPPORT;
@ -529,14 +529,14 @@ int IPv4Socket::ioctl(FileDescription&, unsigned request, FlatPtr arg)
return 0;
case SIOCGIFADDR:
if (!Process::current->validate_write_typed(ifr))
if (!Process::current()->validate_write_typed(ifr))
return -EFAULT;
ifr->ifr_addr.sa_family = AF_INET;
((sockaddr_in&)ifr->ifr_addr).sin_addr.s_addr = adapter->ipv4_address().to_u32();
return 0;
case SIOCGIFHWADDR:
if (!Process::current->validate_write_typed(ifr))
if (!Process::current()->validate_write_typed(ifr))
return -EFAULT;
ifr->ifr_hwaddr.sa_family = AF_INET;
{


@ -63,8 +63,9 @@ LocalSocket::LocalSocket(int type)
LOCKER(all_sockets().lock());
all_sockets().resource().append(this);
m_prebind_uid = Process::current->uid();
m_prebind_gid = Process::current->gid();
auto current_process = Process::current();
m_prebind_uid = current_process->uid();
m_prebind_gid = current_process->gid();
m_prebind_mode = 0666;
#ifdef DEBUG_LOCAL_SOCKET
@ -110,7 +111,7 @@ KResult LocalSocket::bind(const sockaddr* user_address, socklen_t address_size)
mode_t mode = S_IFSOCK | (m_prebind_mode & 04777);
UidAndGid owner { m_prebind_uid, m_prebind_gid };
auto result = VFS::the().open(path, O_CREAT | O_EXCL | O_NOFOLLOW_NOERROR, mode, Process::current->current_directory(), owner);
auto result = VFS::the().open(path, O_CREAT | O_EXCL | O_NOFOLLOW_NOERROR, mode, Process::current()->current_directory(), owner);
if (result.is_error()) {
if (result.error() == -EEXIST)
return KResult(-EADDRINUSE);
@ -148,7 +149,7 @@ KResult LocalSocket::connect(FileDescription& description, const sockaddr* addre
dbg() << "LocalSocket{" << this << "} connect(" << safe_address << ")";
#endif
auto description_or_error = VFS::the().open(safe_address, O_RDWR, 0, Process::current->current_directory());
auto description_or_error = VFS::the().open(safe_address, O_RDWR, 0, Process::current()->current_directory());
if (description_or_error.is_error())
return KResult(-ECONNREFUSED);
@ -175,7 +176,7 @@ KResult LocalSocket::connect(FileDescription& description, const sockaddr* addre
return KSuccess;
}
if (Thread::current->block<Thread::ConnectBlocker>(description) != Thread::BlockResult::WokeNormally) {
if (Thread::current()->block<Thread::ConnectBlocker>(description) != Thread::BlockResult::WokeNormally) {
m_connect_side_role = Role::None;
return KResult(-EINTR);
}
@ -265,7 +266,7 @@ ssize_t LocalSocket::sendto(FileDescription& description, const void* data, size
return -EPIPE;
ssize_t nwritten = send_buffer_for(description).write((const u8*)data, data_size);
if (nwritten > 0)
Thread::current->did_unix_socket_write(nwritten);
Thread::current()->did_unix_socket_write(nwritten);
return nwritten;
}
@ -299,7 +300,7 @@ ssize_t LocalSocket::recvfrom(FileDescription& description, void* buffer, size_t
return -EAGAIN;
}
} else if (!can_read(description, 0)) {
auto result = Thread::current->block<Thread::ReadBlocker>(description);
auto result = Thread::current()->block<Thread::ReadBlocker>(description);
if (result != Thread::BlockResult::WokeNormally)
return -EINTR;
}
@ -308,7 +309,7 @@ ssize_t LocalSocket::recvfrom(FileDescription& description, void* buffer, size_t
ASSERT(!buffer_for_me.is_empty());
int nread = buffer_for_me.read((u8*)buffer, buffer_size);
if (nread > 0)
Thread::current->did_unix_socket_read(nread);
Thread::current()->did_unix_socket_read(nread);
return nread;
}
@ -389,7 +390,8 @@ KResult LocalSocket::chown(FileDescription&, uid_t uid, gid_t gid)
if (m_file)
return m_file->chown(uid, gid);
if (!Process::current->is_superuser() && (Process::current->euid() != uid || !Process::current->in_group(gid)))
auto current_process = Process::current();
if (!current_process->is_superuser() && (current_process->euid() != uid || !current_process->in_group(gid)))
return KResult(-EPERM);
m_prebind_uid = uid;


@ -113,7 +113,7 @@ void NetworkTask_main()
for (;;) {
size_t packet_size = dequeue_packet(buffer, buffer_size);
if (!packet_size) {
Thread::current->wait_on(packet_wait_queue);
Thread::current()->wait_on(packet_wait_queue);
continue;
}
if (packet_size < sizeof(EthernetFrameHeader)) {


@ -135,7 +135,7 @@ RoutingDecision route_to(const IPv4Address& target, const IPv4Address& source, c
request.set_sender_protocol_address(adapter->ipv4_address());
adapter->send({ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, request);
(void)Thread::current->block_until("Routing (ARP)", [next_hop_ip] {
(void)Thread::current()->block_until("Routing (ARP)", [next_hop_ip] {
return arp_table().resource().get(next_hop_ip).has_value();
});


@ -55,7 +55,7 @@ Socket::Socket(int domain, int type, int protocol)
, m_type(type)
, m_protocol(protocol)
{
auto& process = *Process::current;
auto& process = *Process::current();
m_origin = { process.pid(), process.uid(), process.gid() };
}
@ -82,7 +82,7 @@ RefPtr<Socket> Socket::accept()
#endif
auto client = m_pending.take_first();
ASSERT(!client->is_connected());
auto& process = *Process::current;
auto& process = *Process::current();
client->m_acceptor = { process.pid(), process.uid(), process.gid() };
client->m_connected = true;
client->m_role = Role::Accepted;


@ -372,7 +372,7 @@ KResult TCPSocket::protocol_connect(FileDescription& description, ShouldBlock sh
m_direction = Direction::Outgoing;
if (should_block == ShouldBlock::Yes) {
if (Thread::current->block<Thread::ConnectBlocker>(description) != Thread::BlockResult::WokeNormally)
if (Thread::current()->block<Thread::ConnectBlocker>(description) != Thread::BlockResult::WokeNormally)
return KResult(-EINTR);
ASSERT(setup_state() == SetupState::Completed);
if (has_error()) {


@ -67,11 +67,12 @@ KResult PerformanceEventBuffer::append(int type, FlatPtr arg1, FlatPtr arg2)
asm volatile("movl %%ebp, %%eax"
: "=a"(ebp));
FlatPtr eip;
copy_from_user(&eip, (FlatPtr*)&Thread::current->get_register_dump_from_stack().eip);
auto current_thread = Thread::current();
copy_from_user(&eip, (FlatPtr*)&current_thread->get_register_dump_from_stack().eip);
Vector<FlatPtr> backtrace;
{
SmapDisabler disabler;
backtrace = Thread::current->raw_backtrace(ebp, eip);
backtrace = current_thread->raw_backtrace(ebp, eip);
}
event.stack_size = min(sizeof(event.stack) / sizeof(FlatPtr), static_cast<size_t>(backtrace.size()));
memcpy(event.stack, backtrace.data(), event.stack_size * sizeof(FlatPtr));


@ -84,10 +84,10 @@
#include <LibELF/Validation.h>
#include <LibKeyboard/CharacterMapData.h>
//#define PROCESS_DEBUG
#define PROCESS_DEBUG
//#define DEBUG_POLL_SELECT
//#define DEBUG_IO
//#define TASK_DEBUG
#define TASK_DEBUG
//#define FORK_DEBUG
//#define EXEC_DEBUG
//#define SIGNAL_DEBUG
@ -97,9 +97,8 @@ namespace Kernel {
static void create_signal_trampolines();
Process* Process::current;
static pid_t next_pid;
RecursiveSpinLock g_processes_lock;
static Atomic<pid_t> next_pid;
InlineLinkedList<Process>* g_processes;
static String* s_hostname;
static Lock* s_hostname_lock;
@ -108,15 +107,14 @@ HashMap<String, OwnPtr<Module>>* g_modules;
pid_t Process::allocate_pid()
{
InterruptDisabler disabler;
return next_pid++;
return next_pid.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
}
void Process::initialize()
{
g_modules = new HashMap<String, OwnPtr<Module>>;
next_pid = 0;
next_pid.store(0, AK::MemoryOrder::memory_order_release);
g_processes = new InlineLinkedList<Process>;
s_hostname = new String("courage");
s_hostname_lock = new Lock;
@ -127,7 +125,7 @@ void Process::initialize()
Vector<pid_t> Process::all_pids()
{
Vector<pid_t> pids;
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
pids.ensure_capacity((int)g_processes->size_slow());
for (auto& process : *g_processes)
pids.append(process.pid());
@ -137,7 +135,7 @@ Vector<pid_t> Process::all_pids()
Vector<Process*> Process::all_processes()
{
Vector<Process*> processes;
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
processes.ensure_capacity((int)g_processes->size_slow());
for (auto& process : *g_processes)
processes.append(&process);
@ -234,7 +232,7 @@ Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size
bool Process::deallocate_region(Region& region)
{
InterruptDisabler disabler;
ScopedSpinLock lock(m_lock);
if (m_region_lookup_cache.region == &region)
m_region_lookup_cache.region = nullptr;
for (size_t i = 0; i < m_regions.size(); ++i) {
@ -248,6 +246,7 @@ bool Process::deallocate_region(Region& region)
Region* Process::region_from_range(const Range& range)
{
ScopedSpinLock lock(m_lock);
if (m_region_lookup_cache.range == range && m_region_lookup_cache.region)
return m_region_lookup_cache.region;
@ -264,6 +263,7 @@ Region* Process::region_from_range(const Range& range)
Region* Process::region_containing(const Range& range)
{
ScopedSpinLock lock(m_lock);
for (auto& region : m_regions) {
if (region.contains(range))
return &region;
@ -738,17 +738,6 @@ pid_t Process::sys$fork(RegisterState& regs)
dbg() << "fork: child=" << child;
#endif
for (auto& region : m_regions) {
#ifdef FORK_DEBUG
dbg() << "fork: cloning Region{" << &region << "} '" << region.name() << "' @ " << region.vaddr();
#endif
auto& child_region = child->add_region(region.clone());
child_region.map(child->page_directory());
if (&region == m_master_tls_region)
child->m_master_tls_region = child_region.make_weak_ptr();
}
child->m_extra_gids = m_extra_gids;
auto& child_tss = child_first_thread->m_tss;
@ -773,8 +762,20 @@ pid_t Process::sys$fork(RegisterState& regs)
dbg() << "fork: child will begin executing at " << String::format("%w", child_tss.cs) << ":" << String::format("%x", child_tss.eip) << " with stack " << String::format("%w", child_tss.ss) << ":" << String::format("%x", child_tss.esp) << ", kstack " << String::format("%w", child_tss.ss0) << ":" << String::format("%x", child_tss.esp0);
#endif
ScopedSpinLock lock(m_lock);
for (auto& region : m_regions) {
#ifdef FORK_DEBUG
dbg() << "fork: cloning Region{" << &region << "} '" << region.name() << "' @ " << region.vaddr();
#endif
auto& child_region = child->add_region(region.clone());
child_region.map(child->page_directory());
if (&region == m_master_tls_region)
child->m_master_tls_region = child_region.make_weak_ptr();
}
{
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
g_processes->prepend(child);
}
#ifdef TASK_DEBUG
@ -789,11 +790,12 @@ void Process::kill_threads_except_self()
{
InterruptDisabler disabler;
if (m_thread_count <= 1)
if (thread_count() <= 1)
return;
auto current_thread = Thread::current();
for_each_thread([&](Thread& thread) {
if (&thread == Thread::current
if (&thread == current_thread
|| thread.state() == Thread::State::Dead
|| thread.state() == Thread::State::Dying)
return IterationDecision::Continue;
@ -857,7 +859,8 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
// Mark this thread as the current thread that does exec
// No other thread from this process will be scheduled to run
m_exec_tid = Thread::current->tid();
auto current_thread = Thread::current();
m_exec_tid = current_thread->tid();
RefPtr<PageDirectory> old_page_directory;
NonnullOwnPtrVector<Region> old_regions;
@ -905,7 +908,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
RefPtr<ELF::Loader> loader;
{
ArmedScopeGuard rollback_regions_guard([&]() {
ASSERT(Process::current == this);
ASSERT(Process::current() == this);
// Need to make sure we don't swap contexts in the middle
InterruptDisabler disabler;
m_page_directory = move(old_page_directory);
@ -1005,9 +1008,9 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
m_egid = m_sgid = main_program_metadata.gid;
}
Thread::current->set_default_signal_dispositions();
Thread::current->m_signal_mask = 0;
Thread::current->m_pending_signals = 0;
current_thread->set_default_signal_dispositions();
current_thread->m_signal_mask = 0;
current_thread->m_pending_signals = 0;
m_futex_queues.clear();
@ -1024,8 +1027,8 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
}
Thread* new_main_thread = nullptr;
if (Process::current == this) {
new_main_thread = Thread::current;
if (&current_thread->process() == this) {
new_main_thread = current_thread;
} else {
for_each_thread([&](auto& thread) {
new_main_thread = &thread;
@ -1041,7 +1044,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
// We cli() manually here because we don't want to get interrupted between do_exec() and Processor::assume_context().
// The reason is that the task redirection we've set up above will be clobbered by the timer IRQ.
// If we used an InterruptDisabler that sti()'d on exit, we might timer tick'd too soon in exec().
if (Process::current == this)
if (&current_thread->process() == this)
cli();
// NOTE: Be careful to not trigger any page faults below!
@ -1253,16 +1256,17 @@ int Process::exec(String path, Vector<String> arguments, Vector<String> environm
if (rc < 0)
return rc;
auto current_thread = Thread::current();
if (m_wait_for_tracer_at_next_execve) {
ASSERT(Thread::current->state() == Thread::State::Skip1SchedulerPass);
ASSERT(current_thread->state() == Thread::State::Skip1SchedulerPass);
// State::Skip1SchedulerPass is irrelevant since we block the thread
Thread::current->set_state(Thread::State::Running);
Thread::current->send_urgent_signal_to_self(SIGSTOP);
current_thread->set_state(Thread::State::Running);
current_thread->send_urgent_signal_to_self(SIGSTOP);
}
if (Process::current == this) {
Thread::current->set_state(Thread::State::Running);
Processor::assume_context(*Thread::current);
if (&current_thread->process() == this) {
current_thread->set_state(Thread::State::Running);
Processor::assume_context(*current_thread);
ASSERT_NOT_REACHED();
}
return 0;
@ -1282,7 +1286,7 @@ int Process::sys$execve(const Syscall::SC_execve_params* user_params)
return -E2BIG;
if (m_wait_for_tracer_at_next_execve)
Thread::current->send_urgent_signal_to_self(SIGSTOP);
Thread::current()->send_urgent_signal_to_self(SIGSTOP);
String path;
{
@ -1331,7 +1335,7 @@ Process* Process::create_user_process(Thread*& first_thread, const String& path,
RefPtr<Custody> cwd;
RefPtr<Custody> root;
{
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
if (auto* parent = Process::from_pid(parent_pid)) {
cwd = parent->m_cwd;
root = parent->m_root_directory;
@ -1361,7 +1365,7 @@ Process* Process::create_user_process(Thread*& first_thread, const String& path,
}
{
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
g_processes->prepend(process);
}
#ifdef TASK_DEBUG
@ -1371,19 +1375,20 @@ Process* Process::create_user_process(Thread*& first_thread, const String& path,
return process;
}
Process* Process::create_kernel_process(Thread*& first_thread, String&& name, void (*e)())
Process* Process::create_kernel_process(Thread*& first_thread, String&& name, void (*e)(), u32 affinity)
{
auto* process = new Process(first_thread, move(name), (uid_t)0, (gid_t)0, (pid_t)0, Ring0);
first_thread->tss().eip = (FlatPtr)e;
if (process->pid() != 0) {
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
g_processes->prepend(process);
#ifdef TASK_DEBUG
klog() << "Kernel process " << process->pid() << " (" << process->name().characters() << ") spawned @ " << String::format("%p", first_thread->tss().eip);
#endif
}
first_thread->set_affinity(affinity);
first_thread->set_state(Thread::State::Runnable);
return process;
}
@ -1414,7 +1419,7 @@ Process::Process(Thread*& first_thread, const String& name, uid_t uid, gid_t gid
if (fork_parent) {
// NOTE: fork() doesn't clone all threads; the thread that called fork() becomes the only thread in the new process.
first_thread = Thread::current->clone(*this);
first_thread = Thread::current()->clone(*this);
} else {
// NOTE: This non-forked code path is only taken when the kernel creates a process "manually" (at boot.)
first_thread = new Thread(*this);
@ -1449,7 +1454,7 @@ void Process::sys$exit(int status)
m_termination_status = status;
m_termination_signal = 0;
die();
Thread::current->die_if_needed();
Thread::current()->die_if_needed();
ASSERT_NOT_REACHED();
}
@ -1513,7 +1518,7 @@ int Process::sys$sigreturn(RegisterState& registers)
//pop the stored eax, ebp, return address, handler and signal code
stack_ptr += 5;
Thread::current->m_signal_mask = *stack_ptr;
Thread::current()->m_signal_mask = *stack_ptr;
stack_ptr++;
//pop edi, esi, ebp, esp, ebx, edx, ecx and eax
@ -1534,7 +1539,7 @@ void Process::crash(int signal, u32 eip, bool out_of_memory)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(!is_dead());
ASSERT(Process::current == this);
ASSERT(Process::current() == this);
if (out_of_memory) {
dbg() << "\033[31;1mOut of memory\033[m, killing: " << *this;
@ -1555,13 +1560,14 @@ void Process::crash(int signal, u32 eip, bool out_of_memory)
die();
// We can not return from here, as there is nowhere
// to unwind to, so die right away.
Thread::current->die_if_needed();
Thread::current()->die_if_needed();
ASSERT_NOT_REACHED();
}
Process* Process::from_pid(pid_t pid)
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(g_processes_lock);
for (auto& process : *g_processes) {
if (process.pid() == pid)
return &process;
@ -1713,7 +1719,7 @@ ssize_t Process::do_write(FileDescription& description, const u8* data, int data
#ifdef IO_DEBUG
dbg() << "block write on " << description.absolute_path();
#endif
if (Thread::current->block<Thread::WriteBlocker>(description) != Thread::BlockResult::WokeNormally) {
if (Thread::current()->block<Thread::WriteBlocker>(description) != Thread::BlockResult::WokeNormally) {
if (nwritten == 0)
return -EINTR;
}
@ -1776,7 +1782,7 @@ ssize_t Process::sys$read(int fd, u8* buffer, ssize_t size)
return -EISDIR;
if (description->is_blocking()) {
if (!description->can_read()) {
if (Thread::current->block<Thread::ReadBlocker>(*description) != Thread::BlockResult::WokeNormally)
if (Thread::current()->block<Thread::ReadBlocker>(*description) != Thread::BlockResult::WokeNormally)
return -EINTR;
if (!description->can_read())
return -EAGAIN;
@ -2323,6 +2329,7 @@ KResult Process::do_killall(int signal)
KResult error = KSuccess;
// Send the signal to all processes we have access to for.
ScopedSpinLock lock(g_processes_lock);
for (auto& process : *g_processes) {
KResult res = KSuccess;
if (process.pid() == m_pid)
@ -2346,9 +2353,10 @@ KResult Process::do_killself(int signal)
if (signal == 0)
return KSuccess;
if (!Thread::current->should_ignore_signal(signal)) {
Thread::current->send_signal(signal, this);
(void)Thread::current->block<Thread::SemiPermanentBlocker>(Thread::SemiPermanentBlocker::Reason::Signal);
auto current_thread = Thread::current();
if (!current_thread->should_ignore_signal(signal)) {
current_thread->send_signal(signal, this);
(void)current_thread->block<Thread::SemiPermanentBlocker>(Thread::SemiPermanentBlocker::Reason::Signal);
}
return KSuccess;
@ -2373,7 +2381,7 @@ int Process::sys$kill(pid_t pid, int signal)
if (pid == m_pid) {
return do_killself(signal);
}
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
auto* peer = Process::from_pid(pid);
if (!peer)
return -ESRCH;
@ -2385,7 +2393,7 @@ int Process::sys$usleep(useconds_t usec)
REQUIRE_PROMISE(stdio);
if (!usec)
return 0;
u64 wakeup_time = Thread::current->sleep(usec / 1000);
u64 wakeup_time = Thread::current()->sleep(usec / 1000);
if (wakeup_time > g_uptime)
return -EINTR;
return 0;
@ -2396,7 +2404,7 @@ int Process::sys$sleep(unsigned seconds)
REQUIRE_PROMISE(stdio);
if (!seconds)
return 0;
u64 wakeup_time = Thread::current->sleep(seconds * TimeManagement::the().ticks_per_second());
u64 wakeup_time = Thread::current()->sleep(seconds * TimeManagement::the().ticks_per_second());
if (wakeup_time > g_uptime) {
u32 ticks_left_until_original_wakeup_time = wakeup_time - g_uptime;
return ticks_left_until_original_wakeup_time / TimeManagement::the().ticks_per_second();
@ -2519,23 +2527,22 @@ siginfo_t Process::reap(Process& process)
siginfo.si_code = CLD_EXITED;
}
{
InterruptDisabler disabler;
ASSERT(g_processes_lock.is_locked());
if (process.ppid()) {
auto* parent = Process::from_pid(process.ppid());
if (parent) {
parent->m_ticks_in_user_for_dead_children += process.m_ticks_in_user + process.m_ticks_in_user_for_dead_children;
parent->m_ticks_in_kernel_for_dead_children += process.m_ticks_in_kernel + process.m_ticks_in_kernel_for_dead_children;
}
if (process.ppid()) {
auto* parent = Process::from_pid(process.ppid());
if (parent) {
parent->m_ticks_in_user_for_dead_children += process.m_ticks_in_user + process.m_ticks_in_user_for_dead_children;
parent->m_ticks_in_kernel_for_dead_children += process.m_ticks_in_kernel + process.m_ticks_in_kernel_for_dead_children;
}
}
#ifdef PROCESS_DEBUG
dbg() << "Reaping process " << process;
dbg() << "Reaping process " << process;
#endif
ASSERT(process.is_dead());
g_processes->remove(&process);
}
ASSERT(process.is_dead());
g_processes->remove(&process);
delete &process;
return siginfo;
}
@ -2543,7 +2550,7 @@ siginfo_t Process::reap(Process& process)
KResultOr<siginfo_t> Process::do_waitid(idtype_t idtype, int id, int options)
{
if (idtype == P_PID) {
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
if (idtype == P_PID && !Process::from_pid(id))
return KResult(-ECHILD);
}
@ -2560,10 +2567,10 @@ KResultOr<siginfo_t> Process::do_waitid(idtype_t idtype, int id, int options)
return KResult(-EINVAL);
}
if (Thread::current->block<Thread::WaitBlocker>(options, waitee_pid) != Thread::BlockResult::WokeNormally)
if (Thread::current()->block<Thread::WaitBlocker>(options, waitee_pid) != Thread::BlockResult::WokeNormally)
return KResult(-EINTR);
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
// NOTE: If waitee was -1, m_waitee_pid will have been filled in by the scheduler.
Process* waitee_process = Process::from_pid(waitee_pid);
@ -2660,7 +2667,7 @@ pid_t Process::sys$getsid(pid_t pid)
REQUIRE_PROMISE(proc);
if (pid == 0)
return m_sid;
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
auto* process = Process::from_pid(pid);
if (!process)
return -ESRCH;
@ -2691,7 +2698,7 @@ pid_t Process::sys$getpgid(pid_t pid)
REQUIRE_PROMISE(proc);
if (pid == 0)
return m_pgid;
InterruptDisabler disabler; // FIXME: Use a ProcessHandle
ScopedSpinLock lock(g_processes_lock); // FIXME: Use a ProcessHandle
auto* process = Process::from_pid(pid);
if (!process)
return -ESRCH;
@ -2706,7 +2713,7 @@ pid_t Process::sys$getpgrp()
static pid_t get_sid_from_pgid(pid_t pgid)
{
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
auto* group_leader = Process::from_pid(pgid);
if (!group_leader)
return -1;
@ -2716,7 +2723,7 @@ static pid_t get_sid_from_pgid(pid_t pgid)
int Process::sys$setpgid(pid_t specified_pid, pid_t specified_pgid)
{
REQUIRE_PROMISE(proc);
InterruptDisabler disabler; // FIXME: Use a ProcessHandle
ScopedSpinLock lock(g_processes_lock); // FIXME: Use a ProcessHandle
pid_t pid = specified_pid ? specified_pid : m_pid;
if (specified_pgid < 0) {
// The value of the pgid argument is less than 0, or is not a value supported by the implementation.
@ -2790,10 +2797,11 @@ int Process::sys$dup2(int old_fd, int new_fd)
int Process::sys$sigprocmask(int how, const sigset_t* set, sigset_t* old_set)
{
REQUIRE_PROMISE(sigaction);
auto current_thread = Thread::current();
if (old_set) {
if (!validate_write_typed(old_set))
return -EFAULT;
copy_to_user(old_set, &Thread::current->m_signal_mask);
copy_to_user(old_set, &current_thread->m_signal_mask);
}
if (set) {
if (!validate_read_typed(set))
@ -2802,13 +2810,13 @@ int Process::sys$sigprocmask(int how, const sigset_t* set, sigset_t* old_set)
copy_from_user(&set_value, set);
switch (how) {
case SIG_BLOCK:
Thread::current->m_signal_mask &= ~set_value;
current_thread->m_signal_mask &= ~set_value;
break;
case SIG_UNBLOCK:
Thread::current->m_signal_mask |= set_value;
current_thread->m_signal_mask |= set_value;
break;
case SIG_SETMASK:
Thread::current->m_signal_mask = set_value;
current_thread->m_signal_mask = set_value;
break;
default:
return -EINVAL;
@ -2822,7 +2830,7 @@ int Process::sys$sigpending(sigset_t* set)
REQUIRE_PROMISE(stdio);
if (!validate_write_typed(set))
return -EFAULT;
copy_to_user(set, &Thread::current->m_pending_signals);
copy_to_user(set, &Thread::current()->m_pending_signals);
return 0;
}
@ -2834,7 +2842,7 @@ int Process::sys$sigaction(int signum, const sigaction* act, sigaction* old_act)
if (!validate_read_typed(act))
return -EFAULT;
InterruptDisabler disabler; // FIXME: This should use a narrower lock. Maybe a way to ignore signals temporarily?
auto& action = Thread::current->m_signal_action_data[signum];
auto& action = Thread::current()->m_signal_action_data[signum];
if (old_act) {
if (!validate_write_typed(old_act))
return -EFAULT;
@ -2988,9 +2996,10 @@ int Process::sys$select(const Syscall::SC_select_params* params)
select_has_timeout = true;
}
ScopedValueRollback scoped_sigmask(Thread::current->m_signal_mask);
auto current_thread = Thread::current();
ScopedValueRollback scoped_sigmask(current_thread->m_signal_mask);
if (sigmask)
Thread::current->m_signal_mask = *sigmask;
current_thread->m_signal_mask = *sigmask;
Thread::SelectBlocker::FDVector rfds;
Thread::SelectBlocker::FDVector wfds;
@ -3023,7 +3032,7 @@ int Process::sys$select(const Syscall::SC_select_params* params)
#endif
if (!timeout || select_has_timeout) {
if (Thread::current->block<Thread::SelectBlocker>(computed_timeout, select_has_timeout, rfds, wfds, efds) != Thread::BlockResult::WokeNormally)
if (current_thread->block<Thread::SelectBlocker>(computed_timeout, select_has_timeout, rfds, wfds, efds) != Thread::BlockResult::WokeNormally)
return -EINTR;
// While we blocked, the process lock was dropped. This gave other threads
// the opportunity to mess with the memory. For example, it could free the
@ -3099,16 +3108,17 @@ int Process::sys$poll(const Syscall::SC_poll_params* params)
has_timeout = true;
}
ScopedValueRollback scoped_sigmask(Thread::current->m_signal_mask);
auto current_thread = Thread::current();
ScopedValueRollback scoped_sigmask(current_thread->m_signal_mask);
if (sigmask)
Thread::current->m_signal_mask = *sigmask;
current_thread->m_signal_mask = *sigmask;
#if defined(DEBUG_IO) || defined(DEBUG_POLL_SELECT)
dbg() << "polling on (read:" << rfds.size() << ", write:" << wfds.size() << "), timeout=" << timeout;
#endif
if (!timeout || has_timeout) {
if (Thread::current->block<Thread::SelectBlocker>(actual_timeout, has_timeout, rfds, wfds, Thread::SelectBlocker::FDVector()) != Thread::BlockResult::WokeNormally)
if (current_thread->block<Thread::SelectBlocker>(actual_timeout, has_timeout, rfds, wfds, Thread::SelectBlocker::FDVector()) != Thread::BlockResult::WokeNormally)
return -EINTR;
}
@ -3245,7 +3255,7 @@ int Process::sys$chown(const Syscall::SC_chown_params* user_params)
void Process::finalize()
{
ASSERT(Thread::current == g_finalizer);
ASSERT(Thread::current() == g_finalizer);
#ifdef PROCESS_DEBUG
dbg() << "Finalizing process " << *this;
#endif
@ -3460,7 +3470,7 @@ int Process::sys$accept(int accepting_socket_fd, sockaddr* user_address, socklen
if (!socket.can_accept()) {
if (accepting_socket_description->is_blocking()) {
if (Thread::current->block<Thread::AcceptBlocker>(*accepting_socket_description) != Thread::BlockResult::WokeNormally)
if (Thread::current()->block<Thread::AcceptBlocker>(*accepting_socket_description) != Thread::BlockResult::WokeNormally)
return -EINTR;
} else {
return -EAGAIN;
@ -3663,7 +3673,7 @@ int Process::sys$sched_setparam(int tid, const struct sched_param* param)
copy_from_user(&desired_priority, &param->sched_priority);
InterruptDisabler disabler;
auto* peer = Thread::current;
auto* peer = Thread::current();
if (tid != 0)
peer = Thread::from_tid(tid);
@ -3687,7 +3697,7 @@ int Process::sys$sched_getparam(pid_t pid, struct sched_param* param)
return -EFAULT;
InterruptDisabler disabler;
auto* peer = Thread::current;
auto* peer = Thread::current();
if (pid != 0)
peer = Thread::from_tid(pid);
@ -3808,7 +3818,7 @@ int Process::sys$shbuf_allow_pid(int shbuf_id, pid_t peer_pid)
if (!shared_buffer.is_shared_with(m_pid))
return -EPERM;
{
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
auto* peer = Process::from_pid(peer_pid);
if (!peer)
return -ESRCH;
@ -3987,14 +3997,35 @@ int Process::sys$create_thread(void* (*entry)(void*), const Syscall::SC_create_t
return thread->tid();
}
Thread* Process::create_kernel_thread(void (*entry)(), u32 priority, const String& name, u32 affinity, bool joinable)
{
ASSERT((priority >= THREAD_PRIORITY_MIN) && (priority <= THREAD_PRIORITY_MAX));
// FIXME: Do something with guard pages?
auto* thread = new Thread(*this);
thread->set_name(name);
thread->set_affinity(affinity);
thread->set_priority(priority);
thread->set_joinable(joinable);
auto& tss = thread->tss();
tss.eip = (FlatPtr)entry;
thread->set_state(Thread::State::Runnable);
return thread;
}
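create_kernel_thread() is the new hook for adding extra threads to an existing kernel process; the scheduler uses it further down to give each secondary CPU an idle thread inside the colonel process. A hedged usage sketch (worker_entry and spawn_low_priority_worker are hypothetical names):

static void worker_entry()
{
    for (;;) {
        // Periodic kernel work would go here.
        Thread::current()->sleep(TimeManagement::the().ticks_per_second());
    }
}

void spawn_low_priority_worker(Process& kernel_process)
{
    // Non-joinable worker with default CPU affinity; create_kernel_thread()
    // already sets the new thread Runnable before returning.
    auto* thread = kernel_process.create_kernel_thread(worker_entry, THREAD_PRIORITY_LOW, "worker", THREAD_AFFINITY_DEFAULT, false);
    ASSERT(thread);
}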
void Process::sys$exit_thread(void* exit_value)
{
REQUIRE_PROMISE(thread);
cli();
Thread::current->m_exit_value = exit_value;
Thread::current->set_should_die();
auto current_thread = Thread::current();
current_thread->m_exit_value = exit_value;
current_thread->set_should_die();
big_lock().force_unlock_if_locked();
Thread::current->die_if_needed();
current_thread->die_if_needed();
ASSERT_NOT_REACHED();
}
@ -4024,13 +4055,14 @@ int Process::sys$join_thread(int tid, void** exit_value)
if (!thread || thread->pid() != pid())
return -ESRCH;
if (thread == Thread::current)
auto current_thread = Thread::current();
if (thread == current_thread)
return -EDEADLK;
if (thread->m_joinee == Thread::current)
if (thread->m_joinee == current_thread)
return -EDEADLK;
ASSERT(thread->m_joiner != Thread::current);
ASSERT(thread->m_joiner != current_thread);
if (thread->m_joiner)
return -EINVAL;
@ -4041,15 +4073,15 @@ int Process::sys$join_thread(int tid, void** exit_value)
// NOTE: pthread_join() cannot be interrupted by signals. Only by death.
for (;;) {
auto result = Thread::current->block<Thread::JoinBlocker>(*thread, joinee_exit_value);
auto result = current_thread->block<Thread::JoinBlocker>(*thread, joinee_exit_value);
if (result == Thread::BlockResult::InterruptedByDeath) {
// NOTE: This cleans things up so that Thread::finalize() won't
// get confused about a missing joiner when finalizing the joinee.
InterruptDisabler disabler_t;
if (Thread::current->m_joinee) {
Thread::current->m_joinee->m_joiner = nullptr;
Thread::current->m_joinee = nullptr;
if (current_thread->m_joinee) {
current_thread->m_joinee->m_joiner = nullptr;
current_thread->m_joinee = nullptr;
}
break;
@ -4107,7 +4139,7 @@ int Process::sys$get_thread_name(int tid, char* buffer, size_t buffer_size)
int Process::sys$gettid()
{
REQUIRE_PROMISE(stdio);
return Thread::current->tid();
return Thread::current()->tid();
}
int Process::sys$donate(int tid)
@ -4528,12 +4560,12 @@ int Process::sys$clock_nanosleep(const Syscall::SC_clock_nanosleep_params* user_
u64 wakeup_time;
if (is_absolute) {
u64 time_to_wake = (requested_sleep.tv_sec * 1000 + requested_sleep.tv_nsec / 1000000);
wakeup_time = Thread::current->sleep_until(time_to_wake);
wakeup_time = Thread::current()->sleep_until(time_to_wake);
} else {
u32 ticks_to_sleep = (requested_sleep.tv_sec * 1000 + requested_sleep.tv_nsec / 1000000);
if (!ticks_to_sleep)
return 0;
wakeup_time = Thread::current->sleep(ticks_to_sleep);
wakeup_time = Thread::current()->sleep(ticks_to_sleep);
}
if (wakeup_time > g_uptime) {
u32 ticks_left = wakeup_time - g_uptime;
@ -4571,14 +4603,14 @@ int Process::sys$sync()
int Process::sys$yield()
{
REQUIRE_PROMISE(stdio);
Thread::current->yield_without_holding_big_lock();
Thread::current()->yield_without_holding_big_lock();
return 0;
}
int Process::sys$beep()
{
PCSpeaker::tone_on(440);
u64 wakeup_time = Thread::current->sleep(100);
u64 wakeup_time = Thread::current()->sleep(100);
PCSpeaker::tone_off();
if (wakeup_time > g_uptime)
return -EINTR;
@ -4738,7 +4770,7 @@ int Process::sys$module_unload(const char* user_name, size_t name_length)
int Process::sys$profiling_enable(pid_t pid)
{
REQUIRE_NO_PROMISES;
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
auto* process = Process::from_pid(pid);
if (!process)
return -ESRCH;
@ -4753,7 +4785,7 @@ int Process::sys$profiling_enable(pid_t pid)
int Process::sys$profiling_disable(pid_t pid)
{
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
auto* process = Process::from_pid(pid);
if (!process)
return -ESRCH;
@ -4811,7 +4843,7 @@ int Process::sys$futex(const Syscall::SC_futex_params* user_params)
}
// FIXME: This is supposed to be interruptible by a signal, but right now WaitQueue cannot be interrupted.
Thread::BlockResult result = Thread::current->wait_on(wait_queue, optional_timeout);
Thread::BlockResult result = Thread::current()->wait_on(wait_queue, optional_timeout);
if (result == Thread::BlockResult::InterruptedByTimeout) {
return -ETIMEDOUT;
}
@ -4854,7 +4886,7 @@ int Process::sys$set_process_boost(pid_t pid, int amount)
REQUIRE_PROMISE(proc);
if (amount < 0 || amount > 20)
return -EINVAL;
InterruptDisabler disabler;
ScopedSpinLock lock(g_processes_lock);
auto* process = Process::from_pid(pid);
if (!process || process->is_dead())
return -ESRCH;
@ -4976,6 +5008,7 @@ int Process::sys$pledge(const Syscall::SC_pledge_params* user_params)
Region& Process::add_region(NonnullOwnPtr<Region> region)
{
auto* ptr = region.ptr();
ScopedSpinLock lock(m_lock);
m_regions.append(move(region));
return *ptr;
}
@ -5091,7 +5124,7 @@ int Process::sys$get_stack_bounds(FlatPtr* user_stack_base, size_t* user_stack_s
if (!validate_write_typed(user_stack_size))
return -EFAULT;
FlatPtr stack_pointer = Thread::current->get_register_dump_from_stack().userspace_esp;
FlatPtr stack_pointer = Thread::current()->get_register_dump_from_stack().userspace_esp;
auto* stack_region = MM.region_from_vaddr(*this, VirtualAddress(stack_pointer));
if (!stack_region) {
ASSERT_NOT_REACHED();

View file

@ -111,15 +111,21 @@ class Process : public InlineLinkedListNode<Process> {
friend class Thread;
public:
static Process* current;
inline static Process* current()
{
auto current_thread = Processor::current().current_thread();
return current_thread ? &current_thread->process() : nullptr;
}
static Process* create_kernel_process(Thread*& first_thread, String&& name, void (*entry)());
static Process* create_kernel_process(Thread*& first_thread, String&& name, void (*entry)(), u32 affinity = THREAD_AFFINITY_DEFAULT);
static Process* create_user_process(Thread*& first_thread, const String& path, uid_t, gid_t, pid_t ppid, int& error, Vector<String>&& arguments = Vector<String>(), Vector<String>&& environment = Vector<String>(), TTY* = nullptr);
~Process();
static Vector<pid_t> all_pids();
static Vector<Process*> all_processes();
Thread* create_kernel_thread(void (*entry)(), u32 priority, const String& name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);
bool is_profiling() const { return m_profiling; }
void set_profiling(bool profiling) { m_profiling = profiling; }
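Because Process::current() above is now a per-processor lookup that can return nullptr before the first thread is scheduled, call sites in this commit cache the pointer in a local and null-check it instead of dereferencing a global. A minimal sketch of that pattern (log_current_process is a hypothetical helper):

static void log_current_process()
{
    auto* process = Process::current();
    if (!process) {
        // No thread is scheduled on this CPU yet (early boot).
        dbg() << "no current process";
        return;
    }
    dbg() << "current process: " << process->name() << " (" << process->pid() << ")";
}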
@ -417,7 +423,7 @@ public:
void terminate_due_to_signal(u8 signal);
KResult send_signal(u8 signal, Process* sender);
u16 thread_count() const { return m_thread_count; }
u16 thread_count() const { return m_thread_count.load(AK::MemoryOrder::memory_order_consume); }
Lock& big_lock() { return m_big_lock; }
@ -518,7 +524,7 @@ private:
RingLevel m_ring { Ring0 };
u8 m_termination_status { 0 };
u8 m_termination_signal { 0 };
u16 m_thread_count { 0 };
Atomic<u16> m_thread_count { 0 };
bool m_dead { false };
bool m_profiling { false };
@ -550,6 +556,7 @@ private:
size_t m_master_tls_alignment { 0 };
Lock m_big_lock { "Process" };
SpinLock<u32> m_lock;
u64 m_alarm_deadline { 0 };
@ -581,14 +588,14 @@ public:
ProcessInspectionHandle(Process& process)
: m_process(process)
{
if (&process != Process::current) {
if (&process != Process::current()) {
InterruptDisabler disabler;
m_process.increment_inspector_count({});
}
}
~ProcessInspectionHandle()
{
if (&m_process != Process::current) {
if (&m_process != Process::current()) {
InterruptDisabler disabler;
m_process.decrement_inspector_count({});
}
@ -613,11 +620,13 @@ private:
};
extern InlineLinkedList<Process>* g_processes;
extern RecursiveSpinLock g_processes_lock;
template<typename Callback>
inline void Process::for_each(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(g_processes_lock);
for (auto* process = g_processes->head(); process;) {
auto* next_process = process->next();
if (callback(*process) == IterationDecision::Break)
@ -631,6 +640,7 @@ inline void Process::for_each_child(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
pid_t my_pid = pid();
ScopedSpinLock lock(g_processes_lock);
for (auto* process = g_processes->head(); process;) {
auto* next_process = process->next();
if (process->ppid() == my_pid || process->has_tracee_thread(m_pid)) {
@ -649,7 +659,14 @@ inline void Process::for_each_thread(Callback callback) const
if (my_pid == 0) {
// NOTE: Special case the colonel process, since its main thread is not in the global thread table.
callback(*g_colonel);
Processor::for_each(
[&](Processor& proc) -> IterationDecision
{
auto idle_thread = proc.idle_thread();
if (idle_thread != nullptr)
return callback(*idle_thread);
return IterationDecision::Continue;
});
return;
}
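Iterating the colonel's threads now goes through Processor::for_each to reach the per-CPU idle threads, since those are not in the global thread table. A hypothetical debug helper in Scheduler.cpp (illustration only) that relies on this:

static size_t count_idle_threads()
{
    size_t count = 0;
    s_colonel_process->for_each_thread([&](Thread&) {
        ++count;
        return IterationDecision::Continue;
    });
    return count; // for the colonel, this visits each processor's idle thread
}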
@ -665,6 +682,7 @@ template<typename Callback>
inline void Process::for_each_in_pgrp(pid_t pgid, Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(g_processes_lock);
for (auto* process = g_processes->head(); process;) {
auto* next_process = process->next();
if (!process->is_dead() && process->pgid() == pgid) {
@ -705,25 +723,25 @@ inline u32 Thread::effective_priority() const
return m_priority + m_process.priority_boost() + m_priority_boost + m_extra_priority;
}
#define REQUIRE_NO_PROMISES \
do { \
if (Process::current->has_promises()) { \
dbg() << "Has made a promise"; \
cli(); \
Process::current->crash(SIGABRT, 0); \
ASSERT_NOT_REACHED(); \
} \
#define REQUIRE_NO_PROMISES \
do { \
if (Process::current()->has_promises()) { \
dbg() << "Has made a promise"; \
cli(); \
Process::current()->crash(SIGABRT, 0); \
ASSERT_NOT_REACHED(); \
} \
} while (0)
#define REQUIRE_PROMISE(promise) \
do { \
if (Process::current->has_promises() \
&& !Process::current->has_promised(Pledge::promise)) { \
dbg() << "Has not pledged " << #promise; \
cli(); \
Process::current->crash(SIGABRT, 0); \
ASSERT_NOT_REACHED(); \
} \
#define REQUIRE_PROMISE(promise) \
do { \
if (Process::current()->has_promises() \
&& !Process::current()->has_promised(Pledge::promise)) { \
dbg() << "Has not pledged " << #promise; \
cli(); \
Process::current()->crash(SIGABRT, 0); \
ASSERT_NOT_REACHED(); \
} \
} while (0)
}

View file

@ -36,7 +36,7 @@ namespace Ptrace {
KResultOr<u32> handle_syscall(const Kernel::Syscall::SC_ptrace_params& params, Process& caller)
{
if (params.request == PT_TRACE_ME) {
if (Thread::current->tracer())
if (Thread::current()->tracer())
return KResult(-EBUSY);
caller.set_wait_for_tracer_at_next_execve(true);

View file

@ -69,7 +69,7 @@ KernelRng::KernelRng()
void KernelRng::wait_for_entropy()
{
if (!resource().is_ready()) {
Thread::current->wait_on(m_seed_queue);
Thread::current()->wait_on(m_seed_queue);
}
}

View file

@ -43,6 +43,7 @@ namespace Kernel {
SchedulerData* g_scheduler_data;
timeval g_timeofday;
RecursiveSpinLock g_scheduler_lock;
void Scheduler::init_thread(Thread& thread)
{
@ -63,7 +64,7 @@ void Scheduler::update_state_for_thread(Thread& thread)
static u32 time_slice_for(const Thread& thread)
{
// One time slice unit == 1ms
if (&thread == g_colonel)
if (&thread == Processor::current().idle_thread())
return 1;
return 10;
}
@ -74,7 +75,6 @@ timeval Scheduler::time_since_boot()
}
Thread* g_finalizer;
Thread* g_colonel;
WaitQueue* g_finalizer_wait_queue;
Atomic<bool> g_finalizer_has_work{false};
static Process* s_colonel_process;
@ -85,8 +85,9 @@ Thread::JoinBlocker::JoinBlocker(Thread& joinee, void*& joinee_exit_value)
, m_joinee_exit_value(joinee_exit_value)
{
ASSERT(m_joinee.m_joiner == nullptr);
m_joinee.m_joiner = Thread::current;
Thread::current->m_joinee = &joinee;
auto current_thread = Thread::current();
m_joinee.m_joiner = current_thread;
current_thread->m_joinee = &joinee;
}
bool Thread::JoinBlocker::should_unblock(Thread& joiner, time_t, long)
@ -268,7 +269,6 @@ bool Thread::WaitBlocker::should_unblock(Thread& thread, time_t, long)
return IterationDecision::Continue;
m_waitee_pid = child.pid();
dbg() << "Unblocking thread " << thread << " process " << thread.process() << " child exited: " << m_waitee_pid;
should_unblock = true;
return IterationDecision::Break;
});
@ -317,15 +317,16 @@ void Thread::consider_unblock(time_t now_sec, long now_usec)
void Scheduler::start()
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(!Thread::current);
Thread::current = g_colonel;
Process::current = &g_colonel->process();
g_colonel->set_ticks_left(time_slice_for(*g_colonel));
g_colonel->did_schedule();
g_colonel->set_initialized(true);
Processor::init_context(*g_colonel);
g_colonel->set_state(Thread::Running);
Processor::current().initialize_context_switching(*g_colonel);
auto& processor = Processor::current();
ASSERT(processor.current_thread() == nullptr);
auto& idle_thread = *processor.idle_thread();
processor.set_current_thread(idle_thread);
idle_thread.set_ticks_left(time_slice_for(idle_thread));
idle_thread.did_schedule();
idle_thread.set_initialized(true);
Processor::init_context(idle_thread);
idle_thread.set_state(Thread::Running);
processor.initialize_context_switching(idle_thread);
ASSERT_NOT_REACHED();
}
@ -333,12 +334,14 @@ bool Scheduler::pick_next()
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Thread::current);
auto current_thread = Thread::current();
auto now = time_since_boot();
auto now_sec = now.tv_sec;
auto now_usec = now.tv_usec;
ScopedSpinLock lock(g_scheduler_lock);
// Check and unblock threads whose wait conditions have been met.
Scheduler::for_each_nonrunnable([&](Thread& thread) {
thread.consider_unblock(now_sec, now_usec);
@ -347,7 +350,7 @@ bool Scheduler::pick_next()
Process::for_each([&](Process& process) {
if (process.is_dead()) {
if (Process::current->pid() != process.pid() && (!process.ppid() || !Process::from_pid(process.ppid()))) {
if (current_thread->process().pid() != process.pid() && (!process.ppid() || !Process::from_pid(process.ppid()))) {
auto name = process.name();
auto pid = process.pid();
auto exit_status = Process::reap(process);
@ -363,13 +366,13 @@ bool Scheduler::pick_next()
});
// Dispatch any pending signals.
Thread::for_each_living([](Thread& thread) -> IterationDecision {
Thread::for_each_living([&](Thread& thread) -> IterationDecision {
if (!thread.has_unmasked_pending_signals())
return IterationDecision::Continue;
// FIXME: It would be nice if the Scheduler didn't have to worry about who is "current"
// For now, avoid dispatching signals to "current" and do it in a scheduling pass
// while some other process is interrupted. Otherwise a mess will be made.
if (&thread == Thread::current)
if (&thread == current_thread)
return IterationDecision::Continue;
// We know how to interrupt blocked processes, but if they are just executing
// at some random point in the kernel, let them continue.
@ -433,32 +436,33 @@ bool Scheduler::pick_next()
}
if (!thread_to_schedule)
thread_to_schedule = g_colonel;
thread_to_schedule = Processor::current().idle_thread();
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler: Switch to " << *thread_to_schedule << " @ " << String::format("%04x:%08x", thread_to_schedule->tss().cs, thread_to_schedule->tss().eip);
#endif
lock.unlock();
return context_switch(*thread_to_schedule);
}
bool Scheduler::yield()
{
//#ifdef SCHEDULER_DEBUG
#if 0
dbg() << "Scheduler: yielding thread " << *Thread::current << " in_trap: " << Processor::current().in_trap() << " in_irq: " << Processor::current().in_irq();
auto& proc = Processor::current();
auto current_thread = Thread::current();
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler: yielding thread " << *current_thread << " in_trap: " << proc.in_trap() << " in_irq: " << proc.in_irq();
#endif
InterruptDisabler disabler;
ASSERT(Thread::current);
if (Processor::current().in_irq()) {
ASSERT(current_thread != nullptr);
if (proc.in_irq()) {
// If we're handling an IRQ we can't switch context, delay until
// exiting the trap
Processor::current().invoke_scheduler_async();
proc.invoke_scheduler_async();
} else if (!Scheduler::pick_next())
return false;
//#ifdef SCHEDULER_DEBUG
#if 0
dbg() << "Scheduler: yield returns to thread " << *Thread::current << " in_trap: " << Processor::current().in_trap() << " in_irq: " << Processor::current().in_irq();
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler: yield returns to thread " << *current_thread << " in_trap: " << proc.in_trap() << " in_irq: " << proc.in_irq();
#endif
return true;
}
@ -471,7 +475,7 @@ bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
return false;
(void)reason;
unsigned ticks_left = Thread::current->ticks_left();
unsigned ticks_left = Thread::current()->ticks_left();
if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
return Scheduler::yield();
@ -489,30 +493,28 @@ bool Scheduler::context_switch(Thread& thread)
thread.set_ticks_left(time_slice_for(thread));
thread.did_schedule();
if (Thread::current == &thread)
auto current_thread = Thread::current();
if (current_thread == &thread)
return false;
if (Thread::current) {
if (current_thread) {
// If the last process hasn't blocked (still marked as running),
// mark it as runnable for the next round.
if (Thread::current->state() == Thread::Running)
Thread::current->set_state(Thread::Runnable);
if (current_thread->state() == Thread::Running)
current_thread->set_state(Thread::Runnable);
#ifdef LOG_EVERY_CONTEXT_SWITCH
dbg() << "Scheduler: " << *Thread::current << " -> " << thread << " [" << thread.priority() << "] " << String::format("%w", thread.tss().cs) << ":" << String::format("%x", thread.tss().eip);
dbg() << "Scheduler: " << *current_thread << " -> " << thread << " [" << thread.priority() << "] " << String::format("%w", thread.tss().cs) << ":" << String::format("%x", thread.tss().eip);
#endif
}
Thread* from = Thread::current;
Thread::current = &thread;
Process::current = &thread.process();
if (!thread.is_initialized()) {
Processor::init_context(thread);
thread.set_initialized(true);
}
thread.set_state(Thread::Running);
Processor::current().switch_context(from, &thread);
Processor::current().switch_context(current_thread, &thread);
return true;
}
@ -527,11 +529,21 @@ void Scheduler::initialize(u32 cpu)
g_scheduler_data = new SchedulerData;
g_finalizer_wait_queue = new WaitQueue;
Thread* idle_thread = nullptr;
if (cpu == 0) {
g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
s_colonel_process = Process::create_kernel_process(g_colonel, "colonel", idle_loop);
g_colonel->set_priority(THREAD_PRIORITY_MIN);
s_colonel_process = Process::create_kernel_process(idle_thread, "colonel", idle_loop);
ASSERT(s_colonel_process);
ASSERT(idle_thread);
idle_thread->set_priority(THREAD_PRIORITY_MIN);
idle_thread->set_name(String::format("idle thread #%u", cpu));
} else {
ASSERT(s_colonel_process);
idle_thread = s_colonel_process->create_kernel_thread(idle_loop, THREAD_PRIORITY_MIN, String::format("idle thread #%u", cpu), 1 << cpu, false);
ASSERT(idle_thread);
}
Processor::current().set_idle_thread(*idle_thread);
}
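CPU 0 creates the colonel process and takes its first thread as its idle thread; every other CPU adds a fresh idle thread to the same colonel process. A hypothetical bring-up sketch (cpu_scheduler_entry is not a real function) of the order the per-CPU initialization expects:

void cpu_scheduler_entry(u32 cpu)
{
    // Creates (cpu == 0) or adds (cpu != 0) this CPU's idle thread and hands
    // it to the local Processor via set_idle_thread().
    Scheduler::initialize(cpu);

    // Picks up the idle thread registered above and never returns.
    Scheduler::start();
}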
void Scheduler::timer_tick(const RegisterState& regs)
@ -539,19 +551,20 @@ void Scheduler::timer_tick(const RegisterState& regs)
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_irq());
if (!Thread::current)
auto current_thread = Processor::current().current_thread();
if (!current_thread)
return;
++g_uptime;
g_timeofday = TimeManagement::now_as_timeval();
if (Process::current->is_profiling()) {
if (current_thread->process().is_profiling()) {
SmapDisabler disabler;
auto backtrace = Thread::current->raw_backtrace(regs.ebp, regs.eip);
auto backtrace = current_thread->raw_backtrace(regs.ebp, regs.eip);
auto& sample = Profiling::next_sample_slot();
sample.pid = Process::current->pid();
sample.tid = Thread::current->tid();
sample.pid = current_thread->process().pid();
sample.tid = current_thread->tid();
sample.timestamp = g_uptime;
for (size_t i = 0; i < min(backtrace.size(), Profiling::max_stack_frame_count); ++i) {
sample.frames[i] = backtrace[i];
@ -560,7 +573,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
TimerQueue::the().fire();
if (Thread::current->tick())
if (current_thread->tick())
return;
ASSERT_INTERRUPTS_DISABLED();

View file

@ -30,6 +30,7 @@
#include <AK/Function.h>
#include <AK/IntrusiveList.h>
#include <AK/Types.h>
#include <Kernel/SpinLock.h>
#include <Kernel/UnixTypes.h>
namespace Kernel {
@ -41,12 +42,12 @@ struct RegisterState;
struct SchedulerData;
extern Thread* g_finalizer;
extern Thread* g_colonel;
extern WaitQueue* g_finalizer_wait_queue;
extern Atomic<bool> g_finalizer_has_work;
extern u64 g_uptime;
extern SchedulerData* g_scheduler_data;
extern timeval g_timeofday;
extern RecursiveSpinLock g_scheduler_lock;
class Scheduler {
public:

View file

@ -87,8 +87,9 @@ static Handler s_syscall_table[] = {
int handle(RegisterState& regs, u32 function, u32 arg1, u32 arg2, u32 arg3)
{
ASSERT_INTERRUPTS_ENABLED();
auto& process = *Process::current;
Thread::current->did_syscall();
auto current_thread = Thread::current();
auto& process = current_thread->process();
current_thread->did_syscall();
if (function == SC_exit || function == SC_exit_thread) {
// These syscalls need special handling since they never return to the caller.
@ -126,15 +127,17 @@ void syscall_handler(TrapFrame* trap)
auto& regs = *trap->regs;
// Special handling of the "gettid" syscall since it's extremely hot.
// FIXME: Remove this hack once userspace locks stop calling it so damn much.
auto current_thread = Thread::current();
auto& process = current_thread->process();
if (regs.eax == SC_gettid) {
regs.eax = Process::current->sys$gettid();
Thread::current->did_syscall();
regs.eax = process.sys$gettid();
current_thread->did_syscall();
return;
}
if (Thread::current->tracer() && Thread::current->tracer()->is_tracing_syscalls()) {
Thread::current->tracer()->set_trace_syscalls(false);
Thread::current->tracer_trap(regs);
if (current_thread->tracer() && current_thread->tracer()->is_tracing_syscalls()) {
current_thread->tracer()->set_trace_syscalls(false);
current_thread->tracer_trap(regs);
}
// Make sure SMAP protection is enabled on syscall entry.
@ -146,8 +149,6 @@ void syscall_handler(TrapFrame* trap)
asm volatile(""
: "=m"(*ptr));
auto& process = *Process::current;
if (!MM.validate_user_stack(process, VirtualAddress(regs.userspace_esp))) {
dbg() << "Invalid stack pointer: " << String::format("%p", regs.userspace_esp);
handle_crash(regs, "Bad stack on syscall entry", SIGSTKFLT);
@ -174,18 +175,18 @@ void syscall_handler(TrapFrame* trap)
u32 arg3 = regs.ebx;
regs.eax = (u32)Syscall::handle(regs, function, arg1, arg2, arg3);
if (Thread::current->tracer() && Thread::current->tracer()->is_tracing_syscalls()) {
Thread::current->tracer()->set_trace_syscalls(false);
Thread::current->tracer_trap(regs);
if (current_thread->tracer() && current_thread->tracer()->is_tracing_syscalls()) {
current_thread->tracer()->set_trace_syscalls(false);
current_thread->tracer_trap(regs);
}
process.big_lock().unlock();
// Check if we're supposed to return to userspace or just die.
Thread::current->die_if_needed();
current_thread->die_if_needed();
if (Thread::current->has_unmasked_pending_signals())
(void)Thread::current->block<Thread::SemiPermanentBlocker>(Thread::SemiPermanentBlocker::Reason::Signal);
if (current_thread->has_unmasked_pending_signals())
(void)current_thread->block<Thread::SemiPermanentBlocker>(Thread::SemiPermanentBlocker::Reason::Signal);
}
}

View file

@ -42,8 +42,9 @@ MasterPTY::MasterPTY(unsigned index)
, m_index(index)
{
m_pts_name = String::format("/dev/pts/%u", m_index);
set_uid(Process::current->uid());
set_gid(Process::current->gid());
auto process = Process::current();
set_uid(process->uid());
set_gid(process->gid());
}
MasterPTY::~MasterPTY()

View file

@ -39,8 +39,9 @@ SlavePTY::SlavePTY(MasterPTY& master, unsigned index)
, m_index(index)
{
sprintf(m_tty_name, "/dev/pts/%u", m_index);
set_uid(Process::current->uid());
set_gid(Process::current->gid());
auto process = Process::current();
set_uid(process->uid());
set_gid(process->gid());
DevPtsFS::register_slave_pty(*this);
set_size(80, 25);
}

View file

@ -282,7 +282,7 @@ void TTY::set_termios(const termios& t)
int TTY::ioctl(FileDescription&, unsigned request, FlatPtr arg)
{
REQUIRE_PROMISE(tty);
auto& process = *Process::current;
auto& current_process = *Process::current();
pid_t pgid;
termios* tp;
winsize* ws;
@ -290,7 +290,7 @@ int TTY::ioctl(FileDescription&, unsigned request, FlatPtr arg)
#if 0
// FIXME: When should we block things?
// How do we make this work together with MasterPTY forwarding to us?
if (process.tty() && process.tty() != this) {
if (current_process.tty() && current_process.tty() != this) {
return -ENOTTY;
}
#endif
@ -308,14 +308,14 @@ int TTY::ioctl(FileDescription&, unsigned request, FlatPtr arg)
return -EPERM;
if (pgid != process->pgid())
return -EPERM;
if (Process::current->sid() != process->sid())
if (current_process.sid() != process->sid())
return -EPERM;
}
m_pgid = pgid;
return 0;
case TCGETS:
tp = reinterpret_cast<termios*>(arg);
if (!process.validate_write(tp, sizeof(termios)))
if (!current_process.validate_write(tp, sizeof(termios)))
return -EFAULT;
*tp = m_termios;
return 0;
@ -323,7 +323,7 @@ int TTY::ioctl(FileDescription&, unsigned request, FlatPtr arg)
case TCSETSF:
case TCSETSW:
tp = reinterpret_cast<termios*>(arg);
if (!process.validate_read(tp, sizeof(termios)))
if (!current_process.validate_read(tp, sizeof(termios)))
return -EFAULT;
set_termios(*tp);
if (request == TCSETSF)
@ -331,14 +331,14 @@ int TTY::ioctl(FileDescription&, unsigned request, FlatPtr arg)
return 0;
case TIOCGWINSZ:
ws = reinterpret_cast<winsize*>(arg);
if (!process.validate_write(ws, sizeof(winsize)))
if (!current_process.validate_write(ws, sizeof(winsize)))
return -EFAULT;
ws->ws_row = m_rows;
ws->ws_col = m_columns;
return 0;
case TIOCSWINSZ:
ws = reinterpret_cast<winsize*>(arg);
if (!process.validate_read(ws, sizeof(winsize)))
if (!current_process.validate_read(ws, sizeof(winsize)))
return -EFAULT;
if (ws->ws_col == m_columns && ws->ws_row == m_rows)
return 0;
@ -347,10 +347,10 @@ int TTY::ioctl(FileDescription&, unsigned request, FlatPtr arg)
generate_signal(SIGWINCH);
return 0;
case TIOCSCTTY:
process.set_tty(this);
current_process.set_tty(this);
return 0;
case TIOCNOTTY:
process.set_tty(nullptr);
current_process.set_tty(nullptr);
return 0;
}
ASSERT_NOT_REACHED();

View file

@ -32,10 +32,10 @@ namespace Kernel {
void FinalizerTask::spawn()
{
Process::create_kernel_process(g_finalizer, "FinalizerTask", [] {
Thread::current->set_priority(THREAD_PRIORITY_LOW);
Thread::current()->set_priority(THREAD_PRIORITY_LOW);
for (;;) {
dbg() << "Finalizer task is running";
Thread::current->wait_on(*g_finalizer_wait_queue);
Thread::current()->wait_on(*g_finalizer_wait_queue);
bool expected = true;
if (g_finalizer_has_work.compare_exchange_strong(expected, false, AK::MemoryOrder::memory_order_acq_rel))

View file

@ -38,7 +38,7 @@ void SyncTask::spawn()
dbg() << "SyncTask is running";
for (;;) {
VFS::the().sync();
Thread::current->sleep(1 * TimeManagement::the().ticks_per_second());
Thread::current()->sleep(1 * TimeManagement::the().ticks_per_second());
}
});
}

View file

@ -46,8 +46,6 @@
namespace Kernel {
Thread* Thread::current;
HashTable<Thread*>& thread_table()
{
ASSERT_INTERRUPTS_DISABLED();
@ -61,13 +59,12 @@ Thread::Thread(Process& process)
: m_process(process)
, m_name(process.name())
{
if (m_process.m_thread_count == 0) {
if (m_process.m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel) == 0) {
// First thread gets TID == PID
m_tid = process.pid();
} else {
m_tid = Process::allocate_pid();
}
process.m_thread_count++;
#ifdef THREAD_DEBUG
dbg() << "Created new thread " << process.name() << "(" << process.pid() << ":" << m_tid << ")";
#endif
@ -127,14 +124,14 @@ Thread::~Thread()
thread_table().remove(this);
}
ASSERT(m_process.m_thread_count);
m_process.m_thread_count--;
auto thread_cnt_before = m_process.m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
ASSERT(thread_cnt_before != 0);
}
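With m_thread_count now an Atomic<u16>, the "first thread gets TID == PID" decision in the constructor keys off the value returned by fetch_add, and the destructor asserts on the previous value from fetch_sub, so two racing threads can never both observe the same count. A standalone sketch of the same atomic counter pattern using std::atomic (names are illustrative, not the kernel's):

#include <atomic>
#include <cassert>

struct ThreadCounter {
    std::atomic<unsigned> count { 0 };

    bool register_thread()
    {
        // True only for the very first thread.
        return count.fetch_add(1, std::memory_order_acq_rel) == 0;
    }

    bool unregister_thread()
    {
        auto before = count.fetch_sub(1, std::memory_order_acq_rel);
        assert(before != 0);
        // True only when the last thread has gone away.
        return before == 1;
    }
};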
void Thread::unblock()
{
m_blocker = nullptr;
if (current == this) {
if (Thread::current() == this) {
if (m_should_die)
set_state(Thread::Dying);
else
@ -179,7 +176,7 @@ void Thread::set_should_die()
void Thread::die_if_needed()
{
ASSERT(current == this);
ASSERT(Thread::current() == this);
if (!m_should_die)
return;
@ -213,7 +210,7 @@ u64 Thread::sleep(u32 ticks)
{
ASSERT(state() == Thread::Running);
u64 wakeup_time = g_uptime + ticks;
auto ret = Thread::current->block<Thread::SleepBlocker>(wakeup_time);
auto ret = Thread::current()->block<Thread::SleepBlocker>(wakeup_time);
if (wakeup_time > g_uptime) {
ASSERT(ret != Thread::BlockResult::WokeNormally);
}
@ -223,7 +220,7 @@ u64 Thread::sleep(u32 ticks)
u64 Thread::sleep_until(u64 wakeup_time)
{
ASSERT(state() == Thread::Running);
auto ret = Thread::current->block<Thread::SleepBlocker>(wakeup_time);
auto ret = Thread::current()->block<Thread::SleepBlocker>(wakeup_time);
if (wakeup_time > g_uptime)
ASSERT(ret != Thread::BlockResult::WokeNormally);
return wakeup_time;
@ -261,7 +258,7 @@ const char* Thread::state_string() const
void Thread::finalize()
{
ASSERT(current == g_finalizer);
ASSERT(Thread::current() == g_finalizer);
#ifdef THREAD_DEBUG
dbg() << "Finalizing thread " << *this;
@ -283,7 +280,7 @@ void Thread::finalize()
void Thread::finalize_dying_threads()
{
ASSERT(current == g_finalizer);
ASSERT(Thread::current() == g_finalizer);
Vector<Thread*, 32> dying_threads;
{
InterruptDisabler disabler;
@ -296,7 +293,7 @@ void Thread::finalize_dying_threads()
auto& process = thread->process();
thread->finalize();
delete thread;
if (process.m_thread_count == 0)
if (process.m_thread_count.load(AK::MemoryOrder::memory_order_consume) == 0)
process.finalize();
}
}
@ -770,7 +767,7 @@ String Thread::backtrace_impl() const
Vector<RecognizedSymbol, 128> recognized_symbols;
u32 start_frame;
if (current == this) {
if (Thread::current() == this) {
asm volatile("movl %%ebp, %%eax"
: "=a"(start_frame));
} else {
@ -853,7 +850,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, timeval* timeout, Atomic<b
if (lock)
*lock = false;
set_state(State::Queued);
queue.enqueue(*current);
queue.enqueue(*Thread::current());
if (timeout) {
@ -888,7 +885,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, timeval* timeout, Atomic<b
void Thread::wake_from_queue()
{
ASSERT(state() == State::Queued);
if (this != Thread::current)
if (this != Thread::current())
set_state(State::Runnable);
else
set_state(State::Running);

View file

@ -63,6 +63,8 @@ struct ThreadSpecificData {
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99
#define THREAD_AFFINITY_DEFAULT 0xffffffff
class Thread {
AK_MAKE_NONCOPYABLE(Thread);
AK_MAKE_NONMOVABLE(Thread);
@ -71,7 +73,10 @@ class Thread {
friend class Scheduler;
public:
static Thread* current;
inline static Thread* current()
{
return Processor::current().current_thread();
}
explicit Thread(Process&);
~Thread();
@ -275,6 +280,8 @@ public:
u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
u32 affinity() const { return m_cpu_affinity; }
void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }
u32 frame_ptr() const { return m_tss.ebp; }
u32 stack_ptr() const { return m_tss.esp; }
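affinity() / set_affinity() expose the new m_cpu_affinity field. Given THREAD_AFFINITY_DEFAULT is 0xffffffff, the value reads as a bit mask of processors the thread may run on; a small sketch under that assumption (pin_to_cpu is a hypothetical helper):

// Restrict a thread to one processor by setting a single-bit affinity mask,
// assuming bit n corresponds to CPU n.
inline void pin_to_cpu(Thread& thread, u32 cpu)
{
    thread.set_affinity(1u << cpu);
}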
@ -470,6 +477,7 @@ private:
TSS32 m_tss;
FarPtr m_far_ptr;
Atomic<u32> m_cpu { 0 };
u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
u32 m_ticks { 0 };
u32 m_ticks_left { 0 };
u32 m_times_scheduled { 0 };
@ -539,6 +547,7 @@ template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(g_scheduler_lock);
auto ret = Scheduler::for_each_runnable(callback);
if (ret == IterationDecision::Break)
return ret;
@ -549,6 +558,7 @@ template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(g_scheduler_lock);
auto new_callback = [=](Thread& thread) -> IterationDecision {
if (thread.state() == state)
return callback(thread);
@ -579,6 +589,7 @@ template<typename Callback>
inline IterationDecision Scheduler::for_each_runnable(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(g_scheduler_lock.is_locked());
auto& tl = g_scheduler_data->m_runnable_threads;
for (auto it = tl.begin(); it != tl.end();) {
auto& thread = *it;
@ -594,6 +605,7 @@ template<typename Callback>
inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(g_scheduler_lock.is_locked());
auto& tl = g_scheduler_data->m_nonrunnable_threads;
for (auto it = tl.begin(); it != tl.end();) {
auto& thread = *it;

View file

@ -267,7 +267,7 @@ Region* MemoryManager::region_from_vaddr(VirtualAddress vaddr)
PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Thread::current);
ASSERT(Thread::current() != nullptr);
ScopedSpinLock lock(s_lock);
if (Processor::current().in_irq()) {
dbg() << "CPU[" << Processor::current().id() << "] BUG! Page fault while handling IRQ! code=" << fault.code() << ", vaddr=" << fault.vaddr() << ", irq level: " << Processor::current().in_irq();
@ -519,10 +519,11 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
void MemoryManager::enter_process_paging_scope(Process& process)
{
ASSERT(Thread::current);
auto current_thread = Thread::current();
ASSERT(current_thread != nullptr);
ScopedSpinLock lock(s_lock);
Thread::current->tss().cr3 = process.page_directory().cr3();
current_thread->tss().cr3 = process.page_directory().cr3();
write_cr3(process.page_directory().cr3());
}

View file

@ -31,7 +31,7 @@ namespace Kernel {
ProcessPagingScope::ProcessPagingScope(Process& process)
{
ASSERT(Thread::current);
ASSERT(Thread::current() != nullptr);
m_previous_cr3 = read_cr3();
MM.enter_process_paging_scope(process);
}
@ -39,7 +39,7 @@ ProcessPagingScope::ProcessPagingScope(Process& process)
ProcessPagingScope::~ProcessPagingScope()
{
InterruptDisabler disabler;
Thread::current->tss().cr3 = m_previous_cr3;
Thread::current()->tss().cr3 = m_previous_cr3;
write_cr3(m_previous_cr3);
}

View file

@ -67,7 +67,7 @@ Region::~Region()
NonnullOwnPtr<Region> Region::clone()
{
ASSERT(Process::current);
ASSERT(Process::current());
if (m_inherit_mode == InheritMode::ZeroedOnFork) {
ASSERT(m_mmap);
@ -367,8 +367,9 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
return PageFaultResponse::Continue;
}
if (Thread::current)
Thread::current->did_zero_fault();
auto current_thread = Thread::current();
if (current_thread != nullptr)
current_thread->did_zero_fault();
auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
if (page.is_null()) {
@ -397,8 +398,9 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
return PageFaultResponse::Continue;
}
if (Thread::current)
Thread::current->did_cow_fault();
auto current_thread = Thread::current();
if (current_thread)
current_thread->did_cow_fault();
#ifdef PAGE_FAULT_DEBUG
dbg() << " >> It's a COW page and it's time to COW!";
@ -446,8 +448,9 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
return PageFaultResponse::Continue;
}
if (Thread::current)
Thread::current->did_inode_fault();
auto current_thread = Thread::current();
if (current_thread)
current_thread->did_inode_fault();
#ifdef MM_DEBUG
dbg() << "MM: page_in_from_inode ready to read from inode";

View file

@ -310,7 +310,7 @@ void init_stage2()
hang();
}
Process::current->set_root_directory(VFS::the().root_custody());
Process::current()->set_root_directory(VFS::the().root_custody());
load_kernel_symbol_table();
@ -329,7 +329,7 @@ void init_stage2()
NetworkTask::spawn();
Process::current->sys$exit(0);
Process::current()->sys$exit(0);
ASSERT_NOT_REACHED();
}