Kernel: Store whether a thread is the idle thread in Thread directly

This solves a problem where checking whether a thread is an idle
thread may require iterating all processors if it is not the idle
thread of the current processor.
Authored by Tom on 2021-01-28 20:07:41 -07:00; committed by Andreas Kling
parent 9a69b9112b
commit ec27cbbb2a
3 changed files with 15 additions and 11 deletions
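
To make the problem concrete, here is a minimal user-space sketch with simplified stand-in types (the four-processor array, the helper names, and main() are illustrative only, not the kernel's actual API). When only each Processor holds an m_idle_thread pointer, asking "is this thread some processor's idle thread?" about a thread that is not the current processor's idle thread means walking every processor; a flag stored on the Thread itself answers immediately.

#include <array>
#include <cstddef>
#include <cstdio>

// Simplified stand-ins for the kernel types; only the shape matters here.
struct Thread {
    bool is_idle_thread { false }; // the flag this commit introduces
};

struct Processor {
    Thread* idle_thread { nullptr };
};

static std::array<Processor, 4> g_processors; // hypothetical 4-CPU machine

// Before: without a per-thread flag, the general check has to compare the
// thread against every processor's idle thread pointer.
static bool is_idle_thread_before(const Thread& thread)
{
    for (auto& processor : g_processors) {
        if (processor.idle_thread == &thread)
            return true;
    }
    return false;
}

// After: the thread carries the answer itself, O(1) on any processor.
static bool is_idle_thread_after(const Thread& thread)
{
    return thread.is_idle_thread;
}

int main()
{
    std::array<Thread, 4> idle_threads;
    for (std::size_t i = 0; i < g_processors.size(); ++i) {
        idle_threads[i].is_idle_thread = true;
        g_processors[i].idle_thread = &idle_threads[i];
    }

    Thread normal_thread;
    std::printf("before: %d %d  after: %d %d\n",
        is_idle_thread_before(idle_threads[2]), is_idle_thread_before(normal_thread),
        is_idle_thread_after(idle_threads[2]), is_idle_thread_after(normal_thread));
    return 0;
}

The diffs below make exactly that switch: call sites such as time_slice_for() stop comparing against Processor::current().idle_thread() and instead ask the thread via the new Thread::is_idle_thread().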


@@ -779,11 +779,6 @@ public:
         return *m_mm_data;
     }

-    ALWAYS_INLINE Thread* idle_thread() const
-    {
-        return m_idle_thread;
-    }
-
     ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
     {
         m_idle_thread = &idle_thread;
@@ -806,6 +801,12 @@ public:
         write_fs_u32(__builtin_offsetof(Processor, m_current_thread), FlatPtr(&current_thread));
     }

+    ALWAYS_INLINE static Thread* idle_thread()
+    {
+        // See comment in Processor::current_thread
+        return (Thread*)read_fs_u32(__builtin_offsetof(Processor, m_idle_thread));
+    }
+
     ALWAYS_INLINE u32 get_id() const
     {
         // NOTE: This variant should only be used when iterating over all
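
The new static Processor::idle_thread() above follows the same pattern as Processor::current_thread(): it reads m_idle_thread at a fixed offset from the per-processor data block that the kernel reaches through the fs segment, so callers no longer need to resolve Processor::current() first. A rough user-space analogue of that access pattern, with illustrative names and a thread_local block standing in for the fs-based per-CPU area, might look like this sketch:

#include <cstddef>
#include <cstdio>

struct Thread {
    bool is_idle { false };
};

// Stand-in for the per-processor block; in the kernel this lives behind
// the fs segment base rather than a thread_local variable.
struct PerCpu {
    Thread* current_thread { nullptr };
    Thread* idle_thread { nullptr };
};

thread_local PerCpu g_per_cpu;

// Mirrors the shape of the static accessor: read a member at a known
// offset from the per-CPU base, no "which processor am I on?" lookup.
static Thread* idle_thread()
{
    auto* base = reinterpret_cast<unsigned char*>(&g_per_cpu);
    return *reinterpret_cast<Thread**>(base + offsetof(PerCpu, idle_thread));
}

int main()
{
    Thread idle;
    idle.is_idle = true;
    g_per_cpu.idle_thread = &idle;
    std::printf("is idle thread: %d\n", idle_thread()->is_idle);
    return 0;
}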


@@ -39,7 +39,7 @@ RecursiveSpinLock g_scheduler_lock;
 static u32 time_slice_for(const Thread& thread)
 {
     // One time slice unit == 4ms (assuming 250 ticks/second)
-    if (&thread == Processor::current().idle_thread())
+    if (thread.is_idle_thread())
         return 1;
     return 2;
 }
@@ -105,12 +105,12 @@ Thread& Scheduler::pull_next_runnable_thread()
         }
         priority_mask &= ~(1u << priority);
     }
-    return *Processor::current().idle_thread();
+    return *Processor::idle_thread();
 }

 bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
 {
-    if (&thread == Processor::current().idle_thread())
+    if (thread.is_idle_thread())
         return true;
     ScopedSpinLock lock(g_ready_queues_lock);
     auto priority = thread.m_runnable_priority;
@@ -134,7 +134,7 @@ bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
 void Scheduler::queue_runnable_thread(Thread& thread)
 {
     VERIFY(g_scheduler_lock.own_lock());
-    if (&thread == Processor::current().idle_thread())
+    if (thread.is_idle_thread())
         return;
     auto priority = thread_priority_to_priority_index(thread.priority());

@@ -160,9 +160,8 @@ UNMAP_AFTER_INIT void Scheduler::start()
     auto& processor = Processor::current();
     processor.set_scheduler_data(*new SchedulerPerProcessorData());
     VERIFY(processor.is_initialized());
-    auto& idle_thread = *processor.idle_thread();
+    auto& idle_thread = *Processor::idle_thread();
     VERIFY(processor.current_thread() == &idle_thread);
-    VERIFY(processor.idle_thread() == &idle_thread);
     idle_thread.set_ticks_left(time_slice_for(idle_thread));
     idle_thread.did_schedule();
     idle_thread.set_initialized(true);
@@ -467,6 +466,7 @@ UNMAP_AFTER_INIT void Scheduler::initialize()

 UNMAP_AFTER_INIT void Scheduler::set_idle_thread(Thread* idle_thread)
 {
+    idle_thread->set_idle_thread();
     Processor::current().set_idle_thread(*idle_thread);
     Processor::current().set_current_thread(*idle_thread);
 }


@@ -1112,6 +1112,8 @@ public:
         return m_handling_page_fault;
     }
     void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
+    void set_idle_thread() { m_is_idle_thread = true; }
+    bool is_idle_thread() const { return m_is_idle_thread; }

 private:
     Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region> kernel_stack_region);
@@ -1248,6 +1250,7 @@ private:
     bool m_should_die { false };
     bool m_initialized { false };
     bool m_in_block { false };
+    bool m_is_idle_thread { false };
     Atomic<bool> m_have_any_unmasked_pending_signals { false };

     void yield_without_holding_big_lock();