Kernel: Move Spinlock lock/unlock functions out of line

I don't see why these have to be inlined everywhere in the kernel.
commit 35e24bc774
parent 362e167239
Author: Andreas Kling
Date:   2022-02-02 12:49:32 +01:00
3 changed files with 77 additions and 60 deletions
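
The change itself follows the standard C++ move from header-inlined bodies to out-of-line definitions: the header keeps only the declarations, a new translation unit holds the single definition, and every call site becomes an ordinary call instead of an inlined copy of the body. A minimal sketch of the pattern (Foo and bar are illustrative names, not from this commit):

    // Foo.h -- callers see only the declaration.
    class Foo {
    public:
        void bar(); // body moved out of line into Foo.cpp
    };

    // Foo.cpp -- the single definition the linker resolves calls against.
    #include "Foo.h"
    void Foo::bar()
    {
        // ...
    }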

--- a/Kernel/Arch/x86/Spinlock.h
+++ b/Kernel/Arch/x86/Spinlock.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
@@ -24,30 +24,8 @@ public:
     {
     }
 
-    ALWAYS_INLINE u32 lock()
-    {
-        u32 prev_flags = cpu_flags();
-        Processor::enter_critical();
-        cli();
-        while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
-            Processor::wait_check();
-        }
-        track_lock_acquire(m_rank);
-        return prev_flags;
-    }
-
-    ALWAYS_INLINE void unlock(u32 prev_flags)
-    {
-        VERIFY(is_locked());
-        track_lock_release(m_rank);
-        m_lock.store(0, AK::memory_order_release);
-        if ((prev_flags & 0x200) != 0)
-            sti();
-        else
-            cli();
-        Processor::leave_critical();
-    }
+    u32 lock();
+    void unlock(u32 prev_flags);
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
     {
@@ -74,41 +52,8 @@ public:
     {
     }
 
-    ALWAYS_INLINE u32 lock()
-    {
-        u32 prev_flags = cpu_flags();
-        cli();
-        Processor::enter_critical();
-        auto& proc = Processor::current();
-        FlatPtr cpu = FlatPtr(&proc);
-        FlatPtr expected = 0;
-        while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
-            if (expected == cpu)
-                break;
-            Processor::wait_check();
-            expected = 0;
-        }
-        if (m_recursions == 0)
-            track_lock_acquire(m_rank);
-        m_recursions++;
-        return prev_flags;
-    }
-
-    ALWAYS_INLINE void unlock(u32 prev_flags)
-    {
-        VERIFY(m_recursions > 0);
-        VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
-        if (--m_recursions == 0) {
-            track_lock_release(m_rank);
-            m_lock.store(0, AK::memory_order_release);
-        }
-        if ((prev_flags & 0x200) != 0)
-            sti();
-        else
-            cli();
-        Processor::leave_critical();
-    }
+    u32 lock();
+    void unlock(u32 prev_flags);
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
     {
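
For readers skimming the diff: the bodies being moved implement a test-and-set lock that also disables interrupts while held. 0x200 is the IF (interrupt-enable) bit of EFLAGS, which is why lock() returns the previous flags and unlock() consumes them. A self-contained sketch of the same acquire/release idea using std::atomic; cpu_flags(), cli(), and sti() are stubbed stand-ins for the kernel's x86 primitives, not real APIs:

    #include <atomic>
    #include <cstdint>

    // Stubs standing in for the kernel's x86 primitives (a real kernel
    // implements these with inline assembly).
    static uint32_t cpu_flags() { return 0x200; } // pretend EFLAGS.IF is set
    static void cli() { }                         // disable interrupts
    static void sti() { }                         // enable interrupts

    class SimpleSpinlock {
    public:
        uint32_t lock()
        {
            uint32_t prev_flags = cpu_flags();
            cli(); // never spin, or hold the lock, with interrupts enabled
            // exchange() returns the previous value: 0 means the lock was
            // free and we now own it. memory_order_acquire pairs with the
            // release store in unlock() to fence the critical section.
            while (m_lock.exchange(1, std::memory_order_acquire) != 0) {
                // the kernel calls Processor::wait_check() here; just spin
            }
            return prev_flags;
        }

        void unlock(uint32_t prev_flags)
        {
            // Release store: publishes the critical section's writes before
            // other CPUs can observe the lock as free.
            m_lock.store(0, std::memory_order_release);
            if ((prev_flags & 0x200) != 0) // IF was set before lock()
                sti();                     // restore caller's interrupt state
        }

    private:
        std::atomic<uint32_t> m_lock { 0 };
    };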

--- /dev/null
+++ b/Kernel/Arch/x86/common/Spinlock.cpp

@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Arch/x86/Spinlock.h>
+
+namespace Kernel {
+
+u32 Spinlock::lock()
+{
+    u32 prev_flags = cpu_flags();
+    Processor::enter_critical();
+    cli();
+    while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
+        Processor::wait_check();
+    track_lock_acquire(m_rank);
+    return prev_flags;
+}
+
+void Spinlock::unlock(u32 prev_flags)
+{
+    VERIFY(is_locked());
+    track_lock_release(m_rank);
+    m_lock.store(0, AK::memory_order_release);
+    if ((prev_flags & 0x200) != 0)
+        sti();
+    else
+        cli();
+    Processor::leave_critical();
+}
+
+u32 RecursiveSpinlock::lock()
+{
+    u32 prev_flags = cpu_flags();
+    cli();
+    Processor::enter_critical();
+    auto& proc = Processor::current();
+    FlatPtr cpu = FlatPtr(&proc);
+    FlatPtr expected = 0;
+    while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
+        if (expected == cpu)
+            break;
+        Processor::wait_check();
+        expected = 0;
+    }
+    if (m_recursions == 0)
+        track_lock_acquire(m_rank);
+    m_recursions++;
+    return prev_flags;
+}
+
+void RecursiveSpinlock::unlock(u32 prev_flags)
+{
+    VERIFY(m_recursions > 0);
+    VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
+    if (--m_recursions == 0) {
+        track_lock_release(m_rank);
+        m_lock.store(0, AK::memory_order_release);
+    }
+    if ((prev_flags & 0x200) != 0)
+        sti();
+    else
+        cli();
+    Processor::leave_critical();
+}
+
+}
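
In the tree these functions are rarely called by hand; callers go through SerenityOS's RAII SpinlockLocker, which keeps the flags returned by lock() paired with the matching unlock(). A simplified guard in the same spirit (an illustration, not the tree's actual SpinlockLocker):

    #include <cstdint>
    using u32 = uint32_t; // approximates AK's u32 alias

    template<typename LockType>
    class [[nodiscard]] ScopedLocker {
    public:
        explicit ScopedLocker(LockType& lock)
            : m_lock(lock)
            , m_prev_flags(lock.lock()) // acquire; saves interrupt state
        {
        }
        ~ScopedLocker() { m_lock.unlock(m_prev_flags); } // release on scope exit

    private:
        LockType& m_lock;
        u32 m_prev_flags;
    };

    // Usage: the lock is held for exactly the guard's lifetime.
    //     ScopedLocker locker(some_spinlock);
    //     /* critical section */

Since such a guard only calls lock() and unlock(), moving them out of line changes nothing for callers; each acquisition now costs one function call instead of an inlined spin loop at every call site.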

--- a/Kernel/CMakeLists.txt
+++ b/Kernel/CMakeLists.txt

@@ -300,6 +300,7 @@ if ("${SERENITY_ARCH}" STREQUAL "i686" OR "${SERENITY_ARCH}" STREQUAL "x86_64")
         ${KERNEL_SOURCES}
         Arch/x86/common/ScopedCritical.cpp
         Arch/x86/common/SmapDisabler.cpp
+        Arch/x86/common/Spinlock.cpp
     )
     set(KERNEL_SOURCES
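
Note that the CMakeLists.txt addition is load-bearing: with the bodies out of the header, the new translation unit has to be compiled into the kernel, or every object calling Spinlock::lock() or unlock() would fail to link with undefined references.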