mirror of
https://github.com/LadybirdBrowser/ladybird.git
synced 2024-09-30 08:41:15 +00:00
Kernel: Move Spinlock lock/unlock functions out of line
I don't see why these have to be inlined everywhere in the kernel.
This commit is contained in:
parent
362e167239
commit
35e24bc774
Notes:
sideshowbarker
2024-07-17 19:51:13 +09:00
Author: https://github.com/awesomekling Commit: https://github.com/SerenityOS/serenity/commit/35e24bc7742
|
@@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
|
||||
* Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*/
|
||||
|
@@ -24,30 +24,8 @@ public:
|
|||
{
|
||||
}
|
||||
|
||||
ALWAYS_INLINE u32 lock()
|
||||
{
|
||||
u32 prev_flags = cpu_flags();
|
||||
Processor::enter_critical();
|
||||
cli();
|
||||
while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
|
||||
Processor::wait_check();
|
||||
}
|
||||
track_lock_acquire(m_rank);
|
||||
return prev_flags;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void unlock(u32 prev_flags)
|
||||
{
|
||||
VERIFY(is_locked());
|
||||
track_lock_release(m_rank);
|
||||
m_lock.store(0, AK::memory_order_release);
|
||||
if ((prev_flags & 0x200) != 0)
|
||||
sti();
|
||||
else
|
||||
cli();
|
||||
|
||||
Processor::leave_critical();
|
||||
}
|
||||
u32 lock();
|
||||
void unlock(u32 prev_flags);
|
||||
|
||||
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
|
||||
{
|
||||
|
@@ -74,41 +52,8 @@ public:
|
|||
{
|
||||
}
|
||||
|
||||
ALWAYS_INLINE u32 lock()
|
||||
{
|
||||
u32 prev_flags = cpu_flags();
|
||||
cli();
|
||||
Processor::enter_critical();
|
||||
auto& proc = Processor::current();
|
||||
FlatPtr cpu = FlatPtr(&proc);
|
||||
FlatPtr expected = 0;
|
||||
while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
|
||||
if (expected == cpu)
|
||||
break;
|
||||
Processor::wait_check();
|
||||
expected = 0;
|
||||
}
|
||||
if (m_recursions == 0)
|
||||
track_lock_acquire(m_rank);
|
||||
m_recursions++;
|
||||
return prev_flags;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void unlock(u32 prev_flags)
|
||||
{
|
||||
VERIFY(m_recursions > 0);
|
||||
VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
|
||||
if (--m_recursions == 0) {
|
||||
track_lock_release(m_rank);
|
||||
m_lock.store(0, AK::memory_order_release);
|
||||
}
|
||||
if ((prev_flags & 0x200) != 0)
|
||||
sti();
|
||||
else
|
||||
cli();
|
||||
|
||||
Processor::leave_critical();
|
||||
}
|
||||
u32 lock();
|
||||
void unlock(u32 prev_flags);
|
||||
|
||||
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
|
||||
{
|
||||
|
|
71
Kernel/Arch/x86/common/Spinlock.cpp
Normal file
71
Kernel/Arch/x86/common/Spinlock.cpp
Normal file
|
@@ -0,0 +1,71 @@
|
|||
/*
|
||||
* Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*/
|
||||
|
||||
#include <Kernel/Arch/x86/Spinlock.h>
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
// Acquire the spinlock with interrupts disabled.
// Returns the caller's CPU flags so unlock() can restore the previous
// interrupt state (nesting-safe: an already-disabled caller stays disabled).
u32 Spinlock::lock()
{
    // Snapshot flags BEFORE cli() so we remember whether the caller had
    // interrupts enabled.
    u32 prev_flags = cpu_flags();
    Processor::enter_critical();
    cli();
    // Spin until we flip the lock word 0 -> 1. memory_order_acquire makes
    // everything inside the critical section happen-after this exchange.
    while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
        Processor::wait_check(); // relax the CPU / service pending work while spinning
    track_lock_acquire(m_rank);
    return prev_flags;
}
|
||||
|
||||
// Release the spinlock and restore the interrupt state captured by lock().
// prev_flags must be the value returned by the matching lock() call.
void Spinlock::unlock(u32 prev_flags)
{
    VERIFY(is_locked());
    track_lock_release(m_rank);
    // memory_order_release publishes all writes made inside the critical
    // section before the lock word is seen as 0 by the next acquirer.
    m_lock.store(0, AK::memory_order_release);
    // 0x200 is bit 9 (IF, the interrupt-enable flag) of x86 EFLAGS:
    // re-enable interrupts only if the caller had them enabled.
    if ((prev_flags & 0x200) != 0)
        sti();
    else
        cli();

    Processor::leave_critical();
}
|
||||
|
||||
// Acquire the recursive spinlock. The lock word stores the owning
// Processor's address, so the same CPU may re-enter without deadlocking;
// m_recursions counts the nesting depth.
// Returns the caller's CPU flags for the matching unlock().
// NOTE(review): cli() runs before enter_critical() here, the reverse of
// Spinlock::lock() — presumably intentional, but worth confirming upstream.
u32 RecursiveSpinlock::lock()
{
    u32 prev_flags = cpu_flags();
    cli();
    Processor::enter_critical();
    auto& proc = Processor::current();
    // Use this CPU's Processor address as the ownership token.
    FlatPtr cpu = FlatPtr(&proc);
    FlatPtr expected = 0;
    while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
        // CAS failed and the holder is us: recursive acquisition, proceed.
        if (expected == cpu)
            break;
        Processor::wait_check();
        // CAS wrote the current holder into `expected`; reset for the retry.
        expected = 0;
    }
    // Only the outermost acquisition is reported to the lock-rank tracker.
    if (m_recursions == 0)
        track_lock_acquire(m_rank);
    m_recursions++;
    return prev_flags;
}
|
||||
|
||||
// Release one level of the recursive spinlock; the lock word is cleared
// only when the outermost level is released. prev_flags must come from
// the matching lock() call.
void RecursiveSpinlock::unlock(u32 prev_flags)
{
    VERIFY(m_recursions > 0);
    // Only the owning CPU may unlock (relaxed load is fine: we only
    // compare against our own Processor address).
    VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
    if (--m_recursions == 0) {
        track_lock_release(m_rank);
        // Publish the critical section's writes before releasing ownership.
        m_lock.store(0, AK::memory_order_release);
    }
    // 0x200 is bit 9 (IF) of x86 EFLAGS: restore the caller's interrupt state.
    if ((prev_flags & 0x200) != 0)
        sti();
    else
        cli();

    Processor::leave_critical();
}
|
||||
|
||||
}
|
|
@@ -300,6 +300,7 @@ if ("${SERENITY_ARCH}" STREQUAL "i686" OR "${SERENITY_ARCH}" STREQUAL "x86_64")
|
|||
${KERNEL_SOURCES}
|
||||
Arch/x86/common/ScopedCritical.cpp
|
||||
Arch/x86/common/SmapDisabler.cpp
|
||||
Arch/x86/common/Spinlock.cpp
|
||||
)
|
||||
|
||||
set(KERNEL_SOURCES
|
||||
|
|
Loading…
Reference in a new issue