ladybird/Kernel/Memory/InodeVMObject.cpp
Andreas Kling 2c72d495a3 Kernel: Use RefPtr instead of LockRefPtr for PhysicalPage
I believe this to be safe, as the main thing that LockRefPtr provides
over RefPtr is safe copying from a shared LockRefPtr instance. I've
inspected the uses of RefPtr<PhysicalPage> and it seems they're all
guarded by external locking. Some of it is less obvious, but this is
an area where we're making continuous headway.
2022-08-24 18:35:41 +02:00
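
To illustrate what "guarded by external locking" means here, a minimal sketch follows; the member function is hypothetical and not part of this file. Copying a RefPtr has no built-in synchronization, so the copy is made while holding the VMObject's own spinlock, the same m_lock the functions below take before touching m_physical_pages.

// Hypothetical illustration only, not part of InodeVMObject.cpp:
// copying a shared RefPtr<PhysicalPage> is safe because the owning
// VMObject's spinlock (the "external locking") is held around the copy.
RefPtr<PhysicalPage> InodeVMObject::page_reference(size_t page_index)
{
    SpinlockLocker locker(m_lock);        // external lock guards the shared instance
    return m_physical_pages[page_index];  // plain RefPtr copy is safe under the lock
}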

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Memory/InodeVMObject.h>

namespace Kernel::Memory {

InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
    : VMObject(move(new_physical_pages))
    , m_inode(inode)
    , m_dirty_pages(move(dirty_pages))
{
}

InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
    : VMObject(move(new_physical_pages))
    , m_inode(other.m_inode)
    , m_dirty_pages(move(dirty_pages))
{
    // Copy the dirty-page state over from the VMObject we were cloned from.
    for (size_t i = 0; i < page_count(); ++i)
        m_dirty_pages.set(i, other.m_dirty_pages.get(i));
}

InodeVMObject::~InodeVMObject() = default;

size_t InodeVMObject::amount_clean() const
{
    size_t count = 0;
    VERIFY(page_count() == m_dirty_pages.size());
    for (size_t i = 0; i < page_count(); ++i) {
        // A page counts as clean if it is resident and not marked dirty.
        if (!m_dirty_pages.get(i) && m_physical_pages[i])
            ++count;
    }
    return count * PAGE_SIZE;
}

size_t InodeVMObject::amount_dirty() const
{
    size_t count = 0;
    for (size_t i = 0; i < m_dirty_pages.size(); ++i) {
        if (m_dirty_pages.get(i))
            ++count;
    }
    return count * PAGE_SIZE;
}

int InodeVMObject::release_all_clean_pages()
{
    SpinlockLocker locker(m_lock);

    int count = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        // Release every resident page that isn't dirty; it can be paged back in from the inode later.
        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
            m_physical_pages[i] = nullptr;
            ++count;
        }
    }
    if (count) {
        // Pages were dropped, so remap every region that maps this VMObject.
        for_each_region([](auto& region) {
            region.remap();
        });
    }
    return count;
}

int InodeVMObject::try_release_clean_pages(int page_amount)
{
    SpinlockLocker locker(m_lock);

    int count = 0;
    // Same as release_all_clean_pages(), but stop after releasing page_amount pages.
    for (size_t i = 0; i < page_count() && count < page_amount; ++i) {
        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
            m_physical_pages[i] = nullptr;
            ++count;
        }
    }
    if (count) {
        for_each_region([](auto& region) {
            region.remap();
        });
    }
    return count;
}

u32 InodeVMObject::writable_mappings() const
{
    u32 count = 0;
    // for_each_region() is non-const, so cast away const for this read-only traversal.
    const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
        if (region.is_writable())
            ++count;
    });
    return count;
}

}