Kernel: Remove "supervisor" bit from PhysicalPage

Instead of each PhysicalPage knowing whether it comes from the
supervisor pages or from the user pages, we can just check in both
sets when freeing a page.

It's just a handful of pointer range checks, nothing expensive.
This commit is contained in:
Andreas Kling 2021-07-11 23:12:32 +02:00
parent ac78f1e812
commit c2792212f4
Notes: sideshowbarker 2024-07-18 09:12:26 +09:00
9 changed files with 39 additions and 53 deletions

View file

@@ -22,7 +22,7 @@ UNMAP_AFTER_INIT DeviceConfigurationSpaceMapping::DeviceConfigurationSpaceMappin
PhysicalAddress segment_lower_addr = mmio_segment.get_paddr();
PhysicalAddress device_physical_mmio_space = segment_lower_addr.offset(
PCI_MMIO_CONFIG_SPACE_SIZE * m_device_address.function() + (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE) * m_device_address.device() + (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE * PCI_MAX_DEVICES_PER_BUS) * (m_device_address.bus() - mmio_segment.get_start_bus()));
m_mapped_region->physical_page_slot(0) = PhysicalPage::create(device_physical_mmio_space, false, false);
m_mapped_region->physical_page_slot(0) = PhysicalPage::create(device_physical_mmio_space, false);
m_mapped_region->remap();
}

View file

@@ -106,7 +106,7 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
{
VERIFY(paddr.page_base() == paddr);
for (size_t i = 0; i < page_count(); ++i)
physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), false, false);
physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), false);
}
AnonymousVMObject::AnonymousVMObject(PhysicalPage& page)

View file

@@ -445,7 +445,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
auto& physical_page_entry = m_physical_page_entries[physical_page_index];
auto physical_page = adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(false, false));
auto physical_page = adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(false));
auto result = kernel_page_tables.set(virtual_page_array_current_page & ~0x1fffff, move(physical_page));
VERIFY(result == AK::HashSetResult::InsertedNewEntry);
@@ -719,9 +719,11 @@ void MemoryManager::uncommit_user_physical_pages(size_t page_count)
m_system_memory_info.user_physical_pages_committed -= page_count;
}
void MemoryManager::deallocate_user_physical_page(PhysicalAddress paddr)
void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
{
ScopedSpinLock lock(s_mm_lock);
// Are we returning a user page?
for (auto& region : m_user_physical_regions) {
if (!region.contains(paddr))
continue;
@@ -736,8 +738,19 @@ void MemoryManager::deallocate_user_physical_page(PhysicalAddress paddr)
return;
}
dmesgln("MM: deallocate_user_physical_page couldn't figure out region for user page @ {}", paddr);
VERIFY_NOT_REACHED();
// If it's not a user page, it should be a supervisor page.
for (auto& region : m_super_physical_regions) {
if (!region.contains(paddr)) {
dbgln("MM: deallocate_supervisor_physical_page: {} not in {} - {}", paddr, region.lower(), region.upper());
continue;
}
region.return_page(paddr);
--m_system_memory_info.super_physical_pages_used;
return;
}
PANIC("MM: deallocate_user_physical_page couldn't figure out region for page @ {}", paddr);
}
RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
@@ -755,7 +768,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
m_system_memory_info.user_physical_pages_uncommitted--;
}
for (auto& region : m_user_physical_regions) {
page = region.take_free_page(false);
page = region.take_free_page();
if (!page.is_null()) {
++m_system_memory_info.user_physical_pages_used;
break;
@@ -816,24 +829,6 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
return page;
}
void MemoryManager::deallocate_supervisor_physical_page(PhysicalAddress paddr)
{
ScopedSpinLock lock(s_mm_lock);
for (auto& region : m_super_physical_regions) {
if (!region.contains(paddr)) {
dbgln("MM: deallocate_supervisor_physical_page: {} not in {} - {}", paddr, region.lower(), region.upper());
continue;
}
region.return_page(paddr);
--m_system_memory_info.super_physical_pages_used;
return;
}
dbgln("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ {}", paddr);
VERIFY_NOT_REACHED();
}
NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment)
{
VERIFY(!(size % PAGE_SIZE));
@@ -842,7 +837,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
NonnullRefPtrVector<PhysicalPage> physical_pages;
for (auto& region : m_super_physical_regions) {
physical_pages = region.take_contiguous_free_pages(count, true, physical_alignment);
physical_pages = region.take_contiguous_free_pages(count, physical_alignment);
if (!physical_pages.is_empty())
continue;
}
@@ -869,7 +864,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
RefPtr<PhysicalPage> page;
for (auto& region : m_super_physical_regions) {
page = region.take_free_page(true);
page = region.take_free_page();
if (!page.is_null())
break;
}

View file

@@ -145,8 +145,7 @@ public:
RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
RefPtr<PhysicalPage> allocate_supervisor_physical_page();
NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment = PAGE_SIZE);
void deallocate_user_physical_page(PhysicalAddress);
void deallocate_supervisor_physical_page(PhysicalAddress);
void deallocate_physical_page(PhysicalAddress);
OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);

View file

@@ -47,7 +47,7 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
#if ARCH(X86_64)
PhysicalAddress boot_pml4t_paddr(virtual_to_low_physical((FlatPtr)boot_pml4t));
dmesgln("MM: boot_pml4t @ {}", boot_pml4t_paddr);
m_pml4t = PhysicalPage::create(boot_pml4t_paddr, true, false);
m_pml4t = PhysicalPage::create(boot_pml4t_paddr, false);
#endif
PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((FlatPtr)boot_pd0));
@@ -55,9 +55,9 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
dmesgln("MM: boot_pdpt @ {}", boot_pdpt_paddr);
dmesgln("MM: boot_pd0 @ {}", boot_pd0_paddr);
dmesgln("MM: boot_pd3 @ {}", boot_pd3_paddr);
m_directory_table = PhysicalPage::create(boot_pdpt_paddr, true, false);
m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, true, false);
m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, true, false);
m_directory_table = PhysicalPage::create(boot_pdpt_paddr, false);
m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, false);
m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, false);
}
PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)

View file

@@ -10,15 +10,14 @@
namespace Kernel {
NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist)
NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool may_return_to_freelist)
{
auto& physical_page_entry = MM.get_physical_page_entry(paddr);
return adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(supervisor, may_return_to_freelist));
return adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(may_return_to_freelist));
}
PhysicalPage::PhysicalPage(bool supervisor, bool may_return_to_freelist)
PhysicalPage::PhysicalPage(bool may_return_to_freelist)
: m_may_return_to_freelist(may_return_to_freelist)
, m_supervisor(supervisor)
{
}
@@ -31,14 +30,8 @@ void PhysicalPage::free_this()
{
if (m_may_return_to_freelist) {
auto paddr = MM.get_physical_address(*this);
bool is_supervisor = m_supervisor;
this->~PhysicalPage(); // delete in place
if (is_supervisor)
MM.deallocate_supervisor_physical_page(paddr);
else
MM.deallocate_user_physical_page(paddr);
MM.deallocate_physical_page(paddr);
} else {
this->~PhysicalPage(); // delete in place
}

View file

@@ -32,7 +32,7 @@ public:
free_this();
}
static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);
static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool may_return_to_freelist = true);
u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
@@ -40,14 +40,13 @@ public:
bool is_lazy_committed_page() const;
private:
PhysicalPage(bool supervisor, bool may_return_to_freelist = true);
explicit PhysicalPage(bool may_return_to_freelist = true);
~PhysicalPage() = default;
void free_this();
Atomic<u32> m_ref_count { 1 };
bool m_may_return_to_freelist { true };
bool m_supervisor { false };
};
struct PhysicalPageEntry {

View file

@@ -55,7 +55,7 @@ PhysicalRegion PhysicalRegion::take_pages_from_beginning(unsigned page_count)
return taken_region;
}
NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment)
NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, size_t physical_alignment)
{
VERIFY(m_pages);
VERIFY(m_used != m_pages);
@@ -66,7 +66,7 @@ NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(siz
auto first_contiguous_page = find_contiguous_free_pages(count, physical_alignment);
for (size_t index = 0; index < count; index++)
physical_pages.append(PhysicalPage::create(m_lower.offset(PAGE_SIZE * (index + first_contiguous_page)), supervisor));
physical_pages.append(PhysicalPage::create(m_lower.offset(PAGE_SIZE * (index + first_contiguous_page))));
return physical_pages;
}
@@ -134,7 +134,7 @@ Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t cou
return {};
}
RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
{
VERIFY(m_pages);
@@ -142,7 +142,7 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
if (!free_index.has_value())
return nullptr;
return PhysicalPage::create(m_lower.offset((PhysicalPtr)free_index.value() * PAGE_SIZE), supervisor);
return PhysicalPage::create(m_lower.offset((PhysicalPtr)free_index.value() * PAGE_SIZE));
}
void PhysicalRegion::free_page_at(PhysicalAddress addr)

View file

@@ -31,8 +31,8 @@ public:
PhysicalRegion take_pages_from_beginning(unsigned);
RefPtr<PhysicalPage> take_free_page(bool supervisor);
NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment = PAGE_SIZE);
RefPtr<PhysicalPage> take_free_page();
NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
void return_page(PhysicalAddress);
private: