Kernel: Move Kernel/Memory/ code into Kernel::Memory namespace

Andreas Kling 2021-08-06 13:49:36 +02:00
parent a1d7ebf85a
commit 93d98d4976
Notes: sideshowbarker 2024-07-18 07:24:58 +09:00
153 changed files with 473 additions and 467 deletions
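The diff below is almost entirely mechanical: declarations that live under Kernel/Memory/ move from the `Kernel` namespace into the nested `Kernel::Memory` namespace, and every call site outside that namespace gains a `Memory::` qualifier. A minimal sketch of the pattern (illustrative only, not a hunk from this commit; `SomeDevice` is a made-up consumer):

```cpp
// Before: Kernel/Memory/Region.h declared its types directly in namespace Kernel.
namespace Kernel {
class Region { /* ... */ };
}

// After: the same declarations are nested one namespace deeper.
namespace Kernel::Memory {
class Region { /* ... */ };
}

// Consumers elsewhere in namespace Kernel now spell out the qualifier,
// e.g. OwnPtr<Region> becomes OwnPtr<Memory::Region>:
namespace Kernel {
class SomeDevice {
    OwnPtr<Memory::Region> m_dma_region; // was: OwnPtr<Region> m_dma_region;
};
}
```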

View file

@@ -37,7 +37,7 @@ private:
// ^IRQHandler
virtual bool handle_irq(const RegisterState&) override;
-OwnPtr<Region> m_acpi_namespace;
+OwnPtr<Memory::Region> m_acpi_namespace;
};
}

View file

@@ -36,15 +36,15 @@ UNMAP_AFTER_INIT MultiProcessorParser::MultiProcessorParser(PhysicalAddress floa
UNMAP_AFTER_INIT void MultiProcessorParser::parse_floating_pointer_data()
{
-auto floating_pointer = map_typed<MultiProcessor::FloatingPointer>(m_floating_pointer);
+auto floating_pointer = Memory::map_typed<MultiProcessor::FloatingPointer>(m_floating_pointer);
m_configuration_table = PhysicalAddress(floating_pointer->physical_address_ptr);
dbgln("Features {}, IMCR? {}", floating_pointer->feature_info[0], (floating_pointer->feature_info[0] & (1 << 7)));
}
UNMAP_AFTER_INIT void MultiProcessorParser::parse_configuration_table()
{
-auto configuration_table_length = map_typed<MultiProcessor::ConfigurationTableHeader>(m_configuration_table)->length;
-auto config_table = map_typed<MultiProcessor::ConfigurationTableHeader>(m_configuration_table, configuration_table_length);
+auto configuration_table_length = Memory::map_typed<MultiProcessor::ConfigurationTableHeader>(m_configuration_table)->length;
+auto config_table = Memory::map_typed<MultiProcessor::ConfigurationTableHeader>(m_configuration_table, configuration_table_length);
size_t entry_count = config_table->entry_count;
auto* entry = config_table->entries;

View file

@@ -48,7 +48,7 @@ KResultOr<size_t> ACPISysFSComponent::read_bytes(off_t offset, size_t count, Use
OwnPtr<KBuffer> ACPISysFSComponent::try_to_generate_buffer() const
{
-auto acpi_blob = map_typed<u8>((m_paddr), m_length);
+auto acpi_blob = Memory::map_typed<u8>((m_paddr), m_length);
return KBuffer::try_create_with_bytes(Span<u8> { acpi_blob.ptr(), m_length });
}
@@ -80,10 +80,10 @@ UNMAP_AFTER_INIT ACPISysFSDirectory::ACPISysFSDirectory()
});
m_components = components;
-auto rsdp = map_typed<Structures::RSDPDescriptor20>(ACPI::Parser::the()->rsdp());
+auto rsdp = Memory::map_typed<Structures::RSDPDescriptor20>(ACPI::Parser::the()->rsdp());
m_components.append(ACPISysFSComponent::create("RSDP", ACPI::Parser::the()->rsdp(), rsdp->base.revision == 0 ? sizeof(Structures::RSDPDescriptor) : rsdp->length));
-auto main_system_description_table = map_typed<Structures::SDTHeader>(ACPI::Parser::the()->main_system_description_table());
+auto main_system_description_table = Memory::map_typed<Structures::SDTHeader>(ACPI::Parser::the()->main_system_description_table());
if (ACPI::Parser::the()->is_xsdt_supported()) {
m_components.append(ACPISysFSComponent::create("XSDT", ACPI::Parser::the()->main_system_description_table(), main_system_description_table->length));
} else {
@@ -94,7 +94,7 @@ UNMAP_AFTER_INIT ACPISysFSDirectory::ACPISysFSDirectory()
void Parser::enumerate_static_tables(Function<void(const StringView&, PhysicalAddress, size_t)> callback)
{
for (auto& p_table : m_sdt_pointers) {
-auto table = map_typed<Structures::SDTHeader>(p_table);
+auto table = Memory::map_typed<Structures::SDTHeader>(p_table);
callback({ table->sig, 4 }, p_table, table->length);
}
}
@@ -122,7 +122,7 @@ UNMAP_AFTER_INIT PhysicalAddress Parser::find_table(const StringView& signature)
{
dbgln_if(ACPI_DEBUG, "ACPI: Calling Find Table method!");
for (auto p_sdt : m_sdt_pointers) {
-auto sdt = map_typed<Structures::SDTHeader>(p_sdt);
+auto sdt = Memory::map_typed<Structures::SDTHeader>(p_sdt);
dbgln_if(ACPI_DEBUG, "ACPI: Examining Table @ {}", p_sdt);
if (!strncmp(sdt->sig, signature.characters_without_null_termination(), 4)) {
dbgln_if(ACPI_DEBUG, "ACPI: Found Table @ {}", p_sdt);
@@ -145,7 +145,7 @@ UNMAP_AFTER_INIT void Parser::init_fadt()
m_fadt = find_table("FACP");
VERIFY(!m_fadt.is_null());
-auto sdt = map_typed<const volatile Structures::FADT>(m_fadt);
+auto sdt = Memory::map_typed<const volatile Structures::FADT>(m_fadt);
dbgln_if(ACPI_DEBUG, "ACPI: FADT @ V{}, {}", &sdt, m_fadt);
@@ -188,7 +188,7 @@ UNMAP_AFTER_INIT void Parser::init_fadt()
bool Parser::can_reboot()
{
-auto fadt = map_typed<Structures::FADT>(m_fadt);
+auto fadt = Memory::map_typed<Structures::FADT>(m_fadt);
if (fadt->h.revision < 2)
return false;
return m_hardware_flags.reset_register_supported;
@@ -224,16 +224,16 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
dbgln("ACPI: Sending value {:x} to {}", value, PhysicalAddress(structure.address));
switch ((GenericAddressStructure::AccessSize)structure.access_size) {
case GenericAddressStructure::AccessSize::Byte:
-*map_typed<u8>(PhysicalAddress(structure.address)) = value;
+*Memory::map_typed<u8>(PhysicalAddress(structure.address)) = value;
break;
case GenericAddressStructure::AccessSize::Word:
-*map_typed<u16>(PhysicalAddress(structure.address)) = value;
+*Memory::map_typed<u16>(PhysicalAddress(structure.address)) = value;
break;
case GenericAddressStructure::AccessSize::DWord:
-*map_typed<u32>(PhysicalAddress(structure.address)) = value;
+*Memory::map_typed<u32>(PhysicalAddress(structure.address)) = value;
break;
case GenericAddressStructure::AccessSize::QWord: {
-*map_typed<u64>(PhysicalAddress(structure.address)) = value;
+*Memory::map_typed<u64>(PhysicalAddress(structure.address)) = value;
break;
}
default:
@@ -265,7 +265,7 @@ bool Parser::validate_reset_register()
{
// According to https://uefi.org/specs/ACPI/6.4/04_ACPI_Hardware_Specification/ACPI_Hardware_Specification.html#reset-register,
// the reset register can only be located in I/O bus, PCI bus or memory-mapped.
-auto fadt = map_typed<Structures::FADT>(m_fadt);
+auto fadt = Memory::map_typed<Structures::FADT>(m_fadt);
return (fadt->reset_reg.address_space == (u8)GenericAddressStructure::AddressSpace::PCIConfigurationSpace || fadt->reset_reg.address_space == (u8)GenericAddressStructure::AddressSpace::SystemMemory || fadt->reset_reg.address_space == (u8)GenericAddressStructure::AddressSpace::SystemIO);
}
@@ -278,7 +278,7 @@ void Parser::try_acpi_reboot()
}
dbgln_if(ACPI_DEBUG, "ACPI: Rebooting, probing FADT ({})", m_fadt);
-auto fadt = map_typed<Structures::FADT>(m_fadt);
+auto fadt = Memory::map_typed<Structures::FADT>(m_fadt);
VERIFY(validate_reset_register());
access_generic_address(fadt->reset_reg, fadt->reset_value);
Processor::halt();
@@ -293,14 +293,14 @@ size_t Parser::get_table_size(PhysicalAddress table_header)
{
InterruptDisabler disabler;
dbgln_if(ACPI_DEBUG, "ACPI: Checking SDT Length");
-return map_typed<Structures::SDTHeader>(table_header)->length;
+return Memory::map_typed<Structures::SDTHeader>(table_header)->length;
}
u8 Parser::get_table_revision(PhysicalAddress table_header)
{
InterruptDisabler disabler;
dbgln_if(ACPI_DEBUG, "ACPI: Checking SDT Revision");
-return map_typed<Structures::SDTHeader>(table_header)->revision;
+return Memory::map_typed<Structures::SDTHeader>(table_header)->revision;
}
UNMAP_AFTER_INIT void Parser::initialize_main_system_description_table()
@@ -310,7 +310,7 @@ UNMAP_AFTER_INIT void Parser::initialize_main_system_description_table()
auto length = get_table_size(m_main_system_description_table);
auto revision = get_table_revision(m_main_system_description_table);
-auto sdt = map_typed<Structures::SDTHeader>(m_main_system_description_table, length);
+auto sdt = Memory::map_typed<Structures::SDTHeader>(m_main_system_description_table, length);
dmesgln("ACPI: Main Description Table valid? {}", validate_table(*sdt, length));
@@ -337,7 +337,7 @@ UNMAP_AFTER_INIT void Parser::initialize_main_system_description_table()
UNMAP_AFTER_INIT void Parser::locate_main_system_description_table()
{
-auto rsdp = map_typed<Structures::RSDPDescriptor20>(m_rsdp);
+auto rsdp = Memory::map_typed<Structures::RSDPDescriptor20>(m_rsdp);
if (rsdp->base.revision == 0) {
m_xsdt_supported = false;
} else if (rsdp->base.revision >= 2) {
@@ -387,7 +387,7 @@ UNMAP_AFTER_INIT PhysicalAddress StaticParsing::find_table(PhysicalAddress rsdp_
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
VERIFY(signature.length() == 4);
-auto rsdp = map_typed<Structures::RSDPDescriptor20>(rsdp_address);
+auto rsdp = Memory::map_typed<Structures::RSDPDescriptor20>(rsdp_address);
if (rsdp->base.revision == 0)
return search_table_in_rsdt(PhysicalAddress(rsdp->base.rsdt_ptr), signature);
@@ -405,7 +405,7 @@ UNMAP_AFTER_INIT static PhysicalAddress search_table_in_xsdt(PhysicalAddress xsd
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
VERIFY(signature.length() == 4);
-auto xsdt = map_typed<Structures::XSDT>(xsdt_address);
+auto xsdt = Memory::map_typed<Structures::XSDT>(xsdt_address);
for (size_t i = 0; i < ((xsdt->h.length - sizeof(Structures::SDTHeader)) / sizeof(u64)); ++i) {
if (match_table_signature(PhysicalAddress((PhysicalPtr)xsdt->table_ptrs[i]), signature))
@@ -419,7 +419,7 @@ static bool match_table_signature(PhysicalAddress table_header, const StringView
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
VERIFY(signature.length() == 4);
-auto table = map_typed<Structures::RSDT>(table_header);
+auto table = Memory::map_typed<Structures::RSDT>(table_header);
return !strncmp(table->h.sig, signature.characters_without_null_termination(), 4);
}
@@ -428,7 +428,7 @@ UNMAP_AFTER_INIT static PhysicalAddress search_table_in_rsdt(PhysicalAddress rsd
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
VERIFY(signature.length() == 4);
-auto rsdt = map_typed<Structures::RSDT>(rsdt_address);
+auto rsdt = Memory::map_typed<Structures::RSDT>(rsdt_address);
for (u32 i = 0; i < ((rsdt->h.length - sizeof(Structures::SDTHeader)) / sizeof(u32)); i++) {
if (match_table_signature(PhysicalAddress((PhysicalPtr)rsdt->table_ptrs[i]), signature))

View file

@@ -53,7 +53,7 @@ UNMAP_AFTER_INIT DMIEntryPointExposedBlob::DMIEntryPointExposedBlob(PhysicalAddr
OwnPtr<KBuffer> DMIEntryPointExposedBlob::try_to_generate_buffer() const
{
-auto dmi_blob = map_typed<u8>((m_dmi_entry_point), m_dmi_entry_point_length);
+auto dmi_blob = Memory::map_typed<u8>((m_dmi_entry_point), m_dmi_entry_point_length);
return KBuffer::try_create_with_bytes(Span<u8> { dmi_blob.ptr(), m_dmi_entry_point_length });
}
@@ -71,14 +71,14 @@ UNMAP_AFTER_INIT SMBIOSExposedTable::SMBIOSExposedTable(PhysicalAddress smbios_s
OwnPtr<KBuffer> SMBIOSExposedTable::try_to_generate_buffer() const
{
-auto dmi_blob = map_typed<u8>((m_smbios_structure_table), m_smbios_structure_table_length);
+auto dmi_blob = Memory::map_typed<u8>((m_smbios_structure_table), m_smbios_structure_table_length);
return KBuffer::try_create_with_bytes(Span<u8> { dmi_blob.ptr(), m_smbios_structure_table_length });
}
UNMAP_AFTER_INIT void BIOSSysFSDirectory::set_dmi_64_bit_entry_initialization_values()
{
dbgln("BIOSSysFSDirectory: SMBIOS 64bit Entry point @ {}", m_dmi_entry_point);
-auto smbios_entry = map_typed<SMBIOS::EntryPoint64bit>(m_dmi_entry_point, SMBIOS_SEARCH_AREA_SIZE);
+auto smbios_entry = Memory::map_typed<SMBIOS::EntryPoint64bit>(m_dmi_entry_point, SMBIOS_SEARCH_AREA_SIZE);
m_smbios_structure_table = PhysicalAddress(smbios_entry.ptr()->table_ptr);
m_dmi_entry_point_length = smbios_entry.ptr()->length;
m_smbios_structure_table_length = smbios_entry.ptr()->table_maximum_size;
@@ -87,7 +87,7 @@ UNMAP_AFTER_INIT void BIOSSysFSDirectory::set_dmi_64_bit_entry_initialization_va
UNMAP_AFTER_INIT void BIOSSysFSDirectory::set_dmi_32_bit_entry_initialization_values()
{
dbgln("BIOSSysFSDirectory: SMBIOS 32bit Entry point @ {}", m_dmi_entry_point);
-auto smbios_entry = map_typed<SMBIOS::EntryPoint32bit>(m_dmi_entry_point, SMBIOS_SEARCH_AREA_SIZE);
+auto smbios_entry = Memory::map_typed<SMBIOS::EntryPoint32bit>(m_dmi_entry_point, SMBIOS_SEARCH_AREA_SIZE);
m_smbios_structure_table = PhysicalAddress(smbios_entry.ptr()->legacy_structure.smbios_table_ptr);
m_dmi_entry_point_length = smbios_entry.ptr()->length;
m_smbios_structure_table_length = smbios_entry.ptr()->legacy_structure.smboios_table_length;
@@ -130,7 +130,7 @@ UNMAP_AFTER_INIT void BIOSSysFSDirectory::initialize_dmi_exposer()
OwnPtr<KBuffer> BIOSSysFSDirectory::smbios_structure_table() const
{
-auto dmi_blob = map_typed<u8>(m_smbios_structure_table, m_smbios_structure_table_length);
+auto dmi_blob = Memory::map_typed<u8>(m_smbios_structure_table, m_smbios_structure_table_length);
return KBuffer::try_create_with_bytes(Span<u8> { dmi_blob.ptr(), m_smbios_structure_table_length });
}
@@ -160,26 +160,26 @@ UNMAP_AFTER_INIT Optional<PhysicalAddress> BIOSSysFSDirectory::find_dmi_entry32b
return map_bios().find_chunk_starting_with("_SM_", 16);
}
-MappedROM map_bios()
+Memory::MappedROM map_bios()
{
-MappedROM mapping;
+Memory::MappedROM mapping;
mapping.size = 128 * KiB;
mapping.paddr = PhysicalAddress(0xe0000);
-mapping.region = MM.allocate_kernel_region(mapping.paddr, page_round_up(mapping.size), {}, Region::Access::Read);
+mapping.region = MM.allocate_kernel_region(mapping.paddr, Memory::page_round_up(mapping.size), {}, Memory::Region::Access::Read);
return mapping;
}
-MappedROM map_ebda()
+Memory::MappedROM map_ebda()
{
-auto ebda_segment_ptr = map_typed<u16>(PhysicalAddress(0x40e));
-auto ebda_length_ptr_b0 = map_typed<u8>(PhysicalAddress(0x413));
-auto ebda_length_ptr_b1 = map_typed<u8>(PhysicalAddress(0x414));
+auto ebda_segment_ptr = Memory::map_typed<u16>(PhysicalAddress(0x40e));
+auto ebda_length_ptr_b0 = Memory::map_typed<u8>(PhysicalAddress(0x413));
+auto ebda_length_ptr_b1 = Memory::map_typed<u8>(PhysicalAddress(0x414));
PhysicalAddress ebda_paddr(*ebda_segment_ptr << 4);
size_t ebda_size = (*ebda_length_ptr_b1 << 8) | *ebda_length_ptr_b0;
-MappedROM mapping;
-mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), page_round_up(ebda_size), {}, Region::Access::Read);
+Memory::MappedROM mapping;
+mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), Memory::page_round_up(ebda_size), {}, Memory::Region::Access::Read);
mapping.offset = ebda_paddr.offset_in_page();
mapping.size = ebda_size;
mapping.paddr = ebda_paddr;

View file

@@ -55,8 +55,8 @@ struct [[gnu::packed]] EntryPoint64bit {
namespace Kernel {
-MappedROM map_bios();
-MappedROM map_ebda();
+Memory::MappedROM map_bios();
+Memory::MappedROM map_ebda();
class BIOSSysFSComponent : public SysFSComponent {
public:

View file

@@ -8,13 +8,11 @@
#include <AK/Badge.h>
#include <AK/Types.h>
+#include <Kernel/Forward.h>
#include <Kernel/PhysicalAddress.h>
namespace Kernel {
-class PageDirectory;
-class PageTableEntry;
class PageDirectoryEntry {
public:
PhysicalPtr page_table_base() const { return PhysicalAddress::physical_page_base(m_raw); }
@@ -28,7 +26,7 @@ public:
void clear() { m_raw = 0; }
u64 raw() const { return m_raw; }
-void copy_from(Badge<PageDirectory>, const PageDirectoryEntry& other) { m_raw = other.m_raw; }
+void copy_from(Badge<Memory::PageDirectory>, const PageDirectoryEntry& other) { m_raw = other.m_raw; }
enum Flags {
Present = 1 << 0,

View file

@@ -57,7 +57,7 @@ struct ProcessorMessage {
ProcessorMessage* next; // only valid while in the pool
alignas(CallbackFunction) u8 callback_storage[sizeof(CallbackFunction)];
struct {
-const PageDirectory* page_directory;
+Memory::PageDirectory const* page_directory;
u8* ptr;
size_t page_count;
} flush_tlb;
@@ -211,7 +211,7 @@ public:
}
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
-static void flush_tlb(const PageDirectory*, VirtualAddress, size_t);
+static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
Descriptor& get_gdt_entry(u16 selector);
void flush_gdt();
@@ -391,7 +391,7 @@ public:
bool smp_process_pending_messages();
static void smp_unicast(u32 cpu, Function<void()>, bool async);
-static void smp_broadcast_flush_tlb(const PageDirectory*, VirtualAddress, size_t);
+static void smp_broadcast_flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
static u32 smp_wake_n_idle_processors(u32 wake_count);
static void deferred_call_queue(Function<void()> callback);

View file

@@ -469,7 +469,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
if (max_frames != 0 && count > max_frames)
break;
-if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
+if (Memory::is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
if (!copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]) || !retaddr)
break;
stack_trace.append(retaddr);
@@ -545,7 +545,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
ProcessPagingScope paging_scope(thread.process());
auto& regs = thread.regs();
FlatPtr* stack_top = reinterpret_cast<FlatPtr*>(regs.sp());
-if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
+if (Memory::is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
frame_ptr = 0;
} else {
@@ -657,9 +657,9 @@ void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
}
}
-void Processor::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
+void Processor::flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
{
-if (s_smp_enabled && (!is_user_address(vaddr) || Process::current()->thread_count() > 1))
+if (s_smp_enabled && (!Memory::is_user_address(vaddr) || Process::current()->thread_count() > 1))
smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
else
flush_tlb_local(vaddr, page_count);
@@ -818,9 +818,9 @@ bool Processor::smp_process_pending_messages()
msg->invoke_callback();
break;
case ProcessorMessage::FlushTlb:
-if (is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
+if (Memory::is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
// We assume that we don't cross into kernel land!
-VERIFY(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
+VERIFY(Memory::is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
// This processor isn't using this page directory right now, we can ignore this request
dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
@@ -949,7 +949,7 @@ void Processor::smp_unicast(u32 cpu, Function<void()> callback, bool async)
smp_unicast_message(cpu, msg, async);
}
-void Processor::smp_broadcast_flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
+void Processor::smp_broadcast_flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
{
auto& msg = smp_get_from_pool();
msg.async = false;

View file

@@ -57,7 +57,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
{
dmesgln("PCI: Using MMIO for PCI configuration space access");
-auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Region::Access::Read | Region::Access::Write);
+auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Memory::Region::Access::Read | Memory::Region::Access::Write);
dbgln_if(PCI_DEBUG, "PCI: Checking MCFG Table length to choose the correct mapping size");
auto* sdt = (ACPI::Structures::SDTHeader*)checkup_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
u32 length = sdt->length;
@@ -66,7 +66,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
dbgln("PCI: MCFG, length: {}, revision: {}", length, revision);
checkup_region->unmap();
-auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Region::Access::Read | Region::Access::Write);
+auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), Memory::page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Memory::Region::Access::Read | Memory::Region::Access::Write);
auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
dbgln_if(PCI_DEBUG, "PCI: Checking MCFG @ {}, {}", VirtualAddress(&mcfg), PhysicalAddress(p_mcfg.get()));
@@ -89,7 +89,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
// PCI::PhysicalID objects to the vector, because get_capabilities calls
// PCI::read16 which will need this region to be mapped.
u8 start_bus = m_segments.get(0).value().get_start_bus();
-m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(0, start_bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Region::Access::Read | Region::Access::Write);
+m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(0, start_bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::Read | Memory::Region::Access::Write);
m_mapped_bus = start_bus;
dbgln_if(PCI_DEBUG, "PCI: First PCI ECAM Mapped region for starting bus {} @ {} {}", start_bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());
@@ -102,7 +102,7 @@ void MMIOAccess::map_bus_region(u32 segment, u8 bus)
VERIFY(m_access_lock.is_locked());
if (m_mapped_bus == bus)
return;
-m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(segment, bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Region::Access::Read | Region::Access::Write);
+m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(segment, bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::Read | Memory::Region::Access::Write);
m_mapped_bus = bus;
dbgln_if(PCI_DEBUG, "PCI: New PCI ECAM Mapped region for bus {} @ {} {}", bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());
}

View file

@@ -46,7 +46,7 @@ private:
VirtualAddress get_device_configuration_space(Address address);
SpinLock<u8> m_access_lock;
u8 m_mapped_bus { 0 };
-OwnPtr<Region> m_mapped_region;
+OwnPtr<Memory::Region> m_mapped_region;
protected:
explicit MMIOAccess(PhysicalAddress mcfg);

View file

@@ -18,12 +18,12 @@ namespace PCI {
UNMAP_AFTER_INIT DeviceConfigurationSpaceMapping::DeviceConfigurationSpaceMapping(Address device_address, const MMIOAccess::MMIOSegment& mmio_segment)
: m_device_address(device_address)
-, m_mapped_region(MM.allocate_kernel_region(page_round_up(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Region::Access::Read | Region::Access::Write).release_nonnull())
+, m_mapped_region(MM.allocate_kernel_region(Memory::page_round_up(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Memory::Region::Access::Read | Memory::Region::Access::Write).release_nonnull())
{
PhysicalAddress segment_lower_addr = mmio_segment.get_paddr();
PhysicalAddress device_physical_mmio_space = segment_lower_addr.offset(
PCI_MMIO_CONFIG_SPACE_SIZE * m_device_address.function() + (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE) * m_device_address.device() + (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE * PCI_MAX_DEVICES_PER_BUS) * (m_device_address.bus() - mmio_segment.get_start_bus()));
-m_mapped_region->physical_page_slot(0) = PhysicalPage::create(device_physical_mmio_space, MayReturnToFreeList::No);
+m_mapped_region->physical_page_slot(0) = Memory::PhysicalPage::create(device_physical_mmio_space, Memory::MayReturnToFreeList::No);
m_mapped_region->remap();
}

View file

@@ -30,7 +30,7 @@ public:
private:
Address m_device_address;
-NonnullOwnPtr<Region> m_mapped_region;
+NonnullOwnPtr<Memory::Region> m_mapped_region;
};
class WindowedMMIOAccess final : public MMIOAccess {

View file

@@ -289,8 +289,8 @@ void UHCIController::reset()
}
// Let's allocate the physical page for the Frame List (which is 4KiB aligned)
-auto framelist_vmobj = AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
-m_framelist = MemoryManager::the().allocate_kernel_region_with_vmobject(*framelist_vmobj, PAGE_SIZE, "UHCI Framelist", Region::Access::Write);
+auto framelist_vmobj = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
+m_framelist = MM.allocate_kernel_region_with_vmobject(*framelist_vmobj, PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write);
dbgln("UHCI: Allocated framelist at physical address {}", m_framelist->physical_page(0)->paddr());
dbgln("UHCI: Framelist is at virtual address {}", m_framelist->vaddr());
write_sofmod(64); // 1mS frame time
@@ -311,8 +311,8 @@ UNMAP_AFTER_INIT void UHCIController::create_structures()
{
// Let's allocate memory for both the QH and TD pools
// First the QH pool and all of the Interrupt QH's
-auto qh_pool_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
-m_qh_pool = MemoryManager::the().allocate_kernel_region_with_vmobject(*qh_pool_vmobject, 2 * PAGE_SIZE, "UHCI Queue Head Pool", Region::Access::Write);
+auto qh_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
+m_qh_pool = MM.allocate_kernel_region_with_vmobject(*qh_pool_vmobject, 2 * PAGE_SIZE, "UHCI Queue Head Pool", Memory::Region::Access::Write);
memset(m_qh_pool->vaddr().as_ptr(), 0, 2 * PAGE_SIZE); // Zero out both pages
// Let's populate our free qh list (so we have some we can allocate later on)
@@ -331,8 +331,8 @@ UNMAP_AFTER_INIT void UHCIController::create_structures()
m_dummy_qh = allocate_queue_head();
// Now the Transfer Descriptor pool
-auto td_pool_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
-m_td_pool = MemoryManager::the().allocate_kernel_region_with_vmobject(*td_pool_vmobject, 2 * PAGE_SIZE, "UHCI Transfer Descriptor Pool", Region::Access::Write);
+auto td_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
+m_td_pool = MM.allocate_kernel_region_with_vmobject(*td_pool_vmobject, 2 * PAGE_SIZE, "UHCI Transfer Descriptor Pool", Memory::Region::Access::Write);
memset(m_td_pool->vaddr().as_ptr(), 0, 2 * PAGE_SIZE);
// Set up the Isochronous Transfer Descriptor list

View file

@@ -90,9 +90,9 @@ private:
QueueHead* m_bulk_qh;
QueueHead* m_dummy_qh; // Needed for PIIX4 hack
-OwnPtr<Region> m_framelist;
-OwnPtr<Region> m_qh_pool;
-OwnPtr<Region> m_td_pool;
+OwnPtr<Memory::Region> m_framelist;
+OwnPtr<Memory::Region> m_qh_pool;
+OwnPtr<Memory::Region> m_td_pool;
Array<RefPtr<USB::Device>, 2> m_devices; // Devices connected to the root ports (of which there are two)
};

View file

@@ -11,20 +11,20 @@ namespace Kernel::USB {
RefPtr<Transfer> Transfer::try_create(Pipe& pipe, u16 len)
{
-auto vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
+auto vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
if (!vmobject)
return nullptr;
return AK::try_create<Transfer>(pipe, len, *vmobject);
}
-Transfer::Transfer(Pipe& pipe, u16 len, AnonymousVMObject& vmobject)
+Transfer::Transfer(Pipe& pipe, u16 len, Memory::AnonymousVMObject& vmobject)
: m_pipe(pipe)
, m_transfer_data_size(len)
{
// Initialize data buffer for transfer
// This will definitely need to be refactored in the future, I doubt this will scale well...
-m_data_buffer = MemoryManager::the().allocate_kernel_region_with_vmobject(vmobject, PAGE_SIZE, "USB Transfer Buffer", Region::Access::Read | Region::Access::Write);
+m_data_buffer = MM.allocate_kernel_region_with_vmobject(vmobject, PAGE_SIZE, "USB Transfer Buffer", Memory::Region::Access::Read | Memory::Region::Access::Write);
}
Transfer::~Transfer()

View file

@@ -23,7 +23,7 @@ public:
public:
Transfer() = delete;
-Transfer(Pipe& pipe, u16 len, AnonymousVMObject&);
+Transfer(Pipe& pipe, u16 len, Memory::AnonymousVMObject&);
~Transfer();
void set_setup_packet(const USBRequestData& request);
@@ -41,11 +41,11 @@ public:
bool error_occurred() const { return m_error_occurred; }
private:
-Pipe& m_pipe; // Pipe that initiated this transfer
-USBRequestData m_request; // USB request
-OwnPtr<Region> m_data_buffer; // DMA Data buffer for transaction
-u16 m_transfer_data_size { 0 }; // Size of the transfer's data stage
-bool m_complete { false }; // Has this transfer been completed?
-bool m_error_occurred { false }; // Did an error occur during this transfer?
+Pipe& m_pipe; // Pipe that initiated this transfer
+USBRequestData m_request; // USB request
+OwnPtr<Memory::Region> m_data_buffer; // DMA Data buffer for transaction
+u16 m_transfer_data_size { 0 }; // Size of the transfer's data stage
+bool m_complete { false }; // Has this transfer been completed?
+bool m_error_occurred { false }; // Did an error occur during this transfer?
};
}

View file

@@ -129,7 +129,7 @@ KResult KCOVDevice::ioctl(FileDescription&, unsigned request, Userspace<void*> a
return return_value;
}
-KResultOr<Region*> KCOVDevice::mmap(Process& process, FileDescription&, const Range& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> KCOVDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
{
auto pid = process.pid();
auto maybe_kcov_instance = proc_instance->get(pid);

View file

@@ -22,7 +22,7 @@ public:
static void free_process();
// ^File
-KResultOr<Region*> mmap(Process&, FileDescription&, const Range&, u64 offset, int prot, bool shared) override;
+KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
KResultOr<NonnullRefPtr<FileDescription>> open(int options) override;
// ^Device

View file

@@ -22,20 +22,20 @@ KResult KCOVInstance::buffer_allocate(size_t buffer_size_in_entries)
// first entry contains index of last PC
this->m_buffer_size_in_entries = buffer_size_in_entries - 1;
-this->m_buffer_size_in_bytes = page_round_up(buffer_size_in_entries * KCOV_ENTRY_SIZE);
+this->m_buffer_size_in_bytes = Memory::page_round_up(buffer_size_in_entries * KCOV_ENTRY_SIZE);
// one single vmobject is representing the buffer
// - we allocate one kernel region using that vmobject
// - when an mmap call comes in, we allocate another userspace region,
// backed by the same vmobject
-this->vmobject = AnonymousVMObject::try_create_with_size(
+this->vmobject = Memory::AnonymousVMObject::try_create_with_size(
this->m_buffer_size_in_bytes, AllocationStrategy::AllocateNow);
if (!this->vmobject)
return ENOMEM;
this->m_kernel_region = MM.allocate_kernel_region_with_vmobject(
*this->vmobject, this->m_buffer_size_in_bytes, String::formatted("kcov_{}", this->m_pid),
-Region::Access::Read | Region::Access::Write);
+Memory::Region::Access::Read | Memory::Region::Access::Write);
if (!this->m_kernel_region)
return ENOMEM;

View file

@@ -42,7 +42,7 @@ public:
TRACING = 2,
} state;
-RefPtr<AnonymousVMObject> vmobject;
+RefPtr<Memory::AnonymousVMObject> vmobject;
private:
ProcessID m_pid = { 0 };
@@ -51,7 +51,7 @@ private:
kcov_pc_t* m_buffer = { nullptr };
// Here to ensure it's not garbage collected at the end of open()
-OwnPtr<Region> m_kernel_region;
+OwnPtr<Memory::Region> m_kernel_region;
};
}

View file

@@ -37,7 +37,7 @@ void MemoryDevice::did_seek(FileDescription&, off_t)
TODO();
}
-KResultOr<Region*> MemoryDevice::mmap(Process& process, FileDescription&, const Range& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> MemoryDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
{
auto viewed_address = PhysicalAddress(offset);
@@ -47,7 +47,7 @@ KResultOr<Region*> MemoryDevice::mmap(Process& process, FileDescription&, const
return EINVAL;
}
-auto vmobject = AnonymousVMObject::try_create_for_physical_range(viewed_address, range.size());
+auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(viewed_address, range.size());
if (!vmobject)
return ENOMEM;
dbgln("MemoryDevice: Mapped physical memory at {} for range of {} bytes", viewed_address, range.size());

View file

@@ -19,7 +19,7 @@ public:
static NonnullRefPtr<MemoryDevice> must_create();
~MemoryDevice();
-virtual KResultOr<Region*> mmap(Process&, FileDescription&, const Range&, u64 offset, int prot, bool shared) override;
+virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
// ^Device
virtual mode_t required_mode() const override { return 0660; }
@@ -36,7 +36,7 @@ private:
virtual void did_seek(FileDescription&, off_t) override;
-bool is_allowed_range(PhysicalAddress, const Range&) const;
+bool is_allowed_range(PhysicalAddress, Memory::Range const&) const;
};
}

View file

@@ -238,10 +238,10 @@ KResultOr<size_t> SB16::write(FileDescription&, u64, const UserOrKernelBuffer& d
if (!page)
return ENOMEM;
auto nonnull_page = page.release_nonnull();
-auto vmobject = AnonymousVMObject::try_create_with_physical_pages({ &nonnull_page, 1 });
+auto vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages({ &nonnull_page, 1 });
if (!vmobject)
return ENOMEM;
-m_dma_region = MM.allocate_kernel_region_with_vmobject(*vmobject, PAGE_SIZE, "SB16 DMA buffer", Region::Access::Write);
+m_dma_region = MM.allocate_kernel_region_with_vmobject(*vmobject, PAGE_SIZE, "SB16 DMA buffer", Memory::Region::Access::Write);
if (!m_dma_region)
return ENOMEM;
}

View file

@@ -55,7 +55,7 @@ private:
void set_irq_register(u8 irq_number);
void set_irq_line(u8 irq_number);
-OwnPtr<Region> m_dma_region;
+OwnPtr<Memory::Region> m_dma_region;
int m_major_version { 0 };
WaitQueue m_irq_queue;

View file

@@ -19,7 +19,7 @@ inline void DoubleBuffer::compute_lockfree_metadata()
OwnPtr<DoubleBuffer> DoubleBuffer::try_create(size_t capacity)
{
-auto storage = KBuffer::try_create_with_size(capacity * 2, Region::Access::Read | Region::Access::Write, "DoubleBuffer");
+auto storage = KBuffer::try_create_with_size(capacity * 2, Memory::Region::Access::Read | Memory::Region::Access::Write, "DoubleBuffer");
if (!storage)
return {};

View file

@@ -10,7 +10,7 @@
namespace Kernel {
-AnonymousFile::AnonymousFile(NonnullRefPtr<AnonymousVMObject> vmobject)
+AnonymousFile::AnonymousFile(NonnullRefPtr<Memory::AnonymousVMObject> vmobject)
: m_vmobject(move(vmobject))
{
}
@@ -19,7 +19,7 @@ AnonymousFile::~AnonymousFile()
{
}
-KResultOr<Region*> AnonymousFile::mmap(Process& process, FileDescription&, const Range& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> AnonymousFile::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
{
if (offset != 0)
return EINVAL;

View file

@@ -13,14 +13,14 @@ namespace Kernel {
class AnonymousFile final : public File {
public:
-static RefPtr<AnonymousFile> create(NonnullRefPtr<AnonymousVMObject> vmobject)
+static RefPtr<AnonymousFile> create(NonnullRefPtr<Memory::AnonymousVMObject> vmobject)
{
return adopt_ref_if_nonnull(new (nothrow) AnonymousFile(move(vmobject)));
}
virtual ~AnonymousFile() override;
-virtual KResultOr<Region*> mmap(Process&, FileDescription&, const Range&, u64 offset, int prot, bool shared) override;
+virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
private:
virtual StringView class_name() const override { return "AnonymousFile"; }
@@ -30,9 +30,9 @@ private:
virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override { return ENOTSUP; }
virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) override { return ENOTSUP; }
-explicit AnonymousFile(NonnullRefPtr<AnonymousVMObject>);
+explicit AnonymousFile(NonnullRefPtr<Memory::AnonymousVMObject>);
-NonnullRefPtr<AnonymousVMObject> m_vmobject;
+NonnullRefPtr<Memory::AnonymousVMObject> m_vmobject;
};
}

View file

@@ -132,7 +132,7 @@ bool Ext2FS::initialize()
auto blocks_to_read = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
BlockIndex first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
-m_cached_group_descriptor_table = KBuffer::try_create_with_size(block_size() * blocks_to_read, Region::Access::Read | Region::Access::Write, "Ext2FS: Block group descriptors");
+m_cached_group_descriptor_table = KBuffer::try_create_with_size(block_size() * blocks_to_read, Memory::Region::Access::Read | Memory::Region::Access::Write, "Ext2FS: Block group descriptors");
if (!m_cached_group_descriptor_table) {
dbgln("Ext2FS: Failed to allocate memory for group descriptor table");
return false;
@@ -1505,7 +1505,7 @@ KResultOr<Ext2FS::CachedBitmap*> Ext2FS::get_bitmap_block(BlockIndex bitmap_bloc
return cached_bitmap;
}
-auto block = KBuffer::try_create_with_size(block_size(), Region::Access::Read | Region::Access::Write, "Ext2FS: Cached bitmap block");
+auto block = KBuffer::try_create_with_size(block_size(), Memory::Region::Access::Read | Memory::Region::Access::Write, "Ext2FS: Cached bitmap block");
if (!block)
return ENOMEM;
auto buffer = UserOrKernelBuffer::for_kernel_buffer(block->data());

View file

@@ -40,7 +40,7 @@ KResult File::ioctl(FileDescription&, unsigned, Userspace<void*>)
return ENOTTY;
}
-KResultOr<Region*> File::mmap(Process&, FileDescription&, const Range&, u64, int, bool)
+KResultOr<Memory::Region*> File::mmap(Process&, FileDescription&, Memory::Range const&, u64, int, bool)
{
return ENODEV;
}

View file

@@ -88,7 +88,7 @@ public:
virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) = 0;
virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) = 0;
virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg);
-virtual KResultOr<Region*> mmap(Process&, FileDescription&, const Range&, u64 offset, int prot, bool shared);
+virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared);
virtual KResult stat(::stat&) const { return EBADF; }
virtual String absolute_path(const FileDescription&) const = 0;

View file

@@ -380,7 +380,7 @@ InodeMetadata FileDescription::metadata() const
return {};
}
-KResultOr<Region*> FileDescription::mmap(Process& process, const Range& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> FileDescription::mmap(Process& process, Memory::Range const& range, u64 offset, int prot, bool shared)
{
MutexLocker locker(m_lock);
return m_file->mmap(process, *this, range, offset, prot, shared);

View file

@@ -96,7 +96,7 @@ public:
Custody* custody() { return m_custody.ptr(); }
const Custody* custody() const { return m_custody.ptr(); }
-KResultOr<Region*> mmap(Process&, const Range&, u64 offset, int prot, bool shared);
+KResultOr<Memory::Region*> mmap(Process&, Memory::Range const&, u64 offset, int prot, bool shared);
bool is_blocking() const { return m_is_blocking; }
void set_blocking(bool b) { m_is_blocking = b; }

View file

@@ -141,7 +141,7 @@ KResult Inode::decrement_link_count()
return ENOTIMPL;
}
-void Inode::set_shared_vmobject(SharedInodeVMObject& vmobject)
+void Inode::set_shared_vmobject(Memory::SharedInodeVMObject& vmobject)
{
MutexLocker locker(m_inode_lock);
m_shared_vmobject = vmobject;
@@ -271,7 +271,7 @@ KResult Inode::prepare_to_write_data()
return KSuccess;
}
-RefPtr<SharedInodeVMObject> Inode::shared_vmobject() const
+RefPtr<Memory::SharedInodeVMObject> Inode::shared_vmobject() const
{
MutexLocker locker(m_inode_lock);
return m_shared_vmobject.strong_ref();

View file

@@ -84,8 +84,8 @@ public:
void will_be_destroyed();
-void set_shared_vmobject(SharedInodeVMObject&);
-RefPtr<SharedInodeVMObject> shared_vmobject() const;
+void set_shared_vmobject(Memory::SharedInodeVMObject&);
+RefPtr<Memory::SharedInodeVMObject> shared_vmobject() const;
static void sync();
@@ -116,7 +116,7 @@ protected:
private:
FileSystem& m_file_system;
InodeIndex m_index { 0 };
-WeakPtr<SharedInodeVMObject> m_shared_vmobject;
+WeakPtr<Memory::SharedInodeVMObject> m_shared_vmobject;
RefPtr<LocalSocket> m_socket;
HashTable<InodeWatcher*> m_watchers;
bool m_metadata_dirty { false };

View file

@@ -93,14 +93,14 @@ KResult InodeFile::ioctl(FileDescription& description, unsigned request, Userspa
}
}
-KResultOr<Region*> InodeFile::mmap(Process& process, FileDescription& description, const Range& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> InodeFile::mmap(Process& process, FileDescription& description, Memory::Range const& range, u64 offset, int prot, bool shared)
{
// FIXME: If PROT_EXEC, check that the underlying file system isn't mounted noexec.
-RefPtr<InodeVMObject> vmobject;
+RefPtr<Memory::InodeVMObject> vmobject;
if (shared)
-vmobject = SharedInodeVMObject::try_create_with_inode(inode());
+vmobject = Memory::SharedInodeVMObject::try_create_with_inode(inode());
else
-vmobject = PrivateInodeVMObject::try_create_with_inode(inode());
+vmobject = Memory::PrivateInodeVMObject::try_create_with_inode(inode());
if (!vmobject)
return ENOMEM;
return process.space().allocate_region_with_vmobject(range, vmobject.release_nonnull(), offset, description.absolute_path(), prot, shared);

View file

@@ -33,7 +33,7 @@ public:
virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override;
virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) override;
virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg) override;
-virtual KResultOr<Region*> mmap(Process&, FileDescription&, const Range&, u64 offset, int prot, bool shared) override;
+virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
virtual KResult stat(::stat& buffer) const override { return inode().metadata().stat(buffer); }
virtual String absolute_path(const FileDescription&) const override;

View file

@@ -560,7 +560,7 @@ KResult Plan9FS::read_and_dispatch_one_message()
if (result.is_error())
return result;
-auto buffer = KBuffer::try_create_with_size(header.size, Region::Access::Read | Region::Access::Write);
+auto buffer = KBuffer::try_create_with_size(header.size, Memory::Region::Access::Read | Memory::Region::Access::Write);
if (!buffer)
return ENOMEM;
// Copy the already read header into the buffer.

View file

@@ -31,15 +31,10 @@ class InodeWatcher;
class KBuffer;
class KResult;
class LocalSocket;
-class MemoryManager;
class Mutex;
-class MappedROM;
class MasterPTY;
class Mount;
-class PageDirectory;
class PerformanceEventBuffer;
-class PhysicalPage;
-class PhysicalRegion;
class ProcFS;
class ProcFSDirectoryInode;
class ProcFSExposedComponent;
@@ -51,15 +46,10 @@ class ProcFSSystemBoolean;
class ProcFSSystemDirectory;
class Process;
class ProcessGroup;
-class Range;
-class RangeAllocator;
class RecursiveSpinLock;
-class Region;
class Scheduler;
class SchedulerData;
-class SharedInodeVMObject;
class Socket;
-class Space;
class SysFS;
class SysFSDirectory;
class SysFSBusDirectory;
@@ -71,11 +61,27 @@ class Thread;
class ThreadTracer;
class UDPSocket;
class UserOrKernelBuffer;
-class VMObject;
class VirtualFileSystem;
class WaitQueue;
class WorkQueue;
+namespace Memory {
+class AnonymousVMObject;
+class InodeVMObject;
+class MappedROM;
+class MemoryManager;
+class PageDirectory;
+class PhysicalPage;
+class PhysicalRegion;
+class PrivateInodeVMObject;
+class Range;
+class RangeAllocator;
+class Region;
+class SharedInodeVMObject;
+class Space;
+class VMObject;
+}
template<typename BaseType>
class SpinLock;
template<typename LockType>

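The Kernel/Forward.h hunk above is the hub of the change: the forward declarations for the moved types leave the top-level `Kernel` namespace and reappear inside a nested `namespace Memory` block. Headers that only hold pointers or references can keep compiling against these forward declarations; a hypothetical consumer might look like this (sketch only; `FrameProducer` is not from the commit):

```cpp
#include <AK/OwnPtr.h>
#include <Kernel/Forward.h>

namespace Kernel {

class FrameProducer {
public:
    ~FrameProducer(); // defined out of line, where Memory::Region is complete

private:
    // Memory::Region is only forward-declared via Kernel/Forward.h, so this
    // header does not need to include the full Region definition.
    OwnPtr<Memory::Region> m_buffer_region;
};

}
```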
View file

@@ -16,9 +16,9 @@ namespace Kernel {
class FutexQueue : public Thread::BlockCondition
, public RefCounted<FutexQueue>
-, public VMObjectDeletedHandler {
+, public Memory::VMObjectDeletedHandler {
public:
-FutexQueue(FlatPtr user_address_or_offset, VMObject* vmobject = nullptr);
+FutexQueue(FlatPtr user_address_or_offset, Memory::VMObject* vmobject = nullptr);
virtual ~FutexQueue();
u32 wake_n_requeue(u32, const Function<FutexQueue*()>&, u32, bool&, bool&);
@@ -31,7 +31,7 @@ public:
return Thread::current()->block<Thread::FutexBlocker>(timeout, *this, forward<Args>(args)...);
}
-virtual void vmobject_deleted(VMObject&) override;
+virtual void vmobject_deleted(Memory::VMObject&) override;
bool queue_imminent_wait();
void did_remove();
@@ -51,7 +51,7 @@ private:
// For private futexes we just use the user space address.
// But for global futexes we use the offset into the VMObject
const FlatPtr m_user_address_or_offset;
-WeakPtr<VMObject> m_vmobject;
+WeakPtr<Memory::VMObject> m_vmobject;
const bool m_is_global;
size_t m_imminent_waits { 1 }; // We only create this object if we're going to be waiting, so start out with 1
bool m_was_removed { false };

View file

@@ -376,7 +376,7 @@ private:
kmalloc_stats stats;
get_kmalloc_stats(stats);
-auto system_memory = MemoryManager::the().get_system_memory_info();
+auto system_memory = MM.get_system_memory_info();
JsonObjectSerializer<KBufferBuilder> json { builder };
json.add("kmalloc_allocated", stats.bytes_allocated);

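A few hunks, like the one above, also shorten `MemoryManager::the()` to `MM`. `MM` is SerenityOS's convenience macro for the memory-manager singleton, defined in Kernel/Memory/MemoryManager.h; after this commit it expands to the namespaced accessor, roughly (paraphrased, not a hunk from this page):

```cpp
// Kernel/Memory/MemoryManager.h (paraphrased)
#define MM Kernel::Memory::MemoryManager::the()
```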
View file

@@ -65,7 +65,7 @@ UNMAP_AFTER_INIT NonnullRefPtr<BochsGraphicsAdapter> BochsGraphicsAdapter::initi
UNMAP_AFTER_INIT BochsGraphicsAdapter::BochsGraphicsAdapter(PCI::Address pci_address)
: PCI::DeviceController(pci_address)
, m_mmio_registers(PCI::get_BAR2(pci_address) & 0xfffffff0)
-, m_registers(map_typed_writable<BochsDisplayMMIORegisters volatile>(m_mmio_registers))
+, m_registers(Memory::map_typed_writable<BochsDisplayMMIORegisters volatile>(m_mmio_registers))
{
// We assume safe resolutio is 1024x768x32
m_framebuffer_console = Graphics::ContiguousFramebufferConsole::initialize(PhysicalAddress(PCI::get_BAR0(pci_address) & 0xfffffff0), 1024, 768, 1024 * sizeof(u32));

View file

@@ -57,7 +57,7 @@ private:
void set_y_offset(size_t);
PhysicalAddress m_mmio_registers;
-TypedMapping<BochsDisplayMMIORegisters volatile> m_registers;
+Memory::TypedMapping<BochsDisplayMMIORegisters volatile> m_registers;
RefPtr<FramebufferDevice> m_framebuffer_device;
RefPtr<Graphics::GenericFramebufferConsole> m_framebuffer_console;
SpinLock<u8> m_console_mode_switch_lock;

View file

@@ -27,8 +27,8 @@ void ContiguousFramebufferConsole::set_resolution(size_t width, size_t height, s
m_height = height;
m_pitch = pitch;
-dbgln("Framebuffer Console: taking {} bytes", page_round_up(pitch * height));
-m_framebuffer_region = MM.allocate_kernel_region(m_framebuffer_address, page_round_up(pitch * height), "Framebuffer Console", Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes);
+dbgln("Framebuffer Console: taking {} bytes", Memory::page_round_up(pitch * height));
+m_framebuffer_region = MM.allocate_kernel_region(m_framebuffer_address, Memory::page_round_up(pitch * height), "Framebuffer Console", Memory::Region::Access::Read | Memory::Region::Access::Write, Memory::Region::Cacheable::Yes);
VERIFY(m_framebuffer_region);
// Just to start cleanly, we clean the entire framebuffer

View file

@@ -22,7 +22,7 @@ private:
{
return m_framebuffer_region->vaddr().as_ptr();
}
-OwnPtr<Region> m_framebuffer_region;
+OwnPtr<Memory::Region> m_framebuffer_region;
ContiguousFramebufferConsole(PhysicalAddress, size_t width, size_t height, size_t pitch);
PhysicalAddress m_framebuffer_address;
};

View file

@@ -11,7 +11,7 @@ namespace Kernel::Graphics {
UNMAP_AFTER_INIT VGAConsole::VGAConsole(const VGACompatibleAdapter& adapter, Mode mode, size_t width, size_t height)
: Console(width, height)
-, m_vga_region(MM.allocate_kernel_region(PhysicalAddress(0xa0000), page_round_up(0xc0000 - 0xa0000), "VGA Display", Region::Access::Read | Region::Access::Write).release_nonnull())
+, m_vga_region(MM.allocate_kernel_region(PhysicalAddress(0xa0000), Memory::page_round_up(0xc0000 - 0xa0000), "VGA Display", Memory::Region::Access::Read | Memory::Region::Access::Write).release_nonnull())
, m_adapter(adapter)
, m_mode(mode)
{

View file

@@ -33,7 +33,7 @@ public:
protected:
VGAConsole(const VGACompatibleAdapter&, Mode, size_t width, size_t height);
-NonnullOwnPtr<Region> m_vga_region;
+NonnullOwnPtr<Memory::Region> m_vga_region;
NonnullRefPtr<VGACompatibleAdapter> m_adapter;
const Mode m_mode;
};

View file

@@ -25,7 +25,7 @@ NonnullRefPtr<FramebufferDevice> FramebufferDevice::create(const GraphicsDevice&
return adopt_ref(*new FramebufferDevice(adapter, output_port_index, paddr, width, height, pitch));
}
-KResultOr<Region*> FramebufferDevice::mmap(Process& process, FileDescription&, const Range& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
{
ScopedSpinLock lock(m_activation_lock);
REQUIRE_PROMISE(video);
@@ -33,31 +33,31 @@ KResultOr<Region*> FramebufferDevice::mmap(Process& process, FileDescription&, c
return ENODEV;
if (offset != 0)
return ENXIO;
-if (range.size() != page_round_up(framebuffer_size_in_bytes()))
+if (range.size() != Memory::page_round_up(framebuffer_size_in_bytes()))
return EOVERFLOW;
-auto vmobject = AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, page_round_up(framebuffer_size_in_bytes()));
+auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
if (!vmobject)
return ENOMEM;
m_userspace_real_framebuffer_vmobject = vmobject;
-m_real_framebuffer_vmobject = AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, page_round_up(framebuffer_size_in_bytes()));
+m_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
if (!m_real_framebuffer_vmobject)
return ENOMEM;
-m_swapped_framebuffer_vmobject = AnonymousVMObject::try_create_with_size(page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
+m_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
if (!m_swapped_framebuffer_vmobject)
return ENOMEM;
-m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Region::Access::Read | Region::Access::Write);
+m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::Read | Memory::Region::Access::Write);
if (!m_real_framebuffer_region)
return ENOMEM;
-m_swapped_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Region::Access::Read | Region::Access::Write);
+m_swapped_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Memory::Region::Access::Read | Memory::Region::Access::Write);
if (!m_swapped_framebuffer_region)
return ENOMEM;
-RefPtr<VMObject> chosen_vmobject;
+RefPtr<Memory::VMObject> chosen_vmobject;
if (m_graphical_writes_enabled) {
chosen_vmobject = m_real_framebuffer_vmobject;
} else {
@@ -81,7 +81,7 @@ void FramebufferDevice::deactivate_writes()
ScopedSpinLock lock(m_activation_lock);
if (!m_userspace_framebuffer_region)
return;
-memcpy(m_swapped_framebuffer_region->vaddr().as_ptr(), m_real_framebuffer_region->vaddr().as_ptr(), page_round_up(framebuffer_size_in_bytes()));
+memcpy(m_swapped_framebuffer_region->vaddr().as_ptr(), m_real_framebuffer_region->vaddr().as_ptr(), Memory::page_round_up(framebuffer_size_in_bytes()));
auto vmobject = m_swapped_framebuffer_vmobject;
m_userspace_framebuffer_region->set_vmobject(vmobject.release_nonnull());
m_userspace_framebuffer_region->remap();
@@ -95,7 +95,7 @@ void FramebufferDevice::activate_writes()
// restore the image we had in the void area
// FIXME: if we happen to have multiple Framebuffers that are writing to that location
// we will experience glitches...
-memcpy(m_real_framebuffer_region->vaddr().as_ptr(), m_swapped_framebuffer_region->vaddr().as_ptr(), page_round_up(framebuffer_size_in_bytes()));
+memcpy(m_real_framebuffer_region->vaddr().as_ptr(), m_swapped_framebuffer_region->vaddr().as_ptr(), Memory::page_round_up(framebuffer_size_in_bytes()));
auto vmobject = m_userspace_real_framebuffer_vmobject;
m_userspace_framebuffer_region->set_vmobject(vmobject.release_nonnull());
m_userspace_framebuffer_region->remap();
@@ -109,13 +109,13 @@ String FramebufferDevice::device_name() const
UNMAP_AFTER_INIT void FramebufferDevice::initialize()
{
-m_real_framebuffer_vmobject = AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, page_round_up(framebuffer_size_in_bytes()));
+m_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
VERIFY(m_real_framebuffer_vmobject);
-m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Region::Access::Read | Region::Access::Write);
+m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::Read | Memory::Region::Access::Write);
VERIFY(m_real_framebuffer_region);
-m_swapped_framebuffer_vmobject = AnonymousVMObject::try_create_with_size(page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
+m_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
VERIFY(m_swapped_framebuffer_vmobject);
-m_swapped_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Region::Access::Read | Region::Access::Write);
+m_swapped_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Memory::Region::Access::Read | Memory::Region::Access::Write);
VERIFY(m_swapped_framebuffer_region);
}

View file

@@ -23,7 +23,7 @@ public:
static NonnullRefPtr<FramebufferDevice> create(const GraphicsDevice&, size_t, PhysicalAddress, size_t, size_t, size_t);
virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg) override;
-virtual KResultOr<Region*> mmap(Process&, FileDescription&, const Range&, u64 offset, int prot, bool shared) override;
+virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
// ^Device
virtual mode_t required_mode() const override { return 0660; }
@@ -55,15 +55,15 @@ private:
SpinLock<u8> m_activation_lock;
-RefPtr<AnonymousVMObject> m_real_framebuffer_vmobject;
-RefPtr<AnonymousVMObject> m_swapped_framebuffer_vmobject;
-OwnPtr<Region> m_real_framebuffer_region;
-OwnPtr<Region> m_swapped_framebuffer_region;
+RefPtr<Memory::AnonymousVMObject> m_real_framebuffer_vmobject;
+RefPtr<Memory::AnonymousVMObject> m_swapped_framebuffer_vmobject;
+OwnPtr<Memory::Region> m_real_framebuffer_region;
+OwnPtr<Memory::Region> m_swapped_framebuffer_region;
bool m_graphical_writes_enabled { true };
-RefPtr<AnonymousVMObject> m_userspace_real_framebuffer_vmobject;
-Region* m_userspace_framebuffer_region { nullptr };
+RefPtr<Memory::AnonymousVMObject> m_userspace_real_framebuffer_vmobject;
+Memory::Region* m_userspace_framebuffer_region { nullptr };
size_t m_y_offset { 0 };
size_t m_output_port_index;

View file

@@ -32,7 +32,7 @@ bool GraphicsManagement::is_initialized()
}
UNMAP_AFTER_INIT GraphicsManagement::GraphicsManagement()
-: m_vga_font_region(MM.allocate_kernel_region(PAGE_SIZE, "VGA font", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow).release_nonnull())
+: m_vga_font_region(MM.allocate_kernel_region(PAGE_SIZE, "VGA font", Memory::Region::Access::Read | Memory::Region::Access::Write, AllocationStrategy::AllocateNow).release_nonnull())
, m_framebuffer_devices_allowed(!kernel_command_line().is_no_framebuffer_devices_mode())
{
}

View file

@@ -49,7 +49,7 @@ public:
private:
bool determine_and_initialize_graphics_device(const PCI::Address& address, PCI::ID id);
NonnullRefPtrVector<GraphicsDevice> m_graphics_devices;
-NonnullOwnPtr<Region> m_vga_font_region;
+NonnullOwnPtr<Memory::Region> m_vga_font_region;
RefPtr<Graphics::Console> m_console;
// Note: there could be multiple VGA adapters, but only one can operate in VGA mode

View file

@@ -189,7 +189,7 @@ IntelNativeGraphicsAdapter::IntelNativeGraphicsAdapter(PCI::Address address)
VERIFY(bar0_space_size == 0x80000);
dmesgln("Intel Native Graphics Adapter @ {}, MMIO @ {}, space size is {:x} bytes", address, PhysicalAddress(PCI::get_BAR0(address)), bar0_space_size);
dmesgln("Intel Native Graphics Adapter @ {}, framebuffer @ {}", address, PhysicalAddress(PCI::get_BAR2(address)));
-m_registers_region = MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR0(address)).page_base(), bar0_space_size, "Intel Native Graphics Registers", Region::Access::Read | Region::Access::Write);
+m_registers_region = MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR0(address)).page_base(), bar0_space_size, "Intel Native Graphics Registers", Memory::Region::Access::Read | Memory::Region::Access::Write);
PCI::enable_bus_mastering(address);
{
ScopedSpinLock control_lock(m_control_lock);

View file

@@ -168,7 +168,7 @@ private:
Graphics::VideoInfoBlock m_crt_edid;
const PhysicalAddress m_registers;
const PhysicalAddress m_framebuffer_addr;
OwnPtr<Region> m_registers_region;
OwnPtr<Memory::Region> m_registers_region;
};
}

View file

@@ -32,15 +32,15 @@ void FrameBufferDevice::create_framebuffer()
// Allocate frame buffer for both front and back
auto& info = display_info();
m_buffer_size = calculate_framebuffer_size(info.rect.width, info.rect.height);
m_framebuffer = MM.allocate_kernel_region(m_buffer_size * 2, String::formatted("VirtGPU FrameBuffer #{}", m_scanout.value()), Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
auto write_sink_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No).release_nonnull();
m_framebuffer = MM.allocate_kernel_region(m_buffer_size * 2, String::formatted("VirtGPU FrameBuffer #{}", m_scanout.value()), Memory::Region::Access::Read | Memory::Region::Access::Write, AllocationStrategy::AllocateNow);
auto write_sink_page = MM.allocate_user_physical_page(Memory::MemoryManager::ShouldZeroFill::No).release_nonnull();
auto num_needed_pages = m_framebuffer->vmobject().page_count();
NonnullRefPtrVector<PhysicalPage> pages;
NonnullRefPtrVector<Memory::PhysicalPage> pages;
for (auto i = 0u; i < num_needed_pages; ++i) {
pages.append(write_sink_page);
}
m_framebuffer_sink_vmobject = AnonymousVMObject::try_create_with_physical_pages(pages.span());
m_framebuffer_sink_vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span());
MutexLocker locker(m_gpu.operation_lock());
m_current_buffer = &buffer_from_index(m_last_set_buffer_index.load());
@@ -241,7 +241,7 @@ KResult FrameBufferDevice::ioctl(FileDescription&, unsigned request, Userspace<void*> arg)
};
}
KResultOr<Region*> FrameBufferDevice::mmap(Process& process, FileDescription&, const Range& range, u64 offset, int prot, bool shared)
KResultOr<Memory::Region*> FrameBufferDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
{
REQUIRE_PROMISE(video);
if (!shared)

View file

@@ -39,7 +39,7 @@ public:
static size_t calculate_framebuffer_size(size_t width, size_t height)
{
// VirtIO resources can only map on page boundaries!
return page_round_up(sizeof(u32) * width * height);
return Memory::page_round_up(sizeof(u32) * width * height);
}
void flush_dirty_window(Protocol::Rect const&, Buffer&);
@@ -61,7 +61,7 @@ private:
void set_buffer(int);
virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg) override;
virtual KResultOr<Region*> mmap(Process&, FileDescription&, const Range&, u64 offset, int prot, bool shared) override;
virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
virtual bool can_read(const FileDescription&, size_t) const override { return true; }
virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override { return EINVAL; }
virtual bool can_write(const FileDescription&, size_t) const override { return true; }
@@ -88,12 +88,12 @@ private:
Atomic<int, AK::memory_order_relaxed> m_last_set_buffer_index { 0 };
Buffer m_main_buffer;
Buffer m_back_buffer;
OwnPtr<Region> m_framebuffer;
RefPtr<VMObject> m_framebuffer_sink_vmobject;
OwnPtr<Memory::Region> m_framebuffer;
RefPtr<Memory::VMObject> m_framebuffer_sink_vmobject;
size_t m_buffer_size { 0 };
bool m_are_writes_active { true };
// FIXME: This needs to be cleaned up if the WindowServer exits while we are in a tty
WeakPtr<Region> m_userspace_mmap_region;
WeakPtr<Memory::Region> m_userspace_mmap_region;
};
}
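
For the page-boundary rule in calculate_framebuffer_size above: page_round_up rounds a byte count up to the next multiple of PAGE_SIZE. A worked sketch, assuming the usual mask-based definition and 4 KiB pages:

constexpr size_t EXAMPLE_PAGE_SIZE = 4096; // stand-in for the kernel's PAGE_SIZE

constexpr size_t example_page_round_up(size_t x)
{
    return (x + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1);
}

// An 800x600 XRGB framebuffer needs 4 * 800 * 600 = 1'920'000 bytes,
// i.e. 468.75 pages, which rounds up to 469 pages:
static_assert(example_page_round_up(4 * 800 * 600) == 469 * EXAMPLE_PAGE_SIZE);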

View file

@@ -17,7 +17,7 @@ namespace Kernel::Graphics::VirtIOGPU {
GPU::GPU(PCI::Address address)
: VirtIODevice(address, "GPU")
, m_scratch_space(MM.allocate_contiguous_kernel_region(32 * PAGE_SIZE, "VirtGPU Scratch Space", Region::Access::Read | Region::Access::Write))
, m_scratch_space(MM.allocate_contiguous_kernel_region(32 * PAGE_SIZE, "VirtGPU Scratch Space", Memory::Region::Access::Read | Memory::Region::Access::Write))
{
VERIFY(!!m_scratch_space);
if (auto cfg = get_config(ConfigurationType::Device)) {
@@ -138,7 +138,7 @@ ResourceID GPU::create_2d_resource(Protocol::Rect rect)
return resource_id;
}
void GPU::ensure_backing_storage(Region const& region, size_t buffer_offset, size_t buffer_length, ResourceID resource_id)
void GPU::ensure_backing_storage(Memory::Region const& region, size_t buffer_offset, size_t buffer_length, ResourceID resource_id)
{
VERIFY(m_operation_lock.is_locked());
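
The scratch space above is allocated with allocate_contiguous_kernel_region: the buffers placed there are handed to the device by physical address, and one contiguous run keeps that bookkeeping trivial. The call shape in isolation, sketched with a hypothetical name:

// Hypothetical: a physically contiguous 32-page window for device-visible buffers.
auto scratch = MM.allocate_contiguous_kernel_region(
    32 * PAGE_SIZE,
    "Demo Scratch Space",
    Memory::Region::Access::Read | Memory::Region::Access::Write);
VERIFY(scratch);
auto device_visible_base = scratch->physical_page(0)->paddr();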

View file

@@ -102,7 +102,7 @@ private:
void query_display_information();
ResourceID create_2d_resource(Protocol::Rect rect);
void delete_resource(ResourceID resource_id);
void ensure_backing_storage(Region const& region, size_t buffer_offset, size_t buffer_length, ResourceID resource_id);
void ensure_backing_storage(Memory::Region const& region, size_t buffer_offset, size_t buffer_length, ResourceID resource_id);
void detach_backing_storage(ResourceID resource_id);
void set_scanout_resource(ScanoutID scanout, ResourceID resource_id, Protocol::Rect rect);
void transfer_framebuffer_data_to_host(ScanoutID scanout, Protocol::Rect const& rect, ResourceID resource_id);
@@ -118,7 +118,7 @@ private:
// Synchronous commands
WaitQueue m_outstanding_request;
Mutex m_operation_lock;
OwnPtr<Region> m_scratch_space;
OwnPtr<Memory::Region> m_scratch_space;
};
}

View file

@@ -112,7 +112,7 @@ static SlabAllocator<128> s_slab_allocator_128;
static SlabAllocator<256> s_slab_allocator_256;
#if ARCH(I386)
static_assert(sizeof(Region) <= s_slab_allocator_128.slab_size());
static_assert(sizeof(Memory::Region) <= s_slab_allocator_128.slab_size());
#endif
template<typename Callback>

View file

@@ -47,7 +47,7 @@ struct KmallocGlobalHeap {
bool m_adding { false };
bool add_memory(size_t allocation_request)
{
if (!MemoryManager::is_initialized()) {
if (!Memory::MemoryManager::is_initialized()) {
if constexpr (KMALLOC_DEBUG) {
dmesgln("kmalloc: Cannot expand heap before MM is initialized!");
}
@@ -94,12 +94,12 @@ struct KmallocGlobalHeap {
// was big enough to likely satisfy the request
if (subheap.free_bytes() < allocation_request) {
// Looks like we probably need more
size_t memory_size = page_round_up(decltype(m_global_heap.m_heap)::calculate_memory_for_bytes(allocation_request));
size_t memory_size = Memory::page_round_up(decltype(m_global_heap.m_heap)::calculate_memory_for_bytes(allocation_request));
// Add some more to the new heap. We're already using it for other
// allocations not including the original allocation_request
// that triggered heap expansion. If we don't allocate
memory_size += 1 * MiB;
region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Memory::Region::Access::Read | Memory::Region::Access::Write, AllocationStrategy::AllocateNow);
if (region) {
dbgln("kmalloc: Adding even more memory to heap at {}, bytes: {}", region->vaddr(), region->size());
@@ -162,8 +162,8 @@ struct KmallocGlobalHeap {
typedef ExpandableHeap<CHUNK_SIZE, KMALLOC_SCRUB_BYTE, KFREE_SCRUB_BYTE, ExpandGlobalHeap> HeapType;
HeapType m_heap;
NonnullOwnPtrVector<Region> m_subheap_memory;
OwnPtr<Region> m_backup_memory;
NonnullOwnPtrVector<Memory::Region> m_subheap_memory;
OwnPtr<Memory::Region> m_backup_memory;
KmallocGlobalHeap(u8* memory, size_t memory_size)
: m_heap(memory, memory_size, ExpandGlobalHeap(*this))
@@ -173,7 +173,7 @@
{
if (m_backup_memory)
return;
m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Memory::Region::Access::Read | Memory::Region::Access::Write, AllocationStrategy::AllocateNow);
}
size_t backup_memory_bytes() const
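
To make the sizing in the expansion path concrete: the new subheap gets the page-rounded cost of the triggering request plus a flat 1 MiB of headroom, so allocations arriving while the heap grows do not immediately force another expansion. A rough walk-through with hypothetical numbers, ignoring the heap's per-chunk bookkeeping:

// Hypothetical request, for illustration only.
size_t allocation_request = 300 * KiB;                          // 75 pages, already aligned
size_t memory_size = Memory::page_round_up(allocation_request); // stays 307'200 bytes
memory_size += 1 * MiB;                                         // headroom for concurrent allocations
// => MM.allocate_kernel_region(1'355'776, "kmalloc subheap", ...), i.e. 1324 KiB.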

View file

@@ -228,7 +228,7 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
dbgln_if(APIC_DEBUG, "Initializing APIC, base: {}", apic_base);
set_base(apic_base);
m_apic_base = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Region::Access::Read | Region::Access::Write);
m_apic_base = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::Read | Memory::Region::Access::Write);
if (!m_apic_base) {
dbgln("APIC: Failed to allocate memory for APIC base");
return false;
@@ -245,7 +245,7 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
return false;
}
auto madt = map_typed<ACPI::Structures::MADT>(madt_address);
auto madt = Memory::map_typed<ACPI::Structures::MADT>(madt_address);
size_t entry_index = 0;
size_t entries_length = madt->h.length - sizeof(ACPI::Structures::MADT);
auto* madt_entry = madt->entries;
@@ -283,13 +283,13 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
// Also account for the data appended to:
// * aps_to_enable u32 values for ap_cpu_init_stacks
// * aps_to_enable u32 values for ap_cpu_init_processor_info_array
auto apic_startup_region = MM.allocate_kernel_region_identity(PhysicalAddress(0x8000), page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))), {}, Region::Access::Read | Region::Access::Write | Region::Access::Execute);
auto apic_startup_region = MM.allocate_kernel_region_identity(PhysicalAddress(0x8000), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))), {}, Memory::Region::Access::Read | Memory::Region::Access::Write | Memory::Region::Access::Execute);
memcpy(apic_startup_region->vaddr().as_ptr(), reinterpret_cast<const void*>(apic_ap_start), apic_ap_start_size);
// Allocate enough stacks for all APs
Vector<OwnPtr<Region>> apic_ap_stacks;
Vector<OwnPtr<Memory::Region>> apic_ap_stacks;
for (u32 i = 0; i < aps_to_enable; i++) {
auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Memory::Region::Access::Read | Memory::Region::Access::Write, AllocationStrategy::AllocateNow);
if (!stack_region) {
dbgln("APIC: Failed to allocate stack for AP #{}", i);
return;

View file

@@ -89,7 +89,7 @@ private:
u32 high() const { return m_high; }
};
OwnPtr<Region> m_apic_base;
OwnPtr<Memory::Region> m_apic_base;
Vector<OwnPtr<Processor>> m_ap_processor_info;
Vector<Thread*> m_ap_idle_threads;
Atomic<u8> m_apic_ap_count { 0 };

View file

@@ -25,7 +25,7 @@ enum DeliveryMode {
UNMAP_AFTER_INIT IOAPIC::IOAPIC(PhysicalAddress address, u32 gsi_base)
: m_address(address)
, m_regs(map_typed_writable<ioapic_mmio_regs>(m_address))
, m_regs(Memory::map_typed_writable<ioapic_mmio_regs>(m_address))
, m_gsi_base(gsi_base)
, m_id((read_register(0x0) >> 24) & 0xFF)
, m_version(read_register(0x1) & 0xFF)

View file

@@ -78,7 +78,7 @@ private:
void isa_identity_map(int index);
PhysicalAddress m_address;
mutable TypedMapping<ioapic_mmio_regs> m_regs;
mutable Memory::TypedMapping<ioapic_mmio_regs> m_regs;
u32 m_gsi_base;
u8 m_id;
u8 m_version;
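
TypedMapping wraps a physical window so MMIO registers can be accessed through a typed pointer, which is how the IOAPIC constructor above maps its select/window register pair. A sketch under the new spelling; the register layout and base address here are illustrative, not the kernel's definitions:

struct example_mmio_regs {
    volatile u32 select;
    u32 reserved[3];
    volatile u32 window;
};

// 0xfec00000 is the conventional IOAPIC base; real code takes it from the MADT.
auto regs = Memory::map_typed_writable<example_mmio_regs>(PhysicalAddress(0xfec00000));
regs->select = 0x1;                // IOAPICVER register index
u32 version = regs->window & 0xFF; // low byte holds the version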

View file

@@ -183,7 +183,7 @@ UNMAP_AFTER_INIT void InterruptManagement::switch_to_ioapic_mode()
UNMAP_AFTER_INIT void InterruptManagement::locate_apic_data()
{
VERIFY(!m_madt.is_null());
auto madt = map_typed<ACPI::Structures::MADT>(m_madt);
auto madt = Memory::map_typed<ACPI::Structures::MADT>(m_madt);
int irq_controller_count = 0;
if (madt->flags & PCAT_COMPAT_FLAG) {

View file

@@ -27,17 +27,17 @@ namespace Kernel {
class KBufferImpl : public RefCounted<KBufferImpl> {
public:
static RefPtr<KBufferImpl> try_create_with_size(size_t size, Region::Access access, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
static RefPtr<KBufferImpl> try_create_with_size(size_t size, Memory::Region::Access access, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
{
auto region = MM.allocate_kernel_region(page_round_up(size), name, access, strategy);
auto region = MM.allocate_kernel_region(Memory::page_round_up(size), name, access, strategy);
if (!region)
return nullptr;
return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(region.release_nonnull(), size, strategy));
}
static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, Region::Access access, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, Memory::Region::Access access, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
{
auto region = MM.allocate_kernel_region(page_round_up(bytes.size()), name, access, strategy);
auto region = MM.allocate_kernel_region(Memory::page_round_up(bytes.size()), name, access, strategy);
if (!region)
return nullptr;
memcpy(region->vaddr().as_ptr(), bytes.data(), bytes.size());
@@ -45,12 +45,12 @@ public:
return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(region.release_nonnull(), bytes.size(), strategy));
}
static RefPtr<KBufferImpl> create_with_size(size_t size, Region::Access access, StringView name, AllocationStrategy strategy = AllocationStrategy::Reserve)
static RefPtr<KBufferImpl> create_with_size(size_t size, Memory::Region::Access access, StringView name, AllocationStrategy strategy = AllocationStrategy::Reserve)
{
return try_create_with_size(size, access, name, strategy);
}
static RefPtr<KBufferImpl> copy(const void* data, size_t size, Region::Access access, StringView name)
static RefPtr<KBufferImpl> copy(const void* data, size_t size, Memory::Region::Access access, StringView name)
{
auto buffer = create_with_size(size, access, name, AllocationStrategy::AllocateNow);
if (!buffer)
@@ -61,7 +61,7 @@ public:
[[nodiscard]] bool expand(size_t new_capacity)
{
auto new_region = MM.allocate_kernel_region(page_round_up(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
auto new_region = MM.allocate_kernel_region(Memory::page_round_up(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
if (!new_region)
return false;
if (m_size > 0)
@@ -81,11 +81,11 @@ public:
m_size = size;
}
[[nodiscard]] const Region& region() const { return *m_region; }
[[nodiscard]] Region& region() { return *m_region; }
[[nodiscard]] Memory::Region const& region() const { return *m_region; }
[[nodiscard]] Memory::Region& region() { return *m_region; }
private:
explicit KBufferImpl(NonnullOwnPtr<Region>&& region, size_t size, AllocationStrategy strategy)
explicit KBufferImpl(NonnullOwnPtr<Memory::Region>&& region, size_t size, AllocationStrategy strategy)
: m_size(size)
, m_allocation_strategy(strategy)
, m_region(move(region))
@@ -94,7 +94,7 @@ private:
size_t m_size { 0 };
AllocationStrategy m_allocation_strategy { AllocationStrategy::Reserve };
NonnullOwnPtr<Region> m_region;
NonnullOwnPtr<Memory::Region> m_region;
};
class [[nodiscard]] KBuffer {
@@ -104,7 +104,7 @@ public:
{
}
[[nodiscard]] static OwnPtr<KBuffer> try_create_with_size(size_t size, Region::Access access = Region::Access::Read | Region::Access::Write, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
[[nodiscard]] static OwnPtr<KBuffer> try_create_with_size(size_t size, Memory::Region::Access access = Memory::Region::Access::Read | Memory::Region::Access::Write, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
{
auto impl = KBufferImpl::try_create_with_size(size, access, name, strategy);
if (!impl)
@@ -112,7 +112,7 @@ public:
return adopt_own_if_nonnull(new (nothrow) KBuffer(impl.release_nonnull()));
}
[[nodiscard]] static OwnPtr<KBuffer> try_create_with_bytes(ReadonlyBytes bytes, Region::Access access = Region::Access::Read | Region::Access::Write, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
[[nodiscard]] static OwnPtr<KBuffer> try_create_with_bytes(ReadonlyBytes bytes, Memory::Region::Access access = Memory::Region::Access::Read | Memory::Region::Access::Write, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
{
auto impl = KBufferImpl::try_create_with_bytes(bytes, access, name, strategy);
if (!impl)
@@ -120,7 +120,7 @@ public:
return adopt_own_if_nonnull(new (nothrow) KBuffer(impl.release_nonnull()));
}
[[nodiscard]] static KBuffer copy(const void* data, size_t size, Region::Access access = Region::Access::Read | Region::Access::Write, StringView name = "KBuffer")
[[nodiscard]] static KBuffer copy(const void* data, size_t size, Memory::Region::Access access = Memory::Region::Access::Read | Memory::Region::Access::Write, StringView name = "KBuffer")
{
return KBuffer(KBufferImpl::copy(data, size, access, name));
}
@@ -141,7 +141,7 @@ public:
[[nodiscard]] const KBufferImpl& impl() const { return *m_impl; }
[[nodiscard]] RefPtr<KBufferImpl> take_impl() { return move(m_impl); }
KBuffer(const ByteBuffer& buffer, Region::Access access = Region::Access::Read | Region::Access::Write, StringView name = "KBuffer")
KBuffer(const ByteBuffer& buffer, Memory::Region::Access access = Memory::Region::Access::Read | Memory::Region::Access::Write, StringView name = "KBuffer")
: m_impl(KBufferImpl::copy(buffer.data(), buffer.size(), access, name))
{
}
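
Call sites keep the same shape after the rename; only the access flags gain the Memory:: prefix. A usage sketch with a hypothetical buffer name and size, inside a function returning KResult:

// Hypothetical: an 8 KiB read/write scratch buffer.
auto buffer = KBuffer::try_create_with_size(
    8 * KiB,
    Memory::Region::Access::Read | Memory::Region::Access::Write,
    "Demo Buffer");
if (!buffer)
    return ENOMEM;
memset(buffer->data(), 0, buffer->size());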

View file

@@ -20,7 +20,7 @@ inline bool KBufferBuilder::check_expand(size_t size)
size_t new_buffer_size = m_size + size;
if (Checked<size_t>::addition_would_overflow(new_buffer_size, 1 * MiB))
return false;
new_buffer_size = page_round_up(new_buffer_size + 1 * MiB);
new_buffer_size = Memory::page_round_up(new_buffer_size + 1 * MiB);
return m_buffer->expand(new_buffer_size);
}
@@ -41,7 +41,7 @@ OwnPtr<KBuffer> KBufferBuilder::build()
}
KBufferBuilder::KBufferBuilder()
: m_buffer(KBufferImpl::try_create_with_size(4 * MiB, Region::Access::Read | Region::Access::Write))
: m_buffer(KBufferImpl::try_create_with_size(4 * MiB, Memory::Region::Access::Read | Memory::Region::Access::Write))
{
}
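
The overflow guard in check_expand above leans on AK::Checked rather than hand-rolled comparisons; the same pattern in isolation, sketched with a hypothetical helper name:

// Returns the padded, page-rounded size, or nothing if the padding would wrap size_t.
// Like check_expand, this assumes the first addition was validated by the caller.
Optional<size_t> padded_buffer_size(size_t current_size, size_t requested)
{
    size_t new_size = current_size + requested;
    if (Checked<size_t>::addition_would_overflow(new_size, 1 * MiB))
        return {};
    return Memory::page_round_up(new_size + 1 * MiB);
}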

View file

@@ -11,7 +11,7 @@
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Process.h>
namespace Kernel {
namespace Kernel::Memory {
RefPtr<VMObject> AnonymousVMObject::try_clone()
{

View file

@@ -12,7 +12,7 @@
#include <Kernel/Memory/VMObject.h>
#include <Kernel/PhysicalAddress.h>
namespace Kernel {
namespace Kernel::Memory {
class AnonymousVMObject final : public VMObject {
public:

View file

@@ -7,7 +7,7 @@
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Memory/InodeVMObject.h>
namespace Kernel {
namespace Kernel::Memory {
InodeVMObject::InodeVMObject(Inode& inode, size_t size)
: VMObject(size)

View file

@@ -10,7 +10,7 @@
#include <Kernel/Memory/VMObject.h>
#include <Kernel/UnixTypes.h>
namespace Kernel {
namespace Kernel::Memory {
class InodeVMObject : public VMObject {
public:

View file

@@ -10,7 +10,7 @@
#include <Kernel/Memory/Region.h>
#include <Kernel/PhysicalAddress.h>
namespace Kernel {
namespace Kernel::Memory {
class MappedROM {
public:

View file

@@ -40,7 +40,7 @@ extern size_t multiboot_copy_boot_modules_count;
// Treat the super pages as logically separate from .bss
__attribute__((section(".super_pages"))) static u8 super_pages[1 * MiB];
namespace Kernel {
namespace Kernel::Memory {
// NOTE: We can NOT use AK::Singleton for this class, because
// MemoryManager::initialize is called *before* global constructors are
@@ -49,7 +49,7 @@ namespace Kernel {
static MemoryManager* s_the;
RecursiveSpinLock s_mm_lock;
MemoryManager& MM
MemoryManager& MemoryManager::the()
{
return *s_the;
}

View file

@@ -21,7 +21,7 @@
#include <Kernel/Memory/VMObject.h>
#include <Kernel/SpinLock.h>
namespace Kernel {
namespace Kernel::Memory {
constexpr bool page_round_up_would_wrap(FlatPtr x)
{
@@ -88,7 +88,7 @@ struct PhysicalMemoryRange {
PhysicalSize length {};
};
#define MM Kernel::MemoryManager::the()
#define MM Kernel::Memory::MemoryManager::the()
struct MemoryManagerData {
static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }
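
The MM macro must carry the fully qualified path because it expands textually at every use site, whatever namespace that site lives in. The expansion itself is mechanical; the call below is hypothetical:

// Anywhere in the kernel:
auto page = MM.allocate_user_physical_page();
// ...which the preprocessor turns into:
// auto page = Kernel::Memory::MemoryManager::the().allocate_user_physical_page();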

View file

@@ -15,7 +15,7 @@
extern u8 end_of_kernel_image[];
namespace Kernel {
namespace Kernel::Memory {
static AK::Singleton<HashMap<FlatPtr, PageDirectory*>> s_cr3_map;

View file

@@ -13,7 +13,7 @@
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/RangeAllocator.h>
namespace Kernel {
namespace Kernel::Memory {
class PageDirectory : public RefCounted<PageDirectory> {
friend class MemoryManager;

View file

@@ -8,7 +8,7 @@
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PhysicalPage.h>
namespace Kernel {
namespace Kernel::Memory {
NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
{

View file

@@ -9,7 +9,7 @@
#include <AK/NonnullRefPtr.h>
#include <Kernel/PhysicalAddress.h>
namespace Kernel {
namespace Kernel::Memory {
enum class MayReturnToFreeList : bool {
No,

View file

@@ -12,7 +12,7 @@
#include <Kernel/Memory/PhysicalZone.h>
#include <Kernel/Random.h>
namespace Kernel {
namespace Kernel::Memory {
static constexpr u32 next_power_of_two(u32 value)
{

View file

@@ -10,7 +10,7 @@
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalZone.h>
namespace Kernel {
namespace Kernel::Memory {
class PhysicalRegion {
AK_MAKE_ETERNAL;

View file

@@ -9,7 +9,7 @@
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalZone.h>
namespace Kernel {
namespace Kernel::Memory {
PhysicalPageEntry& PhysicalZone::get_freelist_entry(ChunkIndex index) const
{

View file

@@ -9,7 +9,7 @@
#include <AK/Bitmap.h>
#include <AK/IntrusiveList.h>
namespace Kernel {
namespace Kernel::Memory {
// A PhysicalZone is an allocator that manages a sub-area of a PhysicalRegion.
// Its total size is always a power of two.

View file

@@ -7,7 +7,7 @@
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Memory/PrivateInodeVMObject.h>
namespace Kernel {
namespace Kernel::Memory {
RefPtr<PrivateInodeVMObject> PrivateInodeVMObject::try_create_with_inode(Inode& inode)
{

View file

@@ -10,7 +10,7 @@
#include <Kernel/Memory/InodeVMObject.h>
#include <Kernel/UnixTypes.h>
namespace Kernel {
namespace Kernel::Memory {
class PrivateInodeVMObject final : public InodeVMObject {
AK_MAKE_NONMOVABLE(PrivateInodeVMObject);

View file

@@ -10,7 +10,7 @@
#include <Kernel/Memory/Range.h>
#include <LibC/limits.h>
namespace Kernel {
namespace Kernel::Memory {
Vector<Range, 2> Range::carve(const Range& taken) const
{

View file

@@ -10,7 +10,7 @@
#include <Kernel/KResult.h>
#include <Kernel/VirtualAddress.h>
namespace Kernel {
namespace Kernel::Memory {
class Range {
friend class RangeAllocator;
@@ -61,8 +61,8 @@ private:
}
template<>
struct AK::Formatter<Kernel::Range> : Formatter<FormatString> {
void format(FormatBuilder& builder, Kernel::Range value)
struct AK::Formatter<Kernel::Memory::Range> : Formatter<FormatString> {
void format(FormatBuilder& builder, Kernel::Memory::Range value)
{
return Formatter<FormatString>::format(builder, "{} - {} (size {:p})", value.base().as_ptr(), value.base().offset(value.size() - 1).as_ptr(), value.size());
}
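
With the Formatter specialization retargeted to Kernel::Memory::Range, dbgln and String::formatted keep accepting ranges unchanged. A usage sketch with hypothetical addresses:

Memory::Range range { VirtualAddress(0xc0000000), 4 * PAGE_SIZE };
dbgln("carving {}", range);
// => "carving 0xc0000000 - 0xc0003fff (size 0x4000)"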

View file

@@ -10,7 +10,7 @@
#define VM_GUARD_PAGES
namespace Kernel {
namespace Kernel::Memory {
RangeAllocator::RangeAllocator()
: m_total_range({}, 0)

View file

@@ -11,7 +11,7 @@
#include <Kernel/Memory/Range.h>
#include <Kernel/SpinLock.h>
namespace Kernel {
namespace Kernel::Memory {
class RangeAllocator {
public:
@@ -42,7 +42,7 @@ private:
namespace AK {
template<>
struct Traits<Kernel::Range> : public GenericTraits<Kernel::Range> {
struct Traits<Kernel::Memory::Range> : public GenericTraits<Kernel::Memory::Range> {
static constexpr bool is_trivial() { return true; }
};
}

View file

@@ -17,7 +17,7 @@
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
namespace Kernel {
namespace Kernel::Memory {
Region::Region(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
: m_range(range)

View file

@@ -18,7 +18,7 @@
#include <Kernel/Sections.h>
#include <Kernel/UnixTypes.h>
namespace Kernel {
namespace Kernel::Memory {
enum class ShouldFlushTLB {
No,

View file

@@ -8,7 +8,7 @@
#include <Kernel/Memory/RingBuffer.h>
#include <Kernel/UserOrKernelBuffer.h>
namespace Kernel {
namespace Kernel::Memory {
RingBuffer::RingBuffer(String region_name, size_t capacity)
: m_region(MM.allocate_contiguous_kernel_region(page_round_up(capacity), move(region_name), Region::Access::Read | Region::Access::Write))

View file

@@ -10,7 +10,7 @@
#include <Kernel/PhysicalAddress.h>
#include <Kernel/UserOrKernelBuffer.h>
namespace Kernel {
namespace Kernel::Memory {
class RingBuffer {
public:
@@ -30,7 +30,7 @@ public:
size_t bytes_till_end() const { return (m_capacity_in_bytes - ((m_start_of_used + m_num_used_bytes) % m_capacity_in_bytes)) % m_capacity_in_bytes; };
private:
OwnPtr<Region> m_region;
OwnPtr<Memory::Region> m_region;
SpinLock<u8> m_lock;
size_t m_start_of_used {};
size_t m_num_used_bytes {};

View file

@@ -6,7 +6,7 @@
#include <Kernel/Memory/ScatterGatherList.h>
namespace Kernel {
namespace Kernel::Memory {
RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
{

View file

@@ -12,7 +12,7 @@
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/PhysicalAddress.h>
namespace Kernel {
namespace Kernel::Memory {
// A Scatter-Gather List type that owns its buffers

View file

@@ -7,7 +7,7 @@
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Memory/SharedInodeVMObject.h>
namespace Kernel {
namespace Kernel::Memory {
RefPtr<SharedInodeVMObject> SharedInodeVMObject::try_create_with_inode(Inode& inode)
{

View file

@@ -10,7 +10,7 @@
#include <Kernel/Memory/InodeVMObject.h>
#include <Kernel/UnixTypes.h>
namespace Kernel {
namespace Kernel::Memory {
class SharedInodeVMObject final : public InodeVMObject {
AK_MAKE_NONMOVABLE(SharedInodeVMObject);

View file

@@ -13,7 +13,7 @@
#include <Kernel/Process.h>
#include <Kernel/SpinLock.h>
namespace Kernel {
namespace Kernel::Memory {
OwnPtr<Space> Space::try_create(Process& process, Space const* parent)
{

View file

@@ -14,7 +14,7 @@
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/UnixTypes.h>
namespace Kernel {
namespace Kernel::Memory {
class Space {
public:

View file

@@ -9,7 +9,7 @@
#include <AK/StringView.h>
#include <Kernel/Memory/MemoryManager.h>
namespace Kernel {
namespace Kernel::Memory {
template<typename T>
struct TypedMapping {

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
@@ -7,7 +7,7 @@
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/VMObject.h>
namespace Kernel {
namespace Kernel::Memory {
VMObject::VMObject(VMObject const& other)
: m_physical_pages(other.m_physical_pages)

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
@@ -17,7 +17,7 @@
#include <Kernel/Memory/Region.h>
#include <Kernel/Mutex.h>
namespace Kernel {
namespace Kernel::Memory {
class VMObjectDeletedHandler {
public:

View file

@@ -204,7 +204,7 @@ UNMAP_AFTER_INIT bool E1000ENetworkAdapter::initialize()
enable_bus_mastering(pci_address());
size_t mmio_base_size = PCI::get_BAR_space_size(pci_address(), 0);
m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), page_round_up(mmio_base_size), "E1000e MMIO", Region::Access::Read | Region::Access::Write, Region::Cacheable::No);
m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), Memory::page_round_up(mmio_base_size), "E1000e MMIO", Memory::Region::Access::Read | Memory::Region::Access::Write, Memory::Region::Cacheable::No);
if (!m_mmio_region)
return false;
m_mmio_base = m_mmio_region->vaddr();
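
The mapping above passes Memory::Region::Cacheable::No so register reads and writes reach the NIC instead of being satisfied by the CPU cache. The same call shape with placeholder values:

// Hypothetical BAR0 mapping for a device exposing 128 KiB of registers.
auto mmio_region = MM.allocate_kernel_region(
    PhysicalAddress(0xfebc0000).page_base(),
    Memory::page_round_up(128 * KiB),
    "Example MMIO",
    Memory::Region::Access::Read | Memory::Region::Access::Write,
    Memory::Region::Cacheable::No);
if (!mmio_region)
    return false;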

Some files were not shown because too many files have changed in this diff.