Kernel: Fix UB caused by taking a reference to a packed struct's member

Taking a reference or a pointer to a value that's not aligned properly
is undefined behavior. While `[[gnu::packed]]` ensures that reads from
and writes to fields of packed structs are safe operations, the
information about the reduced alignment is lost when creating pointers
to these values.

Weirdly enough, GCC's undefined behavior sanitizer doesn't flag these,
even though the documentation for `-Waddress-of-packed-member` says that it usually
leads to UB. In contrast, x86_64 Clang does flag these, which renders
the 64-bit kernel unable to boot.

For now, the `address-of-packed-member` warning will only be enabled in
the kernel, as it is absolutely crucial there because of KUBSAN, but
might get excessively noisy for the userland in the future.

Also note that we can't append to `CMAKE_CXX_FLAGS` like we do for other
flags in the kernel, because flags added via `add_compile_options` come
after these, so the `-Wno-address-of-packed-member` in the root would
cancel it out.
This commit is contained in:
Daniel Bertalan 2021-08-01 20:30:43 +02:00 committed by Andreas Kling
parent 90caebe96a
commit fa8507d1ce
Notes: sideshowbarker 2024-07-18 07:16:00 +09:00
3 changed files with 45 additions and 25 deletions

View file

@ -351,6 +351,10 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector-strong")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nodefaultlibs -nostdlib")
if (USE_CLANG_TOOLCHAIN)
add_compile_options(-Waddress-of-packed-member)
endif()
# Apply any flags that are only available on >= GCC 11.1
if (NOT USE_CLANG_TOOLCHAIN AND CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11.1)
# Zero any registers used within a function on return (to reduce data lifetime and ROP gadgets).

View file

@ -218,10 +218,18 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
Vector<ContiguousPhysicalVirtualRange> contiguous_physical_ranges;
for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", mmap->addr, mmap->len, mmap->type);
auto start_address = PhysicalAddress(mmap->addr);
// We have to copy these onto the stack, because we take a reference to these when printing them out,
// and doing so on a packed struct field is UB.
auto address = mmap->addr;
auto length = mmap->len;
ArmedScopeGuard write_back_guard = [&]() {
mmap->addr = address;
mmap->len = length;
};
dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", address, length, mmap->type);
auto start_address = PhysicalAddress(address);
switch (mmap->type) {
case (MULTIBOOT_MEMORY_AVAILABLE):
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
@ -249,23 +257,23 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
continue;
// Fix up unaligned memory regions.
auto diff = (FlatPtr)mmap->addr % PAGE_SIZE;
auto diff = (FlatPtr)address % PAGE_SIZE;
if (diff != 0) {
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting {:p} by {} bytes", mmap->addr, diff);
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting {:p} by {} bytes", address, diff);
diff = PAGE_SIZE - diff;
mmap->addr += diff;
mmap->len -= diff;
address += diff;
length -= diff;
}
if ((mmap->len % PAGE_SIZE) != 0) {
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting length {} by {} bytes", mmap->len, mmap->len % PAGE_SIZE);
mmap->len -= mmap->len % PAGE_SIZE;
if ((length % PAGE_SIZE) != 0) {
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting length {} by {} bytes", length, length % PAGE_SIZE);
length -= length % PAGE_SIZE;
}
if (mmap->len < PAGE_SIZE) {
dmesgln("MM: Memory physical_region from bootloader is too small; we want >= {} bytes, but got {} bytes", PAGE_SIZE, mmap->len);
if (length < PAGE_SIZE) {
dmesgln("MM: Memory physical_region from bootloader is too small; we want >= {} bytes, but got {} bytes", PAGE_SIZE, length);
continue;
}
for (PhysicalSize page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
for (PhysicalSize page_base = address; page_base <= (address + length); page_base += PAGE_SIZE) {
auto addr = PhysicalAddress(page_base);
// Skip used memory ranges.

View file

@ -824,10 +824,10 @@ bool Thread::has_signal_handler(u8 signal) const
return !action.handler_or_sigaction.is_null();
}
static bool push_value_on_user_stack(FlatPtr* stack, FlatPtr data)
static bool push_value_on_user_stack(FlatPtr& stack, FlatPtr data)
{
*stack -= sizeof(FlatPtr);
return copy_to_user((FlatPtr*)*stack, &data);
stack -= sizeof(FlatPtr);
return copy_to_user((FlatPtr*)stack, &data);
}
void Thread::resume_from_stopped()
@ -946,15 +946,15 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
auto setup_stack = [&](RegisterState& state) {
#if ARCH(I386)
FlatPtr* stack = &state.userspace_esp;
FlatPtr old_esp = *stack;
FlatPtr stack = state.userspace_esp;
FlatPtr old_esp = stack;
FlatPtr ret_eip = state.eip;
FlatPtr ret_eflags = state.eflags;
dbgln_if(SIGNAL_DEBUG, "Setting up user stack to return to EIP {:p}, ESP {:p}", ret_eip, old_esp);
#elif ARCH(X86_64)
FlatPtr* stack = &state.userspace_rsp;
FlatPtr old_rsp = *stack;
FlatPtr stack = state.userspace_rsp;
FlatPtr old_rsp = stack;
FlatPtr ret_rip = state.rip;
FlatPtr ret_rflags = state.rflags;
@ -967,8 +967,8 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
// so we need to account for this here.
// 56 % 16 = 8, so we only need to take 8 bytes into consideration for
// the stack alignment.
FlatPtr stack_alignment = (*stack - 8) % 16;
*stack -= stack_alignment;
FlatPtr stack_alignment = (stack - 8) % 16;
stack -= stack_alignment;
push_value_on_user_stack(stack, ret_eflags);
@ -988,8 +988,8 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
// 22 % 2 = 0, so we dont need to take anything into consideration
// for the alignment.
// We also are not allowed to touch the thread's red-zone of 128 bytes
FlatPtr stack_alignment = *stack % 16;
*stack -= 128 + stack_alignment;
FlatPtr stack_alignment = stack % 16;
stack -= 128 + stack_alignment;
push_value_on_user_stack(stack, ret_rflags);
@ -1019,7 +1019,15 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
push_value_on_user_stack(stack, handler_vaddr.get());
push_value_on_user_stack(stack, 0); // push fake return address
VERIFY((*stack % 16) == 0);
// We write back the adjusted stack value into the register state.
// We have to do this because we can't just pass around a reference to a packed field, as it's UB.
#if ARCH(I386)
state.userspace_esp = stack;
#else
state.userspace_rsp = stack;
#endif
VERIFY((stack % 16) == 0);
};
// We now place the thread state on the userspace stack.