Kernel: Move block condition evaluation out of the Scheduler

This makes the Scheduler a lot leaner: it no longer has to evaluate
block conditions every time it is invoked. Instead, block conditions
are evaluated as the relevant states change, and threads are unblocked
at that point.
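
To illustrate the new pattern, here is a rough userspace sketch (illustrative only, not the kernel code; the names merely mirror Thread::BlockCondition and Thread::Blocker, and all SMP locking and IRQ deferral is omitted):

// Sketch of push-style unblocking: producers call unblock() on each
// state change instead of the scheduler polling every blocked thread.
#include <functional>
#include <iostream>
#include <vector>

struct Blocker {
    std::function<bool()> condition; // true once the thread may run again
    bool blocked { false };
};

class BlockCondition {
public:
    // Mirrors should_add_blocker(): don't park a thread whose condition
    // is already satisfied at the moment it tries to block.
    bool add_blocker(Blocker& blocker)
    {
        if (blocker.condition())
            return false;
        blocker.blocked = true;
        m_blockers.push_back(&blocker);
        return true;
    }

    // Mirrors unblock(): wake exactly those blockers whose condition
    // now holds, at the point where the state actually changed.
    void unblock()
    {
        for (auto it = m_blockers.begin(); it != m_blockers.end();) {
            if ((*it)->condition()) {
                (*it)->blocked = false;
                it = m_blockers.erase(it);
            } else {
                ++it;
            }
        }
    }

private:
    std::vector<Blocker*> m_blockers;
};

int main()
{
    bool data_available = false;
    BlockCondition condition;
    Blocker reader { [&] { return data_available; } };

    condition.add_blocker(reader); // reader parks: no data yet
    data_available = true;         // producer changes state...
    condition.unblock();           // ...and wakes the ready blocker
    std::cout << (reader.blocked ? "blocked" : "unblocked") << '\n';
}

Contrast this with the code removed from Scheduler::pick_next() further down, which walked every non-runnable thread and polled should_unblock() on each scheduling pass.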

This also implements more waitid/waitpid/wait features and behavior.
For example, WUNTRACED and WNOWAIT are now supported, and wait no
longer returns EINTR when SIGCHLD is delivered at the same time.
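
The wait changes can be pictured with a rough userspace sketch (illustrative only; it assumes the usual POSIX semantics for waitpid/waitid and uses a child that stops itself just for demonstration):

// WUNTRACED reports a stopped child; WNOWAIT peeks at a waitable state
// without consuming it, so a later wait can still collect the child.
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main()
{
    pid_t child = fork();
    if (child == 0) {
        raise(SIGSTOP); // child stops itself; the parent observes this
        _exit(0);
    }

    int status = 0;
    waitpid(child, &status, WUNTRACED); // returns on stop, not only on exit
    if (WIFSTOPPED(status))
        printf("child %d stopped by signal %d\n", (int)child, WSTOPSIG(status));

    kill(child, SIGCONT); // let the child run to completion

    siginfo_t info = {};
    waitid(P_PID, child, &info, WEXITED | WNOWAIT); // peek, don't reap
    waitpid(child, &status, 0);                     // now actually reap
    return 0;
}
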
Tom, 2020-11-29 16:05:27 -07:00 (committed by Andreas Kling)
commit 046d6855f5, parent 6a620562cc
53 changed files with 2027 additions and 930 deletions


@@ -170,6 +170,7 @@ set(KERNEL_SOURCES
Tasks/FinalizerTask.cpp
Tasks/SyncTask.cpp
Thread.cpp
ThreadBlockers.cpp
ThreadTracer.cpp
Time/APICTimer.cpp
Time/HPET.cpp


@@ -95,6 +95,8 @@ void Device::process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncD
if (next_request)
next_request->start();
evaluate_block_conditions();
}
}


@@ -272,6 +272,8 @@ void KeyboardDevice::key_state_changed(u8 scan_code, bool pressed)
}
m_has_e0_prefix = false;
evaluate_block_conditions();
}
void KeyboardDevice::handle_irq(const RegisterState&)


@@ -89,8 +89,11 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
auto mouse_packet = backdoor->receive_mouse_packet();
if (mouse_packet.has_value()) {
m_entropy_source.add_random_event(mouse_packet.value());
ScopedSpinLock lock(m_queue_lock);
m_queue.enqueue(mouse_packet.value());
{
ScopedSpinLock lock(m_queue_lock);
m_queue.enqueue(mouse_packet.value());
}
evaluate_block_conditions();
}
return;
}
@@ -102,8 +105,11 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
#endif
m_entropy_source.add_random_event(m_data.dword);
ScopedSpinLock lock(m_queue_lock);
m_queue.enqueue(parse_data_packet(m_data));
{
ScopedSpinLock lock(m_queue_lock);
m_queue.enqueue(parse_data_packet(m_data));
}
evaluate_block_conditions();
};
ASSERT(m_data_state < sizeof(m_data.bytes) / sizeof(m_data.bytes[0]));


@@ -70,6 +70,8 @@ ssize_t DoubleBuffer::write(const UserOrKernelBuffer& data, size_t size)
compute_lockfree_metadata();
if (!data.read(write_ptr, bytes_to_write))
return -EFAULT;
if (m_unblock_callback && !m_empty)
m_unblock_callback();
return (ssize_t)bytes_to_write;
}
@@ -88,6 +90,8 @@ ssize_t DoubleBuffer::read(UserOrKernelBuffer& data, size_t size)
return -EFAULT;
m_read_buffer_index += nread;
compute_lockfree_metadata();
if (m_unblock_callback && m_space_for_writing > 0)
m_unblock_callback();
return (ssize_t)nread;
}


@@ -29,6 +29,7 @@
#include <AK/Types.h>
#include <Kernel/KBuffer.h>
#include <Kernel/Lock.h>
#include <Kernel/Thread.h>
#include <Kernel/UserOrKernelBuffer.h>
namespace Kernel {
@@ -53,6 +54,12 @@ public:
size_t space_for_writing() const { return m_space_for_writing; }
void set_unblock_callback(Function<void()> callback)
{
ASSERT(!m_unblock_callback);
m_unblock_callback = move(callback);
}
private:
void flip();
void compute_lockfree_metadata();
@@ -68,6 +75,7 @@ private:
InnerBuffer m_buffer2;
KBuffer m_storage;
Function<void()> m_unblock_callback;
size_t m_capacity { 0 };
size_t m_read_buffer_index { 0 };
size_t m_space_for_writing { 0 };


@@ -95,6 +95,11 @@ FIFO::FIFO(uid_t uid)
LOCKER(all_fifos().lock());
all_fifos().resource().set(this);
m_fifo_id = ++s_next_fifo_id;
// Use the same block condition for read and write
m_buffer.set_unblock_callback([this]() {
evaluate_block_conditions();
});
}
FIFO::~FIFO()
@@ -116,6 +121,8 @@ void FIFO::attach(Direction direction)
klog() << "open writer (" << m_writers << ")";
#endif
}
evaluate_block_conditions();
}
void FIFO::detach(Direction direction)
@@ -133,6 +140,8 @@ void FIFO::detach(Direction direction)
ASSERT(m_writers);
--m_writers;
}
evaluate_block_conditions();
}
bool FIFO::can_read(const FileDescription&, size_t) const


@@ -31,6 +31,7 @@
namespace Kernel {
File::File()
: m_block_condition(*this)
{
}


@@ -39,6 +39,36 @@
namespace Kernel {
class File;
class FileBlockCondition : public Thread::BlockCondition {
public:
FileBlockCondition(File& file)
: m_file(file)
{
}
virtual bool should_add_blocker(Thread::Blocker& b, void* data) override
{
ASSERT(b.blocker_type() == Thread::Blocker::Type::File);
auto& blocker = static_cast<Thread::FileBlocker&>(b);
return !blocker.unblock(true, data);
}
void unblock()
{
ScopedSpinLock lock(m_lock);
do_unblock([&](auto& b, void* data) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::File);
auto& blocker = static_cast<Thread::FileBlocker&>(b);
return blocker.unblock(false, data);
});
}
private:
File& m_file;
};
// File is the base class for anything that can be referenced by a FileDescription.
//
// The most important functions in File are:
@@ -103,8 +133,27 @@ public:
virtual bool is_character_device() const { return false; }
virtual bool is_socket() const { return false; }
virtual FileBlockCondition& block_condition() { return m_block_condition; }
protected:
File();
void evaluate_block_conditions()
{
if (Processor::current().in_irq()) {
// If called from an IRQ handler, we need to delay evaluation
// and unblocking of waiting threads.
Processor::deferred_call_queue([this]() {
ASSERT(!Processor::current().in_irq());
evaluate_block_conditions();
});
} else {
block_condition().unblock();
}
}
private:
FileBlockCondition m_block_condition;
};
}


@@ -75,6 +75,26 @@ FileDescription::~FileDescription()
m_inode = nullptr;
}
Thread::FileBlocker::BlockFlags FileDescription::should_unblock(Thread::FileBlocker::BlockFlags block_flags) const
{
u32 unblock_flags = (u32)Thread::FileBlocker::BlockFlags::None;
if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Read) && can_read())
unblock_flags |= (u32)Thread::FileBlocker::BlockFlags::Read;
if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Write) && can_write())
unblock_flags |= (u32)Thread::FileBlocker::BlockFlags::Write;
// TODO: Implement Thread::FileBlocker::BlockFlags::Exception
if ((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::SocketFlags) {
auto* sock = socket();
ASSERT(sock);
if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Accept) && sock->can_accept())
unblock_flags |= (u32)Thread::FileBlocker::BlockFlags::Accept;
if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Connect) && sock->setup_state() == Socket::SetupState::Completed)
unblock_flags |= (u32)Thread::FileBlocker::BlockFlags::Connect;
}
return (Thread::FileBlocker::BlockFlags)unblock_flags;
}
KResult FileDescription::stat(::stat& buffer)
{
LOCKER(m_lock);
@@ -113,6 +133,7 @@ off_t FileDescription::seek(off_t offset, int whence)
// FIXME: Return -EINVAL if attempting to seek past the end of a seekable device.
m_current_offset = new_offset;
evaluate_block_conditions();
return m_current_offset;
}
@@ -124,8 +145,11 @@ KResultOr<size_t> FileDescription::read(UserOrKernelBuffer& buffer, size_t count
if (new_offset.has_overflow())
return -EOVERFLOW;
auto nread_or_error = m_file->read(*this, offset(), buffer, count);
if (!nread_or_error.is_error() && m_file->is_seekable())
m_current_offset += nread_or_error.value();
if (!nread_or_error.is_error()) {
if (m_file->is_seekable())
m_current_offset += nread_or_error.value();
evaluate_block_conditions();
}
return nread_or_error;
}
@@ -137,8 +161,11 @@ KResultOr<size_t> FileDescription::write(const UserOrKernelBuffer& data, size_t
if (new_offset.has_overflow())
return -EOVERFLOW;
auto nwritten_or_error = m_file->write(*this, offset(), data, size);
if (!nwritten_or_error.is_error() && m_file->is_seekable())
m_current_offset += nwritten_or_error.value();
if (!nwritten_or_error.is_error()) {
if (m_file->is_seekable())
m_current_offset += nwritten_or_error.value();
evaluate_block_conditions();
}
return nwritten_or_error;
}
@@ -340,4 +367,9 @@ KResult FileDescription::chown(uid_t uid, gid_t gid)
return m_file->chown(*this, uid, gid);
}
FileBlockCondition& FileDescription::block_condition()
{
return m_file->block_condition();
}
}


@@ -45,6 +45,8 @@ public:
static NonnullRefPtr<FileDescription> create(File&);
~FileDescription();
Thread::FileBlocker::BlockFlags should_unblock(Thread::FileBlocker::BlockFlags) const;
bool is_readable() const { return m_readable; }
bool is_writable() const { return m_writable; }
@@ -130,11 +132,18 @@ public:
KResult chown(uid_t, gid_t);
FileBlockCondition& block_condition();
private:
friend class VFS;
explicit FileDescription(File&);
FileDescription(FIFO&, FIFO::Direction);
void evaluate_block_conditions()
{
block_condition().unblock();
}
RefPtr<Custody> m_custody;
RefPtr<Inode> m_inode;
NonnullRefPtr<File> m_file;


@@ -47,8 +47,10 @@ InodeFile::~InodeFile()
KResultOr<size_t> InodeFile::read(FileDescription& description, size_t offset, UserOrKernelBuffer& buffer, size_t count)
{
ssize_t nread = m_inode->read_bytes(offset, count, buffer, &description);
if (nread > 0)
if (nread > 0) {
Thread::current()->did_file_read(nread);
evaluate_block_conditions();
}
if (nread < 0)
return KResult(nread);
return nread;
@@ -60,6 +62,7 @@ KResultOr<size_t> InodeFile::write(FileDescription& description, size_t offset,
if (nwritten > 0) {
m_inode->set_mtime(kgettimeofday().tv_sec);
Thread::current()->did_file_write(nwritten);
evaluate_block_conditions();
}
if (nwritten < 0)
return KResult(nwritten);


@@ -78,6 +78,7 @@ KResultOr<size_t> InodeWatcher::read(FileDescription&, size_t, UserOrKernelBuffe
});
if (nwritten < 0)
return KResult(nwritten);
evaluate_block_conditions();
return bytes_to_write;
}
@@ -97,18 +98,21 @@ void InodeWatcher::notify_inode_event(Badge<Inode>, Event::Type event_type)
{
LOCKER(m_lock);
m_queue.enqueue({ event_type });
evaluate_block_conditions();
}
void InodeWatcher::notify_child_added(Badge<Inode>, const InodeIdentifier& child_id)
{
LOCKER(m_lock);
m_queue.enqueue({ Event::Type::ChildAdded, child_id.index() });
evaluate_block_conditions();
}
void InodeWatcher::notify_child_removed(Badge<Inode>, const InodeIdentifier& child_id)
{
LOCKER(m_lock);
m_queue.enqueue({ Event::Type::ChildRemoved, child_id.index() });
evaluate_block_conditions();
}
}


@@ -36,6 +36,7 @@ NonnullRefPtr<Plan9FS> Plan9FS::create(FileDescription& file_description)
Plan9FS::Plan9FS(FileDescription& file_description)
: FileBackedFS(file_description)
, m_completion_blocker(*this)
{
}
@@ -216,6 +217,8 @@ private:
bool Plan9FS::initialize()
{
ensure_thread();
Message version_message { *this, Message::Type::Tversion };
version_message << (u32)m_max_message_size << "9P2000.L";
@@ -412,7 +415,97 @@ const KBuffer& Plan9FS::Message::build()
return m_built.buffer;
}
KResult Plan9FS::post_message(Message& message)
Plan9FS::ReceiveCompletion::ReceiveCompletion(u16 tag)
: tag(tag)
{
}
Plan9FS::ReceiveCompletion::~ReceiveCompletion()
{
}
bool Plan9FS::Blocker::unblock(u16 tag)
{
{
ScopedSpinLock lock(m_lock);
if (m_did_unblock)
return false;
m_did_unblock = true;
if (m_completion->tag != tag)
return false;
if (!m_completion->result.is_error())
m_message = move(*m_completion->message);
}
return unblock();
}
void Plan9FS::Blocker::not_blocking(bool)
{
{
ScopedSpinLock lock(m_lock);
if (m_did_unblock)
return;
}
m_fs.m_completion_blocker.try_unblock(*this);
}
bool Plan9FS::Blocker::is_completed() const
{
ScopedSpinLock lock(m_completion->lock);
return m_completion->completed;
}
bool Plan9FS::Plan9FSBlockCondition::should_add_blocker(Thread::Blocker& b, void*)
{
// NOTE: m_lock is held already!
auto& blocker = static_cast<Blocker&>(b);
return !blocker.is_completed();
}
void Plan9FS::Plan9FSBlockCondition::unblock_completed(u16 tag)
{
unblock([&](Thread::Blocker& b, void*) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
auto& blocker = static_cast<Blocker&>(b);
return blocker.unblock(tag);
});
}
void Plan9FS::Plan9FSBlockCondition::unblock_all()
{
BlockCondition::unblock_all([&](Thread::Blocker& b, void*) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
auto& blocker = static_cast<Blocker&>(b);
return blocker.unblock();
});
}
void Plan9FS::Plan9FSBlockCondition::try_unblock(Plan9FS::Blocker& blocker)
{
if (m_fs.is_complete(*blocker.completion())) {
ScopedSpinLock lock(m_lock);
blocker.unblock(blocker.completion()->tag);
}
}
bool Plan9FS::is_complete(const ReceiveCompletion& completion)
{
LOCKER(m_lock);
if (m_completions.contains(completion.tag)) {
// If it's still in the map then it can't be complete
ASSERT(!completion.completed);
return false;
}
// If it's not in the map anymore, it must be complete. But we MUST
// hold m_lock to be able to check completion.completed!
ASSERT(completion.completed);
return true;
}
KResult Plan9FS::post_message(Message& message, RefPtr<ReceiveCompletion> completion)
{
auto& buffer = message.build();
const u8* data = buffer.data();
@@ -421,9 +514,21 @@ KResult Plan9FS::post_message(Message& message)
LOCKER(m_send_lock);
if (completion) {
// Save the completion record *before* we send the message. This
// ensures that it exists when the thread reads the response
LOCKER(m_lock);
auto tag = completion->tag;
m_completions.set(tag, completion.release_nonnull());
// TODO: What if there is a collision? Do we need to wait until
// the existing record with the tag completes before queueing
// this one?
}
while (size > 0) {
if (!description.can_write()) {
if (Thread::current()->block<Thread::WriteBlocker>(nullptr, description).was_interrupted())
auto unblock_flags = Thread::FileBlocker::BlockFlags::None;
if (Thread::current()->block<Thread::WriteBlocker>(nullptr, description, unblock_flags).was_interrupted())
return KResult(-EINTR);
}
auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(const_cast<u8*>(data));
@@ -443,7 +548,8 @@ KResult Plan9FS::do_read(u8* data, size_t size)
auto& description = file_description();
while (size > 0) {
if (!description.can_read()) {
if (Thread::current()->block<Thread::ReadBlocker>(nullptr, description).was_interrupted())
auto unblock_flags = Thread::FileBlocker::BlockFlags::None;
if (Thread::current()->block<Thread::ReadBlocker>(nullptr, description, unblock_flags).was_interrupted())
return KResult(-EINTR);
}
auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data);
@@ -461,9 +567,6 @@ KResult Plan9FS::do_read(u8* data, size_t size)
KResult Plan9FS::read_and_dispatch_one_message()
{
ASSERT(m_someone_is_reading);
// That someone is us.
struct [[gnu::packed]] Header
{
u32 size;
@@ -485,112 +588,42 @@ KResult Plan9FS::read_and_dispatch_one_message()
LOCKER(m_lock);
auto optional_completion = m_completions.get(header.tag);
if (!optional_completion.has_value()) {
if (m_tags_to_ignore.contains(header.tag)) {
m_tags_to_ignore.remove(header.tag);
} else {
dbg() << "Received a 9p message of type " << header.type << " with an unexpected tag " << header.tag << ", dropping";
}
return KSuccess;
if (optional_completion.has_value()) {
auto completion = optional_completion.value();
ScopedSpinLock lock(completion->lock);
completion->result = KSuccess;
completion->message = new Message { move(buffer) };
completion->completed = true;
m_completions.remove(header.tag);
m_completion_blocker.unblock_completed(header.tag);
} else {
dbg() << "Received a 9p message of type " << header.type << " with an unexpected tag " << header.tag << ", dropping";
}
ReceiveCompletion& completion = *optional_completion.value();
completion.result = KSuccess;
completion.message = Message { move(buffer) };
completion.completed = true;
m_completions.remove(header.tag);
return KSuccess;
}
bool Plan9FS::Blocker::should_unblock(Thread&)
{
if (m_completion.completed)
return true;
bool someone_else_is_reading = m_completion.fs.m_someone_is_reading.exchange(true);
if (!someone_else_is_reading) {
// We're gonna start reading ourselves; unblock.
return true;
}
return false;
}
KResult Plan9FS::wait_for_specific_message(u16 tag, Message& out_message)
{
KResult result = KSuccess;
ReceiveCompletion completion { *this, out_message, result, false };
{
LOCKER(m_lock);
m_completions.set(tag, &completion);
}
// Block until either:
// * Someone else reads the message we're waiting for, and hands it to us;
// * Or we become the one to read and dispatch messages.
if (Thread::current()->block<Plan9FS::Blocker>(nullptr, completion).was_interrupted()) {
LOCKER(m_lock);
m_completions.remove(tag);
return KResult(-EINTR);
}
// See for which reason we woke up.
if (completion.completed) {
// Somebody else completed it for us; nothing further to do.
return result;
}
while (!completion.completed && result.is_success()) {
result = read_and_dispatch_one_message();
}
if (result.is_error()) {
// If we fail to read, wake up everyone with an error.
LOCKER(m_lock);
for (auto& it : m_completions) {
it.value->result = result;
it.value->completed = true;
}
m_completions.clear();
}
// Wake up someone else, if anyone is interested...
m_someone_is_reading = false;
// ...and return.
return result;
}
KResult Plan9FS::post_message_and_explicitly_ignore_reply(Message& message)
{
auto tag = message.tag();
{
LOCKER(m_lock);
m_tags_to_ignore.set(tag);
}
auto result = post_message(message);
if (result.is_error()) {
LOCKER(m_lock);
m_tags_to_ignore.remove(tag);
}
return result;
return post_message(message, {});
}
KResult Plan9FS::post_message_and_wait_for_a_reply(Message& message, bool auto_convert_error_reply_to_error)
KResult Plan9FS::post_message_and_wait_for_a_reply(Message& message)
{
auto request_type = message.type();
auto tag = message.tag();
auto result = post_message(message);
if (result.is_error())
return result;
result = wait_for_specific_message(tag, message);
auto completion = adopt(*new ReceiveCompletion(tag));
auto result = post_message(message, completion);
if (result.is_error())
return result;
if (Thread::current()->block<Plan9FS::Blocker>(nullptr, *this, message, completion).was_interrupted())
return KResult(-EINTR);
if (!auto_convert_error_reply_to_error)
return KSuccess;
if (completion->result.is_error()) {
dbg() << "Plan9FS: Message was aborted with error " << completion->result;
return KResult(-EIO);
}
auto reply_type = message.type();
@@ -624,6 +657,39 @@ ssize_t Plan9FS::adjust_buffer_size(ssize_t size) const
return min(size, max_size);
}
void Plan9FS::thread_main()
{
dbg() << "Plan9FS: Thread running";
do {
auto result = read_and_dispatch_one_message();
if (result.is_error()) {
// If we fail to read, wake up everyone with an error.
LOCKER(m_lock);
for (auto& it : m_completions) {
it.value->result = result;
it.value->completed = true;
}
m_completions.clear();
m_completion_blocker.unblock_all();
dbg() << "Plan9FS: Thread terminating, error reading";
return;
}
} while (!m_thread_shutdown.load(AK::MemoryOrder::memory_order_relaxed));
dbg() << "Plan9FS: Thread terminating";
}
void Plan9FS::ensure_thread()
{
ScopedSpinLock lock(m_thread_lock);
if (!m_thread_running.exchange(true, AK::MemoryOrder::memory_order_acq_rel)) {
Process::create_kernel_process(m_thread, "Plan9FS", [&]() {
thread_main();
m_thread_running.store(false, AK::MemoryOrder::memory_order_release);
});
}
}
Plan9FSInode::Plan9FSInode(Plan9FS& fs, u32 fid)
: Inode(fs, fid)
{


@@ -68,38 +68,86 @@ public:
private:
Plan9FS(FileDescription&);
struct ReceiveCompletion {
Plan9FS& fs;
Message& message;
KResult& result;
Atomic<bool> completed;
class Blocker;
class Plan9FSBlockCondition : public Thread::BlockCondition {
public:
Plan9FSBlockCondition(Plan9FS& fs)
: m_fs(fs)
{
}
void unblock_completed(u16);
void unblock_all();
void try_unblock(Blocker&);
protected:
virtual bool should_add_blocker(Thread::Blocker&, void*) override;
private:
Plan9FS& m_fs;
mutable SpinLock<u8> m_lock;
};
struct ReceiveCompletion : public RefCounted<ReceiveCompletion> {
mutable SpinLock<u8> lock;
bool completed { false };
const u16 tag;
OwnPtr<Message> message;
KResult result { KSuccess };
ReceiveCompletion(u16 tag);
~ReceiveCompletion();
};
class Blocker final : public Thread::Blocker {
public:
Blocker(ReceiveCompletion& completion)
: m_completion(completion)
Blocker(Plan9FS& fs, Message& message, NonnullRefPtr<ReceiveCompletion> completion)
: m_fs(fs)
, m_message(message)
, m_completion(move(completion))
{
set_block_condition(fs.m_completion_blocker);
}
virtual bool should_unblock(Thread&) override;
virtual const char* state_string() const override { return "Waiting"; }
virtual Type blocker_type() const override { return Type::Plan9FS; }
virtual void not_blocking(bool) override;
const NonnullRefPtr<ReceiveCompletion>& completion() const { return m_completion; }
u16 tag() const { return m_completion->tag; }
bool is_completed() const;
bool unblock()
{
unblock_from_blocker();
return true;
}
bool unblock(u16 tag);
private:
ReceiveCompletion& m_completion;
Plan9FS& m_fs;
Message& m_message;
NonnullRefPtr<ReceiveCompletion> m_completion;
bool m_did_unblock { false };
};
friend class Blocker;
virtual const char* class_name() const override { return "Plan9FS"; }
KResult post_message(Message&);
bool is_complete(const ReceiveCompletion&);
KResult post_message(Message&, RefPtr<ReceiveCompletion>);
KResult do_read(u8* buffer, size_t);
KResult read_and_dispatch_one_message();
KResult wait_for_specific_message(u16 tag, Message& out_message);
KResult post_message_and_wait_for_a_reply(Message&, bool auto_convert_error_reply_to_error = true);
KResult post_message_and_wait_for_a_reply(Message&);
KResult post_message_and_explicitly_ignore_reply(Message&);
ProtocolVersion parse_protocol_version(const StringView&) const;
ssize_t adjust_buffer_size(ssize_t size) const;
void thread_main();
void ensure_thread();
RefPtr<Plan9FSInode> m_root_inode;
Atomic<u16> m_next_tag { (u16)-1 };
Atomic<u32> m_next_fid { 1 };
@@ -108,9 +156,13 @@ private:
size_t m_max_message_size { 4 * KiB };
Lock m_send_lock { "Plan9FS send" };
Atomic<bool> m_someone_is_reading { false };
HashMap<u16, ReceiveCompletion*> m_completions;
HashTable<u16> m_tags_to_ignore;
Plan9FSBlockCondition m_completion_blocker;
HashMap<u16, NonnullRefPtr<ReceiveCompletion>> m_completions;
SpinLock<u8> m_thread_lock;
RefPtr<Thread> m_thread;
Atomic<bool> m_thread_running { false };
Atomic<bool> m_thread_shutdown { false };
};
class Plan9FSInode final : public Inode {


@@ -51,6 +51,7 @@ class PerformanceEventBuffer;
class PhysicalPage;
class PhysicalRegion;
class Process;
class ProcessGroup;
class ThreadTracer;
class Range;
class RangeAllocator;


@@ -138,6 +138,7 @@ KResult IPv4Socket::listen(size_t backlog)
set_backlog(backlog);
m_role = Role::Listener;
evaluate_block_conditions();
#ifdef IPV4_SOCKET_DEBUG
dbg() << "IPv4Socket{" << this << "} listening with backlog=" << backlog;
@@ -262,10 +263,11 @@ KResultOr<size_t> IPv4Socket::receive_byte_buffered(FileDescription& description
return KResult(-EAGAIN);
locker.unlock();
auto res = Thread::current()->block<Thread::ReadBlocker>(nullptr, description);
auto unblocked_flags = Thread::FileDescriptionBlocker::BlockFlags::None;
auto res = Thread::current()->block<Thread::ReadBlocker>(nullptr, description, unblocked_flags);
locker.lock();
if (!m_can_read) {
if (!((u32)unblocked_flags & (u32)Thread::FileDescriptionBlocker::BlockFlags::Read)) {
if (res.was_interrupted())
return KResult(-EINTR);
@@ -279,7 +281,7 @@ KResultOr<size_t> IPv4Socket::receive_byte_buffered(FileDescription& description
if (nreceived > 0)
Thread::current()->did_ipv4_socket_read((size_t)nreceived);
m_can_read = !m_receive_buffer.is_empty();
set_can_read(!m_receive_buffer.is_empty());
return nreceived;
}
@@ -299,7 +301,7 @@ KResultOr<size_t> IPv4Socket::receive_packet_buffered(FileDescription& descripti
if (!m_receive_queue.is_empty()) {
packet = m_receive_queue.take_first();
m_can_read = !m_receive_queue.is_empty();
set_can_read(!m_receive_queue.is_empty());
#ifdef IPV4_SOCKET_DEBUG
dbg() << "IPv4Socket(" << this << "): recvfrom without blocking " << packet.data.value().size() << " bytes, packets in queue: " << m_receive_queue.size();
#endif
@@ -312,10 +314,11 @@ KResultOr<size_t> IPv4Socket::receive_packet_buffered(FileDescription& descripti
}
locker.unlock();
auto res = Thread::current()->block<Thread::ReadBlocker>(nullptr, description);
auto unblocked_flags = Thread::FileDescriptionBlocker::BlockFlags::None;
auto res = Thread::current()->block<Thread::ReadBlocker>(nullptr, description, unblocked_flags);
locker.lock();
if (!m_can_read) {
if (!((u32)unblocked_flags & (u32)Thread::FileDescriptionBlocker::BlockFlags::Read)) {
if (res.was_interrupted())
return KResult(-EINTR);
@@ -325,7 +328,7 @@ KResultOr<size_t> IPv4Socket::receive_packet_buffered(FileDescription& descripti
ASSERT(m_can_read);
ASSERT(!m_receive_queue.is_empty());
packet = m_receive_queue.take_first();
m_can_read = !m_receive_queue.is_empty();
set_can_read(!m_receive_queue.is_empty());
#ifdef IPV4_SOCKET_DEBUG
dbg() << "IPv4Socket(" << this << "): recvfrom with blocking " << packet.data.value().size() << " bytes, packets in queue: " << m_receive_queue.size();
#endif
@@ -411,14 +414,14 @@ bool IPv4Socket::did_receive(const IPv4Address& source_address, u16 source_port,
ssize_t nwritten = m_receive_buffer.write(scratch_buffer, nreceived_or_error.value());
if (nwritten < 0)
return false;
m_can_read = !m_receive_buffer.is_empty();
set_can_read(!m_receive_buffer.is_empty());
} else {
if (m_receive_queue.size() > 2000) {
dbg() << "IPv4Socket(" << this << "): did_receive refusing packet since queue is full.";
return false;
}
m_receive_queue.append({ source_address, source_port, packet_timestamp, move(packet) });
m_can_read = true;
set_can_read(true);
}
m_bytes_received += packet_size;
#ifdef IPV4_SOCKET_DEBUG
@@ -625,7 +628,14 @@ KResult IPv4Socket::close()
void IPv4Socket::shut_down_for_reading()
{
Socket::shut_down_for_reading();
m_can_read = true;
set_can_read(true);
}
void IPv4Socket::set_can_read(bool value)
{
m_can_read = value;
if (value)
evaluate_block_conditions();
}
}


@@ -113,6 +113,8 @@ private:
KResultOr<size_t> receive_byte_buffered(FileDescription&, UserOrKernelBuffer& buffer, size_t buffer_length, int flags, Userspace<sockaddr*>, Userspace<socklen_t*>);
KResultOr<size_t> receive_packet_buffered(FileDescription&, UserOrKernelBuffer& buffer, size_t buffer_length, int flags, Userspace<sockaddr*>, Userspace<socklen_t*>, timeval&);
void set_can_read(bool);
IPv4Address m_local_address;
IPv4Address m_peer_address;


@@ -68,6 +68,13 @@ LocalSocket::LocalSocket(int type)
m_prebind_gid = current_process->gid();
m_prebind_mode = 0666;
m_for_client.set_unblock_callback([this]() {
evaluate_block_conditions();
});
m_for_server.set_unblock_callback([this]() {
evaluate_block_conditions();
});
#ifdef DEBUG_LOCAL_SOCKET
dbg() << "LocalSocket{" << this << "} created with type=" << type;
#endif
@@ -170,22 +177,23 @@ KResult LocalSocket::connect(FileDescription& description, Userspace<const socka
memcpy(m_address.sun_path, safe_address, sizeof(m_address.sun_path));
ASSERT(m_connect_side_fd == &description);
m_connect_side_role = Role::Connecting;
set_connect_side_role(Role::Connecting);
auto peer = m_file->inode()->socket();
auto result = peer->queue_connection_from(*this);
if (result.is_error()) {
m_connect_side_role = Role::None;
set_connect_side_role(Role::None);
return result;
}
if (is_connected()) {
m_connect_side_role = Role::Connected;
set_connect_side_role(Role::Connected);
return KSuccess;
}
if (Thread::current()->block<Thread::ConnectBlocker>(nullptr, description).was_interrupted()) {
m_connect_side_role = Role::None;
auto unblock_flags = Thread::FileDescriptionBlocker::BlockFlags::None;
if (Thread::current()->block<Thread::ConnectBlocker>(nullptr, description, unblock_flags).was_interrupted()) {
set_connect_side_role(Role::None);
return KResult(-EINTR);
}
@@ -193,11 +201,11 @@ KResult LocalSocket::connect(FileDescription& description, Userspace<const socka
dbg() << "LocalSocket{" << this << "} connect(" << safe_address << ") status is " << to_string(setup_state());
#endif
if (!is_connected()) {
m_connect_side_role = Role::None;
if (!((u32)unblock_flags & (u32)Thread::FileDescriptionBlocker::BlockFlags::Connect)) {
set_connect_side_role(Role::None);
return KResult(-ECONNREFUSED);
}
m_connect_side_role = Role::Connected;
set_connect_side_role(Role::Connected);
return KSuccess;
}
@@ -207,7 +215,9 @@ KResult LocalSocket::listen(size_t backlog)
if (type() != SOCK_STREAM)
return KResult(-EOPNOTSUPP);
set_backlog(backlog);
m_connect_side_role = m_role = Role::Listener;
auto previous_role = m_role;
m_role = Role::Listener;
set_connect_side_role(Role::Listener, previous_role != m_role);
#ifdef DEBUG_LOCAL_SOCKET
dbg() << "LocalSocket{" << this << "} listening with backlog=" << backlog;
#endif
@@ -224,6 +234,8 @@ void LocalSocket::attach(FileDescription& description)
ASSERT(m_connect_side_fd != &description);
m_accept_side_fd_open = true;
}
evaluate_block_conditions();
}
void LocalSocket::detach(FileDescription& description)
@@ -234,6 +246,8 @@ void LocalSocket::detach(FileDescription& description)
ASSERT(m_accept_side_fd_open);
m_accept_side_fd_open = false;
}
evaluate_block_conditions();
}
bool LocalSocket::can_read(const FileDescription& description, size_t) const
@@ -308,7 +322,8 @@ KResultOr<size_t> LocalSocket::recvfrom(FileDescription& description, UserOrKern
return KResult(-EAGAIN);
}
} else if (!can_read(description, 0)) {
if (Thread::current()->block<Thread::ReadBlocker>(nullptr, description).was_interrupted())
auto unblock_flags = Thread::FileDescriptionBlocker::BlockFlags::None;
if (Thread::current()->block<Thread::ReadBlocker>(nullptr, description, unblock_flags).was_interrupted())
return KResult(-EINTR);
}
if (!has_attached_peer(description) && buffer_for_me.is_empty())


@@ -77,6 +77,14 @@ private:
NonnullRefPtrVector<FileDescription>& sendfd_queue_for(const FileDescription&);
NonnullRefPtrVector<FileDescription>& recvfd_queue_for(const FileDescription&);
void set_connect_side_role(Role connect_side_role, bool force_evaluate_block_conditions = false)
{
auto previous = m_connect_side_role;
m_connect_side_role = connect_side_role;
if (previous != m_connect_side_role || force_evaluate_block_conditions)
evaluate_block_conditions();
}
// An open socket file on the filesystem.
RefPtr<FileDescription> m_file;


@@ -187,13 +187,7 @@ void handle_arp(const EthernetFrameHeader& eth, size_t frame_size)
// Someone has this IPv4 address. I guess we can try to remember that.
// FIXME: Protect against ARP spamming.
// FIXME: Support static ARP table entries.
LOCKER(arp_table().lock());
arp_table().resource().set(packet.sender_protocol_address(), packet.sender_hardware_address());
klog() << "ARP table (" << arp_table().resource().size() << " entries):";
for (auto& it : arp_table().resource()) {
klog() << it.value.to_string().characters() << " :: " << it.key.to_string().characters();
}
update_arp_table(packet.sender_protocol_address(), packet.sender_hardware_address());
}
if (packet.operation() == ARPOperation::Request) {


@@ -36,11 +36,105 @@ namespace Kernel {
static AK::Singleton<Lockable<HashMap<IPv4Address, MACAddress>>> s_arp_table;
class ARPTableBlocker : public Thread::Blocker {
public:
ARPTableBlocker(IPv4Address ip_addr, Optional<MACAddress>& addr);
virtual const char* state_string() const override { return "Routing (ARP)"; }
virtual Type blocker_type() const override { return Type::Routing; }
virtual bool should_block() override { return m_should_block; }
virtual void not_blocking(bool) override;
bool unblock(bool from_add_blocker, const IPv4Address& ip_addr, const MACAddress& addr)
{
if (m_ip_addr != ip_addr)
return false;
{
ScopedSpinLock lock(m_lock);
if (m_did_unblock)
return false;
m_did_unblock = true;
m_addr = addr;
}
if (!from_add_blocker)
unblock_from_blocker();
return true;
}
const IPv4Address& ip_addr() const { return m_ip_addr; }
private:
const IPv4Address m_ip_addr;
Optional<MACAddress>& m_addr;
bool m_did_unblock { false };
bool m_should_block { true };
};
class ARPTableBlockCondition : public Thread::BlockCondition {
public:
void unblock(const IPv4Address& ip_addr, const MACAddress& addr)
{
unblock_all([&](auto& b, void*) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::Routing);
auto& blocker = static_cast<ARPTableBlocker&>(b);
return blocker.unblock(false, ip_addr, addr);
});
}
protected:
virtual bool should_add_blocker(Thread::Blocker& b, void*) override
{
ASSERT(b.blocker_type() == Thread::Blocker::Type::Routing);
auto& blocker = static_cast<ARPTableBlocker&>(b);
auto val = s_arp_table->resource().get(blocker.ip_addr());
if (!val.has_value())
return true;
return blocker.unblock(true, blocker.ip_addr(), val.value());
}
};
static AK::Singleton<ARPTableBlockCondition> s_arp_table_block_condition;
ARPTableBlocker::ARPTableBlocker(IPv4Address ip_addr, Optional<MACAddress>& addr)
: m_ip_addr(ip_addr)
, m_addr(addr)
{
if (!set_block_condition(*s_arp_table_block_condition))
m_should_block = false;
}
void ARPTableBlocker::not_blocking(bool timeout_in_past)
{
ASSERT(timeout_in_past || !m_should_block);
auto addr = s_arp_table->resource().get(ip_addr());
ScopedSpinLock lock(m_lock);
if (!m_did_unblock) {
m_did_unblock = true;
m_addr = move(addr);
}
}
Lockable<HashMap<IPv4Address, MACAddress>>& arp_table()
{
return *s_arp_table;
}
void update_arp_table(const IPv4Address& ip_addr, const MACAddress& addr)
{
LOCKER(arp_table().lock());
arp_table().resource().set(ip_addr, addr);
s_arp_table_block_condition->unblock(ip_addr, addr);
klog() << "ARP table (" << arp_table().resource().size() << " entries):";
for (auto& it : arp_table().resource()) {
klog() << it.value.to_string().characters() << " :: " << it.key.to_string().characters();
}
}
bool RoutingDecision::is_zero() const
{
return adapter.is_null() || next_hop.is_zero();
@@ -135,13 +229,8 @@ RoutingDecision route_to(const IPv4Address& target, const IPv4Address& source, c
request.set_sender_protocol_address(adapter->ipv4_address());
adapter->send({ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, request);
(void)Thread::current()->block_until("Routing (ARP)", [next_hop_ip] {
return arp_table().resource().get(next_hop_ip).has_value();
});
{
LOCKER(arp_table().lock());
auto addr = arp_table().resource().get(next_hop_ip);
Optional<MACAddress> addr;
if (!Thread::current()->block<ARPTableBlocker>(nullptr, next_hop_ip, addr).was_interrupted()) {
if (addr.has_value()) {
#ifdef ROUTING_DEBUG
klog() << "Routing: Got ARP response using adapter " << adapter->name().characters() << " for " << next_hop_ip.to_string().characters() << " (" << addr.value().to_string().characters() << ")";


@@ -27,6 +27,7 @@
#pragma once
#include <Kernel/Net/NetworkAdapter.h>
#include <Kernel/Thread.h>
namespace Kernel {
@@ -37,6 +38,7 @@ struct RoutingDecision {
bool is_zero() const;
};
void update_arp_table(const IPv4Address&, const MACAddress&);
RoutingDecision route_to(const IPv4Address& target, const IPv4Address& source, const RefPtr<NetworkAdapter> through = nullptr);
Lockable<HashMap<IPv4Address, MACAddress>>& arp_table();


@@ -70,6 +70,7 @@ void Socket::set_setup_state(SetupState new_setup_state)
#endif
m_setup_state = new_setup_state;
evaluate_block_conditions();
}
RefPtr<Socket> Socket::accept()
@@ -86,6 +87,8 @@ RefPtr<Socket> Socket::accept()
client->m_acceptor = { process.pid().value(), process.uid(), process.gid() };
client->m_connected = true;
client->m_role = Role::Accepted;
if (!m_pending.is_empty())
evaluate_block_conditions();
return client;
}
@@ -98,6 +101,7 @@ KResult Socket::queue_connection_from(NonnullRefPtr<Socket> peer)
if (m_pending.size() >= m_backlog)
return KResult(-ECONNREFUSED);
m_pending.append(peer);
evaluate_block_conditions();
return KSuccess;
}


@@ -52,6 +52,9 @@ void TCPSocket::set_state(State new_state)
dbg() << "TCPSocket{" << this << "} state moving from " << to_string(m_state) << " to " << to_string(new_state);
#endif
auto was_disconnected = protocol_is_disconnected();
auto previous_role = m_role;
m_state = new_state;
if (new_state == State::Established && m_direction == Direction::Outgoing)
@@ -61,6 +64,9 @@ void TCPSocket::set_state(State new_state)
LOCKER(closing_sockets().lock());
closing_sockets().resource().remove(tuple());
}
if (previous_role != m_role || was_disconnected != protocol_is_disconnected())
evaluate_block_conditions();
}
static AK::Singleton<Lockable<HashMap<IPv4SocketTuple, RefPtr<TCPSocket>>>> s_socket_closing;
@@ -389,13 +395,16 @@ KResult TCPSocket::protocol_connect(FileDescription& description, ShouldBlock sh
m_role = Role::Connecting;
m_direction = Direction::Outgoing;
evaluate_block_conditions();
if (should_block == ShouldBlock::Yes) {
locker.unlock();
if (Thread::current()->block<Thread::ConnectBlocker>(nullptr, description).was_interrupted())
auto unblock_flags = Thread::FileBlocker::BlockFlags::None;
if (Thread::current()->block<Thread::ConnectBlocker>(nullptr, description, unblock_flags).was_interrupted())
return KResult(-EINTR);
locker.lock();
ASSERT(setup_state() == SetupState::Completed);
if (has_error()) {
if (has_error()) { // TODO: check unblock_flags
m_role = Role::None;
return KResult(-ECONNREFUSED);
}


@@ -311,7 +311,7 @@ RefPtr<Process> Process::create_user_process(RefPtr<Thread>& first_thread, const
return process;
}
NonnullRefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*entry)(void*), void *entry_data, u32 affinity)
NonnullRefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*entry)(void*), void* entry_data, u32 affinity)
{
auto process = adopt(*new Process(first_thread, move(name), (uid_t)0, (gid_t)0, ProcessID(0), true));
first_thread->tss().eip = (FlatPtr)entry;
@@ -343,6 +343,7 @@ Process::Process(RefPtr<Thread>& first_thread, const String& name, uid_t uid, gi
, m_cwd(move(cwd))
, m_tty(tty)
, m_ppid(ppid)
, m_wait_block_condition(*this)
{
#ifdef PROCESS_DEBUG
dbg() << "Created new process " << m_name << "(" << m_pid.value() << ")";
@@ -365,8 +366,13 @@ Process::Process(RefPtr<Thread>& first_thread, const String& name, uid_t uid, gi
Process::~Process()
{
ASSERT(!m_next && !m_prev); // should have been reaped
ASSERT(thread_count() == 0); // all threads should have been finalized
{
ScopedSpinLock processes_lock(g_processes_lock);
if (prev() || next())
g_processes->remove(this);
}
}
void Process::dump_regions()
@@ -528,38 +534,21 @@ void kgettimeofday(timeval& tv)
tv = kgettimeofday();
}
siginfo_t Process::reap(Process& process)
siginfo_t Process::wait_info()
{
siginfo_t siginfo;
memset(&siginfo, 0, sizeof(siginfo));
siginfo.si_signo = SIGCHLD;
siginfo.si_pid = process.pid().value();
siginfo.si_uid = process.uid();
siginfo.si_pid = pid().value();
siginfo.si_uid = uid();
if (process.m_termination_signal) {
siginfo.si_status = process.m_termination_signal;
if (m_termination_signal) {
siginfo.si_status = m_termination_signal;
siginfo.si_code = CLD_KILLED;
} else {
siginfo.si_status = process.m_termination_status;
siginfo.si_status = m_termination_status;
siginfo.si_code = CLD_EXITED;
}
ASSERT(g_processes_lock.is_locked());
if (!!process.ppid()) {
auto parent = Process::from_pid(process.ppid());
if (parent) {
parent->m_ticks_in_user_for_dead_children += process.m_ticks_in_user + process.m_ticks_in_user_for_dead_children;
parent->m_ticks_in_kernel_for_dead_children += process.m_ticks_in_kernel + process.m_ticks_in_kernel_for_dead_children;
}
}
#ifdef PROCESS_DEBUG
dbg() << "Reaping process " << process;
#endif
ASSERT(process.is_dead());
g_processes->remove(&process);
process.unref();
return siginfo;
}
@@ -587,7 +576,7 @@ KResultOr<String> Process::get_syscall_path_argument(const Syscall::StringArgume
return get_syscall_path_argument(path.characters, path.length);
}
void Process::finalize()
void Process::finalize(Thread& last_thread)
{
ASSERT(Thread::current() == g_finalizer);
#ifdef PROCESS_DEBUG
@@ -614,26 +603,46 @@ void Process::finalize()
m_root_directory = nullptr;
m_root_directory_relative_to_global_root = nullptr;
m_dead = true;
disown_all_shared_buffers();
{
InterruptDisabler disabler;
// FIXME: PID/TID BUG
if (auto parent_thread = Thread::from_tid(m_ppid.value())) {
if (parent_thread->m_signal_action_data[SIGCHLD].flags & SA_NOCLDWAIT) {
// NOTE: If the parent doesn't care about this process, let it go.
m_ppid = 0;
} else {
if (!(parent_thread->m_signal_action_data[SIGCHLD].flags & SA_NOCLDWAIT))
parent_thread->send_signal(SIGCHLD, this);
}
}
{
ScopedSpinLock processes_lock(g_processes_lock);
if (!!ppid()) {
if (auto parent = Process::from_pid(ppid())) {
parent->m_ticks_in_user_for_dead_children += m_ticks_in_user + m_ticks_in_user_for_dead_children;
parent->m_ticks_in_kernel_for_dead_children += m_ticks_in_kernel + m_ticks_in_kernel_for_dead_children;
}
}
}
unblock_waiters(last_thread, Thread::WaitBlocker::UnblockFlags::Terminated);
{
ScopedSpinLock lock(m_lock);
m_regions.clear();
}
m_dead = true;
ASSERT(ref_count() > 0);
// WaitBlockCondition::finalize will be in charge of dropping the last
// reference if there are still waiters around, or whenever the last
// waitable states are consumed. Unless there is no parent around
// anymore, in which case we'll just drop it right away.
m_wait_block_condition.finalize();
}
void Process::unblock_waiters(Thread& thread, Thread::WaitBlocker::UnblockFlags flags, u8 signal)
{
if (auto parent = Process::from_pid(ppid()))
parent->m_wait_block_condition.unblock(thread, flags, signal);
}
void Process::die()
@@ -746,10 +755,8 @@ void Process::terminate_due_to_signal(u8 signal)
KResult Process::send_signal(u8 signal, Process* sender)
{
InterruptDisabler disabler;
Thread* receiver_thread;
// Try to send it to the "obvious" main thread:
receiver_thread = Thread::from_tid(m_pid.value());
auto receiver_thread = Thread::from_tid(m_pid.value());
// If the main thread has died, there may still be other threads:
if (!receiver_thread) {
// The first one should be good enough.


@@ -130,11 +130,13 @@ public:
static NonnullRefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, String&& name, EntryFunction entry, u32 affinity = THREAD_AFFINITY_DEFAULT)
{
auto* entry_func = new EntryFunction(move(entry));
return create_kernel_process(first_thread, move(name), [](void* data) {
EntryFunction* func = reinterpret_cast<EntryFunction*>(data);
(*func)();
delete func;
}, entry_func, affinity);
return create_kernel_process(
first_thread, move(name), [](void* data) {
EntryFunction* func = reinterpret_cast<EntryFunction*>(data);
(*func)();
delete func;
},
entry_func, affinity);
}
static NonnullRefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*entry)(void*), void* entry_data = nullptr, u32 affinity = THREAD_AFFINITY_DEFAULT);
@@ -152,7 +154,8 @@ public:
EntryFunction* func = reinterpret_cast<EntryFunction*>(data);
(*func)();
delete func;
}, priority, name, affinity, joinable);
},
priority, name, affinity, joinable);
}
RefPtr<Thread> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, const String& name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);
@@ -206,7 +209,7 @@ public:
void for_each_thread(Callback) const;
void die();
void finalize();
void finalize(Thread&);
ALWAYS_INLINE SpinLock<u32>& get_lock() const { return m_lock; }
@@ -366,7 +369,8 @@ public:
static void initialize();
[[noreturn]] void crash(int signal, u32 eip, bool out_of_memory = false);
[[nodiscard]] static siginfo_t reap(Process&);
static void reap(Process&);
[[nodiscard]] siginfo_t wait_info();
const TTY* tty() const { return m_tty; }
void set_tty(TTY*);
@@ -426,7 +430,7 @@ public:
u16 thread_count() const
{
return m_thread_count.load(AK::MemoryOrder::memory_order_consume);
return m_thread_count.load(AK::MemoryOrder::memory_order_relaxed);
}
Lock& big_lock()
@@ -484,6 +488,9 @@ public:
KResultOr<u32> peek_user_data(Userspace<const u32*> address);
KResult poke_user_data(Userspace<u32*> address, u32 data);
void unblock_waiters(Thread&, Thread::WaitBlocker::UnblockFlags, u8 signal = 0);
Thread::WaitBlockCondition& wait_block_condition() { return m_wait_block_condition; }
private:
friend class MemoryManager;
friend class Scheduler;
@@ -625,6 +632,8 @@ private:
// If it is set to true, the process will stop at the next execve syscall
// and wait for a tracer to attach.
bool m_wait_for_tracer_at_next_execve { false };
Thread::WaitBlockCondition m_wait_block_condition;
};
extern InlineLinkedList<Process>* g_processes;
@@ -710,7 +719,6 @@ inline bool InodeMetadata::may_write(const Process& process) const
return may_write(process.euid(), process.egid(), process.extra_gids());
}
inline bool InodeMetadata::may_execute(const Process& process) const
{
return may_execute(process.euid(), process.egid(), process.extra_gids());


@@ -50,13 +50,15 @@ NonnullRefPtr<ProcessGroup> ProcessGroup::create(ProcessGroupID pgid)
NonnullRefPtr<ProcessGroup> ProcessGroup::find_or_create(ProcessGroupID pgid)
{
ScopedSpinLock lock(g_process_groups_lock);
if (auto existing = from_pgid(pgid))
return *existing;
return existing.release_nonnull();
return create(pgid);
}
ProcessGroup* ProcessGroup::from_pgid(ProcessGroupID pgid)
RefPtr<ProcessGroup> ProcessGroup::from_pgid(ProcessGroupID pgid)
{
ScopedSpinLock lock(g_process_groups_lock);


@@ -50,7 +50,7 @@ public:
static NonnullRefPtr<ProcessGroup> create(ProcessGroupID);
static NonnullRefPtr<ProcessGroup> find_or_create(ProcessGroupID);
static ProcessGroup* from_pgid(ProcessGroupID);
static RefPtr<ProcessGroup> from_pgid(ProcessGroupID);
const ProcessGroupID& pgid() const { return m_pgid; }


@@ -35,6 +35,7 @@ namespace Ptrace {
KResultOr<u32> handle_syscall(const Kernel::Syscall::SC_ptrace_params& params, Process& caller)
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
if (params.request == PT_TRACE_ME) {
if (Thread::current()->tracer())
return KResult(-EBUSY);
@@ -50,11 +51,7 @@ KResultOr<u32> handle_syscall(const Kernel::Syscall::SC_ptrace_params& params, P
if (params.tid == caller.pid().value())
return KResult(-EINVAL);
Thread* peer = nullptr;
{
InterruptDisabler disabler;
peer = Thread::from_tid(params.tid);
}
auto peer = Thread::from_tid(params.tid);
if (!peer)
return KResult(-ESRCH);
@@ -67,10 +64,9 @@ KResultOr<u32> handle_syscall(const Kernel::Syscall::SC_ptrace_params& params, P
return KResult(-EBUSY);
}
peer->start_tracing_from(caller.pid());
ScopedSpinLock lock(peer->get_lock());
if (peer->state() != Thread::State::Stopped) {
ScopedSpinLock lock(peer->get_lock());
if (!(peer->has_blocker() && peer->blocker().is_reason_signal()))
peer->send_signal(SIGSTOP, &caller);
peer->send_signal(SIGSTOP, &caller);
}
return KSuccess;
}
@@ -104,7 +100,6 @@ KResultOr<u32> handle_syscall(const Kernel::Syscall::SC_ptrace_params& params, P
case PT_GETREGS: {
if (!tracer->has_regs())
return KResult(-EINVAL);
auto* regs = reinterpret_cast<PtraceRegisters*>(params.addr);
if (!copy_to_user(regs, &tracer->regs()))
return KResult(-EFAULT);


@@ -28,8 +28,6 @@
#include <AK/ScopeGuard.h>
#include <AK/TemporaryChange.h>
#include <AK/Time.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Net/Socket.h>
#include <Kernel/Process.h>
#include <Kernel/Profiling.h>
#include <Kernel/RTC.h>
@@ -77,292 +75,6 @@ WaitQueue* g_finalizer_wait_queue;
Atomic<bool> g_finalizer_has_work { false };
static Process* s_colonel_process;
Thread::JoinBlocker::JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value)
: m_joinee(&joinee)
, m_joinee_exit_value(joinee_exit_value)
{
auto* current_thread = Thread::current();
// We need to hold our lock to avoid a race where try_join succeeds
// but the joinee is joining immediately
ScopedSpinLock lock(m_lock);
try_join_result = joinee.try_join(*current_thread);
m_join_error = try_join_result.is_error();
}
void Thread::JoinBlocker::was_unblocked()
{
ScopedSpinLock lock(m_lock);
if (!m_join_error && m_joinee) {
// If the joinee hasn't exited yet, remove ourselves now
ASSERT(m_joinee != Thread::current());
m_joinee->join_done();
m_joinee = nullptr;
}
}
bool Thread::JoinBlocker::should_unblock(Thread&)
{
// We need to acquire our lock as the joinee could call joinee_exited
// at any moment
ScopedSpinLock lock(m_lock);
if (m_join_error) {
// Thread::block calls should_unblock before actually blocking.
// If detected that we can't really block due to an error, we'll
// return true here, which will cause Thread::block to return
// with BlockResult::NotBlocked. Technically, because m_join_error
// will only be set in the constructor, we don't need any lock
// to check for it, but at the same time there should not be
// any contention, either...
return true;
}
return m_joinee == nullptr;
}
void Thread::JoinBlocker::joinee_exited(void* value)
{
ScopedSpinLock lock(m_lock);
if (!m_joinee) {
// m_joinee can be nullptr if the joiner timed out and the
// joinee waits on m_lock while the joiner holds it but has
// not yet called join_done.
return;
}
m_joinee_exit_value = value;
m_joinee = nullptr;
set_interrupted_by_death();
}
Thread::FileDescriptionBlocker::FileDescriptionBlocker(const FileDescription& description)
: m_blocked_description(description)
{
}
const FileDescription& Thread::FileDescriptionBlocker::blocked_description() const
{
return m_blocked_description;
}
Thread::AcceptBlocker::AcceptBlocker(const FileDescription& description)
: FileDescriptionBlocker(description)
{
}
bool Thread::AcceptBlocker::should_unblock(Thread&)
{
auto& socket = *blocked_description().socket();
return socket.can_accept();
}
Thread::ConnectBlocker::ConnectBlocker(const FileDescription& description)
: FileDescriptionBlocker(description)
{
}
bool Thread::ConnectBlocker::should_unblock(Thread&)
{
auto& socket = *blocked_description().socket();
return socket.setup_state() == Socket::SetupState::Completed;
}
Thread::WriteBlocker::WriteBlocker(const FileDescription& description)
: FileDescriptionBlocker(description)
{
}
auto Thread::WriteBlocker::override_timeout(const BlockTimeout& timeout) -> const BlockTimeout&
{
auto& description = blocked_description();
if (description.is_socket()) {
auto& socket = *description.socket();
if (socket.has_send_timeout()) {
m_timeout = BlockTimeout(false, &socket.send_timeout(), timeout.start_time());
if (timeout.is_infinite() || (!m_timeout.is_infinite() && m_timeout.absolute_time() < timeout.absolute_time()))
return m_timeout;
}
}
return timeout;
}
bool Thread::WriteBlocker::should_unblock(Thread&)
{
return blocked_description().can_write();
}
Thread::ReadBlocker::ReadBlocker(const FileDescription& description)
: FileDescriptionBlocker(description)
{
}
auto Thread::ReadBlocker::override_timeout(const BlockTimeout& timeout) -> const BlockTimeout&
{
auto& description = blocked_description();
if (description.is_socket()) {
auto& socket = *description.socket();
if (socket.has_receive_timeout()) {
m_timeout = BlockTimeout(false, &socket.receive_timeout(), timeout.start_time());
if (timeout.is_infinite() || (!m_timeout.is_infinite() && m_timeout.absolute_time() < timeout.absolute_time()))
return m_timeout;
}
}
return timeout;
}
bool Thread::ReadBlocker::should_unblock(Thread&)
{
return blocked_description().can_read();
}
Thread::ConditionBlocker::ConditionBlocker(const char* state_string, Function<bool()>&& condition)
: m_block_until_condition(move(condition))
, m_state_string(state_string)
{
ASSERT(m_block_until_condition);
}
bool Thread::ConditionBlocker::should_unblock(Thread&)
{
return m_block_until_condition();
}
Thread::SleepBlocker::SleepBlocker(const BlockTimeout& deadline, timespec* remaining)
: m_deadline(deadline)
, m_remaining(remaining)
{
}
auto Thread::SleepBlocker::override_timeout(const BlockTimeout& timeout) -> const BlockTimeout&
{
ASSERT(timeout.is_infinite()); // A timeout should not be provided
// To simplify things only use the sleep deadline.
return m_deadline;
}
void Thread::SleepBlocker::was_unblocked()
{
if (!m_remaining)
return;
auto time_now = TimeManagement::the().monotonic_time();
if (time_now < m_deadline.absolute_time())
timespec_sub(m_deadline.absolute_time(), time_now, *m_remaining);
else
*m_remaining = {};
}
Thread::BlockResult Thread::SleepBlocker::block_result(bool did_timeout)
{
auto result = Blocker::block_result(did_timeout);
if (result == Thread::BlockResult::InterruptedByTimeout)
return Thread::BlockResult::WokeNormally;
return result;
}
Thread::SelectBlocker::SelectBlocker(const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds)
: m_select_read_fds(read_fds)
, m_select_write_fds(write_fds)
, m_select_exceptional_fds(except_fds)
{
}
bool Thread::SelectBlocker::should_unblock(Thread& thread)
{
auto& process = thread.process();
for (int fd : m_select_read_fds) {
if (!process.m_fds[fd])
continue;
if (process.m_fds[fd].description()->can_read())
return true;
}
for (int fd : m_select_write_fds) {
if (!process.m_fds[fd])
continue;
if (process.m_fds[fd].description()->can_write())
return true;
}
return false;
}
Thread::WaitBlocker::WaitBlocker(int wait_options, ProcessID& waitee_pid)
: m_wait_options(wait_options)
, m_waitee_pid(waitee_pid)
{
}
bool Thread::WaitBlocker::should_unblock(Thread& thread)
{
bool should_unblock = m_wait_options & WNOHANG;
if (m_waitee_pid != -1) {
auto peer = Process::from_pid(m_waitee_pid);
if (!peer)
return true;
}
thread.process().for_each_child([&](Process& child) {
if (m_waitee_pid != -1 && m_waitee_pid != child.pid())
return IterationDecision::Continue;
bool child_exited = child.is_dead();
bool child_stopped = false;
if (child.thread_count()) {
child.for_each_thread([&](auto& child_thread) {
if (child_thread.state() == Thread::State::Stopped && !child_thread.has_pending_signal(SIGCONT)) {
child_stopped = true;
return IterationDecision::Break;
}
return IterationDecision::Continue;
});
}
bool fits_the_spec = ((m_wait_options & WEXITED) && child_exited)
|| ((m_wait_options & WSTOPPED) && child_stopped);
if (!fits_the_spec)
return IterationDecision::Continue;
m_waitee_pid = child.pid();
should_unblock = true;
return IterationDecision::Break;
});
return should_unblock;
}
Thread::SemiPermanentBlocker::SemiPermanentBlocker(Reason reason)
: m_reason(reason)
{
}
bool Thread::SemiPermanentBlocker::should_unblock(Thread&)
{
// someone else has to unblock us
return false;
}
// Called by the scheduler on threads that are blocked for some reason.
// Make a decision as to whether to unblock them or not.
void Thread::consider_unblock()
{
ScopedSpinLock lock(m_lock);
switch (state()) {
case Thread::Invalid:
case Thread::Runnable:
case Thread::Running:
case Thread::Dead:
case Thread::Stopped:
case Thread::Queued:
case Thread::Dying:
/* don't know, don't care */
return;
case Thread::Blocked: {
ASSERT(m_blocker != nullptr);
if (m_blocker->should_unblock(*this))
unblock();
return;
}
}
}
void Scheduler::start()
{
ASSERT_INTERRUPTS_DISABLED();
@@ -425,22 +137,7 @@ bool Scheduler::pick_next()
current_thread->set_state(Thread::Dying);
}
// Check and unblock threads whose wait conditions have been met.
Scheduler::for_each_nonrunnable([&](Thread& thread) {
thread.consider_unblock();
return IterationDecision::Continue;
});
Process::for_each([&](Process& process) {
if (process.is_dead()) {
if (current_thread->process().pid() != process.pid() && (!process.ppid() || !Process::from_pid(process.ppid()))) {
auto name = process.name();
auto pid = process.pid();
auto exit_status = Process::reap(process);
dbg() << "Scheduler[" << Processor::current().id() << "]: Reaped unparented process " << name << "(" << pid.value() << "), exit status: " << exit_status.si_status;
}
return IterationDecision::Continue;
}
if (process.m_alarm_deadline && TimeManagement::the().uptime_ms() > process.m_alarm_deadline) {
process.m_alarm_deadline = 0;
// FIXME: Should we observe this signal somehow?
@@ -449,26 +146,6 @@ bool Scheduler::pick_next()
return IterationDecision::Continue;
});
// Dispatch any pending signals.
Thread::for_each_living([&](Thread& thread) -> IterationDecision {
ScopedSpinLock lock(thread.get_lock());
if (!thread.has_unmasked_pending_signals())
return IterationDecision::Continue;
// NOTE: dispatch_one_pending_signal() may unblock the process.
bool was_blocked = thread.is_blocked();
if (thread.dispatch_one_pending_signal() == ShouldUnblockThread::No)
return IterationDecision::Continue;
if (was_blocked) {
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]:Unblock " << thread << " due to signal";
#endif
ASSERT(thread.m_blocker != nullptr);
thread.m_blocker->set_interrupted_by_signal();
thread.unblock();
}
return IterationDecision::Continue;
});
#ifdef SCHEDULER_RUNNABLE_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]: Non-runnables:";
Scheduler::for_each_nonrunnable([&](Thread& thread) -> IterationDecision {
@@ -656,6 +333,14 @@ bool Scheduler::context_switch(Thread* thread)
thread->did_schedule();
auto from_thread = Thread::current();
// Check if we have any signals we should deliver (even if we don't
// end up switching to another thread)
if (from_thread && from_thread->state() == Thread::Running && from_thread->pending_signals_for_state()) {
ScopedSpinLock lock(from_thread->get_lock());
from_thread->dispatch_one_pending_signal();
}
if (from_thread == thread)
return false;

View file

@ -93,6 +93,13 @@ int handle(RegisterState& regs, u32 function, u32 arg1, u32 arg2, u32 arg3)
if (function == SC_exit || function == SC_exit_thread) {
// These syscalls need special handling since they never return to the caller.
if (auto* tracer = current_thread->tracer(); tracer && tracer->is_tracing_syscalls()) {
regs.eax = 0;
tracer->set_trace_syscalls(false);
current_thread->tracer_trap(regs); // this triggers SIGTRAP and stops the thread!
}
cli();
if (function == SC_exit)
process.sys$exit((int)arg1);
@ -131,9 +138,9 @@ void syscall_handler(TrapFrame* trap)
auto& regs = *trap->regs;
auto current_thread = Thread::current();
if (current_thread->tracer() && current_thread->tracer()->is_tracing_syscalls()) {
current_thread->tracer()->set_trace_syscalls(false);
current_thread->tracer_trap(regs);
if (auto* tracer = current_thread->tracer(); tracer && tracer->is_tracing_syscalls()) {
tracer->set_trace_syscalls(false);
current_thread->tracer_trap(regs); // this triggers SIGTRAP and stops the thread!
}
// Make sure SMAP protection is enabled on syscall entry.
@ -176,11 +183,11 @@ void syscall_handler(TrapFrame* trap)
u32 arg1 = regs.edx;
u32 arg2 = regs.ecx;
u32 arg3 = regs.ebx;
regs.eax = (u32)Syscall::handle(regs, function, arg1, arg2, arg3);
regs.eax = Syscall::handle(regs, function, arg1, arg2, arg3);
if (current_thread->tracer() && current_thread->tracer()->is_tracing_syscalls()) {
current_thread->tracer()->set_trace_syscalls(false);
current_thread->tracer_trap(regs);
if (auto* tracer = current_thread->tracer(); tracer && tracer->is_tracing_syscalls()) {
tracer->set_trace_syscalls(false);
current_thread->tracer_trap(regs); // this triggers SIGTRAP and stops the thread!
}
process.big_lock().unlock();
@ -188,8 +195,7 @@ void syscall_handler(TrapFrame* trap)
// Check if we're supposed to return to userspace or just die.
current_thread->die_if_needed();
if (current_thread->has_unmasked_pending_signals())
(void)current_thread->block<Thread::SemiPermanentBlocker>(nullptr, Thread::SemiPermanentBlocker::Reason::Signal);
ASSERT(!g_scheduler_lock.own_lock());
}
}

View file

@ -568,9 +568,6 @@ int Process::sys$execve(Userspace<const Syscall::SC_execve_params*> user_params)
if (params.arguments.length > ARG_MAX || params.environment.length > ARG_MAX)
return -E2BIG;
if (wait_for_tracer_at_next_execve())
Thread::current()->send_urgent_signal_to_self(SIGSTOP);
String path;
{
auto path_arg = get_syscall_path_argument(params.path);

View file

@ -111,10 +111,8 @@ KResult Process::do_killself(int signal)
return KSuccess;
auto current_thread = Thread::current();
if (!current_thread->should_ignore_signal(signal)) {
if (!current_thread->should_ignore_signal(signal))
current_thread->send_signal(signal, this);
(void)current_thread->block<Thread::SemiPermanentBlocker>(nullptr, Thread::SemiPermanentBlocker::Reason::Signal);
}
return KSuccess;
}

View file

@ -50,10 +50,12 @@ ssize_t Process::sys$read(int fd, Userspace<u8*> buffer, ssize_t size)
return -EISDIR;
if (description->is_blocking()) {
if (!description->can_read()) {
if (Thread::current()->block<Thread::ReadBlocker>(nullptr, *description).was_interrupted())
auto unblock_flags = Thread::FileBlocker::BlockFlags::None;
if (Thread::current()->block<Thread::ReadBlocker>(nullptr, *description, unblock_flags).was_interrupted())
return -EINTR;
if (!description->can_read())
if (!((u32)unblock_flags & (u32)Thread::FileBlocker::BlockFlags::Read))
return -EAGAIN;
// TODO: handle exceptions in unblock_flags
}
}
auto user_buffer = UserOrKernelBuffer::for_user_buffer(buffer, size);

View file

@ -68,66 +68,77 @@ int Process::sys$select(const Syscall::SC_select_params* user_params)
current_thread->update_signal_mask(previous_signal_mask);
});
Thread::SelectBlocker::FDVector rfds;
Thread::SelectBlocker::FDVector wfds;
Thread::SelectBlocker::FDVector efds;
fd_set fds_read, fds_write, fds_except;
if (params.readfds && !copy_from_user(&fds_read, params.readfds))
return -EFAULT;
if (params.writefds && !copy_from_user(&fds_write, params.writefds))
return -EFAULT;
if (params.exceptfds && !copy_from_user(&fds_except, params.exceptfds))
return -EFAULT;
auto transfer_fds = [&](auto* fds_unsafe, auto& vector) -> int {
vector.clear_with_capacity();
if (!fds_unsafe)
return 0;
fd_set fds;
if (!copy_from_user(&fds, fds_unsafe))
return -EFAULT;
for (int fd = 0; fd < params.nfds; ++fd) {
if (FD_ISSET(fd, &fds)) {
if (!file_description(fd)) {
dbg() << "sys$select: Bad fd number " << fd;
return -EBADF;
}
vector.append(fd);
}
Thread::SelectBlocker::FDVector fds_info;
Vector<int> fds;
for (int fd = 0; fd < params.nfds; fd++) {
u32 block_flags = (u32)Thread::FileBlocker::BlockFlags::None;
if (params.readfds && FD_ISSET(fd, &fds_read))
block_flags |= (u32)Thread::FileBlocker::BlockFlags::Read;
if (params.writefds && FD_ISSET(fd, &fds_write))
block_flags |= (u32)Thread::FileBlocker::BlockFlags::Write;
if (params.exceptfds && FD_ISSET(fd, &fds_except))
block_flags |= (u32)Thread::FileBlocker::BlockFlags::Exception;
if (block_flags == (u32)Thread::FileBlocker::BlockFlags::None)
continue;
auto description = file_description(fd);
if (!description) {
dbg() << "sys$select: Bad fd number " << fd;
return -EBADF;
}
return 0;
};
if (int error = transfer_fds(params.writefds, wfds))
return error;
if (int error = transfer_fds(params.readfds, rfds))
return error;
if (int error = transfer_fds(params.exceptfds, efds))
return error;
#if defined(DEBUG_IO) || defined(DEBUG_POLL_SELECT)
dbg() << "selecting on (read:" << rfds.size() << ", write:" << wfds.size() << "), timeout=" << params.timeout;
#endif
if (timeout.should_block()) {
if (current_thread->block<Thread::SelectBlocker>(timeout, rfds, wfds, efds).was_interrupted())
return -EINTR;
fds_info.append({ description.release_nonnull(), (Thread::FileBlocker::BlockFlags)block_flags });
fds.append(fd);
}
int marked_fd_count = 0;
auto mark_fds = [&](auto* fds_unsafe, auto& vector, auto should_mark) {
if (!fds_unsafe)
return 0;
fd_set fds;
FD_ZERO(&fds);
for (int fd : vector) {
if (auto description = file_description(fd); description && should_mark(*description)) {
FD_SET(fd, &fds);
++marked_fd_count;
}
}
if (!copy_to_user(fds_unsafe, &fds))
return -EFAULT;
return 0;
};
if (int error = mark_fds(params.readfds, rfds, [](auto& description) { return description.can_read(); }))
return error;
if (int error = mark_fds(params.writefds, wfds, [](auto& description) { return description.can_write(); }))
return error;
// FIXME: We should also mark exceptfds as appropriate.
#if defined(DEBUG_IO) || defined(DEBUG_POLL_SELECT)
dbg() << "selecting on " << fds_info.size() << " fds, timeout=" << params.timeout;
#endif
if (current_thread->block<Thread::SelectBlocker>(timeout, fds_info).was_interrupted()) {
dbg() << "select was interrupted";
return -EINTR;
}
if (params.readfds)
FD_ZERO(&fds_read);
if (params.writefds)
FD_ZERO(&fds_write);
if (params.exceptfds)
FD_ZERO(&fds_except);
int marked_fd_count = 0;
for (size_t i = 0; i < fds_info.size(); i++) {
auto& fd_entry = fds_info[i];
if (fd_entry.unblocked_flags == Thread::FileBlocker::BlockFlags::None)
continue;
if (params.readfds && ((u32)fd_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Read)) {
FD_SET(fds[i], &fds_read);
marked_fd_count++;
}
if (params.writefds && ((u32)fd_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Write)) {
FD_SET(fds[i], &fds_write);
marked_fd_count++;
}
if (params.exceptfds && ((u32)fd_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Exception)) {
FD_SET(fds[i], &fds_except);
marked_fd_count++;
}
}
if (params.readfds && !copy_to_user(params.readfds, &fds_read))
return -EFAULT;
if (params.writefds && !copy_to_user(params.writefds, &fds_write))
return -EFAULT;
if (params.exceptfds && !copy_to_user(params.exceptfds, &fds_except))
return -EFAULT;
return marked_fd_count;
}
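Note: the rewritten sys$select() above folds all three fd_sets into one FDInfo vector, blocks once on a single SelectBlocker, and then translates each entry's unblocked_flags back into the user's fd_sets. From userspace the semantics stay plain POSIX; a small usage sketch (wait_readable is an illustrative helper, not from this commit):

#include <sys/select.h>
#include <cstdio>

int wait_readable(int fd, int timeout_secs)
{
    fd_set rfds;
    FD_ZERO(&rfds);
    FD_SET(fd, &rfds); // the kernel turns this into BlockFlags::Read for fd
    timeval tv { timeout_secs, 0 };
    int rc = select(fd + 1, &rfds, nullptr, nullptr, &tv);
    if (rc < 0)
        perror("select"); // e.g. EINTR if a signal interrupted the blocker
    else if (rc == 0)
        printf("timed out\n"); // blocker timed out, no unblocked_flags were set
    else
        printf("fd %d readable: %d\n", fd, FD_ISSET(fd, &rfds));
    return rc;
}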
@ -165,15 +176,22 @@ int Process::sys$poll(Userspace<const Syscall::SC_poll_params*> user_params)
return -EFAULT;
}
Thread::SelectBlocker::FDVector rfds;
Thread::SelectBlocker::FDVector wfds;
for (unsigned i = 0; i < params.nfds; ++i) {
Thread::SelectBlocker::FDVector fds_info;
for (size_t i = 0; i < params.nfds; i++) {
auto& pfd = fds_copy[i];
auto description = file_description(pfd.fd);
if (!description) {
dbg() << "sys$poll: Bad fd number " << pfd.fd;
return -EBADF;
}
u32 block_flags = (u32)Thread::FileBlocker::BlockFlags::Exception; // always want POLLERR, POLLHUP, POLLNVAL
if (pfd.events & POLLIN)
rfds.append(pfd.fd);
block_flags |= (u32)Thread::FileBlocker::BlockFlags::Read;
if (pfd.events & POLLOUT)
wfds.append(pfd.fd);
block_flags |= (u32)Thread::FileBlocker::BlockFlags::Write;
if (pfd.events & POLLPRI)
block_flags |= (u32)Thread::FileBlocker::BlockFlags::ReadPriority;
fds_info.append({ description.release_nonnull(), (Thread::FileBlocker::BlockFlags)block_flags });
}
auto current_thread = Thread::current();
@ -187,31 +205,45 @@ int Process::sys$poll(Userspace<const Syscall::SC_poll_params*> user_params)
});
#if defined(DEBUG_IO) || defined(DEBUG_POLL_SELECT)
dbg() << "polling on (read:" << rfds.size() << ", write:" << wfds.size() << ")";
dbg() << "polling on " << fds_info.size() << " fds, timeout=" << params.timeout;
#endif
if (timeout.should_block()) {
if (current_thread->block<Thread::SelectBlocker>(timeout, rfds, wfds, Thread::SelectBlocker::FDVector()).was_interrupted())
return -EINTR;
}
if (current_thread->block<Thread::SelectBlocker>(timeout, fds_info).was_interrupted())
return -EINTR;
int fds_with_revents = 0;
for (unsigned i = 0; i < params.nfds; ++i) {
auto& pfd = fds_copy[i];
auto description = file_description(pfd.fd);
if (!description) {
pfd.revents = POLLNVAL;
} else {
pfd.revents = 0;
if (pfd.events & POLLIN && description->can_read())
pfd.revents |= POLLIN;
if (pfd.events & POLLOUT && description->can_write())
pfd.revents |= POLLOUT;
auto& fds_entry = fds_info[i];
if (pfd.revents)
++fds_with_revents;
pfd.revents = 0;
if (fds_entry.unblocked_flags == Thread::FileBlocker::BlockFlags::None)
continue;
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Exception) {
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::ReadHangUp)
pfd.revents |= POLLRDHUP;
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::WriteError)
pfd.revents |= POLLERR;
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::WriteHangUp)
pfd.revents |= POLLNVAL;
} else {
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Read) {
ASSERT(pfd.events & POLLIN);
pfd.revents |= POLLIN;
}
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::ReadPriority) {
ASSERT(pfd.events & POLLPRI);
pfd.revents |= POLLPRI;
}
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Write) {
ASSERT(pfd.events & POLLOUT);
pfd.revents |= POLLOUT;
}
}
if (pfd.revents)
fds_with_revents++;
}
if (params.nfds > 0 && !copy_to_user(&params.fds[0], &fds_copy[0], params.nfds * sizeof(pollfd)))
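Note: sys$poll() now derives revents purely from the blocker's unblocked_flags: Read becomes POLLIN, ReadPriority becomes POLLPRI, Write becomes POLLOUT, and the exception sub-flags map to POLLERR/POLLNVAL (plus POLLRDHUP for read hang-ups). A plain-POSIX sketch of the caller's view (wait_io is an illustrative helper):

#include <poll.h>
#include <cstdio>

int wait_io(int fd)
{
    pollfd pfd { fd, static_cast<short>(POLLIN | POLLOUT | POLLPRI), 0 };
    int rc = poll(&pfd, 1, 1000 /* ms */);
    if (rc > 0) {
        if (pfd.revents & POLLIN)
            printf("readable\n"); // BlockFlags::Read was set on unblock
        if (pfd.revents & POLLOUT)
            printf("writable\n"); // BlockFlags::Write was set on unblock
        if (pfd.revents & (POLLERR | POLLNVAL))
            printf("error/invalid\n"); // exception flags, always watched
    }
    return rc;
}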

View file

@ -112,7 +112,8 @@ int Process::sys$accept(int accepting_socket_fd, Userspace<sockaddr*> user_addre
if (!socket.can_accept()) {
if (accepting_socket_description->is_blocking()) {
if (Thread::current()->block<Thread::AcceptBlocker>(nullptr, *accepting_socket_description).was_interrupted())
auto unblock_flags = Thread::FileBlocker::BlockFlags::None;
if (Thread::current()->block<Thread::AcceptBlocker>(nullptr, *accepting_socket_description, unblock_flags).was_interrupted())
return -EINTR;
} else {
return -EAGAIN;

View file

@ -32,74 +32,20 @@ namespace Kernel {
KResultOr<siginfo_t> Process::do_waitid(idtype_t idtype, int id, int options)
{
if (idtype == P_PID) {
ScopedSpinLock lock(g_processes_lock);
if (idtype == P_PID && !Process::from_pid(id))
return KResult(-ECHILD);
// FIXME: Race: After 'lock' releases, the 'id' process might vanish.
// If that is not a problem, why check for it?
// If it is a problem, let's fix it! (Eventually.)
}
ProcessID waitee_pid { 0 };
// FIXME: WaitBlocker should support idtype/id specs directly.
if (idtype == P_ALL) {
waitee_pid = -1;
} else if (idtype == P_PID) {
waitee_pid = id;
} else {
// FIXME: Implement other PID specs.
switch (idtype) {
case P_ALL:
case P_PID:
case P_PGID:
break;
default:
return KResult(-EINVAL);
}
if (Thread::current()->block<Thread::WaitBlocker>(nullptr, options, waitee_pid).was_interrupted())
KResultOr<siginfo_t> result = KResult(KSuccess);
if (Thread::current()->block<Thread::WaitBlocker>(nullptr, options, idtype, id, result).was_interrupted())
return KResult(-EINTR);
ScopedSpinLock lock(g_processes_lock);
// NOTE: If waitee was -1, m_waitee_pid will have been filled in by the scheduler.
auto waitee_process = Process::from_pid(waitee_pid);
if (!waitee_process)
return KResult(-ECHILD);
ASSERT(waitee_process);
if (waitee_process->is_dead()) {
return reap(*waitee_process);
} else {
// FIXME: PID/TID BUG
// Make sure to hold the scheduler lock so that we operate on a consistent state
ScopedSpinLock scheduler_lock(g_scheduler_lock);
auto waitee_thread = Thread::from_tid(waitee_pid.value());
if (!waitee_thread)
return KResult(-ECHILD);
ASSERT((options & WNOHANG) || waitee_thread->state() == Thread::State::Stopped);
siginfo_t siginfo;
memset(&siginfo, 0, sizeof(siginfo));
siginfo.si_signo = SIGCHLD;
siginfo.si_pid = waitee_process->pid().value();
siginfo.si_uid = waitee_process->uid();
switch (waitee_thread->state()) {
case Thread::State::Stopped:
siginfo.si_code = CLD_STOPPED;
break;
case Thread::State::Running:
case Thread::State::Runnable:
case Thread::State::Blocked:
case Thread::State::Dying:
case Thread::State::Dead:
case Thread::State::Queued:
siginfo.si_code = CLD_CONTINUED;
break;
default:
ASSERT_NOT_REACHED();
break;
}
siginfo.si_status = waitee_thread->m_stop_signal;
return siginfo;
}
ASSERT(!result.is_error() || (options & WNOHANG) || result.error() != KSuccess);
return result;
}
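Note: do_waitid() no longer re-inspects the waitee after waking; the WaitBlocker fills a caller-provided KResultOr<siginfo_t>& at the moment it unblocks, while the relevant locks are still held. A tiny model of that out-parameter pattern (Waiter/WaitResult are illustrative names, not kernel types):

#include <cstdio>
#include <optional>

struct WaitResult {
    int pid;
    int status;
};

struct Waiter {
    std::optional<WaitResult>& result; // bound to the caller's storage

    void unblock(int pid, int status)
    {
        result = WaitResult { pid, status }; // filled at unblock time, not after waking
    }
};

int main()
{
    std::optional<WaitResult> result;
    Waiter waiter { result };
    waiter.unblock(42, 0); // in the kernel this happens as the child changes state
    if (result)
        printf("reaped pid %d, status %d\n", result->pid, result->status);
}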
pid_t Process::sys$waitid(Userspace<const Syscall::SC_waitid_params*> user_params)

View file

@ -96,10 +96,12 @@ ssize_t Process::do_write(FileDescription& description, const UserOrKernelBuffer
ASSERT(total_nwritten > 0);
return total_nwritten;
}
if (Thread::current()->block<Thread::WriteBlocker>(nullptr, description).was_interrupted()) {
auto unblock_flags = Thread::FileBlocker::BlockFlags::None;
if (Thread::current()->block<Thread::WriteBlocker>(nullptr, description, unblock_flags).was_interrupted()) {
if (total_nwritten == 0)
return -EINTR;
}
// TODO: handle exceptions in unblock_flags
}
auto nwritten_or_error = description.write(data.offset(total_nwritten), data_size - total_nwritten);
if (nwritten_or_error.is_error()) {

View file

@ -45,6 +45,11 @@ MasterPTY::MasterPTY(unsigned index)
auto process = Process::current();
set_uid(process->uid());
set_gid(process->gid());
m_buffer.set_unblock_callback([this]() {
if (m_slave)
evaluate_block_conditions();
});
}
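Note: DoubleBuffer's new unblock callback is what lets the PTY (and the FIFO) wake blocked readers and writers the moment data or space appears, instead of waiting for a scheduler pass. A simplified sketch of the hookup with std:: types (CallbackBuffer is an illustrative stand-in, not the kernel class):

#include <cstdio>
#include <functional>
#include <string>

class CallbackBuffer {
public:
    void set_unblock_callback(std::function<void()> callback) { m_unblock = std::move(callback); }

    void write(const std::string& data)
    {
        m_data += data;
        if (m_unblock)
            m_unblock(); // readers may be unblockable now
    }

private:
    std::string m_data;
    std::function<void()> m_unblock;
};

int main()
{
    CallbackBuffer buffer;
    buffer.set_unblock_callback([] { printf("evaluate block conditions\n"); });
    buffer.write("hello"); // triggers the callback, like the PTY/FIFO paths above
}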
MasterPTY::~MasterPTY()

View file

@ -71,10 +71,11 @@ void SlavePTY::on_master_write(const UserOrKernelBuffer& buffer, ssize_t size)
{
ssize_t nread = buffer.read_buffered<128>(size, [&](const u8* data, size_t data_size) {
for (size_t i = 0; i < data_size; ++i)
emit(data[i]);
emit(data[i], false);
return (ssize_t)data_size;
});
(void)nread;
if (nread > 0)
evaluate_block_conditions();
}
ssize_t SlavePTY::on_tty_write(const UserOrKernelBuffer& data, ssize_t size)
@ -108,4 +109,9 @@ KResult SlavePTY::close()
return KSuccess;
}
FileBlockCondition& SlavePTY::block_condition()
{
return m_master->block_condition();
}
}

View file

@ -42,6 +42,8 @@ public:
time_t time_of_last_write() const { return m_time_of_last_write; }
virtual FileBlockCondition& block_condition() override;
private:
// ^TTY
virtual String tty_name() const override;

View file

@ -24,6 +24,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <AK/ScopeGuard.h>
#include <Kernel/Process.h>
#include <Kernel/TTY/TTY.h>
#include <LibC/errno_numbers.h>
@ -64,6 +65,7 @@ KResultOr<size_t> TTY::read(FileDescription&, size_t, UserOrKernelBuffer& buffer
size = m_input_buffer.size();
ssize_t nwritten;
bool need_evaluate_block_conditions = false;
if (in_canonical_mode()) {
nwritten = buffer.write_buffered<512>(size, [&](u8* data, size_t data_size) {
size_t i = 0;
@ -73,6 +75,7 @@ KResultOr<size_t> TTY::read(FileDescription&, size_t, UserOrKernelBuffer& buffer
// Here we handle a ^D line, so we don't add the
// character to the output.
m_available_lines--;
need_evaluate_block_conditions = true;
break;
} else if (ch == '\n' || is_eol(ch)) {
data[i] = ch;
@ -93,6 +96,8 @@ KResultOr<size_t> TTY::read(FileDescription&, size_t, UserOrKernelBuffer& buffer
}
if (nwritten < 0)
return KResult(nwritten);
if (nwritten > 0 || need_evaluate_block_conditions)
evaluate_block_conditions();
return (size_t)nwritten;
}
@ -145,7 +150,7 @@ bool TTY::is_werase(u8 ch) const
return ch == m_termios.c_cc[VWERASE];
}
void TTY::emit(u8 ch)
void TTY::emit(u8 ch, bool do_evaluate_block_conditions)
{
if (should_generate_signals()) {
if (ch == m_termios.c_cc[VINFO]) {
@ -173,6 +178,11 @@ void TTY::emit(u8 ch)
}
}
ScopeGuard guard([&]() {
if (do_evaluate_block_conditions)
evaluate_block_conditions();
});
if (in_canonical_mode()) {
if (is_eof(ch)) {
m_available_lines++;
@ -218,6 +228,8 @@ void TTY::do_backspace()
echo(8);
echo(' ');
echo(8);
evaluate_block_conditions();
}
}
@ -231,6 +243,7 @@ void TTY::erase_word()
// Note: if there is leading whitespace before the word
// we want to delete, we have to delete that as well.
bool first_char = false;
bool did_dequeue = false;
while (can_do_backspace()) {
u8 ch = m_input_buffer.last();
if (ch == ' ' && first_char)
@ -238,16 +251,23 @@ void TTY::erase_word()
if (ch != ' ')
first_char = true;
m_input_buffer.dequeue_end();
did_dequeue = true;
erase_character();
}
if (did_dequeue)
evaluate_block_conditions();
}
void TTY::kill_line()
{
bool did_dequeue = false;
while (can_do_backspace()) {
m_input_buffer.dequeue_end();
did_dequeue = true;
erase_character();
}
if (did_dequeue)
evaluate_block_conditions();
}
void TTY::erase_character()
@ -277,6 +297,7 @@ void TTY::flush_input()
{
m_available_lines = 0;
m_input_buffer.clear();
evaluate_block_conditions();
}
void TTY::set_termios(const termios& t)
@ -330,7 +351,7 @@ int TTY::ioctl(FileDescription&, unsigned request, FlatPtr arg)
return -EPERM;
if (process && pgid != process->pgid())
return -EPERM;
m_pg = *process_group;
m_pg = process_group;
if (process) {
if (auto parent = Process::from_pid(process->ppid())) {

View file

@ -72,7 +72,7 @@ protected:
void set_size(unsigned short columns, unsigned short rows);
TTY(unsigned major, unsigned minor);
void emit(u8);
void emit(u8, bool do_evaluate_block_conditions = false);
virtual void echo(u8) = 0;
bool can_do_backspace() const;

View file

@ -32,16 +32,17 @@ namespace Kernel {
void FinalizerTask::spawn()
{
RefPtr<Thread> finalizer_thread;
Process::create_kernel_process(finalizer_thread, "FinalizerTask", [](void*) {
Thread::current()->set_priority(THREAD_PRIORITY_LOW);
for (;;) {
Thread::current()->wait_on(*g_finalizer_wait_queue, "FinalizerTask");
Process::create_kernel_process(
finalizer_thread, "FinalizerTask", [](void*) {
Thread::current()->set_priority(THREAD_PRIORITY_LOW);
for (;;) {
Thread::current()->wait_on(*g_finalizer_wait_queue, "FinalizerTask");
bool expected = true;
if (g_finalizer_has_work.compare_exchange_strong(expected, false, AK::MemoryOrder::memory_order_acq_rel))
Thread::finalize_dying_threads();
}
}, nullptr);
if (g_finalizer_has_work.exchange(false, AK::MemoryOrder::memory_order_acq_rel) == true)
Thread::finalize_dying_threads();
}
},
nullptr);
g_finalizer = finalizer_thread;
}

View file

@ -51,7 +51,7 @@ Thread::Thread(NonnullRefPtr<Process> process)
: m_process(move(process))
, m_name(m_process->name())
{
if (m_process->m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel) == 0) {
if (m_process->m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0) {
// First thread gets TID == PID
m_tid = m_process->pid().value();
} else {
@ -125,14 +125,27 @@ Thread::~Thread()
ScopedSpinLock lock(g_scheduler_lock);
g_scheduler_data->thread_list_for_state(m_state).remove(*this);
}
ASSERT(!m_joiner);
}
void Thread::unblock()
void Thread::unblock_from_blocker(Blocker& blocker)
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock lock(m_lock);
if (m_blocker != &blocker)
return;
if (!is_stopped())
unblock();
}
void Thread::unblock(u8 signal)
{
ASSERT(g_scheduler_lock.own_lock());
ASSERT(m_lock.own_lock());
if (m_state != Thread::Blocked)
return;
ASSERT(m_blocker);
if (signal != 0)
m_blocker->set_interrupted_by_signal(signal);
m_blocker = nullptr;
if (Thread::current() == this) {
set_state(Thread::Running);
@ -218,7 +231,7 @@ void Thread::die_if_needed()
void Thread::exit(void* exit_value)
{
ASSERT(Thread::current() == this);
m_exit_value = exit_value;
m_join_condition.thread_did_exit(exit_value);
set_should_die();
unlock_process_if_locked();
die_if_needed();
@ -226,6 +239,7 @@ void Thread::exit(void* exit_value)
void Thread::yield_without_holding_big_lock()
{
ASSERT(!g_scheduler_lock.own_lock());
bool did_unlock = unlock_process_if_locked();
// NOTE: Even though we call Scheduler::yield here, unless we happen
// to be outside of a critical section, the yield will be postponed
@ -310,11 +324,7 @@ void Thread::finalize()
dbg() << "Finalizing thread " << *this;
#endif
set_state(Thread::State::Dead);
if (auto* joiner = m_joiner.exchange(nullptr, AK::memory_order_acq_rel)) {
// Notify joiner that we exited
static_cast<JoinBlocker*>(joiner->m_blocker)->joinee_exited(m_exit_value);
}
m_join_condition.thread_finalizing();
}
if (m_dump_backtrace_on_finalization)
@ -323,9 +333,12 @@ void Thread::finalize()
kfree_aligned(m_fpu_state);
auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
ASSERT(thread_cnt_before != 0);
if (thread_cnt_before == 1)
process().finalize();
process().finalize(*this);
else
process().unblock_waiters(*this, Thread::WaitBlocker::UnblockFlags::Terminated);
}
void Thread::finalize_dying_threads()
@ -363,19 +376,26 @@ bool Thread::tick()
bool Thread::has_pending_signal(u8 signal) const
{
ScopedSpinLock lock(g_scheduler_lock);
return m_pending_signals & (1 << (signal - 1));
return pending_signals_for_state() & (1 << (signal - 1));
}
u32 Thread::pending_signals() const
{
ScopedSpinLock lock(g_scheduler_lock);
return m_pending_signals;
return pending_signals_for_state();
}
u32 Thread::pending_signals_for_state() const
{
ASSERT(g_scheduler_lock.own_lock());
constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
return m_state != Stopped ? m_pending_signals : m_pending_signals & stopped_signal_mask;
}
void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
ASSERT(signal < 32);
ScopedSpinLock lock(g_scheduler_lock);
ScopedSpinLock scheduler_lock(g_scheduler_lock);
// FIXME: Figure out what to do for masked signals. Should we also ignore them here?
if (should_ignore_signal(signal)) {
@ -393,7 +413,15 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
#endif
m_pending_signals |= 1 << (signal - 1);
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
ScopedSpinLock lock(m_lock);
if (m_state == Stopped) {
if (pending_signals_for_state())
resume_from_stopped();
} else {
unblock(signal);
}
}
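Note: send_signal() now acts on the target's state immediately: a stopped thread with a deliverable pending signal is resumed, and a blocked thread is unblocked with the signal number recorded in its blocker. A toy state machine showing the same decision (ToyThread is illustrative only, not kernel code):

#include <cstdio>

enum class State { Running, Stopped, Blocked };

struct ToyThread {
    State state { State::Running };
    int interrupting_signal { 0 };

    void send_signal(int signal)
    {
        if (state == State::Stopped) {
            state = State::Running; // resume_from_stopped()
            printf("resumed by signal %d\n", signal);
        } else if (state == State::Blocked) {
            interrupting_signal = signal; // blocker->set_interrupted_by_signal(signal)
            state = State::Running;       // unblock(signal)
            printf("unblocked by signal %d\n", signal);
        }
    }
};

int main()
{
    ToyThread t;
    t.state = State::Blocked;
    t.send_signal(2); // e.g. SIGINT
}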
u32 Thread::update_signal_mask(u32 signal_mask)
@ -401,7 +429,7 @@ u32 Thread::update_signal_mask(u32 signal_mask)
ScopedSpinLock lock(g_scheduler_lock);
auto previous_signal_mask = m_signal_mask;
m_signal_mask = signal_mask;
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
return previous_signal_mask;
}
@ -419,7 +447,7 @@ u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
m_signal_mask &= ~signal_set;
else
m_signal_mask |= signal_set;
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
return previous_signal_mask;
}
@ -440,15 +468,19 @@ void Thread::clear_signals()
void Thread::send_urgent_signal_to_self(u8 signal)
{
ASSERT(Thread::current() == this);
ScopedSpinLock lock(g_scheduler_lock);
if (dispatch_signal(signal) == ShouldUnblockThread::No)
Scheduler::yield();
DispatchSignalResult result;
{
ScopedSpinLock lock(g_scheduler_lock);
result = dispatch_signal(signal);
}
if (result == DispatchSignalResult::Yield)
yield_without_holding_big_lock();
}
ShouldUnblockThread Thread::dispatch_one_pending_signal()
DispatchSignalResult Thread::dispatch_one_pending_signal()
{
ASSERT(m_lock.own_lock());
u32 signal_candidates = m_pending_signals & ~m_signal_mask;
u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
ASSERT(signal_candidates);
u8 signal = 1;
@ -460,6 +492,17 @@ ShouldUnblockThread Thread::dispatch_one_pending_signal()
return dispatch_signal(signal);
}
DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal)
{
ASSERT(signal != 0);
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock lock(m_lock);
u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
if (!(signal_candidates & (1 << (signal - 1))))
return DispatchSignalResult::Continue;
return dispatch_signal(signal);
}
enum class DefaultSignalAction {
Terminate,
Ignore,
@ -542,22 +585,16 @@ void Thread::resume_from_stopped()
ASSERT(is_stopped());
ASSERT(m_stop_state != State::Invalid);
ASSERT(g_scheduler_lock.own_lock());
set_state(m_stop_state);
m_stop_state = State::Invalid;
// make sure SemiPermanentBlocker is unblocked
if (m_state != Thread::Runnable && m_state != Thread::Running) {
ScopedSpinLock lock(m_lock);
if (m_blocker && m_blocker->is_reason_signal())
unblock();
}
set_state(m_stop_state != Blocked ? m_stop_state : Runnable);
}
ShouldUnblockThread Thread::dispatch_signal(u8 signal)
DispatchSignalResult Thread::dispatch_signal(u8 signal)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(g_scheduler_lock.own_lock());
ASSERT(signal > 0 && signal <= 32);
ASSERT(process().is_user_process());
ASSERT(this == Thread::current());
#ifdef SIGNAL_DEBUG
klog() << "signal: dispatch signal " << signal << " to " << *this;
@ -568,7 +605,14 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
// at least in Runnable state and is_initialized() returns true,
// which indicates that it is fully set up and we actually have
// a register state on the stack that we can modify
return ShouldUnblockThread::No;
return DispatchSignalResult::Deferred;
}
if (is_stopped() && signal != SIGCONT && signal != SIGKILL && signal != SIGTRAP) {
#ifdef SIGNAL_DEBUG
klog() << "signal: " << *this << " is stopped, will handle signal " << signal << " when resumed";
#endif
return DispatchSignalResult::Deferred;
}
auto& action = m_signal_action_data[signal];
@ -579,30 +623,35 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
m_pending_signals &= ~(1 << (signal - 1));
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
if (signal == SIGSTOP) {
auto* thread_tracer = tracer();
if (signal == SIGSTOP || (thread_tracer && default_signal_action(signal) == DefaultSignalAction::DumpCore)) {
if (!is_stopped()) {
m_stop_signal = SIGSTOP;
#ifdef SIGNAL_DEBUG
dbg() << "signal: signal " << signal << " stopping thread " << *this;
#endif
m_stop_signal = signal;
set_state(State::Stopped);
}
return ShouldUnblockThread::No;
return DispatchSignalResult::Yield;
}
if (signal == SIGCONT && is_stopped()) {
#ifdef SIGNAL_DEBUG
dbg() << "signal: SIGCONT resuming " << *this << " from stopped";
#endif
resume_from_stopped();
} else {
auto* thread_tracer = tracer();
if (thread_tracer != nullptr) {
// when a thread is traced, it should be stopped whenever it receives a signal
// the tracer is notified of this by using waitpid()
// only "pending signals" from the tracer are sent to the tracee
if (!thread_tracer->has_pending_signal(signal)) {
m_stop_signal = signal;
// make sure SemiPermanentBlocker is unblocked
ScopedSpinLock lock(m_lock);
if (m_blocker && m_blocker->is_reason_signal())
unblock();
#ifdef SIGNAL_DEBUG
dbg() << "signal: " << signal << " stopping " << *this << " for tracer";
#endif
set_state(Stopped);
return ShouldUnblockThread::No;
return DispatchSignalResult::Yield;
}
thread_tracer->unset_signal(signal);
}
@ -614,7 +663,7 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
case DefaultSignalAction::Stop:
m_stop_signal = signal;
set_state(Stopped);
return ShouldUnblockThread::No;
return DispatchSignalResult::Yield;
case DefaultSignalAction::DumpCore:
process().for_each_thread([](auto& thread) {
thread.set_dump_backtrace_on_finalization();
@ -623,11 +672,11 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
[[fallthrough]];
case DefaultSignalAction::Terminate:
m_process->terminate_due_to_signal(signal);
return ShouldUnblockThread::No;
return DispatchSignalResult::Terminate;
case DefaultSignalAction::Ignore:
ASSERT_NOT_REACHED();
case DefaultSignalAction::Continue:
return ShouldUnblockThread::Yes;
return DispatchSignalResult::Continue;
}
ASSERT_NOT_REACHED();
}
@ -636,7 +685,7 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
#ifdef SIGNAL_DEBUG
klog() << "signal: " << *this << " ignored signal " << signal;
#endif
return ShouldUnblockThread::Yes;
return DispatchSignalResult::Continue;
}
ProcessPagingScope paging_scope(m_process);
@ -700,7 +749,7 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
#ifdef SIGNAL_DEBUG
klog() << "signal: Okay, " << *this << " {" << state_string() << "} has been primed with signal handler " << String::format("%w", m_tss.cs) << ":" << String::format("%x", m_tss.eip) << " to deliver " << signal;
#endif
return ShouldUnblockThread::Yes;
return DispatchSignalResult::Continue;
}
void Thread::set_default_signal_dispositions()
@ -827,11 +876,6 @@ void Thread::set_state(State new_state)
}
}
if (new_state == Stopped) {
// We don't want to restore to Running state, only Runnable!
m_stop_state = m_state != Running ? m_state : Runnable;
}
m_state = new_state;
#ifdef THREAD_DEBUG
dbg() << "Set Thread " << *this << " state to " << state_string();
@ -842,7 +886,16 @@ void Thread::set_state(State new_state)
ASSERT(g_scheduler_data->has_thread(*this));
}
if (m_state == Dying) {
if (previous_state == Stopped) {
m_stop_state = State::Invalid;
process().unblock_waiters(*this, Thread::WaitBlocker::UnblockFlags::Continued);
}
if (m_state == Stopped) {
// We don't want to restore to Running state, only Runnable!
m_stop_state = previous_state != Running ? m_state : Runnable;
process().unblock_waiters(*this, Thread::WaitBlocker::UnblockFlags::Stopped, m_stop_signal);
} else if (m_state == Dying) {
ASSERT(previous_state != Queued);
if (this != Thread::current() && is_finalizable()) {
// Some other thread set this thread to Dying, notify the
@ -1027,7 +1080,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
}
});
if (!timer) {
dbg() << "wait_on timed out before blocking";
// We timed out already, don't block
// The API contract guarantees we return with interrupts enabled,
// regardless of how we got called

View file

@ -47,9 +47,11 @@
namespace Kernel {
enum class ShouldUnblockThread {
No = 0,
Yes
enum class DispatchSignalResult {
Deferred = 0,
Yield,
Terminate,
Continue
};
struct SignalActionData {
@ -102,35 +104,6 @@ public:
u32 effective_priority() const;
KResult try_join(Thread& joiner)
{
if (&joiner == this)
return KResult(-EDEADLK);
ScopedSpinLock lock(m_lock);
if (!m_is_joinable || state() == Dead)
return KResult(-EINVAL);
Thread* expected = nullptr;
if (!m_joiner.compare_exchange_strong(expected, &joiner, AK::memory_order_acq_rel))
return KResult(-EINVAL);
// From this point on the thread is no longer joinable by anyone
// else. It also means that if the join is timed, it becomes
// detached when a timeout happens.
m_is_joinable = false;
return KSuccess;
}
void join_done()
{
// To avoid possible deadlocking, this function must not acquire
// m_lock. This deadlock could occur if the joiner times out
// almost at the same time as this thread, and calls into this
// function to clear the joiner.
m_joiner.store(nullptr, AK::memory_order_release);
}
void detach()
{
ScopedSpinLock lock(m_lock);
@ -275,88 +248,284 @@ public:
bool m_should_block { false };
};
class BlockCondition;
class Blocker {
public:
virtual ~Blocker() { }
virtual bool should_unblock(Thread&) = 0;
enum class Type {
Unknown = 0,
File,
Plan9FS,
Join,
Routing,
Sleep,
Wait
};
virtual ~Blocker();
virtual const char* state_string() const = 0;
virtual bool is_reason_signal() const { return false; }
virtual bool should_block() { return true; }
virtual Type blocker_type() const = 0;
virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
virtual void was_unblocked() { }
virtual void not_blocking(bool) = 0;
virtual void was_unblocked(bool did_timeout)
{
if (did_timeout) {
ScopedSpinLock lock(m_lock);
m_did_timeout = true;
}
}
void set_interrupted_by_death()
{
ScopedSpinLock lock(m_lock);
m_was_interrupted_by_death = true;
do_set_interrupted_by_death();
}
void set_interrupted_by_signal()
void set_interrupted_by_signal(u8 signal)
{
ScopedSpinLock lock(m_lock);
m_was_interrupted_while_blocked = true;
do_set_interrupted_by_signal(signal);
}
virtual Thread::BlockResult block_result(bool did_timeout)
u8 was_interrupted_by_signal() const
{
ScopedSpinLock lock(m_lock);
return do_get_interrupted_by_signal();
}
virtual Thread::BlockResult block_result()
{
ScopedSpinLock lock(m_lock);
if (m_was_interrupted_by_death)
return Thread::BlockResult::InterruptedByDeath;
if (m_was_interrupted_while_blocked)
if (m_was_interrupted_by_signal != 0)
return Thread::BlockResult::InterruptedBySignal;
if (did_timeout)
if (m_did_timeout)
return Thread::BlockResult::InterruptedByTimeout;
return Thread::BlockResult::WokeNormally;
}
void begin_blocking(Badge<Thread>);
BlockResult end_blocking(Badge<Thread>, bool);
protected:
void do_set_interrupted_by_death()
{
m_was_interrupted_by_death = true;
}
void do_set_interrupted_by_signal(u8 signal)
{
ASSERT(signal != 0);
m_was_interrupted_by_signal = signal;
}
void do_clear_interrupted_by_signal()
{
m_was_interrupted_by_signal = 0;
}
u8 do_get_interrupted_by_signal() const
{
return m_was_interrupted_by_signal;
}
bool was_interrupted() const
{
return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
}
void unblock_from_blocker()
{
RefPtr<Thread> thread;
{
ScopedSpinLock lock(m_lock);
if (m_is_blocking) {
m_is_blocking = false;
ASSERT(m_blocked_thread);
thread = m_blocked_thread;
}
}
if (thread)
thread->unblock_from_blocker(*this);
}
bool set_block_condition(BlockCondition&, void* = nullptr);
mutable RecursiveSpinLock m_lock;
private:
bool m_was_interrupted_while_blocked { false };
BlockCondition* m_block_condition { nullptr };
void* m_block_data { nullptr };
Thread* m_blocked_thread { nullptr };
u8 m_was_interrupted_by_signal { 0 };
bool m_is_blocking { false };
bool m_was_interrupted_by_death { false };
friend class Thread;
bool m_did_timeout { false };
};
class BlockCondition {
AK_MAKE_NONCOPYABLE(BlockCondition);
AK_MAKE_NONMOVABLE(BlockCondition);
public:
BlockCondition() = default;
virtual ~BlockCondition()
{
ScopedSpinLock lock(m_lock);
ASSERT(m_blockers.is_empty());
}
bool add_blocker(Blocker& blocker, void* data)
{
ScopedSpinLock lock(m_lock);
if (!should_add_blocker(blocker, data))
return false;
m_blockers.append({ &blocker, data });
return true;
}
void remove_blocker(Blocker& blocker, void* data)
{
ScopedSpinLock lock(m_lock);
// NOTE: it's possible that the blocker is no longer present
m_blockers.remove_first_matching([&](auto& info) {
return info.blocker == &blocker && info.data == data;
});
}
protected:
template<typename UnblockOne>
void unblock(UnblockOne unblock_one)
{
ScopedSpinLock lock(m_lock);
do_unblock(unblock_one);
}
template<typename UnblockOne>
void do_unblock(UnblockOne unblock_one)
{
ASSERT(m_lock.is_locked());
for (size_t i = 0; i < m_blockers.size();) {
auto& info = m_blockers[i];
if (unblock_one(*info.blocker, info.data)) {
m_blockers.remove(i);
continue;
}
i++;
}
}
template<typename UnblockOne>
void unblock_all(UnblockOne unblock_one)
{
ScopedSpinLock lock(m_lock);
do_unblock_all(unblock_one);
}
template<typename UnblockOne>
void do_unblock_all(UnblockOne unblock_one)
{
ASSERT(m_lock.is_locked());
for (auto& info : m_blockers) {
bool did_unblock = unblock_one(*info.blocker, info.data);
ASSERT(did_unblock);
}
m_blockers.clear();
}
virtual bool should_add_blocker(Blocker&, void*) { return true; }
SpinLock<u8> m_lock;
private:
struct BlockerInfo {
Blocker* blocker;
void* data;
};
Vector<BlockerInfo, 4> m_blockers;
};
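Note: BlockCondition above is the commit's central registry: blockers attach with add_blocker(), and whoever changed state walks the list, dropping every blocker whose wake condition is now met. A condensed, self-contained model using std::mutex in place of SpinLock (ToyBlockCondition is illustrative, not the kernel class):

#include <functional>
#include <mutex>
#include <vector>

class ToyBlockCondition {
public:
    // A blocker registers a predicate that returns true once it has unblocked.
    void add_blocker(std::function<bool()> try_unblock)
    {
        std::lock_guard<std::mutex> lock(m_lock);
        m_blockers.push_back(std::move(try_unblock));
    }

    // Called by whoever changed state (IRQ handler, buffer write, ...),
    // mirroring do_unblock() above: unblocked entries are removed in place.
    void unblock()
    {
        std::lock_guard<std::mutex> lock(m_lock);
        for (size_t i = 0; i < m_blockers.size();) {
            if (m_blockers[i]()) // condition met: the blocker woke its thread
                m_blockers.erase(m_blockers.begin() + i);
            else
                i++;
        }
    }

private:
    std::mutex m_lock;
    std::vector<std::function<bool()>> m_blockers;
};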
friend class JoinBlocker;
class JoinBlocker final : public Blocker {
public:
explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
virtual bool should_unblock(Thread&) override;
virtual Type blocker_type() const override { return Type::Join; }
virtual const char* state_string() const override { return "Joining"; }
virtual void was_unblocked() override;
void joinee_exited(void* value);
virtual bool should_block() override { return !m_join_error && m_should_block; }
virtual void not_blocking(bool) override;
bool unblock(void*, bool);
private:
Thread* m_joinee;
NonnullRefPtr<Thread> m_joinee;
void*& m_joinee_exit_value;
bool m_join_error { false };
bool m_did_unblock { false };
bool m_should_block { true };
};
class FileDescriptionBlocker : public Blocker {
class FileBlocker : public Blocker {
public:
enum class BlockFlags : u32 {
None = 0,
Read = 1 << 0,
Write = 1 << 1,
ReadPriority = 1 << 2,
Accept = 1 << 3,
Connect = 1 << 4,
SocketFlags = Accept | Connect,
WriteNotOpen = 1 << 5,
WriteError = 1 << 6,
WriteHangUp = 1 << 7,
ReadHangUp = 1 << 8,
Exception = WriteNotOpen | WriteError | WriteHangUp | ReadHangUp,
};
virtual Type blocker_type() const override { return Type::File; }
virtual bool should_block() override
{
return m_should_block;
}
virtual bool unblock(bool, void*) = 0;
protected:
bool m_should_block { true };
};
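Note: BlockFlags above is an enum class used as a bitmask, which is why every use site in this commit casts through (u32). Hypothetical operator helpers would express the same semantics without the casts; a self-contained illustration (the commit itself keeps the manual casts):

#include <cstdint>

enum class Flags : uint32_t {
    None = 0,
    Read = 1 << 0,
    Write = 1 << 1,
};

constexpr Flags operator|(Flags a, Flags b)
{
    return static_cast<Flags>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
}

constexpr bool has_flag(Flags value, Flags mask)
{
    return (static_cast<uint32_t>(value) & static_cast<uint32_t>(mask)) != 0;
}

// Usage: has_flag(unblock_flags, Flags::Read) in place of
//        ((u32)unblock_flags & (u32)BlockFlags::Read).
static_assert(has_flag(Flags::Read | Flags::Write, Flags::Read));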
class FileDescriptionBlocker : public FileBlocker {
public:
const FileDescription& blocked_description() const;
virtual bool unblock(bool, void*) override;
virtual void not_blocking(bool) override;
protected:
explicit FileDescriptionBlocker(const FileDescription&);
explicit FileDescriptionBlocker(FileDescription&, BlockFlags, BlockFlags&);
private:
NonnullRefPtr<FileDescription> m_blocked_description;
const BlockFlags m_flags;
BlockFlags& m_unblocked_flags;
bool m_did_unblock { false };
bool m_should_block { true };
};
class AcceptBlocker final : public FileDescriptionBlocker {
public:
explicit AcceptBlocker(const FileDescription&);
virtual bool should_unblock(Thread&) override;
explicit AcceptBlocker(FileDescription&, BlockFlags&);
virtual const char* state_string() const override { return "Accepting"; }
};
class ConnectBlocker final : public FileDescriptionBlocker {
public:
explicit ConnectBlocker(const FileDescription&);
virtual bool should_unblock(Thread&) override;
explicit ConnectBlocker(FileDescription&, BlockFlags&);
virtual const char* state_string() const override { return "Connecting"; }
};
class WriteBlocker final : public FileDescriptionBlocker {
public:
explicit WriteBlocker(const FileDescription&);
virtual bool should_unblock(Thread&) override;
explicit WriteBlocker(FileDescription&, BlockFlags&);
virtual const char* state_string() const override { return "Writing"; }
virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
@ -366,8 +535,7 @@ public:
class ReadBlocker final : public FileDescriptionBlocker {
public:
explicit ReadBlocker(const FileDescription&);
virtual bool should_unblock(Thread&) override;
explicit ReadBlocker(FileDescription&, BlockFlags&);
virtual const char* state_string() const override { return "Reading"; }
virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
@ -375,77 +543,136 @@ public:
BlockTimeout m_timeout;
};
class ConditionBlocker final : public Blocker {
public:
ConditionBlocker(const char* state_string, Function<bool()>&& condition);
virtual bool should_unblock(Thread&) override;
virtual const char* state_string() const override { return m_state_string; }
private:
Function<bool()> m_block_until_condition;
const char* m_state_string { nullptr };
};
class SleepBlocker final : public Blocker {
public:
explicit SleepBlocker(const BlockTimeout&, timespec* = nullptr);
virtual bool should_unblock(Thread&) override { return false; }
virtual const char* state_string() const override { return "Sleeping"; }
virtual Type blocker_type() const override { return Type::Sleep; }
virtual const BlockTimeout& override_timeout(const BlockTimeout&) override;
virtual void was_unblocked() override;
virtual Thread::BlockResult block_result(bool) override;
virtual void not_blocking(bool) override;
virtual void was_unblocked(bool) override;
virtual Thread::BlockResult block_result() override;
private:
void calculate_remaining();
BlockTimeout m_deadline;
timespec* m_remaining;
};
class SelectBlocker final : public Blocker {
class SelectBlocker final : public FileBlocker {
public:
typedef Vector<int, FD_SETSIZE> FDVector;
SelectBlocker(const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds);
virtual bool should_unblock(Thread&) override;
struct FDInfo {
NonnullRefPtr<FileDescription> description;
BlockFlags block_flags;
BlockFlags unblocked_flags { BlockFlags::None };
};
typedef Vector<FDInfo, FD_SETSIZE> FDVector;
SelectBlocker(FDVector& fds);
virtual ~SelectBlocker();
virtual bool unblock(bool, void*) override;
virtual void not_blocking(bool) override;
virtual void was_unblocked(bool) override;
virtual const char* state_string() const override { return "Selecting"; }
private:
const FDVector& m_select_read_fds;
const FDVector& m_select_write_fds;
const FDVector& m_select_exceptional_fds;
size_t collect_unblocked_flags();
FDVector& m_fds;
size_t m_registered_count { 0 };
bool m_did_unblock { false };
};
class WaitBlocker final : public Blocker {
public:
WaitBlocker(int wait_options, ProcessID& waitee_pid);
virtual bool should_unblock(Thread&) override;
virtual const char* state_string() const override { return "Waiting"; }
private:
int m_wait_options { 0 };
ProcessID& m_waitee_pid;
};
class SemiPermanentBlocker final : public Blocker {
public:
enum class Reason {
Signal,
enum class UnblockFlags {
Terminated,
Stopped,
Continued
};
SemiPermanentBlocker(Reason reason);
virtual bool should_unblock(Thread&) override;
virtual const char* state_string() const override
{
switch (m_reason) {
case Reason::Signal:
return "Signal";
}
ASSERT_NOT_REACHED();
}
virtual bool is_reason_signal() const override { return m_reason == Reason::Signal; }
WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result);
virtual const char* state_string() const override { return "Waiting"; }
virtual Type blocker_type() const override { return Type::Wait; }
virtual bool should_block() override { return m_should_block; }
virtual void not_blocking(bool) override;
virtual void was_unblocked(bool) override;
bool unblock(Thread& thread, UnblockFlags flags, u8 signal, bool from_add_blocker);
bool is_wait() const { return !(m_wait_options & WNOWAIT); }
private:
Reason m_reason;
void do_set_result(const siginfo_t&);
const int m_wait_options;
const idtype_t m_id_type;
const pid_t m_waitee_id;
KResultOr<siginfo_t>& m_result;
RefPtr<Process> m_waitee;
RefPtr<ProcessGroup> m_waitee_group;
bool m_did_unblock { false };
bool m_error { false };
bool m_got_sigchild { false };
bool m_should_block;
};
class WaitBlockCondition final : public BlockCondition {
friend class WaitBlocker;
public:
WaitBlockCondition(Process& process)
: m_process(process)
{
}
bool unblock(Thread&, WaitBlocker::UnblockFlags, u8);
void try_unblock(WaitBlocker&);
void finalize();
protected:
virtual bool should_add_blocker(Blocker&, void*) override;
private:
struct ThreadBlockInfo {
NonnullRefPtr<Thread> thread;
WaitBlocker::UnblockFlags flags;
u8 signal;
bool was_waited { false };
explicit ThreadBlockInfo(NonnullRefPtr<Thread>&& thread, WaitBlocker::UnblockFlags flags, u8 signal)
: thread(move(thread))
, flags(flags)
, signal(signal)
{
}
};
Process& m_process;
Vector<ThreadBlockInfo, 2> m_threads;
bool m_finalized { false };
};
KResult try_join(JoinBlocker& blocker)
{
if (Thread::current() == this)
return KResult(-EDEADLK);
ScopedSpinLock lock(m_lock);
if (!m_is_joinable || state() == Dead)
return KResult(-EINVAL);
bool added = m_join_condition.add_blocker(blocker, nullptr);
ASSERT(added);
// From this point on the thread is no longer joinable by anyone
// else. It also means that if the join is timed, it becomes
// detached when a timeout happens.
m_is_joinable = false;
return KSuccess;
}
void did_schedule() { ++m_times_scheduled; }
u32 times_scheduled() const { return m_times_scheduled; }
@ -481,21 +708,23 @@ public:
template<typename T, class... Args>
[[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
{
ScopedSpinLock lock(m_lock);
// We need to hold m_lock so that nobody can unblock a blocker as soon
// as it is constructed and registered elsewhere
T t(forward<Args>(args)...);
bool did_timeout = false;
RefPtr<Timer> timer;
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock lock(m_lock);
// We should never be blocking a blocked (or otherwise non-active) thread.
ASSERT(state() == Thread::Running);
ASSERT(m_blocker == nullptr);
m_blocker = &t;
if (t.should_unblock(*this)) {
if (!t.should_block()) {
// Don't block if the wake condition is already met
t.was_unblocked();
t.not_blocking(false);
m_blocker = nullptr;
return BlockResult::NotBlocked;
}
@ -508,12 +737,16 @@ public:
ScopedSpinLock lock(m_lock);
if (m_blocker) {
m_blocker_timeout = nullptr;
unblock();
if (!is_stopped()) {
// Only unblock if we're not stopped. In either
// case the blocker should be marked as timed out
unblock();
}
}
});
if (!m_blocker_timeout) {
// Timeout is already in the past
t.was_unblocked();
t.not_blocking(true);
m_blocker = nullptr;
return BlockResult::InterruptedByTimeout;
}
@ -521,18 +754,30 @@ public:
m_blocker_timeout = nullptr;
}
t.begin_blocking({});
set_state(Thread::Blocked);
}
lock.unlock();
// Yield to the scheduler, and wait for us to resume unblocked.
yield_without_holding_big_lock();
lock.lock();
bool is_stopped = false;
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock lock(m_lock);
if (t.was_interrupted_by_signal())
dispatch_one_pending_signal();
// We should no longer be blocked once we woke up
ASSERT(state() != Thread::Blocked);
auto current_state = state();
// We should no longer be blocked once we woke up, but we may be stopped
if (current_state == Stopped)
is_stopped = true;
else
ASSERT(current_state == Thread::Running);
// Remove ourselves...
m_blocker = nullptr;
@ -549,19 +794,20 @@ public:
// Notify the blocker that we are no longer blocking. It may need
// to clean up now while we're still holding m_lock
t.was_unblocked();
return t.block_result(did_timeout);
}
auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally
[[nodiscard]] BlockResult block_until(const char* state_string, Function<bool()>&& condition)
{
return block<ConditionBlocker>(nullptr, state_string, move(condition));
if (is_stopped) {
// If we're stopped we need to yield
yield_without_holding_big_lock();
}
return result;
}
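Note: block<T>() above is the whole blocking protocol in one place: construct the blocker under the scheduler and thread locks, bail out early if should_block() is false, arm the timeout, set the state to Blocked, yield, then on wake-up dispatch any interrupting signal and collect the result via end_blocking(). A distilled standard-C++ analogue of that shape (a sketch only, not the kernel implementation):

#include <chrono>
#include <condition_variable>
#include <mutex>

enum class BlockResult { WokeNormally, InterruptedByTimeout };

template<typename Condition>
BlockResult block(std::mutex& lock, std::condition_variable& cv,
    std::chrono::milliseconds timeout, Condition should_stay_blocked)
{
    std::unique_lock<std::mutex> guard(lock);
    if (!should_stay_blocked())           // t.should_block() == false:
        return BlockResult::WokeNormally; // the kernel returns NotBlocked here
    // Wait until unblocked or the timeout fires (the kernel arms a Timer).
    bool condition_met = cv.wait_for(guard, timeout, [&] { return !should_stay_blocked(); });
    return condition_met ? BlockResult::WokeNormally : BlockResult::InterruptedByTimeout;
}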
BlockResult wait_on(WaitQueue& queue, const char* reason, const BlockTimeout& = nullptr, Atomic<bool>* lock = nullptr, RefPtr<Thread> beneficiary = {});
void wake_from_queue();
void unblock();
void unblock_from_blocker(Blocker&);
void unblock(u8 signal = 0);
BlockResult sleep(const timespec&, timespec* = nullptr);
BlockResult sleep_until(const timespec&);
@ -588,7 +834,6 @@ public:
void send_urgent_signal_to_self(u8 signal);
void send_signal(u8 signal, Process* sender);
void consider_unblock();
u32 update_signal_mask(u32 signal_mask);
u32 signal_mask_block(sigset_t signal_set, bool block);
@ -597,14 +842,16 @@ public:
void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }
ShouldUnblockThread dispatch_one_pending_signal();
ShouldUnblockThread dispatch_signal(u8 signal);
DispatchSignalResult dispatch_one_pending_signal();
DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
DispatchSignalResult dispatch_signal(u8 signal);
bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
void terminate_due_to_signal(u8 signal);
bool should_ignore_signal(u8 signal) const;
bool has_signal_handler(u8 signal) const;
bool has_pending_signal(u8 signal) const;
u32 pending_signals() const;
u32 pending_signals_for_state() const;
FPUState& fpu_state() { return *m_fpu_state; }
@ -710,6 +957,7 @@ public:
void start_tracing_from(ProcessID tracer);
void stop_tracing();
void tracer_trap(const RegisterState&);
bool is_traced() const { return !!m_tracer; }
RecursiveSpinLock& get_lock() const { return m_lock; }
@ -720,6 +968,61 @@ private:
private:
friend struct SchedulerData;
friend class WaitQueue;
class JoinBlockCondition : public BlockCondition {
public:
void thread_did_exit(void* exit_value)
{
ScopedSpinLock lock(m_lock);
ASSERT(!m_thread_did_exit);
m_thread_did_exit = true;
m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
do_unblock_joiner();
}
void thread_finalizing()
{
ScopedSpinLock lock(m_lock);
do_unblock_joiner();
}
void* exit_value() const
{
ASSERT(m_thread_did_exit);
return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
}
void try_unblock(JoinBlocker& blocker)
{
ScopedSpinLock lock(m_lock);
if (m_thread_did_exit)
blocker.unblock(exit_value(), false);
}
protected:
virtual bool should_add_blocker(Blocker& b, void*) override
{
ASSERT(b.blocker_type() == Blocker::Type::Join);
auto& blocker = static_cast<JoinBlocker&>(b);
// NOTE: m_lock is held already!
if (m_thread_did_exit)
blocker.unblock(exit_value(), true);
return m_thread_did_exit;
}
private:
void do_unblock_joiner()
{
do_unblock_all([&](Blocker& b, void*) {
ASSERT(b.blocker_type() == Blocker::Type::Join);
auto& blocker = static_cast<JoinBlocker&>(b);
return blocker.unblock(exit_value(), false);
});
}
Atomic<void*> m_exit_value { nullptr };
bool m_thread_did_exit { false };
};
bool unlock_process_if_locked();
void relock_process(bool did_unlock);
String backtrace_impl();
@ -747,10 +1050,9 @@ private:
const char* m_wait_reason { nullptr };
WaitQueue* m_queue { nullptr };
JoinBlockCondition m_join_condition;
Atomic<bool> m_is_active { false };
bool m_is_joinable { true };
Atomic<Thread*> m_joiner { nullptr };
void* m_exit_value { nullptr };
unsigned m_syscall_count { 0 };
unsigned m_inode_faults { 0 };

Kernel/ThreadBlockers.cpp (new file, 656 lines)
View file

@ -0,0 +1,656 @@
/*
* Copyright (c) 2020, The SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Net/Socket.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Thread.h>
namespace Kernel {
bool Thread::Blocker::set_block_condition(Thread::BlockCondition& block_condition, void* data)
{
ASSERT(!m_block_condition);
if (block_condition.add_blocker(*this, data)) {
m_block_condition = &block_condition;
m_block_data = data;
return true;
}
return false;
}
Thread::Blocker::~Blocker()
{
ScopedSpinLock lock(m_lock);
if (m_block_condition)
m_block_condition->remove_blocker(*this, m_block_data);
}
void Thread::Blocker::begin_blocking(Badge<Thread>)
{
ScopedSpinLock lock(m_lock);
ASSERT(!m_is_blocking);
ASSERT(!m_blocked_thread);
m_blocked_thread = Thread::current();
m_is_blocking = true;
}
auto Thread::Blocker::end_blocking(Badge<Thread>, bool did_timeout) -> BlockResult
{
ScopedSpinLock lock(m_lock);
// If m_is_blocking is false here, some other thread forced us to
// unblock before we got here. Either way, this is only ever called
// from the thread that was blocked.
ASSERT(Thread::current() == m_blocked_thread);
m_is_blocking = false;
m_blocked_thread = nullptr;
was_unblocked(did_timeout);
return block_result();
}
Thread::JoinBlocker::JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value)
: m_joinee(joinee)
, m_joinee_exit_value(joinee_exit_value)
{
{
// We need to hold our lock to avoid a race where try_join succeeds
// but the joinee exits immediately afterwards, before we can block
ScopedSpinLock lock(m_lock);
try_join_result = joinee.try_join(*this);
m_join_error = try_join_result.is_error();
}
if (!set_block_condition(joinee.m_join_condition))
m_should_block = false;
}
void Thread::JoinBlocker::not_blocking(bool timeout_in_past)
{
if (!m_should_block) {
// set_block_condition returned false, so unblock was already called
ASSERT(!timeout_in_past);
return;
}
// If we should have blocked but got here it must have been that the
// timeout was already in the past. So we need to ask the BlockCondition
// to supply us the information. We cannot hold the lock as unblock
// could be called by the BlockCondition at any time!
ASSERT(timeout_in_past);
m_joinee->m_join_condition.try_unblock(*this);
}
bool Thread::JoinBlocker::unblock(void* value, bool from_add_blocker)
{
{
ScopedSpinLock lock(m_lock);
if (m_did_unblock)
return false;
m_did_unblock = true;
m_joinee_exit_value = value;
do_set_interrupted_by_death();
}
if (!from_add_blocker)
unblock_from_blocker();
return true;
}
Thread::FileDescriptionBlocker::FileDescriptionBlocker(FileDescription& description, BlockFlags flags, BlockFlags& unblocked_flags)
: m_blocked_description(description)
, m_flags(flags)
, m_unblocked_flags(unblocked_flags)
{
m_unblocked_flags = BlockFlags::None;
if (!set_block_condition(description.block_condition()))
m_should_block = false;
}
bool Thread::FileDescriptionBlocker::unblock(bool from_add_blocker, void*)
{
auto unblock_flags = m_blocked_description->should_unblock(m_flags);
if (unblock_flags == BlockFlags::None)
return false;
{
ScopedSpinLock lock(m_lock);
if (m_did_unblock)
return false;
m_did_unblock = true;
m_unblocked_flags = unblock_flags;
}
if (!from_add_blocker)
unblock_from_blocker();
return true;
}
void Thread::FileDescriptionBlocker::not_blocking(bool timeout_in_past)
{
if (!m_should_block) {
// set_block_condition returned false, so unblock was already called
ASSERT(!timeout_in_past);
return;
}
// If we should have blocked but got here it must have been that the
// timeout was already in the past. So we need to ask the BlockCondition
// to supply us the information. We cannot hold the lock as unblock
// could be called by the BlockCondition at any time!
ASSERT(timeout_in_past);
// Just call unblock here because we will query the file description
// for the data and don't need any input from the FileBlockCondition.
// However, because timeout_in_past is true, the FileBlockCondition may also
// call us at any time, so our call to unblock here may lose that race.
// Either way, unblock will be called at least once, which provides
// all the data we need.
unblock(false, nullptr);
}
const FileDescription& Thread::FileDescriptionBlocker::blocked_description() const
{
return m_blocked_description;
}
Thread::AcceptBlocker::AcceptBlocker(FileDescription& description, BlockFlags& unblocked_flags)
: FileDescriptionBlocker(description, (BlockFlags)((u32)BlockFlags::Accept | (u32)BlockFlags::Exception), unblocked_flags)
{
}
Thread::ConnectBlocker::ConnectBlocker(FileDescription& description, BlockFlags& unblocked_flags)
: FileDescriptionBlocker(description, (BlockFlags)((u32)BlockFlags::Connect | (u32)BlockFlags::Exception), unblocked_flags)
{
}
Thread::WriteBlocker::WriteBlocker(FileDescription& description, BlockFlags& unblocked_flags)
: FileDescriptionBlocker(description, (BlockFlags)((u32)BlockFlags::Write | (u32)BlockFlags::Exception), unblocked_flags)
{
}
auto Thread::WriteBlocker::override_timeout(const BlockTimeout& timeout) -> const BlockTimeout&
{
auto& description = blocked_description();
if (description.is_socket()) {
auto& socket = *description.socket();
if (socket.has_send_timeout()) {
m_timeout = BlockTimeout(false, &socket.send_timeout(), timeout.start_time());
if (timeout.is_infinite() || (!m_timeout.is_infinite() && m_timeout.absolute_time() < timeout.absolute_time()))
return m_timeout;
}
}
return timeout;
}
Thread::ReadBlocker::ReadBlocker(FileDescription& description, BlockFlags& unblocked_flags)
: FileDescriptionBlocker(description, (BlockFlags)((u32)BlockFlags::Read | (u32)BlockFlags::Exception), unblocked_flags)
{
}
auto Thread::ReadBlocker::override_timeout(const BlockTimeout& timeout) -> const BlockTimeout&
{
auto& description = blocked_description();
if (description.is_socket()) {
auto& socket = *description.socket();
if (socket.has_receive_timeout()) {
m_timeout = BlockTimeout(false, &socket.receive_timeout(), timeout.start_time());
if (timeout.is_infinite() || (!m_timeout.is_infinite() && m_timeout.absolute_time() < timeout.absolute_time()))
return m_timeout;
}
}
return timeout;
}
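Both overrides above implement the classic socket timeout semantics. A hedged sketch of the userspace side (the function name set_socket_timeouts is illustrative): setting SO_SNDTIMEO and SO_RCVTIMEO via setsockopt() is what populates the send/receive timeouts that these blockers substitute for an otherwise infinite block.

#include <sys/socket.h>
#include <sys/time.h>

void set_socket_timeouts(int sockfd)
{
    struct timeval tv { 2, 0 }; // 2 seconds
    // A blocking write on sockfd now gives up after 2 seconds...
    setsockopt(sockfd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
    // ...and so does a blocking read.
    setsockopt(sockfd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}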
Thread::SleepBlocker::SleepBlocker(const BlockTimeout& deadline, timespec* remaining)
: m_deadline(deadline)
, m_remaining(remaining)
{
}
auto Thread::SleepBlocker::override_timeout(const BlockTimeout& timeout) -> const BlockTimeout&
{
ASSERT(timeout.is_infinite()); // A timeout should not be provided
// To simplify things only use the sleep deadline.
return m_deadline;
}
void Thread::SleepBlocker::not_blocking(bool timeout_in_past)
{
// SleepBlocker::should_block should always return true, so timeout
// in the past is the only valid case when this function is called
ASSERT(timeout_in_past);
calculate_remaining();
}
void Thread::SleepBlocker::was_unblocked(bool did_timeout)
{
Blocker::was_unblocked(did_timeout);
calculate_remaining();
}
void Thread::SleepBlocker::calculate_remaining()
{
if (!m_remaining)
return;
auto time_now = TimeManagement::the().monotonic_time();
if (time_now < m_deadline.absolute_time())
timespec_sub(m_deadline.absolute_time(), time_now, *m_remaining);
else
*m_remaining = {};
}
Thread::BlockResult Thread::SleepBlocker::block_result()
{
auto result = Blocker::block_result();
if (result == Thread::BlockResult::InterruptedByTimeout)
return Thread::BlockResult::WokeNormally;
return result;
}
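Together with calculate_remaining() above, this gives nanosleep() its restartable behavior. A hedged userspace sketch (sleep_fully is a made-up helper): if a signal interrupts the sleep, the unslept time is written back so the caller can simply resume.

#include <time.h>
#include <errno.h>

void sleep_fully(time_t seconds)
{
    struct timespec requested { seconds, 0 };
    struct timespec remaining {};
    // On EINTR, remaining holds what calculate_remaining() computed.
    while (nanosleep(&requested, &remaining) == -1 && errno == EINTR)
        requested = remaining;
}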
Thread::SelectBlocker::SelectBlocker(FDVector& fds)
: m_fds(fds)
{
for (auto& fd_entry : m_fds) {
fd_entry.unblocked_flags = FileBlocker::BlockFlags::None;
if (!m_should_block)
continue;
if (!fd_entry.description->block_condition().add_blocker(*this, &fd_entry))
m_should_block = false;
m_registered_count++;
}
}
Thread::SelectBlocker::~SelectBlocker()
{
if (m_registered_count > 0) {
for (auto& fd_entry : m_fds) {
fd_entry.description->block_condition().remove_blocker(*this, &fd_entry);
if (--m_registered_count == 0)
break;
}
}
}
void Thread::SelectBlocker::not_blocking(bool timeout_in_past)
{
// Either the timeout was in the past or we didn't add all blockers
ASSERT(timeout_in_past || !m_should_block);
ScopedSpinLock lock(m_lock);
if (!m_did_unblock) {
m_did_unblock = true;
if (!timeout_in_past) {
auto count = collect_unblocked_flags();
ASSERT(count > 0);
}
}
}
bool Thread::SelectBlocker::unblock(bool from_add_blocker, void* data)
{
ASSERT(data); // data is a pointer to an entry in the m_fds vector
auto& fd_info = *static_cast<FDInfo*>(data);
{
ScopedSpinLock lock(m_lock);
if (m_did_unblock)
return false;
auto unblock_flags = fd_info.description->should_unblock(fd_info.block_flags);
if (unblock_flags == BlockFlags::None)
return false;
m_did_unblock = true;
// We need to store unblock_flags here, otherwise someone else
// affecting this file descriptor could change the information
// between now and when was_unblocked is called!
fd_info.unblocked_flags = unblock_flags;
}
// Only notify the blocked thread once, and not when we were called from within add_blocker
if (!from_add_blocker)
unblock_from_blocker();
return true;
}
size_t Thread::SelectBlocker::collect_unblocked_flags()
{
size_t count = 0;
for (auto& fd_entry : m_fds) {
ASSERT(fd_entry.block_flags != FileBlocker::BlockFlags::None);
// unblock() will have stored the unblock flags for at least the first
// descriptor that triggered the unblock. Make sure we don't discard that
// information, as the descriptor's state may have changed since then!
if (fd_entry.unblocked_flags == FileBlocker::BlockFlags::None)
fd_entry.unblocked_flags = fd_entry.description->should_unblock(fd_entry.block_flags);
if (fd_entry.unblocked_flags != FileBlocker::BlockFlags::None)
count++;
}
return count;
}
void Thread::SelectBlocker::was_unblocked(bool did_timeout)
{
Blocker::was_unblocked(did_timeout);
if (!did_timeout && !was_interrupted()) {
{
ScopedSpinLock lock(m_lock);
ASSERT(m_did_unblock);
}
size_t count = collect_unblocked_flags();
// If we were blocked and didn't time out, we should have at least one unblocked fd!
ASSERT(count > 0);
}
}
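A hedged sketch of the userspace counterpart (wait_readable is an illustrative helper): one select() call registers a single SelectBlocker with the block condition of every polled file description, and returns once any of them reports matching flags.

#include <sys/select.h>

int wait_readable(int fd_a, int fd_b)
{
    fd_set read_fds;
    FD_ZERO(&read_fds);
    FD_SET(fd_a, &read_fds);
    FD_SET(fd_b, &read_fds);
    int nfds = (fd_a > fd_b ? fd_a : fd_b) + 1;
    // Blocks until at least one fd_entry has non-empty unblocked_flags.
    return select(nfds, &read_fds, nullptr, nullptr, nullptr);
}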
void Thread::WaitBlockCondition::try_unblock(Thread::WaitBlocker& blocker)
{
ScopedSpinLock lock(m_lock);
// Check if we have any processes pending
for (size_t i = 0; i < m_threads.size(); i++) {
auto& info = m_threads[i];
// We need to call unblock as if we were called from add_blocker
// so that we don't trigger a context switch by yielding!
if (info.was_waited && blocker.is_wait())
continue; // This state was already waited on, do not unblock
if (blocker.unblock(info.thread, info.flags, info.signal, true)) {
if (blocker.is_wait()) {
if (info.flags == Thread::WaitBlocker::UnblockFlags::Terminated)
m_threads.remove(i);
else
info.was_waited = true;
}
break;
}
}
}
bool Thread::WaitBlockCondition::unblock(Thread& thread, WaitBlocker::UnblockFlags flags, u8 signal)
{
bool did_unblock_any = false;
bool did_wait = false;
bool was_waited_already = false;
ScopedSpinLock lock(m_lock);
if (m_finalized)
return false;
if (flags != WaitBlocker::UnblockFlags::Terminated) {
// First check if this state was already waited on
for (auto& info : m_threads) {
if (info.thread == &thread) {
was_waited_already = info.was_waited;
break;
}
}
}
do_unblock([&](Blocker& b, void*) {
ASSERT(b.blocker_type() == Blocker::Type::Wait);
auto& blocker = static_cast<WaitBlocker&>(b);
if (was_waited_already && blocker.is_wait())
return false; // This state was already waited on, do not unblock
if (blocker.unblock(thread, flags, signal, false)) {
did_wait |= blocker.is_wait(); // anyone requesting a wait
did_unblock_any = true;
return true;
}
return false;
});
// If no one has waited (yet), or this wasn't a wait, or if it's anything other than
// UnblockFlags::Terminated, then record it in our list
if (!did_unblock_any || !did_wait || flags != WaitBlocker::UnblockFlags::Terminated) {
bool updated_existing = false;
for (auto& info : m_threads) {
if (info.thread == &thread) {
ASSERT(info.flags != WaitBlocker::UnblockFlags::Terminated);
info.flags = flags;
info.signal = signal;
info.was_waited = did_wait;
updated_existing = true;
break;
}
}
if (!updated_existing)
m_threads.append(ThreadBlockInfo(thread, flags, signal));
}
return did_unblock_any;
}
bool Thread::WaitBlockCondition::should_add_blocker(Blocker& b, void*)
{
// NOTE: m_lock is held already!
if (m_finalized)
return false;
ASSERT(b.blocker_type() == Blocker::Type::Wait);
auto& blocker = static_cast<WaitBlocker&>(b);
// See if we can match any process immediately
for (size_t i = 0; i < m_threads.size(); i++) {
auto& info = m_threads[i];
if (blocker.unblock(info.thread, info.flags, info.signal, true)) {
// Only remove the entry if UnblockFlags::Terminated
if (info.flags == Thread::WaitBlocker::UnblockFlags::Terminated && blocker.is_wait())
m_threads.remove(i);
return false;
}
}
return true;
}
void Thread::WaitBlockCondition::finalize()
{
ScopedSpinLock lock(m_lock);
ASSERT(!m_finalized);
m_finalized = true;
// Clear the list of threads here so we can drop the references to them
m_threads.clear();
// No more waiters, drop the last reference immediately. This may
// cause our own destruction!
ASSERT(m_process.ref_count() > 0);
m_process.unref();
}
Thread::WaitBlocker::WaitBlocker(int wait_options, idtype_t id_type, pid_t id, KResultOr<siginfo_t>& result)
: m_wait_options(wait_options)
, m_id_type(id_type)
, m_waitee_id(id)
, m_result(result)
, m_should_block(!(m_wait_options & WNOHANG))
{
switch (id_type) {
case P_PID: {
m_waitee = Process::from_pid(m_waitee_id);
if (!m_waitee || m_waitee->ppid() != Process::current()->pid()) {
m_result = KResult(-ECHILD);
m_error = true;
return;
}
break;
}
case P_PGID: {
m_waitee_group = ProcessGroup::from_pgid(m_waitee_id);
if (!m_waitee_group) {
m_result = KResult(-ECHILD);
m_error = true;
return;
}
break;
}
case P_ALL:
break;
default:
ASSERT_NOT_REACHED();
}
// NOTE: unblock may be called within set_block_condition, in which
// case it means that we already have a match without having to block.
// In that case set_block_condition will return false.
if (m_error || !set_block_condition(Process::current()->wait_block_condition()))
m_should_block = false;
}
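The three id selectors validated above map directly onto the waitid() API. A hedged sketch (wait_examples is a made-up demo function):

#include <sys/wait.h>

void wait_examples(pid_t child, pid_t pgid)
{
    siginfo_t info {};
    waitid(P_PID, child, &info, WEXITED);  // one specific child (ECHILD if not ours)
    waitid(P_PGID, pgid, &info, WEXITED);  // any child in the given process group
    waitid(P_ALL, 0, &info, WEXITED);      // any child at all
}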
void Thread::WaitBlocker::not_blocking(bool timeout_in_past)
{
ASSERT(timeout_in_past || !m_should_block);
if (!m_error)
Process::current()->wait_block_condition().try_unblock(*this);
}
void Thread::WaitBlocker::was_unblocked(bool)
{
bool got_sigchld, try_unblock;
{
ScopedSpinLock lock(m_lock);
try_unblock = !m_did_unblock;
got_sigchld = m_got_sigchild;
}
if (try_unblock)
Process::current()->wait_block_condition().try_unblock(*this);
// If we were interrupted by SIGCHLD (which gets special handling
// here) we're not going to return with EINTR. But we're going to
// deliver SIGCHLD (only) here.
auto* current_thread = Thread::current();
if (got_sigchld && current_thread->state() != State::Stopped)
current_thread->try_dispatch_one_pending_signal(SIGCHLD);
}
void Thread::WaitBlocker::do_set_result(const siginfo_t& result)
{
ASSERT(!m_did_unblock);
m_did_unblock = true;
m_result = result;
if (do_get_interrupted_by_signal() == SIGCHLD) {
// This makes it so that wait() will return normally despite the
// fact that SIGCHLD was delivered. Calling do_clear_interrupted_by_signal
// will disable dispatching signals in Thread::block and prevent
// it from returning with EINTR. We will then manually dispatch
// SIGCHLD (and only SIGCHLD) in was_unblocked.
m_got_sigchild = true;
do_clear_interrupted_by_signal();
}
}
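A hedged sketch of the caller-visible effect arranged here: wait() returns the child's status normally even when SIGCHLD arrives at the same moment; the handler still runs, but the call does not fail with EINTR. (on_sigchld is an illustrative handler name.)

#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>
#include <cstdio>

static void on_sigchld(int)
{
    // Runs courtesy of was_unblocked(), but does not abort the wait.
}

int main()
{
    signal(SIGCHLD, on_sigchld);
    if (fork() == 0)
        _exit(0);
    int status = 0;
    pid_t pid = wait(&status); // returns the child's pid, not -1/EINTR
    printf("reaped %d\n", pid);
    return 0;
}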
bool Thread::WaitBlocker::unblock(Thread& thread, UnblockFlags flags, u8 signal, bool from_add_blocker)
{
ASSERT(flags != UnblockFlags::Terminated || signal == 0); // signal argument should be ignored for Terminated
auto& process = thread.process();
switch (m_id_type) {
case P_PID:
ASSERT(m_waitee);
if (process.pid() != m_waitee_id && thread.tid() != m_waitee_id) // TODO: pid/tid
return false;
break;
case P_PGID:
ASSERT(m_waitee_group);
if (process.pgid() != m_waitee_group->pgid())
return false;
break;
case P_ALL:
break;
default:
ASSERT_NOT_REACHED();
}
switch (flags) {
case UnblockFlags::Terminated:
if (!(m_wait_options & WEXITED))
return false;
break;
case UnblockFlags::Stopped:
if (!(m_wait_options & WSTOPPED))
return false;
if (!(m_wait_options & WUNTRACED) && !thread.is_traced())
return false;
break;
case UnblockFlags::Continued:
if (!(m_wait_options & WCONTINUED))
return false;
if (!(m_wait_options & WUNTRACED) && !thread.is_traced())
return false;
break;
}
if (flags == UnblockFlags::Terminated) {
ASSERT(process.is_dead());
ScopedSpinLock lock(m_lock);
if (m_did_unblock)
return false;
// Up until this point, this function may have been called
// more than once!
do_set_result(process.wait_info());
} else {
siginfo_t siginfo;
memset(&siginfo, 0, sizeof(siginfo));
{
ScopedSpinLock lock(g_scheduler_lock);
// We need to gather the information before we release the scheduler lock!
siginfo.si_signo = SIGCHLD;
siginfo.si_pid = thread.tid().value();
siginfo.si_uid = process.uid();
siginfo.si_status = signal;
switch (flags) {
case UnblockFlags::Terminated:
ASSERT_NOT_REACHED();
case UnblockFlags::Stopped:
siginfo.si_code = CLD_STOPPED;
break;
case UnblockFlags::Continued:
siginfo.si_code = CLD_CONTINUED;
break;
}
}
ScopedSpinLock lock(m_lock);
if (m_did_unblock)
return false;
// Up until this point, this function may have been called
// more than once!
do_set_result(siginfo);
}
if (!from_add_blocker) {
// Only call unblock if we weren't called from within set_block_condition!
unblock_from_blocker();
}
// Because this may be called from add_blocker, where the thread hasn't
// actually been blocked yet, we must not try to unblock it here; but we
// still need to return true to report the match.
return true;
}
}
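The was_waited bookkeeping above is what makes WNOWAIT work: a wait that only peeks leaves the entry in place for a later, reaping wait. A hedged userspace sketch:

#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    if (fork() == 0)
        _exit(7);
    siginfo_t info {};
    waitid(P_ALL, 0, &info, WEXITED | WNOWAIT); // peek: child stays waitable
    printf("child %d exited; reaping now\n", info.si_pid);
    waitid(P_ALL, 0, &info, WEXITED); // actually reap it
    return 0;
}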

View file

@ -71,6 +71,7 @@ enum {
#define WSTOPPED WUNTRACED
#define WEXITED 4
#define WCONTINUED 8
#define WNOWAIT 0x1000000
#define R_OK 4
#define W_OK 2
@ -427,11 +428,12 @@ struct stat {
};
#define POLLIN (1u << 0)
#define POLLPRI (1u << 2)
#define POLLOUT (1u << 3)
#define POLLERR (1u << 4)
#define POLLHUP (1u << 5)
#define POLLNVAL (1u << 6)
#define POLLPRI (1u << 1)
#define POLLOUT (1u << 2)
#define POLLERR (1u << 3)
#define POLLHUP (1u << 4)
#define POLLNVAL (1u << 5)
#define POLLRDHUP (1u << 13)
struct pollfd {
int fd;

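A hedged example of the newly added POLLRDHUP bit (peer_hung_up is an illustrative helper): it lets a caller detect the peer closing its end of a stream socket even while unread data is still queued.

#include <poll.h>

bool peer_hung_up(int sockfd)
{
    struct pollfd pfd {};
    pfd.fd = sockfd;
    pfd.events = POLLIN | POLLRDHUP;
    if (poll(&pfd, 1, 0) <= 0) // non-blocking poll
        return false;
    return pfd.revents & (POLLRDHUP | POLLHUP);
}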
View file

@ -32,11 +32,12 @@
__BEGIN_DECLS
#define POLLIN (1u << 0)
#define POLLPRI (1u << 2)
#define POLLOUT (1u << 3)
#define POLLERR (1u << 4)
#define POLLHUP (1u << 5)
#define POLLNVAL (1u << 6)
#define POLLPRI (1u << 1)
#define POLLOUT (1u << 2)
#define POLLERR (1u << 3)
#define POLLHUP (1u << 4)
#define POLLNVAL (1u << 5)
#define POLLRDHUP (1u << 13)
struct pollfd {
int fd;

View file

@ -56,12 +56,22 @@ pid_t waitpid(pid_t waitee, int* wstatus, int options)
id = waitee;
}
// To be able to detect if a child was found when WNOHANG is set,
// we need to clear si_pid, which will only be set if it was found.
siginfo.si_pid = 0;
int rc = waitid(idtype, id, &siginfo, options | WEXITED);
if (rc < 0)
return rc;
if (wstatus) {
if ((options & WNOHANG) && siginfo.si_pid == 0) {
// No child in a waitable state was found. All other fields
// in siginfo are undefined
*wstatus = 0;
return 0;
}
switch (siginfo.si_code) {
case CLD_EXITED:
*wstatus = siginfo.si_status << 8;

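A hedged sketch of the caller-visible effect of the si_pid check above (try_reap is a made-up helper): with WNOHANG, waitpid() now cleanly reports "nothing to reap yet" instead of returning an undefined status.

#include <sys/wait.h>

bool try_reap(pid_t child, int& status)
{
    pid_t rc = waitpid(child, &status, WNOHANG);
    return rc == child; // 0 means no state change yet; -1 means error
}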
View file

@ -44,6 +44,7 @@ __BEGIN_DECLS
#define WSTOPPED WUNTRACED
#define WEXITED 4
#define WCONTINUED 8
#define WNOWAIT 0x1000000
typedef enum {
P_ALL = 1,

View file

@ -53,7 +53,6 @@ static void handle_sigint(int)
int main(int argc, char** argv)
{
Vector<const char*> child_argv;
bool spawned_new_process = false;
Core::ArgsParser parser;
parser.add_option(g_pid, "Trace the given PID", "pid", 'p', "pid");
@ -61,6 +60,7 @@ int main(int argc, char** argv)
parser.parse(argc, argv);
int status;
if (g_pid == -1) {
if (child_argv.is_empty()) {
fprintf(stderr, "strace: Expected either a pid or some arguments\n");
@ -68,7 +68,6 @@ int main(int argc, char** argv)
}
child_argv.append(nullptr);
spawned_new_process = true;
int pid = fork();
if (pid < 0) {
perror("fork");
@ -89,7 +88,7 @@ int main(int argc, char** argv)
}
g_pid = pid;
if (waitpid(pid, nullptr, WSTOPPED) != pid) {
if (waitpid(pid, &status, WSTOPPED | WEXITED) != pid || !WIFSTOPPED(status)) {
perror("waitpid");
return 1;
}
@ -104,66 +103,42 @@ int main(int argc, char** argv)
perror("attach");
return 1;
}
if (waitpid(g_pid, nullptr, WSTOPPED) != g_pid) {
if (waitpid(g_pid, &status, WSTOPPED | WEXITED) != g_pid || !WIFSTOPPED(status)) {
perror("waitpid");
return 1;
}
if (spawned_new_process) {
if (ptrace(PT_CONTINUE, g_pid, 0, 0) < 0) {
perror("continue");
return 1;
}
if (waitpid(g_pid, nullptr, WSTOPPED) != g_pid) {
perror("wait_pid");
return 1;
}
}
for (;;) {
if (ptrace(PT_SYSCALL, g_pid, 0, 0) == -1) {
if (errno == ESRCH)
return 0;
perror("syscall");
return 1;
}
if (waitpid(g_pid, nullptr, WSTOPPED) != g_pid) {
if (waitpid(g_pid, &status, WSTOPPED | WEXITED) != g_pid || !WIFSTOPPED(status)) {
perror("wait_pid");
return 1;
}
PtraceRegisters regs = {};
if (ptrace(PT_GETREGS, g_pid, &regs, 0) == -1) {
perror("getregs");
return 1;
}
u32 syscall_index = regs.eax;
u32 arg1 = regs.edx;
u32 arg2 = regs.ecx;
u32 arg3 = regs.ebx;
// skip syscall exit
if (ptrace(PT_SYSCALL, g_pid, 0, 0) == -1) {
if (errno == ESRCH)
return 0;
perror("syscall");
return 1;
}
if (waitpid(g_pid, nullptr, WSTOPPED) != g_pid) {
if (waitpid(g_pid, &status, WSTOPPED | WEXITED) != g_pid || !WIFSTOPPED(status)) {
perror("wait_pid");
return 1;
}
if (ptrace(PT_GETREGS, g_pid, &regs, 0) == -1) {
if (errno == ESRCH && syscall_index == SC_exit) {
regs.eax = 0;
} else {
perror("getregs");
return 1;
}
perror("getregs");
return 1;
}
u32 res = regs.eax;