Kernel: Resolve clang-tidy readability-implicit-bool-conversion warnings

... In files included from Kernel/Process.cpp and Kernel/Thread.cpp
This commit is contained in:
Andrew Kaster 2021-11-06 15:06:08 -06:00 committed by Andreas Kling
parent 6f580f2047
commit a92132e44a
Notes: sideshowbarker 2024-07-18 01:07:25 +09:00
17 changed files with 85 additions and 84 deletions

View file

@@ -100,7 +100,7 @@ ALWAYS_INLINE void write_gs_ptr(u32 offset, FlatPtr val)
ALWAYS_INLINE bool are_interrupts_enabled()
{
return cpu_flags() & 0x200;
return (cpu_flags() & 0x200) != 0;
}
FlatPtr read_cr0();

View file

@@ -24,7 +24,7 @@ public:
~InterruptDisabler()
{
if (m_flags & 0x200)
if ((m_flags & 0x200) != 0)
sti();
}

View file

@@ -39,28 +39,28 @@ public:
NoExecute = 0x8000000000000000ULL,
};
bool is_present() const { return raw() & Present; }
bool is_present() const { return (raw() & Present) == Present; }
void set_present(bool b) { set_bit(Present, b); }
bool is_user_allowed() const { return raw() & UserSupervisor; }
bool is_user_allowed() const { return (raw() & UserSupervisor) == UserSupervisor; }
void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
bool is_huge() const { return raw() & Huge; }
bool is_huge() const { return (raw() & Huge) == Huge; }
void set_huge(bool b) { set_bit(Huge, b); }
bool is_writable() const { return raw() & ReadWrite; }
bool is_writable() const { return (raw() & ReadWrite) == ReadWrite; }
void set_writable(bool b) { set_bit(ReadWrite, b); }
bool is_write_through() const { return raw() & WriteThrough; }
bool is_write_through() const { return (raw() & WriteThrough) == WriteThrough; }
void set_write_through(bool b) { set_bit(WriteThrough, b); }
bool is_cache_disabled() const { return raw() & CacheDisabled; }
bool is_cache_disabled() const { return (raw() & CacheDisabled) == CacheDisabled; }
void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
bool is_global() const { return raw() & Global; }
bool is_global() const { return (raw() & Global) == Global; }
void set_global(bool b) { set_bit(Global, b); }
bool is_execute_disabled() const { return raw() & NoExecute; }
bool is_execute_disabled() const { return (raw() & NoExecute) == NoExecute; }
void set_execute_disabled(bool b) { set_bit(NoExecute, b); }
void set_bit(u64 bit, bool value)
@@ -96,25 +96,25 @@ public:
NoExecute = 0x8000000000000000ULL,
};
bool is_present() const { return raw() & Present; }
bool is_present() const { return (raw() & Present) == Present; }
void set_present(bool b) { set_bit(Present, b); }
bool is_user_allowed() const { return raw() & UserSupervisor; }
bool is_user_allowed() const { return (raw() & UserSupervisor) == UserSupervisor; }
void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
bool is_writable() const { return raw() & ReadWrite; }
bool is_writable() const { return (raw() & ReadWrite) == ReadWrite; }
void set_writable(bool b) { set_bit(ReadWrite, b); }
bool is_write_through() const { return raw() & WriteThrough; }
bool is_write_through() const { return (raw() & WriteThrough) == WriteThrough; }
void set_write_through(bool b) { set_bit(WriteThrough, b); }
bool is_cache_disabled() const { return raw() & CacheDisabled; }
bool is_cache_disabled() const { return (raw() & CacheDisabled) == CacheDisabled; }
void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
bool is_global() const { return raw() & Global; }
bool is_global() const { return (raw() & Global) == Global; }
void set_global(bool b) { set_bit(Global, b); }
bool is_execute_disabled() const { return raw() & NoExecute; }
bool is_execute_disabled() const { return (raw() & NoExecute) == NoExecute; }
void set_execute_disabled(bool b) { set_bit(NoExecute, b); }
bool is_null() const { return m_raw == 0; }

View file

@@ -304,12 +304,12 @@ private:
{
VERIFY(m_in_critical > 0);
if (m_in_critical == 1) {
if (!m_in_irq) {
if (m_in_irq == 0) {
deferred_call_execute_pending();
VERIFY(m_in_critical == 1);
}
m_in_critical = 0;
if (!m_in_irq)
if (m_in_irq == 0)
check_invoke_scheduler();
} else {
m_in_critical = m_in_critical - 1;
@@ -327,7 +327,7 @@ public:
auto prev_critical = in_critical();
write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), 0);
auto& proc = current();
if (!proc.m_in_irq)
if (proc.m_in_irq == 0)
proc.check_invoke_scheduler();
return prev_critical;
}

View file

@@ -39,7 +39,7 @@ public:
VERIFY(is_locked());
track_lock_release(m_rank);
m_lock.store(0, AK::memory_order_release);
if (prev_flags & 0x200)
if ((prev_flags & 0x200) != 0)
sti();
else
cli();
@@ -101,7 +101,7 @@ public:
track_lock_release(m_rank);
m_lock.store(0, AK::memory_order_release);
}
if (prev_flags & 0x200)
if ((prev_flags & 0x200) != 0)
sti();
else
cli();

View file

@@ -124,7 +124,7 @@ protected:
void evaluate_block_conditions()
{
if (Processor::current_in_irq()) {
if (Processor::current_in_irq() != 0) {
// If called from an IRQ handler we need to delay evaluation
// and unblocking of waiting threads. Note that this File
// instance may be deleted until the deferred call is executed!

View file

@@ -30,9 +30,9 @@ inline bool is_regular_file(mode_t mode) { return (mode & S_IFMT) == S_IFREG; }
inline bool is_fifo(mode_t mode) { return (mode & S_IFMT) == S_IFIFO; }
inline bool is_symlink(mode_t mode) { return (mode & S_IFMT) == S_IFLNK; }
inline bool is_socket(mode_t mode) { return (mode & S_IFMT) == S_IFSOCK; }
inline bool is_sticky(mode_t mode) { return mode & S_ISVTX; }
inline bool is_setuid(mode_t mode) { return mode & S_ISUID; }
inline bool is_setgid(mode_t mode) { return mode & S_ISGID; }
inline bool is_sticky(mode_t mode) { return (mode & S_ISVTX) == S_ISVTX; }
inline bool is_setuid(mode_t mode) { return (mode & S_ISUID) == S_ISUID; }
inline bool is_setgid(mode_t mode) { return (mode & S_ISGID) == S_ISGID; }
struct InodeMetadata {
bool is_valid() const { return inode.is_valid(); }
@@ -46,10 +46,10 @@ struct InodeMetadata {
if (u == 0)
return true;
if (uid == u)
return mode & S_IRUSR;
return (mode & S_IRUSR) == S_IRUSR;
if (gid == g || eg.contains_slow(gid))
return mode & S_IRGRP;
return mode & S_IROTH;
return (mode & S_IRGRP) == S_IRGRP;
return (mode & S_IROTH) == S_IROTH;
}
bool may_write(UserID u, GroupID g, Span<GroupID const> eg) const
@@ -57,10 +57,10 @@ struct InodeMetadata {
if (u == 0)
return true;
if (uid == u)
return mode & S_IWUSR;
return (mode & S_IWUSR) == S_IWUSR;
if (gid == g || eg.contains_slow(gid))
return mode & S_IWGRP;
return mode & S_IWOTH;
return (mode & S_IWGRP) == S_IWGRP;
return (mode & S_IWOTH) == S_IWOTH;
}
bool may_execute(UserID u, GroupID g, Span<GroupID const> eg) const
@@ -68,10 +68,10 @@ struct InodeMetadata {
if (u == 0)
return true;
if (uid == u)
return mode & S_IXUSR;
return (mode & S_IXUSR) == S_IXUSR;
if (gid == g || eg.contains_slow(gid))
return mode & S_IXGRP;
return mode & S_IXOTH;
return (mode & S_IXGRP) == S_IXGRP;
return (mode & S_IXOTH) == S_IXOTH;
}
bool is_directory() const { return Kernel::is_directory(mode); }

View file

@@ -40,8 +40,8 @@ public:
void set_rw_mode(int options)
{
set_readable(options & O_RDONLY);
set_writable(options & O_WRONLY);
set_readable((options & O_RDONLY) == O_RDONLY);
set_writable((options & O_WRONLY) == O_WRONLY);
}
ErrorOr<void> close();

View file

@@ -35,7 +35,7 @@ struct RefPtrTraits {
ALWAYS_INLINE static FlatPtr as_bits(T* ptr)
{
VERIFY(!((FlatPtr)ptr & 1));
VERIFY(((FlatPtr)ptr & 1) == 0);
return (FlatPtr)ptr;
}
@@ -49,13 +49,13 @@ struct RefPtrTraits {
ALWAYS_INLINE static bool is_null(FlatPtr bits)
{
return !(bits & ~(FlatPtr)1);
return (bits & ~(FlatPtr)1) == 0;
}
ALWAYS_INLINE static FlatPtr exchange(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
{
// Only exchange when lock is not held
VERIFY(!(new_value & 1));
VERIFY((new_value & 1) == 0);
FlatPtr expected = atomic_var.load(AK::MemoryOrder::memory_order_relaxed);
for (;;) {
expected &= ~(FlatPtr)1; // only if lock bit is not set
@@ -71,7 +71,7 @@ struct RefPtrTraits {
ALWAYS_INLINE static bool exchange_if_null(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
{
// Only exchange when lock is not held
VERIFY(!(new_value & 1));
VERIFY((new_value & 1) == 0);
for (;;) {
FlatPtr expected = default_null_value; // only if lock bit is not set
if (atomic_var.compare_exchange_strong(expected, new_value, AK::MemoryOrder::memory_order_acq_rel))
@@ -95,19 +95,19 @@ struct RefPtrTraits {
FlatPtr bits;
for (;;) {
bits = atomic_var.fetch_or(1, AK::MemoryOrder::memory_order_acq_rel);
if (!(bits & 1))
if ((bits & 1) == 0)
break;
#ifdef KERNEL
Kernel::Processor::wait_check();
#endif
}
VERIFY(!(bits & 1));
VERIFY((bits & 1) == 0);
return bits;
}
ALWAYS_INLINE static void unlock(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
{
VERIFY(!(new_value & 1));
VERIFY((new_value & 1) == 0);
atomic_var.store(new_value, AK::MemoryOrder::memory_order_release);
}

View file

@@ -33,7 +33,7 @@ public:
void dump() const;
size_t available() const { return m_page_count - (m_used_chunks / 2); }
bool is_empty() const { return !available(); }
bool is_empty() const { return available() == 0; }
PhysicalAddress base() const { return m_base_address; }
bool contains(PhysicalAddress paddr) const

View file

@@ -57,13 +57,13 @@ public:
[[nodiscard]] VirtualRange const& range() const { return m_range; }
[[nodiscard]] VirtualAddress vaddr() const { return m_range.base(); }
[[nodiscard]] size_t size() const { return m_range.size(); }
[[nodiscard]] bool is_readable() const { return m_access & Access::Read; }
[[nodiscard]] bool is_writable() const { return m_access & Access::Write; }
[[nodiscard]] bool is_executable() const { return m_access & Access::Execute; }
[[nodiscard]] bool is_readable() const { return (m_access & Access::Read) == Access::Read; }
[[nodiscard]] bool is_writable() const { return (m_access & Access::Write) == Access::Write; }
[[nodiscard]] bool is_executable() const { return (m_access & Access::Execute) == Access::Execute; }
[[nodiscard]] bool has_been_readable() const { return m_access & Access::HasBeenReadable; }
[[nodiscard]] bool has_been_writable() const { return m_access & Access::HasBeenWritable; }
[[nodiscard]] bool has_been_executable() const { return m_access & Access::HasBeenExecutable; }
[[nodiscard]] bool has_been_readable() const { return (m_access & Access::HasBeenReadable) == Access::HasBeenReadable; }
[[nodiscard]] bool has_been_writable() const { return (m_access & Access::HasBeenWritable) == Access::HasBeenWritable; }
[[nodiscard]] bool has_been_executable() const { return (m_access & Access::HasBeenExecutable) == Access::HasBeenExecutable; }
[[nodiscard]] bool is_cacheable() const { return m_cacheable; }
[[nodiscard]] StringView name() const { return m_name ? m_name->view() : StringView {}; }
@@ -223,26 +223,26 @@ public:
AK_ENUM_BITWISE_OPERATORS(Region::Access)
inline Region::Access prot_to_region_access_flags(int prot)
inline constexpr Region::Access prot_to_region_access_flags(int prot)
{
Region::Access access = Region::Access::None;
if (prot & PROT_READ)
if ((prot & PROT_READ) == PROT_READ)
access |= Region::Access::Read;
if (prot & PROT_WRITE)
if ((prot & PROT_WRITE) == PROT_WRITE)
access |= Region::Access::Write;
if (prot & PROT_EXEC)
if ((prot & PROT_EXEC) == PROT_EXEC)
access |= Region::Access::Execute;
return access;
}
inline int region_access_flags_to_prot(Region::Access access)
inline constexpr int region_access_flags_to_prot(Region::Access access)
{
int prot = 0;
if (access & Region::Access::Read)
if ((access & Region::Access::Read) == Region::Access::Read)
prot |= PROT_READ;
if (access & Region::Access::Write)
if ((access & Region::Access::Write) == Region::Access::Write)
prot |= PROT_WRITE;
if (access & Region::Access::Execute)
if ((access & Region::Access::Execute) == Region::Access::Execute)
prot |= PROT_EXEC;
return prot;
}

View file

@@ -509,7 +509,7 @@ siginfo_t Process::wait_info()
siginfo.si_pid = pid().value();
siginfo.si_uid = uid().value();
if (m_protected_values.termination_signal) {
if (m_protected_values.termination_signal != 0) {
siginfo.si_status = m_protected_values.termination_signal;
siginfo.si_code = CLD_KILLED;
} else {
@@ -633,7 +633,7 @@ void Process::finalize()
{
// FIXME: PID/TID BUG
if (auto parent_thread = Thread::from_tid(ppid().value())) {
if (!(parent_thread->m_signal_action_data[SIGCHLD].flags & SA_NOCLDWAIT))
if ((parent_thread->m_signal_action_data[SIGCHLD].flags & SA_NOCLDWAIT) != SA_NOCLDWAIT)
parent_thread->send_signal(SIGCHLD, this);
}
}

View file

@@ -155,7 +155,7 @@ public:
inline static bool has_current()
{
return Processor::current_thread();
return Processor::current_thread() != nullptr;
}
template<typename EntryFunction>
@@ -459,7 +459,7 @@ public:
Mutex& ptrace_lock() { return m_ptrace_lock; }
bool has_promises() const { return m_protected_values.has_promises; }
bool has_promised(Pledge pledge) const { return m_protected_values.promises & (1u << (u32)pledge); }
bool has_promised(Pledge pledge) const { return (m_protected_values.promises & (1U << (u32)pledge)) != 0; }
VeilState veil_state() const
{

View file

@@ -41,10 +41,10 @@ public:
}
ErrorOr<void> set_termios(const termios&);
bool should_generate_signals() const { return m_termios.c_lflag & ISIG; }
bool should_flush_on_signal() const { return !(m_termios.c_lflag & NOFLSH); }
bool should_echo_input() const { return m_termios.c_lflag & ECHO; }
bool in_canonical_mode() const { return m_termios.c_lflag & ICANON; }
bool should_generate_signals() const { return (m_termios.c_lflag & ISIG) == ISIG; }
bool should_flush_on_signal() const { return (m_termios.c_lflag & NOFLSH) != NOFLSH; }
bool should_echo_input() const { return (m_termios.c_lflag & ECHO) == ECHO; }
bool in_canonical_mode() const { return (m_termios.c_lflag & ICANON) == ICANON; }
void set_default_termios();
void hang_up();

View file

@@ -246,7 +246,7 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
set_state(Thread::Runnable);
};
if (Processor::current_in_irq()) {
if (Processor::current_in_irq() != 0) {
Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
if (auto this_thread = self.strong_ref())
do_unblock();
@@ -267,7 +267,7 @@ void Thread::unblock_from_blocker(Blocker& blocker)
if (!should_be_stopped() && !is_stopped())
unblock();
};
if (Processor::current_in_irq()) {
if (Processor::current_in_irq() != 0) {
Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
if (auto this_thread = self.strong_ref())
do_unblock();
@@ -580,7 +580,8 @@ bool Thread::tick()
++m_process->m_ticks_in_user;
++m_ticks_in_user;
}
return --m_ticks_left;
--m_ticks_left;
return m_ticks_left != 0;
}
void Thread::check_dispatch_pending_signal()
@@ -588,7 +589,7 @@ void Thread::check_dispatch_pending_signal()
auto result = DispatchSignalResult::Continue;
{
SpinlockLocker scheduler_lock(g_scheduler_lock);
if (pending_signals_for_state()) {
if (pending_signals_for_state() != 0) {
SpinlockLocker lock(m_lock);
result = dispatch_one_pending_signal();
}
@@ -633,11 +634,11 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
}
m_pending_signals |= 1 << (signal - 1);
m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
m_have_any_unmasked_pending_signals.store((pending_signals_for_state() & ~m_signal_mask) != 0, AK::memory_order_release);
if (m_state == Stopped) {
SpinlockLocker lock(m_lock);
if (pending_signals_for_state()) {
if (pending_signals_for_state() != 0) {
dbgln_if(SIGNAL_DEBUG, "Signal: Resuming stopped {} to deliver signal {}", *this, signal);
resume_from_stopped();
}
@@ -653,7 +654,7 @@ u32 Thread::update_signal_mask(u32 signal_mask)
SpinlockLocker lock(g_scheduler_lock);
auto previous_signal_mask = m_signal_mask;
m_signal_mask = signal_mask;
m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
m_have_any_unmasked_pending_signals.store((pending_signals_for_state() & ~m_signal_mask) != 0, AK::memory_order_release);
return previous_signal_mask;
}
@@ -671,7 +672,7 @@ u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
m_signal_mask &= ~signal_set;
else
m_signal_mask |= signal_set;
m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
m_have_any_unmasked_pending_signals.store((pending_signals_for_state() & ~m_signal_mask) != 0, AK::memory_order_release);
return previous_signal_mask;
}
@@ -711,7 +712,7 @@ DispatchSignalResult Thread::dispatch_one_pending_signal()
u8 signal = 1;
for (; signal < 32; ++signal) {
if (signal_candidates & (1 << (signal - 1))) {
if ((signal_candidates & (1 << (signal - 1))) != 0) {
break;
}
}
@@ -724,7 +725,7 @@ DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal)
SpinlockLocker scheduler_lock(g_scheduler_lock);
SpinlockLocker lock(m_lock);
u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
if (!(signal_candidates & (1 << (signal - 1))))
if ((signal_candidates & (1 << (signal - 1))) == 0)
return DispatchSignalResult::Continue;
return dispatch_signal(signal);
}
@@ -853,7 +854,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
// Mark this signal as handled.
m_pending_signals &= ~(1 << (signal - 1));
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
m_have_any_unmasked_pending_signals.store((m_pending_signals & ~m_signal_mask) != 0, AK::memory_order_release);
auto& process = this->process();
auto tracer = process.tracer();
@@ -914,13 +915,13 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
u32 old_signal_mask = m_signal_mask;
u32 new_signal_mask = action.mask;
if (action.flags & SA_NODEFER)
if ((action.flags & SA_NODEFER) == SA_NODEFER)
new_signal_mask &= ~(1 << (signal - 1));
else
new_signal_mask |= 1 << (signal - 1);
m_signal_mask |= new_signal_mask;
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
m_have_any_unmasked_pending_signals.store((m_pending_signals & ~m_signal_mask) != 0, AK::memory_order_release);
auto setup_stack = [&](RegisterState& state) {
FlatPtr stack = state.userspace_sp();
@@ -1121,7 +1122,7 @@ struct RecognizedSymbol {
static bool symbolicate(RecognizedSymbol const& symbol, Process& process, StringBuilder& builder)
{
if (!symbol.address)
if (symbol.address == 0)
return false;
bool mask_kernel_addresses = !process.is_superuser();
@@ -1201,7 +1202,7 @@ ErrorOr<void> Thread::make_thread_specific_region(Badge<Process>)
m_thread_specific_data = VirtualAddress(thread_specific_data);
thread_specific_data->self = thread_specific_data;
if (process().m_master_tls_size)
if (process().m_master_tls_size != 0)
memcpy(thread_local_storage, process().m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), process().m_master_tls_size);
return {};

View file

@@ -714,7 +714,7 @@ public:
virtual bool setup_blocker() override;
bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
bool is_wait() const { return !(m_wait_options & WNOWAIT); }
bool is_wait() const { return (m_wait_options & WNOWAIT) != WNOWAIT; }
private:
void do_was_disowned();

View file

@@ -18,7 +18,7 @@ public:
static ErrorOr<NonnullOwnPtr<ThreadTracer>> try_create(ProcessID tracer) { return adopt_nonnull_own_or_enomem(new (nothrow) ThreadTracer(tracer)); }
ProcessID tracer_pid() const { return m_tracer_pid; }
bool has_pending_signal(u32 signal) const { return m_pending_signals & (1 << (signal - 1)); }
bool has_pending_signal(u32 signal) const { return (m_pending_signals & (1 << (signal - 1))) != 0; }
void set_signal(u32 signal) { m_pending_signals |= (1 << (signal - 1)); }
void unset_signal(u32 signal) { m_pending_signals &= ~(1 << (signal - 1)); }