Mega-cleanup for atomic_t<> and named bit-sets bs_t<>

Remove "atomic operator" classes
Remove test, test_and_set, test_and_reset, test_and_complement global functions
Simplify atomic_t<> with constexpr if, remove some garbage
Redesign bs_t<> as a class, mark its methods constexpr
Implement atomic_bs_t<> for optimizations
Remove unused __bitwise_ops concept (should be in another header anyway)
Bitsets can now be tested via safe bool conversion
Nekotekina 2018-09-02 20:22:35 +03:00
parent a6d06b2e20
commit 8abe6489ed
23 changed files with 604 additions and 1090 deletions
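
For context, a minimal usage sketch of the redesigned bit-set interface (the enum, its values and the include path below are hypothetical illustrations; only bs_t<>, its operators and the safe bool conversion come from this commit):

#include "Utilities/BitSet.h" // assumed location of the bs_t<> header

enum class test_flag : u32
{
	flag1, // bit 0
	flag2, // bit 1
	flag3, // bit 2

	__bitset_enum_max // required marker, must be the last value
};

void example()
{
	bs_t<test_flag> flags = test_flag::flag1 + test_flag::flag3; // bitset union of two bits
	flags -= test_flag::flag3;                                   // bitset difference (clears flag3)

	if (flags & test_flag::flag1) // operator & yields a bs_t<>, tested via the new explicit bool conversion
	{
		// flag1 is set
	}
}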


@ -562,206 +562,11 @@ struct atomic_storage<T, 16> : atomic_storage<T, 0>
// TODO
};
template<typename T1, typename T2, typename = void>
struct atomic_add
{
auto operator()(T1& lhs, const T2& rhs) const
{
return lhs += rhs;
}
};
template<typename T1, typename T2>
struct atomic_add<T1, T2, std::enable_if_t<std::is_integral<T1>::value && std::is_convertible<T2, T1>::value>>
{
static constexpr auto fetch_op = &atomic_storage<T1>::fetch_add;
static constexpr auto op_fetch = &atomic_storage<T1>::add_fetch;
static constexpr auto atomic_op = &atomic_storage<T1>::add_fetch;
};
template<typename T1, typename T2, typename = void>
struct atomic_sub
{
auto operator()(T1& lhs, const T2& rhs) const
{
return lhs -= rhs;
}
};
template<typename T1, typename T2>
struct atomic_sub<T1, T2, std::enable_if_t<std::is_integral<T1>::value && std::is_convertible<T2, T1>::value>>
{
static constexpr auto fetch_op = &atomic_storage<T1>::fetch_sub;
static constexpr auto op_fetch = &atomic_storage<T1>::sub_fetch;
static constexpr auto atomic_op = &atomic_storage<T1>::sub_fetch;
};
template<typename T, typename = void>
struct atomic_pre_inc
{
auto operator()(T& v) const
{
return ++v;
}
};
template<typename T>
struct atomic_pre_inc<T, std::enable_if_t<std::is_integral<T>::value>>
{
static constexpr auto atomic_op = &atomic_storage<T>::inc_fetch;
};
template<typename T, typename = void>
struct atomic_post_inc
{
auto operator()(T& v) const
{
return v++;
}
};
template<typename T>
struct atomic_post_inc<T, std::enable_if_t<std::is_integral<T>::value>>
{
static constexpr auto atomic_op = &atomic_storage<T>::fetch_inc;
};
template<typename T, typename = void>
struct atomic_pre_dec
{
auto operator()(T& v) const
{
return --v;
}
};
template<typename T>
struct atomic_pre_dec<T, std::enable_if_t<std::is_integral<T>::value>>
{
static constexpr auto atomic_op = &atomic_storage<T>::dec_fetch;
};
template<typename T, typename = void>
struct atomic_post_dec
{
auto operator()(T& v) const
{
return v--;
}
};
template<typename T>
struct atomic_post_dec<T, std::enable_if_t<std::is_integral<T>::value>>
{
static constexpr auto atomic_op = &atomic_storage<T>::fetch_dec;
};
template<typename T1, typename T2, typename = void>
struct atomic_and
{
auto operator()(T1& lhs, const T2& rhs) const
{
return lhs &= rhs;
}
};
template<typename T1, typename T2>
struct atomic_and<T1, T2, std::enable_if_t<std::is_integral<T1>::value && std::is_convertible<T2, T1>::value>>
{
static constexpr auto fetch_op = &atomic_storage<T1>::fetch_and;
static constexpr auto op_fetch = &atomic_storage<T1>::and_fetch;
static constexpr auto atomic_op = &atomic_storage<T1>::and_fetch;
};
template<typename T1, typename T2, typename = void>
struct atomic_or
{
auto operator()(T1& lhs, const T2& rhs) const
{
return lhs |= rhs;
}
};
template<typename T1, typename T2>
struct atomic_or<T1, T2, std::enable_if_t<std::is_integral<T1>::value && std::is_convertible<T2, T1>::value>>
{
static constexpr auto fetch_op = &atomic_storage<T1>::fetch_or;
static constexpr auto op_fetch = &atomic_storage<T1>::or_fetch;
static constexpr auto atomic_op = &atomic_storage<T1>::or_fetch;
};
template<typename T1, typename T2, typename = void>
struct atomic_xor
{
auto operator()(T1& lhs, const T2& rhs) const
{
return lhs ^= rhs;
}
};
template<typename T1, typename T2>
struct atomic_xor<T1, T2, std::enable_if_t<std::is_integral<T1>::value && std::is_convertible<T2, T1>::value>>
{
static constexpr auto fetch_op = &atomic_storage<T1>::fetch_xor;
static constexpr auto op_fetch = &atomic_storage<T1>::xor_fetch;
static constexpr auto atomic_op = &atomic_storage<T1>::xor_fetch;
};
template<typename T1, typename T2, typename = void>
struct atomic_test_and_set
{
bool operator()(T1& lhs, const T2& rhs) const
{
return test_and_set(lhs, rhs);
}
};
template<typename T1, typename T2>
struct atomic_test_and_set<T1, T2, std::enable_if_t<std::is_integral<T1>::value && std::is_convertible<T2, T1>::value>>
{
static constexpr auto fetch_op = &atomic_storage<T1>::test_and_set;
static constexpr auto op_fetch = &atomic_storage<T1>::test_and_set;
static constexpr auto atomic_op = &atomic_storage<T1>::test_and_set;
};
template<typename T1, typename T2, typename = void>
struct atomic_test_and_reset
{
bool operator()(T1& lhs, const T2& rhs) const
{
return test_and_reset(lhs, rhs);
}
};
template<typename T1, typename T2>
struct atomic_test_and_reset<T1, T2, std::enable_if_t<std::is_integral<T1>::value && std::is_convertible<T2, T1>::value>>
{
static constexpr auto fetch_op = &atomic_storage<T1>::test_and_reset;
static constexpr auto op_fetch = &atomic_storage<T1>::test_and_reset;
static constexpr auto atomic_op = &atomic_storage<T1>::test_and_reset;
};
template<typename T1, typename T2, typename = void>
struct atomic_test_and_complement
{
bool operator()(T1& lhs, const T2& rhs) const
{
return test_and_complement(lhs, rhs);
}
};
template<typename T1, typename T2>
struct atomic_test_and_complement<T1, T2, std::enable_if_t<std::is_integral<T1>::value && std::is_convertible<T2, T1>::value>>
{
static constexpr auto fetch_op = &atomic_storage<T1>::test_and_complement;
static constexpr auto op_fetch = &atomic_storage<T1>::test_and_complement;
static constexpr auto atomic_op = &atomic_storage<T1>::test_and_complement;
};
// Atomic type with lock-free and standard layout guarantees (and appropriate limitations)
template <typename T>
class atomic_t
{
+protected:
using type = typename std::remove_cv<T>::type;
static_assert(alignof(type) == sizeof(type), "atomic_t<> error: unexpected alignment, use alignas() if necessary");
@ -790,7 +595,7 @@ public:
}
// Atomically compare data with cmp, replace with exch if equal, return previous data value anyway
-simple_type compare_and_swap(const type& cmp, const type& exch)
+type compare_and_swap(const type& cmp, const type& exch)
{
type old = cmp;
atomic_storage<type>::compare_exchange(m_data, old, exch);
@ -804,81 +609,67 @@ public:
return atomic_storage<type>::compare_exchange(m_data, old, exch);
}
// Atomic operation; returns old value, discards function result value // Atomic operation; returns old value
template<typename F, typename... Args, typename RT = std::result_of_t<F(T&, const Args&...)>> template <typename F>
type fetch_op(F&& func, const Args&... args) std::enable_if_t<std::is_void<std::invoke_result_t<F, T&>>::value, type> fetch_op(F&& func)
{ {
type _new, old = atomic_storage<type>::load(m_data); type _new, old = atomic_storage<type>::load(m_data);
while (true) while (true)
{ {
func((_new = old), args...); func((_new = old));
if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new))) return old; if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new))) [[likely]]
}
}
// Helper overload for calling optimized implementation
template<typename F, typename... Args, typename FT = decltype(F::fetch_op), typename RT = std::result_of_t<FT(T&, const Args&...)>>
type fetch_op(F&&, const Args&... args)
{ {
return F::fetch_op(m_data, args...); return old;
}
}
} }
// Atomic operation; returns new value, discards function result value // Atomic operation; returns new value
template<typename F, typename... Args, typename RT = std::result_of_t<F(T&, const Args&...)>> template <typename F>
type op_fetch(F&& func, const Args&... args) std::enable_if_t<std::is_void<std::invoke_result_t<F, T&>>::value, type> op_fetch(F&& func)
{ {
type _new, old = atomic_storage<type>::load(m_data); type _new, old = atomic_storage<type>::load(m_data);
while (true) while (true)
{ {
func((_new = old), args...); func((_new = old));
if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new))) return _new; if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new))) [[likely]]
}
}
// Helper overload for calling optimized implementation
template<typename F, typename... Args, typename FT = decltype(F::op_fetch), typename RT = std::result_of_t<FT(T&, const Args&...)>>
type op_fetch(F&&, const Args&... args)
{ {
return F::op_fetch(m_data, args...); return _new;
}
}
} }
// Atomic operation; returns function result value // Atomic operation; returns function result value (TODO: remove args)
template<typename F, typename... Args, typename RT = std::result_of_t<F(T&, const Args&...)>, typename = std::enable_if_t<!std::is_void<RT>::value>> template <typename F, typename... Args, typename RT = std::invoke_result_t<F, T&, const Args&...>>
RT atomic_op(F&& func, const Args&... args) RT atomic_op(F&& func, const Args&... args)
{ {
type _new, old = atomic_storage<type>::load(m_data); type _new, old = atomic_storage<type>::load(m_data);
while (true) while (true)
{ {
RT&& result = func((_new = old), args...); if constexpr(std::is_void<RT>::value)
if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new))) return std::move(result);
}
}
// Overload for void return type
template<typename F, typename... Args, typename RT = std::result_of_t<F(T&, const Args&...)>, typename = std::enable_if_t<std::is_void<RT>::value>>
void atomic_op(F&& func, const Args&... args)
{
type _new, old = atomic_storage<type>::load(m_data);
while (true)
{ {
func((_new = old), args...); func((_new = old), args...);
if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new))) return; if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new))) [[likely]]
}
}
// Helper overload for calling optimized implementation
template<typename F, typename... Args, typename FT = decltype(F::atomic_op), typename RT = std::result_of_t<FT(T&, const Args&...)>>
auto atomic_op(F&&, const Args&... args)
{ {
return F::atomic_op(m_data, args...); return;
}
}
else
{
RT result = func((_new = old), args...);
if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new))) [[likely]]
{
return result;
}
}
}
} }
// Atomically read data // Atomically read data
@ -911,144 +702,250 @@ public:
return atomic_storage<type>::exchange(m_data, rhs); return atomic_storage<type>::exchange(m_data, rhs);
} }
template<typename T2> type fetch_add(const type& rhs)
type fetch_add(const T2& rhs)
{ {
return fetch_op(atomic_add<type, T2>{}, rhs); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::fetch_add(m_data, rhs);
} }
template<typename T2> return fetch_op([&](T& v)
type add_fetch(const T2& rhs)
{ {
return op_fetch(atomic_add<type, T2>{}, rhs); v += rhs;
});
} }
template<typename T2> type add_fetch(const type& rhs)
auto operator +=(const T2& rhs)
{ {
return atomic_op(atomic_add<type, T2>{}, rhs); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::add_fetch(m_data, rhs);
} }
template<typename T2> return op_fetch([&](T& v)
type fetch_sub(const T2& rhs)
{ {
return fetch_op(atomic_sub<type, T2>{}, rhs); v += rhs;
});
} }
template<typename T2> auto operator +=(const type& rhs)
type sub_fetch(const T2& rhs)
{ {
return op_fetch(atomic_sub<type, T2>{}, rhs); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::add_fetch(m_data, rhs);
} }
template<typename T2> return atomic_op([&](T& v)
auto operator -=(const T2& rhs)
{ {
return atomic_op(atomic_sub<type, T2>{}, rhs); return v += rhs;
});
} }
template<typename T2> type fetch_sub(const type& rhs)
type fetch_and(const T2& rhs)
{ {
return fetch_op(atomic_and<type, T2>{}, rhs); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::fetch_sub(m_data, rhs);
} }
template<typename T2> return fetch_op([&](T& v)
type and_fetch(const T2& rhs)
{ {
return op_fetch(atomic_and<type, T2>{}, rhs); v -= rhs;
});
} }
template<typename T2> type sub_fetch(const type& rhs)
auto operator &=(const T2& rhs)
{ {
return atomic_op(atomic_and<type, T2>{}, rhs); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::sub_fetch(m_data, rhs);
} }
template<typename T2> return op_fetch([&](T& v)
type fetch_or(const T2& rhs)
{ {
return fetch_op(atomic_or<type, T2>{}, rhs); v -= rhs;
});
} }
template<typename T2> auto operator -=(const type& rhs)
type or_fetch(const T2& rhs)
{ {
return op_fetch(atomic_or<type, T2>{}, rhs); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::sub_fetch(m_data, rhs);
} }
template<typename T2> return atomic_op([&](T& v)
auto operator |=(const T2& rhs)
{ {
return atomic_op(atomic_or<type, T2>{}, rhs); return v -= rhs;
});
} }
template<typename T2> type fetch_and(const type& rhs)
type fetch_xor(const T2& rhs)
{ {
return fetch_op(atomic_xor<type, T2>{}, rhs); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::fetch_and(m_data, rhs);
} }
template<typename T2> return fetch_op([&](T& v)
type xor_fetch(const T2& rhs)
{ {
return op_fetch(atomic_xor<type, T2>{}, rhs); v &= rhs;
});
} }
template<typename T2> type and_fetch(const type& rhs)
auto operator ^=(const T2& rhs)
{ {
return atomic_op(atomic_xor<type, T2>{}, rhs); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::and_fetch(m_data, rhs);
}
return op_fetch([&](T& v)
{
v &= rhs;
});
}
auto operator &=(const type& rhs)
{
if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::and_fetch(m_data, rhs);
}
return atomic_op([&](T& v)
{
return v &= rhs;
});
}
type fetch_or(const type& rhs)
{
if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::fetch_or(m_data, rhs);
}
return fetch_op([&](T& v)
{
v |= rhs;
});
}
type or_fetch(const type& rhs)
{
if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::or_fetch(m_data, rhs);
}
return op_fetch([&](T& v)
{
v |= rhs;
});
}
auto operator |=(const type& rhs)
{
if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::or_fetch(m_data, rhs);
}
return atomic_op([&](T& v)
{
return v |= rhs;
});
}
type fetch_xor(const type& rhs)
{
if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::fetch_xor(m_data, rhs);
}
return fetch_op([&](T& v)
{
v ^= rhs;
});
}
type xor_fetch(const type& rhs)
{
if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::xor_fetch(m_data, rhs);
}
return op_fetch([&](T& v)
{
v ^= rhs;
});
}
auto operator ^=(const type& rhs)
{
if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::xor_fetch(m_data, rhs);
}
return atomic_op([&](T& v)
{
return v ^= rhs;
});
} }
auto operator ++() auto operator ++()
{ {
return atomic_op(atomic_pre_inc<type>{}); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::inc_fetch(m_data);
}
return atomic_op([](T& v)
{
return ++v;
});
} }
auto operator --() auto operator --()
{ {
return atomic_op(atomic_pre_dec<type>{}); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::dec_fetch(m_data);
}
return atomic_op([](T& v)
{
return --v;
});
} }
auto operator ++(int) auto operator ++(int)
{ {
return atomic_op(atomic_post_inc<type>{}); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::fetch_inc(m_data);
}
return atomic_op([](T& v)
{
return v++;
});
} }
auto operator --(int) auto operator --(int)
{ {
return atomic_op(atomic_post_dec<type>{}); if constexpr(std::is_integral<type>::value)
{
return atomic_storage<type>::fetch_dec(m_data);
} }
template<typename T2 = T> return atomic_op([](T& v)
auto test_and_set(const T2& rhs)
{ {
return atomic_op(atomic_test_and_set<type, T2>{}, rhs); return v--;
} });
template<typename T2 = T>
auto test_and_reset(const T2& rhs)
{
return atomic_op(atomic_test_and_reset<type, T2>{}, rhs);
}
template<typename T2 = T>
auto test_and_complement(const T2& rhs)
{
return atomic_op(atomic_test_and_complement<type, T2>{}, rhs);
}
// Minimal pointer support (TODO: must forward operator ->())
type operator ->() const
{
return load();
}
// Minimal array support
template<typename I = std::size_t>
auto operator [](const I& index) const -> decltype(std::declval<const type>()[std::declval<I>()])
{
return load()[index];
} }
}; };
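
The integral fast paths introduced above are hard to follow in this flattened view; as a rough, self-contained analogue of the new dispatch (using std::atomic instead of the project's atomic_storage, so this is a sketch of the pattern rather than the actual implementation):

#include <atomic>
#include <type_traits>

// The new atomic_t<>::fetch_add-style members pick the implementation at compile time:
// integral types use the native atomic RMW, everything else falls back to a CAS loop.
template <typename T>
T fetch_add_sketch(std::atomic<T>& data, const T& rhs)
{
	if constexpr (std::is_integral<T>::value)
	{
		return data.fetch_add(rhs); // optimized path (e.g. lock xadd on x86)
	}
	else
	{
		T old = data.load();

		// Generic path: retry until the compare-exchange succeeds, then return the old value
		while (!data.compare_exchange_weak(old, old + rhs))
		{
		}

		return old;
	}
}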


@ -799,34 +799,34 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
#ifdef _WIN32
DWORD access = 0;
-if (test(mode & fs::read)) access |= GENERIC_READ;
+if (mode & fs::read) access |= GENERIC_READ;
-if (test(mode & fs::write)) access |= DELETE | (test(mode & fs::append) ? FILE_APPEND_DATA : GENERIC_WRITE);
+if (mode & fs::write) access |= DELETE | (mode & fs::append ? FILE_APPEND_DATA : GENERIC_WRITE);
DWORD disp = 0;
-if (test(mode & fs::create))
+if (mode & fs::create)
{
disp =
-test(mode & fs::excl) ? CREATE_NEW :
+mode & fs::excl ? CREATE_NEW :
-test(mode & fs::trunc) ? CREATE_ALWAYS : OPEN_ALWAYS;
+mode & fs::trunc ? CREATE_ALWAYS : OPEN_ALWAYS;
}
else
{
-if (test(mode & fs::excl))
+if (mode & fs::excl)
{
g_tls_error = error::inval;
return;
}
-disp = test(mode & fs::trunc) ? TRUNCATE_EXISTING : OPEN_EXISTING;
+disp = mode & fs::trunc ? TRUNCATE_EXISTING : OPEN_EXISTING;
}
DWORD share = 0;
-if (!test(mode, fs::unread) || !test(mode & fs::write))
+if (!(mode & fs::unread) || !(mode & fs::write))
{
share |= FILE_SHARE_READ;
}
-if (!test(mode, fs::lock + fs::unread) || !test(mode & fs::write))
+if (!(mode & (fs::lock + fs::unread)) || !(mode & fs::write))
{
share |= FILE_SHARE_WRITE | FILE_SHARE_DELETE;
}
@ -949,18 +949,18 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
#else
int flags = 0;
-if (test(mode & fs::read) && test(mode & fs::write)) flags |= O_RDWR;
+if (mode & fs::read && mode & fs::write) flags |= O_RDWR;
-else if (test(mode & fs::read)) flags |= O_RDONLY;
+else if (mode & fs::read) flags |= O_RDONLY;
-else if (test(mode & fs::write)) flags |= O_WRONLY;
+else if (mode & fs::write) flags |= O_WRONLY;
-if (test(mode & fs::append)) flags |= O_APPEND;
+if (mode & fs::append) flags |= O_APPEND;
-if (test(mode & fs::create)) flags |= O_CREAT;
+if (mode & fs::create) flags |= O_CREAT;
-if (test(mode & fs::trunc) && !test(mode, fs::lock + fs::unread)) flags |= O_TRUNC;
+if (mode & fs::trunc && !(mode & (fs::lock + fs::unread))) flags |= O_TRUNC;
-if (test(mode & fs::excl)) flags |= O_EXCL;
+if (mode & fs::excl) flags |= O_EXCL;
int perm = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
-if (test(mode & fs::write) && test(mode & fs::unread))
+if (mode & fs::write && mode & fs::unread)
{
perm = 0;
}
@ -973,14 +973,14 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
return;
}
-if (test(mode & fs::write) && test(mode, fs::lock + fs::unread) && ::flock(fd, LOCK_EX | LOCK_NB) != 0)
+if (mode & fs::write && mode & (fs::lock + fs::unread) && ::flock(fd, LOCK_EX | LOCK_NB) != 0)
{
g_tls_error = errno == EWOULDBLOCK ? fs::error::acces : to_error(errno);
::close(fd);
return;
}
-if (test(mode & fs::trunc) && test(mode, fs::lock + fs::unread))
+if (mode & fs::trunc && mode & (fs::lock + fs::unread))
{
// Postpone truncation in order to avoid using O_TRUNC on a locked file
::ftruncate(fd, 0);


@ -1,27 +1,8 @@
#pragma once
/*
-This header helps to extend scoped enum types (enum class) in two possible ways:
-1) Enabling bitwise operators for enums
-2) Advanced bs_t<> template (this converts enum type to another "bitset" enum type)
-To enable bitwise operators, enum scope must contain `__bitwise_ops` entry.
-enum class flags
-{
-	__bitwise_ops, // Not essential, but recommended to put it first
-	flag1 = 1 << 0,
-	flag2 = 1 << 1,
-};
-Examples:
-`flags::flag1 | flags::flag2` - bitwise OR
-`flags::flag1 & flags::flag2` - bitwise AND
-`flags::flag1 ^ flags::flag2` - bitwise XOR
-`~flags::flag1` - bitwise NEG
-To enable bs_t<> template, enum scope must contain `__bitset_enum_max` entry.
+This header implements bs_t<> class for scoped enum types (enum class).
+To enable bs_t<>, enum scope must contain `__bitset_enum_max` entry.
enum class flagzz : u32
{
@ -31,40 +12,42 @@ enum class flagzz : u32
	__bitset_enum_max // It must be the last value
};
-Now some operators are enabled for two enum types: `flagzz` and `bs_t<flagzz>`.
-These are very different from previously described bitwise operators.
+This also enables helper operators for this enum type.
Examples:
`+flagzz::flag1` - unary `+` operator convert flagzz value to bs_t<flagzz>
`flagzz::flag1 + flagzz::flag2` - bitset union
`flagzz::flag1 - flagzz::flag2` - bitset difference
Intersection (&) and symmetric difference (^) is also available.
*/
#include "types.h"
#include "Atomic.h"
// Helper template
template <typename T> template <typename T>
struct bs_base class atomic_bs_t;
// Bitset type for enum class with available bits [0, T::__bitset_enum_max)
template <typename T>
class bs_t final
{ {
public:
// Underlying type // Underlying type
using under = std::underlying_type_t<T>; using under = std::underlying_type_t<T>;
// Actual bitset type private:
enum class type : under // Underlying value
{ under m_data;
null = 0, // Empty bitset
__bitset_set_type = 0 // SFINAE marker friend class atomic_bs_t<T>;
};
public:
static constexpr std::size_t bitmax = sizeof(T) * 8; static constexpr std::size_t bitmax = sizeof(T) * 8;
static constexpr std::size_t bitsize = static_cast<under>(T::__bitset_enum_max); static constexpr std::size_t bitsize = static_cast<under>(T::__bitset_enum_max);
static_assert(std::is_enum<T>::value, "bs_t<> error: invalid type (must be enum)"); static_assert(std::is_enum<T>::value, "bs_t<> error: invalid type (must be enum)");
static_assert(!bitsize || bitsize <= bitmax, "bs_t<> error: invalid __bitset_enum_max"); static_assert(bitsize <= bitmax, "bs_t<> error: invalid __bitset_enum_max");
static_assert(bitsize != bitmax || std::is_unsigned<under>::value, "bs_t<> error: invalid __bitset_enum_max (sign bit)");
// Helper function // Helper function
static constexpr under shift(T value) static constexpr under shift(T value)
@ -72,662 +55,328 @@ struct bs_base
return static_cast<under>(1) << static_cast<under>(value); return static_cast<under>(1) << static_cast<under>(value);
} }
friend type& operator +=(type& lhs, type rhs) bs_t() = default;
// Construct from a single bit
constexpr bs_t(T bit)
: m_data(shift(bit))
{ {
reinterpret_cast<under&>(lhs) |= static_cast<under>(rhs);
return lhs;
} }
friend type& operator -=(type& lhs, type rhs) // Test for empty bitset
constexpr explicit operator bool() const
{ {
reinterpret_cast<under&>(lhs) &= ~static_cast<under>(rhs); return m_data != 0;
return lhs;
} }
friend type& operator &=(type& lhs, type rhs) // Extract underlying data
constexpr explicit operator under() const
{ {
reinterpret_cast<under&>(lhs) &= static_cast<under>(rhs); return m_data;
return lhs;
} }
friend type& operator ^=(type& lhs, type rhs) // Copy
constexpr bs_t operator +() const
{ {
reinterpret_cast<under&>(lhs) ^= static_cast<under>(rhs); return *this;
return lhs;
} }
friend type& operator +=(type& lhs, T rhs) constexpr bs_t& operator +=(bs_t rhs)
{ {
reinterpret_cast<under&>(lhs) |= shift(rhs); m_data |= static_cast<under>(rhs);
return lhs; return *this;
} }
friend type& operator -=(type& lhs, T rhs) constexpr bs_t& operator -=(bs_t rhs)
{ {
reinterpret_cast<under&>(lhs) &= ~shift(rhs); m_data &= ~static_cast<under>(rhs);
return lhs; return *this;
} }
friend type& operator &=(type& lhs, T rhs) constexpr bs_t& operator &=(bs_t rhs)
{ {
reinterpret_cast<under&>(lhs) &= shift(rhs); m_data &= static_cast<under>(rhs);
return lhs; return *this;
} }
friend type& operator ^=(type& lhs, T rhs) constexpr bs_t& operator ^=(bs_t rhs)
{ {
reinterpret_cast<under&>(lhs) ^= shift(rhs); m_data ^= static_cast<under>(rhs);
return lhs; return *this;
} }
friend constexpr type operator +(type lhs, type rhs) constexpr bs_t operator +(bs_t rhs) const
{ {
return static_cast<type>(static_cast<under>(lhs) | static_cast<under>(rhs)); bs_t r{};
r.m_data = m_data | rhs.m_data;
return r;
} }
friend constexpr type operator -(type lhs, type rhs) constexpr bs_t operator -(bs_t rhs) const
{ {
return static_cast<type>(static_cast<under>(lhs) & ~static_cast<under>(rhs)); bs_t r{};
r.m_data = m_data & ~rhs.m_data;
return r;
} }
friend constexpr type operator &(type lhs, type rhs) constexpr bs_t operator &(bs_t rhs) const
{ {
return static_cast<type>(static_cast<under>(lhs) & static_cast<under>(rhs)); bs_t r{};
r.m_data = m_data & rhs.m_data;
return r;
} }
friend constexpr type operator ^(type lhs, type rhs) constexpr bs_t operator ^(bs_t rhs) const
{ {
return static_cast<type>(static_cast<under>(lhs) ^ static_cast<under>(rhs)); bs_t r{};
r.m_data = m_data ^ rhs.m_data;
return r;
} }
friend constexpr type operator &(type lhs, T rhs) constexpr bool operator ==(bs_t rhs) const
{ {
return static_cast<type>(static_cast<under>(lhs) & shift(rhs)); return m_data == rhs.m_data;
} }
friend constexpr type operator ^(type lhs, T rhs) constexpr bool operator !=(bs_t rhs) const
{ {
return static_cast<type>(static_cast<under>(lhs) ^ shift(rhs)); return m_data != rhs.m_data;
} }
friend constexpr type operator &(T lhs, type rhs) constexpr bool test(bs_t rhs) const
{ {
return static_cast<type>(shift(lhs) & static_cast<under>(rhs)); return (m_data & rhs.m_data) != 0;
} }
friend constexpr type operator ^(T lhs, type rhs) constexpr bool test_and_set(T bit)
{ {
return static_cast<type>(shift(lhs) ^ static_cast<under>(rhs)); bool r = (m_data & shift(bit)) != 0;
m_data |= shift(bit);
return r;
} }
friend constexpr bool operator ==(T lhs, type rhs) constexpr bool test_and_reset(T bit)
{ {
return shift(lhs) == rhs; bool r = (m_data & shift(bit)) != 0;
m_data &= ~shift(bit);
return r;
} }
friend constexpr bool operator ==(type lhs, T rhs) constexpr bool test_and_complement(T bit)
{ {
return lhs == shift(rhs); bool r = (m_data & shift(bit)) != 0;
} m_data ^= shift(bit);
return r;
friend constexpr bool operator !=(T lhs, type rhs)
{
return shift(lhs) != rhs;
}
friend constexpr bool operator !=(type lhs, T rhs)
{
return lhs != shift(rhs);
}
friend constexpr bool test(type value)
{
return static_cast<under>(value) != 0;
}
friend constexpr bool test(type lhs, type rhs)
{
return (static_cast<under>(lhs) & static_cast<under>(rhs)) != 0;
}
friend constexpr bool test(type lhs, T rhs)
{
return (static_cast<under>(lhs) & shift(rhs)) != 0;
}
friend constexpr bool test(T lhs, type rhs)
{
return (shift(lhs) & static_cast<under>(rhs)) != 0;
}
friend bool test_and_set(type& lhs, type rhs)
{
return test_and_set(reinterpret_cast<under&>(lhs), static_cast<under>(rhs));
}
friend bool test_and_set(type& lhs, T rhs)
{
return test_and_set(reinterpret_cast<under&>(lhs), shift(rhs));
}
friend bool test_and_reset(type& lhs, type rhs)
{
return test_and_reset(reinterpret_cast<under&>(lhs), static_cast<under>(rhs));
}
friend bool test_and_reset(type& lhs, T rhs)
{
return test_and_reset(reinterpret_cast<under&>(lhs), shift(rhs));
}
friend bool test_and_complement(type& lhs, type rhs)
{
return test_and_complement(reinterpret_cast<under&>(lhs), static_cast<under>(rhs));
}
friend bool test_and_complement(type& lhs, T rhs)
{
return test_and_complement(reinterpret_cast<under&>(lhs), shift(rhs));
} }
}; };
// Bitset type for enum class with available bits [0, T::__bitset_enum_max)
template<typename T>
using bs_t = typename bs_base<T>::type;
// Unary '+' operator: promote plain enum value to bitset value // Unary '+' operator: promote plain enum value to bitset value
template <typename T, typename = decltype(T::__bitset_enum_max)> template <typename T, typename = decltype(T::__bitset_enum_max)>
constexpr bs_t<T> operator +(T value) constexpr bs_t<T> operator +(T bit)
{ {
return static_cast<bs_t<T>>(bs_base<T>::shift(value)); return bit;
} }
// Binary '+' operator: bitset union // Binary '+' operator: bitset union
template <typename T, typename = decltype(T::__bitset_enum_max)> template <typename T, typename = decltype(T::__bitset_enum_max)>
constexpr bs_t<T> operator +(T lhs, T rhs) constexpr bs_t<T> operator +(T lhs, T rhs)
{ {
return static_cast<bs_t<T>>(bs_base<T>::shift(lhs) | bs_base<T>::shift(rhs)); return bs_t<T>(lhs) + bs_t<T>(rhs);
}
// Binary '+' operator: bitset union
template<typename T, typename = decltype(T::__bitset_enum_max)>
constexpr bs_t<T> operator +(typename bs_base<T>::type lhs, T rhs)
{
return static_cast<bs_t<T>>(static_cast<typename bs_base<T>::under>(lhs) | bs_base<T>::shift(rhs));
}
// Binary '+' operator: bitset union
template<typename T, typename = decltype(T::__bitset_enum_max)>
constexpr bs_t<T> operator +(T lhs, typename bs_base<T>::type rhs)
{
return static_cast<bs_t<T>>(bs_base<T>::shift(lhs) | static_cast<typename bs_base<T>::under>(rhs));
} }
// Binary '-' operator: bitset difference // Binary '-' operator: bitset difference
template <typename T, typename = decltype(T::__bitset_enum_max)> template <typename T, typename = decltype(T::__bitset_enum_max)>
constexpr bs_t<T> operator -(T lhs, T rhs) constexpr bs_t<T> operator -(T lhs, T rhs)
{ {
return static_cast<bs_t<T>>(bs_base<T>::shift(lhs) & ~bs_base<T>::shift(rhs)); return bs_t<T>(lhs) - bs_t<T>(rhs);
} }
// Binary '-' operator: bitset difference // Binary '&' operator: bitset intersection
template <typename T, typename = decltype(T::__bitset_enum_max)> template <typename T, typename = decltype(T::__bitset_enum_max)>
constexpr bs_t<T> operator -(typename bs_base<T>::type lhs, T rhs) constexpr bs_t<T> operator &(T lhs, T rhs)
{ {
return static_cast<bs_t<T>>(static_cast<typename bs_base<T>::under>(lhs) & ~bs_base<T>::shift(rhs)); return bs_t<T>(lhs) & bs_t<T>(rhs);
} }
// Binary '-' operator: bitset difference // Binary '^' operator: bitset symmetric difference
template <typename T, typename = decltype(T::__bitset_enum_max)> template <typename T, typename = decltype(T::__bitset_enum_max)>
constexpr bs_t<T> operator -(T lhs, typename bs_base<T>::type rhs) constexpr bs_t<T> operator ^(T lhs, T rhs)
{ {
return static_cast<bs_t<T>>(bs_base<T>::shift(lhs) & ~static_cast<typename bs_base<T>::under>(rhs)); return bs_t<T>(lhs) ^ bs_t<T>(rhs);
} }
template<typename BS, typename T> // Atomic bitset specialization with optimized operations
struct atomic_add<BS, T, std::void_t<decltype(T::__bitset_enum_max), std::enable_if_t<std::is_same<BS, bs_t<T>>::value>>>
{
using under = typename bs_base<T>::under;
static inline bs_t<T> op1(bs_t<T>& left, T right)
{
return static_cast<bs_t<T>>(atomic_storage<under>::fetch_or(reinterpret_cast<under&>(left), bs_base<T>::shift(right)));
}
static constexpr auto fetch_op = &op1;
static inline bs_t<T> op2(bs_t<T>& left, T right)
{
return static_cast<bs_t<T>>(atomic_storage<under>::or_fetch(reinterpret_cast<under&>(left), bs_base<T>::shift(right)));
}
static constexpr auto op_fetch = &op2;
static constexpr auto atomic_op = &op2;
};
template<typename BS, typename T>
struct atomic_sub<BS, T, std::void_t<decltype(T::__bitset_enum_max), std::enable_if_t<std::is_same<BS, bs_t<T>>::value>>>
{
using under = typename bs_base<T>::under;
static inline bs_t<T> op1(bs_t<T>& left, T right)
{
return static_cast<bs_t<T>>(atomic_storage<under>::fetch_and(reinterpret_cast<under&>(left), ~bs_base<T>::shift(right)));
}
static constexpr auto fetch_op = &op1;
static inline bs_t<T> op2(bs_t<T>& left, T right)
{
return static_cast<bs_t<T>>(atomic_storage<under>::and_fetch(reinterpret_cast<under&>(left), ~bs_base<T>::shift(right)));
}
static constexpr auto op_fetch = &op2;
static constexpr auto atomic_op = &op2;
};
template<typename BS, typename T>
struct atomic_and<BS, T, std::void_t<decltype(T::__bitset_enum_max), std::enable_if_t<std::is_same<BS, bs_t<T>>::value>>>
{
using under = typename bs_base<T>::under;
static inline bs_t<T> op1(bs_t<T>& left, T right)
{
return static_cast<bs_t<T>>(atomic_storage<under>::fetch_and(reinterpret_cast<under&>(left), bs_base<T>::shift(right)));
}
static constexpr auto fetch_op = &op1;
static inline bs_t<T> op2(bs_t<T>& left, T right)
{
return static_cast<bs_t<T>>(atomic_storage<under>::and_fetch(reinterpret_cast<under&>(left), bs_base<T>::shift(right)));
}
static constexpr auto op_fetch = &op2;
static constexpr auto atomic_op = &op2;
};
template<typename BS, typename T>
struct atomic_xor<BS, T, std::void_t<decltype(T::__bitset_enum_max), std::enable_if_t<std::is_same<BS, bs_t<T>>::value>>>
{
using under = typename bs_base<T>::under;
static inline bs_t<T> op1(bs_t<T>& left, T right)
{
return static_cast<bs_t<T>>(atomic_storage<under>::fetch_xor(reinterpret_cast<under&>(left), bs_base<T>::shift(right)));
}
static constexpr auto fetch_op = &op1;
static inline bs_t<T> op2(bs_t<T>& left, T right)
{
return static_cast<bs_t<T>>(atomic_storage<under>::xor_fetch(reinterpret_cast<under&>(left), bs_base<T>::shift(right)));
}
static constexpr auto op_fetch = &op2;
static constexpr auto atomic_op = &op2;
};
template <typename T> template <typename T>
struct atomic_add<T, T, std::enable_if_t<sizeof(T::__bitset_set_type) != 0 && std::is_enum<T>::value>> class atomic_bs_t : public atomic_t<::bs_t<T>> // TODO: true specialization
{ {
using under = std::underlying_type_t<T>; // Corresponding bitset type
using bs_t = ::bs_t<T>;
static inline T op1(T& left, T right) // Base class
using base = atomic_t<::bs_t<T>>;
// Use underlying m_data
using base::m_data;
public:
// Underlying type
using under = typename bs_t::under;
atomic_bs_t() = default;
atomic_bs_t(const atomic_bs_t&) = delete;
atomic_bs_t& operator =(const atomic_bs_t&) = delete;
explicit constexpr atomic_bs_t(bs_t value)
: base(value)
{ {
return static_cast<T>(atomic_storage<under>::fetch_or(reinterpret_cast<under&>(left), static_cast<under>(right)));
} }
static constexpr auto fetch_op = &op1; explicit constexpr atomic_bs_t(T bit)
: base(bit)
static inline T op2(T& left, T right)
{ {
return static_cast<T>(atomic_storage<under>::or_fetch(reinterpret_cast<under&>(left), static_cast<under>(right)));
} }
static constexpr auto op_fetch = &op2; explicit operator bool() const
static constexpr auto atomic_op = &op2; {
}; return static_cast<bool>(base::load());
}
template<typename T>
struct atomic_sub<T, T, std::enable_if_t<sizeof(T::__bitset_set_type) != 0 && std::is_enum<T>::value>> explicit operator under() const
{ {
using under = std::underlying_type_t<T>; return static_cast<under>(base::load());
}
static inline T op1(T& left, T right)
{ bs_t fetch_add(const bs_t& rhs)
return static_cast<T>(atomic_storage<under>::fetch_and(reinterpret_cast<under&>(left), ~static_cast<under>(right))); {
} bs_t r;
r.m_data = atomic_storage<under>::fetch_or(m_data.m_data, rhs.m_data);
static constexpr auto fetch_op = &op1; return r;
}
static inline T op2(T& left, T right)
{ bs_t add_fetch(const bs_t& rhs)
return static_cast<T>(atomic_storage<under>::and_fetch(reinterpret_cast<under&>(left), ~static_cast<under>(right))); {
} bs_t r;
r.m_data = atomic_storage<under>::or_fetch(m_data.m_data, rhs.m_data);
static constexpr auto op_fetch = &op2; return r;
static constexpr auto atomic_op = &op2; }
};
bs_t operator +=(const bs_t& rhs)
template<typename T> {
struct atomic_and<T, T, std::enable_if_t<sizeof(T::__bitset_set_type) != 0 && std::is_enum<T>::value>> return add_fetch(rhs);
{ }
using under = std::underlying_type_t<T>;
bs_t fetch_sub(const bs_t& rhs)
static inline T op1(T& left, T right) {
{ bs_t r;
return static_cast<T>(atomic_storage<under>::fetch_and(reinterpret_cast<under&>(left), static_cast<under>(right))); r.m_data = atomic_storage<under>::fetch_and(m_data.m_data, ~rhs.m_data);
} return r;
}
static constexpr auto fetch_op = &op1;
bs_t sub_fetch(const bs_t& rhs)
static inline T op2(T& left, T right) {
{ bs_t r;
return static_cast<T>(atomic_storage<under>::and_fetch(reinterpret_cast<under&>(left), static_cast<under>(right))); r.m_data = atomic_storage<under>::and_fetch(m_data.m_data, ~rhs.m_data);
} return r;
}
static constexpr auto op_fetch = &op2;
static constexpr auto atomic_op = &op2; bs_t operator -=(const bs_t& rhs)
}; {
return sub_fetch(rhs);
template<typename T> }
struct atomic_xor<T, T, std::enable_if_t<sizeof(T::__bitset_set_type) != 0 && std::is_enum<T>::value>>
{ bs_t fetch_and(const bs_t& rhs)
using under = std::underlying_type_t<T>; {
bs_t r;
static inline T op1(T& left, T right) r.m_data = atomic_storage<under>::fetch_and(m_data.m_data, rhs.m_data);
{ return r;
return static_cast<T>(atomic_storage<under>::fetch_xor(reinterpret_cast<under&>(left), static_cast<under>(right))); }
}
bs_t and_fetch(const bs_t& rhs)
static constexpr auto fetch_op = &op1; {
bs_t r;
static inline T op2(T& left, T right) r.m_data = atomic_storage<under>::and_fetch(m_data.m_data, rhs.m_data);
{ return r;
return static_cast<T>(atomic_storage<under>::xor_fetch(reinterpret_cast<under&>(left), static_cast<under>(right))); }
}
bs_t operator &=(const bs_t& rhs)
static constexpr auto op_fetch = &op2; {
static constexpr auto atomic_op = &op2; return and_fetch(rhs);
}; }
template<typename BS, typename T> bs_t fetch_xor(const bs_t& rhs)
struct atomic_test_and_set<BS, T, std::void_t<decltype(T::__bitset_enum_max), std::enable_if_t<std::is_same<BS, bs_t<T>>::value>>> {
{ bs_t r;
using under = typename bs_base<T>::under; r.m_data = atomic_storage<under>::fetch_xor(m_data.m_data, rhs.m_data);
return r;
static inline bool _op(bs_t<T>& left, T value) }
{
return atomic_storage<under>::bts(reinterpret_cast<under&>(left), static_cast<uint>(static_cast<under>(value))); bs_t xor_fetch(const bs_t& rhs)
} {
bs_t r;
static constexpr auto fetch_op = &_op; r.m_data = atomic_storage<under>::xor_fetch(m_data.m_data, rhs.m_data);
static constexpr auto op_fetch = &_op; return r;
static constexpr auto atomic_op = &_op; }
};
bs_t operator ^=(const bs_t& rhs)
template<typename BS, typename T> {
struct atomic_test_and_reset<BS, T, std::void_t<decltype(T::__bitset_enum_max), std::enable_if_t<std::is_same<BS, bs_t<T>>::value>>> return xor_fetch(rhs);
{ }
using under = typename bs_base<T>::under;
auto fetch_or(const bs_t&) = delete;
static inline bool _op(bs_t<T>& left, T value) auto or_fetch(const bs_t&) = delete;
{ auto operator |=(const bs_t&) = delete;
return atomic_storage<under>::btr(reinterpret_cast<under&>(left), static_cast<uint>(static_cast<under>(value))); auto operator ++() = delete;
} auto operator --() = delete;
auto operator ++(int) = delete;
static constexpr auto fetch_op = &_op; auto operator --(int) = delete;
static constexpr auto op_fetch = &_op;
static constexpr auto atomic_op = &_op; bs_t operator +(bs_t rhs) const
}; {
bs_t r{};
template<typename BS, typename T> r.m_data = base::load().m_data | rhs.m_data;
struct atomic_test_and_complement<BS, T, std::void_t<decltype(T::__bitset_enum_max), std::enable_if_t<std::is_same<BS, bs_t<T>>::value>>> return r;
{ }
using under = typename bs_base<T>::under;
bs_t operator -(bs_t rhs) const
static inline bool _op(bs_t<T>& left, T value) {
{ bs_t r{};
return atomic_storage<under>::btc(reinterpret_cast<under&>(left), static_cast<uint>(static_cast<under>(value))); r.m_data = base::load().m_data & ~rhs.m_data;
} return r;
}
static constexpr auto fetch_op = &_op;
static constexpr auto op_fetch = &_op; bs_t operator &(bs_t rhs) const
static constexpr auto atomic_op = &_op; {
}; bs_t r{};
r.m_data = base::load().m_data & rhs.m_data;
template<typename T> return r;
struct atomic_test_and_set<T, T, std::enable_if_t<sizeof(T::__bitset_set_type) != 0 && std::is_enum<T>::value>> }
{
using under = std::underlying_type_t<T>; bs_t operator ^(bs_t rhs) const
{
static inline bool _op(T& left, T value) bs_t r{};
{ r.m_data = base::load().m_data ^ rhs.m_data;
return atomic_storage<under>::test_and_set(reinterpret_cast<under&>(left), static_cast<under>(value)); return r;
} }
static constexpr auto fetch_op = &_op; bool test(const bs_t& rhs)
static constexpr auto op_fetch = &_op; {
static constexpr auto atomic_op = &_op; return base::load().test(rhs);
}; }
template<typename T> bool test_and_set(T rhs)
struct atomic_test_and_reset<T, T, std::enable_if_t<sizeof(T::__bitset_set_type) != 0 && std::is_enum<T>::value>> {
{ return atomic_storage<under>::bts(m_data.m_data, static_cast<uint>(static_cast<under>(rhs)));
using under = std::underlying_type_t<T>; }
static inline bool _op(T& left, T value) bool test_and_reset(T rhs)
{ {
return atomic_storage<under>::test_and_reset(reinterpret_cast<under&>(left), static_cast<under>(value)); return atomic_storage<under>::btr(m_data.m_data, static_cast<uint>(static_cast<under>(rhs)));
} }
static constexpr auto fetch_op = &_op; bool test_and_complement(T rhs)
static constexpr auto op_fetch = &_op; {
static constexpr auto atomic_op = &_op; return atomic_storage<under>::btc(m_data.m_data, static_cast<uint>(static_cast<under>(rhs)));
}; }
template<typename T>
struct atomic_test_and_complement<T, T, std::enable_if_t<sizeof(T::__bitset_set_type) != 0 && std::is_enum<T>::value>>
{
using under = std::underlying_type_t<T>;
static inline bool _op(T& left, T value)
{
return atomic_storage<under>::test_and_complement(reinterpret_cast<under&>(left), static_cast<under>(value));
}
static constexpr auto fetch_op = &_op;
static constexpr auto op_fetch = &_op;
static constexpr auto atomic_op = &_op;
};
// Binary '|' operator: bitwise OR
template<typename T, typename = decltype(T::__bitwise_ops)>
constexpr T operator |(T lhs, T rhs)
{
return static_cast<T>(std::underlying_type_t<T>(lhs) | std::underlying_type_t<T>(rhs));
}
// Binary '&' operator: bitwise AND
template<typename T, typename = decltype(T::__bitwise_ops)>
constexpr T operator &(T lhs, T rhs)
{
return static_cast<T>(std::underlying_type_t<T>(lhs) & std::underlying_type_t<T>(rhs));
}
// Binary '^' operator: bitwise XOR
template<typename T, typename = decltype(T::__bitwise_ops)>
constexpr T operator ^(T lhs, T rhs)
{
return static_cast<T>(std::underlying_type_t<T>(lhs) ^ std::underlying_type_t<T>(rhs));
}
// Unary '~' operator: bitwise NEG
template<typename T, typename = decltype(T::__bitwise_ops)>
constexpr T operator ~(T value)
{
return static_cast<T>(~std::underlying_type_t<T>(value));
}
// Bitwise OR assignment
template<typename T, typename = decltype(T::__bitwise_ops)>
inline T& operator |=(T& lhs, T rhs)
{
reinterpret_cast<std::underlying_type_t<T>&>(lhs) |= std::underlying_type_t<T>(rhs);
return lhs;
}
// Bitwise AND assignment
template<typename T, typename = decltype(T::__bitwise_ops)>
inline T& operator &=(T& lhs, T rhs)
{
reinterpret_cast<std::underlying_type_t<T>&>(lhs) &= std::underlying_type_t<T>(rhs);
return lhs;
}
// Bitwise XOR assignment
template<typename T, typename = decltype(T::__bitwise_ops)>
inline T& operator ^=(T& lhs, T rhs)
{
reinterpret_cast<std::underlying_type_t<T>&>(lhs) ^= std::underlying_type_t<T>(rhs);
return lhs;
}
template<typename T, typename = decltype(T::__bitwise_ops)>
constexpr bool test(T value)
{
return std::underlying_type_t<T>(value) != 0;
}
template<typename T, typename = decltype(T::__bitwise_ops)>
constexpr bool test(T lhs, T rhs)
{
return (std::underlying_type_t<T>(lhs) & std::underlying_type_t<T>(rhs)) != 0;
}
template<typename T, typename = decltype(T::__bitwise_ops)>
inline bool test_and_set(T& lhs, T rhs)
{
return test_and_set(reinterpret_cast<std::underlying_type_t<T>&>(lhs), std::underlying_type_t<T>(rhs));
}
template<typename T, typename = decltype(T::__bitwise_ops)>
inline bool test_and_reset(T& lhs, T rhs)
{
return test_and_reset(reinterpret_cast<std::underlying_type_t<T>&>(lhs), std::underlying_type_t<T>(rhs));
}
template<typename T, typename = decltype(T::__bitwise_ops)>
inline bool test_and_complement(T& lhs, T rhs)
{
return test_and_complement(reinterpret_cast<std::underlying_type_t<T>&>(lhs), std::underlying_type_t<T>(rhs));
}
template<typename T>
struct atomic_or<T, T, std::enable_if_t<sizeof(T::__bitwise_ops) != 0 && std::is_enum<T>::value>>
{
using under = std::underlying_type_t<T>;
static inline T op1(T& left, T right)
{
return static_cast<T>(atomic_storage<under>::fetch_or(reinterpret_cast<under&>(left), static_cast<under>(right)));
}
static constexpr auto fetch_op = &op1;
static inline T op2(T& left, T right)
{
return static_cast<T>(atomic_storage<under>::or_fetch(reinterpret_cast<under&>(left), static_cast<under>(right)));
}
static constexpr auto op_fetch = &op2;
static constexpr auto atomic_op = &op2;
};
template<typename T>
struct atomic_and<T, T, std::enable_if_t<sizeof(T::__bitwise_ops) != 0 && std::is_enum<T>::value>>
{
using under = std::underlying_type_t<T>;
static inline T op1(T& left, T right)
{
return static_cast<T>(atomic_storage<under>::fetch_and(reinterpret_cast<under&>(left), static_cast<under>(right)));
}
static constexpr auto fetch_op = &op1;
static inline T op2(T& left, T right)
{
return static_cast<T>(atomic_storage<under>::and_fetch(reinterpret_cast<under&>(left), static_cast<under>(right)));
}
static constexpr auto op_fetch = &op2;
static constexpr auto atomic_op = &op2;
};
template<typename T>
struct atomic_xor<T, T, std::enable_if_t<sizeof(T::__bitwise_ops) != 0 && std::is_enum<T>::value>>
{
using under = std::underlying_type_t<T>;
static inline T op1(T& left, T right)
{
return static_cast<T>(atomic_storage<under>::fetch_xor(reinterpret_cast<under&>(left), static_cast<under>(right)));
}
static constexpr auto fetch_op = &op1;
static inline T op2(T& left, T right)
{
return static_cast<T>(atomic_storage<under>::xor_fetch(reinterpret_cast<under&>(left), static_cast<under>(right)));
}
static constexpr auto op_fetch = &op2;
static constexpr auto atomic_op = &op2;
};
template<typename T>
struct atomic_test_and_set<T, T, std::enable_if_t<sizeof(T::__bitwise_ops) != 0 && std::is_enum<T>::value>>
{
using under = std::underlying_type_t<T>;
static inline bool _op(T& left, T value)
{
return atomic_storage<under>::test_and_set(reinterpret_cast<under&>(left), static_cast<under>(value));
}
static constexpr auto fetch_op = &_op;
static constexpr auto op_fetch = &_op;
static constexpr auto atomic_op = &_op;
};
template<typename T>
struct atomic_test_and_reset<T, T, std::enable_if_t<sizeof(T::__bitwise_ops) != 0 && std::is_enum<T>::value>>
{
using under = std::underlying_type_t<T>;
static inline bool _op(T& left, T value)
{
return atomic_storage<under>::test_and_reset(reinterpret_cast<under&>(left), static_cast<under>(value));
}
static constexpr auto fetch_op = &_op;
static constexpr auto op_fetch = &_op;
static constexpr auto atomic_op = &_op;
};
template<typename T>
struct atomic_test_and_complement<T, T, std::enable_if_t<sizeof(T::__bitwise_ops) != 0 && std::is_enum<T>::value>>
{
using under = std::underlying_type_t<T>;
static inline bool _op(T& left, T value)
{
return atomic_storage<under>::test_and_complement(reinterpret_cast<under&>(left), static_cast<under>(value));
}
static constexpr auto fetch_op = &_op;
static constexpr auto op_fetch = &_op;
static constexpr auto atomic_op = &_op;
}; };
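
A brief sketch of how the new atomic_bs_t<> is intended to be used (the enum below is hypothetical; the members shown are the ones added above, which map to optimized atomic_storage operations such as bts/btr instead of generic CAS loops):

#include "Utilities/BitSet.h" // assumed path of the header above

enum class job_flag : u32
{
	queued,
	running,
	finished,

	__bitset_enum_max
};

atomic_bs_t<job_flag> g_state{+job_flag::queued};

void worker()
{
	g_state += job_flag::running; // atomic bitset union (or_fetch under the hood)

	if (g_state.test_and_reset(job_flag::queued)) // atomic btr; returns the previous value of the bit
	{
		// this thread consumed the 'queued' flag
	}

	if (g_state & job_flag::finished) // atomic load, then tested via the safe bool conversion
	{
		// ...
	}
}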


@ -707,42 +707,6 @@ constexpr u32 size32(const T (&)[Size], const char* msg = nullptr)
return static_cast<u32>(Size);
}
template <typename T1, typename = std::enable_if_t<std::is_integral<T1>::value>>
constexpr bool test(const T1& value)
{
return value != 0;
}
template <typename T1, typename T2, typename = std::enable_if_t<std::is_integral<T1>::value && std::is_integral<T2>::value>>
constexpr bool test(const T1& lhs, const T2& rhs)
{
return (lhs & rhs) != 0;
}
template <typename T, typename T2, typename = std::enable_if_t<std::is_integral<T>::value && std::is_integral<T2>::value>>
inline bool test_and_set(T& lhs, const T2& rhs)
{
const bool result = (lhs & rhs) != 0;
lhs |= rhs;
return result;
}
template <typename T, typename T2, typename = std::enable_if_t<std::is_integral<T>::value && std::is_integral<T2>::value>>
inline bool test_and_reset(T& lhs, const T2& rhs)
{
const bool result = (lhs & rhs) != 0;
lhs &= ~rhs;
return result;
}
template <typename T, typename T2, typename = std::enable_if_t<std::is_integral<T>::value && std::is_integral<T2>::value>>
inline bool test_and_complement(T& lhs, const T2& rhs)
{
const bool result = (lhs & rhs) != 0;
lhs ^= rhs;
return result;
}
// Simplified hash algorithm for pointers. May be used in std::unordered_(map|set).
template <typename T, std::size_t Align = alignof(T)>
struct pointer_hash


@ -52,10 +52,10 @@ void cpu_thread::on_task()
g_tls_current_cpu_thread = this;
// Check thread status
-while (!test(state, cpu_flag::exit + cpu_flag::dbg_global_stop))
+while (!(state & (cpu_flag::exit + cpu_flag::dbg_global_stop)))
{
// Check stop status
-if (!test(state & cpu_flag::stop))
+if (!(state & cpu_flag::stop))
{
try
{
@ -100,7 +100,8 @@ cpu_thread::cpu_thread(u32 id)
bool cpu_thread::check_state()
{
#ifdef WITH_GDB_DEBUGGER
-if (test(state, cpu_flag::dbg_pause)) {
+if (state & cpu_flag::dbg_pause)
+{
fxm::get<GDBDebugServer>()->pause_from(this);
}
#endif
@ -110,7 +111,7 @@ bool cpu_thread::check_state()
while (true)
{
-if (test(state, cpu_flag::memory) && state.test_and_reset(cpu_flag::memory))
+if (state & cpu_flag::memory && state.test_and_reset(cpu_flag::memory))
{
cpu_flag_memory = true;
@ -121,17 +122,17 @@ bool cpu_thread::check_state()
}
}
-if (test(state, cpu_flag::exit + cpu_flag::dbg_global_stop))
+if (state & cpu_flag::exit + cpu_flag::dbg_global_stop)
{
return true;
}
-if (test(state & cpu_flag::signal) && state.test_and_reset(cpu_flag::signal))
+if (state & cpu_flag::signal && state.test_and_reset(cpu_flag::signal))
{
cpu_sleep_called = false;
}
-if (!test(state, cpu_state_pause))
+if (!(state & cpu_state_pause))
{
if (cpu_flag_memory)
{
@ -140,7 +141,7 @@ bool cpu_thread::check_state()
break;
}
-else if (!cpu_sleep_called && test(state, cpu_flag::suspend))
+else if (!cpu_sleep_called && state & cpu_flag::suspend)
{
cpu_sleep();
cpu_sleep_called = true;
@ -152,12 +153,12 @@ bool cpu_thread::check_state()
const auto state_ = state.load();
-if (test(state_, cpu_flag::ret + cpu_flag::stop))
+if (state_ & (cpu_flag::ret + cpu_flag::stop))
{
return true;
}
-if (test(state_, cpu_flag::dbg_step))
+if (state_ & cpu_flag::dbg_step)
{
state += cpu_flag::dbg_pause;
state -= cpu_flag::dbg_step;
@ -168,7 +169,7 @@ bool cpu_thread::check_state()
void cpu_thread::test_state()
{
-if (UNLIKELY(test(state)))
+if (UNLIKELY(state))
{
if (check_state())
{
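
These rewritten checks rely on operator '&' between bitsets returning the intersection, so testing the result as bool reads as "any of these flags is set", which is what the removed test() helper used to express. A small sketch of the two idioms (flag names taken from the code above):

// Any-of test: true if either exit or dbg_global_stop is currently set
if (state & (cpu_flag::exit + cpu_flag::dbg_global_stop))
{
	// leave the execution loop
}

// Consume a single flag: atomically clear it and act only if it was previously set
if (state.test_and_reset(cpu_flag::signal))
{
	// ...
}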


@ -37,7 +37,7 @@ public:
cpu_thread(u32 id);
// Public thread state
-atomic_t<bs_t<cpu_flag>> state{+cpu_flag::stop};
+atomic_bs_t<cpu_flag> state{+cpu_flag::stop};
// Process thread state, return true if the checker must return
bool check_state();


@ -148,7 +148,7 @@ error_code sys_lwcond_signal_all(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
}
ppu.test_state();
-lwmutex->all_info += res;
+lwmutex->all_info += +res;
return CELL_OK;
}
@ -173,7 +173,7 @@ error_code sys_lwcond_signal_all(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
if (res > 0)
{
-lwmutex->all_info += res;
+lwmutex->all_info += +res;
res = CELL_OK;
}


@ -1051,7 +1051,7 @@ void ppu_module::analyse(u32 lib_toc, u32 entry)
}
// Get function limit
-const u32 func_end = std::min<u32>(get_limit(func.addr + 1), test(func.attr, ppu_attr::known_size) ? func.addr + func.size : end);
+const u32 func_end = std::min<u32>(get_limit(func.addr + 1), func.attr & ppu_attr::known_size ? func.addr + func.size : end);
// Block analysis workload
std::vector<std::reference_wrapper<std::pair<const u32, u32>>> block_queue;
@ -1084,7 +1084,7 @@ void ppu_module::analyse(u32 lib_toc, u32 entry)
}
// TODO: lower priority?
-if (test(func.attr, ppu_attr::no_size))
+if (func.attr & ppu_attr::no_size)
{
// Get next function
const auto _next = fmap.lower_bound(func.blocks.crbegin()->first + 1);
@ -1135,12 +1135,12 @@ void ppu_module::analyse(u32 lib_toc, u32 entry)
}
// Add next block if necessary
-if ((is_call && !test(pfunc->attr, ppu_attr::no_return)) || (type == ppu_itype::BC && (op.bo & 0x14) != 0x14))
+if ((is_call && !(pfunc->attr & ppu_attr::no_return)) || (type == ppu_itype::BC && (op.bo & 0x14) != 0x14))
{
add_block(_ptr.addr());
}
-if (is_call && test(pfunc->attr, ppu_attr::no_return))
+if (is_call && pfunc->attr & ppu_attr::no_return)
{
// Nothing
}
@ -1201,7 +1201,7 @@ void ppu_module::analyse(u32 lib_toc, u32 entry)
if (jt_addr != jt_end && _ptr.addr() == jt_addr)
{
// Acknowledge jumptable detection failure
-if (!test(func.attr, ppu_attr::no_size))
+if (!(func.attr & ppu_attr::no_size))
{
LOG_WARNING(PPU, "[0x%x] Jump table not found! 0x%x-0x%x", func.addr, jt_addr, jt_end);
}
@ -1235,7 +1235,7 @@ void ppu_module::analyse(u32 lib_toc, u32 entry)
block.second = _ptr.addr() - block.first;
break;
}
-else if (type == ppu_itype::STDU && test(func.attr, ppu_attr::no_size) && (op.opcode == *_ptr || *_ptr == ppu_instructions::BLR()))
+else if (type == ppu_itype::STDU && func.attr & ppu_attr::no_size && (op.opcode == *_ptr || *_ptr == ppu_instructions::BLR()))
{
// Hack
LOG_SUCCESS(PPU, "[0x%x] Instruction repetition: 0x%08x", iaddr, op.opcode);
@ -1254,7 +1254,7 @@ void ppu_module::analyse(u32 lib_toc, u32 entry)
}
// Finalization: determine function size
-if (!test(func.attr, ppu_attr::known_size))
+if (!(func.attr & ppu_attr::known_size))
{
const auto last = func.blocks.crbegin();
@ -1320,7 +1320,7 @@ void ppu_module::analyse(u32 lib_toc, u32 entry)
}
// Finalization: decrease known function size (TODO)
-if (test(func.attr, ppu_attr::known_size))
+if (func.attr & ppu_attr::known_size)
{
const auto last = func.blocks.crbegin();


@ -1167,7 +1167,7 @@ void ppu_load_exec(const ppu_exec_object& elf)
if (info.size < sizeof(process_param_t))
{
-LOG_WARNING(LOADER, "Bad process_param size! [0x%x : 0x%x]", info.size, SIZE_32(process_param_t));
+LOG_WARNING(LOADER, "Bad process_param size! [0x%x : 0x%x]", info.size, sizeof(process_param_t));
}
if (info.magic != 0x13bcc5f6)

View file

@@ -607,7 +607,7 @@ void ppu_thread::exec_task()
{
if (g_cfg.core.ppu_decoder == ppu_decoder_type::llvm)
{
- while (!test(state, cpu_flag::ret + cpu_flag::exit + cpu_flag::stop + cpu_flag::dbg_global_stop))
+ while (!(state & (cpu_flag::ret + cpu_flag::exit + cpu_flag::stop + cpu_flag::dbg_global_stop)))
{
reinterpret_cast<ppu_function_t>(static_cast<std::uintptr_t>(ppu_ref(cia)))(*this);
}
@@ -625,7 +625,7 @@ void ppu_thread::exec_task()
while (true)
{
- if (UNLIKELY(test(state)))
+ if (UNLIKELY(state))
{
if (check_state()) return;
@@ -678,7 +678,7 @@ void ppu_thread::exec_task()
func2 = func4;
func3 = func5;
- if (UNLIKELY(test(state)))
+ if (UNLIKELY(state))
{
break;
}
@@ -763,9 +763,9 @@ cmd64 ppu_thread::cmd_wait()
{
while (true)
{
- if (UNLIKELY(test(state)))
+ if (UNLIKELY(state))
{
- if (test(state, cpu_flag::stop + cpu_flag::exit))
+ if (state & (cpu_flag::stop + cpu_flag::exit))
{
return cmd64{};
}

View file

@@ -1183,7 +1183,7 @@ static void check_state_ret(SPUThread& _spu, void*, u8*)
static void check_state(SPUThread* _spu, spu_function_t _ret)
{
- if (test(_spu->state) && _spu->check_state())
+ if (_spu->state && _spu->check_state())
{
_ret = &check_state_ret;
}

View file

@@ -490,7 +490,7 @@ std::vector<u32> spu_recompiler_base::block(const be_t<u32>* ls, u32 entry_point
}
}
- if (test(af, vf::is_const))
+ if (af & vf::is_const)
{
const u32 target = spu_branch_target(av);
@@ -858,7 +858,7 @@ std::vector<u32> spu_recompiler_base::block(const be_t<u32>* ls, u32 entry_point
case spu_itype::HBR:
{
hbr_loc = spu_branch_target(pos, op.roh << 7 | op.rt);
- hbr_tg = test(vflags[op.ra], vf::is_const) && !op.c ? values[op.ra] & 0x3fffc : -1;
+ hbr_tg = vflags[op.ra] & vf::is_const && !op.c ? values[op.ra] & 0x3fffc : -1;
break;
}
@@ -1017,7 +1017,7 @@ std::vector<u32> spu_recompiler_base::block(const be_t<u32>* ls, u32 entry_point
{
const u32 r2 = op.ra == 1 ? +op.rb : +op.ra;
- if (test(vflags[r2], vf::is_const) && (values[r2] % 16) == 0)
+ if (vflags[r2] & vf::is_const && (values[r2] % 16) == 0)
{
break;
}

View file

@@ -521,7 +521,7 @@ void SPUThread::cpu_task()
if (jit)
{
- while (LIKELY(!test(state) || !check_state()))
+ while (LIKELY(!state || !check_state()))
{
jit_dispatcher[pc / 4](*this, vm::_ptr<u8>(offset), nullptr);
}
@@ -547,7 +547,7 @@ void SPUThread::cpu_task()
while (true)
{
- if (UNLIKELY(test(state)))
+ if (UNLIKELY(state))
{
if (check_state()) return;
@@ -594,7 +594,7 @@ void SPUThread::cpu_task()
func2 = func4;
func3 = func5;
- if (UNLIKELY(test(state)))
+ if (UNLIKELY(state))
{
break;
}
@@ -1093,7 +1093,7 @@ void SPUThread::do_mfc(bool wait)
if (args.cmd & MFC_LIST_MASK)
{
- if (!test(ch_stall_mask, mask))
+ if (!(ch_stall_mask & mask))
{
if (do_list_transfer(args))
{
@@ -1160,7 +1160,7 @@ bool SPUThread::process_mfc_cmd(spu_mfc_cmd args)
// Stall infinitely if MFC queue is full
while (UNLIKELY(mfc_size >= 16))
{
- if (test(state, cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
return false;
}
@@ -1193,7 +1193,7 @@ bool SPUThread::process_mfc_cmd(spu_mfc_cmd args)
while (rdata == data && vm::reservation_acquire(raddr, 128) == rtime)
{
- if (test(state, cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
break;
}
@@ -1400,7 +1400,7 @@ bool SPUThread::process_mfc_cmd(spu_mfc_cmd args)
{
if (LIKELY(args.size <= 0x4000))
{
- if (LIKELY(do_dma_check(args) && !test(ch_stall_mask, 1u << args.tag)))
+ if (LIKELY(do_dma_check(args) && !(ch_stall_mask & 1u << args.tag)))
{
if (LIKELY(do_list_transfer(args)))
{
@@ -1558,7 +1558,7 @@ s64 SPUThread::get_ch_value(u32 ch)
while (!channel.try_pop(out))
{
- if (test(state, cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
return -1;
}
@@ -1596,7 +1596,7 @@ s64 SPUThread::get_ch_value(u32 ch)
return out;
}
- if (test(state & cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
return -1;
}
@@ -1700,7 +1700,7 @@ s64 SPUThread::get_ch_value(u32 ch)
while (res = get_events(), !res)
{
- if (test(state, cpu_flag::stop + cpu_flag::dbg_global_stop))
+ if (state & (cpu_flag::stop + cpu_flag::dbg_global_stop))
{
return -1;
}
@@ -1713,7 +1713,7 @@ s64 SPUThread::get_ch_value(u32 ch)
while (res = get_events(true), !res)
{
- if (test(state & cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
return -1;
}
@@ -1753,7 +1753,7 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
while (!ch_out_intr_mbox.try_push(value))
{
- if (test(state & cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
return false;
}
@@ -1899,7 +1899,7 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
while (!ch_out_mbox.try_push(value))
{
- if (test(state & cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
return false;
}
@@ -2005,8 +2005,11 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
case MFC_WrListStallAck:
{
// Reset stall status for specified tag
- if (::test_and_reset(ch_stall_mask, 1u << value))
+ const u32 tag_mask = 1u << value;
+ if (ch_stall_mask & tag_mask)
{
+ ch_stall_mask &= ~tag_mask;
do_mfc(true);
}
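With the global ::test_and_reset() helper removed, the MFC_WrListStallAck handler above now tests the tag bit and clears it in two separate statements. If an equivalent single read-modify-write were wanted on an atomic mask, a fetch_and based form could be used instead; this is a hedged sketch on std::atomic with assumed names, not the SPUThread code:

#include <atomic>
#include <cstdint>

std::atomic<std::uint32_t> ch_stall_mask{0};

// Clear the tag bit and report whether it was set, in one atomic RMW.
bool test_and_reset_tag(std::uint32_t tag)
{
	const std::uint32_t tag_mask = 1u << tag;
	return (ch_stall_mask.fetch_and(~tag_mask) & tag_mask) != 0;
}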
@@ -2085,7 +2088,7 @@ bool SPUThread::stop_and_signal(u32 code)
// HACK: wait for executable code
while (!_ref<u32>(pc))
{
- if (test(state & cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
return false;
}
@@ -2141,7 +2144,7 @@ bool SPUThread::stop_and_signal(u32 code)
// Check group status, wait if necessary
while (group->run_state >= SPU_THREAD_GROUP_STATUS_WAITING && group->run_state <= SPU_THREAD_GROUP_STATUS_SUSPENDED)
{
- if (test(state & cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
return false;
}
@@ -2210,7 +2213,7 @@ bool SPUThread::stop_and_signal(u32 code)
while (true)
{
- if (test(state & cpu_flag::stop))
+ if (state & cpu_flag::stop)
{
return false;
}

View file

@@ -1014,13 +1014,13 @@ void lv2_obj::sleep_timeout(named_thread& thread, u64 timeout)
auto state = ppu->state.fetch_op([&](auto& val)
{
- if (!test(val, cpu_flag::signal))
+ if (!(val & cpu_flag::signal))
{
val += cpu_flag::suspend;
}
});
- if (test(state, cpu_flag::signal))
+ if (state & cpu_flag::signal)
{
LOG_TRACE(PPU, "sleep() failed (signaled)");
return;
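The sleep path above relies on atomic_t<>::fetch_op: the functor mutates a copy of the stored value and the previous bit-set is returned, so the caller can test it afterwards with operator&. A rough sketch of that contract on std::atomic, written as an approximation for illustration (the real atomic_t<> is not shown in this hunk):

#include <atomic>
#include <cstdint>

// Apply func to the stored value in a CAS loop; return the value seen before the update.
template <typename T, typename F>
T fetch_op(std::atomic<T>& var, F&& func)
{
	T old = var.load();
	for (;;)
	{
		T next = old;
		func(next);
		if (var.compare_exchange_weak(old, next))
			return old;
	}
}

// Hypothetical flag bits standing in for cpu_flag::signal and cpu_flag::suspend.
enum : std::uint32_t { signal_bit = 1u << 0, suspend_bit = 1u << 1 };

bool try_suspend(std::atomic<std::uint32_t>& state)
{
	const std::uint32_t prev = fetch_op(state, [](std::uint32_t& val)
	{
		if (!(val & signal_bit))
			val |= suspend_bit; // only suspend when not already signaled
	});

	return !(prev & signal_bit); // false corresponds to "sleep() failed (signaled)"
}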
@@ -1156,7 +1156,7 @@ void lv2_obj::schedule_all()
{
const auto target = g_ppu[i];
- if (test(target->state, cpu_flag::suspend))
+ if (target->state & cpu_flag::suspend)
{
LOG_TRACE(PPU, "schedule(): %s", target->id);
target->state ^= (cpu_flag::signal + cpu_flag::suspend);

View file

@@ -275,7 +275,7 @@ error_code sys_fs_open(vm::cptr<char> path, s32 flags, vm::ptr<u32> fd, s32 mode
open_mode = {}; // error
}
- if (!test(open_mode))
+ if (!open_mode)
{
fmt::throw_exception("sys_fs_open(%s): Invalid or unimplemented flags: %#o" HERE, path, flags);
}
@@ -307,7 +307,7 @@ error_code sys_fs_open(vm::cptr<char> path, s32 flags, vm::ptr<u32> fd, s32 mode
if (!file)
{
- if (test(open_mode & fs::excl) && fs::g_tls_error == fs::error::exist)
+ if (open_mode & fs::excl && fs::g_tls_error == fs::error::exist)
{
return not_an_error(CELL_EEXIST);
}

View file

@@ -95,7 +95,7 @@ error_code _sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u32 int
}
// If interrupt thread is running, it's already established on another interrupt tag
- if (!test(it->state & cpu_flag::stop))
+ if (!(it->state & cpu_flag::stop))
{
error = CELL_EAGAIN;
return result;

View file

@@ -105,7 +105,7 @@ static void network_clear_queue(ppu_thread& ppu)
if (sock.queue.empty())
{
- sock.events = {};
+ sock.events.store({});
}
});
}
@@ -180,11 +180,11 @@ extern void network_thread_init()
events += lv2_socket::poll::error;
#endif
- if (test(events))
+ if (events)
{
semaphore_lock lock(socklist[i]->mutex);
- for (auto it = socklist[i]->queue.begin(); test(events) && it != socklist[i]->queue.end();)
+ for (auto it = socklist[i]->queue.begin(); events && it != socklist[i]->queue.end();)
{
if (it->second(events))
{
@@ -197,7 +197,7 @@ extern void network_thread_init()
if (socklist[i]->queue.empty())
{
- socklist[i]->events = {};
+ socklist[i]->events.store({});
}
}
}
@@ -226,10 +226,10 @@ extern void network_thread_init()
#ifdef _WIN32
verify(HERE), 0 == WSAEventSelect(socklist[i]->socket, _eventh, FD_READ | FD_ACCEPT | FD_CLOSE | FD_WRITE | FD_CONNECT);
#else
- fds[i].fd = test(events) ? socklist[i]->socket : -1;
+ fds[i].fd = events ? socklist[i]->socket : -1;
fds[i].events =
- (test(events, lv2_socket::poll::read) ? POLLIN : 0) |
+ (events & lv2_socket::poll::read ? POLLIN : 0) |
- (test(events, lv2_socket::poll::write) ? POLLOUT : 0) |
+ (events & lv2_socket::poll::write ? POLLOUT : 0) |
0;
fds[i].revents = 0;
#endif
@@ -278,7 +278,7 @@ s32 sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr,
{
semaphore_lock lock(sock.mutex);
- //if (!test(sock.events, lv2_socket::poll::read))
+ //if (!(sock.events & lv2_socket::poll::read))
{
#ifdef _WIN32
sock.ev_set &= ~FD_ACCEPT;
@@ -302,7 +302,7 @@ s32 sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr,
sock.events += lv2_socket::poll::read;
sock.queue.emplace_back(ppu.id, [&](bs_t<lv2_socket::poll> events) -> bool
{
- if (test(events, lv2_socket::poll::read))
+ if (events & lv2_socket::poll::read)
{
#ifdef _WIN32
sock.ev_set &= ~FD_ACCEPT;
@@ -472,7 +472,7 @@ s32 sys_net_bnet_connect(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr,
sock.events += lv2_socket::poll::write;
sock.queue.emplace_back(u32{0}, [&sock](bs_t<lv2_socket::poll> events) -> bool
{
- if (test(events, lv2_socket::poll::write))
+ if (events & lv2_socket::poll::write)
{
#ifdef _WIN32
sock.ev_set &= ~FD_CONNECT;
@@ -503,7 +503,7 @@ s32 sys_net_bnet_connect(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr,
sock.events += lv2_socket::poll::write;
sock.queue.emplace_back(ppu.id, [&](bs_t<lv2_socket::poll> events) -> bool
{
- if (test(events, lv2_socket::poll::write))
+ if (events & lv2_socket::poll::write)
{
#ifdef _WIN32
sock.ev_set &= ~FD_CONNECT;
@@ -886,7 +886,7 @@ s32 sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32 len, s3
{
semaphore_lock lock(sock.mutex);
- //if (!test(sock.events, lv2_socket::poll::read))
+ //if (!(sock.events & lv2_socket::poll::read))
{
#ifdef _WIN32
if (!(native_flags & MSG_PEEK)) sock.ev_set &= ~FD_READ;
@@ -910,7 +910,7 @@ s32 sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32 len, s3
sock.events += lv2_socket::poll::read;
sock.queue.emplace_back(ppu.id, [&](bs_t<lv2_socket::poll> events) -> bool
{
- if (test(events, lv2_socket::poll::read))
+ if (events & lv2_socket::poll::read)
{
#ifdef _WIN32
if (!(native_flags & MSG_PEEK)) sock.ev_set &= ~FD_READ;
@@ -1039,7 +1039,7 @@ s32 sys_net_bnet_sendto(ppu_thread& ppu, s32 s, vm::cptr<void> buf, u32 len, s32
{
semaphore_lock lock(sock.mutex);
- //if (!test(sock.events, lv2_socket::poll::write))
+ //if (!(sock.events & lv2_socket::poll::write))
{
#ifdef _WIN32
sock.ev_set &= ~FD_WRITE;
@@ -1063,7 +1063,7 @@ s32 sys_net_bnet_sendto(ppu_thread& ppu, s32 s, vm::cptr<void> buf, u32 len, s32
sock.events += lv2_socket::poll::write;
sock.queue.emplace_back(ppu.id, [&](bs_t<lv2_socket::poll> events) -> bool
{
- if (test(events, lv2_socket::poll::write))
+ if (events & lv2_socket::poll::write)
{
#ifdef _WIN32
sock.ev_set &= ~FD_WRITE;
@@ -1517,13 +1517,13 @@ s32 sys_net_bnet_poll(ppu_thread& ppu, vm::ptr<sys_net_pollfd> fds, s32 nfds, s3
sock->events += selected;
sock->queue.emplace_back(ppu.id, [sock, selected, fds, i, &signaled, &ppu](bs_t<lv2_socket::poll> events)
{
- if (test(events, selected))
+ if (events & selected)
{
- if (test(events, selected & lv2_socket::poll::read))
+ if (events & selected & lv2_socket::poll::read)
fds[i].revents |= SYS_NET_POLLIN;
- if (test(events, selected & lv2_socket::poll::write))
+ if (events & selected & lv2_socket::poll::write)
fds[i].revents |= SYS_NET_POLLOUT;
- if (test(events, selected & lv2_socket::poll::error))
+ if (events & selected & lv2_socket::poll::error)
fds[i].revents |= SYS_NET_POLLERR;
signaled++;
@@ -1615,7 +1615,7 @@ s32 sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set> readf
//if (exceptfds && exceptfds->bit(i))
// selected += lv2_socket::poll::error;
- if (test(selected))
+ if (selected)
{
selected += lv2_socket::poll::error;
}
@@ -1628,9 +1628,9 @@ s32 sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set> readf
{
#ifdef _WIN32
bool sig = false;
- if (sock->ev_set & (FD_READ | FD_ACCEPT | FD_CLOSE) && test(selected, lv2_socket::poll::read))
+ if (sock->ev_set & (FD_READ | FD_ACCEPT | FD_CLOSE) && selected & lv2_socket::poll::read)
sig = true, rread.set(i);
- if (sock->ev_set & (FD_WRITE | FD_CONNECT) && test(selected, lv2_socket::poll::write))
+ if (sock->ev_set & (FD_WRITE | FD_CONNECT) && selected & lv2_socket::poll::write)
sig = true, rwrite.set(i);
if (sig)
@@ -1639,9 +1639,9 @@ s32 sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set> readf
}
#else
_fds[i].fd = sock->socket;
- if (test(selected, lv2_socket::poll::read))
+ if (selected & lv2_socket::poll::read)
_fds[i].events |= POLLIN;
- if (test(selected, lv2_socket::poll::write))
+ if (selected & lv2_socket::poll::write)
_fds[i].events |= POLLOUT;
#endif
}
@@ -1691,7 +1691,7 @@ s32 sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set> readf
//if (exceptfds && exceptfds->bit(i))
// selected += lv2_socket::poll::error;
- if (test(selected))
+ if (selected)
{
selected += lv2_socket::poll::error;
}
@@ -1707,13 +1707,13 @@ s32 sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set> readf
sock->events += selected;
sock->queue.emplace_back(ppu.id, [sock, selected, i, &rread, &rwrite, &rexcept, &signaled, &ppu](bs_t<lv2_socket::poll> events)
{
- if (test(events, selected))
+ if (events & selected)
{
- if (test(selected, lv2_socket::poll::read) && test(events, lv2_socket::poll::read + lv2_socket::poll::error))
+ if (selected & lv2_socket::poll::read && events & (lv2_socket::poll::read + lv2_socket::poll::error))
rread.set(i);
- if (test(selected, lv2_socket::poll::write) && test(events, lv2_socket::poll::write + lv2_socket::poll::error))
+ if (selected & lv2_socket::poll::write && events & (lv2_socket::poll::write + lv2_socket::poll::error))
rwrite.set(i);
- //if (test(events, selected & lv2_socket::poll::error))
+ //if (events & (selected & lv2_socket::poll::error))
// rexcept.set(i);
signaled++;

View file

@@ -335,7 +335,7 @@ struct lv2_socket final
socket_type socket;
// Events selected for polling
- atomic_t<bs_t<poll>> events{};
+ atomic_bs_t<poll> events{};
// Non-blocking IO option
s32 so_nbio = 0;
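The events member switches from atomic_t<bs_t<poll>> to the new atomic_bs_t<poll>. Judging from the call sites in this diff, the type supports at least += to select an event, store({}) to clear the set, and & with a bool test. A very rough stand-in under those assumptions, simplified for illustration (the real atomic_bs_t<> presumably returns bit-sets rather than bool from operator&):

#include <atomic>
#include <cstdint>
#include <type_traits>

template <typename E>
class atomic_bs_t
{
	using under = std::underlying_type_t<E>;
	std::atomic<under> m_data{0};

	static constexpr under bit(E e) { return static_cast<under>(1) << static_cast<under>(e); }

public:
	void operator+=(E e) { m_data.fetch_or(bit(e)); }
	void operator-=(E e) { m_data.fetch_and(~bit(e)); }
	void store(under raw) { m_data.store(raw); } // events.store({}) clears every bit
	bool operator&(E e) const { return (m_data.load() & bit(e)) != 0; }
};

// Illustrative poll flags; the real ones live in lv2_socket.
enum class poll : std::uint32_t { read, write, error, close };

atomic_bs_t<poll> events{};

bool poll_example()
{
	events += poll::read;                      // as in "sock.events += lv2_socket::poll::read;"
	const bool readable = events & poll::read; // as in "if (events & lv2_socket::poll::read)"
	events.store({});                          // as in network_clear_queue()
	return readable;
}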

View file

@@ -114,7 +114,7 @@ namespace vm
*ptr = nullptr;
ptr = nullptr;
- if (test(cpu.state, cpu_flag::memory))
+ if (cpu.state & cpu_flag::memory)
{
cpu.state -= cpu_flag::memory;
}
@@ -221,7 +221,7 @@ namespace vm
{
while (cpu_thread* ptr = lock)
{
- if (test(ptr->state, cpu_flag::dbg_global_stop + cpu_flag::exit))
+ if (ptr->state & (cpu_flag::dbg_global_stop + cpu_flag::exit))
{
break;
}

View file

@@ -244,7 +244,7 @@ public:
// Load program headers
std::vector<phdr_t> _phdrs;
- if (!test(opts, elf_opt::no_programs))
+ if (!(opts & elf_opt::no_programs))
{
_phdrs.resize(header.e_phnum);
stream.seek(offset + header.e_phoff);
@@ -252,7 +252,7 @@ public:
return set_error(elf_error::stream_phdrs);
}
- if (!test(opts, elf_opt::no_sections))
+ if (!(opts & elf_opt::no_sections))
{
shdrs.resize(header.e_shnum);
stream.seek(offset + header.e_shoff);
@@ -268,7 +268,7 @@ public:
static_cast<phdr_t&>(progs.back()) = hdr;
- if (!test(opts, elf_opt::no_data))
+ if (!(opts & elf_opt::no_data))
{
progs.back().bin.resize(hdr.p_filesz);
stream.seek(offset + hdr.p_offset);

View file

@@ -112,7 +112,7 @@ debugger_frame::debugger_frame(std::shared_ptr<gui_settings> settings, QWidget *
{
if (m_btn_run->text() == RunString && cpu->state.test_and_reset(cpu_flag::dbg_pause))
{
- if (!test(cpu->state, cpu_flag::dbg_pause + cpu_flag::dbg_global_pause))
+ if (!(cpu->state & (cpu_flag::dbg_pause + cpu_flag::dbg_global_pause)))
{
cpu->notify();
}
@@ -293,7 +293,7 @@ void debugger_frame::UpdateUI()
m_last_stat = static_cast<u32>(state);
DoUpdate();
- if (test(state & cpu_flag::dbg_pause))
+ if (state & cpu_flag::dbg_pause)
{
m_btn_run->setText(RunString);
m_btn_step->setEnabled(true);
@@ -570,13 +570,13 @@ void debugger_frame::DoStep(bool stepOver)
{
bool should_step_over = stepOver && cpu->id_type() == 1;
- if (test(cpu_flag::dbg_pause, cpu->state.fetch_op([&](bs_t<cpu_flag>& state)
+ if (+cpu_flag::dbg_pause & +cpu->state.fetch_op([&](bs_t<cpu_flag>& state)
{
if (!should_step_over)
state += cpu_flag::dbg_step;
state -= cpu_flag::dbg_pause;
- })))
+ }))
{
if (should_step_over)
{

View file

@@ -90,7 +90,7 @@ void debugger_list::ShowAddress(u32 addr)
item(i)->setText((IsBreakpoint(m_pc) ? ">>> " : " ") + qstr(m_disasm->last_opcode));
- if (test(cpu->state & cpu_state_pause) && m_pc == GetPc())
+ if (cpu->state & cpu_state_pause && m_pc == GetPc())
{
item(i)->setTextColor(m_text_color_pc);
item(i)->setBackgroundColor(m_color_pc);

View file

@@ -94,7 +94,7 @@ struct gui_listener : logs::listener
void pop()
{
- if (const auto head = read->next.exchange(nullptr))
+ if (const auto head = read.load()->next.exchange(nullptr))
{
delete read.exchange(head);
}
@@ -102,7 +102,7 @@ struct gui_listener : logs::listener
void clear()
{
- while (read->next)
+ while (read.load()->next)
{
pop();
}
@@ -502,7 +502,7 @@ void log_frame::UpdateUI()
}
// Check main logs
- while (const auto packet = s_gui_listener.read->next.load())
+ while (const auto packet = s_gui_listener.read.load()->next.load())
{
// Confirm log level
if (packet->sev <= s_gui_listener.enabled)
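In the gui_listener hunks above, read holds an atomic pointer to the last consumed log packet; after this cleanup the pointer is fetched explicitly with load() before its members are accessed. A small sketch of that consumer pattern on std::atomic, with hypothetical types rather than the log_frame code:

#include <atomic>

struct packet
{
	std::atomic<packet*> next{nullptr};
	int sev = 0;
};

// Dummy head node representing everything consumed so far.
std::atomic<packet*> read{new packet()};

// Drop the oldest pending packet, if any.
void pop()
{
	if (packet* const head = read.load()->next.exchange(nullptr))
	{
		delete read.exchange(head); // the detached node becomes the new dummy head
	}
}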