Utils fixes for ASLR

Vestral 2025-04-06 13:48:08 +09:00 committed by Megamouse
parent 63b7134dce
commit e066735fe9
5 changed files with 128 additions and 99 deletions
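
Note on the change as a whole: the previous code packed a 48-bit pointer and a 16-bit reference counter into a single u64 (pointer shifted left by 16), which breaks once ASLR can hand out user-space addresses that do not fit in 48 bits. Across all five files that packed word becomes a 16-byte fat_ptr struct updated through double-width atomics. A minimal sketch of the two layouts (field order varies between use sites in this commit; the type aliases are stand-ins):

    // Old layout: pointer and counter share one 64-bit word; only sound
    // while every pointer fits in 48 bits.
    using u64 = unsigned long long;
    using u32 = unsigned int;

    constexpr u64 pack_old(u64 ptr_bits, u64 refs) { return (ptr_bits << 16) | refs; }

    // New layout: full 64-bit pointer plus 64 bits of metadata. atomic_t<fat_ptr>
    // is 16 bytes, presumably backed by double-width CAS (e.g. cmpxchg16b on x86-64).
    struct fat_ptr
    {
        u64 ptr{};         // untruncated pointer
        u32 is_non_null{}; // 32-bit word that futex-style wait/notify can target
        u32 ref_ctr{};     // borrow counter (reserved in some variants)
    };
    static_assert(sizeof(fat_ptr) == 16);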

View file

@@ -2490,7 +2490,7 @@ void thread_ctrl::wait_for(u64 usec, [[maybe_unused]] bool alert /* true */)
 	if (alert)
 	{
 		list.set<0>(_this->m_sync, 0);
-		list.set<1>(utils::bless<atomic_t<u32>>(&_this->m_taskq)[1], 0);
+		list.template set<1>(_this->m_taskq);
 	}
 	else
 	{
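
Note: with the packed word gone, wait_for() no longer points the wait list at a hand-computed 32-bit half of m_taskq; it passes the whole queue, and the amended atomic_wait set() overloads (further down in this commit) locate the waitable word via offsetof. A hypothetical illustration of the address math, with stand-in types:

    #include <cstddef>
    #include <cstdint>

    // Stand-ins mirroring the new 16-byte queue head (hypothetical names).
    struct fat_ptr { std::uint64_t ptr; std::uint32_t is_non_null; std::uint32_t reserved; };
    struct queue_head { fat_ptr head; };

    std::uint32_t* waitable_word(queue_head& q)
    {
        // Pre-commit: base + sizeof(u32) reached the upper half of a packed u64.
        // Post-commit: offsetof reaches the dedicated flag, wherever it lives.
        return reinterpret_cast<std::uint32_t*>(
            reinterpret_cast<char*>(&q) + offsetof(fat_ptr, is_non_null));
    }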

View file

@@ -49,7 +49,7 @@ public:
 			if (!next)
 			{
 				// Do not allow access beyond many element more at a time
 				ensure(!installed && index - i < N * 2);

 				installed = true;
@@ -384,17 +384,26 @@ public:
 template <typename T>
 class lf_queue final
 {
-	atomic_t<u64> m_head{0};
+public:
+	struct fat_ptr
+	{
+		u64 ptr{};
+		u32 is_non_null{};
+		u32 reserved{};
+	};

-	lf_queue_item<T>* load(u64 value) const noexcept
+private:
+	atomic_t<fat_ptr> m_head{fat_ptr{}};
+
+	lf_queue_item<T>* load(fat_ptr value) const noexcept
 	{
-		return reinterpret_cast<lf_queue_item<T>*>(value >> 16);
+		return reinterpret_cast<lf_queue_item<T>*>(value.ptr);
 	}

 	// Extract all elements and reverse element order (FILO to FIFO)
 	lf_queue_item<T>* reverse() noexcept
 	{
-		if (auto* head = load(m_head) ? load(m_head.exchange(0)) : nullptr)
+		if (auto* head = load(m_head) ? load(m_head.exchange(fat_ptr{})) : nullptr)
 		{
 			if (auto* prev = head->m_link)
 			{
@@ -420,7 +429,7 @@ public:
 	lf_queue(lf_queue&& other) noexcept
 	{
-		m_head.release(other.m_head.exchange(0));
+		m_head.release(other.m_head.exchange(fat_ptr{}));
 	}

 	lf_queue& operator=(lf_queue&& other) noexcept
@@ -431,7 +440,7 @@ public:
 		}

 		delete load(m_head);
-		m_head.release(other.m_head.exchange(0));
+		m_head.release(other.m_head.exchange(fat_ptr{}));
 		return *this;
 	}
@@ -442,9 +451,9 @@ public:
 	void wait(std::nullptr_t /*null*/ = nullptr) noexcept
 	{
-		if (m_head == 0)
+		if (!operator bool())
 		{
-			utils::bless<atomic_t<u32>>(&m_head)[1].wait(0);
+			utils::bless<atomic_t<u32>>(&m_head.raw().is_non_null)->wait(0);
 		}
 	}
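
Note: wait() previously compared the packed word against 0 and slept on its upper 32 bits; it now defers the emptiness test to operator bool() (a pointer observation) and sleeps on the dedicated is_non_null flag, which push() publishes as item != nullptr. A hedged sketch of the intended consumer pattern, assuming lf_queue's existing pop_all():

    lf_queue<int> q;

    void consumer()
    {
        q.wait();                 // futex-wait on is_non_null while the queue is empty
        auto slice = q.pop_all(); // drain the FILO chain, reversed to FIFO order
        // iterate the slice ...
    }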
@@ -455,7 +464,7 @@ public:
 	explicit operator bool() const noexcept
 	{
-		return m_head != 0;
+		return observe() != nullptr;
 	}

 	template <bool Notify = true, typename... Args>
@@ -464,25 +473,25 @@ public:
 		auto oldv = m_head.load();
 		auto item = new lf_queue_item<T>(load(oldv), std::forward<Args>(args)...);

-		while (!m_head.compare_exchange(oldv, reinterpret_cast<u64>(item) << 16))
+		while (!m_head.compare_exchange(oldv, fat_ptr{reinterpret_cast<u64>(item), item != nullptr, 0}))
 		{
 			item->m_link = load(oldv);
 		}

-		if (!oldv && Notify)
+		if (!oldv.ptr && Notify)
 		{
 			// Notify only if queue was empty
 			notify(true);
 		}

-		return !oldv;
+		return !oldv.ptr;
 	}

 	void notify(bool force = false)
 	{
 		if (force || operator bool())
 		{
-			utils::bless<atomic_t<u32>>(&m_head)[1].notify_one();
+			utils::bless<atomic_t<u32>>(&m_head.raw().is_non_null)->notify_one();
 		}
 	}
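
Note: push() remains a classic lock-free stack push, now done on the 16-byte head: link the new node to the observed head, then publish with a double-width compare-exchange, relinking on each failure. The same pattern in portable C++ with std::atomic (single-width here; the commit needs the fat struct so the futex flag changes in the same atomic operation):

    #include <atomic>

    struct node { int value; node* next; };
    std::atomic<node*> head{nullptr};

    bool push(int value)
    {
        node* item = new node{value, head.load(std::memory_order_relaxed)};

        // On failure, compare_exchange_weak reloads the current head into
        // item->next, just like `item->m_link = load(oldv)` in the diff.
        while (!head.compare_exchange_weak(item->next, item,
            std::memory_order_release, std::memory_order_relaxed))
        {
        }

        return item->next == nullptr; // true if the stack was previously empty
    }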
@@ -498,7 +507,7 @@ public:
 	lf_queue_slice<T> pop_all_reversed()
 	{
 		lf_queue_slice<T> result;
-		result.m_head = load(m_head.exchange(0));
+		result.m_head = load(m_head.exchange(fat_ptr{}));
 		return result;
 	}

View file

@@ -57,8 +57,8 @@ static bool has_waitv()
 // Total number of entries.
 static constexpr usz s_hashtable_size = 1u << 17;

-// Reference counter combined with shifted pointer (which is assumed to be 48 bit)
-static constexpr uptr s_ref_mask = 0xffff;
+// Reference counter mask
+static constexpr uptr s_ref_mask = 0xffff'ffff;

 // Fix for silly on-first-use initializer
 static bool s_null_wait_cb(const void*, u64, u64){ return true; };
@@ -153,8 +153,16 @@ namespace
 	// Essentially a fat semaphore
 	struct alignas(64) cond_handle
 	{
-		// Combined pointer (most significant 48 bits) and ref counter (16 least significant bits)
-		atomic_t<u64> ptr_ref;
+		struct fat_ptr
+		{
+			u64 ptr{};
+			u32 reserved{};
+			u32 ref_ctr{};
+
+			auto operator<=>(const fat_ptr& other) const = default;
+		};
+
+		atomic_t<fat_ptr> ptr_ref;
 		u64 tid;
 		u32 oldv;
@@ -183,7 +191,7 @@ namespace
 		mtx.init(mtx);
 #endif

-		ensure(!ptr_ref.exchange((iptr << 16) | 1));
+		ensure(ptr_ref.exchange(fat_ptr{iptr, 0, 1}) == fat_ptr{});
 	}

 	void destroy()
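
Note: the defaulted operator<=> on cond_handle::fat_ptr is what makes these comparisons compile: in C++20, defaulting the three-way comparison also implicitly declares a defaulted operator==, which init() uses in `ensure(... == fat_ptr{})` and cond_id_lock() uses in `val == cond_handle::fat_ptr{}`. Minimal standalone demonstration:

    #include <compare>

    struct fat_ptr
    {
        unsigned long long ptr{};
        unsigned reserved{};
        unsigned ref_ctr{};

        // Defaulting <=> also implicitly declares a defaulted operator==.
        auto operator<=>(const fat_ptr&) const = default;
    };

    static_assert(fat_ptr{} == fat_ptr{});
    static_assert(fat_ptr{1, 0, 1} != fat_ptr{});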
@@ -370,7 +378,7 @@ namespace
 			if (cond_id)
 			{
 				// Set fake refctr
-				s_cond_list[cond_id].ptr_ref.release(1);
+				s_cond_list[cond_id].ptr_ref.release(cond_handle::fat_ptr{0, 0, 1});
 				cond_free(cond_id, -1);
 			}
 		}
@@ -390,7 +398,7 @@ static u32 cond_alloc(uptr iptr, u32 tls_slot = -1)
 	{
 		// Fast reinitialize
 		const u32 id = std::exchange(*ptls, 0);
-		s_cond_list[id].ptr_ref.release((iptr << 16) | 1);
+		s_cond_list[id].ptr_ref.release(cond_handle::fat_ptr{iptr, 0, 1});
 		return id;
 	}
@@ -461,15 +469,15 @@ static void cond_free(u32 cond_id, u32 tls_slot = -1)
 	const auto cond = s_cond_list + cond_id;

 	// Dereference, destroy on last ref
-	const bool last = cond->ptr_ref.atomic_op([](u64& val)
+	const bool last = cond->ptr_ref.atomic_op([](cond_handle::fat_ptr& val)
 	{
-		ensure(val & s_ref_mask);
-		val--;
+		ensure(val.ref_ctr);
+		val.ref_ctr--;

-		if ((val & s_ref_mask) == 0)
+		if (val.ref_ctr == 0)
 		{
-			val = 0;
+			val = cond_handle::fat_ptr{};
 			return true;
 		}
@@ -525,15 +533,15 @@ static cond_handle* cond_id_lock(u32 cond_id, uptr iptr = 0)
 	while (true)
 	{
-		const auto [old, ok] = cond->ptr_ref.fetch_op([&](u64& val)
+		const auto [old, ok] = cond->ptr_ref.fetch_op([&](cond_handle::fat_ptr& val)
 		{
-			if (!val || (val & s_ref_mask) == s_ref_mask)
+			if (val == cond_handle::fat_ptr{} || val.ref_ctr == s_ref_mask)
 			{
 				// Don't reference already deallocated semaphore
 				return false;
 			}

-			if (iptr && (val >> 16) != iptr)
+			if (iptr && val.ptr != iptr)
 			{
 				// Pointer mismatch
 				return false;
@@ -548,7 +556,7 @@ static cond_handle* cond_id_lock(u32 cond_id, uptr iptr = 0)
 			if (!did_ref)
 			{
-				val++;
+				val.ref_ctr++;
 			}

 			return true;
@@ -566,7 +574,7 @@ static cond_handle* cond_id_lock(u32 cond_id, uptr iptr = 0)
 			return cond;
 		}

-		if ((old & s_ref_mask) == s_ref_mask)
+		if (old.ref_ctr == s_ref_mask)
 		{
 			fmt::throw_exception("Reference count limit (%u) reached in an atomic notifier.", s_ref_mask);
 		}
@@ -589,12 +597,14 @@
 		u64 maxc: 5; // Collision counter
 		u64 maxd: 11; // Distance counter
 		u64 bits: 24; // Allocated bits
-		u64 prio: 24; // Reserved
+		u64 prio: 8; // Reserved
 		u64 ref : 16; // Ref counter
-		u64 iptr: 48; // First pointer to use slot (to count used slots)
+		u64 iptr: 64; // First pointer to use slot (to count used slots)
 	};

+	static_assert(sizeof(slot_allocator) == 16);
+
 	// Need to spare 16 bits for ref counter
 	static constexpr u64 max_threads = 24;
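
Note: the bit budget still balances, which is exactly what the new static_assert pins down: iptr grows from 48 to a full 64 bits by shrinking prio from 24 to 8, leaving the struct at 128 bits so the existing 16-byte atomics keep working. Standalone check of the arithmetic:

    // Both layouts pack into 128 bits; only the new one stores an
    // untruncated 64-bit pointer in iptr.
    static_assert(5 + 11 + 24 + 24 + 16 + 48 == 128); // old: iptr capped at 48 bits
    static_assert(5 + 11 + 24 + 8  + 16 + 64 == 128); // new: full 64-bit iptr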
@@ -935,7 +945,7 @@ atomic_wait_engine::wait(const void* data, u32 old_value, u64 timeout, atomic_wa
 	const auto stamp0 = utils::get_unique_tsc();

-	const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 16);
+	const uptr iptr = reinterpret_cast<uptr>(data);

 	uptr iptr_ext[atomic_wait::max_list - 1]{};
@@ -956,7 +966,7 @@ atomic_wait_engine::wait(const void* data, u32 old_value, u64 timeout, atomic_wa
 				}
 			}

-			iptr_ext[ext_size] = reinterpret_cast<uptr>(e->data) & (~s_ref_mask >> 16);
+			iptr_ext[ext_size] = reinterpret_cast<uptr>(e->data);
 			ext_size++;
 		}
 	}
@@ -1266,7 +1276,7 @@ void atomic_wait_engine::notify_one(const void* data)
 		return;
 	}
 #endif

-	const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 16);
+	const uptr iptr = reinterpret_cast<uptr>(data);

 	root_info::slot_search(iptr, [&](u32 cond_id)
 	{
@@ -1289,7 +1299,7 @@ atomic_wait_engine::notify_all(const void* data)
 		return;
 	}
 #endif

-	const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 16);
+	const uptr iptr = reinterpret_cast<uptr>(data);

 	// Array count for batch notification
 	u32 count = 0;

View file

@@ -205,9 +205,9 @@ namespace atomic_wait
 		constexpr void set(lf_queue<T2>& var, std::nullptr_t = nullptr)
 		{
 			static_assert(Index < Max);
-			static_assert(sizeof(var) == sizeof(uptr));
+			static_assert(sizeof(var) == sizeof(uptr) * 2);

-			m_info[Index].data = reinterpret_cast<char*>(&var) + sizeof(u32);
+			m_info[Index].data = reinterpret_cast<char*>(&var) + offsetof(typename lf_queue<T2>::fat_ptr, is_non_null);
 			m_info[Index].old = 0;
 		}
@@ -215,9 +215,9 @@ namespace atomic_wait
 		constexpr void set(stx::atomic_ptr<T2>& var, std::nullptr_t = nullptr)
 		{
 			static_assert(Index < Max);
-			static_assert(sizeof(var) == sizeof(uptr));
+			static_assert(sizeof(var) == sizeof(uptr) * 2);

-			m_info[Index].data = reinterpret_cast<char*>(&var) + sizeof(u32);
+			m_info[Index].data = reinterpret_cast<char*>(&var) + offsetof(typename stx::atomic_ptr<T2>::fat_ptr, is_non_null);
 			m_info[Index].old = 0;
 		}
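
Note: both set() overloads make the same two adjustments: the size check doubles because the head is now two pointer-widths, and the waitable word is found with offsetof instead of a hardcoded sizeof(u32), since is_non_null sits after the full 64-bit pointer rather than in the upper half of a packed u64. A sketch of the offsets with a stand-in struct (64-bit target assumed):

    #include <cstddef>
    #include <cstdint>

    struct fat_ptr { std::uint64_t ptr; std::uint32_t is_non_null; std::uint32_t reserved; };

    static_assert(sizeof(fat_ptr) == sizeof(void*) * 2); // the doubled size check
    static_assert(offsetof(fat_ptr, is_non_null) == 8);  // futex word now at offset 8
    // The old code assumed offset sizeof(u32) == 4 inside a packed 64-bit head;
    // that assumption died with the packing.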

View file

@@ -19,14 +19,8 @@ namespace stx
 	template <typename T>
 	class atomic_ptr;

-	// Basic assumption of userspace pointer size
-	constexpr uint c_ptr_size = 48;
-
-	// Use lower 16 bits as atomic_ptr internal counter of borrowed refs (pointer itself is shifted)
-	constexpr uint c_ref_mask = 0xffff, c_ref_size = 16;
-
-	// Remaining pointer bits
-	constexpr uptr c_ptr_mask = static_cast<uptr>(-1) << c_ref_size;
+	// Use 16 bits as atomic_ptr internal counter of borrowed refs
+	constexpr uint c_ref_mask = 0xffff;

 	struct shared_counter
 	{
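
Note: with no pointer bits to carve out, c_ptr_size, c_ref_size and c_ptr_mask disappear entirely; c_ref_mask survives as the only constant, still a 16-bit borrow budget, but now counted in a dedicated fat_ptr field instead of the low bits of the pointer word.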
@@ -582,11 +576,21 @@ namespace stx
 	template <typename T>
 	class atomic_ptr
 	{
-		mutable atomic_t<uptr> m_val{0};
+	public:
+		struct fat_ptr
+		{
+			uptr ptr{};
+			u32 is_non_null{};
+			u32 ref_ctr{};
+		};

-		static shared_counter* d(uptr val) noexcept
+	private:
+		mutable atomic_t<fat_ptr> m_val{fat_ptr{}};
+
+		static shared_counter* d(fat_ptr val) noexcept
 		{
-			return std::launder(reinterpret_cast<shared_counter*>((val >> c_ref_size) - sizeof(shared_counter)));
+			return std::launder(reinterpret_cast<shared_counter*>(val.ptr - sizeof(shared_counter)));
 		}

 		shared_counter* d() const noexcept
@@ -594,14 +598,19 @@ namespace stx
 			return d(m_val);
 		}

-		static uptr to_val(const volatile std::remove_extent_t<T>* ptr) noexcept
+		static fat_ptr to_val(const volatile std::remove_extent_t<T>* ptr) noexcept
 		{
-			return (reinterpret_cast<uptr>(ptr) << c_ref_size);
+			return fat_ptr{reinterpret_cast<uptr>(ptr), ptr != nullptr, 0};
 		}

-		static std::remove_extent_t<T>* ptr_to(uptr val) noexcept
+		static fat_ptr to_val(uptr ptr) noexcept
 		{
-			return reinterpret_cast<std::remove_extent_t<T>*>(val >> c_ref_size);
+			return fat_ptr{ptr, ptr != 0, 0};
+		}
+
+		static std::remove_extent_t<T>* ptr_to(fat_ptr val) noexcept
+		{
+			return reinterpret_cast<std::remove_extent_t<T>*>(val.ptr);
 		}

 		template <typename U>
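
Note: d() depends on the shared_counter control block being allocated immediately before the managed object, so stepping back sizeof(shared_counter) bytes from the object pointer lands on it; previously the stored value had to be unshifted by 16 bits first. The assumed layout:

    // [ shared_counter ][ T object ... ]
    //  ^                 ^
    //  val.ptr - sizeof(shared_counter)
    //                    val.ptr
    //
    // Old: (val >> c_ref_size) - sizeof(shared_counter)
    // New:  val.ptr            - sizeof(shared_counter)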
@@ -644,7 +653,7 @@ namespace stx
 		atomic_ptr(const shared_ptr<U>& r) noexcept
 		{
 			// Obtain a ref + as many refs as an atomic_ptr can additionally reference
-			if (uptr rval = to_val(r.m_ptr))
+			if (fat_ptr rval = to_val(r.m_ptr); rval.ptr != 0)
 			{
 				m_val.raw() = rval;
 				d(rval)->refs += c_ref_mask + 1;
@@ -654,7 +663,7 @@ namespace stx
 		template <typename U> requires same_ptr_implicit_v<T, U>
 		atomic_ptr(shared_ptr<U>&& r) noexcept
 		{
-			if (uptr rval = to_val(r.m_ptr))
+			if (fat_ptr rval = to_val(r.m_ptr); rval.ptr != 0)
 			{
 				m_val.raw() = rval;
 				d(rval)->refs += c_ref_mask;
@@ -666,7 +675,7 @@ namespace stx
 		template <typename U> requires same_ptr_implicit_v<T, U>
 		atomic_ptr(single_ptr<U>&& r) noexcept
 		{
-			if (uptr rval = to_val(r.m_ptr))
+			if (fat_ptr rval = to_val(r.m_ptr); rval.ptr != 0)
 			{
 				m_val.raw() = rval;
 				d(rval)->refs += c_ref_mask;
@@ -677,13 +686,13 @@ namespace stx
 		~atomic_ptr() noexcept
 		{
-			const uptr v = m_val.raw();
+			const fat_ptr v = m_val.raw();

-			if (v >> c_ref_size)
+			if (v.ptr)
 			{
 				const auto o = d(v);

-				if (!o->refs.sub_fetch(c_ref_mask + 1 - (v & c_ref_mask)))
+				if (!o->refs.sub_fetch(c_ref_mask + 1 - (v.ref_ctr & c_ref_mask)))
 				{
 					o->destroy.load()(o);
 				}
@@ -732,11 +741,11 @@ namespace stx
 			shared_type r;

 			// Add reference
-			const auto [prev, did_ref] = m_val.fetch_op([](uptr& val)
+			const auto [prev, did_ref] = m_val.fetch_op([](fat_ptr& val)
 			{
-				if (val >> c_ref_size)
+				if (val.ptr)
 				{
-					val++;
+					val.ref_ctr++;
 					return true;
 				}
@@ -754,11 +763,11 @@ namespace stx
 			r.d()->refs++;

 			// Dereference if still the same pointer
-			const auto [_, did_deref] = m_val.fetch_op([prev = prev](uptr& val)
+			const auto [_, did_deref] = m_val.fetch_op([prev = prev](fat_ptr& val)
 			{
-				if (val >> c_ref_size == prev >> c_ref_size)
+				if (val.ptr == prev.ptr)
 				{
-					val--;
+					val.ref_ctr--;
 					return true;
 				}
@@ -781,11 +790,11 @@ namespace stx
 			shared_type r;

 			// Add reference
-			const auto [prev, did_ref] = m_val.fetch_op([](uptr& val)
+			const auto [prev, did_ref] = m_val.fetch_op([](fat_ptr& val)
 			{
-				if (val >> c_ref_size)
+				if (val.ptr)
 				{
-					val++;
+					val.ref_ctr++;
 					return true;
 				}
@@ -822,11 +831,11 @@ namespace stx
 			}

 			// Dereference if still the same pointer
-			const auto [_, did_deref] = m_val.fetch_op([prev = prev](uptr& val)
+			const auto [_, did_deref] = m_val.fetch_op([prev = prev](fat_ptr& val)
 			{
-				if (val >> c_ref_size == prev >> c_ref_size)
+				if (val.ptr == prev.ptr)
 				{
-					val--;
+					val.ref_ctr--;
 					return true;
 				}
@@ -887,7 +896,7 @@ namespace stx
 			atomic_ptr old;
 			old.m_val.raw() = m_val.exchange(to_val(r.m_ptr));
-			old.m_val.raw() += 1;
+			old.m_val.raw().ref_ctr += 1;

 			r.m_ptr = std::launder(ptr_to(old.m_val));
 			return r;
@@ -903,7 +912,7 @@ namespace stx
 			atomic_ptr old;
 			old.m_val.raw() = m_val.exchange(to_val(value.m_ptr));
-			old.m_val.raw() += 1;
+			old.m_val.raw().ref_ctr += 1;

 			value.m_ptr = std::launder(ptr_to(old.m_val));
 			return value;
@@ -922,21 +931,21 @@ namespace stx
 			atomic_ptr old;

-			const uptr _val = m_val.fetch_op([&](uptr& val)
+			const fat_ptr _val = m_val.fetch_op([&](fat_ptr& val)
 			{
-				if (val >> c_ref_size == _old)
+				if (val.ptr == _old)
 				{
 					// Set new value
-					val = _new << c_ref_size;
+					val = to_val(_new);
 				}
-				else if (val)
+				else if (val.ptr != 0)
 				{
 					// Reference previous value
-					val++;
+					val.ref_ctr++;
 				}
 			});

-			if (_val >> c_ref_size == _old)
+			if (_val.ptr == _old)
 			{
 				// Success (exch is consumed, cmp_and_old is unchanged)
 				if (exch.m_ptr)
@@ -953,9 +962,10 @@ namespace stx
 			old_exch.m_val.raw() = to_val(std::exchange(exch.m_ptr, nullptr));

 			// Set to reset old cmp_and_old value
-			old.m_val.raw() = to_val(cmp_and_old.m_ptr) | c_ref_mask;
+			old.m_val.raw() = to_val(cmp_and_old.m_ptr);
+			old.m_val.raw().ref_ctr |= c_ref_mask;

-			if (!_val)
+			if (!_val.ptr)
 			{
 				return false;
 			}
@@ -965,11 +975,11 @@ namespace stx
 			cmp_and_old.d()->refs++;

 			// Dereference if still the same pointer
-			const auto [_, did_deref] = m_val.fetch_op([_val](uptr& val)
+			const auto [_, did_deref] = m_val.fetch_op([_val](fat_ptr& val)
 			{
-				if (val >> c_ref_size == _val >> c_ref_size)
+				if (val.ptr == _val.ptr)
 				{
-					val--;
+					val.ref_ctr--;
 					return true;
 				}
@@ -1008,12 +1018,12 @@ namespace stx
 			atomic_ptr old;

-			const auto [_val, ok] = m_val.fetch_op([&](uptr& val)
+			const auto [_val, ok] = m_val.fetch_op([&](fat_ptr& val)
 			{
-				if (val >> c_ref_size == _old)
+				if (val.ptr == _old)
 				{
 					// Set new value
-					val = _new << c_ref_size;
+					val = to_val(_new);
 					return true;
 				}
@@ -1080,7 +1090,7 @@ namespace stx
 			if (next.m_ptr)
 			{
 				// Compensation for `next` assignment
-				old.m_val.raw() += 1;
+				old.m_val.raw().ref_ctr += 1;
 			}
 		}
@@ -1092,7 +1102,7 @@ namespace stx
 		explicit constexpr operator bool() const noexcept
 		{
-			return m_val != 0;
+			return observe() != nullptr;
 		}

 		template <typename U> requires same_ptr_implicit_v<T, U>
@@ -1109,17 +1119,17 @@ namespace stx
 		void wait(std::nullptr_t, atomic_wait_timeout timeout = atomic_wait_timeout::inf)
 		{
-			utils::bless<atomic_t<u32>>(&m_val)[1].wait(0, timeout);
+			utils::bless<atomic_t<u32>>(&m_val.raw().is_non_null)->wait(0, timeout);
 		}

 		void notify_one()
 		{
-			utils::bless<atomic_t<u32>>(&m_val)[1].notify_one();
+			utils::bless<atomic_t<u32>>(&m_val.raw().is_non_null)->notify_one();
 		}

 		void notify_all()
 		{
-			utils::bless<atomic_t<u32>>(&m_val)[1].notify_all();
+			utils::bless<atomic_t<u32>>(&m_val.raw().is_non_null)->notify_all();
 		}
 	};
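
Note: all three members follow the same recipe as lf_queue: utils::bless reinterprets the is_non_null field as an atomic_t<u32>, so the futex machinery touches only that 32-bit word and waking never requires a 128-bit atomic. A hedged usage sketch, assuming the atomic_ptr API shown above plus stx::make_single:

    stx::atomic_ptr<int> slot;

    void consumer()
    {
        slot.wait(nullptr);      // futex-wait on is_non_null while the slot is empty
        auto ptr = slot.load();  // observe the published object
    }

    void producer()
    {
        slot = stx::make_single<int>(42); // publish; is_non_null becomes 1
        slot.notify_one();                // wake one waiter on the 32-bit word
    }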