Atomic utils fixup after ASLR changes (#17168)

Author: Vestral, 2025-05-06 03:48:07 +09:00 (committed by GitHub)
parent 74fa59590a
commit 3cebfaa648
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
4 changed files with 23 additions and 17 deletions

@@ -384,7 +384,7 @@ public:
 template <typename T>
 class lf_queue final
 {
-public:
+private:
     struct fat_ptr
     {
         u64 ptr{};
@@ -392,7 +392,6 @@ public:
         u32 reserved{};
     };

-private:
     atomic_t<fat_ptr> m_head{fat_ptr{}};

     lf_queue_item<T>* load(fat_ptr value) const noexcept
@@ -439,8 +438,7 @@ public:
             return *this;
         }

-        delete load(m_head);
-        m_head.release(other.m_head.exchange(fat_ptr{}));
+        delete load(m_head.exchange(other.m_head.exchange(fat_ptr{})));

         return *this;
     }
@@ -453,10 +451,15 @@ public:
     {
         if (!operator bool())
         {
-            utils::bless<atomic_t<u32>>(&m_head.raw().is_non_null)->wait(0);
+            get_wait_atomic().wait(0);
         }
     }

+    atomic_t<u32> &get_wait_atomic()
+    {
+        return *utils::bless<atomic_t<u32>>(&m_head.raw().is_non_null);
+    }
+
     const volatile void* observe() const noexcept
     {
         return load(m_head);
@@ -491,7 +494,7 @@ public:
     {
         if (force || operator bool())
         {
-            utils::bless<atomic_t<u32>>(&m_head.raw().is_non_null)->notify_one();
+            get_wait_atomic().notify_one();
         }
     }
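
The recurring piece in this commit is the new get_wait_atomic() accessor: lf_queue and stx::atomic_ptr each store a 16-byte fat_ptr whose 32-bit is_non_null field doubles as the futex wait word, and the accessor is now the one place that blesses that field as an atomic_t<u32>, so call sites no longer poke at the (now private) layout directly. Below is a minimal standalone sketch of the same idea, assuming C++20; std::atomic_ref and the names queue_like, wait_for_items and push_notify are invented stand-ins for illustration, not RPCS3's atomic_t/utils::bless API.

#include <atomic>
#include <cstdint>

struct fat_ptr
{
    std::uint64_t ptr{};         // packed pointer bits
    std::uint32_t is_non_null{}; // doubles as the futex wait word
    std::uint32_t reserved{};
};

class queue_like
{
    fat_ptr m_head{};

public:
    // Single accessor that knows which field is waitable; std::atomic_ref
    // stands in here for RPCS3's atomic_t<u32>& obtained via utils::bless.
    std::atomic_ref<std::uint32_t> get_wait_atomic()
    {
        return std::atomic_ref<std::uint32_t>(m_head.is_non_null);
    }

    // Consumer side: block until a producer publishes a non-null head.
    void wait_for_items()
    {
        get_wait_atomic().wait(0);
    }

    // Producer side: flip the wait word and wake one sleeping consumer.
    void push_notify()
    {
        get_wait_atomic().store(1);
        get_wait_atomic().notify_one();
    }
};

The benefit of routing everything through one accessor is that only the helper (and the fat_ptr definition itself) has to know where the wait word sits; call sites such as the SPU recompiler below just ask the container for it.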

@@ -7376,7 +7376,7 @@ struct spu_llvm_worker
                 set_relax_flag = false;
             }

-            thread_ctrl::wait_on(utils::bless<atomic_t<u32>>(&registered)[1], 0);
+            thread_ctrl::wait_on(registered.get_wait_atomic(), 0);
             slice = registered.pop_all();
         }())
         {
@@ -7491,7 +7491,7 @@ struct spu_llvm
         while (!registered && thread_ctrl::state() != thread_state::aborting)
         {
             // Wait for the first SPU block before launching any thread
-            thread_ctrl::wait_on(utils::bless<atomic_t<u32>>(&registered)[1], 0);
+            thread_ctrl::wait_on(registered.get_wait_atomic(), 0);
         }

         if (thread_ctrl::state() == thread_state::aborting)
@@ -7594,7 +7594,7 @@ struct spu_llvm
             // Interrupt profiler thread and put it to sleep
             static_cast<void>(prof_mutex.reset());
-            thread_ctrl::wait_on(utils::bless<atomic_t<u32>>(&registered)[1], 0);
+            thread_ctrl::wait_on(registered.get_wait_atomic(), 0);
             std::fill(notify_compile.begin(), notify_compile.end(), 0); // Reset notification flags
             notify_compile_count = 0;
             compile_pending = 0;

@@ -207,7 +207,7 @@ namespace atomic_wait
         static_assert(Index < Max);
         static_assert(sizeof(var) == sizeof(uptr) * 2);

-        m_info[Index].data = reinterpret_cast<char*>(&var) + offsetof(typename lf_queue<T2>::fat_ptr, is_non_null);
+        m_info[Index].data = std::bit_cast<char*>(&var.get_wait_atomic().raw());
         m_info[Index].old = 0;
     }
@@ -217,7 +217,7 @@ namespace atomic_wait
         static_assert(Index < Max);
         static_assert(sizeof(var) == sizeof(uptr) * 2);

-        m_info[Index].data = reinterpret_cast<char*>(&var) + offsetof(typename stx::atomic_ptr<T2>::fat_ptr, is_non_null);
+        m_info[Index].data = std::bit_cast<char*>(&var.get_wait_atomic().raw());
         m_info[Index].old = 0;
     }
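
With fat_ptr demoted to a private member type, this registration code can no longer compute offsetof(typename lf_queue<T2>::fat_ptr, is_non_null) from outside the class, which is presumably why it now stores the address handed out by the public accessor via std::bit_cast<char*>(&var.get_wait_atomic().raw()). A rough standalone sketch of that shift, with the hypothetical wait_word() standing in for get_wait_atomic().raw(), and lockfree_container, wait_info and register_waiter invented for illustration:

#include <cstdint>

class lockfree_container
{
    // Layout is private now, so offsetof on fat_ptr is off limits to outsiders.
    struct fat_ptr
    {
        std::uint64_t ptr{};
        std::uint32_t is_non_null{};
        std::uint32_t reserved{};
    };

    fat_ptr m_head{};

public:
    // Stand-in for get_wait_atomic().raw(): the container itself points
    // the caller at its 32-bit wait word.
    std::uint32_t* wait_word() { return &m_head.is_non_null; }
};

struct wait_info
{
    char* data = nullptr;  // address of the wait word to sleep on
    std::uint64_t old = 0; // expected value
};

// Before: data = reinterpret_cast<char*>(&q) + offsetof(fat_ptr, is_non_null)
// After:  the container exposes the word, so no layout knowledge leaks here.
inline wait_info register_waiter(lockfree_container& q)
{
    return wait_info{reinterpret_cast<char*>(q.wait_word()), 0};
}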

@@ -576,7 +576,7 @@ namespace stx
     template <typename T>
     class atomic_ptr
     {
-    public:
+    private:
         struct fat_ptr
         {
             uptr ptr{};
@@ -584,8 +584,6 @@ namespace stx
             u32 ref_ctr{};
         };

-    private:
-
         mutable atomic_t<fat_ptr> m_val{fat_ptr{}};

         static shared_counter* d(fat_ptr val) noexcept
@@ -1117,19 +1115,24 @@ namespace stx
             return static_cast<volatile const void*>(observe()) == r.get();
         }

+        atomic_t<u32> &get_wait_atomic()
+        {
+            return *utils::bless<atomic_t<u32>>(&m_val.raw().is_non_null);
+        }
+
         void wait(std::nullptr_t, atomic_wait_timeout timeout = atomic_wait_timeout::inf)
         {
-            utils::bless<atomic_t<u32>>(&m_val.raw().is_non_null)->wait(0, timeout);
+            get_wait_atomic().wait(0, timeout);
         }

         void notify_one()
         {
-            utils::bless<atomic_t<u32>>(&m_val.raw().is_non_null)->notify_one();
+            get_wait_atomic().notify_one();
         }

         void notify_all()
         {
-            utils::bless<atomic_t<u32>>(&m_val.raw().is_non_null)->notify_all();
+            get_wait_atomic().notify_all();
         }
     };