Migration to named_thread<>

Add atomic_t<>::try_dec instead of fetch_dec_sat
Add atomic_t<>::try_inc
GDBDebugServer is broken (needs rewrite)
Removed old_thread class (former named_thread)
Removed storing/rethrowing exceptions from thread
Emu.Stop doesn't inject an exception anymore
task_stack helper class removed
thread_base simplified (no shared_from_this)
thread_ctrl::spawn simplified (creates detached thread)
Implemented overridable thread detaching logic
Disabled cellAdec, cellDmux, cellFsAio
SPUThread renamed to spu_thread
RawSPUThread removed, spu_thread used instead
Disabled deriving from ppu_thread
Partial support for thread renaming
lv2_timer... simplified, screw it
idm/fxm: butchered support for on_stop/on_init
vm: improved allocation structure (added size)
This commit is contained in:
Nekotekina 2018-10-11 01:17:19 +03:00
parent 8ca6c9fff0
commit 1b37e775be
82 changed files with 1820 additions and 2023 deletions

View file

@ -1023,7 +1023,7 @@ public:
}
// Conditionally decrement
simple_type fetch_dec_sat(simple_type greater_than = std::numeric_limits<simple_type>::min(), simple_type amount = 1)
bool try_dec(simple_type greater_than = std::numeric_limits<simple_type>::min())
{
type _new, old = atomic_storage<type>::load(m_data);
@ -1031,17 +1031,39 @@ public:
{
_new = old;
if (_new <= greater_than)
if (!(_new > greater_than))
{
// Early exit
return old;
return false;
}
_new -= amount;
_new -= 1;
if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new)))
{
return old;
return true;
}
}
}
// Conditionally increment: succeeds only while the value is below less_than
bool try_inc(simple_type less_than = std::numeric_limits<simple_type>::max())
{
type old = atomic_storage<type>::load(m_data);
while (true)
{
if (!(old < less_than))
{
// Upper bound reached: leave the value unchanged
return false;
}
type incremented = old;
incremented += 1;
// On CAS failure, 'old' is refreshed and the loop retries
if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, incremented)))
{
return true;
}
}
}

View file

@ -40,7 +40,7 @@ public:
const u64 ALL_THREADS = 0xffffffffffffffff;
const u64 ANY_THREAD = 0;
class GDBDebugServer : public old_thread
class GDBDebugServer
{
socket_t server_socket;
socket_t client_socket;
@ -112,29 +112,16 @@ class GDBDebugServer : public old_thread
bool cmd_set_breakpoint(gdb_cmd& cmd);
bool cmd_remove_breakpoint(gdb_cmd& cmd);
protected:
void on_task() override final;
void on_exit() override final;
public:
bool from_breakpoint = true;
bool stop = false;
bool paused = false;
u64 pausedBy;
virtual std::string get_name() const;
virtual void on_stop() override final;
void operator()();
void pause_from(cpu_thread* t);
};
extern u32 g_gdb_debugger_id;
template <>
struct id_manager::on_stop<GDBDebugServer> {
static inline void func(GDBDebugServer* ptr)
{
if (ptr) ptr->on_stop();
}
};
#endif

View file

@ -1091,33 +1091,40 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
const auto cpu = get_current_cpu_thread();
if (rsx::g_access_violation_handler)
{
bool handled = false;
try
{
handled = rsx::g_access_violation_handler(addr, is_writing);
}
catch (std::runtime_error &e)
catch (const std::exception& e)
{
LOG_FATAL(RSX, "g_access_violation_handler(0x%x, %d): %s", addr, is_writing, e.what());
if (cpu)
{
vm::temporary_unlock(*cpu);
cpu->state += cpu_flag::dbg_pause;
cpu->test_state();
return false;
if (cpu->test_stopped())
{
std::terminate();
}
}
return false;
}
if (handled)
{
g_tls_fault_rsx++;
if (cpu)
if (cpu && cpu->test_stopped())
{
cpu->test_state();
std::terminate();
}
return true;
}
}
@ -1160,7 +1167,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
// check if address is RawSPU MMIO register
if (addr - RAW_SPU_BASE_ADDR < (6 * RAW_SPU_OFFSET) && (addr % RAW_SPU_OFFSET) >= RAW_SPU_PROB_OFFSET)
{
auto thread = idm::get<RawSPUThread>((addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET);
auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu((addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
if (!thread)
{
@ -1255,9 +1262,9 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
if (vm::check_addr(addr, std::max<std::size_t>(1, d_size), vm::page_allocated | (is_writing ? vm::page_writable : vm::page_readable)))
{
if (cpu)
if (cpu && cpu->test_stopped())
{
cpu->test_state();
std::terminate();
}
return true;
@ -1321,6 +1328,11 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
LOG_FATAL(MEMORY, "Access violation %s location 0x%x", is_writing ? "writing" : "reading", addr);
cpu->state += cpu_flag::dbg_pause;
cpu->check_state();
if (cpu->test_stopped())
{
std::terminate();
}
}
return true;
@ -1571,53 +1583,6 @@ thread_local DECLARE(thread_ctrl::g_tls_this_thread) = nullptr;
DECLARE(thread_ctrl::g_native_core_layout) { native_core_arrangement::undefined };
// Spawn a native thread running 'task'; 'ctrl' stays alive via the m_self self-reference
void thread_base::start(const std::shared_ptr<thread_base>& ctrl, task_stack task)
{
#ifdef _WIN32
using thread_result = uint;
#else
using thread_result = void*;
#endif
// Thread entry point
const native_entry entry = [](void* arg) -> thread_result
{
// Recover shared_ptr from short-circuited thread_base object pointer
std::shared_ptr<thread_base> ctrl = static_cast<thread_base*>(arg)->m_self;
try
{
ctrl->initialize();
task_stack{std::move(ctrl->m_task)}.invoke();
}
catch (...)
{
// Capture exception
ctrl->finalize(std::current_exception());
finalize();
return 0;
}
// Normal completion: no exception to store
ctrl->finalize(nullptr);
finalize();
return 0;
};
// Create the circular reference which keeps the object alive until finalize()
ctrl->m_self = ctrl;
ctrl->m_task = std::move(task);
#ifdef _WIN32
// _beginthreadex returns 0 on failure
std::uintptr_t thread = _beginthreadex(nullptr, 0, entry, ctrl.get(), 0, nullptr);
verify("thread_ctrl::start" HERE), thread != 0;
#else
pthread_t thread;
verify("thread_ctrl::start" HERE), pthread_create(&thread, nullptr, entry, ctrl.get()) == 0;
#endif
// TODO: this is unsafe and must be duplicated in thread_ctrl::initialize
ctrl->m_thread = (uintptr_t)thread;
}
void thread_base::start(native_entry entry)
{
#ifdef _WIN32
@ -1679,7 +1644,7 @@ void thread_base::initialize()
#endif
}
std::shared_ptr<thread_base> thread_base::finalize(std::exception_ptr eptr) noexcept
bool thread_base::finalize(int) noexcept
{
// Report pending errors
error_code::error_report(0, 0, 0, 0);
@ -1712,17 +1677,13 @@ std::shared_ptr<thread_base> thread_base::finalize(std::exception_ptr eptr) noex
g_tls_fault_rsx,
g_tls_fault_spu);
// Untangle circular reference, set exception
std::unique_lock lock(m_mutex);
// Possibly last reference to the thread object
std::shared_ptr<thread_base> self = std::move(m_self);
m_state = thread_state::finished;
m_exception = eptr;
// Return true if need to delete thread object
const bool result = m_state.exchange(thread_state::finished) == thread_state::detached;
// Signal waiting threads
lock.unlock(), m_jcv.notify_all();
return self;
m_mutex.lock_unlock();
m_jcv.notify_all();
return result;
}
void thread_base::finalize() noexcept
@ -1741,8 +1702,6 @@ bool thread_ctrl::_wait_for(u64 usec)
// Mutex is unlocked at the start and after the waiting
if (u32 sig = _this->m_signal.load())
{
thread_ctrl::test();
if (sig & 1)
{
_this->m_signal &= ~1;
@ -1761,11 +1720,6 @@ bool thread_ctrl::_wait_for(u64 usec)
// Double-check the value
if (u32 sig = _this->m_signal.load())
{
if (sig & 2 && _this->m_exception)
{
_this->_throw();
}
if (sig & 1)
{
_this->m_signal &= ~1;
@ -1780,20 +1734,6 @@ bool thread_ctrl::_wait_for(u64 usec)
return false;
}
// Rethrow the stored remote exception on the current thread.
// Precondition: m_mutex is locked by the caller; it is unlocked here before throwing.
[[noreturn]] void thread_base::_throw()
{
// Steal the stored exception so it cannot be rethrown twice
std::exception_ptr ex = std::exchange(m_exception, std::exception_ptr{});
// Clear both signal bits (wakeup bit 1 and exception bit 2)
m_signal &= ~3;
m_mutex.unlock();
std::rethrow_exception(std::move(ex));
}
// Notify one waiter on the given condition-variable member.
// lock_unlock() presumably acquires and immediately releases m_mutex to order
// this notification against a concurrent waiter — TODO confirm its semantics.
void thread_base::_notify(cond_variable thread_base::* ptr)
{
m_mutex.lock_unlock();
(this->*ptr).notify_one();
}
thread_base::thread_base(std::string_view name)
: m_name(name)
{
@ -1811,22 +1751,6 @@ thread_base::~thread_base()
}
}
// Store (or clear, if ptr is null) a remote exception for this thread
void thread_base::set_exception(std::exception_ptr ptr)
{
std::lock_guard lock(m_mutex);
m_exception = ptr;
if (!m_exception)
{
// Clearing: drop the exception signal bit
m_signal &= ~2;
}
else
{
// Raise the exception signal bit and wake the thread
m_signal |= 2;
m_cond.notify_one();
}
}
void thread_base::join() const
{
if (m_state == thread_state::finished)
@ -1842,33 +1766,13 @@ void thread_base::join() const
}
}
void thread_base::detach()
{
auto self = weak_from_this().lock();
if (!self)
{
LOG_FATAL(GENERAL, "Cannot detach thread '%s'", get_name());
return;
}
if (self->m_state.compare_and_swap_test(thread_state::created, thread_state::detached))
{
std::lock_guard lock(m_mutex);
if (m_state == thread_state::detached)
{
m_self = std::move(self);
}
}
}
void thread_base::notify()
{
if (!(m_signal & 1))
{
m_signal |= 1;
_notify(&thread_base::m_cond);
m_mutex.lock_unlock();
m_cond.notify_one();
}
}
@ -1886,16 +1790,13 @@ u64 thread_base::get_cycles()
{
cycles = static_cast<u64>(thread_time.tv_sec) * 1'000'000'000 + thread_time.tv_nsec;
#endif
// Report 0 the first time this function is called
if (m_cycles == 0)
if (const u64 old_cycles = m_cycles.exchange(cycles))
{
m_cycles = cycles;
return 0;
return cycles - old_cycles;
}
const auto diff_cycles = cycles - m_cycles;
m_cycles = cycles;
return diff_cycles;
// Report 0 the first time this function is called
return 0;
}
else
{
@ -1903,23 +1804,6 @@ u64 thread_base::get_cycles()
}
}
// Check for a pending remote exception on the current thread and rethrow it
void thread_ctrl::test()
{
const auto _this = g_tls_this_thread;
// Bit 2 of m_signal marks a pending exception (set by set_exception)
if (_this->m_signal & 2)
{
_this->m_mutex.lock();
if (_this->m_exception)
{
// _throw() unlocks m_mutex before rethrowing; no unlock needed on this path
_this->_throw();
}
// Exception was cleared concurrently; just release the lock
_this->m_mutex.unlock();
}
}
void thread_ctrl::detect_cpu_layout()
{
if (!g_native_core_layout.compare_and_swap_test(native_core_arrangement::undefined, native_core_arrangement::generic))
@ -2067,45 +1951,3 @@ void thread_ctrl::set_thread_affinity_mask(u16 mask)
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cs);
#endif
}
// Default constructor: the actual thread is started later via start_thread()
old_thread::old_thread()
{
}
// Destructor does not join m_thread; joining happens via on_stop()/join()
old_thread::~old_thread()
{
}
// Fallback thread name derived from the dynamic type; virtual, so subclasses may override
std::string old_thread::get_name() const
{
return fmt::format("('%s') Unnamed Thread", typeid(*this).name());
}
// Spawn the thread that runs on_task(); capturing _this by value keeps this object
// alive for the whole lifetime of the spawned thread.
void old_thread::start_thread(const std::shared_ptr<void>& _this)
{
// Ensure it's not called from the constructor and the correct object is passed
verify("old_thread::start_thread" HERE), _this.get() == this;
// Run thread
thread_ctrl::spawn(m_thread, get_name(), [this, _this]()
{
try
{
LOG_TRACE(GENERAL, "Thread started");
on_spawn();
on_task();
LOG_TRACE(GENERAL, "Thread ended");
}
catch (const std::exception& e)
{
// Log and pause emulation instead of letting the exception escape the thread
LOG_FATAL(GENERAL, "%s thrown: %s", typeid(e).name(), e.what());
Emu.Pause();
}
// Runs whether on_task() returned normally or threw
on_exit();
});
}
// Out-of-line definition; likely serves as the key function anchoring
// task_base's vtable in this translation unit — TODO confirm intent
task_stack::task_base::~task_base()
{
}

View file

@ -3,7 +3,6 @@
#include "types.h"
#include "Atomic.h"
#include <exception>
#include <string>
#include <memory>
#include <string_view>
@ -38,8 +37,8 @@ enum class thread_class : u32
enum class thread_state
{
created, // Initial state
detached, // Set if the thread has been detached successfully (only possible via shared_ptr)
aborting, // Set if the thread has been joined in destructor (mutually exclusive with detached)
detached, // The thread has been detached to destroy its own named_thread object (can be dangerously misused)
aborting, // The thread has been joined in the destructor or explicitly aborted (mutually exclusive with detached)
finished // Final state, always set at the end of thread execution
};
@ -89,84 +88,15 @@ struct thread_on_abort : std::bool_constant<false> {};
template <typename T>
struct thread_on_abort<T, decltype(std::declval<named_thread<T>&>().on_abort())> : std::bool_constant<true> {};
// Detect on_cleanup() static function (should return void)
// Detect on_cleanup() static member function (should return void) (in C++20 can use destroying delete instead)
template <typename T, typename = void>
struct thread_on_cleanup : std::bool_constant<false> {};
template <typename T>
struct thread_on_cleanup<T, decltype(named_thread<T>::on_cleanup(std::declval<named_thread<T>*>()))> : std::bool_constant<true> {};
// Simple list of void() functors, invoked in push order (LIFO stacks of lists)
class task_stack
{
// Intrusive singly-linked node; 'next' owns the remainder of the list
struct task_base
{
std::unique_ptr<task_base> next;
virtual ~task_base();
// Invoke the rest of the list (overridden to run the stored functor first)
virtual void invoke()
{
if (next)
{
next->invoke();
}
}
};
// Node holding a concrete functor of type F
template <typename F>
struct task_type final : task_base
{
std::remove_reference_t<F> func;
task_type(F&& func)
: func(std::forward<F>(func))
{
}
// Run own functor, then the chained tasks
void invoke() final override
{
func();
task_base::invoke();
}
};
// Head of the owned task list (may be null)
std::unique_ptr<task_base> m_stack;
public:
task_stack() = default;
// Construct a single-task stack from any callable
template <typename F>
task_stack(F&& func)
: m_stack(new task_type<F>(std::forward<F>(func)))
{
}
// Prepend all tasks of 'stack' before the current tasks
void push(task_stack stack)
{
// Fix: pushing an empty stack previously dereferenced a null pointer below
if (!stack.m_stack)
{
return;
}
auto _top = stack.m_stack.release();
auto _next = m_stack.release();
m_stack.reset(_top);
// Walk to the tail of the pushed list and chain the old list behind it
while (UNLIKELY(_top->next)) _top = _top->next.get();
_top->next.reset(_next);
}
// Destroy all tasks without invoking them
void reset()
{
m_stack.reset();
}
// Invoke all tasks in order; no-op if empty
void invoke() const
{
if (m_stack)
{
m_stack->invoke();
}
}
};
// Thread base class (TODO: remove shared_ptr, make private base)
class thread_base : public std::enable_shared_from_this<thread_base>
// Thread base class
class thread_base
{
// Native thread entry point function type
#ifdef _WIN32
@ -175,9 +105,6 @@ class thread_base : public std::enable_shared_from_this<thread_base>
using native_entry = void*(*)(void* arg);
#endif
// Self pointer for detached thread
std::shared_ptr<thread_base> m_self;
// Thread handle (platform-specific)
atomic_t<std::uintptr_t> m_thread{0};
@ -196,71 +123,41 @@ class thread_base : public std::enable_shared_from_this<thread_base>
// Thread state
atomic_t<thread_state> m_state = thread_state::created;
// Remotely set or caught exception
std::exception_ptr m_exception;
// Thread initial task
task_stack m_task;
// Thread name
lf_value<std::string> m_name;
// CPU cycles thread has run for
u64 m_cycles{0};
//
atomic_t<u64> m_cycles = 0;
// Start thread
static void start(const std::shared_ptr<thread_base>&, task_stack);
void start(native_entry);
// Called at the thread start
void initialize();
// Called at the thread end, returns moved m_self (may be null)
std::shared_ptr<thread_base> finalize(std::exception_ptr) noexcept;
// Called at the thread end, returns true if needs destruction
bool finalize(int) noexcept;
// Cleanup after possibly deleting the thread instance
static void finalize() noexcept;
// Internal throwing function. Mutex must be locked and will be unlocked.
[[noreturn]] void _throw();
// Internal notification function
void _notify(cond_variable thread_base::*);
friend class thread_ctrl;
template <class Context>
friend class named_thread;
public:
protected:
thread_base(std::string_view name);
~thread_base();
// Get thread name
const std::string& get_name() const
{
return m_name;
}
// Set thread name (not recommended)
void set_name(std::string_view name)
{
m_name.assign(name);
}
public:
// Get CPU cycles since last time this function was called. First call returns 0.
u64 get_cycles();
// Set exception
void set_exception(std::exception_ptr ptr);
// Wait for the thread (it does NOT change thread state, and can be called from multiple threads)
void join() const;
// Make thread to manage a shared_ptr of itself
void detach();
// Notify the thread
void notify();
};
@ -306,25 +203,37 @@ public:
static_cast<thread_base&>(thread).m_name.assign(name);
}
template <typename T>
static u64 get_cycles(named_thread<T>& thread)
{
return static_cast<thread_base&>(thread).get_cycles();
}
template <typename T>
static void notify(named_thread<T>& thread)
{
static_cast<thread_base&>(thread).notify();
}
// Read current state
static inline thread_state state()
{
return g_tls_this_thread->m_state;
}
// Wait once with timeout. Abortable, may throw. May spuriously return false.
// Wait once with timeout. May spuriously return false.
static inline bool wait_for(u64 usec)
{
return _wait_for(usec);
}
// Wait. Abortable, may throw.
// Wait.
static inline void wait()
{
_wait_for(-1);
}
// Wait until pred(). Abortable, may throw.
// Wait until pred().
template <typename F, typename RT = std::invoke_result_t<F>>
static inline RT wait(F&& pred)
{
@ -339,42 +248,12 @@ public:
}
}
// Wait eternally until aborted.
[[noreturn]] static inline void eternalize()
{
while (true)
{
_wait_for(-1);
}
}
// Test exception (may throw).
static void test();
// Get current thread (may be nullptr)
static thread_base* get_current()
{
return g_tls_this_thread;
}
// Create detached named thread
template <typename N, typename F>
static inline void spawn(N&& name, F&& func)
{
auto out = std::make_shared<thread_base>(std::forward<N>(name));
thread_base::start(out, std::forward<F>(func));
}
// Named thread factory
template <typename N, typename F>
static inline void spawn(std::shared_ptr<thread_base>& out, N&& name, F&& func)
{
out = std::make_shared<thread_base>(std::forward<N>(name));
thread_base::start(out, std::forward<F>(func));
}
// Detect layout
static void detect_cpu_layout();
@ -387,22 +266,17 @@ public:
// Sets the preferred affinity mask for this thread
static void set_thread_affinity_mask(u16 mask);
// Spawn a detached named thread
template <typename F>
static inline std::shared_ptr<named_thread<F>> make_shared(std::string_view name, F&& lambda)
static void spawn(std::string_view name, F&& func)
{
return std::make_shared<named_thread<F>>(name, std::forward<F>(lambda));
}
template <typename T, typename... Args>
static inline std::shared_ptr<named_thread<T>> make_shared(std::string_view name, Args&&... args)
{
return std::make_shared<named_thread<T>>(name, std::forward<Args>(args)...);
new named_thread<F>(thread_state::detached, name, std::forward<F>(func));
}
};
// Derived from the callable object Context, possibly a lambda
template <class Context>
class named_thread final : public Context, result_storage_t<Context>, public thread_base
class named_thread final : public Context, result_storage_t<Context>, thread_base
{
using result = result_storage_t<Context>;
using thread = thread_base;
@ -414,7 +288,22 @@ class named_thread final : public Context, result_storage_t<Context>, public thr
static inline void* entry_point(void* arg) try
#endif
{
const auto maybe_last_ptr = static_cast<named_thread*>(static_cast<thread*>(arg))->entry_point();
const auto _this = static_cast<named_thread*>(static_cast<thread*>(arg));
// Perform self-cleanup if necessary
if (_this->entry_point())
{
// Call on_cleanup() static member function if it's available
if constexpr (thread_on_cleanup<Context>())
{
Context::on_cleanup(_this);
}
else
{
delete _this;
}
}
thread::finalize();
return 0;
}
@ -423,7 +312,7 @@ class named_thread final : public Context, result_storage_t<Context>, public thr
catch_all_exceptions();
}
std::shared_ptr<thread> entry_point()
bool entry_point()
{
thread::initialize();
@ -438,7 +327,16 @@ class named_thread final : public Context, result_storage_t<Context>, public thr
new (result::get()) typename result::type(Context::operator()());
}
return thread::finalize(nullptr);
return thread::finalize(0);
}
// Detached thread constructor (used by thread_ctrl::spawn with thread_state::detached)
named_thread(thread_state s, std::string_view name, Context&& f)
: Context(std::forward<Context>(f))
, thread(name)
{
// No other thread can observe m_state yet, so a non-atomic raw write is safe here
thread::m_state.raw() = s;
// Start the native thread; entry_point performs self-cleanup for detached threads
thread::start(&named_thread::entry_point);
}
friend class thread_ctrl;
@ -493,21 +391,23 @@ public:
return thread::m_state.load();
}
// Try to set thread_state::aborting
// Try to abort/detach
named_thread& operator=(thread_state s)
{
if (s != thread_state::aborting)
if (s != thread_state::aborting && s != thread_state::detached)
{
ASSUME(0);
}
// Notify thread if not detached or terminated
if (thread::m_state.compare_and_swap_test(thread_state::created, thread_state::aborting))
if (thread::m_state.compare_and_swap_test(thread_state::created, s))
{
// Call on_abort() method if it's available
if constexpr (thread_on_abort<Context>())
if (s == thread_state::aborting)
{
Context::on_abort();
// Call on_abort() method if it's available
if constexpr (thread_on_abort<Context>())
{
Context::on_abort();
}
}
thread::notify();
@ -528,63 +428,3 @@ public:
}
}
};
// Old named_thread
class old_thread
{
// Pointer to managed resource (shared with actual thread)
std::shared_ptr<thread_base> m_thread;
public:
old_thread();
virtual ~old_thread();
old_thread(const old_thread&) = delete;
old_thread& operator=(const old_thread&) = delete;
// Get thread name
virtual std::string get_name() const;
protected:
// Start thread (cannot be called from the constructor: should throw in such case)
void start_thread(const std::shared_ptr<void>& _this);
// Thread task (called in the thread)
virtual void on_task() = 0;
// Thread finalization (called after on_task)
virtual void on_exit() {}
// Called once upon thread spawn within the thread's own context
virtual void on_spawn() {}
public:
// ID initialization
virtual void on_init(const std::shared_ptr<void>& _this)
{
return start_thread(_this);
}
// ID finalization
virtual void on_stop()
{
m_thread->join();
}
thread_base* get() const
{
return m_thread.get();
}
void join() const
{
return m_thread->join();
}
void notify() const
{
return m_thread->notify();
}
};

View file

@ -21,7 +21,7 @@ bool cond_variable::imp_wait(u32 _old, u64 _timeout) noexcept
verify(HERE), rc == WAIT_TIMEOUT;
// Retire
while (!m_value.fetch_dec_sat())
while (!m_value.try_dec())
{
timeout.QuadPart = 0;

View file

@ -34,7 +34,7 @@ protected:
bool try_wait()
{
return m_value.fetch_dec_sat(0) > 0;
return m_value.try_dec(0);
}
void post(s32 _max)

View file

@ -774,10 +774,8 @@ namespace utils
// If max_count > 1 only id_new is supported
static_assert(std::is_same_v<id_tag, id_new_t> && !std::is_const_v<std::remove_reference_t<Type>>);
// Try to acquire the semaphore (conditional increment)
const uint old_sema = head->m_sema.load();
if (UNLIKELY(old_sema > last || !head->m_sema.compare_and_swap_test(old_sema, old_sema + 1)))
// Try to acquire the semaphore
if (UNLIKELY(!head->m_sema.try_inc(last + 1)))
{
block = nullptr;
}
@ -1225,7 +1223,7 @@ namespace utils
template <typename Type>
std::shared_lock<::notifier> get_free_notifier() const
{
return std::shared_lock{get_head<Type>()->m_free_notifier};
return std::shared_lock(get_head<Type>()->m_free_notifier, std::try_to_lock);
}
};
} // namespace utils