vk: Clean up around vkQueueSubmit handling

- Explicitly declare one version for CB flush and the other for Async flush
- Always flush descriptors on CB flush in case of page fault handling.
  Threads other than the offloader can also enter the method and require the normal flow.
- Fix overlapping interrupt IDs.
- Minor formatting fixes
This commit is contained in:
kd-11 2021-09-28 20:42:38 +03:00 committed by kd-11
parent 3d49976b3c
commit dc8fc9fc79
6 changed files with 41 additions and 30 deletions

View file

@ -87,7 +87,7 @@ namespace rsx
void clear() void clear()
{ {
m_data.store(0); m_data.release(0);
} }
}; };
} }

View file

@ -23,19 +23,8 @@ namespace vk
g_submit_mutex.unlock(); g_submit_mutex.unlock();
} }
void queue_submit(VkQueue queue, const VkSubmitInfo* info, fence* pfence, VkBool32 flush) FORCE_INLINE
{ static void queue_submit_impl(VkQueue queue, const VkSubmitInfo* info, fence* pfence)
if (rsx::get_current_renderer()->is_current_thread())
{
vk::descriptors::flush();
}
if (!flush && g_cfg.video.multithreaded_rsx)
{
auto packet = new submit_packet(queue, pfence, info);
g_fxo->get<rsx::dma_manager>().backend_ctrl(rctrl_queue_submit, packet);
}
else
{ {
acquire_global_submit_lock(); acquire_global_submit_lock();
vkQueueSubmit(queue, 1, info, pfence->handle); vkQueueSubmit(queue, 1, info, pfence->handle);
@ -44,5 +33,27 @@ namespace vk
// Signal fence // Signal fence
pfence->signal_flushed(); pfence->signal_flushed();
} }
void queue_submit(VkQueue queue, const VkSubmitInfo* info, fence* pfence, VkBool32 flush)
{
// Access to this method must be externally synchronized.
// Offloader is guaranteed to never call this for async flushes.
vk::descriptors::flush();
if (!flush && g_cfg.video.multithreaded_rsx)
{
auto packet = new submit_packet(queue, pfence, info);
g_fxo->get<rsx::dma_manager>().backend_ctrl(rctrl_queue_submit, packet);
}
else
{
queue_submit_impl(queue, info, pfence);
}
}
void queue_submit(const vk::submit_packet* packet)
{
// Flush-only version used by asynchronous submit processing (MTRSX)
queue_submit_impl(packet->queue, &packet->submit_info, packet->pfence);
} }
} }

View file

@ -760,9 +760,9 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
} }
bool has_queue_ref = false; bool has_queue_ref = false;
if (!is_current_thread()) if (!is_current_thread()) [[likely]]
{ {
//Always submit primary cb to ensure state consistency (flush pending changes such as image transitions) // Always submit primary cb to ensure state consistency (flush pending changes such as image transitions)
vm::temporary_unlock(); vm::temporary_unlock();
std::lock_guard lock(m_flush_queue_mutex); std::lock_guard lock(m_flush_queue_mutex);
@ -777,13 +777,13 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
rsx_log.error("Fault in uninterruptible code!"); rsx_log.error("Fault in uninterruptible code!");
} }
//Flush primary cb queue to sync pending changes (e.g image transitions!) // Flush primary cb queue to sync pending changes (e.g image transitions!)
flush_command_queue(); flush_command_queue();
} }
if (has_queue_ref) if (has_queue_ref)
{ {
//Wait for the RSX thread to process request if it hasn't already // Wait for the RSX thread to process request if it hasn't already
m_flush_requests.producer_wait(); m_flush_requests.producer_wait();
} }
@ -791,7 +791,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
if (has_queue_ref) if (has_queue_ref)
{ {
//Release RSX thread // Release RSX thread
m_flush_requests.remove_one(); m_flush_requests.remove_one();
} }
} }
@ -2340,8 +2340,8 @@ void VKGSRender::renderctl(u32 request_code, void* args)
{ {
case vk::rctrl_queue_submit: case vk::rctrl_queue_submit:
{ {
auto packet = reinterpret_cast<vk::submit_packet*>(args); const auto packet = reinterpret_cast<vk::submit_packet*>(args);
vk::queue_submit(packet->queue, &packet->submit_info, packet->pfence, VK_TRUE); vk::queue_submit(packet);
free(packet); free(packet);
break; break;
} }

View file

@ -32,19 +32,19 @@ namespace vk
class image; class image;
class instance; class instance;
class render_device; class render_device;
struct submit_packet;
//VkAllocationCallbacks default_callbacks();
enum runtime_state enum runtime_state
{ {
uninterruptible = 1, uninterruptible = 1,
heap_dirty = 2, heap_dirty = 2,
heap_changed = 3 heap_changed = 4,
}; };
const vk::render_device *get_current_renderer(); const vk::render_device *get_current_renderer();
void set_current_renderer(const vk::render_device &device); void set_current_renderer(const vk::render_device &device);
//Compatibility workarounds // Compatibility workarounds
bool emulate_primitive_restart(rsx::primitive_type type); bool emulate_primitive_restart(rsx::primitive_type type);
bool sanitize_fp_values(); bool sanitize_fp_values();
bool fence_reset_disabled(); bool fence_reset_disabled();
@ -54,7 +54,7 @@ namespace vk
// Sync helpers around vkQueueSubmit // Sync helpers around vkQueueSubmit
void acquire_global_submit_lock(); void acquire_global_submit_lock();
void release_global_submit_lock(); void release_global_submit_lock();
void queue_submit(VkQueue queue, const VkSubmitInfo* info, fence* pfence, VkBool32 flush = VK_FALSE); void queue_submit(const vk::submit_packet* packet);
template<class T> template<class T>
T* get_compute_task(); T* get_compute_task();
@ -77,7 +77,7 @@ namespace vk
const std::vector<rsx::subresource_layout>& subresource_layout, int format, bool is_swizzled, u16 layer_count, const std::vector<rsx::subresource_layout>& subresource_layout, int format, bool is_swizzled, u16 layer_count,
VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align, rsx::flags32_t image_setup_flags); VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align, rsx::flags32_t image_setup_flags);
//Other texture management helpers // Other texture management helpers
void copy_image_to_buffer(VkCommandBuffer cmd, const vk::image* src, const vk::buffer* dst, const VkBufferImageCopy& region, bool swap_bytes = false); void copy_image_to_buffer(VkCommandBuffer cmd, const vk::image* src, const vk::buffer* dst, const VkBufferImageCopy& region, bool swap_bytes = false);
void copy_buffer_to_image(VkCommandBuffer cmd, const vk::buffer* src, const vk::image* dst, const VkBufferImageCopy& region); void copy_buffer_to_image(VkCommandBuffer cmd, const vk::buffer* src, const vk::image* dst, const VkBufferImageCopy& region);
u64 calculate_working_buffer_size(u64 base_size, VkImageAspectFlags aspect); u64 calculate_working_buffer_size(u64 base_size, VkImageAspectFlags aspect);

View file

@ -6,7 +6,7 @@
namespace vk namespace vk
{ {
// This queue flushing method to be implemented by the backend as behavior depends on config // This queue flushing method to be implemented by the backend as behavior depends on config
void queue_submit(VkQueue queue, const VkSubmitInfo* info, fence* pfence, VkBool32 flush = VK_FALSE); void queue_submit(VkQueue queue, const VkSubmitInfo* info, fence* pfence, VkBool32 flush);
void command_pool::create(vk::render_device& dev, u32 queue_family_id) void command_pool::create(vk::render_device& dev, u32 queue_family_id)
{ {

View file

@ -39,8 +39,8 @@ namespace vk
public: public:
enum access_type_hint enum access_type_hint
{ {
flush_only, //Only to be submitted/opened/closed via command flush flush_only, // Only to be submitted/opened/closed via command flush
all //Auxiliary, can be submitted/opened/closed at any time all // Auxiliary, can be submitted/opened/closed at any time
} }
access_hint = flush_only; access_hint = flush_only;