vk: Lazy register/deregister of hot data

kd-11 authored 2025-06-13 13:15:22 +03:00, committed by kd-11
parent 93e6aa6310
commit 2ae9753d79
5 changed files with 40 additions and 24 deletions
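In short: descriptor sets no longer register with the dispatch_manager eagerly at creation time and unlink themselves one by one later. Instead, a set enqueues itself from its bind path only while it has deferrable writes pending ("hot data"), and flush_all() drains and empties the whole list in one pass. A minimal single-threaded sketch of that pattern (illustrative names, not RPCS3 code):

#include <vector>

struct hot_set
{
	bool pending = false;
	void flush() { pending = false; } // stand-in for applying queued descriptor writes
};

std::vector<hot_set*> g_monitored;

// Lazy registration: only sets that actually have work queued are tracked,
// and only from the moment they are bound.
void on_bind(hot_set& set)
{
	if (set.pending)
	{
		g_monitored.push_back(&set); // bound twice before a drain -> listed twice; flushing twice is harmless here
	}
}

// Draining doubles as deregistration: the list is cleared wholesale and is
// rebuilt from subsequent binds, so no per-set unlink is needed.
void flush_all()
{
	for (auto* set : g_monitored)
	{
		set->flush();
	}
	g_monitored.clear();
}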


@@ -250,6 +250,11 @@ namespace vk
 	void cs_shuffle_base::set_parameters(const vk::command_buffer& cmd, const u32* params, u8 count)
 	{
+		if (!m_program)
+		{
+			load_program(cmd);
+		}
+
 		ensure(use_push_constants);
 		vkCmdPushConstants(cmd, m_program->layout(), VK_SHADER_STAGE_COMPUTE_BIT, 0, count * 4, params);
 	}


@@ -451,6 +451,11 @@ namespace vk
 		void set_parameters(const vk::command_buffer& cmd)
 		{
+			if (!m_program)
+			{
+				load_program(cmd);
+			}
+
 			vkCmdPushConstants(cmd, m_program->layout(), VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, params.data);
 		}
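Both compute hunks above add the same guard: set_parameters() may now run before anything has loaded the shader program, and vkCmdPushConstants dereferences m_program->layout(), so the program has to be created lazily on first use. A compilable sketch with stub types (program_stub and compute_task_sketch are invented for illustration; load_program/m_program mirror the names in the diff):

#include <vulkan/vulkan.h>
#include <cstdint>

struct program_stub
{
	VkPipelineLayout layout() const { return m_layout; }
	VkPipelineLayout m_layout{};
};

struct compute_task_sketch
{
	program_stub* m_program = nullptr;

	void load_program(VkCommandBuffer /*cmd*/)
	{
		static program_stub prog; // placeholder: the real code builds the compute pipeline here
		m_program = &prog;
	}

	void set_parameters(VkCommandBuffer cmd, const uint32_t* params, uint8_t count)
	{
		if (!m_program) // first use on this path: create the pipeline and its layout
		{
			load_program(cmd);
		}

		// Requires a valid VkPipelineLayout, hence the guard above.
		vkCmdPushConstants(cmd, m_program->layout(), VK_SHADER_STAGE_COMPUTE_BIT, 0, count * 4u, params);
	}
};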


@@ -402,7 +402,8 @@ namespace vk
 				break;
 			}

 			bind_sets[count++] = set.m_descriptor_set.value(); // Current set pointer for binding
+			set.m_descriptor_set.on_bind();                    // Notify async queue
 			set.next_descriptor_set();                         // Flush queue and update pointers
 		}


@@ -14,44 +14,37 @@ namespace vk
 	public:
 		inline void flush_all()
 		{
+			reader_lock lock(m_notifications_lock);
+
 			for (auto& set : m_notification_list)
 			{
 				set->flush();
 			}
+
+			m_notification_list.clear();
 		}

 		void register_(descriptor_set* set)
 		{
-			// Rare event, upon creation of a new set tracker.
-			// Check for spurious 'new' events when the aux context is taking over
-			for (const auto& set_ : m_notification_list)
-			{
-				if (set_ == set) return;
-			}
-
+			std::lock_guard lock(m_notifications_lock);
+
 			m_notification_list.push_back(set);
-			rsx_log.warning("[descriptor_manager::register] Now monitoring %u descriptor sets", m_notification_list.size());
+			// rsx_log.notice("[descriptor_manager::register] Now monitoring %u descriptor sets", m_notification_list.size());
 		}

 		void deregister(descriptor_set* set)
 		{
-			for (auto it = m_notification_list.begin(); it != m_notification_list.end(); ++it)
-			{
-				if (*it == set)
-				{
-					*it = m_notification_list.back();
-					m_notification_list.pop_back();
-					break;
-				}
-			}
-
-			rsx_log.warning("[descriptor_manager::deregister] Now monitoring %u descriptor sets", m_notification_list.size());
+			std::lock_guard lock(m_notifications_lock);
+
+			m_notification_list.erase_if(FN(x == set));
+			// rsx_log.notice("[descriptor_manager::deregister] Now monitoring %u descriptor sets", m_notification_list.size());
 		}

 		dispatch_manager() = default;

 	private:
 		rsx::simple_array<descriptor_set*> m_notification_list;
+		shared_mutex m_notifications_lock;

 		dispatch_manager(const dispatch_manager&) = delete;
 		dispatch_manager& operator = (const dispatch_manager&) = delete;
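The list is now guarded by a mutex: register_() and deregister() take it exclusively while flush_all() takes a reader lock, and the manual swap-and-pop search in deregister() collapses into erase_if. FN(x == set) is RPCS3's lambda-shorthand macro, roughly [&](auto&& x) { return x == set; }. A sketch of the same discipline with standard types, assuming (as the reader-locked clear() implies) that only one thread ever drains the list:

#include <mutex>
#include <shared_mutex>
#include <vector>

struct tracked_set
{
	void flush() {} // stand-in for applying queued descriptor writes
};

class dispatch_manager_sketch
{
public:
	// Writers take the mutex exclusively.
	void register_(tracked_set* set)
	{
		std::lock_guard lock(m_lock);
		m_list.push_back(set);
	}

	void deregister(tracked_set* set)
	{
		std::lock_guard lock(m_lock);
		std::erase_if(m_list, [set](auto* x) { return x == set; }); // C++20
	}

	// The drain only takes a shared lock; clearing under it is safe only if a
	// single thread calls flush_all(), so two drains can never race each other.
	void flush_all()
	{
		std::shared_lock lock(m_lock);
		for (auto* set : m_list)
		{
			set->flush();
		}
		m_list.clear();
	}

private:
	std::vector<tracked_set*> m_list;
	std::shared_mutex m_lock;
};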
@@ -295,11 +288,6 @@ namespace vk
 			m_in_use = true;
 			m_update_after_bind_mask = g_render_device->get_descriptor_update_after_bind_support();
-
-			if (m_update_after_bind_mask)
-			{
-				g_fxo->get<descriptors::dispatch_manager>().register_(this);
-			}
 		}
 		else if (m_push_type_mask & ~m_update_after_bind_mask)
 		{
@@ -450,13 +438,29 @@ namespace vk
 		m_dynamic_offsets[offset.location] = offset.value;
 	}

-	void descriptor_set::bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout)
+	void descriptor_set::on_bind()
 	{
+		if (!m_push_type_mask)
+		{
+			return;
+		}
+
 		// We have queued writes
-		if (m_push_type_mask & ~m_update_after_bind_mask)
+		if ((m_push_type_mask & ~m_update_after_bind_mask) ||
+			(m_pending_writes.size() >= max_cache_size))
 		{
 			flush();
 		}
+		else if (m_update_after_bind_mask)
+		{
+			// Register for async flush
+			g_fxo->get<descriptors::dispatch_manager>().register_(this);
+		}
+	}
+
+	void descriptor_set::bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout)
+	{
+		on_bind();

 		vkCmdBindDescriptorSets(cmd, bind_point, layout, 0, 1, &m_handle, ::size32(m_dynamic_offsets), m_dynamic_offsets.data());
 	}
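The new on_bind() is the decision point for the lazy path: queued writes to bindings without update-after-bind support must land before the set is used, so they force an immediate flush (as does an oversized write cache), while update-after-bind writes may legally be applied after binding, so the set merely registers itself for a batched flush later. A standalone sketch of that decision (stub functions; the mask/cache parameters mirror the members used in the diff):

#include <cstddef>
#include <cstdint>

void flush() {}                    // stub: applies the queued descriptor writes now
void register_for_async_flush() {} // stub: hands the set to the dispatch_manager

void on_bind_sketch(uint32_t push_type_mask, uint32_t update_after_bind_mask,
                    std::size_t pending_writes, std::size_t max_cache_size)
{
	if (!push_type_mask)
	{
		return; // nothing queued, nothing to decide
	}

	const bool has_immediate_writes = (push_type_mask & ~update_after_bind_mask) != 0;
	const bool cache_full = pending_writes >= max_cache_size;

	if (has_immediate_writes || cache_full)
	{
		flush(); // cannot (or should not) defer any longer
	}
	else if (update_after_bind_mask)
	{
		register_for_async_flush(); // the lazy registration this commit adds
	}
}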


@@ -106,6 +106,7 @@ namespace vk
 		void push(rsx::simple_array<VkWriteDescriptorSet>& write_cmds, u32 type_mask = umax);
 		void push(const descriptor_set_dynamic_offset_t& offset);

+		void on_bind();
 		void bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout);
 		void flush();