vk: Fix descriptor set update and caching model to support skipped updates

Authored by kd-11 on 2025-06-15 22:56:32 +03:00, committed by kd-11
parent 5417d4854d
commit 15791cf94e
3 changed files with 25 additions and 33 deletions
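
For orientation, here is a minimal standalone sketch (simplified placeholder types, not the actual RPCS3 classes) of the caching contract this commit moves to: commit() returns the currently cached descriptor set untouched when no slot has been marked dirty, and only allocates a fresh set when something actually changed.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

// Placeholder handle standing in for VkDescriptorSet.
using set_handle = int;

struct descriptor_table_model
{
    std::optional<set_handle> current_set;
    std::vector<bool> slot_dirty = std::vector<bool>(4, false);
    bool any_dirty = false;
    int next_handle = 1;

    set_handle allocate_set() { return next_handle++; }

    void touch(std::size_t slot)
    {
        slot_dirty[slot] = true;
        any_dirty = true;
    }

    // The "skipped update" path: reuse the cached set when nothing is dirty.
    set_handle commit()
    {
        if (current_set && !any_dirty)
        {
            return *current_set;
        }

        current_set = allocate_set();
        std::fill(slot_dirty.begin(), slot_dirty.end(), false);
        any_dirty = false;
        return *current_set;
    }
};

int main()
{
    descriptor_table_model table;
    std::printf("first bind  -> set %d\n", table.commit()); // allocates set 1
    std::printf("second bind -> set %d\n", table.commit()); // still set 1, update skipped
    table.touch(2);
    std::printf("after dirty -> set %d\n", table.commit()); // new set 2
}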

@@ -364,7 +364,7 @@ namespace vk
break;
}
set.next_descriptor_set(); // Initializes the set layout and allocates first set
set.create_descriptor_set_layout();
set_layouts.push_back(set.m_descriptor_set_layout);
for (const auto& input : set.m_inputs[input_type_push_constant])
@@ -402,8 +402,7 @@ namespace vk
continue;
}
bind_sets[count++] = set.m_descriptor_set.value(); // Current set pointer for binding
set.on_bind(); // Notify bind event. Internally updates handles and triggers flushing.
bind_sets[count++] = set.commit(); // Commit variable changes and return handle to the new set
}
vkCmdBindPipeline(cmd, bind_point, m_pipeline);
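
The caller side of that change, sketched with the same placeholder types: the bind loop now takes whatever handle commit() returns, instead of reading the table's current set and notifying it separately through on_bind().

#include <array>
#include <cstddef>
#include <cstdint>

using set_handle = int;

// Minimal stand-in for a descriptor table; in the real code commit() also
// flushes any pending descriptor updates before returning the handle.
struct table_stub
{
    set_handle handle = 0;
    set_handle commit() { return handle; }
};

// Collect one handle per active table; count and bind_sets would then feed
// the descriptor bind call that follows vkCmdBindPipeline.
template <std::size_t N>
std::uint32_t gather_bind_sets(const std::array<table_stub*, N>& tables,
                               std::array<set_handle, N>& bind_sets)
{
    std::uint32_t count = 0;
    for (auto* t : tables)
    {
        if (!t)
        {
            continue; // inactive table, nothing to bind
        }
        bind_sets[count++] = t->commit();
    }
    return count;
}

int main()
{
    table_stub a{ 11 }, b{ 22 };
    std::array<table_stub*, 3> tables{ &a, nullptr, &b };
    std::array<set_handle, 3> bind_sets{};
    return gather_bind_sets(tables, bind_sets) == 2 ? 0 : 1;
}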
@@ -418,10 +417,17 @@ namespace vk
return;
}
if (m_descriptor_set_layout)
{
vkDestroyDescriptorSetLayout(m_device, m_descriptor_set_layout, nullptr);
m_descriptor_pool->destroy();
}
if (m_descriptor_pool)
{
m_descriptor_pool->destroy();
m_descriptor_pool.reset();
}
m_device = VK_NULL_HANDLE;
}
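
A small sketch of the teardown ordering the hunk above settles on (illustrative stand-ins only): each resource is guarded by its own check, so a table whose layout or pool was never created still destroys cleanly, and the pool pointer is dropped only after its contents are released.

#include <memory>

struct pool_stub
{
    void destroy() {} // would free the pool and any sets allocated from it
};

struct table_teardown_model
{
    bool layout_created = false;     // stands in for m_descriptor_set_layout
    std::unique_ptr<pool_stub> pool; // stands in for m_descriptor_pool

    void destroy()
    {
        if (layout_created)
        {
            // vkDestroyDescriptorSetLayout(...) in the real code
            layout_created = false;
        }

        if (pool)
        {
            pool->destroy(); // release pool-owned resources first
            pool.reset();    // then drop the owning pointer
        }
    }
};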
@@ -451,26 +457,24 @@ namespace vk
{
if (!m_descriptor_pool)
{
create_descriptor_set_layout();
create_descriptor_pool();
}
return m_descriptor_pool->allocate(m_descriptor_set_layout);
}
void descriptor_table_t::next_descriptor_set()
VkDescriptorSet descriptor_table_t::commit()
{
if (!m_descriptor_set)
{
m_descriptor_set = allocate_descriptor_set();
m_any_descriptors_dirty = true;
std::fill(m_descriptors_dirty.begin(), m_descriptors_dirty.end(), false);
return;
}
// Check if we need to actually open a new set
if (!m_any_descriptors_dirty)
{
return;
return m_descriptor_set.value();
}
auto push_descriptor_slot = [this](unsigned idx)
@@ -500,6 +504,7 @@ namespace vk
m_copy_cmds.clear();
rsx::flags32_t type_mask = 0u;
m_descriptor_set = allocate_descriptor_set();
for (unsigned i = 0; i < m_descriptor_slots.size(); ++i)
{
@@ -526,8 +531,10 @@ namespace vk
}
m_descriptor_set.push(m_copy_cmds, type_mask); // Write previous state
m_descriptor_set = allocate_descriptor_set();
m_descriptor_set.on_bind();
m_any_descriptors_dirty = false;
return m_descriptor_set.value();
}
void descriptor_table_t::create_descriptor_set_layout()
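
When commit() does find dirty slots, the hunk above appears to build the new set from two lists: copy commands that carry unchanged slots over from the previous set, and write commands for the slots that changed. A rough sketch of that split (an interpretation of the diff, with illustrative types in place of VkCopyDescriptorSet and VkWriteDescriptorSet):

#include <cstddef>
#include <vector>

struct copy_cmd  { std::size_t slot; }; // slot carried over from the previous set
struct write_cmd { std::size_t slot; }; // slot rewritten with fresh data

struct pending_update
{
    std::vector<copy_cmd>  copies;
    std::vector<write_cmd> writes;
};

// Partition the slots of a newly allocated set: dirty slots get writes,
// everything else is copied forward so the new set ends up complete.
pending_update build_update(const std::vector<bool>& slot_dirty, bool have_previous_set)
{
    pending_update out;
    for (std::size_t i = 0; i < slot_dirty.size(); ++i)
    {
        if (slot_dirty[i])
        {
            out.writes.push_back({ i });
        }
        else if (have_previous_set)
        {
            out.copies.push_back({ i });
        }
    }
    return out;
}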

@@ -136,13 +136,7 @@ namespace vk
void create_descriptor_pool();
VkDescriptorSet allocate_descriptor_set();
void next_descriptor_set();
inline void on_bind()
{
next_descriptor_set(); // Enqueue changes and update pointers
m_descriptor_set.on_bind(); // Notify async queue to flush any pending changes
}
VkDescriptorSet commit();
template <typename T>
inline void notify_descriptor_slot_updated(u32 slot, const T& data)
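
The notify_descriptor_slot_updated template whose signature closes this hunk is not shown in the diff; below is a hypothetical sketch of the dirty bookkeeping it would have to perform for commit() to work. Field names mirror the diff, the body is a guess, and the real version presumably also stores the new descriptor data for the slot.

#include <array>

struct dirty_tracker
{
    std::array<bool, 16> descriptors_dirty{}; // per-slot flags, cf. m_descriptors_dirty
    bool any_descriptors_dirty = false;       // cf. m_any_descriptors_dirty

    template <typename T>
    void notify_descriptor_slot_updated(unsigned slot, const T& /*data*/)
    {
        // Mark the slot so the next commit() knows it must open and update a new set.
        descriptors_dirty[slot] = true;
        any_descriptors_dirty = true;
    }
};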

@@ -411,14 +411,10 @@ namespace vk
if (m_pending_copies.empty()) [[likely]]
{
m_pending_copies = std::move(copy_cmd);
return;
}
else
{
const auto old_size = m_pending_copies.size();
const auto new_size = copy_cmd.size() + old_size;
m_pending_copies.resize(new_size);
std::copy(copy_cmd.begin(), copy_cmd.end(), m_pending_copies.begin() + old_size);
}
m_pending_copies += copy_cmd;
}
void descriptor_set::push(rsx::simple_array<VkWriteDescriptorSet>& write_cmds, u32 type_mask)
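
This hunk and the following one replace a manual resize-plus-std::copy append with a single +=; a minimal equivalence sketch, assuming rsx::simple_array provides an appending operator+= along these lines:

#include <utility>
#include <vector>

// Toy container demonstrating the appending operator+= the new code relies on.
template <typename T>
struct growable
{
    std::vector<T> data;

    bool empty() const { return data.empty(); }

    growable& operator+=(const growable& other)
    {
        // Same effect as the removed resize() + std::copy() sequence.
        data.insert(data.end(), other.data.begin(), other.data.end());
        return *this;
    }
};

// Mirrors the push() pattern in the diff: take the incoming batch wholesale
// when nothing is queued, otherwise append it to the pending list.
template <typename T>
void merge_pending(growable<T>& pending, growable<T>& incoming)
{
    if (pending.empty())
    {
        pending = std::move(incoming);
        return;
    }

    pending += incoming;
}

Keeping the move for the empty fast path preserves the original zero-copy behaviour; the += only pays the copy cost when there genuinely is something queued to merge into.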
@@ -429,15 +425,10 @@ namespace vk
if (m_pending_writes.empty()) [[unlikely]]
{
m_pending_writes = std::move(write_cmds);
return;
}
else
#endif
{
const auto old_size = m_pending_writes.size();
const auto new_size = write_cmds.size() + old_size;
m_pending_writes.resize(new_size);
std::copy(write_cmds.begin(), write_cmds.end(), m_pending_writes.begin() + old_size);
}
m_pending_writes += write_cmds;
}
void descriptor_set::push(const descriptor_set_dynamic_offset_t& offset)