layers: Rename LastBound::PER_SET
spencer-lunarg committed Nov 25, 2024
1 parent 75ffbf4 commit c82950c
Showing 10 changed files with 137 additions and 136 deletions.
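
The diffs shown here are a mechanical rename of the per-set descriptor bookkeeping in LastBound: per_set becomes ds_slots, bound_descriptor_set becomes ds_state, dynamicOffsets becomes dynamic_offsets, and bound_descriptor_buffer becomes descriptor_buffer_binding. As a reading aid, here is a minimal sketch of the shape implied by the new names; the field list and types are inferred from the hunks below, not copied from the actual state-tracker headers.

    #include <vulkan/vulkan.h>
    #include <cstdint>
    #include <memory>
    #include <optional>
    #include <vector>

    namespace vvl {
    // Stand-in with only the members the sketches on this page touch; the real class is far richer.
    class DescriptorSet {
      public:
        VkDescriptorSet VkHandle() const { return handle_; }
        uint64_t GetChangeCount() const { return change_count_; }

      private:
        VkDescriptorSet handle_ = VK_NULL_HANDLE;
        uint64_t change_count_ = 0;
    };
    }  // namespace vvl

    struct DescriptorBufferBinding {  // assumed shape of the descriptor-buffer binding entry
        uint32_t index = 0;
        uint64_t offset = 0;
    };

    // One entry of LastBound::ds_slots (formerly LastBound::per_set)
    struct DescriptorSetSlot {
        std::shared_ptr<vvl::DescriptorSet> ds_state;                      // was bound_descriptor_set
        std::optional<DescriptorBufferBinding> descriptor_buffer_binding;  // was bound_descriptor_buffer
        std::vector<uint32_t> dynamic_offsets;                             // was dynamicOffsets
        // Cache used to skip redundant draw-time revalidation
        const vvl::DescriptorSet *validated_set = nullptr;
        uint64_t validated_set_change_count = 0;
        uint64_t validated_set_image_layout_change_count = 0;
    };

    struct LastBound {
        std::vector<DescriptorSetSlot> ds_slots;  // was per_set
        // ... pipeline / shader-object state elided ...
    };
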
6 changes: 3 additions & 3 deletions layers/best_practices/bp_drawdispatch.cpp
@@ -594,9 +594,9 @@ void BestPractices::ValidateBoundDescriptorSets(bp_state::CommandBuffer& cb_stat
     auto lvl_bind_point = ConvertToLvlBindPoint(bind_point);
     auto& last_bound = cb_state.lastBound[lvl_bind_point];

-    for (const auto& descriptor_set : last_bound.per_set) {
-        if (!descriptor_set.bound_descriptor_set) continue;
-        for (const auto& binding : *descriptor_set.bound_descriptor_set) {
+    for (const auto& ds_slot : last_bound.ds_slots) {
+        if (!ds_slot.ds_state) continue;
+        for (const auto& binding : *ds_slot.ds_state) {
             // For bindless scenarios, we should not attempt to track descriptor set state.
             // It is highly uncertain which resources are actually bound.
             // Resources which are written to such a descriptor should be marked as indeterminate w.r.t. state.
58 changes: 29 additions & 29 deletions layers/core_checks/cc_drawdispatch.cpp
@@ -1305,26 +1305,26 @@ bool CoreChecks::ValidateActionStateDescriptorsPipeline(const LastBound &last_bo
     bool skip = false;
     const vvl::CommandBuffer &cb_state = last_bound_state.cb_state;

-    for (const auto &ds : last_bound_state.per_set) {
+    for (const auto &ds_slot : last_bound_state.ds_slots) {
         // TODO - This currently implicitly is checking for VK_PIPELINE_CREATE_DESCRIPTOR_BUFFER_BIT_EXT being set
         if (pipeline.descriptor_buffer_mode) {
-            if (ds.bound_descriptor_set && !ds.bound_descriptor_set->IsPushDescriptor()) {
-                const LogObjectList objlist(cb_state.Handle(), pipeline.Handle(), ds.bound_descriptor_set->Handle());
+            if (ds_slot.ds_state && !ds_slot.ds_state->IsPushDescriptor()) {
+                const LogObjectList objlist(cb_state.Handle(), pipeline.Handle(), ds_slot.ds_state->Handle());
                 skip |= LogError(vuid.descriptor_buffer_bit_not_set_08115, objlist, vuid.loc(),
                                  "pipeline bound to %s requires a descriptor buffer (because it was created with "
                                  "VK_PIPELINE_CREATE_DESCRIPTOR_BUFFER_BIT_EXT), but has a bound VkDescriptorSet (%s)",
-                                 string_VkPipelineBindPoint(bind_point), FormatHandle(ds.bound_descriptor_set->Handle()).c_str());
+                                 string_VkPipelineBindPoint(bind_point), FormatHandle(ds_slot.ds_state->Handle()).c_str());
                 break;
             }

-        } else if (ds.bound_descriptor_buffer.has_value()) {
+        } else if (ds_slot.descriptor_buffer_binding.has_value()) {
             const LogObjectList objlist(cb_state.Handle(), pipeline.Handle());
             skip |= LogError(vuid.descriptor_buffer_set_offset_missing_08117, objlist, vuid.loc(),
                              "pipeline bound to %s requires a VkDescriptorSet (because it was not created with "
                              "VK_PIPELINE_CREATE_DESCRIPTOR_BUFFER_BIT_EXT), but has a bound descriptor buffer"
                              " (index=%" PRIu32 " offset=%" PRIu64 ")",
-                             string_VkPipelineBindPoint(bind_point), ds.bound_descriptor_buffer->index,
-                             ds.bound_descriptor_buffer->offset);
+                             string_VkPipelineBindPoint(bind_point), ds_slot.descriptor_buffer_binding->index,
+                             ds_slot.descriptor_buffer_binding->offset);
             break;
         }
     }
@@ -1362,16 +1362,16 @@ bool CoreChecks::ValidateActionStateDescriptorsPipeline(const LastBound &last_bo
     for (const auto &set_binding_pair : pipeline.active_slots) {
         std::string error_string;
         uint32_t set_index = set_binding_pair.first;
-        const auto set_info = last_bound_state.per_set[set_index];
-        if (!set_info.bound_descriptor_set) {
+        const auto ds_slot = last_bound_state.ds_slots[set_index];
+        if (!ds_slot.ds_state) {
             skip |= LogError(vuid.compatible_pipeline_08600, cb_state.GetObjectList(bind_point), vuid.loc(),
                              "%s uses set #%" PRIu32
                              " but that set is not bound. (Need to use a command like vkCmdBindDescriptorSets to bind the set)",
                              FormatHandle(pipeline).c_str(), set_index);
-        } else if (!VerifySetLayoutCompatibility(*set_info.bound_descriptor_set, pipeline_layout->set_layouts,
-                                                 pipeline_layout->Handle(), set_index, error_string)) {
+        } else if (!VerifySetLayoutCompatibility(*ds_slot.ds_state, pipeline_layout->set_layouts, pipeline_layout->Handle(),
+                                                 set_index, error_string)) {
             // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
-            VkDescriptorSet set_handle = set_info.bound_descriptor_set->VkHandle();
+            VkDescriptorSet set_handle = ds_slot.ds_state->VkHandle();
             LogObjectList objlist = cb_state.GetObjectList(bind_point);
             objlist.add(set_handle);
             objlist.add(pipeline_layout->Handle());
@@ -1381,7 +1381,7 @@ bool CoreChecks::ValidateActionStateDescriptorsPipeline(const LastBound &last_bo
                              error_string.c_str());
         } else { // Valid set is bound and layout compatible, validate that it's updated
             // Pull the set node
-            const auto *descriptor_set = set_info.bound_descriptor_set.get();
+            const auto *descriptor_set = ds_slot.ds_state.get();
             ASSERT_AND_CONTINUE(descriptor_set);
             // Validate the draw-time state for this descriptor set
             // We can skip validating the descriptor set if "nothing" has changed since the last validation.
@@ -1391,15 +1391,15 @@ bool CoreChecks::ValidateActionStateDescriptorsPipeline(const LastBound &last_bo
             // binding_req_map which could potentially be expensive.
             bool need_validate =
                 // Revalidate each time if the set has dynamic offsets
-                set_info.dynamicOffsets.size() > 0 ||
+                ds_slot.dynamic_offsets.size() > 0 ||
                 // Revalidate if descriptor set (or contents) has changed
-                set_info.validated_set != descriptor_set ||
-                set_info.validated_set_change_count != descriptor_set->GetChangeCount() ||
+                ds_slot.validated_set != descriptor_set ||
+                ds_slot.validated_set_change_count != descriptor_set->GetChangeCount() ||
                 (!disabled[image_layout_validation] &&
-                 set_info.validated_set_image_layout_change_count != cb_state.image_layout_change_count);
+                 ds_slot.validated_set_image_layout_change_count != cb_state.image_layout_change_count);

             if (need_validate) {
-                skip |= ValidateDrawState(*descriptor_set, set_index, set_binding_pair.second, set_info.dynamicOffsets,
+                skip |= ValidateDrawState(*descriptor_set, set_index, set_binding_pair.second, ds_slot.dynamic_offsets,
                                           cb_state, vuid.loc(), vuid);
             }
         }
@@ -1430,40 +1430,40 @@ bool CoreChecks::ValidateActionStateDescriptorsShaderObject(const LastBound &las
     for (const auto &set_binding_pair : shader_state->active_slots) {
         std::string error_string;
         uint32_t set_index = set_binding_pair.first;
-        const auto set_info = last_bound_state.per_set[set_index];
-        if (!set_info.bound_descriptor_set) {
+        const auto ds_slot = last_bound_state.ds_slots[set_index];
+        if (!ds_slot.ds_state) {
             const LogObjectList objlist(cb_state.Handle(), shader_state->Handle());
             skip |= LogError(vuid.compatible_pipeline_08600, objlist, vuid.loc(),
                              "%s uses set #%" PRIu32 " but that set is not bound.",
                              FormatHandle(shader_state->Handle()).c_str(), set_index);
-        } else if (!VerifySetLayoutCompatibility(*set_info.bound_descriptor_set, shader_state->set_layouts,
-                                                 shader_state->Handle(), set_index, error_string)) {
+        } else if (!VerifySetLayoutCompatibility(*ds_slot.ds_state, shader_state->set_layouts, shader_state->Handle(),
+                                                 set_index, error_string)) {
             // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
-            VkDescriptorSet set_handle = set_info.bound_descriptor_set->VkHandle();
+            VkDescriptorSet set_handle = ds_slot.ds_state->VkHandle();
             const LogObjectList objlist(cb_state.Handle(), set_handle, shader_state->Handle());
             skip |= LogError(vuid.compatible_pipeline_08600, objlist, vuid.loc(),
                              "%s bound as set #%" PRIu32 " is not compatible with overlapping %s due to: %s",
                              FormatHandle(set_handle).c_str(), set_index, FormatHandle(shader_state->Handle()).c_str(),
                              error_string.c_str());
         } else { // Valid set is bound and layout compatible, validate that it's updated
             // Pull the set node
-            const auto *descriptor_set = set_info.bound_descriptor_set.get();
+            const auto *descriptor_set = ds_slot.ds_state.get();
             ASSERT_AND_CONTINUE(descriptor_set);
             // Validate the draw-time state for this descriptor set
             // We can skip validating the descriptor set if "nothing" has changed since the last validation.
             // Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are
             // any dynamic descriptors, always revalidate rather than caching the values.
             bool need_validate =
                 // Revalidate each time if the set has dynamic offsets
-                set_info.dynamicOffsets.size() > 0 ||
+                ds_slot.dynamic_offsets.size() > 0 ||
                 // Revalidate if descriptor set (or contents) has changed
-                set_info.validated_set != descriptor_set ||
-                set_info.validated_set_change_count != descriptor_set->GetChangeCount() ||
+                ds_slot.validated_set != descriptor_set ||
+                ds_slot.validated_set_change_count != descriptor_set->GetChangeCount() ||
                 (!disabled[image_layout_validation] &&
-                 set_info.validated_set_image_layout_change_count != cb_state.image_layout_change_count);
+                 ds_slot.validated_set_image_layout_change_count != cb_state.image_layout_change_count);

             if (need_validate) {
-                skip |= ValidateDrawState(*descriptor_set, set_index, set_binding_pair.second, set_info.dynamicOffsets,
+                skip |= ValidateDrawState(*descriptor_set, set_index, set_binding_pair.second, ds_slot.dynamic_offsets,
                                           cb_state, vuid.loc(), vuid);
             }
         }
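
Both need_validate blocks above implement the same caching rule: skip draw-time descriptor validation when nothing relevant has changed since the last check. Expressed as a standalone helper against the sketch types from the top of the page (the layers keep this logic inline; the function below is illustrative only):

    // Sketch only: mirrors the need_validate expression after the rename.
    bool NeedsDrawTimeRevalidation(const DescriptorSetSlot &ds_slot, const vvl::DescriptorSet *descriptor_set,
                                   uint64_t cb_image_layout_change_count, bool image_layout_validation_enabled) {
        return !ds_slot.dynamic_offsets.empty() ||                       // dynamic offsets: always revalidate
               ds_slot.validated_set != descriptor_set ||                // a different set object is bound now
               ds_slot.validated_set_change_count != descriptor_set->GetChangeCount() ||  // set contents were updated
               (image_layout_validation_enabled &&
                ds_slot.validated_set_image_layout_change_count != cb_image_layout_change_count);
    }
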
8 changes: 4 additions & 4 deletions layers/gpu/cmd_validation/gpuav_cmd_validation_common.cpp
@@ -63,15 +63,15 @@ void RestorablePipelineState::Create(vvl::CommandBuffer &cb_state, VkPipelineBin

     push_constants_data_ = cb_state.push_constant_data_chunks;

-    descriptor_sets_.reserve(last_bound.per_set.size());
-    for (std::size_t set_i = 0; set_i < last_bound.per_set.size(); set_i++) {
-        const auto &bound_descriptor_set = last_bound.per_set[set_i].bound_descriptor_set;
+    descriptor_sets_.reserve(last_bound.ds_slots.size());
+    for (std::size_t set_i = 0; set_i < last_bound.ds_slots.size(); set_i++) {
+        const auto &bound_descriptor_set = last_bound.ds_slots[set_i].ds_state;
         if (bound_descriptor_set) {
             descriptor_sets_.emplace_back(bound_descriptor_set->VkHandle(), static_cast<uint32_t>(set_i));
             if (bound_descriptor_set->IsPushDescriptor()) {
                 push_descriptor_set_index_ = static_cast<uint32_t>(set_i);
             }
-            dynamic_offsets_.push_back(last_bound.per_set[set_i].dynamicOffsets);
+            dynamic_offsets_.push_back(last_bound.ds_slots[set_i].dynamic_offsets);
         }
     }

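
RestorablePipelineState::Create above walks ds_slots once to remember which set is bound in each slot and with which dynamic offsets, so GPU-AV can restore the application's bindings after injecting its own descriptor work. A stripped-down version of that capture step, again built on the sketch types above (RecordedSetBinding is an illustrative type, not one from the layers):

    struct RecordedSetBinding {                // hypothetical snapshot of one descriptor set slot
        VkDescriptorSet set = VK_NULL_HANDLE;  // VK_NULL_HANDLE means the slot had nothing bound
        std::vector<uint32_t> dynamic_offsets;
    };

    // Sketch only: record every bound set and its dynamic offsets, keeping empty entries for gaps
    // so the vector index still equals the set number at restore time.
    std::vector<RecordedSetBinding> SnapshotBoundSets(const std::vector<DescriptorSetSlot> &ds_slots) {
        std::vector<RecordedSetBinding> recorded(ds_slots.size());
        for (std::size_t set_i = 0; set_i < ds_slots.size(); ++set_i) {
            if (!ds_slots[set_i].ds_state) continue;  // descriptor set bindings can have gaps
            recorded[set_i].set = ds_slots[set_i].ds_state->VkHandle();
            recorded[set_i].dynamic_offsets = ds_slots[set_i].dynamic_offsets;
        }
        return recorded;
    }
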
29 changes: 14 additions & 15 deletions layers/gpu/descriptor_validation/gpuav_descriptor_validation.cpp
@@ -46,11 +46,11 @@ void PreCallActionCommandPostProcess(Validator &gpuav, CommandBuffer &cb_state,
     const uint32_t descriptor_command_binding_index = (uint32_t)cb_state.descriptor_command_bindings.size() - 1;
     auto &action_command_snapshot = cb_state.action_command_snapshots.emplace_back(descriptor_command_binding_index);

-    const size_t number_of_sets = last_bound.per_set.size();
+    const size_t number_of_sets = last_bound.ds_slots.size();
     action_command_snapshot.binding_req_maps.reserve(number_of_sets);

     for (uint32_t i = 0; i < number_of_sets; i++) {
-        if (!last_bound.per_set[i].bound_descriptor_set) {
+        if (!last_bound.ds_slots[i].ds_state) {
             continue; // can have gaps in descriptor sets
         }

@@ -89,14 +89,14 @@ void UpdateBoundDescriptorsPostProcess(Validator &gpuav, CommandBuffer &cb_state

     cb_state.post_process_buffer = descriptor_command_binding.post_process_ssbo_block.Buffer();

-    const size_t number_of_sets = last_bound.per_set.size();
+    const size_t number_of_sets = last_bound.ds_slots.size();
     for (uint32_t i = 0; i < number_of_sets; i++) {
-        const auto &last_bound_set = last_bound.per_set[i];
-        if (!last_bound_set.bound_descriptor_set) {
+        const auto &ds_slot = last_bound.ds_slots[i];
+        if (!ds_slot.ds_state) {
             continue; // can have gaps in descriptor sets
         }

-        auto bound_descriptor_set = static_cast<DescriptorSet *>(last_bound_set.bound_descriptor_set.get());
+        auto bound_descriptor_set = static_cast<DescriptorSet *>(ds_slot.ds_state.get());
         ssbo_block_ptr->descriptor_index_post_process_buffers[i] = bound_descriptor_set->GetPostProcessBuffer(gpuav, loc);
     }

@@ -123,14 +123,14 @@ void UpdateBoundDescriptorsDescriptorChecks(Validator &gpuav, CommandBuffer &cb_

     ssbo_block_ptr->initialized_status = gpuav.desc_heap_->GetDeviceAddress();

-    const size_t number_of_sets = last_bound.per_set.size();
+    const size_t number_of_sets = last_bound.ds_slots.size();
     for (uint32_t i = 0; i < number_of_sets; i++) {
-        const auto &last_bound_set = last_bound.per_set[i];
-        if (!last_bound_set.bound_descriptor_set) {
+        const auto &ds_slot = last_bound.ds_slots[i];
+        if (!ds_slot.ds_state) {
             continue; // can have gaps in descriptor sets
         }

-        auto bound_descriptor_set = static_cast<DescriptorSet *>(last_bound_set.bound_descriptor_set.get());
+        auto bound_descriptor_set = static_cast<DescriptorSet *>(ds_slot.ds_state.get());
         // If update after bind, wait until we process things in UpdateDescriptorStateSSBO()
         if (!bound_descriptor_set->IsUpdateAfterBind()) {
             ssbo_block_ptr->descriptor_set_types[i] = bound_descriptor_set->GetTypeAddress(gpuav, loc);
@@ -150,7 +150,7 @@ void UpdateBoundDescriptors(Validator &gpuav, CommandBuffer &cb_state, VkPipelin
     const auto lv_bind_point = ConvertToLvlBindPoint(pipeline_bind_point);
     auto const &last_bound = cb_state.lastBound[lv_bind_point];

-    const size_t number_of_sets = last_bound.per_set.size();
+    const size_t number_of_sets = last_bound.ds_slots.size();
     if (number_of_sets == 0) {
         return; // empty bind
     } else if (number_of_sets > glsl::kDebugInputBindlessMaxDescSets) {
@@ -163,12 +163,11 @@ void UpdateBoundDescriptors(Validator &gpuav, CommandBuffer &cb_state, VkPipelin
     // Currently we loop through the sets multiple times to reduce complexity and seperate the various parts, can revisit if we find
     // this is actually a perf bottleneck (assume number of sets are low as people we will then to have a single large set)
     for (uint32_t i = 0; i < number_of_sets; i++) {
-        const auto &last_bound_set = last_bound.per_set[i];
-        if (!last_bound_set.bound_descriptor_set) {
+        const auto &ds_slot = last_bound.ds_slots[i];
+        if (!ds_slot.ds_state) {
             continue; // can have gaps in descriptor sets
         }
-        std::shared_ptr<DescriptorSet> bound_descriptor_set =
-            std::static_pointer_cast<DescriptorSet>(last_bound_set.bound_descriptor_set);
+        std::shared_ptr<DescriptorSet> bound_descriptor_set = std::static_pointer_cast<DescriptorSet>(ds_slot.ds_state);
         descriptor_command_binding.bound_descriptor_sets.emplace_back(std::move(bound_descriptor_set));
     }

6 changes: 3 additions & 3 deletions layers/gpu/instrumentation/gpuav_instrumentation.cpp
@@ -487,13 +487,13 @@ void PostCallSetupShaderInstrumentationResources(Validator &gpuav, CommandBuffer

     for (uint32_t set_i = 0; set_i < disturbed_bindings_count; ++set_i) {
         const uint32_t last_bound_set_i = set_i + first_disturbed_set;
-        const auto &last_bound_set_state = last_bound.per_set[last_bound_set_i].bound_descriptor_set;
-        // last_bound.per_set is a LUT, and descriptor sets before the last one could be unbound.
+        const auto &last_bound_set_state = last_bound.ds_slots[last_bound_set_i].ds_state;
+        // last_bound.ds_slot is a LUT, and descriptor sets before the last one could be unbound.
         if (!last_bound_set_state) {
             continue;
         }
         VkDescriptorSet last_bound_set = last_bound_set_state->VkHandle();
-        const std::vector<uint32_t> &dynamic_offset = last_bound.per_set[last_bound_set_i].dynamicOffsets;
+        const std::vector<uint32_t> &dynamic_offset = last_bound.ds_slots[last_bound_set_i].dynamic_offsets;
         const uint32_t dynamic_offset_count = static_cast<uint32_t>(dynamic_offset.size());
         DispatchCmdBindDescriptorSets(cb_state.VkHandle(), bind_point,
                                       last_bound_desc_set_pipe_layout_state->VkHandle(), last_bound_set_i, 1,
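
PostCallSetupShaderInstrumentationResources then re-binds each disturbed set individually, forwarding the recorded dynamic offsets. Stripped of the layer-internal dispatch table, the restore loop amounts to roughly the following; this is a sketch against the public Vulkan API, reusing the illustrative RecordedSetBinding type from above rather than the layers' own state objects.

    // Sketch only: re-bind previously recorded descriptor sets starting at first_disturbed_set,
    // skipping slots that never had a set bound.
    void RebindDisturbedSets(VkCommandBuffer cb, VkPipelineBindPoint bind_point, VkPipelineLayout layout,
                             uint32_t first_disturbed_set, const std::vector<RecordedSetBinding> &recorded) {
        for (uint32_t set_i = first_disturbed_set; set_i < recorded.size(); ++set_i) {
            if (recorded[set_i].set == VK_NULL_HANDLE) continue;  // gap: nothing to restore here
            const std::vector<uint32_t> &offsets = recorded[set_i].dynamic_offsets;
            vkCmdBindDescriptorSets(cb, bind_point, layout, /*firstSet=*/set_i, /*descriptorSetCount=*/1,
                                    &recorded[set_i].set, static_cast<uint32_t>(offsets.size()),
                                    offsets.empty() ? nullptr : offsets.data());
        }
    }
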
(The remaining changed files are not shown in this view.)
