
Commit 3e4bfe9

Revert "renderer_vulkan: Cleanup and improve barriers in caches (shad…
Browse files Browse the repository at this point in the history
…ps4-emu#1865)"

This reverts commit 77d2172.
Xcedf committed Jan 3, 2025
1 parent 2252970 commit 3e4bfe9
Showing 5 changed files with 76 additions and 190 deletions.
147 changes: 30 additions & 117 deletions src/video_core/buffer_cache/buffer_cache.cpp
@@ -259,16 +259,7 @@ void BufferCache::InlineData(VAddr address, const void* value, u32 num_bytes, bo
const BufferId buffer_id = FindBuffer(address, num_bytes);
return &slot_buffers[buffer_id];
}();
const vk::BufferMemoryBarrier2 pre_barrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.srcAccessMask = vk::AccessFlagBits2::eMemoryRead,
.dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
.buffer = buffer->Handle(),
.offset = buffer->Offset(address),
.size = num_bytes,
};
const vk::BufferMemoryBarrier2 post_barrier = {
const vk::BufferMemoryBarrier2 buf_barrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
@@ -280,14 +271,9 @@ void BufferCache::InlineData(VAddr address, const void* value, u32 num_bytes, bo
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &pre_barrier,
});
cmdbuf.updateBuffer(buffer->Handle(), buffer->Offset(address), num_bytes, value);
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &post_barrier,
.pBufferMemoryBarriers = &buf_barrier,
});
cmdbuf.updateBuffer(buffer->Handle(), buf_barrier.offset, num_bytes, value);
}
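The hunk above trades an explicit pre-barrier (prior reads → transfer write) plus post-barrier (transfer write → later access) around updateBuffer for a single barrier recorded before the update. A condensed sketch of the two-barrier pattern being removed — not repository code; it assumes Vulkan-Hpp with designated initializers enabled (as the diff itself uses) and a command buffer already in the recording state:

```cpp
#include <vulkan/vulkan.hpp>

// Condensed sketch of the reverted two-barrier pattern around an inline
// buffer write. `handle`, `offset`, `size`, and `data` stand in for the
// values the cache derives from the guest address.
void InlineDataWithBarriers(vk::CommandBuffer cmdbuf, vk::Buffer handle,
                            vk::DeviceSize offset, vk::DeviceSize size,
                            const void* data) {
    // Make all prior reads of the range finish before the transfer writes it.
    const vk::BufferMemoryBarrier2 pre_barrier{
        .srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
        .srcAccessMask = vk::AccessFlagBits2::eMemoryRead,
        .dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .buffer = handle,
        .offset = offset,
        .size = size,
    };
    // Make the transfer write visible to everything that runs afterwards.
    const vk::BufferMemoryBarrier2 post_barrier{
        .srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
        .dstAccessMask = vk::AccessFlagBits2::eMemoryRead,
        .buffer = handle,
        .offset = offset,
        .size = size,
    };
    cmdbuf.pipelineBarrier2(vk::DependencyInfo{
        .dependencyFlags = vk::DependencyFlagBits::eByRegion,
        .bufferMemoryBarrierCount = 1,
        .pBufferMemoryBarriers = &pre_barrier,
    });
    cmdbuf.updateBuffer(handle, offset, size, data);
    cmdbuf.pipelineBarrier2(vk::DependencyInfo{
        .dependencyFlags = vk::DependencyFlagBits::eByRegion,
        .bufferMemoryBarrierCount = 1,
        .pBufferMemoryBarriers = &post_barrier,
    });
}
```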

std::pair<Buffer*, u32> BufferCache::ObtainHostUBO(std::span<const u32> data) {
@@ -479,48 +465,21 @@ void BufferCache::JoinOverlap(BufferId new_buffer_id, BufferId overlap_id,
};
scheduler.EndRendering();
const auto cmdbuf = scheduler.CommandBuffer();
const std::array pre_barriers = {
vk::BufferMemoryBarrier2{
.srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.srcAccessMask = vk::AccessFlagBits2::eMemoryRead | vk::AccessFlagBits2::eMemoryWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferRead,
.buffer = overlap.Handle(),
.offset = 0,
.size = overlap.SizeBytes(),
},
static constexpr vk::MemoryBarrier READ_BARRIER{
.srcAccessMask = vk::AccessFlagBits::eMemoryWrite,
.dstAccessMask = vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite,
};
const std::array post_barriers = {
vk::BufferMemoryBarrier2{
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferRead,
.dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.dstAccessMask = vk::AccessFlagBits2::eMemoryWrite,
.buffer = overlap.Handle(),
.offset = 0,
.size = overlap.SizeBytes(),
},
vk::BufferMemoryBarrier2{
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.dstAccessMask = vk::AccessFlagBits2::eMemoryRead | vk::AccessFlagBits2::eMemoryWrite,
.buffer = new_buffer.Handle(),
.offset = dst_base_offset,
.size = overlap.SizeBytes(),
},
static constexpr vk::MemoryBarrier WRITE_BARRIER{
.srcAccessMask = vk::AccessFlagBits::eTransferWrite,
.dstAccessMask = vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite,
};
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = pre_barriers.data(),
});
cmdbuf.copyBuffer(overlap.Handle(), new_buffer.Handle(), copy);
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = static_cast<u32>(post_barriers.size()),
.pBufferMemoryBarriers = post_barriers.data(),
});
cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eAllCommands,
vk::PipelineStageFlagBits::eTransfer, vk::DependencyFlagBits::eByRegion,
READ_BARRIER, {}, {});
cmdbuf.copyBuffer(overlap.buffer, new_buffer.buffer, copy);
cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
vk::PipelineStageFlagBits::eAllCommands,
vk::DependencyFlagBits::eByRegion, WRITE_BARRIER, {}, {});
DeleteBuffer(overlap_id);
}
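The restored JoinOverlap path (and SynchronizeBuffer below, which follows the same pattern) uses coarse global vk::MemoryBarrier objects with the Vulkan 1.0 pipelineBarrier entry point, whereas the reverted code scoped the dependency to the two buffers involved. Purely for comparison — this is a sketch, not repository code, under the same Vulkan-Hpp assumptions as the earlier one — the same whole-memory dependency expressed on the synchronization2 API:

```cpp
#include <vulkan/vulkan.hpp>

// Sketch: the coarse global read/write barriers around a buffer copy,
// expressed with synchronization2 instead of vk::MemoryBarrier. Shown only
// to contrast with the scoped per-buffer barriers this commit reverts.
void CopyWithGlobalBarriers2(vk::CommandBuffer cmdbuf, vk::Buffer src,
                             vk::Buffer dst, const vk::BufferCopy& copy) {
    // Flush all earlier writes before the transfer reads or writes anything.
    const vk::MemoryBarrier2 read_barrier{
        .srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
        .srcAccessMask = vk::AccessFlagBits2::eMemoryWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferRead |
                         vk::AccessFlagBits2::eTransferWrite,
    };
    // Make the transfer's writes visible to all later commands.
    const vk::MemoryBarrier2 write_barrier{
        .srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
        .dstAccessMask = vk::AccessFlagBits2::eMemoryRead |
                         vk::AccessFlagBits2::eMemoryWrite,
    };
    cmdbuf.pipelineBarrier2(vk::DependencyInfo{
        .memoryBarrierCount = 1, .pMemoryBarriers = &read_barrier});
    cmdbuf.copyBuffer(src, dst, copy);
    cmdbuf.pipelineBarrier2(vk::DependencyInfo{
        .memoryBarrierCount = 1, .pMemoryBarriers = &write_barrier});
}
```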

@@ -624,35 +583,21 @@ void BufferCache::SynchronizeBuffer(Buffer& buffer, VAddr device_addr, u32 size,
}
scheduler.EndRendering();
const auto cmdbuf = scheduler.CommandBuffer();
const vk::BufferMemoryBarrier2 pre_barrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.srcAccessMask = vk::AccessFlagBits2::eMemoryRead,
.dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
.buffer = buffer.Handle(),
.offset = 0,
.size = buffer.SizeBytes(),
static constexpr vk::MemoryBarrier READ_BARRIER{
.srcAccessMask = vk::AccessFlagBits::eMemoryWrite,
.dstAccessMask = vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite,
};
const vk::BufferMemoryBarrier2 post_barrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.dstAccessMask = vk::AccessFlagBits2::eMemoryRead | vk::AccessFlagBits2::eMemoryWrite,
.buffer = buffer.Handle(),
.offset = 0,
.size = buffer.SizeBytes(),
static constexpr vk::MemoryBarrier WRITE_BARRIER{
.srcAccessMask = vk::AccessFlagBits::eTransferWrite,
.dstAccessMask = vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite,
};
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &pre_barrier,
});
cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eAllCommands,
vk::PipelineStageFlagBits::eTransfer, vk::DependencyFlagBits::eByRegion,
READ_BARRIER, {}, {});
cmdbuf.copyBuffer(src_buffer, buffer.buffer, copies);
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &post_barrier,
});
cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
vk::PipelineStageFlagBits::eAllCommands,
vk::DependencyFlagBits::eByRegion, WRITE_BARRIER, {}, {});
}

bool BufferCache::SynchronizeBufferFromImage(Buffer& buffer, VAddr device_addr, u32 size) {
@@ -702,42 +647,10 @@ bool BufferCache::SynchronizeBufferFromImage(Buffer& buffer, VAddr device_addr,
}
if (!copies.empty()) {
scheduler.EndRendering();
const vk::BufferMemoryBarrier2 pre_barrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.srcAccessMask = vk::AccessFlagBits2::eMemoryRead,
.dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
.buffer = buffer.Handle(),
.offset = max_offset - size,
.size = size,
};
const vk::BufferMemoryBarrier2 post_barrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.dstAccessMask = vk::AccessFlagBits2::eMemoryRead,
.buffer = buffer.Handle(),
.offset = max_offset - size,
.size = size,
};
auto barriers = image.GetBarriers(vk::ImageLayout::eTransferSrcOptimal,
vk::AccessFlagBits2::eTransferRead,
vk::PipelineStageFlagBits2::eTransfer, {});
image.Transit(vk::ImageLayout::eTransferSrcOptimal, vk::AccessFlagBits2::eTransferRead, {});
const auto cmdbuf = scheduler.CommandBuffer();
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &pre_barrier,
.imageMemoryBarrierCount = static_cast<u32>(barriers.size()),
.pImageMemoryBarriers = barriers.data(),
});
cmdbuf.copyImageToBuffer(image.image, vk::ImageLayout::eTransferSrcOptimal, buffer.Handle(),
cmdbuf.copyImageToBuffer(image.image, vk::ImageLayout::eTransferSrcOptimal, buffer.buffer,
copies);
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &post_barrier,
});
}
return true;
}
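The removed SynchronizeBufferFromImage code batched the buffer pre-barrier and the image barriers from GetBarriers into a single vkCmdPipelineBarrier2 submission before the copy, saving a separate barrier command. A minimal sketch of that batching, with illustrative parameters and the same Vulkan-Hpp assumptions as above:

```cpp
#include <vector>
#include <vulkan/vulkan.hpp>

// Sketch: submit one buffer barrier and a batch of image barriers in a
// single dependency, then record the image-to-buffer copy. The barrier
// contents themselves are supplied by the caller.
void BarrierThenCopyImageToBuffer(
    vk::CommandBuffer cmdbuf, vk::Image image, vk::Buffer buffer,
    const vk::BufferMemoryBarrier2& buf_barrier,
    const std::vector<vk::ImageMemoryBarrier2>& img_barriers,
    const std::vector<vk::BufferImageCopy>& copies) {
    cmdbuf.pipelineBarrier2(vk::DependencyInfo{
        .dependencyFlags = vk::DependencyFlagBits::eByRegion,
        .bufferMemoryBarrierCount = 1,
        .pBufferMemoryBarriers = &buf_barrier,
        .imageMemoryBarrierCount = static_cast<uint32_t>(img_barriers.size()),
        .pImageMemoryBarriers = img_barriers.data(),
    });
    cmdbuf.copyImageToBuffer(image, vk::ImageLayout::eTransferSrcOptimal,
                             buffer, copies);
}
```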
8 changes: 1 addition & 7 deletions src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -562,12 +562,6 @@ void Rasterizer::BindBuffers(const Shader::Info& stage, Shader::Backend::Binding
push_data.AddOffset(binding.buffer, adjust);
buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned,
vsharp.GetSize() + adjust);
if (auto barrier =
vk_buffer->GetBarrier(desc.is_written ? vk::AccessFlagBits2::eShaderWrite
: vk::AccessFlagBits2::eShaderRead,
vk::PipelineStageFlagBits2::eAllCommands)) {
buffer_barriers.emplace_back(*barrier);
}
}

set_writes.push_back({
Expand Down Expand Up @@ -606,7 +600,7 @@ void Rasterizer::BindBuffers(const Shader::Info& stage, Shader::Backend::Binding
if (auto barrier =
vk_buffer->GetBarrier(desc.is_written ? vk::AccessFlagBits2::eShaderWrite
: vk::AccessFlagBits2::eShaderRead,
vk::PipelineStageFlagBits2::eAllCommands)) {
vk::PipelineStageFlagBits2::eComputeShader)) {
buffer_barriers.emplace_back(*barrier);
}
if (desc.is_written) {
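Both hunks in this file touch the same idiom: a cached buffer exposes GetBarrier, which returns a barrier only when there is a pending access to synchronize against, and BindBuffers accumulates the non-empty results into one batched dependency. A self-contained sketch of that optional-barrier pattern, with a simplified stand-in for the repository's Buffer class:

```cpp
#include <optional>
#include <vector>
#include <vulkan/vulkan.hpp>

// Simplified stand-in for the cache's Buffer: it remembers the last access
// and hands out a barrier only when one is actually needed.
struct CachedBuffer {
    vk::Buffer handle{};
    vk::DeviceSize size = VK_WHOLE_SIZE;
    vk::AccessFlags2 last_access = vk::AccessFlagBits2::eNone;
    vk::PipelineStageFlags2 last_stage = vk::PipelineStageFlagBits2::eNone;

    std::optional<vk::BufferMemoryBarrier2> GetBarrier(
        vk::AccessFlags2 dst_access, vk::PipelineStageFlags2 dst_stage) {
        if (last_access == vk::AccessFlagBits2::eNone) {
            return std::nullopt; // nothing pending, no barrier required
        }
        const vk::BufferMemoryBarrier2 barrier{
            .srcStageMask = last_stage,
            .srcAccessMask = last_access,
            .dstStageMask = dst_stage,
            .dstAccessMask = dst_access,
            .buffer = handle,
            .offset = 0,
            .size = size,
        };
        last_access = dst_access;
        last_stage = dst_stage;
        return barrier;
    }
};

// Collect the optional barriers for a set of bound buffers and flush them
// as one dependency, mirroring the accumulation done in BindBuffers.
void FlushBufferBarriers(vk::CommandBuffer cmdbuf,
                         std::vector<CachedBuffer*>& buffers, bool is_written) {
    std::vector<vk::BufferMemoryBarrier2> buffer_barriers;
    for (auto* buffer : buffers) {
        if (auto barrier = buffer->GetBarrier(
                is_written ? vk::AccessFlagBits2::eShaderWrite
                           : vk::AccessFlagBits2::eShaderRead,
                vk::PipelineStageFlagBits2::eComputeShader)) {
            buffer_barriers.emplace_back(*barrier);
        }
    }
    if (!buffer_barriers.empty()) {
        cmdbuf.pipelineBarrier2(vk::DependencyInfo{
            .dependencyFlags = vk::DependencyFlagBits::eByRegion,
            .bufferMemoryBarrierCount =
                static_cast<uint32_t>(buffer_barriers.size()),
            .pBufferMemoryBarriers = buffer_barriers.data(),
        });
    }
}
```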
45 changes: 7 additions & 38 deletions src/video_core/texture_cache/texture_cache.cpp
@@ -542,62 +542,31 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
sched_ptr->EndRendering();

const auto cmdbuf = sched_ptr->CommandBuffer();
image.Transit(vk::ImageLayout::eTransferDstOptimal, vk::AccessFlagBits2::eTransferWrite, {},
cmdbuf);

const VAddr image_addr = image.info.guest_address;
const size_t image_size = image.info.guest_size_bytes;
const auto [vk_buffer, buf_offset] =
buffer_cache.ObtainViewBuffer(image_addr, image_size, is_gpu_dirty);

// The obtained buffer may be written by a shader so we need to emit a barrier to prevent RAW
// hazard
if (auto barrier = vk_buffer->GetBarrier(vk::AccessFlagBits2::eTransferRead,
vk::PipelineStageFlagBits2::eTransfer)) {
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
const auto dependencies = vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &barrier.value(),
});
};
cmdbuf.pipelineBarrier2(dependencies);
}

const auto [buffer, offset] =
tile_manager.TryDetile(vk_buffer->Handle(), buf_offset, image.info);
const auto [buffer, offset] = tile_manager.TryDetile(vk_buffer->Handle(), buf_offset, image);
for (auto& copy : image_copy) {
copy.bufferOffset += offset;
}

const vk::BufferMemoryBarrier2 pre_barrier{
.srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.srcAccessMask = vk::AccessFlagBits2::eMemoryWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferRead,
.buffer = buffer,
.offset = offset,
.size = image_size,
};
const vk::BufferMemoryBarrier2 post_barrier{
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.dstAccessMask = vk::AccessFlagBits2::eMemoryRead | vk::AccessFlagBits2::eMemoryWrite,
.buffer = buffer,
.offset = offset,
.size = image_size,
};
const auto image_barriers =
image.GetBarriers(vk::ImageLayout::eTransferDstOptimal, vk::AccessFlagBits2::eTransferWrite,
vk::PipelineStageFlagBits2::eTransfer, {});
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &pre_barrier,
.imageMemoryBarrierCount = static_cast<u32>(image_barriers.size()),
.pImageMemoryBarriers = image_barriers.data(),
});
cmdbuf.copyBufferToImage(buffer, image.image, vk::ImageLayout::eTransferDstOptimal, image_copy);
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &post_barrier,
});
image.flags &= ~ImageFlagBits::Dirty;
}
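RefreshImage goes back to Image::Transit, which appears to transition the image into eTransferDstOptimal and emit the matching barrier internally, instead of collecting barriers via GetBarriers and submitting them together with the buffer pre-barrier. Conceptually the transition amounts to the following sketch in raw synchronization2 — a simplified single-aspect subresource range; the real Transit presumably tracks the image's previous layout and access internally:

```cpp
#include <cstdint>
#include <vulkan/vulkan.hpp>

// Sketch: transition an image to TRANSFER_DST_OPTIMAL before a
// copyBufferToImage, covering all mips and layers of the color aspect.
void TransitionForTransferDst(vk::CommandBuffer cmdbuf, vk::Image image,
                              vk::ImageLayout old_layout, uint32_t levels,
                              uint32_t layers) {
    const vk::ImageMemoryBarrier2 barrier{
        .srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
        .srcAccessMask = vk::AccessFlagBits2::eMemoryRead |
                         vk::AccessFlagBits2::eMemoryWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .oldLayout = old_layout,
        .newLayout = vk::ImageLayout::eTransferDstOptimal,
        .image = image,
        .subresourceRange = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseMipLevel = 0,
            .levelCount = levels,
            .baseArrayLayer = 0,
            .layerCount = layers,
        },
    };
    cmdbuf.pipelineBarrier2(vk::DependencyInfo{
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &barrier,
    });
}
```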

59 changes: 35 additions & 24 deletions src/video_core/texture_cache/tile_manager.cpp
@@ -4,7 +4,6 @@
#include "video_core/renderer_vulkan/vk_instance.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_util.h"
#include "video_core/texture_cache/image_info.h"
#include "video_core/texture_cache/image_view.h"
#include "video_core/texture_cache/tile_manager.h"

@@ -87,10 +86,10 @@ static vk::Format DemoteImageFormatForDetiling(vk::Format format) {
return format;
}

const DetilerContext* TileManager::GetDetiler(const ImageInfo& info) const {
const auto format = DemoteImageFormatForDetiling(info.pixel_format);
const DetilerContext* TileManager::GetDetiler(const Image& image) const {
const auto format = DemoteImageFormatForDetiling(image.info.pixel_format);

switch (info.tiling_mode) {
switch (image.info.tiling_mode) {
case AmdGpu::TilingMode::Texture_MicroTiled:
switch (format) {
case vk::Format::eR8Uint:
@@ -259,23 +258,23 @@ void TileManager::FreeBuffer(ScratchBuffer buffer) {
}

std::pair<vk::Buffer, u32> TileManager::TryDetile(vk::Buffer in_buffer, u32 in_offset,
const ImageInfo& info) {
if (!info.props.is_tiled) {
Image& image) {
if (!image.info.props.is_tiled) {
return {in_buffer, in_offset};
}

const auto* detiler = GetDetiler(info);
const auto* detiler = GetDetiler(image);
if (!detiler) {
if (info.tiling_mode != AmdGpu::TilingMode::Texture_MacroTiled &&
info.tiling_mode != AmdGpu::TilingMode::Display_MacroTiled &&
info.tiling_mode != AmdGpu::TilingMode::Depth_MacroTiled) {
if (image.info.tiling_mode != AmdGpu::TilingMode::Texture_MacroTiled &&
image.info.tiling_mode != AmdGpu::TilingMode::Display_MacroTiled &&
image.info.tiling_mode != AmdGpu::TilingMode::Depth_MacroTiled) {
LOG_ERROR(Render_Vulkan, "Unsupported tiled image: {} ({})",
vk::to_string(info.pixel_format), NameOf(info.tiling_mode));
vk::to_string(image.info.pixel_format), NameOf(image.info.tiling_mode));
}
return {in_buffer, in_offset};
}

const u32 image_size = info.guest_size_bytes;
const u32 image_size = image.info.guest_size_bytes;

// Prepare output buffer
auto out_buffer = AllocBuffer(image_size, true);
@@ -318,21 +317,22 @@ std::pair<vk::Buffer, u32> TileManager::TryDetile(vk::Buffer in_buffer, u32 in_o
set_writes);

DetilerParams params;
params.num_levels = info.resources.levels;
params.pitch0 = info.pitch >> (info.props.is_block ? 2u : 0u);
params.height = info.size.height;
if (info.tiling_mode == AmdGpu::TilingMode::Texture_Volume) {
ASSERT(info.resources.levels == 1);
ASSERT(info.num_bits >= 32);
const auto tiles_per_row = info.pitch / 8u;
const auto tiles_per_slice = tiles_per_row * ((info.size.height + 7u) / 8u);
params.num_levels = image.info.resources.levels;
params.pitch0 = image.info.pitch >> (image.info.props.is_block ? 2u : 0u);
params.height = image.info.size.height;
if (image.info.tiling_mode == AmdGpu::TilingMode::Texture_Volume) {
ASSERT(image.info.resources.levels == 1);
ASSERT(image.info.num_bits >= 32);
const auto tiles_per_row = image.info.pitch / 8u;
const auto tiles_per_slice = tiles_per_row * ((image.info.size.height + 7u) / 8u);
params.sizes[0] = tiles_per_row;
params.sizes[1] = tiles_per_slice;
} else {
ASSERT(info.resources.levels <= 14);

ASSERT(image.info.resources.levels <= 14);
std::memset(&params.sizes, 0, sizeof(params.sizes));
for (int m = 0; m < info.resources.levels; ++m) {
params.sizes[m] = info.mips_layout[m].size * info.resources.layers +
for (int m = 0; m < image.info.resources.levels; ++m) {
params.sizes[m] = image.info.mips_layout[m].size * image.info.resources.layers +
(m > 0 ? params.sizes[m - 1] : 0);
}
}
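The sizes array built in the loop above is a running sum: entry m holds the cumulative byte size of mips 0 through m across all array layers, which lets the detiling shader map a linear offset to its mip level. A small worked example with hypothetical mip sizes:

```cpp
#include <array>
#include <cstdint>

// Worked example of the running-sum mip table: sizes[m] accumulates
// mip_size[m] * layers on top of the previous entry. Mip sizes here are
// hypothetical, chosen only to make the arithmetic easy to follow.
int main() {
    constexpr int levels = 3;
    constexpr uint32_t layers = 2;
    constexpr std::array<uint32_t, levels> mip_size = {65536, 16384, 4096};

    std::array<uint32_t, 14> sizes{}; // 14 matches the ASSERT above
    for (int m = 0; m < levels; ++m) {
        sizes[m] = mip_size[m] * layers + (m > 0 ? sizes[m - 1] : 0);
    }
    // sizes = {131072, 163840, 172032, 0, ...}
    return sizes[2] == 172032 ? 0 : 1;
}
```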
@@ -341,9 +341,20 @@ std::pair<vk::Buffer, u32> TileManager::TryDetile(vk::Buffer in_buffer, u32 in_o
&params);

ASSERT((image_size % 64) == 0);
const auto bpp = info.num_bits * (info.props.is_block ? 16u : 1u);
const auto bpp = image.info.num_bits * (image.info.props.is_block ? 16u : 1u);
const auto num_tiles = image_size / (64 * (bpp / 8));
cmdbuf.dispatch(num_tiles, 1, 1);

const vk::BufferMemoryBarrier post_barrier{
.srcAccessMask = vk::AccessFlagBits::eShaderWrite,
.dstAccessMask = vk::AccessFlagBits::eTransferRead,
.buffer = out_buffer.first,
.size = image_size,
};
cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eComputeShader,
vk::PipelineStageFlagBits::eTransfer, vk::DependencyFlagBits::eByRegion,
{}, post_barrier, {});

return {out_buffer.first, 0};
}
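The dispatch sizing above launches one workgroup per 64-texel micro-tile: image_size divided by 64 texels times the byte size of a texel. A worked example of that arithmetic, assuming a 512×512 image at 32 bits per texel that is not block-compressed:

```cpp
#include <cassert>
#include <cstdint>

// Worked example of the dispatch-size computation in TryDetile, under the
// assumption of a 512x512 non-block image with 32 bits per texel
// (e.g. R8G8B8A8): each workgroup detiles one 64-texel (8x8) micro-tile.
int main() {
    const uint32_t num_bits = 32;  // bits per texel
    const bool is_block = false;   // not a BCn block format
    const uint32_t image_size = 512 * 512 * (num_bits / 8); // 1 MiB

    assert(image_size % 64 == 0);
    const uint32_t bpp = num_bits * (is_block ? 16u : 1u);    // 32
    const uint32_t num_tiles = image_size / (64 * (bpp / 8)); // 4096
    // cmdbuf.dispatch(num_tiles, 1, 1) would launch 4096 workgroups.
    return num_tiles == 4096 ? 0 : 1;
}
```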

