Skip to content

Commit 6564394

Browse files
committed
* renderer_vulkan: Introduce shader HLE system with copy shader implementation. (shadps4-emu#1683)
* renderer_vulkan: Introduce shader HLE system with copy shader implementation. Co-authored-by: TheTurtle <[email protected]> * buffer_cache: Handle obtaining buffer views partially within buffers. * vk_shader_hle: Make more efficient
1 parent 6c29e8f commit 6564394

File tree

11 files changed

+217
-8
lines changed

11 files changed

+217
-8
lines changed

CMakeLists.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -738,6 +738,8 @@ set(VIDEO_CORE src/video_core/amdgpu/liverpool.cpp
738738
src/video_core/renderer_vulkan/vk_resource_pool.h
739739
src/video_core/renderer_vulkan/vk_scheduler.cpp
740740
src/video_core/renderer_vulkan/vk_scheduler.h
741+
src/video_core/renderer_vulkan/vk_shader_hle.cpp
742+
src/video_core/renderer_vulkan/vk_shader_hle.h
741743
src/video_core/renderer_vulkan/vk_shader_util.cpp
742744
src/video_core/renderer_vulkan/vk_shader_util.h
743745
src/video_core/renderer_vulkan/vk_swapchain.cpp

src/video_core/buffer_cache/buffer_cache.cpp

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -360,7 +360,8 @@ std::pair<Buffer*, u32> BufferCache::ObtainBuffer(VAddr device_addr, u32 size, b
360360
return {&buffer, buffer.Offset(device_addr)};
361361
}
362362

363-
std::pair<Buffer*, u32> BufferCache::ObtainViewBuffer(VAddr gpu_addr, u32 size) {
363+
std::pair<Buffer*, u32> BufferCache::ObtainViewBuffer(VAddr gpu_addr, u32 size, bool prefer_gpu) {
364+
// Check if any buffer contains the full requested range.
364365
const u64 page = gpu_addr >> CACHING_PAGEBITS;
365366
const BufferId buffer_id = page_table[page];
366367
if (buffer_id) {
@@ -370,6 +371,13 @@ std::pair<Buffer*, u32> BufferCache::ObtainViewBuffer(VAddr gpu_addr, u32 size)
370371
return {&buffer, buffer.Offset(gpu_addr)};
371372
}
372373
}
374+
// If no buffer contains the full requested range but some buffer within was GPU-modified,
375+
// fall back to ObtainBuffer to create a full buffer and avoid losing GPU modifications.
376+
// This is only done if the request prefers to use GPU memory, otherwise we can skip it.
377+
if (prefer_gpu && memory_tracker.IsRegionGpuModified(gpu_addr, size)) {
378+
return ObtainBuffer(gpu_addr, size, false, false);
379+
}
380+
// In all other cases, just do a CPU copy to the staging buffer.
373381
const u32 offset = staging_buffer.Copy(gpu_addr, size, 16);
374382
return {&staging_buffer, offset};
375383
}

src/video_core/buffer_cache/buffer_cache.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,8 @@ class BufferCache {
9696
BufferId buffer_id = {});
9797

9898
/// Attempts to obtain a buffer without modifying the cache contents.
99-
[[nodiscard]] std::pair<Buffer*, u32> ObtainViewBuffer(VAddr gpu_addr, u32 size);
99+
[[nodiscard]] std::pair<Buffer*, u32> ObtainViewBuffer(VAddr gpu_addr, u32 size,
100+
bool prefer_gpu);
100101

101102
/// Return true when a region is registered on the cache
102103
[[nodiscard]] bool IsRegionRegistered(VAddr addr, size_t size);

src/video_core/renderer_vulkan/vk_pipeline_common.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,8 @@ boost::container::static_vector<vk::BufferView, 8> Pipeline::buffer_views;
1717
boost::container::static_vector<vk::DescriptorBufferInfo, 32> Pipeline::buffer_infos;
1818

1919
Pipeline::Pipeline(const Instance& instance_, Scheduler& scheduler_, DescriptorHeap& desc_heap_,
20-
vk::PipelineCache pipeline_cache)
21-
: instance{instance_}, scheduler{scheduler_}, desc_heap{desc_heap_} {}
20+
vk::PipelineCache pipeline_cache, bool is_compute_)
21+
: instance{instance_}, scheduler{scheduler_}, desc_heap{desc_heap_}, is_compute{is_compute_} {}
2222

2323
Pipeline::~Pipeline() = default;
2424

src/video_core/renderer_vulkan/vk_pipeline_common.h

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ class DescriptorHeap;
2121
class Pipeline {
2222
public:
2323
Pipeline(const Instance& instance, Scheduler& scheduler, DescriptorHeap& desc_heap,
24-
vk::PipelineCache pipeline_cache);
24+
vk::PipelineCache pipeline_cache, bool is_compute = false);
2525
virtual ~Pipeline();
2626

2727
vk::Pipeline Handle() const noexcept {
@@ -32,6 +32,22 @@ class Pipeline {
3232
return *pipeline_layout;
3333
}
3434

35+
auto GetStages() const {
36+
if (is_compute) {
37+
return std::span{stages.cend() - 1, stages.cend()};
38+
} else {
39+
return std::span{stages.cbegin(), stages.cend() - 1};
40+
}
41+
}
42+
43+
const Shader::Info& GetStage(Shader::Stage stage) const noexcept {
44+
return *stages[u32(stage)];
45+
}
46+
47+
bool IsCompute() const {
48+
return is_compute;
49+
}
50+
3551
using DescriptorWrites = boost::container::small_vector<vk::WriteDescriptorSet, 16>;
3652
using BufferBarriers = boost::container::small_vector<vk::BufferMemoryBarrier2, 16>;
3753

@@ -53,6 +69,8 @@ class Pipeline {
5369
static boost::container::static_vector<vk::DescriptorImageInfo, 32> image_infos;
5470
static boost::container::static_vector<vk::BufferView, 8> buffer_views;
5571
static boost::container::static_vector<vk::DescriptorBufferInfo, 32> buffer_infos;
72+
std::array<const Shader::Info*, Shader::MaxStageTypes> stages{};
73+
const bool is_compute;
5674
};
5775

5876
} // namespace Vulkan

src/video_core/renderer_vulkan/vk_rasterizer.cpp

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,9 @@
77
#include "video_core/amdgpu/liverpool.h"
88
#include "video_core/renderer_vulkan/vk_instance.h"
99
#include "video_core/renderer_vulkan/vk_rasterizer.h"
10+
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
1011
#include "video_core/renderer_vulkan/vk_scheduler.h"
12+
#include "video_core/renderer_vulkan/vk_shader_hle.h"
1113
#include "video_core/texture_cache/image_view.h"
1214
#include "video_core/texture_cache/texture_cache.h"
1315
#include "vk_rasterizer.h"
@@ -214,6 +216,11 @@ void Rasterizer::DispatchDirect() {
214216
return;
215217
}
216218

219+
const auto& cs = pipeline->GetStage(Shader::Stage::Compute);
220+
if (ExecuteShaderHLE(cs, liverpool->regs, *this)) {
221+
return;
222+
}
223+
217224
try {
218225
const auto has_resources = pipeline->BindResources(buffer_cache, texture_cache);
219226
if (!has_resources) {

src/video_core/renderer_vulkan/vk_rasterizer.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,14 @@ class Rasterizer {
2727
AmdGpu::Liverpool* liverpool);
2828
~Rasterizer();
2929

30+
[[nodiscard]] Scheduler& GetScheduler() noexcept {
31+
return scheduler;
32+
}
33+
34+
[[nodiscard]] VideoCore::BufferCache& GetBufferCache() noexcept {
35+
return buffer_cache;
36+
}
37+
3038
[[nodiscard]] VideoCore::TextureCache& GetTextureCache() noexcept {
3139
return texture_cache;
3240
}

src/video_core/renderer_vulkan/vk_scheduler.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,10 @@
1010
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
1111
#include "video_core/renderer_vulkan/vk_resource_pool.h"
1212

13+
namespace tracy {
14+
class VkCtxScope;
15+
}
16+
1317
namespace Vulkan {
1418

1519
class Instance;
Lines changed: 139 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
1+
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
2+
// SPDX-License-Identifier: GPL-2.0-or-later
3+
4+
#include "shader_recompiler/info.h"
5+
#include "video_core/renderer_vulkan/vk_scheduler.h"
6+
#include "video_core/renderer_vulkan/vk_shader_hle.h"
7+
8+
#include "vk_rasterizer.h"
9+
10+
namespace Vulkan {
11+
12+
static constexpr u64 COPY_SHADER_HASH = 0xfefebf9f;
13+
14+
/// HLE implementation of the known "copy shader": reads a control buffer of
/// (dst_idx, src_idx, end) records and performs the described element copies
/// with vkCmdCopyBuffer instead of dispatching the guest compute shader.
/// Returns true when the dispatch has been fully handled.
bool ExecuteCopyShaderHLE(const Shader::Info& info, const AmdGpu::Liverpool::Regs& regs,
                          Rasterizer& rasterizer) {
    auto& scheduler = rasterizer.GetScheduler();
    auto& buffer_cache = rasterizer.GetBufferCache();

    // Copy shader defines three formatted buffers as inputs: control, source, and destination.
    const auto ctl_buf_sharp = info.texture_buffers[0].GetSharp(info);
    const auto src_buf_sharp = info.texture_buffers[1].GetSharp(info);
    const auto dst_buf_sharp = info.texture_buffers[2].GetSharp(info);
    const auto buf_stride = src_buf_sharp.GetStride();
    ASSERT(buf_stride == dst_buf_sharp.GetStride());

    // One record per dispatched group; indices and `end` are in elements, not bytes.
    struct CopyShaderControl {
        u32 dst_idx;
        u32 src_idx;
        u32 end;
    };
    static_assert(sizeof(CopyShaderControl) == 12);
    ASSERT(ctl_buf_sharp.GetStride() == sizeof(CopyShaderControl));
    const auto* ctl_buf = reinterpret_cast<const CopyShaderControl*>(ctl_buf_sharp.base_address);

    // Static so the vector's allocation is reused between calls; this path can run every frame.
    static std::vector<vk::BufferCopy> copies;
    copies.clear();
    copies.reserve(regs.cs_program.dim_x);

    // Translate each control record into a byte-granular BufferCopy.
    for (u32 i = 0; i < regs.cs_program.dim_x; i++) {
        const auto& [dst_idx, src_idx, end] = ctl_buf[i];
        const u32 local_dst_offset = dst_idx * buf_stride;
        const u32 local_src_offset = src_idx * buf_stride;
        const u32 local_size = (end + 1) * buf_stride;
        copies.emplace_back(local_src_offset, local_dst_offset, local_size);
    }
    if (copies.empty()) {
        // Empty dispatch; nothing to copy but the shader is still handled.
        return true;
    }

    scheduler.EndRendering();

    // Make all prior GPU writes visible to the transfer, and the transfer's
    // writes visible to any later access.
    static constexpr vk::MemoryBarrier READ_BARRIER{
        .srcAccessMask = vk::AccessFlagBits::eMemoryWrite,
        .dstAccessMask = vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite,
    };
    static constexpr vk::MemoryBarrier WRITE_BARRIER{
        .srcAccessMask = vk::AccessFlagBits::eTransferWrite,
        .dstAccessMask = vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite,
    };
    scheduler.CommandBuffer().pipelineBarrier(
        vk::PipelineStageFlagBits::eAllCommands, vk::PipelineStageFlagBits::eTransfer,
        vk::DependencyFlagBits::eByRegion, READ_BARRIER, {}, {});

    // Greedily batch copies whose source/destination ranges lie within
    // MaxDistanceForMerge of each other, so each batch needs only one
    // ObtainBuffer per side and a single vkCmdCopyBuffer.
    // NOTE: loop on batch_start (not batch_end) so the final batch — including
    // the copies.size() == 1 case — is not silently dropped.
    static constexpr vk::DeviceSize MaxDistanceForMerge = 64_MB;
    u32 batch_start = 0;
    while (batch_start < copies.size()) {
        // Seed the batch with the first unprocessed copy; batch_end is exclusive.
        u32 batch_end = batch_start + 1;
        const auto& copy = copies[batch_start];
        auto src_offset_min = copy.srcOffset;
        auto src_offset_max = copy.srcOffset + copy.size;
        auto dst_offset_min = copy.dstOffset;
        auto dst_offset_max = copy.dstOffset + copy.size;

        for (u32 i = batch_end; i < copies.size(); i++) {
            // Compute new src and dst bounds if we were to batch this copy.
            const auto [src_offset, dst_offset, size] = copies[i];
            const auto new_src_offset_min = std::min(src_offset_min, src_offset);
            const auto new_src_offset_max = std::max(src_offset_max, src_offset + size);
            if (new_src_offset_max - new_src_offset_min > MaxDistanceForMerge) {
                continue;
            }

            const auto new_dst_offset_min = std::min(dst_offset_min, dst_offset);
            const auto new_dst_offset_max = std::max(dst_offset_max, dst_offset + size);
            if (new_dst_offset_max - new_dst_offset_min > MaxDistanceForMerge) {
                continue;
            }

            // Merge this copy into the batch, keeping batch members contiguous
            // in [batch_start, batch_end).
            src_offset_min = new_src_offset_min;
            src_offset_max = new_src_offset_max;
            dst_offset_min = new_dst_offset_min;
            dst_offset_max = new_dst_offset_max;
            if (i != batch_end) {
                std::swap(copies[i], copies[batch_end]);
            }
            ++batch_end;
        }

        // Obtain buffers for the total source and destination ranges.
        const auto [src_buf, src_buf_offset] =
            buffer_cache.ObtainBuffer(src_buf_sharp.base_address + src_offset_min,
                                      src_offset_max - src_offset_min, false, false);
        const auto [dst_buf, dst_buf_offset] =
            buffer_cache.ObtainBuffer(dst_buf_sharp.base_address + dst_offset_min,
                                      dst_offset_max - dst_offset_min, true, false);

        // Rebase copy offsets from guest address space onto the obtained buffers.
        const auto vk_copies = std::span{copies}.subspan(batch_start, batch_end - batch_start);
        for (auto& batch_copy : vk_copies) {
            batch_copy.srcOffset = batch_copy.srcOffset - src_offset_min + src_buf_offset;
            batch_copy.dstOffset = batch_copy.dstOffset - dst_offset_min + dst_buf_offset;
        }

        // Execute buffer copies for this batch.
        LOG_TRACE(Render_Vulkan, "HLE buffer copy: src_size = {}, dst_size = {}",
                  src_offset_max - src_offset_min, dst_offset_max - dst_offset_min);
        scheduler.CommandBuffer().copyBuffer(src_buf->Handle(), dst_buf->Handle(), vk_copies);

        // Advance to the first copy that was not merged into this batch.
        batch_start = batch_end;
    }

    scheduler.CommandBuffer().pipelineBarrier(
        vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eAllCommands,
        vk::DependencyFlagBits::eByRegion, WRITE_BARRIER, {}, {});

    return true;
}
128+
129+
/// Attempts to execute a shader using HLE if possible.
/// Dispatches on the program hash; returns false when no HLE
/// implementation exists so the caller runs the real shader.
bool ExecuteShaderHLE(const Shader::Info& info, const AmdGpu::Liverpool::Regs& regs,
                      Rasterizer& rasterizer) {
    if (info.pgm_hash == COPY_SHADER_HASH) {
        return ExecuteCopyShaderHLE(info, regs, rasterizer);
    }
    // Unrecognized shader; fall back to normal dispatch.
    return false;
}
138+
139+
} // namespace Vulkan
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
2+
// SPDX-License-Identifier: GPL-2.0-or-later
3+
4+
#pragma once

// Needed for AmdGpu::Liverpool::Regs in the prototype below.
#include "video_core/amdgpu/liverpool.h"

namespace Shader {
struct Info;
}

namespace Vulkan {

class Rasterizer;

/// Attempts to execute a shader using HLE if possible.
/// Returns true when the dispatch was fully handled (the caller skips
/// executing the guest shader), false to fall back to normal dispatch.
bool ExecuteShaderHLE(const Shader::Info& info, const AmdGpu::Liverpool::Regs& regs,
                      Rasterizer& rasterizer);

} // namespace Vulkan

src/video_core/texture_cache/texture_cache.cpp

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -434,6 +434,9 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
434434
const auto& num_mips = image.info.resources.levels;
435435
ASSERT(num_mips == image.info.mips_layout.size());
436436

437+
const bool is_gpu_modified = True(image.flags & ImageFlagBits::GpuModified);
438+
const bool is_gpu_dirty = True(image.flags & ImageFlagBits::GpuDirty);
439+
437440
boost::container::small_vector<vk::BufferImageCopy, 14> image_copy{};
438441
for (u32 m = 0; m < num_mips; m++) {
439442
const u32 width = std::max(image.info.size.width >> m, 1u);
@@ -443,8 +446,6 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
443446
const auto& mip = image.info.mips_layout[m];
444447

445448
// Protect GPU modified resources from accidental CPU reuploads.
446-
const bool is_gpu_modified = True(image.flags & ImageFlagBits::GpuModified);
447-
const bool is_gpu_dirty = True(image.flags & ImageFlagBits::GpuDirty);
448449
if (is_gpu_modified && !is_gpu_dirty) {
449450
const u8* addr = std::bit_cast<u8*>(image.info.guest_address);
450451
const u64 hash = XXH3_64bits(addr + mip.offset, mip.size);
@@ -483,7 +484,8 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
483484

484485
const VAddr image_addr = image.info.guest_address;
485486
const size_t image_size = image.info.guest_size_bytes;
486-
const auto [vk_buffer, buf_offset] = buffer_cache.ObtainViewBuffer(image_addr, image_size);
487+
const auto [vk_buffer, buf_offset] =
488+
buffer_cache.ObtainViewBuffer(image_addr, image_size, is_gpu_dirty);
487489
// The obtained buffer may be written by a shader so we need to emit a barrier to prevent RAW
488490
// hazard
489491
if (auto barrier = vk_buffer->GetBarrier(vk::AccessFlagBits2::eTransferRead,

0 commit comments

Comments
 (0)