@@ -466,6 +466,9 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
     const auto& num_mips = image.info.resources.levels;
     ASSERT(num_mips == image.info.mips_layout.size());
 
+    const bool is_gpu_modified = True(image.flags & ImageFlagBits::GpuModified);
+    const bool is_gpu_dirty = True(image.flags & ImageFlagBits::GpuDirty);
+
     boost::container::small_vector<vk::BufferImageCopy, 14> image_copy{};
     for (u32 m = 0; m < num_mips; m++) {
         const u32 width = std::max(image.info.size.width >> m, 1u);
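The first hunk hoists the two flag checks to function scope so the later ObtainViewBuffer hunk can reuse is_gpu_dirty. True(...) is the codebase's flag-test helper; a minimal sketch of that idiom, with illustrative bit assignments and names (not shadPS4's actual definitions):

```cpp
#include <cstdint>
#include <type_traits>

// Illustrative bit assignments; the real enum lives in the texture cache.
enum class ImageFlagBits : uint32_t {
    GpuModified = 1 << 0, // GPU has written to the image
    GpuDirty = 1 << 1,    // GPU contents diverge from guest memory
};

// Bitwise AND over the underlying integer, staying in the enum type.
constexpr ImageFlagBits operator&(ImageFlagBits a, ImageFlagBits b) {
    using U = std::underlying_type_t<ImageFlagBits>;
    return static_cast<ImageFlagBits>(static_cast<U>(a) & static_cast<U>(b));
}

// True() reads as: did any bit survive the mask?
constexpr bool True(ImageFlagBits f) {
    return static_cast<std::underlying_type_t<ImageFlagBits>>(f) != 0;
}
```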
@@ -475,8 +478,6 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
         const auto& mip = image.info.mips_layout[m];
 
         // Protect GPU modified resources from accidental CPU reuploads.
-        const bool is_gpu_modified = True(image.flags & ImageFlagBits::GpuModified);
-        const bool is_gpu_dirty = True(image.flags & ImageFlagBits::GpuDirty);
         if (is_gpu_modified && !is_gpu_dirty) {
             const u8* addr = std::bit_cast<u8*>(image.info.guest_address);
             const u64 hash = XXH3_64bits(addr + mip.offset, mip.size);
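Inside the loop, this guard skips CPU reuploads of mips whose guest bytes have not changed, so GPU-side modifications are not clobbered. A minimal sketch of that hash check, assuming a hypothetical MipLayout with a stored hash; XXH3_64bits is the real xxHash API the hunk calls:

```cpp
#include <cstdint>
#include <xxhash.h>

// Hypothetical per-mip bookkeeping; the real layout lives in ImageInfo.
struct MipLayout {
    uint64_t offset; // byte offset of the mip within guest memory
    uint64_t size;   // byte size of the mip
    uint64_t hash;   // hash recorded at the last upload
};

// Returns true when the guest copy is unchanged and the upload can be
// skipped; otherwise records the new hash and lets the copy proceed.
bool CanSkipUpload(const uint8_t* guest_base, MipLayout& mip) {
    const uint64_t hash = XXH3_64bits(guest_base + mip.offset, mip.size);
    if (hash == mip.hash) {
        return true;
    }
    mip.hash = hash;
    return false;
}
```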
@@ -515,7 +516,8 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
 
     const VAddr image_addr = image.info.guest_address;
     const size_t image_size = image.info.guest_size_bytes;
-    const auto [vk_buffer, buf_offset] = buffer_cache.ObtainViewBuffer(image_addr, image_size);
+    const auto [vk_buffer, buf_offset] =
+        buffer_cache.ObtainViewBuffer(image_addr, image_size, is_gpu_dirty);
     // The obtained buffer may be written by a shader so we need to emit a barrier to prevent RAW
     // hazard
     if (auto barrier = vk_buffer->GetBarrier(vk::AccessFlagBits2::eTransferRead,
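GetBarrier is the emulator's internal helper; when it returns a barrier, the caller records it before the transfer. As a rough sketch of what such a barrier amounts to in plain vulkan.hpp synchronization2 (conservative stage masks, hypothetical handles; assumes VULKAN_HPP_NO_CONSTRUCTORS so the designated initializers compile):

```cpp
#define VULKAN_HPP_NO_CONSTRUCTORS
#include <vulkan/vulkan.hpp>

// Make prior shader writes to the buffer visible to the upcoming transfer
// read, closing the read-after-write hazard the comment above describes.
void EmitRawHazardBarrier(vk::CommandBuffer cmdbuf, vk::Buffer buffer) {
    const vk::BufferMemoryBarrier2 barrier{
        .srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
        .srcAccessMask = vk::AccessFlagBits2::eShaderWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferRead,
        .buffer = buffer,
        .offset = 0,
        .size = VK_WHOLE_SIZE,
    };
    cmdbuf.pipelineBarrier2(vk::DependencyInfo{
        .bufferMemoryBarrierCount = 1,
        .pBufferMemoryBarriers = &barrier,
    });
}
```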