@@ -29,9 +29,12 @@ TextureCache::TextureCache(const Vulkan::Instance& instance_, Vulkan::Scheduler&
29
29
info.UpdateSize ();
30
30
const ImageId null_id = slot_images.insert (instance, scheduler, info);
31
31
ASSERT (null_id.index == NULL_IMAGE_ID.index );
32
- const vk::Image& null_image = slot_images[null_id].image ;
32
+ auto & img = slot_images[null_id];
33
+ const vk::Image& null_image = img.image ;
33
34
Vulkan::SetObjectName (instance.GetDevice (), null_image, " Null Image" );
34
- slot_images[null_id].flags = ImageFlagBits::Tracked;
35
+ img.flags = ImageFlagBits::Empty;
36
+ img.track_addr = img.info .guest_address ;
37
+ img.track_addr_end = img.info .guest_address + img.info .guest_size_bytes ;
35
38
36
39
ImageViewInfo view_info;
37
40
const auto null_view_id =
@@ -43,37 +46,49 @@ TextureCache::TextureCache(const Vulkan::Instance& instance_, Vulkan::Scheduler&
43
46
44
47
TextureCache::~TextureCache () = default ;
45
48
49
+ void TextureCache::MarkAsMaybeDirty (ImageId image_id, Image& image) {
50
+ if (image.hash == 0 ) {
51
+ // Initialize hash
52
+ const u8 * addr = std::bit_cast<u8 *>(image.info .guest_address );
53
+ image.hash = XXH3_64bits (addr, image.info .guest_size_bytes );
54
+ }
55
+ image.flags |= ImageFlagBits::MaybeCpuDirty;
56
+ UntrackImage (image_id);
57
+ }
58
+
46
59
void TextureCache::InvalidateMemory (VAddr addr, VAddr page_addr, size_t size) {
47
60
std::scoped_lock lock{mutex};
48
61
ForEachImageInRegion (page_addr, size, [&](ImageId image_id, Image& image) {
49
- if (addr < image.cpu_addr ) {
62
+ const auto image_begin = image.info .guest_address ;
63
+ const auto image_end = image.info .guest_address + image.info .guest_size_bytes ;
64
+ const auto page_end = page_addr + size;
65
+ if (image_begin <= addr && addr < image_end) {
66
+ // This image was definitely accessed by this page fault.
67
+ // Untrack image, so the range is unprotected and the guest can write freely
68
+ image.flags |= ImageFlagBits::CpuDirty;
69
+ UntrackImage (image_id);
70
+ } else if (page_end < image_end) {
50
71
// This page access may or may not modify the image.
51
- // We should not mark it as dirty now, if it really was modified,
52
- // it will receive more invalidations on subsequent pages.
53
- const auto page_end = page_addr + size;
54
- if (image. cpu_addr_end <= page_end) {
55
- if (image. hash == 0 ) {
56
- // Initialize hash
57
- const u8 * addr = std::bit_cast< u8 *>(image. info . guest_address );
58
- image. hash = XXH3_64bits (addr, image. info . guest_size_bytes );
59
- }
60
- // Image ends on this page so it can not receive any more invalidations.
61
- // We will check it's hash later to see if it really was modified .
62
- image. flags |= ImageFlagBits::MaybeCpuDirty;
63
- UntrackImage (image_id);
64
- } else {
65
- // Remove tracking from this page only.
66
- UntrackImageHead (image_id );
72
+ // We should not mark it as dirty now. If it really was modified
73
+ // it will receive more invalidations on its other pages.
74
+ // Remove tracking from this page only.
75
+ UntrackImageHead (image_id);
76
+ } else if (image_begin < page_addr ) {
77
+ // This page access does not modify the image but the page should be untracked.
78
+ // We should not mark this image as dirty now. If it really was modified
79
+ // it will receive more invalidations on its other pages.
80
+ UntrackImageTail (image_id);
81
+ } else {
82
+ // Image begins and ends on this page so it cannot receive any more invalidations.
83
+ // We will check its hash later to see if it really was modified.
84
+ if (image. hash == 0 ) {
85
+ // Initialize hash
86
+ const u8 * addr = std::bit_cast< u8 *>(image. info . guest_address );
87
+ image. hash = XXH3_64bits (addr, image. info . guest_size_bytes );
67
88
}
68
- return ;
89
+ image.flags |= ImageFlagBits::MaybeCpuDirty;
90
+ UntrackImage (image_id);
69
91
}
70
-
71
- if (addr < image.cpu_addr_end ) {
72
- // Ensure image is reuploaded when accessed again.
73
- image.flags |= ImageFlagBits::CpuDirty;
74
- }
75
- // Untrack image, so the range is unprotected and the guest can write freely.
76
- UntrackImage (image_id);
77
92
});
78
93
}
79
94
@@ -443,9 +458,12 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
443
458
False (image.flags & ImageFlagBits::CpuDirty)) {
444
459
// The image size should be less than page size to be considered MaybeCpuDirty
445
460
// So this calculation should be very uncommon and reasonably fast
446
- ASSERT (image.info .guest_size_bytes <= 4_KB);
447
- const u8 * addr = std::bit_cast<u8 *>(image.info .guest_address );
448
- const u64 hash = XXH3_64bits (addr, image.info .guest_size_bytes );
461
+ // For now we'll just check up to 64 first pixels
462
+ const auto addr = std::bit_cast<u8 *>(image.info .guest_address );
463
+ const auto w = std::min (image.info .size .width , u32 (8 ));
464
+ const auto h = std::min (image.info .size .height , u32 (8 ));
465
+ const auto size = w * h * image.info .num_bits / 8 ;
466
+ const u64 hash = XXH3_64bits (addr, size);
449
467
if (image.hash == hash) {
450
468
image.flags &= ~ImageFlagBits::MaybeCpuDirty;
451
469
return ;
@@ -539,7 +557,7 @@ void TextureCache::RegisterImage(ImageId image_id) {
539
557
ASSERT_MSG (False (image.flags & ImageFlagBits::Registered),
540
558
" Trying to register an already registered image" );
541
559
image.flags |= ImageFlagBits::Registered;
542
- ForEachPage (image.cpu_addr , image.info .guest_size_bytes ,
560
+ ForEachPage (image.info . guest_address , image.info .guest_size_bytes ,
543
561
[this , image_id](u64 page) { page_table[page].push_back (image_id); });
544
562
}
545
563
@@ -548,7 +566,7 @@ void TextureCache::UnregisterImage(ImageId image_id) {
548
566
ASSERT_MSG (True (image.flags & ImageFlagBits::Registered),
549
567
" Trying to unregister an already unregistered image" );
550
568
image.flags &= ~ImageFlagBits::Registered;
551
- ForEachPage (image.cpu_addr , image.info .guest_size_bytes , [this , image_id](u64 page) {
569
+ ForEachPage (image.info . guest_address , image.info .guest_size_bytes , [this , image_id](u64 page) {
552
570
const auto page_it = page_table.find (page);
553
571
if (page_it == nullptr ) {
554
572
UNREACHABLE_MSG (" Unregistering unregistered page=0x{:x}" , page << PageShift);
@@ -566,62 +584,106 @@ void TextureCache::UnregisterImage(ImageId image_id) {
566
584
567
585
void TextureCache::TrackImage (ImageId image_id) {
568
586
auto & image = slot_images[image_id];
569
- if (True (image.flags & ImageFlagBits::Tracked)) {
587
+ const auto image_begin = image.info .guest_address ;
588
+ const auto image_end = image.info .guest_address + image.info .guest_size_bytes ;
589
+ if (image_begin == image.track_addr && image_end == image.track_addr_end ) {
570
590
return ;
571
591
}
572
- if (True (image.flags & ImageFlagBits::TailTracked)) {
573
- // Re-track only image head
574
- TrackImageHead (image_id);
575
- } else {
592
+
593
+ if (!image.IsTracked ()) {
576
594
// Re-track the whole image
577
- image.flags |= ImageFlagBits::Tracked;
578
- tracker.UpdatePagesCachedCount (image.cpu_addr , image.info .guest_size_bytes , 1 );
595
+ image.track_addr = image_begin;
596
+ image.track_addr_end = image_end;
597
+ tracker.UpdatePagesCachedCount (image_begin, image.info .guest_size_bytes , 1 );
598
+ } else {
599
+ if (image_begin < image.track_addr ) {
600
+ TrackImageHead (image_id);
601
+ }
602
+ if (image.track_addr_end < image_end) {
603
+ TrackImageTail (image_id);
604
+ }
579
605
}
580
606
}
581
607
582
608
void TextureCache::TrackImageHead (ImageId image_id) {
583
609
auto & image = slot_images[image_id];
584
- if (True (image.flags & ImageFlagBits::Tracked)) {
610
+ const auto image_begin = image.info .guest_address ;
611
+ if (image_begin == image.track_addr ) {
585
612
return ;
586
613
}
587
- ASSERT (True (image.flags & ImageFlagBits::TailTracked));
588
- image.flags |= ImageFlagBits::Tracked;
589
- image.flags &= ~ImageFlagBits::TailTracked;
590
- const auto size = tracker.GetNextPageAddr (image.cpu_addr ) - image.cpu_addr ;
591
- tracker.UpdatePagesCachedCount (image.cpu_addr , size, 1 );
614
+ ASSERT (image.track_addr != 0 && image_begin < image.track_addr );
615
+ const auto size = image.track_addr - image_begin;
616
+ image.track_addr = image_begin;
617
+ tracker.UpdatePagesCachedCount (image_begin, size, 1 );
618
+ }
619
+
620
+ void TextureCache::TrackImageTail (ImageId image_id) {
621
+ auto & image = slot_images[image_id];
622
+ const auto image_end = image.info .guest_address + image.info .guest_size_bytes ;
623
+ if (image_end == image.track_addr_end ) {
624
+ return ;
625
+ }
626
+ ASSERT (image.track_addr_end != 0 && image.track_addr_end < image_end);
627
+ const auto addr = image.track_addr_end ;
628
+ const auto size = image_end - image.track_addr_end ;
629
+ image.track_addr_end = image_end;
630
+ tracker.UpdatePagesCachedCount (addr, size, 1 );
592
631
}
593
632
594
633
void TextureCache::UntrackImage (ImageId image_id) {
595
634
auto & image = slot_images[image_id];
596
- ASSERT (!True (image.flags & ImageFlagBits::Tracked) ||
597
- !True (image.flags & ImageFlagBits::TailTracked));
598
- if (True (image.flags & ImageFlagBits::Tracked)) {
599
- image.flags &= ~ImageFlagBits::Tracked;
600
- tracker.UpdatePagesCachedCount (image.cpu_addr , image.info .guest_size_bytes , -1 );
601
- }
602
- if (True (image.flags & ImageFlagBits::TailTracked)) {
603
- image.flags &= ~ImageFlagBits::TailTracked;
604
- const auto addr = tracker.GetNextPageAddr (image.cpu_addr );
605
- const auto size = image.info .guest_size_bytes - (addr - image.cpu_addr );
635
+ if (!image.IsTracked ()) {
636
+ return ;
637
+ }
638
+ const auto addr = image.track_addr ;
639
+ const auto size = image.track_addr_end - image.track_addr ;
640
+ image.track_addr = 0 ;
641
+ image.track_addr_end = 0 ;
642
+ if (size != 0 ) {
606
643
tracker.UpdatePagesCachedCount (addr, size, -1 );
607
644
}
608
645
}
609
646
610
647
void TextureCache::UntrackImageHead (ImageId image_id) {
611
648
auto & image = slot_images[image_id];
612
- if (False (image.flags & ImageFlagBits::Tracked)) {
649
+ const auto image_begin = image.info .guest_address ;
650
+ if (!image.IsTracked () || image_begin < image.track_addr ) {
651
+ return ;
652
+ }
653
+ const auto addr = tracker.GetNextPageAddr (image_begin);
654
+ const auto size = addr - image_begin;
655
+ image.track_addr = addr;
656
+ if (image.track_addr == image.track_addr_end ) {
657
+ // This image spans only 2 pages and both are modified,
658
+ // but the image itself was not directly affected.
659
+ // Check its hash later.
660
+ MarkAsMaybeDirty (image_id, image);
661
+ }
662
+ tracker.UpdatePagesCachedCount (image_begin, size, -1 );
663
+ }
664
+
665
+ void TextureCache::UntrackImageTail (ImageId image_id) {
666
+ auto & image = slot_images[image_id];
667
+ const auto image_end = image.info .guest_address + image.info .guest_size_bytes ;
668
+ if (!image.IsTracked () || image.track_addr_end < image_end) {
613
669
return ;
614
670
}
615
- image.flags |= ImageFlagBits::TailTracked;
616
- image.flags &= ~ImageFlagBits::Tracked;
617
- const auto size = tracker.GetNextPageAddr (image.cpu_addr ) - image.cpu_addr ;
618
- tracker.UpdatePagesCachedCount (image.cpu_addr , size, -1 );
671
+ ASSERT (image.track_addr_end != 0 );
672
+ const auto addr = tracker.GetPageAddr (image_end);
673
+ const auto size = image_end - addr;
674
+ image.track_addr_end = addr;
675
+ if (image.track_addr == image.track_addr_end ) {
676
+ // This image spans only 2 pages and both are modified,
677
+ // but the image itself was not directly affected.
678
+ // Check its hash later.
679
+ MarkAsMaybeDirty (image_id, image);
680
+ }
681
+ tracker.UpdatePagesCachedCount (addr, size, -1 );
619
682
}
620
683
621
684
void TextureCache::DeleteImage (ImageId image_id) {
622
685
Image& image = slot_images[image_id];
623
- ASSERT_MSG (False (image.flags & ImageFlagBits::Tracked), " Image was not untracked" );
624
- ASSERT_MSG (False (image.flags & ImageFlagBits::TailTracked), " Image was not untracked" );
686
+ ASSERT_MSG (!image.IsTracked (), " Image was not untracked" );
625
687
ASSERT_MSG (False (image.flags & ImageFlagBits::Registered), " Image was not unregistered" );
626
688
627
689
// Remove any registered meta areas.
0 commit comments