@@ -521,7 +521,7 @@ float ToScaledDepthFromIntegerScale(float z) {
 	const float depthSliceFactor = DepthSliceFactor();
 	if (gstate_c.Use(GPU_SCALE_DEPTH_FROM_24BIT_TO_16BIT)) {
 		const double doffset = 0.5 * (depthSliceFactor - 1.0) * (1.0 / depthSliceFactor);
-		// Use one bit for each value, rather than 1.0 / (25535.0 * 256.0).
+		// Use one bit for each value, rather than 1.0 / (65535.0 * 256.0).
 		return (float)((double)z * (1.0 / 16777215.0) + doffset);
 	} else {
 		const float offset = 0.5f * (depthSliceFactor - 1.0f) * (1.0f / depthSliceFactor);
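The corrected constant is worth a quick sanity check. Below is a minimal standalone sketch (not part of the patch) comparing the one-bit-per-value scale against the 65535.0 * 256.0 scale the comment contrasts it with:

```cpp
#include <cstdio>

int main() {
	// 2^24 - 1 = 16777215: one quantization step per representable 24-bit value.
	const double perValue = 1.0 / 16777215.0;
	// The alternative the comment rejects: 65535.0 * 256.0 = 16776960.
	const double scaled16 = 1.0 / (65535.0 * 256.0);

	// The largest 24-bit value maps exactly to 1.0 with the per-value scale,
	// but slightly past 1.0 with the 16-bit * 256 scale.
	printf("%.9f\n", 16777215.0 * perValue);  // 1.000000000
	printf("%.9f\n", 16777215.0 * scaled16);  // ~1.000015199
	return 0;
}
```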
@@ -730,6 +730,12 @@ void ConvertViewportAndScissor(bool useBufferedRendering, float renderWidth, flo
 		if (maxz == 65535) {
 			maxz += fullDepthRange;
 		}
+	} else if (maxz == 65535) {
+		// This means clamp isn't enabled, but we still want to allow values up to 65535.99.
+		// If DepthSliceFactor() is 1.0, though, this would make out.depthRangeMax exceed 1.
+		// Since that would clamp, it would make Z=1234 not match between draws when maxz changes.
+		if (DepthSliceFactor() > 1.0f)
+			maxz = 65535.99f;
 	}
 	// Okay. So, in our shader, -1 will map to minz, and +1 will map to maxz.
 	float halfActualZRange = (maxz - minz) * (1.0f / 2.0f);
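For reference, the mapping the comment describes can be written out directly. This is an illustrative helper (the name `ClipZToDepth` is hypothetical, not from the patch):

```cpp
// Clip-space z = -1 lands on minz, z = +1 lands on maxz, matching the
// comment in the hunk above.
float ClipZToDepth(float clipZ, float minz, float maxz) {
	float halfActualZRange = (maxz - minz) * (1.0f / 2.0f);
	return minz + (clipZ + 1.0f) * halfActualZRange;
}
```

With maxz stretched to 65535.99f, an integer depth of 65535 falls strictly inside the range rather than exactly on its edge, which is the point of the new branch.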
@@ -748,6 +754,7 @@ void ConvertViewportAndScissor(bool useBufferedRendering, float renderWidth, flo
 	}
 
 	// OpenGL will clamp these for us anyway, and Direct3D will error if not clamped.
+	// Of course, if this happens we've skewed out.depthScale/out.zOffset and may get z-fighting.
 	out.depthRangeMin = std::max(out.depthRangeMin, 0.0f);
 	out.depthRangeMax = std::min(out.depthRangeMax, 1.0f);
 }
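To make the new caveat concrete, here is a hedged numeric sketch, standalone and with illustrative values. It assumes the non-24-bit path shown in the first hunk with DepthSliceFactor() == 1.0, where the scaled depth reduces to z / 65535:

```cpp
#include <algorithm>
#include <cstdio>

int main() {
	// If maxz were stretched to 65535.99 with DepthSliceFactor() == 1.0,
	// the scaled range max would exceed 1.0...
	const float maxz = 65535.99f;
	const float rangeMax = maxz * (1.0f / 65535.0f);  // ~1.0000151
	// ...and the clamp above would pull it back to 1.0.
	const float clamped = std::min(rangeMax, 1.0f);

	// depthScale/zOffset were derived from the unclamped range, so equal
	// game-side Z values can land on different depth-buffer values between
	// draws: the z-fighting the comment warns about. This is why the new
	// branch only stretches maxz when DepthSliceFactor() > 1.0f.
	printf("unclamped %.7f -> clamped %.7f\n", rangeMax, clamped);
	return 0;
}
```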