1 // Copyright 2010 Dolphin Emulator Project
2 // Licensed under GPLv2+
3 // Refer to the license.txt file included.
4 
5 #include "VideoCommon/TextureCacheBase.h"
6 
7 #include <algorithm>
8 #include <cmath>
9 #include <cstring>
10 #include <memory>
11 #include <string>
12 #include <utility>
13 #include <vector>
14 #if defined(_M_X86) || defined(_M_X86_64)
15 #include <pmmintrin.h>
16 #endif
17 
18 #include <fmt/format.h>
19 
20 #include "Common/Align.h"
21 #include "Common/Assert.h"
22 #include "Common/ChunkFile.h"
23 #include "Common/CommonTypes.h"
24 #include "Common/FileUtil.h"
25 #include "Common/Hash.h"
26 #include "Common/Logging/Log.h"
27 #include "Common/MathUtil.h"
28 #include "Common/MemoryUtil.h"
29 
30 #include "Core/Config/GraphicsSettings.h"
31 #include "Core/ConfigManager.h"
32 #include "Core/FifoPlayer/FifoPlayer.h"
33 #include "Core/FifoPlayer/FifoRecorder.h"
34 #include "Core/HW/Memmap.h"
35 
36 #include "VideoCommon/AbstractFramebuffer.h"
37 #include "VideoCommon/AbstractStagingTexture.h"
38 #include "VideoCommon/BPMemory.h"
39 #include "VideoCommon/FramebufferManager.h"
40 #include "VideoCommon/HiresTextures.h"
41 #include "VideoCommon/OpcodeDecoding.h"
42 #include "VideoCommon/PixelShaderManager.h"
43 #include "VideoCommon/RenderBase.h"
44 #include "VideoCommon/SamplerCommon.h"
45 #include "VideoCommon/ShaderCache.h"
46 #include "VideoCommon/Statistics.h"
47 #include "VideoCommon/TextureConversionShader.h"
48 #include "VideoCommon/TextureConverterShaderGen.h"
49 #include "VideoCommon/TextureDecoder.h"
50 #include "VideoCommon/VertexManagerBase.h"
51 #include "VideoCommon/VideoCommon.h"
52 #include "VideoCommon/VideoConfig.h"
53 
54 static const u64 TEXHASH_INVALID = 0;
55 // Sonic the Fighters (inside Sonic Gems Collection) loops a 64 frames animation
56 static const int TEXTURE_KILL_THRESHOLD = 64;
57 static const int TEXTURE_POOL_KILL_THRESHOLD = 3;
58 
59 std::unique_ptr<TextureCacheBase> g_texture_cache;
60 
61 std::bitset<8> TextureCacheBase::valid_bind_points;
62 
TCacheEntry(std::unique_ptr<AbstractTexture> tex,std::unique_ptr<AbstractFramebuffer> fb)63 TextureCacheBase::TCacheEntry::TCacheEntry(std::unique_ptr<AbstractTexture> tex,
64                                            std::unique_ptr<AbstractFramebuffer> fb)
65     : texture(std::move(tex)), framebuffer(std::move(fb))
66 {
67 }
68 
~TCacheEntry()69 TextureCacheBase::TCacheEntry::~TCacheEntry()
70 {
71   for (auto& reference : references)
72     reference->references.erase(this);
73 }
74 
CheckTempSize(size_t required_size)75 void TextureCacheBase::CheckTempSize(size_t required_size)
76 {
77   if (required_size <= temp_size)
78     return;
79 
80   temp_size = required_size;
81   Common::FreeAlignedMemory(temp);
82   temp = static_cast<u8*>(Common::AllocateAlignedMemory(temp_size, 16));
83 }
84 
TextureCacheBase()85 TextureCacheBase::TextureCacheBase()
86 {
87   SetBackupConfig(g_ActiveConfig);
88 
89   temp_size = 2048 * 2048 * 4;
90   temp = static_cast<u8*>(Common::AllocateAlignedMemory(temp_size, 16));
91 
92   TexDecoder_SetTexFmtOverlayOptions(backup_config.texfmt_overlay,
93                                      backup_config.texfmt_overlay_center);
94 
95   HiresTexture::Init();
96 
97   Common::SetHash64Function();
98 
99   InvalidateAllBindPoints();
100 }
101 
~TextureCacheBase()102 TextureCacheBase::~TextureCacheBase()
103 {
104   // Clear pending EFB copies first, so we don't try to flush them.
105   m_pending_efb_copies.clear();
106 
107   HiresTexture::Shutdown();
108   Invalidate();
109   Common::FreeAlignedMemory(temp);
110   temp = nullptr;
111 }
112 
Initialize()113 bool TextureCacheBase::Initialize()
114 {
115   if (!CreateUtilityTextures())
116   {
117     PanicAlert("Failed to create utility textures.");
118     return false;
119   }
120 
121   return true;
122 }
123 
Invalidate()124 void TextureCacheBase::Invalidate()
125 {
126   FlushEFBCopies();
127   InvalidateAllBindPoints();
128 
129   bound_textures.fill(nullptr);
130   for (auto& tex : textures_by_address)
131   {
132     delete tex.second;
133   }
134   textures_by_address.clear();
135   textures_by_hash.clear();
136 
137   texture_pool.clear();
138 }
139 
OnConfigChanged(const VideoConfig & config)140 void TextureCacheBase::OnConfigChanged(const VideoConfig& config)
141 {
142   if (config.bHiresTextures != backup_config.hires_textures ||
143       config.bCacheHiresTextures != backup_config.cache_hires_textures)
144   {
145     HiresTexture::Update();
146   }
147 
148   // TODO: Invalidating texcache is really stupid in some of these cases
149   if (config.iSafeTextureCache_ColorSamples != backup_config.color_samples ||
150       config.bTexFmtOverlayEnable != backup_config.texfmt_overlay ||
151       config.bTexFmtOverlayCenter != backup_config.texfmt_overlay_center ||
152       config.bHiresTextures != backup_config.hires_textures ||
153       config.bEnableGPUTextureDecoding != backup_config.gpu_texture_decoding ||
154       config.bDisableCopyToVRAM != backup_config.disable_vram_copies ||
155       config.bArbitraryMipmapDetection != backup_config.arbitrary_mipmap_detection)
156   {
157     Invalidate();
158     TexDecoder_SetTexFmtOverlayOptions(config.bTexFmtOverlayEnable, config.bTexFmtOverlayCenter);
159   }
160 
161   SetBackupConfig(config);
162 }
163 
Cleanup(int _frameCount)164 void TextureCacheBase::Cleanup(int _frameCount)
165 {
166   TexAddrCache::iterator iter = textures_by_address.begin();
167   TexAddrCache::iterator tcend = textures_by_address.end();
168   while (iter != tcend)
169   {
170     if (iter->second->tmem_only)
171     {
172       iter = InvalidateTexture(iter);
173     }
174     else if (iter->second->frameCount == FRAMECOUNT_INVALID)
175     {
176       iter->second->frameCount = _frameCount;
177       ++iter;
178     }
179     else if (_frameCount > TEXTURE_KILL_THRESHOLD + iter->second->frameCount)
180     {
181       if (iter->second->IsCopy())
182       {
183         // Only remove EFB copies when they wouldn't be used anymore(changed hash), because EFB
184         // copies living on the
185         // host GPU are unrecoverable. Perform this check only every TEXTURE_KILL_THRESHOLD for
186         // performance reasons
187         if ((_frameCount - iter->second->frameCount) % TEXTURE_KILL_THRESHOLD == 1 &&
188             iter->second->hash != iter->second->CalculateHash())
189         {
190           iter = InvalidateTexture(iter);
191         }
192         else
193         {
194           ++iter;
195         }
196       }
197       else
198       {
199         iter = InvalidateTexture(iter);
200       }
201     }
202     else
203     {
204       ++iter;
205     }
206   }
207 
208   TexPool::iterator iter2 = texture_pool.begin();
209   TexPool::iterator tcend2 = texture_pool.end();
210   while (iter2 != tcend2)
211   {
212     if (iter2->second.frameCount == FRAMECOUNT_INVALID)
213     {
214       iter2->second.frameCount = _frameCount;
215     }
216     if (_frameCount > TEXTURE_POOL_KILL_THRESHOLD + iter2->second.frameCount)
217     {
218       iter2 = texture_pool.erase(iter2);
219     }
220     else
221     {
222       ++iter2;
223     }
224   }
225 }
226 
OverlapsMemoryRange(u32 range_address,u32 range_size) const227 bool TextureCacheBase::TCacheEntry::OverlapsMemoryRange(u32 range_address, u32 range_size) const
228 {
229   if (addr + size_in_bytes <= range_address)
230     return false;
231 
232   if (addr >= range_address + range_size)
233     return false;
234 
235   return true;
236 }
237 
SetBackupConfig(const VideoConfig & config)238 void TextureCacheBase::SetBackupConfig(const VideoConfig& config)
239 {
240   backup_config.color_samples = config.iSafeTextureCache_ColorSamples;
241   backup_config.texfmt_overlay = config.bTexFmtOverlayEnable;
242   backup_config.texfmt_overlay_center = config.bTexFmtOverlayCenter;
243   backup_config.hires_textures = config.bHiresTextures;
244   backup_config.cache_hires_textures = config.bCacheHiresTextures;
245   backup_config.stereo_3d = config.stereo_mode != StereoMode::Off;
246   backup_config.efb_mono_depth = config.bStereoEFBMonoDepth;
247   backup_config.gpu_texture_decoding = config.bEnableGPUTextureDecoding;
248   backup_config.disable_vram_copies = config.bDisableCopyToVRAM;
249   backup_config.arbitrary_mipmap_detection = config.bArbitraryMipmapDetection;
250 }
251 
252 TextureCacheBase::TCacheEntry*
ApplyPaletteToEntry(TCacheEntry * entry,u8 * palette,TLUTFormat tlutfmt)253 TextureCacheBase::ApplyPaletteToEntry(TCacheEntry* entry, u8* palette, TLUTFormat tlutfmt)
254 {
255   DEBUG_ASSERT(g_ActiveConfig.backend_info.bSupportsPaletteConversion);
256 
257   const AbstractPipeline* pipeline = g_shader_cache->GetPaletteConversionPipeline(tlutfmt);
258   if (!pipeline)
259   {
260     ERROR_LOG(VIDEO, "Failed to get conversion pipeline for format 0x%02X",
261               static_cast<u32>(tlutfmt));
262     return nullptr;
263   }
264 
265   TextureConfig new_config = entry->texture->GetConfig();
266   new_config.levels = 1;
267   new_config.flags |= AbstractTextureFlag_RenderTarget;
268 
269   TCacheEntry* decoded_entry = AllocateCacheEntry(new_config);
270   if (!decoded_entry)
271     return nullptr;
272 
273   decoded_entry->SetGeneralParameters(entry->addr, entry->size_in_bytes, entry->format,
274                                       entry->should_force_safe_hashing);
275   decoded_entry->SetDimensions(entry->native_width, entry->native_height, 1);
276   decoded_entry->SetHashes(entry->base_hash, entry->hash);
277   decoded_entry->frameCount = FRAMECOUNT_INVALID;
278   decoded_entry->should_force_safe_hashing = false;
279   decoded_entry->SetNotCopy();
280   decoded_entry->may_have_overlapping_textures = entry->may_have_overlapping_textures;
281 
282   g_renderer->BeginUtilityDrawing();
283 
284   const u32 palette_size = entry->format == TextureFormat::I4 ? 32 : 512;
285   u32 texel_buffer_offset;
286   if (g_vertex_manager->UploadTexelBuffer(palette, palette_size,
287                                           TexelBufferFormat::TEXEL_BUFFER_FORMAT_R16_UINT,
288                                           &texel_buffer_offset))
289   {
290     struct Uniforms
291     {
292       float multiplier;
293       u32 texel_buffer_offset;
294       u32 pad[2];
295     };
296     static_assert(std::is_standard_layout<Uniforms>::value);
297     Uniforms uniforms = {};
298     uniforms.multiplier = entry->format == TextureFormat::I4 ? 15.0f : 255.0f;
299     uniforms.texel_buffer_offset = texel_buffer_offset;
300     g_vertex_manager->UploadUtilityUniforms(&uniforms, sizeof(uniforms));
301 
302     g_renderer->SetAndDiscardFramebuffer(decoded_entry->framebuffer.get());
303     g_renderer->SetViewportAndScissor(decoded_entry->texture->GetRect());
304     g_renderer->SetPipeline(pipeline);
305     g_renderer->SetTexture(1, entry->texture.get());
306     g_renderer->SetSamplerState(1, RenderState::GetPointSamplerState());
307     g_renderer->Draw(0, 3);
308     g_renderer->EndUtilityDrawing();
309     decoded_entry->texture->FinishedRendering();
310   }
311   else
312   {
313     ERROR_LOG(VIDEO, "Texel buffer upload of %u bytes failed", palette_size);
314     g_renderer->EndUtilityDrawing();
315   }
316 
317   textures_by_address.emplace(decoded_entry->addr, decoded_entry);
318 
319   return decoded_entry;
320 }
321 
ReinterpretEntry(const TCacheEntry * existing_entry,TextureFormat new_format)322 TextureCacheBase::TCacheEntry* TextureCacheBase::ReinterpretEntry(const TCacheEntry* existing_entry,
323                                                                   TextureFormat new_format)
324 {
325   const AbstractPipeline* pipeline =
326       g_shader_cache->GetTextureReinterpretPipeline(existing_entry->format.texfmt, new_format);
327   if (!pipeline)
328   {
329     ERROR_LOG(VIDEO,
330               "Failed to obtain texture reinterpreting pipeline from format 0x%02X to 0x%02X",
331               static_cast<u32>(existing_entry->format.texfmt), static_cast<u32>(new_format));
332     return nullptr;
333   }
334 
335   TextureConfig new_config = existing_entry->texture->GetConfig();
336   new_config.levels = 1;
337   new_config.flags |= AbstractTextureFlag_RenderTarget;
338 
339   TCacheEntry* reinterpreted_entry = AllocateCacheEntry(new_config);
340   if (!reinterpreted_entry)
341     return nullptr;
342 
343   reinterpreted_entry->SetGeneralParameters(existing_entry->addr, existing_entry->size_in_bytes,
344                                             new_format, existing_entry->should_force_safe_hashing);
345   reinterpreted_entry->SetDimensions(existing_entry->native_width, existing_entry->native_height,
346                                      1);
347   reinterpreted_entry->SetHashes(existing_entry->base_hash, existing_entry->hash);
348   reinterpreted_entry->frameCount = existing_entry->frameCount;
349   reinterpreted_entry->SetNotCopy();
350   reinterpreted_entry->is_efb_copy = existing_entry->is_efb_copy;
351   reinterpreted_entry->may_have_overlapping_textures =
352       existing_entry->may_have_overlapping_textures;
353 
354   g_renderer->BeginUtilityDrawing();
355   g_renderer->SetAndDiscardFramebuffer(reinterpreted_entry->framebuffer.get());
356   g_renderer->SetViewportAndScissor(reinterpreted_entry->texture->GetRect());
357   g_renderer->SetPipeline(pipeline);
358   g_renderer->SetTexture(0, existing_entry->texture.get());
359   g_renderer->SetSamplerState(1, RenderState::GetPointSamplerState());
360   g_renderer->Draw(0, 3);
361   g_renderer->EndUtilityDrawing();
362   reinterpreted_entry->texture->FinishedRendering();
363 
364   textures_by_address.emplace(reinterpreted_entry->addr, reinterpreted_entry);
365 
366   return reinterpreted_entry;
367 }
368 
ScaleTextureCacheEntryTo(TextureCacheBase::TCacheEntry * entry,u32 new_width,u32 new_height)369 void TextureCacheBase::ScaleTextureCacheEntryTo(TextureCacheBase::TCacheEntry* entry, u32 new_width,
370                                                 u32 new_height)
371 {
372   if (entry->GetWidth() == new_width && entry->GetHeight() == new_height)
373   {
374     return;
375   }
376 
377   const u32 max = g_ActiveConfig.backend_info.MaxTextureSize;
378   if (max < new_width || max < new_height)
379   {
380     ERROR_LOG(VIDEO, "Texture too big, width = %d, height = %d", new_width, new_height);
381     return;
382   }
383 
384   const TextureConfig newconfig(new_width, new_height, 1, entry->GetNumLayers(), 1,
385                                 AbstractTextureFormat::RGBA8, AbstractTextureFlag_RenderTarget);
386   std::optional<TexPoolEntry> new_texture = AllocateTexture(newconfig);
387   if (!new_texture)
388   {
389     ERROR_LOG(VIDEO, "Scaling failed due to texture allocation failure");
390     return;
391   }
392 
393   // No need to convert the coordinates here since they'll be the same.
394   g_renderer->ScaleTexture(new_texture->framebuffer.get(),
395                            new_texture->texture->GetConfig().GetRect(), entry->texture.get(),
396                            entry->texture->GetConfig().GetRect());
397   entry->texture.swap(new_texture->texture);
398   entry->framebuffer.swap(new_texture->framebuffer);
399 
400   // At this point new_texture has the old texture in it,
401   // we can potentially reuse this, so let's move it back to the pool
402   auto config = new_texture->texture->GetConfig();
403   texture_pool.emplace(
404       config, TexPoolEntry(std::move(new_texture->texture), std::move(new_texture->framebuffer)));
405 }
406 
CheckReadbackTexture(u32 width,u32 height,AbstractTextureFormat format)407 bool TextureCacheBase::CheckReadbackTexture(u32 width, u32 height, AbstractTextureFormat format)
408 {
409   if (m_readback_texture && m_readback_texture->GetConfig().width >= width &&
410       m_readback_texture->GetConfig().height >= height &&
411       m_readback_texture->GetConfig().format == format)
412   {
413     return true;
414   }
415 
416   TextureConfig staging_config(std::max(width, 128u), std::max(height, 128u), 1, 1, 1, format, 0);
417   m_readback_texture.reset();
418   m_readback_texture =
419       g_renderer->CreateStagingTexture(StagingTextureType::Readback, staging_config);
420   return m_readback_texture != nullptr;
421 }
422 
SerializeTexture(AbstractTexture * tex,const TextureConfig & config,PointerWrap & p)423 void TextureCacheBase::SerializeTexture(AbstractTexture* tex, const TextureConfig& config,
424                                         PointerWrap& p)
425 {
426   // If we're in measure mode, skip the actual readback to save some time.
427   const bool skip_readback = p.GetMode() == PointerWrap::MODE_MEASURE;
428   p.DoPOD(config);
429 
430   std::vector<u8> texture_data;
431   if (skip_readback || CheckReadbackTexture(config.width, config.height, config.format))
432   {
433     // Save out each layer of the texture to the staging texture, and then
434     // append it onto the end of the vector. This gives us all the sub-images
435     // in one single buffer which can be written out to the save state.
436     for (u32 layer = 0; layer < config.layers; layer++)
437     {
438       for (u32 level = 0; level < config.levels; level++)
439       {
440         u32 level_width = std::max(config.width >> level, 1u);
441         u32 level_height = std::max(config.height >> level, 1u);
442         auto rect = tex->GetConfig().GetMipRect(level);
443         if (!skip_readback)
444           m_readback_texture->CopyFromTexture(tex, rect, layer, level, rect);
445 
446         size_t stride = AbstractTexture::CalculateStrideForFormat(config.format, level_width);
447         size_t size = stride * level_height;
448         size_t start = texture_data.size();
449         texture_data.resize(texture_data.size() + size);
450         if (!skip_readback)
451           m_readback_texture->ReadTexels(rect, &texture_data[start], static_cast<u32>(stride));
452       }
453     }
454   }
455   else
456   {
457     PanicAlert("Failed to create staging texture for serialization");
458   }
459 
460   p.Do(texture_data);
461 }
462 
DeserializeTexture(PointerWrap & p)463 std::optional<TextureCacheBase::TexPoolEntry> TextureCacheBase::DeserializeTexture(PointerWrap& p)
464 {
465   TextureConfig config;
466   p.Do(config);
467 
468   std::vector<u8> texture_data;
469   p.Do(texture_data);
470 
471   if (p.GetMode() != PointerWrap::MODE_READ || texture_data.empty())
472     return std::nullopt;
473 
474   auto tex = AllocateTexture(config);
475   if (!tex)
476   {
477     PanicAlert("Failed to create texture for deserialization");
478     return std::nullopt;
479   }
480 
481   size_t start = 0;
482   for (u32 layer = 0; layer < config.layers; layer++)
483   {
484     for (u32 level = 0; level < config.levels; level++)
485     {
486       u32 level_width = std::max(config.width >> level, 1u);
487       u32 level_height = std::max(config.height >> level, 1u);
488       size_t stride = AbstractTexture::CalculateStrideForFormat(config.format, level_width);
489       size_t size = stride * level_height;
490       if ((start + size) > texture_data.size())
491       {
492         ERROR_LOG(VIDEO, "Insufficient texture data for layer %u level %u", layer, level);
493         return tex;
494       }
495 
496       tex->texture->Load(level, level_width, level_height, level_width, &texture_data[start], size);
497       start += size;
498     }
499   }
500 
501   return tex;
502 }
503 
DoState(PointerWrap & p)504 void TextureCacheBase::DoState(PointerWrap& p)
505 {
506   // Flush all pending XFB copies before either loading or saving.
507   FlushEFBCopies();
508 
509   p.Do(last_entry_id);
510 
511   if (p.GetMode() == PointerWrap::MODE_WRITE || p.GetMode() == PointerWrap::MODE_MEASURE)
512     DoSaveState(p);
513   else
514     DoLoadState(p);
515 }
516 
DoSaveState(PointerWrap & p)517 void TextureCacheBase::DoSaveState(PointerWrap& p)
518 {
519   std::map<const TCacheEntry*, u32> entry_map;
520   std::vector<TCacheEntry*> entries_to_save;
521   auto ShouldSaveEntry = [](const TCacheEntry* entry) {
522     // We skip non-copies as they can be decoded from RAM when the state is loaded.
523     // Storing them would duplicate data in the save state file, adding to decompression time.
524     return entry->IsCopy();
525   };
526   auto AddCacheEntryToMap = [&entry_map, &entries_to_save](TCacheEntry* entry) -> u32 {
527     auto iter = entry_map.find(entry);
528     if (iter != entry_map.end())
529       return iter->second;
530 
531     // Since we are sequentially allocating texture entries, we need to save the textures in the
532     // same order they were collected. This is because of iterating both the address and hash maps.
533     // Therefore, the map is used for fast lookup, and the vector for ordering.
534     u32 id = static_cast<u32>(entry_map.size());
535     entry_map.emplace(entry, id);
536     entries_to_save.push_back(entry);
537     return id;
538   };
539   auto GetCacheEntryId = [&entry_map](const TCacheEntry* entry) -> std::optional<u32> {
540     auto iter = entry_map.find(entry);
541     return iter != entry_map.end() ? std::make_optional(iter->second) : std::nullopt;
542   };
543 
544   // Transform the textures_by_address and textures_by_hash maps to a mapping
545   // of address/hash to entry ID.
546   std::vector<std::pair<u32, u32>> textures_by_address_list;
547   std::vector<std::pair<u64, u32>> textures_by_hash_list;
548   if (Config::Get(Config::GFX_SAVE_TEXTURE_CACHE_TO_STATE))
549   {
550     for (const auto& it : textures_by_address)
551     {
552       if (ShouldSaveEntry(it.second))
553       {
554         const u32 id = AddCacheEntryToMap(it.second);
555         textures_by_address_list.emplace_back(it.first, id);
556       }
557     }
558     for (const auto& it : textures_by_hash)
559     {
560       if (ShouldSaveEntry(it.second))
561       {
562         const u32 id = AddCacheEntryToMap(it.second);
563         textures_by_hash_list.emplace_back(it.first, id);
564       }
565     }
566   }
567 
568   // Save the texture cache entries out in the order the were referenced.
569   u32 size = static_cast<u32>(entries_to_save.size());
570   p.Do(size);
571   for (TCacheEntry* entry : entries_to_save)
572   {
573     SerializeTexture(entry->texture.get(), entry->texture->GetConfig(), p);
574     entry->DoState(p);
575   }
576   p.DoMarker("TextureCacheEntries");
577 
578   // Save references for each cache entry.
579   // As references are circular, we need to have everything created before linking entries.
580   std::set<std::pair<u32, u32>> reference_pairs;
581   for (const auto& it : entry_map)
582   {
583     const TCacheEntry* entry = it.first;
584     auto id1 = GetCacheEntryId(entry);
585     if (!id1)
586       continue;
587 
588     for (const TCacheEntry* referenced_entry : entry->references)
589     {
590       auto id2 = GetCacheEntryId(referenced_entry);
591       if (!id2)
592         continue;
593 
594       auto refpair1 = std::make_pair(*id1, *id2);
595       auto refpair2 = std::make_pair(*id2, *id1);
596       if (reference_pairs.count(refpair1) == 0 && reference_pairs.count(refpair2) == 0)
597         reference_pairs.insert(refpair1);
598     }
599   }
600 
601   size = static_cast<u32>(reference_pairs.size());
602   p.Do(size);
603   for (const auto& it : reference_pairs)
604   {
605     p.Do(it.first);
606     p.Do(it.second);
607   }
608 
609   size = static_cast<u32>(textures_by_address_list.size());
610   p.Do(size);
611   for (const auto& it : textures_by_address_list)
612   {
613     p.Do(it.first);
614     p.Do(it.second);
615   }
616 
617   size = static_cast<u32>(textures_by_hash_list.size());
618   p.Do(size);
619   for (const auto& it : textures_by_hash_list)
620   {
621     p.Do(it.first);
622     p.Do(it.second);
623   }
624 
625   // Free the readback texture to potentially save host-mapped GPU memory, depending on where
626   // the driver mapped the staging buffer.
627   m_readback_texture.reset();
628 }
629 
DoLoadState(PointerWrap & p)630 void TextureCacheBase::DoLoadState(PointerWrap& p)
631 {
632   // Helper for getting a cache entry from an ID.
633   std::map<u32, TCacheEntry*> id_map;
634   auto GetEntry = [&id_map](u32 id) {
635     auto iter = id_map.find(id);
636     return iter == id_map.end() ? nullptr : iter->second;
637   };
638 
639   // Only clear out state when actually restoring/loading.
640   // Since we throw away entries when not in loading mode now, we don't need to check
641   // before inserting entries into the cache, as GetEntry will always return null.
642   const bool commit_state = p.GetMode() == PointerWrap::MODE_READ;
643   if (commit_state)
644     Invalidate();
645 
646   // Preload all cache entries.
647   u32 size = 0;
648   p.Do(size);
649   for (u32 i = 0; i < size; i++)
650   {
651     // Even if the texture isn't valid, we still need to create the cache entry object
652     // to update the point in the state state. We'll just throw it away if it's invalid.
653     auto tex = DeserializeTexture(p);
654     TCacheEntry* entry = new TCacheEntry(std::move(tex->texture), std::move(tex->framebuffer));
655     entry->textures_by_hash_iter = textures_by_hash.end();
656     entry->DoState(p);
657     if (entry->texture && commit_state)
658       id_map.emplace(i, entry);
659     else
660       delete entry;
661   }
662   p.DoMarker("TextureCacheEntries");
663 
664   // Link all cache entry references.
665   p.Do(size);
666   for (u32 i = 0; i < size; i++)
667   {
668     u32 id1 = 0, id2 = 0;
669     p.Do(id1);
670     p.Do(id2);
671     TCacheEntry* e1 = GetEntry(id1);
672     TCacheEntry* e2 = GetEntry(id2);
673     if (e1 && e2)
674       e1->CreateReference(e2);
675   }
676 
677   // Fill in address map.
678   p.Do(size);
679   for (u32 i = 0; i < size; i++)
680   {
681     u32 addr = 0;
682     u32 id = 0;
683     p.Do(addr);
684     p.Do(id);
685 
686     TCacheEntry* entry = GetEntry(id);
687     if (entry)
688       textures_by_address.emplace(addr, entry);
689   }
690 
691   // Fill in hash map.
692   p.Do(size);
693   for (u32 i = 0; i < size; i++)
694   {
695     u64 hash = 0;
696     u32 id = 0;
697     p.Do(hash);
698     p.Do(id);
699 
700     TCacheEntry* entry = GetEntry(id);
701     if (entry)
702       entry->textures_by_hash_iter = textures_by_hash.emplace(hash, entry);
703   }
704 }
705 
DoState(PointerWrap & p)706 void TextureCacheBase::TCacheEntry::DoState(PointerWrap& p)
707 {
708   p.Do(addr);
709   p.Do(size_in_bytes);
710   p.Do(base_hash);
711   p.Do(hash);
712   p.Do(format);
713   p.Do(memory_stride);
714   p.Do(is_efb_copy);
715   p.Do(is_custom_tex);
716   p.Do(may_have_overlapping_textures);
717   p.Do(tmem_only);
718   p.Do(has_arbitrary_mips);
719   p.Do(should_force_safe_hashing);
720   p.Do(is_xfb_copy);
721   p.Do(is_xfb_container);
722   p.Do(id);
723   p.Do(reference_changed);
724   p.Do(native_width);
725   p.Do(native_height);
726   p.Do(native_levels);
727   p.Do(frameCount);
728 }
729 
730 TextureCacheBase::TCacheEntry*
DoPartialTextureUpdates(TCacheEntry * entry_to_update,u8 * palette,TLUTFormat tlutfmt)731 TextureCacheBase::DoPartialTextureUpdates(TCacheEntry* entry_to_update, u8* palette,
732                                           TLUTFormat tlutfmt)
733 {
734   // If the flag may_have_overlapping_textures is cleared, there are no overlapping EFB copies,
735   // which aren't applied already. It is set for new textures, and for the affected range
736   // on each EFB copy.
737   if (!entry_to_update->may_have_overlapping_textures)
738     return entry_to_update;
739   entry_to_update->may_have_overlapping_textures = false;
740 
741   const bool isPaletteTexture = IsColorIndexed(entry_to_update->format.texfmt);
742 
743   // EFB copies are excluded from these updates, until there's an example where a game would
744   // benefit from updating. This would require more work to be done.
745   if (entry_to_update->IsCopy())
746     return entry_to_update;
747 
748   u32 block_width = TexDecoder_GetBlockWidthInTexels(entry_to_update->format.texfmt);
749   u32 block_height = TexDecoder_GetBlockHeightInTexels(entry_to_update->format.texfmt);
750   u32 block_size = block_width * block_height *
751                    TexDecoder_GetTexelSizeInNibbles(entry_to_update->format.texfmt) / 2;
752 
753   u32 numBlocksX = (entry_to_update->native_width + block_width - 1) / block_width;
754 
755   auto iter = FindOverlappingTextures(entry_to_update->addr, entry_to_update->size_in_bytes);
756   while (iter.first != iter.second)
757   {
758     TCacheEntry* entry = iter.first->second;
759     if (entry != entry_to_update && entry->IsCopy() && !entry->tmem_only &&
760         entry->references.count(entry_to_update) == 0 &&
761         entry->OverlapsMemoryRange(entry_to_update->addr, entry_to_update->size_in_bytes) &&
762         entry->memory_stride == numBlocksX * block_size)
763     {
764       if (entry->hash == entry->CalculateHash())
765       {
766         // If the texture formats are not compatible or convertible, skip it.
767         if (!IsCompatibleTextureFormat(entry_to_update->format.texfmt, entry->format.texfmt))
768         {
769           if (!CanReinterpretTextureOnGPU(entry_to_update->format.texfmt, entry->format.texfmt))
770           {
771             ++iter.first;
772             continue;
773           }
774 
775           TCacheEntry* reinterpreted_entry =
776               ReinterpretEntry(entry, entry_to_update->format.texfmt);
777           if (reinterpreted_entry)
778             entry = reinterpreted_entry;
779         }
780 
781         if (isPaletteTexture)
782         {
783           TCacheEntry* decoded_entry = ApplyPaletteToEntry(entry, palette, tlutfmt);
784           if (decoded_entry)
785           {
786             // Link the efb copy with the partially updated texture, so we won't apply this partial
787             // update again
788             entry->CreateReference(entry_to_update);
789             // Mark the texture update as used, as if it was loaded directly
790             entry->frameCount = FRAMECOUNT_INVALID;
791             entry = decoded_entry;
792           }
793           else
794           {
795             ++iter.first;
796             continue;
797           }
798         }
799 
800         u32 src_x, src_y, dst_x, dst_y;
801 
802         // Note for understanding the math:
803         // Normal textures can't be strided, so the 2 missing cases with src_x > 0 don't exist
804         if (entry->addr >= entry_to_update->addr)
805         {
806           u32 block_offset = (entry->addr - entry_to_update->addr) / block_size;
807           u32 block_x = block_offset % numBlocksX;
808           u32 block_y = block_offset / numBlocksX;
809           src_x = 0;
810           src_y = 0;
811           dst_x = block_x * block_width;
812           dst_y = block_y * block_height;
813         }
814         else
815         {
816           u32 block_offset = (entry_to_update->addr - entry->addr) / block_size;
817           u32 block_x = (~block_offset + 1) % numBlocksX;
818           u32 block_y = (block_offset + block_x) / numBlocksX;
819           src_x = 0;
820           src_y = block_y * block_height;
821           dst_x = block_x * block_width;
822           dst_y = 0;
823         }
824 
825         u32 copy_width =
826             std::min(entry->native_width - src_x, entry_to_update->native_width - dst_x);
827         u32 copy_height =
828             std::min(entry->native_height - src_y, entry_to_update->native_height - dst_y);
829 
830         // If one of the textures is scaled, scale both with the current efb scaling factor
831         if (entry_to_update->native_width != entry_to_update->GetWidth() ||
832             entry_to_update->native_height != entry_to_update->GetHeight() ||
833             entry->native_width != entry->GetWidth() || entry->native_height != entry->GetHeight())
834         {
835           ScaleTextureCacheEntryTo(entry_to_update,
836                                    g_renderer->EFBToScaledX(entry_to_update->native_width),
837                                    g_renderer->EFBToScaledY(entry_to_update->native_height));
838           ScaleTextureCacheEntryTo(entry, g_renderer->EFBToScaledX(entry->native_width),
839                                    g_renderer->EFBToScaledY(entry->native_height));
840 
841           src_x = g_renderer->EFBToScaledX(src_x);
842           src_y = g_renderer->EFBToScaledY(src_y);
843           dst_x = g_renderer->EFBToScaledX(dst_x);
844           dst_y = g_renderer->EFBToScaledY(dst_y);
845           copy_width = g_renderer->EFBToScaledX(copy_width);
846           copy_height = g_renderer->EFBToScaledY(copy_height);
847         }
848 
849         // If the source rectangle is outside of what we actually have in VRAM, skip the copy.
850         // The backend doesn't do any clamping, so if we don't, we'd pass out-of-range coordinates
851         // to the graphics driver, which can cause GPU resets.
852         if (static_cast<u32>(src_x + copy_width) > entry->GetWidth() ||
853             static_cast<u32>(src_y + copy_height) > entry->GetHeight() ||
854             static_cast<u32>(dst_x + copy_width) > entry_to_update->GetWidth() ||
855             static_cast<u32>(dst_y + copy_height) > entry_to_update->GetHeight())
856         {
857           ++iter.first;
858           continue;
859         }
860 
861         MathUtil::Rectangle<int> srcrect, dstrect;
862         srcrect.left = src_x;
863         srcrect.top = src_y;
864         srcrect.right = (src_x + copy_width);
865         srcrect.bottom = (src_y + copy_height);
866         dstrect.left = dst_x;
867         dstrect.top = dst_y;
868         dstrect.right = (dst_x + copy_width);
869         dstrect.bottom = (dst_y + copy_height);
870 
871         // If one copy is stereo, and the other isn't... not much we can do here :/
872         const u32 layers_to_copy = std::min(entry->GetNumLayers(), entry_to_update->GetNumLayers());
873         for (u32 layer = 0; layer < layers_to_copy; layer++)
874         {
875           entry_to_update->texture->CopyRectangleFromTexture(entry->texture.get(), srcrect, layer,
876                                                              0, dstrect, layer, 0);
877         }
878 
879         if (isPaletteTexture)
880         {
881           // Remove the temporary converted texture, it won't be used anywhere else
882           // TODO: It would be nice to convert and copy in one step, but this code path isn't common
883           iter.first = InvalidateTexture(iter.first);
884           continue;
885         }
886         else
887         {
888           // Link the two textures together, so we won't apply this partial update again
889           entry->CreateReference(entry_to_update);
890           // Mark the texture update as used, as if it was loaded directly
891           entry->frameCount = FRAMECOUNT_INVALID;
892         }
893       }
894       else
895       {
896         // If the hash does not match, this EFB copy will not be used for anything, so remove it
897         iter.first = InvalidateTexture(iter.first);
898         continue;
899       }
900     }
901     ++iter.first;
902   }
903 
904   return entry_to_update;
905 }
906 
DumpTexture(TCacheEntry * entry,std::string basename,unsigned int level,bool is_arbitrary)907 void TextureCacheBase::DumpTexture(TCacheEntry* entry, std::string basename, unsigned int level,
908                                    bool is_arbitrary)
909 {
910   std::string szDir = File::GetUserPath(D_DUMPTEXTURES_IDX) + SConfig::GetInstance().GetGameID();
911 
912   // make sure that the directory exists
913   if (!File::IsDirectory(szDir))
914     File::CreateDir(szDir);
915 
916   if (is_arbitrary)
917   {
918     basename += "_arb";
919   }
920 
921   if (level > 0)
922   {
923     if (!g_ActiveConfig.bDumpMipmapTextures)
924       return;
925     basename += fmt::format("_mip{}", level);
926   }
927   else
928   {
929     if (!g_ActiveConfig.bDumpBaseTextures)
930       return;
931   }
932 
933   const std::string filename = fmt::format("{}/{}.png", szDir, basename);
934   if (File::Exists(filename))
935     return;
936 
937   entry->texture->Save(filename, level);
938 }
939 
CalculateLevelSize(u32 level_0_size,u32 level)940 static u32 CalculateLevelSize(u32 level_0_size, u32 level)
941 {
942   return std::max(level_0_size >> level, 1u);
943 }
944 
SetSamplerState(u32 index,float custom_tex_scale,bool custom_tex,bool has_arbitrary_mips)945 static void SetSamplerState(u32 index, float custom_tex_scale, bool custom_tex,
946                             bool has_arbitrary_mips)
947 {
948   const FourTexUnits& tex = bpmem.tex[index / 4];
949   const TexMode0& tm0 = tex.texMode0[index % 4];
950 
951   SamplerState state = {};
952   state.Generate(bpmem, index);
953 
954   // Force texture filtering config option.
955   if (g_ActiveConfig.bForceFiltering)
956   {
957     state.min_filter = SamplerState::Filter::Linear;
958     state.mag_filter = SamplerState::Filter::Linear;
959     state.mipmap_filter = SamplerCommon::AreBpTexMode0MipmapsEnabled(tm0) ?
960                               SamplerState::Filter::Linear :
961                               SamplerState::Filter::Point;
962   }
963 
964   // Custom textures may have a greater number of mips
965   if (custom_tex)
966     state.max_lod = 255;
967 
968   // Anisotropic filtering option.
969   if (g_ActiveConfig.iMaxAnisotropy != 0 && !SamplerCommon::IsBpTexMode0PointFiltering(tm0))
970   {
971     // https://www.opengl.org/registry/specs/EXT/texture_filter_anisotropic.txt
972     // For predictable results on all hardware/drivers, only use one of:
973     //	GL_LINEAR + GL_LINEAR (No Mipmaps [Bilinear])
974     //	GL_LINEAR + GL_LINEAR_MIPMAP_LINEAR (w/ Mipmaps [Trilinear])
975     // Letting the game set other combinations will have varying arbitrary results;
976     // possibly being interpreted as equal to bilinear/trilinear, implicitly
977     // disabling anisotropy, or changing the anisotropic algorithm employed.
978     state.min_filter = SamplerState::Filter::Linear;
979     state.mag_filter = SamplerState::Filter::Linear;
980     if (SamplerCommon::AreBpTexMode0MipmapsEnabled(tm0))
981       state.mipmap_filter = SamplerState::Filter::Linear;
982     state.anisotropic_filtering = 1;
983   }
984   else
985   {
986     state.anisotropic_filtering = 0;
987   }
988 
989   if (has_arbitrary_mips && SamplerCommon::AreBpTexMode0MipmapsEnabled(tm0))
990   {
991     // Apply a secondary bias calculated from the IR scale to pull inwards mipmaps
992     // that have arbitrary contents, eg. are used for fog effects where the
993     // distance they kick in at is important to preserve at any resolution.
994     // Correct this with the upscaling factor of custom textures.
995     s64 lod_offset = std::log2(g_renderer->GetEFBScale() / custom_tex_scale) * 256.f;
996     state.lod_bias = std::clamp<s64>(state.lod_bias + lod_offset, -32768, 32767);
997 
998     // Anisotropic also pushes mips farther away so it cannot be used either
999     state.anisotropic_filtering = 0;
1000   }
1001 
1002   g_renderer->SetSamplerState(index, state);
1003 }
1004 
BindTextures()1005 void TextureCacheBase::BindTextures()
1006 {
1007   for (u32 i = 0; i < bound_textures.size(); i++)
1008   {
1009     const TCacheEntry* tentry = bound_textures[i];
1010     if (IsValidBindPoint(i) && tentry)
1011     {
1012       g_renderer->SetTexture(i, tentry->texture.get());
1013       PixelShaderManager::SetTexDims(i, tentry->native_width, tentry->native_height);
1014 
1015       const float custom_tex_scale = tentry->GetWidth() / float(tentry->native_width);
1016       SetSamplerState(i, custom_tex_scale, tentry->is_custom_tex, tentry->has_arbitrary_mips);
1017     }
1018   }
1019 }
1020 
1021 class ArbitraryMipmapDetector
1022 {
1023 private:
1024   using PixelRGBAf = std::array<float, 4>;
1025   using PixelRGBAu8 = std::array<u8, 4>;
1026 
1027 public:
1028   explicit ArbitraryMipmapDetector() = default;
1029 
AddLevel(u32 width,u32 height,u32 row_length,const u8 * buffer)1030   void AddLevel(u32 width, u32 height, u32 row_length, const u8* buffer)
1031   {
1032     levels.push_back({{width, height, row_length}, buffer});
1033   }
1034 
HasArbitraryMipmaps(u8 * downsample_buffer) const1035   bool HasArbitraryMipmaps(u8* downsample_buffer) const
1036   {
1037     if (levels.size() < 2)
1038       return false;
1039 
1040     if (!g_ActiveConfig.bArbitraryMipmapDetection)
1041       return false;
1042 
1043     // This is the average per-pixel, per-channel difference in percent between what we
1044     // expect a normal blurred mipmap to look like and what we actually received
1045     // 4.5% was chosen because it's just below the lowest clearly-arbitrary texture
1046     // I found in my tests, the background clouds in Mario Galaxy's Observatory lobby.
1047     const auto threshold = g_ActiveConfig.fArbitraryMipmapDetectionThreshold;
1048 
1049     auto* src = downsample_buffer;
1050     auto* dst = downsample_buffer + levels[1].shape.row_length * levels[1].shape.height * 4;
1051 
1052     float total_diff = 0.f;
1053 
1054     for (std::size_t i = 0; i < levels.size() - 1; ++i)
1055     {
1056       const auto& level = levels[i];
1057       const auto& mip = levels[i + 1];
1058 
1059       u64 level_pixel_count = level.shape.width;
1060       level_pixel_count *= level.shape.height;
1061 
1062       // AverageDiff stores the difference sum in a u64, so make sure we can't overflow
1063       ASSERT(level_pixel_count < (std::numeric_limits<u64>::max() / (255 * 255 * 4)));
1064 
1065       // Manually downsample the past downsample with a simple box blur
1066       // This is not necessarily close to whatever the original artists used, however
1067       // It should still be closer than a thing that's not a downscale at all
1068       Level::Downsample(i ? src : level.pixels, level.shape, dst, mip.shape);
1069 
1070       // Find the average difference between pixels in this level but downsampled
1071       // and the next level
1072       auto diff = mip.AverageDiff(dst);
1073       total_diff += diff;
1074 
1075       std::swap(src, dst);
1076     }
1077 
1078     auto all_levels = total_diff / (levels.size() - 1);
1079     return all_levels > threshold;
1080   }
1081 
1082 private:
1083   struct Shape
1084   {
1085     u32 width;
1086     u32 height;
1087     u32 row_length;
1088   };
1089 
1090   struct Level
1091   {
1092     Shape shape;
1093     const u8* pixels;
1094 
SampleLinearArbitraryMipmapDetector::Level1095     static PixelRGBAu8 SampleLinear(const u8* src, const Shape& src_shape, u32 x, u32 y)
1096     {
1097       const auto* p = src + (x + y * src_shape.row_length) * 4;
1098       return {{p[0], p[1], p[2], p[3]}};
1099     }
1100 
1101     // Puts a downsampled image in dst. dst must be at least width*height*4
DownsampleArbitraryMipmapDetector::Level1102     static void Downsample(const u8* src, const Shape& src_shape, u8* dst, const Shape& dst_shape)
1103     {
1104       for (u32 i = 0; i < dst_shape.height; ++i)
1105       {
1106         for (u32 j = 0; j < dst_shape.width; ++j)
1107         {
1108           auto x = j * 2;
1109           auto y = i * 2;
1110           const std::array<PixelRGBAu8, 4> samples{{
1111               SampleLinear(src, src_shape, x, y),
1112               SampleLinear(src, src_shape, x + 1, y),
1113               SampleLinear(src, src_shape, x, y + 1),
1114               SampleLinear(src, src_shape, x + 1, y + 1),
1115           }};
1116 
1117           auto* dst_pixel = dst + (j + i * dst_shape.row_length) * 4;
1118           for (int channel = 0; channel < 4; channel++)
1119           {
1120             uint32_t channel_value = samples[0][channel] + samples[1][channel] +
1121                                      samples[2][channel] + samples[3][channel];
1122             dst_pixel[channel] = (channel_value + 2) / 4;
1123           }
1124         }
1125       }
1126     }
1127 
AverageDiffArbitraryMipmapDetector::Level1128     float AverageDiff(const u8* other) const
1129     {
1130       // As textures are stored in (at most) 8 bit precision, each channel can
1131       // have a max diff of (2^8)^2, multiply by 4 channels = 2^18 per pixel.
1132       // That means to overflow, we must have a texture with more than 2^46
1133       // pixels - which is way beyond anything the original hardware could do,
1134       // and likely a sane assumption going forward for some significant time.
1135       u64 current_diff_sum = 0;
1136       const auto* ptr1 = pixels;
1137       const auto* ptr2 = other;
1138       for (u32 i = 0; i < shape.height; ++i)
1139       {
1140         const auto* row1 = ptr1;
1141         const auto* row2 = ptr2;
1142         for (u32 j = 0; j < shape.width; ++j, row1 += 4, row2 += 4)
1143         {
1144           int pixel_diff = 0;
1145           for (int channel = 0; channel < 4; channel++)
1146           {
1147             const int diff = static_cast<int>(row1[channel]) - static_cast<int>(row2[channel]);
1148             const int diff_squared = diff * diff;
1149             pixel_diff += diff_squared;
1150           }
1151           current_diff_sum += pixel_diff;
1152         }
1153         ptr1 += shape.row_length;
1154         ptr2 += shape.row_length;
1155       }
1156       // calculate the MSE over all pixels, divide by 2.56 to make it a percent
1157       // (IE scale to 0..100 instead of 0..256)
1158 
1159       return std::sqrt(static_cast<float>(current_diff_sum) / (shape.width * shape.height * 4)) /
1160              2.56f;
1161     }
1162   };
1163   std::vector<Level> levels;
1164 };
1165 
Load(const u32 stage)1166 TextureCacheBase::TCacheEntry* TextureCacheBase::Load(const u32 stage)
1167 {
1168   // if this stage was not invalidated by changes to texture registers, keep the current texture
1169   if (IsValidBindPoint(stage) && bound_textures[stage])
1170   {
1171     return bound_textures[stage];
1172   }
1173 
1174   const FourTexUnits& tex = bpmem.tex[stage >> 2];
1175   const u32 id = stage & 3;
1176   const u32 address = (tex.texImage3[id].image_base /* & 0x1FFFFF*/) << 5;
1177   u32 width = tex.texImage0[id].width + 1;
1178   u32 height = tex.texImage0[id].height + 1;
1179   const TextureFormat texformat = static_cast<TextureFormat>(tex.texImage0[id].format);
1180   const u32 tlutaddr = tex.texTlut[id].tmem_offset << 9;
1181   const TLUTFormat tlutfmt = static_cast<TLUTFormat>(tex.texTlut[id].tlut_format);
1182   const bool use_mipmaps = SamplerCommon::AreBpTexMode0MipmapsEnabled(tex.texMode0[id]);
1183   u32 tex_levels = use_mipmaps ? ((tex.texMode1[id].max_lod + 0xf) / 0x10 + 1) : 1;
1184   const bool from_tmem = tex.texImage1[id].image_type != 0;
1185   const u32 tmem_address_even = from_tmem ? tex.texImage1[id].tmem_even * TMEM_LINE_SIZE : 0;
1186   const u32 tmem_address_odd = from_tmem ? tex.texImage2[id].tmem_odd * TMEM_LINE_SIZE : 0;
1187 
1188   auto entry = GetTexture(address, width, height, texformat,
1189                           g_ActiveConfig.iSafeTextureCache_ColorSamples, tlutaddr, tlutfmt,
1190                           use_mipmaps, tex_levels, from_tmem, tmem_address_even, tmem_address_odd);
1191 
1192   if (!entry)
1193     return nullptr;
1194 
1195   entry->frameCount = FRAMECOUNT_INVALID;
1196   bound_textures[stage] = entry;
1197 
1198   // We need to keep track of invalided textures until they have actually been replaced or
1199   // re-loaded
1200   valid_bind_points.set(stage);
1201 
1202   return entry;
1203 }
1204 
1205 TextureCacheBase::TCacheEntry*
GetTexture(u32 address,u32 width,u32 height,const TextureFormat texformat,const int textureCacheSafetyColorSampleSize,u32 tlutaddr,TLUTFormat tlutfmt,bool use_mipmaps,u32 tex_levels,bool from_tmem,u32 tmem_address_even,u32 tmem_address_odd)1206 TextureCacheBase::GetTexture(u32 address, u32 width, u32 height, const TextureFormat texformat,
1207                              const int textureCacheSafetyColorSampleSize, u32 tlutaddr,
1208                              TLUTFormat tlutfmt, bool use_mipmaps, u32 tex_levels, bool from_tmem,
1209                              u32 tmem_address_even, u32 tmem_address_odd)
1210 {
1211   // TexelSizeInNibbles(format) * width * height / 16;
1212   const unsigned int bsw = TexDecoder_GetBlockWidthInTexels(texformat);
1213   const unsigned int bsh = TexDecoder_GetBlockHeightInTexels(texformat);
1214 
1215   unsigned int expandedWidth = Common::AlignUp(width, bsw);
1216   unsigned int expandedHeight = Common::AlignUp(height, bsh);
1217   const unsigned int nativeW = width;
1218   const unsigned int nativeH = height;
1219 
1220   // Hash assigned to texcache entry (also used to generate filenames used for texture dumping and
1221   // custom texture lookup)
1222   u64 base_hash = TEXHASH_INVALID;
1223   u64 full_hash = TEXHASH_INVALID;
1224 
1225   TextureAndTLUTFormat full_format(texformat, tlutfmt);
1226 
1227   const bool isPaletteTexture = IsColorIndexed(texformat);
1228 
1229   // Reject invalid tlut format.
1230   if (isPaletteTexture && !IsValidTLUTFormat(tlutfmt))
1231     return nullptr;
1232 
1233   const u32 texture_size =
1234       TexDecoder_GetTextureSizeInBytes(expandedWidth, expandedHeight, texformat);
1235   u32 bytes_per_block = (bsw * bsh * TexDecoder_GetTexelSizeInNibbles(texformat)) / 2;
1236   u32 additional_mips_size = 0;  // not including level 0, which is texture_size
1237 
1238   // GPUs don't like when the specified mipmap count would require more than one 1x1-sized LOD in
1239   // the mipmap chain
1240   // e.g. 64x64 with 7 LODs would have the mipmap chain 64x64,32x32,16x16,8x8,4x4,2x2,1x1,0x0, so we
1241   // limit the mipmap count to 6 there
1242   tex_levels = std::min<u32>(IntLog2(std::max(width, height)) + 1, tex_levels);
1243 
1244   for (u32 level = 1; level != tex_levels; ++level)
1245   {
1246     // We still need to calculate the original size of the mips
1247     const u32 expanded_mip_width = Common::AlignUp(CalculateLevelSize(width, level), bsw);
1248     const u32 expanded_mip_height = Common::AlignUp(CalculateLevelSize(height, level), bsh);
1249 
1250     additional_mips_size +=
1251         TexDecoder_GetTextureSizeInBytes(expanded_mip_width, expanded_mip_height, texformat);
1252   }
1253 
1254   // TODO: the texture cache lookup is based on address, but a texture from tmem has no reason
1255   //       to have a unique and valid address. This could result in a regular texture and a tmem
1256   //       texture aliasing onto the same texture cache entry.
1257   const u8* src_data;
1258   if (from_tmem)
1259     src_data = &texMem[tmem_address_even];
1260   else
1261     src_data = Memory::GetPointer(address);
1262 
1263   if (!src_data)
1264   {
1265     ERROR_LOG(VIDEO, "Trying to use an invalid texture address 0x%8x", address);
1266     return nullptr;
1267   }
1268 
1269   // If we are recording a FifoLog, keep track of what memory we read. FifoRecorder does
1270   // its own memory modification tracking independent of the texture hashing below.
1271   if (OpcodeDecoder::g_record_fifo_data && !from_tmem)
1272   {
1273     FifoRecorder::GetInstance().UseMemory(address, texture_size + additional_mips_size,
1274                                           MemoryUpdate::TEXTURE_MAP);
1275   }
1276 
1277   // TODO: This doesn't hash GB tiles for preloaded RGBA8 textures (instead, it's hashing more data
1278   // from the low tmem bank than it should)
1279   base_hash = Common::GetHash64(src_data, texture_size, textureCacheSafetyColorSampleSize);
1280   u32 palette_size = 0;
1281   if (isPaletteTexture)
1282   {
1283     palette_size = TexDecoder_GetPaletteSize(texformat);
1284     full_hash = base_hash ^ Common::GetHash64(&texMem[tlutaddr], palette_size,
1285                                               textureCacheSafetyColorSampleSize);
1286   }
1287   else
1288   {
1289     full_hash = base_hash;
1290   }
1291 
1292   // Search the texture cache for textures by address
1293   //
1294   // Find all texture cache entries for the current texture address, and decide whether to use one
1295   // of
1296   // them, or to create a new one
1297   //
1298   // In most cases, the fastest way is to use only one texture cache entry for the same address.
1299   // Usually,
1300   // when a texture changes, the old version of the texture is unlikely to be used again. If there
1301   // were
1302   // new cache entries created for normal texture updates, there would be a slowdown due to a huge
1303   // amount
1304   // of unused cache entries. Also thanks to texture pooling, overwriting an existing cache entry is
1305   // faster than creating a new one from scratch.
1306   //
1307   // Some games use the same address for different textures though. If the same cache entry was used
1308   // in
1309   // this case, it would be constantly overwritten, and effectively there wouldn't be any caching
1310   // for
1311   // those textures. Examples for this are Metroid Prime and Castlevania 3. Metroid Prime has
1312   // multiple
1313   // sets of fonts on each other stored in a single texture and uses the palette to make different
1314   // characters visible or invisible. In Castlevania 3 some textures are used for 2 different things
1315   // or
1316   // at least in 2 different ways(size 1024x1024 vs 1024x256).
1317   //
1318   // To determine whether to use multiple cache entries or a single entry, use the following
1319   // heuristic:
1320   // If the same texture address is used several times during the same frame, assume the address is
1321   // used
1322   // for different purposes and allow creating an additional cache entry. If there's at least one
1323   // entry
1324   // that hasn't been used for the same frame, then overwrite it, in order to keep the cache as
1325   // small as
1326   // possible. If the current texture is found in the cache, use that entry.
1327   //
1328   // For efb copies, the entry created in CopyRenderTargetToTexture always has to be used, or else
1329   // it was
1330   // done in vain.
1331   auto iter_range = textures_by_address.equal_range(address);
1332   TexAddrCache::iterator iter = iter_range.first;
1333   TexAddrCache::iterator oldest_entry = iter;
1334   int temp_frameCount = 0x7fffffff;
1335   TexAddrCache::iterator unconverted_copy = textures_by_address.end();
1336   TexAddrCache::iterator unreinterpreted_copy = textures_by_address.end();
1337 
1338   while (iter != iter_range.second)
1339   {
1340     TCacheEntry* entry = iter->second;
1341 
1342     // Skip entries that are only left in our texture cache for the tmem cache emulation
1343     if (entry->tmem_only)
1344     {
1345       ++iter;
1346       continue;
1347     }
1348 
1349     // TODO: Some games (Rogue Squadron 3, Twin Snakes) seem to load a previously made XFB
1350     // copy as a regular texture. You can see this particularly well in RS3 whenever the
1351     // game freezes the image and fades it out to black on screen transitions, which fades
1352     // out a purple screen in XFB2Tex. Check for this here and convert them if necessary.
1353 
1354     // Do not load strided EFB copies, they are not meant to be used directly.
1355     // Also do not directly load EFB copies, which were partly overwritten.
1356     if (entry->IsEfbCopy() && entry->native_width == nativeW && entry->native_height == nativeH &&
1357         entry->memory_stride == entry->BytesPerRow() && !entry->may_have_overlapping_textures)
1358     {
1359       // EFB copies have slightly different rules as EFB copy formats have different
1360       // meanings from texture formats.
1361       if ((base_hash == entry->hash &&
1362            (!isPaletteTexture || g_Config.backend_info.bSupportsPaletteConversion)) ||
1363           IsPlayingBackFifologWithBrokenEFBCopies)
1364       {
1365         // The texture format in VRAM must match the format that the copy was created with. Some
1366         // formats are inherently compatible, as the channel and bit layout is identical (e.g.
1367         // I8/C8). Others have the same number of bits per texel, and can be reinterpreted on the
1368         // GPU (e.g. IA4 and I8 or RGB565 and RGBA5). The only known game which reinteprets texels
1369         // in this manner is Spiderman Shattered Dimensions, where it creates a copy in B8 format,
1370         // and sets it up as a IA4 texture.
1371         if (!IsCompatibleTextureFormat(entry->format.texfmt, texformat))
1372         {
1373           // Can we reinterpret this in VRAM?
1374           if (CanReinterpretTextureOnGPU(entry->format.texfmt, texformat))
1375           {
1376             // Delay the conversion until afterwards, it's possible this texture has already been
1377             // converted.
1378             unreinterpreted_copy = iter++;
1379             continue;
1380           }
1381           else
1382           {
1383             // If the EFB copies are in a different format and are not reinterpretable, use the RAM
1384             // copy.
1385             ++iter;
1386             continue;
1387           }
1388         }
1389         else
1390         {
1391           // Prefer the already-converted copy.
1392           unconverted_copy = textures_by_address.end();
1393         }
1394 
1395         // TODO: We should check width/height/levels for EFB copies. I'm not sure what effect
1396         // checking width/height/levels would have.
1397         if (!isPaletteTexture || !g_Config.backend_info.bSupportsPaletteConversion)
1398           return entry;
1399 
1400         // Note that we found an unconverted EFB copy, then continue.  We'll
1401         // perform the conversion later.  Currently, we only convert EFB copies to
1402         // palette textures; we could do other conversions if it proved to be
1403         // beneficial.
1404         unconverted_copy = iter;
1405       }
1406       else
1407       {
1408         // Aggressively prune EFB copies: if it isn't useful here, it will probably
1409         // never be useful again.  It's theoretically possible for a game to do
1410         // something weird where the copy could become useful in the future, but in
1411         // practice it doesn't happen.
1412         iter = InvalidateTexture(iter);
1413         continue;
1414       }
1415     }
1416     else
1417     {
1418       // For normal textures, all texture parameters need to match
1419       if (!entry->IsEfbCopy() && entry->hash == full_hash && entry->format == full_format &&
1420           entry->native_levels >= tex_levels && entry->native_width == nativeW &&
1421           entry->native_height == nativeH)
1422       {
1423         entry = DoPartialTextureUpdates(iter->second, &texMem[tlutaddr], tlutfmt);
1424         entry->texture->FinishedRendering();
1425         return entry;
1426       }
1427     }
1428 
1429     // Find the texture which hasn't been used for the longest time. Count paletted
1430     // textures as the same texture here, when the texture itself is the same. This
1431     // improves the performance a lot in some games that use paletted textures.
1432     // Example: Sonic the Fighters (inside Sonic Gems Collection)
1433     // Skip EFB copies here, so they can be used for partial texture updates
1434     // Also skip XFB copies, we might need to still scan them out
1435     // or load them as regular textures later.
1436     if (entry->frameCount != FRAMECOUNT_INVALID && entry->frameCount < temp_frameCount &&
1437         !entry->IsCopy() && !(isPaletteTexture && entry->base_hash == base_hash))
1438     {
1439       temp_frameCount = entry->frameCount;
1440       oldest_entry = iter;
1441     }
1442     ++iter;
1443   }
1444 
1445   if (unreinterpreted_copy != textures_by_address.end())
1446   {
1447     TCacheEntry* decoded_entry = ReinterpretEntry(unreinterpreted_copy->second, texformat);
1448 
1449     // It's possible to combine reinterpreted textures + palettes.
1450     if (unreinterpreted_copy == unconverted_copy && decoded_entry)
1451       decoded_entry = ApplyPaletteToEntry(decoded_entry, &texMem[tlutaddr], tlutfmt);
1452 
1453     if (decoded_entry)
1454       return decoded_entry;
1455   }
1456 
1457   if (unconverted_copy != textures_by_address.end())
1458   {
1459     TCacheEntry* decoded_entry =
1460         ApplyPaletteToEntry(unconverted_copy->second, &texMem[tlutaddr], tlutfmt);
1461 
1462     if (decoded_entry)
1463     {
1464       return decoded_entry;
1465     }
1466   }
1467 
1468   // Search the texture cache for normal textures by hash
1469   //
1470   // If the texture was fully hashed, the address does not need to match. Identical duplicate
1471   // textures cause unnecessary slowdowns
1472   // Example: Tales of Symphonia (GC) uses over 500 small textures in menus, but only around 70
1473   // different ones
1474   if (textureCacheSafetyColorSampleSize == 0 ||
1475       std::max(texture_size, palette_size) <= (u32)textureCacheSafetyColorSampleSize * 8)
1476   {
1477     auto hash_range = textures_by_hash.equal_range(full_hash);
1478     TexHashCache::iterator hash_iter = hash_range.first;
1479     while (hash_iter != hash_range.second)
1480     {
1481       TCacheEntry* entry = hash_iter->second;
1482       // All parameters, except the address, need to match here
1483       if (entry->format == full_format && entry->native_levels >= tex_levels &&
1484           entry->native_width == nativeW && entry->native_height == nativeH)
1485       {
1486         entry = DoPartialTextureUpdates(hash_iter->second, &texMem[tlutaddr], tlutfmt);
1487         entry->texture->FinishedRendering();
1488         return entry;
1489       }
1490       ++hash_iter;
1491     }
1492   }
1493 
1494   // If at least one entry was not used for the same frame, overwrite the oldest one
1495   if (temp_frameCount != 0x7fffffff)
1496   {
1497     // pool this texture and make a new one later
1498     InvalidateTexture(oldest_entry);
1499   }
1500 
1501   std::shared_ptr<HiresTexture> hires_tex;
1502   if (g_ActiveConfig.bHiresTextures)
1503   {
1504     hires_tex = HiresTexture::Search(src_data, texture_size, &texMem[tlutaddr], palette_size, width,
1505                                      height, texformat, use_mipmaps);
1506 
1507     if (hires_tex)
1508     {
1509       const auto& level = hires_tex->m_levels[0];
1510       if (level.width != width || level.height != height)
1511       {
1512         width = level.width;
1513         height = level.height;
1514       }
1515       expandedWidth = level.width;
1516       expandedHeight = level.height;
1517     }
1518   }
1519 
1520   // how many levels the allocated texture shall have
1521   const u32 texLevels = hires_tex ? (u32)hires_tex->m_levels.size() : tex_levels;
1522 
1523   // We can decode on the GPU if it is a supported format and the flag is enabled.
1524   // Currently we don't decode RGBA8 textures from Tmem, as that would require copying from both
1525   // banks, and if we're doing an copy we may as well just do the whole thing on the CPU, since
1526   // there's no conversion between formats. In the future this could be extended with a separate
1527   // shader, however.
1528   const bool decode_on_gpu = !hires_tex && g_ActiveConfig.UseGPUTextureDecoding() &&
1529                              !(from_tmem && texformat == TextureFormat::RGBA8);
1530 
1531   // create the entry/texture
1532   const TextureConfig config(width, height, texLevels, 1, 1,
1533                              hires_tex ? hires_tex->GetFormat() : AbstractTextureFormat::RGBA8, 0);
1534   TCacheEntry* entry = AllocateCacheEntry(config);
1535   if (!entry)
1536     return nullptr;
1537 
1538   ArbitraryMipmapDetector arbitrary_mip_detector;
1539   const u8* tlut = &texMem[tlutaddr];
1540   if (hires_tex)
1541   {
1542     const auto& level = hires_tex->m_levels[0];
1543     entry->texture->Load(0, level.width, level.height, level.row_length, level.data.data(),
1544                          level.data.size());
1545   }
1546 
1547   // Initialized to null because only software loading uses this buffer
1548   u8* dst_buffer = nullptr;
1549 
1550   if (!hires_tex)
1551   {
1552     if (!decode_on_gpu ||
1553         !DecodeTextureOnGPU(entry, 0, src_data, texture_size, texformat, width, height,
1554                             expandedWidth, expandedHeight, bytes_per_block * (expandedWidth / bsw),
1555                             tlut, tlutfmt))
1556     {
1557       size_t decoded_texture_size = expandedWidth * sizeof(u32) * expandedHeight;
1558 
1559       // Allocate memory for all levels at once
1560       size_t total_texture_size = decoded_texture_size;
1561 
1562       // For the downsample, we need 2 buffers; 1 is 1/4 of the original texture, the other 1/16
1563       size_t mip_downsample_buffer_size = decoded_texture_size * 5 / 16;
1564 
1565       size_t prev_level_size = decoded_texture_size;
1566       for (u32 i = 1; i < tex_levels; ++i)
1567       {
1568         prev_level_size /= 4;
1569         total_texture_size += prev_level_size;
1570       }
1571 
1572       // Add space for the downsampling at the end
1573       total_texture_size += mip_downsample_buffer_size;
1574 
1575       CheckTempSize(total_texture_size);
1576       dst_buffer = temp;
1577       if (!(texformat == TextureFormat::RGBA8 && from_tmem))
1578       {
1579         TexDecoder_Decode(dst_buffer, src_data, expandedWidth, expandedHeight, texformat, tlut,
1580                           tlutfmt);
1581       }
1582       else
1583       {
1584         u8* src_data_gb = &texMem[tmem_address_odd];
1585         TexDecoder_DecodeRGBA8FromTmem(dst_buffer, src_data, src_data_gb, expandedWidth,
1586                                        expandedHeight);
1587       }
1588 
1589       entry->texture->Load(0, width, height, expandedWidth, dst_buffer, decoded_texture_size);
1590 
1591       arbitrary_mip_detector.AddLevel(width, height, expandedWidth, dst_buffer);
1592 
1593       dst_buffer += decoded_texture_size;
1594     }
1595   }
1596 
1597   iter = textures_by_address.emplace(address, entry);
1598   if (textureCacheSafetyColorSampleSize == 0 ||
1599       std::max(texture_size, palette_size) <= (u32)textureCacheSafetyColorSampleSize * 8)
1600   {
1601     entry->textures_by_hash_iter = textures_by_hash.emplace(full_hash, entry);
1602   }
1603 
1604   entry->SetGeneralParameters(address, texture_size, full_format, false);
1605   entry->SetDimensions(nativeW, nativeH, tex_levels);
1606   entry->SetHashes(base_hash, full_hash);
1607   entry->is_custom_tex = hires_tex != nullptr;
1608   entry->memory_stride = entry->BytesPerRow();
1609   entry->SetNotCopy();
1610 
1611   std::string basename;
1612   if (g_ActiveConfig.bDumpTextures && !hires_tex)
1613   {
1614     basename = HiresTexture::GenBaseName(src_data, texture_size, &texMem[tlutaddr], palette_size,
1615                                          width, height, texformat, use_mipmaps, true);
1616   }
1617 
1618   if (hires_tex)
1619   {
1620     for (u32 level_index = 1; level_index != texLevels; ++level_index)
1621     {
1622       const auto& level = hires_tex->m_levels[level_index];
1623       entry->texture->Load(level_index, level.width, level.height, level.row_length,
1624                            level.data.data(), level.data.size());
1625     }
1626   }
1627   else
1628   {
1629     // load mips - TODO: Loading mipmaps from tmem is untested!
1630     src_data += texture_size;
1631 
1632     const u8* ptr_even = nullptr;
1633     const u8* ptr_odd = nullptr;
1634     if (from_tmem)
1635     {
1636       ptr_even = &texMem[tmem_address_even + texture_size];
1637       ptr_odd = &texMem[tmem_address_odd];
1638     }
1639 
1640     for (u32 level = 1; level != texLevels; ++level)
1641     {
1642       const u32 mip_width = CalculateLevelSize(width, level);
1643       const u32 mip_height = CalculateLevelSize(height, level);
1644       const u32 expanded_mip_width = Common::AlignUp(mip_width, bsw);
1645       const u32 expanded_mip_height = Common::AlignUp(mip_height, bsh);
1646 
1647       const u8*& mip_src_data = from_tmem ? ((level % 2) ? ptr_odd : ptr_even) : src_data;
1648       const u32 mip_size =
1649           TexDecoder_GetTextureSizeInBytes(expanded_mip_width, expanded_mip_height, texformat);
1650 
1651       if (!decode_on_gpu ||
1652           !DecodeTextureOnGPU(entry, level, mip_src_data, mip_size, texformat, mip_width,
1653                               mip_height, expanded_mip_width, expanded_mip_height,
1654                               bytes_per_block * (expanded_mip_width / bsw), tlut, tlutfmt))
1655       {
1656         // No need to call CheckTempSize here, as the whole buffer is preallocated at the beginning
1657         const u32 decoded_mip_size = expanded_mip_width * sizeof(u32) * expanded_mip_height;
1658         TexDecoder_Decode(dst_buffer, mip_src_data, expanded_mip_width, expanded_mip_height,
1659                           texformat, tlut, tlutfmt);
1660         entry->texture->Load(level, mip_width, mip_height, expanded_mip_width, dst_buffer,
1661                              decoded_mip_size);
1662 
1663         arbitrary_mip_detector.AddLevel(mip_width, mip_height, expanded_mip_width, dst_buffer);
1664 
1665         dst_buffer += decoded_mip_size;
1666       }
1667 
1668       mip_src_data += mip_size;
1669     }
1670   }
1671 
1672   entry->has_arbitrary_mips = hires_tex ? hires_tex->HasArbitraryMipmaps() :
1673                                           arbitrary_mip_detector.HasArbitraryMipmaps(dst_buffer);
1674 
1675   if (g_ActiveConfig.bDumpTextures && !hires_tex)
1676   {
1677     for (u32 level = 0; level < texLevels; ++level)
1678     {
1679       DumpTexture(entry, basename, level, entry->has_arbitrary_mips);
1680     }
1681   }
1682 
1683   INCSTAT(g_stats.num_textures_uploaded);
1684   SETSTAT(g_stats.num_textures_alive, static_cast<int>(textures_by_address.size()));
1685 
1686   entry = DoPartialTextureUpdates(iter->second, &texMem[tlutaddr], tlutfmt);
1687 
1688   // This should only be needed if the texture was updated, or used GPU decoding.
1689   entry->texture->FinishedRendering();
1690   return entry;
1691 }
1692 
GetDisplayRectForXFBEntry(TextureCacheBase::TCacheEntry * entry,u32 width,u32 height,MathUtil::Rectangle<int> * display_rect)1693 static void GetDisplayRectForXFBEntry(TextureCacheBase::TCacheEntry* entry, u32 width, u32 height,
1694                                       MathUtil::Rectangle<int>* display_rect)
1695 {
1696   // Scale the sub-rectangle to the full resolution of the texture.
1697   display_rect->left = 0;
1698   display_rect->top = 0;
1699   display_rect->right = static_cast<int>(width * entry->GetWidth() / entry->native_width);
1700   display_rect->bottom = static_cast<int>(height * entry->GetHeight() / entry->native_height);
1701 }
1702 
1703 TextureCacheBase::TCacheEntry*
GetXFBTexture(u32 address,u32 width,u32 height,u32 stride,MathUtil::Rectangle<int> * display_rect)1704 TextureCacheBase::GetXFBTexture(u32 address, u32 width, u32 height, u32 stride,
1705                                 MathUtil::Rectangle<int>* display_rect)
1706 {
1707   const u8* src_data = Memory::GetPointer(address);
1708   if (!src_data)
1709   {
1710     ERROR_LOG(VIDEO, "Trying to load XFB texture from invalid address 0x%8x", address);
1711     return nullptr;
1712   }
1713 
1714   // Compute total texture size. XFB textures aren't tiled, so this is simple.
1715   const u32 total_size = height * stride;
1716   const u64 hash = Common::GetHash64(src_data, total_size, 0);
1717 
1718   // Do we currently have a version of this XFB copy in VRAM?
1719   TCacheEntry* entry = GetXFBFromCache(address, width, height, stride, hash);
1720   if (entry)
1721   {
1722     if (entry->is_xfb_container)
1723     {
1724       StitchXFBCopy(entry);
1725       entry->texture->FinishedRendering();
1726     }
1727 
1728     GetDisplayRectForXFBEntry(entry, width, height, display_rect);
1729     return entry;
1730   }
1731 
1732   // Create a new VRAM texture, and fill it with the data from guest RAM.
1733   entry = AllocateCacheEntry(TextureConfig(width, height, 1, 1, 1, AbstractTextureFormat::RGBA8,
1734                                            AbstractTextureFlag_RenderTarget));
1735   entry->SetGeneralParameters(address, total_size,
1736                               TextureAndTLUTFormat(TextureFormat::XFB, TLUTFormat::IA8), true);
1737   entry->SetDimensions(width, height, 1);
1738   entry->SetHashes(hash, hash);
1739   entry->SetXfbCopy(stride);
1740   entry->is_xfb_container = true;
1741   entry->is_custom_tex = false;
1742   entry->may_have_overlapping_textures = false;
1743   entry->frameCount = FRAMECOUNT_INVALID;
1744   if (!g_ActiveConfig.UseGPUTextureDecoding() ||
1745       !DecodeTextureOnGPU(entry, 0, src_data, total_size, entry->format.texfmt, width, height,
1746                           width, height, stride, texMem, entry->format.tlutfmt))
1747   {
1748     const u32 decoded_size = width * height * sizeof(u32);
1749     CheckTempSize(decoded_size);
1750     TexDecoder_DecodeXFB(temp, src_data, width, height, stride);
1751     entry->texture->Load(0, width, height, width, temp, decoded_size);
1752   }
1753 
1754   // Stitch any VRAM copies into the new RAM copy.
1755   StitchXFBCopy(entry);
1756   entry->texture->FinishedRendering();
1757 
1758   // Insert into the texture cache so we can re-use it next frame, if needed.
1759   textures_by_address.emplace(entry->addr, entry);
1760   SETSTAT(g_stats.num_textures_alive, static_cast<int>(textures_by_address.size()));
1761   INCSTAT(g_stats.num_textures_uploaded);
1762 
1763   if (g_ActiveConfig.bDumpXFBTarget)
1764   {
1765     // While this isn't really an xfb copy, we can treat it as such for dumping purposes
1766     static int xfb_count = 0;
1767     entry->texture->Save(
1768         fmt::format("{}xfb_loaded_{}.png", File::GetUserPath(D_DUMPTEXTURES_IDX), xfb_count++), 0);
1769   }
1770 
1771   GetDisplayRectForXFBEntry(entry, width, height, display_rect);
1772   return entry;
1773 }
1774 
GetXFBFromCache(u32 address,u32 width,u32 height,u32 stride,u64 hash)1775 TextureCacheBase::TCacheEntry* TextureCacheBase::GetXFBFromCache(u32 address, u32 width, u32 height,
1776                                                                  u32 stride, u64 hash)
1777 {
1778   auto iter_range = textures_by_address.equal_range(address);
1779   TexAddrCache::iterator iter = iter_range.first;
1780 
1781   while (iter != iter_range.second)
1782   {
1783     TCacheEntry* entry = iter->second;
1784 
1785     // The only thing which has to match exactly is the stride. We can use a partial rectangle if
1786     // the VI width/height differs from that of the XFB copy.
1787     if (entry->is_xfb_copy && entry->memory_stride == stride && entry->native_width >= width &&
1788         entry->native_height >= height && !entry->may_have_overlapping_textures)
1789     {
1790       // But if the dimensions do differ, we must compute the hash on the sub-rectangle.
1791       u64 check_hash = hash;
1792       if (entry->native_width != width || entry->native_height != height)
1793       {
1794         check_hash = Common::GetHash64(Memory::GetPointer(entry->addr),
1795                                        entry->memory_stride * entry->native_height, 0);
1796       }
1797 
1798       if (entry->hash == check_hash && !entry->reference_changed)
1799       {
1800         return entry;
1801       }
1802       else
1803       {
1804         // At this point, we either have an xfb copy that has changed its hash
1805         // or an xfb created by stitching or from memory that has been changed
1806         // we are safe to invalidate this
1807         iter = InvalidateTexture(iter);
1808         continue;
1809       }
1810     }
1811 
1812     ++iter;
1813   }
1814 
1815   return nullptr;
1816 }
1817 
StitchXFBCopy(TCacheEntry * stitched_entry)1818 void TextureCacheBase::StitchXFBCopy(TCacheEntry* stitched_entry)
1819 {
1820   // It is possible that some of the overlapping textures overlap each other. This behavior has been
1821   // seen with XFB copies in Rogue Leader. To get the correct result, we apply the texture updates
1822   // in the order the textures were originally loaded. This ensures that the parts of the texture
1823   // that would have been overwritten in memory on real hardware get overwritten the same way here
1824   // too. This should work, but it may be a better idea to keep track of partial XFB copy
1825   // invalidations instead, which would reduce the amount of copying work here.
1826   std::vector<TCacheEntry*> candidates;
1827   bool create_upscaled_copy = false;
1828 
1829   auto iter = FindOverlappingTextures(stitched_entry->addr, stitched_entry->size_in_bytes);
1830   while (iter.first != iter.second)
1831   {
1832     // Currently, this checks the stride of the VRAM copy against the VI request. Therefore, for
1833     // interlaced modes, VRAM copies won't be considered candidates. This is okay for now, because
1834     // our force progressive hack means that an XFB copy should always have a matching stride. If
1835     // the hack is disabled, XFB2RAM should also be enabled. Should we wish to implement interlaced
1836     // stitching in the future, this would require a shader which grabs every second line.
1837     TCacheEntry* entry = iter.first->second;
1838     if (entry != stitched_entry && entry->IsCopy() && !entry->tmem_only &&
1839         entry->OverlapsMemoryRange(stitched_entry->addr, stitched_entry->size_in_bytes) &&
1840         entry->memory_stride == stitched_entry->memory_stride)
1841     {
1842       if (entry->hash == entry->CalculateHash())
1843       {
1844         // Can't check the height here because of Y scaling.
1845         if (entry->native_width != entry->GetWidth())
1846           create_upscaled_copy = true;
1847 
1848         candidates.emplace_back(entry);
1849       }
1850       else
1851       {
1852         // If the hash does not match, this EFB copy will not be used for anything, so remove it
1853         iter.first = InvalidateTexture(iter.first);
1854         continue;
1855       }
1856     }
1857     ++iter.first;
1858   }
1859 
1860   if (candidates.empty())
1861     return;
1862 
1863   std::sort(candidates.begin(), candidates.end(),
1864             [](const TCacheEntry* a, const TCacheEntry* b) { return a->id < b->id; });
1865 
1866   // We only upscale when necessary to preserve resolution. i.e. when there are upscaled partial
1867   // copies to be stitched together.
1868   if (create_upscaled_copy)
1869   {
1870     ScaleTextureCacheEntryTo(stitched_entry, g_renderer->EFBToScaledX(stitched_entry->native_width),
1871                              g_renderer->EFBToScaledY(stitched_entry->native_height));
1872   }
1873 
1874   for (TCacheEntry* entry : candidates)
1875   {
1876     int src_x, src_y, dst_x, dst_y;
1877     if (entry->addr >= stitched_entry->addr)
1878     {
1879       int pixel_offset = (entry->addr - stitched_entry->addr) / 2;
1880       src_x = 0;
1881       src_y = 0;
1882       dst_x = pixel_offset % stitched_entry->native_width;
1883       dst_y = pixel_offset / stitched_entry->native_width;
1884     }
1885     else
1886     {
1887       int pixel_offset = (stitched_entry->addr - entry->addr) / 2;
1888       src_x = pixel_offset % entry->native_width;
1889       src_y = pixel_offset / entry->native_width;
1890       dst_x = 0;
1891       dst_y = 0;
1892     }
1893 
1894     const int native_width =
1895         std::min(entry->native_width - src_x, stitched_entry->native_width - dst_x);
1896     const int native_height =
1897         std::min(entry->native_height - src_y, stitched_entry->native_height - dst_y);
1898     int src_width = native_width;
1899     int src_height = native_height;
1900     int dst_width = native_width;
1901     int dst_height = native_height;
1902 
1903     // Scale to internal resolution.
1904     if (entry->native_width != entry->GetWidth())
1905     {
1906       src_x = g_renderer->EFBToScaledX(src_x);
1907       src_y = g_renderer->EFBToScaledY(src_y);
1908       src_width = g_renderer->EFBToScaledX(src_width);
1909       src_height = g_renderer->EFBToScaledY(src_height);
1910     }
1911     if (create_upscaled_copy)
1912     {
1913       dst_x = g_renderer->EFBToScaledX(dst_x);
1914       dst_y = g_renderer->EFBToScaledY(dst_y);
1915       dst_width = g_renderer->EFBToScaledX(dst_width);
1916       dst_height = g_renderer->EFBToScaledY(dst_height);
1917     }
1918 
1919     // If the source rectangle is outside of what we actually have in VRAM, skip the copy.
1920     // The backend doesn't do any clamping, so if we don't, we'd pass out-of-range coordinates
1921     // to the graphics driver, which can cause GPU resets.
1922     if (static_cast<u32>(src_x + src_width) > entry->GetWidth() ||
1923         static_cast<u32>(src_y + src_height) > entry->GetHeight() ||
1924         static_cast<u32>(dst_x + dst_width) > stitched_entry->GetWidth() ||
1925         static_cast<u32>(dst_y + dst_height) > stitched_entry->GetHeight())
1926     {
1927       continue;
1928     }
1929 
1930     MathUtil::Rectangle<int> srcrect, dstrect;
1931     srcrect.left = src_x;
1932     srcrect.top = src_y;
1933     srcrect.right = (src_x + src_width);
1934     srcrect.bottom = (src_y + src_height);
1935     dstrect.left = dst_x;
1936     dstrect.top = dst_y;
1937     dstrect.right = (dst_x + dst_width);
1938     dstrect.bottom = (dst_y + dst_height);
1939 
1940     // We may have to scale if one of the copies is not internal resolution.
1941     if (srcrect.GetWidth() != dstrect.GetWidth() || srcrect.GetHeight() != dstrect.GetHeight())
1942     {
1943       g_renderer->ScaleTexture(stitched_entry->framebuffer.get(), dstrect, entry->texture.get(),
1944                                srcrect);
1945     }
1946     else
1947     {
1948       // If one copy is stereo, and the other isn't... not much we can do here :/
1949       const u32 layers_to_copy = std::min(entry->GetNumLayers(), stitched_entry->GetNumLayers());
1950       for (u32 layer = 0; layer < layers_to_copy; layer++)
1951       {
1952         stitched_entry->texture->CopyRectangleFromTexture(entry->texture.get(), srcrect, layer, 0,
1953                                                           dstrect, layer, 0);
1954       }
1955     }
1956 
1957     // Link the two textures together, so we won't apply this partial update again
1958     entry->CreateReference(stitched_entry);
1959 
1960     // Mark the texture update as used, as if it was loaded directly
1961     entry->frameCount = FRAMECOUNT_INVALID;
1962   }
1963 }
1964 
1965 EFBCopyFilterCoefficients
GetRAMCopyFilterCoefficients(const CopyFilterCoefficients::Values & coefficients)1966 TextureCacheBase::GetRAMCopyFilterCoefficients(const CopyFilterCoefficients::Values& coefficients)
1967 {
1968   // To simplify the backend, we precalculate the three coefficients in common. Coefficients 0, 1
1969   // are for the row above, 2, 3, 4 are for the current pixel, and 5, 6 are for the row below.
1970   return EFBCopyFilterCoefficients{
1971       static_cast<float>(static_cast<u32>(coefficients[0]) + static_cast<u32>(coefficients[1])) /
1972           64.0f,
1973       static_cast<float>(static_cast<u32>(coefficients[2]) + static_cast<u32>(coefficients[3]) +
1974                          static_cast<u32>(coefficients[4])) /
1975           64.0f,
1976       static_cast<float>(static_cast<u32>(coefficients[5]) + static_cast<u32>(coefficients[6])) /
1977           64.0f,
1978   };
1979 }
1980 
1981 EFBCopyFilterCoefficients
GetVRAMCopyFilterCoefficients(const CopyFilterCoefficients::Values & coefficients)1982 TextureCacheBase::GetVRAMCopyFilterCoefficients(const CopyFilterCoefficients::Values& coefficients)
1983 {
1984   // If the user disables the copy filter, only apply it to the VRAM copy.
1985   // This way games which are sensitive to changes to the RAM copy of the XFB will be unaffected.
1986   EFBCopyFilterCoefficients res = GetRAMCopyFilterCoefficients(coefficients);
1987   if (!g_ActiveConfig.bDisableCopyFilter)
1988     return res;
1989 
1990   // Disabling the copy filter in options should not ignore the values the game sets completely,
1991   // as some games use the filter coefficients to control the brightness of the screen. Instead,
1992   // add all coefficients to the middle sample, so the deflicker/vertical filter has no effect.
1993   res.middle = res.upper + res.middle + res.lower;
1994   res.upper = 0.0f;
1995   res.lower = 0.0f;
1996   return res;
1997 }
1998 
NeedsCopyFilterInShader(const EFBCopyFilterCoefficients & coefficients)1999 bool TextureCacheBase::NeedsCopyFilterInShader(const EFBCopyFilterCoefficients& coefficients)
2000 {
2001   // If the top/bottom coefficients are zero, no point sampling/blending from these rows.
2002   return coefficients.upper != 0 || coefficients.lower != 0;
2003 }
2004 
CopyRenderTargetToTexture(u32 dstAddr,EFBCopyFormat dstFormat,u32 width,u32 height,u32 dstStride,bool is_depth_copy,const MathUtil::Rectangle<int> & srcRect,bool isIntensity,bool scaleByHalf,float y_scale,float gamma,bool clamp_top,bool clamp_bottom,const CopyFilterCoefficients::Values & filter_coefficients)2005 void TextureCacheBase::CopyRenderTargetToTexture(
2006     u32 dstAddr, EFBCopyFormat dstFormat, u32 width, u32 height, u32 dstStride, bool is_depth_copy,
2007     const MathUtil::Rectangle<int>& srcRect, bool isIntensity, bool scaleByHalf, float y_scale,
2008     float gamma, bool clamp_top, bool clamp_bottom,
2009     const CopyFilterCoefficients::Values& filter_coefficients)
2010 {
2011   // Emulation methods:
2012   //
2013   // - EFB to RAM:
2014   //      Encodes the requested EFB data at its native resolution to the emulated RAM using shaders.
2015   //      Load() decodes the data from there again (using TextureDecoder) if the EFB copy is being
2016   //      used as a texture again.
2017   //      Advantage: CPU can read data from the EFB copy and we don't lose any important updates to
2018   //      the texture
2019   //      Disadvantage: Encoding+decoding steps often are redundant because only some games read or
2020   //      modify EFB copies before using them as textures.
2021   //
2022   // - EFB to texture:
2023   //      Copies the requested EFB data to a texture object in VRAM, performing any color conversion
2024   //      using shaders.
2025   //      Advantage: Works for many games, since in most cases EFB copies aren't read or modified at
2026   //      all before being used as a texture again.
2027   //                 Since we don't do any further encoding or decoding here, this method is much
2028   //                 faster.
2029   //                 It also allows enhancing the visual quality by doing scaled EFB copies.
2030   //
2031   // - Hybrid EFB copies:
2032   //      1a) Whenever this function gets called, encode the requested EFB data to RAM (like EFB to
2033   //      RAM)
2034   //      1b) Set type to TCET_EC_DYNAMIC for all texture cache entries in the destination address
2035   //      range.
2036   //          If EFB copy caching is enabled, further checks will (try to) prevent redundant EFB
2037   //          copies.
2038   //      2) Check if a texture cache entry for the specified dstAddr already exists (i.e. if an EFB
2039   //      copy was triggered to that address before):
2040   //      2a) Entry doesn't exist:
2041   //          - Also copy the requested EFB data to a texture object in VRAM (like EFB to texture)
2042   //          - Create a texture cache entry for the target (type = TCET_EC_VRAM)
2043   //          - Store a hash of the encoded RAM data in the texcache entry.
2044   //      2b) Entry exists AND type is TCET_EC_VRAM:
2045   //          - Like case 2a, but reuse the old texcache entry instead of creating a new one.
2046   //      2c) Entry exists AND type is TCET_EC_DYNAMIC:
2047   //          - Only encode the texture to RAM (like EFB to RAM) and store a hash of the encoded
2048   //          data in the existing texcache entry.
2049   //          - Do NOT copy the requested EFB data to a VRAM object. Reason: the texture is dynamic,
2050   //          i.e. the CPU is modifying it. Storing a VRAM copy is useless, because we'd always end
2051   //          up deleting it and reloading the data from RAM anyway.
2052   //      3) If the EFB copy gets used as a texture, compare the source RAM hash with the hash you
2053   //      stored when encoding the EFB data to RAM.
2054   //      3a) If the two hashes match AND type is TCET_EC_VRAM, reuse the VRAM copy you created
2055   //      3b) If the two hashes differ AND type is TCET_EC_VRAM, screw your existing VRAM copy. Set
2056   //      type to TCET_EC_DYNAMIC.
2057   //          Redecode the source RAM data to a VRAM object. The entry basically behaves like a
2058   //          normal texture now.
2059   //      3c) If type is TCET_EC_DYNAMIC, treat the EFB copy like a normal texture.
2060   //      Advantage: Non-dynamic EFB copies can be visually enhanced like with EFB to texture.
2061   //                 Compatibility is as good as EFB to RAM.
2062   //      Disadvantage: Slower than EFB to texture and often even slower than EFB to RAM.
2063   //                    EFB copy cache depends on accurate texture hashing being enabled. However,
2064   //                    with accurate hashing you end up being as slow as without a copy cache
2065   //                    anyway.
2066   //
2067   // Disadvantage of all methods: Calling this function requires the GPU to perform a pipeline flush
2068   // which stalls any further CPU processing.
2069   const bool is_xfb_copy = !is_depth_copy && !isIntensity && dstFormat == EFBCopyFormat::XFB;
2070   bool copy_to_vram =
2071       g_ActiveConfig.backend_info.bSupportsCopyToVram && !g_ActiveConfig.bDisableCopyToVRAM;
2072   bool copy_to_ram =
2073       !(is_xfb_copy ? g_ActiveConfig.bSkipXFBCopyToRam : g_ActiveConfig.bSkipEFBCopyToRam) ||
2074       !copy_to_vram;
2075 
2076   u8* dst = Memory::GetPointer(dstAddr);
2077   if (dst == nullptr)
2078   {
2079     ERROR_LOG(VIDEO, "Trying to copy from EFB to invalid address 0x%8x", dstAddr);
2080     return;
2081   }
2082 
2083   // tex_w and tex_h are the native size of the texture in the GC memory.
2084   // The size scaled_* represents the emulated texture. Those differ
2085   // because of upscaling and because of yscaling of XFB copies.
2086   // For the latter, we keep the EFB resolution for the virtual XFB blit.
2087   u32 tex_w = width;
2088   u32 tex_h = height;
2089   u32 scaled_tex_w = g_renderer->EFBToScaledX(width);
2090   u32 scaled_tex_h = g_renderer->EFBToScaledY(height);
2091 
2092   if (scaleByHalf)
2093   {
2094     tex_w /= 2;
2095     tex_h /= 2;
2096     scaled_tex_w /= 2;
2097     scaled_tex_h /= 2;
2098   }
2099 
2100   if (!is_xfb_copy && !g_ActiveConfig.bCopyEFBScaled)
2101   {
2102     // No upscaling
2103     scaled_tex_w = tex_w;
2104     scaled_tex_h = tex_h;
2105   }
2106 
2107   // Get the base (in memory) format of this efb copy.
2108   TextureFormat baseFormat = TexDecoder_GetEFBCopyBaseFormat(dstFormat);
2109 
2110   u32 blockH = TexDecoder_GetBlockHeightInTexels(baseFormat);
2111   const u32 blockW = TexDecoder_GetBlockWidthInTexels(baseFormat);
2112 
2113   // Round up source height to multiple of block size
2114   u32 actualHeight = Common::AlignUp(tex_h, blockH);
2115   const u32 actualWidth = Common::AlignUp(tex_w, blockW);
2116 
2117   u32 num_blocks_y = actualHeight / blockH;
2118   const u32 num_blocks_x = actualWidth / blockW;
2119 
2120   // RGBA takes two cache lines per block; all others take one
2121   const u32 bytes_per_block = baseFormat == TextureFormat::RGBA8 ? 64 : 32;
2122 
2123   const u32 bytes_per_row = num_blocks_x * bytes_per_block;
2124   const u32 covered_range = num_blocks_y * dstStride;
2125 
2126   if (dstStride < bytes_per_row)
2127   {
2128     // This kind of efb copy results in a scrambled image.
2129     // I'm pretty sure no game actually wants to do this, it might be caused by a
2130     // programming bug in the game, or a CPU/Bounding box emulation issue with dolphin.
2131     // The copy_to_ram code path above handles this "correctly" and scrambles the image
2132     // but the copy_to_vram code path just saves and uses unscrambled texture instead.
2133 
2134     // To avoid a "incorrect" result, we simply skip doing the copy_to_vram code path
2135     // so if the game does try to use the scrambled texture, dolphin will grab the scrambled
2136     // texture (or black if copy_to_ram is also disabled) out of ram.
2137     ERROR_LOG(VIDEO, "Memory stride too small (%i < %i)", dstStride, bytes_per_row);
2138     copy_to_vram = false;
2139   }
2140 
2141   // We also linear filtering for both box filtering and downsampling higher resolutions to 1x.
2142   // TODO: This only produces perfect downsampling for 2x IR, other resolutions will need more
2143   //       complex down filtering to average all pixels and produce the correct result.
2144   const bool linear_filter =
2145       !is_depth_copy && (scaleByHalf || g_renderer->GetEFBScale() != 1 || y_scale > 1.0f);
2146 
2147   TCacheEntry* entry = nullptr;
2148   if (copy_to_vram)
2149   {
2150     // create the texture
2151     const TextureConfig config(scaled_tex_w, scaled_tex_h, 1, g_framebuffer_manager->GetEFBLayers(),
2152                                1, AbstractTextureFormat::RGBA8, AbstractTextureFlag_RenderTarget);
2153     entry = AllocateCacheEntry(config);
2154     if (entry)
2155     {
2156       entry->SetGeneralParameters(dstAddr, 0, baseFormat, is_xfb_copy);
2157       entry->SetDimensions(tex_w, tex_h, 1);
2158       entry->frameCount = FRAMECOUNT_INVALID;
2159       if (is_xfb_copy)
2160       {
2161         entry->should_force_safe_hashing = is_xfb_copy;
2162         entry->SetXfbCopy(dstStride);
2163       }
2164       else
2165       {
2166         entry->SetEfbCopy(dstStride);
2167       }
2168       entry->may_have_overlapping_textures = false;
2169       entry->is_custom_tex = false;
2170 
2171       CopyEFBToCacheEntry(entry, is_depth_copy, srcRect, scaleByHalf, linear_filter, dstFormat,
2172                           isIntensity, gamma, clamp_top, clamp_bottom,
2173                           GetVRAMCopyFilterCoefficients(filter_coefficients));
2174 
2175       if (g_ActiveConfig.bDumpEFBTarget && !is_xfb_copy)
2176       {
2177         static int efb_count = 0;
2178         entry->texture->Save(
2179             fmt::format("{}efb_frame_{}.png", File::GetUserPath(D_DUMPTEXTURES_IDX), efb_count++),
2180             0);
2181       }
2182 
2183       if (g_ActiveConfig.bDumpXFBTarget && is_xfb_copy)
2184       {
2185         static int xfb_count = 0;
2186         entry->texture->Save(
2187             fmt::format("{}xfb_copy_{}.png", File::GetUserPath(D_DUMPTEXTURES_IDX), xfb_count++),
2188             0);
2189       }
2190     }
2191   }
2192 
2193   if (copy_to_ram)
2194   {
2195     EFBCopyFilterCoefficients coefficients = GetRAMCopyFilterCoefficients(filter_coefficients);
2196     PEControl::PixelFormat srcFormat = bpmem.zcontrol.pixel_format;
2197     EFBCopyParams format(srcFormat, dstFormat, is_depth_copy, isIntensity,
2198                          NeedsCopyFilterInShader(coefficients));
2199 
2200     std::unique_ptr<AbstractStagingTexture> staging_texture = GetEFBCopyStagingTexture();
2201     if (staging_texture)
2202     {
2203       CopyEFB(staging_texture.get(), format, tex_w, bytes_per_row, num_blocks_y, dstStride, srcRect,
2204               scaleByHalf, linear_filter, y_scale, gamma, clamp_top, clamp_bottom, coefficients);
2205 
2206       // We can't defer if there is no VRAM copy (since we need to update the hash).
2207       if (!copy_to_vram || !g_ActiveConfig.bDeferEFBCopies)
2208       {
2209         // Immediately flush it.
2210         WriteEFBCopyToRAM(dst, bytes_per_row / sizeof(u32), num_blocks_y, dstStride,
2211                           std::move(staging_texture));
2212       }
2213       else
2214       {
2215         // Defer the flush until later.
2216         entry->pending_efb_copy = std::move(staging_texture);
2217         entry->pending_efb_copy_width = bytes_per_row / sizeof(u32);
2218         entry->pending_efb_copy_height = num_blocks_y;
2219         entry->pending_efb_copy_invalidated = false;
2220         m_pending_efb_copies.push_back(entry);
2221       }
2222     }
2223   }
2224   else
2225   {
2226     if (is_xfb_copy)
2227     {
2228       UninitializeXFBMemory(dst, dstStride, bytes_per_row, num_blocks_y);
2229     }
2230     else
2231     {
2232       // Hack: Most games don't actually need the correct texture data in RAM
2233       //       and we can just keep a copy in VRAM. We zero the memory so we
2234       //       can check it hasn't changed before using our copy in VRAM.
2235       u8* ptr = dst;
2236       for (u32 i = 0; i < num_blocks_y; i++)
2237       {
2238         std::memset(ptr, 0, bytes_per_row);
2239         ptr += dstStride;
2240       }
2241     }
2242   }
2243 
2244   // Invalidate all textures, if they are either fully overwritten by our efb copy, or if they
2245   // have a different stride than our efb copy. Partly overwritten textures with the same stride
2246   // as our efb copy are marked to check them for partial texture updates.
2247   // TODO: The logic to detect overlapping strided efb copies is not 100% accurate.
2248   bool strided_efb_copy = dstStride != bytes_per_row;
2249   auto iter = FindOverlappingTextures(dstAddr, covered_range);
2250   while (iter.first != iter.second)
2251   {
2252     TCacheEntry* overlapping_entry = iter.first->second;
2253 
2254     if (overlapping_entry->addr == dstAddr && overlapping_entry->is_xfb_copy)
2255     {
2256       for (auto& reference : overlapping_entry->references)
2257       {
2258         reference->reference_changed = true;
2259       }
2260     }
2261 
2262     if (overlapping_entry->OverlapsMemoryRange(dstAddr, covered_range))
2263     {
2264       u32 overlap_range = std::min(overlapping_entry->addr + overlapping_entry->size_in_bytes,
2265                                    dstAddr + covered_range) -
2266                           std::max(overlapping_entry->addr, dstAddr);
2267       if (!copy_to_vram || overlapping_entry->memory_stride != dstStride ||
2268           (!strided_efb_copy && overlapping_entry->size_in_bytes == overlap_range) ||
2269           (strided_efb_copy && overlapping_entry->size_in_bytes == overlap_range &&
2270            overlapping_entry->addr == dstAddr))
2271       {
2272         // Pending EFB copies which are completely covered by this new copy can simply be tossed,
2273         // instead of having to flush them later on, since this copy will write over everything.
2274         iter.first = InvalidateTexture(iter.first, true);
2275         continue;
2276       }
2277 
2278       // We don't want to change the may_have_overlapping_textures flag on XFB container entries
2279       // because otherwise they can't be re-used/updated, leaking textures for several frames.
2280       if (!overlapping_entry->is_xfb_container)
2281         overlapping_entry->may_have_overlapping_textures = true;
2282 
2283       // There are cases (Rogue Squadron 2 / Texas Holdem on Wiiware) where
2284       // for xfb copies the textures overlap which causes the hash of the first copy
2285       // to be different (from when it was originally created).  This has no implications
2286       // for XFB2Tex because the underlying memory doesn't change (dummy values) but
2287       // can affect XFB2Ram when we compare the texture cache copy hash with the
2288       // newly computed hash
2289       // By calculating the hash when we receive overlapping xfbs, we are able
2290       // to mitigate this
2291       if (overlapping_entry->is_xfb_copy && copy_to_ram)
2292       {
2293         overlapping_entry->hash = overlapping_entry->CalculateHash();
2294       }
2295 
2296       // Do not load textures by hash, if they were at least partly overwritten by an efb copy.
2297       // In this case, comparing the hash is not enough to check, if two textures are identical.
2298       if (overlapping_entry->textures_by_hash_iter != textures_by_hash.end())
2299       {
2300         textures_by_hash.erase(overlapping_entry->textures_by_hash_iter);
2301         overlapping_entry->textures_by_hash_iter = textures_by_hash.end();
2302       }
2303     }
2304     ++iter.first;
2305   }
2306 
2307   if (OpcodeDecoder::g_record_fifo_data)
2308   {
2309     // Mark the memory behind this efb copy as dynamicly generated for the Fifo log
2310     u32 address = dstAddr;
2311     for (u32 i = 0; i < num_blocks_y; i++)
2312     {
2313       FifoRecorder::GetInstance().UseMemory(address, bytes_per_row, MemoryUpdate::TEXTURE_MAP,
2314                                             true);
2315       address += dstStride;
2316     }
2317   }
2318 
2319   // Even if the copy is deferred, still compute the hash. This way if the copy is used as a texture
2320   // in a subsequent draw before it is flushed, it will have the same hash.
2321   if (entry)
2322   {
2323     const u64 hash = entry->CalculateHash();
2324     entry->SetHashes(hash, hash);
2325     textures_by_address.emplace(dstAddr, entry);
2326   }
2327 }
2328 
FlushEFBCopies()2329 void TextureCacheBase::FlushEFBCopies()
2330 {
2331   if (m_pending_efb_copies.empty())
2332     return;
2333 
2334   for (TCacheEntry* entry : m_pending_efb_copies)
2335     FlushEFBCopy(entry);
2336   m_pending_efb_copies.clear();
2337 }
2338 
WriteEFBCopyToRAM(u8 * dst_ptr,u32 width,u32 height,u32 stride,std::unique_ptr<AbstractStagingTexture> staging_texture)2339 void TextureCacheBase::WriteEFBCopyToRAM(u8* dst_ptr, u32 width, u32 height, u32 stride,
2340                                          std::unique_ptr<AbstractStagingTexture> staging_texture)
2341 {
2342   MathUtil::Rectangle<int> copy_rect(0, 0, static_cast<int>(width), static_cast<int>(height));
2343   staging_texture->ReadTexels(copy_rect, dst_ptr, stride);
2344   ReleaseEFBCopyStagingTexture(std::move(staging_texture));
2345 }
2346 
FlushEFBCopy(TCacheEntry * entry)2347 void TextureCacheBase::FlushEFBCopy(TCacheEntry* entry)
2348 {
2349   // Copy from texture -> guest memory.
2350   u8* const dst = Memory::GetPointer(entry->addr);
2351   WriteEFBCopyToRAM(dst, entry->pending_efb_copy_width, entry->pending_efb_copy_height,
2352                     entry->memory_stride, std::move(entry->pending_efb_copy));
2353 
2354   // If the EFB copy was invalidated (e.g. the bloom case mentioned in InvalidateTexture), now is
2355   // the time to clean up the TCacheEntry. In which case, we don't need to compute the new hash of
2356   // the RAM copy. But we need to clean up the TCacheEntry, as InvalidateTexture doesn't free it.
2357   if (entry->pending_efb_copy_invalidated)
2358   {
2359     delete entry;
2360     return;
2361   }
2362 
2363   // Re-hash the texture now that the guest memory is populated.
2364   // This should be safe because we'll catch any writes before the game can modify it.
2365   const u64 hash = entry->CalculateHash();
2366   entry->SetHashes(hash, hash);
2367 
2368   // Check for any overlapping XFB copies which now need the hash recomputed.
2369   // See the comment above regarding Rogue Squadron 2.
2370   if (entry->is_xfb_copy)
2371   {
2372     const u32 covered_range = entry->pending_efb_copy_height * entry->memory_stride;
2373     auto range = FindOverlappingTextures(entry->addr, covered_range);
2374     for (auto iter = range.first; iter != range.second; ++iter)
2375     {
2376       TCacheEntry* overlapping_entry = iter->second;
2377       if (overlapping_entry->may_have_overlapping_textures && overlapping_entry->is_xfb_copy &&
2378           overlapping_entry->OverlapsMemoryRange(entry->addr, covered_range))
2379       {
2380         const u64 overlapping_hash = overlapping_entry->CalculateHash();
2381         entry->SetHashes(overlapping_hash, overlapping_hash);
2382       }
2383     }
2384   }
2385 }
2386 
GetEFBCopyStagingTexture()2387 std::unique_ptr<AbstractStagingTexture> TextureCacheBase::GetEFBCopyStagingTexture()
2388 {
2389   // Pull off the back first to re-use the most frequently used textures.
2390   if (!m_efb_copy_staging_texture_pool.empty())
2391   {
2392     auto ptr = std::move(m_efb_copy_staging_texture_pool.back());
2393     m_efb_copy_staging_texture_pool.pop_back();
2394     return ptr;
2395   }
2396 
2397   std::unique_ptr<AbstractStagingTexture> tex = g_renderer->CreateStagingTexture(
2398       StagingTextureType::Readback, m_efb_encoding_texture->GetConfig());
2399   if (!tex)
2400     WARN_LOG(VIDEO, "Failed to create EFB copy staging texture");
2401 
2402   return tex;
2403 }
2404 
ReleaseEFBCopyStagingTexture(std::unique_ptr<AbstractStagingTexture> tex)2405 void TextureCacheBase::ReleaseEFBCopyStagingTexture(std::unique_ptr<AbstractStagingTexture> tex)
2406 {
2407   m_efb_copy_staging_texture_pool.push_back(std::move(tex));
2408 }
2409 
UninitializeXFBMemory(u8 * dst,u32 stride,u32 bytes_per_row,u32 num_blocks_y)2410 void TextureCacheBase::UninitializeXFBMemory(u8* dst, u32 stride, u32 bytes_per_row,
2411                                              u32 num_blocks_y)
2412 {
2413   // Originally, we planned on using a 'key color'
2414   // for alpha to address partial xfbs (Mario Strikers / Chicken Little).
2415   // This work was removed since it was unfinished but there
2416   // was still a desire to differentiate between the old and the new approach
2417   // which is why we still set uninitialized xfb memory to fuchsia
2418   // (Y=1,U=254,V=254) instead of dark green (Y=0,U=0,V=0) in YUV
2419   // like is done in the EFB path.
2420 
2421 #if defined(_M_X86) || defined(_M_X86_64)
2422   __m128i sixteenBytes = _mm_set1_epi16((s16)(u16)0xFE01);
2423 #endif
2424 
2425   for (u32 i = 0; i < num_blocks_y; i++)
2426   {
2427     u32 size = bytes_per_row;
2428     u8* rowdst = dst;
2429 #if defined(_M_X86) || defined(_M_X86_64)
2430     while (size >= 16)
2431     {
2432       _mm_storeu_si128((__m128i*)rowdst, sixteenBytes);
2433       size -= 16;
2434       rowdst += 16;
2435     }
2436 #endif
2437     for (u32 offset = 0; offset < size; offset++)
2438     {
2439       if (offset & 1)
2440       {
2441         rowdst[offset] = 254;
2442       }
2443       else
2444       {
2445         rowdst[offset] = 1;
2446       }
2447     }
2448     dst += stride;
2449   }
2450 }
2451 
AllocateCacheEntry(const TextureConfig & config)2452 TextureCacheBase::TCacheEntry* TextureCacheBase::AllocateCacheEntry(const TextureConfig& config)
2453 {
2454   std::optional<TexPoolEntry> alloc = AllocateTexture(config);
2455   if (!alloc)
2456     return nullptr;
2457 
2458   TCacheEntry* cacheEntry =
2459       new TCacheEntry(std::move(alloc->texture), std::move(alloc->framebuffer));
2460   cacheEntry->textures_by_hash_iter = textures_by_hash.end();
2461   cacheEntry->id = last_entry_id++;
2462   return cacheEntry;
2463 }
2464 
2465 std::optional<TextureCacheBase::TexPoolEntry>
AllocateTexture(const TextureConfig & config)2466 TextureCacheBase::AllocateTexture(const TextureConfig& config)
2467 {
2468   TexPool::iterator iter = FindMatchingTextureFromPool(config);
2469   if (iter != texture_pool.end())
2470   {
2471     auto entry = std::move(iter->second);
2472     texture_pool.erase(iter);
2473     return std::move(entry);
2474   }
2475 
2476   std::unique_ptr<AbstractTexture> texture = g_renderer->CreateTexture(config);
2477   if (!texture)
2478   {
2479     WARN_LOG(VIDEO, "Failed to allocate a %ux%ux%u texture", config.width, config.height,
2480              config.layers);
2481     return {};
2482   }
2483 
2484   std::unique_ptr<AbstractFramebuffer> framebuffer;
2485   if (config.IsRenderTarget())
2486   {
2487     framebuffer = g_renderer->CreateFramebuffer(texture.get(), nullptr);
2488     if (!framebuffer)
2489     {
2490       WARN_LOG(VIDEO, "Failed to allocate a %ux%ux%u framebuffer", config.width, config.height,
2491                config.layers);
2492       return {};
2493     }
2494   }
2495 
2496   INCSTAT(g_stats.num_textures_created);
2497   return TexPoolEntry(std::move(texture), std::move(framebuffer));
2498 }
2499 
2500 TextureCacheBase::TexPool::iterator
FindMatchingTextureFromPool(const TextureConfig & config)2501 TextureCacheBase::FindMatchingTextureFromPool(const TextureConfig& config)
2502 {
2503   // Find a texture from the pool that does not have a frameCount of FRAMECOUNT_INVALID.
2504   // This prevents a texture from being used twice in a single frame with different data,
2505   // which potentially means that a driver has to maintain two copies of the texture anyway.
2506   // Render-target textures are fine through, as they have to be generated in a seperated pass.
2507   // As non-render-target textures are usually static, this should not matter much.
2508   auto range = texture_pool.equal_range(config);
2509   auto matching_iter = std::find_if(range.first, range.second, [](const auto& iter) {
2510     return iter.first.IsRenderTarget() || iter.second.frameCount != FRAMECOUNT_INVALID;
2511   });
2512   return matching_iter != range.second ? matching_iter : texture_pool.end();
2513 }
2514 
2515 TextureCacheBase::TexAddrCache::iterator
GetTexCacheIter(TextureCacheBase::TCacheEntry * entry)2516 TextureCacheBase::GetTexCacheIter(TextureCacheBase::TCacheEntry* entry)
2517 {
2518   auto iter_range = textures_by_address.equal_range(entry->addr);
2519   TexAddrCache::iterator iter = iter_range.first;
2520   while (iter != iter_range.second)
2521   {
2522     if (iter->second == entry)
2523     {
2524       return iter;
2525     }
2526     ++iter;
2527   }
2528   return textures_by_address.end();
2529 }
2530 
2531 std::pair<TextureCacheBase::TexAddrCache::iterator, TextureCacheBase::TexAddrCache::iterator>
FindOverlappingTextures(u32 addr,u32 size_in_bytes)2532 TextureCacheBase::FindOverlappingTextures(u32 addr, u32 size_in_bytes)
2533 {
2534   // We index by the starting address only, so there is no way to query all textures
2535   // which end after the given addr. But the GC textures have a limited size, so we
2536   // look for all textures which have a start address bigger than addr minus the maximal
2537   // texture size. But this yields false-positives which must be checked later on.
2538 
2539   // 1024 x 1024 texel times 8 nibbles per texel
2540   constexpr u32 max_texture_size = 1024 * 1024 * 4;
2541   u32 lower_addr = addr > max_texture_size ? addr - max_texture_size : 0;
2542   auto begin = textures_by_address.lower_bound(lower_addr);
2543   auto end = textures_by_address.upper_bound(addr + size_in_bytes);
2544 
2545   return std::make_pair(begin, end);
2546 }
2547 
2548 TextureCacheBase::TexAddrCache::iterator
InvalidateTexture(TexAddrCache::iterator iter,bool discard_pending_efb_copy)2549 TextureCacheBase::InvalidateTexture(TexAddrCache::iterator iter, bool discard_pending_efb_copy)
2550 {
2551   if (iter == textures_by_address.end())
2552     return textures_by_address.end();
2553 
2554   TCacheEntry* entry = iter->second;
2555 
2556   if (entry->textures_by_hash_iter != textures_by_hash.end())
2557   {
2558     textures_by_hash.erase(entry->textures_by_hash_iter);
2559     entry->textures_by_hash_iter = textures_by_hash.end();
2560   }
2561 
2562   for (size_t i = 0; i < bound_textures.size(); ++i)
2563   {
2564     // If the entry is currently bound and not invalidated, keep it, but mark it as invalidated.
2565     // This way it can still be used via tmem cache emulation, but nothing else.
2566     // Spyro: A Hero's Tail is known for using such overwritten textures.
2567     if (bound_textures[i] == entry && IsValidBindPoint(static_cast<u32>(i)))
2568     {
2569       bound_textures[i]->tmem_only = true;
2570       return ++iter;
2571     }
2572   }
2573 
2574   // If this is a pending EFB copy, we don't want to flush it here.
2575   // Why? Because let's say a game is rendering a bloom-type effect, using EFB copies to essentially
2576   // downscale the framebuffer. Copy from EFB->Texture, draw texture to EFB, copy EFB->Texture,
2577   // draw, repeat. The second copy will invalidate the first, forcing a flush. Which means we lose
2578   // any benefit of EFB copy batching. So instead, let's just leave the EFB copy pending, but remove
2579   // it from the texture cache. This way we don't use the old VRAM copy. When the EFB copies are
2580   // eventually flushed, they will overwrite each other, and the end result should be the same.
2581   if (entry->pending_efb_copy)
2582   {
2583     if (discard_pending_efb_copy)
2584     {
2585       // If the RAM copy is being completely overwritten by a new EFB copy, we can discard the
2586       // existing pending copy, and not bother waiting for it in the future. This happens in
2587       // Xenoblade's sunset scene, where 35 copies are done per frame, and 25 of them are
2588       // copied to the same address, and can be skipped.
2589       ReleaseEFBCopyStagingTexture(std::move(entry->pending_efb_copy));
2590       auto pending_it = std::find(m_pending_efb_copies.begin(), m_pending_efb_copies.end(), entry);
2591       if (pending_it != m_pending_efb_copies.end())
2592         m_pending_efb_copies.erase(pending_it);
2593     }
2594     else
2595     {
2596       entry->pending_efb_copy_invalidated = true;
2597     }
2598   }
2599 
2600   auto config = entry->texture->GetConfig();
2601   texture_pool.emplace(config,
2602                        TexPoolEntry(std::move(entry->texture), std::move(entry->framebuffer)));
2603 
2604   // Don't delete if there's a pending EFB copy, as we need the TCacheEntry alive.
2605   if (!entry->pending_efb_copy)
2606     delete entry;
2607 
2608   return textures_by_address.erase(iter);
2609 }
2610 
CreateUtilityTextures()2611 bool TextureCacheBase::CreateUtilityTextures()
2612 {
2613   constexpr TextureConfig encoding_texture_config(
2614       EFB_WIDTH * 4, 1024, 1, 1, 1, AbstractTextureFormat::BGRA8, AbstractTextureFlag_RenderTarget);
2615   m_efb_encoding_texture = g_renderer->CreateTexture(encoding_texture_config);
2616   if (!m_efb_encoding_texture)
2617     return false;
2618 
2619   m_efb_encoding_framebuffer = g_renderer->CreateFramebuffer(m_efb_encoding_texture.get(), nullptr);
2620   if (!m_efb_encoding_framebuffer)
2621     return false;
2622 
2623   if (g_ActiveConfig.backend_info.bSupportsGPUTextureDecoding)
2624   {
2625     constexpr TextureConfig decoding_texture_config(
2626         1024, 1024, 1, 1, 1, AbstractTextureFormat::RGBA8, AbstractTextureFlag_ComputeImage);
2627     m_decoding_texture = g_renderer->CreateTexture(decoding_texture_config);
2628     if (!m_decoding_texture)
2629       return false;
2630   }
2631 
2632   return true;
2633 }
2634 
CopyEFBToCacheEntry(TCacheEntry * entry,bool is_depth_copy,const MathUtil::Rectangle<int> & src_rect,bool scale_by_half,bool linear_filter,EFBCopyFormat dst_format,bool is_intensity,float gamma,bool clamp_top,bool clamp_bottom,const EFBCopyFilterCoefficients & filter_coefficients)2635 void TextureCacheBase::CopyEFBToCacheEntry(TCacheEntry* entry, bool is_depth_copy,
2636                                            const MathUtil::Rectangle<int>& src_rect,
2637                                            bool scale_by_half, bool linear_filter,
2638                                            EFBCopyFormat dst_format, bool is_intensity, float gamma,
2639                                            bool clamp_top, bool clamp_bottom,
2640                                            const EFBCopyFilterCoefficients& filter_coefficients)
2641 {
2642   // Flush EFB pokes first, as they're expected to be included.
2643   g_framebuffer_manager->FlushEFBPokes();
2644 
2645   // Get the pipeline which we will be using. If the compilation failed, this will be null.
2646   const AbstractPipeline* copy_pipeline =
2647       g_shader_cache->GetEFBCopyToVRAMPipeline(TextureConversionShaderGen::GetShaderUid(
2648           dst_format, is_depth_copy, is_intensity, scale_by_half,
2649           NeedsCopyFilterInShader(filter_coefficients)));
2650   if (!copy_pipeline)
2651   {
2652     WARN_LOG(VIDEO, "Skipping EFB copy to VRAM due to missing pipeline.");
2653     return;
2654   }
2655 
2656   const auto scaled_src_rect = g_renderer->ConvertEFBRectangle(src_rect);
2657   const auto framebuffer_rect = g_renderer->ConvertFramebufferRectangle(
2658       scaled_src_rect, g_framebuffer_manager->GetEFBFramebuffer());
2659   AbstractTexture* src_texture =
2660       is_depth_copy ? g_framebuffer_manager->ResolveEFBDepthTexture(framebuffer_rect) :
2661                       g_framebuffer_manager->ResolveEFBColorTexture(framebuffer_rect);
2662 
2663   src_texture->FinishedRendering();
2664   g_renderer->BeginUtilityDrawing();
2665 
2666   // Fill uniform buffer.
2667   struct Uniforms
2668   {
2669     float src_left, src_top, src_width, src_height;
2670     float filter_coefficients[3];
2671     float gamma_rcp;
2672     float clamp_top;
2673     float clamp_bottom;
2674     float pixel_height;
2675     u32 padding;
2676   };
2677   Uniforms uniforms;
2678   const float rcp_efb_width = 1.0f / static_cast<float>(g_framebuffer_manager->GetEFBWidth());
2679   const float rcp_efb_height = 1.0f / static_cast<float>(g_framebuffer_manager->GetEFBHeight());
2680   uniforms.src_left = framebuffer_rect.left * rcp_efb_width;
2681   uniforms.src_top = framebuffer_rect.top * rcp_efb_height;
2682   uniforms.src_width = framebuffer_rect.GetWidth() * rcp_efb_width;
2683   uniforms.src_height = framebuffer_rect.GetHeight() * rcp_efb_height;
2684   uniforms.filter_coefficients[0] = filter_coefficients.upper;
2685   uniforms.filter_coefficients[1] = filter_coefficients.middle;
2686   uniforms.filter_coefficients[2] = filter_coefficients.lower;
2687   uniforms.gamma_rcp = 1.0f / gamma;
2688   uniforms.clamp_top = clamp_top ? framebuffer_rect.top * rcp_efb_height : 0.0f;
2689   uniforms.clamp_bottom = clamp_bottom ? framebuffer_rect.bottom * rcp_efb_height : 1.0f;
2690   uniforms.pixel_height = g_ActiveConfig.bCopyEFBScaled ? rcp_efb_height : 1.0f / EFB_HEIGHT;
2691   uniforms.padding = 0;
2692   g_vertex_manager->UploadUtilityUniforms(&uniforms, sizeof(uniforms));
2693 
2694   // Use the copy pipeline to render the VRAM copy.
2695   g_renderer->SetAndDiscardFramebuffer(entry->framebuffer.get());
2696   g_renderer->SetViewportAndScissor(entry->framebuffer->GetRect());
2697   g_renderer->SetPipeline(copy_pipeline);
2698   g_renderer->SetTexture(0, src_texture);
2699   g_renderer->SetSamplerState(0, linear_filter ? RenderState::GetLinearSamplerState() :
2700                                                  RenderState::GetPointSamplerState());
2701   g_renderer->Draw(0, 3);
2702   g_renderer->EndUtilityDrawing();
2703   entry->texture->FinishedRendering();
2704 }
2705 
CopyEFB(AbstractStagingTexture * dst,const EFBCopyParams & params,u32 native_width,u32 bytes_per_row,u32 num_blocks_y,u32 memory_stride,const MathUtil::Rectangle<int> & src_rect,bool scale_by_half,bool linear_filter,float y_scale,float gamma,bool clamp_top,bool clamp_bottom,const EFBCopyFilterCoefficients & filter_coefficients)2706 void TextureCacheBase::CopyEFB(AbstractStagingTexture* dst, const EFBCopyParams& params,
2707                                u32 native_width, u32 bytes_per_row, u32 num_blocks_y,
2708                                u32 memory_stride, const MathUtil::Rectangle<int>& src_rect,
2709                                bool scale_by_half, bool linear_filter, float y_scale, float gamma,
2710                                bool clamp_top, bool clamp_bottom,
2711                                const EFBCopyFilterCoefficients& filter_coefficients)
2712 {
2713   // Flush EFB pokes first, as they're expected to be included.
2714   g_framebuffer_manager->FlushEFBPokes();
2715 
2716   // Get the pipeline which we will be using. If the compilation failed, this will be null.
2717   const AbstractPipeline* copy_pipeline = g_shader_cache->GetEFBCopyToRAMPipeline(params);
2718   if (!copy_pipeline)
2719   {
2720     WARN_LOG(VIDEO, "Skipping EFB copy to VRAM due to missing pipeline.");
2721     return;
2722   }
2723 
2724   const auto scaled_src_rect = g_renderer->ConvertEFBRectangle(src_rect);
2725   const auto framebuffer_rect = g_renderer->ConvertFramebufferRectangle(
2726       scaled_src_rect, g_framebuffer_manager->GetEFBFramebuffer());
2727   AbstractTexture* src_texture =
2728       params.depth ? g_framebuffer_manager->ResolveEFBDepthTexture(framebuffer_rect) :
2729                      g_framebuffer_manager->ResolveEFBColorTexture(framebuffer_rect);
2730 
2731   src_texture->FinishedRendering();
2732   g_renderer->BeginUtilityDrawing();
2733 
2734   // Fill uniform buffer.
2735   struct Uniforms
2736   {
2737     std::array<s32, 4> position_uniform;
2738     float y_scale;
2739     float gamma_rcp;
2740     float clamp_top;
2741     float clamp_bottom;
2742     float filter_coefficients[3];
2743     u32 padding;
2744   };
2745   Uniforms encoder_params;
2746   const float rcp_efb_height = 1.0f / static_cast<float>(g_framebuffer_manager->GetEFBHeight());
2747   encoder_params.position_uniform[0] = src_rect.left;
2748   encoder_params.position_uniform[1] = src_rect.top;
2749   encoder_params.position_uniform[2] = static_cast<s32>(native_width);
2750   encoder_params.position_uniform[3] = scale_by_half ? 2 : 1;
2751   encoder_params.y_scale = y_scale;
2752   encoder_params.gamma_rcp = 1.0f / gamma;
2753   encoder_params.clamp_top = clamp_top ? framebuffer_rect.top * rcp_efb_height : 0.0f;
2754   encoder_params.clamp_bottom = clamp_bottom ? framebuffer_rect.bottom * rcp_efb_height : 1.0f;
2755   encoder_params.filter_coefficients[0] = filter_coefficients.upper;
2756   encoder_params.filter_coefficients[1] = filter_coefficients.middle;
2757   encoder_params.filter_coefficients[2] = filter_coefficients.lower;
2758   g_vertex_manager->UploadUtilityUniforms(&encoder_params, sizeof(encoder_params));
2759 
2760   // Because the shader uses gl_FragCoord and we read it back, we must render to the lower-left.
2761   const u32 render_width = bytes_per_row / sizeof(u32);
2762   const u32 render_height = num_blocks_y;
2763   const auto encode_rect = MathUtil::Rectangle<int>(0, 0, render_width, render_height);
2764 
2765   // Render to GPU texture, and then copy to CPU-accessible texture.
2766   g_renderer->SetAndDiscardFramebuffer(m_efb_encoding_framebuffer.get());
2767   g_renderer->SetViewportAndScissor(encode_rect);
2768   g_renderer->SetPipeline(copy_pipeline);
2769   g_renderer->SetTexture(0, src_texture);
2770   g_renderer->SetSamplerState(0, linear_filter ? RenderState::GetLinearSamplerState() :
2771                                                  RenderState::GetPointSamplerState());
2772   g_renderer->Draw(0, 3);
2773   dst->CopyFromTexture(m_efb_encoding_texture.get(), encode_rect, 0, 0, encode_rect);
2774   g_renderer->EndUtilityDrawing();
2775 
2776   // Flush if there's sufficient draws between this copy and the last.
2777   g_vertex_manager->OnEFBCopyToRAM();
2778 }
2779 
DecodeTextureOnGPU(TCacheEntry * entry,u32 dst_level,const u8 * data,u32 data_size,TextureFormat format,u32 width,u32 height,u32 aligned_width,u32 aligned_height,u32 row_stride,const u8 * palette,TLUTFormat palette_format)2780 bool TextureCacheBase::DecodeTextureOnGPU(TCacheEntry* entry, u32 dst_level, const u8* data,
2781                                           u32 data_size, TextureFormat format, u32 width,
2782                                           u32 height, u32 aligned_width, u32 aligned_height,
2783                                           u32 row_stride, const u8* palette,
2784                                           TLUTFormat palette_format)
2785 {
2786   const auto* info = TextureConversionShaderTiled::GetDecodingShaderInfo(format);
2787   if (!info)
2788     return false;
2789 
2790   const AbstractShader* shader = g_shader_cache->GetTextureDecodingShader(format, palette_format);
2791   if (!shader)
2792     return false;
2793 
2794   // Copy to GPU-visible buffer, aligned to the data type.
2795   const u32 bytes_per_buffer_elem =
2796       VertexManagerBase::GetTexelBufferElementSize(info->buffer_format);
2797 
2798   // Allocate space in stream buffer, and copy texture + palette across.
2799   u32 src_offset = 0, palette_offset = 0;
2800   if (info->palette_size > 0)
2801   {
2802     if (!g_vertex_manager->UploadTexelBuffer(data, data_size, info->buffer_format, &src_offset,
2803                                              palette, info->palette_size,
2804                                              TEXEL_BUFFER_FORMAT_R16_UINT, &palette_offset))
2805     {
2806       return false;
2807     }
2808   }
2809   else
2810   {
2811     if (!g_vertex_manager->UploadTexelBuffer(data, data_size, info->buffer_format, &src_offset))
2812       return false;
2813   }
2814 
2815   // Set up uniforms.
2816   struct Uniforms
2817   {
2818     u32 dst_width, dst_height;
2819     u32 src_width, src_height;
2820     u32 src_offset, src_row_stride;
2821     u32 palette_offset, unused;
2822   } uniforms = {width,          height,     aligned_width,
2823                 aligned_height, src_offset, row_stride / bytes_per_buffer_elem,
2824                 palette_offset};
2825   g_vertex_manager->UploadUtilityUniforms(&uniforms, sizeof(uniforms));
2826   g_renderer->SetComputeImageTexture(m_decoding_texture.get(), false, true);
2827 
2828   auto dispatch_groups =
2829       TextureConversionShaderTiled::GetDispatchCount(info, aligned_width, aligned_height);
2830   g_renderer->DispatchComputeShader(shader, dispatch_groups.first, dispatch_groups.second, 1);
2831 
2832   // Copy from decoding texture -> final texture
2833   // This is because we don't want to have to create compute view for every layer
2834   const auto copy_rect = entry->texture->GetConfig().GetMipRect(dst_level);
2835   entry->texture->CopyRectangleFromTexture(m_decoding_texture.get(), copy_rect, 0, 0, copy_rect, 0,
2836                                            dst_level);
2837   entry->texture->FinishedRendering();
2838   return true;
2839 }
2840 
BytesPerRow() const2841 u32 TextureCacheBase::TCacheEntry::BytesPerRow() const
2842 {
2843   const u32 blockW = TexDecoder_GetBlockWidthInTexels(format.texfmt);
2844 
2845   // Round up source height to multiple of block size
2846   const u32 actualWidth = Common::AlignUp(native_width, blockW);
2847 
2848   const u32 numBlocksX = actualWidth / blockW;
2849 
2850   // RGBA takes two cache lines per block; all others take one
2851   const u32 bytes_per_block = format == TextureFormat::RGBA8 ? 64 : 32;
2852 
2853   return numBlocksX * bytes_per_block;
2854 }
2855 
NumBlocksY() const2856 u32 TextureCacheBase::TCacheEntry::NumBlocksY() const
2857 {
2858   u32 blockH = TexDecoder_GetBlockHeightInTexels(format.texfmt);
2859   // Round up source height to multiple of block size
2860   u32 actualHeight = Common::AlignUp(native_height, blockH);
2861 
2862   return actualHeight / blockH;
2863 }
2864 
SetXfbCopy(u32 stride)2865 void TextureCacheBase::TCacheEntry::SetXfbCopy(u32 stride)
2866 {
2867   is_efb_copy = false;
2868   is_xfb_copy = true;
2869   is_xfb_container = false;
2870   memory_stride = stride;
2871 
2872   ASSERT_MSG(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small");
2873 
2874   size_in_bytes = memory_stride * NumBlocksY();
2875 }
2876 
SetEfbCopy(u32 stride)2877 void TextureCacheBase::TCacheEntry::SetEfbCopy(u32 stride)
2878 {
2879   is_efb_copy = true;
2880   is_xfb_copy = false;
2881   is_xfb_container = false;
2882   memory_stride = stride;
2883 
2884   ASSERT_MSG(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small");
2885 
2886   size_in_bytes = memory_stride * NumBlocksY();
2887 }
2888 
SetNotCopy()2889 void TextureCacheBase::TCacheEntry::SetNotCopy()
2890 {
2891   is_efb_copy = false;
2892   is_xfb_copy = false;
2893   is_xfb_container = false;
2894 }
2895 
HashSampleSize() const2896 int TextureCacheBase::TCacheEntry::HashSampleSize() const
2897 {
2898   if (should_force_safe_hashing)
2899   {
2900     return 0;
2901   }
2902 
2903   return g_ActiveConfig.iSafeTextureCache_ColorSamples;
2904 }
2905 
CalculateHash() const2906 u64 TextureCacheBase::TCacheEntry::CalculateHash() const
2907 {
2908   u8* ptr = Memory::GetPointer(addr);
2909   if (memory_stride == BytesPerRow())
2910   {
2911     return Common::GetHash64(ptr, size_in_bytes, HashSampleSize());
2912   }
2913   else
2914   {
2915     u32 blocks = NumBlocksY();
2916     u64 temp_hash = size_in_bytes;
2917 
2918     u32 samples_per_row = 0;
2919     if (HashSampleSize() != 0)
2920     {
2921       // Hash at least 4 samples per row to avoid hashing in a bad pattern, like just on the left
2922       // side of the efb copy
2923       samples_per_row = std::max(HashSampleSize() / blocks, 4u);
2924     }
2925 
2926     for (u32 i = 0; i < blocks; i++)
2927     {
2928       // Multiply by a prime number to mix the hash up a bit. This prevents identical blocks from
2929       // canceling each other out
2930       temp_hash = (temp_hash * 397) ^ Common::GetHash64(ptr, BytesPerRow(), samples_per_row);
2931       ptr += memory_stride;
2932     }
2933     return temp_hash;
2934   }
2935 }
2936 
TexPoolEntry(std::unique_ptr<AbstractTexture> tex,std::unique_ptr<AbstractFramebuffer> fb)2937 TextureCacheBase::TexPoolEntry::TexPoolEntry(std::unique_ptr<AbstractTexture> tex,
2938                                              std::unique_ptr<AbstractFramebuffer> fb)
2939     : texture(std::move(tex)), framebuffer(std::move(fb))
2940 {
2941 }
2942