
Commit

Don't load the shader cache on a separate thread - all it does is already async
hrydgard committed Sep 24, 2023
1 parent 1c58617 commit d31ba39
Showing 7 changed files with 8 additions and 46 deletions.
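To make the rationale concrete, here is a minimal standalone C++ sketch of the pattern the commit removes next to what replaces it. GpuBefore, GpuAfter, and the no-op LoadCache() are illustrative placeholders, not PPSSPP's actual classes; per the commit message, the work the real LoadCache() kicks off is already asynchronous, so calling it inline is cheap.

#include <atomic>
#include <string>
#include <thread>

// Stand-in for GPU_Vulkan::LoadCache(). In the real code this mostly queues
// compilation work that runs asynchronously anyway, so the call returns quickly.
static void LoadCache(const std::string &path) {
	(void)path;
}

// Before: a dedicated load thread, an atomic ready flag, and a join in the
// destructor. This is the pattern the commit deletes.
struct GpuBefore {
	std::atomic<bool> shaderCacheLoaded{false};
	std::thread loadThread;

	explicit GpuBefore(const std::string &path) {
		loadThread = std::thread([this, path] {
			LoadCache(path);
			shaderCacheLoaded = true;
		});
	}
	bool IsReady() const { return shaderCacheLoaded; }
	~GpuBefore() {
		if (loadThread.joinable())
			loadThread.join();
	}
};

// After: call LoadCache() directly in the constructor; readiness is trivial.
struct GpuAfter {
	explicit GpuAfter(const std::string &path) { LoadCache(path); }
	bool IsReady() const { return true; }
};

int main() {
	GpuBefore before("game.vkshadercache");
	GpuAfter after("game.vkshadercache");
	return after.IsReady() ? 0 : 1;
}

With the direct call, the ready flag, the join in the destructor, and the cancel path all become unnecessary, which is what the diffs below delete.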
Common/Data/Collections/Hashmaps.h (1 addition, 1 deletion)
@@ -72,7 +72,7 @@ class DenseHashMap {
 	}
 
 	bool ContainsKey(const Key &key) const {
-		// Slightly wasteful.
+		// Slightly wasteful, though compiler might optimize it.
		Value value;
		return Get(key, &value);
	}

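The tweaked comment refers to DenseHashMap::ContainsKey() answering a pure existence query by filling a throwaway Value via Get(). A toy illustration of that pattern, using a hypothetical ToyMap backed by std::unordered_map rather than the real DenseHashMap:

#include <string>
#include <unordered_map>

// Toy stand-in to show why ContainsKey() is "slightly wasteful": it
// default-constructs a Value and copies into it purely to test for presence.
template <typename Key, typename Value>
class ToyMap {
public:
	bool Get(const Key &key, Value *value) const {
		auto it = map_.find(key);
		if (it == map_.end())
			return false;
		*value = it->second;  // This copy is the wasteful part for an existence check.
		return true;
	}
	bool ContainsKey(const Key &key) const {
		// Slightly wasteful, though the compiler might optimize the copy away.
		Value value;
		return Get(key, &value);
	}
	void Insert(const Key &key, const Value &value) { map_[key] = value; }

private:
	std::unordered_map<Key, Value> map_;
};

int main() {
	ToyMap<int, std::string> m;
	m.Insert(1, "pipeline");
	return m.ContainsKey(1) && !m.ContainsKey(2) ? 0 : 1;
}

The copy of the stored value is the "slightly wasteful" part; for simple value types an optimizer can often elide it, which is what the updated comment hints at.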
GPU/Vulkan/GPU_Vulkan.cpp (2 additions, 19 deletions)
@@ -93,25 +93,12 @@ GPU_Vulkan::GPU_Vulkan(GraphicsContext *gfxCtx, Draw::DrawContext *draw)
 	if (discID.size()) {
		File::CreateFullPath(GetSysDirectory(DIRECTORY_APP_CACHE));
		shaderCachePath_ = GetSysDirectory(DIRECTORY_APP_CACHE) / (discID + ".vkshadercache");
-		shaderCacheLoaded_ = false;
-
-		shaderCacheLoadThread_ = std::thread([&] {
-			SetCurrentThreadName("VulkanLoadCache");
-			AndroidJNIThreadContext ctx;
-			LoadCache(shaderCachePath_);
-			shaderCacheLoaded_ = true;
-		});
-	} else {
-		shaderCacheLoaded_ = true;
+		LoadCache(shaderCachePath_);
	}
 }
 
 bool GPU_Vulkan::IsReady() {
-	return shaderCacheLoaded_;
-}
-
-void GPU_Vulkan::CancelReady() {
-	pipelineManager_->CancelCache();
+	return true;
 }
 
 void GPU_Vulkan::LoadCache(const Path &filename) {
@@ -182,10 +169,6 @@ void GPU_Vulkan::SaveCache(const Path &filename) {
 }
 
 GPU_Vulkan::~GPU_Vulkan() {
-	if (shaderCacheLoadThread_.joinable()) {
-		shaderCacheLoadThread_.join();
-	}
-
	if (draw_) {
		VulkanRenderManager *rm = (VulkanRenderManager *)draw_->GetNativeObject(Draw::NativeObject::RENDER_MANAGER);
		rm->DrainAndBlockCompileQueue();

GPU/Vulkan/GPU_Vulkan.h (0 additions, 4 deletions)
@@ -42,7 +42,6 @@ class GPU_Vulkan : public GPUCommonHW {
 	u32 CheckGPUFeatures() const override;
 
 	bool IsReady() override;
-	void CancelReady() override;
 
	// These are where we can reset command buffers etc.
	void BeginHostFrame() override;
@@ -84,7 +83,4 @@ class GPU_Vulkan : public GPUCommonHW {
 	PipelineManagerVulkan *pipelineManager_;
 
 	Path shaderCachePath_;
-	std::atomic<bool> shaderCacheLoaded_{};
-
-	std::thread shaderCacheLoadThread_;
 };

GPU/Vulkan/PipelineManagerVulkan.cpp (1 addition, 7 deletions)
@@ -719,8 +719,6 @@ bool PipelineManagerVulkan::LoadPipelineCache(FILE *file, bool loadRawPipelineCa
 	VulkanRenderManager *rm = (VulkanRenderManager *)drawContext->GetNativeObject(Draw::NativeObject::RENDER_MANAGER);
	VulkanQueueRunner *queueRunner = rm->GetQueueRunner();
 
-	cancelCache_ = false;
-
	uint32_t size = 0;
	if (loadRawPipelineCache) {
		NOTICE_LOG(G3D, "WARNING: Using the badly tested raw pipeline cache path!!!!");
@@ -779,7 +777,7 @@ bool PipelineManagerVulkan::LoadPipelineCache(FILE *file, bool loadRawPipelineCa
 	int pipelineCreateFailCount = 0;
	int shaderFailCount = 0;
	for (uint32_t i = 0; i < size; i++) {
-		if (failed || cancelCache_) {
+		if (failed) {
			break;
		}
		StoredVulkanPipelineKey key;
@@ -824,7 +822,3 @@ bool PipelineManagerVulkan::LoadPipelineCache(FILE *file, bool loadRawPipelineCa
 	// We just ignore any failures.
	return true;
 }
-
-void PipelineManagerVulkan::CancelCache() {
-	cancelCache_ = true;
-}

GPU/Vulkan/PipelineManagerVulkan.h (0 additions, 2 deletions)
@@ -101,11 +101,9 @@ class PipelineManagerVulkan {
 	// Saves data for faster creation next time.
	void SavePipelineCache(FILE *file, bool saveRawPipelineCache, ShaderManagerVulkan *shaderManager, Draw::DrawContext *drawContext);
	bool LoadPipelineCache(FILE *file, bool loadRawPipelineCache, ShaderManagerVulkan *shaderManager, Draw::DrawContext *drawContext, VkPipelineLayout layout, int multiSampleLevel);
-	void CancelCache();
 
 private:
	DenseHashMap<VulkanPipelineKey, VulkanPipeline *> pipelines_;
	VkPipelineCache pipelineCache_ = VK_NULL_HANDLE;
	VulkanContext *vulkan_;
-	bool cancelCache_ = false;
 };

GPU/Vulkan/ShaderManagerVulkan.cpp (1 addition, 9 deletions)
@@ -239,8 +239,6 @@ void ShaderManagerVulkan::DeviceRestore(Draw::DrawContext *draw) {
 }
 
 void ShaderManagerVulkan::Clear() {
-	std::lock_guard<std::mutex> guard(cacheLock_);
-
	fsCache_.Iterate([&](const FShaderID &key, VulkanFragmentShader *shader) {
		delete shader;
	});
@@ -335,8 +333,6 @@ void ShaderManagerVulkan::GetShaders(int prim, VertexDecoder *decoder, VulkanVer
 		return;
	}
 
-	std::lock_guard<std::mutex> guard(cacheLock_);
-
	VulkanContext *vulkan = (VulkanContext *)draw_->GetNativeObject(Draw::NativeObject::CONTEXT);
	VulkanVertexShader *vs = nullptr;
	if (!vsCache_.Get(VSID, &vs)) {
@@ -399,7 +395,6 @@ void ShaderManagerVulkan::GetShaders(int prim, VertexDecoder *decoder, VulkanVer
 }
 
 std::vector<std::string> ShaderManagerVulkan::DebugGetShaderIDs(DebugShaderType type) {
-	std::lock_guard<std::mutex> guard(cacheLock_);
	std::vector<std::string> ids;
	switch (type) {
	case SHADER_TYPE_VERTEX:
@@ -586,8 +581,7 @@ bool ShaderManagerVulkan::LoadCache(FILE *f) {
 			continue;
		}
		_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "VS length error: %d", (int)strlen(codeBuffer_));
-		// Don't add the new shader if already compiled (can happen since this is a background thread).
-		std::lock_guard<std::mutex> guard(cacheLock_);
+		// Don't add the new shader if already compiled - though this should no longer happen.
		if (!vsCache_.ContainsKey(id)) {
			VulkanVertexShader *vs = new VulkanVertexShader(vulkan, id, flags, codeBuffer_, useHWTransform);
			vsCache_.Insert(id, vs);
@@ -611,7 +605,6 @@ bool ShaderManagerVulkan::LoadCache(FILE *f) {
 			continue;
		}
		_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "FS length error: %d", (int)strlen(codeBuffer_));
-		std::lock_guard<std::mutex> guard(cacheLock_);
		if (!fsCache_.ContainsKey(id)) {
			VulkanFragmentShader *fs = new VulkanFragmentShader(vulkan, id, flags, codeBuffer_);
			fsCache_.Insert(id, fs);
@@ -634,7 +627,6 @@ bool ShaderManagerVulkan::LoadCache(FILE *f) {
 			continue;
		}
		_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "GS length error: %d", (int)strlen(codeBuffer_));
-		std::lock_guard<std::mutex> guard(cacheLock_);
		if (!gsCache_.ContainsKey(id)) {
			VulkanGeometryShader *gs = new VulkanGeometryShader(vulkan, id, codeBuffer_);
			gsCache_.Insert(id, gs);

GPU/Vulkan/ShaderManagerVulkan.h (3 additions, 4 deletions)
@@ -121,9 +121,9 @@ class ShaderManagerVulkan : public ShaderManagerCommon {
 	int GetNumGeometryShaders() const { return (int)gsCache_.size(); }
 
	// Used for saving/loading the cache. Don't need to be particularly fast.
-	VulkanVertexShader *GetVertexShaderFromID(VShaderID id) { std::lock_guard<std::mutex> guard(cacheLock_); return vsCache_.GetOrNull(id); }
-	VulkanFragmentShader *GetFragmentShaderFromID(FShaderID id) { std::lock_guard<std::mutex> guard(cacheLock_); return fsCache_.GetOrNull(id); }
-	VulkanGeometryShader *GetGeometryShaderFromID(GShaderID id) { std::lock_guard<std::mutex> guard(cacheLock_); return gsCache_.GetOrNull(id); }
+	VulkanVertexShader *GetVertexShaderFromID(VShaderID id) { return vsCache_.GetOrNull(id); }
+	VulkanFragmentShader *GetFragmentShaderFromID(FShaderID id) { return fsCache_.GetOrNull(id); }
+	VulkanGeometryShader *GetGeometryShaderFromID(GShaderID id) { return gsCache_.GetOrNull(id); }
 
	VulkanVertexShader *GetVertexShaderFromModule(VkShaderModule module);
	VulkanFragmentShader *GetFragmentShaderFromModule(VkShaderModule module);
@@ -170,7 +170,6 @@ class ShaderManagerVulkan : public ShaderManagerCommon {
 	GSCache gsCache_;
 
	char *codeBuffer_;
-	std::mutex cacheLock_;
 
	uint64_t uboAlignment_;
	// Uniform block scratchpad. These (the relevant ones) are copied to the current pushbuffer at draw time.
