@@ -1,7 +1,7 @@
// Copyright (c) 2017 The Khronos Group Inc.
// Copyright (c) 2017 Valve Corporation
// Copyright (c) 2017 LunarG Inc.
-// Copyright (c) 2018 Google LLC
+// Copyright (c) 2018-2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -23,6 +23,7 @@
#include "source/cfa.h"
#include "source/latest_version_glsl_std_450_header.h"
#include "source/opt/eliminate_dead_functions_util.h"
+#include "source/opt/ir_builder.h"
#include "source/opt/iterator.h"
#include "source/opt/reflect.h"
#include "source/spirv_constant.h"
@@ -38,6 +39,7 @@ const uint32_t kSelectionMergeMergeBlockIdInIdx = 0;
const uint32_t kLoopMergeContinueBlockIdInIdx = 1;
const uint32_t kCopyMemoryTargetAddrInIdx = 0;
const uint32_t kCopyMemorySourceAddrInIdx = 1;
+const uint32_t kLoadSourceAddrInIdx = 0;
const uint32_t kDebugDeclareOperandVariableIndex = 5;
const uint32_t kGlobalVariableVariableIndex = 12;

@@ -95,16 +97,21 @@ bool AggressiveDCEPass::IsVarOfStorage(uint32_t varId, uint32_t storageClass) {
         storageClass;
}

-bool AggressiveDCEPass::IsLocalVar(uint32_t varId) {
+bool AggressiveDCEPass::IsLocalVar(uint32_t varId, Function* func) {
  if (IsVarOfStorage(varId, SpvStorageClassFunction)) {
    return true;
  }
-  if (!private_like_local_) {
+
+  if (!IsVarOfStorage(varId, SpvStorageClassPrivate) &&
+      !IsVarOfStorage(varId, SpvStorageClassWorkgroup)) {
    return false;
  }

-  return IsVarOfStorage(varId, SpvStorageClassPrivate) ||
-         IsVarOfStorage(varId, SpvStorageClassWorkgroup);
+  // For a variable in the Private or WorkGroup storage class, the variable will
+  // get a new instance for every call to an entry point. If the entry point
+  // does not have a call, then no other function can read or write to that
+  // instance of the variable.
+  return IsEntryPointWithNoCalls(func);
}

void AggressiveDCEPass::AddStores(Function* func, uint32_t ptrId) {
@@ -160,14 +167,6 @@ bool AggressiveDCEPass::AllExtensionsSupported() const {
  return true;
}

-bool AggressiveDCEPass::IsDead(Instruction* inst) {
-  if (IsLive(inst)) return false;
-  if ((inst->IsBranch() || inst->opcode() == SpvOpUnreachable) &&
-      context()->get_instr_block(inst)->GetMergeInst() == nullptr)
-    return false;
-  return true;
-}
-
bool AggressiveDCEPass::IsTargetDead(Instruction* inst) {
  const uint32_t tId = inst->GetSingleWordInOperand(0);
  Instruction* tInst = get_def_use_mgr()->GetDef(tId);
@@ -184,12 +183,12 @@ bool AggressiveDCEPass::IsTargetDead(Instruction* inst) {
    });
    return dead;
  }
-  return IsDead(tInst);
+  return !IsLive(tInst);
}

void AggressiveDCEPass::ProcessLoad(Function* func, uint32_t varId) {
  // Only process locals
-  if (!IsLocalVar(varId)) return;
+  if (!IsLocalVar(varId, func)) return;
  // Return if already processed
  if (live_local_vars_.find(varId) != live_local_vars_.end()) return;
  // Mark all stores to varId as live
@@ -265,170 +264,36 @@ void AggressiveDCEPass::AddBreaksAndContinuesToWorklist(
}

bool AggressiveDCEPass::AggressiveDCE(Function* func) {
-  // Mark function parameters as live.
-  AddToWorklist(&func->DefInst());
-  MarkFunctionParameterAsLive(func);
-
-  std::list<BasicBlock*> structuredOrder;
-  cfg()->ComputeStructuredOrder(func, &*func->begin(), &structuredOrder);
-  bool modified = false;
+  std::list<BasicBlock*> structured_order;
+  cfg()->ComputeStructuredOrder(func, &*func->begin(), &structured_order);
  live_local_vars_.clear();
-  InitializeWorkList(func, structuredOrder);
-
-  // Perform closure on live instruction set.
-  while (!worklist_.empty()) {
-    Instruction* liveInst = worklist_.front();
-    // Add all operand instructions of Debug[No]Lines
-    for (auto& lineInst : liveInst->dbg_line_insts()) {
-      if (lineInst.IsDebugLineInst()) {
-        lineInst.ForEachInId([this](const uint32_t* iid) {
-          AddToWorklist(get_def_use_mgr()->GetDef(*iid));
-        });
-      }
-    }
-    // Add all operand instructions if not already live
-    liveInst->ForEachInId([&liveInst, this](const uint32_t* iid) {
-      Instruction* inInst = get_def_use_mgr()->GetDef(*iid);
-      // Do not add label if an operand of a branch. This is not needed
-      // as part of live code discovery and can create false live code,
-      // for example, the branch to a header of a loop.
-      if (inInst->opcode() == SpvOpLabel && liveInst->IsBranch()) return;
-      AddToWorklist(inInst);
-    });
-    if (liveInst->type_id() != 0) {
-      AddToWorklist(get_def_use_mgr()->GetDef(liveInst->type_id()));
-    }
-    BasicBlock* basic_block = context()->get_instr_block(liveInst);
-    if (basic_block != nullptr) {
-      AddToWorklist(basic_block->GetLabelInst());
-    }
-
-    // If in a structured if or loop construct, add the controlling
-    // conditional branch and its merge.
-    BasicBlock* blk = context()->get_instr_block(liveInst);
-    Instruction* branchInst = GetHeaderBranch(blk);
-    if (branchInst != nullptr) {
-      AddToWorklist(branchInst);
-      Instruction* mergeInst = GetMergeInstruction(branchInst);
-      AddToWorklist(mergeInst);
-    }
-    // If the block is a header, add the next outermost controlling
-    // conditional branch and its merge.
-    Instruction* nextBranchInst = GetBranchForNextHeader(blk);
-    if (nextBranchInst != nullptr) {
-      AddToWorklist(nextBranchInst);
-      Instruction* mergeInst = GetMergeInstruction(nextBranchInst);
-      AddToWorklist(mergeInst);
-    }
-    // If local load, add all variable's stores if variable not already live
-    if (liveInst->opcode() == SpvOpLoad || liveInst->IsAtomicWithLoad()) {
-      uint32_t varId;
-      (void)GetPtr(liveInst, &varId);
-      if (varId != 0) {
-        ProcessLoad(func, varId);
-      }
-      // Process memory copies like loads
-    } else if (liveInst->opcode() == SpvOpCopyMemory ||
-               liveInst->opcode() == SpvOpCopyMemorySized) {
-      uint32_t varId;
-      (void)GetPtr(liveInst->GetSingleWordInOperand(kCopyMemorySourceAddrInIdx),
-                   &varId);
-      if (varId != 0) {
-        ProcessLoad(func, varId);
-      }
-      // If DebugDeclare, process as load of variable
-    } else if (liveInst->GetCommonDebugOpcode() ==
-               CommonDebugInfoDebugDeclare) {
-      uint32_t varId =
-          liveInst->GetSingleWordOperand(kDebugDeclareOperandVariableIndex);
-      ProcessLoad(func, varId);
-      // If DebugValue with Deref, process as load of variable
-    } else if (liveInst->GetCommonDebugOpcode() == CommonDebugInfoDebugValue) {
-      uint32_t varId = context()
-                           ->get_debug_info_mgr()
-                           ->GetVariableIdOfDebugValueUsedForDeclare(liveInst);
-      if (varId != 0) ProcessLoad(func, varId);
-      // If merge, add other branches that are part of its control structure
-    } else if (liveInst->opcode() == SpvOpLoopMerge ||
-               liveInst->opcode() == SpvOpSelectionMerge) {
-      AddBreaksAndContinuesToWorklist(liveInst);
-      // If function call, treat as if it loads from all pointer arguments
-    } else if (liveInst->opcode() == SpvOpFunctionCall) {
-      liveInst->ForEachInId([this, func](const uint32_t* iid) {
-        // Skip non-ptr args
-        if (!IsPtr(*iid)) return;
-        uint32_t varId;
-        (void)GetPtr(*iid, &varId);
-        ProcessLoad(func, varId);
-      });
-      // If function parameter, treat as if it's result id is loaded from
-    } else if (liveInst->opcode() == SpvOpFunctionParameter) {
-      ProcessLoad(func, liveInst->result_id());
-      // We treat an OpImageTexelPointer as a load of the pointer, and
-      // that value is manipulated to get the result.
-    } else if (liveInst->opcode() == SpvOpImageTexelPointer) {
-      uint32_t varId;
-      (void)GetPtr(liveInst, &varId);
-      if (varId != 0) {
-        ProcessLoad(func, varId);
-      }
-    }
-
-    // Add OpDecorateId instructions that apply to this instruction to the work
-    // list. We use the decoration manager to look through the group
-    // decorations to get to the OpDecorate* instructions themselves.
-    auto decorations =
-        get_decoration_mgr()->GetDecorationsFor(liveInst->result_id(), false);
-    for (Instruction* dec : decorations) {
-      // We only care about OpDecorateId instructions because the are the only
-      // decorations that will reference an id that will have to be kept live
-      // because of that use.
-      if (dec->opcode() != SpvOpDecorateId) {
-        continue;
-      }
-      if (dec->GetSingleWordInOperand(1) ==
-          SpvDecorationHlslCounterBufferGOOGLE) {
-        // These decorations should not force the use id to be live. It will be
-        // removed if either the target or the in operand are dead.
-        continue;
-      }
-      AddToWorklist(dec);
-    }
-
-    // Add DebugScope and DebugInlinedAt for |liveInst| to the work list.
-    if (liveInst->GetDebugScope().GetLexicalScope() != kNoDebugScope) {
-      auto* scope = get_def_use_mgr()->GetDef(
-          liveInst->GetDebugScope().GetLexicalScope());
-      AddToWorklist(scope);
-    }
-    if (liveInst->GetDebugInlinedAt() != kNoInlinedAt) {
-      auto* inlined_at =
-          get_def_use_mgr()->GetDef(liveInst->GetDebugInlinedAt());
-      AddToWorklist(inlined_at);
-    }
-    worklist_.pop();
-  }
+  InitializeWorkList(func, structured_order);
+  ProcessWorkList(func);
+  return KillDeadInstructions(func, structured_order);
+}

-  // Kill dead instructions and remember dead blocks
-  for (auto bi = structuredOrder.begin(); bi != structuredOrder.end();) {
-    uint32_t mergeBlockId = 0;
-    (*bi)->ForEachInst([this, &modified, &mergeBlockId](Instruction* inst) {
-      if (!IsDead(inst)) return;
+bool AggressiveDCEPass::KillDeadInstructions(
+    const Function* func, std::list<BasicBlock*>& structured_order) {
+  bool modified = false;
+  for (auto bi = structured_order.begin(); bi != structured_order.end();) {
+    uint32_t merge_block_id = 0;
+    (*bi)->ForEachInst([this, &modified, &merge_block_id](Instruction* inst) {
+      if (IsLive(inst)) return;
      if (inst->opcode() == SpvOpLabel) return;
      // If dead instruction is selection merge, remember merge block
      // for new branch at end of block
      if (inst->opcode() == SpvOpSelectionMerge ||
          inst->opcode() == SpvOpLoopMerge)
-        mergeBlockId = inst->GetSingleWordInOperand(0);
+        merge_block_id = inst->GetSingleWordInOperand(0);
      to_kill_.push_back(inst);
      modified = true;
    });
    // If a structured if or loop was deleted, add a branch to its merge
    // block, and traverse to the merge block and continue processing there.
    // We know the block still exists because the label is not deleted.
-    if (mergeBlockId != 0) {
-      AddBranch(mergeBlockId, *bi);
-      for (++bi; (*bi)->id() != mergeBlockId; ++bi) {
+    if (merge_block_id != 0) {
+      AddBranch(merge_block_id, *bi);
+      for (++bi; (*bi)->id() != merge_block_id; ++bi) {
      }

      auto merge_terminator = (*bi)->terminator();
@@ -451,94 +316,250 @@ bool AggressiveDCEPass::AggressiveDCE(Function* func) {
        live_insts_.Set(merge_terminator->unique_id());
      }
    } else {
+      Instruction* inst = (*bi)->terminator();
+      if (!IsLive(inst)) {
+        // If the terminator is not live, this block has no live instructions,
+        // and it will be unreachable.
+        AddUnreachable(*bi);
+      }
      ++bi;
    }
  }
-
  return modified;
}

+void AggressiveDCEPass::ProcessWorkList(Function* func) {
+  while (!worklist_.empty()) {
+    Instruction* live_inst = worklist_.front();
+    worklist_.pop();
+    AddOperandsToWorkList(live_inst);
+    MarkBlockAsLive(live_inst);
+    MarkLoadedVariablesAsLive(func, live_inst);
+    AddDecorationsToWorkList(live_inst);
+    AddDebugInstructionsToWorkList(live_inst);
+  }
+}
+
+void AggressiveDCEPass::AddDebugInstructionsToWorkList(
+    const Instruction* inst) {
+  for (auto& line_inst : inst->dbg_line_insts()) {
+    if (line_inst.IsDebugLineInst()) {
+      AddOperandsToWorkList(&line_inst);
+    }
+  }
+
+  if (inst->GetDebugScope().GetLexicalScope() != kNoDebugScope) {
+    auto* scope =
+        get_def_use_mgr()->GetDef(inst->GetDebugScope().GetLexicalScope());
+    AddToWorklist(scope);
+  }
+  if (inst->GetDebugInlinedAt() != kNoInlinedAt) {
+    auto* inlined_at = get_def_use_mgr()->GetDef(inst->GetDebugInlinedAt());
+    AddToWorklist(inlined_at);
+  }
+}
+
+void AggressiveDCEPass::AddDecorationsToWorkList(const Instruction* inst) {
+  // Add OpDecorateId instructions that apply to this instruction to the work
+  // list. We use the decoration manager to look through the group
+  // decorations to get to the OpDecorate* instructions themselves.
+  auto decorations =
+      get_decoration_mgr()->GetDecorationsFor(inst->result_id(), false);
+  for (Instruction* dec : decorations) {
+    // We only care about OpDecorateId instructions because they are the only
+    // decorations that will reference an id that will have to be kept live
+    // because of that use.
+    if (dec->opcode() != SpvOpDecorateId) {
+      continue;
+    }
+    if (dec->GetSingleWordInOperand(1) ==
+        SpvDecorationHlslCounterBufferGOOGLE) {
+      // These decorations should not force the use id to be live. It will be
+      // removed if either the target or the in operand are dead.
+      continue;
+    }
+    AddToWorklist(dec);
+  }
+}
+
+void AggressiveDCEPass::MarkLoadedVariablesAsLive(Function* func,
+                                                  Instruction* inst) {
+  std::vector<uint32_t> live_variables = GetLoadedVariables(inst);
+  for (uint32_t var_id : live_variables) {
+    ProcessLoad(func, var_id);
+  }
+}
+
+std::vector<uint32_t> AggressiveDCEPass::GetLoadedVariables(Instruction* inst) {
+  if (inst->opcode() == SpvOpFunctionCall) {
+    return GetLoadedVariablesFromFunctionCall(inst);
+  }
+  uint32_t var_id = GetLoadedVariableFromNonFunctionCalls(inst);
+  if (var_id == 0) {
+    return {};
+  }
+  return {var_id};
+}
+
+uint32_t AggressiveDCEPass::GetLoadedVariableFromNonFunctionCalls(
+    Instruction* inst) {
+  std::vector<uint32_t> live_variables;
+  if (inst->IsAtomicWithLoad()) {
+    return GetVariableId(inst->GetSingleWordInOperand(kLoadSourceAddrInIdx));
+  }
+
+  switch (inst->opcode()) {
+    case SpvOpLoad:
+    case SpvOpImageTexelPointer:
+      return GetVariableId(inst->GetSingleWordInOperand(kLoadSourceAddrInIdx));
+    case SpvOpCopyMemory:
+    case SpvOpCopyMemorySized:
+      return GetVariableId(
+          inst->GetSingleWordInOperand(kCopyMemorySourceAddrInIdx));
+    default:
+      break;
+  }
+
+  switch (inst->GetCommonDebugOpcode()) {
+    case CommonDebugInfoDebugDeclare:
+      return inst->GetSingleWordOperand(kDebugDeclareOperandVariableIndex);
+    case CommonDebugInfoDebugValue: {
+      analysis::DebugInfoManager* debug_info_mgr =
+          context()->get_debug_info_mgr();
+      return debug_info_mgr->GetVariableIdOfDebugValueUsedForDeclare(inst);
+    }
+    default:
+      break;
+  }
+  return 0;
+}
+
+std::vector<uint32_t> AggressiveDCEPass::GetLoadedVariablesFromFunctionCall(
+    const Instruction* inst) {
+  assert(inst->opcode() == SpvOpFunctionCall);
+  std::vector<uint32_t> live_variables;
+  inst->ForEachInId([this, &live_variables](const uint32_t* operand_id) {
+    if (!IsPtr(*operand_id)) return;
+    uint32_t var_id = GetVariableId(*operand_id);
+    live_variables.push_back(var_id);
+  });
+  return live_variables;
+}
+
+uint32_t AggressiveDCEPass::GetVariableId(uint32_t ptr_id) {
+  assert(IsPtr(ptr_id) &&
+         "Cannot get the variable when input is not a pointer.");
+  uint32_t varId = 0;
+  (void)GetPtr(ptr_id, &varId);
+  return varId;
+}
+
+void AggressiveDCEPass::MarkBlockAsLive(Instruction* inst) {
+  BasicBlock* basic_block = context()->get_instr_block(inst);
+  if (basic_block == nullptr) {
+    return;
+  }
+
+  // If we intend to keep this instruction, we need the block label and
+  // block terminator to have a valid block for the instruction.
+  AddToWorklist(basic_block->GetLabelInst());
+
+  // We need to mark the successor blocks that follow as live. If this is the
+  // header of the merge construct, the construct may be folded, but we will
+  // definitely need the merge label. If it is not a construct, the terminator
+  // must be live, and the successor blocks will be marked as live when
+  // processing the terminator.
+  uint32_t merge_id = basic_block->MergeBlockIdIfAny();
+  if (merge_id == 0) {
+    AddToWorklist(basic_block->terminator());
+  } else {
+    AddToWorklist(context()->get_def_use_mgr()->GetDef(merge_id));
+  }
+
+  // Mark the structured control flow construct that contains this block as
+  // live. If |inst| is an instruction in the loop header, then it is part of
+  // the loop, so the loop construct must be live. We exclude the label because
+  // it does not matter how many times it is executed. This could be extended
+  // to more instructions, but we will need it for now.
+  if (inst->opcode() != SpvOpLabel)
+    MarkLoopConstructAsLiveIfLoopHeader(basic_block);
+
+  Instruction* next_branch_inst = GetBranchForNextHeader(basic_block);
+  if (next_branch_inst != nullptr) {
+    AddToWorklist(next_branch_inst);
+    Instruction* mergeInst = GetMergeInstruction(next_branch_inst);
+    AddToWorklist(mergeInst);
+  }
+
+  if (inst->opcode() == SpvOpLoopMerge ||
+      inst->opcode() == SpvOpSelectionMerge) {
+    AddBreaksAndContinuesToWorklist(inst);
+  }
+}
+void AggressiveDCEPass::MarkLoopConstructAsLiveIfLoopHeader(
+    BasicBlock* basic_block) {
+  // If this is the header of a loop, then the loop structure needs to be kept
+  // as well because the loop header is also part of the loop.
+  Instruction* merge_inst = basic_block->GetLoopMergeInst();
+  if (merge_inst != nullptr) {
+    AddToWorklist(basic_block->terminator());
+    AddToWorklist(merge_inst);
+  }
+}
+
+void AggressiveDCEPass::AddOperandsToWorkList(const Instruction* inst) {
+  inst->ForEachInId([this](const uint32_t* iid) {
+    Instruction* inInst = get_def_use_mgr()->GetDef(*iid);
+    AddToWorklist(inInst);
+  });
+  if (inst->type_id() != 0) {
+    AddToWorklist(get_def_use_mgr()->GetDef(inst->type_id()));
+  }
+}
+
void AggressiveDCEPass::InitializeWorkList(
-    Function* func, std::list<BasicBlock*>& structuredOrder) {
+    Function* func, std::list<BasicBlock*>& structured_order) {
+  AddToWorklist(&func->DefInst());
+  MarkFunctionParameterAsLive(func);
+  MarkFirstBlockAsLive(func);
+
  // Add instructions with external side effects to the worklist. Also add
  // branches that are not attached to a structured construct.
  // TODO(s-perron): The handling of branch seems to be adhoc. This needs to be
  // cleaned up.
-  bool call_in_func = false;
-  bool func_is_entry_point = false;
-
-  // TODO(s-perron): We need to check if this is actually needed. In cases
-  // where private variable can be treated as if they are function scope, the
-  // private-to-local pass should be able to change them to function scope.
-  std::vector<Instruction*> private_stores;
-
-  for (auto bi = structuredOrder.begin(); bi != structuredOrder.end(); ++bi) {
-    for (auto ii = (*bi)->begin(); ii != (*bi)->end(); ++ii) {
+  for (auto& bi : structured_order) {
+    for (auto ii = bi->begin(); ii != bi->end(); ++ii) {
      SpvOp op = ii->opcode();
+      if (ii->IsBranch()) {
+        continue;
+      }
      switch (op) {
        case SpvOpStore: {
-          uint32_t varId;
-          (void)GetPtr(&*ii, &varId);
-          // Mark stores as live if their variable is not function scope
-          // and is not private scope. Remember private stores for possible
-          // later inclusion. We cannot call IsLocalVar at this point because
-          // private_like_local_ has not been set yet.
-          if (IsVarOfStorage(varId, SpvStorageClassPrivate) ||
-              IsVarOfStorage(varId, SpvStorageClassWorkgroup))
-            private_stores.push_back(&*ii);
-          else if (!IsVarOfStorage(varId, SpvStorageClassFunction))
-            AddToWorklist(&*ii);
+          uint32_t var_id = 0;
+          (void)GetPtr(&*ii, &var_id);
+          if (!IsLocalVar(var_id, func)) AddToWorklist(&*ii);
        } break;
        case SpvOpCopyMemory:
        case SpvOpCopyMemorySized: {
-          uint32_t varId;
-          (void)GetPtr(ii->GetSingleWordInOperand(kCopyMemoryTargetAddrInIdx),
-                       &varId);
-          if (IsVarOfStorage(varId, SpvStorageClassPrivate) ||
-              IsVarOfStorage(varId, SpvStorageClassWorkgroup))
-            private_stores.push_back(&*ii);
-          else if (!IsVarOfStorage(varId, SpvStorageClassFunction))
-            AddToWorklist(&*ii);
-        } break;
-        case SpvOpSwitch:
-        case SpvOpBranch:
-        case SpvOpBranchConditional:
-        case SpvOpUnreachable: {
-          bool branchRelatedToConstruct =
-              (GetMergeInstruction(&*ii) == nullptr &&
-               GetHeaderBlock(context()->get_instr_block(&*ii)) == nullptr);
-          if (branchRelatedToConstruct) {
-            AddToWorklist(&*ii);
-          }
+          uint32_t var_id = 0;
+          uint32_t target_addr_id =
+              ii->GetSingleWordInOperand(kCopyMemoryTargetAddrInIdx);
+          (void)GetPtr(target_addr_id, &var_id);
+          if (!IsLocalVar(var_id, func)) AddToWorklist(&*ii);
        } break;
        case SpvOpLoopMerge:
        case SpvOpSelectionMerge:
+        case SpvOpUnreachable:
          break;
        default: {
          // Function calls, atomics, function params, function returns, etc.
          if (!ii->IsOpcodeSafeToDelete()) {
            AddToWorklist(&*ii);
          }
-          // Remember function calls
-          if (op == SpvOpFunctionCall) call_in_func = true;
        } break;
      }
    }
  }
-  // See if current function is an entry point
-  for (auto& ei : get_module()->entry_points()) {
-    if (ei.GetSingleWordInOperand(kEntryPointFunctionIdInIdx) ==
-        func->result_id()) {
-      func_is_entry_point = true;
-      break;
-    }
-  }
-  // If the current function is an entry point and has no function calls,
-  // we can optimize private variables as locals
-  private_like_local_ = func_is_entry_point && !call_in_func;
-  // If privates are not like local, add their stores to worklist
-  if (!private_like_local_)
-    for (auto& ps : private_stores) AddToWorklist(ps);
}

void AggressiveDCEPass::InitializeModuleScopeLiveInstructions() {
@@ -596,16 +617,25 @@ void AggressiveDCEPass::InitializeModuleScopeLiveInstructions() {
  }

  // For each DebugInfo GlobalVariable keep all operands except the Variable.
-  // Later, if the variable is dead, we will set the operand to DebugInfoNone.
+  // Later, if the variable is killed with KillInst(), we will set the operand
+  // to DebugInfoNone. Create and save DebugInfoNone now for this possible
+  // later use. This is slightly unoptimal, but it avoids generating it during
+  // instruction killing when the module is not consistent.
+  bool debug_global_seen = false;
  for (auto& dbg : get_module()->ext_inst_debuginfo()) {
    if (dbg.GetCommonDebugOpcode() != CommonDebugInfoDebugGlobalVariable)
      continue;
+    debug_global_seen = true;
    dbg.ForEachInId([this](const uint32_t* iid) {
-      Instruction* inInst = get_def_use_mgr()->GetDef(*iid);
-      if (inInst->opcode() == SpvOpVariable) return;
-      AddToWorklist(inInst);
+      Instruction* in_inst = get_def_use_mgr()->GetDef(*iid);
+      if (in_inst->opcode() == SpvOpVariable) return;
+      AddToWorklist(in_inst);
    });
  }
+  if (debug_global_seen) {
+    auto dbg_none = context()->get_debug_info_mgr()->GetDebugInfoNone();
+    AddToWorklist(dbg_none);
+  }
}

Pass::Status AggressiveDCEPass::ProcessImpl() {
@@ -743,7 +773,7 @@ bool AggressiveDCEPass::ProcessGlobalValues() {
            uint32_t counter_buffer_id = annotation->GetSingleWordInOperand(2);
            Instruction* counter_buffer_inst =
                get_def_use_mgr()->GetDef(counter_buffer_id);
-            if (IsDead(counter_buffer_inst)) {
+            if (!IsLive(counter_buffer_inst)) {
              context()->KillInst(annotation);
              modified = true;
            }
@@ -758,7 +788,7 @@ bool AggressiveDCEPass::ProcessGlobalValues() {
        for (uint32_t i = 1; i < annotation->NumOperands();) {
          Instruction* opInst =
              get_def_use_mgr()->GetDef(annotation->GetSingleWordOperand(i));
-          if (IsDead(opInst)) {
+          if (!IsLive(opInst)) {
            // Don't increment |i|.
            annotation->RemoveOperand(i);
            modified = true;
@@ -785,7 +815,7 @@ bool AggressiveDCEPass::ProcessGlobalValues() {
        for (uint32_t i = 1; i < annotation->NumOperands();) {
          Instruction* opInst =
              get_def_use_mgr()->GetDef(annotation->GetSingleWordOperand(i));
-          if (IsDead(opInst)) {
+          if (!IsLive(opInst)) {
            // Don't increment |i|.
            annotation->RemoveOperand(i + 1);
            annotation->RemoveOperand(i);
@@ -819,13 +849,13 @@ bool AggressiveDCEPass::ProcessGlobalValues() {
  }

  for (auto& dbg : get_module()->ext_inst_debuginfo()) {
-    if (!IsDead(&dbg)) continue;
+    if (IsLive(&dbg)) continue;
    // Save GlobalVariable if its variable is live, otherwise null out variable
    // index
    if (dbg.GetCommonDebugOpcode() == CommonDebugInfoDebugGlobalVariable) {
      auto var_id = dbg.GetSingleWordOperand(kGlobalVariableVariableIndex);
      Instruction* var_inst = get_def_use_mgr()->GetDef(var_id);
-      if (!IsDead(var_inst)) continue;
+      if (IsLive(var_inst)) continue;
      context()->ForgetUses(&dbg);
      dbg.SetOperand(
          kGlobalVariableVariableIndex,
@@ -840,7 +870,7 @@ bool AggressiveDCEPass::ProcessGlobalValues() {
  // Since ADCE is disabled for non-shaders, we don't check for export linkage
  // attributes here.
  for (auto& val : get_module()->types_values()) {
-    if (IsDead(&val)) {
+    if (!IsLive(&val)) {
      // Save forwarded pointer if pointer is live since closure does not mark
      // this live as it does not have a result id. This is a little too
      // conservative since it is not known if the structure type that needed
@@ -848,7 +878,7 @@ bool AggressiveDCEPass::ProcessGlobalValues() {
      if (val.opcode() == SpvOpTypeForwardPointer) {
        uint32_t ptr_ty_id = val.GetSingleWordInOperand(0);
        Instruction* ptr_ty_inst = get_def_use_mgr()->GetDef(ptr_ty_id);
-        if (!IsDead(ptr_ty_inst)) continue;
+        if (IsLive(ptr_ty_inst)) continue;
      }
      to_kill_.push_back(&val);
      modified = true;
@@ -867,7 +897,7 @@ bool AggressiveDCEPass::ProcessGlobalValues() {
        } else {
          auto* var =
              get_def_use_mgr()->GetDef(entry.GetSingleWordInOperand(i));
-          if (!IsDead(var)) {
+          if (IsLive(var)) {
            new_operands.push_back(entry.GetInOperand(i));
          }
        }
@@ -1018,5 +1048,43 @@ bool AggressiveDCEPass::BlockIsInConstruct(BasicBlock* header_block,
  return false;
}

+bool AggressiveDCEPass::IsEntryPointWithNoCalls(Function* func) {
+  auto cached_result = entry_point_with_no_calls_cache_.find(func->result_id());
+  if (cached_result != entry_point_with_no_calls_cache_.end()) {
+    return cached_result->second;
+  }
+  bool result = IsEntryPoint(func) && !HasCall(func);
+  entry_point_with_no_calls_cache_[func->result_id()] = result;
+  return result;
+}
+
+bool AggressiveDCEPass::IsEntryPoint(Function* func) {
+  for (const Instruction& entry_point : get_module()->entry_points()) {
+    uint32_t entry_point_id =
+        entry_point.GetSingleWordInOperand(kEntryPointFunctionIdInIdx);
+    if (entry_point_id == func->result_id()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool AggressiveDCEPass::HasCall(Function* func) {
+  return !func->WhileEachInst(
+      [](Instruction* inst) { return inst->opcode() != SpvOpFunctionCall; });
+}
+
+void AggressiveDCEPass::MarkFirstBlockAsLive(Function* func) {
+  BasicBlock* first_block = &*func->begin();
+  MarkBlockAsLive(first_block->GetLabelInst());
+}
+
+void AggressiveDCEPass::AddUnreachable(BasicBlock*& block) {
+  InstructionBuilder builder(
+      context(), block,
+      IRContext::kAnalysisInstrToBlockMapping | IRContext::kAnalysisDefUse);
+  builder.AddUnreachable();
+}
+
}  // namespace opt
}  // namespace spvtools