View source code

[linux-port] Unused variables in conditionals (#1336)

A surprising number of if statements declare variables in their
parentheses. Usually, these variables are used afterward. When they
are not, gcc produces a warning. clang does not.
Fixes gcc warnings.
Greg Roth 7 years ago
parent
commit
a6f857371d

+ 4 - 4
lib/HLSL/ComputeViewIdState.cpp

@@ -432,7 +432,7 @@ void DxilViewIdState::CollectValuesContributingToOutputs(EntryInfo &Entry) {
 void DxilViewIdState::CollectValuesContributingToOutputRec(EntryInfo &Entry,
                                                            Value *pContributingValue,
                                                            InstructionSetType &ContributingInstructions) {
-  if (Argument *pArg = dyn_cast<Argument>(pContributingValue)) {
+  if (dyn_cast<Argument>(pContributingValue)) {
     // This must be a leftover signature argument of an entry function.
     DXASSERT_NOMSG(Entry.pEntryFunc == m_pModule->GetEntryFunction() ||
                    Entry.pEntryFunc == m_pModule->GetPatchConstantFunction());
@@ -610,7 +610,7 @@ void DxilViewIdState::CollectReachingDeclsRec(Value *pValue, ValueSetType &Reach
     }
   }
 
-  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(pValue)) {
+  if (dyn_cast<GlobalVariable>(pValue)) {
     ReachingDecls.emplace(pValue);
     return;
   }
@@ -621,7 +621,7 @@ void DxilViewIdState::CollectReachingDeclsRec(Value *pValue, ValueSetType &Reach
   } else if (GEPOperator *pGepOp = dyn_cast<GEPOperator>(pValue)) {
     Value *pPtrValue = pGepOp->getPointerOperand();
     CollectReachingDeclsRec(pPtrValue, ReachingDecls, Visited);
-  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(pValue)) {
+  } else if (dyn_cast<AllocaInst>(pValue)) {
     ReachingDecls.emplace(pValue);
   } else if (PHINode *phi = dyn_cast<PHINode>(pValue)) {
     for (Value *pPtrValue : phi->operands()) {
@@ -630,7 +630,7 @@ void DxilViewIdState::CollectReachingDeclsRec(Value *pValue, ValueSetType &Reach
   } else if (SelectInst *SelI = dyn_cast<SelectInst>(pValue)) {
     CollectReachingDeclsRec(SelI->getTrueValue(), ReachingDecls, Visited);
     CollectReachingDeclsRec(SelI->getFalseValue(), ReachingDecls, Visited);
-  } else if (Argument *pArg = dyn_cast<Argument>(pValue)) {
+  } else if (dyn_cast<Argument>(pValue)) {
     ReachingDecls.emplace(pValue);
   } else {
     IFT(DXC_E_GENERAL_INTERNAL_ERROR);

+ 6 - 6
lib/HLSL/DxilGenerationPass.cpp

@@ -464,7 +464,7 @@ void DxilGenerationPass::TranslateParamDxilResourceHandles(Function *F, std::uno
               userBuilder, HLOpcodeGroup::HLCast, 0, handleTy, {res},
               *F->getParent());
           userBuilder.CreateStore(handle, castToHandle);
-        } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
+        } else if (dyn_cast<CallInst>(U)) {
           // Don't flatten argument here.
           continue;
         } else {
@@ -740,7 +740,7 @@ UpdateHandleOperands(Instruction *Res,
 
   unsigned startOpIdx = 0;
   // Skip Cond for Select.
-  if (SelectInst *Sel = dyn_cast<SelectInst>(Res))
+  if (dyn_cast<SelectInst>(Res))
     startOpIdx = 1;
 
   CallInst *Handle = handleMap[Res];
@@ -878,7 +878,7 @@ void DxilGenerationPass::AddCreateHandleForPhiNodeAndSelect(OP *hlslOP) {
 
       unsigned startOpIdx = 0;
       // Skip Cond for Select.
-      if (SelectInst *Sel = dyn_cast<SelectInst>(I))
+      if (dyn_cast<SelectInst>(I))
         startOpIdx = 1;
       if (MergeHandleOpWithSameValue(I, startOpIdx, numOperands)) {
         nonUniformOps.erase(I);
@@ -1060,7 +1060,7 @@ static void TranslatePreciseAttributeOnFunction(Function &F, Module &M) {
   for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
     BasicBlock *BB = BBI;
     for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
-      if (FPMathOperator *FPMath = dyn_cast<FPMathOperator>(I)) {
+      if (dyn_cast<FPMathOperator>(I)) {
         // Set precise fast math on those instructions that support it.
         if (DxilModule::PreservesFastMathFlags(I))
           I->copyFastMathFlags(FMF);
@@ -1438,12 +1438,12 @@ PropagatePreciseAttribute(Instruction *I, DxilTypeSystem &typeSys,
   LLVMContext &Context = I->getContext();
   if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
     PropagatePreciseAttributeOnPointer(AI, typeSys, Context, processedSet);
-  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
+  } else if (dyn_cast<CallInst>(I)) {
     // Propagate every argument.
     // TODO: only propagate precise argument.
     for (Value *src : I->operands())
       PropagatePreciseAttributeOnOperand(src, typeSys, Context, processedSet);
-  } else if (FPMathOperator *FPMath = dyn_cast<FPMathOperator>(I)) {
+  } else if (dyn_cast<FPMathOperator>(I)) {
     // TODO: only propagate precise argument.
     for (Value *src : I->operands())
       PropagatePreciseAttributeOnOperand(src, typeSys, Context, processedSet);

+ 2 - 3
lib/HLSL/DxilLinker.cpp

@@ -573,8 +573,7 @@ DxilLinkJob::Link(std::pair<DxilFunctionLinkInfo *, DxilLib *> &entryLinkPair,
     if (!NewF->hasFnAttribute(llvm::Attribute::NoInline))
       NewF->addFnAttr(llvm::Attribute::AlwaysInline);
 
-    if (DxilFunctionAnnotation *funcAnnotation =
-            tmpTypeSys.GetFunctionAnnotation(F)) {
+    if (tmpTypeSys.GetFunctionAnnotation(F)) {
       // Clone funcAnnotation to typeSys.
       typeSys.CopyFunctionAnnotation(NewF, F, tmpTypeSys);
     }
@@ -937,4 +936,4 @@ namespace hlsl {
 DxilLinker *DxilLinker::CreateLinker(LLVMContext &Ctx, unsigned valMajor, unsigned valMinor) {
   return new DxilLinkerImpl(Ctx, valMajor, valMinor);
 }
-} // namespace hlsl
+} // namespace hlsl

+ 1 - 1
lib/HLSL/DxilModule.cpp

@@ -939,7 +939,7 @@ static void CollectUsedResource(Value *resID,
     return;
 
   usedResID.insert(resID);
-  if (ConstantInt *cResID = dyn_cast<ConstantInt>(resID)) {
+  if (dyn_cast<ConstantInt>(resID)) {
     // Do nothing
   } else if (ZExtInst *ZEI = dyn_cast<ZExtInst>(resID)) {
     if (ZEI->getSrcTy()->isIntegerTy()) {

+ 1 - 1
lib/HLSL/DxilTargetTransformInfo.cpp

@@ -75,7 +75,7 @@ bool IsDxilOpSourceOfDivergence(const CallInst *CI, OP *hlslOP,
 /// different across dispatch or thread group.
 bool DxilTTIImpl::isSourceOfDivergence(const Value *V) const {
 
-  if (const Argument *A = dyn_cast<Argument>(V))
+  if (dyn_cast<Argument>(V))
     return true;
 
   // Atomics are divergent because they are executed sequentially: when an

+ 4 - 4
lib/HLSL/HLMatrixLowerPass.cpp

@@ -2006,7 +2006,7 @@ void HLMatrixLowerPass::TranslateMatArrayGEP(Value *matInst,
         DXASSERT(0, "invalid operation");
         break;
       }
-    } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(useInst)) {
+    } else if (dyn_cast<BitCastInst>(useInst)) {
       // Just replace the src with vec version.
       useInst->setOperand(0, newGEP);
     } else {
@@ -2032,7 +2032,7 @@ void HLMatrixLowerPass::replaceMatWithVec(Instruction *matInst,
           hlsl::GetHLOpcodeGroupByName(useCall->getCalledFunction());
       switch (group) {
       case HLOpcodeGroup::HLIntrinsic: {
-        if (CallInst *matCI = dyn_cast<CallInst>(matInst)) {
+        if (dyn_cast<CallInst>(matInst)) {
           MatIntrinsicReplace(cast<CallInst>(matInst), vecInst, useCall);
         } else {
           IntrinsicOp opcode = static_cast<IntrinsicOp>(GetHLOpcode(useCall));
@@ -2068,7 +2068,7 @@ void HLMatrixLowerPass::replaceMatWithVec(Instruction *matInst,
       case HLOpcodeGroup::HLMatLoadStore: {
         DXASSERT(matToVecMap.count(useCall), "must has vec version");
         Value *vecUser = matToVecMap[useCall];
-        if (AllocaInst *AI = dyn_cast<AllocaInst>(matInst)) {
+        if (dyn_cast<AllocaInst>(matInst)) {
           // Load Already translated in lowerToVec.
           // Store val operand will be set by the val use.
           // Do nothing here.
@@ -2090,7 +2090,7 @@ void HLMatrixLowerPass::replaceMatWithVec(Instruction *matInst,
         TranslateMatInit(useCall);
       } break;
       }
-    } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(useInst)) {
+    } else if (dyn_cast<BitCastInst>(useInst)) {
       // Just replace the src with vec version.
       useInst->setOperand(0, vecInst);
     } else {

+ 2 - 2
lib/HLSL/HLModule.cpp

@@ -922,7 +922,7 @@ void HLModule::MergeGepUse(Value *V) {
       } else {
         MergeGepUse(*Use);
       }
-    } else if (GEPOperator *GEPOp = dyn_cast<GEPOperator>(*Use)) {
+    } else if (dyn_cast<GEPOperator>(*Use)) {
       if (GEPOperator *prevGEP = dyn_cast<GEPOperator>(V)) {
         // merge the 2 GEPs
         Value *newGEP = MergeGEP(prevGEP, GEP);
@@ -1073,7 +1073,7 @@ void HLModule::MarkPreciseAttributeOnPtrWithFunctionCall(llvm::Value *Ptr,
                                                llvm::Module &M) {
   for (User *U : Ptr->users()) {
     // Skip load inst.
-    if (LoadInst *LI = dyn_cast<LoadInst>(U))
+    if (dyn_cast<LoadInst>(U))
       continue;
     if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
       Value *V = SI->getValueOperand();

+ 2 - 2
lib/HLSL/HLOperationLower.cpp

@@ -723,7 +723,7 @@ Constant *GetLoadInputsForEvaluate(Value *V, std::vector<CallInst*> &loadList) {
 // for temporary insertelement instructions should maintain the existing size of the loadinput.
 // So we have to analyze the type of src in order to determine the actual size required.
 Type *GetInsertElementTypeForEvaluate(Value *src) {
-  if (InsertElementInst *IE = dyn_cast<InsertElementInst>(src)) {
+  if (dyn_cast<InsertElementInst>(src)) {
     return src->getType();
   }
   else if (ShuffleVectorInst *SV = dyn_cast<ShuffleVectorInst>(src)) {
@@ -4623,7 +4623,7 @@ Value *GenerateVecEltFromGEP(Value *ldData, GetElementPtrInst *GEP,
   DXASSERT_LOCALVAR(baseIdx && zeroIdx, baseIdx == zeroIdx,
                     "base index must be 0");
   Value *idx = (GEP->idx_begin() + 1)->get();
-  if (ConstantInt *cidx = dyn_cast<ConstantInt>(idx)) {
+  if (dyn_cast<ConstantInt>(idx)) {
     return Builder.CreateExtractElement(ldData, idx);
   } else {
     // Dynamic indexing.

+ 1 - 1
lib/Transforms/Scalar/GVN.cpp

@@ -2488,7 +2488,7 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
 
   // HLSL Change Begin - Don't do PRE on pointer which may generate phi of
   // pointers.
-  if (PointerType *PT = dyn_cast<PointerType>(CurInst->getType())) {
+  if (dyn_cast<PointerType>(CurInst->getType())) {
     return false;
   }
   // HLSL Change End

+ 3 - 3
lib/Transforms/Scalar/LoopStrengthReduce.cpp

@@ -688,12 +688,12 @@ static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
 /// getAccessType - Return the type of the memory being accessed.
 static Type *getAccessType(const Instruction *Inst) {
   Type *AccessTy = Inst->getType();
-  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
+  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
     AccessTy = SI->getOperand(0)->getType();
-  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+#if 0 // HLSL Change - remove platform intrinsics
+  } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
     // Addressing modes can also be folded into prefetches and a variety
     // of intrinsics.
-#if 0 // HLSL Change - remove platform intrinsics
     switch (II->getIntrinsicID()) {
     default: break;
     case Intrinsic::x86_sse_storeu_ps:

+ 9 - 9
lib/Transforms/Scalar/ScalarReplAggregatesHLSL.cpp

@@ -1517,10 +1517,10 @@ bool SROA_HLSL::performPromotion(Function &F) {
 bool SROA_HLSL::ShouldAttemptScalarRepl(AllocaInst *AI) {
   Type *T = AI->getAllocatedType();
   // promote every struct.
-  if (StructType *ST = dyn_cast<StructType>(T))
+  if (dyn_cast<StructType>(T))
     return true;
   // promote every array.
-  if (ArrayType *AT = dyn_cast<ArrayType>(T))
+  if (dyn_cast<ArrayType>(T))
     return true;
   return false;
 }
@@ -3412,9 +3412,9 @@ static Constant *GetEltInit(Type *Ty, Constant *Init, unsigned idx,
   if (isa<UndefValue>(Init))
     return UndefValue::get(EltTy);
 
-  if (StructType *ST = dyn_cast<StructType>(Ty)) {
+  if (dyn_cast<StructType>(Ty)) {
     return Init->getAggregateElement(idx);
-  } else if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
+  } else if (dyn_cast<VectorType>(Ty)) {
     return Init->getAggregateElement(idx);
   } else {
     ArrayType *AT = cast<ArrayType>(Ty);
@@ -3738,7 +3738,7 @@ void PointerStatus::analyzePointer(const Value *V, PointerStatus &PS,
       } else {
         PS.MarkAsStored();
       }
-    } else if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
+    } else if (dyn_cast<LoadInst>(U)) {
       PS.MarkAsLoaded();
     } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
       Function *F = CI->getCalledFunction();
@@ -4342,7 +4342,7 @@ bool SROA_Parameter_HLSL::hasDynamicVectorIndexing(Value *V) {
     if (!U->getType()->isPointerTy())
       continue;
 
-    if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
+    if (dyn_cast<GEPOperator>(U)) {
 
       gep_type_iterator GEPIt = gep_type_begin(U), E = gep_type_end(U);
 
@@ -5778,9 +5778,9 @@ static void CheckArgUsage(Value *V, bool &bLoad, bool &bStore) {
   if (bLoad && bStore)
     return;
   for (User *user : V->users()) {
-    if (LoadInst *LI = dyn_cast<LoadInst>(user)) {
+    if (dyn_cast<LoadInst>(user)) {
       bLoad = true;
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(user)) {
+    } else if (dyn_cast<StoreInst>(user)) {
       bStore = true;
     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(user)) {
       CheckArgUsage(GEP, bLoad, bStore);
@@ -6817,7 +6817,7 @@ void DynamicIndexingVectorToArray::ReplaceStaticIndexingOnVector(Value *V) {
 
 bool DynamicIndexingVectorToArray::needToLower(Value *V) {
   Type *Ty = V->getType()->getPointerElementType();
-  if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
+  if (dyn_cast<VectorType>(Ty)) {
     if (isa<GlobalVariable>(V) || ReplaceAllVectors) {
       return true;
     }

+ 1 - 1
tools/clang/lib/AST/ASTContextHLSL.cpp

@@ -1164,7 +1164,7 @@ static bool HasTessFactorSemanticRecurse(const ValueDecl *decl, QualType Ty) {
     return false;
   }
 
-  if (const clang::ArrayType *arrayTy = Ty->getAsArrayTypeUnsafe())
+  if (Ty->getAsArrayTypeUnsafe())
     return HasTessFactorSemantic(decl);
 
   return false;

+ 1 - 1
tools/clang/lib/AST/ItaniumMangle.cpp

@@ -933,7 +933,7 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
   //                     ::= <source-name>
   switch (Name.getNameKind()) {
   case DeclarationName::Identifier: {
-    if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+    if (Name.getAsIdentifierInfo()) {
       // We must avoid conflicts between internally- and externally-
       // linked variable and function declaration names in the same TU:
       //   void test() { extern void foo(); }

+ 5 - 5
tools/clang/lib/CodeGen/CGHLSLMS.cpp

@@ -3607,9 +3607,9 @@ static void SimplifyBitCast(BitCastOperator *BC, SmallInstSet &deadInsts) {
           I->dropAllReferences();
           deadInsts.insert(I);
         }
-    } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
+    } else if (dyn_cast<CallInst>(U)) {
       // Skip function call.
-    } else if (BitCastInst *Cast = dyn_cast<BitCastInst>(U)) {
+    } else if (dyn_cast<BitCastInst>(U)) {
       // Skip bitcast.
     } else {
       DXASSERT(0, "not support yet");
@@ -5098,7 +5098,7 @@ static void FlatConstToList(Constant *C, SmallVector<Constant *, 4> &EltValList,
       FlatConstToList(C->getAggregateElement(i), EltValList, EltTy, Types,
                       bDefaultRowMajor);
     }
-  } else if (llvm::StructType *ST = dyn_cast<llvm::StructType>(Ty)) {
+  } else if (dyn_cast<llvm::StructType>(Ty)) {
     RecordDecl *RD = Type->getAsStructureType()->getDecl();
     const CGRecordLayout &RL = Types.getCGRecordLayout(RD);
     // Take care base.
@@ -5488,7 +5488,7 @@ Value *CGMSHLSLRuntime::EmitHLSLLiteralCast(CodeGenFunction &CGF, Value *Src,
         return Builder.CreateFPTrunc(Src, DstTy);
       }
     }
-  } else if (UndefValue *UV = dyn_cast<UndefValue>(Src)) {
+  } else if (dyn_cast<UndefValue>(Src)) {
     return UndefValue::get(DstTy);
   } else {
     Instruction *I = cast<Instruction>(Src);
@@ -5543,7 +5543,7 @@ Value *CGMSHLSLRuntime::EmitHLSLLiteralCast(CodeGenFunction &CGF, Value *Src,
             CalcHLSLLiteralToLowestPrecision(Builder, BO, bSigned);
         if (!CastResult)
           return nullptr;
-        if (llvm::IntegerType *IT = dyn_cast<llvm::IntegerType>(DstTy)) {
+        if (dyn_cast<llvm::IntegerType>(DstTy)) {
           if (DstTy == CastResult->getType()) {
             return CastResult;
           } else {

+ 1 - 1
tools/clang/lib/CodeGen/CGLoopInfo.cpp

@@ -112,7 +112,7 @@ void LoopInfoStack::push(BasicBlock *Header,
   for (const auto *Attr : Attrs) {
     const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);
     // HLSL Change Begins
-    if (const HLSLLoopAttr *LoopAttr = dyn_cast<HLSLLoopAttr>(Attr)) {
+    if (dyn_cast<HLSLLoopAttr>(Attr)) {
       setHlslLoop(true);
     } else if (const HLSLUnrollAttr *UnrollAttr =
                    dyn_cast<HLSLUnrollAttr>(Attr)) {

+ 5 - 5
tools/clang/lib/SPIRV/SPIRVEmitter.cpp

@@ -752,9 +752,9 @@ void SPIRVEmitter::doStmt(const Stmt *stmt,
     doIfStmt(ifStmt, attrs);
   } else if (const auto *switchStmt = dyn_cast<SwitchStmt>(stmt)) {
     doSwitchStmt(switchStmt, attrs);
-  } else if (const auto *caseStmt = dyn_cast<CaseStmt>(stmt)) {
+  } else if (dyn_cast<CaseStmt>(stmt)) {
     processCaseStmtOrDefaultStmt(stmt);
-  } else if (const auto *defaultStmt = dyn_cast<DefaultStmt>(stmt)) {
+  } else if (dyn_cast<DefaultStmt>(stmt)) {
     processCaseStmtOrDefaultStmt(stmt);
   } else if (const auto *breakStmt = dyn_cast<BreakStmt>(stmt)) {
     doBreakStmt(breakStmt);
@@ -768,7 +768,7 @@ void SPIRVEmitter::doStmt(const Stmt *stmt,
     doWhileStmt(whileStmt, attrs);
   } else if (const auto *forStmt = dyn_cast<ForStmt>(stmt)) {
     doForStmt(forStmt, attrs);
-  } else if (const auto *nullStmt = dyn_cast<NullStmt>(stmt)) {
+  } else if (dyn_cast<NullStmt>(stmt)) {
     // For the null statement ";". We don't need to do anything.
   } else if (const auto *expr = dyn_cast<Expr>(stmt)) {
     // All cases for expressions used as statements
@@ -1141,7 +1141,7 @@ bool SPIRVEmitter::validateVKAttributes(const NamedDecl *decl) {
     }
   }
 
-  if (const auto *iaiAttr = decl->getAttr<VKInputAttachmentIndexAttr>()) {
+  if (decl->getAttr<VKInputAttachmentIndexAttr>()) {
     if (!shaderModel.IsPS()) {
       emitError("SubpassInput(MS) only allowed in pixel shader",
                 decl->getLocation());
@@ -9088,7 +9088,7 @@ bool SPIRVEmitter::processGeometryShaderAttributes(const FunctionDecl *decl,
 void SPIRVEmitter::processPixelShaderAttributes(const FunctionDecl *decl) {
   theBuilder.addExecutionMode(entryFunctionId,
                               spv::ExecutionMode::OriginUpperLeft, {});
-  if (auto *numThreadsAttr = decl->getAttr<HLSLEarlyDepthStencilAttr>()) {
+  if (decl->getAttr<HLSLEarlyDepthStencilAttr>()) {
     theBuilder.addExecutionMode(entryFunctionId,
                                 spv::ExecutionMode::EarlyFragmentTests, {});
   }

+ 1 - 1
tools/clang/lib/SPIRV/TypeTranslator.cpp

@@ -322,7 +322,7 @@ uint32_t TypeTranslator::getLocationCount(QualType type) {
            static_cast<uint32_t>(arrayType->getSize().getZExtValue());
 
   // Struct type
-  if (const auto *structType = type->getAs<RecordType>()) {
+  if (type->getAs<RecordType>()) {
     assert(false && "all structs should already be flattened");
     return 0;
   }