Ver Fonte

[linux-port] Eliminate warnings on release builds (#1361)

A number of warnings unique to release builds persist. Some of these
same errors were fixed earlier for debug builds.

DXASSERT_LOCALVAR still took a variadic macro on release builds.
Fixes 85 clang warnings

Expressions used only as conditionals for asserts were not properly
dummied out with (void) cast.
Fixes 7 clang warnings

Variables used only in asserts dummied out.
Fixes 27 clang warnings

Potential uninitialized uses of variables assigned in switches/ifs.
Fixes 5 clang warnings

Put braces around empty else statements.
Fixes 11 gcc warnings
Greg Roth há 7 anos atrás
pai
commit
ae615d47a8

+ 2 - 2
include/dxc/Support/Global.h

@@ -231,12 +231,12 @@ inline void OutputDebugFormatA(_In_ _Printf_format_string_ _Null_terminated_ con
 #define DXASSERT(exp, msg) _Analysis_assume_(exp)
 
 // DXASSERT_LOCALVAR is disabled in free builds, but we keep the local referenced to avoid a warning.
-#define DXASSERT_LOCALVAR(local, exp, s, ...) do { (void)(local); _Analysis_assume_(exp); } while (0)
+#define DXASSERT_LOCALVAR(local, exp, msg) do { (void)(local); _Analysis_assume_(exp); } while (0)
 
 // DXASSERT_NOMSG is disabled in free builds.
 #define DXASSERT_NOMSG(exp) _Analysis_assume_(exp)
 
 // DXVERIFY is patterned after NT_VERIFY and will evaluate the expression
-#define DXVERIFY_NOMSG(exp) do { (exp); _Analysis_assume_(exp); } while (0)
+#define DXVERIFY_NOMSG(exp) do { (void)(exp); _Analysis_assume_(exp); } while (0)
 
 #endif // DBG

+ 1 - 0
lib/HLSL/ComputeViewIdState.cpp

@@ -248,6 +248,7 @@ void DxilViewIdState::ComputeReachableFunctionsRec(CallGraph &CG, CallGraphNode
   if (F->empty()) return;
   auto itIns = FuncSet.emplace(F);
   DXASSERT_NOMSG(itIns.second);
+  (void)itIns;
   for (auto it = pNode->begin(), itEnd = pNode->end(); it != itEnd; ++it) {
     CallGraphNode *pSuccNode = it->second;
     ComputeReachableFunctionsRec(CG, pSuccNode, FuncSet);

+ 2 - 2
lib/HLSL/DxilDebugInstrumentation.cpp

@@ -473,7 +473,7 @@ void DxilDebugInstrumentation::addUAV(BuilderContext &BC)
 void DxilDebugInstrumentation::addInvocationSelectionProlog(BuilderContext &BC, SystemValueIndices SVIndices) {
   auto ShaderModel = BC.DM.GetShaderModel();
 
-  Value * ParameterTestResult;
+  Value * ParameterTestResult = nullptr;
   switch (ShaderModel->GetKind()) {
   case DXIL::ShaderKind::Pixel:
     ParameterTestResult = addPixelShaderProlog(BC, SVIndices);
@@ -583,7 +583,7 @@ void DxilDebugInstrumentation::addDebugEntryValue(BuilderContext &BC, Value * Th
     Function* StoreValue = BC.HlslOP->GetOpFunc(OP::OpCode::BufferStore, TheValue->getType()); // Type::getInt32Ty(BC.Ctx));
     Constant* StoreValueOpcode = BC.HlslOP->GetU32Const((unsigned)DXIL::OpCode::BufferStore);
     UndefValue* Undef32Arg = UndefValue::get(Type::getInt32Ty(BC.Ctx));
-    UndefValue* UndefArg;
+    UndefValue* UndefArg = nullptr;
     if (TheValueTypeID == Type::TypeID::IntegerTyID) {
         UndefArg = UndefValue::get(Type::getInt32Ty(BC.Ctx));
     }

+ 3 - 2
lib/HLSL/DxilGenerationPass.cpp

@@ -1693,8 +1693,9 @@ static void ReplaceResUseWithHandle(Instruction *Res, Value *Handle) {
     } else if (isa<CallInst>(I)) {
       if (I->getType() == HandleTy)
         I->replaceAllUsesWith(Handle);
-      else
+      else {
         DXASSERT(0, "must createHandle here");
+      }
     } else {
       DXASSERT(0, "should only used by load and createHandle");
     }
@@ -2079,7 +2080,7 @@ void DxilTranslateRawBuffer::ReplaceMinPrecisionRawBufferLoadByType(
           ArrayRef<unsigned> Indices = EV->getIndices();
           DXASSERT(Indices.size() == 1, "Otherwise we have wrong extract value.");
           Value *newEV = EVBuilder.CreateExtractValue(newCI, Indices);
-          Value *newTruncV;
+          Value *newTruncV = nullptr;
           if (4 == Indices[0]) { // Don't truncate status
             newTruncV = newEV;
           }

+ 7 - 4
lib/HLSL/HLMatrixLowerPass.cpp

@@ -1202,7 +1202,7 @@ void HLMatrixLowerPass::TranslateMatMajorCast(CallInst *matInst,
     Type *castTy = GetMatrixInfo(castInst->getType(), castCol, castRow);
     unsigned srcCol, srcRow;
     Type *srcTy = GetMatrixInfo(matInst->getType(), srcCol, srcRow);
-    DXASSERT_LOCALVAR((castTy, srcTy), srcTy == castTy, "type must match");
+    DXASSERT_LOCALVAR((castTy == srcTy), srcTy == castTy, "type must match");
     DXASSERT(castCol == srcRow && castRow == srcCol, "col row must match");
     col = srcCol;
     row = srcRow;
@@ -1339,8 +1339,9 @@ void HLMatrixLowerPass::TranslateMatCast(CallInst *matInst,
       TranslateMatMatCast(matInst, vecInst, castInst);
     } else if (FromMat)
       TranslateMatToOtherCast(matInst, vecInst, castInst);
-    else
+    else {
       DXASSERT(0, "Not translate as user of matInst");
+    }
   }
 }
 
@@ -1434,8 +1435,9 @@ void HLMatrixLowerPass::TranslateMatSubscript(Value *matInst, Value *vecInst,
           vecLd = Builder.CreateInsertElement(vecLd, val, shufMask[0]);
           Builder.CreateStore(vecLd, vecInst);
         }
-      } else
+      } else {
         DXASSERT(0, "matrix element should only used by load/store.");
+      }
       AddToDeadInsts(CallUser);
     }
   } else {
@@ -1525,8 +1527,9 @@ void HLMatrixLowerPass::TranslateMatSubscript(Value *matInst, Value *vecInst,
         Value *GEPOffset = HLMatrixLower::LowerGEPOnMatIndexListToIndex(GEP, idxList);
         Value *NewGEP = Builder.CreateGEP(vecInst, {zero, GEPOffset});
         GEP->replaceAllUsesWith(NewGEP);
-      } else
+      } else {
         DXASSERT(0, "matrix subscript should only used by load/store.");
+      }
       AddToDeadInsts(CallUser);
     }
   }

+ 3 - 2
lib/HLSL/HLOperationLower.cpp

@@ -552,7 +552,7 @@ Value *TrivialBarrier(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode,
   unsigned t = static_cast<unsigned>(DXIL::BarrierMode::SyncThreadGroup);
   // unsigned ut = static_cast<unsigned>(DXIL::BarrierMode::UAVFenceThreadGroup);
 
-  unsigned barrierMode;
+  unsigned barrierMode = 0;
   switch (IOP) {
   case IntrinsicOp::IOP_AllMemoryBarrier:
     barrierMode = uglobal | g;
@@ -3317,7 +3317,7 @@ void TranslateStore(DxilResource::Kind RK, Value *handle, Value *val,
                     Value *offset, IRBuilder<> &Builder, hlsl::OP *OP) {
   Type *Ty = val->getType();
 
-  OP::OpCode opcode;
+  OP::OpCode opcode = OP::OpCode::NumOpCodes;
   switch (RK) {
   case DxilResource::Kind::RawBuffer:
   case DxilResource::Kind::StructuredBuffer:
@@ -4222,6 +4222,7 @@ Value *TranslateProcessTessFactors(CallInst *CI, IntrinsicOp IOP, OP::OpCode opc
   Type *outFactorTy = unroundedInsideFactor->getType()->getPointerElementType();
   if (outFactorTy != clampedI->getType()) {
     DXASSERT(isQuad, "quad only write one channel of out factor");
+    (void)isQuad;
     clampedI = Builder.CreateExtractElement(clampedI, (uint64_t)0);
     // Splat clampedI to float2.
     clampedI = SplatToVector(clampedI, outFactorTy, Builder);

+ 9 - 5
lib/HLSL/HLSignatureLower.cpp

@@ -489,7 +489,7 @@ void replaceStWithStOutput(Function *stOutput, StoreInst *stInst,
   Value *val = stInst->getValueOperand();
 
   if (VectorType *VT = dyn_cast<VectorType>(val->getType())) {
-    DXASSERT(cols == VT->getNumElements(), "vec size must match");
+    DXASSERT_LOCALVAR(VT, cols == VT->getNumElements(), "vec size must match");
     for (unsigned col = 0; col < cols; col++) {
       Value *subVal = Builder.CreateExtractElement(val, col);
       Value *colIdx = Builder.getInt8(col);
@@ -817,14 +817,16 @@ void collectInputOutputAccessInfo(
             InputOutputAccessInfo info = {idxVal, CI, vertexID, vectorIdx};
             accessInfoList.push_back(info);
           }
-        } else
+        } else {
           DXASSERT(0, "input output should only used by ld/st");
+        }
       }
     } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
       InputOutputAccessInfo info = {constZero, CI};
       accessInfoList.push_back(info);
-    } else
+    } else {
       DXASSERT(0, "input output should only used by ld/st");
+    }
   }
 }
 
@@ -998,8 +1000,9 @@ void GenerateInputOutputUserCall(InputOutputAccessInfo &info, Value *undefVertex
       CI->eraseFromParent();
     } break;
     }
-  } else
+  } else {
     DXASSERT(0, "invalid operation on input output");
+  }
 }
 
 } // namespace
@@ -1352,8 +1355,9 @@ void HLSignatureLower::GenerateDxilPatchConstantFunctionInputs() {
           Value *args[] = {OpArg, inputID, info.idx, info.vectorIdx,
                            info.vertexID};
           replaceLdWithLdInput(dxilLdFunc, ldInst, cols, args, bI1Cast);
-        } else
+        } else {
           DXASSERT(0, "input should only be ld");
+        }
       }
     }
   }

+ 3 - 2
lib/Transforms/Scalar/IndVarSimplify.cpp

@@ -83,11 +83,10 @@ static cl::opt<ReplaceExitVal> ReplaceExitValue(
                           "always replace exit value whenever possible"),
                clEnumValEnd));
 #else
-static const bool VerifyIndvars = false;
 static const bool ReduceLiveIVs = false;
 enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, AlwaysRepl };
 static const ReplaceExitVal ReplaceExitValue = OnlyCheapRepl;
-#endif
+#endif // HLSL Change Ends - option pending
 
 namespace {
 struct RewritePhi;
@@ -2090,6 +2089,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
   assert(L->isLCSSAForm(*DT) &&
          "Indvars did not leave the loop in lcssa form!");
 
+#if 0 // HLSL Change Starts - option pending
   // Verify that LFTR, and any other change have not interfered with SCEV's
   // ability to compute trip count.
 #ifndef NDEBUG
@@ -2106,6 +2106,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
     assert(BackedgeTakenCount == NewBECount && "indvars must preserve SCEV");
   }
 #endif
+#endif // HLSL Change Ends - option pending
 
   return Changed;
 }

+ 1 - 0
lib/Transforms/Scalar/PlaceSafepoints.cpp

@@ -431,6 +431,7 @@ static Instruction *findLocationForEntrySafepoint(Function &F,
   auto nextInstruction = [&hasNextInstruction](Instruction *I) {
     assert(hasNextInstruction(I) &&
            "first check if there is a next instruction!");
+    (void)hasNextInstruction; // HLSL Change - unused var
     if (I->isTerminator()) {
       return I->getParent()->getUniqueSuccessor()->begin();
     } else {

+ 3 - 1
lib/Transforms/Scalar/SROA.cpp

@@ -94,7 +94,6 @@ static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
                                         cl::Hidden);
 #else
 static const bool ForceSSAUpdater = false;
-static const bool SROARandomShuffleSlices = false;
 static const bool SROAStrictInbounds = false;
 #endif // HLSL Change Ends
 
@@ -1025,12 +1024,14 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
                               }),
                Slices.end());
 
+#if 0 // HLSL Change Starts - option pending
 #if __cplusplus >= 201103L && !defined(NDEBUG)
   if (SROARandomShuffleSlices) {
     std::mt19937 MT(static_cast<unsigned>(sys::TimeValue::now().msec()));
     std::shuffle(Slices.begin(), Slices.end(), MT);
   }
 #endif
+#endif // HLSL Change Ends - option pending
 
   // Sort the uses. This arranges for the offsets to be in ascending order,
   // and the sizes to be in descending order.
@@ -2072,6 +2073,7 @@ static VectorType *isVectorPromotionViable(AllocaSlices::Partition &P,
              "All non-integer types eliminated!");
       assert(LHSTy->getElementType()->isIntegerTy() &&
              "All non-integer types eliminated!");
+      (void)DL;// HLSL Change - unused var
       return RHSTy->getNumElements() < LHSTy->getNumElements();
     };
     std::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes);

+ 1 - 1
lib/Transforms/Scalar/ScalarReplAggregatesHLSL.cpp

@@ -6188,7 +6188,7 @@ void SROA_Parameter_HLSL::createFlattenedFunction(Function *F) {
   }
   flatF->setAttributes(flatAS);
 
-  DXASSERT(flatF->arg_size() == (extraParamSize + FlatParamAnnotationList.size()), "parameter count mismatch");
+  DXASSERT_LOCALVAR(extraParamSize, flatF->arg_size() == (extraParamSize + FlatParamAnnotationList.size()), "parameter count mismatch");
   // ShaderProps.
   if (m_pHLModule->HasDxilFunctionProps(F)) {
     DxilFunctionProps &funcProps = m_pHLModule->GetDxilFunctionProps(F);

+ 1 - 0
tools/clang/lib/CodeGen/CGCall.cpp

@@ -499,6 +499,7 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
   assert(std::all_of(argTypes.begin(), argTypes.end(),
                      isCanonicalAsParam)); // HLSL Change - skip array when
                                            // check isCanonicalAsParam
+  (void)isCanonicalAsParam;
 
   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
 

+ 6 - 3
tools/clang/lib/CodeGen/CGHLSLMS.cpp

@@ -848,9 +848,10 @@ void CGMSHLSLRuntime::ConstructFieldAttributedAnnotation(
     if (const BuiltinType *BTy =
             dyn_cast<BuiltinType>(type->getCanonicalTypeInternal()))
       fieldAnnotation.SetCompType(BuiltinTyToCompTy(BTy, bSNorm, bUNorm));
-  } else
+  } else {
     DXASSERT(!bSNorm && !bUNorm,
              "snorm/unorm on invalid type, validate at handleHLSLTypeAttr");
+  }
 }
 
 static void ConstructFieldInterpolation(DxilFieldAnnotation &fieldAnnotation,
@@ -1050,8 +1051,9 @@ unsigned CGMSHLSLRuntime::AddTypeAnnotation(QualType Ty,
     else if (Ty->isIncompleteArrayType()) {
       const IncompleteArrayType *arrayTy = CGM.getContext().getAsIncompleteArrayType(Ty);
       arrayElementTy = arrayTy->getElementType();
-    } else
+    } else {
       DXASSERT(0, "Must array type here");
+    }
 
     unsigned elementSize = AddTypeAnnotation(arrayElementTy, dxilTypeSys, arrayEltSize);
     // Only set arrayEltSize once.
@@ -4063,8 +4065,9 @@ bool BuildImmInit(Function *Ctor) {
         if (GlobalVariable *pGV = dyn_cast<GlobalVariable>(Ptr)) {
           if (GV == nullptr)
             GV = pGV;
-          else
+          else {
             DXASSERT(GV == pGV, "else pointer mismatch");
+          }
         }
       }
     } else {

+ 3 - 2
tools/clang/lib/Frontend/FrontendActions.cpp

@@ -728,12 +728,13 @@ void HLSLRootSignatureAction::ExecuteAction() {
   }
   else {
     assert(rootSigMinor == 1 &&
-      "else CGMSHLSLRuntime Constructor needs to be updated");
+      "else HLSLRootSignatureAction Constructor needs to be updated");
     rootSigVer = hlsl::DxilRootSignatureVersion::Version_1_1;
   }
 
   assert(rootSigMajor == 1 &&
-           "else CGMSHLSLRuntime Constructor needs to be updated");
+           "else HLSLRootSignatureAction Constructor needs to be updated");
+  (void)rootSigMajor;
 
   // Try to find HLSLRootSignatureMacro in macros.
   MacroInfo *rootSigMacro = hlsl::MacroExpander::FindMacroInfo(PP, HLSLRootSignatureMacro);

+ 19 - 4
tools/clang/lib/SPIRV/SPIRVEmitter.cpp

@@ -926,6 +926,7 @@ SpirvEvalInfo SPIRVEmitter::loadIfGLValue(const Expr *expr,
         const bool isMat =
             TypeTranslator::isMxNMatrix(exprType, nullptr, &numRows, &numCols);
         assert(isMat);
+        (void)isMat;
         const auto uintRowQualType =
             astContext.getExtVectorType(uintType, numCols);
         const auto uintRowQualTypeId =
@@ -2253,6 +2254,7 @@ SpirvEvalInfo SPIRVEmitter::doCastExpr(const CastExpr *expr) {
         TypeTranslator::isMxNMatrix(toType, &elemType, &rowCount, &colCount);
 
     assert(isMat && rowCount == 2 && colCount == 2);
+    (void)isMat;
 
     uint32_t vec2Type =
         theBuilder.getVecType(typeTranslator.translateType(elemType), 2);
@@ -3174,15 +3176,14 @@ SpirvEvalInfo SPIRVEmitter::processByteAddressBufferLoadStore(
     const CXXMemberCallExpr *expr, uint32_t numWords, bool doStore) {
   uint32_t resultId = 0;
   const auto object = expr->getImplicitObjectArgument();
-  const auto type = object->getType();
   const auto objectInfo = loadIfAliasVarRef(object);
   assert(numWords >= 1 && numWords <= 4);
   if (doStore) {
-    assert(typeTranslator.isRWByteAddressBuffer(type));
+    assert(typeTranslator.isRWByteAddressBuffer(object->getType()));
     assert(expr->getNumArgs() == 2);
   } else {
-    assert(typeTranslator.isRWByteAddressBuffer(type) ||
-           typeTranslator.isByteAddressBuffer(type));
+    assert(typeTranslator.isRWByteAddressBuffer(object->getType()) ||
+           typeTranslator.isByteAddressBuffer(object->getType()));
     if (expr->getNumArgs() == 2) {
       emitError(
           "(RW)ByteAddressBuffer::Load(in address, out status) not supported",
@@ -5628,6 +5629,7 @@ SPIRVEmitter::tryToAssignToVectorElements(const Expr *lhs,
           baseTypeId, oldVec, {accessor.Swz0}, rhs);
       const auto result = tryToAssignToRWBufferRWTexture(base, newVec);
       assert(result); // Definitely RWBuffer/RWTexture assignment
+      (void)result;
       return rhs;     // TODO: incorrect for compound assignments
     } else {
       // Assigning to one normal vector component. Nothing special, just fall
@@ -6115,6 +6117,7 @@ uint32_t SPIRVEmitter::castToInt(uint32_t fromVal, QualType fromType,
       const bool isMat = TypeTranslator::isMxNMatrix(toIntType, &toElemType,
                                                      &toNumRows, &toNumCols);
       assert(isMat && numRows == toNumRows && numCols == toNumCols);
+      (void)isMat;
       (void)toNumRows;
       (void)toNumCols;
 
@@ -6215,6 +6218,7 @@ uint32_t SPIRVEmitter::castToFloat(uint32_t fromVal, QualType fromType,
       const auto isMat = TypeTranslator::isMxNMatrix(toFloatType, &toElemType,
                                                      &toNumRows, &toNumCols);
       assert(isMat && numRows == toNumRows && numCols == toNumCols);
+      (void)isMat;
       (void)toNumRows;
       (void)toNumCols;
 
@@ -7602,6 +7606,7 @@ uint32_t SPIRVEmitter::processNonFpMatrixTranspose(QualType matType,
   const bool isMat =
       TypeTranslator::isMxNMatrix(matType, &elemType, &numRows, &numCols);
   assert(isMat && !elemType->isFloatingType());
+  (void)isMat;
 
   const auto colQualType = astContext.getExtVectorType(elemType, numRows);
   const uint32_t colTypeId = typeTranslator.translateType(colQualType);
@@ -7663,6 +7668,7 @@ uint32_t SPIRVEmitter::processNonFpScalarTimesMatrix(QualType scalarType,
       TypeTranslator::isMxNMatrix(matrixType, &elemType, &numRows, &numCols);
   assert(isMat);
   assert(typeTranslator.isSameType(scalarType, elemType));
+  (void)isMat;
 
   // We need to multiply the scalar by each vector of the matrix.
   // The front-end guarantees that the scalar and matrix element type are
@@ -7703,6 +7709,8 @@ uint32_t SPIRVEmitter::processNonFpVectorTimesMatrix(QualType vecType,
   assert(isVec);
   assert(isMat);
   assert(vecSize == numRows);
+  (void)isVec;
+  (void)isMat;
 
   // When processing vector times matrix, the vector is a row vector, and it
   // should be multiplied by the matrix *columns*. The most efficient way to
@@ -7740,6 +7748,8 @@ uint32_t SPIRVEmitter::processNonFpMatrixTimesVector(QualType matType,
   assert(isVec);
   assert(isMat);
   assert(vecSize == numCols);
+  (void)isVec;
+  (void)isMat;
 
   // When processing matrix times vector, the vector is a column vector. So we
   // simply get each row of the matrix and perform a dot product with the
@@ -7773,6 +7783,8 @@ uint32_t SPIRVEmitter::processNonFpMatrixTimesMatrix(QualType lhsType,
   assert(typeTranslator.isSameType(lhsElemType, rhsElemType));
   assert(lhsIsMat && rhsIsMat);
   assert(lhsNumCols == rhsNumRows);
+  (void)rhsIsMat;
+  (void)lhsIsMat;
 
   const uint32_t rhsTranspose = processNonFpMatrixTranspose(rhsType, rhsId);
 
@@ -7972,6 +7984,9 @@ uint32_t SPIRVEmitter::processIntrinsicDot(const CallExpr *callExpr) {
   assert(vec0ComponentType == vec1ComponentType);
   assert(vec0Size == vec1Size);
   assert(vec0Size >= 1 && vec0Size <= 4);
+  (void)vec0ComponentType;
+  (void)vec1ComponentType;
+  (void)vec1Size;
 
   // According to HLSL reference, the dot function only works on integers
   // and floats.

+ 1 - 0
tools/clang/lib/SPIRV/TypeTranslator.cpp

@@ -426,6 +426,7 @@ uint32_t TypeTranslator::getElementSpirvBitwidth(QualType type) {
   QualType ty = {};
   const bool isScalar = isScalarType(type, &ty);
   assert(isScalar);
+  (void)isScalar;
   if (const auto *builtinType = ty->getAs<BuiltinType>()) {
     switch (builtinType->getKind()) {
     case BuiltinType::Bool:

+ 3 - 0
tools/clang/lib/Sema/SemaDecl.cpp

@@ -3005,6 +3005,9 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
     assert(!(MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) &&
              (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) &&
            "else fn with no prototype found");
+    (void)OldFuncType;
+    (void)NewFuncType;
+    (void)OldProto;
 #else
     if (MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) &&
         (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {