@@ -130,8 +130,9 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn
     }

     // Convert the children's type shape to be compatible.
-    right = addShapeConversion(op, left->getType(), right);
-    left = addShapeConversion(op, right->getType(), left);
+    addBiShapeConversion(op, left, right);
+    if (left == nullptr || right == nullptr)
+        return nullptr;

     //
     // Need a new node holding things together. Make
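This hunk replaces the two one-directional conversions with a single bidirectional policy call. A minimal standalone sketch of the resulting shape policy, assuming vector size 1 stands for "scalar or vec1" (plain C++, not the glslang API):

```cpp
#include <cassert>

// Minimal sketch: the shape a binary op's operands end up with when either
// side may be smeared; not glslang code.
int resultVectorSize(int lhsSize, int rhsSize)
{
    if (lhsSize == 1) return rhsSize; // smear the scalar side up
    if (rhsSize == 1) return lhsSize;
    return lhsSize;                   // otherwise sizes must already match
}

int main()
{
    assert(resultVectorSize(1, 4) == 4); // scalar OP float4 -> 4 components
    assert(resultVectorSize(3, 1) == 3);
    assert(resultVectorSize(2, 2) == 2);
}
```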
@@ -147,8 +148,8 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn
     // If they are both (non-specialization) constants, they must be folded.
     // (Unless it's the sequence (comma) operator, but that's handled in addComma().)
     //
-    TIntermConstantUnion *leftTempConstant = left->getAsConstantUnion();
-    TIntermConstantUnion *rightTempConstant = right->getAsConstantUnion();
+    TIntermConstantUnion *leftTempConstant = node->getLeft()->getAsConstantUnion();
+    TIntermConstantUnion *rightTempConstant = node->getRight()->getAsConstantUnion();
     if (leftTempConstant && rightTempConstant) {
         TIntermTyped* folded = leftTempConstant->fold(node->getOp(), rightTempConstant);
         if (folded)
@@ -157,7 +158,7 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn

     // If can propagate spec-constantness and if the operation is an allowed
     // specialization-constant operation, make a spec-constant.
-    if (specConstantPropagates(*left, *right) && isSpecializationOperation(*node))
+    if (specConstantPropagates(*node->getLeft(), *node->getRight()) && isSpecializationOperation(*node))
         node->getWritableType().getQualifier().makeSpecConstant();

     return node;
@@ -238,7 +239,7 @@ TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TInterm
         return nullptr;

     // convert shape
-    right = addShapeConversion(op, left->getType(), right);
+    right = addUniShapeConversion(op, left->getType(), right);

     // build the node
     TIntermBinary* node = addBinaryNode(op, left, right, loc);
@@ -307,6 +308,10 @@ TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, TSo
     case EOpConstructUint: newType = EbtUint; break;
     case EOpConstructInt64: newType = EbtInt64; break;
     case EOpConstructUint64: newType = EbtUint64; break;
+#ifdef AMD_EXTENSIONS
+    case EOpConstructInt16: newType = EbtInt16; break;
+    case EOpConstructUint16: newType = EbtUint16; break;
+#endif
     case EOpConstructBool: newType = EbtBool; break;
     case EOpConstructFloat: newType = EbtFloat; break;
     case EOpConstructDouble: newType = EbtDouble; break;
@@ -335,6 +340,10 @@ TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, TSo
     case EOpConstructUint:
     case EOpConstructInt64:
     case EOpConstructUint64:
+#ifdef AMD_EXTENSIONS
+    case EOpConstructInt16:
+    case EOpConstructUint16:
+#endif
     case EOpConstructBool:
     case EOpConstructFloat:
     case EOpConstructDouble:
@@ -356,12 +365,12 @@ TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, TSo
     node->updatePrecision();

     // If it's a (non-specialization) constant, it must be folded.
-    if (child->getAsConstantUnion())
-        return child->getAsConstantUnion()->fold(op, node->getType());
+    if (node->getOperand()->getAsConstantUnion())
+        return node->getOperand()->getAsConstantUnion()->fold(op, node->getType());

     // If it's a specialization constant, the result is too,
     // if the operation is allowed for specialization constants.
-    if (child->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node))
+    if (node->getOperand()->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node))
         node->getWritableType().getQualifier().makeSpecConstant();

     return node;
@@ -527,6 +536,14 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
     case EOpConstructUint64:
         promoteTo = EbtUint64;
         break;
+#ifdef AMD_EXTENSIONS
+    case EOpConstructInt16:
+        promoteTo = EbtInt16;
+        break;
+    case EOpConstructUint16:
+        promoteTo = EbtUint16;
+        break;
+#endif

     //
     // List all the binary ops that can implicitly convert one operand to the other's type;
@@ -615,15 +632,26 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
     case EOpRightShiftAssign:
         if ((type.getBasicType() == EbtInt ||
             type.getBasicType() == EbtUint ||
+#ifdef AMD_EXTENSIONS
+            type.getBasicType() == EbtInt16 ||
+            type.getBasicType() == EbtUint16 ||
+#endif
             type.getBasicType() == EbtInt64 ||
             type.getBasicType() == EbtUint64) &&
             (node->getType().getBasicType() == EbtInt ||
             node->getType().getBasicType() == EbtUint ||
+#ifdef AMD_EXTENSIONS
+            node->getType().getBasicType() == EbtInt16 ||
+            node->getType().getBasicType() == EbtUint16 ||
+#endif
             node->getType().getBasicType() == EbtInt64 ||
             node->getType().getBasicType() == EbtUint64))

             return node;
-        else
+        else if (source == EShSourceHlsl && node->getType().getBasicType() == EbtBool) {
+            promoteTo = type.getBasicType();
+            break;
+        } else
            return nullptr;

    default:
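The shift case above now lets an HLSL bool operand fall through to a numeric promotion instead of failing. A hedged sketch of the value semantics this enables, using plain C++ integral promotion as a stand-in for the inserted conversion node:

```cpp
#include <cassert>

int main()
{
    bool b = true;
    int shifted = 1 << b; // bool promotes to int 1, so this is 1 << 1
    assert(shifted == 2);
}
```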
@@ -659,6 +687,10 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
 #endif
         case EbtInt64: newOp = EOpConvInt64ToDouble; break;
         case EbtUint64: newOp = EOpConvUint64ToDouble; break;
+#ifdef AMD_EXTENSIONS
+        case EbtInt16: newOp = EOpConvInt16ToDouble; break;
+        case EbtUint16: newOp = EOpConvUint16ToDouble; break;
+#endif
         default:
             return nullptr;
         }
@@ -674,6 +706,10 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
 #endif
         case EbtInt64: newOp = EOpConvInt64ToFloat; break;
         case EbtUint64: newOp = EOpConvUint64ToFloat; break;
+#ifdef AMD_EXTENSIONS
+        case EbtInt16: newOp = EOpConvInt16ToFloat; break;
+        case EbtUint16: newOp = EOpConvUint16ToFloat; break;
+#endif
         default:
             return nullptr;
         }
@@ -688,6 +724,8 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
         case EbtDouble: newOp = EOpConvDoubleToFloat16; break;
         case EbtInt64: newOp = EOpConvInt64ToFloat16; break;
         case EbtUint64: newOp = EOpConvUint64ToFloat16; break;
+        case EbtInt16: newOp = EOpConvInt16ToFloat16; break;
+        case EbtUint16: newOp = EOpConvUint16ToFloat16; break;
         default:
             return nullptr;
         }
@@ -704,6 +742,10 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
 #endif
         case EbtInt64: newOp = EOpConvInt64ToBool; break;
         case EbtUint64: newOp = EOpConvUint64ToBool; break;
+#ifdef AMD_EXTENSIONS
+        case EbtInt16: newOp = EOpConvInt16ToBool; break;
+        case EbtUint16: newOp = EOpConvUint16ToBool; break;
+#endif
         default:
             return nullptr;
         }
@@ -719,6 +761,10 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
 #endif
         case EbtInt64: newOp = EOpConvInt64ToInt; break;
         case EbtUint64: newOp = EOpConvUint64ToInt; break;
+#ifdef AMD_EXTENSIONS
+        case EbtInt16: newOp = EOpConvInt16ToInt; break;
+        case EbtUint16: newOp = EOpConvUint16ToInt; break;
+#endif
         default:
             return nullptr;
         }
@@ -734,6 +780,10 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
 #endif
         case EbtInt64: newOp = EOpConvInt64ToUint; break;
         case EbtUint64: newOp = EOpConvUint64ToUint; break;
+#ifdef AMD_EXTENSIONS
+        case EbtInt16: newOp = EOpConvInt16ToUint; break;
+        case EbtUint16: newOp = EOpConvUint16ToUint; break;
+#endif
         default:
             return nullptr;
         }
@@ -749,6 +799,10 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
         case EbtFloat16: newOp = EOpConvFloat16ToInt64; break;
 #endif
         case EbtUint64: newOp = EOpConvUint64ToInt64; break;
+#ifdef AMD_EXTENSIONS
+        case EbtInt16: newOp = EOpConvInt16ToInt64; break;
+        case EbtUint16: newOp = EOpConvUint16ToInt64; break;
+#endif
         default:
             return nullptr;
         }
@@ -764,10 +818,46 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
         case EbtFloat16: newOp = EOpConvFloat16ToUint64; break;
 #endif
         case EbtInt64: newOp = EOpConvInt64ToUint64; break;
+#ifdef AMD_EXTENSIONS
+        case EbtInt16: newOp = EOpConvInt16ToUint64; break;
+        case EbtUint16: newOp = EOpConvUint16ToUint64; break;
+#endif
+        default:
+            return nullptr;
+        }
+        break;
+#ifdef AMD_EXTENSIONS
+    case EbtInt16:
+        switch (node->getBasicType()) {
+        case EbtInt: newOp = EOpConvIntToInt16; break;
+        case EbtUint: newOp = EOpConvUintToInt16; break;
+        case EbtBool: newOp = EOpConvBoolToInt16; break;
+        case EbtFloat: newOp = EOpConvFloatToInt16; break;
+        case EbtDouble: newOp = EOpConvDoubleToInt16; break;
+        case EbtFloat16: newOp = EOpConvFloat16ToInt16; break;
+        case EbtInt64: newOp = EOpConvInt64ToInt16; break;
+        case EbtUint64: newOp = EOpConvUint64ToInt16; break;
+        case EbtUint16: newOp = EOpConvUint16ToInt16; break;
+        default:
+            return nullptr;
+        }
+        break;
+    case EbtUint16:
+        switch (node->getBasicType()) {
+        case EbtInt: newOp = EOpConvIntToUint16; break;
+        case EbtUint: newOp = EOpConvUintToUint16; break;
+        case EbtBool: newOp = EOpConvBoolToUint16; break;
+        case EbtFloat: newOp = EOpConvFloatToUint16; break;
+        case EbtDouble: newOp = EOpConvDoubleToUint16; break;
+        case EbtFloat16: newOp = EOpConvFloat16ToUint16; break;
+        case EbtInt64: newOp = EOpConvInt64ToUint16; break;
+        case EbtUint64: newOp = EOpConvUint64ToUint16; break;
+        case EbtInt16: newOp = EOpConvInt16ToUint16; break;
         default:
             return nullptr;
         }
         break;
+#endif
     default:
         return nullptr;
     }
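The pattern in these hunks is mechanical: for each promotion target, an inner switch on the source basic type picks a distinct EOpConv<From>To<To> operator, or fails with nullptr. A small sketch of that two-level dispatch, with hypothetical stand-in enums rather than glslang's:

```cpp
#include <cassert>

enum Basic { Int, Uint, Int16, Uint16 };
enum ConvOp { NoConv, IntToInt16, UintToInt16, Int16ToInt, Uint16ToInt };

// Two-level (to, from) dispatch; the real code returns nullptr where this
// sketch returns NoConv.
ConvOp pickConversion(Basic to, Basic from)
{
    switch (to) {
    case Int16:
        switch (from) {
        case Int:  return IntToInt16;
        case Uint: return UintToInt16;
        default:   return NoConv;
        }
    case Int:
        switch (from) {
        case Int16:  return Int16ToInt;
        case Uint16: return Uint16ToInt;
        default:     return NoConv;
        }
    default:
        return NoConv;
    }
}

int main()
{
    assert(pickConversion(Int16, Int) == IntToInt16);
    assert(pickConversion(Int, Uint16) == Uint16ToInt);
    assert(pickConversion(Uint, Uint) == NoConv);
}
```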
@@ -785,7 +875,10 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
 }

 // Convert the node's shape of type for the given type, as allowed by the
-// operation involved: 'op'.
+// operation involved: 'op'. This is for situations where there is only one
+// direction to consider doing the shape conversion.
+//
+// This implements policy; it calls addShapeConversion() for the mechanism.
 //
 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
 // for GLSL. Bad shapes are caught in conversion or promotion.
@@ -793,7 +886,7 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
 // Return 'node' if no conversion was done. Promotion handles final shape
 // checking.
 //
-TIntermTyped* TIntermediate::addShapeConversion(TOperator op, const TType& type, TIntermTyped* node)
+TIntermTyped* TIntermediate::addUniShapeConversion(TOperator op, const TType& type, TIntermTyped* node)
 {
     // some source languages don't do this
     switch (source) {
@@ -806,23 +899,142 @@ TIntermTyped* TIntermediate::addShapeConversion(TOperator op, const TType& type,

     // some operations don't do this
     switch (op) {
+    case EOpFunctionCall:
+    case EOpReturn:
+        break;
+
+    case EOpMulAssign:
+        // want to support vector *= scalar native ops in AST and lower, not smear, similarly for
+        // matrix *= scalar, etc.
+
+    case EOpAddAssign:
+    case EOpSubAssign:
+    case EOpDivAssign:
+    case EOpAndAssign:
+    case EOpInclusiveOrAssign:
+    case EOpExclusiveOrAssign:
+    case EOpRightShiftAssign:
+    case EOpLeftShiftAssign:
+        if (node->getVectorSize() == 1)
+            return node;
+        break;
+
     case EOpAssign:
+        break;
+
+    case EOpMix:
+        break;
+
+    default:
+        return node;
+    }
+
+    return addShapeConversion(type, node);
+}
+
+// Convert the nodes' shapes to be compatible for the operation 'op'.
+//
+// This implements policy; it calls addShapeConversion() for the mechanism.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+void TIntermediate::addBiShapeConversion(TOperator op, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode)
+{
+    // some source languages don't do this
+    switch (source) {
+    case EShSourceHlsl:
+        break;
+    case EShSourceGlsl:
+    default:
+        return;
+    }
+
+    // some operations don't do this
+    // 'break' will mean attempt bidirectional conversion
+    switch (op) {
+    case EOpMulAssign:
+    case EOpAssign:
+    case EOpAddAssign:
+    case EOpSubAssign:
+    case EOpDivAssign:
+    case EOpAndAssign:
+    case EOpInclusiveOrAssign:
+    case EOpExclusiveOrAssign:
+    case EOpRightShiftAssign:
+    case EOpLeftShiftAssign:
+        // switch to unidirectional conversion (the lhs can't change)
+        rhsNode = addUniShapeConversion(op, lhsNode->getType(), rhsNode);
+        return;
+
+    case EOpAdd:
+    case EOpSub:
+    case EOpMul:
+    case EOpDiv:
+        // want to support vector * scalar native ops in AST and lower, not smear, similarly for
+        // matrix * vector, etc.
+        if (lhsNode->getVectorSize() == 1 || rhsNode->getVectorSize() == 1)
+            return;
+        break;
+
+    case EOpRightShift:
+    case EOpLeftShift:
+        // can natively support the right operand being a scalar and the left a vector,
+        // but not the reverse
+        if (rhsNode->getVectorSize() == 1)
+            return;
+        break;
+
     case EOpLessThan:
     case EOpGreaterThan:
     case EOpLessThanEqual:
     case EOpGreaterThanEqual:
+
     case EOpEqual:
     case EOpNotEqual:
-    case EOpFunctionCall:
-    case EOpReturn:
+
     case EOpLogicalAnd:
     case EOpLogicalOr:
     case EOpLogicalXor:
+
+    case EOpAnd:
+    case EOpInclusiveOr:
+    case EOpExclusiveOr:
+
+    case EOpMix:
         break;
+
     default:
-        return node;
+        return;
     }

+    // Do bidirectional conversions
+    if (lhsNode->getType().isScalarOrVec1() || rhsNode->getType().isScalarOrVec1()) {
+        if (lhsNode->getType().isScalarOrVec1())
+            lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
+        else
+            rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
+    }
+    lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
+    rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
+}
+
+// Convert the node's shape to the given type's shape. It's not necessarily
+// an error if they are different and not converted, as some operations accept
+// mixed types. Promotion will do final shape checking.
+//
+// If there is a chance of two nodes, with conversions possible in each direction,
+// the policy for what to ask for must be in the caller; this will do what is asked.
+//
+// Return 'node' if no conversion was done. Promotion handles final shape
+// checking.
+//
+TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped* node)
+{
+    // no conversion needed
+    if (node->getType() == type)
+        return node;
+
     // structures and arrays don't change shape, either to or from
     if (node->getType().isStruct() || node->getType().isArray() ||
         type.isStruct() || type.isArray())
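addUniShapeConversion() and addBiShapeConversion() are pure policy; both defer the actual node rewriting to addShapeConversion(). A sketch of the policy classification, simplified by collapsing the op categories into flags (an assumption of this sketch, not glslang's signatures):

```cpp
#include <cassert>

enum class ShapePolicy { Keep, RhsToLhs, Bidirectional };

// Simplified model of addBiShapeConversion()'s switch: assignments may only
// reshape the rhs; arithmetic with a scalar operand is kept native (no
// smear); comparisons, logical/bitwise ops, and mix convert both ways.
ShapePolicy classify(bool isAssign, bool isArithmetic, bool hasScalarOperand)
{
    if (isAssign)
        return ShapePolicy::RhsToLhs;
    if (isArithmetic && hasScalarOperand)
        return ShapePolicy::Keep;
    return ShapePolicy::Bidirectional;
}

int main()
{
    assert(classify(true,  false, false) == ShapePolicy::RhsToLhs);
    assert(classify(false, true,  true)  == ShapePolicy::Keep);
    assert(classify(false, false, false) == ShapePolicy::Bidirectional);
}
```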
@@ -831,12 +1043,12 @@ TIntermTyped* TIntermediate::addShapeConversion(TOperator op, const TType& type,
     // The new node that handles the conversion
     TOperator constructorOp = mapTypeToConstructorOp(type);

-    // scalar -> smeared -> vector, or
-    // vec1 -> scalar, or
-    // bigger vector -> smaller vector or scalar
-    if ((type.isVector() && node->getType().isScalar()) ||
-        (node->getType().isVector() && node->getVectorSize() == 1 && type.isScalar()) ||
-        (node->getVectorSize() > type.getVectorSize() && type.isVector()))
+    // scalar -> vector or vec1 -> vector or
+    // vector -> scalar or
+    // bigger vector -> smaller vector
+    if ((node->getType().isScalarOrVec1() && type.isVector()) ||
+        (node->getType().isVector() && type.isScalar()) ||
+        (node->isVector() && type.isVector() && node->getVectorSize() > type.getVectorSize()))
         return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());

     return node;
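The mechanism itself only ever wraps the node in a constructor of the target type; the admitted cases are smear (scalar or vec1 up to a vector), conversion down to a scalar, and truncation to a smaller vector. A value-level sketch with plain C++ arrays standing in for AST vectors:

```cpp
#include <array>
#include <cassert>

// scalar -> vec3: smear the one value across all components
std::array<float, 3> smear(float s) { return {s, s, s}; }

// vec4 -> vec2: keep the leading components, drop the rest
std::array<float, 2> truncate(const std::array<float, 4>& v) { return {v[0], v[1]}; }

int main()
{
    assert(smear(2.0f)[2] == 2.0f);
    assert(truncate({1.0f, 2.0f, 3.0f, 4.0f})[1] == 2.0f);
}
```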
@@ -894,6 +1106,10 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
     case EbtUint:
     case EbtInt64:
     case EbtUint64:
+#ifdef AMD_EXTENSIONS
+    case EbtInt16:
+    case EbtUint16:
+#endif
     case EbtFloat:
     case EbtDouble:
 #ifdef AMD_EXTENSIONS
@@ -907,6 +1123,10 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
         switch (from) {
         case EbtInt:
         case EbtUint:
+#ifdef AMD_EXTENSIONS
+        case EbtInt16:
+        case EbtUint16:
+#endif
         case EbtFloat:
 #ifdef AMD_EXTENSIONS
         case EbtFloat16:
@@ -920,8 +1140,12 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
     case EbtUint:
         switch (from) {
         case EbtInt:
-            return version >= 400;
+            return version >= 400 || (source == EShSourceHlsl);
         case EbtUint:
+#ifdef AMD_EXTENSIONS
+        case EbtInt16:
+        case EbtUint16:
+#endif
             return true;
         case EbtBool:
             return (source == EShSourceHlsl);
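The int -> uint change is the visible HLSL relaxation here: GLSL only allows it from version 400 on, while HLSL input always allows it (and, as the EbtBool case shows, HLSL also admits bool -> uint). A sketch of the combined rule, as an assumed simplification of canImplicitlyPromote():

```cpp
#include <cassert>

// Simplified: may 'int' implicitly become 'uint'?
bool intToUintOk(int version, bool isHlsl) { return version >= 400 || isHlsl; }

int main()
{
    assert(intToUintOk(110, true));   // HLSL: always allowed
    assert(!intToUintOk(330, false)); // GLSL 3.30: not allowed
    assert(intToUintOk(450, false));  // GLSL 4.50: allowed
}
```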
@@ -931,6 +1155,9 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
     case EbtInt:
         switch (from) {
         case EbtInt:
+#ifdef AMD_EXTENSIONS
+        case EbtInt16:
+#endif
             return true;
         case EbtBool:
             return (source == EShSourceHlsl);
@@ -943,6 +1170,10 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
         case EbtUint:
         case EbtInt64:
         case EbtUint64:
+#ifdef AMD_EXTENSIONS
+        case EbtInt16:
+        case EbtUint16:
+#endif
             return true;
         default:
             return false;
@@ -951,10 +1182,32 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
         switch (from) {
         case EbtInt:
         case EbtInt64:
+#ifdef AMD_EXTENSIONS
+        case EbtInt16:
+#endif
+            return true;
+        default:
+            return false;
+        }
+#ifdef AMD_EXTENSIONS
+    case EbtFloat16:
+        switch (from) {
+        case EbtInt16:
+        case EbtUint16:
+        case EbtFloat16:
             return true;
         default:
             return false;
         }
+    case EbtUint16:
+        switch (from) {
+        case EbtInt16:
+        case EbtUint16:
+            return true;
+        default:
+            return false;
+        }
+#endif
     default:
         return false;
     }
@@ -1094,21 +1347,79 @@ TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const
         break;
 #endif
     case EbtInt:
-        switch(type.getVectorSize()) {
-        case 1: op = EOpConstructInt; break;
-        case 2: op = EOpConstructIVec2; break;
-        case 3: op = EOpConstructIVec3; break;
-        case 4: op = EOpConstructIVec4; break;
-        default: break; // some compilers want this
+        if (type.getMatrixCols()) {
+            switch (type.getMatrixCols()) {
+            case 2:
+                switch (type.getMatrixRows()) {
+                case 2: op = EOpConstructIMat2x2; break;
+                case 3: op = EOpConstructIMat2x3; break;
+                case 4: op = EOpConstructIMat2x4; break;
+                default: break; // some compilers want this
+                }
+                break;
+            case 3:
+                switch (type.getMatrixRows()) {
+                case 2: op = EOpConstructIMat3x2; break;
+                case 3: op = EOpConstructIMat3x3; break;
+                case 4: op = EOpConstructIMat3x4; break;
+                default: break; // some compilers want this
+                }
+                break;
+            case 4:
+                switch (type.getMatrixRows()) {
+                case 2: op = EOpConstructIMat4x2; break;
+                case 3: op = EOpConstructIMat4x3; break;
+                case 4: op = EOpConstructIMat4x4; break;
+                default: break; // some compilers want this
+                }
+                break;
+            }
+        } else {
+            switch(type.getVectorSize()) {
+            case 1: op = EOpConstructInt; break;
+            case 2: op = EOpConstructIVec2; break;
+            case 3: op = EOpConstructIVec3; break;
+            case 4: op = EOpConstructIVec4; break;
+            default: break; // some compilers want this
+            }
         }
         break;
     case EbtUint:
-        switch(type.getVectorSize()) {
-        case 1: op = EOpConstructUint; break;
-        case 2: op = EOpConstructUVec2; break;
-        case 3: op = EOpConstructUVec3; break;
-        case 4: op = EOpConstructUVec4; break;
-        default: break; // some compilers want this
+        if (type.getMatrixCols()) {
+            switch (type.getMatrixCols()) {
+            case 2:
+                switch (type.getMatrixRows()) {
+                case 2: op = EOpConstructUMat2x2; break;
+                case 3: op = EOpConstructUMat2x3; break;
+                case 4: op = EOpConstructUMat2x4; break;
+                default: break; // some compilers want this
+                }
+                break;
+            case 3:
+                switch (type.getMatrixRows()) {
+                case 2: op = EOpConstructUMat3x2; break;
+                case 3: op = EOpConstructUMat3x3; break;
+                case 4: op = EOpConstructUMat3x4; break;
+                default: break; // some compilers want this
+                }
+                break;
+            case 4:
+                switch (type.getMatrixRows()) {
+                case 2: op = EOpConstructUMat4x2; break;
+                case 3: op = EOpConstructUMat4x3; break;
+                case 4: op = EOpConstructUMat4x4; break;
+                default: break; // some compilers want this
+                }
+                break;
+            }
+        } else {
+            switch(type.getVectorSize()) {
+            case 1: op = EOpConstructUint; break;
+            case 2: op = EOpConstructUVec2; break;
+            case 3: op = EOpConstructUVec3; break;
+            case 4: op = EOpConstructUVec4; break;
+            default: break; // some compilers want this
+            }
         }
         break;
     case EbtInt64:
@@ -1129,15 +1440,64 @@ TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const
         default: break; // some compilers want this
         }
         break;
-    case EbtBool:
+#ifdef AMD_EXTENSIONS
+    case EbtInt16:
         switch(type.getVectorSize()) {
-        case 1: op = EOpConstructBool; break;
-        case 2: op = EOpConstructBVec2; break;
-        case 3: op = EOpConstructBVec3; break;
-        case 4: op = EOpConstructBVec4; break;
+        case 1: op = EOpConstructInt16; break;
+        case 2: op = EOpConstructI16Vec2; break;
+        case 3: op = EOpConstructI16Vec3; break;
+        case 4: op = EOpConstructI16Vec4; break;
         default: break; // some compilers want this
         }
         break;
+    case EbtUint16:
+        switch(type.getVectorSize()) {
+        case 1: op = EOpConstructUint16; break;
+        case 2: op = EOpConstructU16Vec2; break;
+        case 3: op = EOpConstructU16Vec3; break;
+        case 4: op = EOpConstructU16Vec4; break;
+        default: break; // some compilers want this
+        }
+        break;
+#endif
+    case EbtBool:
+        if (type.getMatrixCols()) {
+            switch (type.getMatrixCols()) {
+            case 2:
+                switch (type.getMatrixRows()) {
+                case 2: op = EOpConstructBMat2x2; break;
+                case 3: op = EOpConstructBMat2x3; break;
+                case 4: op = EOpConstructBMat2x4; break;
+                default: break; // some compilers want this
+                }
+                break;
+            case 3:
+                switch (type.getMatrixRows()) {
+                case 2: op = EOpConstructBMat3x2; break;
+                case 3: op = EOpConstructBMat3x3; break;
+                case 4: op = EOpConstructBMat3x4; break;
+                default: break; // some compilers want this
+                }
+                break;
+            case 4:
+                switch (type.getMatrixRows()) {
+                case 2: op = EOpConstructBMat4x2; break;
+                case 3: op = EOpConstructBMat4x3; break;
+                case 4: op = EOpConstructBMat4x4; break;
+                default: break; // some compilers want this
+                }
+                break;
+            }
+        } else {
+            switch(type.getVectorSize()) {
+            case 1: op = EOpConstructBool; break;
+            case 2: op = EOpConstructBVec2; break;
+            case 3: op = EOpConstructBVec3; break;
+            case 4: op = EOpConstructBVec4; break;
+            default: break; // some compilers want this
+            }
        }
+        break;
     default:
         break;
     }
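These hunks give int, uint, and bool matrices their own constructor operators (HLSL has types like int4x4 and bool2x3 where GLSL only has float/double matrices). The nested switches are just a (cols, rows) lookup; a compact sketch of that indexing with a hypothetical helper that is not in glslang:

```cpp
#include <cassert>

// Hypothetical: map (cols, rows) in 2..4 x 2..4 onto the nine MatCxR ops,
// mirroring what the nested switches select one case at a time.
int matConstructorIndex(int cols, int rows)
{
    assert(cols >= 2 && cols <= 4 && rows >= 2 && rows <= 4);
    return (cols - 2) * 3 + (rows - 2); // 0 == Mat2x2 ... 8 == Mat4x4
}

int main()
{
    assert(matConstructorIndex(2, 2) == 0);
    assert(matConstructorIndex(3, 4) == 5);
    assert(matConstructorIndex(4, 4) == 8);
}
```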
@@ -1158,15 +1518,15 @@ TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* r
         return nullptr;

     TIntermAggregate* aggNode = nullptr;
-    if (left)
+    if (left != nullptr)
         aggNode = left->getAsAggregate();
-    if (! aggNode || aggNode->getOp() != EOpNull) {
+    if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
         aggNode = new TIntermAggregate;
-        if (left)
+        if (left != nullptr)
             aggNode->getSequence().push_back(left);
     }

-    if (right)
+    if (right != nullptr)
         aggNode->getSequence().push_back(right);

     return aggNode;
@@ -1271,7 +1631,8 @@ TIntermTyped* TIntermediate::addMethod(TIntermTyped* object, const TType& type,
 //
 // For "?:" test nodes. There are three children; a condition,
 // a true path, and a false path. The two paths are specified
-// as separate parameters.
+// as separate parameters. For vector 'cond', the true and false
+// are not paths, but vectors to mix.
 //
 // Specialization constant operations include
 //     - The ternary operator ( ? : )
@@ -1300,14 +1661,39 @@ TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* true
         return nullptr;
     }

+    // Handle a vector condition as a mix
+    if (!cond->getType().isScalarOrVec1()) {
+        TType targetVectorType(trueBlock->getType().getBasicType(), EvqTemporary,
+                               cond->getType().getVectorSize());
+        // smear true/false operands as needed
+        trueBlock = addUniShapeConversion(EOpMix, targetVectorType, trueBlock);
+        falseBlock = addUniShapeConversion(EOpMix, targetVectorType, falseBlock);
+
+        // After conversion, types have to match.
+        if (falseBlock->getType() != trueBlock->getType())
+            return nullptr;
+
+        // make the mix operation
+        TIntermAggregate* mix = makeAggregate(loc);
+        mix = growAggregate(mix, falseBlock);
+        mix = growAggregate(mix, trueBlock);
+        mix = growAggregate(mix, cond);
+        mix->setType(targetVectorType);
+        mix->setOp(EOpMix);
+
+        return mix;
+    }
+
+    // Now have a scalar condition...
+
+    // Convert true and false expressions to matching types
+    addBiShapeConversion(EOpMix, trueBlock, falseBlock);
+
     // After conversion, types have to match.
     if (falseBlock->getType() != trueBlock->getType())
         return nullptr;

-    //
-    // See if all the operands are constant, then fold it otherwise not.
-    //
-
+    // Eliminate the selection when the condition is a scalar and all operands are constant.
     if (cond->getAsConstantUnion() && trueBlock->getAsConstantUnion() && falseBlock->getAsConstantUnion()) {
         if (cond->getAsConstantUnion()->getConstArray()[0].getBConst())
             return trueBlock;
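In HLSL, `cond ? a : b` with a vector condition selects per component, so the AST cannot use a branch; the hunk builds an EOpMix node over (falseBlock, trueBlock, cond) instead. The componentwise semantics, sketched with plain C++ arrays as value-level stand-ins:

```cpp
#include <array>
#include <cassert>

// mix(f, t, cond) with a bool selector per component; a stand-in for the
// EOpMix aggregate built above, not glslang code.
std::array<float, 3> selectMix(const std::array<bool, 3>& cond,
                               const std::array<float, 3>& t,
                               const std::array<float, 3>& f)
{
    std::array<float, 3> r{};
    for (int i = 0; i < 3; ++i)
        r[i] = cond[i] ? t[i] : f[i];
    return r;
}

int main()
{
    auto r = selectMix({true, false, true}, {1, 1, 1}, {0, 0, 0});
    assert(r[0] == 1 && r[1] == 0 && r[2] == 1);
}
```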
@@ -1381,6 +1767,24 @@ TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned long long u64, co
     return addConstantUnion(unionArray, TType(EbtUint64, EvqConst), loc, literal);
 }

+#ifdef AMD_EXTENSIONS
+TIntermConstantUnion* TIntermediate::addConstantUnion(short i16, const TSourceLoc& loc, bool literal) const
+{
+    TConstUnionArray unionArray(1);
+    unionArray[0].setIConst(i16);
+
+    return addConstantUnion(unionArray, TType(EbtInt16, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned short u16, const TSourceLoc& loc, bool literal) const
+{
+    TConstUnionArray unionArray(1);
+    unionArray[0].setUConst(u16);
+
+    return addConstantUnion(unionArray, TType(EbtUint16, EvqConst), loc, literal);
+}
+#endif
+
 TIntermConstantUnion* TIntermediate::addConstantUnion(bool b, const TSourceLoc& loc, bool literal) const
 {
     TConstUnionArray unionArray(1);
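Note there is no dedicated 16-bit slot in the const union: the new overloads route short/unsigned short through setIConst/setUConst, so the value is widened on store and the TType (EbtInt16/EbtUint16) carries the real width. What that widening does to values, in plain C++:

```cpp
#include <cassert>

int main()
{
    short i16 = -3;
    int stored = i16;           // sign-extended, as setIConst(i16) would store it
    assert(stored == -3);

    unsigned short u16 = 0xFFFF;
    unsigned int storedU = u16; // zero-extended for the unsigned overload
    assert(storedU == 0xFFFFu);
}
```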
@@ -1478,10 +1882,11 @@ const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool
 //
 // Create while and do-while loop nodes.
 //
-TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc)
+TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TLoopControl control)
 {
     TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst);
     node->setLoc(loc);
+    node->setLoopControl(control);

     return node;
 }
@@ -1489,13 +1894,19 @@ TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TInte
 //
 // Create a for-loop sequence.
 //
-TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test, TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc)
+TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test, TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TLoopControl control)
 {
     TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst);
     node->setLoc(loc);
-
-    // make a sequence of the initializer and statement
-    TIntermAggregate* loopSequence = makeAggregate(initializer, loc);
+    node->setLoopControl(control);
+
+    // make a sequence of the initializer and statement, but try to reuse the
+    // aggregate already created for whatever is in the initializer, if there is one
+    TIntermAggregate* loopSequence = (initializer == nullptr ||
+                                      initializer->getAsAggregate() == nullptr) ? makeAggregate(initializer, loc)
+                                                                                : initializer->getAsAggregate();
+    if (loopSequence != nullptr && loopSequence->getOp() == EOpSequence)
+        loopSequence->setOp(EOpNull);
     loopSequence = growAggregate(loopSequence, node);
     loopSequence->setOperator(EOpSequence);

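Besides threading the new TLoopControl through, addForLoop() now avoids double-wrapping: if the initializer already is an aggregate, the loop node is appended to it (after neutralizing its EOpSequence so growAggregate() will extend it). A structural sketch with stand-in node types, not the glslang classes:

```cpp
#include <cassert>
#include <vector>

struct Node {
    bool isAggregate = false;
    std::vector<Node*> seq;
};

// Reuse the initializer's aggregate when it has one; otherwise wrap it.
Node* buildLoopSequence(Node* initializer, Node* loopNode)
{
    Node* s;
    if (initializer != nullptr && initializer->isAggregate) {
        s = initializer;          // reuse, don't nest another sequence
    } else {
        s = new Node{true, {}};
        if (initializer != nullptr)
            s->seq.push_back(initializer);
    }
    s->seq.push_back(loopNode);
    return s;
}

int main()
{
    Node init{true, {}};
    Node loop;
    Node* s = buildLoopSequence(&init, &loop);
    assert(s == &init && s->seq.size() == 1); // reused, not wrapped

    Node plain;
    s = buildLoopSequence(&plain, &loop);
    assert(s != &plain && s->seq.size() == 2); // wrapped
    delete s;
}
```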
@@ -1535,6 +1946,14 @@ bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/)
     // Propagate 'noContraction' label in backward from 'precise' variables.
     glslang::PropagateNoContraction(*this);

+    switch (textureSamplerTransformMode) {
+    case EShTexSampTransKeep:
+        break;
+    case EShTexSampTransUpgradeTextureRemoveSampler:
+        performTextureUpgradeAndSamplerRemovalTransformation(root);
+        break;
+    }
+
     return true;
 }

@@ -1725,6 +2144,30 @@ bool TIntermediate::isSpecializationOperation(const TIntermOperator& node) const
     case EOpConvUintToInt64:
     case EOpConvUint64ToInt:
     case EOpConvIntToUint64:
+#ifdef AMD_EXTENSIONS
+    case EOpConvInt16ToBool:
+    case EOpConvBoolToInt16:
+    case EOpConvInt16ToInt:
+    case EOpConvIntToInt16:
+    case EOpConvInt16ToUint:
+    case EOpConvUintToInt16:
+    case EOpConvInt16ToInt64:
+    case EOpConvInt64ToInt16:
+    case EOpConvInt16ToUint64:
+    case EOpConvUint64ToInt16:
+    case EOpConvUint16ToBool:
+    case EOpConvBoolToUint16:
+    case EOpConvUint16ToInt:
+    case EOpConvIntToUint16:
+    case EOpConvUint16ToUint:
+    case EOpConvUintToUint16:
+    case EOpConvUint16ToInt64:
+    case EOpConvInt64ToUint16:
+    case EOpConvUint16ToUint64:
+    case EOpConvUint64ToUint16:
+    case EOpConvInt16ToUint16:
+    case EOpConvUint16ToInt16:
+#endif

     // unary operations
     case EOpNegative:
@@ -1853,6 +2296,10 @@ bool TIntermediate::promoteUnary(TIntermUnary& node)
     case EOpBitwiseNot:
         if (operand->getBasicType() != EbtInt &&
             operand->getBasicType() != EbtUint &&
+#ifdef AMD_EXTENSIONS
+            operand->getBasicType() != EbtInt16 &&
+            operand->getBasicType() != EbtUint16 &&
+#endif
             operand->getBasicType() != EbtInt64 &&
             operand->getBasicType() != EbtUint64)

@@ -1867,6 +2314,10 @@ bool TIntermediate::promoteUnary(TIntermUnary& node)
             operand->getBasicType() != EbtUint &&
             operand->getBasicType() != EbtInt64 &&
             operand->getBasicType() != EbtUint64 &&
+#ifdef AMD_EXTENSIONS
+            operand->getBasicType() != EbtInt16 &&
+            operand->getBasicType() != EbtUint16 &&
+#endif
             operand->getBasicType() != EbtFloat &&
 #ifdef AMD_EXTENSIONS
             operand->getBasicType() != EbtFloat16 &&
@@ -1967,6 +2418,42 @@ bool TIntermediate::promoteBinary(TIntermBinary& node)
     // We now have only scalars, vectors, and matrices to worry about.
     //

+    // HLSL implicitly promotes bool -> int for numeric operations.
+    // (Implicit conversions to make the operands match each other's types were already done.)
+    if (getSource() == EShSourceHlsl &&
+        (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)) {
+        switch (op) {
+        case EOpLessThan:
+        case EOpGreaterThan:
+        case EOpLessThanEqual:
+        case EOpGreaterThanEqual:
+
+        case EOpRightShift:
+        case EOpLeftShift:
+
+        case EOpMod:
+
+        case EOpAnd:
+        case EOpInclusiveOr:
+        case EOpExclusiveOr:
+
+        case EOpAdd:
+        case EOpSub:
+        case EOpDiv:
+        case EOpMul:
+            left = addConversion(op, TType(EbtInt, EvqTemporary, left->getVectorSize()), left);
+            right = addConversion(op, TType(EbtInt, EvqTemporary, right->getVectorSize()), right);
+            if (left == nullptr || right == nullptr)
+                return false;
+            node.setLeft(left);
+            node.setRight(right);
+            break;
+
+        default:
+            break;
+        }
+    }
+
     // Do general type checks against individual operands (comparing left and right is coming up, checking mixed shapes after that)
     switch (op) {
     case EOpLessThan:
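The new promoteBinary() block makes HLSL's arithmetic-on-bool legal by converting bool operands to int before the usual checks run. The resulting value semantics match C++'s own integral promotion:

```cpp
#include <cassert>

int main()
{
    bool b = true;
    assert(b + 2 == 3);   // bool promotes to int 1
    assert((b & 3) == 1); // bitwise ops get the same treatment
    assert(b * 7 == 7);
}
```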
@@ -2037,8 +2524,14 @@ bool TIntermediate::promoteBinary(TIntermBinary& node)

         // Check for integer-only operands.
         if ((left->getBasicType() != EbtInt && left->getBasicType() != EbtUint &&
-             left->getBasicType() != EbtInt64 && left->getBasicType() != EbtUint64) ||
+#ifdef AMD_EXTENSIONS
+             left->getBasicType() != EbtInt16 && left->getBasicType() != EbtUint16 &&
+#endif
+             left->getBasicType() != EbtInt64 && left->getBasicType() != EbtUint64) ||
             (right->getBasicType() != EbtInt && right->getBasicType() != EbtUint &&
+#ifdef AMD_EXTENSIONS
+             right->getBasicType() != EbtInt16 && right->getBasicType() != EbtUint16 &&
+#endif
              right->getBasicType() != EbtInt64 && right->getBasicType() != EbtUint64))
             return false;
         if (left->isMatrix() || right->isMatrix())
@@ -2079,8 +2572,6 @@ bool TIntermediate::promoteBinary(TIntermBinary& node)
     case EOpLogicalXor:
         return left->getType() == right->getType();

-    // no shifts: they can mix types (scalar int can shift a vector uint, etc.)
-
     case EOpMod:
     case EOpModAssign:

@@ -2094,6 +2585,7 @@ bool TIntermediate::promoteBinary(TIntermBinary& node)
     case EOpAdd:
     case EOpSub:
     case EOpDiv:
+
     case EOpAddAssign:
     case EOpSubAssign:
     case EOpDivAssign:
@@ -2118,7 +2610,7 @@ bool TIntermediate::promoteBinary(TIntermBinary& node)
         return true;

     // Finish handling the case, for all ops, where there are two vectors of different sizes
-    if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize())
+    if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize() && right->getVectorSize() > 1)
         return false;

     //
@@ -2667,4 +3159,41 @@ bool TIntermediate::specConstantPropagates(const TIntermTyped& node1, const TInt
            (node2.getType().getQualifier().isSpecConstant() && node1.getType().getQualifier().isConstant());
 }

+struct TextureUpgradeAndSamplerRemovalTransform : public TIntermTraverser {
+    bool visitAggregate(TVisit, TIntermAggregate* ag) override {
+        using namespace std;
+        TIntermSequence& seq = ag->getSequence();
+        // remove pure sampler variables
+        TIntermSequence::iterator newEnd = remove_if(seq.begin(), seq.end(), [](TIntermNode* node) {
+            TIntermSymbol* symbol = node->getAsSymbolNode();
+            if (!symbol)
+                return false;
+
+            return (symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isPureSampler());
+        });
+        seq.erase(newEnd, seq.end());
+        // replace constructors with sampler/textures
+        // update textures into sampled textures
+        for_each(seq.begin(), seq.end(), [](TIntermNode*& node) {
+            TIntermSymbol* symbol = node->getAsSymbolNode();
+            if (!symbol) {
+                TIntermAggregate *constructor = node->getAsAggregate();
+                if (constructor && constructor->getOp() == EOpConstructTextureSampler) {
+                    if (!constructor->getSequence().empty())
+                        node = constructor->getSequence()[0];
+                }
+            } else if (symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isTexture()) {
+                symbol->getWritableType().getSampler().combined = true;
+            }
+        });
+        return true;
+    }
+};
+
+void TIntermediate::performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root)
+{
+    TextureUpgradeAndSamplerRemovalTransform transform;
+    root->traverse(&transform);
+}
+
 } // end namespace glslang
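The transform visits each aggregate's sequence in two passes: remove_if/erase drops pure-sampler symbols, then for_each rewrites EOpConstructTextureSampler nodes to their texture argument and flags remaining textures as combined. The same erase-remove/for_each shape over a plain vector, illustrative only:

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    // negatives stand in for pure samplers; positives for textures
    std::vector<int> seq{1, -2, 3, -4};
    seq.erase(std::remove_if(seq.begin(), seq.end(),
                             [](int v) { return v < 0; }),
              seq.end());
    // "upgrade" what is left, as the real pass marks textures combined
    std::for_each(seq.begin(), seq.end(), [](int& v) { v += 100; });
    assert((seq == std::vector<int>{101, 103}));
}
```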