@@ -66,7 +66,7 @@ static const _s20sseconsts _S20SSECONSTANTS;
namespace ZeroTier {
-void Salsa20::init(const void *key,unsigned int kbits,const void *iv,unsigned int rounds)
+void Salsa20::init(const void *key,unsigned int kbits,const void *iv)
throw()
{
#ifdef ZT_SALSA20_SSE
@@ -121,11 +121,9 @@ void Salsa20::init(const void *key,unsigned int kbits,const void *iv,unsigned in
_state.i[15] = U8TO32_LITTLE(constants + 12);
_state.i[0] = U8TO32_LITTLE(constants + 0);
#endif
-
- _roundsDiv2 = rounds / 2;
}
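[Editor's note: with the rounds parameter gone from init(), the round count is now chosen per call by using encrypt12() or encrypt20() (both shown below). A minimal caller sketch under that assumption; buffer names here are illustrative, not part of this patch:

    ZeroTier::Salsa20 s20;
    s20.init(keyBytes,256,ivBytes);          // key length in bits, 64-bit IV
    s20.encrypt12(plain,cipher,len);         // Salsa20/12 (6 double rounds)
    // or: s20.encrypt20(plain,cipher,len);  // Salsa20/20 (10 double rounds)
]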
-void Salsa20::encrypt(const void *in,void *out,unsigned int bytes)
+void Salsa20::encrypt12(const void *in,void *out,unsigned int bytes)
throw()
{
uint8_t tmp[64];
@@ -175,68 +173,169 @@ void Salsa20::encrypt(const void *in,void *out,unsigned int bytes)
|
|
|
__m128i X1 = _mm_loadu_si128((const __m128i *)&(_state.v[1]));
|
|
|
__m128i X2 = _mm_loadu_si128((const __m128i *)&(_state.v[2]));
|
|
|
__m128i X3 = _mm_loadu_si128((const __m128i *)&(_state.v[3]));
|
|
|
+ __m128i T;
|
|
|
__m128i X0s = X0;
|
|
|
__m128i X1s = X1;
|
|
|
__m128i X2s = X2;
|
|
|
__m128i X3s = X3;
|
|
|
|
|
|
- for (i=0;i<_roundsDiv2;++i) {
|
|
|
- __m128i T = _mm_add_epi32(X0, X3);
|
|
|
- X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7));
|
|
|
- X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 25));
|
|
|
- T = _mm_add_epi32(X1, X0);
|
|
|
- X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
|
|
|
- X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
|
|
|
- T = _mm_add_epi32(X2, X1);
|
|
|
- X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 13));
|
|
|
- X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 19));
|
|
|
- T = _mm_add_epi32(X3, X2);
|
|
|
- X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
|
|
|
- X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
|
|
|
-
|
|
|
- X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
- X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
- X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
-
|
|
|
- T = _mm_add_epi32(X0, X1);
|
|
|
- X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 7));
|
|
|
- X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 25));
|
|
|
- T = _mm_add_epi32(X3, X0);
|
|
|
- X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
|
|
|
- X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
|
|
|
- T = _mm_add_epi32(X2, X3);
|
|
|
- X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 13));
|
|
|
- X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 19));
|
|
|
- T = _mm_add_epi32(X1, X2);
|
|
|
- X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
|
|
|
- X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
|
|
|
-
|
|
|
- X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
- X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
- X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
- }
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
|
|
|
X0 = _mm_add_epi32(X0s,X0);
|
|
|
X1 = _mm_add_epi32(X1s,X1);
|
|
|
X2 = _mm_add_epi32(X2s,X2);
|
|
|
X3 = _mm_add_epi32(X3s,X3);
|
|
|
|
|
|
- {
|
|
|
- __m128i k02 = _mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32));
|
|
|
- k02 = _mm_shuffle_epi32(k02, _MM_SHUFFLE(0, 1, 2, 3));
|
|
|
- __m128i k13 = _mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32));
|
|
|
- k13 = _mm_shuffle_epi32(k13, _MM_SHUFFLE(0, 1, 2, 3));
|
|
|
- __m128i k20 = _mm_or_si128(_mm_and_si128(X2, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X1, _S20SSECONSTANTS.maskHi32));
|
|
|
- __m128i k31 = _mm_or_si128(_mm_and_si128(X3, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X2, _S20SSECONSTANTS.maskHi32));
|
|
|
-
|
|
|
- const float *const mv = (const float *)m;
|
|
|
- float *const cv = (float *)c;
|
|
|
-
|
|
|
- _mm_storeu_ps(cv,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k02,k20),_mm_castps_si128(_mm_loadu_ps(mv)))));
|
|
|
- _mm_storeu_ps(cv + 4,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k13,k31),_mm_castps_si128(_mm_loadu_ps(mv + 4)))));
|
|
|
- _mm_storeu_ps(cv + 8,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k20,k02),_mm_castps_si128(_mm_loadu_ps(mv + 8)))));
|
|
|
- _mm_storeu_ps(cv + 12,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k31,k13),_mm_castps_si128(_mm_loadu_ps(mv + 12)))));
|
|
|
- }
|
|
|
+ __m128i k02 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32)), _MM_SHUFFLE(0, 1, 2, 3));
|
|
|
+ __m128i k13 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32)), _MM_SHUFFLE(0, 1, 2, 3));
|
|
|
+ __m128i k20 = _mm_or_si128(_mm_and_si128(X2, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X1, _S20SSECONSTANTS.maskHi32));
|
|
|
+ __m128i k31 = _mm_or_si128(_mm_and_si128(X3, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X2, _S20SSECONSTANTS.maskHi32));
|
|
|
+ _mm_storeu_ps(reinterpret_cast<float *>(c),_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k02,k20),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m))))));
|
|
|
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 4,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k13,k31),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 4)))));
|
|
|
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 8,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k20,k02),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 8)))));
|
|
|
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 12,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k31,k13),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 12)))));
|
|
|
|
|
|
if (!(++_state.i[8])) {
|
|
|
++_state.i[5]; // state reordered for SSE
|
|
@@ -260,41 +359,942 @@ void Salsa20::encrypt(const void *in,void *out,unsigned int bytes)
|
|
|
x14 = j14;
|
|
|
x15 = j15;
|
|
|
|
|
|
- for(i=0;i<_roundsDiv2;++i) {
|
|
|
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ x0 = PLUS(x0,j0);
|
|
|
+ x1 = PLUS(x1,j1);
|
|
|
+ x2 = PLUS(x2,j2);
|
|
|
+ x3 = PLUS(x3,j3);
|
|
|
+ x4 = PLUS(x4,j4);
|
|
|
+ x5 = PLUS(x5,j5);
|
|
|
+ x6 = PLUS(x6,j6);
|
|
|
+ x7 = PLUS(x7,j7);
|
|
|
+ x8 = PLUS(x8,j8);
|
|
|
+ x9 = PLUS(x9,j9);
|
|
|
+ x10 = PLUS(x10,j10);
|
|
|
+ x11 = PLUS(x11,j11);
|
|
|
+ x12 = PLUS(x12,j12);
|
|
|
+ x13 = PLUS(x13,j13);
|
|
|
+ x14 = PLUS(x14,j14);
|
|
|
+ x15 = PLUS(x15,j15);
|
|
|
+
|
|
|
+ U32TO8_LITTLE(c + 0,XOR(x0,U8TO32_LITTLE(m + 0)));
|
|
|
+ U32TO8_LITTLE(c + 4,XOR(x1,U8TO32_LITTLE(m + 4)));
|
|
|
+ U32TO8_LITTLE(c + 8,XOR(x2,U8TO32_LITTLE(m + 8)));
|
|
|
+ U32TO8_LITTLE(c + 12,XOR(x3,U8TO32_LITTLE(m + 12)));
|
|
|
+ U32TO8_LITTLE(c + 16,XOR(x4,U8TO32_LITTLE(m + 16)));
|
|
|
+ U32TO8_LITTLE(c + 20,XOR(x5,U8TO32_LITTLE(m + 20)));
|
|
|
+ U32TO8_LITTLE(c + 24,XOR(x6,U8TO32_LITTLE(m + 24)));
|
|
|
+ U32TO8_LITTLE(c + 28,XOR(x7,U8TO32_LITTLE(m + 28)));
|
|
|
+ U32TO8_LITTLE(c + 32,XOR(x8,U8TO32_LITTLE(m + 32)));
|
|
|
+ U32TO8_LITTLE(c + 36,XOR(x9,U8TO32_LITTLE(m + 36)));
|
|
|
+ U32TO8_LITTLE(c + 40,XOR(x10,U8TO32_LITTLE(m + 40)));
|
|
|
+ U32TO8_LITTLE(c + 44,XOR(x11,U8TO32_LITTLE(m + 44)));
|
|
|
+ U32TO8_LITTLE(c + 48,XOR(x12,U8TO32_LITTLE(m + 48)));
|
|
|
+ U32TO8_LITTLE(c + 52,XOR(x13,U8TO32_LITTLE(m + 52)));
|
|
|
+ U32TO8_LITTLE(c + 56,XOR(x14,U8TO32_LITTLE(m + 56)));
|
|
|
+ U32TO8_LITTLE(c + 60,XOR(x15,U8TO32_LITTLE(m + 60)));
|
|
|
+
|
|
|
+ if (!(++j8)) {
|
|
|
+ ++j9;
|
|
|
+ /* stopping at 2^70 bytes per nonce is user's responsibility */
|
|
|
+ }
|
|
|
+#endif
|
|
|
+
|
|
|
+ if (bytes <= 64) {
|
|
|
+ if (bytes < 64) {
|
|
|
+ for (i = 0;i < bytes;++i)
|
|
|
+ ctarget[i] = c[i];
|
|
|
+ }
|
|
|
+
|
|
|
+#ifndef ZT_SALSA20_SSE
|
|
|
+ _state.i[8] = j8;
|
|
|
+ _state.i[9] = j9;
|
|
|
+#endif
|
|
|
+
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ bytes -= 64;
|
|
|
+ c += 64;
|
|
|
+ m += 64;
|
|
|
+ }
+}
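[Editor's note: the unrolled "2X round" blocks above replace the old for (i=0;i<_roundsDiv2;++i) loop: encrypt12() inlines 6 double rounds (Salsa20/12) and encrypt20() below inlines 10 (Salsa20/20). In the SSE path each rotate is written as two shifts XORed in; a scalar sketch of that equivalence, for illustration only and not part of this patch:

    // (v << n) and (v >> (32 - n)) share no bits, so XOR/OR both give a rotate.
    static inline uint32_t rotl32(uint32_t v,unsigned int n) { return (v << n) | (v >> (32 - n)); }
    // Hence an SSE line such as
    //   X1 = _mm_xor_si128(_mm_xor_si128(X1,_mm_slli_epi32(T,7)),_mm_srli_epi32(T,25));
    // XORs rotl32(T,7) into each 32-bit lane of X1, matching x4 = XOR(x4,ROTATE(...,7)) in the scalar path.
]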
+
+void Salsa20::encrypt20(const void *in,void *out,unsigned int bytes)
|
|
|
+ throw()
|
|
|
+{
|
|
|
+ uint8_t tmp[64];
|
|
|
+ const uint8_t *m = (const uint8_t *)in;
|
|
|
+ uint8_t *c = (uint8_t *)out;
|
|
|
+ uint8_t *ctarget = c;
|
|
|
+ unsigned int i;
|
|
|
+
|
|
|
+#ifndef ZT_SALSA20_SSE
|
|
|
+ uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
|
|
|
+ uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
|
|
|
+#endif
|
|
|
+
|
|
|
+ if (!bytes)
|
|
|
+ return;
|
|
|
+
|
|
|
+#ifndef ZT_SALSA20_SSE
|
|
|
+ j0 = _state.i[0];
|
|
|
+ j1 = _state.i[1];
|
|
|
+ j2 = _state.i[2];
|
|
|
+ j3 = _state.i[3];
|
|
|
+ j4 = _state.i[4];
|
|
|
+ j5 = _state.i[5];
|
|
|
+ j6 = _state.i[6];
|
|
|
+ j7 = _state.i[7];
|
|
|
+ j8 = _state.i[8];
|
|
|
+ j9 = _state.i[9];
|
|
|
+ j10 = _state.i[10];
|
|
|
+ j11 = _state.i[11];
|
|
|
+ j12 = _state.i[12];
|
|
|
+ j13 = _state.i[13];
|
|
|
+ j14 = _state.i[14];
|
|
|
+ j15 = _state.i[15];
|
|
|
+#endif
|
|
|
+
|
|
|
+ for (;;) {
|
|
|
+ if (bytes < 64) {
|
|
|
+ for (i = 0;i < bytes;++i)
|
|
|
+ tmp[i] = m[i];
|
|
|
+ m = tmp;
|
|
|
+ ctarget = c;
|
|
|
+ c = tmp;
|
|
|
}
|
|
|
|
|
|
+#ifdef ZT_SALSA20_SSE
|
|
|
+ __m128i X0 = _mm_loadu_si128((const __m128i *)&(_state.v[0]));
|
|
|
+ __m128i X1 = _mm_loadu_si128((const __m128i *)&(_state.v[1]));
|
|
|
+ __m128i X2 = _mm_loadu_si128((const __m128i *)&(_state.v[2]));
|
|
|
+ __m128i X3 = _mm_loadu_si128((const __m128i *)&(_state.v[3]));
|
|
|
+ __m128i T;
|
|
|
+ __m128i X0s = X0;
|
|
|
+ __m128i X1s = X1;
|
|
|
+ __m128i X2s = X2;
|
|
|
+ __m128i X3s = X3;
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ T = _mm_add_epi32(X0, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X1, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X3, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x93);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x39);
|
|
|
+ T = _mm_add_epi32(X0, X1);
|
|
|
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
|
|
|
+ T = _mm_add_epi32(X3, X0);
|
|
|
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
|
|
|
+ T = _mm_add_epi32(X2, X3);
|
|
|
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
|
|
|
+ T = _mm_add_epi32(X1, X2);
|
|
|
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
|
|
|
+ X1 = _mm_shuffle_epi32(X1, 0x39);
|
|
|
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
|
|
|
+ X3 = _mm_shuffle_epi32(X3, 0x93);
|
|
|
+
|
|
|
+ X0 = _mm_add_epi32(X0s,X0);
|
|
|
+ X1 = _mm_add_epi32(X1s,X1);
|
|
|
+ X2 = _mm_add_epi32(X2s,X2);
|
|
|
+ X3 = _mm_add_epi32(X3s,X3);
|
|
|
+
|
|
|
+ __m128i k02 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32)), _MM_SHUFFLE(0, 1, 2, 3));
|
|
|
+ __m128i k13 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32)), _MM_SHUFFLE(0, 1, 2, 3));
|
|
|
+ __m128i k20 = _mm_or_si128(_mm_and_si128(X2, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X1, _S20SSECONSTANTS.maskHi32));
|
|
|
+ __m128i k31 = _mm_or_si128(_mm_and_si128(X3, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X2, _S20SSECONSTANTS.maskHi32));
|
|
|
+ _mm_storeu_ps(reinterpret_cast<float *>(c),_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k02,k20),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m))))));
|
|
|
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 4,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k13,k31),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 4)))));
|
|
|
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 8,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k20,k02),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 8)))));
|
|
|
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 12,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k31,k13),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 12)))));
|
|
|
+
|
|
|
+ if (!(++_state.i[8])) {
|
|
|
+ ++_state.i[5]; // state reordered for SSE
|
|
|
+ /* stopping at 2^70 bytes per nonce is user's responsibility */
|
|
|
+ }
|
|
|
+#else
|
|
|
+ x0 = j0;
|
|
|
+ x1 = j1;
|
|
|
+ x2 = j2;
|
|
|
+ x3 = j3;
|
|
|
+ x4 = j4;
|
|
|
+ x5 = j5;
|
|
|
+ x6 = j6;
|
|
|
+ x7 = j7;
|
|
|
+ x8 = j8;
|
|
|
+ x9 = j9;
|
|
|
+ x10 = j10;
|
|
|
+ x11 = j11;
|
|
|
+ x12 = j12;
|
|
|
+ x13 = j13;
|
|
|
+ x14 = j14;
|
|
|
+ x15 = j15;
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
+ // 2X round -------------------------------------------------------------
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
|
|
|
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
|
|
|
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
|
|
|
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
|
|
|
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
|
|
|
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
|
|
|
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
|
|
|
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
|
|
|
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
|
|
|
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
|
|
|
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
|
|
|
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
|
|
|
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
|
|
|
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
|
|
|
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
|
|
|
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
|
|
|
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
|
|
|
+
|
|
|
x0 = PLUS(x0,j0);
|
|
|
x1 = PLUS(x1,j1);
|
|
|
x2 = PLUS(x2,j2);
|