/*
 * Based on public domain code available at: http://cr.yp.to/snuffle.html
 *
 * Modifications and C-native SSE macro based SSE implementation by
 * Adam Ierymenko <[email protected]>.
 *
 * Since the original was public domain, this is too.
 */

#include "Constants.hpp"
#include "Salsa20.hpp"

#define ROTATE(v, c) (((v) << (c)) | ((v) >> (32 - (c))))
#define XOR(v, w) ((v) ^ (w))
#define PLUS(v, w) ((uint32_t)((v) + (w)))
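
// Little-endian 32-bit load/store helpers, only needed by the portable (non-SSE) code path.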
#ifndef ZT_SALSA20_SSE

#if __BYTE_ORDER == __LITTLE_ENDIAN
#ifdef ZT_NO_UNALIGNED_ACCESS
// Slower version that does not use type punning
#define U8TO32_LITTLE(p) ( ((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24) )
static ZT_INLINE void U32TO8_LITTLE(uint8_t *const c, const uint32_t v) { c[0] = (uint8_t)v; c[1] = (uint8_t)(v >> 8); c[2] = (uint8_t)(v >> 16); c[3] = (uint8_t)(v >> 24); }
#else
// Fast version that just does 32-bit load/store
#define U8TO32_LITTLE(p) (*((const uint32_t *)((const void *)(p))))
#define U32TO8_LITTLE(c,v) *((uint32_t *)((void *)(c))) = (v)
#endif // ZT_NO_UNALIGNED_ACCESS
#else // __BYTE_ORDER == __BIG_ENDIAN (we don't support anything else... does MIDDLE_ENDIAN even still exist?)
#ifdef __GNUC__
// Use GNUC builtin bswap macros on big-endian machines if available
#define U8TO32_LITTLE(p) __builtin_bswap32(*((const uint32_t *)((const void *)(p))))
#define U32TO8_LITTLE(c,v) *((uint32_t *)((void *)(c))) = __builtin_bswap32((v))
#else // no __GNUC__
// Otherwise do it the slow, manual way on BE machines
#define U8TO32_LITTLE(p) ( ((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24) )
static ZT_INLINE void U32TO8_LITTLE(uint8_t *const c, const uint32_t v) { c[0] = (uint8_t)v; c[1] = (uint8_t)(v >> 8); c[2] = (uint8_t)(v >> 16); c[3] = (uint8_t)(v >> 24); }
#endif // __GNUC__ or not
#endif // __BYTE_ORDER little or big?

#endif // !ZT_SALSA20_SSE
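
// 64-bit lane masks selecting the low/high 32 bits of each quadword, used by the SSE path when
// reassembling the vector state into standard output word order.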
#ifdef ZT_SALSA20_SSE
class _s20sseconsts
{
public:
    _s20sseconsts() noexcept
    {
        maskLo32 = _mm_shuffle_epi32(_mm_cvtsi32_si128(-1), _MM_SHUFFLE(1, 0, 1, 0));
        maskHi32 = _mm_slli_epi64(maskLo32, 32);
    }

    __m128i maskLo32, maskHi32;
};

static const _s20sseconsts s_S20SSECONSTANTS;
#endif

namespace ZeroTier {
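
// Initialize the cipher state from a 256-bit key and 64-bit IV. The SSE build stores the sixteen
// state words in the shuffled order expected by the vectorized round function below.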
void Salsa20::init(const void *key, const void *iv) noexcept
{
#ifdef ZT_SALSA20_SSE
    const uint32_t *const k = (const uint32_t *)key;
    _state.i[0] = 0x61707865;
    _state.i[1] = 0x3320646e;
    _state.i[2] = 0x79622d32;
    _state.i[3] = 0x6b206574;
    _state.i[4] = k[3];
    _state.i[5] = 0;
    _state.i[6] = k[7];
    _state.i[7] = k[2];
    _state.i[8] = 0;
    _state.i[9] = k[6];
    _state.i[10] = k[1];
    _state.i[11] = ((const uint32_t *)iv)[1];
    _state.i[12] = k[5];
    _state.i[13] = k[0];
    _state.i[14] = ((const uint32_t *)iv)[0];
    _state.i[15] = k[4];
#else
    const char *const constants = "expand 32-byte k";
    const uint8_t *const k = (const uint8_t *)key;
    _state.i[0] = U8TO32_LITTLE(constants + 0);
    _state.i[1] = U8TO32_LITTLE(k + 0);
    _state.i[2] = U8TO32_LITTLE(k + 4);
    _state.i[3] = U8TO32_LITTLE(k + 8);
    _state.i[4] = U8TO32_LITTLE(k + 12);
    _state.i[5] = U8TO32_LITTLE(constants + 4);
    _state.i[6] = U8TO32_LITTLE(((const uint8_t *)iv) + 0);
    _state.i[7] = U8TO32_LITTLE(((const uint8_t *)iv) + 4);
    _state.i[8] = 0;
    _state.i[9] = 0;
    _state.i[10] = U8TO32_LITTLE(constants + 8);
    _state.i[11] = U8TO32_LITTLE(k + 16);
    _state.i[12] = U8TO32_LITTLE(k + 20);
    _state.i[13] = U8TO32_LITTLE(k + 24);
    _state.i[14] = U8TO32_LITTLE(k + 28);
    _state.i[15] = U8TO32_LITTLE(constants + 12);
#endif
}
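
// The working state: four 128-bit vectors in the SSE build, otherwise sixteen 32-bit words.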
union p_SalsaState
{
#ifdef ZT_SALSA20_SSE
    __m128i v[4];
#endif // ZT_SALSA20_SSE
    uint32_t i[16];
};
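
// Core keystream/XOR routine. R is the total round count (12 or 20). Input is processed in
// 64-byte blocks; a short final block is staged through a temporary buffer.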
template< unsigned int R >
static ZT_INLINE void p_salsaCrypt(p_SalsaState *const state, const uint8_t *m, uint8_t *c, unsigned int bytes) noexcept
{
    if (unlikely(bytes == 0))
        return;

    uint8_t tmp[64];
    uint8_t *ctarget = c;
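
    // Load the state into locals: four SSE vectors or sixteen scalar words depending on the build.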
#ifdef ZT_SALSA20_SSE
    __m128i X0 = state->v[0];
    __m128i X1 = state->v[1];
    __m128i X2 = state->v[2];
    __m128i X3 = state->v[3];
    const __m128i maskLo32 = s_S20SSECONSTANTS.maskLo32;
    const __m128i maskHi32 = s_S20SSECONSTANTS.maskHi32;
    const __m128i add1 = _mm_set_epi32(0, 0, 0, 1);
#else
    uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
    uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
    j0 = state->i[0];
    j1 = state->i[1];
    j2 = state->i[2];
    j3 = state->i[3];
    j4 = state->i[4];
    j5 = state->i[5];
    j6 = state->i[6];
    j7 = state->i[7];
    j8 = state->i[8];
    j9 = state->i[9];
    j10 = state->i[10];
    j11 = state->i[11];
    j12 = state->i[12];
    j13 = state->i[13];
    j14 = state->i[14];
    j15 = state->i[15];
#endif
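
    // One 64-byte block per iteration. A final block shorter than 64 bytes is copied into tmp[]
    // so the full-block code below can run unchanged, then the needed bytes are copied back out.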
    for (;;) {
        if (unlikely(bytes < 64)) {
            for (unsigned int i = 0; i < bytes; ++i)
                tmp[i] = m[i];
            m = tmp;
            ctarget = c;
            c = tmp;
        }
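
        // Work on a copy of the state: run R/2 double-rounds (a column round followed by a row
        // round), then add the original state back in to produce the keystream block.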
#ifdef ZT_SALSA20_SSE
        __m128i X0s = X0;
        __m128i X1s = X1;
        __m128i X2s = X2;
        __m128i X3s = X3;
        __m128i T;
        for (unsigned int rr = 0; rr < (R / 2); ++rr) {
            T = _mm_add_epi32(X0, X3);
            X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
            T = _mm_add_epi32(X1, X0);
            X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
            T = _mm_add_epi32(X2, X1);
            X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
            T = _mm_add_epi32(X3, X2);
            X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
            X1 = _mm_shuffle_epi32(X1, 0x93);
            X2 = _mm_shuffle_epi32(X2, 0x4E);
            X3 = _mm_shuffle_epi32(X3, 0x39);
            T = _mm_add_epi32(X0, X1);
            X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
            T = _mm_add_epi32(X3, X0);
            X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
            T = _mm_add_epi32(X2, X3);
            X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
            T = _mm_add_epi32(X1, X2);
            X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
            X1 = _mm_shuffle_epi32(X1, 0x39);
            X2 = _mm_shuffle_epi32(X2, 0x4E);
            X3 = _mm_shuffle_epi32(X3, 0x93);
        }
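
        // Feed-forward, then un-shuffle the diagonal vector layout back into standard word order
        // and XOR the keystream with the message.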
        X0 = _mm_add_epi32(X0s, X0);
        X1 = _mm_add_epi32(X1s, X1);
        X2 = _mm_add_epi32(X2s, X2);
        X3 = _mm_add_epi32(X3s, X3);
        __m128i k02 = _mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32));
        __m128i k20 = _mm_or_si128(_mm_and_si128(X2, maskLo32), _mm_and_si128(X1, maskHi32));
        __m128i k13 = _mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32));
        __m128i k31 = _mm_or_si128(_mm_and_si128(X3, maskLo32), _mm_and_si128(X2, maskHi32));
        k02 = _mm_shuffle_epi32(k02, _MM_SHUFFLE(0, 1, 2, 3));
        k13 = _mm_shuffle_epi32(k13, _MM_SHUFFLE(0, 1, 2, 3));
        _mm_storeu_si128(reinterpret_cast<__m128i *>(c), _mm_xor_si128(_mm_unpackhi_epi64(k02, k20), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m))));
        _mm_storeu_si128(reinterpret_cast<__m128i *>(c) + 1, _mm_xor_si128(_mm_unpackhi_epi64(k13, k31), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m) + 1)));
        _mm_storeu_si128(reinterpret_cast<__m128i *>(c) + 2, _mm_xor_si128(_mm_unpacklo_epi64(k20, k02), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m) + 2)));
        _mm_storeu_si128(reinterpret_cast<__m128i *>(c) + 3, _mm_xor_si128(_mm_unpacklo_epi64(k31, k13), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m) + 3)));
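
        // Restore the saved input state and advance the 32-bit block counter (lane 0 of X2).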
        X0 = X0s;
        X1 = X1s;
        X2 = _mm_add_epi32(X2s, add1);
        X3 = X3s;
#else
        x0 = j0;
        x1 = j1;
        x2 = j2;
        x3 = j3;
        x4 = j4;
        x5 = j5;
        x6 = j6;
        x7 = j7;
        x8 = j8;
        x9 = j9;
        x10 = j10;
        x11 = j11;
        x12 = j12;
        x13 = j13;
        x14 = j14;
        x15 = j15;
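
        // Portable path: four column quarter-rounds followed by four row quarter-rounds per iteration.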
        for (unsigned int rr = 0; rr < (R / 2); ++rr) {
            x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
            x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
            x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
            x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
            x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
            x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
            x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
            x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
            x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
            x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
            x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
            x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
            x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
            x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
            x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
            x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
            x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
            x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
            x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
            x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
            x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
            x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
            x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
            x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
            x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
            x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
            x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
            x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
            x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
            x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
            x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
            x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
        }
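
        // Feed-forward: add the original state words back into the round output.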
        x0 = PLUS(x0,j0);
        x1 = PLUS(x1,j1);
        x2 = PLUS(x2,j2);
        x3 = PLUS(x3,j3);
        x4 = PLUS(x4,j4);
        x5 = PLUS(x5,j5);
        x6 = PLUS(x6,j6);
        x7 = PLUS(x7,j7);
        x8 = PLUS(x8,j8);
        x9 = PLUS(x9,j9);
        x10 = PLUS(x10,j10);
        x11 = PLUS(x11,j11);
        x12 = PLUS(x12,j12);
        x13 = PLUS(x13,j13);
        x14 = PLUS(x14,j14);
        x15 = PLUS(x15,j15);
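
        // XOR the keystream block with the input and store the result little-endian.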
        U32TO8_LITTLE(c + 0,XOR(x0,U8TO32_LITTLE(m + 0)));
        U32TO8_LITTLE(c + 4,XOR(x1,U8TO32_LITTLE(m + 4)));
        U32TO8_LITTLE(c + 8,XOR(x2,U8TO32_LITTLE(m + 8)));
        U32TO8_LITTLE(c + 12,XOR(x3,U8TO32_LITTLE(m + 12)));
        U32TO8_LITTLE(c + 16,XOR(x4,U8TO32_LITTLE(m + 16)));
        U32TO8_LITTLE(c + 20,XOR(x5,U8TO32_LITTLE(m + 20)));
        U32TO8_LITTLE(c + 24,XOR(x6,U8TO32_LITTLE(m + 24)));
        U32TO8_LITTLE(c + 28,XOR(x7,U8TO32_LITTLE(m + 28)));
        U32TO8_LITTLE(c + 32,XOR(x8,U8TO32_LITTLE(m + 32)));
        U32TO8_LITTLE(c + 36,XOR(x9,U8TO32_LITTLE(m + 36)));
        U32TO8_LITTLE(c + 40,XOR(x10,U8TO32_LITTLE(m + 40)));
        U32TO8_LITTLE(c + 44,XOR(x11,U8TO32_LITTLE(m + 44)));
        U32TO8_LITTLE(c + 48,XOR(x12,U8TO32_LITTLE(m + 48)));
        U32TO8_LITTLE(c + 52,XOR(x13,U8TO32_LITTLE(m + 52)));
        U32TO8_LITTLE(c + 56,XOR(x14,U8TO32_LITTLE(m + 56)));
        U32TO8_LITTLE(c + 60,XOR(x15,U8TO32_LITTLE(m + 60)));
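
        // Advance the low 32 bits of the block counter (state word 8).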
        ++j8;
#endif
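
        // Advance to the next block, or finish: a short final block is copied back out of tmp[]
        // and the updated counter is written back into the caller's state.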
        if (likely(bytes > 64)) {
            bytes -= 64;
            c += 64;
            m += 64;
        } else {
            if (bytes < 64) {
                for (unsigned int i = 0; i < bytes; ++i)
                    ctarget[i] = c[i];
            }
#ifdef ZT_SALSA20_SSE
            state->v[2] = X2;
#else
            state->i[8] = j8;
#endif
            return;
        }
    }
}
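
// Public wrappers selecting the round count: Salsa20/12 and Salsa20/20.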
void Salsa20::crypt12(const void *in, void *out, unsigned int bytes) noexcept
{
    p_salsaCrypt< 12 >(reinterpret_cast<p_SalsaState *>(&_state), reinterpret_cast<const uint8_t *>(in), reinterpret_cast<uint8_t *>(out), bytes);
}

void Salsa20::crypt20(const void *in, void *out, unsigned int bytes) noexcept
{
    p_salsaCrypt< 20 >(reinterpret_cast<p_SalsaState *>(&_state), reinterpret_cast<const uint8_t *>(in), reinterpret_cast<uint8_t *>(out), bytes);
}
} // namespace ZeroTier