Salsa20.cpp

/*
 * Based on public domain code available at: http://cr.yp.to/snuffle.html
 *
 * Modifications and C-native SSE macro based SSE implementation by
 * Adam Ierymenko <[email protected]>.
 *
 * Since the original was public domain, this is too.
 */

#include "Constants.hpp"
#include "Salsa20.hpp"
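
// Core 32-bit word operations of the Salsa20 quarter-round: left-rotate,
// XOR, and addition modulo 2^32.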
#define ROTATE(v,c) (((v) << (c)) | ((v) >> (32 - (c))))
#define XOR(v,w) ((v) ^ (w))
#define PLUS(v,w) ((uint32_t)((v) + (w)))

// Set up load/store macros with appropriate endianness (we don't use these in SSE mode)
#ifndef ZT_SALSA20_SSE

#if __BYTE_ORDER == __LITTLE_ENDIAN
#ifdef ZT_NO_TYPE_PUNNING
// Slower version that does not use type punning
#define U8TO32_LITTLE(p) ( ((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24) )
static inline void U32TO8_LITTLE(uint8_t *const c,const uint32_t v) { c[0] = (uint8_t)v; c[1] = (uint8_t)(v >> 8); c[2] = (uint8_t)(v >> 16); c[3] = (uint8_t)(v >> 24); }
#else
// Fast version that just does 32-bit load/store
#define U8TO32_LITTLE(p) (*((const uint32_t *)((const void *)(p))))
#define U32TO8_LITTLE(c,v) *((uint32_t *)((void *)(c))) = (v)
#endif // ZT_NO_TYPE_PUNNING
#else // __BYTE_ORDER == __BIG_ENDIAN (we don't support anything else... does MIDDLE_ENDIAN even still exist?)
#ifdef __GNUC__
// Use GNUC builtin bswap macros on big-endian machines if available
#define U8TO32_LITTLE(p) __builtin_bswap32(*((const uint32_t *)((const void *)(p))))
#define U32TO8_LITTLE(c,v) *((uint32_t *)((void *)(c))) = __builtin_bswap32((v))
#else // no __GNUC__
// Otherwise do it the slow, manual way on BE machines
#define U8TO32_LITTLE(p) ( ((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24) )
static inline void U32TO8_LITTLE(uint8_t *const c,const uint32_t v) { c[0] = (uint8_t)v; c[1] = (uint8_t)(v >> 8); c[2] = (uint8_t)(v >> 16); c[3] = (uint8_t)(v >> 24); }
#endif // __GNUC__ or not
#endif // __BYTE_ORDER little or big?

#endif // !ZT_SALSA20_SSE

// Statically compute and define SSE constants
#ifdef ZT_SALSA20_SSE
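// maskLo32 selects the low 32-bit lane of each 64-bit half and maskHi32 the
// high lane; encrypt() uses them below to de-interleave keystream words after
// the vectorized rounds.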
class _s20sseconsts
{
public:
	_s20sseconsts()
	{
		maskLo32 = _mm_shuffle_epi32(_mm_cvtsi32_si128(-1), _MM_SHUFFLE(1, 0, 1, 0));
		maskHi32 = _mm_slli_epi64(maskLo32, 32);
	}
	__m128i maskLo32,maskHi32;
};
static const _s20sseconsts _S20SSECONSTANTS;
#endif

namespace ZeroTier {

void Salsa20::init(const void *key,unsigned int kbits,const void *iv,unsigned int rounds)
	throw()
{
#ifdef ZT_SALSA20_SSE
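	// The 4x4 uint32 state is stored in a shuffled order here so that each
	// __m128i row of _state.v can be processed with whole-vector operations
	// in encrypt(); hence the non-sequential indices below.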
	const uint32_t *k = (const uint32_t *)key;

	_state.i[0] = 0x61707865;
	_state.i[3] = 0x6b206574;
	_state.i[13] = k[0];
	_state.i[10] = k[1];
	_state.i[7] = k[2];
	_state.i[4] = k[3];
	if (kbits == 256) {
		k += 4;
		_state.i[1] = 0x3320646e;
		_state.i[2] = 0x79622d32;
	} else {
		_state.i[1] = 0x3120646e;
		_state.i[2] = 0x79622d36;
	}
	_state.i[15] = k[0];
	_state.i[12] = k[1];
	_state.i[9] = k[2];
	_state.i[6] = k[3];
	_state.i[14] = ((const uint32_t *)iv)[0];
	_state.i[11] = ((const uint32_t *)iv)[1];
	_state.i[5] = 0;
	_state.i[8] = 0;
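	// In this reordered layout, words 8 and 5 hold the 64-bit block counter
	// (low and high halves respectively); see the increment in encrypt().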
#else
	const char *constants;
	const uint8_t *k = (const uint8_t *)key;

	_state.i[1] = U8TO32_LITTLE(k + 0);
	_state.i[2] = U8TO32_LITTLE(k + 4);
	_state.i[3] = U8TO32_LITTLE(k + 8);
	_state.i[4] = U8TO32_LITTLE(k + 12);
	if (kbits == 256) { /* recommended */
		k += 16;
		constants = "expand 32-byte k";
	} else { /* kbits == 128 */
		constants = "expand 16-byte k";
	}
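	// "expand 32-byte k" and "expand 16-byte k" are the standard Salsa20
	// sigma/tau constants from the reference implementation.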
	_state.i[5] = U8TO32_LITTLE(constants + 4);
	_state.i[6] = U8TO32_LITTLE(((const uint8_t *)iv) + 0);
	_state.i[7] = U8TO32_LITTLE(((const uint8_t *)iv) + 4);
	_state.i[8] = 0;
	_state.i[9] = 0;
	_state.i[10] = U8TO32_LITTLE(constants + 8);
	_state.i[11] = U8TO32_LITTLE(k + 0);
	_state.i[12] = U8TO32_LITTLE(k + 4);
	_state.i[13] = U8TO32_LITTLE(k + 8);
	_state.i[14] = U8TO32_LITTLE(k + 12);
	_state.i[15] = U8TO32_LITTLE(constants + 12);
	_state.i[0] = U8TO32_LITTLE(constants + 0);
#endif
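	// Each iteration of the main loop in encrypt() performs two double-rounds
	// (four Salsa20 rounds), so rounds must be a multiple of 4 (e.g. 12 or 20).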
	_roundsDiv4 = rounds / 4;
}
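
/*
 * Minimal usage sketch (hypothetical caller; see Salsa20.hpp for the actual
 * declarations):
 *
 *   uint8_t key[32]; // 256-bit key
 *   uint8_t iv[8];   // 64-bit nonce
 *   uint8_t buf[1024];
 *   ZeroTier::Salsa20 s20;
 *   s20.init(key,256,iv,12);          // Salsa20/12; pass 20 for full Salsa20
 *   s20.encrypt(buf,buf,sizeof(buf)); // XORs keystream in place; decryption
 *                                     // is the same operation
 */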

void Salsa20::encrypt(const void *in,void *out,unsigned int bytes)
	throw()
{
	uint8_t tmp[64];
	const uint8_t *m = (const uint8_t *)in;
	uint8_t *c = (uint8_t *)out;
	uint8_t *ctarget = c;
	unsigned int i;

#ifndef ZT_SALSA20_SSE
	uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
	uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
#endif

	if (!bytes)
		return;

#ifndef ZT_SALSA20_SSE
	j0 = _state.i[0];
	j1 = _state.i[1];
	j2 = _state.i[2];
	j3 = _state.i[3];
	j4 = _state.i[4];
	j5 = _state.i[5];
	j6 = _state.i[6];
	j7 = _state.i[7];
	j8 = _state.i[8];
	j9 = _state.i[9];
	j10 = _state.i[10];
	j11 = _state.i[11];
	j12 = _state.i[12];
	j13 = _state.i[13];
	j14 = _state.i[14];
	j15 = _state.i[15];
#endif

	for (;;) {
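		// For a trailing partial block, work in a 64-byte scratch buffer so a
		// full keystream block can be generated; the prefix is copied back to
		// the real destination (ctarget) once the block is done.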
		if (bytes < 64) {
			for (i = 0;i < bytes;++i)
				tmp[i] = m[i];
			m = tmp;
			ctarget = c;
			c = tmp;
		}

#ifdef ZT_SALSA20_SSE
		__m128i X0 = _mm_loadu_si128((const __m128i *)&(_state.v[0]));
		__m128i X1 = _mm_loadu_si128((const __m128i *)&(_state.v[1]));
		__m128i X2 = _mm_loadu_si128((const __m128i *)&(_state.v[2]));
		__m128i X3 = _mm_loadu_si128((const __m128i *)&(_state.v[3]));
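		// Save the input state; the feed-forward below adds it back to the
		// output of the rounds.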
		__m128i X0s = X0;
		__m128i X1s = X1;
		__m128i X2s = X2;
		__m128i X3s = X3;

		for (i=0;i<_roundsDiv4;++i) {
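			// SSE2 has no rotate instruction, so each ROTATE(a+b,c) is
			// emulated as two shifts XORed in. Each loop iteration performs
			// two double-rounds (four Salsa20 rounds).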
			__m128i T = _mm_add_epi32(X0, X3);
			X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7));
			X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 25));
			T = _mm_add_epi32(X1, X0);
			X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
			X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
			T = _mm_add_epi32(X2, X1);
			X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 13));
			X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 19));
			T = _mm_add_epi32(X3, X2);
			X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
			X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
			X1 = _mm_shuffle_epi32(X1, 0x93);
			X2 = _mm_shuffle_epi32(X2, 0x4E);
			X3 = _mm_shuffle_epi32(X3, 0x39);
			T = _mm_add_epi32(X0, X1);
			X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 7));
			X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 25));
			T = _mm_add_epi32(X3, X0);
			X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
			X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
			T = _mm_add_epi32(X2, X3);
			X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 13));
			X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 19));
			T = _mm_add_epi32(X1, X2);
			X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
			X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
			X1 = _mm_shuffle_epi32(X1, 0x39);
			X2 = _mm_shuffle_epi32(X2, 0x4E);
			X3 = _mm_shuffle_epi32(X3, 0x93);
			// --
			T = _mm_add_epi32(X0, X3);
			X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7));
			X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 25));
			T = _mm_add_epi32(X1, X0);
			X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
			X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
			T = _mm_add_epi32(X2, X1);
			X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 13));
			X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 19));
			T = _mm_add_epi32(X3, X2);
			X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
			X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
			X1 = _mm_shuffle_epi32(X1, 0x93);
			X2 = _mm_shuffle_epi32(X2, 0x4E);
			X3 = _mm_shuffle_epi32(X3, 0x39);
			T = _mm_add_epi32(X0, X1);
			X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 7));
			X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 25));
			T = _mm_add_epi32(X3, X0);
			X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
			X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
			T = _mm_add_epi32(X2, X3);
			X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 13));
			X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 19));
			T = _mm_add_epi32(X1, X2);
			X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
			X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
			X1 = _mm_shuffle_epi32(X1, 0x39);
			X2 = _mm_shuffle_epi32(X2, 0x4E);
			X3 = _mm_shuffle_epi32(X3, 0x93);
		}

		X0 = _mm_add_epi32(X0s,X0);
		X1 = _mm_add_epi32(X1s,X1);
		X2 = _mm_add_epi32(X2s,X2);
		X3 = _mm_add_epi32(X3s,X3);

		{
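			// Keystream words come out of the rounds in the reordered SSE
			// layout; recombine 64-bit halves (k02,k13,k20,k31) to restore
			// canonical block order before XORing with the message.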
			__m128i k02 = _mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32));
			k02 = _mm_shuffle_epi32(k02, _MM_SHUFFLE(0, 1, 2, 3));
			__m128i k13 = _mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32));
			k13 = _mm_shuffle_epi32(k13, _MM_SHUFFLE(0, 1, 2, 3));
			__m128i k20 = _mm_or_si128(_mm_and_si128(X2, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X1, _S20SSECONSTANTS.maskHi32));
			__m128i k31 = _mm_or_si128(_mm_and_si128(X3, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X2, _S20SSECONSTANTS.maskHi32));
			const float *const mv = (const float *)m;
			float *const cv = (float *)c;
			_mm_storeu_ps(cv,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k02,k20),_mm_castps_si128(_mm_loadu_ps(mv)))));
			_mm_storeu_ps(cv + 4,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k13,k31),_mm_castps_si128(_mm_loadu_ps(mv + 4)))));
			_mm_storeu_ps(cv + 8,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k20,k02),_mm_castps_si128(_mm_loadu_ps(mv + 8)))));
			_mm_storeu_ps(cv + 12,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k31,k13),_mm_castps_si128(_mm_loadu_ps(mv + 12)))));
		}

		if (!(++_state.i[8])) {
			++_state.i[5]; // state reordered for SSE
			/* stopping at 2^70 bytes per nonce is user's responsibility */
		}
#else
		x0 = j0;
		x1 = j1;
		x2 = j2;
		x3 = j3;
		x4 = j4;
		x5 = j5;
		x6 = j6;
		x7 = j7;
		x8 = j8;
		x9 = j9;
		x10 = j10;
		x11 = j11;
		x12 = j12;
		x13 = j13;
		x14 = j14;
		x15 = j15;

		for(i=0;i<_roundsDiv4;++i) {
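			// Each iteration = two double-rounds: a column round followed by
			// a row round (ROTATE is a plain 32-bit rotate here).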
			x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
			x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
			x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
			x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
			x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
			x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
			x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
			x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
			x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
			x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
			x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
			x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
			x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
			x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
			x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
			x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
			x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
			x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
			x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
			x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
			x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
			x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
			x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
			x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
			x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
			x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
			x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
			x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
			x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
			x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
			x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
			x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
			// --
			x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
			x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
			x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
			x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
			x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
			x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
			x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
			x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
			x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
			x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
			x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
			x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
			x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
			x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
			x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
			x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
			x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
			x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
			x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
			x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
			x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
			x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
			x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
			x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
			x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
			x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
			x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
			x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
			x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
			x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
			x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
			x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
		}
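		// Feed-forward: add the input state back into the round output, then
		// XOR the resulting keystream block with the message.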
		x0 = PLUS(x0,j0);
		x1 = PLUS(x1,j1);
		x2 = PLUS(x2,j2);
		x3 = PLUS(x3,j3);
		x4 = PLUS(x4,j4);
		x5 = PLUS(x5,j5);
		x6 = PLUS(x6,j6);
		x7 = PLUS(x7,j7);
		x8 = PLUS(x8,j8);
		x9 = PLUS(x9,j9);
		x10 = PLUS(x10,j10);
		x11 = PLUS(x11,j11);
		x12 = PLUS(x12,j12);
		x13 = PLUS(x13,j13);
		x14 = PLUS(x14,j14);
		x15 = PLUS(x15,j15);

		U32TO8_LITTLE(c + 0,XOR(x0,U8TO32_LITTLE(m + 0)));
		U32TO8_LITTLE(c + 4,XOR(x1,U8TO32_LITTLE(m + 4)));
		U32TO8_LITTLE(c + 8,XOR(x2,U8TO32_LITTLE(m + 8)));
		U32TO8_LITTLE(c + 12,XOR(x3,U8TO32_LITTLE(m + 12)));
		U32TO8_LITTLE(c + 16,XOR(x4,U8TO32_LITTLE(m + 16)));
		U32TO8_LITTLE(c + 20,XOR(x5,U8TO32_LITTLE(m + 20)));
		U32TO8_LITTLE(c + 24,XOR(x6,U8TO32_LITTLE(m + 24)));
		U32TO8_LITTLE(c + 28,XOR(x7,U8TO32_LITTLE(m + 28)));
		U32TO8_LITTLE(c + 32,XOR(x8,U8TO32_LITTLE(m + 32)));
		U32TO8_LITTLE(c + 36,XOR(x9,U8TO32_LITTLE(m + 36)));
		U32TO8_LITTLE(c + 40,XOR(x10,U8TO32_LITTLE(m + 40)));
		U32TO8_LITTLE(c + 44,XOR(x11,U8TO32_LITTLE(m + 44)));
		U32TO8_LITTLE(c + 48,XOR(x12,U8TO32_LITTLE(m + 48)));
		U32TO8_LITTLE(c + 52,XOR(x13,U8TO32_LITTLE(m + 52)));
		U32TO8_LITTLE(c + 56,XOR(x14,U8TO32_LITTLE(m + 56)));
		U32TO8_LITTLE(c + 60,XOR(x15,U8TO32_LITTLE(m + 60)));

		if (!(++j8)) {
			++j9;
			/* stopping at 2^70 bytes per nonce is user's responsibility */
		}
#endif

		if (bytes <= 64) {
			if (bytes < 64) {
				for (i = 0;i < bytes;++i)
					ctarget[i] = c[i];
			}
#ifndef ZT_SALSA20_SSE
			_state.i[8] = j8;
			_state.i[9] = j9;
#endif
			return;
		}

		bytes -= 64;
		c += 64;
		m += 64;
	}
}

} // namespace ZeroTier