/* AES.hpp */
/*
 * Copyright (c)2019 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2023-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
  13. #ifndef ZT_AES_HPP
  14. #define ZT_AES_HPP
  15. #include "Constants.hpp"
  16. #include "Utils.hpp"
  17. #if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
  18. #include <wmmintrin.h>
  19. #include <emmintrin.h>
  20. #include <smmintrin.h>
  21. #define ZT_AES_AESNI 1
  22. #endif
  23. #if defined(_M_ARM64) || defined(__aarch64__) || defined(__aarch64) || defined(__AARCH64__)
  24. #include <arm64intr.h>
  25. #include <arm64_neon.h>
  26. #ifndef ZT_AES_ARMNEON
  27. #define ZT_AES_ARMNEON 1
  28. #endif
  29. #if defined(__GNUC__) && !defined(__apple_build_version__) && (defined(__ARM_ACLE) || defined(__ARM_FEATURE_CRYPTO))
  30. #include <arm_acle.h>
  31. #endif
  32. #endif
  33. #define ZT_AES_KEY_SIZE 32
  34. #define ZT_AES_BLOCK_SIZE 16
  35. namespace ZeroTier {
  36. /**
  37. * AES-256 and AES-GCM AEAD
  38. */
  39. class AES
  40. {
  41. public:
  42. /**
  43. * This will be true if your platform's type of AES acceleration is supported on this machine
  44. */
  45. static const bool HW_ACCEL;
  46. inline AES() {}
  47. inline AES(const uint8_t key[32]) { this->init(key); }
  48. inline ~AES() { Utils::burn(&_k,sizeof(_k)); }
  49. inline void init(const uint8_t key[32])
  50. {
  51. #ifdef ZT_AES_AESNI
  52. if (likely(HW_ACCEL)) {
  53. _init_aesni(key);
  54. return;
  55. }
  56. #endif
  57. _initSW(key);
  58. }
  59. inline void encrypt(const uint8_t in[16],uint8_t out[16]) const
  60. {
  61. #ifdef ZT_AES_AESNI
  62. if (likely(HW_ACCEL)) {
  63. _encrypt_aesni(in,out);
  64. return;
  65. }
  66. #endif
  67. _encryptSW(in,out);
  68. }
  69. inline void gmac(const uint8_t iv[12],const void *in,const unsigned int len,uint8_t out[16]) const
  70. {
  71. #ifdef ZT_AES_AESNI
  72. if (likely(HW_ACCEL)) {
  73. _gmac_aesni(iv,(const uint8_t *)in,len,out);
  74. return;
  75. }
  76. #endif
  77. }
  78. inline void ctr(const uint8_t iv[16],const void *in,unsigned int len,void *out) const
  79. {
  80. #ifdef ZT_AES_AESNI
  81. if (likely(HW_ACCEL)) {
  82. _crypt_ctr_aesni(iv,(const uint8_t *)in,len,(uint8_t *)out);
  83. return;
  84. }
  85. #endif
  86. uint64_t ctr[2],cenc[2];
  87. memcpy(ctr,iv,16);
  88. uint64_t bctr = Utils::ntoh(ctr[1]);
  89. const uint8_t *i = (const uint8_t *)in;
  90. uint8_t *o = (uint8_t *)out;
  91. while (len >= 16) {
  92. _encryptSW((const uint8_t *)ctr,(uint8_t *)cenc);
  93. ctr[1] = Utils::hton(++bctr);
  94. for(unsigned int k=0;k<16;++k)
  95. *(o++) = *(i++) ^ ((uint8_t *)cenc)[k];
  96. len -= 16;
  97. }
  98. if (len) {
  99. _encryptSW((const uint8_t *)ctr,(uint8_t *)cenc);
  100. for(unsigned int k=0;k<len;++k)
  101. *(o++) = *(i++) ^ ((uint8_t *)cenc)[k];
  102. }
  103. }
  104. /**
  105. * Perform AES-256-GMAC-CTR encryption
  106. *
  107. * This mode combines the two standard modes AES256-GMAC and AES256-CTR to
  108. * yield a mode similar to AES256-GCM-SIV that is resistant to accidental
  109. * message IV duplication.
  110. *
  111. * @param iv 64-bit message IV
  112. * @param in Message plaintext
  113. * @param len Length of plaintext
  114. * @param out Output buffer to receive ciphertext
  115. * @param tag Output buffer to receive 64-bit authentication tag
  116. */
  117. inline void ztGmacCtrEncrypt(const uint8_t iv[8],const void *in,unsigned int len,void *out,uint8_t tag[8])
  118. {
  119. uint8_t ctrIv[16],gmacIv[12];
  120. // (1) Compute AES256-GMAC(in) using a 96-bit IV constructed from
  121. // the 64-bit supplied IV and the message size.
  122. #ifdef ZT_NO_TYPE_PUNNING
  123. for(unsigned int i=0;i<8;++i) gmacIv[i] = iv[i];
  124. gmacIv[8] = (uint8_t)(len >> 24);
  125. gmacIv[9] = (uint8_t)(len >> 16);
  126. gmacIv[10] = (uint8_t)(len >> 8);
  127. gmacIv[11] = (uint8_t)len;
  128. #else
  129. *((uint64_t *)gmacIv) = *((const uint64_t *)iv);
  130. *((uint32_t *)(gmacIv + 8)) = Utils::hton((uint32_t)len);
  131. #endif
  132. gmac(gmacIv,in,len,ctrIv);
  133. // (2) The first 64 bits of GMAC output are the auth tag. Create
  134. // a secret synthetic AES256-CTR IV by encrypting these and the
  135. // original supplied IV.
  136. #ifdef ZT_NO_TYPE_PUNNING
  137. for(unsigned int i=0;i<8;++i) tag[i] = ctrIv[i];
  138. for(unsigned int i=0;i<8;++i) ctrIv[i+8] = iv[i];
  139. #else
  140. *((uint64_t *)tag) = *((const uint64_t *)ctrIv);
  141. *((uint64_t *)(ctrIv + 8)) = *((const uint64_t *)iv);
  142. #endif
  143. encrypt(ctrIv,ctrIv);
  144. // (3) Encrypt input using AES256-CTR
  145. ctr(ctrIv,in,len,out);
  146. }
  147. /**
  148. * Decrypt a message encrypted with AES-256-GMAC-CTR and check its authenticity
  149. *
  150. * @param iv 64-bit message IV
  151. * @param in Message ciphertext
  152. * @param len Length of ciphertext
  153. * @param out Output buffer to receive plaintext
  154. * @param tag Authentication tag supplied with message
  155. * @return True if authentication tags match and message appears authentic
  156. */
  157. inline bool ztGmacCtrDecrypt(const uint8_t iv[8],const void *in,unsigned int len,void *out,const uint8_t tag[8])
  158. {
  159. uint8_t ctrIv[16],gmacOut[16],gmacIv[12];
  160. // (1) Re-create the original secret synthetic AES256-CTR IV.
  161. #ifdef ZT_NO_TYPE_PUNNING
  162. for(unsigned int i=0;i<8;++i) ctrIv[i] = tag[i];
  163. for(unsigned int i=0;i<8;++i) ctrIv[i+8] = iv[i];
  164. #else
  165. *((uint64_t *)ctrIv) = *((const uint8_t *)tag);
  166. *((uint64_t *)(ctrIv + 8)) = *((const uint64_t *)iv);
  167. #endif
  168. encrypt(ctrIv,ctrIv);
  169. // (2) Decrypt input using AES256-CTR
  170. ctr(ctrIv,in,len,out);
  171. // (3) Compute AES256-GMAC(out) using the re-created 96-bit
  172. // GMAC IV built from the message IV and the message size.
  173. #ifdef ZT_NO_TYPE_PUNNING
  174. for(unsigned int i=0;i<8;++i) gmacIv[i] = iv[i];
  175. gmacIv[8] = (uint8_t)(len >> 24);
  176. gmacIv[9] = (uint8_t)(len >> 16);
  177. gmacIv[10] = (uint8_t)(len >> 8);
  178. gmacIv[11] = (uint8_t)len;
  179. #else
  180. *((uint64_t *)gmacIv) = *((const uint64_t *)iv);
  181. *((uint32_t *)(gmacIv + 8)) = Utils::hton((uint32_t)len);
  182. #endif
  183. gmac(gmacIv,out,len,gmacOut);
  184. // (4) Compare first 64 bits of GMAC output with tag.
  185. #ifdef ZT_NO_TYPE_PUNNING
  186. return Utils::secureEq(gmacOut,tag,8);
  187. #else
  188. return (*((const uint64_t *)gmacOut) == *((const uint64_t *)tag));
  189. #endif
  190. }
  191. private:
  192. static const uint32_t Te0[256];
  193. static const uint32_t Te1[256];
  194. static const uint32_t Te2[256];
  195. static const uint32_t Te3[256];
  196. static const uint32_t rcon[10];
  197. void _initSW(const uint8_t key[32]);
  198. void _encryptSW(const uint8_t in[16],uint8_t out[16]) const;
  199. /**************************************************************************/
  200. union {
  201. #ifdef ZT_AES_ARMNEON
  202. struct {
  203. uint32x4_t k[15];
  204. } neon;
  205. #endif
  206. #ifdef ZT_AES_AESNI
  207. struct {
  208. __m128i k[15];
  209. __m128i h,hh,hhh,hhhh;
  210. } ni;
  211. #endif
  212. struct {
  213. uint32_t ek[30];
  214. } sw;
  215. } _k;
  216. /**************************************************************************/
  217. #ifdef ZT_AES_ARMNEON /******************************************************/
  218. static inline void _aes_256_expAssist_armneon(uint32x4_t prev1,uint32x4_t prev2,uint32_t rcon,uint32x4_t *e1,uint32x4_t *e2)
  219. {
  220. uint32_t round1[4], round2[4], prv1[4], prv2[4];
  221. vst1q_u32(prv1, prev1);
  222. vst1q_u32(prv2, prev2);
  223. round1[0] = sub_word(rot_word(prv2[3])) ^ rcon ^ prv1[0];
  224. round1[1] = sub_word(rot_word(round1[0])) ^ rcon ^ prv1[1];
  225. round1[2] = sub_word(rot_word(round1[1])) ^ rcon ^ prv1[2];
  226. round1[3] = sub_word(rot_word(round1[2])) ^ rcon ^ prv1[3];
  227. round2[0] = sub_word(rot_word(round1[3])) ^ rcon ^ prv2[0];
  228. round2[1] = sub_word(rot_word(round2[0])) ^ rcon ^ prv2[1];
  229. round2[2] = sub_word(rot_word(round2[1])) ^ rcon ^ prv2[2];
  230. round2[3] = sub_word(rot_word(round2[2])) ^ rcon ^ prv2[3];
  231. *e1 = vld1q_u3(round1);
  232. *e2 = vld1q_u3(round2);
  233. //uint32x4_t expansion[2] = {vld1q_u3(round1), vld1q_u3(round2)};
  234. //return expansion;
  235. }
  236. inline void _init_armneon(uint8x16_t encKey)
  237. {
  238. uint32x4_t *schedule = _k.neon.k;
  239. uint32x4_t e1,e2;
  240. (*schedule)[0] = vld1q_u32(encKey);
  241. (*schedule)[1] = vld1q_u32(encKey + 16);
  242. _aes_256_expAssist_armneon((*schedule)[0],(*schedule)[1],0x01,&e1,&e2);
  243. (*schedule)[2] = e1; (*schedule)[3] = e2;
  244. _aes_256_expAssist_armneon((*schedule)[2],(*schedule)[3],0x01,&e1,&e2);
  245. (*schedule)[4] = e1; (*schedule)[5] = e2;
  246. _aes_256_expAssist_armneon((*schedule)[4],(*schedule)[5],0x01,&e1,&e2);
  247. (*schedule)[6] = e1; (*schedule)[7] = e2;
  248. _aes_256_expAssist_armneon((*schedule)[6],(*schedule)[7],0x01,&e1,&e2);
  249. (*schedule)[8] = e1; (*schedule)[9] = e2;
  250. _aes_256_expAssist_armneon((*schedule)[8],(*schedule)[9],0x01,&e1,&e2);
  251. (*schedule)[10] = e1; (*schedule)[11] = e2;
  252. _aes_256_expAssist_armneon((*schedule)[10],(*schedule)[11],0x01,&e1,&e2);
  253. (*schedule)[12] = e1; (*schedule)[13] = e2;
  254. _aes_256_expAssist_armneon((*schedule)[12],(*schedule)[13],0x01,&e1,&e2);
  255. (*schedule)[14] = e1;
  256. /*
  257. doubleRound = _aes_256_expAssist_armneon((*schedule)[0], (*schedule)[1], 0x01);
  258. (*schedule)[2] = doubleRound[0];
  259. (*schedule)[3] = doubleRound[1];
  260. doubleRound = _aes_256_expAssist_armneon((*schedule)[2], (*schedule)[3], 0x02);
  261. (*schedule)[4] = doubleRound[0];
  262. (*schedule)[5] = doubleRound[1];
  263. doubleRound = _aes_256_expAssist_armneon((*schedule)[4], (*schedule)[5], 0x04);
  264. (*schedule)[6] = doubleRound[0];
  265. (*schedule)[7] = doubleRound[1];
  266. doubleRound = _aes_256_expAssist_armneon((*schedule)[6], (*schedule)[7], 0x08);
  267. (*schedule)[8] = doubleRound[0];
  268. (*schedule)[9] = doubleRound[1];
  269. doubleRound = _aes_256_expAssist_armneon((*schedule)[8], (*schedule)[9], 0x10);
  270. (*schedule)[10] = doubleRound[0];
  271. (*schedule)[11] = doubleRound[1];
  272. doubleRound = _aes_256_expAssist_armneon((*schedule)[10], (*schedule)[11], 0x20);
  273. (*schedule)[12] = doubleRound[0];
  274. (*schedule)[13] = doubleRound[1];
  275. doubleRound = _aes_256_expAssist_armneon((*schedule)[12], (*schedule)[13], 0x40);
  276. (*schedule)[14] = doubleRound[0];
  277. */
  278. }
  279. inline void _encrypt_armneon(uint8x16_t *data) const
  280. {
  281. *data = veorq_u8(*data, _k.neon.k[0]);
  282. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[1]));
  283. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[2]));
  284. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[3]));
  285. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[4]));
  286. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[5]));
  287. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[6]));
  288. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[7]));
  289. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[8]));
  290. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[9]));
  291. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[10]));
  292. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[11]));
  293. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[12]));
  294. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[13]));
  295. *data = vaeseq_u8(*data, _k.neon.k[14]);
  296. }
  297. #endif /*********************************************************************/
  298. #ifdef ZT_AES_AESNI /********************************************************/
  299. static ZT_ALWAYS_INLINE __m128i _init256_1_aesni(__m128i a,__m128i b)
  300. {
  301. __m128i x,y;
  302. b = _mm_shuffle_epi32(b,0xff);
  303. y = _mm_slli_si128(a,0x04);
  304. x = _mm_xor_si128(a,y);
  305. y = _mm_slli_si128(y,0x04);
  306. x = _mm_xor_si128(x,y);
  307. y = _mm_slli_si128(y,0x04);
  308. x = _mm_xor_si128(x,y);
  309. x = _mm_xor_si128(x,b);
  310. return x;
  311. }
  312. static ZT_ALWAYS_INLINE __m128i _init256_2_aesni(__m128i a,__m128i b)
  313. {
  314. __m128i x,y,z;
  315. y = _mm_aeskeygenassist_si128(a,0x00);
  316. z = _mm_shuffle_epi32(y,0xaa);
  317. y = _mm_slli_si128(b,0x04);
  318. x = _mm_xor_si128(b,y);
  319. y = _mm_slli_si128(y,0x04);
  320. x = _mm_xor_si128(x,y);
  321. y = _mm_slli_si128(y,0x04);
  322. x = _mm_xor_si128(x,y);
  323. x = _mm_xor_si128(x,z);
  324. return x;
  325. }
  326. ZT_ALWAYS_INLINE void _init_aesni(const uint8_t key[32])
  327. {
  328. __m128i t1,t2;
  329. _k.ni.k[0] = t1 = _mm_loadu_si128((const __m128i *)key);
  330. _k.ni.k[1] = t2 = _mm_loadu_si128((const __m128i *)(key+16));
  331. _k.ni.k[2] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x01));
  332. _k.ni.k[3] = t2 = _init256_2_aesni(t1,t2);
  333. _k.ni.k[4] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x02));
  334. _k.ni.k[5] = t2 = _init256_2_aesni(t1,t2);
  335. _k.ni.k[6] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x04));
  336. _k.ni.k[7] = t2 = _init256_2_aesni(t1,t2);
  337. _k.ni.k[8] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x08));
  338. _k.ni.k[9] = t2 = _init256_2_aesni(t1,t2);
  339. _k.ni.k[10] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x10));
  340. _k.ni.k[11] = t2 = _init256_2_aesni(t1,t2);
  341. _k.ni.k[12] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x20));
  342. _k.ni.k[13] = t2 = _init256_2_aesni(t1,t2);
  343. _k.ni.k[14] = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x40));
  344. __m128i h = _mm_xor_si128(_mm_setzero_si128(),_k.ni.k[0]);
  345. h = _mm_aesenc_si128(h,_k.ni.k[1]);
  346. h = _mm_aesenc_si128(h,_k.ni.k[2]);
  347. h = _mm_aesenc_si128(h,_k.ni.k[3]);
  348. h = _mm_aesenc_si128(h,_k.ni.k[4]);
  349. h = _mm_aesenc_si128(h,_k.ni.k[5]);
  350. h = _mm_aesenc_si128(h,_k.ni.k[6]);
  351. h = _mm_aesenc_si128(h,_k.ni.k[7]);
  352. h = _mm_aesenc_si128(h,_k.ni.k[8]);
  353. h = _mm_aesenc_si128(h,_k.ni.k[9]);
  354. h = _mm_aesenc_si128(h,_k.ni.k[10]);
  355. h = _mm_aesenc_si128(h,_k.ni.k[11]);
  356. h = _mm_aesenc_si128(h,_k.ni.k[12]);
  357. h = _mm_aesenc_si128(h,_k.ni.k[13]);
  358. h = _mm_aesenclast_si128(h,_k.ni.k[14]);
  359. __m128i hswap = _swap128_aesni(h);
  360. __m128i hh = _mult_block_aesni(hswap,h);
  361. __m128i hhh = _mult_block_aesni(hswap,hh);
  362. __m128i hhhh = _mult_block_aesni(hswap,hhh);
  363. _k.ni.h = hswap;
  364. _k.ni.hh = _swap128_aesni(hh);
  365. _k.ni.hhh = _swap128_aesni(hhh);
  366. _k.ni.hhhh = _swap128_aesni(hhhh);
  367. }
  368. ZT_ALWAYS_INLINE void _encrypt_aesni(const void *in,void *out) const
  369. {
  370. __m128i tmp;
  371. tmp = _mm_loadu_si128((const __m128i *)in);
  372. tmp = _mm_xor_si128(tmp,_k.ni.k[0]);
  373. tmp = _mm_aesenc_si128(tmp,_k.ni.k[1]);
  374. tmp = _mm_aesenc_si128(tmp,_k.ni.k[2]);
  375. tmp = _mm_aesenc_si128(tmp,_k.ni.k[3]);
  376. tmp = _mm_aesenc_si128(tmp,_k.ni.k[4]);
  377. tmp = _mm_aesenc_si128(tmp,_k.ni.k[5]);
  378. tmp = _mm_aesenc_si128(tmp,_k.ni.k[6]);
  379. tmp = _mm_aesenc_si128(tmp,_k.ni.k[7]);
  380. tmp = _mm_aesenc_si128(tmp,_k.ni.k[8]);
  381. tmp = _mm_aesenc_si128(tmp,_k.ni.k[9]);
  382. tmp = _mm_aesenc_si128(tmp,_k.ni.k[10]);
  383. tmp = _mm_aesenc_si128(tmp,_k.ni.k[11]);
  384. tmp = _mm_aesenc_si128(tmp,_k.ni.k[12]);
  385. tmp = _mm_aesenc_si128(tmp,_k.ni.k[13]);
  386. _mm_storeu_si128((__m128i *)out,_mm_aesenclast_si128(tmp,_k.ni.k[14]));
  387. }
  388. ZT_ALWAYS_INLINE void _crypt_ctr_aesni(const uint8_t iv[16],const uint8_t *in,unsigned int len,uint8_t *out) const
  389. {
  390. const __m64 iv0 = (__m64)(*((const uint64_t *)iv));
  391. uint64_t ctr = Utils::ntoh(*((const uint64_t *)(iv+8)));
  392. const __m128i k0 = _k.ni.k[0];
  393. const __m128i k1 = _k.ni.k[1];
  394. const __m128i k2 = _k.ni.k[2];
  395. const __m128i k3 = _k.ni.k[3];
  396. const __m128i k4 = _k.ni.k[4];
  397. const __m128i k5 = _k.ni.k[5];
  398. const __m128i k6 = _k.ni.k[6];
  399. const __m128i k7 = _k.ni.k[7];
  400. const __m128i k8 = _k.ni.k[8];
  401. const __m128i k9 = _k.ni.k[9];
  402. const __m128i k10 = _k.ni.k[10];
  403. const __m128i k11 = _k.ni.k[11];
  404. const __m128i k12 = _k.ni.k[12];
  405. const __m128i k13 = _k.ni.k[13];
  406. const __m128i k14 = _k.ni.k[14];
  407. while (len >= 64) {
  408. __m128i c0 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr),iv0),k0);
  409. __m128i c1 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr+1ULL),iv0),k0);
  410. __m128i c2 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr+2ULL),iv0),k0);
  411. __m128i c3 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr+3ULL),iv0),k0);
  412. ctr += 4;
  413. c0 = _mm_aesenc_si128(c0,k1);
  414. c1 = _mm_aesenc_si128(c1,k1);
  415. c2 = _mm_aesenc_si128(c2,k1);
  416. c3 = _mm_aesenc_si128(c3,k1);
  417. c0 = _mm_aesenc_si128(c0,k2);
  418. c1 = _mm_aesenc_si128(c1,k2);
  419. c2 = _mm_aesenc_si128(c2,k2);
  420. c3 = _mm_aesenc_si128(c3,k2);
  421. c0 = _mm_aesenc_si128(c0,k3);
  422. c1 = _mm_aesenc_si128(c1,k3);
  423. c2 = _mm_aesenc_si128(c2,k3);
  424. c3 = _mm_aesenc_si128(c3,k3);
  425. c0 = _mm_aesenc_si128(c0,k4);
  426. c1 = _mm_aesenc_si128(c1,k4);
  427. c2 = _mm_aesenc_si128(c2,k4);
  428. c3 = _mm_aesenc_si128(c3,k4);
  429. c0 = _mm_aesenc_si128(c0,k5);
  430. c1 = _mm_aesenc_si128(c1,k5);
  431. c2 = _mm_aesenc_si128(c2,k5);
  432. c3 = _mm_aesenc_si128(c3,k5);
  433. c0 = _mm_aesenc_si128(c0,k6);
  434. c1 = _mm_aesenc_si128(c1,k6);
  435. c2 = _mm_aesenc_si128(c2,k6);
  436. c3 = _mm_aesenc_si128(c3,k6);
  437. c0 = _mm_aesenc_si128(c0,k7);
  438. c1 = _mm_aesenc_si128(c1,k7);
  439. c2 = _mm_aesenc_si128(c2,k7);
  440. c3 = _mm_aesenc_si128(c3,k7);
  441. c0 = _mm_aesenc_si128(c0,k8);
  442. c1 = _mm_aesenc_si128(c1,k8);
  443. c2 = _mm_aesenc_si128(c2,k8);
  444. c3 = _mm_aesenc_si128(c3,k8);
  445. c0 = _mm_aesenc_si128(c0,k9);
  446. c1 = _mm_aesenc_si128(c1,k9);
  447. c2 = _mm_aesenc_si128(c2,k9);
  448. c3 = _mm_aesenc_si128(c3,k9);
  449. c0 = _mm_aesenc_si128(c0,k10);
  450. c1 = _mm_aesenc_si128(c1,k10);
  451. c2 = _mm_aesenc_si128(c2,k10);
  452. c3 = _mm_aesenc_si128(c3,k10);
  453. c0 = _mm_aesenc_si128(c0,k11);
  454. c1 = _mm_aesenc_si128(c1,k11);
  455. c2 = _mm_aesenc_si128(c2,k11);
  456. c3 = _mm_aesenc_si128(c3,k11);
  457. c0 = _mm_aesenc_si128(c0,k12);
  458. c1 = _mm_aesenc_si128(c1,k12);
  459. c2 = _mm_aesenc_si128(c2,k12);
  460. c3 = _mm_aesenc_si128(c3,k12);
  461. c0 = _mm_aesenc_si128(c0,k13);
  462. c1 = _mm_aesenc_si128(c1,k13);
  463. c2 = _mm_aesenc_si128(c2,k13);
  464. c3 = _mm_aesenc_si128(c3,k13);
  465. _mm_storeu_si128((__m128i *)out,_mm_xor_si128(_mm_loadu_si128((const __m128i *)in),_mm_aesenclast_si128(c0,k14)));
  466. _mm_storeu_si128((__m128i *)(out + 16),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 16)),_mm_aesenclast_si128(c1,k14)));
  467. _mm_storeu_si128((__m128i *)(out + 32),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 32)),_mm_aesenclast_si128(c2,k14)));
  468. _mm_storeu_si128((__m128i *)(out + 48),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 48)),_mm_aesenclast_si128(c3,k14)));
  469. in += 64;
  470. out += 64;
  471. len -= 64;
  472. }
  473. while (len >= 16) {
  474. __m128i c0 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr++),(__m64)iv0),k0);
  475. c0 = _mm_aesenc_si128(c0,k1);
  476. c0 = _mm_aesenc_si128(c0,k2);
  477. c0 = _mm_aesenc_si128(c0,k3);
  478. c0 = _mm_aesenc_si128(c0,k4);
  479. c0 = _mm_aesenc_si128(c0,k5);
  480. c0 = _mm_aesenc_si128(c0,k6);
  481. c0 = _mm_aesenc_si128(c0,k7);
  482. c0 = _mm_aesenc_si128(c0,k8);
  483. c0 = _mm_aesenc_si128(c0,k9);
  484. c0 = _mm_aesenc_si128(c0,k10);
  485. c0 = _mm_aesenc_si128(c0,k11);
  486. c0 = _mm_aesenc_si128(c0,k12);
  487. c0 = _mm_aesenc_si128(c0,k13);
  488. _mm_storeu_si128((__m128i *)out,_mm_xor_si128(_mm_loadu_si128((const __m128i *)in),_mm_aesenclast_si128(c0,k14)));
  489. in += 16;
  490. out += 16;
  491. len -= 16;
  492. }
  493. if (len) {
  494. __m128i c0 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr++),(__m64)iv0),k0);
  495. c0 = _mm_aesenc_si128(c0,k1);
  496. c0 = _mm_aesenc_si128(c0,k2);
  497. c0 = _mm_aesenc_si128(c0,k3);
  498. c0 = _mm_aesenc_si128(c0,k4);
  499. c0 = _mm_aesenc_si128(c0,k5);
  500. c0 = _mm_aesenc_si128(c0,k6);
  501. c0 = _mm_aesenc_si128(c0,k7);
  502. c0 = _mm_aesenc_si128(c0,k8);
  503. c0 = _mm_aesenc_si128(c0,k9);
  504. c0 = _mm_aesenc_si128(c0,k10);
  505. c0 = _mm_aesenc_si128(c0,k11);
  506. c0 = _mm_aesenc_si128(c0,k12);
  507. c0 = _mm_aesenc_si128(c0,k13);
  508. c0 = _mm_aesenclast_si128(c0,k14);
  509. for(unsigned int i=0;i<len;++i)
  510. out[i] = in[i] ^ ((const uint8_t *)&c0)[i];
  511. }
  512. }
  513. static ZT_ALWAYS_INLINE __m128i _swap128_aesni(__m128i x) { return _mm_shuffle_epi8(x,_mm_set_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)); }
  514. static ZT_ALWAYS_INLINE __m128i _mult_block_aesni(__m128i h,__m128i y)
  515. {
  516. y = _swap128_aesni(y);
  517. __m128i t1 = _mm_clmulepi64_si128(h,y,0x00);
  518. __m128i t2 = _mm_clmulepi64_si128(h,y,0x01);
  519. __m128i t3 = _mm_clmulepi64_si128(h,y,0x10);
  520. __m128i t4 = _mm_clmulepi64_si128(h,y,0x11);
  521. t2 = _mm_xor_si128(t2,t3);
  522. t3 = _mm_slli_si128(t2,8);
  523. t2 = _mm_srli_si128(t2,8);
  524. t1 = _mm_xor_si128(t1,t3);
  525. t4 = _mm_xor_si128(t4,t2);
  526. __m128i t5 = _mm_srli_epi32(t1,31);
  527. t1 = _mm_slli_epi32(t1,1);
  528. __m128i t6 = _mm_srli_epi32(t4,31);
  529. t4 = _mm_slli_epi32(t4,1);
  530. t3 = _mm_srli_si128(t5,12);
  531. t6 = _mm_slli_si128(t6,4);
  532. t5 = _mm_slli_si128(t5,4);
  533. t1 = _mm_or_si128(t1,t5);
  534. t4 = _mm_or_si128(t4,t6);
  535. t4 = _mm_or_si128(t4,t3);
  536. t5 = _mm_slli_epi32(t1,31);
  537. t6 = _mm_slli_epi32(t1,30);
  538. t3 = _mm_slli_epi32(t1,25);
  539. t5 = _mm_xor_si128(t5,t6);
  540. t5 = _mm_xor_si128(t5,t3);
  541. t6 = _mm_srli_si128(t5,4);
  542. t4 = _mm_xor_si128(t4,t6);
  543. t5 = _mm_slli_si128(t5,12);
  544. t1 = _mm_xor_si128(t1,t5);
  545. t4 = _mm_xor_si128(t4,t1);
  546. t5 = _mm_srli_epi32(t1,1);
  547. t2 = _mm_srli_epi32(t1,2);
  548. t3 = _mm_srli_epi32(t1,7);
  549. t4 = _mm_xor_si128(t4,t2);
  550. t4 = _mm_xor_si128(t4,t3);
  551. t4 = _mm_xor_si128(t4,t5);
  552. return _swap128_aesni(t4);
  553. }
  554. static ZT_ALWAYS_INLINE __m128i _mult4xor_aesni(__m128i h1,__m128i h2,__m128i h3,__m128i h4,__m128i d1,__m128i d2,__m128i d3,__m128i d4)
  555. {
  556. d1 = _swap128_aesni(d1);
  557. d2 = _swap128_aesni(d2);
  558. d3 = _swap128_aesni(d3);
  559. d4 = _swap128_aesni(d4);
  560. __m128i t0 = _mm_clmulepi64_si128(h1,d1,0x00);
  561. __m128i t1 = _mm_clmulepi64_si128(h2,d2,0x00);
  562. __m128i t2 = _mm_clmulepi64_si128(h3,d3,0x00);
  563. __m128i t3 = _mm_clmulepi64_si128(h4,d4,0x00);
  564. __m128i t8 = _mm_xor_si128(t0,t1);
  565. t8 = _mm_xor_si128(t8,t2);
  566. t8 = _mm_xor_si128(t8,t3);
  567. __m128i t4 = _mm_clmulepi64_si128(h1,d1,0x11);
  568. __m128i t5 = _mm_clmulepi64_si128(h2,d2,0x11);
  569. __m128i t6 = _mm_clmulepi64_si128(h3,d3,0x11);
  570. __m128i t7 = _mm_clmulepi64_si128(h4,d4,0x11);
  571. __m128i t9 = _mm_xor_si128(t4,t5);
  572. t9 = _mm_xor_si128(t9,t6);
  573. t9 = _mm_xor_si128(t9,t7);
  574. t0 = _mm_shuffle_epi32(h1,78);
  575. t4 = _mm_shuffle_epi32(d1,78);
  576. t0 = _mm_xor_si128(t0,h1);
  577. t4 = _mm_xor_si128(t4,d1);
  578. t1 = _mm_shuffle_epi32(h2,78);
  579. t5 = _mm_shuffle_epi32(d2,78);
  580. t1 = _mm_xor_si128(t1,h2);
  581. t5 = _mm_xor_si128(t5,d2);
  582. t2 = _mm_shuffle_epi32(h3,78);
  583. t6 = _mm_shuffle_epi32(d3,78);
  584. t2 = _mm_xor_si128(t2,h3);
  585. t6 = _mm_xor_si128(t6,d3);
  586. t3 = _mm_shuffle_epi32(h4,78);
  587. t7 = _mm_shuffle_epi32(d4,78);
  588. t3 = _mm_xor_si128(t3,h4);
  589. t7 = _mm_xor_si128(t7,d4);
  590. t0 = _mm_clmulepi64_si128(t0,t4,0x00);
  591. t1 = _mm_clmulepi64_si128(t1,t5,0x00);
  592. t2 = _mm_clmulepi64_si128(t2,t6,0x00);
  593. t3 = _mm_clmulepi64_si128(t3,t7,0x00);
  594. t0 = _mm_xor_si128(t0,t8);
  595. t0 = _mm_xor_si128(t0,t9);
  596. t0 = _mm_xor_si128(t1,t0);
  597. t0 = _mm_xor_si128(t2,t0);
  598. t0 = _mm_xor_si128(t3,t0);
  599. t4 = _mm_slli_si128(t0,8);
  600. t0 = _mm_srli_si128(t0,8);
  601. t3 = _mm_xor_si128(t4,t8);
  602. t6 = _mm_xor_si128(t0,t9);
  603. t7 = _mm_srli_epi32(t3,31);
  604. t8 = _mm_srli_epi32(t6,31);
  605. t3 = _mm_slli_epi32(t3,1);
  606. t6 = _mm_slli_epi32(t6,1);
  607. t9 = _mm_srli_si128(t7,12);
  608. t8 = _mm_slli_si128(t8,4);
  609. t7 = _mm_slli_si128(t7,4);
  610. t3 = _mm_or_si128(t3,t7);
  611. t6 = _mm_or_si128(t6,t8);
  612. t6 = _mm_or_si128(t6,t9);
  613. t7 = _mm_slli_epi32(t3,31);
  614. t8 = _mm_slli_epi32(t3,30);
  615. t9 = _mm_slli_epi32(t3,25);
  616. t7 = _mm_xor_si128(t7,t8);
  617. t7 = _mm_xor_si128(t7,t9);
  618. t8 = _mm_srli_si128(t7,4);
  619. t7 = _mm_slli_si128(t7,12);
  620. t3 = _mm_xor_si128(t3,t7);
  621. t2 = _mm_srli_epi32(t3,1);
  622. t4 = _mm_srli_epi32(t3,2);
  623. t5 = _mm_srli_epi32(t3,7);
  624. t2 = _mm_xor_si128(t2,t4);
  625. t2 = _mm_xor_si128(t2,t5);
  626. t2 = _mm_xor_si128(t2,t8);
  627. t3 = _mm_xor_si128(t3,t2);
  628. t6 = _mm_xor_si128(t6,t3);
  629. return _swap128_aesni(t6);
  630. }
  631. static ZT_ALWAYS_INLINE __m128i _ghash_aesni(__m128i h,__m128i y,__m128i x) { return _mult_block_aesni(h,_mm_xor_si128(y,x)); }
  632. ZT_ALWAYS_INLINE void _gmac_aesni(const uint8_t iv[12],const uint8_t *in,const unsigned int len,uint8_t out[16]) const
  633. {
  634. const __m128i *ab = (const __m128i *)in;
  635. unsigned int blocks = len / 16;
  636. unsigned int pblocks = blocks - (blocks % 4);
  637. unsigned int rem = len % 16;
  638. __m128i h1 = _k.ni.hhhh;
  639. __m128i h2 = _k.ni.hhh;
  640. __m128i h3 = _k.ni.hh;
  641. __m128i h4 = _k.ni.h;
  642. __m128i y = _mm_setzero_si128();
  643. for (unsigned int i=0;i<pblocks;i+=4) {
  644. __m128i d1 = _mm_loadu_si128(ab + i + 0);
  645. __m128i d2 = _mm_loadu_si128(ab + i + 1);
  646. __m128i d3 = _mm_loadu_si128(ab + i + 2);
  647. __m128i d4 = _mm_loadu_si128(ab + i + 3);
  648. y = _mm_xor_si128(y,d1);
  649. y = _mult4xor_aesni(h1,h2,h3,h4,y,d2,d3,d4);
  650. }
  651. for (unsigned int i=pblocks;i<blocks;++i)
  652. y = _ghash_aesni(_k.ni.h,y,_mm_loadu_si128(ab + i));
  653. if (rem) {
  654. __m128i last = _mm_setzero_si128();
  655. memcpy(&last,ab + blocks,rem);
  656. y = _ghash_aesni(_k.ni.h,y,last);
  657. }
  658. y = _ghash_aesni(_k.ni.h,y,_mm_set_epi64((__m64)0LL,(__m64)Utils::hton((uint64_t)len * (uint64_t)8)));
  659. __m128i t = _mm_xor_si128(_mm_set_epi32(0x01000000,(int)*((const uint32_t *)(iv+8)),(int)*((const uint32_t *)(iv+4)),(int)*((const uint32_t *)(iv))),_k.ni.k[0]);
  660. t = _mm_aesenc_si128(t,_k.ni.k[1]);
  661. t = _mm_aesenc_si128(t,_k.ni.k[2]);
  662. t = _mm_aesenc_si128(t,_k.ni.k[3]);
  663. t = _mm_aesenc_si128(t,_k.ni.k[4]);
  664. t = _mm_aesenc_si128(t,_k.ni.k[5]);
  665. t = _mm_aesenc_si128(t,_k.ni.k[6]);
  666. t = _mm_aesenc_si128(t,_k.ni.k[7]);
  667. t = _mm_aesenc_si128(t,_k.ni.k[8]);
  668. t = _mm_aesenc_si128(t,_k.ni.k[9]);
  669. t = _mm_aesenc_si128(t,_k.ni.k[10]);
  670. t = _mm_aesenc_si128(t,_k.ni.k[11]);
  671. t = _mm_aesenc_si128(t,_k.ni.k[12]);
  672. t = _mm_aesenc_si128(t,_k.ni.k[13]);
  673. t = _mm_aesenclast_si128(t,_k.ni.k[14]);
  674. t = _mm_xor_si128(y,t);
  675. _mm_storeu_si128((__m128i *)out,t);
  676. }
  677. #endif /* ZT_AES_AESNI ******************************************************/
  678. };
  679. } // namespace ZeroTier
  680. #endif