AES.hpp 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754
  1. /*
  2. * Copyright (c)2019 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2023-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #ifndef ZT_AES_HPP
  14. #define ZT_AES_HPP
  15. #include "Constants.hpp"
  16. #include "Utils.hpp"
  17. #if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
  18. #include <wmmintrin.h>
  19. #include <emmintrin.h>
  20. #include <smmintrin.h>
  21. #define ZT_AES_AESNI 1
  22. #endif
  23. #define ZT_AES_KEY_SIZE 32
  24. #define ZT_AES_BLOCK_SIZE 16
  25. namespace ZeroTier {
  26. /**
  27. * AES-256 and AES-GCM AEAD
  28. */
  29. class AES
  30. {
  31. public:
/**
 * This will be true if your platform's type of AES acceleration is supported on this machine
 */
static const bool HW_ACCEL;

// Default construction leaves the key schedule uninitialized; init() must be
// called before any encrypt/gmac/ctr operation.
inline AES() {}

// Convenience constructor: immediately expands the supplied 256-bit key.
// NOTE(review): this single-argument constructor is not 'explicit', so a
// uint8_t pointer can implicitly convert to AES — confirm no caller relies
// on that before tightening it.
inline AES(const uint8_t key[32]) { this->init(key); }

// Wipe the expanded key material on destruction so key-derived data does not
// linger in freed memory.
inline ~AES() { Utils::burn(&_k,sizeof(_k)); }
/**
 * Set (or re-set) this AES256 cipher's key
 *
 * Expands the 256-bit key into the internal round-key schedule, using the
 * AES-NI path when the runtime capability check (HW_ACCEL) says it is
 * available and the portable software implementation otherwise.
 *
 * @param key 256-bit (32 byte) AES key
 */
inline void init(const uint8_t key[32])
{
#ifdef ZT_AES_AESNI
	if (likely(HW_ACCEL)) {
		_init_aesni(key);
		return;
	}
#endif
	_initSW(key);
}
/**
 * Encrypt a single AES block (ECB mode)
 *
 * Dispatches to AES-NI when available, otherwise to the software
 * implementation. Input and output may alias.
 *
 * @param in Input block
 * @param out Output block (can be same as input)
 */
inline void encrypt(const uint8_t in[16],uint8_t out[16]) const
{
#ifdef ZT_AES_AESNI
	if (likely(HW_ACCEL)) {
		_encrypt_aesni(in,out);
		return;
	}
#endif
	_encryptSW(in,out);
}
/**
 * Compute GMAC-AES256 (GCM without ciphertext)
 *
 * Authenticates 'in' under this key with the given 96-bit IV and writes the
 * full 128-bit GMAC tag. Dispatches to the AES-NI/PCLMUL path when
 * available, otherwise to the software implementation.
 *
 * @param iv 96-bit IV
 * @param in Input data
 * @param len Length of input
 * @param out 128-bit authorization tag from GMAC
 */
inline void gmac(const uint8_t iv[12],const void *in,const unsigned int len,uint8_t out[16]) const
{
#ifdef ZT_AES_AESNI
	if (likely(HW_ACCEL)) {
		_gmac_aesni(iv,(const uint8_t *)in,len,out);
		return;
	}
#endif
	_gmacSW(iv,(const uint8_t *)in,len,out);
}
/**
 * Encrypt or decrypt (they're the same) using AES256-CTR
 *
 * The counter here is a 128-bit big-endian that starts at the IV. The code only
 * increments the least significant 64 bits, making it only safe to use for a
 * maximum of 2^64-1 bytes (much larger than we ever do).
 *
 * @param iv 128-bit CTR IV
 * @param in Input plaintext or ciphertext
 * @param len Length of input
 * @param out Output plaintext or ciphertext
 */
inline void ctr(const uint8_t iv[16],const void *in,unsigned int len,void *out) const
{
#ifdef ZT_AES_AESNI
	if (likely(HW_ACCEL)) {
		_crypt_ctr_aesni(iv,(const uint8_t *)in,len,(uint8_t *)out);
		return;
	}
#endif
	// Software fallback: the counter block is held as two 64-bit words;
	// ctr[0] stays fixed (first 8 IV bytes) and only ctr[1] — kept in host
	// order in 'bctr' — is incremented, matching the 2^64 limit noted above.
	uint64_t ctr[2],cenc[2];
	memcpy(ctr,iv,16);
	uint64_t bctr = Utils::ntoh(ctr[1]);
	const uint8_t *i = (const uint8_t *)in;
	uint8_t *o = (uint8_t *)out;
	while (len >= 16) {
		// Generate one keystream block, then bump the counter for the next.
		_encryptSW((const uint8_t *)ctr,(uint8_t *)cenc);
		ctr[1] = Utils::hton(++bctr);
#ifdef ZT_NO_TYPE_PUNNING
		for(unsigned int k=0;k<16;++k)
			*(o++) = *(i++) ^ ((uint8_t *)cenc)[k];
#else
		// 64-bit XORs where type punning is permitted (faster than byte-wise).
		*((uint64_t *)o) = *((const uint64_t *)i) ^ cenc[0];
		o += 8;
		i += 8;
		*((uint64_t *)o) = *((const uint64_t *)i) ^ cenc[1];
		o += 8;
		i += 8;
#endif
		len -= 16;
	}
	if (len) {
		// Trailing partial block: XOR only the remaining bytes of keystream.
		_encryptSW((const uint8_t *)ctr,(uint8_t *)cenc);
		for(unsigned int k=0;k<len;++k)
			*(o++) = *(i++) ^ ((uint8_t *)cenc)[k];
	}
}
/**
 * Perform AES-GMAC-CTR encryption
 *
 * This is an AES mode built from GMAC and AES-CTR that is similar to the
 * various SIV (synthetic IV) modes for AES and is resistant to nonce
 * re-use. It's specifically tweaked for ZeroTier's packet structure with
 * a 64-bit IV (extended to 96 bits by including packet size and other info)
 * and a 64-bit auth tag.
 *
 * The use of separate keys for MAC and encrypt is precautionary. It
 * ensures that the CTR IV (and CTR output) are always secrets regardless
 * of what an attacker might do with accumulated IVs and auth tags.
 *
 * @param k1 GMAC key
 * @param k2 GMAC auth tag masking (ECB encryption) key
 * @param k3 CTR IV masking (ECB encryption) key
 * @param k4 AES-CTR key
 * @param iv 96-bit message IV
 * @param in Message plaintext
 * @param len Length of plaintext
 * @param out Output buffer to receive ciphertext
 * @param tag Output buffer to receive 64-bit authentication tag
 */
static inline void ztGmacCtrEncrypt(const AES &k1,const AES &k2,const AES &k3,const AES &k4,const uint8_t iv[12],const void *in,unsigned int len,void *out,uint8_t tag[8])
{
	uint8_t ctrIv[16];

	// Compute AES[k2](GMAC[k1](iv,plaintext))
	k1.gmac(iv,in,len,ctrIv);
	k2.encrypt(ctrIv,ctrIv); // ECB mode encrypt step is because GMAC is not a PRF

	// Auth tag for packet is first 64 bits of AES(GMAC) (rest is discarded)
#ifdef ZT_NO_TYPE_PUNNING
	for(unsigned int i=0;i<8;++i) tag[i] = ctrIv[i];
#else
	*((uint64_t *)tag) = *((uint64_t *)ctrIv);
#endif

	// Create synthetic CTR IV: bytes 0-7 are the masked GMAC (computed above),
	// bytes 8-11 are iv[0..3], bytes 12-15 are iv[4..7] XOR iv[8..11].
#ifdef ZT_NO_TYPE_PUNNING
	for(unsigned int i=0;i<4;++i) ctrIv[i+8] = iv[i];
	for(unsigned int i=4;i<8;++i) ctrIv[i+8] = iv[i] ^ iv[i+4];
#else
	((uint32_t *)ctrIv)[2] = ((const uint32_t *)iv)[0];
	((uint32_t *)ctrIv)[3] = ((const uint32_t *)iv)[1] ^ ((const uint32_t *)iv)[2];
#endif
	k3.encrypt(ctrIv,ctrIv); // ECB mask keeps the actual CTR IV secret

	// Encrypt with AES[k4]-CTR
	k4.ctr(ctrIv,in,len,out);
}
/**
 * Decrypt a message encrypted with AES-GMAC-CTR and check its authenticity
 *
 * NOTE(review): the plaintext is written into 'out' BEFORE the tag is
 * verified, so on a false return the caller must treat the buffer contents
 * as unauthenticated garbage and discard them.
 *
 * @param k1 GMAC key
 * @param k2 GMAC auth tag masking (ECB encryption) key
 * @param k3 CTR IV masking (ECB encryption) key
 * @param k4 AES-CTR key
 * @param iv 96-bit message IV
 * @param in Message ciphertext
 * @param len Length of ciphertext
 * @param out Output buffer to receive plaintext
 * @param tag Authentication tag supplied with message
 * @return True if authentication tags match and message appears authentic
 */
static inline bool ztGmacCtrDecrypt(const AES &k1,const AES &k2,const AES &k3,const AES &k4,const uint8_t iv[12],const void *in,unsigned int len,void *out,const uint8_t tag[8])
{
	uint8_t ctrIv[16],gmacOut[16];

	// Recover synthetic and secret CTR IV from auth tag and packet IV
	// (mirrors the layout built in ztGmacCtrEncrypt).
#ifdef ZT_NO_TYPE_PUNNING
	for(unsigned int i=0;i<8;++i) ctrIv[i] = tag[i];
	for(unsigned int i=0;i<4;++i) ctrIv[i+8] = iv[i];
	for(unsigned int i=4;i<8;++i) ctrIv[i+8] = iv[i] ^ iv[i+4];
#else
	*((uint64_t *)ctrIv) = *((const uint64_t *)tag);
	((uint32_t *)ctrIv)[2] = ((const uint32_t *)iv)[0];
	((uint32_t *)ctrIv)[3] = ((const uint32_t *)iv)[1] ^ ((const uint32_t *)iv)[2];
#endif
	k3.encrypt(ctrIv,ctrIv);

	// Decrypt with AES[k4]-CTR
	k4.ctr(ctrIv,in,len,out);

	// Compute AES[k2](GMAC[k1](iv,plaintext))
	k1.gmac(iv,out,len,gmacOut);
	k2.encrypt(gmacOut,gmacOut);

	// Check that packet's auth tag matches first 64 bits of AES(GMAC)
#ifdef ZT_NO_TYPE_PUNNING
	return Utils::secureEq(gmacOut,tag,8);
#else
	return (*((const uint64_t *)gmacOut) == *((const uint64_t *)tag));
#endif
}
private:
	// Lookup tables and round constants for the software AES implementation
	// (declared here, defined out-of-line with _initSW/_encryptSW/_gmacSW).
	static const uint32_t Te0[256];
	static const uint32_t Te1[256];
	static const uint32_t Te2[256];
	static const uint32_t Te3[256];
	static const uint32_t rcon[10];

	// Software fallbacks used when no hardware acceleration is available.
	void _initSW(const uint8_t key[32]);
	void _encryptSW(const uint8_t in[16],uint8_t out[16]) const;
	void _gmacSW(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const;

	/**************************************************************************/
	// Expanded key material. A union because only one implementation's
	// schedule is in use at a time; the destructor burns the whole union.
	union {
#ifdef ZT_AES_ARMNEON
		struct {
			uint32x4_t k[15]; // 15 round keys (AES-256)
		} neon;
#endif
#ifdef ZT_AES_AESNI
		struct {
			__m128i k[15];         // 15 round keys (AES-256)
			__m128i h,hh,hhh,hhhh; // GHASH key H and its powers H^2..H^4 (4-way GHASH)
		} ni;
#endif
		struct {
			uint64_t h[2];   // GHASH key H
			uint32_t ek[60]; // 15 round keys as 4 words each
		} sw;
	} _k;
	/**************************************************************************/
  248. #ifdef ZT_AES_ARMNEON /******************************************************/
  249. static inline void _aes_256_expAssist_armneon(uint32x4_t prev1,uint32x4_t prev2,uint32_t rcon,uint32x4_t *e1,uint32x4_t *e2)
  250. {
  251. uint32_t round1[4], round2[4], prv1[4], prv2[4];
  252. vst1q_u32(prv1, prev1);
  253. vst1q_u32(prv2, prev2);
  254. round1[0] = sub_word(rot_word(prv2[3])) ^ rcon ^ prv1[0];
  255. round1[1] = sub_word(rot_word(round1[0])) ^ rcon ^ prv1[1];
  256. round1[2] = sub_word(rot_word(round1[1])) ^ rcon ^ prv1[2];
  257. round1[3] = sub_word(rot_word(round1[2])) ^ rcon ^ prv1[3];
  258. round2[0] = sub_word(rot_word(round1[3])) ^ rcon ^ prv2[0];
  259. round2[1] = sub_word(rot_word(round2[0])) ^ rcon ^ prv2[1];
  260. round2[2] = sub_word(rot_word(round2[1])) ^ rcon ^ prv2[2];
  261. round2[3] = sub_word(rot_word(round2[2])) ^ rcon ^ prv2[3];
  262. *e1 = vld1q_u3(round1);
  263. *e2 = vld1q_u3(round2);
  264. //uint32x4_t expansion[2] = {vld1q_u3(round1), vld1q_u3(round2)};
  265. //return expansion;
  266. }
// Expand a key into the NEON round-key schedule.
//
// NOTE(review): this routine appears to be dead code (ZT_AES_ARMNEON is
// never defined in this header) and looks broken as written — confirm
// against FIPS-197 before ever enabling this path:
//  - encKey is a single uint8x16_t (128 bits), but AES-256 requires a
//    256-bit key, and vld1q_u32(encKey) / encKey + 16 treat the vector
//    value as if it were a pointer;
//  - (*schedule)[n] indexes LANES of the first vector, not the n-th
//    schedule entry (presumably schedule[n] was intended);
//  - every expansion step passes rcon 0x01, whereas the commented-out
//    code below shows the intended sequence 0x01,0x02,0x04,...,0x40.
inline void _init_armneon(uint8x16_t encKey)
{
	uint32x4_t *schedule = _k.neon.k;
	uint32x4_t e1,e2;
	(*schedule)[0] = vld1q_u32(encKey);
	(*schedule)[1] = vld1q_u32(encKey + 16);
	_aes_256_expAssist_armneon((*schedule)[0],(*schedule)[1],0x01,&e1,&e2);
	(*schedule)[2] = e1; (*schedule)[3] = e2;
	_aes_256_expAssist_armneon((*schedule)[2],(*schedule)[3],0x01,&e1,&e2);
	(*schedule)[4] = e1; (*schedule)[5] = e2;
	_aes_256_expAssist_armneon((*schedule)[4],(*schedule)[5],0x01,&e1,&e2);
	(*schedule)[6] = e1; (*schedule)[7] = e2;
	_aes_256_expAssist_armneon((*schedule)[6],(*schedule)[7],0x01,&e1,&e2);
	(*schedule)[8] = e1; (*schedule)[9] = e2;
	_aes_256_expAssist_armneon((*schedule)[8],(*schedule)[9],0x01,&e1,&e2);
	(*schedule)[10] = e1; (*schedule)[11] = e2;
	_aes_256_expAssist_armneon((*schedule)[10],(*schedule)[11],0x01,&e1,&e2);
	(*schedule)[12] = e1; (*schedule)[13] = e2;
	_aes_256_expAssist_armneon((*schedule)[12],(*schedule)[13],0x01,&e1,&e2);
	(*schedule)[14] = e1;
	/*
	doubleRound = _aes_256_expAssist_armneon((*schedule)[0], (*schedule)[1], 0x01);
	(*schedule)[2] = doubleRound[0];
	(*schedule)[3] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[2], (*schedule)[3], 0x02);
	(*schedule)[4] = doubleRound[0];
	(*schedule)[5] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[4], (*schedule)[5], 0x04);
	(*schedule)[6] = doubleRound[0];
	(*schedule)[7] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[6], (*schedule)[7], 0x08);
	(*schedule)[8] = doubleRound[0];
	(*schedule)[9] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[8], (*schedule)[9], 0x10);
	(*schedule)[10] = doubleRound[0];
	(*schedule)[11] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[10], (*schedule)[11], 0x20);
	(*schedule)[12] = doubleRound[0];
	(*schedule)[13] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[12], (*schedule)[13], 0x40);
	(*schedule)[14] = doubleRound[0];
	*/
}
  310. inline void _encrypt_armneon(uint8x16_t *data) const
  311. {
  312. *data = veorq_u8(*data, _k.neon.k[0]);
  313. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[1]));
  314. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[2]));
  315. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[3]));
  316. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[4]));
  317. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[5]));
  318. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[6]));
  319. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[7]));
  320. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[8]));
  321. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[9]));
  322. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[10]));
  323. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[11]));
  324. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[12]));
  325. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[13]));
  326. *data = vaeseq_u8(*data, _k.neon.k[14]);
  327. }
  328. #endif /*********************************************************************/
  329. #ifdef ZT_AES_AESNI /********************************************************/
// AES-256 key expansion step (first half): derive the next round key from
// 'a' (the round key two steps back) and 'b', the output of
// _mm_aeskeygenassist_si128 on the previous round key (the caller supplies
// the round constant there).
static ZT_ALWAYS_INLINE __m128i _init256_1_aesni(__m128i a,__m128i b)
{
	__m128i x,y;
	b = _mm_shuffle_epi32(b,0xff); // broadcast keygenassist's high word to all lanes
	// Fold 'a' into itself: x = a ^ (a<<32) ^ (a<<64) ^ (a<<96)
	y = _mm_slli_si128(a,0x04);
	x = _mm_xor_si128(a,y);
	y = _mm_slli_si128(y,0x04);
	x = _mm_xor_si128(x,y);
	y = _mm_slli_si128(y,0x04);
	x = _mm_xor_si128(x,y);
	x = _mm_xor_si128(x,b);
	return x;
}
// AES-256 key expansion step (second half): derive the next round key from
// 'a' (the just-computed key) and 'b' (the round key two steps back). Uses
// keygenassist with rcon 0 and extracts the SubWord-only result (no round
// constant is applied on these steps).
static ZT_ALWAYS_INLINE __m128i _init256_2_aesni(__m128i a,__m128i b)
{
	__m128i x,y,z;
	y = _mm_aeskeygenassist_si128(a,0x00);
	z = _mm_shuffle_epi32(y,0xaa); // broadcast the SubWord word to all lanes
	// Fold 'b' into itself: x = b ^ (b<<32) ^ (b<<64) ^ (b<<96)
	y = _mm_slli_si128(b,0x04);
	x = _mm_xor_si128(b,y);
	y = _mm_slli_si128(y,0x04);
	x = _mm_xor_si128(x,y);
	y = _mm_slli_si128(y,0x04);
	x = _mm_xor_si128(x,y);
	x = _mm_xor_si128(x,z);
	return x;
}
// Expand a 256-bit key into 15 AES-NI round keys, then precompute the GHASH
// key H = AES(0^128) and (byte-swapped) H^2, H^3, H^4 for the 4-way
// aggregated GHASH in _gmac_aesni.
ZT_ALWAYS_INLINE void _init_aesni(const uint8_t key[32])
{
	// Key schedule: alternate _init256_1/_init256_2 steps with round
	// constants 0x01..0x40 applied via aeskeygenassist.
	__m128i t1,t2;
	_k.ni.k[0] = t1 = _mm_loadu_si128((const __m128i *)key);
	_k.ni.k[1] = t2 = _mm_loadu_si128((const __m128i *)(key+16));
	_k.ni.k[2] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x01));
	_k.ni.k[3] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[4] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x02));
	_k.ni.k[5] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[6] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x04));
	_k.ni.k[7] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[8] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x08));
	_k.ni.k[9] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[10] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x10));
	_k.ni.k[11] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[12] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x20));
	_k.ni.k[13] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[14] = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x40));

	// H = AES-256 encryption of the all-zero block under the full schedule.
	__m128i h = _mm_xor_si128(_mm_setzero_si128(),_k.ni.k[0]);
	h = _mm_aesenc_si128(h,_k.ni.k[1]);
	h = _mm_aesenc_si128(h,_k.ni.k[2]);
	h = _mm_aesenc_si128(h,_k.ni.k[3]);
	h = _mm_aesenc_si128(h,_k.ni.k[4]);
	h = _mm_aesenc_si128(h,_k.ni.k[5]);
	h = _mm_aesenc_si128(h,_k.ni.k[6]);
	h = _mm_aesenc_si128(h,_k.ni.k[7]);
	h = _mm_aesenc_si128(h,_k.ni.k[8]);
	h = _mm_aesenc_si128(h,_k.ni.k[9]);
	h = _mm_aesenc_si128(h,_k.ni.k[10]);
	h = _mm_aesenc_si128(h,_k.ni.k[11]);
	h = _mm_aesenc_si128(h,_k.ni.k[12]);
	h = _mm_aesenc_si128(h,_k.ni.k[13]);
	h = _mm_aesenclast_si128(h,_k.ni.k[14]);

	// Store H byte-swapped plus byte-swapped H^2..H^4; _mult_block_aesni
	// takes the swapped key and an unswapped operand.
	const __m128i shuf = _mm_set_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15);
	__m128i hswap = _mm_shuffle_epi8(h,shuf);
	__m128i hh = _mult_block_aesni(shuf,hswap,h);
	__m128i hhh = _mult_block_aesni(shuf,hswap,hh);
	__m128i hhhh = _mult_block_aesni(shuf,hswap,hhh);
	_k.ni.h = hswap;
	_k.ni.hh = _mm_shuffle_epi8(hh,shuf);
	_k.ni.hhh = _mm_shuffle_epi8(hhh,shuf);
	_k.ni.hhhh = _mm_shuffle_epi8(hhhh,shuf);
}
  400. ZT_ALWAYS_INLINE void _encrypt_aesni(const void *in,void *out) const
  401. {
  402. __m128i tmp;
  403. tmp = _mm_loadu_si128((const __m128i *)in);
  404. tmp = _mm_xor_si128(tmp,_k.ni.k[0]);
  405. tmp = _mm_aesenc_si128(tmp,_k.ni.k[1]);
  406. tmp = _mm_aesenc_si128(tmp,_k.ni.k[2]);
  407. tmp = _mm_aesenc_si128(tmp,_k.ni.k[3]);
  408. tmp = _mm_aesenc_si128(tmp,_k.ni.k[4]);
  409. tmp = _mm_aesenc_si128(tmp,_k.ni.k[5]);
  410. tmp = _mm_aesenc_si128(tmp,_k.ni.k[6]);
  411. tmp = _mm_aesenc_si128(tmp,_k.ni.k[7]);
  412. tmp = _mm_aesenc_si128(tmp,_k.ni.k[8]);
  413. tmp = _mm_aesenc_si128(tmp,_k.ni.k[9]);
  414. tmp = _mm_aesenc_si128(tmp,_k.ni.k[10]);
  415. tmp = _mm_aesenc_si128(tmp,_k.ni.k[11]);
  416. tmp = _mm_aesenc_si128(tmp,_k.ni.k[12]);
  417. tmp = _mm_aesenc_si128(tmp,_k.ni.k[13]);
  418. _mm_storeu_si128((__m128i *)out,_mm_aesenclast_si128(tmp,_k.ni.k[14]));
  419. }
  420. ZT_ALWAYS_INLINE void _crypt_ctr_aesni(const uint8_t iv[16],const uint8_t *in,unsigned int len,uint8_t *out) const
  421. {
  422. const __m64 iv0 = (__m64)(*((const uint64_t *)iv));
  423. uint64_t ctr = Utils::ntoh(*((const uint64_t *)(iv+8)));
  424. const __m128i k0 = _k.ni.k[0];
  425. const __m128i k1 = _k.ni.k[1];
  426. const __m128i k2 = _k.ni.k[2];
  427. const __m128i k3 = _k.ni.k[3];
  428. const __m128i k4 = _k.ni.k[4];
  429. const __m128i k5 = _k.ni.k[5];
  430. const __m128i k6 = _k.ni.k[6];
  431. const __m128i k7 = _k.ni.k[7];
  432. const __m128i k8 = _k.ni.k[8];
  433. const __m128i k9 = _k.ni.k[9];
  434. const __m128i k10 = _k.ni.k[10];
  435. const __m128i k11 = _k.ni.k[11];
  436. const __m128i k12 = _k.ni.k[12];
  437. const __m128i k13 = _k.ni.k[13];
  438. const __m128i k14 = _k.ni.k[14];
  439. while (len >= 64) {
  440. __m128i c0 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr),iv0),k0);
  441. __m128i c1 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+1ULL)),iv0),k0);
  442. __m128i c2 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+2ULL)),iv0),k0);
  443. __m128i c3 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+3ULL)),iv0),k0);
  444. ctr += 4;
  445. c0 = _mm_aesenc_si128(c0,k1);
  446. c1 = _mm_aesenc_si128(c1,k1);
  447. c2 = _mm_aesenc_si128(c2,k1);
  448. c3 = _mm_aesenc_si128(c3,k1);
  449. c0 = _mm_aesenc_si128(c0,k2);
  450. c1 = _mm_aesenc_si128(c1,k2);
  451. c2 = _mm_aesenc_si128(c2,k2);
  452. c3 = _mm_aesenc_si128(c3,k2);
  453. c0 = _mm_aesenc_si128(c0,k3);
  454. c1 = _mm_aesenc_si128(c1,k3);
  455. c2 = _mm_aesenc_si128(c2,k3);
  456. c3 = _mm_aesenc_si128(c3,k3);
  457. c0 = _mm_aesenc_si128(c0,k4);
  458. c1 = _mm_aesenc_si128(c1,k4);
  459. c2 = _mm_aesenc_si128(c2,k4);
  460. c3 = _mm_aesenc_si128(c3,k4);
  461. c0 = _mm_aesenc_si128(c0,k5);
  462. c1 = _mm_aesenc_si128(c1,k5);
  463. c2 = _mm_aesenc_si128(c2,k5);
  464. c3 = _mm_aesenc_si128(c3,k5);
  465. c0 = _mm_aesenc_si128(c0,k6);
  466. c1 = _mm_aesenc_si128(c1,k6);
  467. c2 = _mm_aesenc_si128(c2,k6);
  468. c3 = _mm_aesenc_si128(c3,k6);
  469. c0 = _mm_aesenc_si128(c0,k7);
  470. c1 = _mm_aesenc_si128(c1,k7);
  471. c2 = _mm_aesenc_si128(c2,k7);
  472. c3 = _mm_aesenc_si128(c3,k7);
  473. c0 = _mm_aesenc_si128(c0,k8);
  474. c1 = _mm_aesenc_si128(c1,k8);
  475. c2 = _mm_aesenc_si128(c2,k8);
  476. c3 = _mm_aesenc_si128(c3,k8);
  477. c0 = _mm_aesenc_si128(c0,k9);
  478. c1 = _mm_aesenc_si128(c1,k9);
  479. c2 = _mm_aesenc_si128(c2,k9);
  480. c3 = _mm_aesenc_si128(c3,k9);
  481. c0 = _mm_aesenc_si128(c0,k10);
  482. c1 = _mm_aesenc_si128(c1,k10);
  483. c2 = _mm_aesenc_si128(c2,k10);
  484. c3 = _mm_aesenc_si128(c3,k10);
  485. c0 = _mm_aesenc_si128(c0,k11);
  486. c1 = _mm_aesenc_si128(c1,k11);
  487. c2 = _mm_aesenc_si128(c2,k11);
  488. c3 = _mm_aesenc_si128(c3,k11);
  489. c0 = _mm_aesenc_si128(c0,k12);
  490. c1 = _mm_aesenc_si128(c1,k12);
  491. c2 = _mm_aesenc_si128(c2,k12);
  492. c3 = _mm_aesenc_si128(c3,k12);
  493. c0 = _mm_aesenc_si128(c0,k13);
  494. c1 = _mm_aesenc_si128(c1,k13);
  495. c2 = _mm_aesenc_si128(c2,k13);
  496. c3 = _mm_aesenc_si128(c3,k13);
  497. _mm_storeu_si128((__m128i *)out,_mm_xor_si128(_mm_loadu_si128((const __m128i *)in),_mm_aesenclast_si128(c0,k14)));
  498. _mm_storeu_si128((__m128i *)(out + 16),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 16)),_mm_aesenclast_si128(c1,k14)));
  499. _mm_storeu_si128((__m128i *)(out + 32),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 32)),_mm_aesenclast_si128(c2,k14)));
  500. _mm_storeu_si128((__m128i *)(out + 48),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 48)),_mm_aesenclast_si128(c3,k14)));
  501. in += 64;
  502. out += 64;
  503. len -= 64;
  504. }
  505. while (len >= 16) {
  506. __m128i c0 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr++),(__m64)iv0),k0);
  507. c0 = _mm_aesenc_si128(c0,k1);
  508. c0 = _mm_aesenc_si128(c0,k2);
  509. c0 = _mm_aesenc_si128(c0,k3);
  510. c0 = _mm_aesenc_si128(c0,k4);
  511. c0 = _mm_aesenc_si128(c0,k5);
  512. c0 = _mm_aesenc_si128(c0,k6);
  513. c0 = _mm_aesenc_si128(c0,k7);
  514. c0 = _mm_aesenc_si128(c0,k8);
  515. c0 = _mm_aesenc_si128(c0,k9);
  516. c0 = _mm_aesenc_si128(c0,k10);
  517. c0 = _mm_aesenc_si128(c0,k11);
  518. c0 = _mm_aesenc_si128(c0,k12);
  519. c0 = _mm_aesenc_si128(c0,k13);
  520. _mm_storeu_si128((__m128i *)out,_mm_xor_si128(_mm_loadu_si128((const __m128i *)in),_mm_aesenclast_si128(c0,k14)));
  521. in += 16;
  522. out += 16;
  523. len -= 16;
  524. }
  525. if (len) {
  526. __m128i c0 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr++),(__m64)iv0),k0);
  527. c0 = _mm_aesenc_si128(c0,k1);
  528. c0 = _mm_aesenc_si128(c0,k2);
  529. c0 = _mm_aesenc_si128(c0,k3);
  530. c0 = _mm_aesenc_si128(c0,k4);
  531. c0 = _mm_aesenc_si128(c0,k5);
  532. c0 = _mm_aesenc_si128(c0,k6);
  533. c0 = _mm_aesenc_si128(c0,k7);
  534. c0 = _mm_aesenc_si128(c0,k8);
  535. c0 = _mm_aesenc_si128(c0,k9);
  536. c0 = _mm_aesenc_si128(c0,k10);
  537. c0 = _mm_aesenc_si128(c0,k11);
  538. c0 = _mm_aesenc_si128(c0,k12);
  539. c0 = _mm_aesenc_si128(c0,k13);
  540. c0 = _mm_aesenclast_si128(c0,k14);
  541. for(unsigned int i=0;i<len;++i)
  542. out[i] = in[i] ^ ((const uint8_t *)&c0)[i];
  543. }
  544. }
// GHASH multiply in GF(2^128) using PCLMULQDQ: 'h' is the byte-swapped hash
// key, 'y' an operand in GCM byte order, 'shuf' the 16-byte byte-reversal
// mask. Returns h*y (mod the GHASH polynomial) in GCM byte order.
static ZT_ALWAYS_INLINE __m128i _mult_block_aesni(__m128i shuf,__m128i h,__m128i y)
{
	y = _mm_shuffle_epi8(y,shuf); // bring operand into the same byte order as h

	// 256-bit carryless product: t1 = low, t4 = high, t2/t3 = cross terms.
	__m128i t1 = _mm_clmulepi64_si128(h,y,0x00);
	__m128i t2 = _mm_clmulepi64_si128(h,y,0x01);
	__m128i t3 = _mm_clmulepi64_si128(h,y,0x10);
	__m128i t4 = _mm_clmulepi64_si128(h,y,0x11);
	t2 = _mm_xor_si128(t2,t3);
	t3 = _mm_slli_si128(t2,8);
	t2 = _mm_srli_si128(t2,8);
	t1 = _mm_xor_si128(t1,t3);
	t4 = _mm_xor_si128(t4,t2);

	// Shift the 256-bit product left by one bit (carries propagated across
	// the 32-bit lanes and the t1/t4 halves).
	__m128i t5 = _mm_srli_epi32(t1,31);
	t1 = _mm_slli_epi32(t1,1);
	__m128i t6 = _mm_srli_epi32(t4,31);
	t4 = _mm_slli_epi32(t4,1);
	t3 = _mm_srli_si128(t5,12);
	t6 = _mm_slli_si128(t6,4);
	t5 = _mm_slli_si128(t5,4);
	t1 = _mm_or_si128(t1,t5);
	t4 = _mm_or_si128(t4,t6);
	t4 = _mm_or_si128(t4,t3);

	// Reduce modulo the GHASH polynomial (shift-and-XOR reduction).
	t5 = _mm_slli_epi32(t1,31);
	t6 = _mm_slli_epi32(t1,30);
	t3 = _mm_slli_epi32(t1,25);
	t5 = _mm_xor_si128(t5,t6);
	t5 = _mm_xor_si128(t5,t3);
	t6 = _mm_srli_si128(t5,4);
	t4 = _mm_xor_si128(t4,t6);
	t5 = _mm_slli_si128(t5,12);
	t1 = _mm_xor_si128(t1,t5);
	t4 = _mm_xor_si128(t4,t1);
	t5 = _mm_srli_epi32(t1,1);
	t2 = _mm_srli_epi32(t1,2);
	t3 = _mm_srli_epi32(t1,7);
	t4 = _mm_xor_si128(t4,t2);
	t4 = _mm_xor_si128(t4,t3);
	t4 = _mm_xor_si128(t4,t5);

	return _mm_shuffle_epi8(t4,shuf); // back to GCM byte order
}
  585. static ZT_ALWAYS_INLINE __m128i _ghash_aesni(__m128i shuf,__m128i h,__m128i y,__m128i x) { return _mult_block_aesni(shuf,h,_mm_xor_si128(y,x)); }
// GMAC over 'in' using AES-NI/PCLMUL: GHASH with 4-way aggregation (powers
// H..H^4 precomputed in _init_aesni), then tag = GHASH ^ AES(iv || 0x00000001).
ZT_ALWAYS_INLINE void _gmac_aesni(const uint8_t iv[12],const uint8_t *in,const unsigned int len,uint8_t out[16]) const
{
	const __m128i *ab = (const __m128i *)in;
	unsigned int blocks = len / 16;
	unsigned int pblocks = blocks - (blocks % 4); // blocks handled by the 4-way loop
	unsigned int rem = len % 16;
	const __m128i h1 = _k.ni.hhhh;
	const __m128i h2 = _k.ni.hhh;
	const __m128i h3 = _k.ni.hh;
	const __m128i h4 = _k.ni.h;
	const __m128i shuf = _mm_set_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15);
	__m128i y = _mm_setzero_si128(); // GHASH accumulator
	unsigned int i = 0;

	// 4-way aggregated GHASH: process four blocks per iteration by pairing
	// them with H^4..H^1 so only one reduction is needed per group.
	for (;i<pblocks;i+=4) {
		__m128i d1 = _mm_shuffle_epi8(_mm_xor_si128(y,_mm_loadu_si128(ab + i + 0)),shuf);
		__m128i d2 = _mm_shuffle_epi8(_mm_loadu_si128(ab + i + 1),shuf);
		__m128i d3 = _mm_shuffle_epi8(_mm_loadu_si128(ab + i + 2),shuf);
		__m128i d4 = _mm_shuffle_epi8(_mm_loadu_si128(ab + i + 3),shuf);
		// Low halves of the four carryless products, XORed together.
		__m128i t0 = _mm_clmulepi64_si128(h1,d1,0x00);
		__m128i t1 = _mm_clmulepi64_si128(h2,d2,0x00);
		__m128i t2 = _mm_clmulepi64_si128(h3,d3,0x00);
		__m128i t3 = _mm_clmulepi64_si128(h4,d4,0x00);
		__m128i t8 = _mm_xor_si128(t0,t1);
		t8 = _mm_xor_si128(t8,t2);
		t8 = _mm_xor_si128(t8,t3);
		// High halves, XORed together.
		__m128i t4 = _mm_clmulepi64_si128(h1,d1,0x11);
		__m128i t5 = _mm_clmulepi64_si128(h2,d2,0x11);
		__m128i t6 = _mm_clmulepi64_si128(h3,d3,0x11);
		__m128i t7 = _mm_clmulepi64_si128(h4,d4,0x11);
		__m128i t9 = _mm_xor_si128(t4,t5);
		t9 = _mm_xor_si128(t9,t6);
		t9 = _mm_xor_si128(t9,t7);
		// Karatsuba middle terms: (hi^lo) x (hi^lo) for each pair.
		t0 = _mm_shuffle_epi32(h1,78);
		t4 = _mm_shuffle_epi32(d1,78);
		t0 = _mm_xor_si128(t0,h1);
		t4 = _mm_xor_si128(t4,d1);
		t1 = _mm_shuffle_epi32(h2,78);
		t5 = _mm_shuffle_epi32(d2,78);
		t1 = _mm_xor_si128(t1,h2);
		t5 = _mm_xor_si128(t5,d2);
		t2 = _mm_shuffle_epi32(h3,78);
		t6 = _mm_shuffle_epi32(d3,78);
		t2 = _mm_xor_si128(t2,h3);
		t6 = _mm_xor_si128(t6,d3);
		t3 = _mm_shuffle_epi32(h4,78);
		t7 = _mm_shuffle_epi32(d4,78);
		t3 = _mm_xor_si128(t3,h4);
		t7 = _mm_xor_si128(t7,d4);
		t0 = _mm_clmulepi64_si128(t0,t4,0x00);
		t1 = _mm_clmulepi64_si128(t1,t5,0x00);
		t2 = _mm_clmulepi64_si128(t2,t6,0x00);
		t3 = _mm_clmulepi64_si128(t3,t7,0x00);
		t0 = _mm_xor_si128(t0,t8);
		t0 = _mm_xor_si128(t0,t9);
		t0 = _mm_xor_si128(t1,t0);
		t0 = _mm_xor_si128(t2,t0);
		t0 = _mm_xor_si128(t3,t0);
		// Recombine into a 256-bit product (t3 = low, t6 = high).
		t4 = _mm_slli_si128(t0,8);
		t0 = _mm_srli_si128(t0,8);
		t3 = _mm_xor_si128(t4,t8);
		t6 = _mm_xor_si128(t0,t9);
		// Shift left one bit with carry propagation across lanes and halves.
		t7 = _mm_srli_epi32(t3,31);
		t8 = _mm_srli_epi32(t6,31);
		t3 = _mm_slli_epi32(t3,1);
		t6 = _mm_slli_epi32(t6,1);
		t9 = _mm_srli_si128(t7,12);
		t8 = _mm_slli_si128(t8,4);
		t7 = _mm_slli_si128(t7,4);
		t3 = _mm_or_si128(t3,t7);
		t6 = _mm_or_si128(t6,t8);
		t6 = _mm_or_si128(t6,t9);
		// Reduce modulo the GHASH polynomial.
		t7 = _mm_slli_epi32(t3,31);
		t8 = _mm_slli_epi32(t3,30);
		t9 = _mm_slli_epi32(t3,25);
		t7 = _mm_xor_si128(t7,t8);
		t7 = _mm_xor_si128(t7,t9);
		t8 = _mm_srli_si128(t7,4);
		t7 = _mm_slli_si128(t7,12);
		t3 = _mm_xor_si128(t3,t7);
		t2 = _mm_srli_epi32(t3,1);
		t4 = _mm_srli_epi32(t3,2);
		t5 = _mm_srli_epi32(t3,7);
		t2 = _mm_xor_si128(t2,t4);
		t2 = _mm_xor_si128(t2,t5);
		t2 = _mm_xor_si128(t2,t8);
		t3 = _mm_xor_si128(t3,t2);
		t6 = _mm_xor_si128(t6,t3);
		y = _mm_shuffle_epi8(t6,shuf); // accumulator back in GCM byte order
	}

	// Remaining whole blocks, one GHASH step each.
	for (;i<blocks;++i)
		y = _ghash_aesni(shuf,h4,y,_mm_loadu_si128(ab + i));

	// Final partial block, zero-padded.
	if (rem) {
		__m128i last = _mm_setzero_si128();
		memcpy(&last,ab + blocks,rem);
		y = _ghash_aesni(shuf,h4,y,last);
	}

	// Length block: 64-bit big-endian bit count (no AAD in GMAC-only use).
	y = _ghash_aesni(shuf,h4,y,_mm_set_epi64((__m64)0LL,(__m64)Utils::hton((uint64_t)len * (uint64_t)8)));

	// Tag = GHASH ^ AES(J0) where J0 = iv || 0x00000001 (big-endian counter 1).
	__m128i t = _mm_xor_si128(_mm_set_epi32(0x01000000,(int)*((const uint32_t *)(iv+8)),(int)*((const uint32_t *)(iv+4)),(int)*((const uint32_t *)(iv))),_k.ni.k[0]);
	t = _mm_aesenc_si128(t,_k.ni.k[1]);
	t = _mm_aesenc_si128(t,_k.ni.k[2]);
	t = _mm_aesenc_si128(t,_k.ni.k[3]);
	t = _mm_aesenc_si128(t,_k.ni.k[4]);
	t = _mm_aesenc_si128(t,_k.ni.k[5]);
	t = _mm_aesenc_si128(t,_k.ni.k[6]);
	t = _mm_aesenc_si128(t,_k.ni.k[7]);
	t = _mm_aesenc_si128(t,_k.ni.k[8]);
	t = _mm_aesenc_si128(t,_k.ni.k[9]);
	t = _mm_aesenc_si128(t,_k.ni.k[10]);
	t = _mm_aesenc_si128(t,_k.ni.k[11]);
	t = _mm_aesenc_si128(t,_k.ni.k[12]);
	t = _mm_aesenc_si128(t,_k.ni.k[13]);
	t = _mm_aesenclast_si128(t,_k.ni.k[14]);
	_mm_storeu_si128((__m128i *)out,_mm_xor_si128(y,t));
}
  700. #endif /* ZT_AES_AESNI ******************************************************/
  701. };
  702. } // namespace ZeroTier
  703. #endif