Merge pull request #4329 from mrezai/openssl-1.0.2g

Update OpenSSL to version 1.0.2g
Rémi Verschelde 9 years ago
parent commit 206895afae
100 changed files with 1406 additions and 63451 deletions
  1. +8 -3  drivers/builtin_openssl2/SCsub
  2. +4 -182  drivers/builtin_openssl2/crypto/aes/aes_wrap.c
  3. +4 -2  drivers/builtin_openssl2/crypto/aes/aes_x86core.c
  4. +0 -2980  drivers/builtin_openssl2/crypto/aes/asm/aes-586.pl
  5. +0 -1134  drivers/builtin_openssl2/crypto/aes/asm/aes-armv4.pl
  6. +0 -1123  drivers/builtin_openssl2/crypto/aes/asm/aes-ia64.S
  7. +0 -1611  drivers/builtin_openssl2/crypto/aes/asm/aes-mips.pl
  8. +0 -1022  drivers/builtin_openssl2/crypto/aes/asm/aes-parisc.pl
  9. +0 -1365  drivers/builtin_openssl2/crypto/aes/asm/aes-ppc.pl
  10. +0 -2237  drivers/builtin_openssl2/crypto/aes/asm/aes-s390x.pl
  11. +0 -1182  drivers/builtin_openssl2/crypto/aes/asm/aes-sparcv9.pl
  12. +0 -2819  drivers/builtin_openssl2/crypto/aes/asm/aes-x86_64.pl
  13. +0 -1250  drivers/builtin_openssl2/crypto/aes/asm/aesni-sha1-x86_64.pl
  14. +0 -2189  drivers/builtin_openssl2/crypto/aes/asm/aesni-x86.pl
  15. +0 -3071  drivers/builtin_openssl2/crypto/aes/asm/aesni-x86_64.pl
  16. +0 -3108  drivers/builtin_openssl2/crypto/aes/asm/bsaes-x86_64.pl
  17. +0 -903  drivers/builtin_openssl2/crypto/aes/asm/vpaes-x86.pl
  18. +0 -1207  drivers/builtin_openssl2/crypto/aes/asm/vpaes-x86_64.pl
  19. +0 -126  drivers/builtin_openssl2/crypto/alphacpuid.pl
  20. +31 -4  drivers/builtin_openssl2/crypto/arm_arch.h
  21. +90 -7  drivers/builtin_openssl2/crypto/armcap.c
  22. +0 -154  drivers/builtin_openssl2/crypto/armv4cpuid.S
  23. +44 -2  drivers/builtin_openssl2/crypto/asn1/a_gentm.c
  24. +30 -0  drivers/builtin_openssl2/crypto/asn1/a_time.c
  25. +57 -35  drivers/builtin_openssl2/crypto/asn1/a_utctm.c
  26. +23 -1  drivers/builtin_openssl2/crypto/asn1/ameth_lib.c
  27. +3 -0  drivers/builtin_openssl2/crypto/asn1/asn1_locl.h
  28. +0 -83  drivers/builtin_openssl2/crypto/asn1/charmap.pl
  29. +15 -0  drivers/builtin_openssl2/crypto/asn1/t_x509.c
  30. +6 -8  drivers/builtin_openssl2/crypto/asn1/tasn_dec.c
  31. +3 -1  drivers/builtin_openssl2/crypto/asn1/x_crl.c
  32. +20 -0  drivers/builtin_openssl2/crypto/asn1/x_x509.c
  33. +5 -2  drivers/builtin_openssl2/crypto/asn1/x_x509a.c
  34. +0 -137  drivers/builtin_openssl2/crypto/bf/asm/bf-586.pl
  35. +0 -127  drivers/builtin_openssl2/crypto/bf/asm/bf-686.pl
  36. +0 -538  drivers/builtin_openssl2/crypto/bf/bftest.c
  37. +25 -0  drivers/builtin_openssl2/crypto/bio/b_dump.c
  38. +6 -2  drivers/builtin_openssl2/crypto/bio/b_sock.c
  39. +1 -1  drivers/builtin_openssl2/crypto/bio/bio_err.c
  40. +1 -1  drivers/builtin_openssl2/crypto/bio/bss_acpt.c
  41. +1 -1  drivers/builtin_openssl2/crypto/bio/bss_conn.c
  42. +77 -7  drivers/builtin_openssl2/crypto/bio/bss_dgram.c
  43. +20 -2  drivers/builtin_openssl2/crypto/bio/bss_fd.c
  44. +4 -2  drivers/builtin_openssl2/crypto/bio/bss_mem.c
  45. +0 -321  drivers/builtin_openssl2/crypto/bn/asm/alpha-mont.pl
  46. +0 -278  drivers/builtin_openssl2/crypto/bn/asm/armv4-gf2m.pl
  47. +0 -204  drivers/builtin_openssl2/crypto/bn/asm/armv4-mont.pl
  48. +0 -774  drivers/builtin_openssl2/crypto/bn/asm/bn-586.pl
  49. +0 -287  drivers/builtin_openssl2/crypto/bn/asm/co-586.pl
  50. +0 -851  drivers/builtin_openssl2/crypto/bn/asm/ia64-mont.pl
  51. +0 -1555  drivers/builtin_openssl2/crypto/bn/asm/ia64.S
  52. +0 -426  drivers/builtin_openssl2/crypto/bn/asm/mips-mont.pl
  53. +0 -2234  drivers/builtin_openssl2/crypto/bn/asm/mips.pl
  54. +0 -327  drivers/builtin_openssl2/crypto/bn/asm/mips3-mont.pl
  55. +0 -2201  drivers/builtin_openssl2/crypto/bn/asm/mips3.s
  56. +0 -1497  drivers/builtin_openssl2/crypto/bn/asm/modexp512-x86_64.pl
  57. +0 -1618  drivers/builtin_openssl2/crypto/bn/asm/pa-risc2.s
  58. +0 -1605  drivers/builtin_openssl2/crypto/bn/asm/pa-risc2W.s
  59. +0 -995  drivers/builtin_openssl2/crypto/bn/asm/parisc-mont.pl
  60. +0 -334  drivers/builtin_openssl2/crypto/bn/asm/ppc-mont.pl
  61. +0 -1998  drivers/builtin_openssl2/crypto/bn/asm/ppc.pl
  62. +0 -1088  drivers/builtin_openssl2/crypto/bn/asm/ppc64-mont.pl
  63. +0 -221  drivers/builtin_openssl2/crypto/bn/asm/s390x-gf2m.pl
  64. +0 -277  drivers/builtin_openssl2/crypto/bn/asm/s390x-mont.pl
  65. +0 -678  drivers/builtin_openssl2/crypto/bn/asm/s390x.S
  66. +0 -1458  drivers/builtin_openssl2/crypto/bn/asm/sparcv8.S
  67. +0 -1558  drivers/builtin_openssl2/crypto/bn/asm/sparcv8plus.S
  68. +0 -606  drivers/builtin_openssl2/crypto/bn/asm/sparcv9-mont.pl
  69. +0 -882  drivers/builtin_openssl2/crypto/bn/asm/sparcv9a-mont.pl
  70. +0 -242  drivers/builtin_openssl2/crypto/bn/asm/via-mont.pl
  71. +0 -313  drivers/builtin_openssl2/crypto/bn/asm/x86-gf2m.pl
  72. +0 -593  drivers/builtin_openssl2/crypto/bn/asm/x86-mont.pl
  73. +0 -28  drivers/builtin_openssl2/crypto/bn/asm/x86.pl
  74. +0 -76  drivers/builtin_openssl2/crypto/bn/asm/x86/add.pl
  75. +0 -277  drivers/builtin_openssl2/crypto/bn/asm/x86/comba.pl
  76. +0 -15  drivers/builtin_openssl2/crypto/bn/asm/x86/div.pl
  77. +0 -77  drivers/builtin_openssl2/crypto/bn/asm/x86/mul.pl
  78. +0 -87  drivers/builtin_openssl2/crypto/bn/asm/x86/mul_add.pl
  79. +0 -60  drivers/builtin_openssl2/crypto/bn/asm/x86/sqr.pl
  80. +0 -76  drivers/builtin_openssl2/crypto/bn/asm/x86/sub.pl
  81. +56 -54  drivers/builtin_openssl2/crypto/bn/asm/x86_64-gcc.c
  82. +0 -390  drivers/builtin_openssl2/crypto/bn/asm/x86_64-gf2m.pl
  83. +0 -1681  drivers/builtin_openssl2/crypto/bn/asm/x86_64-mont.pl
  84. +0 -1186  drivers/builtin_openssl2/crypto/bn/asm/x86_64-mont5.pl
  85. +121 -122  drivers/builtin_openssl2/crypto/bn/bn_asm.c
  86. +296 -22  drivers/builtin_openssl2/crypto/bn/bn_const.c
  87. +1 -2  drivers/builtin_openssl2/crypto/bn/bn_gf2m.c
  88. +27 -0  drivers/builtin_openssl2/crypto/bn/bn_lcl.h
  89. +0 -119  drivers/builtin_openssl2/crypto/bn/bn_prime.pl
  90. +0 -2137  drivers/builtin_openssl2/crypto/bn/bntest.c
  91. +0 -42  drivers/builtin_openssl2/crypto/bn/divtest.c
  92. +0 -313  drivers/builtin_openssl2/crypto/bn/exptest.c
  93. +346 -0  drivers/builtin_openssl2/crypto/bn/rsaz_exp.c
  94. +68 -0  drivers/builtin_openssl2/crypto/bn/rsaz_exp.h
  95. +11 -0  drivers/builtin_openssl2/crypto/buffer/buf_str.c
  96. +0 -1138  drivers/builtin_openssl2/crypto/camellia/asm/cmll-x86.pl
  97. +0 -1081  drivers/builtin_openssl2/crypto/camellia/asm/cmll-x86_64.pl
  98. +0 -177  drivers/builtin_openssl2/crypto/cast/asm/cast-586.pl
  99. +2 -0  drivers/builtin_openssl2/crypto/cast/cast_lcl.h
  100. +0 -241  drivers/builtin_openssl2/crypto/cast/casttest.c

+ 8 - 3
drivers/builtin_openssl2/SCsub

@@ -3,6 +3,7 @@ Import('env')
 openssl_sources = [
 "builtin_openssl2/nocpuid.c",
 "builtin_openssl2/ssl/t1_lib.c",
+"builtin_openssl2/ssl/t1_ext.c",
 "builtin_openssl2/ssl/s3_srvr.c",
 "builtin_openssl2/ssl/t1_enc.c",
 "builtin_openssl2/ssl/t1_meth.c",
@@ -11,7 +12,6 @@ openssl_sources = [
 "builtin_openssl2/ssl/tls_srp.c",
 "builtin_openssl2/ssl/kssl.c",
 "builtin_openssl2/ssl/d1_both.c",
-"builtin_openssl2/ssl/d1_enc.c",
 "builtin_openssl2/ssl/t1_clnt.c",
 "builtin_openssl2/ssl/bio_ssl.c",
 "builtin_openssl2/ssl/d1_srtp.c",
@@ -209,12 +209,12 @@ openssl_sources = [
 "builtin_openssl2/crypto/evp/c_all.c",
 "builtin_openssl2/crypto/evp/m_md2.c",
 "builtin_openssl2/crypto/evp/e_xcbc_d.c",
-"builtin_openssl2/crypto/evp/evp_fips.c",
 "builtin_openssl2/crypto/evp/pmeth_fn.c",
 "builtin_openssl2/crypto/evp/p_lib.c",
 "builtin_openssl2/crypto/evp/evp_key.c",
 "builtin_openssl2/crypto/evp/encode.c",
 "builtin_openssl2/crypto/evp/e_aes_cbc_hmac_sha1.c",
+"builtin_openssl2/crypto/evp/e_aes_cbc_hmac_sha256.c",
 "builtin_openssl2/crypto/evp/m_mdc2.c",
 "builtin_openssl2/crypto/evp/e_null.c",
 "builtin_openssl2/crypto/evp/p_sign.c",
@@ -242,6 +242,7 @@ openssl_sources = [
 "builtin_openssl2/crypto/ecdh/ech_ossl.c",
 "builtin_openssl2/crypto/ecdh/ech_lib.c",
 "builtin_openssl2/crypto/ecdh/ech_err.c",
+"builtin_openssl2/crypto/ecdh/ech_kdf.c",
 "builtin_openssl2/crypto/o_str.c",
 "builtin_openssl2/crypto/conf/conf_api.c",
 "builtin_openssl2/crypto/conf/conf_err.c",
@@ -296,6 +297,7 @@ openssl_sources = [
 "builtin_openssl2/crypto/cms/cms_env.c",
 "builtin_openssl2/crypto/cms/cms_enc.c",
 "builtin_openssl2/crypto/cms/cms_ess.c",
+"builtin_openssl2/crypto/cms/cms_kari.c",
 "builtin_openssl2/crypto/mem_dbg.c",
 "builtin_openssl2/crypto/uid.c",
 "builtin_openssl2/crypto/stack/stack.c",
@@ -362,6 +364,7 @@ openssl_sources = [
 "builtin_openssl2/crypto/x509v3/v3_genn.c",
 "builtin_openssl2/crypto/x509v3/pcy_cache.c",
 "builtin_openssl2/crypto/x509v3/v3_sxnet.c",
+"builtin_openssl2/crypto/x509v3/v3_scts.c",
 "builtin_openssl2/crypto/x509v3/v3err.c",
 "builtin_openssl2/crypto/x509v3/v3_conf.c",
 "builtin_openssl2/crypto/x509v3/v3_utl.c",
@@ -420,7 +423,6 @@ openssl_sources = [
 "builtin_openssl2/crypto/o_fips.c",
 "builtin_openssl2/crypto/engine/eng_rdrand.c",
 "builtin_openssl2/crypto/engine/eng_err.c",
-"builtin_openssl2/crypto/engine/eng_rsax.c",
 "builtin_openssl2/crypto/engine/tb_ecdsa.c",
 "builtin_openssl2/crypto/engine/tb_rsa.c",
 "builtin_openssl2/crypto/engine/tb_cipher.c",
@@ -487,6 +489,8 @@ openssl_sources = [
 "builtin_openssl2/crypto/dh/dh_ameth.c",
 "builtin_openssl2/crypto/dh/dh_check.c",
 "builtin_openssl2/crypto/dh/dh_err.c",
+"builtin_openssl2/crypto/dh/dh_kdf.c",
+"builtin_openssl2/crypto/dh/dh_rfc5114.c",
 "builtin_openssl2/crypto/modes/ccm128.c",
 "builtin_openssl2/crypto/modes/ofb128.c",
 "builtin_openssl2/crypto/modes/cts128.c",
@@ -495,6 +499,7 @@ openssl_sources = [
 "builtin_openssl2/crypto/modes/cbc128.c",
 "builtin_openssl2/crypto/modes/cfb128.c",
 "builtin_openssl2/crypto/modes/xts128.c",
+"builtin_openssl2/crypto/modes/wrap128.c",
 "builtin_openssl2/crypto/camellia/cmll_cfb.c",
 "builtin_openssl2/crypto/camellia/cmll_ecb.c",
 "builtin_openssl2/crypto/camellia/cmll_utl.c",

+ 4 - 182
drivers/builtin_openssl2/crypto/aes/aes_wrap.c

@@ -54,197 +54,19 @@
 
 #include "cryptlib.h"
 #include <openssl/aes.h>
-#include <openssl/bio.h>
-
-static const unsigned char default_iv[] = {
-    0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6,
-};
+#include <openssl/modes.h>
 
 int AES_wrap_key(AES_KEY *key, const unsigned char *iv,
                  unsigned char *out,
                  const unsigned char *in, unsigned int inlen)
 {
-    unsigned char *A, B[16], *R;
-    unsigned int i, j, t;
-    if ((inlen & 0x7) || (inlen < 8))
-        return -1;
-    A = B;
-    t = 1;
-    memcpy(out + 8, in, inlen);
-    if (!iv)
-        iv = default_iv;
-
-    memcpy(A, iv, 8);
-
-    for (j = 0; j < 6; j++) {
-        R = out + 8;
-        for (i = 0; i < inlen; i += 8, t++, R += 8) {
-            memcpy(B + 8, R, 8);
-            AES_encrypt(B, B, key);
-            A[7] ^= (unsigned char)(t & 0xff);
-            if (t > 0xff) {
-                A[6] ^= (unsigned char)((t >> 8) & 0xff);
-                A[5] ^= (unsigned char)((t >> 16) & 0xff);
-                A[4] ^= (unsigned char)((t >> 24) & 0xff);
-            }
-            memcpy(R, B + 8, 8);
-        }
-    }
-    memcpy(out, A, 8);
-    return inlen + 8;
+    return CRYPTO_128_wrap(key, iv, out, in, inlen, (block128_f) AES_encrypt);
 }
 
 int AES_unwrap_key(AES_KEY *key, const unsigned char *iv,
                    unsigned char *out,
                    const unsigned char *in, unsigned int inlen)
 {
-    unsigned char *A, B[16], *R;
-    unsigned int i, j, t;
-    inlen -= 8;
-    if (inlen & 0x7)
-        return -1;
-    if (inlen < 8)
-        return -1;
-    A = B;
-    t = 6 * (inlen >> 3);
-    memcpy(A, in, 8);
-    memcpy(out, in + 8, inlen);
-    for (j = 0; j < 6; j++) {
-        R = out + inlen - 8;
-        for (i = 0; i < inlen; i += 8, t--, R -= 8) {
-            A[7] ^= (unsigned char)(t & 0xff);
-            if (t > 0xff) {
-                A[6] ^= (unsigned char)((t >> 8) & 0xff);
-                A[5] ^= (unsigned char)((t >> 16) & 0xff);
-                A[4] ^= (unsigned char)((t >> 24) & 0xff);
-            }
-            memcpy(B + 8, R, 8);
-            AES_decrypt(B, B, key);
-            memcpy(R, B + 8, 8);
-        }
-    }
-    if (!iv)
-        iv = default_iv;
-    if (memcmp(A, iv, 8)) {
-        OPENSSL_cleanse(out, inlen);
-        return 0;
-    }
-    return inlen;
-}
-
-#ifdef AES_WRAP_TEST
-
-int AES_wrap_unwrap_test(const unsigned char *kek, int keybits,
-                         const unsigned char *iv,
-                         const unsigned char *eout,
-                         const unsigned char *key, int keylen)
-{
-    unsigned char *otmp = NULL, *ptmp = NULL;
-    int r, ret = 0;
-    AES_KEY wctx;
-    otmp = OPENSSL_malloc(keylen + 8);
-    ptmp = OPENSSL_malloc(keylen);
-    if (!otmp || !ptmp)
-        return 0;
-    if (AES_set_encrypt_key(kek, keybits, &wctx))
-        goto err;
-    r = AES_wrap_key(&wctx, iv, otmp, key, keylen);
-    if (r <= 0)
-        goto err;
-
-    if (eout && memcmp(eout, otmp, keylen))
-        goto err;
-
-    if (AES_set_decrypt_key(kek, keybits, &wctx))
-        goto err;
-    r = AES_unwrap_key(&wctx, iv, ptmp, otmp, r);
-
-    if (memcmp(key, ptmp, keylen))
-        goto err;
-
-    ret = 1;
-
- err:
-    if (otmp)
-        OPENSSL_free(otmp);
-    if (ptmp)
-        OPENSSL_free(ptmp);
-
-    return ret;
-
+    return CRYPTO_128_unwrap(key, iv, out, in, inlen,
+                             (block128_f) AES_decrypt);
 }
-
-int main(int argc, char **argv)
-{
-
-    static const unsigned char kek[] = {
-        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-        0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-        0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
-    };
-
-    static const unsigned char key[] = {
-        0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
-        0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
-        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
-    };
-
-    static const unsigned char e1[] = {
-        0x1f, 0xa6, 0x8b, 0x0a, 0x81, 0x12, 0xb4, 0x47,
-        0xae, 0xf3, 0x4b, 0xd8, 0xfb, 0x5a, 0x7b, 0x82,
-        0x9d, 0x3e, 0x86, 0x23, 0x71, 0xd2, 0xcf, 0xe5
-    };
-
-    static const unsigned char e2[] = {
-        0x96, 0x77, 0x8b, 0x25, 0xae, 0x6c, 0xa4, 0x35,
-        0xf9, 0x2b, 0x5b, 0x97, 0xc0, 0x50, 0xae, 0xd2,
-        0x46, 0x8a, 0xb8, 0xa1, 0x7a, 0xd8, 0x4e, 0x5d
-    };
-
-    static const unsigned char e3[] = {
-        0x64, 0xe8, 0xc3, 0xf9, 0xce, 0x0f, 0x5b, 0xa2,
-        0x63, 0xe9, 0x77, 0x79, 0x05, 0x81, 0x8a, 0x2a,
-        0x93, 0xc8, 0x19, 0x1e, 0x7d, 0x6e, 0x8a, 0xe7
-    };
-
-    static const unsigned char e4[] = {
-        0x03, 0x1d, 0x33, 0x26, 0x4e, 0x15, 0xd3, 0x32,
-        0x68, 0xf2, 0x4e, 0xc2, 0x60, 0x74, 0x3e, 0xdc,
-        0xe1, 0xc6, 0xc7, 0xdd, 0xee, 0x72, 0x5a, 0x93,
-        0x6b, 0xa8, 0x14, 0x91, 0x5c, 0x67, 0x62, 0xd2
-    };
-
-    static const unsigned char e5[] = {
-        0xa8, 0xf9, 0xbc, 0x16, 0x12, 0xc6, 0x8b, 0x3f,
-        0xf6, 0xe6, 0xf4, 0xfb, 0xe3, 0x0e, 0x71, 0xe4,
-        0x76, 0x9c, 0x8b, 0x80, 0xa3, 0x2c, 0xb8, 0x95,
-        0x8c, 0xd5, 0xd1, 0x7d, 0x6b, 0x25, 0x4d, 0xa1
-    };
-
-    static const unsigned char e6[] = {
-        0x28, 0xc9, 0xf4, 0x04, 0xc4, 0xb8, 0x10, 0xf4,
-        0xcb, 0xcc, 0xb3, 0x5c, 0xfb, 0x87, 0xf8, 0x26,
-        0x3f, 0x57, 0x86, 0xe2, 0xd8, 0x0e, 0xd3, 0x26,
-        0xcb, 0xc7, 0xf0, 0xe7, 0x1a, 0x99, 0xf4, 0x3b,
-        0xfb, 0x98, 0x8b, 0x9b, 0x7a, 0x02, 0xdd, 0x21
-    };
-
-    AES_KEY wctx, xctx;
-    int ret;
-    ret = AES_wrap_unwrap_test(kek, 128, NULL, e1, key, 16);
-    fprintf(stderr, "Key test result %d\n", ret);
-    ret = AES_wrap_unwrap_test(kek, 192, NULL, e2, key, 16);
-    fprintf(stderr, "Key test result %d\n", ret);
-    ret = AES_wrap_unwrap_test(kek, 256, NULL, e3, key, 16);
-    fprintf(stderr, "Key test result %d\n", ret);
-    ret = AES_wrap_unwrap_test(kek, 192, NULL, e4, key, 24);
-    fprintf(stderr, "Key test result %d\n", ret);
-    ret = AES_wrap_unwrap_test(kek, 256, NULL, e5, key, 24);
-    fprintf(stderr, "Key test result %d\n", ret);
-    ret = AES_wrap_unwrap_test(kek, 256, NULL, e6, key, 32);
-    fprintf(stderr, "Key test result %d\n", ret);
-}
-
-#endif
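
The rewritten functions above become thin wrappers over the shared CRYPTO_128_wrap/CRYPTO_128_unwrap routines from <openssl/modes.h>, and the removed AES_WRAP_TEST block exercised the RFC 3394 test vectors. A minimal round-trip sketch, using only the calls visible in this diff (AES_set_encrypt_key, AES_set_decrypt_key, AES_wrap_key, AES_unwrap_key) and the 128-bit KEK plus 16-byte key from the removed test data:

#include <stdio.h>
#include <string.h>
#include <openssl/aes.h>

int main(void)
{
    /* 128-bit KEK and 16-byte key from the removed RFC 3394 test vectors. */
    static const unsigned char kek[16] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
    };
    static const unsigned char key[16] = {
        0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
        0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
    };
    unsigned char wrapped[16 + 8], unwrapped[16];
    AES_KEY wctx;
    int r;

    AES_set_encrypt_key(kek, 128, &wctx);
    /* NULL iv selects the default 0xA6.. IV; returns inlen + 8 on success. */
    r = AES_wrap_key(&wctx, NULL, wrapped, key, sizeof(key));
    if (r <= 0)
        return 1;

    AES_set_decrypt_key(kek, 128, &wctx);
    /* Returns 0 when the integrity check against the IV fails. */
    r = AES_unwrap_key(&wctx, NULL, unwrapped, wrapped, (unsigned int)r);
    if (r <= 0 || memcmp(unwrapped, key, sizeof(key)) != 0)
        return 1;

    printf("round trip OK (%d bytes unwrapped)\n", r);
    return 0;
}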

+ 4 - 2
drivers/builtin_openssl2/crypto/aes/aes_x86core.c

@@ -89,8 +89,10 @@ typedef unsigned long long u64;
 #endif
 
 #undef ROTATE
-#if defined(_MSC_VER) || defined(__ICC)
-# define ROTATE(a,n)	_lrotl(a,n)
+#if defined(_MSC_VER)
+# define ROTATE(a,n)    _lrotl(a,n)
+#elif defined(__ICC)
+# define ROTATE(a,n)    _rotl(a,n)
 #elif defined(__GNUC__) && __GNUC__>=2
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 #   define ROTATE(a,n)  ({ register unsigned int ret;   \
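
This hunk stops treating MSVC and ICC identically: MSVC keeps _lrotl while ICC is switched to _rotl, and GCC-compatible compilers still fall through to the inline-asm definition whose first line is shown above. For reference, a portable C sketch of the 32-bit left rotation this macro provides (a hypothetical helper, not part of the patch):

#include <stdint.h>

/* Rotate a left by n bits for 0 <= n < 32; masking the right-shift
 * count avoids the undefined shift by 32 when n == 0. */
static inline uint32_t rotl32(uint32_t a, unsigned n)
{
    n &= 31;
    return (a << n) | (a >> ((32 - n) & 31));
}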

+ 0 - 2980
drivers/builtin_openssl2/crypto/aes/asm/aes-586.pl

@@ -1,2980 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# Version 4.3.
-#
-# You might fail to appreciate this module performance from the first
-# try. If compared to "vanilla" linux-ia32-icc target, i.e. considered
-# to be *the* best Intel C compiler without -KPIC, performance appears
-# to be virtually identical... But try to re-configure with shared
-# library support... Aha! Intel compiler "suddenly" lags behind by 30%
-# [on P4, more on others]:-) And if compared to position-independent
-# code generated by GNU C, this code performs *more* than *twice* as
-# fast! Yes, all this buzz about PIC means that unlike other hand-
-# coded implementations, this one was explicitly designed to be safe
-# to use even in shared library context... This also means that this
-# code isn't necessarily absolutely fastest "ever," because in order
-# to achieve position independence an extra register has to be
-# off-loaded to stack, which affects the benchmark result.
-#
-# Special note about instruction choice. Do you recall RC4_INT code
-# performing poorly on P4? It might be the time to figure out why.
-# RC4_INT code implies effective address calculations in base+offset*4
-# form. Trouble is that it seems that offset scaling turned to be
-# critical path... At least eliminating scaling resulted in 2.8x RC4
-# performance improvement [as you might recall]. As AES code is hungry
-# for scaling too, I [try to] avoid the latter by favoring off-by-2
-# shifts and masking the result with 0xFF<<2 instead of "boring" 0xFF.
-#
-# As was shown by Dean Gaudet <[email protected]>, the above note turned
-# void. Performance improvement with off-by-2 shifts was observed on
-# intermediate implementation, which was spilling yet another register
-# to stack... Final offset*4 code below runs just a tad faster on P4,
-# but exhibits up to 10% improvement on other cores.
-#
-# Second version is "monolithic" replacement for aes_core.c, which in
-# addition to AES_[de|en]crypt implements private_AES_set_[de|en]cryption_key.
-# This made it possible to implement little-endian variant of the
-# algorithm without modifying the base C code. Motivating factor for
-# the undertaken effort was that it appeared that in tight IA-32
-# register window little-endian flavor could achieve slightly higher
-# Instruction Level Parallelism, and it indeed resulted in up to 15%
-# better performance on most recent µ-archs...
-#
-# Third version adds AES_cbc_encrypt implementation, which resulted in
-# up to 40% performance imrovement of CBC benchmark results. 40% was
-# observed on P4 core, where "overall" imrovement coefficient, i.e. if
-# compared to PIC generated by GCC and in CBC mode, was observed to be
-# as large as 4x:-) CBC performance is virtually identical to ECB now
-# and on some platforms even better, e.g. 17.6 "small" cycles/byte on
-# Opteron, because certain function prologues and epilogues are
-# effectively taken out of the loop...
-#
-# Version 3.2 implements compressed tables and prefetch of these tables
-# in CBC[!] mode. Former means that 3/4 of table references are now
-# misaligned, which unfortunately has negative impact on elder IA-32
-# implementations, Pentium suffered 30% penalty, PIII - 10%.
-#
-# Version 3.3 avoids L1 cache aliasing between stack frame and
-# S-boxes, and 3.4 - L1 cache aliasing even between key schedule. The
-# latter is achieved by copying the key schedule to controlled place in
-# stack. This unfortunately has rather strong impact on small block CBC
-# performance, ~2x deterioration on 16-byte block if compared to 3.3.
-#
-# Version 3.5 checks if there is L1 cache aliasing between user-supplied
-# key schedule and S-boxes and abstains from copying the former if
-# there is no. This allows end-user to consciously retain small block
-# performance by aligning key schedule in specific manner.
-#
-# Version 3.6 compresses Td4 to 256 bytes and prefetches it in ECB.
-#
-# Current ECB performance numbers for 128-bit key in CPU cycles per
-# processed byte [measure commonly used by AES benchmarkers] are:
-#
-#		small footprint		fully unrolled
-# P4		24			22
-# AMD K8	20			19
-# PIII		25			23
-# Pentium	81			78
-#
-# Version 3.7 reimplements outer rounds as "compact." Meaning that
-# first and last rounds reference compact 256 bytes S-box. This means
-# that first round consumes a lot more CPU cycles and that encrypt
-# and decrypt performance becomes asymmetric. Encrypt performance
-# drops by 10-12%, while decrypt - by 20-25%:-( 256 bytes S-box is
-# aggressively pre-fetched.
-#
-# Version 4.0 effectively rolls back to 3.6 and instead implements
-# additional set of functions, _[x86|sse]_AES_[en|de]crypt_compact,
-# which use exclusively 256 byte S-box. These functions are to be
-# called in modes not concealing plain text, such as ECB, or when
-# we're asked to process smaller amount of data [or unconditionally
-# on hyper-threading CPU]. Currently it's called unconditionally from
-# AES_[en|de]crypt, which affects all modes, but CBC. CBC routine
-# still needs to be modified to switch between slower and faster
-# mode when appropriate... But in either case benchmark landscape
-# changes dramatically and below numbers are CPU cycles per processed
-# byte for 128-bit key.
-#
-#		ECB encrypt	ECB decrypt	CBC large chunk
-# P4		56[60]		84[100]		23
-# AMD K8	48[44]		70[79]		18
-# PIII		41[50]		61[91]		24
-# Core 2	32[38]		45[70]		18.5
-# Pentium	120		160		77
-#
-# Version 4.1 switches to compact S-box even in key schedule setup.
-#
-# Version 4.2 prefetches compact S-box in every SSE round or in other
-# words every cache-line is *guaranteed* to be accessed within ~50
-# cycles window. Why just SSE? Because it's needed on hyper-threading
-# CPU! Which is also why it's prefetched with 64 byte stride. Best
-# part is that it has no negative effect on performance:-)  
-#
-# Version 4.3 implements switch between compact and non-compact block
-# functions in AES_cbc_encrypt depending on how much data was asked
-# to be processed in one stroke.
-#
-######################################################################
-# Timing attacks are classified in two classes: synchronous when
-# attacker consciously initiates cryptographic operation and collects
-# timing data of various character afterwards, and asynchronous when
-# malicious code is executed on same CPU simultaneously with AES,
-# instruments itself and performs statistical analysis of this data.
-#
-# As far as synchronous attacks go the root to the AES timing
-# vulnerability is twofold. Firstly, of 256 S-box elements at most 160
-# are referred to in single 128-bit block operation. Well, in C
-# implementation with 4 distinct tables it's actually as little as 40
-# references per 256 elements table, but anyway... Secondly, even
-# though S-box elements are clustered into smaller amount of cache-
-# lines, smaller than 160 and even 40, it turned out that for certain
-# plain-text pattern[s] or simply put chosen plain-text and given key
-# few cache-lines remain unaccessed during block operation. Now, if
-# attacker can figure out this access pattern, he can deduct the key
-# [or at least part of it]. The natural way to mitigate this kind of
-# attacks is to minimize the amount of cache-lines in S-box and/or
-# prefetch them to ensure that every one is accessed for more uniform
-# timing. But note that *if* plain-text was concealed in such way that
-# input to block function is distributed *uniformly*, then attack
-# wouldn't apply. Now note that some encryption modes, most notably
-# CBC, do mask the plain-text in this exact way [secure cipher output
-# is distributed uniformly]. Yes, one still might find input that
-# would reveal the information about given key, but if amount of
-# candidate inputs to be tried is larger than amount of possible key
-# combinations then attack becomes infeasible. This is why revised
-# AES_cbc_encrypt "dares" to switch to larger S-box when larger chunk
-# of data is to be processed in one stroke. The current size limit of
-# 512 bytes is chosen to provide same [diminishigly low] probability
-# for cache-line to remain untouched in large chunk operation with
-# large S-box as for single block operation with compact S-box and
-# surely needs more careful consideration...
-#
-# As for asynchronous attacks. There are two flavours: attacker code
-# being interleaved with AES on hyper-threading CPU at *instruction*
-# level, and two processes time sharing single core. As for latter.
-# Two vectors. 1. Given that attacker process has higher priority,
-# yield execution to process performing AES just before timer fires
-# off the scheduler, immediately regain control of CPU and analyze the
-# cache state. For this attack to be efficient attacker would have to
-# effectively slow down the operation by several *orders* of magnitute,
-# by ratio of time slice to duration of handful of AES rounds, which
-# unlikely to remain unnoticed. Not to mention that this also means
-# that he would spend correspondigly more time to collect enough
-# statistical data to mount the attack. It's probably appropriate to
-# say that if adeversary reckons that this attack is beneficial and
-# risks to be noticed, you probably have larger problems having him
-# mere opportunity. In other words suggested code design expects you
-# to preclude/mitigate this attack by overall system security design.
-# 2. Attacker manages to make his code interrupt driven. In order for
-# this kind of attack to be feasible, interrupt rate has to be high
-# enough, again comparable to duration of handful of AES rounds. But
-# is there interrupt source of such rate? Hardly, not even 1Gbps NIC
-# generates interrupts at such raging rate...
-#
-# And now back to the former, hyper-threading CPU or more specifically
-# Intel P4. Recall that asynchronous attack implies that malicious
-# code instruments itself. And naturally instrumentation granularity
-# has be noticeably lower than duration of codepath accessing S-box.
-# Given that all cache-lines are accessed during that time that is.
-# Current implementation accesses *all* cache-lines within ~50 cycles
-# window, which is actually *less* than RDTSC latency on Intel P4!
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],"aes-586.pl",$x86only = $ARGV[$#ARGV] eq "386");
-&static_label("AES_Te");
-&static_label("AES_Td");
-
-$s0="eax";
-$s1="ebx";
-$s2="ecx";
-$s3="edx";
-$key="edi";
-$acc="esi";
-$tbl="ebp";
-
-# stack frame layout in _[x86|sse]_AES_* routines, frame is allocated
-# by caller
-$__ra=&DWP(0,"esp");	# return address
-$__s0=&DWP(4,"esp");	# s0 backing store
-$__s1=&DWP(8,"esp");	# s1 backing store
-$__s2=&DWP(12,"esp");	# s2 backing store
-$__s3=&DWP(16,"esp");	# s3 backing store
-$__key=&DWP(20,"esp");	# pointer to key schedule
-$__end=&DWP(24,"esp");	# pointer to end of key schedule
-$__tbl=&DWP(28,"esp");	# %ebp backing store
-
-# stack frame layout in AES_[en|crypt] routines, which differs from
-# above by 4 and overlaps by %ebp backing store
-$_tbl=&DWP(24,"esp");
-$_esp=&DWP(28,"esp");
-
-sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
-
-$speed_limit=512;	# chunks smaller than $speed_limit are
-			# processed with compact routine in CBC mode
-$small_footprint=1;	# $small_footprint=1 code is ~5% slower [on
-			# recent µ-archs], but ~5 times smaller!
-			# I favor compact code to minimize cache
-			# contention and in hope to "collect" 5% back
-			# in real-life applications...
-
-$vertical_spin=0;	# shift "verticaly" defaults to 0, because of
-			# its proof-of-concept status...
-# Note that there is no decvert(), as well as last encryption round is
-# performed with "horizontal" shifts. This is because this "vertical"
-# implementation [one which groups shifts on a given $s[i] to form a
-# "column," unlike "horizontal" one, which groups shifts on different
-# $s[i] to form a "row"] is work in progress. It was observed to run
-# few percents faster on Intel cores, but not AMD. On AMD K8 core it's
-# whole 12% slower:-( So we face a trade-off... Shall it be resolved
-# some day? Till then the code is considered experimental and by
-# default remains dormant...
-
-sub encvert()
-{ my ($te,@s) = @_;
-  my $v0 = $acc, $v1 = $key;
-
-	&mov	($v0,$s[3]);				# copy s3
-	&mov	(&DWP(4,"esp"),$s[2]);			# save s2
-	&mov	($v1,$s[0]);				# copy s0
-	&mov	(&DWP(8,"esp"),$s[1]);			# save s1
-
-	&movz	($s[2],&HB($s[0]));
-	&and	($s[0],0xFF);
-	&mov	($s[0],&DWP(0,$te,$s[0],8));		# s0>>0
-	&shr	($v1,16);
-	&mov	($s[3],&DWP(3,$te,$s[2],8));		# s0>>8
-	&movz	($s[1],&HB($v1));
-	&and	($v1,0xFF);
-	&mov	($s[2],&DWP(2,$te,$v1,8));		# s0>>16
-	 &mov	($v1,$v0);
-	&mov	($s[1],&DWP(1,$te,$s[1],8));		# s0>>24
-
-	&and	($v0,0xFF);
-	&xor	($s[3],&DWP(0,$te,$v0,8));		# s3>>0
-	&movz	($v0,&HB($v1));
-	&shr	($v1,16);
-	&xor	($s[2],&DWP(3,$te,$v0,8));		# s3>>8
-	&movz	($v0,&HB($v1));
-	&and	($v1,0xFF);
-	&xor	($s[1],&DWP(2,$te,$v1,8));		# s3>>16
-	 &mov	($v1,&DWP(4,"esp"));			# restore s2
-	&xor	($s[0],&DWP(1,$te,$v0,8));		# s3>>24
-
-	&mov	($v0,$v1);
-	&and	($v1,0xFF);
-	&xor	($s[2],&DWP(0,$te,$v1,8));		# s2>>0
-	&movz	($v1,&HB($v0));
-	&shr	($v0,16);
-	&xor	($s[1],&DWP(3,$te,$v1,8));		# s2>>8
-	&movz	($v1,&HB($v0));
-	&and	($v0,0xFF);
-	&xor	($s[0],&DWP(2,$te,$v0,8));		# s2>>16
-	 &mov	($v0,&DWP(8,"esp"));			# restore s1
-	&xor	($s[3],&DWP(1,$te,$v1,8));		# s2>>24
-
-	&mov	($v1,$v0);
-	&and	($v0,0xFF);
-	&xor	($s[1],&DWP(0,$te,$v0,8));		# s1>>0
-	&movz	($v0,&HB($v1));
-	&shr	($v1,16);
-	&xor	($s[0],&DWP(3,$te,$v0,8));		# s1>>8
-	&movz	($v0,&HB($v1));
-	&and	($v1,0xFF);
-	&xor	($s[3],&DWP(2,$te,$v1,8));		# s1>>16
-	 &mov	($key,$__key);				# reincarnate v1 as key
-	&xor	($s[2],&DWP(1,$te,$v0,8));		# s1>>24
-}
-
-# Another experimental routine, which features "horizontal spin," but
-# eliminates one reference to stack. Strangely enough runs slower...
-sub enchoriz()
-{ my $v0 = $key, $v1 = $acc;
-
-	&movz	($v0,&LB($s0));			#  3, 2, 1, 0*
-	&rotr	($s2,8);			#  8,11,10, 9
-	&mov	($v1,&DWP(0,$te,$v0,8));	#  0
-	&movz	($v0,&HB($s1));			#  7, 6, 5*, 4
-	&rotr	($s3,16);			# 13,12,15,14
-	&xor	($v1,&DWP(3,$te,$v0,8));	#  5
-	&movz	($v0,&HB($s2));			#  8,11,10*, 9
-	&rotr	($s0,16);			#  1, 0, 3, 2
-	&xor	($v1,&DWP(2,$te,$v0,8));	# 10
-	&movz	($v0,&HB($s3));			# 13,12,15*,14
-	&xor	($v1,&DWP(1,$te,$v0,8));	# 15, t[0] collected
-	&mov	($__s0,$v1);			# t[0] saved
-
-	&movz	($v0,&LB($s1));			#  7, 6, 5, 4*
-	&shr	($s1,16);			#  -, -, 7, 6
-	&mov	($v1,&DWP(0,$te,$v0,8));	#  4
-	&movz	($v0,&LB($s3));			# 13,12,15,14*
-	&xor	($v1,&DWP(2,$te,$v0,8));	# 14
-	&movz	($v0,&HB($s0));			#  1, 0, 3*, 2
-	&and	($s3,0xffff0000);		# 13,12, -, -
-	&xor	($v1,&DWP(1,$te,$v0,8));	#  3
-	&movz	($v0,&LB($s2));			#  8,11,10, 9*
-	&or	($s3,$s1);			# 13,12, 7, 6
-	&xor	($v1,&DWP(3,$te,$v0,8));	#  9, t[1] collected
-	&mov	($s1,$v1);			#  s[1]=t[1]
-
-	&movz	($v0,&LB($s0));			#  1, 0, 3, 2*
-	&shr	($s2,16);			#  -, -, 8,11
-	&mov	($v1,&DWP(2,$te,$v0,8));	#  2
-	&movz	($v0,&HB($s3));			# 13,12, 7*, 6
-	&xor	($v1,&DWP(1,$te,$v0,8));	#  7
-	&movz	($v0,&HB($s2));			#  -, -, 8*,11
-	&xor	($v1,&DWP(0,$te,$v0,8));	#  8
-	&mov	($v0,$s3);
-	&shr	($v0,24);			# 13
-	&xor	($v1,&DWP(3,$te,$v0,8));	# 13, t[2] collected
-
-	&movz	($v0,&LB($s2));			#  -, -, 8,11*
-	&shr	($s0,24);			#  1*
-	&mov	($s2,&DWP(1,$te,$v0,8));	# 11
-	&xor	($s2,&DWP(3,$te,$s0,8));	#  1
-	&mov	($s0,$__s0);			# s[0]=t[0]
-	&movz	($v0,&LB($s3));			# 13,12, 7, 6*
-	&shr	($s3,16);			#   ,  ,13,12
-	&xor	($s2,&DWP(2,$te,$v0,8));	#  6
-	&mov	($key,$__key);			# reincarnate v0 as key
-	&and	($s3,0xff);			#   ,  ,13,12*
-	&mov	($s3,&DWP(0,$te,$s3,8));	# 12
-	&xor	($s3,$s2);			# s[2]=t[3] collected
-	&mov	($s2,$v1);			# s[2]=t[2]
-}
-
-# More experimental code... SSE one... Even though this one eliminates
-# *all* references to stack, it's not faster...
-sub sse_encbody()
-{
-	&movz	($acc,&LB("eax"));		#  0
-	&mov	("ecx",&DWP(0,$tbl,$acc,8));	#  0
-	&pshufw	("mm2","mm0",0x0d);		#  7, 6, 3, 2
-	&movz	("edx",&HB("eax"));		#  1
-	&mov	("edx",&DWP(3,$tbl,"edx",8));	#  1
-	&shr	("eax",16);			#  5, 4
-
-	&movz	($acc,&LB("ebx"));		# 10
-	&xor	("ecx",&DWP(2,$tbl,$acc,8));	# 10
-	&pshufw	("mm6","mm4",0x08);		# 13,12, 9, 8
-	&movz	($acc,&HB("ebx"));		# 11
-	&xor	("edx",&DWP(1,$tbl,$acc,8));	# 11
-	&shr	("ebx",16);			# 15,14
-
-	&movz	($acc,&HB("eax"));		#  5
-	&xor	("ecx",&DWP(3,$tbl,$acc,8));	#  5
-	&movq	("mm3",QWP(16,$key));
-	&movz	($acc,&HB("ebx"));		# 15
-	&xor	("ecx",&DWP(1,$tbl,$acc,8));	# 15
-	&movd	("mm0","ecx");			# t[0] collected
-
-	&movz	($acc,&LB("eax"));		#  4
-	&mov	("ecx",&DWP(0,$tbl,$acc,8));	#  4
-	&movd	("eax","mm2");			#  7, 6, 3, 2
-	&movz	($acc,&LB("ebx"));		# 14
-	&xor	("ecx",&DWP(2,$tbl,$acc,8));	# 14
-	&movd	("ebx","mm6");			# 13,12, 9, 8
-
-	&movz	($acc,&HB("eax"));		#  3
-	&xor	("ecx",&DWP(1,$tbl,$acc,8));	#  3
-	&movz	($acc,&HB("ebx"));		#  9
-	&xor	("ecx",&DWP(3,$tbl,$acc,8));	#  9
-	&movd	("mm1","ecx");			# t[1] collected
-
-	&movz	($acc,&LB("eax"));		#  2
-	&mov	("ecx",&DWP(2,$tbl,$acc,8));	#  2
-	&shr	("eax",16);			#  7, 6
-	&punpckldq	("mm0","mm1");		# t[0,1] collected
-	&movz	($acc,&LB("ebx"));		#  8
-	&xor	("ecx",&DWP(0,$tbl,$acc,8));	#  8
-	&shr	("ebx",16);			# 13,12
-
-	&movz	($acc,&HB("eax"));		#  7
-	&xor	("ecx",&DWP(1,$tbl,$acc,8));	#  7
-	&pxor	("mm0","mm3");
-	&movz	("eax",&LB("eax"));		#  6
-	&xor	("edx",&DWP(2,$tbl,"eax",8));	#  6
-	&pshufw	("mm1","mm0",0x08);		#  5, 4, 1, 0
-	&movz	($acc,&HB("ebx"));		# 13
-	&xor	("ecx",&DWP(3,$tbl,$acc,8));	# 13
-	&xor	("ecx",&DWP(24,$key));		# t[2]
-	&movd	("mm4","ecx");			# t[2] collected
-	&movz	("ebx",&LB("ebx"));		# 12
-	&xor	("edx",&DWP(0,$tbl,"ebx",8));	# 12
-	&shr	("ecx",16);
-	&movd	("eax","mm1");			#  5, 4, 1, 0
-	&mov	("ebx",&DWP(28,$key));		# t[3]
-	&xor	("ebx","edx");
-	&movd	("mm5","ebx");			# t[3] collected
-	&and	("ebx",0xffff0000);
-	&or	("ebx","ecx");
-
-	&punpckldq	("mm4","mm5");		# t[2,3] collected
-}
-
-######################################################################
-# "Compact" block function
-######################################################################
-
-sub enccompact()
-{ my $Fn = mov;
-  while ($#_>5) { pop(@_); $Fn=sub{}; }
-  my ($i,$te,@s)=@_;
-  my $tmp = $key;
-  my $out = $i==3?$s[0]:$acc;
-
-	# $Fn is used in first compact round and its purpose is to
-	# void restoration of some values from stack, so that after
-	# 4xenccompact with extra argument $key value is left there...
-	if ($i==3)  {	&$Fn	($key,$__key);			}##%edx
-	else        {	&mov	($out,$s[0]);			}
-			&and	($out,0xFF);
-	if ($i==1)  {	&shr	($s[0],16);			}#%ebx[1]
-	if ($i==2)  {	&shr	($s[0],24);			}#%ecx[2]
-			&movz	($out,&BP(-128,$te,$out,1));
-
-	if ($i==3)  {	$tmp=$s[1];				}##%eax
-			&movz	($tmp,&HB($s[1]));
-			&movz	($tmp,&BP(-128,$te,$tmp,1));
-			&shl	($tmp,8);
-			&xor	($out,$tmp);
-
-	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$__s0);		}##%ebx
-	else        {	&mov	($tmp,$s[2]);
-			&shr	($tmp,16);			}
-	if ($i==2)  {	&and	($s[1],0xFF);			}#%edx[2]
-			&and	($tmp,0xFF);
-			&movz	($tmp,&BP(-128,$te,$tmp,1));
-			&shl	($tmp,16);
-			&xor	($out,$tmp);
-
-	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}##%ecx
-	elsif($i==2){	&movz	($tmp,&HB($s[3]));		}#%ebx[2]
-	else        {	&mov	($tmp,$s[3]);
-			&shr	($tmp,24);			}
-			&movz	($tmp,&BP(-128,$te,$tmp,1));
-			&shl	($tmp,24);
-			&xor	($out,$tmp);
-	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
-	if ($i==3)  {	&mov	($s[3],$acc);			}
-	&comment();
-}
-
-sub enctransform()
-{ my @s = ($s0,$s1,$s2,$s3);
-  my $i = shift;
-  my $tmp = $tbl;
-  my $r2  = $key ;
-
-	&mov	($acc,$s[$i]);
-	&and	($acc,0x80808080);
-	&mov	($tmp,$acc);
-	&shr	($tmp,7);
-	&lea	($r2,&DWP(0,$s[$i],$s[$i]));
-	&sub	($acc,$tmp);
-	&and	($r2,0xfefefefe);
-	&and	($acc,0x1b1b1b1b);
-	&mov	($tmp,$s[$i]);
-	&xor	($acc,$r2);	# r2
-
-	&xor	($s[$i],$acc);	# r0 ^ r2
-	&rotl	($s[$i],24);
-	&xor	($s[$i],$acc)	# ROTATE(r2^r0,24) ^ r2
-	&rotr	($tmp,16);
-	&xor	($s[$i],$tmp);
-	&rotr	($tmp,8);
-	&xor	($s[$i],$tmp);
-}
-
-&function_begin_B("_x86_AES_encrypt_compact");
-	# note that caller is expected to allocate stack frame for me!
-	&mov	($__key,$key);			# save key
-
-	&xor	($s0,&DWP(0,$key));		# xor with key
-	&xor	($s1,&DWP(4,$key));
-	&xor	($s2,&DWP(8,$key));
-	&xor	($s3,&DWP(12,$key));
-
-	&mov	($acc,&DWP(240,$key));		# load key->rounds
-	&lea	($acc,&DWP(-2,$acc,$acc));
-	&lea	($acc,&DWP(0,$key,$acc,8));
-	&mov	($__end,$acc);			# end of key schedule
-
-	# prefetch Te4
-	&mov	($key,&DWP(0-128,$tbl));
-	&mov	($acc,&DWP(32-128,$tbl));
-	&mov	($key,&DWP(64-128,$tbl));
-	&mov	($acc,&DWP(96-128,$tbl));
-	&mov	($key,&DWP(128-128,$tbl));
-	&mov	($acc,&DWP(160-128,$tbl));
-	&mov	($key,&DWP(192-128,$tbl));
-	&mov	($acc,&DWP(224-128,$tbl));
-
-	&set_label("loop",16);
-
-		&enccompact(0,$tbl,$s0,$s1,$s2,$s3,1);
-		&enccompact(1,$tbl,$s1,$s2,$s3,$s0,1);
-		&enccompact(2,$tbl,$s2,$s3,$s0,$s1,1);
-		&enccompact(3,$tbl,$s3,$s0,$s1,$s2,1);
-		&enctransform(2);
-		&enctransform(3);
-		&enctransform(0);
-		&enctransform(1);
-		&mov 	($key,$__key);
-		&mov	($tbl,$__tbl);
-		&add	($key,16);		# advance rd_key
-		&xor	($s0,&DWP(0,$key));
-		&xor	($s1,&DWP(4,$key));
-		&xor	($s2,&DWP(8,$key));
-		&xor	($s3,&DWP(12,$key));
-
-	&cmp	($key,$__end);
-	&mov	($__key,$key);
-	&jb	(&label("loop"));
-
-	&enccompact(0,$tbl,$s0,$s1,$s2,$s3);
-	&enccompact(1,$tbl,$s1,$s2,$s3,$s0);
-	&enccompact(2,$tbl,$s2,$s3,$s0,$s1);
-	&enccompact(3,$tbl,$s3,$s0,$s1,$s2);
-
-	&xor	($s0,&DWP(16,$key));
-	&xor	($s1,&DWP(20,$key));
-	&xor	($s2,&DWP(24,$key));
-	&xor	($s3,&DWP(28,$key));
-
-	&ret	();
-&function_end_B("_x86_AES_encrypt_compact");
-
-######################################################################
-# "Compact" SSE block function.
-######################################################################
-#
-# Performance is not actually extraordinary in comparison to pure
-# x86 code. In particular encrypt performance is virtually the same.
-# Decrypt performance on the other hand is 15-20% better on newer
-# µ-archs [but we're thankful for *any* improvement here], and ~50%
-# better on PIII:-) And additionally on the pros side this code
-# eliminates redundant references to stack and thus relieves/
-# minimizes the pressure on the memory bus.
-#
-# MMX register layout                           lsb
-# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-# |          mm4          |          mm0          |
-# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-# |     s3    |     s2    |     s1    |     s0    |    
-# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-# |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
-# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-#
-# Indexes translate as s[N/4]>>(8*(N%4)), e.g. 5 means s1>>8.
-# In this terms encryption and decryption "compact" permutation
-# matrices can be depicted as following:
-#
-# encryption              lsb	# decryption              lsb
-# +----++----+----+----+----+	# +----++----+----+----+----+
-# | t0 || 15 | 10 |  5 |  0 |	# | t0 ||  7 | 10 | 13 |  0 |
-# +----++----+----+----+----+	# +----++----+----+----+----+
-# | t1 ||  3 | 14 |  9 |  4 |	# | t1 || 11 | 14 |  1 |  4 |
-# +----++----+----+----+----+	# +----++----+----+----+----+
-# | t2 ||  7 |  2 | 13 |  8 |	# | t2 || 15 |  2 |  5 |  8 |
-# +----++----+----+----+----+	# +----++----+----+----+----+
-# | t3 || 11 |  6 |  1 | 12 |	# | t3 ||  3 |  6 |  9 | 12 |
-# +----++----+----+----+----+	# +----++----+----+----+----+
-#
-######################################################################
-# Why not xmm registers? Short answer. It was actually tested and
-# was not any faster, but *contrary*, most notably on Intel CPUs.
-# Longer answer. Main advantage of using mm registers is that movd
-# latency is lower, especially on Intel P4. While arithmetic
-# instructions are twice as many, they can be scheduled every cycle
-# and not every second one when they are operating on xmm register,
-# so that "arithmetic throughput" remains virtually the same. And
-# finally the code can be executed even on elder SSE-only CPUs:-)
-
-sub sse_enccompact()
-{
-	&pshufw	("mm1","mm0",0x08);		#  5, 4, 1, 0
-	&pshufw	("mm5","mm4",0x0d);		# 15,14,11,10
-	&movd	("eax","mm1");			#  5, 4, 1, 0
-	&movd	("ebx","mm5");			# 15,14,11,10
-
-	&movz	($acc,&LB("eax"));		#  0
-	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  0
-	&pshufw	("mm2","mm0",0x0d);		#  7, 6, 3, 2
-	&movz	("edx",&HB("eax"));		#  1
-	&movz	("edx",&BP(-128,$tbl,"edx",1));	#  1
-	&shl	("edx",8);			#  1
-	&shr	("eax",16);			#  5, 4
-
-	&movz	($acc,&LB("ebx"));		# 10
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 10
-	&shl	($acc,16);			# 10
-	&or	("ecx",$acc);			# 10
-	&pshufw	("mm6","mm4",0x08);		# 13,12, 9, 8
-	&movz	($acc,&HB("ebx"));		# 11
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 11
-	&shl	($acc,24);			# 11
-	&or	("edx",$acc);			# 11
-	&shr	("ebx",16);			# 15,14
-
-	&movz	($acc,&HB("eax"));		#  5
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  5
-	&shl	($acc,8);			#  5
-	&or	("ecx",$acc);			#  5
-	&movz	($acc,&HB("ebx"));		# 15
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 15
-	&shl	($acc,24);			# 15
-	&or	("ecx",$acc);			# 15
-	&movd	("mm0","ecx");			# t[0] collected
-
-	&movz	($acc,&LB("eax"));		#  4
-	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  4
-	&movd	("eax","mm2");			#  7, 6, 3, 2
-	&movz	($acc,&LB("ebx"));		# 14
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 14
-	&shl	($acc,16);			# 14
-	&or	("ecx",$acc);			# 14
-
-	&movd	("ebx","mm6");			# 13,12, 9, 8
-	&movz	($acc,&HB("eax"));		#  3
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  3
-	&shl	($acc,24);			#  3
-	&or	("ecx",$acc);			#  3
-	&movz	($acc,&HB("ebx"));		#  9
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  9
-	&shl	($acc,8);			#  9
-	&or	("ecx",$acc);			#  9
-	&movd	("mm1","ecx");			# t[1] collected
-
-	&movz	($acc,&LB("ebx"));		#  8
-	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  8
-	&shr	("ebx",16);			# 13,12
-	&movz	($acc,&LB("eax"));		#  2
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  2
-	&shl	($acc,16);			#  2
-	&or	("ecx",$acc);			#  2
-	&shr	("eax",16);			#  7, 6
-
-	&punpckldq	("mm0","mm1");		# t[0,1] collected
-
-	&movz	($acc,&HB("eax"));		#  7
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  7
-	&shl	($acc,24);			#  7
-	&or	("ecx",$acc);			#  7
-	&and	("eax",0xff);			#  6
-	&movz	("eax",&BP(-128,$tbl,"eax",1));	#  6
-	&shl	("eax",16);			#  6
-	&or	("edx","eax");			#  6
-	&movz	($acc,&HB("ebx"));		# 13
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 13
-	&shl	($acc,8);			# 13
-	&or	("ecx",$acc);			# 13
-	&movd	("mm4","ecx");			# t[2] collected
-	&and	("ebx",0xff);			# 12
-	&movz	("ebx",&BP(-128,$tbl,"ebx",1));	# 12
-	&or	("edx","ebx");			# 12
-	&movd	("mm5","edx");			# t[3] collected
-
-	&punpckldq	("mm4","mm5");		# t[2,3] collected
-}
-
-					if (!$x86only) {
-&function_begin_B("_sse_AES_encrypt_compact");
-	&pxor	("mm0",&QWP(0,$key));	#  7, 6, 5, 4, 3, 2, 1, 0
-	&pxor	("mm4",&QWP(8,$key));	# 15,14,13,12,11,10, 9, 8
-
-	# note that caller is expected to allocate stack frame for me!
-	&mov	($acc,&DWP(240,$key));		# load key->rounds
-	&lea	($acc,&DWP(-2,$acc,$acc));
-	&lea	($acc,&DWP(0,$key,$acc,8));
-	&mov	($__end,$acc);			# end of key schedule
-
-	&mov	($s0,0x1b1b1b1b);		# magic constant
-	&mov	(&DWP(8,"esp"),$s0);
-	&mov	(&DWP(12,"esp"),$s0);
-
-	# prefetch Te4
-	&mov	($s0,&DWP(0-128,$tbl));
-	&mov	($s1,&DWP(32-128,$tbl));
-	&mov	($s2,&DWP(64-128,$tbl));
-	&mov	($s3,&DWP(96-128,$tbl));
-	&mov	($s0,&DWP(128-128,$tbl));
-	&mov	($s1,&DWP(160-128,$tbl));
-	&mov	($s2,&DWP(192-128,$tbl));
-	&mov	($s3,&DWP(224-128,$tbl));
-
-	&set_label("loop",16);
-		&sse_enccompact();
-		&add	($key,16);
-		&cmp	($key,$__end);
-		&ja	(&label("out"));
-
-		&movq	("mm2",&QWP(8,"esp"));
-		&pxor	("mm3","mm3");		&pxor	("mm7","mm7");
-		&movq	("mm1","mm0");		&movq	("mm5","mm4");	# r0
-		&pcmpgtb("mm3","mm0");		&pcmpgtb("mm7","mm4");
-		&pand	("mm3","mm2");		&pand	("mm7","mm2");
-		&pshufw	("mm2","mm0",0xb1);	&pshufw	("mm6","mm4",0xb1);# ROTATE(r0,16)
-		&paddb	("mm0","mm0");		&paddb	("mm4","mm4");
-		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# = r2
-		&pshufw	("mm3","mm2",0xb1);	&pshufw	("mm7","mm6",0xb1);# r0
-		&pxor	("mm1","mm0");		&pxor	("mm5","mm4");	# r0^r2
-		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= ROTATE(r0,16)
-
-		&movq	("mm2","mm3");		&movq	("mm6","mm7");
-		&pslld	("mm3",8);		&pslld	("mm7",8);
-		&psrld	("mm2",24);		&psrld	("mm6",24);
-		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= r0<<8
-		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= r0>>24
-
-		&movq	("mm3","mm1");		&movq	("mm7","mm5");
-		&movq	("mm2",&QWP(0,$key));	&movq	("mm6",&QWP(8,$key));
-		&psrld	("mm1",8);		&psrld	("mm5",8);
-		&mov	($s0,&DWP(0-128,$tbl));
-		&pslld	("mm3",24);		&pslld	("mm7",24);
-		&mov	($s1,&DWP(64-128,$tbl));
-		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= (r2^r0)<<8
-		&mov	($s2,&DWP(128-128,$tbl));
-		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= (r2^r0)>>24
-		&mov	($s3,&DWP(192-128,$tbl));
-
-		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");
-	&jmp	(&label("loop"));
-
-	&set_label("out",16);
-	&pxor	("mm0",&QWP(0,$key));
-	&pxor	("mm4",&QWP(8,$key));
-
-	&ret	();
-&function_end_B("_sse_AES_encrypt_compact");
-					}
-
-######################################################################
-# Vanilla block function.
-######################################################################
-
-sub encstep()
-{ my ($i,$te,@s) = @_;
-  my $tmp = $key;
-  my $out = $i==3?$s[0]:$acc;
-
-	# lines marked with #%e?x[i] denote "reordered" instructions...
-	if ($i==3)  {	&mov	($key,$__key);			}##%edx
-	else        {	&mov	($out,$s[0]);
-			&and	($out,0xFF);			}
-	if ($i==1)  {	&shr	($s[0],16);			}#%ebx[1]
-	if ($i==2)  {	&shr	($s[0],24);			}#%ecx[2]
-			&mov	($out,&DWP(0,$te,$out,8));
-
-	if ($i==3)  {	$tmp=$s[1];				}##%eax
-			&movz	($tmp,&HB($s[1]));
-			&xor	($out,&DWP(3,$te,$tmp,8));
-
-	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$__s0);		}##%ebx
-	else        {	&mov	($tmp,$s[2]);
-			&shr	($tmp,16);			}
-	if ($i==2)  {	&and	($s[1],0xFF);			}#%edx[2]
-			&and	($tmp,0xFF);
-			&xor	($out,&DWP(2,$te,$tmp,8));
-
-	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}##%ecx
-	elsif($i==2){	&movz	($tmp,&HB($s[3]));		}#%ebx[2]
-	else        {	&mov	($tmp,$s[3]); 
-			&shr	($tmp,24)			}
-			&xor	($out,&DWP(1,$te,$tmp,8));
-	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
-	if ($i==3)  {	&mov	($s[3],$acc);			}
-			&comment();
-}
-
-sub enclast()
-{ my ($i,$te,@s)=@_;
-  my $tmp = $key;
-  my $out = $i==3?$s[0]:$acc;
-
-	if ($i==3)  {	&mov	($key,$__key);			}##%edx
-	else        {	&mov	($out,$s[0]);			}
-			&and	($out,0xFF);
-	if ($i==1)  {	&shr	($s[0],16);			}#%ebx[1]
-	if ($i==2)  {	&shr	($s[0],24);			}#%ecx[2]
-			&mov	($out,&DWP(2,$te,$out,8));
-			&and	($out,0x000000ff);
-
-	if ($i==3)  {	$tmp=$s[1];				}##%eax
-			&movz	($tmp,&HB($s[1]));
-			&mov	($tmp,&DWP(0,$te,$tmp,8));
-			&and	($tmp,0x0000ff00);
-			&xor	($out,$tmp);
-
-	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$__s0);		}##%ebx
-	else        {	&mov	($tmp,$s[2]);
-			&shr	($tmp,16);			}
-	if ($i==2)  {	&and	($s[1],0xFF);			}#%edx[2]
-			&and	($tmp,0xFF);
-			&mov	($tmp,&DWP(0,$te,$tmp,8));
-			&and	($tmp,0x00ff0000);
-			&xor	($out,$tmp);
-
-	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}##%ecx
-	elsif($i==2){	&movz	($tmp,&HB($s[3]));		}#%ebx[2]
-	else        {	&mov	($tmp,$s[3]);
-			&shr	($tmp,24);			}
-			&mov	($tmp,&DWP(2,$te,$tmp,8));
-			&and	($tmp,0xff000000);
-			&xor	($out,$tmp);
-	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
-	if ($i==3)  {	&mov	($s[3],$acc);			}
-}
-
-&function_begin_B("_x86_AES_encrypt");
-	if ($vertical_spin) {
-		# I need high parts of volatile registers to be accessible...
-		&exch	($s1="edi",$key="ebx");
-		&mov	($s2="esi",$acc="ecx");
-	}
-
-	# note that caller is expected to allocate stack frame for me!
-	&mov	($__key,$key);			# save key
-
-	&xor	($s0,&DWP(0,$key));		# xor with key
-	&xor	($s1,&DWP(4,$key));
-	&xor	($s2,&DWP(8,$key));
-	&xor	($s3,&DWP(12,$key));
-
-	&mov	($acc,&DWP(240,$key));		# load key->rounds
-
-	if ($small_footprint) {
-	    &lea	($acc,&DWP(-2,$acc,$acc));
-	    &lea	($acc,&DWP(0,$key,$acc,8));
-	    &mov	($__end,$acc);		# end of key schedule
-
-	    &set_label("loop",16);
-		if ($vertical_spin) {
-		    &encvert($tbl,$s0,$s1,$s2,$s3);
-		} else {
-		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
-		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
-		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
-		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
-		}
-		&add	($key,16);		# advance rd_key
-		&xor	($s0,&DWP(0,$key));
-		&xor	($s1,&DWP(4,$key));
-		&xor	($s2,&DWP(8,$key));
-		&xor	($s3,&DWP(12,$key));
-	    &cmp	($key,$__end);
-	    &mov	($__key,$key);
-	    &jb		(&label("loop"));
-	}
-	else {
-	    &cmp	($acc,10);
-	    &jle	(&label("10rounds"));
-	    &cmp	($acc,12);
-	    &jle	(&label("12rounds"));
-
-	&set_label("14rounds",4);
-	    for ($i=1;$i<3;$i++) {
-		if ($vertical_spin) {
-		    &encvert($tbl,$s0,$s1,$s2,$s3);
-		} else {
-		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
-		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
-		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
-		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
-		}
-		&xor	($s0,&DWP(16*$i+0,$key));
-		&xor	($s1,&DWP(16*$i+4,$key));
-		&xor	($s2,&DWP(16*$i+8,$key));
-		&xor	($s3,&DWP(16*$i+12,$key));
-	    }
-	    &add	($key,32);
-	    &mov	($__key,$key);		# advance rd_key
-	&set_label("12rounds",4);
-	    for ($i=1;$i<3;$i++) {
-		if ($vertical_spin) {
-		    &encvert($tbl,$s0,$s1,$s2,$s3);
-		} else {
-		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
-		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
-		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
-		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
-		}
-		&xor	($s0,&DWP(16*$i+0,$key));
-		&xor	($s1,&DWP(16*$i+4,$key));
-		&xor	($s2,&DWP(16*$i+8,$key));
-		&xor	($s3,&DWP(16*$i+12,$key));
-	    }
-	    &add	($key,32);
-	    &mov	($__key,$key);		# advance rd_key
-	&set_label("10rounds",4);
-	    for ($i=1;$i<10;$i++) {
-		if ($vertical_spin) {
-		    &encvert($tbl,$s0,$s1,$s2,$s3);
-		} else {
-		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
-		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
-		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
-		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
-		}
-		&xor	($s0,&DWP(16*$i+0,$key));
-		&xor	($s1,&DWP(16*$i+4,$key));
-		&xor	($s2,&DWP(16*$i+8,$key));
-		&xor	($s3,&DWP(16*$i+12,$key));
-	    }
-	}
-
-	if ($vertical_spin) {
-	    # "reincarnate" some registers for "horizontal" spin...
-	    &mov	($s1="ebx",$key="edi");
-	    &mov	($s2="ecx",$acc="esi");
-	}
-	&enclast(0,$tbl,$s0,$s1,$s2,$s3);
-	&enclast(1,$tbl,$s1,$s2,$s3,$s0);
-	&enclast(2,$tbl,$s2,$s3,$s0,$s1);
-	&enclast(3,$tbl,$s3,$s0,$s1,$s2);
-
-	&add	($key,$small_footprint?16:160);
-	&xor	($s0,&DWP(0,$key));
-	&xor	($s1,&DWP(4,$key));
-	&xor	($s2,&DWP(8,$key));
-	&xor	($s3,&DWP(12,$key));
-
-	&ret	();
-
-&set_label("AES_Te",64);	# Yes! I keep it in the code segment!
-	&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
-	&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
-	&_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
-	&_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
-	&_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
-	&_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
-	&_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
-	&_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
-	&_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
-	&_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
-	&_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
-	&_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
-	&_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
-	&_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
-	&_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
-	&_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
-	&_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
-	&_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
-	&_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
-	&_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
-	&_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
-	&_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
-	&_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
-	&_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
-	&_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
-	&_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
-	&_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
-	&_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
-	&_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
-	&_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
-	&_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
-	&_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
-	&_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
-	&_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
-	&_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
-	&_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
-	&_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
-	&_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
-	&_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
-	&_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
-	&_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
-	&_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
-	&_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
-	&_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
-	&_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
-	&_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
-	&_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
-	&_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
-	&_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
-	&_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
-	&_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
-	&_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
-	&_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
-	&_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
-	&_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
-	&_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
-	&_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
-	&_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
-	&_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
-	&_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
-	&_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
-	&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
-	&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
-	&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
-
-#Te4	# four copies of Te4 to choose from to avoid L1 aliasing
-	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
-	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
-	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
-	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
-	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
-	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
-	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
-	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
-	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
-	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
-	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
-	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
-	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
-	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
-	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
-	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
-	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
-	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
-	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
-	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
-	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
-	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
-	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
-	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
-	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
-	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
-	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
-	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
-	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
-	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
-	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
-	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
-	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
-	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
-	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
-	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
-	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
-	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
-	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
-	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
-	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
-	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
-	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
-	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
-	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
-	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
-	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
-	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
-	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
-	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
-	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
-	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
-	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
-	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
-	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
-	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
-	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
-	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
-	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
-	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
-	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
-	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
-	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
-	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
-	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
-	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
-	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
-	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
-	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
-	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
-	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
-	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
-	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
-	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
-	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
-	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
-	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
-	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
-	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
-	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
-	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
-	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
-	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
-	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
-	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
-	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
-	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
-	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
-	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
-	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
-	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
-	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
-	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
-	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
-	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
-	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
-	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
-	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
-	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
-	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
-	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
-	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
-	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
-	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
-	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
-	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
-	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
-	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
-	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
-	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
-	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
-	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
-	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
-	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
-	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
-	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
-	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
-	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
-	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
-	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
-	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
-	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
-	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
-	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
-	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
-	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
-	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
-	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-#rcon:
-	&data_word(0x00000001, 0x00000002, 0x00000004, 0x00000008);
-	&data_word(0x00000010, 0x00000020, 0x00000040, 0x00000080);
-	&data_word(0x0000001b, 0x00000036, 0x00000000, 0x00000000);
-	&data_word(0x00000000, 0x00000000, 0x00000000, 0x00000000);
-&function_end_B("_x86_AES_encrypt");
-
-# void AES_encrypt (const unsigned char *inp, unsigned char *out, const AES_KEY *key);
-&function_begin("AES_encrypt");
-	&mov	($acc,&wparam(0));		# load inp
-	&mov	($key,&wparam(2));		# load key
-
-	&mov	($s0,"esp");
-	&sub	("esp",36);
-	&and	("esp",-64);			# align to cache-line
-
-	# place stack frame just "above" the key schedule
-	&lea	($s1,&DWP(-64-63,$key));
-	&sub	($s1,"esp");
-	&neg	($s1);
-	&and	($s1,0x3C0);	# modulo 1024, but aligned to cache-line
-	&sub	("esp",$s1);
-	&add	("esp",4);	# 4 is reserved for caller's return address
-	&mov	($_esp,$s0);			# save stack pointer
-
-	&call   (&label("pic_point"));          # make it PIC!
-	&set_label("pic_point");
-	&blindpop($tbl);
-	&picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if (!$x86only);
-	&lea    ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
-
-	# pick Te4 copy which can't "overlap" with stack frame or key schedule
-	&lea	($s1,&DWP(768-4,"esp"));
-	&sub	($s1,$tbl);
-	&and	($s1,0x300);
-	&lea	($tbl,&DWP(2048+128,$tbl,$s1));
-
-					if (!$x86only) {
-	&bt	(&DWP(0,$s0),25);	# check for SSE bit
-	&jnc	(&label("x86"));
-
-	&movq	("mm0",&QWP(0,$acc));
-	&movq	("mm4",&QWP(8,$acc));
-	&call	("_sse_AES_encrypt_compact");
-	&mov	("esp",$_esp);			# restore stack pointer
-	&mov	($acc,&wparam(1));		# load out
-	&movq	(&QWP(0,$acc),"mm0");		# write output data
-	&movq	(&QWP(8,$acc),"mm4");
-	&emms	();
-	&function_end_A();
-					}
-	&set_label("x86",16);
-	&mov	($_tbl,$tbl);
-	&mov	($s0,&DWP(0,$acc));		# load input data
-	&mov	($s1,&DWP(4,$acc));
-	&mov	($s2,&DWP(8,$acc));
-	&mov	($s3,&DWP(12,$acc));
-	&call	("_x86_AES_encrypt_compact");
-	&mov	("esp",$_esp);			# restore stack pointer
-	&mov	($acc,&wparam(1));		# load out
-	&mov	(&DWP(0,$acc),$s0);		# write output data
-	&mov	(&DWP(4,$acc),$s1);
-	&mov	(&DWP(8,$acc),$s2);
-	&mov	(&DWP(12,$acc),$s3);
-&function_end("AES_encrypt");
-
-#--------------------------------------------------------------------#
-
-######################################################################
-# "Compact" block function
-######################################################################
-
-sub deccompact()
-{ my $Fn = mov;
-  while ($#_>5) { pop(@_); $Fn=sub{}; }
-  my ($i,$td,@s)=@_;
-  my $tmp = $key;
-  my $out = $i==3?$s[0]:$acc;
-
-	# $Fn is used in the first compact round and its purpose is to
-	# avoid restoration of some values from the stack, so that after
-	# 4x deccompact with the extra argument $key, the $s0 and $s1
-	# values are left there...
-	if($i==3)   {	&$Fn	($key,$__key);			}
-	else        {	&mov	($out,$s[0]);			}
-			&and	($out,0xFF);
-			&movz	($out,&BP(-128,$td,$out,1));
-
-	if ($i==3)  {	$tmp=$s[1];				}
-			&movz	($tmp,&HB($s[1]));
-			&movz	($tmp,&BP(-128,$td,$tmp,1));
-			&shl	($tmp,8);
-			&xor	($out,$tmp);
-
-	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$acc);		}
-	else        {	&mov	($tmp,$s[2]);			}
-			&shr	($tmp,16);
-			&and	($tmp,0xFF);
-			&movz	($tmp,&BP(-128,$td,$tmp,1));
-			&shl	($tmp,16);
-			&xor	($out,$tmp);
-
-	if ($i==3)  {	$tmp=$s[3]; &$Fn ($s[2],$__s1);		}
-	else        {	&mov	($tmp,$s[3]);			}
-			&shr	($tmp,24);
-			&movz	($tmp,&BP(-128,$td,$tmp,1));
-			&shl	($tmp,24);
-			&xor	($out,$tmp);
-	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
-	if ($i==3)  {	&$Fn	($s[3],$__s0);			}
-}
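[Editor's note] What deccompact() gathers per column, in plain C for reference: a single 256-byte inverse S-box (Td4) replaces the 2KB dword tables, with all byte positioning done in registers; InvMixColumns is applied separately by dectransform(). A sketch under that reading (names are illustrative, not from the source):

    typedef unsigned int u32;

    /* One output word from four state words through the byte-wide
     * inverse S-box; each input word contributes one byte. */
    static u32 compact_dec_col(u32 a, u32 b, u32 c, u32 d,
                               const unsigned char Td4[256])
    {
        return  (u32)Td4[ a        & 0xff]
             | ((u32)Td4[(b >>  8) & 0xff] <<  8)
             | ((u32)Td4[(c >> 16) & 0xff] << 16)
             | ((u32)Td4[ d >> 24         ] << 24);
    }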
-
-# must be called with 2,3,0,1 as argument sequence!!!
-sub dectransform()
-{ my @s = ($s0,$s1,$s2,$s3);
-  my $i = shift;
-  my $tmp = $key;
-  my $tp2 = $s[($i+2)%4]; $tp2 = $s[2] if ($i==1);
-  my $tp4 = $s[($i+3)%4]; $tp4 = $s[3] if ($i==1);
-  my $tp8 = $tbl;
-
-	&mov	($acc,$s[$i]);
-	&and	($acc,0x80808080);
-	&mov	($tmp,$acc);
-	&shr	($tmp,7);
-	&lea	($tp2,&DWP(0,$s[$i],$s[$i]));
-	&sub	($acc,$tmp);
-	&and	($tp2,0xfefefefe);
-	&and	($acc,0x1b1b1b1b);
-	&xor	($acc,$tp2);
-	&mov	($tp2,$acc);
-
-	&and	($acc,0x80808080);
-	&mov	($tmp,$acc);
-	&shr	($tmp,7);
-	&lea	($tp4,&DWP(0,$tp2,$tp2));
-	&sub	($acc,$tmp);
-	&and	($tp4,0xfefefefe);
-	&and	($acc,0x1b1b1b1b);
-	 &xor	($tp2,$s[$i]);	# tp2^tp1
-	&xor	($acc,$tp4);
-	&mov	($tp4,$acc);
-
-	&and	($acc,0x80808080);
-	&mov	($tmp,$acc);
-	&shr	($tmp,7);
-	&lea	($tp8,&DWP(0,$tp4,$tp4));
-	&sub	($acc,$tmp);
-	&and	($tp8,0xfefefefe);
-	&and	($acc,0x1b1b1b1b);
-	 &xor	($tp4,$s[$i]);	# tp4^tp1
-	 &rotl	($s[$i],8);	# = ROTATE(tp1,8)
-	&xor	($tp8,$acc);
-
-	&xor	($s[$i],$tp2);
-	&xor	($tp2,$tp8);
-	&rotl	($tp2,24);
-	&xor	($s[$i],$tp4);
-	&xor	($tp4,$tp8);
-	&rotl	($tp4,16);
-	&xor	($s[$i],$tp8);	# ^= tp8^(tp4^tp1)^(tp2^tp1)
-	&rotl	($tp8,8);
-	&xor	($s[$i],$tp2);	# ^= ROTATE(tp8^tp2^tp1,24)
-	&xor	($s[$i],$tp4);	# ^= ROTATE(tp8^tp4^tp1,16)
-	 &mov	($s[0],$__s0)			if($i==2); #prefetch $s0
-	 &mov	($s[1],$__s1)			if($i==3); #prefetch $s1
-	 &mov	($s[2],$__s2)			if($i==1);
-	&xor	($s[$i],$tp8);	# ^= ROTATE(tp8,8)
-
-	&mov	($s[3],$__s3)			if($i==1);
-	&mov	(&DWP(4+4*$i,"esp"),$s[$i])	if($i>=2);
-}
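[Editor's note] The arithmetic above is word-wide doubling in GF(2^8): the 0x80808080 mask extracts each byte's top bit and 0x1b1b1b1b folds in the AES reduction polynomial, with no byte-level branches. The same step in C (a sketch; xtime4 is an illustrative name):

    typedef unsigned int u32;

    /* Multiply each of the four bytes of x by 2 in GF(2^8),
     * reducing modulo x^8 + x^4 + x^3 + x + 1 (0x11b). */
    static u32 xtime4(u32 x)
    {
        u32 hi  = x & 0x80808080u;                /* top bit of every byte */
        u32 red = (hi - (hi >> 7)) & 0x1b1b1b1bu; /* 0x1b where it was set */
        return ((x << 1) & 0xfefefefeu) ^ red;
    }

dectransform() chains this three times to get tp2, tp4 and tp8, then combines them with the byte rotations annotated above, using the identities 0x09 = 0x08^0x01, 0x0b = 0x08^0x02^0x01, 0x0d = 0x08^0x04^0x01 and 0x0e = 0x08^0x04^0x02 for the InvMixColumns coefficients.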
-
-&function_begin_B("_x86_AES_decrypt_compact");
-	# note that caller is expected to allocate stack frame for me!
-	&mov	($__key,$key);			# save key
-
-	&xor	($s0,&DWP(0,$key));		# xor with key
-	&xor	($s1,&DWP(4,$key));
-	&xor	($s2,&DWP(8,$key));
-	&xor	($s3,&DWP(12,$key));
-
-	&mov	($acc,&DWP(240,$key));		# load key->rounds
-
-	&lea	($acc,&DWP(-2,$acc,$acc));
-	&lea	($acc,&DWP(0,$key,$acc,8));
-	&mov	($__end,$acc);			# end of key schedule
-
-	# prefetch Td4
-	&mov	($key,&DWP(0-128,$tbl));
-	&mov	($acc,&DWP(32-128,$tbl));
-	&mov	($key,&DWP(64-128,$tbl));
-	&mov	($acc,&DWP(96-128,$tbl));
-	&mov	($key,&DWP(128-128,$tbl));
-	&mov	($acc,&DWP(160-128,$tbl));
-	&mov	($key,&DWP(192-128,$tbl));
-	&mov	($acc,&DWP(224-128,$tbl));
-
-	&set_label("loop",16);
-
-		&deccompact(0,$tbl,$s0,$s3,$s2,$s1,1);
-		&deccompact(1,$tbl,$s1,$s0,$s3,$s2,1);
-		&deccompact(2,$tbl,$s2,$s1,$s0,$s3,1);
-		&deccompact(3,$tbl,$s3,$s2,$s1,$s0,1);
-		&dectransform(2);
-		&dectransform(3);
-		&dectransform(0);
-		&dectransform(1);
-		&mov 	($key,$__key);
-		&mov	($tbl,$__tbl);
-		&add	($key,16);		# advance rd_key
-		&xor	($s0,&DWP(0,$key));
-		&xor	($s1,&DWP(4,$key));
-		&xor	($s2,&DWP(8,$key));
-		&xor	($s3,&DWP(12,$key));
-
-	&cmp	($key,$__end);
-	&mov	($__key,$key);
-	&jb	(&label("loop"));
-
-	&deccompact(0,$tbl,$s0,$s3,$s2,$s1);
-	&deccompact(1,$tbl,$s1,$s0,$s3,$s2);
-	&deccompact(2,$tbl,$s2,$s1,$s0,$s3);
-	&deccompact(3,$tbl,$s3,$s2,$s1,$s0);
-
-	&xor	($s0,&DWP(16,$key));
-	&xor	($s1,&DWP(20,$key));
-	&xor	($s2,&DWP(24,$key));
-	&xor	($s3,&DWP(28,$key));
-
-	&ret	();
-&function_end_B("_x86_AES_decrypt_compact");
-
-######################################################################
-# "Compact" SSE block function.
-######################################################################
-
-sub sse_deccompact()
-{
-	&pshufw	("mm1","mm0",0x0c);		#  7, 6, 1, 0
-	&movd	("eax","mm1");			#  7, 6, 1, 0
-
-	&pshufw	("mm5","mm4",0x09);		# 13,12,11,10
-	&movz	($acc,&LB("eax"));		#  0
-	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  0
-	&movd	("ebx","mm5");			# 13,12,11,10
-	&movz	("edx",&HB("eax"));		#  1
-	&movz	("edx",&BP(-128,$tbl,"edx",1));	#  1
-	&shl	("edx",8);			#  1
-
-	&pshufw	("mm2","mm0",0x06);		#  3, 2, 5, 4
-	&movz	($acc,&LB("ebx"));		# 10
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 10
-	&shl	($acc,16);			# 10
-	&or	("ecx",$acc);			# 10
-	&shr	("eax",16);			#  7, 6
-	&movz	($acc,&HB("ebx"));		# 11
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 11
-	&shl	($acc,24);			# 11
-	&or	("edx",$acc);			# 11
-	&shr	("ebx",16);			# 13,12
-
-	&pshufw	("mm6","mm4",0x03);		# 9, 8,15,14
-	&movz	($acc,&HB("eax"));		#  7
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  7
-	&shl	($acc,24);			#  7
-	&or	("ecx",$acc);			#  7
-	&movz	($acc,&HB("ebx"));		# 13
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 13
-	&shl	($acc,8);			# 13
-	&or	("ecx",$acc);			# 13
-	&movd	("mm0","ecx");			# t[0] collected
-
-	&movz	($acc,&LB("eax"));		#  6
-	&movd	("eax","mm2");			#  3, 2, 5, 4
-	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  6
-	&shl	("ecx",16);			#  6
-	&movz	($acc,&LB("ebx"));		# 12
-	&movd	("ebx","mm6");			#  9, 8,15,14
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 12
-	&or	("ecx",$acc);			# 12
-
-	&movz	($acc,&LB("eax"));		#  4
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  4
-	&or	("edx",$acc);			#  4
-	&movz	($acc,&LB("ebx"));		# 14
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 14
-	&shl	($acc,16);			# 14
-	&or	("edx",$acc);			# 14
-	&movd	("mm1","edx");			# t[1] collected
-
-	&movz	($acc,&HB("eax"));		#  5
-	&movz	("edx",&BP(-128,$tbl,$acc,1));	#  5
-	&shl	("edx",8);			#  5
-	&movz	($acc,&HB("ebx"));		# 15
-	&shr	("eax",16);			#  3, 2
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 15
-	&shl	($acc,24);			# 15
-	&or	("edx",$acc);			# 15
-	&shr	("ebx",16);			#  9, 8
-
-	&punpckldq	("mm0","mm1");		# t[0,1] collected
-
-	&movz	($acc,&HB("ebx"));		#  9
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  9
-	&shl	($acc,8);			#  9
-	&or	("ecx",$acc);			#  9
-	&and	("ebx",0xff);			#  8
-	&movz	("ebx",&BP(-128,$tbl,"ebx",1));	#  8
-	&or	("edx","ebx");			#  8
-	&movz	($acc,&LB("eax"));		#  2
-	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  2
-	&shl	($acc,16);			#  2
-	&or	("edx",$acc);			#  2
-	&movd	("mm4","edx");			# t[2] collected
-	&movz	("eax",&HB("eax"));		#  3
-	&movz	("eax",&BP(-128,$tbl,"eax",1));	#  3
-	&shl	("eax",24);			#  3
-	&or	("ecx","eax");			#  3
-	&movd	("mm5","ecx");			# t[3] collected
-
-	&punpckldq	("mm4","mm5");		# t[2,3] collected
-}
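[Editor's note] sse_deccompact() keeps the state in two MMX registers and uses pshufw to rotate 16-bit lanes into place before the scalar table lookups; the trailing comments track which of the 16 state bytes each step extracts. A sketch of the lane-shuffle mechanics in SSE2 intrinsics (the original uses 64-bit MMX; selector 0x0c picks words 0 and 3, giving the "7, 6, 1, 0" byte order noted above):

    #include <emmintrin.h>

    /* Pack two distant state words so that four lookup bytes land in
     * one 32-bit extraction, as the pshufw/movd pair above does. */
    static int gather_pair(__m128i state)
    {
        __m128i t = _mm_shufflelo_epi16(state, 0x0c); /* w0,w3,w0,w0 */
        return _mm_cvtsi128_si32(t);                  /* bytes 1,0 then 7,6 */
    }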
-
-					if (!$x86only) {
-&function_begin_B("_sse_AES_decrypt_compact");
-	&pxor	("mm0",&QWP(0,$key));	#  7, 6, 5, 4, 3, 2, 1, 0
-	&pxor	("mm4",&QWP(8,$key));	# 15,14,13,12,11,10, 9, 8
-
-	# note that caller is expected to allocate stack frame for me!
-	&mov	($acc,&DWP(240,$key));		# load key->rounds
-	&lea	($acc,&DWP(-2,$acc,$acc));
-	&lea	($acc,&DWP(0,$key,$acc,8));
-	&mov	($__end,$acc);			# end of key schedule
-
-	&mov	($s0,0x1b1b1b1b);		# magic constant
-	&mov	(&DWP(8,"esp"),$s0);
-	&mov	(&DWP(12,"esp"),$s0);
-
-	# prefetch Td4
-	&mov	($s0,&DWP(0-128,$tbl));
-	&mov	($s1,&DWP(32-128,$tbl));
-	&mov	($s2,&DWP(64-128,$tbl));
-	&mov	($s3,&DWP(96-128,$tbl));
-	&mov	($s0,&DWP(128-128,$tbl));
-	&mov	($s1,&DWP(160-128,$tbl));
-	&mov	($s2,&DWP(192-128,$tbl));
-	&mov	($s3,&DWP(224-128,$tbl));
-
-	&set_label("loop",16);
-		&sse_deccompact();
-		&add	($key,16);
-		&cmp	($key,$__end);
-		&ja	(&label("out"));
-
-		# ROTATE(x^y,N) == ROTATE(x,N)^ROTATE(y,N)
-		&movq	("mm3","mm0");		&movq	("mm7","mm4");
-		&movq	("mm2","mm0",1);	&movq	("mm6","mm4",1);
-		&movq	("mm1","mm0");		&movq	("mm5","mm4");
-		&pshufw	("mm0","mm0",0xb1);	&pshufw	("mm4","mm4",0xb1);# = ROTATE(tp0,16)
-		&pslld	("mm2",8);		&pslld	("mm6",8);
-		&psrld	("mm3",8);		&psrld	("mm7",8);
-		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= tp0<<8
-		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp0>>8
-		&pslld	("mm2",16);		&pslld	("mm6",16);
-		&psrld	("mm3",16);		&psrld	("mm7",16);
-		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= tp0<<24
-		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp0>>24
-
-		&movq	("mm3",&QWP(8,"esp"));
-		&pxor	("mm2","mm2");		&pxor	("mm6","mm6");
-		&pcmpgtb("mm2","mm1");		&pcmpgtb("mm6","mm5");
-		&pand	("mm2","mm3");		&pand	("mm6","mm3");
-		&paddb	("mm1","mm1");		&paddb	("mm5","mm5");
-		&pxor	("mm1","mm2");		&pxor	("mm5","mm6");	# tp2
-		&movq	("mm3","mm1");		&movq	("mm7","mm5");
-		&movq	("mm2","mm1");		&movq	("mm6","mm5");
-		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp2
-		&pslld	("mm3",24);		&pslld	("mm7",24);
-		&psrld	("mm2",8);		&psrld	("mm6",8);
-		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp2<<24
-		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= tp2>>8
-
-		&movq	("mm2",&QWP(8,"esp"));
-		&pxor	("mm3","mm3");		&pxor	("mm7","mm7");
-		&pcmpgtb("mm3","mm1");		&pcmpgtb("mm7","mm5");
-		&pand	("mm3","mm2");		&pand	("mm7","mm2");
-		&paddb	("mm1","mm1");		&paddb	("mm5","mm5");
-		&pxor	("mm1","mm3");		&pxor	("mm5","mm7");	# tp4
-		&pshufw	("mm3","mm1",0xb1);	&pshufw	("mm7","mm5",0xb1);
-		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp4
-		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= ROTATE(tp4,16)	
-
-		&pxor	("mm3","mm3");		&pxor	("mm7","mm7");
-		&pcmpgtb("mm3","mm1");		&pcmpgtb("mm7","mm5");
-		&pand	("mm3","mm2");		&pand	("mm7","mm2");
-		&paddb	("mm1","mm1");		&paddb	("mm5","mm5");
-		&pxor	("mm1","mm3");		&pxor	("mm5","mm7");	# tp8
-		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp8
-		&movq	("mm3","mm1");		&movq	("mm7","mm5");
-		&pshufw	("mm2","mm1",0xb1);	&pshufw	("mm6","mm5",0xb1);
-		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= ROTATE(tp8,16)
-		&pslld	("mm1",8);		&pslld	("mm5",8);
-		&psrld	("mm3",8);		&psrld	("mm7",8);
-		&movq	("mm2",&QWP(0,$key));	&movq	("mm6",&QWP(8,$key));
-		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp8<<8
-		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp8>>8
-		&mov	($s0,&DWP(0-128,$tbl));
-		&pslld	("mm1",16);		&pslld	("mm5",16);
-		&mov	($s1,&DWP(64-128,$tbl));
-		&psrld	("mm3",16);		&psrld	("mm7",16);
-		&mov	($s2,&DWP(128-128,$tbl));
-		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp8<<24
-		&mov	($s3,&DWP(192-128,$tbl));
-		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp8>>24
-
-		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");
-	&jmp	(&label("loop"));
-
-	&set_label("out",16);
-	&pxor	("mm0",&QWP(0,$key));
-	&pxor	("mm4",&QWP(8,$key));
-
-	&ret	();
-&function_end_B("_sse_AES_decrypt_compact");
-					}
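[Editor's note] The pxor/pcmpgtb/pand/paddb/pxor sequence in the loop above is the vector form of the same GF(2^8) doubling: a signed byte compare against zero yields 0xff exactly in the bytes whose top bit is set, which is then masked to the 0x1b constant parked at 8(%esp). Transcribed to SSE2 intrinsics as a sketch (the original works on 64-bit MMX registers):

    #include <emmintrin.h>

    /* Double 16 GF(2^8) elements at once, branch-free. */
    static __m128i xtime16(__m128i x)
    {
        __m128i msb = _mm_cmpgt_epi8(_mm_setzero_si128(), x); /* 0xff where MSB set */
        __m128i red = _mm_and_si128(msb, _mm_set1_epi8(0x1b));
        return _mm_xor_si128(_mm_add_epi8(x, x), red);        /* (x<<1) ^ reduction */
    }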
-
-######################################################################
-# Vanilla block function.
-######################################################################
-
-sub decstep()
-{ my ($i,$td,@s) = @_;
-  my $tmp = $key;
-  my $out = $i==3?$s[0]:$acc;
-
-	# no instructions are reordered, as performance appears
-	# optimal... or rather, all attempts to reorder did not
-	# result in better performance [which, by the way, is not a
-	# bit lower than encryption].
-	if($i==3)   {	&mov	($key,$__key);			}
-	else        {	&mov	($out,$s[0]);			}
-			&and	($out,0xFF);
-			&mov	($out,&DWP(0,$td,$out,8));
-
-	if ($i==3)  {	$tmp=$s[1];				}
-			&movz	($tmp,&HB($s[1]));
-			&xor	($out,&DWP(3,$td,$tmp,8));
-
-	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$acc);		}
-	else        {	&mov	($tmp,$s[2]);			}
-			&shr	($tmp,16);
-			&and	($tmp,0xFF);
-			&xor	($out,&DWP(2,$td,$tmp,8));
-
-	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}
-	else        {	&mov	($tmp,$s[3]);			}
-			&shr	($tmp,24);
-			&xor	($out,&DWP(1,$td,$tmp,8));
-	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
-	if ($i==3)  {	&mov	($s[3],$__s0);			}
-			&comment();
-}
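[Editor's note] decstep() hand-schedules the classic four-table round. For reference, its conventional C shape (a sketch in the usual little-endian byte order; Td0..Td3 denote byte-rotated views of one table, which the assembly synthesizes from a single doubled table, hence the stride-8 addressing with byte offsets 0..3):

    typedef unsigned int u32;

    /* One output word of a full decryption round: each input word
     * contributes one byte, looked up in a 256-entry dword table. */
    static u32 dec_round_col(u32 a, u32 b, u32 c, u32 d, u32 rk,
                             const u32 Td0[256], const u32 Td1[256],
                             const u32 Td2[256], const u32 Td3[256])
    {
        return Td0[ a        & 0xff] ^
               Td1[(b >>  8) & 0xff] ^
               Td2[(c >> 16) & 0xff] ^
               Td3[ d >> 24         ] ^ rk;   /* round-key word */
    }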
-
-sub declast()
-{ my ($i,$td,@s)=@_;
-  my $tmp = $key;
-  my $out = $i==3?$s[0]:$acc;
-
-	if($i==0)   {	&lea	($td,&DWP(2048+128,$td));
-			&mov	($tmp,&DWP(0-128,$td));
-			&mov	($acc,&DWP(32-128,$td));
-			&mov	($tmp,&DWP(64-128,$td));
-			&mov	($acc,&DWP(96-128,$td));
-			&mov	($tmp,&DWP(128-128,$td));
-			&mov	($acc,&DWP(160-128,$td));
-			&mov	($tmp,&DWP(192-128,$td));
-			&mov	($acc,&DWP(224-128,$td));
-			&lea	($td,&DWP(-128,$td));		}
-	if($i==3)   {	&mov	($key,$__key);			}
-	else        {	&mov	($out,$s[0]);			}
-			&and	($out,0xFF);
-			&movz	($out,&BP(0,$td,$out,1));
-
-	if ($i==3)  {	$tmp=$s[1];				}
-			&movz	($tmp,&HB($s[1]));
-			&movz	($tmp,&BP(0,$td,$tmp,1));
-			&shl	($tmp,8);
-			&xor	($out,$tmp);
-
-	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$acc);		}
-	else        {	&mov	($tmp,$s[2]);			}
-			&shr	($tmp,16);
-			&and	($tmp,0xFF);
-			&movz	($tmp,&BP(0,$td,$tmp,1));
-			&shl	($tmp,16);
-			&xor	($out,$tmp);
-
-	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}
-	else        {	&mov	($tmp,$s[3]);			}
-			&shr	($tmp,24);
-			&movz	($tmp,&BP(0,$td,$tmp,1));
-			&shl	($tmp,24);
-			&xor	($out,$tmp);
-	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
-	if ($i==3)  {	&mov	($s[3],$__s0);
-			&lea	($td,&DWP(-2048,$td));		}
-}
-
-&function_begin_B("_x86_AES_decrypt");
-	# note that caller is expected to allocate stack frame for me!
-	&mov	($__key,$key);			# save key
-
-	&xor	($s0,&DWP(0,$key));		# xor with key
-	&xor	($s1,&DWP(4,$key));
-	&xor	($s2,&DWP(8,$key));
-	&xor	($s3,&DWP(12,$key));
-
-	&mov	($acc,&DWP(240,$key));		# load key->rounds
-
-	if ($small_footprint) {
-	    &lea	($acc,&DWP(-2,$acc,$acc));
-	    &lea	($acc,&DWP(0,$key,$acc,8));
-	    &mov	($__end,$acc);		# end of key schedule
-	    &set_label("loop",16);
-		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
-		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
-		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
-		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
-		&add	($key,16);		# advance rd_key
-		&xor	($s0,&DWP(0,$key));
-		&xor	($s1,&DWP(4,$key));
-		&xor	($s2,&DWP(8,$key));
-		&xor	($s3,&DWP(12,$key));
-	    &cmp	($key,$__end);
-	    &mov	($__key,$key);
-	    &jb		(&label("loop"));
-	}
-	else {
-	    &cmp	($acc,10);
-	    &jle	(&label("10rounds"));
-	    &cmp	($acc,12);
-	    &jle	(&label("12rounds"));
-
-	&set_label("14rounds",4);
-	    for ($i=1;$i<3;$i++) {
-		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
-		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
-		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
-		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
-		&xor	($s0,&DWP(16*$i+0,$key));
-		&xor	($s1,&DWP(16*$i+4,$key));
-		&xor	($s2,&DWP(16*$i+8,$key));
-		&xor	($s3,&DWP(16*$i+12,$key));
-	    }
-	    &add	($key,32);
-	    &mov	($__key,$key);		# advance rd_key
-	&set_label("12rounds",4);
-	    for ($i=1;$i<3;$i++) {
-		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
-		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
-		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
-		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
-		&xor	($s0,&DWP(16*$i+0,$key));
-		&xor	($s1,&DWP(16*$i+4,$key));
-		&xor	($s2,&DWP(16*$i+8,$key));
-		&xor	($s3,&DWP(16*$i+12,$key));
-	    }
-	    &add	($key,32);
-	    &mov	($__key,$key);		# advance rd_key
-	&set_label("10rounds",4);
-	    for ($i=1;$i<10;$i++) {
-		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
-		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
-		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
-		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
-		&xor	($s0,&DWP(16*$i+0,$key));
-		&xor	($s1,&DWP(16*$i+4,$key));
-		&xor	($s2,&DWP(16*$i+8,$key));
-		&xor	($s3,&DWP(16*$i+12,$key));
-	    }
-	}
-
-	&declast(0,$tbl,$s0,$s3,$s2,$s1);
-	&declast(1,$tbl,$s1,$s0,$s3,$s2);
-	&declast(2,$tbl,$s2,$s1,$s0,$s3);
-	&declast(3,$tbl,$s3,$s2,$s1,$s0);
-
-	&add	($key,$small_footprint?16:160);
-	&xor	($s0,&DWP(0,$key));
-	&xor	($s1,&DWP(4,$key));
-	&xor	($s2,&DWP(8,$key));
-	&xor	($s3,&DWP(12,$key));
-
-	&ret	();
-
-&set_label("AES_Td",64);	# Yes! I keep it in the code segment!
-	&_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
-	&_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
-	&_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
-	&_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
-	&_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
-	&_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
-	&_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
-	&_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
-	&_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
-	&_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
-	&_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
-	&_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
-	&_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
-	&_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
-	&_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
-	&_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
-	&_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
-	&_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
-	&_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
-	&_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
-	&_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
-	&_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
-	&_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
-	&_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
-	&_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
-	&_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
-	&_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
-	&_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
-	&_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
-	&_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
-	&_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
-	&_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
-	&_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
-	&_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
-	&_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
-	&_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
-	&_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
-	&_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
-	&_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
-	&_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
-	&_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
-	&_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
-	&_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
-	&_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
-	&_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
-	&_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
-	&_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
-	&_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
-	&_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
-	&_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
-	&_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
-	&_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
-	&_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
-	&_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
-	&_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
-	&_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
-	&_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
-	&_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
-	&_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
-	&_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
-	&_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
-	&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
-	&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
-	&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
-
-#Td4:	# four copies of Td4 to choose from to avoid L1 aliasing
-	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
-	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
-	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
-	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
-	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
-	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
-	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
-	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
-	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
-	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
-	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
-	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
-	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
-	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
-	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
-	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
-	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
-	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
-	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
-	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
-	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
-	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
-	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
-	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
-	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
-	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
-	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
-	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
-	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
-	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
-	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
-	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-
-	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
-	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
-	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
-	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
-	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
-	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
-	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
-	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
-	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
-	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
-	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
-	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
-	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
-	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
-	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
-	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
-	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
-	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
-	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
-	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
-	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
-	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
-	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
-	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
-	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
-	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
-	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
-	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
-	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
-	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
-	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
-	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-
-	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
-	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
-	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
-	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
-	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
-	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
-	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
-	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
-	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
-	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
-	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
-	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
-	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
-	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
-	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
-	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
-	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
-	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
-	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
-	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
-	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
-	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
-	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
-	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
-	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
-	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
-	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
-	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
-	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
-	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
-	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
-	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-
-	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
-	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
-	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
-	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
-	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
-	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
-	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
-	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
-	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
-	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
-	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
-	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
-	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
-	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
-	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
-	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
-	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
-	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
-	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
-	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
-	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
-	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
-	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
-	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
-	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
-	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
-	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
-	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
-	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
-	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
-	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
-	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-&function_end_B("_x86_AES_decrypt");
-
-# void AES_decrypt (const unsigned char *inp, unsigned char *out, const AES_KEY *key);
-&function_begin("AES_decrypt");
-	&mov	($acc,&wparam(0));		# load inp
-	&mov	($key,&wparam(2));		# load key
-
-	&mov	($s0,"esp");
-	&sub	("esp",36);
-	&and	("esp",-64);			# align to cache-line
-
-	# place stack frame just "above" the key schedule
-	&lea	($s1,&DWP(-64-63,$key));
-	&sub	($s1,"esp");
-	&neg	($s1);
-	&and	($s1,0x3C0);	# modulo 1024, but aligned to cache-line
-	&sub	("esp",$s1);
-	&add	("esp",4);	# 4 is reserved for caller's return address
-	&mov	($_esp,$s0);	# save stack pointer
-
-	&call   (&label("pic_point"));          # make it PIC!
-	&set_label("pic_point");
-	&blindpop($tbl);
-	&picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
-	&lea    ($tbl,&DWP(&label("AES_Td")."-".&label("pic_point"),$tbl));
-
-	# pick Td4 copy which can't "overlap" with stack frame or key schedule
-	&lea	($s1,&DWP(768-4,"esp"));
-	&sub	($s1,$tbl);
-	&and	($s1,0x300);
-	&lea	($tbl,&DWP(2048+128,$tbl,$s1));
-
-					if (!$x86only) {
-	&bt	(&DWP(0,$s0),25);	# check for SSE bit
-	&jnc	(&label("x86"));
-
-	&movq	("mm0",&QWP(0,$acc));
-	&movq	("mm4",&QWP(8,$acc));
-	&call	("_sse_AES_decrypt_compact");
-	&mov	("esp",$_esp);			# restore stack pointer
-	&mov	($acc,&wparam(1));		# load out
-	&movq	(&QWP(0,$acc),"mm0");		# write output data
-	&movq	(&QWP(8,$acc),"mm4");
-	&emms	();
-	&function_end_A();
-					}
-	&set_label("x86",16);
-	&mov	($_tbl,$tbl);
-	&mov	($s0,&DWP(0,$acc));		# load input data
-	&mov	($s1,&DWP(4,$acc));
-	&mov	($s2,&DWP(8,$acc));
-	&mov	($s3,&DWP(12,$acc));
-	&call	("_x86_AES_decrypt_compact");
-	&mov	("esp",$_esp);			# restore stack pointer
-	&mov	($acc,&wparam(1));		# load out
-	&mov	(&DWP(0,$acc),$s0);		# write output data
-	&mov	(&DWP(4,$acc),$s1);
-	&mov	(&DWP(8,$acc),$s2);
-	&mov	(&DWP(12,$acc),$s3);
-&function_end("AES_decrypt");
-
-# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
-#			size_t length, const AES_KEY *key,
-#			unsigned char *ivp,const int enc);
-{
-# stack frame layout
-#             -4(%esp)		# return address	 0(%esp)
-#              0(%esp)		# s0 backing store	 4(%esp)	
-#              4(%esp)		# s1 backing store	 8(%esp)
-#              8(%esp)		# s2 backing store	12(%esp)
-#             12(%esp)		# s3 backing store	16(%esp)
-#             16(%esp)		# key backup		20(%esp)
-#             20(%esp)		# end of key schedule	24(%esp)
-#             24(%esp)		# %ebp backup		28(%esp)
-#             28(%esp)		# %esp backup
-my $_inp=&DWP(32,"esp");	# copy of wparam(0)
-my $_out=&DWP(36,"esp");	# copy of wparam(1)
-my $_len=&DWP(40,"esp");	# copy of wparam(2)
-my $_key=&DWP(44,"esp");	# copy of wparam(3)
-my $_ivp=&DWP(48,"esp");	# copy of wparam(4)
-my $_tmp=&DWP(52,"esp");	# volatile variable
-#
-my $ivec=&DWP(60,"esp");	# ivec[16]
-my $aes_key=&DWP(76,"esp");	# copy of aes_key
-my $mark=&DWP(76+240,"esp");	# copy of aes_key->rounds
-
-&function_begin("AES_cbc_encrypt");
-	&mov	($s2 eq "ecx"? $s2 : "",&wparam(2));	# load len
-	&cmp	($s2,0);
-	&je	(&label("drop_out"));
-
-	&call   (&label("pic_point"));		# make it PIC!
-	&set_label("pic_point");
-	&blindpop($tbl);
-	&picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
-
-	&cmp	(&wparam(5),0);
-	&lea    ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
-	&jne	(&label("picked_te"));
-	&lea	($tbl,&DWP(&label("AES_Td")."-".&label("AES_Te"),$tbl));
-	&set_label("picked_te");
-
-	# one can argue whether this is required
-	&pushf	();
-	&cld	();
-
-	&cmp	($s2,$speed_limit);
-	&jb	(&label("slow_way"));
-	&test	($s2,15);
-	&jnz	(&label("slow_way"));
-					if (!$x86only) {
-	&bt	(&DWP(0,$s0),28);	# check for hyper-threading bit
-	&jc	(&label("slow_way"));
-					}
-	# pre-allocate aligned stack frame...
-	&lea	($acc,&DWP(-80-244,"esp"));
-	&and	($acc,-64);
-
-	# ... and make sure it doesn't alias with $tbl modulo 4096
-	&mov	($s0,$tbl);
-	&lea	($s1,&DWP(2048+256,$tbl));
-	&mov	($s3,$acc);
-	&and	($s0,0xfff);		# s = %ebp&0xfff
-	&and	($s1,0xfff);		# e = (%ebp+2048+256)&0xfff
-	&and	($s3,0xfff);		# p = %esp&0xfff
-
-	&cmp	($s3,$s1);		# if (p>=e) %esp -= (p-e);
-	&jb	(&label("tbl_break_out"));
-	&sub	($s3,$s1);
-	&sub	($acc,$s3);
-	&jmp	(&label("tbl_ok"));
-	&set_label("tbl_break_out",4);	# else %esp -= (p-s)&0xfff + framesz;
-	&sub	($s3,$s0);
-	&and	($s3,0xfff);
-	&add	($s3,384);
-	&sub	($acc,$s3);
-	&set_label("tbl_ok",4);
-
-	&lea	($s3,&wparam(0));	# obtain pointer to parameter block
-	&exch	("esp",$acc);		# allocate stack frame
-	&add	("esp",4);		# reserve for return address!
-	&mov	($_tbl,$tbl);		# save %ebp
-	&mov	($_esp,$acc);		# save %esp
-
-	&mov	($s0,&DWP(0,$s3));	# load inp
-	&mov	($s1,&DWP(4,$s3));	# load out
-	#&mov	($s2,&DWP(8,$s3));	# load len
-	&mov	($key,&DWP(12,$s3));	# load key
-	&mov	($acc,&DWP(16,$s3));	# load ivp
-	&mov	($s3,&DWP(20,$s3));	# load enc flag
-
-	&mov	($_inp,$s0);		# save copy of inp
-	&mov	($_out,$s1);		# save copy of out
-	&mov	($_len,$s2);		# save copy of len
-	&mov	($_key,$key);		# save copy of key
-	&mov	($_ivp,$acc);		# save copy of ivp
-
-	&mov	($mark,0);		# copy of aes_key->rounds = 0;
-	# do we copy key schedule to stack?
-	&mov	($s1 eq "ebx" ? $s1 : "",$key);
-	&mov	($s2 eq "ecx" ? $s2 : "",244/4);
-	&sub	($s1,$tbl);
-	&mov	("esi",$key);
-	&and	($s1,0xfff);
-	&lea	("edi",$aes_key);
-	&cmp	($s1,2048+256);
-	&jb	(&label("do_copy"));
-	&cmp	($s1,4096-244);
-	&jb	(&label("skip_copy"));
-	&set_label("do_copy",4);
-		&mov	($_key,"edi");
-		&data_word(0xA5F3F689);	# rep movsd
-	&set_label("skip_copy");
-
-	&mov	($key,16);
-	&set_label("prefetch_tbl",4);
-		&mov	($s0,&DWP(0,$tbl));
-		&mov	($s1,&DWP(32,$tbl));
-		&mov	($s2,&DWP(64,$tbl));
-		&mov	($acc,&DWP(96,$tbl));
-		&lea	($tbl,&DWP(128,$tbl));
-		&sub	($key,1);
-	&jnz	(&label("prefetch_tbl"));
-	&sub	($tbl,2048);
-
-	&mov	($acc,$_inp);
-	&mov	($key,$_ivp);
-
-	&cmp	($s3,0);
-	&je	(&label("fast_decrypt"));
-
-#----------------------------- ENCRYPT -----------------------------#
-	&mov	($s0,&DWP(0,$key));		# load iv
-	&mov	($s1,&DWP(4,$key));
-
-	&set_label("fast_enc_loop",16);
-		&mov	($s2,&DWP(8,$key));
-		&mov	($s3,&DWP(12,$key));
-
-		&xor	($s0,&DWP(0,$acc));	# xor input data
-		&xor	($s1,&DWP(4,$acc));
-		&xor	($s2,&DWP(8,$acc));
-		&xor	($s3,&DWP(12,$acc));
-
-		&mov	($key,$_key);		# load key
-		&call	("_x86_AES_encrypt");
-
-		&mov	($acc,$_inp);		# load inp
-		&mov	($key,$_out);		# load out
-
-		&mov	(&DWP(0,$key),$s0);	# save output data
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&lea	($acc,&DWP(16,$acc));	# advance inp
-		&mov	($s2,$_len);		# load len
-		&mov	($_inp,$acc);		# save inp
-		&lea	($s3,&DWP(16,$key));	# advance out
-		&mov	($_out,$s3);		# save out
-		&sub	($s2,16);		# decrease len
-		&mov	($_len,$s2);		# save len
-	&jnz	(&label("fast_enc_loop"));
-	&mov	($acc,$_ivp);		# load ivp
-	&mov	($s2,&DWP(8,$key));	# restore last 2 dwords
-	&mov	($s3,&DWP(12,$key));
-	&mov	(&DWP(0,$acc),$s0);	# save ivec
-	&mov	(&DWP(4,$acc),$s1);
-	&mov	(&DWP(8,$acc),$s2);
-	&mov	(&DWP(12,$acc),$s3);
-
-	&cmp	($mark,0);		# was the key schedule copied?
-	&mov	("edi",$_key);
-	&je	(&label("skip_ezero"));
-	# zero copy of key schedule
-	&mov	("ecx",240/4);
-	&xor	("eax","eax");
-	&align	(4);
-	&data_word(0xABF3F689);	# rep stosd
-	&set_label("skip_ezero");
-	&mov	("esp",$_esp);
-	&popf	();
-    &set_label("drop_out");
-	&function_end_A();
-	&pushf	();			# kludge, never executed
-
-#----------------------------- DECRYPT -----------------------------#
-&set_label("fast_decrypt",16);
-
-	&cmp	($acc,$_out);
-	&je	(&label("fast_dec_in_place"));	# in-place processing...
-
-	&mov	($_tmp,$key);
-
-	&align	(4);
-	&set_label("fast_dec_loop",16);
-		&mov	($s0,&DWP(0,$acc));	# read input
-		&mov	($s1,&DWP(4,$acc));
-		&mov	($s2,&DWP(8,$acc));
-		&mov	($s3,&DWP(12,$acc));
-
-		&mov	($key,$_key);		# load key
-		&call	("_x86_AES_decrypt");
-
-		&mov	($key,$_tmp);		# load ivp
-		&mov	($acc,$_len);		# load len
-		&xor	($s0,&DWP(0,$key));	# xor iv
-		&xor	($s1,&DWP(4,$key));
-		&xor	($s2,&DWP(8,$key));
-		&xor	($s3,&DWP(12,$key));
-
-		&mov	($key,$_out);		# load out
-		&mov	($acc,$_inp);		# load inp
-
-		&mov	(&DWP(0,$key),$s0);	# write output
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&mov	($s2,$_len);		# load len
-		&mov	($_tmp,$acc);		# save ivp
-		&lea	($acc,&DWP(16,$acc));	# advance inp
-		&mov	($_inp,$acc);		# save inp
-		&lea	($key,&DWP(16,$key));	# advance out
-		&mov	($_out,$key);		# save out
-		&sub	($s2,16);		# decrease len
-		&mov	($_len,$s2);		# save len
-	&jnz	(&label("fast_dec_loop"));
-	&mov	($key,$_tmp);		# load temp ivp
-	&mov	($acc,$_ivp);		# load user ivp
-	&mov	($s0,&DWP(0,$key));	# load iv
-	&mov	($s1,&DWP(4,$key));
-	&mov	($s2,&DWP(8,$key));
-	&mov	($s3,&DWP(12,$key));
-	&mov	(&DWP(0,$acc),$s0);	# copy back to user
-	&mov	(&DWP(4,$acc),$s1);
-	&mov	(&DWP(8,$acc),$s2);
-	&mov	(&DWP(12,$acc),$s3);
-	&jmp	(&label("fast_dec_out"));
-
-    &set_label("fast_dec_in_place",16);
-	&set_label("fast_dec_in_place_loop");
-		&mov	($s0,&DWP(0,$acc));	# read input
-		&mov	($s1,&DWP(4,$acc));
-		&mov	($s2,&DWP(8,$acc));
-		&mov	($s3,&DWP(12,$acc));
-
-		&lea	($key,$ivec);
-		&mov	(&DWP(0,$key),$s0);	# copy to temp
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&mov	($key,$_key);		# load key
-		&call	("_x86_AES_decrypt");
-
-		&mov	($key,$_ivp);		# load ivp
-		&mov	($acc,$_out);		# load out
-		&xor	($s0,&DWP(0,$key));	# xor iv
-		&xor	($s1,&DWP(4,$key));
-		&xor	($s2,&DWP(8,$key));
-		&xor	($s3,&DWP(12,$key));
-
-		&mov	(&DWP(0,$acc),$s0);	# write output
-		&mov	(&DWP(4,$acc),$s1);
-		&mov	(&DWP(8,$acc),$s2);
-		&mov	(&DWP(12,$acc),$s3);
-
-		&lea	($acc,&DWP(16,$acc));	# advance out
-		&mov	($_out,$acc);		# save out
-
-		&lea	($acc,$ivec);
-		&mov	($s0,&DWP(0,$acc));	# read temp
-		&mov	($s1,&DWP(4,$acc));
-		&mov	($s2,&DWP(8,$acc));
-		&mov	($s3,&DWP(12,$acc));
-
-		&mov	(&DWP(0,$key),$s0);	# copy iv
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&mov	($acc,$_inp);		# load inp
-		&mov	($s2,$_len);		# load len
-		&lea	($acc,&DWP(16,$acc));	# advance inp
-		&mov	($_inp,$acc);		# save inp
-		&sub	($s2,16);		# decrease len
-		&mov	($_len,$s2);		# save len
-	&jnz	(&label("fast_dec_in_place_loop"));
-
-    &set_label("fast_dec_out",4);
-	&cmp	($mark,0);		# was the key schedule copied?
-	&mov	("edi",$_key);
-	&je	(&label("skip_dzero"));
-	# zero copy of key schedule
-	&mov	("ecx",240/4);
-	&xor	("eax","eax");
-	&align	(4);
-	&data_word(0xABF3F689);	# rep stosd
-	&set_label("skip_dzero");
-	&mov	("esp",$_esp);
-	&popf	();
-	&function_end_A();
-	&pushf	();			# kludge, never executed
-
-#--------------------------- SLOW ROUTINE ---------------------------#
-&set_label("slow_way",16);
-
-	&mov	($s0,&DWP(0,$s0)) if (!$x86only);# load OPENSSL_ia32cap
-	&mov	($key,&wparam(3));	# load key
-
-	# pre-allocate aligned stack frame...
-	&lea	($acc,&DWP(-80,"esp"));
-	&and	($acc,-64);
-
-	# ... and make sure it doesn't alias with $key modulo 1024
-	&lea	($s1,&DWP(-80-63,$key));
-	&sub	($s1,$acc);
-	&neg	($s1);
-	&and	($s1,0x3C0);	# modulo 1024, but aligned to cache-line
-	&sub	($acc,$s1);
-
-	# pick S-box copy which can't overlap with stack frame or $key
-	&lea	($s1,&DWP(768,$acc));
-	&sub	($s1,$tbl);
-	&and	($s1,0x300);
-	&lea	($tbl,&DWP(2048+128,$tbl,$s1));
-
-	&lea	($s3,&wparam(0));	# pointer to parameter block
-
-	&exch	("esp",$acc);
-	&add	("esp",4);		# reserve for return address!
-	&mov	($_tbl,$tbl);		# save %ebp
-	&mov	($_esp,$acc);		# save %esp
-	&mov	($_tmp,$s0);		# save OPENSSL_ia32cap
-
-	&mov	($s0,&DWP(0,$s3));	# load inp
-	&mov	($s1,&DWP(4,$s3));	# load out
-	#&mov	($s2,&DWP(8,$s3));	# load len
-	#&mov	($key,&DWP(12,$s3));	# load key
-	&mov	($acc,&DWP(16,$s3));	# load ivp
-	&mov	($s3,&DWP(20,$s3));	# load enc flag
-
-	&mov	($_inp,$s0);		# save copy of inp
-	&mov	($_out,$s1);		# save copy of out
-	&mov	($_len,$s2);		# save copy of len
-	&mov	($_key,$key);		# save copy of key
-	&mov	($_ivp,$acc);		# save copy of ivp
-
-	&mov	($key,$acc);
-	&mov	($acc,$s0);
-
-	&cmp	($s3,0);
-	&je	(&label("slow_decrypt"));
-
-#--------------------------- SLOW ENCRYPT ---------------------------#
-	&cmp	($s2,16);
-	&mov	($s3,$s1);
-	&jb	(&label("slow_enc_tail"));
-
-					if (!$x86only) {
-	&bt	($_tmp,25);		# check for SSE bit
-	&jnc	(&label("slow_enc_x86"));
-
-	&movq	("mm0",&QWP(0,$key));	# load iv
-	&movq	("mm4",&QWP(8,$key));
-
-	&set_label("slow_enc_loop_sse",16);
-		&pxor	("mm0",&QWP(0,$acc));	# xor input data
-		&pxor	("mm4",&QWP(8,$acc));
-
-		&mov	($key,$_key);
-		&call	("_sse_AES_encrypt_compact");
-
-		&mov	($acc,$_inp);		# load inp
-		&mov	($key,$_out);		# load out
-		&mov	($s2,$_len);		# load len
-
-		&movq	(&QWP(0,$key),"mm0");	# save output data
-		&movq	(&QWP(8,$key),"mm4");
-
-		&lea	($acc,&DWP(16,$acc));	# advance inp
-		&mov	($_inp,$acc);		# save inp
-		&lea	($s3,&DWP(16,$key));	# advance out
-		&mov	($_out,$s3);		# save out
-		&sub	($s2,16);		# decrease len
-		&cmp	($s2,16);
-		&mov	($_len,$s2);		# save len
-	&jae	(&label("slow_enc_loop_sse"));
-	&test	($s2,15);
-	&jnz	(&label("slow_enc_tail"));
-	&mov	($acc,$_ivp);		# load ivp
-	&movq	(&QWP(0,$acc),"mm0");	# save ivec
-	&movq	(&QWP(8,$acc),"mm4");
-	&emms	();
-	&mov	("esp",$_esp);
-	&popf	();
-	&function_end_A();
-	&pushf	();			# kludge, never executed
-					}
-    &set_label("slow_enc_x86",16);
-	&mov	($s0,&DWP(0,$key));	# load iv
-	&mov	($s1,&DWP(4,$key));
-
-	&set_label("slow_enc_loop_x86",4);
-		&mov	($s2,&DWP(8,$key));
-		&mov	($s3,&DWP(12,$key));
-
-		&xor	($s0,&DWP(0,$acc));	# xor input data
-		&xor	($s1,&DWP(4,$acc));
-		&xor	($s2,&DWP(8,$acc));
-		&xor	($s3,&DWP(12,$acc));
-
-		&mov	($key,$_key);		# load key
-		&call	("_x86_AES_encrypt_compact");
-
-		&mov	($acc,$_inp);		# load inp
-		&mov	($key,$_out);		# load out
-
-		&mov	(&DWP(0,$key),$s0);	# save output data
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&mov	($s2,$_len);		# load len
-		&lea	($acc,&DWP(16,$acc));	# advance inp
-		&mov	($_inp,$acc);		# save inp
-		&lea	($s3,&DWP(16,$key));	# advance out
-		&mov	($_out,$s3);		# save out
-		&sub	($s2,16);		# decrease len
-		&cmp	($s2,16);
-		&mov	($_len,$s2);		# save len
-	&jae	(&label("slow_enc_loop_x86"));
-	&test	($s2,15);
-	&jnz	(&label("slow_enc_tail"));
-	&mov	($acc,$_ivp);		# load ivp
-	&mov	($s2,&DWP(8,$key));	# restore last dwords
-	&mov	($s3,&DWP(12,$key));
-	&mov	(&DWP(0,$acc),$s0);	# save ivec
-	&mov	(&DWP(4,$acc),$s1);
-	&mov	(&DWP(8,$acc),$s2);
-	&mov	(&DWP(12,$acc),$s3);
-
-	&mov	("esp",$_esp);
-	&popf	();
-	&function_end_A();
-	&pushf	();			# kludge, never executed
-
-    &set_label("slow_enc_tail",16);
-	&emms	()	if (!$x86only);
-	&mov	($key eq "edi"? $key:"",$s3);	# load out to edi
-	&mov	($s1,16);
-	&sub	($s1,$s2);
-	&cmp	($key,$acc eq "esi"? $acc:"");	# compare with inp
-	&je	(&label("enc_in_place"));
-	&align	(4);
-	&data_word(0xA4F3F689);	# rep movsb	# copy input
-	&jmp	(&label("enc_skip_in_place"));
-    &set_label("enc_in_place");
-	&lea	($key,&DWP(0,$key,$s2));
-    &set_label("enc_skip_in_place");
-	&mov	($s2,$s1);
-	&xor	($s0,$s0);
-	&align	(4);
-	&data_word(0xAAF3F689);	# rep stosb	# zero tail
-
-	&mov	($key,$_ivp);			# restore ivp
-	&mov	($acc,$s3);			# output as input
-	&mov	($s0,&DWP(0,$key));
-	&mov	($s1,&DWP(4,$key));
-	&mov	($_len,16);			# len=16
-	&jmp	(&label("slow_enc_loop_x86"));	# one more spin...
-
-#--------------------------- SLOW DECRYPT ---------------------------#
-&set_label("slow_decrypt",16);
-					if (!$x86only) {
-	&bt	($_tmp,25);		# check for SSE bit
-	&jnc	(&label("slow_dec_loop_x86"));
-
-	&set_label("slow_dec_loop_sse",4);
-		&movq	("mm0",&QWP(0,$acc));	# read input
-		&movq	("mm4",&QWP(8,$acc));
-
-		&mov	($key,$_key);
-		&call	("_sse_AES_decrypt_compact");
-
-		&mov	($acc,$_inp);		# load inp
-		&lea	($s0,$ivec);
-		&mov	($s1,$_out);		# load out
-		&mov	($s2,$_len);		# load len
-		&mov	($key,$_ivp);		# load ivp
-
-		&movq	("mm1",&QWP(0,$acc));	# re-read input
-		&movq	("mm5",&QWP(8,$acc));
-
-		&pxor	("mm0",&QWP(0,$key));	# xor iv
-		&pxor	("mm4",&QWP(8,$key));
-
-		&movq	(&QWP(0,$key),"mm1");	# copy input to iv
-		&movq	(&QWP(8,$key),"mm5");
-
-		&sub	($s2,16);		# decrease len
-		&jc	(&label("slow_dec_partial_sse"));
-
-		&movq	(&QWP(0,$s1),"mm0");	# write output
-		&movq	(&QWP(8,$s1),"mm4");
-
-		&lea	($s1,&DWP(16,$s1));	# advance out
-		&mov	($_out,$s1);		# save out
-		&lea	($acc,&DWP(16,$acc));	# advance inp
-		&mov	($_inp,$acc);		# save inp
-		&mov	($_len,$s2);		# save len
-	&jnz	(&label("slow_dec_loop_sse"));
-	&emms	();
-	&mov	("esp",$_esp);
-	&popf	();
-	&function_end_A();
-	&pushf	();			# kludge, never executed
-
-    &set_label("slow_dec_partial_sse",16);
-	&movq	(&QWP(0,$s0),"mm0");	# save output to temp
-	&movq	(&QWP(8,$s0),"mm4");
-	&emms	();
-
-	&add	($s2 eq "ecx" ? "ecx":"",16);
-	&mov	("edi",$s1);		# out
-	&mov	("esi",$s0);		# temp
-	&align	(4);
-	&data_word(0xA4F3F689);		# rep movsb # copy partial output
-
-	&mov	("esp",$_esp);
-	&popf	();
-	&function_end_A();
-	&pushf	();			# kludge, never executed
-					}
-	&set_label("slow_dec_loop_x86",16);
-		&mov	($s0,&DWP(0,$acc));	# read input
-		&mov	($s1,&DWP(4,$acc));
-		&mov	($s2,&DWP(8,$acc));
-		&mov	($s3,&DWP(12,$acc));
-
-		&lea	($key,$ivec);
-		&mov	(&DWP(0,$key),$s0);	# copy to temp
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&mov	($key,$_key);		# load key
-		&call	("_x86_AES_decrypt_compact");
-
-		&mov	($key,$_ivp);		# load ivp
-		&mov	($acc,$_len);		# load len
-		&xor	($s0,&DWP(0,$key));	# xor iv
-		&xor	($s1,&DWP(4,$key));
-		&xor	($s2,&DWP(8,$key));
-		&xor	($s3,&DWP(12,$key));
-
-		&sub	($acc,16);
-		&jc	(&label("slow_dec_partial_x86"));
-
-		&mov	($_len,$acc);		# save len
-		&mov	($acc,$_out);		# load out
-
-		&mov	(&DWP(0,$acc),$s0);	# write output
-		&mov	(&DWP(4,$acc),$s1);
-		&mov	(&DWP(8,$acc),$s2);
-		&mov	(&DWP(12,$acc),$s3);
-
-		&lea	($acc,&DWP(16,$acc));	# advance out
-		&mov	($_out,$acc);		# save out
-
-		&lea	($acc,$ivec);
-		&mov	($s0,&DWP(0,$acc));	# read temp
-		&mov	($s1,&DWP(4,$acc));
-		&mov	($s2,&DWP(8,$acc));
-		&mov	($s3,&DWP(12,$acc));
-
-		&mov	(&DWP(0,$key),$s0);	# copy it to iv
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&mov	($acc,$_inp);		# load inp
-		&lea	($acc,&DWP(16,$acc));	# advance inp
-		&mov	($_inp,$acc);		# save inp
-	&jnz	(&label("slow_dec_loop_x86"));
-	&mov	("esp",$_esp);
-	&popf	();
-	&function_end_A();
-	&pushf	();			# kludge, never executed
-
-    &set_label("slow_dec_partial_x86",16);
-	&lea	($acc,$ivec);
-	&mov	(&DWP(0,$acc),$s0);	# save output to temp
-	&mov	(&DWP(4,$acc),$s1);
-	&mov	(&DWP(8,$acc),$s2);
-	&mov	(&DWP(12,$acc),$s3);
-
-	&mov	($acc,$_inp);
-	&mov	($s0,&DWP(0,$acc));	# re-read input
-	&mov	($s1,&DWP(4,$acc));
-	&mov	($s2,&DWP(8,$acc));
-	&mov	($s3,&DWP(12,$acc));
-
-	&mov	(&DWP(0,$key),$s0);	# copy it to iv
-	&mov	(&DWP(4,$key),$s1);
-	&mov	(&DWP(8,$key),$s2);
-	&mov	(&DWP(12,$key),$s3);
-
-	&mov	("ecx",$_len);
-	&mov	("edi",$_out);
-	&lea	("esi",$ivec);
-	&align	(4);
-	&data_word(0xA4F3F689);		# rep movsb # copy partial output
-
-	&mov	("esp",$_esp);
-	&popf	();
-&function_end("AES_cbc_encrypt");
-}
-
-#------------------------------------------------------------------#
-
-sub enckey()
-{
-	&movz	("esi",&LB("edx"));		# rk[i]>>0
-	&movz	("ebx",&BP(-128,$tbl,"esi",1));
-	&movz	("esi",&HB("edx"));		# rk[i]>>8
-	&shl	("ebx",24);
-	&xor	("eax","ebx");
-
-	&movz	("ebx",&BP(-128,$tbl,"esi",1));
-	&shr	("edx",16);
-	&movz	("esi",&LB("edx"));		# rk[i]>>16
-	&xor	("eax","ebx");
-
-	&movz	("ebx",&BP(-128,$tbl,"esi",1));
-	&movz	("esi",&HB("edx"));		# rk[i]>>24
-	&shl	("ebx",8);
-	&xor	("eax","ebx");
-
-	&movz	("ebx",&BP(-128,$tbl,"esi",1));
-	&shl	("ebx",16);
-	&xor	("eax","ebx");
-
-	&xor	("eax",&DWP(1024-128,$tbl,"ecx",4));	# rcon
-}
-
-&function_begin("_x86_AES_set_encrypt_key");
-	&mov	("esi",&wparam(1));		# user supplied key
-	&mov	("edi",&wparam(3));		# private key schedule
-
-	&test	("esi",-1);
-	&jz	(&label("badpointer"));
-	&test	("edi",-1);
-	&jz	(&label("badpointer"));
-
-	&call	(&label("pic_point"));
-	&set_label("pic_point");
-	&blindpop($tbl);
-	&lea	($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
-	&lea	($tbl,&DWP(2048+128,$tbl));
-
-	# prefetch Te4
-	&mov	("eax",&DWP(0-128,$tbl));
-	&mov	("ebx",&DWP(32-128,$tbl));
-	&mov	("ecx",&DWP(64-128,$tbl));
-	&mov	("edx",&DWP(96-128,$tbl));
-	&mov	("eax",&DWP(128-128,$tbl));
-	&mov	("ebx",&DWP(160-128,$tbl));
-	&mov	("ecx",&DWP(192-128,$tbl));
-	&mov	("edx",&DWP(224-128,$tbl));
-
-	&mov	("ecx",&wparam(2));		# number of bits in key
-	&cmp	("ecx",128);
-	&je	(&label("10rounds"));
-	&cmp	("ecx",192);
-	&je	(&label("12rounds"));
-	&cmp	("ecx",256);
-	&je	(&label("14rounds"));
-	&mov	("eax",-2);			# invalid number of bits
-	&jmp	(&label("exit"));
-
-    &set_label("10rounds");
-	&mov	("eax",&DWP(0,"esi"));		# copy first 4 dwords
-	&mov	("ebx",&DWP(4,"esi"));
-	&mov	("ecx",&DWP(8,"esi"));
-	&mov	("edx",&DWP(12,"esi"));
-	&mov	(&DWP(0,"edi"),"eax");
-	&mov	(&DWP(4,"edi"),"ebx");
-	&mov	(&DWP(8,"edi"),"ecx");
-	&mov	(&DWP(12,"edi"),"edx");
-
-	&xor	("ecx","ecx");
-	&jmp	(&label("10shortcut"));
-
-	&align	(4);
-	&set_label("10loop");
-		&mov	("eax",&DWP(0,"edi"));		# rk[0]
-		&mov	("edx",&DWP(12,"edi"));		# rk[3]
-	&set_label("10shortcut");
-		&enckey	();
-
-		&mov	(&DWP(16,"edi"),"eax");		# rk[4]
-		&xor	("eax",&DWP(4,"edi"));
-		&mov	(&DWP(20,"edi"),"eax");		# rk[5]
-		&xor	("eax",&DWP(8,"edi"));
-		&mov	(&DWP(24,"edi"),"eax");		# rk[6]
-		&xor	("eax",&DWP(12,"edi"));
-		&mov	(&DWP(28,"edi"),"eax");		# rk[7]
-		&inc	("ecx");
-		&add	("edi",16);
-		&cmp	("ecx",10);
-	&jl	(&label("10loop"));
-
-	&mov	(&DWP(80,"edi"),10);		# setup number of rounds
-	&xor	("eax","eax");
-	&jmp	(&label("exit"));
-
-    &set_label("12rounds");
-	&mov	("eax",&DWP(0,"esi"));		# copy first 6 dwords
-	&mov	("ebx",&DWP(4,"esi"));
-	&mov	("ecx",&DWP(8,"esi"));
-	&mov	("edx",&DWP(12,"esi"));
-	&mov	(&DWP(0,"edi"),"eax");
-	&mov	(&DWP(4,"edi"),"ebx");
-	&mov	(&DWP(8,"edi"),"ecx");
-	&mov	(&DWP(12,"edi"),"edx");
-	&mov	("ecx",&DWP(16,"esi"));
-	&mov	("edx",&DWP(20,"esi"));
-	&mov	(&DWP(16,"edi"),"ecx");
-	&mov	(&DWP(20,"edi"),"edx");
-
-	&xor	("ecx","ecx");
-	&jmp	(&label("12shortcut"));
-
-	&align	(4);
-	&set_label("12loop");
-		&mov	("eax",&DWP(0,"edi"));		# rk[0]
-		&mov	("edx",&DWP(20,"edi"));		# rk[5]
-	&set_label("12shortcut");
-		&enckey	();
-
-		&mov	(&DWP(24,"edi"),"eax");		# rk[6]
-		&xor	("eax",&DWP(4,"edi"));
-		&mov	(&DWP(28,"edi"),"eax");		# rk[7]
-		&xor	("eax",&DWP(8,"edi"));
-		&mov	(&DWP(32,"edi"),"eax");		# rk[8]
-		&xor	("eax",&DWP(12,"edi"));
-		&mov	(&DWP(36,"edi"),"eax");		# rk[9]
-
-		&cmp	("ecx",7);
-		&je	(&label("12break"));
-		&inc	("ecx");
-
-		&xor	("eax",&DWP(16,"edi"));
-		&mov	(&DWP(40,"edi"),"eax");		# rk[10]
-		&xor	("eax",&DWP(20,"edi"));
-		&mov	(&DWP(44,"edi"),"eax");		# rk[11]
-
-		&add	("edi",24);
-	&jmp	(&label("12loop"));
-
-	&set_label("12break");
-	&mov	(&DWP(72,"edi"),12);		# setup number of rounds
-	&xor	("eax","eax");
-	&jmp	(&label("exit"));
-
-    &set_label("14rounds");
-	&mov	("eax",&DWP(0,"esi"));		# copy first 8 dwords
-	&mov	("ebx",&DWP(4,"esi"));
-	&mov	("ecx",&DWP(8,"esi"));
-	&mov	("edx",&DWP(12,"esi"));
-	&mov	(&DWP(0,"edi"),"eax");
-	&mov	(&DWP(4,"edi"),"ebx");
-	&mov	(&DWP(8,"edi"),"ecx");
-	&mov	(&DWP(12,"edi"),"edx");
-	&mov	("eax",&DWP(16,"esi"));
-	&mov	("ebx",&DWP(20,"esi"));
-	&mov	("ecx",&DWP(24,"esi"));
-	&mov	("edx",&DWP(28,"esi"));
-	&mov	(&DWP(16,"edi"),"eax");
-	&mov	(&DWP(20,"edi"),"ebx");
-	&mov	(&DWP(24,"edi"),"ecx");
-	&mov	(&DWP(28,"edi"),"edx");
-
-	&xor	("ecx","ecx");
-	&jmp	(&label("14shortcut"));
-
-	&align	(4);
-	&set_label("14loop");
-		&mov	("edx",&DWP(28,"edi"));		# rk[7]
-	&set_label("14shortcut");
-		&mov	("eax",&DWP(0,"edi"));		# rk[0]
-
-		&enckey	();
-
-		&mov	(&DWP(32,"edi"),"eax");		# rk[8]
-		&xor	("eax",&DWP(4,"edi"));
-		&mov	(&DWP(36,"edi"),"eax");		# rk[9]
-		&xor	("eax",&DWP(8,"edi"));
-		&mov	(&DWP(40,"edi"),"eax");		# rk[10]
-		&xor	("eax",&DWP(12,"edi"));
-		&mov	(&DWP(44,"edi"),"eax");		# rk[11]
-
-		&cmp	("ecx",6);
-		&je	(&label("14break"));
-		&inc	("ecx");
-
-		&mov	("edx","eax");
-		&mov	("eax",&DWP(16,"edi"));		# rk[4]
-		&movz	("esi",&LB("edx"));		# rk[11]>>0
-		&movz	("ebx",&BP(-128,$tbl,"esi",1));
-		&movz	("esi",&HB("edx"));		# rk[11]>>8
-		&xor	("eax","ebx");
-
-		&movz	("ebx",&BP(-128,$tbl,"esi",1));
-		&shr	("edx",16);
-		&shl	("ebx",8);
-		&movz	("esi",&LB("edx"));		# rk[11]>>16
-		&xor	("eax","ebx");
-
-		&movz	("ebx",&BP(-128,$tbl,"esi",1));
-		&movz	("esi",&HB("edx"));		# rk[11]>>24
-		&shl	("ebx",16);
-		&xor	("eax","ebx");
-
-		&movz	("ebx",&BP(-128,$tbl,"esi",1));
-		&shl	("ebx",24);
-		&xor	("eax","ebx");
-
-		&mov	(&DWP(48,"edi"),"eax");		# rk[12]
-		&xor	("eax",&DWP(20,"edi"));
-		&mov	(&DWP(52,"edi"),"eax");		# rk[13]
-		&xor	("eax",&DWP(24,"edi"));
-		&mov	(&DWP(56,"edi"),"eax");		# rk[14]
-		&xor	("eax",&DWP(28,"edi"));
-		&mov	(&DWP(60,"edi"),"eax");		# rk[15]
-
-		&add	("edi",32);
-	&jmp	(&label("14loop"));
-
-	&set_label("14break");
-	&mov	(&DWP(48,"edi"),14);		# setup number of rounds
-	&xor	("eax","eax");
-	&jmp	(&label("exit"));
-
-    &set_label("badpointer");
-	&mov	("eax",-1);
-    &set_label("exit");
-&function_end("_x86_AES_set_encrypt_key");
-
-# int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
-#                        AES_KEY *key)
-&function_begin_B("private_AES_set_encrypt_key");
-	&call	("_x86_AES_set_encrypt_key");
-	&ret	();
-&function_end_B("private_AES_set_encrypt_key");
-
-sub deckey()
-{ my ($i,$key,$tp1,$tp2,$tp4,$tp8) = @_;
-  my $tmp = $tbl;
-
-	&mov	($acc,$tp1);
-	&and	($acc,0x80808080);
-	&mov	($tmp,$acc);
-	&shr	($tmp,7);
-	&lea	($tp2,&DWP(0,$tp1,$tp1));
-	&sub	($acc,$tmp);
-	&and	($tp2,0xfefefefe);
-	&and	($acc,0x1b1b1b1b);
-	&xor	($acc,$tp2);
-	&mov	($tp2,$acc);
-
-	&and	($acc,0x80808080);
-	&mov	($tmp,$acc);
-	&shr	($tmp,7);
-	&lea	($tp4,&DWP(0,$tp2,$tp2));
-	&sub	($acc,$tmp);
-	&and	($tp4,0xfefefefe);
-	&and	($acc,0x1b1b1b1b);
-	 &xor	($tp2,$tp1);	# tp2^tp1
-	&xor	($acc,$tp4);
-	&mov	($tp4,$acc);
-
-	&and	($acc,0x80808080);
-	&mov	($tmp,$acc);
-	&shr	($tmp,7);
-	&lea	($tp8,&DWP(0,$tp4,$tp4));
-	 &xor	($tp4,$tp1);	# tp4^tp1
-	&sub	($acc,$tmp);
-	&and	($tp8,0xfefefefe);
-	&and	($acc,0x1b1b1b1b);
-	 &rotl	($tp1,8);	# = ROTATE(tp1,8)
-	&xor	($tp8,$acc);
-
-	&mov	($tmp,&DWP(4*($i+1),$key));	# modulo-scheduled load
-
-	&xor	($tp1,$tp2);
-	&xor	($tp2,$tp8);
-	&xor	($tp1,$tp4);
-	&rotl	($tp2,24);
-	&xor	($tp4,$tp8);
-	&xor	($tp1,$tp8);	# ^= tp8^(tp4^tp1)^(tp2^tp1)
-	&rotl	($tp4,16);
-	&xor	($tp1,$tp2);	# ^= ROTATE(tp8^tp2^tp1,24)
-	&rotl	($tp8,8);
-	&xor	($tp1,$tp4);	# ^= ROTATE(tp8^tp4^tp1,16)
-	&mov	($tp2,$tmp);
-	&xor	($tp1,$tp8);	# ^= ROTATE(tp8,8)
-
-	&mov	(&DWP(4*$i,$key),$tp1);
-}
-
-# int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
-#                        AES_KEY *key)
-&function_begin_B("private_AES_set_decrypt_key");
-	&call	("_x86_AES_set_encrypt_key");
-	&cmp	("eax",0);
-	&je	(&label("proceed"));
-	&ret	();
-
-    &set_label("proceed");
-	&push	("ebp");
-	&push	("ebx");
-	&push	("esi");
-	&push	("edi");
-
-	&mov	("esi",&wparam(2));
-	&mov	("ecx",&DWP(240,"esi"));	# pull number of rounds
-	&lea	("ecx",&DWP(0,"","ecx",4));
-	&lea	("edi",&DWP(0,"esi","ecx",4));	# pointer to last chunk
-
-	&set_label("invert",4);			# invert order of chunks
-		&mov	("eax",&DWP(0,"esi"));
-		&mov	("ebx",&DWP(4,"esi"));
-		&mov	("ecx",&DWP(0,"edi"));
-		&mov	("edx",&DWP(4,"edi"));
-		&mov	(&DWP(0,"edi"),"eax");
-		&mov	(&DWP(4,"edi"),"ebx");
-		&mov	(&DWP(0,"esi"),"ecx");
-		&mov	(&DWP(4,"esi"),"edx");
-		&mov	("eax",&DWP(8,"esi"));
-		&mov	("ebx",&DWP(12,"esi"));
-		&mov	("ecx",&DWP(8,"edi"));
-		&mov	("edx",&DWP(12,"edi"));
-		&mov	(&DWP(8,"edi"),"eax");
-		&mov	(&DWP(12,"edi"),"ebx");
-		&mov	(&DWP(8,"esi"),"ecx");
-		&mov	(&DWP(12,"esi"),"edx");
-		&add	("esi",16);
-		&sub	("edi",16);
-		&cmp	("esi","edi");
-	&jne	(&label("invert"));
-
-	&mov	($key,&wparam(2));
-	&mov	($acc,&DWP(240,$key));		# pull number of rounds
-	&lea	($acc,&DWP(-2,$acc,$acc));
-	&lea	($acc,&DWP(0,$key,$acc,8));
-	&mov	(&wparam(2),$acc);
-
-	&mov	($s0,&DWP(16,$key));		# modulo-scheduled load
-	&set_label("permute",4);		# permute the key schedule
-		&add	($key,16);
-		&deckey	(0,$key,$s0,$s1,$s2,$s3);
-		&deckey	(1,$key,$s1,$s2,$s3,$s0);
-		&deckey	(2,$key,$s2,$s3,$s0,$s1);
-		&deckey	(3,$key,$s3,$s0,$s1,$s2);
-		&cmp	($key,&wparam(2));
-	&jb	(&label("permute"));
-
-	&xor	("eax","eax");			# return success
-&function_end("private_AES_set_decrypt_key");
-&asciz("AES for x86, CRYPTOGAMS by <appro\@openssl.org>");
-
-&asm_finish();

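The `enckey` helper and the `10rounds`/`12rounds`/`14rounds` loops above are the standard Rijndael key schedule: every Nk-th word is rotated, passed byte by byte through the S-box (the Te4 table, addressed with a -128 bias), XORed with a round constant from the rcon table, and the remaining words are chained in with plain XORs. Below is a minimal C sketch of the 128-bit case, assuming some `sbox[256]` table is linked in (the same 256 bytes as the Te4 data in the ARM file further down); the sketch uses the big-endian word convention of the AES specification, while the assembly keeps each word in the little-endian order it was loaded in, and the helper names are illustrative rather than OpenSSL's.

    #include <stdint.h>

    extern const uint8_t sbox[256];   /* assumed: the 256-byte AES S-box */

    static const uint32_t rcon[10] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000,
        0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000
    };

    /* SubWord(RotWord(w)) - what enckey() computes with four Te4 lookups */
    static uint32_t sub_rot_word(uint32_t w)
    {
        return ((uint32_t)sbox[(w >> 16) & 0xff] << 24)
             | ((uint32_t)sbox[(w >>  8) & 0xff] << 16)
             | ((uint32_t)sbox[(w      ) & 0xff] <<  8)
             |  (uint32_t)sbox[(w >> 24) & 0xff];
    }

    /* AES-128: 16 key bytes -> 44 round-key words (11 round keys) */
    void aes128_expand_key(const uint8_t key[16], uint32_t rk[44])
    {
        for (int i = 0; i < 4; i++)   /* copy the user key, as in 10rounds */
            rk[i] = ((uint32_t)key[4*i]   << 24) | ((uint32_t)key[4*i+1] << 16)
                  | ((uint32_t)key[4*i+2] <<  8) |  (uint32_t)key[4*i+3];

        for (int i = 4; i < 44; i++) {
            uint32_t t = rk[i - 1];
            if (i % 4 == 0)           /* once per round: enckey()'s job */
                t = sub_rot_word(t) ^ rcon[i/4 - 1];
            rk[i] = rk[i - 4] ^ t;    /* the chained XOR stores in 10loop */
        }
    }

The 192- and 256-bit paths differ only in how many words are copied up front and in the extra SubWord step (without rotation or rcon) that the 14-round loop applies mid-schedule.
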
+ 0 - 1134
drivers/builtin_openssl2/crypto/aes/asm/aes-armv4.pl

@@ -1,1134 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# AES for ARMv4
-
-# January 2007.
-#
-# Code uses a single 1K S-box and is >2 times faster than code generated
-# by gcc-3.4.1. This is thanks to a unique feature of the ARMv4 ISA, which
-# allows a logical or arithmetic operation to be merged with a shift or
-# rotate in one instruction, emitting the combined result every cycle. The
-# module is endian-neutral. The performance is ~42 cycles/byte for a
-# 128-bit key [on a single-issue Xscale PXA250 core].
-
-# May 2007.
-#
-# AES_set_[en|de]crypt_key is added.
-
-# July 2010.
-#
-# Rescheduling for dual-issue pipeline resulted in 12% improvement on
-# Cortex A8 core and ~25 cycles per byte processed with 128-bit key.
-
-# February 2011.
-#
-# Profiler-assisted and platform-specific optimization resulted in 16%
-# improvement on Cortex A8 core and ~21.5 cycles per byte.
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-$s0="r0";
-$s1="r1";
-$s2="r2";
-$s3="r3";
-$t1="r4";
-$t2="r5";
-$t3="r6";
-$i1="r7";
-$i2="r8";
-$i3="r9";
-
-$tbl="r10";
-$key="r11";
-$rounds="r12";
-
-$code=<<___;
-#include "arm_arch.h"
-.text
-.code	32
-
-.type	AES_Te,%object
-.align	5
-AES_Te:
-.word	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
-.word	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
-.word	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
-.word	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
-.word	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
-.word	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
-.word	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
-.word	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
-.word	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
-.word	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
-.word	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
-.word	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
-.word	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
-.word	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
-.word	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
-.word	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
-.word	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
-.word	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
-.word	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
-.word	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
-.word	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
-.word	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
-.word	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
-.word	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
-.word	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
-.word	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
-.word	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
-.word	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
-.word	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
-.word	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
-.word	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
-.word	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
-.word	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
-.word	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
-.word	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
-.word	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
-.word	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
-.word	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
-.word	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
-.word	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
-.word	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
-.word	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
-.word	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
-.word	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
-.word	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
-.word	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
-.word	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
-.word	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
-.word	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
-.word	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
-.word	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
-.word	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
-.word	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
-.word	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
-.word	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
-.word	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
-.word	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
-.word	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
-.word	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
-.word	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
-.word	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
-.word	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
-.word	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
-.word	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
-@ Te4[256]
-.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-@ rcon[]
-.word	0x01000000, 0x02000000, 0x04000000, 0x08000000
-.word	0x10000000, 0x20000000, 0x40000000, 0x80000000
-.word	0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
-.size	AES_Te,.-AES_Te
-
-@ void AES_encrypt(const unsigned char *in, unsigned char *out,
-@ 		 const AES_KEY *key) {
-.global AES_encrypt
-.type   AES_encrypt,%function
-.align	5
-AES_encrypt:
-	sub	r3,pc,#8		@ AES_encrypt
-	stmdb   sp!,{r1,r4-r12,lr}
-	mov	$rounds,r0		@ inp
-	mov	$key,r2
-	sub	$tbl,r3,#AES_encrypt-AES_Te	@ Te
-#if __ARM_ARCH__<7
-	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
-	ldrb	$t1,[$rounds,#2]	@ manner...
-	ldrb	$t2,[$rounds,#1]
-	ldrb	$t3,[$rounds,#0]
-	orr	$s0,$s0,$t1,lsl#8
-	ldrb	$s1,[$rounds,#7]
-	orr	$s0,$s0,$t2,lsl#16
-	ldrb	$t1,[$rounds,#6]
-	orr	$s0,$s0,$t3,lsl#24
-	ldrb	$t2,[$rounds,#5]
-	ldrb	$t3,[$rounds,#4]
-	orr	$s1,$s1,$t1,lsl#8
-	ldrb	$s2,[$rounds,#11]
-	orr	$s1,$s1,$t2,lsl#16
-	ldrb	$t1,[$rounds,#10]
-	orr	$s1,$s1,$t3,lsl#24
-	ldrb	$t2,[$rounds,#9]
-	ldrb	$t3,[$rounds,#8]
-	orr	$s2,$s2,$t1,lsl#8
-	ldrb	$s3,[$rounds,#15]
-	orr	$s2,$s2,$t2,lsl#16
-	ldrb	$t1,[$rounds,#14]
-	orr	$s2,$s2,$t3,lsl#24
-	ldrb	$t2,[$rounds,#13]
-	ldrb	$t3,[$rounds,#12]
-	orr	$s3,$s3,$t1,lsl#8
-	orr	$s3,$s3,$t2,lsl#16
-	orr	$s3,$s3,$t3,lsl#24
-#else
-	ldr	$s0,[$rounds,#0]
-	ldr	$s1,[$rounds,#4]
-	ldr	$s2,[$rounds,#8]
-	ldr	$s3,[$rounds,#12]
-#ifdef __ARMEL__
-	rev	$s0,$s0
-	rev	$s1,$s1
-	rev	$s2,$s2
-	rev	$s3,$s3
-#endif
-#endif
-	bl	_armv4_AES_encrypt
-
-	ldr	$rounds,[sp],#4		@ pop out
-#if __ARM_ARCH__>=7
-#ifdef __ARMEL__
-	rev	$s0,$s0
-	rev	$s1,$s1
-	rev	$s2,$s2
-	rev	$s3,$s3
-#endif
-	str	$s0,[$rounds,#0]
-	str	$s1,[$rounds,#4]
-	str	$s2,[$rounds,#8]
-	str	$s3,[$rounds,#12]
-#else
-	mov	$t1,$s0,lsr#24		@ write output in endian-neutral
-	mov	$t2,$s0,lsr#16		@ manner...
-	mov	$t3,$s0,lsr#8
-	strb	$t1,[$rounds,#0]
-	strb	$t2,[$rounds,#1]
-	mov	$t1,$s1,lsr#24
-	strb	$t3,[$rounds,#2]
-	mov	$t2,$s1,lsr#16
-	strb	$s0,[$rounds,#3]
-	mov	$t3,$s1,lsr#8
-	strb	$t1,[$rounds,#4]
-	strb	$t2,[$rounds,#5]
-	mov	$t1,$s2,lsr#24
-	strb	$t3,[$rounds,#6]
-	mov	$t2,$s2,lsr#16
-	strb	$s1,[$rounds,#7]
-	mov	$t3,$s2,lsr#8
-	strb	$t1,[$rounds,#8]
-	strb	$t2,[$rounds,#9]
-	mov	$t1,$s3,lsr#24
-	strb	$t3,[$rounds,#10]
-	mov	$t2,$s3,lsr#16
-	strb	$s2,[$rounds,#11]
-	mov	$t3,$s3,lsr#8
-	strb	$t1,[$rounds,#12]
-	strb	$t2,[$rounds,#13]
-	strb	$t3,[$rounds,#14]
-	strb	$s3,[$rounds,#15]
-#endif
-#if __ARM_ARCH__>=5
-	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	bx	lr			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_encrypt,.-AES_encrypt
-
-.type   _armv4_AES_encrypt,%function
-.align	2
-_armv4_AES_encrypt:
-	str	lr,[sp,#-4]!		@ push lr
-	ldmia	$key!,{$t1-$i1}
-	eor	$s0,$s0,$t1
-	ldr	$rounds,[$key,#240-16]
-	eor	$s1,$s1,$t2
-	eor	$s2,$s2,$t3
-	eor	$s3,$s3,$i1
-	sub	$rounds,$rounds,#1
-	mov	lr,#255
-
-	and	$i1,lr,$s0
-	and	$i2,lr,$s0,lsr#8
-	and	$i3,lr,$s0,lsr#16
-	mov	$s0,$s0,lsr#24
-.Lenc_loop:
-	ldr	$t1,[$tbl,$i1,lsl#2]	@ Te3[s0>>0]
-	and	$i1,lr,$s1,lsr#16	@ i0
-	ldr	$t2,[$tbl,$i2,lsl#2]	@ Te2[s0>>8]
-	and	$i2,lr,$s1
-	ldr	$t3,[$tbl,$i3,lsl#2]	@ Te1[s0>>16]
-	and	$i3,lr,$s1,lsr#8
-	ldr	$s0,[$tbl,$s0,lsl#2]	@ Te0[s0>>24]
-	mov	$s1,$s1,lsr#24
-
-	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te1[s1>>16]
-	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te3[s1>>0]
-	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te2[s1>>8]
-	eor	$s0,$s0,$i1,ror#8
-	ldr	$s1,[$tbl,$s1,lsl#2]	@ Te0[s1>>24]
-	and	$i1,lr,$s2,lsr#8	@ i0
-	eor	$t2,$t2,$i2,ror#8
-	and	$i2,lr,$s2,lsr#16	@ i1
-	eor	$t3,$t3,$i3,ror#8
-	and	$i3,lr,$s2
-	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te2[s2>>8]
-	eor	$s1,$s1,$t1,ror#24
-	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te1[s2>>16]
-	mov	$s2,$s2,lsr#24
-
-	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te3[s2>>0]
-	eor	$s0,$s0,$i1,ror#16
-	ldr	$s2,[$tbl,$s2,lsl#2]	@ Te0[s2>>24]
-	and	$i1,lr,$s3		@ i0
-	eor	$s1,$s1,$i2,ror#8
-	and	$i2,lr,$s3,lsr#8	@ i1
-	eor	$t3,$t3,$i3,ror#16
-	and	$i3,lr,$s3,lsr#16	@ i2
-	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te3[s3>>0]
-	eor	$s2,$s2,$t2,ror#16
-	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te2[s3>>8]
-	mov	$s3,$s3,lsr#24
-
-	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te1[s3>>16]
-	eor	$s0,$s0,$i1,ror#24
-	ldr	$i1,[$key],#16
-	eor	$s1,$s1,$i2,ror#16
-	ldr	$s3,[$tbl,$s3,lsl#2]	@ Te0[s3>>24]
-	eor	$s2,$s2,$i3,ror#8
-	ldr	$t1,[$key,#-12]
-	eor	$s3,$s3,$t3,ror#8
-
-	ldr	$t2,[$key,#-8]
-	eor	$s0,$s0,$i1
-	ldr	$t3,[$key,#-4]
-	and	$i1,lr,$s0
-	eor	$s1,$s1,$t1
-	and	$i2,lr,$s0,lsr#8
-	eor	$s2,$s2,$t2
-	and	$i3,lr,$s0,lsr#16
-	eor	$s3,$s3,$t3
-	mov	$s0,$s0,lsr#24
-
-	subs	$rounds,$rounds,#1
-	bne	.Lenc_loop
-
-	add	$tbl,$tbl,#2
-
-	ldrb	$t1,[$tbl,$i1,lsl#2]	@ Te4[s0>>0]
-	and	$i1,lr,$s1,lsr#16	@ i0
-	ldrb	$t2,[$tbl,$i2,lsl#2]	@ Te4[s0>>8]
-	and	$i2,lr,$s1
-	ldrb	$t3,[$tbl,$i3,lsl#2]	@ Te4[s0>>16]
-	and	$i3,lr,$s1,lsr#8
-	ldrb	$s0,[$tbl,$s0,lsl#2]	@ Te4[s0>>24]
-	mov	$s1,$s1,lsr#24
-
-	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s1>>16]
-	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s1>>0]
-	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s1>>8]
-	eor	$s0,$i1,$s0,lsl#8
-	ldrb	$s1,[$tbl,$s1,lsl#2]	@ Te4[s1>>24]
-	and	$i1,lr,$s2,lsr#8	@ i0
-	eor	$t2,$i2,$t2,lsl#8
-	and	$i2,lr,$s2,lsr#16	@ i1
-	eor	$t3,$i3,$t3,lsl#8
-	and	$i3,lr,$s2
-	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s2>>8]
-	eor	$s1,$t1,$s1,lsl#24
-	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s2>>16]
-	mov	$s2,$s2,lsr#24
-
-	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s2>>0]
-	eor	$s0,$i1,$s0,lsl#8
-	ldrb	$s2,[$tbl,$s2,lsl#2]	@ Te4[s2>>24]
-	and	$i1,lr,$s3		@ i0
-	eor	$s1,$s1,$i2,lsl#16
-	and	$i2,lr,$s3,lsr#8	@ i1
-	eor	$t3,$i3,$t3,lsl#8
-	and	$i3,lr,$s3,lsr#16	@ i2
-	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s3>>0]
-	eor	$s2,$t2,$s2,lsl#24
-	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s3>>8]
-	mov	$s3,$s3,lsr#24
-
-	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s3>>16]
-	eor	$s0,$i1,$s0,lsl#8
-	ldr	$i1,[$key,#0]
-	ldrb	$s3,[$tbl,$s3,lsl#2]	@ Te4[s3>>24]
-	eor	$s1,$s1,$i2,lsl#8
-	ldr	$t1,[$key,#4]
-	eor	$s2,$s2,$i3,lsl#16
-	ldr	$t2,[$key,#8]
-	eor	$s3,$t3,$s3,lsl#24
-	ldr	$t3,[$key,#12]
-
-	eor	$s0,$s0,$i1
-	eor	$s1,$s1,$t1
-	eor	$s2,$s2,$t2
-	eor	$s3,$s3,$t3
-
-	sub	$tbl,$tbl,#2
-	ldr	pc,[sp],#4		@ pop and return
-.size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
-
-.global private_AES_set_encrypt_key
-.type   private_AES_set_encrypt_key,%function
-.align	5
-private_AES_set_encrypt_key:
-_armv4_AES_set_encrypt_key:
-	sub	r3,pc,#8		@ AES_set_encrypt_key
-	teq	r0,#0
-	moveq	r0,#-1
-	beq	.Labrt
-	teq	r2,#0
-	moveq	r0,#-1
-	beq	.Labrt
-
-	teq	r1,#128
-	beq	.Lok
-	teq	r1,#192
-	beq	.Lok
-	teq	r1,#256
-	movne	r0,#-1
-	bne	.Labrt
-
-.Lok:	stmdb   sp!,{r4-r12,lr}
-	sub	$tbl,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024	@ Te4
-
-	mov	$rounds,r0		@ inp
-	mov	lr,r1			@ bits
-	mov	$key,r2			@ key
-
-#if __ARM_ARCH__<7
-	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
-	ldrb	$t1,[$rounds,#2]	@ manner...
-	ldrb	$t2,[$rounds,#1]
-	ldrb	$t3,[$rounds,#0]
-	orr	$s0,$s0,$t1,lsl#8
-	ldrb	$s1,[$rounds,#7]
-	orr	$s0,$s0,$t2,lsl#16
-	ldrb	$t1,[$rounds,#6]
-	orr	$s0,$s0,$t3,lsl#24
-	ldrb	$t2,[$rounds,#5]
-	ldrb	$t3,[$rounds,#4]
-	orr	$s1,$s1,$t1,lsl#8
-	ldrb	$s2,[$rounds,#11]
-	orr	$s1,$s1,$t2,lsl#16
-	ldrb	$t1,[$rounds,#10]
-	orr	$s1,$s1,$t3,lsl#24
-	ldrb	$t2,[$rounds,#9]
-	ldrb	$t3,[$rounds,#8]
-	orr	$s2,$s2,$t1,lsl#8
-	ldrb	$s3,[$rounds,#15]
-	orr	$s2,$s2,$t2,lsl#16
-	ldrb	$t1,[$rounds,#14]
-	orr	$s2,$s2,$t3,lsl#24
-	ldrb	$t2,[$rounds,#13]
-	ldrb	$t3,[$rounds,#12]
-	orr	$s3,$s3,$t1,lsl#8
-	str	$s0,[$key],#16
-	orr	$s3,$s3,$t2,lsl#16
-	str	$s1,[$key,#-12]
-	orr	$s3,$s3,$t3,lsl#24
-	str	$s2,[$key,#-8]
-	str	$s3,[$key,#-4]
-#else
-	ldr	$s0,[$rounds,#0]
-	ldr	$s1,[$rounds,#4]
-	ldr	$s2,[$rounds,#8]
-	ldr	$s3,[$rounds,#12]
-#ifdef __ARMEL__
-	rev	$s0,$s0
-	rev	$s1,$s1
-	rev	$s2,$s2
-	rev	$s3,$s3
-#endif
-	str	$s0,[$key],#16
-	str	$s1,[$key,#-12]
-	str	$s2,[$key,#-8]
-	str	$s3,[$key,#-4]
-#endif
-
-	teq	lr,#128
-	bne	.Lnot128
-	mov	$rounds,#10
-	str	$rounds,[$key,#240-16]
-	add	$t3,$tbl,#256			@ rcon
-	mov	lr,#255
-
-.L128_loop:
-	and	$t2,lr,$s3,lsr#24
-	and	$i1,lr,$s3,lsr#16
-	ldrb	$t2,[$tbl,$t2]
-	and	$i2,lr,$s3,lsr#8
-	ldrb	$i1,[$tbl,$i1]
-	and	$i3,lr,$s3
-	ldrb	$i2,[$tbl,$i2]
-	orr	$t2,$t2,$i1,lsl#24
-	ldrb	$i3,[$tbl,$i3]
-	orr	$t2,$t2,$i2,lsl#16
-	ldr	$t1,[$t3],#4			@ rcon[i++]
-	orr	$t2,$t2,$i3,lsl#8
-	eor	$t2,$t2,$t1
-	eor	$s0,$s0,$t2			@ rk[4]=rk[0]^...
-	eor	$s1,$s1,$s0			@ rk[5]=rk[1]^rk[4]
-	str	$s0,[$key],#16
-	eor	$s2,$s2,$s1			@ rk[6]=rk[2]^rk[5]
-	str	$s1,[$key,#-12]
-	eor	$s3,$s3,$s2			@ rk[7]=rk[3]^rk[6]
-	str	$s2,[$key,#-8]
-	subs	$rounds,$rounds,#1
-	str	$s3,[$key,#-4]
-	bne	.L128_loop
-	sub	r2,$key,#176
-	b	.Ldone
-
-.Lnot128:
-#if __ARM_ARCH__<7
-	ldrb	$i2,[$rounds,#19]
-	ldrb	$t1,[$rounds,#18]
-	ldrb	$t2,[$rounds,#17]
-	ldrb	$t3,[$rounds,#16]
-	orr	$i2,$i2,$t1,lsl#8
-	ldrb	$i3,[$rounds,#23]
-	orr	$i2,$i2,$t2,lsl#16
-	ldrb	$t1,[$rounds,#22]
-	orr	$i2,$i2,$t3,lsl#24
-	ldrb	$t2,[$rounds,#21]
-	ldrb	$t3,[$rounds,#20]
-	orr	$i3,$i3,$t1,lsl#8
-	orr	$i3,$i3,$t2,lsl#16
-	str	$i2,[$key],#8
-	orr	$i3,$i3,$t3,lsl#24
-	str	$i3,[$key,#-4]
-#else
-	ldr	$i2,[$rounds,#16]
-	ldr	$i3,[$rounds,#20]
-#ifdef __ARMEL__
-	rev	$i2,$i2
-	rev	$i3,$i3
-#endif
-	str	$i2,[$key],#8
-	str	$i3,[$key,#-4]
-#endif
-
-	teq	lr,#192
-	bne	.Lnot192
-	mov	$rounds,#12
-	str	$rounds,[$key,#240-24]
-	add	$t3,$tbl,#256			@ rcon
-	mov	lr,#255
-	mov	$rounds,#8
-
-.L192_loop:
-	and	$t2,lr,$i3,lsr#24
-	and	$i1,lr,$i3,lsr#16
-	ldrb	$t2,[$tbl,$t2]
-	and	$i2,lr,$i3,lsr#8
-	ldrb	$i1,[$tbl,$i1]
-	and	$i3,lr,$i3
-	ldrb	$i2,[$tbl,$i2]
-	orr	$t2,$t2,$i1,lsl#24
-	ldrb	$i3,[$tbl,$i3]
-	orr	$t2,$t2,$i2,lsl#16
-	ldr	$t1,[$t3],#4			@ rcon[i++]
-	orr	$t2,$t2,$i3,lsl#8
-	eor	$i3,$t2,$t1
-	eor	$s0,$s0,$i3			@ rk[6]=rk[0]^...
-	eor	$s1,$s1,$s0			@ rk[7]=rk[1]^rk[6]
-	str	$s0,[$key],#24
-	eor	$s2,$s2,$s1			@ rk[8]=rk[2]^rk[7]
-	str	$s1,[$key,#-20]
-	eor	$s3,$s3,$s2			@ rk[9]=rk[3]^rk[8]
-	str	$s2,[$key,#-16]
-	subs	$rounds,$rounds,#1
-	str	$s3,[$key,#-12]
-	subeq	r2,$key,#216
-	beq	.Ldone
-
-	ldr	$i1,[$key,#-32]
-	ldr	$i2,[$key,#-28]
-	eor	$i1,$i1,$s3			@ rk[10]=rk[4]^rk[9]
-	eor	$i3,$i2,$i1			@ rk[11]=rk[5]^rk[10]
-	str	$i1,[$key,#-8]
-	str	$i3,[$key,#-4]
-	b	.L192_loop
-
-.Lnot192:
-#if __ARM_ARCH__<7
-	ldrb	$i2,[$rounds,#27]
-	ldrb	$t1,[$rounds,#26]
-	ldrb	$t2,[$rounds,#25]
-	ldrb	$t3,[$rounds,#24]
-	orr	$i2,$i2,$t1,lsl#8
-	ldrb	$i3,[$rounds,#31]
-	orr	$i2,$i2,$t2,lsl#16
-	ldrb	$t1,[$rounds,#30]
-	orr	$i2,$i2,$t3,lsl#24
-	ldrb	$t2,[$rounds,#29]
-	ldrb	$t3,[$rounds,#28]
-	orr	$i3,$i3,$t1,lsl#8
-	orr	$i3,$i3,$t2,lsl#16
-	str	$i2,[$key],#8
-	orr	$i3,$i3,$t3,lsl#24
-	str	$i3,[$key,#-4]
-#else
-	ldr	$i2,[$rounds,#24]
-	ldr	$i3,[$rounds,#28]
-#ifdef __ARMEL__
-	rev	$i2,$i2
-	rev	$i3,$i3
-#endif
-	str	$i2,[$key],#8
-	str	$i3,[$key,#-4]
-#endif
-
-	mov	$rounds,#14
-	str	$rounds,[$key,#240-32]
-	add	$t3,$tbl,#256			@ rcon
-	mov	lr,#255
-	mov	$rounds,#7
-
-.L256_loop:
-	and	$t2,lr,$i3,lsr#24
-	and	$i1,lr,$i3,lsr#16
-	ldrb	$t2,[$tbl,$t2]
-	and	$i2,lr,$i3,lsr#8
-	ldrb	$i1,[$tbl,$i1]
-	and	$i3,lr,$i3
-	ldrb	$i2,[$tbl,$i2]
-	orr	$t2,$t2,$i1,lsl#24
-	ldrb	$i3,[$tbl,$i3]
-	orr	$t2,$t2,$i2,lsl#16
-	ldr	$t1,[$t3],#4			@ rcon[i++]
-	orr	$t2,$t2,$i3,lsl#8
-	eor	$i3,$t2,$t1
-	eor	$s0,$s0,$i3			@ rk[8]=rk[0]^...
-	eor	$s1,$s1,$s0			@ rk[9]=rk[1]^rk[8]
-	str	$s0,[$key],#32
-	eor	$s2,$s2,$s1			@ rk[10]=rk[2]^rk[9]
-	str	$s1,[$key,#-28]
-	eor	$s3,$s3,$s2			@ rk[11]=rk[3]^rk[10]
-	str	$s2,[$key,#-24]
-	subs	$rounds,$rounds,#1
-	str	$s3,[$key,#-20]
-	subeq	r2,$key,#256
-	beq	.Ldone
-
-	and	$t2,lr,$s3
-	and	$i1,lr,$s3,lsr#8
-	ldrb	$t2,[$tbl,$t2]
-	and	$i2,lr,$s3,lsr#16
-	ldrb	$i1,[$tbl,$i1]
-	and	$i3,lr,$s3,lsr#24
-	ldrb	$i2,[$tbl,$i2]
-	orr	$t2,$t2,$i1,lsl#8
-	ldrb	$i3,[$tbl,$i3]
-	orr	$t2,$t2,$i2,lsl#16
-	ldr	$t1,[$key,#-48]
-	orr	$t2,$t2,$i3,lsl#24
-
-	ldr	$i1,[$key,#-44]
-	ldr	$i2,[$key,#-40]
-	eor	$t1,$t1,$t2			@ rk[12]=rk[4]^...
-	ldr	$i3,[$key,#-36]
-	eor	$i1,$i1,$t1			@ rk[13]=rk[5]^rk[12]
-	str	$t1,[$key,#-16]
-	eor	$i2,$i2,$i1			@ rk[14]=rk[6]^rk[13]
-	str	$i1,[$key,#-12]
-	eor	$i3,$i3,$i2			@ rk[15]=rk[7]^rk[14]
-	str	$i2,[$key,#-8]
-	str	$i3,[$key,#-4]
-	b	.L256_loop
-
-.Ldone:	mov	r0,#0
-	ldmia   sp!,{r4-r12,lr}
-.Labrt:	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	bx	lr			@ interoperable with Thumb ISA:-)
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
-
-.global private_AES_set_decrypt_key
-.type   private_AES_set_decrypt_key,%function
-.align	5
-private_AES_set_decrypt_key:
-	str	lr,[sp,#-4]!            @ push lr
-	bl	_armv4_AES_set_encrypt_key
-	teq	r0,#0
-	ldrne	lr,[sp],#4              @ pop lr
-	bne	.Labrt
-
-	stmdb   sp!,{r4-r12}
-
-	ldr	$rounds,[r2,#240]	@ AES_set_encrypt_key preserves r2,
-	mov	$key,r2			@ which is AES_KEY *key
-	mov	$i1,r2
-	add	$i2,r2,$rounds,lsl#4
-
-.Linv:	ldr	$s0,[$i1]
-	ldr	$s1,[$i1,#4]
-	ldr	$s2,[$i1,#8]
-	ldr	$s3,[$i1,#12]
-	ldr	$t1,[$i2]
-	ldr	$t2,[$i2,#4]
-	ldr	$t3,[$i2,#8]
-	ldr	$i3,[$i2,#12]
-	str	$s0,[$i2],#-16
-	str	$s1,[$i2,#16+4]
-	str	$s2,[$i2,#16+8]
-	str	$s3,[$i2,#16+12]
-	str	$t1,[$i1],#16
-	str	$t2,[$i1,#-12]
-	str	$t3,[$i1,#-8]
-	str	$i3,[$i1,#-4]
-	teq	$i1,$i2
-	bne	.Linv
-___
-$mask80=$i1;
-$mask1b=$i2;
-$mask7f=$i3;
-$code.=<<___;
-	ldr	$s0,[$key,#16]!		@ prefetch tp1
-	mov	$mask80,#0x80
-	mov	$mask1b,#0x1b
-	orr	$mask80,$mask80,#0x8000
-	orr	$mask1b,$mask1b,#0x1b00
-	orr	$mask80,$mask80,$mask80,lsl#16
-	orr	$mask1b,$mask1b,$mask1b,lsl#16
-	sub	$rounds,$rounds,#1
-	mvn	$mask7f,$mask80
-	mov	$rounds,$rounds,lsl#2	@ (rounds-1)*4
-
-.Lmix:	and	$t1,$s0,$mask80
-	and	$s1,$s0,$mask7f
-	sub	$t1,$t1,$t1,lsr#7
-	and	$t1,$t1,$mask1b
-	eor	$s1,$t1,$s1,lsl#1	@ tp2
-
-	and	$t1,$s1,$mask80
-	and	$s2,$s1,$mask7f
-	sub	$t1,$t1,$t1,lsr#7
-	and	$t1,$t1,$mask1b
-	eor	$s2,$t1,$s2,lsl#1	@ tp4
-
-	and	$t1,$s2,$mask80
-	and	$s3,$s2,$mask7f
-	sub	$t1,$t1,$t1,lsr#7
-	and	$t1,$t1,$mask1b
-	eor	$s3,$t1,$s3,lsl#1	@ tp8
-
-	eor	$t1,$s1,$s2
-	eor	$t2,$s0,$s3		@ tp9
-	eor	$t1,$t1,$s3		@ tpe
-	eor	$t1,$t1,$s1,ror#24
-	eor	$t1,$t1,$t2,ror#24	@ ^= ROTATE(tpb=tp9^tp2,8)
-	eor	$t1,$t1,$s2,ror#16
-	eor	$t1,$t1,$t2,ror#16	@ ^= ROTATE(tpd=tp9^tp4,16)
-	eor	$t1,$t1,$t2,ror#8	@ ^= ROTATE(tp9,24)
-
-	ldr	$s0,[$key,#4]		@ prefetch tp1
-	str	$t1,[$key],#4
-	subs	$rounds,$rounds,#1
-	bne	.Lmix
-
-	mov	r0,#0
-#if __ARM_ARCH__>=5
-	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	bx	lr			@ interoperable with Thumb ISA:-)
-#endif
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
-
-.type	AES_Td,%object
-.align	5
-AES_Td:
-.word	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
-.word	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
-.word	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
-.word	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
-.word	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
-.word	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
-.word	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
-.word	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
-.word	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
-.word	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
-.word	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
-.word	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
-.word	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
-.word	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
-.word	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
-.word	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
-.word	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
-.word	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
-.word	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
-.word	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
-.word	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
-.word	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
-.word	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
-.word	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
-.word	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
-.word	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
-.word	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
-.word	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
-.word	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
-.word	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
-.word	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
-.word	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
-.word	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
-.word	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
-.word	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
-.word	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
-.word	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
-.word	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
-.word	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
-.word	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
-.word	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
-.word	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
-.word	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
-.word	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
-.word	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
-.word	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
-.word	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
-.word	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
-.word	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
-.word	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
-.word	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
-.word	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
-.word	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
-.word	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
-.word	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
-.word	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
-.word	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
-.word	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
-.word	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
-.word	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
-.word	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
-.word	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
-.word	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
-.word	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
-@ Td4[256]
-.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-.size	AES_Td,.-AES_Td
-
-@ void AES_decrypt(const unsigned char *in, unsigned char *out,
-@ 		 const AES_KEY *key) {
-.global AES_decrypt
-.type   AES_decrypt,%function
-.align	5
-AES_decrypt:
-	sub	r3,pc,#8		@ AES_decrypt
-	stmdb   sp!,{r1,r4-r12,lr}
-	mov	$rounds,r0		@ inp
-	mov	$key,r2
-	sub	$tbl,r3,#AES_decrypt-AES_Td		@ Td
-#if __ARM_ARCH__<7
-	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
-	ldrb	$t1,[$rounds,#2]	@ manner...
-	ldrb	$t2,[$rounds,#1]
-	ldrb	$t3,[$rounds,#0]
-	orr	$s0,$s0,$t1,lsl#8
-	ldrb	$s1,[$rounds,#7]
-	orr	$s0,$s0,$t2,lsl#16
-	ldrb	$t1,[$rounds,#6]
-	orr	$s0,$s0,$t3,lsl#24
-	ldrb	$t2,[$rounds,#5]
-	ldrb	$t3,[$rounds,#4]
-	orr	$s1,$s1,$t1,lsl#8
-	ldrb	$s2,[$rounds,#11]
-	orr	$s1,$s1,$t2,lsl#16
-	ldrb	$t1,[$rounds,#10]
-	orr	$s1,$s1,$t3,lsl#24
-	ldrb	$t2,[$rounds,#9]
-	ldrb	$t3,[$rounds,#8]
-	orr	$s2,$s2,$t1,lsl#8
-	ldrb	$s3,[$rounds,#15]
-	orr	$s2,$s2,$t2,lsl#16
-	ldrb	$t1,[$rounds,#14]
-	orr	$s2,$s2,$t3,lsl#24
-	ldrb	$t2,[$rounds,#13]
-	ldrb	$t3,[$rounds,#12]
-	orr	$s3,$s3,$t1,lsl#8
-	orr	$s3,$s3,$t2,lsl#16
-	orr	$s3,$s3,$t3,lsl#24
-#else
-	ldr	$s0,[$rounds,#0]
-	ldr	$s1,[$rounds,#4]
-	ldr	$s2,[$rounds,#8]
-	ldr	$s3,[$rounds,#12]
-#ifdef __ARMEL__
-	rev	$s0,$s0
-	rev	$s1,$s1
-	rev	$s2,$s2
-	rev	$s3,$s3
-#endif
-#endif
-	bl	_armv4_AES_decrypt
-
-	ldr	$rounds,[sp],#4		@ pop out
-#if __ARM_ARCH__>=7
-#ifdef __ARMEL__
-	rev	$s0,$s0
-	rev	$s1,$s1
-	rev	$s2,$s2
-	rev	$s3,$s3
-#endif
-	str	$s0,[$rounds,#0]
-	str	$s1,[$rounds,#4]
-	str	$s2,[$rounds,#8]
-	str	$s3,[$rounds,#12]
-#else
-	mov	$t1,$s0,lsr#24		@ write output in endian-neutral
-	mov	$t2,$s0,lsr#16		@ manner...
-	mov	$t3,$s0,lsr#8
-	strb	$t1,[$rounds,#0]
-	strb	$t2,[$rounds,#1]
-	mov	$t1,$s1,lsr#24
-	strb	$t3,[$rounds,#2]
-	mov	$t2,$s1,lsr#16
-	strb	$s0,[$rounds,#3]
-	mov	$t3,$s1,lsr#8
-	strb	$t1,[$rounds,#4]
-	strb	$t2,[$rounds,#5]
-	mov	$t1,$s2,lsr#24
-	strb	$t3,[$rounds,#6]
-	mov	$t2,$s2,lsr#16
-	strb	$s1,[$rounds,#7]
-	mov	$t3,$s2,lsr#8
-	strb	$t1,[$rounds,#8]
-	strb	$t2,[$rounds,#9]
-	mov	$t1,$s3,lsr#24
-	strb	$t3,[$rounds,#10]
-	mov	$t2,$s3,lsr#16
-	strb	$s2,[$rounds,#11]
-	mov	$t3,$s3,lsr#8
-	strb	$t1,[$rounds,#12]
-	strb	$t2,[$rounds,#13]
-	strb	$t3,[$rounds,#14]
-	strb	$s3,[$rounds,#15]
-#endif
-#if __ARM_ARCH__>=5
-	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	bx	lr			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_decrypt,.-AES_decrypt
-
-.type   _armv4_AES_decrypt,%function
-.align	2
-_armv4_AES_decrypt:
-	str	lr,[sp,#-4]!		@ push lr
-	ldmia	$key!,{$t1-$i1}
-	eor	$s0,$s0,$t1
-	ldr	$rounds,[$key,#240-16]
-	eor	$s1,$s1,$t2
-	eor	$s2,$s2,$t3
-	eor	$s3,$s3,$i1
-	sub	$rounds,$rounds,#1
-	mov	lr,#255
-
-	and	$i1,lr,$s0,lsr#16
-	and	$i2,lr,$s0,lsr#8
-	and	$i3,lr,$s0
-	mov	$s0,$s0,lsr#24
-.Ldec_loop:
-	ldr	$t1,[$tbl,$i1,lsl#2]	@ Td1[s0>>16]
-	and	$i1,lr,$s1		@ i0
-	ldr	$t2,[$tbl,$i2,lsl#2]	@ Td2[s0>>8]
-	and	$i2,lr,$s1,lsr#16
-	ldr	$t3,[$tbl,$i3,lsl#2]	@ Td3[s0>>0]
-	and	$i3,lr,$s1,lsr#8
-	ldr	$s0,[$tbl,$s0,lsl#2]	@ Td0[s0>>24]
-	mov	$s1,$s1,lsr#24
-
-	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td3[s1>>0]
-	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td1[s1>>16]
-	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td2[s1>>8]
-	eor	$s0,$s0,$i1,ror#24
-	ldr	$s1,[$tbl,$s1,lsl#2]	@ Td0[s1>>24]
-	and	$i1,lr,$s2,lsr#8	@ i0
-	eor	$t2,$i2,$t2,ror#8
-	and	$i2,lr,$s2		@ i1
-	eor	$t3,$i3,$t3,ror#8
-	and	$i3,lr,$s2,lsr#16
-	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td2[s2>>8]
-	eor	$s1,$s1,$t1,ror#8
-	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td3[s2>>0]
-	mov	$s2,$s2,lsr#24
-
-	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td1[s2>>16]
-	eor	$s0,$s0,$i1,ror#16
-	ldr	$s2,[$tbl,$s2,lsl#2]	@ Td0[s2>>24]
-	and	$i1,lr,$s3,lsr#16	@ i0
-	eor	$s1,$s1,$i2,ror#24
-	and	$i2,lr,$s3,lsr#8	@ i1
-	eor	$t3,$i3,$t3,ror#8
-	and	$i3,lr,$s3		@ i2
-	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td1[s3>>16]
-	eor	$s2,$s2,$t2,ror#8
-	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td2[s3>>8]
-	mov	$s3,$s3,lsr#24
-
-	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td3[s3>>0]
-	eor	$s0,$s0,$i1,ror#8
-	ldr	$i1,[$key],#16
-	eor	$s1,$s1,$i2,ror#16
-	ldr	$s3,[$tbl,$s3,lsl#2]	@ Td0[s3>>24]
-	eor	$s2,$s2,$i3,ror#24
-
-	ldr	$t1,[$key,#-12]
-	eor	$s0,$s0,$i1
-	ldr	$t2,[$key,#-8]
-	eor	$s3,$s3,$t3,ror#8
-	ldr	$t3,[$key,#-4]
-	and	$i1,lr,$s0,lsr#16
-	eor	$s1,$s1,$t1
-	and	$i2,lr,$s0,lsr#8
-	eor	$s2,$s2,$t2
-	and	$i3,lr,$s0
-	eor	$s3,$s3,$t3
-	mov	$s0,$s0,lsr#24
-
-	subs	$rounds,$rounds,#1
-	bne	.Ldec_loop
-
-	add	$tbl,$tbl,#1024
-
-	ldr	$t2,[$tbl,#0]		@ prefetch Td4
-	ldr	$t3,[$tbl,#32]
-	ldr	$t1,[$tbl,#64]
-	ldr	$t2,[$tbl,#96]
-	ldr	$t3,[$tbl,#128]
-	ldr	$t1,[$tbl,#160]
-	ldr	$t2,[$tbl,#192]
-	ldr	$t3,[$tbl,#224]
-
-	ldrb	$s0,[$tbl,$s0]		@ Td4[s0>>24]
-	ldrb	$t1,[$tbl,$i1]		@ Td4[s0>>16]
-	and	$i1,lr,$s1		@ i0
-	ldrb	$t2,[$tbl,$i2]		@ Td4[s0>>8]
-	and	$i2,lr,$s1,lsr#16
-	ldrb	$t3,[$tbl,$i3]		@ Td4[s0>>0]
-	and	$i3,lr,$s1,lsr#8
-
-	ldrb	$i1,[$tbl,$i1]		@ Td4[s1>>0]
-	ldrb	$s1,[$tbl,$s1,lsr#24]	@ Td4[s1>>24]
-	ldrb	$i2,[$tbl,$i2]		@ Td4[s1>>16]
-	eor	$s0,$i1,$s0,lsl#24
-	ldrb	$i3,[$tbl,$i3]		@ Td4[s1>>8]
-	eor	$s1,$t1,$s1,lsl#8
-	and	$i1,lr,$s2,lsr#8	@ i0
-	eor	$t2,$t2,$i2,lsl#8
-	and	$i2,lr,$s2		@ i1
-	ldrb	$i1,[$tbl,$i1]		@ Td4[s2>>8]
-	eor	$t3,$t3,$i3,lsl#8
-	ldrb	$i2,[$tbl,$i2]		@ Td4[s2>>0]
-	and	$i3,lr,$s2,lsr#16
-
-	ldrb	$s2,[$tbl,$s2,lsr#24]	@ Td4[s2>>24]
-	eor	$s0,$s0,$i1,lsl#8
-	ldrb	$i3,[$tbl,$i3]		@ Td4[s2>>16]
-	eor	$s1,$i2,$s1,lsl#16
-	and	$i1,lr,$s3,lsr#16	@ i0
-	eor	$s2,$t2,$s2,lsl#16
-	and	$i2,lr,$s3,lsr#8	@ i1
-	ldrb	$i1,[$tbl,$i1]		@ Td4[s3>>16]
-	eor	$t3,$t3,$i3,lsl#16
-	ldrb	$i2,[$tbl,$i2]		@ Td4[s3>>8]
-	and	$i3,lr,$s3		@ i2
-
-	ldrb	$i3,[$tbl,$i3]		@ Td4[s3>>0]
-	ldrb	$s3,[$tbl,$s3,lsr#24]	@ Td4[s3>>24]
-	eor	$s0,$s0,$i1,lsl#16
-	ldr	$i1,[$key,#0]
-	eor	$s1,$s1,$i2,lsl#8
-	ldr	$t1,[$key,#4]
-	eor	$s2,$i3,$s2,lsl#8
-	ldr	$t2,[$key,#8]
-	eor	$s3,$t3,$s3,lsl#24
-	ldr	$t3,[$key,#12]
-
-	eor	$s0,$s0,$i1
-	eor	$s1,$s1,$t1
-	eor	$s2,$s2,$t2
-	eor	$s3,$s3,$t3
-
-	sub	$tbl,$tbl,#1024
-	ldr	pc,[sp],#4		@ pop and return
-.size	_armv4_AES_decrypt,.-_armv4_AES_decrypt
-.asciz	"AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
-.align	2
-___
-
-$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
-print $code;
-close STDOUT;	# enforce flush

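`deckey` in the x86 module and the `.Lmix` loop in the ARM module above both turn an encryption key schedule into a decryption one by running InvMixColumns over the round keys, and both use the same SIMD-within-a-register doubling: four GF(2^8) bytes packed into one 32-bit word are multiplied by 2 at once with the 0x80808080/0xfefefefe/0x1b1b1b1b masks. Below is a C sketch of that primitive and the transform built on it; the function names are illustrative, and the assembly variants rearrange the same algebra to suit their in-register byte order.

    #include <stdint.h>

    /* Double four packed GF(2^8) elements at once: per-byte carries are
     * extracted with 0x80808080, then (hi - (hi >> 7)) & 0x1b1b1b1b turns
     * each carry bit into the AES reduction constant 0x1b for its lane. */
    static uint32_t xtime4(uint32_t x)
    {
        uint32_t hi = x & 0x80808080u;
        return ((x << 1) & 0xfefefefeu) ^ ((hi - (hi >> 7)) & 0x1b1b1b1bu);
    }

    /* InvMixColumns on one round-key word (AES-spec byte order: the
     * column's first byte in the most significant lane). */
    static uint32_t inv_mix_column(uint32_t tp1)
    {
        uint32_t tp2 = xtime4(tp1);         /*  2*w */
        uint32_t tp4 = xtime4(tp2);         /*  4*w */
        uint32_t tp8 = xtime4(tp4);         /*  8*w */
        uint32_t tp9 = tp8 ^ tp1;           /*  9*w */
        uint32_t tpb = tp9 ^ tp2;           /* 11*w */
        uint32_t tpd = tp9 ^ tp4;           /* 13*w */
        uint32_t tpe = tp8 ^ tp4 ^ tp2;     /* 14*w */
        /* rotate each product so every byte lane meets the coefficients
         * (0e,0b,0d,09) of the InvMixColumns circulant matrix */
        return tpe
             ^ ((tpb <<  8) | (tpb >> 24))
             ^ ((tpd << 16) | (tpd >> 16))
             ^ ((tp9 << 24) | (tp9 >>  8));
    }

As in `private_AES_set_decrypt_key`, this would be applied to every round-key word except the first and last round's, after the `invert`/`.Linv` loops have reversed the order of the round keys.
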
+ 0 - 1123
drivers/builtin_openssl2/crypto/aes/asm/aes-ia64.S

@@ -1,1123 +0,0 @@
-// ====================================================================
-// Written by Andy Polyakov <[email protected]> for the OpenSSL
-// project. Rights for redistribution and usage in source and binary
-// forms are granted according to the OpenSSL license.
-// ====================================================================
-//
-// What's wrong with compiler-generated code? The compiler never uses
-// variable 'shr', which is pairable with 'extr'/'dep' instructions.
-// Then it uses 'zxt', which is an I-type, but can be replaced with
-// 'and', which in turn can be assigned to an M-port [there are twice
-// as many M-ports as I-ports on Itanium 2]. By sacrificing a few
-// registers for small constants (255, 24 and 16) to be used with
-// 'shr' and 'and' instructions I can achieve better ILP, Instruction
-// Level Parallelism, and performance. This code outperforms GCC 3.3
-// generated code by over a factor of 2 (two), GCC 3.4 by 70% and
-// HP C by 40%. Measured best-case scenario, i.e. aligned
-// big-endian input, ECB timing on Itanium 2 is (18 + 13*rounds)
-// ticks per block, or 9.25 CPU cycles per byte for a 128-bit key.
-
-// Version 1.2 mitigates the hazard of cache-timing attacks by
-// a) compressing S-boxes from 8KB to 2KB+256B, b) scheduling
-// references to S-boxes for L2 cache latency, c) prefetching T[ed]4
-// prior to the last round. As a result performance dropped to
-// (26 + 15*rounds) ticks per block, or 11 cycles per byte processed
-// with a 128-bit key. This is a ~16% deterioration. For reference, the
-// Itanium 2 L1 cache has a 64-byte line size and L2 - 128 bytes...
-
-.ident	"aes-ia64.S, version 1.2"
-.ident	"IA-64 ISA artwork by Andy Polyakov <[email protected]>"
-.explicit
-.text
-
-rk0=r8;     rk1=r9;
-
-pfssave=r2;
-lcsave=r10;
-prsave=r3;
-maskff=r11;
-twenty4=r14;
-sixteen=r15;
-
-te00=r16;   te11=r17;   te22=r18;   te33=r19;
-te01=r20;   te12=r21;   te23=r22;   te30=r23;
-te02=r24;   te13=r25;   te20=r26;   te31=r27;
-te03=r28;   te10=r29;   te21=r30;   te32=r31;
-
-// these are rotating...
-t0=r32;     s0=r33;
-t1=r34;     s1=r35;
-t2=r36;     s2=r37;
-t3=r38;     s3=r39;
-
-te0=r40;    te1=r41;    te2=r42;    te3=r43;
-
-#if defined(_HPUX_SOURCE) && !defined(_LP64)
-# define ADDP	addp4
-#else
-# define ADDP	add
-#endif
-
-// Offsets from Te0
-#define TE0	0
-#define TE2	2
-#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
-#define TE1	3
-#define TE3	1
-#else
-#define TE1	1
-#define TE3	3
-#endif
-
-// This implies that AES_KEY comprises 32-bit key schedule elements
-// even on LP64 platforms.
-#ifndef	KSZ
-# define KSZ	4
-# define LDKEY	ld4
-#endif
-
-.proc	_ia64_AES_encrypt#
-// Input:	rk0-rk1
-//		te0
-//		te3	as AES_KEY->rounds!!!
-//		s0-s3
-//		maskff,twenty4,sixteen
-// Output:	r16,r20,r24,r28 as s0-s3
-// Clobber:	r16-r31,rk0-rk1,r32-r43
-.align	32
-_ia64_AES_encrypt:
-	.prologue
-	.altrp	b6
-	.body
-{ .mmi;	alloc	r16=ar.pfs,12,0,0,8
-	LDKEY	t0=[rk0],2*KSZ
-	mov	pr.rot=1<<16	}
-{ .mmi;	LDKEY	t1=[rk1],2*KSZ
-	add	te1=TE1,te0
-	add	te3=-3,te3	};;
-{ .mib;	LDKEY	t2=[rk0],2*KSZ
-	mov	ar.ec=2		}
-{ .mib;	LDKEY	t3=[rk1],2*KSZ
-	add	te2=TE2,te0
-	brp.loop.imp	.Le_top,.Le_end-16	};;
-
-{ .mmi;	xor	s0=s0,t0
-	xor	s1=s1,t1
-	mov	ar.lc=te3	}
-{ .mmi;	xor	s2=s2,t2
-	xor	s3=s3,t3
-	add	te3=TE3,te0	};;
-
-.align	32
-.Le_top:
-{ .mmi;	(p0)	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
-	(p0)	and	te33=s3,maskff		// 0/0:s3&0xff
-	(p0)	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
-{ .mmi; (p0)	LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
-	(p0)	and	te30=s0,maskff		// 0/1:s0&0xff
-	(p0)	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
-{ .mmi;	(p0)	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
-	(p0)	shladd	te33=te33,3,te3		// 1/0:te0+s0>>24
-	(p0)	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
-{ .mmi;	(p0)	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
-	(p0)	shladd	te30=te30,3,te3		// 1/1:te3+s0
-	(p0)	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
-{ .mmi;	(p0)	ld4	te33=[te33]		// 2/0:te3[s3&0xff]
-	(p0)	shladd	te22=te22,3,te2		// 2/0:te2+s2>>8&0xff
-	(p0)	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
-{ .mmi;	(p0)	ld4	te30=[te30]		// 2/1:te3[s0]
-	(p0)	shladd	te23=te23,3,te2		// 2/1:te2+s3>>8
-	(p0)	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
-{ .mmi;	(p0)	ld4	te22=[te22]		// 3/0:te2[s2>>8]
-	(p0)	shladd	te20=te20,3,te2		// 3/2:te2+s0>>8
-	(p0)	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
-{ .mmi;	(p0)	ld4	te23=[te23]		// 3/1:te2[s3>>8]
-	(p0)	shladd	te00=te00,3,te0		// 3/0:te0+s0>>24
-	(p0)	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
-{ .mmi;	(p0)	ld4	te20=[te20]		// 4/2:te2[s0>>8]
-	(p0)	shladd	te21=te21,3,te2		// 4/3:te3+s2
-	(p0)	extr.u	te11=s1,16,8	}	// 4/0:s1>>16&0xff
-{ .mmi;	(p0)	ld4	te00=[te00]		// 4/0:te0[s0>>24]
-	(p0)	shladd	te01=te01,3,te0		// 4/1:te0+s1>>24
-	(p0)	shr.u	te13=s3,sixteen	};;	// 4/2:s3>>16
-{ .mmi;	(p0)	ld4	te21=[te21]		// 5/3:te2[s1>>8]
-	(p0)	shladd	te11=te11,3,te1		// 5/0:te1+s1>>16
-	(p0)	extr.u	te12=s2,16,8	}	// 5/1:s2>>16&0xff
-{ .mmi;	(p0)	ld4	te01=[te01]		// 5/1:te0[s1>>24]
-	(p0)	shladd	te02=te02,3,te0		// 5/2:te0+s2>>24
-	(p0)	and	te31=s1,maskff	};;	// 5/2:s1&0xff
-{ .mmi;	(p0)	ld4	te11=[te11]		// 6/0:te1[s1>>16]
-	(p0)	shladd	te12=te12,3,te1		// 6/1:te1+s2>>16
-	(p0)	extr.u	te10=s0,16,8	}	// 6/3:s0>>16&0xff
-{ .mmi;	(p0)	ld4	te02=[te02]		// 6/2:te0[s2>>24]
-	(p0)	shladd	te03=te03,3,te0		// 6/3:te1+s0>>16
-	(p0)	and	te32=s2,maskff	};;	// 6/3:s2&0xff
-
-{ .mmi;	(p0)	ld4	te12=[te12]		// 7/1:te1[s2>>16]
-	(p0)	shladd	te31=te31,3,te3		// 7/2:te3+s1&0xff
-	(p0)	and	te13=te13,maskff}	// 7/2:s3>>16&0xff
-{ .mmi;	(p0)	ld4	te03=[te03]		// 7/3:te0[s3>>24]
-	(p0)	shladd	te32=te32,3,te3		// 7/3:te3+s2
-	(p0)	xor	t0=t0,te33	};;	// 7/0:
-{ .mmi;	(p0)	ld4	te31=[te31]		// 8/2:te3[s1]
-	(p0)	shladd	te13=te13,3,te1		// 8/2:te1+s3>>16
-	(p0)	xor	t0=t0,te22	}	// 8/0:
-{ .mmi;	(p0)	ld4	te32=[te32]		// 8/3:te3[s2]
-	(p0)	shladd	te10=te10,3,te1		// 8/3:te1+s0>>16
-	(p0)	xor	t1=t1,te30	};;	// 8/1:
-{ .mmi;	(p0)	ld4	te13=[te13]		// 9/2:te1[s3>>16]
-	(p0)	ld4	te10=[te10]		// 9/3:te1[s0>>16]
-	(p0)	xor	t0=t0,te00	};;	// 9/0:		!L2 scheduling
-{ .mmi;	(p0)	xor	t1=t1,te23		// 10[9]/1:	
-	(p0)	xor	t2=t2,te20		// 10[9]/2:
-	(p0)	xor	t3=t3,te21	};;	// 10[9]/3:
-{ .mmi;	(p0)	xor	t0=t0,te11		// 11[10]/0:done!
-	(p0)	xor	t1=t1,te01		// 11[10]/1:
-	(p0)	xor	t2=t2,te02	};;	// 11[10]/2:	!L2 scheduling
-{ .mmi;	(p0)	xor	t3=t3,te03		// 12[10]/3:
-	(p16)	cmp.eq	p0,p17=r0,r0 	};;	// 12[10]/clear (p17)
-{ .mmi;	(p0)	xor	t1=t1,te12		// 13[11]/1:done!
-	(p0)	xor	t2=t2,te31		// 13[11]/2:
-	(p0)	xor	t3=t3,te32	}	// 13[11]/3:
-{ .mmi;	(p17)	add	te0=2048,te0		// 13[11]/
-	(p17)	add	te1=2048+64-TE1,te1};;	// 13[11]/
-{ .mib;	(p0)	xor	t2=t2,te13		// 14[12]/2:done!
-	(p17)	add	te2=2048+128-TE2,te2}	// 14[12]/
-{ .mib;	(p0)	xor	t3=t3,te10		// 14[12]/3:done!
-	(p17)	add	te3=2048+192-TE3,te3	// 14[12]/
-	br.ctop.sptk	.Le_top		};;
-.Le_end:
-
-
-{ .mmi;	ld8	te12=[te0]		// prefetch Te4
-	ld8	te31=[te1]	}
-{ .mmi;	ld8	te10=[te2]
-	ld8	te32=[te3]	}
-
-{ .mmi;	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
-	and	te33=s3,maskff		// 0/0:s3&0xff
-	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
-{ .mmi; LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
-	and	te30=s0,maskff		// 0/1:s0&0xff
-	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
-{ .mmi;	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
-	add	te33=te33,te0		// 1/0:te0+s0>>24
-	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
-{ .mmi;	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
-	add	te30=te30,te0		// 1/1:te0+s0
-	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
-{ .mmi;	ld1	te33=[te33]		// 2/0:te0[s3&0xff]
-	add	te22=te22,te0		// 2/0:te0+s2>>8&0xff
-	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
-{ .mmi;	ld1	te30=[te30]		// 2/1:te0[s0]
-	add	te23=te23,te0		// 2/1:te0+s3>>8
-	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
-{ .mmi;	ld1	te22=[te22]		// 3/0:te0[s2>>8]
-	add	te20=te20,te0		// 3/2:te0+s0>>8
-	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
-{ .mmi;	ld1	te23=[te23]		// 3/1:te0[s3>>8]
-	add	te00=te00,te0		// 3/0:te0+s0>>24
-	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
-{ .mmi;	ld1	te20=[te20]		// 4/2:te0[s0>>8]
-	add	te21=te21,te0		// 4/3:te0+s2
-	extr.u	te11=s1,16,8	}	// 4/0:s1>>16&0xff
-{ .mmi;	ld1	te00=[te00]		// 4/0:te0[s0>>24]
-	add	te01=te01,te0		// 4/1:te0+s1>>24
-	shr.u	te13=s3,sixteen	};;	// 4/2:s3>>16
-{ .mmi;	ld1	te21=[te21]		// 5/3:te0[s1>>8]
-	add	te11=te11,te0		// 5/0:te0+s1>>16
-	extr.u	te12=s2,16,8	}	// 5/1:s2>>16&0xff
-{ .mmi;	ld1	te01=[te01]		// 5/1:te0[s1>>24]
-	add	te02=te02,te0		// 5/2:te0+s2>>24
-	and	te31=s1,maskff	};;	// 5/2:s1&0xff
-{ .mmi;	ld1	te11=[te11]		// 6/0:te0[s1>>16]
-	add	te12=te12,te0		// 6/1:te0+s2>>16
-	extr.u	te10=s0,16,8	}	// 6/3:s0>>16&0xff
-{ .mmi;	ld1	te02=[te02]		// 6/2:te0[s2>>24]
-	add	te03=te03,te0		// 6/3:te0+s0>>16
-	and	te32=s2,maskff	};;	// 6/3:s2&0xff
-
-{ .mmi;	ld1	te12=[te12]		// 7/1:te0[s2>>16]
-	add	te31=te31,te0		// 7/2:te0+s1&0xff
-	dep	te33=te22,te33,8,8}	// 7/0:
-{ .mmi;	ld1	te03=[te03]		// 7/3:te0[s3>>24]
-	add	te32=te32,te0		// 7/3:te0+s2
-	and	te13=te13,maskff};;	// 7/2:s3>>16&0xff
-{ .mmi;	ld1	te31=[te31]		// 8/2:te0[s1]
-	add	te13=te13,te0		// 8/2:te0+s3>>16
-	dep	te30=te23,te30,8,8}	// 8/1:
-{ .mmi;	ld1	te32=[te32]		// 8/3:te0[s2]
-	add	te10=te10,te0		// 8/3:te0+s0>>16
-	shl	te00=te00,twenty4};;	// 8/0:
-{ .mii;	ld1	te13=[te13]		// 9/2:te0[s3>>16]
-	dep	te33=te11,te33,16,8	// 9/0:
-	shl	te01=te01,twenty4};;	// 9/1:
-{ .mii;	ld1	te10=[te10]		// 10/3:te0[s0>>16]
-	dep	te31=te20,te31,8,8	// 10/2:
-	shl	te02=te02,twenty4};;	// 10/2:
-{ .mii;	xor	t0=t0,te33		// 11/0:
-	dep	te32=te21,te32,8,8	// 11/3:
-	shl	te12=te12,sixteen};;	// 11/1:
-{ .mii;	xor	r16=t0,te00		// 12/0:done!
-	dep	te31=te13,te31,16,8	// 12/2:
-	shl	te03=te03,twenty4};;	// 12/3:
-{ .mmi;	xor	t1=t1,te01		// 13/1:
-	xor	t2=t2,te02		// 13/2:
-	dep	te32=te10,te32,16,8};;	// 13/3:
-{ .mmi;	xor	t1=t1,te30		// 14/1:
-	xor	r24=t2,te31		// 14/2:done!
-	xor	t3=t3,te32	};;	// 14/3:
-{ .mib;	xor	r20=t1,te12		// 15/1:done!
-	xor	r28=t3,te03		// 15/3:done!
-	br.ret.sptk	b6	};;
-.endp	_ia64_AES_encrypt#
-
-// void AES_encrypt (const void *in,void *out,const AES_KEY *key);
-.global	AES_encrypt#
-.proc	AES_encrypt#
-.align	32
-AES_encrypt:
-	.prologue
-	.save	ar.pfs,pfssave
-{ .mmi;	alloc	pfssave=ar.pfs,3,1,12,0
-	and	out0=3,in0
-	mov	r3=ip			}
-{ .mmi;	ADDP	in0=0,in0
-	mov	loc0=psr.um
-	ADDP	out11=KSZ*60,in2	};;	// &AES_KEY->rounds
-
-{ .mmi;	ld4	out11=[out11]			// AES_KEY->rounds
-	add	out8=(AES_Te#-AES_encrypt#),r3	// Te0
-	.save	pr,prsave
-	mov	prsave=pr		}
-{ .mmi;	rum	1<<3				// clear um.ac
-	.save	ar.lc,lcsave
-	mov	lcsave=ar.lc		};;
-
-	.body
-#if defined(_HPUX_SOURCE)	// HPUX is big-endian, cut 15+15 cycles...
-{ .mib; cmp.ne	p6,p0=out0,r0
-	add	out0=4,in0
-(p6)	br.dpnt.many	.Le_i_unaligned	};;
-
-{ .mmi;	ld4	out1=[in0],8		// s0
-	and	out9=3,in1
-	mov	twenty4=24		}
-{ .mmi;	ld4	out3=[out0],8		// s1
-	ADDP	rk0=0,in2
-	mov	sixteen=16		};;
-{ .mmi;	ld4	out5=[in0]		// s2
-	cmp.ne	p6,p0=out9,r0
-	mov	maskff=0xff		}
-{ .mmb;	ld4	out7=[out0]		// s3
-	ADDP	rk1=KSZ,in2
-	br.call.sptk.many	b6=_ia64_AES_encrypt	};;
-
-{ .mib;	ADDP	in0=4,in1
-	ADDP	in1=0,in1
-(p6)	br.spnt	.Le_o_unaligned		};;
-
-{ .mii;	mov	psr.um=loc0
-	mov	ar.pfs=pfssave
-	mov	ar.lc=lcsave		};;
-{ .mmi;	st4	[in1]=r16,8		// s0
-	st4	[in0]=r20,8		// s1
-	mov	pr=prsave,0x1ffff	};;
-{ .mmb;	st4	[in1]=r24		// s2
-	st4	[in0]=r28		// s3
-	br.ret.sptk.many	b0	};;
-#endif
-
-.align	32
-.Le_i_unaligned:
-{ .mmi;	add	out0=1,in0
-	add	out2=2,in0
-	add	out4=3,in0	};;
-{ .mmi;	ld1	r16=[in0],4
-	ld1	r17=[out0],4	}//;;
-{ .mmi;	ld1	r18=[out2],4
-	ld1	out1=[out4],4	};;	// s0
-{ .mmi;	ld1	r20=[in0],4
-	ld1	r21=[out0],4	}//;;
-{ .mmi;	ld1	r22=[out2],4
-	ld1	out3=[out4],4	};;	// s1
-{ .mmi;	ld1	r24=[in0],4
-	ld1	r25=[out0],4	}//;;
-{ .mmi;	ld1	r26=[out2],4
-	ld1	out5=[out4],4	};;	// s2
-{ .mmi;	ld1	r28=[in0]
-	ld1	r29=[out0]	}//;;
-{ .mmi;	ld1	r30=[out2]
-	ld1	out7=[out4]	};;	// s3
-
-{ .mii;
-	dep	out1=r16,out1,24,8	//;;
-	dep	out3=r20,out3,24,8	}//;;
-{ .mii;	ADDP	rk0=0,in2
-	dep	out5=r24,out5,24,8	//;;
-	dep	out7=r28,out7,24,8	};;
-{ .mii;	ADDP	rk1=KSZ,in2
-	dep	out1=r17,out1,16,8	//;;
-	dep	out3=r21,out3,16,8	}//;;
-{ .mii;	mov	twenty4=24
-	dep	out5=r25,out5,16,8	//;;
-	dep	out7=r29,out7,16,8	};;
-{ .mii;	mov	sixteen=16
-	dep	out1=r18,out1,8,8	//;;
-	dep	out3=r22,out3,8,8	}//;;
-{ .mii;	mov	maskff=0xff
-	dep	out5=r26,out5,8,8	//;;
-	dep	out7=r30,out7,8,8	};;
-
-{ .mib;	br.call.sptk.many	b6=_ia64_AES_encrypt	};;
-
-.Le_o_unaligned:
-{ .mii;	ADDP	out0=0,in1
-	extr.u	r17=r16,8,8			// s0
-	shr.u	r19=r16,twenty4		}//;;
-{ .mii;	ADDP	out1=1,in1
-	extr.u	r18=r16,16,8
-	shr.u	r23=r20,twenty4		}//;;	// s1
-{ .mii;	ADDP	out2=2,in1
-	extr.u	r21=r20,8,8
-	shr.u	r22=r20,sixteen		}//;;
-{ .mii;	ADDP	out3=3,in1
-	extr.u	r25=r24,8,8			// s2
-	shr.u	r27=r24,twenty4		};;
-{ .mii;	st1	[out3]=r16,4
-	extr.u	r26=r24,16,8
-	shr.u	r31=r28,twenty4		}//;;	// s3
-{ .mii;	st1	[out2]=r17,4
-	extr.u	r29=r28,8,8
-	shr.u	r30=r28,sixteen		}//;;
-
-{ .mmi;	st1	[out1]=r18,4
-	st1	[out0]=r19,4		};;
-{ .mmi;	st1	[out3]=r20,4
-	st1	[out2]=r21,4		}//;;
-{ .mmi;	st1	[out1]=r22,4
-	st1	[out0]=r23,4		};;
-{ .mmi;	st1	[out3]=r24,4
-	st1	[out2]=r25,4
-	mov	pr=prsave,0x1ffff	}//;;
-{ .mmi;	st1	[out1]=r26,4
-	st1	[out0]=r27,4
-	mov	ar.pfs=pfssave		};;
-{ .mmi;	st1	[out3]=r28
-	st1	[out2]=r29
-	mov	ar.lc=lcsave		}//;;
-{ .mmi;	st1	[out1]=r30
-	st1	[out0]=r31		}
-{ .mfb;	mov	psr.um=loc0			// restore user mask
-	br.ret.sptk.many	b0	};;
-.endp	AES_encrypt#
-
-// *AES_decrypt are autogenerated by the following script:
-#if 0
-#!/usr/bin/env perl
-print "// *AES_decrypt are autogenerated by the following script:\n#if 0\n";
-open(PROG,'<'.$0); while(<PROG>) { print; } close(PROG);
-print "#endif\n";
-while(<>) {
-	$process=1	if (/\.proc\s+_ia64_AES_encrypt/);
-	next		if (!$process);
-
-	#s/te00=s0/td00=s0/;	s/te00/td00/g;
-	s/te11=s1/td13=s3/;	s/te11/td13/g;
-	#s/te22=s2/td22=s2/;	s/te22/td22/g;
-	s/te33=s3/td31=s1/;	s/te33/td31/g;
-
-	#s/te01=s1/td01=s1/;	s/te01/td01/g;
-	s/te12=s2/td10=s0/;	s/te12/td10/g;
-	#s/te23=s3/td23=s3/;	s/te23/td23/g;
-	s/te30=s0/td32=s2/;	s/te30/td32/g;
-
-	#s/te02=s2/td02=s2/;	s/te02/td02/g;
-	s/te13=s3/td11=s1/;	s/te13/td11/g;
-	#s/te20=s0/td20=s0/;	s/te20/td20/g;
-	s/te31=s1/td33=s3/;	s/te31/td33/g;
-
-	#s/te03=s3/td03=s3/;	s/te03/td03/g;
-	s/te10=s0/td12=s2/;	s/te10/td12/g;
-	#s/te21=s1/td21=s1/;	s/te21/td21/g;
-	s/te32=s2/td30=s0/;	s/te32/td30/g;
-
-	s/td/te/g;
-
-	s/AES_encrypt/AES_decrypt/g;
-	s/\.Le_/.Ld_/g;
-	s/AES_Te#/AES_Td#/g;
-
-	print;
-
-	exit		if (/\.endp\s+AES_decrypt/);
-}
-#endif
-.proc	_ia64_AES_decrypt#
-// Input:	rk0-rk1
-//		te0
-//		te3	as AES_KEY->rounds!!!
-//		s0-s3
-//		maskff,twenty4,sixteen
-// Output:	r16,r20,r24,r28 as s0-s3
-// Clobber:	r16-r31,rk0-rk1,r32-r43
-.align	32
-_ia64_AES_decrypt:
-	.prologue
-	.altrp	b6
-	.body
-{ .mmi;	alloc	r16=ar.pfs,12,0,0,8
-	LDKEY	t0=[rk0],2*KSZ
-	mov	pr.rot=1<<16	}
-{ .mmi;	LDKEY	t1=[rk1],2*KSZ
-	add	te1=TE1,te0
-	add	te3=-3,te3	};;
-{ .mib;	LDKEY	t2=[rk0],2*KSZ
-	mov	ar.ec=2		}
-{ .mib;	LDKEY	t3=[rk1],2*KSZ
-	add	te2=TE2,te0
-	brp.loop.imp	.Ld_top,.Ld_end-16	};;
-
-{ .mmi;	xor	s0=s0,t0
-	xor	s1=s1,t1
-	mov	ar.lc=te3	}
-{ .mmi;	xor	s2=s2,t2
-	xor	s3=s3,t3
-	add	te3=TE3,te0	};;
-
-.align	32
-.Ld_top:
-{ .mmi;	(p0)	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
-	(p0)	and	te31=s1,maskff		// 0/0:s3&0xff
-	(p0)	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
-{ .mmi; (p0)	LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
-	(p0)	and	te32=s2,maskff		// 0/1:s0&0xff
-	(p0)	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
-{ .mmi;	(p0)	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
-	(p0)	shladd	te31=te31,3,te3		// 1/0:te0+s0>>24
-	(p0)	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
-{ .mmi;	(p0)	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
-	(p0)	shladd	te32=te32,3,te3		// 1/1:te3+s0
-	(p0)	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
-{ .mmi;	(p0)	ld4	te31=[te31]		// 2/0:te3[s3&0xff]
-	(p0)	shladd	te22=te22,3,te2		// 2/0:te2+s2>>8&0xff
-	(p0)	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
-{ .mmi;	(p0)	ld4	te32=[te32]		// 2/1:te3[s0]
-	(p0)	shladd	te23=te23,3,te2		// 2/1:te2+s3>>8
-	(p0)	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
-{ .mmi;	(p0)	ld4	te22=[te22]		// 3/0:te2[s2>>8]
-	(p0)	shladd	te20=te20,3,te2		// 3/2:te2+s0>>8
-	(p0)	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
-{ .mmi;	(p0)	ld4	te23=[te23]		// 3/1:te2[s3>>8]
-	(p0)	shladd	te00=te00,3,te0		// 3/0:te0+s0>>24
-	(p0)	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
-{ .mmi;	(p0)	ld4	te20=[te20]		// 4/2:te2[s0>>8]
-	(p0)	shladd	te21=te21,3,te2		// 4/3:te3+s2
-	(p0)	extr.u	te13=s3,16,8	}	// 4/0:s1>>16&0xff
-{ .mmi;	(p0)	ld4	te00=[te00]		// 4/0:te0[s0>>24]
-	(p0)	shladd	te01=te01,3,te0		// 4/1:te0+s1>>24
-	(p0)	shr.u	te11=s1,sixteen	};;	// 4/2:s3>>16
-{ .mmi;	(p0)	ld4	te21=[te21]		// 5/3:te2[s1>>8]
-	(p0)	shladd	te13=te13,3,te1		// 5/0:te1+s1>>16
-	(p0)	extr.u	te10=s0,16,8	}	// 5/1:s2>>16&0xff
-{ .mmi;	(p0)	ld4	te01=[te01]		// 5/1:te0[s1>>24]
-	(p0)	shladd	te02=te02,3,te0		// 5/2:te0+s2>>24
-	(p0)	and	te33=s3,maskff	};;	// 5/2:s1&0xff
-{ .mmi;	(p0)	ld4	te13=[te13]		// 6/0:te1[s1>>16]
-	(p0)	shladd	te10=te10,3,te1		// 6/1:te1+s2>>16
-	(p0)	extr.u	te12=s2,16,8	}	// 6/3:s0>>16&0xff
-{ .mmi;	(p0)	ld4	te02=[te02]		// 6/2:te0[s2>>24]
-	(p0)	shladd	te03=te03,3,te0		// 6/3:te1+s0>>16
-	(p0)	and	te30=s0,maskff	};;	// 6/3:s2&0xff
-
-{ .mmi;	(p0)	ld4	te10=[te10]		// 7/1:te1[s2>>16]
-	(p0)	shladd	te33=te33,3,te3		// 7/2:te3+s1&0xff
-	(p0)	and	te11=te11,maskff}	// 7/2:s3>>16&0xff
-{ .mmi;	(p0)	ld4	te03=[te03]		// 7/3:te0[s3>>24]
-	(p0)	shladd	te30=te30,3,te3		// 7/3:te3+s2
-	(p0)	xor	t0=t0,te31	};;	// 7/0:
-{ .mmi;	(p0)	ld4	te33=[te33]		// 8/2:te3[s1]
-	(p0)	shladd	te11=te11,3,te1		// 8/2:te1+s3>>16
-	(p0)	xor	t0=t0,te22	}	// 8/0:
-{ .mmi;	(p0)	ld4	te30=[te30]		// 8/3:te3[s2]
-	(p0)	shladd	te12=te12,3,te1		// 8/3:te1+s0>>16
-	(p0)	xor	t1=t1,te32	};;	// 8/1:
-{ .mmi;	(p0)	ld4	te11=[te11]		// 9/2:te1[s3>>16]
-	(p0)	ld4	te12=[te12]		// 9/3:te1[s0>>16]
-	(p0)	xor	t0=t0,te00	};;	// 9/0:		!L2 scheduling
-{ .mmi;	(p0)	xor	t1=t1,te23		// 10[9]/1:	
-	(p0)	xor	t2=t2,te20		// 10[9]/2:
-	(p0)	xor	t3=t3,te21	};;	// 10[9]/3:
-{ .mmi;	(p0)	xor	t0=t0,te13		// 11[10]/0:done!
-	(p0)	xor	t1=t1,te01		// 11[10]/1:
-	(p0)	xor	t2=t2,te02	};;	// 11[10]/2:	!L2 scheduling
-{ .mmi;	(p0)	xor	t3=t3,te03		// 12[10]/3:
-	(p16)	cmp.eq	p0,p17=r0,r0 	};;	// 12[10]/clear (p17)
-{ .mmi;	(p0)	xor	t1=t1,te10		// 13[11]/1:done!
-	(p0)	xor	t2=t2,te33		// 13[11]/2:
-	(p0)	xor	t3=t3,te30	}	// 13[11]/3:
-{ .mmi;	(p17)	add	te0=2048,te0		// 13[11]/
-	(p17)	add	te1=2048+64-TE1,te1};;	// 13[11]/
-{ .mib;	(p0)	xor	t2=t2,te11		// 14[12]/2:done!
-	(p17)	add	te2=2048+128-TE2,te2}	// 14[12]/
-{ .mib;	(p0)	xor	t3=t3,te12		// 14[12]/3:done!
-	(p17)	add	te3=2048+192-TE3,te3	// 14[12]/
-	br.ctop.sptk	.Ld_top		};;
-.Ld_end:
-
-
-{ .mmi;	ld8	te10=[te0]		// prefetch Td4
-	ld8	te33=[te1]	}
-{ .mmi;	ld8	te12=[te2]
-	ld8	te30=[te3]	}
-
-{ .mmi;	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
-	and	te31=s1,maskff		// 0/0:s3&0xff
-	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
-{ .mmi; LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
-	and	te32=s2,maskff		// 0/1:s0&0xff
-	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
-{ .mmi;	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
-	add	te31=te31,te0		// 1/0:te0+s0>>24
-	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
-{ .mmi;	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
-	add	te32=te32,te0		// 1/1:te0+s0
-	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
-{ .mmi;	ld1	te31=[te31]		// 2/0:te0[s3&0xff]
-	add	te22=te22,te0		// 2/0:te0+s2>>8&0xff
-	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
-{ .mmi;	ld1	te32=[te32]		// 2/1:te0[s0]
-	add	te23=te23,te0		// 2/1:te0+s3>>8
-	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
-{ .mmi;	ld1	te22=[te22]		// 3/0:te0[s2>>8]
-	add	te20=te20,te0		// 3/2:te0+s0>>8
-	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
-{ .mmi;	ld1	te23=[te23]		// 3/1:te0[s3>>8]
-	add	te00=te00,te0		// 3/0:te0+s0>>24
-	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
-{ .mmi;	ld1	te20=[te20]		// 4/2:te0[s0>>8]
-	add	te21=te21,te0		// 4/3:te0+s2
-	extr.u	te13=s3,16,8	}	// 4/0:s1>>16&0xff
-{ .mmi;	ld1	te00=[te00]		// 4/0:te0[s0>>24]
-	add	te01=te01,te0		// 4/1:te0+s1>>24
-	shr.u	te11=s1,sixteen	};;	// 4/2:s3>>16
-{ .mmi;	ld1	te21=[te21]		// 5/3:te0[s1>>8]
-	add	te13=te13,te0		// 5/0:te0+s1>>16
-	extr.u	te10=s0,16,8	}	// 5/1:s2>>16&0xff
-{ .mmi;	ld1	te01=[te01]		// 5/1:te0[s1>>24]
-	add	te02=te02,te0		// 5/2:te0+s2>>24
-	and	te33=s3,maskff	};;	// 5/2:s1&0xff
-{ .mmi;	ld1	te13=[te13]		// 6/0:te0[s1>>16]
-	add	te10=te10,te0		// 6/1:te0+s2>>16
-	extr.u	te12=s2,16,8	}	// 6/3:s0>>16&0xff
-{ .mmi;	ld1	te02=[te02]		// 6/2:te0[s2>>24]
-	add	te03=te03,te0		// 6/3:te0+s0>>16
-	and	te30=s0,maskff	};;	// 6/3:s2&0xff
-
-{ .mmi;	ld1	te10=[te10]		// 7/1:te0[s2>>16]
-	add	te33=te33,te0		// 7/2:te0+s1&0xff
-	dep	te31=te22,te31,8,8}	// 7/0:
-{ .mmi;	ld1	te03=[te03]		// 7/3:te0[s3>>24]
-	add	te30=te30,te0		// 7/3:te0+s2
-	and	te11=te11,maskff};;	// 7/2:s3>>16&0xff
-{ .mmi;	ld1	te33=[te33]		// 8/2:te0[s1]
-	add	te11=te11,te0		// 8/2:te0+s3>>16
-	dep	te32=te23,te32,8,8}	// 8/1:
-{ .mmi;	ld1	te30=[te30]		// 8/3:te0[s2]
-	add	te12=te12,te0		// 8/3:te0+s0>>16
-	shl	te00=te00,twenty4};;	// 8/0:
-{ .mii;	ld1	te11=[te11]		// 9/2:te0[s3>>16]
-	dep	te31=te13,te31,16,8	// 9/0:
-	shl	te01=te01,twenty4};;	// 9/1:
-{ .mii;	ld1	te12=[te12]		// 10/3:te0[s0>>16]
-	dep	te33=te20,te33,8,8	// 10/2:
-	shl	te02=te02,twenty4};;	// 10/2:
-{ .mii;	xor	t0=t0,te31		// 11/0:
-	dep	te30=te21,te30,8,8	// 11/3:
-	shl	te10=te10,sixteen};;	// 11/1:
-{ .mii;	xor	r16=t0,te00		// 12/0:done!
-	dep	te33=te11,te33,16,8	// 12/2:
-	shl	te03=te03,twenty4};;	// 12/3:
-{ .mmi;	xor	t1=t1,te01		// 13/1:
-	xor	t2=t2,te02		// 13/2:
-	dep	te30=te12,te30,16,8};;	// 13/3:
-{ .mmi;	xor	t1=t1,te32		// 14/1:
-	xor	r24=t2,te33		// 14/2:done!
-	xor	t3=t3,te30	};;	// 14/3:
-{ .mib;	xor	r20=t1,te10		// 15/1:done!
-	xor	r28=t3,te03		// 15/3:done!
-	br.ret.sptk	b6	};;
-.endp	_ia64_AES_decrypt#
-
-// void AES_decrypt (const void *in,void *out,const AES_KEY *key);
-.global	AES_decrypt#
-.proc	AES_decrypt#
-.align	32
-AES_decrypt:
-	.prologue
-	.save	ar.pfs,pfssave
-{ .mmi;	alloc	pfssave=ar.pfs,3,1,12,0
-	and	out0=3,in0
-	mov	r3=ip			}
-{ .mmi;	ADDP	in0=0,in0
-	mov	loc0=psr.um
-	ADDP	out11=KSZ*60,in2	};;	// &AES_KEY->rounds
-
-{ .mmi;	ld4	out11=[out11]			// AES_KEY->rounds
-	add	out8=(AES_Td#-AES_decrypt#),r3	// Td0
-	.save	pr,prsave
-	mov	prsave=pr		}
-{ .mmi;	rum	1<<3				// clear um.ac
-	.save	ar.lc,lcsave
-	mov	lcsave=ar.lc		};;
-
-	.body
-#if defined(_HPUX_SOURCE)	// HPUX is big-endian, cut 15+15 cycles...
-{ .mib; cmp.ne	p6,p0=out0,r0
-	add	out0=4,in0
-(p6)	br.dpnt.many	.Ld_i_unaligned	};;
-
-{ .mmi;	ld4	out1=[in0],8		// s0
-	and	out9=3,in1
-	mov	twenty4=24		}
-{ .mmi;	ld4	out3=[out0],8		// s1
-	ADDP	rk0=0,in2
-	mov	sixteen=16		};;
-{ .mmi;	ld4	out5=[in0]		// s2
-	cmp.ne	p6,p0=out9,r0
-	mov	maskff=0xff		}
-{ .mmb;	ld4	out7=[out0]		// s3
-	ADDP	rk1=KSZ,in2
-	br.call.sptk.many	b6=_ia64_AES_decrypt	};;
-
-{ .mib;	ADDP	in0=4,in1
-	ADDP	in1=0,in1
-(p6)	br.spnt	.Ld_o_unaligned		};;
-
-{ .mii;	mov	psr.um=loc0
-	mov	ar.pfs=pfssave
-	mov	ar.lc=lcsave		};;
-{ .mmi;	st4	[in1]=r16,8		// s0
-	st4	[in0]=r20,8		// s1
-	mov	pr=prsave,0x1ffff	};;
-{ .mmb;	st4	[in1]=r24		// s2
-	st4	[in0]=r28		// s3
-	br.ret.sptk.many	b0	};;
-#endif
-
-.align	32
-.Ld_i_unaligned:
-{ .mmi;	add	out0=1,in0
-	add	out2=2,in0
-	add	out4=3,in0	};;
-{ .mmi;	ld1	r16=[in0],4
-	ld1	r17=[out0],4	}//;;
-{ .mmi;	ld1	r18=[out2],4
-	ld1	out1=[out4],4	};;	// s0
-{ .mmi;	ld1	r20=[in0],4
-	ld1	r21=[out0],4	}//;;
-{ .mmi;	ld1	r22=[out2],4
-	ld1	out3=[out4],4	};;	// s1
-{ .mmi;	ld1	r24=[in0],4
-	ld1	r25=[out0],4	}//;;
-{ .mmi;	ld1	r26=[out2],4
-	ld1	out5=[out4],4	};;	// s2
-{ .mmi;	ld1	r28=[in0]
-	ld1	r29=[out0]	}//;;
-{ .mmi;	ld1	r30=[out2]
-	ld1	out7=[out4]	};;	// s3
-
-{ .mii;
-	dep	out1=r16,out1,24,8	//;;
-	dep	out3=r20,out3,24,8	}//;;
-{ .mii;	ADDP	rk0=0,in2
-	dep	out5=r24,out5,24,8	//;;
-	dep	out7=r28,out7,24,8	};;
-{ .mii;	ADDP	rk1=KSZ,in2
-	dep	out1=r17,out1,16,8	//;;
-	dep	out3=r21,out3,16,8	}//;;
-{ .mii;	mov	twenty4=24
-	dep	out5=r25,out5,16,8	//;;
-	dep	out7=r29,out7,16,8	};;
-{ .mii;	mov	sixteen=16
-	dep	out1=r18,out1,8,8	//;;
-	dep	out3=r22,out3,8,8	}//;;
-{ .mii;	mov	maskff=0xff
-	dep	out5=r26,out5,8,8	//;;
-	dep	out7=r30,out7,8,8	};;
-
-{ .mib;	br.call.sptk.many	b6=_ia64_AES_decrypt	};;
-
-.Ld_o_unaligned:
-{ .mii;	ADDP	out0=0,in1
-	extr.u	r17=r16,8,8			// s0
-	shr.u	r19=r16,twenty4		}//;;
-{ .mii;	ADDP	out1=1,in1
-	extr.u	r18=r16,16,8
-	shr.u	r23=r20,twenty4		}//;;	// s1
-{ .mii;	ADDP	out2=2,in1
-	extr.u	r21=r20,8,8
-	shr.u	r22=r20,sixteen		}//;;
-{ .mii;	ADDP	out3=3,in1
-	extr.u	r25=r24,8,8			// s2
-	shr.u	r27=r24,twenty4		};;
-{ .mii;	st1	[out3]=r16,4
-	extr.u	r26=r24,16,8
-	shr.u	r31=r28,twenty4		}//;;	// s3
-{ .mii;	st1	[out2]=r17,4
-	extr.u	r29=r28,8,8
-	shr.u	r30=r28,sixteen		}//;;
-
-{ .mmi;	st1	[out1]=r18,4
-	st1	[out0]=r19,4		};;
-{ .mmi;	st1	[out3]=r20,4
-	st1	[out2]=r21,4		}//;;
-{ .mmi;	st1	[out1]=r22,4
-	st1	[out0]=r23,4		};;
-{ .mmi;	st1	[out3]=r24,4
-	st1	[out2]=r25,4
-	mov	pr=prsave,0x1ffff	}//;;
-{ .mmi;	st1	[out1]=r26,4
-	st1	[out0]=r27,4
-	mov	ar.pfs=pfssave		};;
-{ .mmi;	st1	[out3]=r28
-	st1	[out2]=r29
-	mov	ar.lc=lcsave		}//;;
-{ .mmi;	st1	[out1]=r30
-	st1	[out0]=r31		}
-{ .mfb;	mov	psr.um=loc0			// restore user mask
-	br.ret.sptk.many	b0	};;
-.endp	AES_decrypt#
-
-// leave it in .text segment...
-.align	64
-.global	AES_Te#
-.type	AES_Te#,@object
-AES_Te:	data4	0xc66363a5,0xc66363a5, 0xf87c7c84,0xf87c7c84
-	data4	0xee777799,0xee777799, 0xf67b7b8d,0xf67b7b8d
-	data4	0xfff2f20d,0xfff2f20d, 0xd66b6bbd,0xd66b6bbd
-	data4	0xde6f6fb1,0xde6f6fb1, 0x91c5c554,0x91c5c554
-	data4	0x60303050,0x60303050, 0x02010103,0x02010103
-	data4	0xce6767a9,0xce6767a9, 0x562b2b7d,0x562b2b7d
-	data4	0xe7fefe19,0xe7fefe19, 0xb5d7d762,0xb5d7d762
-	data4	0x4dababe6,0x4dababe6, 0xec76769a,0xec76769a
-	data4	0x8fcaca45,0x8fcaca45, 0x1f82829d,0x1f82829d
-	data4	0x89c9c940,0x89c9c940, 0xfa7d7d87,0xfa7d7d87
-	data4	0xeffafa15,0xeffafa15, 0xb25959eb,0xb25959eb
-	data4	0x8e4747c9,0x8e4747c9, 0xfbf0f00b,0xfbf0f00b
-	data4	0x41adadec,0x41adadec, 0xb3d4d467,0xb3d4d467
-	data4	0x5fa2a2fd,0x5fa2a2fd, 0x45afafea,0x45afafea
-	data4	0x239c9cbf,0x239c9cbf, 0x53a4a4f7,0x53a4a4f7
-	data4	0xe4727296,0xe4727296, 0x9bc0c05b,0x9bc0c05b
-	data4	0x75b7b7c2,0x75b7b7c2, 0xe1fdfd1c,0xe1fdfd1c
-	data4	0x3d9393ae,0x3d9393ae, 0x4c26266a,0x4c26266a
-	data4	0x6c36365a,0x6c36365a, 0x7e3f3f41,0x7e3f3f41
-	data4	0xf5f7f702,0xf5f7f702, 0x83cccc4f,0x83cccc4f
-	data4	0x6834345c,0x6834345c, 0x51a5a5f4,0x51a5a5f4
-	data4	0xd1e5e534,0xd1e5e534, 0xf9f1f108,0xf9f1f108
-	data4	0xe2717193,0xe2717193, 0xabd8d873,0xabd8d873
-	data4	0x62313153,0x62313153, 0x2a15153f,0x2a15153f
-	data4	0x0804040c,0x0804040c, 0x95c7c752,0x95c7c752
-	data4	0x46232365,0x46232365, 0x9dc3c35e,0x9dc3c35e
-	data4	0x30181828,0x30181828, 0x379696a1,0x379696a1
-	data4	0x0a05050f,0x0a05050f, 0x2f9a9ab5,0x2f9a9ab5
-	data4	0x0e070709,0x0e070709, 0x24121236,0x24121236
-	data4	0x1b80809b,0x1b80809b, 0xdfe2e23d,0xdfe2e23d
-	data4	0xcdebeb26,0xcdebeb26, 0x4e272769,0x4e272769
-	data4	0x7fb2b2cd,0x7fb2b2cd, 0xea75759f,0xea75759f
-	data4	0x1209091b,0x1209091b, 0x1d83839e,0x1d83839e
-	data4	0x582c2c74,0x582c2c74, 0x341a1a2e,0x341a1a2e
-	data4	0x361b1b2d,0x361b1b2d, 0xdc6e6eb2,0xdc6e6eb2
-	data4	0xb45a5aee,0xb45a5aee, 0x5ba0a0fb,0x5ba0a0fb
-	data4	0xa45252f6,0xa45252f6, 0x763b3b4d,0x763b3b4d
-	data4	0xb7d6d661,0xb7d6d661, 0x7db3b3ce,0x7db3b3ce
-	data4	0x5229297b,0x5229297b, 0xdde3e33e,0xdde3e33e
-	data4	0x5e2f2f71,0x5e2f2f71, 0x13848497,0x13848497
-	data4	0xa65353f5,0xa65353f5, 0xb9d1d168,0xb9d1d168
-	data4	0x00000000,0x00000000, 0xc1eded2c,0xc1eded2c
-	data4	0x40202060,0x40202060, 0xe3fcfc1f,0xe3fcfc1f
-	data4	0x79b1b1c8,0x79b1b1c8, 0xb65b5bed,0xb65b5bed
-	data4	0xd46a6abe,0xd46a6abe, 0x8dcbcb46,0x8dcbcb46
-	data4	0x67bebed9,0x67bebed9, 0x7239394b,0x7239394b
-	data4	0x944a4ade,0x944a4ade, 0x984c4cd4,0x984c4cd4
-	data4	0xb05858e8,0xb05858e8, 0x85cfcf4a,0x85cfcf4a
-	data4	0xbbd0d06b,0xbbd0d06b, 0xc5efef2a,0xc5efef2a
-	data4	0x4faaaae5,0x4faaaae5, 0xedfbfb16,0xedfbfb16
-	data4	0x864343c5,0x864343c5, 0x9a4d4dd7,0x9a4d4dd7
-	data4	0x66333355,0x66333355, 0x11858594,0x11858594
-	data4	0x8a4545cf,0x8a4545cf, 0xe9f9f910,0xe9f9f910
-	data4	0x04020206,0x04020206, 0xfe7f7f81,0xfe7f7f81
-	data4	0xa05050f0,0xa05050f0, 0x783c3c44,0x783c3c44
-	data4	0x259f9fba,0x259f9fba, 0x4ba8a8e3,0x4ba8a8e3
-	data4	0xa25151f3,0xa25151f3, 0x5da3a3fe,0x5da3a3fe
-	data4	0x804040c0,0x804040c0, 0x058f8f8a,0x058f8f8a
-	data4	0x3f9292ad,0x3f9292ad, 0x219d9dbc,0x219d9dbc
-	data4	0x70383848,0x70383848, 0xf1f5f504,0xf1f5f504
-	data4	0x63bcbcdf,0x63bcbcdf, 0x77b6b6c1,0x77b6b6c1
-	data4	0xafdada75,0xafdada75, 0x42212163,0x42212163
-	data4	0x20101030,0x20101030, 0xe5ffff1a,0xe5ffff1a
-	data4	0xfdf3f30e,0xfdf3f30e, 0xbfd2d26d,0xbfd2d26d
-	data4	0x81cdcd4c,0x81cdcd4c, 0x180c0c14,0x180c0c14
-	data4	0x26131335,0x26131335, 0xc3ecec2f,0xc3ecec2f
-	data4	0xbe5f5fe1,0xbe5f5fe1, 0x359797a2,0x359797a2
-	data4	0x884444cc,0x884444cc, 0x2e171739,0x2e171739
-	data4	0x93c4c457,0x93c4c457, 0x55a7a7f2,0x55a7a7f2
-	data4	0xfc7e7e82,0xfc7e7e82, 0x7a3d3d47,0x7a3d3d47
-	data4	0xc86464ac,0xc86464ac, 0xba5d5de7,0xba5d5de7
-	data4	0x3219192b,0x3219192b, 0xe6737395,0xe6737395
-	data4	0xc06060a0,0xc06060a0, 0x19818198,0x19818198
-	data4	0x9e4f4fd1,0x9e4f4fd1, 0xa3dcdc7f,0xa3dcdc7f
-	data4	0x44222266,0x44222266, 0x542a2a7e,0x542a2a7e
-	data4	0x3b9090ab,0x3b9090ab, 0x0b888883,0x0b888883
-	data4	0x8c4646ca,0x8c4646ca, 0xc7eeee29,0xc7eeee29
-	data4	0x6bb8b8d3,0x6bb8b8d3, 0x2814143c,0x2814143c
-	data4	0xa7dede79,0xa7dede79, 0xbc5e5ee2,0xbc5e5ee2
-	data4	0x160b0b1d,0x160b0b1d, 0xaddbdb76,0xaddbdb76
-	data4	0xdbe0e03b,0xdbe0e03b, 0x64323256,0x64323256
-	data4	0x743a3a4e,0x743a3a4e, 0x140a0a1e,0x140a0a1e
-	data4	0x924949db,0x924949db, 0x0c06060a,0x0c06060a
-	data4	0x4824246c,0x4824246c, 0xb85c5ce4,0xb85c5ce4
-	data4	0x9fc2c25d,0x9fc2c25d, 0xbdd3d36e,0xbdd3d36e
-	data4	0x43acacef,0x43acacef, 0xc46262a6,0xc46262a6
-	data4	0x399191a8,0x399191a8, 0x319595a4,0x319595a4
-	data4	0xd3e4e437,0xd3e4e437, 0xf279798b,0xf279798b
-	data4	0xd5e7e732,0xd5e7e732, 0x8bc8c843,0x8bc8c843
-	data4	0x6e373759,0x6e373759, 0xda6d6db7,0xda6d6db7
-	data4	0x018d8d8c,0x018d8d8c, 0xb1d5d564,0xb1d5d564
-	data4	0x9c4e4ed2,0x9c4e4ed2, 0x49a9a9e0,0x49a9a9e0
-	data4	0xd86c6cb4,0xd86c6cb4, 0xac5656fa,0xac5656fa
-	data4	0xf3f4f407,0xf3f4f407, 0xcfeaea25,0xcfeaea25
-	data4	0xca6565af,0xca6565af, 0xf47a7a8e,0xf47a7a8e
-	data4	0x47aeaee9,0x47aeaee9, 0x10080818,0x10080818
-	data4	0x6fbabad5,0x6fbabad5, 0xf0787888,0xf0787888
-	data4	0x4a25256f,0x4a25256f, 0x5c2e2e72,0x5c2e2e72
-	data4	0x381c1c24,0x381c1c24, 0x57a6a6f1,0x57a6a6f1
-	data4	0x73b4b4c7,0x73b4b4c7, 0x97c6c651,0x97c6c651
-	data4	0xcbe8e823,0xcbe8e823, 0xa1dddd7c,0xa1dddd7c
-	data4	0xe874749c,0xe874749c, 0x3e1f1f21,0x3e1f1f21
-	data4	0x964b4bdd,0x964b4bdd, 0x61bdbddc,0x61bdbddc
-	data4	0x0d8b8b86,0x0d8b8b86, 0x0f8a8a85,0x0f8a8a85
-	data4	0xe0707090,0xe0707090, 0x7c3e3e42,0x7c3e3e42
-	data4	0x71b5b5c4,0x71b5b5c4, 0xcc6666aa,0xcc6666aa
-	data4	0x904848d8,0x904848d8, 0x06030305,0x06030305
-	data4	0xf7f6f601,0xf7f6f601, 0x1c0e0e12,0x1c0e0e12
-	data4	0xc26161a3,0xc26161a3, 0x6a35355f,0x6a35355f
-	data4	0xae5757f9,0xae5757f9, 0x69b9b9d0,0x69b9b9d0
-	data4	0x17868691,0x17868691, 0x99c1c158,0x99c1c158
-	data4	0x3a1d1d27,0x3a1d1d27, 0x279e9eb9,0x279e9eb9
-	data4	0xd9e1e138,0xd9e1e138, 0xebf8f813,0xebf8f813
-	data4	0x2b9898b3,0x2b9898b3, 0x22111133,0x22111133
-	data4	0xd26969bb,0xd26969bb, 0xa9d9d970,0xa9d9d970
-	data4	0x078e8e89,0x078e8e89, 0x339494a7,0x339494a7
-	data4	0x2d9b9bb6,0x2d9b9bb6, 0x3c1e1e22,0x3c1e1e22
-	data4	0x15878792,0x15878792, 0xc9e9e920,0xc9e9e920
-	data4	0x87cece49,0x87cece49, 0xaa5555ff,0xaa5555ff
-	data4	0x50282878,0x50282878, 0xa5dfdf7a,0xa5dfdf7a
-	data4	0x038c8c8f,0x038c8c8f, 0x59a1a1f8,0x59a1a1f8
-	data4	0x09898980,0x09898980, 0x1a0d0d17,0x1a0d0d17
-	data4	0x65bfbfda,0x65bfbfda, 0xd7e6e631,0xd7e6e631
-	data4	0x844242c6,0x844242c6, 0xd06868b8,0xd06868b8
-	data4	0x824141c3,0x824141c3, 0x299999b0,0x299999b0
-	data4	0x5a2d2d77,0x5a2d2d77, 0x1e0f0f11,0x1e0f0f11
-	data4	0x7bb0b0cb,0x7bb0b0cb, 0xa85454fc,0xa85454fc
-	data4	0x6dbbbbd6,0x6dbbbbd6, 0x2c16163a,0x2c16163a
-// Te4:
-	data1	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-	data1	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-	data1	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-	data1	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-	data1	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-	data1	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-	data1	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-	data1	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-	data1	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-	data1	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-	data1	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-	data1	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-	data1	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-	data1	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-	data1	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-	data1	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-	data1	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-	data1	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-	data1	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-	data1	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-	data1	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-	data1	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-	data1	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-	data1	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-	data1	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-	data1	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-	data1	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-	data1	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-	data1	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-	data1	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-	data1	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-	data1	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-.size	AES_Te#,2048+256	// HP-UX assembler fails to ".-AES_Te#"
-
-.align	64
-.global	AES_Td#
-.type	AES_Td#,@object
-AES_Td:	data4	0x51f4a750,0x51f4a750, 0x7e416553,0x7e416553
-	data4	0x1a17a4c3,0x1a17a4c3, 0x3a275e96,0x3a275e96
-	data4	0x3bab6bcb,0x3bab6bcb, 0x1f9d45f1,0x1f9d45f1
-	data4	0xacfa58ab,0xacfa58ab, 0x4be30393,0x4be30393
-	data4	0x2030fa55,0x2030fa55, 0xad766df6,0xad766df6
-	data4	0x88cc7691,0x88cc7691, 0xf5024c25,0xf5024c25
-	data4	0x4fe5d7fc,0x4fe5d7fc, 0xc52acbd7,0xc52acbd7
-	data4	0x26354480,0x26354480, 0xb562a38f,0xb562a38f
-	data4	0xdeb15a49,0xdeb15a49, 0x25ba1b67,0x25ba1b67
-	data4	0x45ea0e98,0x45ea0e98, 0x5dfec0e1,0x5dfec0e1
-	data4	0xc32f7502,0xc32f7502, 0x814cf012,0x814cf012
-	data4	0x8d4697a3,0x8d4697a3, 0x6bd3f9c6,0x6bd3f9c6
-	data4	0x038f5fe7,0x038f5fe7, 0x15929c95,0x15929c95
-	data4	0xbf6d7aeb,0xbf6d7aeb, 0x955259da,0x955259da
-	data4	0xd4be832d,0xd4be832d, 0x587421d3,0x587421d3
-	data4	0x49e06929,0x49e06929, 0x8ec9c844,0x8ec9c844
-	data4	0x75c2896a,0x75c2896a, 0xf48e7978,0xf48e7978
-	data4	0x99583e6b,0x99583e6b, 0x27b971dd,0x27b971dd
-	data4	0xbee14fb6,0xbee14fb6, 0xf088ad17,0xf088ad17
-	data4	0xc920ac66,0xc920ac66, 0x7dce3ab4,0x7dce3ab4
-	data4	0x63df4a18,0x63df4a18, 0xe51a3182,0xe51a3182
-	data4	0x97513360,0x97513360, 0x62537f45,0x62537f45
-	data4	0xb16477e0,0xb16477e0, 0xbb6bae84,0xbb6bae84
-	data4	0xfe81a01c,0xfe81a01c, 0xf9082b94,0xf9082b94
-	data4	0x70486858,0x70486858, 0x8f45fd19,0x8f45fd19
-	data4	0x94de6c87,0x94de6c87, 0x527bf8b7,0x527bf8b7
-	data4	0xab73d323,0xab73d323, 0x724b02e2,0x724b02e2
-	data4	0xe31f8f57,0xe31f8f57, 0x6655ab2a,0x6655ab2a
-	data4	0xb2eb2807,0xb2eb2807, 0x2fb5c203,0x2fb5c203
-	data4	0x86c57b9a,0x86c57b9a, 0xd33708a5,0xd33708a5
-	data4	0x302887f2,0x302887f2, 0x23bfa5b2,0x23bfa5b2
-	data4	0x02036aba,0x02036aba, 0xed16825c,0xed16825c
-	data4	0x8acf1c2b,0x8acf1c2b, 0xa779b492,0xa779b492
-	data4	0xf307f2f0,0xf307f2f0, 0x4e69e2a1,0x4e69e2a1
-	data4	0x65daf4cd,0x65daf4cd, 0x0605bed5,0x0605bed5
-	data4	0xd134621f,0xd134621f, 0xc4a6fe8a,0xc4a6fe8a
-	data4	0x342e539d,0x342e539d, 0xa2f355a0,0xa2f355a0
-	data4	0x058ae132,0x058ae132, 0xa4f6eb75,0xa4f6eb75
-	data4	0x0b83ec39,0x0b83ec39, 0x4060efaa,0x4060efaa
-	data4	0x5e719f06,0x5e719f06, 0xbd6e1051,0xbd6e1051
-	data4	0x3e218af9,0x3e218af9, 0x96dd063d,0x96dd063d
-	data4	0xdd3e05ae,0xdd3e05ae, 0x4de6bd46,0x4de6bd46
-	data4	0x91548db5,0x91548db5, 0x71c45d05,0x71c45d05
-	data4	0x0406d46f,0x0406d46f, 0x605015ff,0x605015ff
-	data4	0x1998fb24,0x1998fb24, 0xd6bde997,0xd6bde997
-	data4	0x894043cc,0x894043cc, 0x67d99e77,0x67d99e77
-	data4	0xb0e842bd,0xb0e842bd, 0x07898b88,0x07898b88
-	data4	0xe7195b38,0xe7195b38, 0x79c8eedb,0x79c8eedb
-	data4	0xa17c0a47,0xa17c0a47, 0x7c420fe9,0x7c420fe9
-	data4	0xf8841ec9,0xf8841ec9, 0x00000000,0x00000000
-	data4	0x09808683,0x09808683, 0x322bed48,0x322bed48
-	data4	0x1e1170ac,0x1e1170ac, 0x6c5a724e,0x6c5a724e
-	data4	0xfd0efffb,0xfd0efffb, 0x0f853856,0x0f853856
-	data4	0x3daed51e,0x3daed51e, 0x362d3927,0x362d3927
-	data4	0x0a0fd964,0x0a0fd964, 0x685ca621,0x685ca621
-	data4	0x9b5b54d1,0x9b5b54d1, 0x24362e3a,0x24362e3a
-	data4	0x0c0a67b1,0x0c0a67b1, 0x9357e70f,0x9357e70f
-	data4	0xb4ee96d2,0xb4ee96d2, 0x1b9b919e,0x1b9b919e
-	data4	0x80c0c54f,0x80c0c54f, 0x61dc20a2,0x61dc20a2
-	data4	0x5a774b69,0x5a774b69, 0x1c121a16,0x1c121a16
-	data4	0xe293ba0a,0xe293ba0a, 0xc0a02ae5,0xc0a02ae5
-	data4	0x3c22e043,0x3c22e043, 0x121b171d,0x121b171d
-	data4	0x0e090d0b,0x0e090d0b, 0xf28bc7ad,0xf28bc7ad
-	data4	0x2db6a8b9,0x2db6a8b9, 0x141ea9c8,0x141ea9c8
-	data4	0x57f11985,0x57f11985, 0xaf75074c,0xaf75074c
-	data4	0xee99ddbb,0xee99ddbb, 0xa37f60fd,0xa37f60fd
-	data4	0xf701269f,0xf701269f, 0x5c72f5bc,0x5c72f5bc
-	data4	0x44663bc5,0x44663bc5, 0x5bfb7e34,0x5bfb7e34
-	data4	0x8b432976,0x8b432976, 0xcb23c6dc,0xcb23c6dc
-	data4	0xb6edfc68,0xb6edfc68, 0xb8e4f163,0xb8e4f163
-	data4	0xd731dcca,0xd731dcca, 0x42638510,0x42638510
-	data4	0x13972240,0x13972240, 0x84c61120,0x84c61120
-	data4	0x854a247d,0x854a247d, 0xd2bb3df8,0xd2bb3df8
-	data4	0xaef93211,0xaef93211, 0xc729a16d,0xc729a16d
-	data4	0x1d9e2f4b,0x1d9e2f4b, 0xdcb230f3,0xdcb230f3
-	data4	0x0d8652ec,0x0d8652ec, 0x77c1e3d0,0x77c1e3d0
-	data4	0x2bb3166c,0x2bb3166c, 0xa970b999,0xa970b999
-	data4	0x119448fa,0x119448fa, 0x47e96422,0x47e96422
-	data4	0xa8fc8cc4,0xa8fc8cc4, 0xa0f03f1a,0xa0f03f1a
-	data4	0x567d2cd8,0x567d2cd8, 0x223390ef,0x223390ef
-	data4	0x87494ec7,0x87494ec7, 0xd938d1c1,0xd938d1c1
-	data4	0x8ccaa2fe,0x8ccaa2fe, 0x98d40b36,0x98d40b36
-	data4	0xa6f581cf,0xa6f581cf, 0xa57ade28,0xa57ade28
-	data4	0xdab78e26,0xdab78e26, 0x3fadbfa4,0x3fadbfa4
-	data4	0x2c3a9de4,0x2c3a9de4, 0x5078920d,0x5078920d
-	data4	0x6a5fcc9b,0x6a5fcc9b, 0x547e4662,0x547e4662
-	data4	0xf68d13c2,0xf68d13c2, 0x90d8b8e8,0x90d8b8e8
-	data4	0x2e39f75e,0x2e39f75e, 0x82c3aff5,0x82c3aff5
-	data4	0x9f5d80be,0x9f5d80be, 0x69d0937c,0x69d0937c
-	data4	0x6fd52da9,0x6fd52da9, 0xcf2512b3,0xcf2512b3
-	data4	0xc8ac993b,0xc8ac993b, 0x10187da7,0x10187da7
-	data4	0xe89c636e,0xe89c636e, 0xdb3bbb7b,0xdb3bbb7b
-	data4	0xcd267809,0xcd267809, 0x6e5918f4,0x6e5918f4
-	data4	0xec9ab701,0xec9ab701, 0x834f9aa8,0x834f9aa8
-	data4	0xe6956e65,0xe6956e65, 0xaaffe67e,0xaaffe67e
-	data4	0x21bccf08,0x21bccf08, 0xef15e8e6,0xef15e8e6
-	data4	0xbae79bd9,0xbae79bd9, 0x4a6f36ce,0x4a6f36ce
-	data4	0xea9f09d4,0xea9f09d4, 0x29b07cd6,0x29b07cd6
-	data4	0x31a4b2af,0x31a4b2af, 0x2a3f2331,0x2a3f2331
-	data4	0xc6a59430,0xc6a59430, 0x35a266c0,0x35a266c0
-	data4	0x744ebc37,0x744ebc37, 0xfc82caa6,0xfc82caa6
-	data4	0xe090d0b0,0xe090d0b0, 0x33a7d815,0x33a7d815
-	data4	0xf104984a,0xf104984a, 0x41ecdaf7,0x41ecdaf7
-	data4	0x7fcd500e,0x7fcd500e, 0x1791f62f,0x1791f62f
-	data4	0x764dd68d,0x764dd68d, 0x43efb04d,0x43efb04d
-	data4	0xccaa4d54,0xccaa4d54, 0xe49604df,0xe49604df
-	data4	0x9ed1b5e3,0x9ed1b5e3, 0x4c6a881b,0x4c6a881b
-	data4	0xc12c1fb8,0xc12c1fb8, 0x4665517f,0x4665517f
-	data4	0x9d5eea04,0x9d5eea04, 0x018c355d,0x018c355d
-	data4	0xfa877473,0xfa877473, 0xfb0b412e,0xfb0b412e
-	data4	0xb3671d5a,0xb3671d5a, 0x92dbd252,0x92dbd252
-	data4	0xe9105633,0xe9105633, 0x6dd64713,0x6dd64713
-	data4	0x9ad7618c,0x9ad7618c, 0x37a10c7a,0x37a10c7a
-	data4	0x59f8148e,0x59f8148e, 0xeb133c89,0xeb133c89
-	data4	0xcea927ee,0xcea927ee, 0xb761c935,0xb761c935
-	data4	0xe11ce5ed,0xe11ce5ed, 0x7a47b13c,0x7a47b13c
-	data4	0x9cd2df59,0x9cd2df59, 0x55f2733f,0x55f2733f
-	data4	0x1814ce79,0x1814ce79, 0x73c737bf,0x73c737bf
-	data4	0x53f7cdea,0x53f7cdea, 0x5ffdaa5b,0x5ffdaa5b
-	data4	0xdf3d6f14,0xdf3d6f14, 0x7844db86,0x7844db86
-	data4	0xcaaff381,0xcaaff381, 0xb968c43e,0xb968c43e
-	data4	0x3824342c,0x3824342c, 0xc2a3405f,0xc2a3405f
-	data4	0x161dc372,0x161dc372, 0xbce2250c,0xbce2250c
-	data4	0x283c498b,0x283c498b, 0xff0d9541,0xff0d9541
-	data4	0x39a80171,0x39a80171, 0x080cb3de,0x080cb3de
-	data4	0xd8b4e49c,0xd8b4e49c, 0x6456c190,0x6456c190
-	data4	0x7bcb8461,0x7bcb8461, 0xd532b670,0xd532b670
-	data4	0x486c5c74,0x486c5c74, 0xd0b85742,0xd0b85742
-// Td4:
-	data1	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-	data1	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-	data1	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-	data1	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-	data1	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-	data1	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-	data1	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-	data1	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-	data1	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-	data1	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-	data1	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-	data1	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-	data1	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-	data1	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-	data1	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-	data1	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-	data1	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-	data1	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-	data1	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-	data1	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-	data1	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-	data1	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-	data1	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-	data1	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-	data1	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-	data1	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-	data1	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-	data1	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-	data1	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-	data1	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-	data1	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-	data1	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-.size	AES_Td#,2048+256	// HP-UX assembler fails to ".-AES_Td#"

+ 0 - 1611
drivers/builtin_openssl2/crypto/aes/asm/aes-mips.pl

@@ -1,1611 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# AES for MIPS
-
-# October 2010
-#
-# The code uses a 1K[+256B] S-box and on a single-issue core [such as
-# R5000] spends ~68 cycles per byte processed with a 128-bit key. This
-# is ~16% faster than gcc-generated code, which is not very impressive.
-# But recall that the compressed S-box requires extra processing, namely
-# additional rotations. Rotations are implemented with lwl/lwr pairs,
-# which are normally used for loading unaligned data. Another cool
-# thing about this module is its endian neutrality, which means that
-# it processes data without ever changing byte order...
-
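As a sketch of the trade-off described above (an illustration under the
usual OpenSSL table convention, not this module's code): with a single
1 KB Te0 table, the words a full 4 KB implementation would fetch from
Te1..Te3 are recovered by rotating Te0 entries, which is what the
lwl/lwr offset loads below realize.

    #include <stdint.h>

    extern const uint32_t Te0[256];   /* the 1 KB table; name assumed */

    static uint32_t rotr32(uint32_t x, unsigned n)   /* 1 <= n <= 31 */
    {
        return (x >> n) | (x << (32 - n));
    }

    /* One column of an AES encryption round using only Te0, relying on
     * Te1[x] == rotr(Te0[x],8), Te2[x] == rotr(Te0[x],16),
     * Te3[x] == rotr(Te0[x],24). */
    static uint32_t aes_enc_column(uint32_t s0, uint32_t s1,
                                   uint32_t s2, uint32_t s3, uint32_t rk)
    {
        return Te0[(s0 >> 24) & 0xff]             ^
               rotr32(Te0[(s1 >> 16) & 0xff],  8) ^
               rotr32(Te0[(s2 >>  8) & 0xff], 16) ^
               rotr32(Te0[ s3        & 0xff], 24) ^ rk;
    }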
-######################################################################
-# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
-# widely used. Then there is a new contender: NUBI. It appears that if
-# one picks the latter, it's possible to arrange code in an ABI-neutral
-# manner. Therefore let's stick to the NUBI register layout:
-#
-($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
-($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
-($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
-($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
-#
-# The return value is placed in $a0. The following coding rules
-# facilitate interoperability:
-#
-# - never ever touch $tp, "thread pointer", former $gp;
-# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
-#   old code];
-# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
-#
-# For reference here is register layout for N32/64 MIPS ABIs:
-#
-# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
-# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
-# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
-# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
-# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
-#
-$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
-
-if ($flavour =~ /64|n32/i) {
-	$PTR_ADD="dadd";	# incidentally works even on n32
-	$PTR_SUB="dsub";	# incidentally works even on n32
-	$REG_S="sd";
-	$REG_L="ld";
-	$PTR_SLL="dsll";	# incidentally works even on n32
-	$SZREG=8;
-} else {
-	$PTR_ADD="add";
-	$PTR_SUB="sub";
-	$REG_S="sw";
-	$REG_L="lw";
-	$PTR_SLL="sll";
-	$SZREG=4;
-}
-$pf = ($flavour =~ /nubi/i) ? $t0 : $t2;
-#
-# <[email protected]>
-#
-######################################################################
-
-$big_endian=(`echo MIPSEL | $ENV{CC} -E -`=~/MIPSEL/)?1:0 if ($ENV{CC});
-
-for (@ARGV) {	$output=$_ if (/^\w[\w\-]*\.\w+$/);	}
-open STDOUT,">$output";
-
-if (!defined($big_endian))
-{    $big_endian=(unpack('L',pack('N',1))==1);   }
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-my ($MSB,$LSB)=(0,3);	# automatically converted to little-endian
-
-$code.=<<___;
-.text
-#ifdef OPENSSL_FIPSCANISTER
-# include <openssl/fipssyms.h>
-#endif
-
-#if !defined(__vxworks) || defined(__pic__)
-.option	pic2
-#endif
-.set	noat
-___
-
-{{{
-my $FRAMESIZE=16*$SZREG;
-my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0xc0fff008 : 0xc0ff0000;
-
-my ($inp,$out,$key,$Tbl,$s0,$s1,$s2,$s3)=($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7);
-my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
-my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23));
-my ($key0,$cnt)=($gp,$fp);
-
-# instruction ordering is "stolen" from the output of the MIPSpro assembler
-# invoked with -mips3 -O3 arguments...
-$code.=<<___;
-.align	5
-.ent	_mips_AES_encrypt
-_mips_AES_encrypt:
-	.frame	$sp,0,$ra
-	.set	reorder
-	lw	$t0,0($key)
-	lw	$t1,4($key)
-	lw	$t2,8($key)
-	lw	$t3,12($key)
-	lw	$cnt,240($key)
-	$PTR_ADD $key0,$key,16
-
-	xor	$s0,$t0
-	xor	$s1,$t1
-	xor	$s2,$t2
-	xor	$s3,$t3
-
-	sub	$cnt,1
-	_xtr	$i0,$s1,16-2
-.Loop_enc:
-	_xtr	$i1,$s2,16-2
-	_xtr	$i2,$s3,16-2
-	_xtr	$i3,$s0,16-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lwl	$t0,3($i0)		# Te1[s1>>16]
-	lwl	$t1,3($i1)		# Te1[s2>>16]
-	lwl	$t2,3($i2)		# Te1[s3>>16]
-	lwl	$t3,3($i3)		# Te1[s0>>16]
-	lwr	$t0,2($i0)		# Te1[s1>>16]
-	lwr	$t1,2($i1)		# Te1[s2>>16]
-	lwr	$t2,2($i2)		# Te1[s3>>16]
-	lwr	$t3,2($i3)		# Te1[s0>>16]
-
-	_xtr	$i0,$s2,8-2
-	_xtr	$i1,$s3,8-2
-	_xtr	$i2,$s0,8-2
-	_xtr	$i3,$s1,8-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lwl	$t4,2($i0)		# Te2[s2>>8]
-	lwl	$t5,2($i1)		# Te2[s3>>8]
-	lwl	$t6,2($i2)		# Te2[s0>>8]
-	lwl	$t7,2($i3)		# Te2[s1>>8]
-	lwr	$t4,1($i0)		# Te2[s2>>8]
-	lwr	$t5,1($i1)		# Te2[s3>>8]
-	lwr	$t6,1($i2)		# Te2[s0>>8]
-	lwr	$t7,1($i3)		# Te2[s1>>8]
-
-	_xtr	$i0,$s3,0-2
-	_xtr	$i1,$s0,0-2
-	_xtr	$i2,$s1,0-2
-	_xtr	$i3,$s2,0-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lwl	$t8,1($i0)		# Te3[s3]
-	lwl	$t9,1($i1)		# Te3[s0]
-	lwl	$t10,1($i2)		# Te3[s1]
-	lwl	$t11,1($i3)		# Te3[s2]
-	lwr	$t8,0($i0)		# Te3[s3]
-	lwr	$t9,0($i1)		# Te3[s0]
-	lwr	$t10,0($i2)		# Te3[s1]
-	lwr	$t11,0($i3)		# Te3[s2]
-
-	_xtr	$i0,$s0,24-2
-	_xtr	$i1,$s1,24-2
-	_xtr	$i2,$s2,24-2
-	_xtr	$i3,$s3,24-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	xor	$t0,$t4
-	xor	$t1,$t5
-	xor	$t2,$t6
-	xor	$t3,$t7
-	lw	$t4,0($i0)		# Te0[s0>>24]
-	lw	$t5,0($i1)		# Te0[s1>>24]
-	lw	$t6,0($i2)		# Te0[s2>>24]
-	lw	$t7,0($i3)		# Te0[s3>>24]
-
-	lw	$s0,0($key0)
-	lw	$s1,4($key0)
-	lw	$s2,8($key0)
-	lw	$s3,12($key0)
-
-	xor	$t0,$t8
-	xor	$t1,$t9
-	xor	$t2,$t10
-	xor	$t3,$t11
-
-	xor	$t0,$t4
-	xor	$t1,$t5
-	xor	$t2,$t6
-	xor	$t3,$t7
-
-	sub	$cnt,1
-	$PTR_ADD $key0,16
-	xor	$s0,$t0
-	xor	$s1,$t1
-	xor	$s2,$t2
-	xor	$s3,$t3
-	.set	noreorder
-	bnez	$cnt,.Loop_enc
-	_xtr	$i0,$s1,16-2
-
-	.set	reorder
-	_xtr	$i1,$s2,16-2
-	_xtr	$i2,$s3,16-2
-	_xtr	$i3,$s0,16-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$t0,2($i0)		# Te4[s1>>16]
-	lbu	$t1,2($i1)		# Te4[s2>>16]
-	lbu	$t2,2($i2)		# Te4[s3>>16]
-	lbu	$t3,2($i3)		# Te4[s0>>16]
-
-	_xtr	$i0,$s2,8-2
-	_xtr	$i1,$s3,8-2
-	_xtr	$i2,$s0,8-2
-	_xtr	$i3,$s1,8-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$t4,2($i0)		# Te4[s2>>8]
-	lbu	$t5,2($i1)		# Te4[s3>>8]
-	lbu	$t6,2($i2)		# Te4[s0>>8]
-	lbu	$t7,2($i3)		# Te4[s1>>8]
-
-	_xtr	$i0,$s0,24-2
-	_xtr	$i1,$s1,24-2
-	_xtr	$i2,$s2,24-2
-	_xtr	$i3,$s3,24-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$t8,2($i0)		# Te4[s0>>24]
-	lbu	$t9,2($i1)		# Te4[s1>>24]
-	lbu	$t10,2($i2)		# Te4[s2>>24]
-	lbu	$t11,2($i3)		# Te4[s3>>24]
-
-	_xtr	$i0,$s3,0-2
-	_xtr	$i1,$s0,0-2
-	_xtr	$i2,$s1,0-2
-	_xtr	$i3,$s2,0-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-
-	_ins	$t0,16
-	_ins	$t1,16
-	_ins	$t2,16
-	_ins	$t3,16
-
-	_ins	$t4,8
-	_ins	$t5,8
-	_ins	$t6,8
-	_ins	$t7,8
-
-	xor	$t0,$t4
-	xor	$t1,$t5
-	xor	$t2,$t6
-	xor	$t3,$t7
-
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$t4,2($i0)		# Te4[s3]
-	lbu	$t5,2($i1)		# Te4[s0]
-	lbu	$t6,2($i2)		# Te4[s1]
-	lbu	$t7,2($i3)		# Te4[s2]
-
-	_ins	$t8,24
-	_ins	$t9,24
-	_ins	$t10,24
-	_ins	$t11,24
-
-	lw	$s0,0($key0)
-	lw	$s1,4($key0)
-	lw	$s2,8($key0)
-	lw	$s3,12($key0)
-
-	xor	$t0,$t8
-	xor	$t1,$t9
-	xor	$t2,$t10
-	xor	$t3,$t11
-
-	_ins	$t4,0
-	_ins	$t5,0
-	_ins	$t6,0
-	_ins	$t7,0
-
-	xor	$t0,$t4
-	xor	$t1,$t5
-	xor	$t2,$t6
-	xor	$t3,$t7
-
-	xor	$s0,$t0
-	xor	$s1,$t1
-	xor	$s2,$t2
-	xor	$s3,$t3
-
-	jr	$ra
-.end	_mips_AES_encrypt
-
-.align	5
-.globl	AES_encrypt
-.ent	AES_encrypt
-AES_encrypt:
-	.frame	$sp,$FRAMESIZE,$ra
-	.mask	$SAVED_REGS_MASK,-$SZREG
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
-	.cpload	$pf
-___
-$code.=<<___;
-	$PTR_SUB $sp,$FRAMESIZE
-	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
-	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
-	$REG_S	$s11,$FRAMESIZE-3*$SZREG($sp)
-	$REG_S	$s10,$FRAMESIZE-4*$SZREG($sp)
-	$REG_S	$s9,$FRAMESIZE-5*$SZREG($sp)
-	$REG_S	$s8,$FRAMESIZE-6*$SZREG($sp)
-	$REG_S	$s7,$FRAMESIZE-7*$SZREG($sp)
-	$REG_S	$s6,$FRAMESIZE-8*$SZREG($sp)
-	$REG_S	$s5,$FRAMESIZE-9*$SZREG($sp)
-	$REG_S	$s4,$FRAMESIZE-10*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
-	$REG_S	\$15,$FRAMESIZE-11*$SZREG($sp)
-	$REG_S	\$14,$FRAMESIZE-12*$SZREG($sp)
-	$REG_S	\$13,$FRAMESIZE-13*$SZREG($sp)
-	$REG_S	\$12,$FRAMESIZE-14*$SZREG($sp)
-	$REG_S	$gp,$FRAMESIZE-15*$SZREG($sp)
-___
-$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
-	.cplocal	$Tbl
-	.cpsetup	$pf,$zero,AES_encrypt
-___
-$code.=<<___;
-	.set	reorder
-	la	$Tbl,AES_Te		# PIC-ified 'load address'
-
-	lwl	$s0,0+$MSB($inp)
-	lwl	$s1,4+$MSB($inp)
-	lwl	$s2,8+$MSB($inp)
-	lwl	$s3,12+$MSB($inp)
-	lwr	$s0,0+$LSB($inp)
-	lwr	$s1,4+$LSB($inp)
-	lwr	$s2,8+$LSB($inp)
-	lwr	$s3,12+$LSB($inp)
-
-	bal	_mips_AES_encrypt
-
-	swr	$s0,0+$LSB($out)
-	swr	$s1,4+$LSB($out)
-	swr	$s2,8+$LSB($out)
-	swr	$s3,12+$LSB($out)
-	swl	$s0,0+$MSB($out)
-	swl	$s1,4+$MSB($out)
-	swl	$s2,8+$MSB($out)
-	swl	$s3,12+$MSB($out)
-
-	.set	noreorder
-	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
-	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
-	$REG_L	$s11,$FRAMESIZE-3*$SZREG($sp)
-	$REG_L	$s10,$FRAMESIZE-4*$SZREG($sp)
-	$REG_L	$s9,$FRAMESIZE-5*$SZREG($sp)
-	$REG_L	$s8,$FRAMESIZE-6*$SZREG($sp)
-	$REG_L	$s7,$FRAMESIZE-7*$SZREG($sp)
-	$REG_L	$s6,$FRAMESIZE-8*$SZREG($sp)
-	$REG_L	$s5,$FRAMESIZE-9*$SZREG($sp)
-	$REG_L	$s4,$FRAMESIZE-10*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	\$15,$FRAMESIZE-11*$SZREG($sp)
-	$REG_L	\$14,$FRAMESIZE-12*$SZREG($sp)
-	$REG_L	\$13,$FRAMESIZE-13*$SZREG($sp)
-	$REG_L	\$12,$FRAMESIZE-14*$SZREG($sp)
-	$REG_L	$gp,$FRAMESIZE-15*$SZREG($sp)
-___
-$code.=<<___;
-	jr	$ra
-	$PTR_ADD $sp,$FRAMESIZE
-.end	AES_encrypt
-___
-
-$code.=<<___;
-.align	5
-.ent	_mips_AES_decrypt
-_mips_AES_decrypt:
-	.frame	$sp,0,$ra
-	.set	reorder
-	lw	$t0,0($key)
-	lw	$t1,4($key)
-	lw	$t2,8($key)
-	lw	$t3,12($key)
-	lw	$cnt,240($key)
-	$PTR_ADD $key0,$key,16
-
-	xor	$s0,$t0
-	xor	$s1,$t1
-	xor	$s2,$t2
-	xor	$s3,$t3
-
-	sub	$cnt,1
-	_xtr	$i0,$s3,16-2
-.Loop_dec:
-	_xtr	$i1,$s0,16-2
-	_xtr	$i2,$s1,16-2
-	_xtr	$i3,$s2,16-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lwl	$t0,3($i0)		# Td1[s3>>16]
-	lwl	$t1,3($i1)		# Td1[s0>>16]
-	lwl	$t2,3($i2)		# Td1[s1>>16]
-	lwl	$t3,3($i3)		# Td1[s2>>16]
-	lwr	$t0,2($i0)		# Td1[s3>>16]
-	lwr	$t1,2($i1)		# Td1[s0>>16]
-	lwr	$t2,2($i2)		# Td1[s1>>16]
-	lwr	$t3,2($i3)		# Td1[s2>>16]
-
-	_xtr	$i0,$s2,8-2
-	_xtr	$i1,$s3,8-2
-	_xtr	$i2,$s0,8-2
-	_xtr	$i3,$s1,8-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lwl	$t4,2($i0)		# Td2[s2>>8]
-	lwl	$t5,2($i1)		# Td2[s3>>8]
-	lwl	$t6,2($i2)		# Td2[s0>>8]
-	lwl	$t7,2($i3)		# Td2[s1>>8]
-	lwr	$t4,1($i0)		# Td2[s2>>8]
-	lwr	$t5,1($i1)		# Td2[s3>>8]
-	lwr	$t6,1($i2)		# Td2[s0>>8]
-	lwr	$t7,1($i3)		# Td2[s1>>8]
-
-	_xtr	$i0,$s1,0-2
-	_xtr	$i1,$s2,0-2
-	_xtr	$i2,$s3,0-2
-	_xtr	$i3,$s0,0-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lwl	$t8,1($i0)		# Td3[s1]
-	lwl	$t9,1($i1)		# Td3[s2]
-	lwl	$t10,1($i2)		# Td3[s3]
-	lwl	$t11,1($i3)		# Td3[s0]
-	lwr	$t8,0($i0)		# Td3[s1]
-	lwr	$t9,0($i1)		# Td3[s2]
-	lwr	$t10,0($i2)		# Td3[s3]
-	lwr	$t11,0($i3)		# Td3[s0]
-
-	_xtr	$i0,$s0,24-2
-	_xtr	$i1,$s1,24-2
-	_xtr	$i2,$s2,24-2
-	_xtr	$i3,$s3,24-2
-	and	$i0,0x3fc
-	and	$i1,0x3fc
-	and	$i2,0x3fc
-	and	$i3,0x3fc
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-
-	xor	$t0,$t4
-	xor	$t1,$t5
-	xor	$t2,$t6
-	xor	$t3,$t7
-
-
-	lw	$t4,0($i0)		# Td0[s0>>24]
-	lw	$t5,0($i1)		# Td0[s1>>24]
-	lw	$t6,0($i2)		# Td0[s2>>24]
-	lw	$t7,0($i3)		# Td0[s3>>24]
-
-	lw	$s0,0($key0)
-	lw	$s1,4($key0)
-	lw	$s2,8($key0)
-	lw	$s3,12($key0)
-
-	xor	$t0,$t8
-	xor	$t1,$t9
-	xor	$t2,$t10
-	xor	$t3,$t11
-
-	xor	$t0,$t4
-	xor	$t1,$t5
-	xor	$t2,$t6
-	xor	$t3,$t7
-
-	sub	$cnt,1
-	$PTR_ADD $key0,16
-	xor	$s0,$t0
-	xor	$s1,$t1
-	xor	$s2,$t2
-	xor	$s3,$t3
-	.set	noreorder
-	bnez	$cnt,.Loop_dec
-	_xtr	$i0,$s3,16-2
-
-	.set	reorder
-	lw	$t4,1024($Tbl)		# prefetch Td4
-	lw	$t5,1024+32($Tbl)
-	lw	$t6,1024+64($Tbl)
-	lw	$t7,1024+96($Tbl)
-	lw	$t8,1024+128($Tbl)
-	lw	$t9,1024+160($Tbl)
-	lw	$t10,1024+192($Tbl)
-	lw	$t11,1024+224($Tbl)
-
-	_xtr	$i0,$s3,16
-	_xtr	$i1,$s0,16
-	_xtr	$i2,$s1,16
-	_xtr	$i3,$s2,16
-	and	$i0,0xff
-	and	$i1,0xff
-	and	$i2,0xff
-	and	$i3,0xff
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$t0,1024($i0)		# Td4[s3>>16]
-	lbu	$t1,1024($i1)		# Td4[s0>>16]
-	lbu	$t2,1024($i2)		# Td4[s1>>16]
-	lbu	$t3,1024($i3)		# Td4[s2>>16]
-
-	_xtr	$i0,$s2,8
-	_xtr	$i1,$s3,8
-	_xtr	$i2,$s0,8
-	_xtr	$i3,$s1,8
-	and	$i0,0xff
-	and	$i1,0xff
-	and	$i2,0xff
-	and	$i3,0xff
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$t4,1024($i0)		# Td4[s2>>8]
-	lbu	$t5,1024($i1)		# Td4[s3>>8]
-	lbu	$t6,1024($i2)		# Td4[s0>>8]
-	lbu	$t7,1024($i3)		# Td4[s1>>8]
-
-	_xtr	$i0,$s0,24
-	_xtr	$i1,$s1,24
-	_xtr	$i2,$s2,24
-	_xtr	$i3,$s3,24
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$t8,1024($i0)		# Td4[s0>>24]
-	lbu	$t9,1024($i1)		# Td4[s1>>24]
-	lbu	$t10,1024($i2)		# Td4[s2>>24]
-	lbu	$t11,1024($i3)		# Td4[s3>>24]
-
-	_xtr	$i0,$s1,0
-	_xtr	$i1,$s2,0
-	_xtr	$i2,$s3,0
-	_xtr	$i3,$s0,0
-
-	_ins	$t0,16
-	_ins	$t1,16
-	_ins	$t2,16
-	_ins	$t3,16
-
-	_ins	$t4,8
-	_ins	$t5,8
-	_ins	$t6,8
-	_ins	$t7,8
-
-	xor	$t0,$t4
-	xor	$t1,$t5
-	xor	$t2,$t6
-	xor	$t3,$t7
-
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$t4,1024($i0)		# Td4[s1]
-	lbu	$t5,1024($i1)		# Td4[s2]
-	lbu	$t6,1024($i2)		# Td4[s3]
-	lbu	$t7,1024($i3)		# Td4[s0]
-
-	_ins	$t8,24
-	_ins	$t9,24
-	_ins	$t10,24
-	_ins	$t11,24
-
-	lw	$s0,0($key0)
-	lw	$s1,4($key0)
-	lw	$s2,8($key0)
-	lw	$s3,12($key0)
-
-	_ins	$t4,0
-	_ins	$t5,0
-	_ins	$t6,0
-	_ins	$t7,0
-
-
-	xor	$t0,$t8
-	xor	$t1,$t9
-	xor	$t2,$t10
-	xor	$t3,$t11
-
-	xor	$t0,$t4
-	xor	$t1,$t5
-	xor	$t2,$t6
-	xor	$t3,$t7
-
-	xor	$s0,$t0
-	xor	$s1,$t1
-	xor	$s2,$t2
-	xor	$s3,$t3
-
-	jr	$ra
-.end	_mips_AES_decrypt
-
-.align	5
-.globl	AES_decrypt
-.ent	AES_decrypt
-AES_decrypt:
-	.frame	$sp,$FRAMESIZE,$ra
-	.mask	$SAVED_REGS_MASK,-$SZREG
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
-	.cpload	$pf
-___
-$code.=<<___;
-	$PTR_SUB $sp,$FRAMESIZE
-	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
-	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
-	$REG_S	$s11,$FRAMESIZE-3*$SZREG($sp)
-	$REG_S	$s10,$FRAMESIZE-4*$SZREG($sp)
-	$REG_S	$s9,$FRAMESIZE-5*$SZREG($sp)
-	$REG_S	$s8,$FRAMESIZE-6*$SZREG($sp)
-	$REG_S	$s7,$FRAMESIZE-7*$SZREG($sp)
-	$REG_S	$s6,$FRAMESIZE-8*$SZREG($sp)
-	$REG_S	$s5,$FRAMESIZE-9*$SZREG($sp)
-	$REG_S	$s4,$FRAMESIZE-10*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
-	$REG_S	\$15,$FRAMESIZE-11*$SZREG($sp)
-	$REG_S	\$14,$FRAMESIZE-12*$SZREG($sp)
-	$REG_S	\$13,$FRAMESIZE-13*$SZREG($sp)
-	$REG_S	\$12,$FRAMESIZE-14*$SZREG($sp)
-	$REG_S	$gp,$FRAMESIZE-15*$SZREG($sp)
-___
-$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
-	.cplocal	$Tbl
-	.cpsetup	$pf,$zero,AES_decrypt
-___
-$code.=<<___;
-	.set	reorder
-	la	$Tbl,AES_Td		# PIC-ified 'load address'
-
-	lwl	$s0,0+$MSB($inp)
-	lwl	$s1,4+$MSB($inp)
-	lwl	$s2,8+$MSB($inp)
-	lwl	$s3,12+$MSB($inp)
-	lwr	$s0,0+$LSB($inp)
-	lwr	$s1,4+$LSB($inp)
-	lwr	$s2,8+$LSB($inp)
-	lwr	$s3,12+$LSB($inp)
-
-	bal	_mips_AES_decrypt
-
-	swr	$s0,0+$LSB($out)
-	swr	$s1,4+$LSB($out)
-	swr	$s2,8+$LSB($out)
-	swr	$s3,12+$LSB($out)
-	swl	$s0,0+$MSB($out)
-	swl	$s1,4+$MSB($out)
-	swl	$s2,8+$MSB($out)
-	swl	$s3,12+$MSB($out)
-
-	.set	noreorder
-	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
-	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
-	$REG_L	$s11,$FRAMESIZE-3*$SZREG($sp)
-	$REG_L	$s10,$FRAMESIZE-4*$SZREG($sp)
-	$REG_L	$s9,$FRAMESIZE-5*$SZREG($sp)
-	$REG_L	$s8,$FRAMESIZE-6*$SZREG($sp)
-	$REG_L	$s7,$FRAMESIZE-7*$SZREG($sp)
-	$REG_L	$s6,$FRAMESIZE-8*$SZREG($sp)
-	$REG_L	$s5,$FRAMESIZE-9*$SZREG($sp)
-	$REG_L	$s4,$FRAMESIZE-10*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	\$15,$FRAMESIZE-11*$SZREG($sp)
-	$REG_L	\$14,$FRAMESIZE-12*$SZREG($sp)
-	$REG_L	\$13,$FRAMESIZE-13*$SZREG($sp)
-	$REG_L	\$12,$FRAMESIZE-14*$SZREG($sp)
-	$REG_L	$gp,$FRAMESIZE-15*$SZREG($sp)
-___
-$code.=<<___;
-	jr	$ra
-	$PTR_ADD $sp,$FRAMESIZE
-.end	AES_decrypt
-___
-}}}
-
-{{{
-my $FRAMESIZE=8*$SZREG;
-my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0xc000f008 : 0xc0000000;
-
-my ($inp,$bits,$key,$Tbl)=($a0,$a1,$a2,$a3);
-my ($rk0,$rk1,$rk2,$rk3,$rk4,$rk5,$rk6,$rk7)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
-my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
-my ($rcon,$cnt)=($gp,$fp);
-
-$code.=<<___;
-.align	5
-.ent	_mips_AES_set_encrypt_key
-_mips_AES_set_encrypt_key:
-	.frame	$sp,0,$ra
-	.set	noreorder
-	beqz	$inp,.Lekey_done
-	li	$t0,-1
-	beqz	$key,.Lekey_done
-	$PTR_ADD $rcon,$Tbl,1024+256
-
-	.set	reorder
-	lwl	$rk0,0+$MSB($inp)	# load 128 bits
-	lwl	$rk1,4+$MSB($inp)
-	lwl	$rk2,8+$MSB($inp)
-	lwl	$rk3,12+$MSB($inp)
-	li	$at,128
-	lwr	$rk0,0+$LSB($inp)
-	lwr	$rk1,4+$LSB($inp)
-	lwr	$rk2,8+$LSB($inp)
-	lwr	$rk3,12+$LSB($inp)
-	.set	noreorder
-	beq	$bits,$at,.L128bits
-	li	$cnt,10
-
-	.set	reorder
-	lwl	$rk4,16+$MSB($inp)	# load 192 bits
-	lwl	$rk5,20+$MSB($inp)
-	li	$at,192
-	lwr	$rk4,16+$LSB($inp)
-	lwr	$rk5,20+$LSB($inp)
-	.set	noreorder
-	beq	$bits,$at,.L192bits
-	li	$cnt,8
-
-	.set	reorder
-	lwl	$rk6,24+$MSB($inp)	# load 256 bits
-	lwl	$rk7,28+$MSB($inp)
-	li	$at,256
-	lwr	$rk6,24+$LSB($inp)
-	lwr	$rk7,28+$LSB($inp)
-	.set	noreorder
-	beq	$bits,$at,.L256bits
-	li	$cnt,7
-
-	b	.Lekey_done
-	li	$t0,-2
-
-.align	4
-.L128bits:
-	.set	reorder
-	srl	$i0,$rk3,16
-	srl	$i1,$rk3,8
-	and	$i0,0xff
-	and	$i1,0xff
-	and	$i2,$rk3,0xff
-	srl	$i3,$rk3,24
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$i0,1024($i0)
-	lbu	$i1,1024($i1)
-	lbu	$i2,1024($i2)
-	lbu	$i3,1024($i3)
-
-	sw	$rk0,0($key)
-	sw	$rk1,4($key)
-	sw	$rk2,8($key)
-	sw	$rk3,12($key)
-	sub	$cnt,1
-	$PTR_ADD $key,16
-
-	_bias	$i0,24
-	_bias	$i1,16
-	_bias	$i2,8
-	_bias	$i3,0
-
-	xor	$rk0,$i0
-	lw	$i0,0($rcon)
-	xor	$rk0,$i1
-	xor	$rk0,$i2
-	xor	$rk0,$i3
-	xor	$rk0,$i0
-
-	xor	$rk1,$rk0
-	xor	$rk2,$rk1
-	xor	$rk3,$rk2
-
-	.set	noreorder
-	bnez	$cnt,.L128bits
-	$PTR_ADD $rcon,4
-
-	sw	$rk0,0($key)
-	sw	$rk1,4($key)
-	sw	$rk2,8($key)
-	li	$cnt,10
-	sw	$rk3,12($key)
-	li	$t0,0
-	sw	$cnt,80($key)
-	b	.Lekey_done
-	$PTR_SUB $key,10*16
-
-.align	4
-.L192bits:
-	.set	reorder
-	srl	$i0,$rk5,16
-	srl	$i1,$rk5,8
-	and	$i0,0xff
-	and	$i1,0xff
-	and	$i2,$rk5,0xff
-	srl	$i3,$rk5,24
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$i0,1024($i0)
-	lbu	$i1,1024($i1)
-	lbu	$i2,1024($i2)
-	lbu	$i3,1024($i3)
-
-	sw	$rk0,0($key)
-	sw	$rk1,4($key)
-	sw	$rk2,8($key)
-	sw	$rk3,12($key)
-	sw	$rk4,16($key)
-	sw	$rk5,20($key)
-	sub	$cnt,1
-	$PTR_ADD $key,24
-
-	_bias	$i0,24
-	_bias	$i1,16
-	_bias	$i2,8
-	_bias	$i3,0
-
-	xor	$rk0,$i0
-	lw	$i0,0($rcon)
-	xor	$rk0,$i1
-	xor	$rk0,$i2
-	xor	$rk0,$i3
-	xor	$rk0,$i0
-
-	xor	$rk1,$rk0
-	xor	$rk2,$rk1
-	xor	$rk3,$rk2
-	xor	$rk4,$rk3
-	xor	$rk5,$rk4
-
-	.set	noreorder
-	bnez	$cnt,.L192bits
-	$PTR_ADD $rcon,4
-
-	sw	$rk0,0($key)
-	sw	$rk1,4($key)
-	sw	$rk2,8($key)
-	li	$cnt,12
-	sw	$rk3,12($key)
-	li	$t0,0
-	sw	$cnt,48($key)
-	b	.Lekey_done
-	$PTR_SUB $key,12*16
-
-.align	4
-.L256bits:
-	.set	reorder
-	srl	$i0,$rk7,16
-	srl	$i1,$rk7,8
-	and	$i0,0xff
-	and	$i1,0xff
-	and	$i2,$rk7,0xff
-	srl	$i3,$rk7,24
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$i0,1024($i0)
-	lbu	$i1,1024($i1)
-	lbu	$i2,1024($i2)
-	lbu	$i3,1024($i3)
-
-	sw	$rk0,0($key)
-	sw	$rk1,4($key)
-	sw	$rk2,8($key)
-	sw	$rk3,12($key)
-	sw	$rk4,16($key)
-	sw	$rk5,20($key)
-	sw	$rk6,24($key)
-	sw	$rk7,28($key)
-	sub	$cnt,1
-
-	_bias	$i0,24
-	_bias	$i1,16
-	_bias	$i2,8
-	_bias	$i3,0
-
-	xor	$rk0,$i0
-	lw	$i0,0($rcon)
-	xor	$rk0,$i1
-	xor	$rk0,$i2
-	xor	$rk0,$i3
-	xor	$rk0,$i0
-
-	xor	$rk1,$rk0
-	xor	$rk2,$rk1
-	xor	$rk3,$rk2
-	beqz	$cnt,.L256bits_done
-
-	srl	$i0,$rk3,24
-	srl	$i1,$rk3,16
-	srl	$i2,$rk3,8
-	and	$i3,$rk3,0xff
-	and	$i1,0xff
-	and	$i2,0xff
-	$PTR_ADD $i0,$Tbl
-	$PTR_ADD $i1,$Tbl
-	$PTR_ADD $i2,$Tbl
-	$PTR_ADD $i3,$Tbl
-	lbu	$i0,1024($i0)
-	lbu	$i1,1024($i1)
-	lbu	$i2,1024($i2)
-	lbu	$i3,1024($i3)
-	sll	$i0,24
-	sll	$i1,16
-	sll	$i2,8
-
-	xor	$rk4,$i0
-	xor	$rk4,$i1
-	xor	$rk4,$i2
-	xor	$rk4,$i3
-
-	xor	$rk5,$rk4
-	xor	$rk6,$rk5
-	xor	$rk7,$rk6
-
-	$PTR_ADD $key,32
-	.set	noreorder
-	b	.L256bits
-	$PTR_ADD $rcon,4
-
-.L256bits_done:
-	sw	$rk0,32($key)
-	sw	$rk1,36($key)
-	sw	$rk2,40($key)
-	li	$cnt,14
-	sw	$rk3,44($key)
-	li	$t0,0
-	sw	$cnt,48($key)
-	$PTR_SUB $key,12*16
-
-.Lekey_done:
-	jr	$ra
-	nop
-.end	_mips_AES_set_encrypt_key
-
-.globl	private_AES_set_encrypt_key
-.ent	private_AES_set_encrypt_key
-private_AES_set_encrypt_key:
-	.frame	$sp,$FRAMESIZE,$ra
-	.mask	$SAVED_REGS_MASK,-$SZREG
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
-	.cpload	$pf
-___
-$code.=<<___;
-	$PTR_SUB $sp,$FRAMESIZE
-	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
-	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
-	$REG_S	$s3,$FRAMESIZE-3*$SZREG($sp)
-	$REG_S	$s2,$FRAMESIZE-4*$SZREG($sp)
-	$REG_S	$s1,$FRAMESIZE-5*$SZREG($sp)
-	$REG_S	$s0,$FRAMESIZE-6*$SZREG($sp)
-	$REG_S	$gp,$FRAMESIZE-7*$SZREG($sp)
-___
-$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
-	.cplocal	$Tbl
-	.cpsetup	$pf,$zero,private_AES_set_encrypt_key
-___
-$code.=<<___;
-	.set	reorder
-	la	$Tbl,AES_Te		# PIC-ified 'load address'
-
-	bal	_mips_AES_set_encrypt_key
-
-	.set	noreorder
-	move	$a0,$t0
-	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
-	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$s3,$FRAMESIZE-3*$SZREG($sp)
-	$REG_L	$s2,$FRAMESIZE-4*$SZREG($sp)
-	$REG_L	$s1,$FRAMESIZE-5*$SZREG($sp)
-	$REG_L	$s0,$FRAMESIZE-6*$SZREG($sp)
-	$REG_L	$gp,$FRAMESIZE-7*$SZREG($sp)
-___
-$code.=<<___;
-	jr	$ra
-	$PTR_ADD $sp,$FRAMESIZE
-.end	private_AES_set_encrypt_key
-___
-
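For reference, a hedged C sketch of the step .L128bits above performs per
iteration; Sbox and rcon are stand-ins for the byte table at Tbl+1024 and
the round constants at Tbl+1024+256:

    #include <stdint.h>

    extern const uint8_t  Sbox[256];   /* hypothetical names */
    extern const uint32_t rcon[10];

    /* One AES-128 key-expansion step: RotWord+SubWord of the last
     * round-key word, xor with the round constant, then ripple the
     * result through rk[0..3]. */
    static void expand128_step(uint32_t rk[4], int i)
    {
        uint32_t t = rk[3];
        t = ((uint32_t)Sbox[(t >> 16) & 0xff] << 24) |
            ((uint32_t)Sbox[(t >>  8) & 0xff] << 16) |
            ((uint32_t)Sbox[ t        & 0xff] <<  8) |
             (uint32_t)Sbox[(t >> 24) & 0xff];
        rk[0] ^= t ^ rcon[i];
        rk[1] ^= rk[0];
        rk[2] ^= rk[1];
        rk[3] ^= rk[2];
    }

The 192- and 256-bit variants (.L192bits/.L256bits) ripple further, and
the 256-bit schedule inserts an extra SubWord-only step (no rotation)
between the two word groups.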
-my ($head,$tail)=($inp,$bits);
-my ($tp1,$tp2,$tp4,$tp8,$tp9,$tpb,$tpd,$tpe)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
-my ($m,$x80808080,$x7f7f7f7f,$x1b1b1b1b)=($at,$t0,$t1,$t2);
-$code.=<<___;
-.align	5
-.globl	private_AES_set_decrypt_key
-.ent	private_AES_set_decrypt_key
-private_AES_set_decrypt_key:
-	.frame	$sp,$FRAMESIZE,$ra
-	.mask	$SAVED_REGS_MASK,-$SZREG
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
-	.cpload	$pf
-___
-$code.=<<___;
-	$PTR_SUB $sp,$FRAMESIZE
-	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
-	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
-	$REG_S	$s3,$FRAMESIZE-3*$SZREG($sp)
-	$REG_S	$s2,$FRAMESIZE-4*$SZREG($sp)
-	$REG_S	$s1,$FRAMESIZE-5*$SZREG($sp)
-	$REG_S	$s0,$FRAMESIZE-6*$SZREG($sp)
-	$REG_S	$gp,$FRAMESIZE-7*$SZREG($sp)
-___
-$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
-	.cplocal	$Tbl
-	.cpsetup	$pf,$zero,private_AES_set_decrypt_key
-___
-$code.=<<___;
-	.set	reorder
-	la	$Tbl,AES_Te		# PIC-ified 'load address'
-
-	bal	_mips_AES_set_encrypt_key
-
-	bltz	$t0,.Ldkey_done
-
-	sll	$at,$cnt,4
-	$PTR_ADD $head,$key,0
-	$PTR_ADD $tail,$key,$at
-.align	4
-.Lswap:
-	lw	$rk0,0($head)
-	lw	$rk1,4($head)
-	lw	$rk2,8($head)
-	lw	$rk3,12($head)
-	lw	$rk4,0($tail)
-	lw	$rk5,4($tail)
-	lw	$rk6,8($tail)
-	lw	$rk7,12($tail)
-	sw	$rk0,0($tail)
-	sw	$rk1,4($tail)
-	sw	$rk2,8($tail)
-	sw	$rk3,12($tail)
-	$PTR_ADD $head,16
-	$PTR_SUB $tail,16
-	sw	$rk4,-16($head)
-	sw	$rk5,-12($head)
-	sw	$rk6,-8($head)
-	sw	$rk7,-4($head)
-	bne	$head,$tail,.Lswap
-
-	lw	$tp1,16($key)		# modulo-scheduled
-	lui	$x80808080,0x8080
-	sub	$cnt,1
-	or	$x80808080,0x8080
-	sll	$cnt,2
-	$PTR_ADD $key,16
-	lui	$x1b1b1b1b,0x1b1b
-	nor	$x7f7f7f7f,$zero,$x80808080
-	or	$x1b1b1b1b,0x1b1b
-.align	4
-.Lmix:
-	and	$m,$tp1,$x80808080
-	and	$tp2,$tp1,$x7f7f7f7f
-	srl	$tp4,$m,7
-	addu	$tp2,$tp2		# tp2<<1
-	subu	$m,$tp4
-	and	$m,$x1b1b1b1b
-	xor	$tp2,$m
-
-	and	$m,$tp2,$x80808080
-	and	$tp4,$tp2,$x7f7f7f7f
-	srl	$tp8,$m,7
-	addu	$tp4,$tp4		# tp4<<1
-	subu	$m,$tp8
-	and	$m,$x1b1b1b1b
-	xor	$tp4,$m
-
-	and	$m,$tp4,$x80808080
-	and	$tp8,$tp4,$x7f7f7f7f
-	srl	$tp9,$m,7
-	addu	$tp8,$tp8		# tp8<<1
-	subu	$m,$tp9
-	and	$m,$x1b1b1b1b
-	xor	$tp8,$m
-
-	xor	$tp9,$tp8,$tp1
-	xor	$tpe,$tp8,$tp4
-	xor	$tpb,$tp9,$tp2
-	xor	$tpd,$tp9,$tp4
-
-	_ror	$tp1,$tpd,16
-	 xor	$tpe,$tp2
-	_ror	$tp2,$tpd,-16
-	xor	$tpe,$tp1
-	_ror	$tp1,$tp9,8
-	xor	$tpe,$tp2
-	_ror	$tp2,$tp9,-24
-	xor	$tpe,$tp1
-	_ror	$tp1,$tpb,24
-	xor	$tpe,$tp2
-	_ror	$tp2,$tpb,-8
-	xor	$tpe,$tp1
-	lw	$tp1,4($key)		# modulo-scheduled
-	xor	$tpe,$tp2
-	sub	$cnt,1
-	sw	$tpe,0($key)
-	$PTR_ADD $key,4
-	bnez	$cnt,.Lmix
-
-	li	$t0,0
-.Ldkey_done:
-	.set	noreorder
-	move	$a0,$t0
-	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
-	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$s3,$FRAMESIZE-3*$SZREG($sp)
-	$REG_L	$s2,$FRAMESIZE-4*$SZREG($sp)
-	$REG_L	$s1,$FRAMESIZE-5*$SZREG($sp)
-	$REG_L	$s0,$FRAMESIZE-6*$SZREG($sp)
-	$REG_L	$gp,$FRAMESIZE-7*$SZREG($sp)
-___
-$code.=<<___;
-	jr	$ra
-	$PTR_ADD $sp,$FRAMESIZE
-.end	private_AES_set_decrypt_key
-___
-}}}
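The .Lmix loop above applies the InvMixColumns transform to the middle
round keys; its core primitive is a packed GF(2^8) doubling of all four
bytes of a word at once, built from the 0x80808080/0x7f7f7f7f/0x1b1b1b1b
masks. A minimal C sketch of that primitive (illustrative only):

    #include <stdint.h>

    /* xtime on four packed bytes: shift each byte left by one and xor
     * the AES reduction polynomial 0x1b into every byte that overflowed.
     * (m - (m >> 7)) turns each 0x80 high-bit marker into 0x7f, which
     * the final mask reduces to 0x1b per overflowing byte. */
    static uint32_t xtime4(uint32_t tp1)
    {
        uint32_t m   = tp1 & 0x80808080u;         /* high bit per byte */
        uint32_t tp2 = (tp1 & 0x7f7f7f7fu) << 1;  /* no cross-byte carry */
        return tp2 ^ ((m - (m >> 7)) & 0x1b1b1b1bu);
    }

In the loop, tp2, tp4 and tp8 are xtime4(tp1), xtime4(tp2) and
xtime4(tp4), i.e. 2x, 4x and 8x the key bytes; these are xored together
and rotated to form the 9x, 11x, 13x and 14x multiples InvMixColumns
needs.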
-
-######################################################################
-# Tables are kept in endian-neutral manner
-$code.=<<___;
-.rdata
-.align	6
-AES_Te:
-.byte	0xc6,0x63,0x63,0xa5,	0xf8,0x7c,0x7c,0x84	# Te0
-.byte	0xee,0x77,0x77,0x99,	0xf6,0x7b,0x7b,0x8d
-.byte	0xff,0xf2,0xf2,0x0d,	0xd6,0x6b,0x6b,0xbd
-.byte	0xde,0x6f,0x6f,0xb1,	0x91,0xc5,0xc5,0x54
-.byte	0x60,0x30,0x30,0x50,	0x02,0x01,0x01,0x03
-.byte	0xce,0x67,0x67,0xa9,	0x56,0x2b,0x2b,0x7d
-.byte	0xe7,0xfe,0xfe,0x19,	0xb5,0xd7,0xd7,0x62
-.byte	0x4d,0xab,0xab,0xe6,	0xec,0x76,0x76,0x9a
-.byte	0x8f,0xca,0xca,0x45,	0x1f,0x82,0x82,0x9d
-.byte	0x89,0xc9,0xc9,0x40,	0xfa,0x7d,0x7d,0x87
-.byte	0xef,0xfa,0xfa,0x15,	0xb2,0x59,0x59,0xeb
-.byte	0x8e,0x47,0x47,0xc9,	0xfb,0xf0,0xf0,0x0b
-.byte	0x41,0xad,0xad,0xec,	0xb3,0xd4,0xd4,0x67
-.byte	0x5f,0xa2,0xa2,0xfd,	0x45,0xaf,0xaf,0xea
-.byte	0x23,0x9c,0x9c,0xbf,	0x53,0xa4,0xa4,0xf7
-.byte	0xe4,0x72,0x72,0x96,	0x9b,0xc0,0xc0,0x5b
-.byte	0x75,0xb7,0xb7,0xc2,	0xe1,0xfd,0xfd,0x1c
-.byte	0x3d,0x93,0x93,0xae,	0x4c,0x26,0x26,0x6a
-.byte	0x6c,0x36,0x36,0x5a,	0x7e,0x3f,0x3f,0x41
-.byte	0xf5,0xf7,0xf7,0x02,	0x83,0xcc,0xcc,0x4f
-.byte	0x68,0x34,0x34,0x5c,	0x51,0xa5,0xa5,0xf4
-.byte	0xd1,0xe5,0xe5,0x34,	0xf9,0xf1,0xf1,0x08
-.byte	0xe2,0x71,0x71,0x93,	0xab,0xd8,0xd8,0x73
-.byte	0x62,0x31,0x31,0x53,	0x2a,0x15,0x15,0x3f
-.byte	0x08,0x04,0x04,0x0c,	0x95,0xc7,0xc7,0x52
-.byte	0x46,0x23,0x23,0x65,	0x9d,0xc3,0xc3,0x5e
-.byte	0x30,0x18,0x18,0x28,	0x37,0x96,0x96,0xa1
-.byte	0x0a,0x05,0x05,0x0f,	0x2f,0x9a,0x9a,0xb5
-.byte	0x0e,0x07,0x07,0x09,	0x24,0x12,0x12,0x36
-.byte	0x1b,0x80,0x80,0x9b,	0xdf,0xe2,0xe2,0x3d
-.byte	0xcd,0xeb,0xeb,0x26,	0x4e,0x27,0x27,0x69
-.byte	0x7f,0xb2,0xb2,0xcd,	0xea,0x75,0x75,0x9f
-.byte	0x12,0x09,0x09,0x1b,	0x1d,0x83,0x83,0x9e
-.byte	0x58,0x2c,0x2c,0x74,	0x34,0x1a,0x1a,0x2e
-.byte	0x36,0x1b,0x1b,0x2d,	0xdc,0x6e,0x6e,0xb2
-.byte	0xb4,0x5a,0x5a,0xee,	0x5b,0xa0,0xa0,0xfb
-.byte	0xa4,0x52,0x52,0xf6,	0x76,0x3b,0x3b,0x4d
-.byte	0xb7,0xd6,0xd6,0x61,	0x7d,0xb3,0xb3,0xce
-.byte	0x52,0x29,0x29,0x7b,	0xdd,0xe3,0xe3,0x3e
-.byte	0x5e,0x2f,0x2f,0x71,	0x13,0x84,0x84,0x97
-.byte	0xa6,0x53,0x53,0xf5,	0xb9,0xd1,0xd1,0x68
-.byte	0x00,0x00,0x00,0x00,	0xc1,0xed,0xed,0x2c
-.byte	0x40,0x20,0x20,0x60,	0xe3,0xfc,0xfc,0x1f
-.byte	0x79,0xb1,0xb1,0xc8,	0xb6,0x5b,0x5b,0xed
-.byte	0xd4,0x6a,0x6a,0xbe,	0x8d,0xcb,0xcb,0x46
-.byte	0x67,0xbe,0xbe,0xd9,	0x72,0x39,0x39,0x4b
-.byte	0x94,0x4a,0x4a,0xde,	0x98,0x4c,0x4c,0xd4
-.byte	0xb0,0x58,0x58,0xe8,	0x85,0xcf,0xcf,0x4a
-.byte	0xbb,0xd0,0xd0,0x6b,	0xc5,0xef,0xef,0x2a
-.byte	0x4f,0xaa,0xaa,0xe5,	0xed,0xfb,0xfb,0x16
-.byte	0x86,0x43,0x43,0xc5,	0x9a,0x4d,0x4d,0xd7
-.byte	0x66,0x33,0x33,0x55,	0x11,0x85,0x85,0x94
-.byte	0x8a,0x45,0x45,0xcf,	0xe9,0xf9,0xf9,0x10
-.byte	0x04,0x02,0x02,0x06,	0xfe,0x7f,0x7f,0x81
-.byte	0xa0,0x50,0x50,0xf0,	0x78,0x3c,0x3c,0x44
-.byte	0x25,0x9f,0x9f,0xba,	0x4b,0xa8,0xa8,0xe3
-.byte	0xa2,0x51,0x51,0xf3,	0x5d,0xa3,0xa3,0xfe
-.byte	0x80,0x40,0x40,0xc0,	0x05,0x8f,0x8f,0x8a
-.byte	0x3f,0x92,0x92,0xad,	0x21,0x9d,0x9d,0xbc
-.byte	0x70,0x38,0x38,0x48,	0xf1,0xf5,0xf5,0x04
-.byte	0x63,0xbc,0xbc,0xdf,	0x77,0xb6,0xb6,0xc1
-.byte	0xaf,0xda,0xda,0x75,	0x42,0x21,0x21,0x63
-.byte	0x20,0x10,0x10,0x30,	0xe5,0xff,0xff,0x1a
-.byte	0xfd,0xf3,0xf3,0x0e,	0xbf,0xd2,0xd2,0x6d
-.byte	0x81,0xcd,0xcd,0x4c,	0x18,0x0c,0x0c,0x14
-.byte	0x26,0x13,0x13,0x35,	0xc3,0xec,0xec,0x2f
-.byte	0xbe,0x5f,0x5f,0xe1,	0x35,0x97,0x97,0xa2
-.byte	0x88,0x44,0x44,0xcc,	0x2e,0x17,0x17,0x39
-.byte	0x93,0xc4,0xc4,0x57,	0x55,0xa7,0xa7,0xf2
-.byte	0xfc,0x7e,0x7e,0x82,	0x7a,0x3d,0x3d,0x47
-.byte	0xc8,0x64,0x64,0xac,	0xba,0x5d,0x5d,0xe7
-.byte	0x32,0x19,0x19,0x2b,	0xe6,0x73,0x73,0x95
-.byte	0xc0,0x60,0x60,0xa0,	0x19,0x81,0x81,0x98
-.byte	0x9e,0x4f,0x4f,0xd1,	0xa3,0xdc,0xdc,0x7f
-.byte	0x44,0x22,0x22,0x66,	0x54,0x2a,0x2a,0x7e
-.byte	0x3b,0x90,0x90,0xab,	0x0b,0x88,0x88,0x83
-.byte	0x8c,0x46,0x46,0xca,	0xc7,0xee,0xee,0x29
-.byte	0x6b,0xb8,0xb8,0xd3,	0x28,0x14,0x14,0x3c
-.byte	0xa7,0xde,0xde,0x79,	0xbc,0x5e,0x5e,0xe2
-.byte	0x16,0x0b,0x0b,0x1d,	0xad,0xdb,0xdb,0x76
-.byte	0xdb,0xe0,0xe0,0x3b,	0x64,0x32,0x32,0x56
-.byte	0x74,0x3a,0x3a,0x4e,	0x14,0x0a,0x0a,0x1e
-.byte	0x92,0x49,0x49,0xdb,	0x0c,0x06,0x06,0x0a
-.byte	0x48,0x24,0x24,0x6c,	0xb8,0x5c,0x5c,0xe4
-.byte	0x9f,0xc2,0xc2,0x5d,	0xbd,0xd3,0xd3,0x6e
-.byte	0x43,0xac,0xac,0xef,	0xc4,0x62,0x62,0xa6
-.byte	0x39,0x91,0x91,0xa8,	0x31,0x95,0x95,0xa4
-.byte	0xd3,0xe4,0xe4,0x37,	0xf2,0x79,0x79,0x8b
-.byte	0xd5,0xe7,0xe7,0x32,	0x8b,0xc8,0xc8,0x43
-.byte	0x6e,0x37,0x37,0x59,	0xda,0x6d,0x6d,0xb7
-.byte	0x01,0x8d,0x8d,0x8c,	0xb1,0xd5,0xd5,0x64
-.byte	0x9c,0x4e,0x4e,0xd2,	0x49,0xa9,0xa9,0xe0
-.byte	0xd8,0x6c,0x6c,0xb4,	0xac,0x56,0x56,0xfa
-.byte	0xf3,0xf4,0xf4,0x07,	0xcf,0xea,0xea,0x25
-.byte	0xca,0x65,0x65,0xaf,	0xf4,0x7a,0x7a,0x8e
-.byte	0x47,0xae,0xae,0xe9,	0x10,0x08,0x08,0x18
-.byte	0x6f,0xba,0xba,0xd5,	0xf0,0x78,0x78,0x88
-.byte	0x4a,0x25,0x25,0x6f,	0x5c,0x2e,0x2e,0x72
-.byte	0x38,0x1c,0x1c,0x24,	0x57,0xa6,0xa6,0xf1
-.byte	0x73,0xb4,0xb4,0xc7,	0x97,0xc6,0xc6,0x51
-.byte	0xcb,0xe8,0xe8,0x23,	0xa1,0xdd,0xdd,0x7c
-.byte	0xe8,0x74,0x74,0x9c,	0x3e,0x1f,0x1f,0x21
-.byte	0x96,0x4b,0x4b,0xdd,	0x61,0xbd,0xbd,0xdc
-.byte	0x0d,0x8b,0x8b,0x86,	0x0f,0x8a,0x8a,0x85
-.byte	0xe0,0x70,0x70,0x90,	0x7c,0x3e,0x3e,0x42
-.byte	0x71,0xb5,0xb5,0xc4,	0xcc,0x66,0x66,0xaa
-.byte	0x90,0x48,0x48,0xd8,	0x06,0x03,0x03,0x05
-.byte	0xf7,0xf6,0xf6,0x01,	0x1c,0x0e,0x0e,0x12
-.byte	0xc2,0x61,0x61,0xa3,	0x6a,0x35,0x35,0x5f
-.byte	0xae,0x57,0x57,0xf9,	0x69,0xb9,0xb9,0xd0
-.byte	0x17,0x86,0x86,0x91,	0x99,0xc1,0xc1,0x58
-.byte	0x3a,0x1d,0x1d,0x27,	0x27,0x9e,0x9e,0xb9
-.byte	0xd9,0xe1,0xe1,0x38,	0xeb,0xf8,0xf8,0x13
-.byte	0x2b,0x98,0x98,0xb3,	0x22,0x11,0x11,0x33
-.byte	0xd2,0x69,0x69,0xbb,	0xa9,0xd9,0xd9,0x70
-.byte	0x07,0x8e,0x8e,0x89,	0x33,0x94,0x94,0xa7
-.byte	0x2d,0x9b,0x9b,0xb6,	0x3c,0x1e,0x1e,0x22
-.byte	0x15,0x87,0x87,0x92,	0xc9,0xe9,0xe9,0x20
-.byte	0x87,0xce,0xce,0x49,	0xaa,0x55,0x55,0xff
-.byte	0x50,0x28,0x28,0x78,	0xa5,0xdf,0xdf,0x7a
-.byte	0x03,0x8c,0x8c,0x8f,	0x59,0xa1,0xa1,0xf8
-.byte	0x09,0x89,0x89,0x80,	0x1a,0x0d,0x0d,0x17
-.byte	0x65,0xbf,0xbf,0xda,	0xd7,0xe6,0xe6,0x31
-.byte	0x84,0x42,0x42,0xc6,	0xd0,0x68,0x68,0xb8
-.byte	0x82,0x41,0x41,0xc3,	0x29,0x99,0x99,0xb0
-.byte	0x5a,0x2d,0x2d,0x77,	0x1e,0x0f,0x0f,0x11
-.byte	0x7b,0xb0,0xb0,0xcb,	0xa8,0x54,0x54,0xfc
-.byte	0x6d,0xbb,0xbb,0xd6,	0x2c,0x16,0x16,0x3a
-
-.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5	# Te4
-.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-
-.byte	0x01,0x00,0x00,0x00,	0x02,0x00,0x00,0x00	# rcon
-.byte	0x04,0x00,0x00,0x00,	0x08,0x00,0x00,0x00
-.byte	0x10,0x00,0x00,0x00,	0x20,0x00,0x00,0x00
-.byte	0x40,0x00,0x00,0x00,	0x80,0x00,0x00,0x00
-.byte	0x1B,0x00,0x00,0x00,	0x36,0x00,0x00,0x00
-
-.align	6
-AES_Td:
-.byte	0x51,0xf4,0xa7,0x50,	0x7e,0x41,0x65,0x53	# Td0
-.byte	0x1a,0x17,0xa4,0xc3,	0x3a,0x27,0x5e,0x96
-.byte	0x3b,0xab,0x6b,0xcb,	0x1f,0x9d,0x45,0xf1
-.byte	0xac,0xfa,0x58,0xab,	0x4b,0xe3,0x03,0x93
-.byte	0x20,0x30,0xfa,0x55,	0xad,0x76,0x6d,0xf6
-.byte	0x88,0xcc,0x76,0x91,	0xf5,0x02,0x4c,0x25
-.byte	0x4f,0xe5,0xd7,0xfc,	0xc5,0x2a,0xcb,0xd7
-.byte	0x26,0x35,0x44,0x80,	0xb5,0x62,0xa3,0x8f
-.byte	0xde,0xb1,0x5a,0x49,	0x25,0xba,0x1b,0x67
-.byte	0x45,0xea,0x0e,0x98,	0x5d,0xfe,0xc0,0xe1
-.byte	0xc3,0x2f,0x75,0x02,	0x81,0x4c,0xf0,0x12
-.byte	0x8d,0x46,0x97,0xa3,	0x6b,0xd3,0xf9,0xc6
-.byte	0x03,0x8f,0x5f,0xe7,	0x15,0x92,0x9c,0x95
-.byte	0xbf,0x6d,0x7a,0xeb,	0x95,0x52,0x59,0xda
-.byte	0xd4,0xbe,0x83,0x2d,	0x58,0x74,0x21,0xd3
-.byte	0x49,0xe0,0x69,0x29,	0x8e,0xc9,0xc8,0x44
-.byte	0x75,0xc2,0x89,0x6a,	0xf4,0x8e,0x79,0x78
-.byte	0x99,0x58,0x3e,0x6b,	0x27,0xb9,0x71,0xdd
-.byte	0xbe,0xe1,0x4f,0xb6,	0xf0,0x88,0xad,0x17
-.byte	0xc9,0x20,0xac,0x66,	0x7d,0xce,0x3a,0xb4
-.byte	0x63,0xdf,0x4a,0x18,	0xe5,0x1a,0x31,0x82
-.byte	0x97,0x51,0x33,0x60,	0x62,0x53,0x7f,0x45
-.byte	0xb1,0x64,0x77,0xe0,	0xbb,0x6b,0xae,0x84
-.byte	0xfe,0x81,0xa0,0x1c,	0xf9,0x08,0x2b,0x94
-.byte	0x70,0x48,0x68,0x58,	0x8f,0x45,0xfd,0x19
-.byte	0x94,0xde,0x6c,0x87,	0x52,0x7b,0xf8,0xb7
-.byte	0xab,0x73,0xd3,0x23,	0x72,0x4b,0x02,0xe2
-.byte	0xe3,0x1f,0x8f,0x57,	0x66,0x55,0xab,0x2a
-.byte	0xb2,0xeb,0x28,0x07,	0x2f,0xb5,0xc2,0x03
-.byte	0x86,0xc5,0x7b,0x9a,	0xd3,0x37,0x08,0xa5
-.byte	0x30,0x28,0x87,0xf2,	0x23,0xbf,0xa5,0xb2
-.byte	0x02,0x03,0x6a,0xba,	0xed,0x16,0x82,0x5c
-.byte	0x8a,0xcf,0x1c,0x2b,	0xa7,0x79,0xb4,0x92
-.byte	0xf3,0x07,0xf2,0xf0,	0x4e,0x69,0xe2,0xa1
-.byte	0x65,0xda,0xf4,0xcd,	0x06,0x05,0xbe,0xd5
-.byte	0xd1,0x34,0x62,0x1f,	0xc4,0xa6,0xfe,0x8a
-.byte	0x34,0x2e,0x53,0x9d,	0xa2,0xf3,0x55,0xa0
-.byte	0x05,0x8a,0xe1,0x32,	0xa4,0xf6,0xeb,0x75
-.byte	0x0b,0x83,0xec,0x39,	0x40,0x60,0xef,0xaa
-.byte	0x5e,0x71,0x9f,0x06,	0xbd,0x6e,0x10,0x51
-.byte	0x3e,0x21,0x8a,0xf9,	0x96,0xdd,0x06,0x3d
-.byte	0xdd,0x3e,0x05,0xae,	0x4d,0xe6,0xbd,0x46
-.byte	0x91,0x54,0x8d,0xb5,	0x71,0xc4,0x5d,0x05
-.byte	0x04,0x06,0xd4,0x6f,	0x60,0x50,0x15,0xff
-.byte	0x19,0x98,0xfb,0x24,	0xd6,0xbd,0xe9,0x97
-.byte	0x89,0x40,0x43,0xcc,	0x67,0xd9,0x9e,0x77
-.byte	0xb0,0xe8,0x42,0xbd,	0x07,0x89,0x8b,0x88
-.byte	0xe7,0x19,0x5b,0x38,	0x79,0xc8,0xee,0xdb
-.byte	0xa1,0x7c,0x0a,0x47,	0x7c,0x42,0x0f,0xe9
-.byte	0xf8,0x84,0x1e,0xc9,	0x00,0x00,0x00,0x00
-.byte	0x09,0x80,0x86,0x83,	0x32,0x2b,0xed,0x48
-.byte	0x1e,0x11,0x70,0xac,	0x6c,0x5a,0x72,0x4e
-.byte	0xfd,0x0e,0xff,0xfb,	0x0f,0x85,0x38,0x56
-.byte	0x3d,0xae,0xd5,0x1e,	0x36,0x2d,0x39,0x27
-.byte	0x0a,0x0f,0xd9,0x64,	0x68,0x5c,0xa6,0x21
-.byte	0x9b,0x5b,0x54,0xd1,	0x24,0x36,0x2e,0x3a
-.byte	0x0c,0x0a,0x67,0xb1,	0x93,0x57,0xe7,0x0f
-.byte	0xb4,0xee,0x96,0xd2,	0x1b,0x9b,0x91,0x9e
-.byte	0x80,0xc0,0xc5,0x4f,	0x61,0xdc,0x20,0xa2
-.byte	0x5a,0x77,0x4b,0x69,	0x1c,0x12,0x1a,0x16
-.byte	0xe2,0x93,0xba,0x0a,	0xc0,0xa0,0x2a,0xe5
-.byte	0x3c,0x22,0xe0,0x43,	0x12,0x1b,0x17,0x1d
-.byte	0x0e,0x09,0x0d,0x0b,	0xf2,0x8b,0xc7,0xad
-.byte	0x2d,0xb6,0xa8,0xb9,	0x14,0x1e,0xa9,0xc8
-.byte	0x57,0xf1,0x19,0x85,	0xaf,0x75,0x07,0x4c
-.byte	0xee,0x99,0xdd,0xbb,	0xa3,0x7f,0x60,0xfd
-.byte	0xf7,0x01,0x26,0x9f,	0x5c,0x72,0xf5,0xbc
-.byte	0x44,0x66,0x3b,0xc5,	0x5b,0xfb,0x7e,0x34
-.byte	0x8b,0x43,0x29,0x76,	0xcb,0x23,0xc6,0xdc
-.byte	0xb6,0xed,0xfc,0x68,	0xb8,0xe4,0xf1,0x63
-.byte	0xd7,0x31,0xdc,0xca,	0x42,0x63,0x85,0x10
-.byte	0x13,0x97,0x22,0x40,	0x84,0xc6,0x11,0x20
-.byte	0x85,0x4a,0x24,0x7d,	0xd2,0xbb,0x3d,0xf8
-.byte	0xae,0xf9,0x32,0x11,	0xc7,0x29,0xa1,0x6d
-.byte	0x1d,0x9e,0x2f,0x4b,	0xdc,0xb2,0x30,0xf3
-.byte	0x0d,0x86,0x52,0xec,	0x77,0xc1,0xe3,0xd0
-.byte	0x2b,0xb3,0x16,0x6c,	0xa9,0x70,0xb9,0x99
-.byte	0x11,0x94,0x48,0xfa,	0x47,0xe9,0x64,0x22
-.byte	0xa8,0xfc,0x8c,0xc4,	0xa0,0xf0,0x3f,0x1a
-.byte	0x56,0x7d,0x2c,0xd8,	0x22,0x33,0x90,0xef
-.byte	0x87,0x49,0x4e,0xc7,	0xd9,0x38,0xd1,0xc1
-.byte	0x8c,0xca,0xa2,0xfe,	0x98,0xd4,0x0b,0x36
-.byte	0xa6,0xf5,0x81,0xcf,	0xa5,0x7a,0xde,0x28
-.byte	0xda,0xb7,0x8e,0x26,	0x3f,0xad,0xbf,0xa4
-.byte	0x2c,0x3a,0x9d,0xe4,	0x50,0x78,0x92,0x0d
-.byte	0x6a,0x5f,0xcc,0x9b,	0x54,0x7e,0x46,0x62
-.byte	0xf6,0x8d,0x13,0xc2,	0x90,0xd8,0xb8,0xe8
-.byte	0x2e,0x39,0xf7,0x5e,	0x82,0xc3,0xaf,0xf5
-.byte	0x9f,0x5d,0x80,0xbe,	0x69,0xd0,0x93,0x7c
-.byte	0x6f,0xd5,0x2d,0xa9,	0xcf,0x25,0x12,0xb3
-.byte	0xc8,0xac,0x99,0x3b,	0x10,0x18,0x7d,0xa7
-.byte	0xe8,0x9c,0x63,0x6e,	0xdb,0x3b,0xbb,0x7b
-.byte	0xcd,0x26,0x78,0x09,	0x6e,0x59,0x18,0xf4
-.byte	0xec,0x9a,0xb7,0x01,	0x83,0x4f,0x9a,0xa8
-.byte	0xe6,0x95,0x6e,0x65,	0xaa,0xff,0xe6,0x7e
-.byte	0x21,0xbc,0xcf,0x08,	0xef,0x15,0xe8,0xe6
-.byte	0xba,0xe7,0x9b,0xd9,	0x4a,0x6f,0x36,0xce
-.byte	0xea,0x9f,0x09,0xd4,	0x29,0xb0,0x7c,0xd6
-.byte	0x31,0xa4,0xb2,0xaf,	0x2a,0x3f,0x23,0x31
-.byte	0xc6,0xa5,0x94,0x30,	0x35,0xa2,0x66,0xc0
-.byte	0x74,0x4e,0xbc,0x37,	0xfc,0x82,0xca,0xa6
-.byte	0xe0,0x90,0xd0,0xb0,	0x33,0xa7,0xd8,0x15
-.byte	0xf1,0x04,0x98,0x4a,	0x41,0xec,0xda,0xf7
-.byte	0x7f,0xcd,0x50,0x0e,	0x17,0x91,0xf6,0x2f
-.byte	0x76,0x4d,0xd6,0x8d,	0x43,0xef,0xb0,0x4d
-.byte	0xcc,0xaa,0x4d,0x54,	0xe4,0x96,0x04,0xdf
-.byte	0x9e,0xd1,0xb5,0xe3,	0x4c,0x6a,0x88,0x1b
-.byte	0xc1,0x2c,0x1f,0xb8,	0x46,0x65,0x51,0x7f
-.byte	0x9d,0x5e,0xea,0x04,	0x01,0x8c,0x35,0x5d
-.byte	0xfa,0x87,0x74,0x73,	0xfb,0x0b,0x41,0x2e
-.byte	0xb3,0x67,0x1d,0x5a,	0x92,0xdb,0xd2,0x52
-.byte	0xe9,0x10,0x56,0x33,	0x6d,0xd6,0x47,0x13
-.byte	0x9a,0xd7,0x61,0x8c,	0x37,0xa1,0x0c,0x7a
-.byte	0x59,0xf8,0x14,0x8e,	0xeb,0x13,0x3c,0x89
-.byte	0xce,0xa9,0x27,0xee,	0xb7,0x61,0xc9,0x35
-.byte	0xe1,0x1c,0xe5,0xed,	0x7a,0x47,0xb1,0x3c
-.byte	0x9c,0xd2,0xdf,0x59,	0x55,0xf2,0x73,0x3f
-.byte	0x18,0x14,0xce,0x79,	0x73,0xc7,0x37,0xbf
-.byte	0x53,0xf7,0xcd,0xea,	0x5f,0xfd,0xaa,0x5b
-.byte	0xdf,0x3d,0x6f,0x14,	0x78,0x44,0xdb,0x86
-.byte	0xca,0xaf,0xf3,0x81,	0xb9,0x68,0xc4,0x3e
-.byte	0x38,0x24,0x34,0x2c,	0xc2,0xa3,0x40,0x5f
-.byte	0x16,0x1d,0xc3,0x72,	0xbc,0xe2,0x25,0x0c
-.byte	0x28,0x3c,0x49,0x8b,	0xff,0x0d,0x95,0x41
-.byte	0x39,0xa8,0x01,0x71,	0x08,0x0c,0xb3,0xde
-.byte	0xd8,0xb4,0xe4,0x9c,	0x64,0x56,0xc1,0x90
-.byte	0x7b,0xcb,0x84,0x61,	0xd5,0x32,0xb6,0x70
-.byte	0x48,0x6c,0x5c,0x74,	0xd0,0xb8,0x57,0x42
-
-.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38	# Td4
-.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-___
-
-foreach (split("\n",$code)) {
-	s/\`([^\`]*)\`/eval $1/ge;
-
-	# the made-up instructions _xtr, _ins, _ror and _bias cope
-	# with byte order dependencies...
-	if (/^\s+_/) {
-	    s/(_[a-z]+\s+)(\$[0-9]+),([^,]+)(#.*)*$/$1$2,$2,$3/;
-
-	    s/_xtr\s+(\$[0-9]+),(\$[0-9]+),([0-9]+(\-2)*)/
-		sprintf("srl\t$1,$2,%d",$big_endian ?	eval($3)
-					:		eval("24-$3"))/e or
-	    s/_ins\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
-		sprintf("sll\t$1,$2,%d",$big_endian ?	eval($3)
-					:		eval("24-$3"))/e or
-	    s/_ror\s+(\$[0-9]+),(\$[0-9]+),(\-?[0-9]+)/
-		sprintf("srl\t$1,$2,%d",$big_endian ?	eval($3)
-					:		eval("$3*-1"))/e or
-	    s/_bias\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
-		sprintf("sll\t$1,$2,%d",$big_endian ?	eval($3)
-					:		eval("($3-16)&31"))/e;
-
-	    s/srl\s+(\$[0-9]+),(\$[0-9]+),\-([0-9]+)/
-		sprintf("sll\t$1,$2,$3")/e				or
-	    s/srl\s+(\$[0-9]+),(\$[0-9]+),0/
-		sprintf("and\t$1,$2,0xff")/e				or
-	    s/(sll\s+\$[0-9]+,\$[0-9]+,0)/#$1/;
-	}
-
-	# convert lwl/lwr and swr/swl to little-endian order
-	if (!$big_endian && /^\s+[sl]w[lr]\s+/) {
-	    s/([sl]wl.*)([0-9]+)\((\$[0-9]+)\)/
-		sprintf("$1%d($3)",eval("$2-$2%4+($2%4-1)&3"))/e	or
-	    s/([sl]wr.*)([0-9]+)\((\$[0-9]+)\)/
-		sprintf("$1%d($3)",eval("$2-$2%4+($2%4+1)&3"))/e;
-	}
-
-	print $_,"\n";
-}
-
-close STDOUT;
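The foreach loop above is the real output pass: it lowers the made-up MIPS pseudo-instructions into srl/sll according to the target byte order. A minimal standalone sketch of the _ror rule, assuming a hypothetical register $12 (the script itself substitutes its scheduled register variables):

    # sketch: lower "_ror dst,src,count" per byte order, as above
    for my $big_endian (1, 0) {
        my $line = "\t_ror\t\$12,\$12,16";
        $line =~ s/_ror\s+(\$[0-9]+),(\$[0-9]+),(\-?[0-9]+)/
            sprintf("srl\t$1,$2,%d",$big_endian ? eval($3) : eval("$3*-1"))/e;
        # a negative shift count is then folded into a left shift
        $line =~ s/srl\s+(\$[0-9]+),(\$[0-9]+),\-([0-9]+)/sprintf("sll\t$1,$2,$3")/e;
        print "$line\n";  # big-endian: srl $12,$12,16; little-endian: sll $12,$12,16
    }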

+ 0 - 1022
drivers/builtin_openssl2/crypto/aes/asm/aes-parisc.pl

@@ -1,1022 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# AES for PA-RISC.
-#
-# June 2009.
-#
-# The module is a mechanical transliteration of aes-sparcv9.pl, but
-# with a twist: the S-boxes are compressed even further, down to
-# 1K+256B (see the sketch after this header). On PA-7100LC performance
-# is ~40% better than gcc 3.2 generated code, at about 33 cycles per
-# byte processed with a 128-bit key. Newer CPUs manage 16 cycles per
-# byte. It's not faster than code generated by the vendor compiler,
-# but recall that it uses compressed S-boxes, which require extra
-# processing.
-#
-# Special thanks to polarhome.com for providing an HP-UX account.
-
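The compression rests on a textbook AES T-table identity (stated here as background, not taken from this file): the other three round tables are byte rotations of the first, so a single 1KB word table plus the 256-byte S-box for the final round is enough, with the rotations recovered at run time. A hedged sketch:

    # Te1/Te2/Te3 are byte rotations of Te0, so only Te0 need be stored:
    #   Te1[x] = ror32(Te0[x], 8), Te2[x] = ror32(Te0[x], 16), ...
    sub ror32 { my ($v, $n) = @_; (($v >> $n) | ($v << (32 - $n))) & 0xffffffff }
    printf "0x%08x\n", ror32(0xc66363a5, 8);  # Te0[0x00] rotated -> 0xa5c66363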
-$flavour = shift;
-$output = shift;
-open STDOUT,">$output";
-
-if ($flavour =~ /64/) {
-	$LEVEL		="2.0W";
-	$SIZE_T		=8;
-	$FRAME_MARKER	=80;
-	$SAVED_RP	=16;
-	$PUSH		="std";
-	$PUSHMA		="std,ma";
-	$POP		="ldd";
-	$POPMB		="ldd,mb";
-} else {
-	$LEVEL		="1.0";
-	$SIZE_T		=4;
-	$FRAME_MARKER	=48;
-	$SAVED_RP	=20;
-	$PUSH		="stw";
-	$PUSHMA		="stwm";
-	$POP		="ldw";
-	$POPMB		="ldwm";
-}
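The branch above pins the ABI once per run: every heredoc that follows interpolates the chosen mnemonics and sizes, so the same template emits either 32-bit or 64-bit PA-RISC code. A minimal sketch of the effect, with "64" as a hypothetical flavour value:

    # sketch: one flavour test picks the width and mnemonics that get
    # interpolated into every assembly template below
    my $flavour = "64";                          # hypothetical argument
    my ($SIZE_T, $PUSH, $POP) = $flavour =~ /64/ ? (8, "std", "ldd")
                                                 : (4, "stw", "ldw");
    printf "\t%s\t%%r4,%d(%%sp)\n", $PUSH, -16 * $SIZE_T;  # prints "std %r4,-128(%sp)"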
-
-$FRAME=16*$SIZE_T+$FRAME_MARKER;# 16 saved regs + frame marker
-				#                 [+ argument transfer]
-$inp="%r26";	# arg0
-$out="%r25";	# arg1
-$key="%r24";	# arg2
-
-($s0,$s1,$s2,$s3) = ("%r1","%r2","%r3","%r4");
-($t0,$t1,$t2,$t3) = ("%r5","%r6","%r7","%r8");
-
-($acc0, $acc1, $acc2, $acc3, $acc4, $acc5, $acc6, $acc7,
- $acc8, $acc9,$acc10,$acc11,$acc12,$acc13,$acc14,$acc15) =
-("%r9","%r10","%r11","%r12","%r13","%r14","%r15","%r16",
-"%r17","%r18","%r19","%r20","%r21","%r22","%r23","%r26");
-
-$tbl="%r28";
-$rounds="%r29";
-
-$code=<<___;
-	.LEVEL	$LEVEL
-	.SPACE	\$TEXT\$
-	.SUBSPA	\$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
-
-	.EXPORT	AES_encrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
-	.ALIGN	64
-AES_encrypt
-	.PROC
-	.CALLINFO	FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
-	.ENTRY
-	$PUSH	%r2,-$SAVED_RP(%sp)	; standard prologue
-	$PUSHMA	%r3,$FRAME(%sp)
-	$PUSH	%r4,`-$FRAME+1*$SIZE_T`(%sp)
-	$PUSH	%r5,`-$FRAME+2*$SIZE_T`(%sp)
-	$PUSH	%r6,`-$FRAME+3*$SIZE_T`(%sp)
-	$PUSH	%r7,`-$FRAME+4*$SIZE_T`(%sp)
-	$PUSH	%r8,`-$FRAME+5*$SIZE_T`(%sp)
-	$PUSH	%r9,`-$FRAME+6*$SIZE_T`(%sp)
-	$PUSH	%r10,`-$FRAME+7*$SIZE_T`(%sp)
-	$PUSH	%r11,`-$FRAME+8*$SIZE_T`(%sp)
-	$PUSH	%r12,`-$FRAME+9*$SIZE_T`(%sp)
-	$PUSH	%r13,`-$FRAME+10*$SIZE_T`(%sp)
-	$PUSH	%r14,`-$FRAME+11*$SIZE_T`(%sp)
-	$PUSH	%r15,`-$FRAME+12*$SIZE_T`(%sp)
-	$PUSH	%r16,`-$FRAME+13*$SIZE_T`(%sp)
-	$PUSH	%r17,`-$FRAME+14*$SIZE_T`(%sp)
-	$PUSH	%r18,`-$FRAME+15*$SIZE_T`(%sp)
-
-	blr	%r0,$tbl
-	ldi	3,$t0
-L\$enc_pic
-	andcm	$tbl,$t0,$tbl
-	ldo	L\$AES_Te-L\$enc_pic($tbl),$tbl
-
-	and	$inp,$t0,$t0
-	sub	$inp,$t0,$inp
-	ldw	0($inp),$s0
-	ldw	4($inp),$s1
-	ldw	8($inp),$s2
-	comib,=	0,$t0,L\$enc_inp_aligned
-	ldw	12($inp),$s3
-
-	sh3addl	$t0,%r0,$t0
-	subi	32,$t0,$t0
-	mtctl	$t0,%cr11
-	ldw	16($inp),$t1
-	vshd	$s0,$s1,$s0
-	vshd	$s1,$s2,$s1
-	vshd	$s2,$s3,$s2
-	vshd	$s3,$t1,$s3
-
-L\$enc_inp_aligned
-	bl	_parisc_AES_encrypt,%r31
-	nop
-
-	extru,<> $out,31,2,%r0
-	b	L\$enc_out_aligned
-	nop
-
-	_srm	$s0,24,$acc0
-	_srm	$s0,16,$acc1
-	stb	$acc0,0($out)
-	_srm	$s0,8,$acc2
-	stb	$acc1,1($out)
-	_srm	$s1,24,$acc4
-	stb	$acc2,2($out)
-	_srm	$s1,16,$acc5
-	stb	$s0,3($out)
-	_srm	$s1,8,$acc6
-	stb	$acc4,4($out)
-	_srm	$s2,24,$acc0
-	stb	$acc5,5($out)
-	_srm	$s2,16,$acc1
-	stb	$acc6,6($out)
-	_srm	$s2,8,$acc2
-	stb	$s1,7($out)
-	_srm	$s3,24,$acc4
-	stb	$acc0,8($out)
-	_srm	$s3,16,$acc5
-	stb	$acc1,9($out)
-	_srm	$s3,8,$acc6
-	stb	$acc2,10($out)
-	stb	$s2,11($out)
-	stb	$acc4,12($out)
-	stb	$acc5,13($out)
-	stb	$acc6,14($out)
-	b	L\$enc_done
-	stb	$s3,15($out)
-
-L\$enc_out_aligned
-	stw	$s0,0($out)
-	stw	$s1,4($out)
-	stw	$s2,8($out)
-	stw	$s3,12($out)
-
-L\$enc_done
-	$POP	`-$FRAME-$SAVED_RP`(%sp),%r2	; standard epilogue
-	$POP	`-$FRAME+1*$SIZE_T`(%sp),%r4
-	$POP	`-$FRAME+2*$SIZE_T`(%sp),%r5
-	$POP	`-$FRAME+3*$SIZE_T`(%sp),%r6
-	$POP	`-$FRAME+4*$SIZE_T`(%sp),%r7
-	$POP	`-$FRAME+5*$SIZE_T`(%sp),%r8
-	$POP	`-$FRAME+6*$SIZE_T`(%sp),%r9
-	$POP	`-$FRAME+7*$SIZE_T`(%sp),%r10
-	$POP	`-$FRAME+8*$SIZE_T`(%sp),%r11
-	$POP	`-$FRAME+9*$SIZE_T`(%sp),%r12
-	$POP	`-$FRAME+10*$SIZE_T`(%sp),%r13
-	$POP	`-$FRAME+11*$SIZE_T`(%sp),%r14
-	$POP	`-$FRAME+12*$SIZE_T`(%sp),%r15
-	$POP	`-$FRAME+13*$SIZE_T`(%sp),%r16
-	$POP	`-$FRAME+14*$SIZE_T`(%sp),%r17
-	$POP	`-$FRAME+15*$SIZE_T`(%sp),%r18
-	bv	(%r2)
-	.EXIT
-	$POPMB	-$FRAME(%sp),%r3
-	.PROCEND
-
-	.ALIGN	16
-_parisc_AES_encrypt
-	.PROC
-	.CALLINFO	MILLICODE
-	.ENTRY
-	ldw	240($key),$rounds
-	ldw	0($key),$t0
-	ldw	4($key),$t1
-	ldw	8($key),$t2
-	_srm	$rounds,1,$rounds
-	xor	$t0,$s0,$s0
-	ldw	12($key),$t3
-	_srm	$s0,24,$acc0
-	xor	$t1,$s1,$s1
-	ldw	16($key),$t0
-	_srm	$s1,16,$acc1
-	xor	$t2,$s2,$s2
-	ldw	20($key),$t1
-	xor	$t3,$s3,$s3
-	ldw	24($key),$t2
-	ldw	28($key),$t3
-L\$enc_loop
-	_srm	$s2,8,$acc2
-	ldwx,s	$acc0($tbl),$acc0
-	_srm	$s3,0,$acc3
-	ldwx,s	$acc1($tbl),$acc1
-	_srm	$s1,24,$acc4
-	ldwx,s	$acc2($tbl),$acc2
-	_srm	$s2,16,$acc5
-	ldwx,s	$acc3($tbl),$acc3
-	_srm	$s3,8,$acc6
-	ldwx,s	$acc4($tbl),$acc4
-	_srm	$s0,0,$acc7
-	ldwx,s	$acc5($tbl),$acc5
-	_srm	$s2,24,$acc8
-	ldwx,s	$acc6($tbl),$acc6
-	_srm	$s3,16,$acc9
-	ldwx,s	$acc7($tbl),$acc7
-	_srm	$s0,8,$acc10
-	ldwx,s	$acc8($tbl),$acc8
-	_srm	$s1,0,$acc11
-	ldwx,s	$acc9($tbl),$acc9
-	_srm	$s3,24,$acc12
-	ldwx,s	$acc10($tbl),$acc10
-	_srm	$s0,16,$acc13
-	ldwx,s	$acc11($tbl),$acc11
-	_srm	$s1,8,$acc14
-	ldwx,s	$acc12($tbl),$acc12
-	_srm	$s2,0,$acc15
-	ldwx,s	$acc13($tbl),$acc13
-	ldwx,s	$acc14($tbl),$acc14
-	ldwx,s	$acc15($tbl),$acc15
-	addib,= -1,$rounds,L\$enc_last
-	ldo	32($key),$key
-
-		_ror	$acc1,8,$acc1
-		xor	$acc0,$t0,$t0
-	ldw	0($key),$s0
-		_ror	$acc2,16,$acc2
-		xor	$acc1,$t0,$t0
-	ldw	4($key),$s1
-		_ror	$acc3,24,$acc3
-		xor	$acc2,$t0,$t0
-	ldw	8($key),$s2
-		_ror	$acc5,8,$acc5
-		xor	$acc3,$t0,$t0
-	ldw	12($key),$s3
-		_ror	$acc6,16,$acc6
-		xor	$acc4,$t1,$t1
-		_ror	$acc7,24,$acc7
-		xor	$acc5,$t1,$t1
-		_ror	$acc9,8,$acc9
-		xor	$acc6,$t1,$t1
-		_ror	$acc10,16,$acc10
-		xor	$acc7,$t1,$t1
-		_ror	$acc11,24,$acc11
-		xor	$acc8,$t2,$t2
-		_ror	$acc13,8,$acc13
-		xor	$acc9,$t2,$t2
-		_ror	$acc14,16,$acc14
-		xor	$acc10,$t2,$t2
-		_ror	$acc15,24,$acc15
-		xor	$acc11,$t2,$t2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$t3,$t3
-	_srm	$t0,24,$acc0
-		xor	$acc14,$t3,$t3
-	_srm	$t1,16,$acc1
-		xor	$acc15,$t3,$t3
-
-	_srm	$t2,8,$acc2
-	ldwx,s	$acc0($tbl),$acc0
-	_srm	$t3,0,$acc3
-	ldwx,s	$acc1($tbl),$acc1
-	_srm	$t1,24,$acc4
-	ldwx,s	$acc2($tbl),$acc2
-	_srm	$t2,16,$acc5
-	ldwx,s	$acc3($tbl),$acc3
-	_srm	$t3,8,$acc6
-	ldwx,s	$acc4($tbl),$acc4
-	_srm	$t0,0,$acc7
-	ldwx,s	$acc5($tbl),$acc5
-	_srm	$t2,24,$acc8
-	ldwx,s	$acc6($tbl),$acc6
-	_srm	$t3,16,$acc9
-	ldwx,s	$acc7($tbl),$acc7
-	_srm	$t0,8,$acc10
-	ldwx,s	$acc8($tbl),$acc8
-	_srm	$t1,0,$acc11
-	ldwx,s	$acc9($tbl),$acc9
-	_srm	$t3,24,$acc12
-	ldwx,s	$acc10($tbl),$acc10
-	_srm	$t0,16,$acc13
-	ldwx,s	$acc11($tbl),$acc11
-	_srm	$t1,8,$acc14
-	ldwx,s	$acc12($tbl),$acc12
-	_srm	$t2,0,$acc15
-	ldwx,s	$acc13($tbl),$acc13
-		_ror	$acc1,8,$acc1
-	ldwx,s	$acc14($tbl),$acc14
-
-		_ror	$acc2,16,$acc2
-		xor	$acc0,$s0,$s0
-	ldwx,s	$acc15($tbl),$acc15
-		_ror	$acc3,24,$acc3
-		xor	$acc1,$s0,$s0
-	ldw	16($key),$t0
-		_ror	$acc5,8,$acc5
-		xor	$acc2,$s0,$s0
-	ldw	20($key),$t1
-		_ror	$acc6,16,$acc6
-		xor	$acc3,$s0,$s0
-	ldw	24($key),$t2
-		_ror	$acc7,24,$acc7
-		xor	$acc4,$s1,$s1
-	ldw	28($key),$t3
-		_ror	$acc9,8,$acc9
-		xor	$acc5,$s1,$s1
-	ldw	1024+0($tbl),%r0		; prefetch te4
-		_ror	$acc10,16,$acc10
-		xor	$acc6,$s1,$s1
-	ldw	1024+32($tbl),%r0		; prefetch te4
-		_ror	$acc11,24,$acc11
-		xor	$acc7,$s1,$s1
-	ldw	1024+64($tbl),%r0		; prefetch te4
-		_ror	$acc13,8,$acc13
-		xor	$acc8,$s2,$s2
-	ldw	1024+96($tbl),%r0		; prefetch te4
-		_ror	$acc14,16,$acc14
-		xor	$acc9,$s2,$s2
-	ldw	1024+128($tbl),%r0		; prefetch te4
-		_ror	$acc15,24,$acc15
-		xor	$acc10,$s2,$s2
-	ldw	1024+160($tbl),%r0		; prefetch te4
-	_srm	$s0,24,$acc0
-		xor	$acc11,$s2,$s2
-	ldw	1024+192($tbl),%r0		; prefetch te4
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$s3,$s3
-	ldw	1024+224($tbl),%r0		; prefetch te4
-	_srm	$s1,16,$acc1
-		xor	$acc14,$s3,$s3
-	b	L\$enc_loop
-		xor	$acc15,$s3,$s3
-
-	.ALIGN	16
-L\$enc_last
-	ldo	1024($tbl),$rounds
-		_ror	$acc1,8,$acc1
-		xor	$acc0,$t0,$t0
-	ldw	0($key),$s0
-		_ror	$acc2,16,$acc2
-		xor	$acc1,$t0,$t0
-	ldw	4($key),$s1
-		_ror	$acc3,24,$acc3
-		xor	$acc2,$t0,$t0
-	ldw	8($key),$s2
-		_ror	$acc5,8,$acc5
-		xor	$acc3,$t0,$t0
-	ldw	12($key),$s3
-		_ror	$acc6,16,$acc6
-		xor	$acc4,$t1,$t1
-		_ror	$acc7,24,$acc7
-		xor	$acc5,$t1,$t1
-		_ror	$acc9,8,$acc9
-		xor	$acc6,$t1,$t1
-		_ror	$acc10,16,$acc10
-		xor	$acc7,$t1,$t1
-		_ror	$acc11,24,$acc11
-		xor	$acc8,$t2,$t2
-		_ror	$acc13,8,$acc13
-		xor	$acc9,$t2,$t2
-		_ror	$acc14,16,$acc14
-		xor	$acc10,$t2,$t2
-		_ror	$acc15,24,$acc15
-		xor	$acc11,$t2,$t2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$t3,$t3
-	_srm	$t0,24,$acc0
-		xor	$acc14,$t3,$t3
-	_srm	$t1,16,$acc1
-		xor	$acc15,$t3,$t3
-
-	_srm	$t2,8,$acc2
-	ldbx	$acc0($rounds),$acc0
-	_srm	$t1,24,$acc4
-	ldbx	$acc1($rounds),$acc1
-	_srm	$t2,16,$acc5
-	_srm	$t3,0,$acc3
-	ldbx	$acc2($rounds),$acc2
-	ldbx	$acc3($rounds),$acc3
-	_srm	$t3,8,$acc6
-	ldbx	$acc4($rounds),$acc4
-	_srm	$t2,24,$acc8
-	ldbx	$acc5($rounds),$acc5
-	_srm	$t3,16,$acc9
-	_srm	$t0,0,$acc7
-	ldbx	$acc6($rounds),$acc6
-	ldbx	$acc7($rounds),$acc7
-	_srm	$t0,8,$acc10
-	ldbx	$acc8($rounds),$acc8
-	_srm	$t3,24,$acc12
-	ldbx	$acc9($rounds),$acc9
-	_srm	$t0,16,$acc13
-	_srm	$t1,0,$acc11
-	ldbx	$acc10($rounds),$acc10
-	_srm	$t1,8,$acc14
-	ldbx	$acc11($rounds),$acc11
-	ldbx	$acc12($rounds),$acc12
-	ldbx	$acc13($rounds),$acc13
-	_srm	$t2,0,$acc15
-	ldbx	$acc14($rounds),$acc14
-
-		dep	$acc0,7,8,$acc3
-	ldbx	$acc15($rounds),$acc15
-		dep	$acc4,7,8,$acc7
-		dep	$acc1,15,8,$acc3
-		dep	$acc5,15,8,$acc7
-		dep	$acc2,23,8,$acc3
-		dep	$acc6,23,8,$acc7
-		xor	$acc3,$s0,$s0
-		xor	$acc7,$s1,$s1
-		dep	$acc8,7,8,$acc11
-		dep	$acc12,7,8,$acc15
-		dep	$acc9,15,8,$acc11
-		dep	$acc13,15,8,$acc15
-		dep	$acc10,23,8,$acc11
-		dep	$acc14,23,8,$acc15
-		xor	$acc11,$s2,$s2
-
-	bv	(%r31)
-	.EXIT
-		xor	$acc15,$s3,$s3
-	.PROCEND
-
-	.ALIGN	64
-L\$AES_Te
-	.WORD	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
-	.WORD	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
-	.WORD	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
-	.WORD	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
-	.WORD	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
-	.WORD	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
-	.WORD	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
-	.WORD	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
-	.WORD	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
-	.WORD	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
-	.WORD	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
-	.WORD	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
-	.WORD	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
-	.WORD	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
-	.WORD	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
-	.WORD	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
-	.WORD	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
-	.WORD	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
-	.WORD	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
-	.WORD	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
-	.WORD	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
-	.WORD	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
-	.WORD	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
-	.WORD	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
-	.WORD	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
-	.WORD	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
-	.WORD	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
-	.WORD	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
-	.WORD	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
-	.WORD	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
-	.WORD	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
-	.WORD	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
-	.WORD	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
-	.WORD	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
-	.WORD	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
-	.WORD	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
-	.WORD	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
-	.WORD	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
-	.WORD	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
-	.WORD	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
-	.WORD	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
-	.WORD	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
-	.WORD	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
-	.WORD	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
-	.WORD	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
-	.WORD	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
-	.WORD	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
-	.WORD	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
-	.WORD	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
-	.WORD	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
-	.WORD	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
-	.WORD	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
-	.WORD	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
-	.WORD	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
-	.WORD	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
-	.WORD	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
-	.WORD	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
-	.WORD	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
-	.WORD	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
-	.WORD	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
-	.WORD	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
-	.WORD	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
-	.WORD	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
-	.WORD	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
-	.BYTE	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-	.BYTE	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-	.BYTE	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-	.BYTE	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-	.BYTE	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-	.BYTE	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-	.BYTE	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-	.BYTE	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-	.BYTE	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-	.BYTE	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-	.BYTE	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-	.BYTE	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-	.BYTE	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-	.BYTE	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-	.BYTE	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-	.BYTE	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-	.BYTE	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-	.BYTE	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-	.BYTE	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-	.BYTE	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-	.BYTE	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-	.BYTE	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-	.BYTE	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-	.BYTE	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-	.BYTE	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-	.BYTE	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-	.BYTE	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-	.BYTE	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-	.BYTE	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-	.BYTE	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-	.BYTE	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-	.BYTE	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-___
-
-$code.=<<___;
-	.EXPORT	AES_decrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
-	.ALIGN	16
-AES_decrypt
-	.PROC
-	.CALLINFO	FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
-	.ENTRY
-	$PUSH	%r2,-$SAVED_RP(%sp)	; standard prologue
-	$PUSHMA	%r3,$FRAME(%sp)
-	$PUSH	%r4,`-$FRAME+1*$SIZE_T`(%sp)
-	$PUSH	%r5,`-$FRAME+2*$SIZE_T`(%sp)
-	$PUSH	%r6,`-$FRAME+3*$SIZE_T`(%sp)
-	$PUSH	%r7,`-$FRAME+4*$SIZE_T`(%sp)
-	$PUSH	%r8,`-$FRAME+5*$SIZE_T`(%sp)
-	$PUSH	%r9,`-$FRAME+6*$SIZE_T`(%sp)
-	$PUSH	%r10,`-$FRAME+7*$SIZE_T`(%sp)
-	$PUSH	%r11,`-$FRAME+8*$SIZE_T`(%sp)
-	$PUSH	%r12,`-$FRAME+9*$SIZE_T`(%sp)
-	$PUSH	%r13,`-$FRAME+10*$SIZE_T`(%sp)
-	$PUSH	%r14,`-$FRAME+11*$SIZE_T`(%sp)
-	$PUSH	%r15,`-$FRAME+12*$SIZE_T`(%sp)
-	$PUSH	%r16,`-$FRAME+13*$SIZE_T`(%sp)
-	$PUSH	%r17,`-$FRAME+14*$SIZE_T`(%sp)
-	$PUSH	%r18,`-$FRAME+15*$SIZE_T`(%sp)
-
-	blr	%r0,$tbl
-	ldi	3,$t0
-L\$dec_pic
-	andcm	$tbl,$t0,$tbl
-	ldo	L\$AES_Td-L\$dec_pic($tbl),$tbl
-
-	and	$inp,$t0,$t0
-	sub	$inp,$t0,$inp
-	ldw	0($inp),$s0
-	ldw	4($inp),$s1
-	ldw	8($inp),$s2
-	comib,=	0,$t0,L\$dec_inp_aligned
-	ldw	12($inp),$s3
-
-	sh3addl	$t0,%r0,$t0
-	subi	32,$t0,$t0
-	mtctl	$t0,%cr11
-	ldw	16($inp),$t1
-	vshd	$s0,$s1,$s0
-	vshd	$s1,$s2,$s1
-	vshd	$s2,$s3,$s2
-	vshd	$s3,$t1,$s3
-
-L\$dec_inp_aligned
-	bl	_parisc_AES_decrypt,%r31
-	nop
-
-	extru,<> $out,31,2,%r0
-	b	L\$dec_out_aligned
-	nop
-
-	_srm	$s0,24,$acc0
-	_srm	$s0,16,$acc1
-	stb	$acc0,0($out)
-	_srm	$s0,8,$acc2
-	stb	$acc1,1($out)
-	_srm	$s1,24,$acc4
-	stb	$acc2,2($out)
-	_srm	$s1,16,$acc5
-	stb	$s0,3($out)
-	_srm	$s1,8,$acc6
-	stb	$acc4,4($out)
-	_srm	$s2,24,$acc0
-	stb	$acc5,5($out)
-	_srm	$s2,16,$acc1
-	stb	$acc6,6($out)
-	_srm	$s2,8,$acc2
-	stb	$s1,7($out)
-	_srm	$s3,24,$acc4
-	stb	$acc0,8($out)
-	_srm	$s3,16,$acc5
-	stb	$acc1,9($out)
-	_srm	$s3,8,$acc6
-	stb	$acc2,10($out)
-	stb	$s2,11($out)
-	stb	$acc4,12($out)
-	stb	$acc5,13($out)
-	stb	$acc6,14($out)
-	b	L\$dec_done
-	stb	$s3,15($out)
-
-L\$dec_out_aligned
-	stw	$s0,0($out)
-	stw	$s1,4($out)
-	stw	$s2,8($out)
-	stw	$s3,12($out)
-
-L\$dec_done
-	$POP	`-$FRAME-$SAVED_RP`(%sp),%r2	; standard epilogue
-	$POP	`-$FRAME+1*$SIZE_T`(%sp),%r4
-	$POP	`-$FRAME+2*$SIZE_T`(%sp),%r5
-	$POP	`-$FRAME+3*$SIZE_T`(%sp),%r6
-	$POP	`-$FRAME+4*$SIZE_T`(%sp),%r7
-	$POP	`-$FRAME+5*$SIZE_T`(%sp),%r8
-	$POP	`-$FRAME+6*$SIZE_T`(%sp),%r9
-	$POP	`-$FRAME+7*$SIZE_T`(%sp),%r10
-	$POP	`-$FRAME+8*$SIZE_T`(%sp),%r11
-	$POP	`-$FRAME+9*$SIZE_T`(%sp),%r12
-	$POP	`-$FRAME+10*$SIZE_T`(%sp),%r13
-	$POP	`-$FRAME+11*$SIZE_T`(%sp),%r14
-	$POP	`-$FRAME+12*$SIZE_T`(%sp),%r15
-	$POP	`-$FRAME+13*$SIZE_T`(%sp),%r16
-	$POP	`-$FRAME+14*$SIZE_T`(%sp),%r17
-	$POP	`-$FRAME+15*$SIZE_T`(%sp),%r18
-	bv	(%r2)
-	.EXIT
-	$POPMB	-$FRAME(%sp),%r3
-	.PROCEND
-
-	.ALIGN	16
-_parisc_AES_decrypt
-	.PROC
-	.CALLINFO	MILLICODE
-	.ENTRY
-	ldw	240($key),$rounds
-	ldw	0($key),$t0
-	ldw	4($key),$t1
-	ldw	8($key),$t2
-	ldw	12($key),$t3
-	_srm	$rounds,1,$rounds
-	xor	$t0,$s0,$s0
-	ldw	16($key),$t0
-	xor	$t1,$s1,$s1
-	ldw	20($key),$t1
-	_srm	$s0,24,$acc0
-	xor	$t2,$s2,$s2
-	ldw	24($key),$t2
-	xor	$t3,$s3,$s3
-	ldw	28($key),$t3
-	_srm	$s3,16,$acc1
-L\$dec_loop
-	_srm	$s2,8,$acc2
-	ldwx,s	$acc0($tbl),$acc0
-	_srm	$s1,0,$acc3
-	ldwx,s	$acc1($tbl),$acc1
-	_srm	$s1,24,$acc4
-	ldwx,s	$acc2($tbl),$acc2
-	_srm	$s0,16,$acc5
-	ldwx,s	$acc3($tbl),$acc3
-	_srm	$s3,8,$acc6
-	ldwx,s	$acc4($tbl),$acc4
-	_srm	$s2,0,$acc7
-	ldwx,s	$acc5($tbl),$acc5
-	_srm	$s2,24,$acc8
-	ldwx,s	$acc6($tbl),$acc6
-	_srm	$s1,16,$acc9
-	ldwx,s	$acc7($tbl),$acc7
-	_srm	$s0,8,$acc10
-	ldwx,s	$acc8($tbl),$acc8
-	_srm	$s3,0,$acc11
-	ldwx,s	$acc9($tbl),$acc9
-	_srm	$s3,24,$acc12
-	ldwx,s	$acc10($tbl),$acc10
-	_srm	$s2,16,$acc13
-	ldwx,s	$acc11($tbl),$acc11
-	_srm	$s1,8,$acc14
-	ldwx,s	$acc12($tbl),$acc12
-	_srm	$s0,0,$acc15
-	ldwx,s	$acc13($tbl),$acc13
-	ldwx,s	$acc14($tbl),$acc14
-	ldwx,s	$acc15($tbl),$acc15
-	addib,= -1,$rounds,L\$dec_last
-	ldo	32($key),$key
-
-		_ror	$acc1,8,$acc1
-		xor	$acc0,$t0,$t0
-	ldw	0($key),$s0
-		_ror	$acc2,16,$acc2
-		xor	$acc1,$t0,$t0
-	ldw	4($key),$s1
-		_ror	$acc3,24,$acc3
-		xor	$acc2,$t0,$t0
-	ldw	8($key),$s2
-		_ror	$acc5,8,$acc5
-		xor	$acc3,$t0,$t0
-	ldw	12($key),$s3
-		_ror	$acc6,16,$acc6
-		xor	$acc4,$t1,$t1
-		_ror	$acc7,24,$acc7
-		xor	$acc5,$t1,$t1
-		_ror	$acc9,8,$acc9
-		xor	$acc6,$t1,$t1
-		_ror	$acc10,16,$acc10
-		xor	$acc7,$t1,$t1
-		_ror	$acc11,24,$acc11
-		xor	$acc8,$t2,$t2
-		_ror	$acc13,8,$acc13
-		xor	$acc9,$t2,$t2
-		_ror	$acc14,16,$acc14
-		xor	$acc10,$t2,$t2
-		_ror	$acc15,24,$acc15
-		xor	$acc11,$t2,$t2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$t3,$t3
-	_srm	$t0,24,$acc0
-		xor	$acc14,$t3,$t3
-		xor	$acc15,$t3,$t3
-	_srm	$t3,16,$acc1
-
-	_srm	$t2,8,$acc2
-	ldwx,s	$acc0($tbl),$acc0
-	_srm	$t1,0,$acc3
-	ldwx,s	$acc1($tbl),$acc1
-	_srm	$t1,24,$acc4
-	ldwx,s	$acc2($tbl),$acc2
-	_srm	$t0,16,$acc5
-	ldwx,s	$acc3($tbl),$acc3
-	_srm	$t3,8,$acc6
-	ldwx,s	$acc4($tbl),$acc4
-	_srm	$t2,0,$acc7
-	ldwx,s	$acc5($tbl),$acc5
-	_srm	$t2,24,$acc8
-	ldwx,s	$acc6($tbl),$acc6
-	_srm	$t1,16,$acc9
-	ldwx,s	$acc7($tbl),$acc7
-	_srm	$t0,8,$acc10
-	ldwx,s	$acc8($tbl),$acc8
-	_srm	$t3,0,$acc11
-	ldwx,s	$acc9($tbl),$acc9
-	_srm	$t3,24,$acc12
-	ldwx,s	$acc10($tbl),$acc10
-	_srm	$t2,16,$acc13
-	ldwx,s	$acc11($tbl),$acc11
-	_srm	$t1,8,$acc14
-	ldwx,s	$acc12($tbl),$acc12
-	_srm	$t0,0,$acc15
-	ldwx,s	$acc13($tbl),$acc13
-		_ror	$acc1,8,$acc1
-	ldwx,s	$acc14($tbl),$acc14
-
-		_ror	$acc2,16,$acc2
-		xor	$acc0,$s0,$s0
-	ldwx,s	$acc15($tbl),$acc15
-		_ror	$acc3,24,$acc3
-		xor	$acc1,$s0,$s0
-	ldw	16($key),$t0
-		_ror	$acc5,8,$acc5
-		xor	$acc2,$s0,$s0
-	ldw	20($key),$t1
-		_ror	$acc6,16,$acc6
-		xor	$acc3,$s0,$s0
-	ldw	24($key),$t2
-		_ror	$acc7,24,$acc7
-		xor	$acc4,$s1,$s1
-	ldw	28($key),$t3
-		_ror	$acc9,8,$acc9
-		xor	$acc5,$s1,$s1
-	ldw	1024+0($tbl),%r0		; prefetch td4
-		_ror	$acc10,16,$acc10
-		xor	$acc6,$s1,$s1
-	ldw	1024+32($tbl),%r0		; prefetch td4
-		_ror	$acc11,24,$acc11
-		xor	$acc7,$s1,$s1
-	ldw	1024+64($tbl),%r0		; prefetch td4
-		_ror	$acc13,8,$acc13
-		xor	$acc8,$s2,$s2
-	ldw	1024+96($tbl),%r0		; prefetch td4
-		_ror	$acc14,16,$acc14
-		xor	$acc9,$s2,$s2
-	ldw	1024+128($tbl),%r0		; prefetch td4
-		_ror	$acc15,24,$acc15
-		xor	$acc10,$s2,$s2
-	ldw	1024+160($tbl),%r0		; prefetch td4
-	_srm	$s0,24,$acc0
-		xor	$acc11,$s2,$s2
-	ldw	1024+192($tbl),%r0		; prefetch td4
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$s3,$s3
-	ldw	1024+224($tbl),%r0		; prefetch td4
-		xor	$acc14,$s3,$s3
-		xor	$acc15,$s3,$s3
-	b	L\$dec_loop
-	_srm	$s3,16,$acc1
-
-	.ALIGN	16
-L\$dec_last
-	ldo	1024($tbl),$rounds
-		_ror	$acc1,8,$acc1
-		xor	$acc0,$t0,$t0
-	ldw	0($key),$s0
-		_ror	$acc2,16,$acc2
-		xor	$acc1,$t0,$t0
-	ldw	4($key),$s1
-		_ror	$acc3,24,$acc3
-		xor	$acc2,$t0,$t0
-	ldw	8($key),$s2
-		_ror	$acc5,8,$acc5
-		xor	$acc3,$t0,$t0
-	ldw	12($key),$s3
-		_ror	$acc6,16,$acc6
-		xor	$acc4,$t1,$t1
-		_ror	$acc7,24,$acc7
-		xor	$acc5,$t1,$t1
-		_ror	$acc9,8,$acc9
-		xor	$acc6,$t1,$t1
-		_ror	$acc10,16,$acc10
-		xor	$acc7,$t1,$t1
-		_ror	$acc11,24,$acc11
-		xor	$acc8,$t2,$t2
-		_ror	$acc13,8,$acc13
-		xor	$acc9,$t2,$t2
-		_ror	$acc14,16,$acc14
-		xor	$acc10,$t2,$t2
-		_ror	$acc15,24,$acc15
-		xor	$acc11,$t2,$t2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$t3,$t3
-	_srm	$t0,24,$acc0
-		xor	$acc14,$t3,$t3
-		xor	$acc15,$t3,$t3
-	_srm	$t3,16,$acc1
-
-	_srm	$t2,8,$acc2
-	ldbx	$acc0($rounds),$acc0
-	_srm	$t1,24,$acc4
-	ldbx	$acc1($rounds),$acc1
-	_srm	$t0,16,$acc5
-	_srm	$t1,0,$acc3
-	ldbx	$acc2($rounds),$acc2
-	ldbx	$acc3($rounds),$acc3
-	_srm	$t3,8,$acc6
-	ldbx	$acc4($rounds),$acc4
-	_srm	$t2,24,$acc8
-	ldbx	$acc5($rounds),$acc5
-	_srm	$t1,16,$acc9
-	_srm	$t2,0,$acc7
-	ldbx	$acc6($rounds),$acc6
-	ldbx	$acc7($rounds),$acc7
-	_srm	$t0,8,$acc10
-	ldbx	$acc8($rounds),$acc8
-	_srm	$t3,24,$acc12
-	ldbx	$acc9($rounds),$acc9
-	_srm	$t2,16,$acc13
-	_srm	$t3,0,$acc11
-	ldbx	$acc10($rounds),$acc10
-	_srm	$t1,8,$acc14
-	ldbx	$acc11($rounds),$acc11
-	ldbx	$acc12($rounds),$acc12
-	ldbx	$acc13($rounds),$acc13
-	_srm	$t0,0,$acc15
-	ldbx	$acc14($rounds),$acc14
-
-		dep	$acc0,7,8,$acc3
-	ldbx	$acc15($rounds),$acc15
-		dep	$acc4,7,8,$acc7
-		dep	$acc1,15,8,$acc3
-		dep	$acc5,15,8,$acc7
-		dep	$acc2,23,8,$acc3
-		dep	$acc6,23,8,$acc7
-		xor	$acc3,$s0,$s0
-		xor	$acc7,$s1,$s1
-		dep	$acc8,7,8,$acc11
-		dep	$acc12,7,8,$acc15
-		dep	$acc9,15,8,$acc11
-		dep	$acc13,15,8,$acc15
-		dep	$acc10,23,8,$acc11
-		dep	$acc14,23,8,$acc15
-		xor	$acc11,$s2,$s2
-
-	bv	(%r31)
-	.EXIT
-		xor	$acc15,$s3,$s3
-	.PROCEND
-
-	.ALIGN	64
-L\$AES_Td
-	.WORD	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
-	.WORD	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
-	.WORD	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
-	.WORD	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
-	.WORD	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
-	.WORD	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
-	.WORD	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
-	.WORD	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
-	.WORD	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
-	.WORD	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
-	.WORD	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
-	.WORD	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
-	.WORD	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
-	.WORD	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
-	.WORD	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
-	.WORD	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
-	.WORD	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
-	.WORD	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
-	.WORD	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
-	.WORD	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
-	.WORD	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
-	.WORD	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
-	.WORD	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
-	.WORD	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
-	.WORD	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
-	.WORD	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
-	.WORD	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
-	.WORD	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
-	.WORD	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
-	.WORD	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
-	.WORD	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
-	.WORD	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
-	.WORD	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
-	.WORD	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
-	.WORD	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
-	.WORD	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
-	.WORD	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
-	.WORD	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
-	.WORD	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
-	.WORD	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
-	.WORD	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
-	.WORD	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
-	.WORD	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
-	.WORD	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
-	.WORD	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
-	.WORD	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
-	.WORD	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
-	.WORD	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
-	.WORD	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
-	.WORD	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
-	.WORD	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
-	.WORD	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
-	.WORD	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
-	.WORD	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
-	.WORD	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
-	.WORD	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
-	.WORD	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
-	.WORD	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
-	.WORD	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
-	.WORD	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
-	.WORD	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
-	.WORD	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
-	.WORD	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
-	.WORD	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
-	.BYTE	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-	.BYTE	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-	.BYTE	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-	.BYTE	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-	.BYTE	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-	.BYTE	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-	.BYTE	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-	.BYTE	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-	.BYTE	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-	.BYTE	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-	.BYTE	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-	.BYTE	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-	.BYTE	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-	.BYTE	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-	.BYTE	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-	.BYTE	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-	.BYTE	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-	.BYTE	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-	.BYTE	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-	.BYTE	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-	.BYTE	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-	.BYTE	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-	.BYTE	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-	.BYTE	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-	.BYTE	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-	.BYTE	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-	.BYTE	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-	.BYTE	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-	.BYTE	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-	.BYTE	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-	.BYTE	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-	.BYTE	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-	.STRINGZ "AES for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-foreach (split("\n",$code)) {
-	s/\`([^\`]*)\`/eval $1/ge;
-
-	# translate made-up instructions: _ror, _srm
-	s/_ror(\s+)(%r[0-9]+),/shd$1$2,$2,/				or
-
-	s/_srm(\s+%r[0-9]+),([0-9]+),/
-		$SIZE_T==4 ? sprintf("extru%s,%d,8,",$1,31-$2)
-		:            sprintf("extrd,u%s,%d,8,",$1,63-$2)/e;
-
-	s/,\*/,/			if ($SIZE_T==4);
-	s/\bbv\b(.*\(%r2\))/bve$1/	if ($SIZE_T==8);
-	print $_,"\n";
-}
-close STDOUT;
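The output pass above expands the two made-up PA-RISC mnemonics: _ror becomes a double-word shift (shd), and _srm becomes a fixed 8-bit field extract whose bit position is counted from the most significant bit, so the shift amount is flipped against the word width. A standalone sketch of the _srm rule (registers %r8/%r9 chosen arbitrarily):

    # sketch: "_srm r,24,t" means t = (r >> 24) & 0xff; the extract
    # position is 31-24 on 32-bit PA-RISC, 63-24 on 64-bit
    for my $SIZE_T (4, 8) {
        my $line = "\t_srm\t%r8,24,%r9";
        $line =~ s/_srm(\s+%r[0-9]+),([0-9]+),/
            $SIZE_T==4 ? sprintf("extru%s,%d,8,",$1,31-$2)
            :            sprintf("extrd,u%s,%d,8,",$1,63-$2)/e;
        print "$line\n";  # extru %r8,7,8,%r9 then extrd,u %r8,39,8,%r9
    }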

+ 0 - 1365
drivers/builtin_openssl2/crypto/aes/asm/aes-ppc.pl

@@ -1,1365 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# Needs more work: key setup, CBC routine...
-#
-# ppc_AES_[en|de]crypt perform at 18 cycles per byte processed with a
-# 128-bit key, which is ~40% better than 64-bit code generated by gcc
-# 4.0. But these are not the ones currently used! Their "compact"
-# counterparts are, for security reasons. ppc_AES_encrypt_compact runs
-# at 1/2 of ppc_AES_encrypt speed, while ppc_AES_decrypt_compact runs
-# at 1/3 of ppc_AES_decrypt speed.
-
-# February 2010
-#
-# Rescheduling instructions to favour the Power6 pipeline gave a 10%
-# performance improvement on the platform in question (and marginal
-# improvement even on others). It should be noted that Power6 fails
-# to process a byte in 18 cycles, taking 23 instead, because it fails
-# to issue 4 load instructions in two cycles, managing it only in 3.
-# As a result, non-compact block subroutines are 25% slower than one
-# would expect. Compact functions scale better, because they have a
-# pure computational part, which scales perfectly with clock
-# frequency. To be specific, ppc_AES_encrypt_compact operates at 42
-# cycles per byte, while ppc_AES_decrypt_compact at 55 (in a 64-bit
-# build).
-
-$flavour = shift;
-
-if ($flavour =~ /64/) {
-	$SIZE_T	=8;
-	$LRSAVE	=2*$SIZE_T;
-	$STU	="stdu";
-	$POP	="ld";
-	$PUSH	="std";
-} elsif ($flavour =~ /32/) {
-	$SIZE_T	=4;
-	$LRSAVE	=$SIZE_T;
-	$STU	="stwu";
-	$POP	="lwz";
-	$PUSH	="stw";
-} else { die "nonsense $flavour"; }
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
-die "can't locate ppc-xlate.pl";
-
-open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
-
-$FRAME=32*$SIZE_T;
-
-sub _data_word()
-{ my $i;
-    while(defined($i=shift)) { $code.=sprintf"\t.long\t0x%08x,0x%08x\n",$i,$i; }
-}
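A hedged usage note on _data_word: each word passed is appended to $code twice on a single .long line, and the leading & in the calls further below bypasses the empty () prototype. For example, the first two Te entries expand as:

    &_data_word(0xc66363a5, 0xf87c7c84);
    # appends to $code:
    #	.long	0xc66363a5,0xc66363a5
    #	.long	0xf87c7c84,0xf87c7c84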
-
-$sp="r1";
-$toc="r2";
-$inp="r3";
-$out="r4";
-$key="r5";
-
-$Tbl0="r3";
-$Tbl1="r6";
-$Tbl2="r7";
-$Tbl3="r2";
-
-$s0="r8";
-$s1="r9";
-$s2="r10";
-$s3="r11";
-
-$t0="r12";
-$t1="r13";
-$t2="r14";
-$t3="r15";
-
-$acc00="r16";
-$acc01="r17";
-$acc02="r18";
-$acc03="r19";
-
-$acc04="r20";
-$acc05="r21";
-$acc06="r22";
-$acc07="r23";
-
-$acc08="r24";
-$acc09="r25";
-$acc10="r26";
-$acc11="r27";
-
-$acc12="r28";
-$acc13="r29";
-$acc14="r30";
-$acc15="r31";
-
-# stay away from TLS pointer
-if ($SIZE_T==8)	{ die if ($t1 ne "r13");  $t1="r0";		}
-else		{ die if ($Tbl3 ne "r2"); $Tbl3=$t0; $t0="r0";	}
-$mask80=$Tbl2;
-$mask1b=$Tbl3;
-
-$code.=<<___;
-.machine	"any"
-.text
-
-.align	7
-LAES_Te:
-	mflr	r0
-	bcl	20,31,\$+4
-	mflr	$Tbl0	;    vvvvv "distance" between . and 1st data entry
-	addi	$Tbl0,$Tbl0,`128-8`
-	mtlr	r0
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,0,0
-	.space	`64-9*4`
-LAES_Td:
-	mflr	r0
-	bcl	20,31,\$+4
-	mflr	$Tbl0	;    vvvvvvvv "distance" between . and 1st data entry
-	addi	$Tbl0,$Tbl0,`128-64-8+2048+256`
-	mtlr	r0
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,0,0
-	.space	`128-64-9*4`
-___
-&_data_word(
-	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
-	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
-	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
-	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
-	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
-	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
-	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
-	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
-	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
-	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
-	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
-	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
-	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
-	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
-	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
-	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
-	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
-	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
-	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
-	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
-	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
-	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
-	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
-	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
-	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
-	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
-	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
-	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
-	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
-	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
-	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
-	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
-	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
-	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
-	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
-	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
-	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
-	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
-	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
-	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
-	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
-	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
-	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
-	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
-	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
-	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
-	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
-	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
-	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
-	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
-	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
-	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
-	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
-	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
-	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
-	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
-	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
-	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
-	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
-	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
-	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
-	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
-	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
-	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
-$code.=<<___;
-.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-___
-&_data_word(
-	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
-	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
-	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
-	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
-	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
-	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
-	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
-	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
-	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
-	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
-	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
-	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
-	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
-	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
-	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
-	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
-	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
-	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
-	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
-	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
-	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
-	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
-	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
-	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
-	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
-	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
-	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
-	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
-	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
-	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
-	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
-	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
-	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
-	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
-	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
-	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
-	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
-	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
-	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
-	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
-	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
-	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
-	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
-	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
-	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
-	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
-	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
-	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
-	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
-	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
-	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
-	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
-	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
-	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
-	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
-	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
-	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
-	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
-	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
-	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
-	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
-	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
-	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
-	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
-$code.=<<___;
-.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-
-
-.globl	.AES_encrypt
-.align	7
-.AES_encrypt:
-	$STU	$sp,-$FRAME($sp)
-	mflr	r0
-
-	$PUSH	$toc,`$FRAME-$SIZE_T*20`($sp)
-	$PUSH	r13,`$FRAME-$SIZE_T*19`($sp)
-	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
-	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
-	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
-	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
-	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
-	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
-	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
-	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
-	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
-	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
-	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
-	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
-	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
-	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
-	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
-	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
-	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
-	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
-	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
-
-	andi.	$t0,$inp,3
-	andi.	$t1,$out,3
-	or.	$t0,$t0,$t1
-	bne	Lenc_unaligned
-
-Lenc_unaligned_ok:
-	lwz	$s0,0($inp)
-	lwz	$s1,4($inp)
-	lwz	$s2,8($inp)
-	lwz	$s3,12($inp)
-	bl	LAES_Te
-	bl	Lppc_AES_encrypt_compact
-	stw	$s0,0($out)
-	stw	$s1,4($out)
-	stw	$s2,8($out)
-	stw	$s3,12($out)
-	b	Lenc_done
-
-Lenc_unaligned:
-	subfic	$t0,$inp,4096
-	subfic	$t1,$out,4096
-	andi.	$t0,$t0,4096-16
-	beq	Lenc_xpage
-	andi.	$t1,$t1,4096-16
-	bne	Lenc_unaligned_ok
-
-Lenc_xpage:
-	lbz	$acc00,0($inp)
-	lbz	$acc01,1($inp)
-	lbz	$acc02,2($inp)
-	lbz	$s0,3($inp)
-	lbz	$acc04,4($inp)
-	lbz	$acc05,5($inp)
-	lbz	$acc06,6($inp)
-	lbz	$s1,7($inp)
-	lbz	$acc08,8($inp)
-	lbz	$acc09,9($inp)
-	lbz	$acc10,10($inp)
-	insrwi	$s0,$acc00,8,0
-	lbz	$s2,11($inp)
-	insrwi	$s1,$acc04,8,0
-	lbz	$acc12,12($inp)
-	insrwi	$s0,$acc01,8,8
-	lbz	$acc13,13($inp)
-	insrwi	$s1,$acc05,8,8
-	lbz	$acc14,14($inp)
-	insrwi	$s0,$acc02,8,16
-	lbz	$s3,15($inp)
-	insrwi	$s1,$acc06,8,16
-	insrwi	$s2,$acc08,8,0
-	insrwi	$s3,$acc12,8,0
-	insrwi	$s2,$acc09,8,8
-	insrwi	$s3,$acc13,8,8
-	insrwi	$s2,$acc10,8,16
-	insrwi	$s3,$acc14,8,16
-
-	bl	LAES_Te
-	bl	Lppc_AES_encrypt_compact
-
-	extrwi	$acc00,$s0,8,0
-	extrwi	$acc01,$s0,8,8
-	stb	$acc00,0($out)
-	extrwi	$acc02,$s0,8,16
-	stb	$acc01,1($out)
-	stb	$acc02,2($out)
-	extrwi	$acc04,$s1,8,0
-	stb	$s0,3($out)
-	extrwi	$acc05,$s1,8,8
-	stb	$acc04,4($out)
-	extrwi	$acc06,$s1,8,16
-	stb	$acc05,5($out)
-	stb	$acc06,6($out)
-	extrwi	$acc08,$s2,8,0
-	stb	$s1,7($out)
-	extrwi	$acc09,$s2,8,8
-	stb	$acc08,8($out)
-	extrwi	$acc10,$s2,8,16
-	stb	$acc09,9($out)
-	stb	$acc10,10($out)
-	extrwi	$acc12,$s3,8,0
-	stb	$s2,11($out)
-	extrwi	$acc13,$s3,8,8
-	stb	$acc12,12($out)
-	extrwi	$acc14,$s3,8,16
-	stb	$acc13,13($out)
-	stb	$acc14,14($out)
-	stb	$s3,15($out)
-
-Lenc_done:
-	$POP	r0,`$FRAME+$LRSAVE`($sp)
-	$POP	$toc,`$FRAME-$SIZE_T*20`($sp)
-	$POP	r13,`$FRAME-$SIZE_T*19`($sp)
-	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
-	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
-	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
-	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
-	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
-	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
-	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
-	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
-	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
-	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
-	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
-	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
-	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
-	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
-	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
-	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
-	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
-	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
-	mtlr	r0
-	addi	$sp,$sp,$FRAME
-	blr
-	.long	0
-	.byte	0,12,4,1,0x80,18,3,0
-	.long	0
-
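
What the alignment gymnastics above guard against: an unaligned lwz/stw within one page is harmless on PowerPC, but letting the 16-byte block spill into the next 4KB page could touch memory the caller never promised. Lenc_unaligned therefore measures how many bytes remain before the page boundary for both input and output, and falls back to the byte-wise Lenc_xpage path only when fewer than 16 remain. A minimal Perl sketch of the same test, assuming 4KB pages (crosses_page is a hypothetical name; only the low 12 address bits matter, and a page-aligned pointer is conservatively flagged as well):

    sub crosses_page {
        my ($addr) = @_;
        # bytes left in the current 4KB page, as subfic/andi. compute it
        my $left = (4096 - ($addr & 0xfff)) & (4096 - 16);
        return $left == 0;    # true => use byte-wise loads/stores
    }
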
-.align	5
-Lppc_AES_encrypt:
-	lwz	$acc00,240($key)
-	addi	$Tbl1,$Tbl0,3
-	lwz	$t0,0($key)
-	addi	$Tbl2,$Tbl0,2
-	lwz	$t1,4($key)
-	addi	$Tbl3,$Tbl0,1
-	lwz	$t2,8($key)
-	addi	$acc00,$acc00,-1
-	lwz	$t3,12($key)
-	addi	$key,$key,16
-	xor	$s0,$s0,$t0
-	xor	$s1,$s1,$t1
-	xor	$s2,$s2,$t2
-	xor	$s3,$s3,$t3
-	mtctr	$acc00
-.align	4
-Lenc_loop:
-	rlwinm	$acc00,$s0,`32-24+3`,21,28
-	rlwinm	$acc01,$s1,`32-24+3`,21,28
-	rlwinm	$acc02,$s2,`32-24+3`,21,28
-	rlwinm	$acc03,$s3,`32-24+3`,21,28
-	lwz	$t0,0($key)
-	rlwinm	$acc04,$s1,`32-16+3`,21,28
-	lwz	$t1,4($key)
-	rlwinm	$acc05,$s2,`32-16+3`,21,28
-	lwz	$t2,8($key)
-	rlwinm	$acc06,$s3,`32-16+3`,21,28
-	lwz	$t3,12($key)
-	rlwinm	$acc07,$s0,`32-16+3`,21,28
-	lwzx	$acc00,$Tbl0,$acc00
-	rlwinm	$acc08,$s2,`32-8+3`,21,28
-	lwzx	$acc01,$Tbl0,$acc01
-	rlwinm	$acc09,$s3,`32-8+3`,21,28
-	lwzx	$acc02,$Tbl0,$acc02
-	rlwinm	$acc10,$s0,`32-8+3`,21,28
-	lwzx	$acc03,$Tbl0,$acc03
-	rlwinm	$acc11,$s1,`32-8+3`,21,28
-	lwzx	$acc04,$Tbl1,$acc04
-	rlwinm	$acc12,$s3,`0+3`,21,28
-	lwzx	$acc05,$Tbl1,$acc05
-	rlwinm	$acc13,$s0,`0+3`,21,28
-	lwzx	$acc06,$Tbl1,$acc06
-	rlwinm	$acc14,$s1,`0+3`,21,28
-	lwzx	$acc07,$Tbl1,$acc07
-	rlwinm	$acc15,$s2,`0+3`,21,28
-	lwzx	$acc08,$Tbl2,$acc08
-	xor	$t0,$t0,$acc00
-	lwzx	$acc09,$Tbl2,$acc09
-	xor	$t1,$t1,$acc01
-	lwzx	$acc10,$Tbl2,$acc10
-	xor	$t2,$t2,$acc02
-	lwzx	$acc11,$Tbl2,$acc11
-	xor	$t3,$t3,$acc03
-	lwzx	$acc12,$Tbl3,$acc12
-	xor	$t0,$t0,$acc04
-	lwzx	$acc13,$Tbl3,$acc13
-	xor	$t1,$t1,$acc05
-	lwzx	$acc14,$Tbl3,$acc14
-	xor	$t2,$t2,$acc06
-	lwzx	$acc15,$Tbl3,$acc15
-	xor	$t3,$t3,$acc07
-	xor	$t0,$t0,$acc08
-	xor	$t1,$t1,$acc09
-	xor	$t2,$t2,$acc10
-	xor	$t3,$t3,$acc11
-	xor	$s0,$t0,$acc12
-	xor	$s1,$t1,$acc13
-	xor	$s2,$t2,$acc14
-	xor	$s3,$t3,$acc15
-	addi	$key,$key,16
-	bdnz-	Lenc_loop
-
-	addi	$Tbl2,$Tbl0,2048
-	nop
-	lwz	$t0,0($key)
-	rlwinm	$acc00,$s0,`32-24`,24,31
-	lwz	$t1,4($key)
-	rlwinm	$acc01,$s1,`32-24`,24,31
-	lwz	$t2,8($key)
-	rlwinm	$acc02,$s2,`32-24`,24,31
-	lwz	$t3,12($key)
-	rlwinm	$acc03,$s3,`32-24`,24,31
-	lwz	$acc08,`2048+0`($Tbl0)	! prefetch Te4
-	rlwinm	$acc04,$s1,`32-16`,24,31
-	lwz	$acc09,`2048+32`($Tbl0)
-	rlwinm	$acc05,$s2,`32-16`,24,31
-	lwz	$acc10,`2048+64`($Tbl0)
-	rlwinm	$acc06,$s3,`32-16`,24,31
-	lwz	$acc11,`2048+96`($Tbl0)
-	rlwinm	$acc07,$s0,`32-16`,24,31
-	lwz	$acc12,`2048+128`($Tbl0)
-	rlwinm	$acc08,$s2,`32-8`,24,31
-	lwz	$acc13,`2048+160`($Tbl0)
-	rlwinm	$acc09,$s3,`32-8`,24,31
-	lwz	$acc14,`2048+192`($Tbl0)
-	rlwinm	$acc10,$s0,`32-8`,24,31
-	lwz	$acc15,`2048+224`($Tbl0)
-	rlwinm	$acc11,$s1,`32-8`,24,31
-	lbzx	$acc00,$Tbl2,$acc00
-	rlwinm	$acc12,$s3,`0`,24,31
-	lbzx	$acc01,$Tbl2,$acc01
-	rlwinm	$acc13,$s0,`0`,24,31
-	lbzx	$acc02,$Tbl2,$acc02
-	rlwinm	$acc14,$s1,`0`,24,31
-	lbzx	$acc03,$Tbl2,$acc03
-	rlwinm	$acc15,$s2,`0`,24,31
-	lbzx	$acc04,$Tbl2,$acc04
-	rlwinm	$s0,$acc00,24,0,7
-	lbzx	$acc05,$Tbl2,$acc05
-	rlwinm	$s1,$acc01,24,0,7
-	lbzx	$acc06,$Tbl2,$acc06
-	rlwinm	$s2,$acc02,24,0,7
-	lbzx	$acc07,$Tbl2,$acc07
-	rlwinm	$s3,$acc03,24,0,7
-	lbzx	$acc08,$Tbl2,$acc08
-	rlwimi	$s0,$acc04,16,8,15
-	lbzx	$acc09,$Tbl2,$acc09
-	rlwimi	$s1,$acc05,16,8,15
-	lbzx	$acc10,$Tbl2,$acc10
-	rlwimi	$s2,$acc06,16,8,15
-	lbzx	$acc11,$Tbl2,$acc11
-	rlwimi	$s3,$acc07,16,8,15
-	lbzx	$acc12,$Tbl2,$acc12
-	rlwimi	$s0,$acc08,8,16,23
-	lbzx	$acc13,$Tbl2,$acc13
-	rlwimi	$s1,$acc09,8,16,23
-	lbzx	$acc14,$Tbl2,$acc14
-	rlwimi	$s2,$acc10,8,16,23
-	lbzx	$acc15,$Tbl2,$acc15
-	rlwimi	$s3,$acc11,8,16,23
-	or	$s0,$s0,$acc12
-	or	$s1,$s1,$acc13
-	or	$s2,$s2,$acc14
-	or	$s3,$s3,$acc15
-	xor	$s0,$s0,$t0
-	xor	$s1,$s1,$t1
-	xor	$s2,$s2,$t2
-	xor	$s3,$s3,$t3
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,0,0
-
-.align	4
-Lppc_AES_encrypt_compact:
-	lwz	$acc00,240($key)
-	addi	$Tbl1,$Tbl0,2048
-	lwz	$t0,0($key)
-	lis	$mask80,0x8080
-	lwz	$t1,4($key)
-	lis	$mask1b,0x1b1b
-	lwz	$t2,8($key)
-	ori	$mask80,$mask80,0x8080
-	lwz	$t3,12($key)
-	ori	$mask1b,$mask1b,0x1b1b
-	addi	$key,$key,16
-	mtctr	$acc00
-.align	4
-Lenc_compact_loop:
-	xor	$s0,$s0,$t0
-	xor	$s1,$s1,$t1
-	rlwinm	$acc00,$s0,`32-24`,24,31
-	xor	$s2,$s2,$t2
-	rlwinm	$acc01,$s1,`32-24`,24,31
-	xor	$s3,$s3,$t3
-	rlwinm	$acc02,$s2,`32-24`,24,31
-	rlwinm	$acc03,$s3,`32-24`,24,31
-	rlwinm	$acc04,$s1,`32-16`,24,31
-	rlwinm	$acc05,$s2,`32-16`,24,31
-	rlwinm	$acc06,$s3,`32-16`,24,31
-	rlwinm	$acc07,$s0,`32-16`,24,31
-	lbzx	$acc00,$Tbl1,$acc00
-	rlwinm	$acc08,$s2,`32-8`,24,31
-	lbzx	$acc01,$Tbl1,$acc01
-	rlwinm	$acc09,$s3,`32-8`,24,31
-	lbzx	$acc02,$Tbl1,$acc02
-	rlwinm	$acc10,$s0,`32-8`,24,31
-	lbzx	$acc03,$Tbl1,$acc03
-	rlwinm	$acc11,$s1,`32-8`,24,31
-	lbzx	$acc04,$Tbl1,$acc04
-	rlwinm	$acc12,$s3,`0`,24,31
-	lbzx	$acc05,$Tbl1,$acc05
-	rlwinm	$acc13,$s0,`0`,24,31
-	lbzx	$acc06,$Tbl1,$acc06
-	rlwinm	$acc14,$s1,`0`,24,31
-	lbzx	$acc07,$Tbl1,$acc07
-	rlwinm	$acc15,$s2,`0`,24,31
-	lbzx	$acc08,$Tbl1,$acc08
-	rlwinm	$s0,$acc00,24,0,7
-	lbzx	$acc09,$Tbl1,$acc09
-	rlwinm	$s1,$acc01,24,0,7
-	lbzx	$acc10,$Tbl1,$acc10
-	rlwinm	$s2,$acc02,24,0,7
-	lbzx	$acc11,$Tbl1,$acc11
-	rlwinm	$s3,$acc03,24,0,7
-	lbzx	$acc12,$Tbl1,$acc12
-	rlwimi	$s0,$acc04,16,8,15
-	lbzx	$acc13,$Tbl1,$acc13
-	rlwimi	$s1,$acc05,16,8,15
-	lbzx	$acc14,$Tbl1,$acc14
-	rlwimi	$s2,$acc06,16,8,15
-	lbzx	$acc15,$Tbl1,$acc15
-	rlwimi	$s3,$acc07,16,8,15
-	rlwimi	$s0,$acc08,8,16,23
-	rlwimi	$s1,$acc09,8,16,23
-	rlwimi	$s2,$acc10,8,16,23
-	rlwimi	$s3,$acc11,8,16,23
-	lwz	$t0,0($key)
-	or	$s0,$s0,$acc12
-	lwz	$t1,4($key)
-	or	$s1,$s1,$acc13
-	lwz	$t2,8($key)
-	or	$s2,$s2,$acc14
-	lwz	$t3,12($key)
-	or	$s3,$s3,$acc15
-
-	addi	$key,$key,16
-	bdz	Lenc_compact_done
-
-	and	$acc00,$s0,$mask80	# r1=r0&0x80808080
-	and	$acc01,$s1,$mask80
-	and	$acc02,$s2,$mask80
-	and	$acc03,$s3,$mask80
-	srwi	$acc04,$acc00,7		# r1>>7
-	andc	$acc08,$s0,$mask80	# r0&0x7f7f7f7f
-	srwi	$acc05,$acc01,7
-	andc	$acc09,$s1,$mask80
-	srwi	$acc06,$acc02,7
-	andc	$acc10,$s2,$mask80
-	srwi	$acc07,$acc03,7
-	andc	$acc11,$s3,$mask80
-	sub	$acc00,$acc00,$acc04	# r1-(r1>>7)
-	sub	$acc01,$acc01,$acc05
-	sub	$acc02,$acc02,$acc06
-	sub	$acc03,$acc03,$acc07
-	add	$acc08,$acc08,$acc08	# (r0&0x7f7f7f7f)<<1
-	add	$acc09,$acc09,$acc09
-	add	$acc10,$acc10,$acc10
-	add	$acc11,$acc11,$acc11
-	and	$acc00,$acc00,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
-	and	$acc01,$acc01,$mask1b
-	and	$acc02,$acc02,$mask1b
-	and	$acc03,$acc03,$mask1b
-	xor	$acc00,$acc00,$acc08	# r2
-	xor	$acc01,$acc01,$acc09
-	 rotlwi	$acc12,$s0,16		# ROTATE(r0,16)
-	xor	$acc02,$acc02,$acc10
-	 rotlwi	$acc13,$s1,16
-	xor	$acc03,$acc03,$acc11
-	 rotlwi	$acc14,$s2,16
-
-	xor	$s0,$s0,$acc00		# r0^r2
-	rotlwi	$acc15,$s3,16
-	xor	$s1,$s1,$acc01
-	rotrwi	$s0,$s0,24		# ROTATE(r2^r0,24)
-	xor	$s2,$s2,$acc02
-	rotrwi	$s1,$s1,24
-	xor	$s3,$s3,$acc03
-	rotrwi	$s2,$s2,24
-	xor	$s0,$s0,$acc00		# ROTATE(r2^r0,24)^r2
-	rotrwi	$s3,$s3,24
-	xor	$s1,$s1,$acc01
-	xor	$s2,$s2,$acc02
-	xor	$s3,$s3,$acc03
-	rotlwi	$acc08,$acc12,8		# ROTATE(r0,24)
-	xor	$s0,$s0,$acc12		#
-	rotlwi	$acc09,$acc13,8
-	xor	$s1,$s1,$acc13
-	rotlwi	$acc10,$acc14,8
-	xor	$s2,$s2,$acc14
-	rotlwi	$acc11,$acc15,8
-	xor	$s3,$s3,$acc15
-	xor	$s0,$s0,$acc08		#
-	xor	$s1,$s1,$acc09
-	xor	$s2,$s2,$acc10
-	xor	$s3,$s3,$acc11
-
-	b	Lenc_compact_loop
-.align	4
-Lenc_compact_done:
-	xor	$s0,$s0,$t0
-	xor	$s1,$s1,$t1
-	xor	$s2,$s2,$t2
-	xor	$s3,$s3,$t3
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,0,0
-
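
The heart of Lenc_compact_loop's tail (between the bdz and the branch back) is four GF(2^8) doublings performed at once on packed bytes, exactly as the r1/r2 comments describe: the top bit of each byte becomes a 0x1b reduction constant via r1-(r1>>7) (each byte holds 0x80 or 0x00, so the subtraction never borrows across bytes), and that is XORed into the left-shifted low seven bits. A minimal Perl sketch of one such doubling on a 32-bit word (xtime32 is a hypothetical name):

    sub xtime32 {
        my ($r0) = @_;
        my $r1  = $r0 & 0x80808080;                  # top bit of each byte
        my $red = ($r1 - ($r1 >> 7)) & 0x1b1b1b1b;   # 0x1b where that bit was set
        return (($r0 & 0x7f7f7f7f) << 1) ^ $red;     # double low bits, then reduce
    }

The rotate/XOR sequence that follows in the loop combines this doubled word with rotations of the original state to complete MixColumns without table lookups; the decrypt twin below repeats the doubling to obtain the 4x and 8x multiples that InvMixColumns needs.
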
-.globl	.AES_decrypt
-.align	7
-.AES_decrypt:
-	$STU	$sp,-$FRAME($sp)
-	mflr	r0
-
-	$PUSH	$toc,`$FRAME-$SIZE_T*20`($sp)
-	$PUSH	r13,`$FRAME-$SIZE_T*19`($sp)
-	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
-	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
-	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
-	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
-	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
-	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
-	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
-	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
-	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
-	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
-	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
-	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
-	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
-	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
-	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
-	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
-	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
-	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
-	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
-
-	andi.	$t0,$inp,3
-	andi.	$t1,$out,3
-	or.	$t0,$t0,$t1
-	bne	Ldec_unaligned
-
-Ldec_unaligned_ok:
-	lwz	$s0,0($inp)
-	lwz	$s1,4($inp)
-	lwz	$s2,8($inp)
-	lwz	$s3,12($inp)
-	bl	LAES_Td
-	bl	Lppc_AES_decrypt_compact
-	stw	$s0,0($out)
-	stw	$s1,4($out)
-	stw	$s2,8($out)
-	stw	$s3,12($out)
-	b	Ldec_done
-
-Ldec_unaligned:
-	subfic	$t0,$inp,4096
-	subfic	$t1,$out,4096
-	andi.	$t0,$t0,4096-16
-	beq	Ldec_xpage
-	andi.	$t1,$t1,4096-16
-	bne	Ldec_unaligned_ok
-
-Ldec_xpage:
-	lbz	$acc00,0($inp)
-	lbz	$acc01,1($inp)
-	lbz	$acc02,2($inp)
-	lbz	$s0,3($inp)
-	lbz	$acc04,4($inp)
-	lbz	$acc05,5($inp)
-	lbz	$acc06,6($inp)
-	lbz	$s1,7($inp)
-	lbz	$acc08,8($inp)
-	lbz	$acc09,9($inp)
-	lbz	$acc10,10($inp)
-	insrwi	$s0,$acc00,8,0
-	lbz	$s2,11($inp)
-	insrwi	$s1,$acc04,8,0
-	lbz	$acc12,12($inp)
-	insrwi	$s0,$acc01,8,8
-	lbz	$acc13,13($inp)
-	insrwi	$s1,$acc05,8,8
-	lbz	$acc14,14($inp)
-	insrwi	$s0,$acc02,8,16
-	lbz	$s3,15($inp)
-	insrwi	$s1,$acc06,8,16
-	insrwi	$s2,$acc08,8,0
-	insrwi	$s3,$acc12,8,0
-	insrwi	$s2,$acc09,8,8
-	insrwi	$s3,$acc13,8,8
-	insrwi	$s2,$acc10,8,16
-	insrwi	$s3,$acc14,8,16
-
-	bl	LAES_Td
-	bl	Lppc_AES_decrypt_compact
-
-	extrwi	$acc00,$s0,8,0
-	extrwi	$acc01,$s0,8,8
-	stb	$acc00,0($out)
-	extrwi	$acc02,$s0,8,16
-	stb	$acc01,1($out)
-	stb	$acc02,2($out)
-	extrwi	$acc04,$s1,8,0
-	stb	$s0,3($out)
-	extrwi	$acc05,$s1,8,8
-	stb	$acc04,4($out)
-	extrwi	$acc06,$s1,8,16
-	stb	$acc05,5($out)
-	stb	$acc06,6($out)
-	extrwi	$acc08,$s2,8,0
-	stb	$s1,7($out)
-	extrwi	$acc09,$s2,8,8
-	stb	$acc08,8($out)
-	extrwi	$acc10,$s2,8,16
-	stb	$acc09,9($out)
-	stb	$acc10,10($out)
-	extrwi	$acc12,$s3,8,0
-	stb	$s2,11($out)
-	extrwi	$acc13,$s3,8,8
-	stb	$acc12,12($out)
-	extrwi	$acc14,$s3,8,16
-	stb	$acc13,13($out)
-	stb	$acc14,14($out)
-	stb	$s3,15($out)
-
-Ldec_done:
-	$POP	r0,`$FRAME+$LRSAVE`($sp)
-	$POP	$toc,`$FRAME-$SIZE_T*20`($sp)
-	$POP	r13,`$FRAME-$SIZE_T*19`($sp)
-	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
-	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
-	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
-	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
-	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
-	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
-	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
-	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
-	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
-	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
-	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
-	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
-	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
-	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
-	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
-	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
-	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
-	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
-	mtlr	r0
-	addi	$sp,$sp,$FRAME
-	blr
-	.long	0
-	.byte	0,12,4,1,0x80,18,3,0
-	.long	0
-
-.align	5
-Lppc_AES_decrypt:
-	lwz	$acc00,240($key)
-	addi	$Tbl1,$Tbl0,3
-	lwz	$t0,0($key)
-	addi	$Tbl2,$Tbl0,2
-	lwz	$t1,4($key)
-	addi	$Tbl3,$Tbl0,1
-	lwz	$t2,8($key)
-	addi	$acc00,$acc00,-1
-	lwz	$t3,12($key)
-	addi	$key,$key,16
-	xor	$s0,$s0,$t0
-	xor	$s1,$s1,$t1
-	xor	$s2,$s2,$t2
-	xor	$s3,$s3,$t3
-	mtctr	$acc00
-.align	4
-Ldec_loop:
-	rlwinm	$acc00,$s0,`32-24+3`,21,28
-	rlwinm	$acc01,$s1,`32-24+3`,21,28
-	rlwinm	$acc02,$s2,`32-24+3`,21,28
-	rlwinm	$acc03,$s3,`32-24+3`,21,28
-	lwz	$t0,0($key)
-	rlwinm	$acc04,$s3,`32-16+3`,21,28
-	lwz	$t1,4($key)
-	rlwinm	$acc05,$s0,`32-16+3`,21,28
-	lwz	$t2,8($key)
-	rlwinm	$acc06,$s1,`32-16+3`,21,28
-	lwz	$t3,12($key)
-	rlwinm	$acc07,$s2,`32-16+3`,21,28
-	lwzx	$acc00,$Tbl0,$acc00
-	rlwinm	$acc08,$s2,`32-8+3`,21,28
-	lwzx	$acc01,$Tbl0,$acc01
-	rlwinm	$acc09,$s3,`32-8+3`,21,28
-	lwzx	$acc02,$Tbl0,$acc02
-	rlwinm	$acc10,$s0,`32-8+3`,21,28
-	lwzx	$acc03,$Tbl0,$acc03
-	rlwinm	$acc11,$s1,`32-8+3`,21,28
-	lwzx	$acc04,$Tbl1,$acc04
-	rlwinm	$acc12,$s1,`0+3`,21,28
-	lwzx	$acc05,$Tbl1,$acc05
-	rlwinm	$acc13,$s2,`0+3`,21,28
-	lwzx	$acc06,$Tbl1,$acc06
-	rlwinm	$acc14,$s3,`0+3`,21,28
-	lwzx	$acc07,$Tbl1,$acc07
-	rlwinm	$acc15,$s0,`0+3`,21,28
-	lwzx	$acc08,$Tbl2,$acc08
-	xor	$t0,$t0,$acc00
-	lwzx	$acc09,$Tbl2,$acc09
-	xor	$t1,$t1,$acc01
-	lwzx	$acc10,$Tbl2,$acc10
-	xor	$t2,$t2,$acc02
-	lwzx	$acc11,$Tbl2,$acc11
-	xor	$t3,$t3,$acc03
-	lwzx	$acc12,$Tbl3,$acc12
-	xor	$t0,$t0,$acc04
-	lwzx	$acc13,$Tbl3,$acc13
-	xor	$t1,$t1,$acc05
-	lwzx	$acc14,$Tbl3,$acc14
-	xor	$t2,$t2,$acc06
-	lwzx	$acc15,$Tbl3,$acc15
-	xor	$t3,$t3,$acc07
-	xor	$t0,$t0,$acc08
-	xor	$t1,$t1,$acc09
-	xor	$t2,$t2,$acc10
-	xor	$t3,$t3,$acc11
-	xor	$s0,$t0,$acc12
-	xor	$s1,$t1,$acc13
-	xor	$s2,$t2,$acc14
-	xor	$s3,$t3,$acc15
-	addi	$key,$key,16
-	bdnz-	Ldec_loop
-
-	addi	$Tbl2,$Tbl0,2048
-	nop
-	lwz	$t0,0($key)
-	rlwinm	$acc00,$s0,`32-24`,24,31
-	lwz	$t1,4($key)
-	rlwinm	$acc01,$s1,`32-24`,24,31
-	lwz	$t2,8($key)
-	rlwinm	$acc02,$s2,`32-24`,24,31
-	lwz	$t3,12($key)
-	rlwinm	$acc03,$s3,`32-24`,24,31
-	lwz	$acc08,`2048+0`($Tbl0)	! prefetch Td4
-	rlwinm	$acc04,$s3,`32-16`,24,31
-	lwz	$acc09,`2048+32`($Tbl0)
-	rlwinm	$acc05,$s0,`32-16`,24,31
-	lwz	$acc10,`2048+64`($Tbl0)
-	lbzx	$acc00,$Tbl2,$acc00
-	lwz	$acc11,`2048+96`($Tbl0)
-	lbzx	$acc01,$Tbl2,$acc01
-	lwz	$acc12,`2048+128`($Tbl0)
-	rlwinm	$acc06,$s1,`32-16`,24,31
-	lwz	$acc13,`2048+160`($Tbl0)
-	rlwinm	$acc07,$s2,`32-16`,24,31
-	lwz	$acc14,`2048+192`($Tbl0)
-	rlwinm	$acc08,$s2,`32-8`,24,31
-	lwz	$acc15,`2048+224`($Tbl0)
-	rlwinm	$acc09,$s3,`32-8`,24,31
-	lbzx	$acc02,$Tbl2,$acc02
-	rlwinm	$acc10,$s0,`32-8`,24,31
-	lbzx	$acc03,$Tbl2,$acc03
-	rlwinm	$acc11,$s1,`32-8`,24,31
-	lbzx	$acc04,$Tbl2,$acc04
-	rlwinm	$acc12,$s1,`0`,24,31
-	lbzx	$acc05,$Tbl2,$acc05
-	rlwinm	$acc13,$s2,`0`,24,31
-	lbzx	$acc06,$Tbl2,$acc06
-	rlwinm	$acc14,$s3,`0`,24,31
-	lbzx	$acc07,$Tbl2,$acc07
-	rlwinm	$acc15,$s0,`0`,24,31
-	lbzx	$acc08,$Tbl2,$acc08
-	rlwinm	$s0,$acc00,24,0,7
-	lbzx	$acc09,$Tbl2,$acc09
-	rlwinm	$s1,$acc01,24,0,7
-	lbzx	$acc10,$Tbl2,$acc10
-	rlwinm	$s2,$acc02,24,0,7
-	lbzx	$acc11,$Tbl2,$acc11
-	rlwinm	$s3,$acc03,24,0,7
-	lbzx	$acc12,$Tbl2,$acc12
-	rlwimi	$s0,$acc04,16,8,15
-	lbzx	$acc13,$Tbl2,$acc13
-	rlwimi	$s1,$acc05,16,8,15
-	lbzx	$acc14,$Tbl2,$acc14
-	rlwimi	$s2,$acc06,16,8,15
-	lbzx	$acc15,$Tbl2,$acc15
-	rlwimi	$s3,$acc07,16,8,15
-	rlwimi	$s0,$acc08,8,16,23
-	rlwimi	$s1,$acc09,8,16,23
-	rlwimi	$s2,$acc10,8,16,23
-	rlwimi	$s3,$acc11,8,16,23
-	or	$s0,$s0,$acc12
-	or	$s1,$s1,$acc13
-	or	$s2,$s2,$acc14
-	or	$s3,$s3,$acc15
-	xor	$s0,$s0,$t0
-	xor	$s1,$s1,$t1
-	xor	$s2,$s2,$t2
-	xor	$s3,$s3,$t3
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,0,0
-
-.align	4
-Lppc_AES_decrypt_compact:
-	lwz	$acc00,240($key)
-	addi	$Tbl1,$Tbl0,2048
-	lwz	$t0,0($key)
-	lis	$mask80,0x8080
-	lwz	$t1,4($key)
-	lis	$mask1b,0x1b1b
-	lwz	$t2,8($key)
-	ori	$mask80,$mask80,0x8080
-	lwz	$t3,12($key)
-	ori	$mask1b,$mask1b,0x1b1b
-	addi	$key,$key,16
-___
-$code.=<<___ if ($SIZE_T==8);
-	insrdi	$mask80,$mask80,32,0
-	insrdi	$mask1b,$mask1b,32,0
-___
-$code.=<<___;
-	mtctr	$acc00
-.align	4
-Ldec_compact_loop:
-	xor	$s0,$s0,$t0
-	xor	$s1,$s1,$t1
-	rlwinm	$acc00,$s0,`32-24`,24,31
-	xor	$s2,$s2,$t2
-	rlwinm	$acc01,$s1,`32-24`,24,31
-	xor	$s3,$s3,$t3
-	rlwinm	$acc02,$s2,`32-24`,24,31
-	rlwinm	$acc03,$s3,`32-24`,24,31
-	rlwinm	$acc04,$s3,`32-16`,24,31
-	rlwinm	$acc05,$s0,`32-16`,24,31
-	rlwinm	$acc06,$s1,`32-16`,24,31
-	rlwinm	$acc07,$s2,`32-16`,24,31
-	lbzx	$acc00,$Tbl1,$acc00
-	rlwinm	$acc08,$s2,`32-8`,24,31
-	lbzx	$acc01,$Tbl1,$acc01
-	rlwinm	$acc09,$s3,`32-8`,24,31
-	lbzx	$acc02,$Tbl1,$acc02
-	rlwinm	$acc10,$s0,`32-8`,24,31
-	lbzx	$acc03,$Tbl1,$acc03
-	rlwinm	$acc11,$s1,`32-8`,24,31
-	lbzx	$acc04,$Tbl1,$acc04
-	rlwinm	$acc12,$s1,`0`,24,31
-	lbzx	$acc05,$Tbl1,$acc05
-	rlwinm	$acc13,$s2,`0`,24,31
-	lbzx	$acc06,$Tbl1,$acc06
-	rlwinm	$acc14,$s3,`0`,24,31
-	lbzx	$acc07,$Tbl1,$acc07
-	rlwinm	$acc15,$s0,`0`,24,31
-	lbzx	$acc08,$Tbl1,$acc08
-	rlwinm	$s0,$acc00,24,0,7
-	lbzx	$acc09,$Tbl1,$acc09
-	rlwinm	$s1,$acc01,24,0,7
-	lbzx	$acc10,$Tbl1,$acc10
-	rlwinm	$s2,$acc02,24,0,7
-	lbzx	$acc11,$Tbl1,$acc11
-	rlwinm	$s3,$acc03,24,0,7
-	lbzx	$acc12,$Tbl1,$acc12
-	rlwimi	$s0,$acc04,16,8,15
-	lbzx	$acc13,$Tbl1,$acc13
-	rlwimi	$s1,$acc05,16,8,15
-	lbzx	$acc14,$Tbl1,$acc14
-	rlwimi	$s2,$acc06,16,8,15
-	lbzx	$acc15,$Tbl1,$acc15
-	rlwimi	$s3,$acc07,16,8,15
-	rlwimi	$s0,$acc08,8,16,23
-	rlwimi	$s1,$acc09,8,16,23
-	rlwimi	$s2,$acc10,8,16,23
-	rlwimi	$s3,$acc11,8,16,23
-	lwz	$t0,0($key)
-	or	$s0,$s0,$acc12
-	lwz	$t1,4($key)
-	or	$s1,$s1,$acc13
-	lwz	$t2,8($key)
-	or	$s2,$s2,$acc14
-	lwz	$t3,12($key)
-	or	$s3,$s3,$acc15
-
-	addi	$key,$key,16
-	bdz	Ldec_compact_done
-___
-$code.=<<___ if ($SIZE_T==8);
-	# vectorized permutation improves decrypt performance by 10%
-	insrdi	$s0,$s1,32,0
-	insrdi	$s2,$s3,32,0
-
-	and	$acc00,$s0,$mask80	# r1=r0&0x80808080
-	and	$acc02,$s2,$mask80
-	srdi	$acc04,$acc00,7		# r1>>7
-	srdi	$acc06,$acc02,7
-	andc	$acc08,$s0,$mask80	# r0&0x7f7f7f7f
-	andc	$acc10,$s2,$mask80
-	sub	$acc00,$acc00,$acc04	# r1-(r1>>7)
-	sub	$acc02,$acc02,$acc06
-	add	$acc08,$acc08,$acc08	# (r0&0x7f7f7f7f)<<1
-	add	$acc10,$acc10,$acc10
-	and	$acc00,$acc00,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
-	and	$acc02,$acc02,$mask1b
-	xor	$acc00,$acc00,$acc08	# r2
-	xor	$acc02,$acc02,$acc10
-
-	and	$acc04,$acc00,$mask80	# r1=r2&0x80808080
-	and	$acc06,$acc02,$mask80
-	srdi	$acc08,$acc04,7		# r1>>7
-	srdi	$acc10,$acc06,7
-	andc	$acc12,$acc00,$mask80	# r2&0x7f7f7f7f
-	andc	$acc14,$acc02,$mask80
-	sub	$acc04,$acc04,$acc08	# r1-(r1>>7)
-	sub	$acc06,$acc06,$acc10
-	add	$acc12,$acc12,$acc12	# (r2&0x7f7f7f7f)<<1
-	add	$acc14,$acc14,$acc14
-	and	$acc04,$acc04,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
-	and	$acc06,$acc06,$mask1b
-	xor	$acc04,$acc04,$acc12	# r4
-	xor	$acc06,$acc06,$acc14
-
-	and	$acc08,$acc04,$mask80	# r1=r4&0x80808080
-	and	$acc10,$acc06,$mask80
-	srdi	$acc12,$acc08,7		# r1>>7
-	srdi	$acc14,$acc10,7
-	sub	$acc08,$acc08,$acc12	# r1-(r1>>7)
-	sub	$acc10,$acc10,$acc14
-	andc	$acc12,$acc04,$mask80	# r4&0x7f7f7f7f
-	andc	$acc14,$acc06,$mask80
-	add	$acc12,$acc12,$acc12	# (r4&0x7f7f7f7f)<<1
-	add	$acc14,$acc14,$acc14
-	and	$acc08,$acc08,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
-	and	$acc10,$acc10,$mask1b
-	xor	$acc08,$acc08,$acc12	# r8
-	xor	$acc10,$acc10,$acc14
-
-	xor	$acc00,$acc00,$s0	# r2^r0
-	xor	$acc02,$acc02,$s2
-	xor	$acc04,$acc04,$s0	# r4^r0
-	xor	$acc06,$acc06,$s2
-
-	extrdi	$acc01,$acc00,32,0
-	extrdi	$acc03,$acc02,32,0
-	extrdi	$acc05,$acc04,32,0
-	extrdi	$acc07,$acc06,32,0
-	extrdi	$acc09,$acc08,32,0
-	extrdi	$acc11,$acc10,32,0
-___
-$code.=<<___ if ($SIZE_T==4);
-	and	$acc00,$s0,$mask80	# r1=r0&0x80808080
-	and	$acc01,$s1,$mask80
-	and	$acc02,$s2,$mask80
-	and	$acc03,$s3,$mask80
-	srwi	$acc04,$acc00,7		# r1>>7
-	andc	$acc08,$s0,$mask80	# r0&0x7f7f7f7f
-	srwi	$acc05,$acc01,7
-	andc	$acc09,$s1,$mask80
-	srwi	$acc06,$acc02,7
-	andc	$acc10,$s2,$mask80
-	srwi	$acc07,$acc03,7
-	andc	$acc11,$s3,$mask80
-	sub	$acc00,$acc00,$acc04	# r1-(r1>>7)
-	sub	$acc01,$acc01,$acc05
-	sub	$acc02,$acc02,$acc06
-	sub	$acc03,$acc03,$acc07
-	add	$acc08,$acc08,$acc08	# (r0&0x7f7f7f7f)<<1
-	add	$acc09,$acc09,$acc09
-	add	$acc10,$acc10,$acc10
-	add	$acc11,$acc11,$acc11
-	and	$acc00,$acc00,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
-	and	$acc01,$acc01,$mask1b
-	and	$acc02,$acc02,$mask1b
-	and	$acc03,$acc03,$mask1b
-	xor	$acc00,$acc00,$acc08	# r2
-	xor	$acc01,$acc01,$acc09
-	xor	$acc02,$acc02,$acc10
-	xor	$acc03,$acc03,$acc11
-
-	and	$acc04,$acc00,$mask80	# r1=r2&0x80808080
-	and	$acc05,$acc01,$mask80
-	and	$acc06,$acc02,$mask80
-	and	$acc07,$acc03,$mask80
-	srwi	$acc08,$acc04,7		# r1>>7
-	andc	$acc12,$acc00,$mask80	# r2&0x7f7f7f7f
-	srwi	$acc09,$acc05,7
-	andc	$acc13,$acc01,$mask80
-	srwi	$acc10,$acc06,7
-	andc	$acc14,$acc02,$mask80
-	srwi	$acc11,$acc07,7
-	andc	$acc15,$acc03,$mask80
-	sub	$acc04,$acc04,$acc08	# r1-(r1>>7)
-	sub	$acc05,$acc05,$acc09
-	sub	$acc06,$acc06,$acc10
-	sub	$acc07,$acc07,$acc11
-	add	$acc12,$acc12,$acc12	# (r2&0x7f7f7f7f)<<1
-	add	$acc13,$acc13,$acc13
-	add	$acc14,$acc14,$acc14
-	add	$acc15,$acc15,$acc15
-	and	$acc04,$acc04,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
-	and	$acc05,$acc05,$mask1b
-	and	$acc06,$acc06,$mask1b
-	and	$acc07,$acc07,$mask1b
-	xor	$acc04,$acc04,$acc12	# r4
-	xor	$acc05,$acc05,$acc13
-	xor	$acc06,$acc06,$acc14
-	xor	$acc07,$acc07,$acc15
-
-	and	$acc08,$acc04,$mask80	# r1=r4&0x80808080
-	and	$acc09,$acc05,$mask80
-	srwi	$acc12,$acc08,7		# r1>>7
-	and	$acc10,$acc06,$mask80
-	srwi	$acc13,$acc09,7
-	and	$acc11,$acc07,$mask80
-	srwi	$acc14,$acc10,7
-	sub	$acc08,$acc08,$acc12	# r1-(r1>>7)
-	srwi	$acc15,$acc11,7
-	sub	$acc09,$acc09,$acc13
-	sub	$acc10,$acc10,$acc14
-	sub	$acc11,$acc11,$acc15
-	andc	$acc12,$acc04,$mask80	# r4&0x7f7f7f7f
-	andc	$acc13,$acc05,$mask80
-	andc	$acc14,$acc06,$mask80
-	andc	$acc15,$acc07,$mask80
-	add	$acc12,$acc12,$acc12	# (r4&0x7f7f7f7f)<<1
-	add	$acc13,$acc13,$acc13
-	add	$acc14,$acc14,$acc14
-	add	$acc15,$acc15,$acc15
-	and	$acc08,$acc08,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
-	and	$acc09,$acc09,$mask1b
-	and	$acc10,$acc10,$mask1b
-	and	$acc11,$acc11,$mask1b
-	xor	$acc08,$acc08,$acc12	# r8
-	xor	$acc09,$acc09,$acc13
-	xor	$acc10,$acc10,$acc14
-	xor	$acc11,$acc11,$acc15
-
-	xor	$acc00,$acc00,$s0	# r2^r0
-	xor	$acc01,$acc01,$s1
-	xor	$acc02,$acc02,$s2
-	xor	$acc03,$acc03,$s3
-	xor	$acc04,$acc04,$s0	# r4^r0
-	xor	$acc05,$acc05,$s1
-	xor	$acc06,$acc06,$s2
-	xor	$acc07,$acc07,$s3
-___
-$code.=<<___;
-	rotrwi	$s0,$s0,8		# = ROTATE(r0,8)
-	rotrwi	$s1,$s1,8
-	xor	$s0,$s0,$acc00		# ^= r2^r0
-	rotrwi	$s2,$s2,8
-	xor	$s1,$s1,$acc01
-	rotrwi	$s3,$s3,8
-	xor	$s2,$s2,$acc02
-	xor	$s3,$s3,$acc03
-	xor	$acc00,$acc00,$acc08
-	xor	$acc01,$acc01,$acc09
-	xor	$acc02,$acc02,$acc10
-	xor	$acc03,$acc03,$acc11
-	xor	$s0,$s0,$acc04		# ^= r4^r0
-	rotrwi	$acc00,$acc00,24
-	xor	$s1,$s1,$acc05
-	rotrwi	$acc01,$acc01,24
-	xor	$s2,$s2,$acc06
-	rotrwi	$acc02,$acc02,24
-	xor	$s3,$s3,$acc07
-	rotrwi	$acc03,$acc03,24
-	xor	$acc04,$acc04,$acc08
-	xor	$acc05,$acc05,$acc09
-	xor	$acc06,$acc06,$acc10
-	xor	$acc07,$acc07,$acc11
-	xor	$s0,$s0,$acc08		# ^= r8 [^((r4^r0)^(r2^r0)=r4^r2)]
-	rotrwi	$acc04,$acc04,16
-	xor	$s1,$s1,$acc09
-	rotrwi	$acc05,$acc05,16
-	xor	$s2,$s2,$acc10
-	rotrwi	$acc06,$acc06,16
-	xor	$s3,$s3,$acc11
-	rotrwi	$acc07,$acc07,16
-	xor	$s0,$s0,$acc00		# ^= ROTATE(r8^r2^r0,24)
-	rotrwi	$acc08,$acc08,8
-	xor	$s1,$s1,$acc01
-	rotrwi	$acc09,$acc09,8
-	xor	$s2,$s2,$acc02
-	rotrwi	$acc10,$acc10,8
-	xor	$s3,$s3,$acc03
-	rotrwi	$acc11,$acc11,8
-	xor	$s0,$s0,$acc04		# ^= ROTATE(r8^r4^r0,16)
-	xor	$s1,$s1,$acc05
-	xor	$s2,$s2,$acc06
-	xor	$s3,$s3,$acc07
-	xor	$s0,$s0,$acc08		# ^= ROTATE(r8,8)	
-	xor	$s1,$s1,$acc09	
-	xor	$s2,$s2,$acc10	
-	xor	$s3,$s3,$acc11	
-
-	b	Ldec_compact_loop
-.align	4
-Ldec_compact_done:
-	xor	$s0,$s0,$t0
-	xor	$s1,$s1,$t1
-	xor	$s2,$s2,$t2
-	xor	$s3,$s3,$t3
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,0,0
-
-.asciz	"AES for PPC, CRYPTOGAMS by <appro\@openssl.org>"
-.align	7
-___
-
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
-close STDOUT;

drivers/builtin_openssl2/crypto/aes/asm/aes-s390x.pl (+ 0, - 2237)

@@ -1,2237 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# AES for s390x.
-
-# April 2007.
-#
-# Software performance improvement over gcc-generated code is ~70% and
-# in absolute terms is ~73 cycles per byte processed with a 128-bit key.
-# You're likely to exclaim "why so slow?" Keep in mind that z-CPUs are
-# *strictly* in-order and an issued instruction [in this case the load
-# of a value from memory is critical] has to complete before execution
-# flow proceeds. S-boxes are compressed to 2KB[+256B].
-#
-# As for hardware acceleration support: it's basically a "teaser," as
-# it can and should be improved in several ways. Most notably, support
-# for CBC is not utilized, nor are multiple blocks ever processed.
-# Then the software key schedule can be postponed till hardware support
-# detection... Performance improvement over assembler is reportedly
-# ~2.5x, but can reach >8x [naturally on larger chunks] if proper
-# support is implemented.
-
-# May 2007.
-#
-# Implement AES_set_[en|de]crypt_key. Key schedule setup is avoided
-# for 128-bit keys if hardware support is detected.
-
-# January 2009.
-#
-# Add support for hardware AES192/256 and reschedule instructions to
-# minimize/avoid Address Generation Interlock hazard and to favour
-# dual-issue z10 pipeline. This gave ~25% improvement on z10 and
-# almost 50% on z9. The gain is smaller on z10 because, being dual-
-# issue, z10 makes it impossible to eliminate the interlock condition:
-# the critical path is not long enough. Yet it spends ~24 cycles per
-# byte processed with a 128-bit key.
-#
-# Unlike the previous version, hardware support detection takes place
-# only at the moment of key schedule setup, which is denoted in
-# key->rounds. This is done because deferred key setup can't be made
-# MT-safe, at least not for keys longer than 128 bits.
-#
-# Add AES_cbc_encrypt, which gives an incredible performance improvement:
-# it was measured to be ~6.6x. That's less than the previously mentioned
-# 8x, because the software implementation was optimized.
-
-# May 2010.
-#
-# Add AES_ctr32_encrypt. If hardware-assisted, it provides up to 4.3x
-# performance improvement over the "generic" counter mode routine that
-# relies on the single-block, also hardware-assisted, AES_encrypt. "Up
-# to" refers to the fact that the exact throughput depends on the current
-# stack frame alignment within the 4KB page. In the worst case you get
-# ~75% of the maximum, but *on average* it is as much as ~98%, meaning
-# the worst case is unlikely: it's like hitting a ravine on a plateau.
-
-# November 2010.
-#
-# Adapt for -m31 build. If the kernel supports what's called the "highgprs"
-# feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit
-# instructions and achieve "64-bit" performance even in a 31-bit legacy
-# application context. The feature is not specific to any particular
-# processor, as long as it's a "z-CPU". The latter implies that the code
-# remains z/Architecture specific. On z990 it was measured to perform
-# 2x better than code generated by gcc 4.3.
-
-# December 2010.
-#
-# Add support for z196 "cipher message with counter" instruction.
-# Note, however, that it's disengaged because it was measured to
-# perform ~12% worse than vanilla km-based code...
-
-# February 2011.
-#
-# Add AES_xts_[en|de]crypt. This includes support for z196 km-xts-aes
-# instructions, which deliver ~70% improvement at 8KB block size over
-# vanilla km-based code, and 37% at a more modest 512-byte block size.
-
-$flavour = shift;
-
-if ($flavour =~ /3[12]/) {
-	$SIZE_T=4;
-	$g="";
-} else {
-	$SIZE_T=8;
-	$g="g";
-}
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-$softonly=0;	# allow hardware support
-
-$t0="%r0";	$mask="%r0";
-$t1="%r1";
-$t2="%r2";	$inp="%r2";
-$t3="%r3";	$out="%r3";	$bits="%r3";
-$key="%r4";
-$i1="%r5";
-$i2="%r6";
-$i3="%r7";
-$s0="%r8";
-$s1="%r9";
-$s2="%r10";
-$s3="%r11";
-$tbl="%r12";
-$rounds="%r13";
-$ra="%r14";
-$sp="%r15";
-
-$stdframe=16*$SIZE_T+4*8;
-
-sub _data_word()
-{ my $i;
-    while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
-}
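
_data_word emits every table word twice, and that doubling is what the byte-offset loads in the round function below exploit (e.g. `l $t1,1($t1,$tbl)` annotated as Te3): a 4-byte load at offset n within a doubled big-endian entry returns the entry rotated left by 8*n bits, so the single 2KB table serves as Te0 through Te3 and only the 256-byte Te4 is stored separately, matching the "2KB[+256B]" figure above. A sketch of the effect (rotated_load is a hypothetical name):

    sub rotated_load {
        my ($word, $n) = @_;                     # byte offset $n in 0..3
        my @b = unpack "C4", pack "N", $word;    # big-endian bytes of the entry
        my @d = (@b, @b);                        # the ".long x,x" doubling
        return unpack "N", pack "C4", @d[ $n .. $n + 3 ];
    }
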
-
-$code=<<___;
-.text
-
-.type	AES_Te,\@object
-.align	256
-AES_Te:
-___
-&_data_word(
-	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
-	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
-	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
-	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
-	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
-	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
-	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
-	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
-	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
-	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
-	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
-	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
-	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
-	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
-	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
-	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
-	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
-	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
-	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
-	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
-	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
-	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
-	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
-	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
-	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
-	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
-	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
-	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
-	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
-	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
-	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
-	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
-	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
-	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
-	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
-	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
-	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
-	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
-	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
-	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
-	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
-	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
-	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
-	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
-	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
-	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
-	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
-	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
-	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
-	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
-	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
-	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
-	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
-	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
-	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
-	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
-	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
-	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
-	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
-	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
-	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
-	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
-	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
-	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
-$code.=<<___;
-# Te4[256]
-.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-# rcon[]
-.long	0x01000000, 0x02000000, 0x04000000, 0x08000000
-.long	0x10000000, 0x20000000, 0x40000000, 0x80000000
-.long	0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
-.align	256
-.size	AES_Te,.-AES_Te
-
-# void AES_encrypt(const unsigned char *inp, unsigned char *out,
-# 		 const AES_KEY *key) {
-.globl	AES_encrypt
-.type	AES_encrypt,\@function
-AES_encrypt:
-___
-$code.=<<___ if (!$softonly);
-	l	%r0,240($key)
-	lhi	%r1,16
-	clr	%r0,%r1
-	jl	.Lesoft
-
-	la	%r1,0($key)
-	#la	%r2,0($inp)
-	la	%r4,0($out)
-	lghi	%r3,16		# single block length
-	.long	0xb92e0042	# km %r4,%r2
-	brc	1,.-4		# can this happen?
-	br	%r14
-.align	64
-.Lesoft:
-___
-$code.=<<___;
-	stm${g}	%r3,$ra,3*$SIZE_T($sp)
-
-	llgf	$s0,0($inp)
-	llgf	$s1,4($inp)
-	llgf	$s2,8($inp)
-	llgf	$s3,12($inp)
-
-	larl	$tbl,AES_Te
-	bras	$ra,_s390x_AES_encrypt
-
-	l${g}	$out,3*$SIZE_T($sp)
-	st	$s0,0($out)
-	st	$s1,4($out)
-	st	$s2,8($out)
-	st	$s3,12($out)
-
-	lm${g}	%r6,$ra,6*$SIZE_T($sp)
-	br	$ra
-.size	AES_encrypt,.-AES_encrypt
-
-.type   _s390x_AES_encrypt,\@function
-.align	16
-_s390x_AES_encrypt:
-	st${g}	$ra,15*$SIZE_T($sp)
-	x	$s0,0($key)
-	x	$s1,4($key)
-	x	$s2,8($key)
-	x	$s3,12($key)
-	l	$rounds,240($key)
-	llill	$mask,`0xff<<3`
-	aghi	$rounds,-1
-	j	.Lenc_loop
-.align	16
-.Lenc_loop:
-	sllg	$t1,$s0,`0+3`
-	srlg	$t2,$s0,`8-3`
-	srlg	$t3,$s0,`16-3`
-	srl	$s0,`24-3`
-	nr	$s0,$mask
-	ngr	$t1,$mask
-	nr	$t2,$mask
-	nr	$t3,$mask
-
-	srlg	$i1,$s1,`16-3`	# i0
-	sllg	$i2,$s1,`0+3`
-	srlg	$i3,$s1,`8-3`
-	srl	$s1,`24-3`
-	nr	$i1,$mask
-	nr	$s1,$mask
-	ngr	$i2,$mask
-	nr	$i3,$mask
-
-	l	$s0,0($s0,$tbl)	# Te0[s0>>24]
-	l	$t1,1($t1,$tbl)	# Te3[s0>>0]
-	l	$t2,2($t2,$tbl) # Te2[s0>>8]
-	l	$t3,3($t3,$tbl)	# Te1[s0>>16]
-
-	x	$s0,3($i1,$tbl)	# Te1[s1>>16]
-	l	$s1,0($s1,$tbl)	# Te0[s1>>24]
-	x	$t2,1($i2,$tbl)	# Te3[s1>>0]
-	x	$t3,2($i3,$tbl)	# Te2[s1>>8]
-
-	srlg	$i1,$s2,`8-3`	# i0
-	srlg	$i2,$s2,`16-3`	# i1
-	nr	$i1,$mask
-	nr	$i2,$mask
-	sllg	$i3,$s2,`0+3`
-	srl	$s2,`24-3`
-	nr	$s2,$mask
-	ngr	$i3,$mask
-
-	xr	$s1,$t1
-	srlg	$ra,$s3,`8-3`	# i1
-	sllg	$t1,$s3,`0+3`	# i0
-	nr	$ra,$mask
-	la	$key,16($key)
-	ngr	$t1,$mask
-
-	x	$s0,2($i1,$tbl)	# Te2[s2>>8]
-	x	$s1,3($i2,$tbl)	# Te1[s2>>16]
-	l	$s2,0($s2,$tbl)	# Te0[s2>>24]
-	x	$t3,1($i3,$tbl)	# Te3[s2>>0]
-
-	srlg	$i3,$s3,`16-3`	# i2
-	xr	$s2,$t2
-	srl	$s3,`24-3`
-	nr	$i3,$mask
-	nr	$s3,$mask
-
-	x	$s0,0($key)
-	x	$s1,4($key)
-	x	$s2,8($key)
-	x	$t3,12($key)
-
-	x	$s0,1($t1,$tbl)	# Te3[s3>>0]
-	x	$s1,2($ra,$tbl)	# Te2[s3>>8]
-	x	$s2,3($i3,$tbl)	# Te1[s3>>16]
-	l	$s3,0($s3,$tbl)	# Te0[s3>>24]
-	xr	$s3,$t3
-
-	brct	$rounds,.Lenc_loop
-	.align	16
-
-	sllg	$t1,$s0,`0+3`
-	srlg	$t2,$s0,`8-3`
-	ngr	$t1,$mask
-	srlg	$t3,$s0,`16-3`
-	srl	$s0,`24-3`
-	nr	$s0,$mask
-	nr	$t2,$mask
-	nr	$t3,$mask
-
-	srlg	$i1,$s1,`16-3`	# i0
-	sllg	$i2,$s1,`0+3`
-	ngr	$i2,$mask
-	srlg	$i3,$s1,`8-3`
-	srl	$s1,`24-3`
-	nr	$i1,$mask
-	nr	$s1,$mask
-	nr	$i3,$mask
-
-	llgc	$s0,2($s0,$tbl)	# Te4[s0>>24]
-	llgc	$t1,2($t1,$tbl)	# Te4[s0>>0]
-	sll	$s0,24
-	llgc	$t2,2($t2,$tbl)	# Te4[s0>>8]
-	llgc	$t3,2($t3,$tbl)	# Te4[s0>>16]
-	sll	$t2,8
-	sll	$t3,16
-
-	llgc	$i1,2($i1,$tbl)	# Te4[s1>>16]
-	llgc	$s1,2($s1,$tbl)	# Te4[s1>>24]
-	llgc	$i2,2($i2,$tbl)	# Te4[s1>>0]
-	llgc	$i3,2($i3,$tbl)	# Te4[s1>>8]
-	sll	$i1,16
-	sll	$s1,24
-	sll	$i3,8
-	or	$s0,$i1
-	or	$s1,$t1
-	or	$t2,$i2
-	or	$t3,$i3
-	
-	srlg	$i1,$s2,`8-3`	# i0
-	srlg	$i2,$s2,`16-3`	# i1
-	nr	$i1,$mask
-	nr	$i2,$mask
-	sllg	$i3,$s2,`0+3`
-	srl	$s2,`24-3`
-	ngr	$i3,$mask
-	nr	$s2,$mask
-
-	sllg	$t1,$s3,`0+3`	# i0
-	srlg	$ra,$s3,`8-3`	# i1
-	ngr	$t1,$mask
-
-	llgc	$i1,2($i1,$tbl)	# Te4[s2>>8]
-	llgc	$i2,2($i2,$tbl)	# Te4[s2>>16]
-	sll	$i1,8
-	llgc	$s2,2($s2,$tbl)	# Te4[s2>>24]
-	llgc	$i3,2($i3,$tbl)	# Te4[s2>>0]
-	sll	$i2,16
-	nr	$ra,$mask
-	sll	$s2,24
-	or	$s0,$i1
-	or	$s1,$i2
-	or	$s2,$t2
-	or	$t3,$i3
-
-	srlg	$i3,$s3,`16-3`	# i2
-	srl	$s3,`24-3`
-	nr	$i3,$mask
-	nr	$s3,$mask
-
-	l	$t0,16($key)
-	l	$t2,20($key)
-
-	llgc	$i1,2($t1,$tbl)	# Te4[s3>>0]
-	llgc	$i2,2($ra,$tbl)	# Te4[s3>>8]
-	llgc	$i3,2($i3,$tbl)	# Te4[s3>>16]
-	llgc	$s3,2($s3,$tbl)	# Te4[s3>>24]
-	sll	$i2,8
-	sll	$i3,16
-	sll	$s3,24
-	or	$s0,$i1
-	or	$s1,$i2
-	or	$s2,$i3
-	or	$s3,$t3
-
-	l${g}	$ra,15*$SIZE_T($sp)
-	xr	$s0,$t0
-	xr	$s1,$t2
-	x	$s2,24($key)
-	x	$s3,28($key)
-
-	br	$ra	
-.size	_s390x_AES_encrypt,.-_s390x_AES_encrypt
-___
-
-$code.=<<___;
-.type	AES_Td,\@object
-.align	256
-AES_Td:
-___
-&_data_word(
-	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
-	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
-	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
-	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
-	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
-	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
-	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
-	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
-	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
-	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
-	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
-	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
-	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
-	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
-	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
-	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
-	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
-	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
-	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
-	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
-	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
-	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
-	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
-	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
-	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
-	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
-	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
-	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
-	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
-	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
-	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
-	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
-	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
-	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
-	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
-	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
-	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
-	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
-	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
-	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
-	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
-	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
-	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
-	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
-	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
-	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
-	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
-	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
-	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
-	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
-	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
-	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
-	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
-	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
-	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
-	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
-	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
-	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
-	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
-	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
-	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
-	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
-	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
-	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
-$code.=<<___;
-# Td4[256]
-.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-.size	AES_Td,.-AES_Td
-
-# void AES_decrypt(const unsigned char *inp, unsigned char *out,
-# 		 const AES_KEY *key) {
-.globl	AES_decrypt
-.type	AES_decrypt,\@function
-AES_decrypt:
-___
-$code.=<<___ if (!$softonly);
-	l	%r0,240($key)
-	lhi	%r1,16
-	clr	%r0,%r1
-	jl	.Ldsoft
-
-	la	%r1,0($key)
-	#la	%r2,0($inp)
-	la	%r4,0($out)
-	lghi	%r3,16		# single block length
-	.long	0xb92e0042	# km %r4,%r2
-	brc	1,.-4		# can this happen?
-	br	%r14
-.align	64
-.Ldsoft:
-___
-$code.=<<___;
-	stm${g}	%r3,$ra,3*$SIZE_T($sp)
-
-	llgf	$s0,0($inp)
-	llgf	$s1,4($inp)
-	llgf	$s2,8($inp)
-	llgf	$s3,12($inp)
-
-	larl	$tbl,AES_Td
-	bras	$ra,_s390x_AES_decrypt
-
-	l${g}	$out,3*$SIZE_T($sp)
-	st	$s0,0($out)
-	st	$s1,4($out)
-	st	$s2,8($out)
-	st	$s3,12($out)
-
-	lm${g}	%r6,$ra,6*$SIZE_T($sp)
-	br	$ra
-.size	AES_decrypt,.-AES_decrypt
-
-.type   _s390x_AES_decrypt,\@function
-.align	16
-_s390x_AES_decrypt:
-	st${g}	$ra,15*$SIZE_T($sp)
-	x	$s0,0($key)
-	x	$s1,4($key)
-	x	$s2,8($key)
-	x	$s3,12($key)
-	l	$rounds,240($key)
-	llill	$mask,`0xff<<3`
-	aghi	$rounds,-1
-	j	.Ldec_loop
-.align	16
-.Ldec_loop:
-	srlg	$t1,$s0,`16-3`
-	srlg	$t2,$s0,`8-3`
-	sllg	$t3,$s0,`0+3`
-	srl	$s0,`24-3`
-	nr	$s0,$mask
-	nr	$t1,$mask
-	nr	$t2,$mask
-	ngr	$t3,$mask
-
-	sllg	$i1,$s1,`0+3`	# i0
-	srlg	$i2,$s1,`16-3`
-	srlg	$i3,$s1,`8-3`
-	srl	$s1,`24-3`
-	ngr	$i1,$mask
-	nr	$s1,$mask
-	nr	$i2,$mask
-	nr	$i3,$mask
-
-	l	$s0,0($s0,$tbl)	# Td0[s0>>24]
-	l	$t1,3($t1,$tbl)	# Td1[s0>>16]
-	l	$t2,2($t2,$tbl)	# Td2[s0>>8]
-	l	$t3,1($t3,$tbl)	# Td3[s0>>0]
-
-	x	$s0,1($i1,$tbl)	# Td3[s1>>0]
-	l	$s1,0($s1,$tbl)	# Td0[s1>>24]
-	x	$t2,3($i2,$tbl)	# Td1[s1>>16]
-	x	$t3,2($i3,$tbl)	# Td2[s1>>8]
-
-	srlg	$i1,$s2,`8-3`	# i0
-	sllg	$i2,$s2,`0+3`	# i1
-	srlg	$i3,$s2,`16-3`
-	srl	$s2,`24-3`
-	nr	$i1,$mask
-	ngr	$i2,$mask
-	nr	$s2,$mask
-	nr	$i3,$mask
-
-	xr	$s1,$t1
-	srlg	$ra,$s3,`8-3`	# i1
-	srlg	$t1,$s3,`16-3`	# i0
-	nr	$ra,$mask
-	la	$key,16($key)
-	nr	$t1,$mask
-
-	x	$s0,2($i1,$tbl)	# Td2[s2>>8]
-	x	$s1,1($i2,$tbl)	# Td3[s2>>0]
-	l	$s2,0($s2,$tbl)	# Td0[s2>>24]
-	x	$t3,3($i3,$tbl)	# Td1[s2>>16]
-
-	sllg	$i3,$s3,`0+3`	# i2
-	srl	$s3,`24-3`
-	ngr	$i3,$mask
-	nr	$s3,$mask
-
-	xr	$s2,$t2
-	x	$s0,0($key)
-	x	$s1,4($key)
-	x	$s2,8($key)
-	x	$t3,12($key)
-
-	x	$s0,3($t1,$tbl)	# Td1[s3>>16]
-	x	$s1,2($ra,$tbl)	# Td2[s3>>8]
-	x	$s2,1($i3,$tbl)	# Td3[s3>>0]
-	l	$s3,0($s3,$tbl)	# Td0[s3>>24]
-	xr	$s3,$t3
-
-	brct	$rounds,.Ldec_loop
-	.align	16
-
-	l	$t1,`2048+0`($tbl)	# prefetch Td4
-	l	$t2,`2048+64`($tbl)
-	l	$t3,`2048+128`($tbl)
-	l	$i1,`2048+192`($tbl)
-	llill	$mask,0xff
-
-	srlg	$i3,$s0,24	# i0
-	srlg	$t1,$s0,16
-	srlg	$t2,$s0,8
-	nr	$s0,$mask	# i3
-	nr	$t1,$mask
-
-	srlg	$i1,$s1,24
-	nr	$t2,$mask
-	srlg	$i2,$s1,16
-	srlg	$ra,$s1,8
-	nr	$s1,$mask	# i0
-	nr	$i2,$mask
-	nr	$ra,$mask
-
-	llgc	$i3,2048($i3,$tbl)	# Td4[s0>>24]
-	llgc	$t1,2048($t1,$tbl)	# Td4[s0>>16]
-	llgc	$t2,2048($t2,$tbl)	# Td4[s0>>8]
-	sll	$t1,16
-	llgc	$t3,2048($s0,$tbl)	# Td4[s0>>0]
-	sllg	$s0,$i3,24
-	sll	$t2,8
-
-	llgc	$s1,2048($s1,$tbl)	# Td4[s1>>0]
-	llgc	$i1,2048($i1,$tbl)	# Td4[s1>>24]
-	llgc	$i2,2048($i2,$tbl)	# Td4[s1>>16]
-	sll	$i1,24
-	llgc	$i3,2048($ra,$tbl)	# Td4[s1>>8]
-	sll	$i2,16
-	sll	$i3,8
-	or	$s0,$s1
-	or	$t1,$i1
-	or	$t2,$i2
-	or	$t3,$i3
-
-	srlg	$i1,$s2,8	# i0
-	srlg	$i2,$s2,24
-	srlg	$i3,$s2,16
-	nr	$s2,$mask	# i1
-	nr	$i1,$mask
-	nr	$i3,$mask
-	llgc	$i1,2048($i1,$tbl)	# Td4[s2>>8]
-	llgc	$s1,2048($s2,$tbl)	# Td4[s2>>0]
-	llgc	$i2,2048($i2,$tbl)	# Td4[s2>>24]
-	llgc	$i3,2048($i3,$tbl)	# Td4[s2>>16]
-	sll	$i1,8
-	sll	$i2,24
-	or	$s0,$i1
-	sll	$i3,16
-	or	$t2,$i2
-	or	$t3,$i3
-
-	srlg	$i1,$s3,16	# i0
-	srlg	$i2,$s3,8	# i1
-	srlg	$i3,$s3,24
-	nr	$s3,$mask	# i2
-	nr	$i1,$mask
-	nr	$i2,$mask
-
-	l${g}	$ra,15*$SIZE_T($sp)
-	or	$s1,$t1
-	l	$t0,16($key)
-	l	$t1,20($key)
-
-	llgc	$i1,2048($i1,$tbl)	# Td4[s3>>16]
-	llgc	$i2,2048($i2,$tbl)	# Td4[s3>>8]
-	sll	$i1,16
-	llgc	$s2,2048($s3,$tbl)	# Td4[s3>>0]
-	llgc	$s3,2048($i3,$tbl)	# Td4[s3>>24]
-	sll	$i2,8
-	sll	$s3,24
-	or	$s0,$i1
-	or	$s1,$i2
-	or	$s2,$t2
-	or	$s3,$t3
-
-	xr	$s0,$t0
-	xr	$s1,$t1
-	x	$s2,24($key)
-	x	$s3,28($key)
-
-	br	$ra	
-.size	_s390x_AES_decrypt,.-_s390x_AES_decrypt
-___
-
-$code.=<<___;
-# void AES_set_encrypt_key(const unsigned char *in, int bits,
-# 		 AES_KEY *key) {
-.globl	private_AES_set_encrypt_key
-.type	private_AES_set_encrypt_key,\@function
-.align	16
-private_AES_set_encrypt_key:
-_s390x_AES_set_encrypt_key:
-	lghi	$t0,0
-	cl${g}r	$inp,$t0
-	je	.Lminus1
-	cl${g}r	$key,$t0
-	je	.Lminus1
-
-	lghi	$t0,128
-	clr	$bits,$t0
-	je	.Lproceed
-	lghi	$t0,192
-	clr	$bits,$t0
-	je	.Lproceed
-	lghi	$t0,256
-	clr	$bits,$t0
-	je	.Lproceed
-	lghi	%r2,-2
-	br	%r14
-
-.align	16
-.Lproceed:
-___
-$code.=<<___ if (!$softonly);
-	# convert bits to km code, [128,192,256]->[18,19,20]
-	lhi	%r5,-128
-	lhi	%r0,18
-	ar	%r5,$bits
-	srl	%r5,6
-	ar	%r5,%r0
-
-	larl	%r1,OPENSSL_s390xcap_P
-	lg	%r0,0(%r1)
-	tmhl	%r0,0x4000	# check for message-security assist
-	jz	.Lekey_internal
-
-	lghi	%r0,0		# query capability vector
-	la	%r1,16($sp)
-	.long	0xb92f0042	# kmc %r4,%r2
-
-	llihh	%r1,0x8000
-	srlg	%r1,%r1,0(%r5)
-	ng	%r1,16($sp)
-	jz	.Lekey_internal
-
-	lmg	%r0,%r1,0($inp)	# just copy 128 bits...
-	stmg	%r0,%r1,0($key)
-	lhi	%r0,192
-	cr	$bits,%r0
-	jl	1f
-	lg	%r1,16($inp)
-	stg	%r1,16($key)
-	je	1f
-	lg	%r1,24($inp)
-	stg	%r1,24($key)
-1:	st	$bits,236($key)	# save bits [for debugging purposes]
-	lgr	$t0,%r5
-	st	%r5,240($key)	# save km code
-	lghi	%r2,0
-	br	%r14
-___
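
The .Lproceed path maps the key size to a z/Architecture KM function code with (bits-128)/64 + 18, i.e. 18, 19 and 20 for AES-128, AES-192 and AES-256, then issues the query function (code 0) and tests the matching bit of the returned capability vector before trusting the hardware path. The mapping, restated as a small Perl sketch:

    my %km_code = map { $_ => 18 + (($_ - 128) >> 6) } (128, 192, 256);
    # yields (128 => 18, 192 => 19, 256 => 20)
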
-$code.=<<___;
-.align	16
-.Lekey_internal:
-	stm${g}	%r4,%r13,4*$SIZE_T($sp)	# all non-volatile regs and $key
-
-	larl	$tbl,AES_Te+2048
-
-	llgf	$s0,0($inp)
-	llgf	$s1,4($inp)
-	llgf	$s2,8($inp)
-	llgf	$s3,12($inp)
-	st	$s0,0($key)
-	st	$s1,4($key)
-	st	$s2,8($key)
-	st	$s3,12($key)
-	lghi	$t0,128
-	cr	$bits,$t0
-	jne	.Lnot128
-
-	llill	$mask,0xff
-	lghi	$t3,0			# i=0
-	lghi	$rounds,10
-	st	$rounds,240($key)
-
-	llgfr	$t2,$s3			# temp=rk[3]
-	srlg	$i1,$s3,8
-	srlg	$i2,$s3,16
-	srlg	$i3,$s3,24
-	nr	$t2,$mask
-	nr	$i1,$mask
-	nr	$i2,$mask
-
-.align	16
-.L128_loop:
-	la	$t2,0($t2,$tbl)
-	la	$i1,0($i1,$tbl)
-	la	$i2,0($i2,$tbl)
-	la	$i3,0($i3,$tbl)
-	icm	$t2,2,0($t2)		# Te4[rk[3]>>0]<<8
-	icm	$t2,4,0($i1)		# Te4[rk[3]>>8]<<16
-	icm	$t2,8,0($i2)		# Te4[rk[3]>>16]<<24
-	icm	$t2,1,0($i3)		# Te4[rk[3]>>24]
-	x	$t2,256($t3,$tbl)	# rcon[i]
-	xr	$s0,$t2			# rk[4]=rk[0]^...
-	xr	$s1,$s0			# rk[5]=rk[1]^rk[4]
-	xr	$s2,$s1			# rk[6]=rk[2]^rk[5]
-	xr	$s3,$s2			# rk[7]=rk[3]^rk[6]
-
-	llgfr	$t2,$s3			# temp=rk[3]
-	srlg	$i1,$s3,8
-	srlg	$i2,$s3,16
-	nr	$t2,$mask
-	nr	$i1,$mask
-	srlg	$i3,$s3,24
-	nr	$i2,$mask
-
-	st	$s0,16($key)
-	st	$s1,20($key)
-	st	$s2,24($key)
-	st	$s3,28($key)
-	la	$key,16($key)		# key+=4
-	la	$t3,4($t3)		# i++
-	brct	$rounds,.L128_loop
-	lghi	$t0,10
-	lghi	%r2,0
-	lm${g}	%r4,%r13,4*$SIZE_T($sp)
-	br	$ra
-
-.align	16
-.Lnot128:
-	llgf	$t0,16($inp)
-	llgf	$t1,20($inp)
-	st	$t0,16($key)
-	st	$t1,20($key)
-	lghi	$t0,192
-	cr	$bits,$t0
-	jne	.Lnot192
-
-	llill	$mask,0xff
-	lghi	$t3,0			# i=0
-	lghi	$rounds,12
-	st	$rounds,240($key)
-	lghi	$rounds,8
-
-	srlg	$i1,$t1,8
-	srlg	$i2,$t1,16
-	srlg	$i3,$t1,24
-	nr	$t1,$mask
-	nr	$i1,$mask
-	nr	$i2,$mask
-
-.align	16
-.L192_loop:
-	la	$t1,0($t1,$tbl)
-	la	$i1,0($i1,$tbl)
-	la	$i2,0($i2,$tbl)
-	la	$i3,0($i3,$tbl)
-	icm	$t1,2,0($t1)		# Te4[rk[5]>>0]<<8
-	icm	$t1,4,0($i1)		# Te4[rk[5]>>8]<<16
-	icm	$t1,8,0($i2)		# Te4[rk[5]>>16]<<24
-	icm	$t1,1,0($i3)		# Te4[rk[5]>>24]
-	x	$t1,256($t3,$tbl)	# rcon[i]
-	xr	$s0,$t1			# rk[6]=rk[0]^...
-	xr	$s1,$s0			# rk[7]=rk[1]^rk[6]
-	xr	$s2,$s1			# rk[8]=rk[2]^rk[7]
-	xr	$s3,$s2			# rk[9]=rk[3]^rk[8]
-
-	st	$s0,24($key)
-	st	$s1,28($key)
-	st	$s2,32($key)
-	st	$s3,36($key)
-	brct	$rounds,.L192_continue
-	lghi	$t0,12
-	lghi	%r2,0
-	lm${g}	%r4,%r13,4*$SIZE_T($sp)
-	br	$ra
-
-.align	16
-.L192_continue:
-	lgr	$t1,$s3
-	x	$t1,16($key)		# rk[10]=rk[4]^rk[9]
-	st	$t1,40($key)
-	x	$t1,20($key)		# rk[11]=rk[5]^rk[10]
-	st	$t1,44($key)
-
-	srlg	$i1,$t1,8
-	srlg	$i2,$t1,16
-	srlg	$i3,$t1,24
-	nr	$t1,$mask
-	nr	$i1,$mask
-	nr	$i2,$mask
-
-	la	$key,24($key)		# key+=6
-	la	$t3,4($t3)		# i++
-	j	.L192_loop
-
-.align	16
-.Lnot192:
-	llgf	$t0,24($inp)
-	llgf	$t1,28($inp)
-	st	$t0,24($key)
-	st	$t1,28($key)
-	llill	$mask,0xff
-	lghi	$t3,0			# i=0
-	lghi	$rounds,14
-	st	$rounds,240($key)
-	lghi	$rounds,7
-
-	srlg	$i1,$t1,8
-	srlg	$i2,$t1,16
-	srlg	$i3,$t1,24
-	nr	$t1,$mask
-	nr	$i1,$mask
-	nr	$i2,$mask
-
-.align	16
-.L256_loop:
-	la	$t1,0($t1,$tbl)
-	la	$i1,0($i1,$tbl)
-	la	$i2,0($i2,$tbl)
-	la	$i3,0($i3,$tbl)
-	icm	$t1,2,0($t1)		# Te4[rk[7]>>0]<<8
-	icm	$t1,4,0($i1)		# Te4[rk[7]>>8]<<16
-	icm	$t1,8,0($i2)		# Te4[rk[7]>>16]<<24
-	icm	$t1,1,0($i3)		# Te4[rk[7]>>24]
-	x	$t1,256($t3,$tbl)	# rcon[i]
-	xr	$s0,$t1			# rk[8]=rk[0]^...
-	xr	$s1,$s0			# rk[9]=rk[1]^rk[8]
-	xr	$s2,$s1			# rk[10]=rk[2]^rk[9]
-	xr	$s3,$s2			# rk[11]=rk[3]^rk[10]
-	st	$s0,32($key)
-	st	$s1,36($key)
-	st	$s2,40($key)
-	st	$s3,44($key)
-	brct	$rounds,.L256_continue
-	lghi	$t0,14
-	lghi	%r2,0
-	lm${g}	%r4,%r13,4*$SIZE_T($sp)
-	br	$ra
-
-.align	16
-.L256_continue:
-	lgr	$t1,$s3			# temp=rk[11]
-	srlg	$i1,$s3,8
-	srlg	$i2,$s3,16
-	srlg	$i3,$s3,24
-	nr	$t1,$mask
-	nr	$i1,$mask
-	nr	$i2,$mask
-	la	$t1,0($t1,$tbl)
-	la	$i1,0($i1,$tbl)
-	la	$i2,0($i2,$tbl)
-	la	$i3,0($i3,$tbl)
-	llgc	$t1,0($t1)		# Te4[rk[11]>>0]
-	icm	$t1,2,0($i1)		# Te4[rk[11]>>8]<<8
-	icm	$t1,4,0($i2)		# Te4[rk[11]>>16]<<16
-	icm	$t1,8,0($i3)		# Te4[rk[11]>>24]<<24
-	x	$t1,16($key)		# rk[12]=rk[4]^...
-	st	$t1,48($key)
-	x	$t1,20($key)		# rk[13]=rk[5]^rk[12]
-	st	$t1,52($key)
-	x	$t1,24($key)		# rk[14]=rk[6]^rk[13]
-	st	$t1,56($key)
-	x	$t1,28($key)		# rk[15]=rk[7]^rk[14]
-	st	$t1,60($key)
-
-	srlg	$i1,$t1,8
-	srlg	$i2,$t1,16
-	srlg	$i3,$t1,24
-	nr	$t1,$mask
-	nr	$i1,$mask
-	nr	$i2,$mask
-
-	la	$key,32($key)		# key+=8
-	la	$t3,4($t3)		# i++
-	j	.L256_loop
-
-.Lminus1:
-	lghi	%r2,-1
-	br	$ra
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
-
-# void AES_set_decrypt_key(const unsigned char *in, int bits,
-# 		 AES_KEY *key) {
-.globl	private_AES_set_decrypt_key
-.type	private_AES_set_decrypt_key,\@function
-.align	16
-private_AES_set_decrypt_key:
-	#st${g}	$key,4*$SIZE_T($sp)	# I rely on AES_set_encrypt_key to
-	st${g}	$ra,14*$SIZE_T($sp)	# save non-volatile registers and $key!
-	bras	$ra,_s390x_AES_set_encrypt_key
-	#l${g}	$key,4*$SIZE_T($sp)
-	l${g}	$ra,14*$SIZE_T($sp)
-	ltgr	%r2,%r2
-	bnzr	$ra
-___
-$code.=<<___ if (!$softonly);
-	#l	$t0,240($key)
-	lhi	$t1,16
-	cr	$t0,$t1
-	jl	.Lgo
-	oill	$t0,0x80	# set "decrypt" bit
-	st	$t0,240($key)
-	br	$ra
-___
-$code.=<<___;
-.align	16
-.Lgo:	lgr	$rounds,$t0	#llgf	$rounds,240($key)
-	la	$i1,0($key)
-	sllg	$i2,$rounds,4
-	la	$i2,0($i2,$key)
-	srl	$rounds,1
-	lghi	$t1,-16
-
-.align	16
-.Linv:	lmg	$s0,$s1,0($i1)
-	lmg	$s2,$s3,0($i2)
-	stmg	$s0,$s1,0($i2)
-	stmg	$s2,$s3,0($i1)
-	la	$i1,16($i1)
-	la	$i2,0($t1,$i2)
-	brct	$rounds,.Linv
-___
-$mask80=$i1;
-$mask1b=$i2;
-$maskfe=$i3;
-$code.=<<___;
-	llgf	$rounds,240($key)
-	aghi	$rounds,-1
-	sll	$rounds,2	# (rounds-1)*4
-	llilh	$mask80,0x8080
-	llilh	$mask1b,0x1b1b
-	llilh	$maskfe,0xfefe
-	oill	$mask80,0x8080
-	oill	$mask1b,0x1b1b
-	oill	$maskfe,0xfefe
-
-.align	16
-.Lmix:	l	$s0,16($key)	# tp1
-	lr	$s1,$s0
-	ngr	$s1,$mask80
-	srlg	$t1,$s1,7
-	slr	$s1,$t1
-	nr	$s1,$mask1b
-	sllg	$t1,$s0,1
-	nr	$t1,$maskfe
-	xr	$s1,$t1		# tp2
-
-	lr	$s2,$s1
-	ngr	$s2,$mask80
-	srlg	$t1,$s2,7
-	slr	$s2,$t1
-	nr	$s2,$mask1b
-	sllg	$t1,$s1,1
-	nr	$t1,$maskfe
-	xr	$s2,$t1		# tp4
-
-	lr	$s3,$s2
-	ngr	$s3,$mask80
-	srlg	$t1,$s3,7
-	slr	$s3,$t1
-	nr	$s3,$mask1b
-	sllg	$t1,$s2,1
-	nr	$t1,$maskfe
-	xr	$s3,$t1		# tp8
-
-	xr	$s1,$s0		# tp2^tp1
-	xr	$s2,$s0		# tp4^tp1
-	rll	$s0,$s0,24	# = ROTATE(tp1,8)
-	xr	$s2,$s3		# ^=tp8
-	xr	$s0,$s1		# ^=tp2^tp1
-	xr	$s1,$s3		# tp2^tp1^tp8
-	xr	$s0,$s2		# ^=tp4^tp1^tp8
-	rll	$s1,$s1,8
-	rll	$s2,$s2,16
-	xr	$s0,$s1		# ^= ROTATE(tp8^tp2^tp1,24)
-	rll	$s3,$s3,24
-	xr	$s0,$s2    	# ^= ROTATE(tp8^tp4^tp1,16)
-	xr	$s0,$s3		# ^= ROTATE(tp8,8)
-
-	st	$s0,16($key)
-	la	$key,4($key)
-	brct	$rounds,.Lmix
-
-	lm${g}	%r6,%r13,6*$SIZE_T($sp)# as was saved by AES_set_encrypt_key!
-	lghi	%r2,0
-	br	$ra
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
-___
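
private_AES_set_decrypt_key first swaps the round keys end for end (.Linv)
and then pushes every middle word through InvMixColumns (.Lmix), computing
xtime() on all four bytes of a word at once via the $mask80/$mask1b/$maskfe
constants. A hedged C sketch of that word-wise transform (left rotates,
helper names illustrative):

    #include <stdint.h>

    /* GF(2^8) doubling of four packed bytes; the masks are the values
     * loaded into $mask80/$maskfe/$mask1b before .Lmix. */
    static uint32_t xtime_w(uint32_t x)
    {
        uint32_t hi = x & 0x80808080u;           /* ngr with $mask80     */
        return ((x << 1) & 0xfefefefeu)          /* sllg, nr $maskfe     */
             ^ ((hi - (hi >> 7)) & 0x1b1b1b1bu); /* srlg/slr, nr $mask1b */
    }

    /* InvMixColumns on one round-key word, matching the tp1/tp2/tp4/tp8
     * and ROTATE comments in .Lmix. */
    static uint32_t inv_mix_word(uint32_t tp1)
    {
        uint32_t tp2 = xtime_w(tp1), tp4 = xtime_w(tp2), tp8 = xtime_w(tp4);
        uint32_t tp9 = tp8 ^ tp1, tpb = tp9 ^ tp2, tpd = tp9 ^ tp4;
        uint32_t tpe = tp8 ^ tp4 ^ tp2;
        return tpe
             ^ (tpb <<  8 | tpb >> 24)
             ^ (tpd << 16 | tpd >> 16)
             ^ (tp9 << 24 | tp9 >>  8);
    }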
-
-########################################################################
-# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
-#                     size_t length, const AES_KEY *key,
-#                     unsigned char *ivec, const int enc)
-{
-my $inp="%r2";
-my $out="%r4";	# length and out are swapped
-my $len="%r3";
-my $key="%r5";
-my $ivp="%r6";
-
-$code.=<<___;
-.globl	AES_cbc_encrypt
-.type	AES_cbc_encrypt,\@function
-.align	16
-AES_cbc_encrypt:
-	xgr	%r3,%r4		# flip %r3 and %r4, out and len
-	xgr	%r4,%r3
-	xgr	%r3,%r4
-___
-$code.=<<___ if (!$softonly);
-	lhi	%r0,16
-	cl	%r0,240($key)
-	jh	.Lcbc_software
-
-	lg	%r0,0($ivp)	# copy ivec
-	lg	%r1,8($ivp)
-	stmg	%r0,%r1,16($sp)
-	lmg	%r0,%r1,0($key)	# copy key, cover 256 bit
-	stmg	%r0,%r1,32($sp)
-	lmg	%r0,%r1,16($key)
-	stmg	%r0,%r1,48($sp)
-	l	%r0,240($key)	# load kmc code
-	lghi	$key,15		# res=len%16, len-=res;
-	ngr	$key,$len
-	sl${g}r	$len,$key
-	la	%r1,16($sp)	# parameter block - ivec || key
-	jz	.Lkmc_truncated
-	.long	0xb92f0042	# kmc %r4,%r2
-	brc	1,.-4		# pay attention to "partial completion"
-	ltr	$key,$key
-	jnz	.Lkmc_truncated
-.Lkmc_done:
-	lmg	%r0,%r1,16($sp)	# copy ivec to caller
-	stg	%r0,0($ivp)
-	stg	%r1,8($ivp)
-	br	$ra
-.align	16
-.Lkmc_truncated:
-	ahi	$key,-1		# it's the way it's encoded in mvc
-	tmll	%r0,0x80
-	jnz	.Lkmc_truncated_dec
-	lghi	%r1,0
-	stg	%r1,16*$SIZE_T($sp)
-	stg	%r1,16*$SIZE_T+8($sp)
-	bras	%r1,1f
-	mvc	16*$SIZE_T(1,$sp),0($inp)
-1:	ex	$key,0(%r1)
-	la	%r1,16($sp)	# restore parameter block
-	la	$inp,16*$SIZE_T($sp)
-	lghi	$len,16
-	.long	0xb92f0042	# kmc %r4,%r2
-	j	.Lkmc_done
-.align	16
-.Lkmc_truncated_dec:
-	st${g}	$out,4*$SIZE_T($sp)
-	la	$out,16*$SIZE_T($sp)
-	lghi	$len,16
-	.long	0xb92f0042	# kmc %r4,%r2
-	l${g}	$out,4*$SIZE_T($sp)
-	bras	%r1,2f
-	mvc	0(1,$out),16*$SIZE_T($sp)
-2:	ex	$key,0(%r1)
-	j	.Lkmc_done
-.align	16
-.Lcbc_software:
-___
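The hardware path above drives the CPACF kmc instruction (hand-assembled as
.long 0xb92f0042), with %r1 pointing at a parameter block and brc 1,.-4
retrying on partial completion. Judging from the copies performed above, the
block built at 16($sp) is laid out as below; this is a sketch inferred from
the code, not an authoritative CPACF definition:

    #include <stdint.h>

    /* Chaining value first, then the key (copied to cover 256 bits even
     * for shorter keys); %r0 carries the function code, with bit 0x80 set
     * for decryption as in .Lkmc_truncated_dec. */
    struct kmc_param_block {
        uint8_t iv[16];   /* from 0($ivp), written back on completion */
        uint8_t key[32];  /* from 0($key)                             */
    };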
-$code.=<<___;
-	stm${g}	$key,$ra,5*$SIZE_T($sp)
-	lhi	%r0,0
-	cl	%r0,`$stdframe+$SIZE_T-4`($sp)
-	je	.Lcbc_decrypt
-
-	larl	$tbl,AES_Te
-
-	llgf	$s0,0($ivp)
-	llgf	$s1,4($ivp)
-	llgf	$s2,8($ivp)
-	llgf	$s3,12($ivp)
-
-	lghi	$t0,16
-	sl${g}r	$len,$t0
-	brc	4,.Lcbc_enc_tail	# if borrow
-.Lcbc_enc_loop:
-	stm${g}	$inp,$out,2*$SIZE_T($sp)
-	x	$s0,0($inp)
-	x	$s1,4($inp)
-	x	$s2,8($inp)
-	x	$s3,12($inp)
-	lgr	%r4,$key
-
-	bras	$ra,_s390x_AES_encrypt
-
-	lm${g}	$inp,$key,2*$SIZE_T($sp)
-	st	$s0,0($out)
-	st	$s1,4($out)
-	st	$s2,8($out)
-	st	$s3,12($out)
-
-	la	$inp,16($inp)
-	la	$out,16($out)
-	lghi	$t0,16
-	lt${g}r	$len,$len
-	jz	.Lcbc_enc_done
-	sl${g}r	$len,$t0
-	brc	4,.Lcbc_enc_tail	# if borrow
-	j	.Lcbc_enc_loop
-.align	16
-.Lcbc_enc_done:
-	l${g}	$ivp,6*$SIZE_T($sp)
-	st	$s0,0($ivp)
-	st	$s1,4($ivp)	
-	st	$s2,8($ivp)
-	st	$s3,12($ivp)
-
-	lm${g}	%r7,$ra,7*$SIZE_T($sp)
-	br	$ra
-
-.align	16
-.Lcbc_enc_tail:
-	aghi	$len,15
-	lghi	$t0,0
-	stg	$t0,16*$SIZE_T($sp)
-	stg	$t0,16*$SIZE_T+8($sp)
-	bras	$t1,3f
-	mvc	16*$SIZE_T(1,$sp),0($inp)
-3:	ex	$len,0($t1)
-	lghi	$len,0
-	la	$inp,16*$SIZE_T($sp)
-	j	.Lcbc_enc_loop
-
-.align	16
-.Lcbc_decrypt:
-	larl	$tbl,AES_Td
-
-	lg	$t0,0($ivp)
-	lg	$t1,8($ivp)
-	stmg	$t0,$t1,16*$SIZE_T($sp)
-
-.Lcbc_dec_loop:
-	stm${g}	$inp,$out,2*$SIZE_T($sp)
-	llgf	$s0,0($inp)
-	llgf	$s1,4($inp)
-	llgf	$s2,8($inp)
-	llgf	$s3,12($inp)
-	lgr	%r4,$key
-
-	bras	$ra,_s390x_AES_decrypt
-
-	lm${g}	$inp,$key,2*$SIZE_T($sp)
-	sllg	$s0,$s0,32
-	sllg	$s2,$s2,32
-	lr	$s0,$s1
-	lr	$s2,$s3
-
-	lg	$t0,0($inp)
-	lg	$t1,8($inp)
-	xg	$s0,16*$SIZE_T($sp)
-	xg	$s2,16*$SIZE_T+8($sp)
-	lghi	$s1,16
-	sl${g}r	$len,$s1
-	brc	4,.Lcbc_dec_tail	# if borrow
-	brc	2,.Lcbc_dec_done	# if zero
-	stg	$s0,0($out)
-	stg	$s2,8($out)
-	stmg	$t0,$t1,16*$SIZE_T($sp)
-
-	la	$inp,16($inp)
-	la	$out,16($out)
-	j	.Lcbc_dec_loop
-
-.Lcbc_dec_done:
-	stg	$s0,0($out)
-	stg	$s2,8($out)
-.Lcbc_dec_exit:
-	lm${g}	%r6,$ra,6*$SIZE_T($sp)
-	stmg	$t0,$t1,0($ivp)
-
-	br	$ra
-
-.align	16
-.Lcbc_dec_tail:
-	aghi	$len,15
-	stg	$s0,16*$SIZE_T($sp)
-	stg	$s2,16*$SIZE_T+8($sp)
-	bras	$s1,4f
-	mvc	0(1,$out),16*$SIZE_T($sp)
-4:	ex	$len,0($s1)
-	j	.Lcbc_dec_exit
-.size	AES_cbc_encrypt,.-AES_cbc_encrypt
-___
-}
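For readers following the software fallback, .Lcbc_enc_loop is plain CBC
chaining. A minimal C sketch of the same flow, with the block cipher behind
a hypothetical callback:

    #include <stdint.h>
    #include <string.h>

    typedef void (*block_fn)(const uint8_t in[16], uint8_t out[16],
                             const void *key);

    static void cbc_encrypt_sketch(const uint8_t *in, uint8_t *out,
                                   size_t len, const void *key,
                                   uint8_t ivec[16], block_fn encrypt)
    {
        uint8_t iv[16];
        memcpy(iv, ivec, 16);
        while (len >= 16) {
            for (int i = 0; i < 16; i++)
                iv[i] ^= in[i];       /* x $s0..$s3,0..12($inp)      */
            encrypt(iv, iv, key);     /* bras $ra,_s390x_AES_encrypt */
            memcpy(out, iv, 16);      /* st $s0..$s3,0..12($out)     */
            in += 16; out += 16; len -= 16;
        }
        memcpy(ivec, iv, 16);         /* iv copied back to caller    */
    }

Short trailing blocks are handled by .Lcbc_enc_tail, which zero-pads the
input into the on-stack buffer before taking one more trip around the loop.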
-########################################################################
-# void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
-#                     size_t blocks, const AES_KEY *key,
-#                     const unsigned char *ivec)
-{
-my $inp="%r2";
-my $out="%r4";	# blocks and out are swapped
-my $len="%r3";
-my $key="%r5";	my $iv0="%r5";
-my $ivp="%r6";
-my $fp ="%r7";
-
-$code.=<<___;
-.globl	AES_ctr32_encrypt
-.type	AES_ctr32_encrypt,\@function
-.align	16
-AES_ctr32_encrypt:
-	xgr	%r3,%r4		# flip %r3 and %r4, $out and $len
-	xgr	%r4,%r3
-	xgr	%r3,%r4
-	llgfr	$len,$len	# safe in ctr32 subroutine even in 64-bit case
-___
-$code.=<<___ if (!$softonly);
-	l	%r0,240($key)
-	lhi	%r1,16
-	clr	%r0,%r1
-	jl	.Lctr32_software
-
-	stm${g}	%r6,$s3,6*$SIZE_T($sp)
-
-	slgr	$out,$inp
-	la	%r1,0($key)	# %r1 is permanent copy of $key
-	lg	$iv0,0($ivp)	# load ivec
-	lg	$ivp,8($ivp)
-
-	# prepare and allocate stack frame at the top of 4K page
-	# with 1K reserved for eventual signal handling
-	lghi	$s0,-1024-256-16# guarantee at least 256-bytes buffer
-	lghi	$s1,-4096
-	algr	$s0,$sp
-	lgr	$fp,$sp
-	ngr	$s0,$s1		# align at page boundary
-	slgr	$fp,$s0		# total buffer size
-	lgr	$s2,$sp
-	lghi	$s1,1024+16	# sl[g]fi is extended-immediate facility
-	slgr	$fp,$s1		# deduct reservation to get usable buffer size
-	# buffer size is at least 256 and at most 3072+256-16
-
-	la	$sp,1024($s0)	# alloca
-	srlg	$fp,$fp,4	# convert bytes to blocks, minimum 16
-	st${g}	$s2,0($sp)	# back-chain
-	st${g}	$fp,$SIZE_T($sp)
-
-	slgr	$len,$fp
-	brc	1,.Lctr32_hw_switch	# not zero, no borrow
-	algr	$fp,$len	# input is shorter than allocated buffer
-	lghi	$len,0
-	st${g}	$fp,$SIZE_T($sp)
-
-.Lctr32_hw_switch:
-___
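The frame setup above carves a scratch buffer out of the top of the current
4K page and keeps 1K in reserve for signal handling. The pointer arithmetic,
sketched in C (the later rounding of the block count is ignored here):

    #include <stdint.h>

    static void frame_math_sketch(uintptr_t sp)  /* stack grows down */
    {
        uintptr_t base  = (sp - (1024 + 256 + 16)) & ~(uintptr_t)4095;
        uintptr_t total = sp - base;             /* slgr $fp,$s0         */
        uintptr_t avail = total - (1024 + 16);   /* usable, at least 256 */
        uintptr_t newsp = base + 1024;           /* la $sp,1024($s0)     */
        (void)avail; (void)newsp;
    }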
-$code.=<<___ if (0);	######### kmctr code was measured to be ~12% slower
-	larl	$s0,OPENSSL_s390xcap_P
-	lg	$s0,8($s0)
-	tmhh	$s0,0x0004	# check for message_security-assist-4
-	jz	.Lctr32_km_loop
-
-	llgfr	$s0,%r0
-	lgr	$s1,%r1
-	lghi	%r0,0
-	la	%r1,16($sp)
-	.long	0xb92d2042	# kmctr %r4,%r2,%r2
-
-	llihh	%r0,0x8000	# check if kmctr supports the function code
-	srlg	%r0,%r0,0($s0)
-	ng	%r0,16($sp)
-	lgr	%r0,$s0
-	lgr	%r1,$s1
-	jz	.Lctr32_km_loop
-
-####### kmctr code
-	algr	$out,$inp	# restore $out
-	lgr	$s1,$len	# $s1 undertakes $len
-	j	.Lctr32_kmctr_loop
-.align	16
-.Lctr32_kmctr_loop:
-	la	$s2,16($sp)
-	lgr	$s3,$fp
-.Lctr32_kmctr_prepare:
-	stg	$iv0,0($s2)
-	stg	$ivp,8($s2)
-	la	$s2,16($s2)
-	ahi	$ivp,1		# 32-bit increment, preserves upper half
-	brct	$s3,.Lctr32_kmctr_prepare
-
-	#la	$inp,0($inp)	# inp
-	sllg	$len,$fp,4	# len
-	#la	$out,0($out)	# out
-	la	$s2,16($sp)	# iv
-	.long	0xb92da042	# kmctr $out,$s2,$inp
-	brc	1,.-4		# pay attention to "partial completion"
-
-	slgr	$s1,$fp
-	brc	1,.Lctr32_kmctr_loop	# not zero, no borrow
-	algr	$fp,$s1
-	lghi	$s1,0
-	brc	4+1,.Lctr32_kmctr_loop	# not zero
-
-	l${g}	$sp,0($sp)
-	lm${g}	%r6,$s3,6*$SIZE_T($sp)
-	br	$ra
-.align	16
-___
-$code.=<<___;
-.Lctr32_km_loop:
-	la	$s2,16($sp)
-	lgr	$s3,$fp
-.Lctr32_km_prepare:
-	stg	$iv0,0($s2)
-	stg	$ivp,8($s2)
-	la	$s2,16($s2)
-	ahi	$ivp,1		# 32-bit increment, preserves upper half
-	brct	$s3,.Lctr32_km_prepare
-
-	la	$s0,16($sp)	# inp
-	sllg	$s1,$fp,4	# len
-	la	$s2,16($sp)	# out
-	.long	0xb92e00a8	# km %r10,%r8
-	brc	1,.-4		# pay attention to "partial completion"
-
-	la	$s2,16($sp)
-	lgr	$s3,$fp
-	slgr	$s2,$inp
-.Lctr32_km_xor:
-	lg	$s0,0($inp)
-	lg	$s1,8($inp)
-	xg	$s0,0($s2,$inp)
-	xg	$s1,8($s2,$inp)
-	stg	$s0,0($out,$inp)
-	stg	$s1,8($out,$inp)
-	la	$inp,16($inp)
-	brct	$s3,.Lctr32_km_xor
-
-	slgr	$len,$fp
-	brc	1,.Lctr32_km_loop	# not zero, no borrow
-	algr	$fp,$len
-	lghi	$len,0
-	brc	4+1,.Lctr32_km_loop	# not zero
-
-	l${g}	$s0,0($sp)
-	l${g}	$s1,$SIZE_T($sp)
-	la	$s2,16($sp)
-.Lctr32_km_zap:
-	stg	$s0,0($s2)
-	stg	$s0,8($s2)
-	la	$s2,16($s2)
-	brct	$s1,.Lctr32_km_zap
-
-	la	$sp,0($s0)
-	lm${g}	%r6,$s3,6*$SIZE_T($sp)
-	br	$ra
-.align	16
-.Lctr32_software:
-___
-$code.=<<___;
-	stm${g}	$key,$ra,5*$SIZE_T($sp)
-	sl${g}r	$inp,$out
-	larl	$tbl,AES_Te
-	llgf	$t1,12($ivp)
-
-.Lctr32_loop:
-	stm${g}	$inp,$out,2*$SIZE_T($sp)
-	llgf	$s0,0($ivp)
-	llgf	$s1,4($ivp)
-	llgf	$s2,8($ivp)
-	lgr	$s3,$t1
-	st	$t1,16*$SIZE_T($sp)
-	lgr	%r4,$key
-
-	bras	$ra,_s390x_AES_encrypt
-
-	lm${g}	$inp,$ivp,2*$SIZE_T($sp)
-	llgf	$t1,16*$SIZE_T($sp)
-	x	$s0,0($inp,$out)
-	x	$s1,4($inp,$out)
-	x	$s2,8($inp,$out)
-	x	$s3,12($inp,$out)
-	stm	$s0,$s3,0($out)
-
-	la	$out,16($out)
-	ahi	$t1,1		# 32-bit increment
-	brct	$len,.Lctr32_loop
-
-	lm${g}	%r6,$ra,6*$SIZE_T($sp)
-	br	$ra
-.size	AES_ctr32_encrypt,.-AES_ctr32_encrypt
-___
-}
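As the "ahi ...,1" comments note, only the last 32-bit big-endian word of
the counter block is ever incremented; the upper 96 bits of the IV are
preserved. A C sketch of the equivalent ctr32 mode (hypothetical callback
again):

    #include <stdint.h>
    #include <string.h>

    typedef void (*block_fn)(const uint8_t in[16], uint8_t out[16],
                             const void *key);

    static void ctr32_sketch(const uint8_t *in, uint8_t *out, size_t blocks,
                             const void *key, const uint8_t ivec[16],
                             block_fn encrypt)
    {
        uint8_t ctr[16], ks[16];
        memcpy(ctr, ivec, 16);
        while (blocks--) {
            encrypt(ctr, ks, key);
            for (int i = 0; i < 16; i++)
                out[i] = in[i] ^ ks[i];
            /* 32-bit increment: bytes 0..11 of the IV never change */
            for (int i = 15; i >= 12 && ++ctr[i] == 0; i--)
                ;
            in += 16; out += 16;
        }
    }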
-
-########################################################################
-# void AES_xts_encrypt(const char *inp,char *out,size_t len,
-#	const AES_KEY *key1, const AES_KEY *key2,
-#	const unsigned char iv[16]);
-#
-{
-my $inp="%r2";
-my $out="%r4";	# len and out are swapped
-my $len="%r3";
-my $key1="%r5";	# $i1
-my $key2="%r6";	# $i2
-my $fp="%r7";	# $i3
-my $tweak=16*$SIZE_T+16;	# or $stdframe-16, bottom of the frame...
-
-$code.=<<___;
-.type	_s390x_xts_km,\@function
-.align	16
-_s390x_xts_km:
-___
-$code.=<<___ if(1);
-	llgfr	$s0,%r0			# put aside the function code
-	lghi	$s1,0x7f
-	nr	$s1,%r0
-	lghi	%r0,0			# query capability vector
-	la	%r1,$tweak-16($sp)
-	.long	0xb92e0042		# km %r4,%r2
-	llihh	%r1,0x8000
-	srlg	%r1,%r1,32($s1)		# check for 32+function code
-	ng	%r1,$tweak-16($sp)
-	lgr	%r0,$s0			# restore the function code
-	la	%r1,0($key1)		# restore $key1
-	jz	.Lxts_km_vanilla
-
-	lmg	$i2,$i3,$tweak($sp)	# put aside the tweak value
-	algr	$out,$inp
-
-	oill	%r0,32			# switch to xts function code
-	aghi	$s1,-18			#
-	sllg	$s1,$s1,3		# (function code - 18)*8, 0 or 16
-	la	%r1,$tweak-16($sp)
-	slgr	%r1,$s1			# parameter block position
-	lmg	$s0,$s3,0($key1)	# load 256 bits of key material,
-	stmg	$s0,$s3,0(%r1)		# and copy it to parameter block.
-					# yes, it contains junk and overlaps
-					# with the tweak in 128-bit case.
-					# it's done to avoid conditional
-					# branch.
-	stmg	$i2,$i3,$tweak($sp)	# "re-seat" the tweak value
-
-	.long	0xb92e0042		# km %r4,%r2
-	brc	1,.-4			# pay attention to "partial completion"
-
-	lrvg	$s0,$tweak+0($sp)	# load the last tweak
-	lrvg	$s1,$tweak+8($sp)
-	stmg	%r0,%r3,$tweak-32($sp)	# wipe copy of the key
-
-	nill	%r0,0xffdf		# switch back to original function code
-	la	%r1,0($key1)		# restore pointer to $key1
-	slgr	$out,$inp
-
-	llgc	$len,2*$SIZE_T-1($sp)
-	nill	$len,0x0f		# $len%=16
-	br	$ra
-	
-.align	16
-.Lxts_km_vanilla:
-___
-$code.=<<___;
-	# prepare and allocate stack frame at the top of 4K page
-	# with 1K reserved for eventual signal handling
-	lghi	$s0,-1024-256-16# guarantee at least 256-bytes buffer
-	lghi	$s1,-4096
-	algr	$s0,$sp
-	lgr	$fp,$sp
-	ngr	$s0,$s1		# align at page boundary
-	slgr	$fp,$s0		# total buffer size
-	lgr	$s2,$sp
-	lghi	$s1,1024+16	# sl[g]fi is extended-immediate facility
-	slgr	$fp,$s1		# deduct reservation to get usable buffer size
-	# buffer size is at least 256 and at most 3072+256-16
-
-	la	$sp,1024($s0)	# alloca
-	nill	$fp,0xfff0	# round to 16*n
-	st${g}	$s2,0($sp)	# back-chain
-	nill	$len,0xfff0	# redundant
-	st${g}	$fp,$SIZE_T($sp)
-
-	slgr	$len,$fp
-	brc	1,.Lxts_km_go	# not zero, no borrow
-	algr	$fp,$len	# input is shorter than allocated buffer
-	lghi	$len,0
-	st${g}	$fp,$SIZE_T($sp)
-
-.Lxts_km_go:
-	lrvg	$s0,$tweak+0($s2)	# load the tweak value in little-endian
-	lrvg	$s1,$tweak+8($s2)
-
-	la	$s2,16($sp)		# vector of ascending tweak values
-	slgr	$s2,$inp
-	srlg	$s3,$fp,4
-	j	.Lxts_km_start
-
-.Lxts_km_loop:
-	la	$s2,16($sp)
-	slgr	$s2,$inp
-	srlg	$s3,$fp,4
-.Lxts_km_prepare:
-	lghi	$i1,0x87
-	srag	$i2,$s1,63		# broadcast upper bit
-	ngr	$i1,$i2			# rem
-	algr	$s0,$s0
-	alcgr	$s1,$s1
-	xgr	$s0,$i1
-.Lxts_km_start:
-	lrvgr	$i1,$s0			# flip byte order
-	lrvgr	$i2,$s1
-	stg	$i1,0($s2,$inp)
-	stg	$i2,8($s2,$inp)
-	xg	$i1,0($inp)
-	xg	$i2,8($inp)
-	stg	$i1,0($out,$inp)
-	stg	$i2,8($out,$inp)
-	la	$inp,16($inp)
-	brct	$s3,.Lxts_km_prepare
-
-	slgr	$inp,$fp		# rewind $inp
-	la	$s2,0($out,$inp)
-	lgr	$s3,$fp
-	.long	0xb92e00aa		# km $s2,$s2
-	brc	1,.-4			# pay attention to "partial completion"
-
-	la	$s2,16($sp)
-	slgr	$s2,$inp
-	srlg	$s3,$fp,4
-.Lxts_km_xor:
-	lg	$i1,0($out,$inp)
-	lg	$i2,8($out,$inp)
-	xg	$i1,0($s2,$inp)
-	xg	$i2,8($s2,$inp)
-	stg	$i1,0($out,$inp)
-	stg	$i2,8($out,$inp)
-	la	$inp,16($inp)
-	brct	$s3,.Lxts_km_xor
-
-	slgr	$len,$fp
-	brc	1,.Lxts_km_loop		# not zero, no borrow
-	algr	$fp,$len
-	lghi	$len,0
-	brc	4+1,.Lxts_km_loop	# not zero
-
-	l${g}	$i1,0($sp)		# back-chain
-	llgf	$fp,`2*$SIZE_T-4`($sp)	# bytes used
-	la	$i2,16($sp)
-	srlg	$fp,$fp,4
-.Lxts_km_zap:
-	stg	$i1,0($i2)
-	stg	$i1,8($i2)
-	la	$i2,16($i2)
-	brct	$fp,.Lxts_km_zap
-
-	la	$sp,0($i1)
-	llgc	$len,2*$SIZE_T-1($i1)
-	nill	$len,0x0f		# $len%=16
-	bzr	$ra
-
-	# generate one more tweak...
-	lghi	$i1,0x87
-	srag	$i2,$s1,63		# broadcast upper bit
-	ngr	$i1,$i2			# rem
-	algr	$s0,$s0
-	alcgr	$s1,$s1
-	xgr	$s0,$i1
-
-	ltr	$len,$len		# clear zero flag
-	br	$ra
-.size	_s390x_xts_km,.-_s390x_xts_km
-
-.globl	AES_xts_encrypt
-.type	AES_xts_encrypt,\@function
-.align	16
-AES_xts_encrypt:
-	xgr	%r3,%r4			# flip %r3 and %r4, $out and $len
-	xgr	%r4,%r3
-	xgr	%r3,%r4
-___
-$code.=<<___ if ($SIZE_T==4);
-	llgfr	$len,$len
-___
-$code.=<<___;
-	st${g}	$len,1*$SIZE_T($sp)	# save copy of $len
-	srag	$len,$len,4		# formally wrong, because it expands
-					# the sign bit, but who can afford
-					# asking to process more than 2^63-1
-					# bytes? I use it because it sets the
-					# condition code...
-	bcr	8,$ra			# abort if zero (i.e. less than 16)
-___
-$code.=<<___ if (!$softonly);
-	llgf	%r0,240($key2)
-	lhi	%r1,16
-	clr	%r0,%r1
-	jl	.Lxts_enc_software
-
-	st${g}	$ra,5*$SIZE_T($sp)
-	stm${g}	%r6,$s3,6*$SIZE_T($sp)
-
-	sllg	$len,$len,4		# $len&=~15
-	slgr	$out,$inp
-
-	# generate the tweak value
-	l${g}	$s3,$stdframe($sp)	# pointer to iv
-	la	$s2,$tweak($sp)
-	lmg	$s0,$s1,0($s3)
-	lghi	$s3,16
-	stmg	$s0,$s1,0($s2)
-	la	%r1,0($key2)		# $key2 is not needed anymore
-	.long	0xb92e00aa		# km $s2,$s2, generate the tweak
-	brc	1,.-4			# can this happen?
-
-	l	%r0,240($key1)
-	la	%r1,0($key1)		# $key1 is not needed anymore
-	bras	$ra,_s390x_xts_km
-	jz	.Lxts_enc_km_done
-
-	aghi	$inp,-16		# take one step back
-	la	$i3,0($out,$inp)	# put aside real $out
-.Lxts_enc_km_steal:
-	llgc	$i1,16($inp)
-	llgc	$i2,0($out,$inp)
-	stc	$i1,0($out,$inp)
-	stc	$i2,16($out,$inp)
-	la	$inp,1($inp)
-	brct	$len,.Lxts_enc_km_steal
-
-	la	$s2,0($i3)
-	lghi	$s3,16
-	lrvgr	$i1,$s0			# flip byte order
-	lrvgr	$i2,$s1
-	xg	$i1,0($s2)
-	xg	$i2,8($s2)
-	stg	$i1,0($s2)
-	stg	$i2,8($s2)
-	.long	0xb92e00aa		# km $s2,$s2
-	brc	1,.-4			# can this happen?
-	lrvgr	$i1,$s0			# flip byte order
-	lrvgr	$i2,$s1
-	xg	$i1,0($i3)
-	xg	$i2,8($i3)
-	stg	$i1,0($i3)
-	stg	$i2,8($i3)
-
-.Lxts_enc_km_done:
-	stg	$sp,$tweak+0($sp)	# wipe tweak
-	stg	$sp,$tweak+8($sp)
-	l${g}	$ra,5*$SIZE_T($sp)
-	lm${g}	%r6,$s3,6*$SIZE_T($sp)
-	br	$ra
-.align	16
-.Lxts_enc_software:
-___
-$code.=<<___;
-	stm${g}	%r6,$ra,6*$SIZE_T($sp)
-
-	slgr	$out,$inp
-
-	l${g}	$s3,$stdframe($sp)	# ivp
-	llgf	$s0,0($s3)		# load iv
-	llgf	$s1,4($s3)
-	llgf	$s2,8($s3)
-	llgf	$s3,12($s3)
-	stm${g}	%r2,%r5,2*$SIZE_T($sp)
-	la	$key,0($key2)
-	larl	$tbl,AES_Te
-	bras	$ra,_s390x_AES_encrypt	# generate the tweak
-	lm${g}	%r2,%r5,2*$SIZE_T($sp)
-	stm	$s0,$s3,$tweak($sp)	# save the tweak
-	j	.Lxts_enc_enter
-
-.align	16
-.Lxts_enc_loop:
-	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
-	lrvg	$s3,$tweak+8($sp)
-	lghi	%r1,0x87
-	srag	%r0,$s3,63		# broadcast upper bit
-	ngr	%r1,%r0			# rem
-	algr	$s1,$s1
-	alcgr	$s3,$s3
-	xgr	$s1,%r1
-	lrvgr	$s1,$s1			# flip byte order
-	lrvgr	$s3,$s3
-	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits 
-	stg	$s1,$tweak+0($sp)	# save the tweak
-	llgfr	$s1,$s1
-	srlg	$s2,$s3,32
-	stg	$s3,$tweak+8($sp)
-	llgfr	$s3,$s3
-	la	$inp,16($inp)		# $inp+=16
-.Lxts_enc_enter:
-	x	$s0,0($inp)		# ^=*($inp)
-	x	$s1,4($inp)
-	x	$s2,8($inp)
-	x	$s3,12($inp)
-	stm${g}	%r2,%r3,2*$SIZE_T($sp)	# only two registers are changing
-	la	$key,0($key1)
-	bras	$ra,_s390x_AES_encrypt
-	lm${g}	%r2,%r5,2*$SIZE_T($sp)
-	x	$s0,$tweak+0($sp)	# ^=tweak
-	x	$s1,$tweak+4($sp)
-	x	$s2,$tweak+8($sp)
-	x	$s3,$tweak+12($sp)
-	st	$s0,0($out,$inp)
-	st	$s1,4($out,$inp)
-	st	$s2,8($out,$inp)
-	st	$s3,12($out,$inp)
-	brct${g}	$len,.Lxts_enc_loop
-
-	llgc	$len,`2*$SIZE_T-1`($sp)
-	nill	$len,0x0f		# $len%16
-	jz	.Lxts_enc_done
-
-	la	$i3,0($inp,$out)	# put aside real $out
-.Lxts_enc_steal:
-	llgc	%r0,16($inp)
-	llgc	%r1,0($out,$inp)
-	stc	%r0,0($out,$inp)
-	stc	%r1,16($out,$inp)
-	la	$inp,1($inp)
-	brct	$len,.Lxts_enc_steal
-	la	$out,0($i3)		# restore real $out
-
-	# generate last tweak...
-	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
-	lrvg	$s3,$tweak+8($sp)
-	lghi	%r1,0x87
-	srag	%r0,$s3,63		# broadcast upper bit
-	ngr	%r1,%r0			# rem
-	algr	$s1,$s1
-	alcgr	$s3,$s3
-	xgr	$s1,%r1
-	lrvgr	$s1,$s1			# flip byte order
-	lrvgr	$s3,$s3
-	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits 
-	stg	$s1,$tweak+0($sp)	# save the tweak
-	llgfr	$s1,$s1
-	srlg	$s2,$s3,32
-	stg	$s3,$tweak+8($sp)
-	llgfr	$s3,$s3
-
-	x	$s0,0($out)		# ^=*(inp)|stolen cipher-text
-	x	$s1,4($out)
-	x	$s2,8($out)
-	x	$s3,12($out)
-	st${g}	$out,4*$SIZE_T($sp)
-	la	$key,0($key1)
-	bras	$ra,_s390x_AES_encrypt
-	l${g}	$out,4*$SIZE_T($sp)
-	x	$s0,`$tweak+0`($sp)	# ^=tweak
-	x	$s1,`$tweak+4`($sp)
-	x	$s2,`$tweak+8`($sp)
-	x	$s3,`$tweak+12`($sp)
-	st	$s0,0($out)
-	st	$s1,4($out)
-	st	$s2,8($out)
-	st	$s3,12($out)
-
-.Lxts_enc_done:
-	stg	$sp,$tweak+0($sp)	# wipe tweak
-	stg	$sp,$tweak+8($sp)
-	lm${g}	%r6,$ra,6*$SIZE_T($sp)
-	br	$ra
-.size	AES_xts_encrypt,.-AES_xts_encrypt
-___
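The recurring six-instruction sequence above (srag/ngr, algr/alcgr, xgr
with 0x87) multiplies the 128-bit tweak by x in GF(2^128), reduced modulo
x^128+x^7+x^2+x+1. The same update in C, on the little-endian halves the
code keeps in $s0/$s1 (assuming arithmetic right shift of signed values):

    #include <stdint.h>

    /* t[0] holds the low 64 bits of the tweak, t[1] the high 64 bits. */
    static void xts_tweak_double(uint64_t t[2])
    {
        uint64_t rem = (uint64_t)((int64_t)t[1] >> 63) & 0x87; /* srag+ngr   */
        t[1] = (t[1] << 1) | (t[0] >> 63);                     /* algr/alcgr */
        t[0] = (t[0] << 1) ^ rem;                              /* xgr        */
    }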
-# void AES_xts_decrypt(const char *inp,char *out,size_t len,
-#	const AES_KEY *key1, const AES_KEY *key2,
-#	const unsigned char iv[16]);
-#
-$code.=<<___;
-.globl	AES_xts_decrypt
-.type	AES_xts_decrypt,\@function
-.align	16
-AES_xts_decrypt:
-	xgr	%r3,%r4			# flip %r3 and %r4, $out and $len
-	xgr	%r4,%r3
-	xgr	%r3,%r4
-___
-$code.=<<___ if ($SIZE_T==4);
-	llgfr	$len,$len
-___
-$code.=<<___;
-	st${g}	$len,1*$SIZE_T($sp)	# save copy of $len
-	aghi	$len,-16
-	bcr	4,$ra			# abort if less than zero. formally
-					# wrong, because $len is unsigned,
-					# but who can afford asking to
-					# process more than 2^63-1 bytes?
-	tmll	$len,0x0f
-	jnz	.Lxts_dec_proceed
-	aghi	$len,16
-.Lxts_dec_proceed:
-___
-$code.=<<___ if (!$softonly);
-	llgf	%r0,240($key2)
-	lhi	%r1,16
-	clr	%r0,%r1
-	jl	.Lxts_dec_software
-
-	st${g}	$ra,5*$SIZE_T($sp)
-	stm${g}	%r6,$s3,6*$SIZE_T($sp)
-
-	nill	$len,0xfff0		# $len&=~15
-	slgr	$out,$inp
-
-	# generate the tweak value
-	l${g}	$s3,$stdframe($sp)	# pointer to iv
-	la	$s2,$tweak($sp)
-	lmg	$s0,$s1,0($s3)
-	lghi	$s3,16
-	stmg	$s0,$s1,0($s2)
-	la	%r1,0($key2)		# $key2 is not needed past this point
-	.long	0xb92e00aa		# km $s2,$s2, generate the tweak
-	brc	1,.-4			# can this happen?
-
-	l	%r0,240($key1)
-	la	%r1,0($key1)		# $key1 is not needed anymore
-
-	ltgr	$len,$len
-	jz	.Lxts_dec_km_short
-	bras	$ra,_s390x_xts_km
-	jz	.Lxts_dec_km_done
-
-	lrvgr	$s2,$s0			# make copy in reverse byte order
-	lrvgr	$s3,$s1
-	j	.Lxts_dec_km_2ndtweak
-
-.Lxts_dec_km_short:
-	llgc	$len,`2*$SIZE_T-1`($sp)
-	nill	$len,0x0f		# $len%=16
-	lrvg	$s0,$tweak+0($sp)	# load the tweak
-	lrvg	$s1,$tweak+8($sp)
-	lrvgr	$s2,$s0			# make copy in reverse byte order
-	lrvgr	$s3,$s1
-
-.Lxts_dec_km_2ndtweak:
-	lghi	$i1,0x87
-	srag	$i2,$s1,63		# broadcast upper bit
-	ngr	$i1,$i2			# rem
-	algr	$s0,$s0
-	alcgr	$s1,$s1
-	xgr	$s0,$i1
-	lrvgr	$i1,$s0			# flip byte order
-	lrvgr	$i2,$s1
-
-	xg	$i1,0($inp)
-	xg	$i2,8($inp)
-	stg	$i1,0($out,$inp)
-	stg	$i2,8($out,$inp)
-	la	$i2,0($out,$inp)
-	lghi	$i3,16
-	.long	0xb92e0066		# km $i2,$i2
-	brc	1,.-4			# can this happen?
-	lrvgr	$i1,$s0
-	lrvgr	$i2,$s1
-	xg	$i1,0($out,$inp)
-	xg	$i2,8($out,$inp)
-	stg	$i1,0($out,$inp)
-	stg	$i2,8($out,$inp)
-
-	la	$i3,0($out,$inp)	# put aside real $out
-.Lxts_dec_km_steal:
-	llgc	$i1,16($inp)
-	llgc	$i2,0($out,$inp)
-	stc	$i1,0($out,$inp)
-	stc	$i2,16($out,$inp)
-	la	$inp,1($inp)
-	brct	$len,.Lxts_dec_km_steal
-
-	lgr	$s0,$s2
-	lgr	$s1,$s3
-	xg	$s0,0($i3)
-	xg	$s1,8($i3)
-	stg	$s0,0($i3)
-	stg	$s1,8($i3)
-	la	$s0,0($i3)
-	lghi	$s1,16
-	.long	0xb92e0088		# km $s0,$s0
-	brc	1,.-4			# can this happen?
-	xg	$s2,0($i3)
-	xg	$s3,8($i3)
-	stg	$s2,0($i3)
-	stg	$s3,8($i3)
-.Lxts_dec_km_done:
-	stg	$sp,$tweak+0($sp)	# wipe tweak
-	stg	$sp,$tweak+8($sp)
-	l${g}	$ra,5*$SIZE_T($sp)
-	lm${g}	%r6,$s3,6*$SIZE_T($sp)
-	br	$ra
-.align	16
-.Lxts_dec_software:
-___
-$code.=<<___;
-	stm${g}	%r6,$ra,6*$SIZE_T($sp)
-
-	srlg	$len,$len,4
-	slgr	$out,$inp
-
-	l${g}	$s3,$stdframe($sp)	# ivp
-	llgf	$s0,0($s3)		# load iv
-	llgf	$s1,4($s3)
-	llgf	$s2,8($s3)
-	llgf	$s3,12($s3)
-	stm${g}	%r2,%r5,2*$SIZE_T($sp)
-	la	$key,0($key2)
-	larl	$tbl,AES_Te
-	bras	$ra,_s390x_AES_encrypt	# generate the tweak
-	lm${g}	%r2,%r5,2*$SIZE_T($sp)
-	larl	$tbl,AES_Td
-	lt${g}r	$len,$len
-	stm	$s0,$s3,$tweak($sp)	# save the tweak
-	jz	.Lxts_dec_short
-	j	.Lxts_dec_enter
-
-.align	16
-.Lxts_dec_loop:
-	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
-	lrvg	$s3,$tweak+8($sp)
-	lghi	%r1,0x87
-	srag	%r0,$s3,63		# broadcast upper bit
-	ngr	%r1,%r0			# rem
-	algr	$s1,$s1
-	alcgr	$s3,$s3
-	xgr	$s1,%r1
-	lrvgr	$s1,$s1			# flip byte order
-	lrvgr	$s3,$s3
-	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits 
-	stg	$s1,$tweak+0($sp)	# save the tweak
-	llgfr	$s1,$s1
-	srlg	$s2,$s3,32
-	stg	$s3,$tweak+8($sp)
-	llgfr	$s3,$s3
-.Lxts_dec_enter:
-	x	$s0,0($inp)		# tweak^=*(inp)
-	x	$s1,4($inp)
-	x	$s2,8($inp)
-	x	$s3,12($inp)
-	stm${g}	%r2,%r3,2*$SIZE_T($sp)	# only two registers are changing
-	la	$key,0($key1)
-	bras	$ra,_s390x_AES_decrypt
-	lm${g}	%r2,%r5,2*$SIZE_T($sp)
-	x	$s0,$tweak+0($sp)	# ^=tweak
-	x	$s1,$tweak+4($sp)
-	x	$s2,$tweak+8($sp)
-	x	$s3,$tweak+12($sp)
-	st	$s0,0($out,$inp)
-	st	$s1,4($out,$inp)
-	st	$s2,8($out,$inp)
-	st	$s3,12($out,$inp)
-	la	$inp,16($inp)
-	brct${g}	$len,.Lxts_dec_loop
-
-	llgc	$len,`2*$SIZE_T-1`($sp)
-	nill	$len,0x0f		# $len%16
-	jz	.Lxts_dec_done
-
-	# generate pair of tweaks...
-	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
-	lrvg	$s3,$tweak+8($sp)
-	lghi	%r1,0x87
-	srag	%r0,$s3,63		# broadcast upper bit
-	ngr	%r1,%r0			# rem
-	algr	$s1,$s1
-	alcgr	$s3,$s3
-	xgr	$s1,%r1
-	lrvgr	$i2,$s1			# flip byte order
-	lrvgr	$i3,$s3
-	stmg	$i2,$i3,$tweak($sp)	# save the 1st tweak
-	j	.Lxts_dec_2ndtweak
-
-.align	16
-.Lxts_dec_short:
-	llgc	$len,`2*$SIZE_T-1`($sp)
-	nill	$len,0x0f		# $len%16
-	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
-	lrvg	$s3,$tweak+8($sp)
-.Lxts_dec_2ndtweak:
-	lghi	%r1,0x87
-	srag	%r0,$s3,63		# broadcast upper bit
-	ngr	%r1,%r0			# rem
-	algr	$s1,$s1
-	alcgr	$s3,$s3
-	xgr	$s1,%r1
-	lrvgr	$s1,$s1			# flip byte order
-	lrvgr	$s3,$s3
-	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits
-	stg	$s1,$tweak-16+0($sp)	# save the 2nd tweak
-	llgfr	$s1,$s1
-	srlg	$s2,$s3,32
-	stg	$s3,$tweak-16+8($sp)
-	llgfr	$s3,$s3
-
-	x	$s0,0($inp)		# tweak_the_2nd^=*(inp)
-	x	$s1,4($inp)
-	x	$s2,8($inp)
-	x	$s3,12($inp)
-	stm${g}	%r2,%r3,2*$SIZE_T($sp)
-	la	$key,0($key1)
-	bras	$ra,_s390x_AES_decrypt
-	lm${g}	%r2,%r5,2*$SIZE_T($sp)
-	x	$s0,$tweak-16+0($sp)	# ^=tweak_the_2nd
-	x	$s1,$tweak-16+4($sp)
-	x	$s2,$tweak-16+8($sp)
-	x	$s3,$tweak-16+12($sp)
-	st	$s0,0($out,$inp)
-	st	$s1,4($out,$inp)
-	st	$s2,8($out,$inp)
-	st	$s3,12($out,$inp)
-
-	la	$i3,0($out,$inp)	# put aside real $out
-.Lxts_dec_steal:
-	llgc	%r0,16($inp)
-	llgc	%r1,0($out,$inp)
-	stc	%r0,0($out,$inp)
-	stc	%r1,16($out,$inp)
-	la	$inp,1($inp)
-	brct	$len,.Lxts_dec_steal
-	la	$out,0($i3)		# restore real $out
-
-	lm	$s0,$s3,$tweak($sp)	# load the 1st tweak
-	x	$s0,0($out)		# tweak^=*(inp)|stolen cipher-text
-	x	$s1,4($out)
-	x	$s2,8($out)
-	x	$s3,12($out)
-	st${g}	$out,4*$SIZE_T($sp)
-	la	$key,0($key1)
-	bras	$ra,_s390x_AES_decrypt
-	l${g}	$out,4*$SIZE_T($sp)
-	x	$s0,$tweak+0($sp)	# ^=tweak
-	x	$s1,$tweak+4($sp)
-	x	$s2,$tweak+8($sp)
-	x	$s3,$tweak+12($sp)
-	st	$s0,0($out)
-	st	$s1,4($out)
-	st	$s2,8($out)
-	st	$s3,12($out)
-	stg	$sp,$tweak-16+0($sp)	# wipe 2nd tweak
-	stg	$sp,$tweak-16+8($sp)
-.Lxts_dec_done:
-	stg	$sp,$tweak+0($sp)	# wipe tweak
-	stg	$sp,$tweak+8($sp)
-	lm${g}	%r6,$ra,6*$SIZE_T($sp)
-	br	$ra
-.size	AES_xts_decrypt,.-AES_xts_decrypt
-___
-}
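Both steal loops (.Lxts_enc_steal, .Lxts_dec_steal) implement standard XTS
ciphertext stealing: the partial tail trades bytes with the last full block,
which then goes through the cipher once more under the final tweak. The
byte swap, sketched in C:

    #include <stdint.h>
    #include <stddef.h>

    /* last: the last full output block already produced; rem < 16. */
    static void xts_steal_sketch(uint8_t last[16], const uint8_t *tail_in,
                                 uint8_t *tail_out, size_t rem)
    {
        for (size_t i = 0; i < rem; i++) {
            tail_out[i] = last[i];    /* stolen bytes form the short block */
            last[i]     = tail_in[i]; /* tail input refills the full one   */
        }
        /* "last" is then en-/decrypted once more, as in the km calls that
         * follow the loops above. */
    }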
-$code.=<<___;
-.string	"AES for s390x, CRYPTOGAMS by <appro\@openssl.org>"
-.comm	OPENSSL_s390xcap_P,16,8
-___
-
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
-close STDOUT;	# force flush

+ 0 - 1182
drivers/builtin_openssl2/crypto/aes/asm/aes-sparcv9.pl

@@ -1,1182 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. Rights for redistribution and usage in source and binary
-# forms are granted according to the OpenSSL license.
-# ====================================================================
-#
-# Version 1.1
-#
-# The major reason for the undertaken effort was to mitigate the hazard
-# of cache-timing attacks. This is [currently and initially!] addressed
-# in two ways. 1. S-boxes are compressed from 5KB to 2KB+256B each.
-# 2. References to them are scheduled for L2 cache latency, meaning
-# that the tables don't have to reside in L1 cache. Once again, this
-# is an initial draft and one should expect more countermeasures to
-# be implemented...
-#
-# Version 1.1 prefetches T[ed]4 in order to mitigate an attack on the
-# last round.
-#
-# Even though performance was not the primary goal [on the contrary,
-# extra shifts "induced" by the compressed S-box and the longer loop
-# epilogue "induced" by scheduling for L2 have a negative effect on
-# performance], the code turned out to run in ~23 cycles per processed
-# byte en-/decrypted with a 128-bit key. This is a pretty good result
-# for code with the mentioned qualities on an UltraSPARC core. Compared
-# to Sun C generated code my encrypt procedure runs just a few percent
-# faster, while the decrypt one is a whole 50% faster [yes, Sun C failed
-# to generate an optimal decrypt procedure]. Compared to GNU C generated
-# code both procedures are more than 60% faster:-)
-
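The compression works because _data_word() below emits every 32-bit table
value twice, making each entry 8 bytes: a single 64-bit ldx plus one shift
then yields any byte rotation of the value, and the "and ...,2040" masks
are simply ((s >> 24) & 0xff) << 3 index scaling (2040 == 0xff << 3). A C
sketch with illustrative names:

    #include <stdint.h>

    /* tbl[i] == ((uint64_t)T[i] << 32) | T[i], as emitted by _data_word(). */
    static uint32_t te_lookup(const uint64_t tbl[256], uint32_t s,
                              int byte_no, int rot)
    {
        uint64_t e = tbl[(s >> (8 * byte_no)) & 0xff];
        return (uint32_t)(e >> (8 * rot));  /* == ror32(T, 8*rot) */
    }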
-$bits=32;
-for (@ARGV)	{ $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
-if ($bits==64)	{ $bias=2047; $frame=192; }
-else		{ $bias=0;    $frame=112; }
-$locals=16;
-
-$acc0="%l0";
-$acc1="%o0";
-$acc2="%o1";
-$acc3="%o2";
-
-$acc4="%l1";
-$acc5="%o3";
-$acc6="%o4";
-$acc7="%o5";
-
-$acc8="%l2";
-$acc9="%o7";
-$acc10="%g1";
-$acc11="%g2";
-
-$acc12="%l3";
-$acc13="%g3";
-$acc14="%g4";
-$acc15="%g5";
-
-$t0="%l4";
-$t1="%l5";
-$t2="%l6";
-$t3="%l7";
-
-$s0="%i0";
-$s1="%i1";
-$s2="%i2";
-$s3="%i3";
-$tbl="%i4";
-$key="%i5";
-$rounds="%i7";	# aliases with return address, which is off-loaded to stack
-
-sub _data_word()
-{ my $i;
-    while(defined($i=shift)) { $code.=sprintf"\t.long\t0x%08x,0x%08x\n",$i,$i; }
-}
-
-$code.=<<___ if ($bits==64);
-.register	%g2,#scratch
-.register	%g3,#scratch
-___
-$code.=<<___;
-.section	".text",#alloc,#execinstr
-
-.align	256
-AES_Te:
-___
-&_data_word(
-	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
-	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
-	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
-	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
-	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
-	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
-	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
-	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
-	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
-	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
-	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
-	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
-	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
-	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
-	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
-	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
-	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
-	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
-	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
-	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
-	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
-	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
-	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
-	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
-	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
-	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
-	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
-	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
-	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
-	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
-	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
-	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
-	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
-	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
-	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
-	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
-	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
-	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
-	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
-	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
-	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
-	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
-	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
-	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
-	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
-	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
-	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
-	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
-	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
-	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
-	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
-	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
-	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
-	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
-	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
-	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
-	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
-	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
-	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
-	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
-	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
-	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
-	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
-	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
-$code.=<<___;
-	.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-	.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-	.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-	.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-	.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-	.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-	.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-	.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-	.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-	.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-	.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-	.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-	.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-	.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-	.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-	.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-	.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-	.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-	.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-	.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-	.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-	.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-	.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-	.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-	.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-	.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-	.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-	.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-	.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-	.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-	.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-	.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-.type	AES_Te,#object
-.size	AES_Te,(.-AES_Te)
-
-.align	64
-.skip	16
-_sparcv9_AES_encrypt:
-	save	%sp,-$frame-$locals,%sp
-	stx	%i7,[%sp+$bias+$frame+0]	! off-load return address
-	ld	[$key+240],$rounds
-	ld	[$key+0],$t0
-	ld	[$key+4],$t1			!
-	ld	[$key+8],$t2
-	srl	$rounds,1,$rounds
-	xor	$t0,$s0,$s0
-	ld	[$key+12],$t3
-	srl	$s0,21,$acc0
-	xor	$t1,$s1,$s1
-	ld	[$key+16],$t0
-	srl	$s1,13,$acc1			!
-	xor	$t2,$s2,$s2
-	ld	[$key+20],$t1
-	xor	$t3,$s3,$s3
-	ld	[$key+24],$t2
-	and	$acc0,2040,$acc0
-	ld	[$key+28],$t3
-	nop
-.Lenc_loop:
-	srl	$s2,5,$acc2			!
-	and	$acc1,2040,$acc1
-	ldx	[$tbl+$acc0],$acc0
-	sll	$s3,3,$acc3
-	and	$acc2,2040,$acc2
-	ldx	[$tbl+$acc1],$acc1
-	srl	$s1,21,$acc4
-	and	$acc3,2040,$acc3
-	ldx	[$tbl+$acc2],$acc2		!
-	srl	$s2,13,$acc5
-	and	$acc4,2040,$acc4
-	ldx	[$tbl+$acc3],$acc3
-	srl	$s3,5,$acc6
-	and	$acc5,2040,$acc5
-	ldx	[$tbl+$acc4],$acc4
-	fmovs	%f0,%f0
-	sll	$s0,3,$acc7			!
-	and	$acc6,2040,$acc6
-	ldx	[$tbl+$acc5],$acc5
-	srl	$s2,21,$acc8
-	and	$acc7,2040,$acc7
-	ldx	[$tbl+$acc6],$acc6
-	srl	$s3,13,$acc9
-	and	$acc8,2040,$acc8
-	ldx	[$tbl+$acc7],$acc7		!
-	srl	$s0,5,$acc10
-	and	$acc9,2040,$acc9
-	ldx	[$tbl+$acc8],$acc8
-	sll	$s1,3,$acc11
-	and	$acc10,2040,$acc10
-	ldx	[$tbl+$acc9],$acc9
-	fmovs	%f0,%f0
-	srl	$s3,21,$acc12			!
-	and	$acc11,2040,$acc11
-	ldx	[$tbl+$acc10],$acc10
-	srl	$s0,13,$acc13
-	and	$acc12,2040,$acc12
-	ldx	[$tbl+$acc11],$acc11
-	srl	$s1,5,$acc14
-	and	$acc13,2040,$acc13
-	ldx	[$tbl+$acc12],$acc12		!
-	sll	$s2,3,$acc15
-	and	$acc14,2040,$acc14
-	ldx	[$tbl+$acc13],$acc13
-	and	$acc15,2040,$acc15
-	add	$key,32,$key
-	ldx	[$tbl+$acc14],$acc14
-	fmovs	%f0,%f0
-	subcc	$rounds,1,$rounds		!
-	ldx	[$tbl+$acc15],$acc15
-	bz,a,pn	%icc,.Lenc_last
-	add	$tbl,2048,$rounds
-
-		srlx	$acc1,8,$acc1
-		xor	$acc0,$t0,$t0
-	ld	[$key+0],$s0
-	fmovs	%f0,%f0
-		srlx	$acc2,16,$acc2		!
-		xor	$acc1,$t0,$t0
-	ld	[$key+4],$s1
-		srlx	$acc3,24,$acc3
-		xor	$acc2,$t0,$t0
-	ld	[$key+8],$s2
-		srlx	$acc5,8,$acc5
-		xor	$acc3,$t0,$t0
-	ld	[$key+12],$s3			!
-		srlx	$acc6,16,$acc6
-		xor	$acc4,$t1,$t1
-	fmovs	%f0,%f0
-		srlx	$acc7,24,$acc7
-		xor	$acc5,$t1,$t1
-		srlx	$acc9,8,$acc9
-		xor	$acc6,$t1,$t1
-		srlx	$acc10,16,$acc10	!
-		xor	$acc7,$t1,$t1
-		srlx	$acc11,24,$acc11
-		xor	$acc8,$t2,$t2
-		srlx	$acc13,8,$acc13
-		xor	$acc9,$t2,$t2
-		srlx	$acc14,16,$acc14
-		xor	$acc10,$t2,$t2
-		srlx	$acc15,24,$acc15	!
-		xor	$acc11,$t2,$t2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$t3,$t3
-	srl	$t0,21,$acc0
-		xor	$acc14,$t3,$t3
-	srl	$t1,13,$acc1
-		xor	$acc15,$t3,$t3
-
-	and	$acc0,2040,$acc0		!
-	srl	$t2,5,$acc2
-	and	$acc1,2040,$acc1
-	ldx	[$tbl+$acc0],$acc0
-	sll	$t3,3,$acc3
-	and	$acc2,2040,$acc2
-	ldx	[$tbl+$acc1],$acc1
-	fmovs	%f0,%f0
-	srl	$t1,21,$acc4			!
-	and	$acc3,2040,$acc3
-	ldx	[$tbl+$acc2],$acc2
-	srl	$t2,13,$acc5
-	and	$acc4,2040,$acc4
-	ldx	[$tbl+$acc3],$acc3
-	srl	$t3,5,$acc6
-	and	$acc5,2040,$acc5
-	ldx	[$tbl+$acc4],$acc4		!
-	sll	$t0,3,$acc7
-	and	$acc6,2040,$acc6
-	ldx	[$tbl+$acc5],$acc5
-	srl	$t2,21,$acc8
-	and	$acc7,2040,$acc7
-	ldx	[$tbl+$acc6],$acc6
-	fmovs	%f0,%f0
-	srl	$t3,13,$acc9			!
-	and	$acc8,2040,$acc8
-	ldx	[$tbl+$acc7],$acc7
-	srl	$t0,5,$acc10
-	and	$acc9,2040,$acc9
-	ldx	[$tbl+$acc8],$acc8
-	sll	$t1,3,$acc11
-	and	$acc10,2040,$acc10
-	ldx	[$tbl+$acc9],$acc9		!
-	srl	$t3,21,$acc12
-	and	$acc11,2040,$acc11
-	ldx	[$tbl+$acc10],$acc10
-	srl	$t0,13,$acc13
-	and	$acc12,2040,$acc12
-	ldx	[$tbl+$acc11],$acc11
-	fmovs	%f0,%f0
-	srl	$t1,5,$acc14			!
-	and	$acc13,2040,$acc13
-	ldx	[$tbl+$acc12],$acc12
-	sll	$t2,3,$acc15
-	and	$acc14,2040,$acc14
-	ldx	[$tbl+$acc13],$acc13
-		srlx	$acc1,8,$acc1
-	and	$acc15,2040,$acc15
-	ldx	[$tbl+$acc14],$acc14		!
-
-		srlx	$acc2,16,$acc2
-		xor	$acc0,$s0,$s0
-	ldx	[$tbl+$acc15],$acc15
-		srlx	$acc3,24,$acc3
-		xor	$acc1,$s0,$s0
-	ld	[$key+16],$t0
-	fmovs	%f0,%f0
-		srlx	$acc5,8,$acc5		!
-		xor	$acc2,$s0,$s0
-	ld	[$key+20],$t1
-		srlx	$acc6,16,$acc6
-		xor	$acc3,$s0,$s0
-	ld	[$key+24],$t2
-		srlx	$acc7,24,$acc7
-		xor	$acc4,$s1,$s1
-	ld	[$key+28],$t3			!
-		srlx	$acc9,8,$acc9
-		xor	$acc5,$s1,$s1
-	ldx	[$tbl+2048+0],%g0		! prefetch te4
-		srlx	$acc10,16,$acc10
-		xor	$acc6,$s1,$s1
-	ldx	[$tbl+2048+32],%g0		! prefetch te4
-		srlx	$acc11,24,$acc11
-		xor	$acc7,$s1,$s1
-	ldx	[$tbl+2048+64],%g0		! prefetch te4
-		srlx	$acc13,8,$acc13
-		xor	$acc8,$s2,$s2
-	ldx	[$tbl+2048+96],%g0		! prefetch te4
-		srlx	$acc14,16,$acc14	!
-		xor	$acc9,$s2,$s2
-	ldx	[$tbl+2048+128],%g0		! prefetch te4
-		srlx	$acc15,24,$acc15
-		xor	$acc10,$s2,$s2
-	ldx	[$tbl+2048+160],%g0		! prefetch te4
-	srl	$s0,21,$acc0
-		xor	$acc11,$s2,$s2
-	ldx	[$tbl+2048+192],%g0		! prefetch te4
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$s3,$s3
-	ldx	[$tbl+2048+224],%g0		! prefetch te4
-	srl	$s1,13,$acc1			!
-		xor	$acc14,$s3,$s3
-		xor	$acc15,$s3,$s3
-	ba	.Lenc_loop
-	and	$acc0,2040,$acc0
-
-.align	32
-.Lenc_last:
-		srlx	$acc1,8,$acc1		!
-		xor	$acc0,$t0,$t0
-	ld	[$key+0],$s0
-		srlx	$acc2,16,$acc2
-		xor	$acc1,$t0,$t0
-	ld	[$key+4],$s1
-		srlx	$acc3,24,$acc3
-		xor	$acc2,$t0,$t0
-	ld	[$key+8],$s2			!
-		srlx	$acc5,8,$acc5
-		xor	$acc3,$t0,$t0
-	ld	[$key+12],$s3
-		srlx	$acc6,16,$acc6
-		xor	$acc4,$t1,$t1
-		srlx	$acc7,24,$acc7
-		xor	$acc5,$t1,$t1
-		srlx	$acc9,8,$acc9		!
-		xor	$acc6,$t1,$t1
-		srlx	$acc10,16,$acc10
-		xor	$acc7,$t1,$t1
-		srlx	$acc11,24,$acc11
-		xor	$acc8,$t2,$t2
-		srlx	$acc13,8,$acc13
-		xor	$acc9,$t2,$t2
-		srlx	$acc14,16,$acc14	!
-		xor	$acc10,$t2,$t2
-		srlx	$acc15,24,$acc15
-		xor	$acc11,$t2,$t2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$t3,$t3
-	srl	$t0,24,$acc0
-		xor	$acc14,$t3,$t3
-	srl	$t1,16,$acc1			!
-		xor	$acc15,$t3,$t3
-
-	srl	$t2,8,$acc2
-	and	$acc1,255,$acc1
-	ldub	[$rounds+$acc0],$acc0
-	srl	$t1,24,$acc4
-	and	$acc2,255,$acc2
-	ldub	[$rounds+$acc1],$acc1
-	srl	$t2,16,$acc5			!
-	and	$t3,255,$acc3
-	ldub	[$rounds+$acc2],$acc2
-	ldub	[$rounds+$acc3],$acc3
-	srl	$t3,8,$acc6
-	and	$acc5,255,$acc5
-	ldub	[$rounds+$acc4],$acc4
-	fmovs	%f0,%f0
-	srl	$t2,24,$acc8			!
-	and	$acc6,255,$acc6
-	ldub	[$rounds+$acc5],$acc5
-	srl	$t3,16,$acc9
-	and	$t0,255,$acc7
-	ldub	[$rounds+$acc6],$acc6
-	ldub	[$rounds+$acc7],$acc7
-	fmovs	%f0,%f0
-	srl	$t0,8,$acc10			!
-	and	$acc9,255,$acc9
-	ldub	[$rounds+$acc8],$acc8
-	srl	$t3,24,$acc12
-	and	$acc10,255,$acc10
-	ldub	[$rounds+$acc9],$acc9
-	srl	$t0,16,$acc13
-	and	$t1,255,$acc11
-	ldub	[$rounds+$acc10],$acc10		!
-	srl	$t1,8,$acc14
-	and	$acc13,255,$acc13
-	ldub	[$rounds+$acc11],$acc11
-	ldub	[$rounds+$acc12],$acc12
-	and	$acc14,255,$acc14
-	ldub	[$rounds+$acc13],$acc13
-	and	$t2,255,$acc15
-	ldub	[$rounds+$acc14],$acc14		!
-
-		sll	$acc0,24,$acc0
-		xor	$acc3,$s0,$s0
-	ldub	[$rounds+$acc15],$acc15
-		sll	$acc1,16,$acc1
-		xor	$acc0,$s0,$s0
-	ldx	[%sp+$bias+$frame+0],%i7	! restore return address
-	fmovs	%f0,%f0
-		sll	$acc2,8,$acc2		!
-		xor	$acc1,$s0,$s0
-		sll	$acc4,24,$acc4
-		xor	$acc2,$s0,$s0
-		sll	$acc5,16,$acc5
-		xor	$acc7,$s1,$s1
-		sll	$acc6,8,$acc6
-		xor	$acc4,$s1,$s1
-		sll	$acc8,24,$acc8		!
-		xor	$acc5,$s1,$s1
-		sll	$acc9,16,$acc9
-		xor	$acc11,$s2,$s2
-		sll	$acc10,8,$acc10
-		xor	$acc6,$s1,$s1
-		sll	$acc12,24,$acc12
-		xor	$acc8,$s2,$s2
-		sll	$acc13,16,$acc13	!
-		xor	$acc9,$s2,$s2
-		sll	$acc14,8,$acc14
-		xor	$acc10,$s2,$s2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$s3,$s3
-		xor	$acc14,$s3,$s3
-		xor	$acc15,$s3,$s3
-
-	ret
-	restore
-.type	_sparcv9_AES_encrypt,#function
-.size	_sparcv9_AES_encrypt,(.-_sparcv9_AES_encrypt)
-
-.align	32
-.globl	AES_encrypt
-AES_encrypt:
-	or	%o0,%o1,%g1
-	andcc	%g1,3,%g0
-	bnz,pn	%xcc,.Lunaligned_enc
-	save	%sp,-$frame,%sp
-
-	ld	[%i0+0],%o0
-	ld	[%i0+4],%o1
-	ld	[%i0+8],%o2
-	ld	[%i0+12],%o3
-
-1:	call	.+8
-	add	%o7,AES_Te-1b,%o4
-	call	_sparcv9_AES_encrypt
-	mov	%i2,%o5
-
-	st	%o0,[%i1+0]
-	st	%o1,[%i1+4]
-	st	%o2,[%i1+8]
-	st	%o3,[%i1+12]
-
-	ret
-	restore
-
-.align	32
-.Lunaligned_enc:
-	ldub	[%i0+0],%l0
-	ldub	[%i0+1],%l1
-	ldub	[%i0+2],%l2
-
-	sll	%l0,24,%l0
-	ldub	[%i0+3],%l3
-	sll	%l1,16,%l1
-	ldub	[%i0+4],%l4
-	sll	%l2,8,%l2
-	or	%l1,%l0,%l0
-	ldub	[%i0+5],%l5
-	sll	%l4,24,%l4
-	or	%l3,%l2,%l2
-	ldub	[%i0+6],%l6
-	sll	%l5,16,%l5
-	or	%l0,%l2,%o0
-	ldub	[%i0+7],%l7
-
-	sll	%l6,8,%l6
-	or	%l5,%l4,%l4
-	ldub	[%i0+8],%l0
-	or	%l7,%l6,%l6
-	ldub	[%i0+9],%l1
-	or	%l4,%l6,%o1
-	ldub	[%i0+10],%l2
-
-	sll	%l0,24,%l0
-	ldub	[%i0+11],%l3
-	sll	%l1,16,%l1
-	ldub	[%i0+12],%l4
-	sll	%l2,8,%l2
-	or	%l1,%l0,%l0
-	ldub	[%i0+13],%l5
-	sll	%l4,24,%l4
-	or	%l3,%l2,%l2
-	ldub	[%i0+14],%l6
-	sll	%l5,16,%l5
-	or	%l0,%l2,%o2
-	ldub	[%i0+15],%l7
-
-	sll	%l6,8,%l6
-	or	%l5,%l4,%l4
-	or	%l7,%l6,%l6
-	or	%l4,%l6,%o3
-
-1:	call	.+8
-	add	%o7,AES_Te-1b,%o4
-	call	_sparcv9_AES_encrypt
-	mov	%i2,%o5
-
-	srl	%o0,24,%l0
-	srl	%o0,16,%l1
-	stb	%l0,[%i1+0]
-	srl	%o0,8,%l2
-	stb	%l1,[%i1+1]
-	stb	%l2,[%i1+2]
-	srl	%o1,24,%l4
-	stb	%o0,[%i1+3]
-
-	srl	%o1,16,%l5
-	stb	%l4,[%i1+4]
-	srl	%o1,8,%l6
-	stb	%l5,[%i1+5]
-	stb	%l6,[%i1+6]
-	srl	%o2,24,%l0
-	stb	%o1,[%i1+7]
-
-	srl	%o2,16,%l1
-	stb	%l0,[%i1+8]
-	srl	%o2,8,%l2
-	stb	%l1,[%i1+9]
-	stb	%l2,[%i1+10]
-	srl	%o3,24,%l4
-	stb	%o2,[%i1+11]
-
-	srl	%o3,16,%l5
-	stb	%l4,[%i1+12]
-	srl	%o3,8,%l6
-	stb	%l5,[%i1+13]
-	stb	%l6,[%i1+14]
-	stb	%o3,[%i1+15]
-
-	ret
-	restore
-.type	AES_encrypt,#function
-.size	AES_encrypt,(.-AES_encrypt)
-
-___
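AES_encrypt above branches to .Lunaligned_enc when either pointer fails the
"andcc %g1,3" test, since SPARC traps on misaligned ld; the fallback then
gathers each big-endian word one byte at a time. The ldub/sll/or chains are
equivalent to:

    #include <stdint.h>

    static uint32_t load_be32(const uint8_t *p)
    {
        return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16
             | (uint32_t)p[2] <<  8 | (uint32_t)p[3];
    }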
-
-$code.=<<___;
-.align	256
-AES_Td:
-___
-&_data_word(
-	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
-	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
-	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
-	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
-	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
-	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
-	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
-	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
-	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
-	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
-	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
-	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
-	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
-	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
-	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
-	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
-	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
-	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
-	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
-	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
-	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
-	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
-	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
-	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
-	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
-	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
-	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
-	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
-	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
-	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
-	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
-	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
-	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
-	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
-	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
-	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
-	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
-	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
-	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
-	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
-	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
-	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
-	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
-	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
-	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
-	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
-	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
-	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
-	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
-	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
-	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
-	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
-	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
-	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
-	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
-	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
-	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
-	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
-	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
-	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
-	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
-	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
-	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
-	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
-$code.=<<___;
-	.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-	.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-	.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-	.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-	.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-	.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-	.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-	.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-	.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-	.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-	.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-	.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-	.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-	.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-	.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-	.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-	.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-	.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-	.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-	.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-	.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-	.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-	.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-	.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-	.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-	.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-	.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-	.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-	.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-	.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-	.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-	.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-.type	AES_Td,#object
-.size	AES_Td,(.-AES_Td)
-
-.align	64
-.skip	16
-_sparcv9_AES_decrypt:
-	save	%sp,-$frame-$locals,%sp
-	stx	%i7,[%sp+$bias+$frame+0]	! off-load return address
-	ld	[$key+240],$rounds
-	ld	[$key+0],$t0
-	ld	[$key+4],$t1			!
-	ld	[$key+8],$t2
-	ld	[$key+12],$t3
-	srl	$rounds,1,$rounds
-	xor	$t0,$s0,$s0
-	ld	[$key+16],$t0
-	xor	$t1,$s1,$s1
-	ld	[$key+20],$t1
-	srl	$s0,21,$acc0			!
-	xor	$t2,$s2,$s2
-	ld	[$key+24],$t2
-	xor	$t3,$s3,$s3
-	and	$acc0,2040,$acc0
-	ld	[$key+28],$t3
-	srl	$s3,13,$acc1
-	nop
-.Ldec_loop:
-	srl	$s2,5,$acc2			!
-	and	$acc1,2040,$acc1
-	ldx	[$tbl+$acc0],$acc0
-	sll	$s1,3,$acc3
-	and	$acc2,2040,$acc2
-	ldx	[$tbl+$acc1],$acc1
-	srl	$s1,21,$acc4
-	and	$acc3,2040,$acc3
-	ldx	[$tbl+$acc2],$acc2		!
-	srl	$s0,13,$acc5
-	and	$acc4,2040,$acc4
-	ldx	[$tbl+$acc3],$acc3
-	srl	$s3,5,$acc6
-	and	$acc5,2040,$acc5
-	ldx	[$tbl+$acc4],$acc4
-	fmovs	%f0,%f0
-	sll	$s2,3,$acc7			!
-	and	$acc6,2040,$acc6
-	ldx	[$tbl+$acc5],$acc5
-	srl	$s2,21,$acc8
-	and	$acc7,2040,$acc7
-	ldx	[$tbl+$acc6],$acc6
-	srl	$s1,13,$acc9
-	and	$acc8,2040,$acc8
-	ldx	[$tbl+$acc7],$acc7		!
-	srl	$s0,5,$acc10
-	and	$acc9,2040,$acc9
-	ldx	[$tbl+$acc8],$acc8
-	sll	$s3,3,$acc11
-	and	$acc10,2040,$acc10
-	ldx	[$tbl+$acc9],$acc9
-	fmovs	%f0,%f0
-	srl	$s3,21,$acc12			!
-	and	$acc11,2040,$acc11
-	ldx	[$tbl+$acc10],$acc10
-	srl	$s2,13,$acc13
-	and	$acc12,2040,$acc12
-	ldx	[$tbl+$acc11],$acc11
-	srl	$s1,5,$acc14
-	and	$acc13,2040,$acc13
-	ldx	[$tbl+$acc12],$acc12		!
-	sll	$s0,3,$acc15
-	and	$acc14,2040,$acc14
-	ldx	[$tbl+$acc13],$acc13
-	and	$acc15,2040,$acc15
-	add	$key,32,$key
-	ldx	[$tbl+$acc14],$acc14
-	fmovs	%f0,%f0
-	subcc	$rounds,1,$rounds		!
-	ldx	[$tbl+$acc15],$acc15
-	bz,a,pn	%icc,.Ldec_last
-	add	$tbl,2048,$rounds
-
-		srlx	$acc1,8,$acc1
-		xor	$acc0,$t0,$t0
-	ld	[$key+0],$s0
-	fmovs	%f0,%f0
-		srlx	$acc2,16,$acc2		!
-		xor	$acc1,$t0,$t0
-	ld	[$key+4],$s1
-		srlx	$acc3,24,$acc3
-		xor	$acc2,$t0,$t0
-	ld	[$key+8],$s2
-		srlx	$acc5,8,$acc5
-		xor	$acc3,$t0,$t0
-	ld	[$key+12],$s3			!
-		srlx	$acc6,16,$acc6
-		xor	$acc4,$t1,$t1
-	fmovs	%f0,%f0
-		srlx	$acc7,24,$acc7
-		xor	$acc5,$t1,$t1
-		srlx	$acc9,8,$acc9
-		xor	$acc6,$t1,$t1
-		srlx	$acc10,16,$acc10	!
-		xor	$acc7,$t1,$t1
-		srlx	$acc11,24,$acc11
-		xor	$acc8,$t2,$t2
-		srlx	$acc13,8,$acc13
-		xor	$acc9,$t2,$t2
-		srlx	$acc14,16,$acc14
-		xor	$acc10,$t2,$t2
-		srlx	$acc15,24,$acc15	!
-		xor	$acc11,$t2,$t2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$t3,$t3
-	srl	$t0,21,$acc0
-		xor	$acc14,$t3,$t3
-		xor	$acc15,$t3,$t3
-	srl	$t3,13,$acc1
-
-	and	$acc0,2040,$acc0		!
-	srl	$t2,5,$acc2
-	and	$acc1,2040,$acc1
-	ldx	[$tbl+$acc0],$acc0
-	sll	$t1,3,$acc3
-	and	$acc2,2040,$acc2
-	ldx	[$tbl+$acc1],$acc1
-	fmovs	%f0,%f0
-	srl	$t1,21,$acc4			!
-	and	$acc3,2040,$acc3
-	ldx	[$tbl+$acc2],$acc2
-	srl	$t0,13,$acc5
-	and	$acc4,2040,$acc4
-	ldx	[$tbl+$acc3],$acc3
-	srl	$t3,5,$acc6
-	and	$acc5,2040,$acc5
-	ldx	[$tbl+$acc4],$acc4		!
-	sll	$t2,3,$acc7
-	and	$acc6,2040,$acc6
-	ldx	[$tbl+$acc5],$acc5
-	srl	$t2,21,$acc8
-	and	$acc7,2040,$acc7
-	ldx	[$tbl+$acc6],$acc6
-	fmovs	%f0,%f0
-	srl	$t1,13,$acc9			!
-	and	$acc8,2040,$acc8
-	ldx	[$tbl+$acc7],$acc7
-	srl	$t0,5,$acc10
-	and	$acc9,2040,$acc9
-	ldx	[$tbl+$acc8],$acc8
-	sll	$t3,3,$acc11
-	and	$acc10,2040,$acc10
-	ldx	[$tbl+$acc9],$acc9		!
-	srl	$t3,21,$acc12
-	and	$acc11,2040,$acc11
-	ldx	[$tbl+$acc10],$acc10
-	srl	$t2,13,$acc13
-	and	$acc12,2040,$acc12
-	ldx	[$tbl+$acc11],$acc11
-	fmovs	%f0,%f0
-	srl	$t1,5,$acc14			!
-	and	$acc13,2040,$acc13
-	ldx	[$tbl+$acc12],$acc12
-	sll	$t0,3,$acc15
-	and	$acc14,2040,$acc14
-	ldx	[$tbl+$acc13],$acc13
-		srlx	$acc1,8,$acc1
-	and	$acc15,2040,$acc15
-	ldx	[$tbl+$acc14],$acc14		!
-
-		srlx	$acc2,16,$acc2
-		xor	$acc0,$s0,$s0
-	ldx	[$tbl+$acc15],$acc15
-		srlx	$acc3,24,$acc3
-		xor	$acc1,$s0,$s0
-	ld	[$key+16],$t0
-	fmovs	%f0,%f0
-		srlx	$acc5,8,$acc5		!
-		xor	$acc2,$s0,$s0
-	ld	[$key+20],$t1
-		srlx	$acc6,16,$acc6
-		xor	$acc3,$s0,$s0
-	ld	[$key+24],$t2
-		srlx	$acc7,24,$acc7
-		xor	$acc4,$s1,$s1
-	ld	[$key+28],$t3			!
-		srlx	$acc9,8,$acc9
-		xor	$acc5,$s1,$s1
-	ldx	[$tbl+2048+0],%g0		! prefetch td4
-		srlx	$acc10,16,$acc10
-		xor	$acc6,$s1,$s1
-	ldx	[$tbl+2048+32],%g0		! prefetch td4
-		srlx	$acc11,24,$acc11
-		xor	$acc7,$s1,$s1
-	ldx	[$tbl+2048+64],%g0		! prefetch td4
-		srlx	$acc13,8,$acc13
-		xor	$acc8,$s2,$s2
-	ldx	[$tbl+2048+96],%g0		! prefetch td4
-		srlx	$acc14,16,$acc14	!
-		xor	$acc9,$s2,$s2
-	ldx	[$tbl+2048+128],%g0		! prefetch td4
-		srlx	$acc15,24,$acc15
-		xor	$acc10,$s2,$s2
-	ldx	[$tbl+2048+160],%g0		! prefetch td4
-	srl	$s0,21,$acc0
-		xor	$acc11,$s2,$s2
-	ldx	[$tbl+2048+192],%g0		! prefetch td4
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$s3,$s3
-	ldx	[$tbl+2048+224],%g0		! prefetch td4
-	and	$acc0,2040,$acc0		!
-		xor	$acc14,$s3,$s3
-		xor	$acc15,$s3,$s3
-	ba	.Ldec_loop
-	srl	$s3,13,$acc1
-
-.align	32
-.Ldec_last:
-		srlx	$acc1,8,$acc1		!
-		xor	$acc0,$t0,$t0
-	ld	[$key+0],$s0
-		srlx	$acc2,16,$acc2
-		xor	$acc1,$t0,$t0
-	ld	[$key+4],$s1
-		srlx	$acc3,24,$acc3
-		xor	$acc2,$t0,$t0
-	ld	[$key+8],$s2			!
-		srlx	$acc5,8,$acc5
-		xor	$acc3,$t0,$t0
-	ld	[$key+12],$s3
-		srlx	$acc6,16,$acc6
-		xor	$acc4,$t1,$t1
-		srlx	$acc7,24,$acc7
-		xor	$acc5,$t1,$t1
-		srlx	$acc9,8,$acc9		!
-		xor	$acc6,$t1,$t1
-		srlx	$acc10,16,$acc10
-		xor	$acc7,$t1,$t1
-		srlx	$acc11,24,$acc11
-		xor	$acc8,$t2,$t2
-		srlx	$acc13,8,$acc13
-		xor	$acc9,$t2,$t2
-		srlx	$acc14,16,$acc14	!
-		xor	$acc10,$t2,$t2
-		srlx	$acc15,24,$acc15
-		xor	$acc11,$t2,$t2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$t3,$t3
-	srl	$t0,24,$acc0
-		xor	$acc14,$t3,$t3
-		xor	$acc15,$t3,$t3		!
-	srl	$t3,16,$acc1
-
-	srl	$t2,8,$acc2
-	and	$acc1,255,$acc1
-	ldub	[$rounds+$acc0],$acc0
-	srl	$t1,24,$acc4
-	and	$acc2,255,$acc2
-	ldub	[$rounds+$acc1],$acc1
-	srl	$t0,16,$acc5			!
-	and	$t1,255,$acc3
-	ldub	[$rounds+$acc2],$acc2
-	ldub	[$rounds+$acc3],$acc3
-	srl	$t3,8,$acc6
-	and	$acc5,255,$acc5
-	ldub	[$rounds+$acc4],$acc4
-	fmovs	%f0,%f0
-	srl	$t2,24,$acc8			!
-	and	$acc6,255,$acc6
-	ldub	[$rounds+$acc5],$acc5
-	srl	$t1,16,$acc9
-	and	$t2,255,$acc7
-	ldub	[$rounds+$acc6],$acc6
-	ldub	[$rounds+$acc7],$acc7
-	fmovs	%f0,%f0
-	srl	$t0,8,$acc10			!
-	and	$acc9,255,$acc9
-	ldub	[$rounds+$acc8],$acc8
-	srl	$t3,24,$acc12
-	and	$acc10,255,$acc10
-	ldub	[$rounds+$acc9],$acc9
-	srl	$t2,16,$acc13
-	and	$t3,255,$acc11
-	ldub	[$rounds+$acc10],$acc10		!
-	srl	$t1,8,$acc14
-	and	$acc13,255,$acc13
-	ldub	[$rounds+$acc11],$acc11
-	ldub	[$rounds+$acc12],$acc12
-	and	$acc14,255,$acc14
-	ldub	[$rounds+$acc13],$acc13
-	and	$t0,255,$acc15
-	ldub	[$rounds+$acc14],$acc14		!
-
-		sll	$acc0,24,$acc0
-		xor	$acc3,$s0,$s0
-	ldub	[$rounds+$acc15],$acc15
-		sll	$acc1,16,$acc1
-		xor	$acc0,$s0,$s0
-	ldx	[%sp+$bias+$frame+0],%i7	! restore return address
-	fmovs	%f0,%f0
-		sll	$acc2,8,$acc2		!
-		xor	$acc1,$s0,$s0
-		sll	$acc4,24,$acc4
-		xor	$acc2,$s0,$s0
-		sll	$acc5,16,$acc5
-		xor	$acc7,$s1,$s1
-		sll	$acc6,8,$acc6
-		xor	$acc4,$s1,$s1
-		sll	$acc8,24,$acc8		!
-		xor	$acc5,$s1,$s1
-		sll	$acc9,16,$acc9
-		xor	$acc11,$s2,$s2
-		sll	$acc10,8,$acc10
-		xor	$acc6,$s1,$s1
-		sll	$acc12,24,$acc12
-		xor	$acc8,$s2,$s2
-		sll	$acc13,16,$acc13	!
-		xor	$acc9,$s2,$s2
-		sll	$acc14,8,$acc14
-		xor	$acc10,$s2,$s2
-		xor	$acc12,$acc14,$acc14
-		xor	$acc13,$s3,$s3
-		xor	$acc14,$s3,$s3
-		xor	$acc15,$s3,$s3
-
-	ret
-	restore
-.type	_sparcv9_AES_decrypt,#function
-.size	_sparcv9_AES_decrypt,(.-_sparcv9_AES_decrypt)
-
-.align	32
-.globl	AES_decrypt
-AES_decrypt:
-	or	%o0,%o1,%g1
-	andcc	%g1,3,%g0
-	bnz,pn	%xcc,.Lunaligned_dec
-	save	%sp,-$frame,%sp
-
-	ld	[%i0+0],%o0
-	ld	[%i0+4],%o1
-	ld	[%i0+8],%o2
-	ld	[%i0+12],%o3
-
-1:	call	.+8
-	add	%o7,AES_Td-1b,%o4
-	call	_sparcv9_AES_decrypt
-	mov	%i2,%o5
-
-	st	%o0,[%i1+0]
-	st	%o1,[%i1+4]
-	st	%o2,[%i1+8]
-	st	%o3,[%i1+12]
-
-	ret
-	restore
-
-.align	32
-.Lunaligned_dec:
-	ldub	[%i0+0],%l0
-	ldub	[%i0+1],%l1
-	ldub	[%i0+2],%l2
-
-	sll	%l0,24,%l0
-	ldub	[%i0+3],%l3
-	sll	%l1,16,%l1
-	ldub	[%i0+4],%l4
-	sll	%l2,8,%l2
-	or	%l1,%l0,%l0
-	ldub	[%i0+5],%l5
-	sll	%l4,24,%l4
-	or	%l3,%l2,%l2
-	ldub	[%i0+6],%l6
-	sll	%l5,16,%l5
-	or	%l0,%l2,%o0
-	ldub	[%i0+7],%l7
-
-	sll	%l6,8,%l6
-	or	%l5,%l4,%l4
-	ldub	[%i0+8],%l0
-	or	%l7,%l6,%l6
-	ldub	[%i0+9],%l1
-	or	%l4,%l6,%o1
-	ldub	[%i0+10],%l2
-
-	sll	%l0,24,%l0
-	ldub	[%i0+11],%l3
-	sll	%l1,16,%l1
-	ldub	[%i0+12],%l4
-	sll	%l2,8,%l2
-	or	%l1,%l0,%l0
-	ldub	[%i0+13],%l5
-	sll	%l4,24,%l4
-	or	%l3,%l2,%l2
-	ldub	[%i0+14],%l6
-	sll	%l5,16,%l5
-	or	%l0,%l2,%o2
-	ldub	[%i0+15],%l7
-
-	sll	%l6,8,%l6
-	or	%l5,%l4,%l4
-	or	%l7,%l6,%l6
-	or	%l4,%l6,%o3
-
-1:	call	.+8
-	add	%o7,AES_Td-1b,%o4
-	call	_sparcv9_AES_decrypt
-	mov	%i2,%o5
-
-	srl	%o0,24,%l0
-	srl	%o0,16,%l1
-	stb	%l0,[%i1+0]
-	srl	%o0,8,%l2
-	stb	%l1,[%i1+1]
-	stb	%l2,[%i1+2]
-	srl	%o1,24,%l4
-	stb	%o0,[%i1+3]
-
-	srl	%o1,16,%l5
-	stb	%l4,[%i1+4]
-	srl	%o1,8,%l6
-	stb	%l5,[%i1+5]
-	stb	%l6,[%i1+6]
-	srl	%o2,24,%l0
-	stb	%o1,[%i1+7]
-
-	srl	%o2,16,%l1
-	stb	%l0,[%i1+8]
-	srl	%o2,8,%l2
-	stb	%l1,[%i1+9]
-	stb	%l2,[%i1+10]
-	srl	%o3,24,%l4
-	stb	%o2,[%i1+11]
-
-	srl	%o3,16,%l5
-	stb	%l4,[%i1+12]
-	srl	%o3,8,%l6
-	stb	%l5,[%i1+13]
-	stb	%l6,[%i1+14]
-	stb	%o3,[%i1+15]
-
-	ret
-	restore
-.type	AES_decrypt,#function
-.size	AES_decrypt,(.-AES_decrypt)
-___
-
-# fmovs instructions substituting for FP nops were originally added
-# to meet specific instruction alignment requirements and so maximize
-# ILP. As UltraSPARC T1, a.k.a. Niagara, has a shared FPU, FP nops can
-# have an undesired effect, so just omit them and sacrifice a fraction
-# of a percent in performance...
-$code =~ s/fmovs.*$//gm;
-
-print $code;
-close STDOUT;	# ensure flush

+ 0 - 2819
drivers/builtin_openssl2/crypto/aes/asm/aes-x86_64.pl

@@ -1,2819 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# Version 2.1.
-#
-# aes-*-cbc benchmarks are improved by >70% [compared to gcc 3.3.2 on
-# Opteron 240 CPU] plus all the bells-n-whistles from the 32-bit version
-# [you'll notice a lot of resemblance], such as compressed S-boxes
-# in little-endian byte order, prefetch of these tables in CBC mode,
-# as well as avoiding L1 cache aliasing between stack frame and key
-# schedule and already mentioned tables, compressed Td4...
-#
-# Performance in number of cycles per processed byte for 128-bit key:
-#
-#		ECB encrypt	ECB decrypt	CBC large chunk
-# AMD64		33		41		13.0
-# EM64T		38		59		18.6(*)
-# Core 2	30		43		14.5(*)
-#
-# (*) with hyper-threading off
-
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-$verticalspin=1;	# unlike the 32-bit version, $verticalspin performs
-			# ~15% better on both AMD and Intel cores
-$speed_limit=512;	# see aes-586.pl for details
-
-$code=".text\n";
-
-$s0="%eax";
-$s1="%ebx";
-$s2="%ecx";
-$s3="%edx";
-$acc0="%esi";	$mask80="%rsi";
-$acc1="%edi";	$maskfe="%rdi";
-$acc2="%ebp";	$mask1b="%rbp";
-$inp="%r8";
-$out="%r9";
-$t0="%r10d";
-$t1="%r11d";
-$t2="%r12d";
-$rnds="%r13d";
-$sbox="%r14";
-$key="%r15";
-
-sub hi() { my $r=shift;	$r =~ s/%[er]([a-d])x/%\1h/;	$r; }
-sub lo() { my $r=shift;	$r =~ s/%[er]([a-d])x/%\1l/;
-			$r =~ s/%[er]([sd]i)/%\1l/;
-			$r =~ s/%(r[0-9]+)[d]?/%\1b/;	$r; }
-sub LO() { my $r=shift; $r =~ s/%r([a-z]+)/%e\1/;
-			$r =~ s/%r([0-9]+)/%r\1d/;	$r; }
-sub _data_word()
-{ my $i;
-    while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
-}
-sub data_word()
-{ my $i;
-  my $last=pop(@_);
-    $code.=".long\t";
-    while(defined($i=shift)) { $code.=sprintf"0x%08x,",$i; }
-    $code.=sprintf"0x%08x\n",$last;
-}
-
-sub data_byte()
-{ my $i;
-  my $last=pop(@_);
-    $code.=".byte\t";
-    while(defined($i=shift)) { $code.=sprintf"0x%02x,",$i&0xff; }
-    $code.=sprintf"0x%02x\n",$last&0xff;
-}
-
-sub encvert()
-{ my $t3="%r8d";	# zaps $inp!
-
-$code.=<<___;
-	# favor 3-way issue Opteron pipeline...
-	movzb	`&lo("$s0")`,$acc0
-	movzb	`&lo("$s1")`,$acc1
-	movzb	`&lo("$s2")`,$acc2
-	mov	0($sbox,$acc0,8),$t0
-	mov	0($sbox,$acc1,8),$t1
-	mov	0($sbox,$acc2,8),$t2
-
-	movzb	`&hi("$s1")`,$acc0
-	movzb	`&hi("$s2")`,$acc1
-	movzb	`&lo("$s3")`,$acc2
-	xor	3($sbox,$acc0,8),$t0
-	xor	3($sbox,$acc1,8),$t1
-	mov	0($sbox,$acc2,8),$t3
-
-	movzb	`&hi("$s3")`,$acc0
-	shr	\$16,$s2
-	movzb	`&hi("$s0")`,$acc2
-	xor	3($sbox,$acc0,8),$t2
-	shr	\$16,$s3
-	xor	3($sbox,$acc2,8),$t3
-
-	shr	\$16,$s1
-	lea	16($key),$key
-	shr	\$16,$s0
-
-	movzb	`&lo("$s2")`,$acc0
-	movzb	`&lo("$s3")`,$acc1
-	movzb	`&lo("$s0")`,$acc2
-	xor	2($sbox,$acc0,8),$t0
-	xor	2($sbox,$acc1,8),$t1
-	xor	2($sbox,$acc2,8),$t2
-
-	movzb	`&hi("$s3")`,$acc0
-	movzb	`&hi("$s0")`,$acc1
-	movzb	`&lo("$s1")`,$acc2
-	xor	1($sbox,$acc0,8),$t0
-	xor	1($sbox,$acc1,8),$t1
-	xor	2($sbox,$acc2,8),$t3
-
-	mov	12($key),$s3
-	movzb	`&hi("$s1")`,$acc1
-	movzb	`&hi("$s2")`,$acc2
-	mov	0($key),$s0
-	xor	1($sbox,$acc1,8),$t2
-	xor	1($sbox,$acc2,8),$t3
-
-	mov	4($key),$s1
-	mov	8($key),$s2
-	xor	$t0,$s0
-	xor	$t1,$s1
-	xor	$t2,$s2
-	xor	$t3,$s3
-___
-}
-
-sub enclastvert()
-{ my $t3="%r8d";	# zaps $inp!
-
-$code.=<<___;
-	movzb	`&lo("$s0")`,$acc0
-	movzb	`&lo("$s1")`,$acc1
-	movzb	`&lo("$s2")`,$acc2
-	movzb	2($sbox,$acc0,8),$t0
-	movzb	2($sbox,$acc1,8),$t1
-	movzb	2($sbox,$acc2,8),$t2
-
-	movzb	`&lo("$s3")`,$acc0
-	movzb	`&hi("$s1")`,$acc1
-	movzb	`&hi("$s2")`,$acc2
-	movzb	2($sbox,$acc0,8),$t3
-	mov	0($sbox,$acc1,8),$acc1	#$t0
-	mov	0($sbox,$acc2,8),$acc2	#$t1
-
-	and	\$0x0000ff00,$acc1
-	and	\$0x0000ff00,$acc2
-
-	xor	$acc1,$t0
-	xor	$acc2,$t1
-	shr	\$16,$s2
-
-	movzb	`&hi("$s3")`,$acc0
-	movzb	`&hi("$s0")`,$acc1
-	shr	\$16,$s3
-	mov	0($sbox,$acc0,8),$acc0	#$t2
-	mov	0($sbox,$acc1,8),$acc1	#$t3
-
-	and	\$0x0000ff00,$acc0
-	and	\$0x0000ff00,$acc1
-	shr	\$16,$s1
-	xor	$acc0,$t2
-	xor	$acc1,$t3
-	shr	\$16,$s0
-
-	movzb	`&lo("$s2")`,$acc0
-	movzb	`&lo("$s3")`,$acc1
-	movzb	`&lo("$s0")`,$acc2
-	mov	0($sbox,$acc0,8),$acc0	#$t0
-	mov	0($sbox,$acc1,8),$acc1	#$t1
-	mov	0($sbox,$acc2,8),$acc2	#$t2
-
-	and	\$0x00ff0000,$acc0
-	and	\$0x00ff0000,$acc1
-	and	\$0x00ff0000,$acc2
-
-	xor	$acc0,$t0
-	xor	$acc1,$t1
-	xor	$acc2,$t2
-
-	movzb	`&lo("$s1")`,$acc0
-	movzb	`&hi("$s3")`,$acc1
-	movzb	`&hi("$s0")`,$acc2
-	mov	0($sbox,$acc0,8),$acc0	#$t3
-	mov	2($sbox,$acc1,8),$acc1	#$t0
-	mov	2($sbox,$acc2,8),$acc2	#$t1
-
-	and	\$0x00ff0000,$acc0
-	and	\$0xff000000,$acc1
-	and	\$0xff000000,$acc2
-
-	xor	$acc0,$t3
-	xor	$acc1,$t0
-	xor	$acc2,$t1
-
-	movzb	`&hi("$s1")`,$acc0
-	movzb	`&hi("$s2")`,$acc1
-	mov	16+12($key),$s3
-	mov	2($sbox,$acc0,8),$acc0	#$t2
-	mov	2($sbox,$acc1,8),$acc1	#$t3
-	mov	16+0($key),$s0
-
-	and	\$0xff000000,$acc0
-	and	\$0xff000000,$acc1
-
-	xor	$acc0,$t2
-	xor	$acc1,$t3
-
-	mov	16+4($key),$s1
-	mov	16+8($key),$s2
-	xor	$t0,$s0
-	xor	$t1,$s1
-	xor	$t2,$s2
-	xor	$t3,$s3
-___
-}
-
-sub encstep()
-{ my ($i,@s) = @_;
-  my $tmp0=$acc0;
-  my $tmp1=$acc1;
-  my $tmp2=$acc2;
-  my $out=($t0,$t1,$t2,$s[0])[$i];
-
-	if ($i==3) {
-		$tmp0=$s[1];
-		$tmp1=$s[2];
-		$tmp2=$s[3];
-	}
-	$code.="	movzb	".&lo($s[0]).",$out\n";
-	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
-	$code.="	lea	16($key),$key\n"	if ($i==0);
-
-	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
-	$code.="	mov	0($sbox,$out,8),$out\n";
-
-	$code.="	shr	\$16,$tmp1\n";
-	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
-	$code.="	xor	3($sbox,$tmp0,8),$out\n";
-
-	$code.="	movzb	".&lo($tmp1).",$tmp1\n";
-	$code.="	shr	\$24,$tmp2\n";
-	$code.="	xor	4*$i($key),$out\n";
-
-	$code.="	xor	2($sbox,$tmp1,8),$out\n";
-	$code.="	xor	1($sbox,$tmp2,8),$out\n";
-
-	$code.="	mov	$t0,$s[1]\n"		if ($i==3);
-	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
-	$code.="	mov	$t2,$s[3]\n"		if ($i==3);
-	$code.="\n";
-}
-
-sub enclast()
-{ my ($i,@s)=@_;
-  my $tmp0=$acc0;
-  my $tmp1=$acc1;
-  my $tmp2=$acc2;
-  my $out=($t0,$t1,$t2,$s[0])[$i];
-
-	if ($i==3) {
-		$tmp0=$s[1];
-		$tmp1=$s[2];
-		$tmp2=$s[3];
-	}
-	$code.="	movzb	".&lo($s[0]).",$out\n";
-	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
-
-	$code.="	mov	2($sbox,$out,8),$out\n";
-	$code.="	shr	\$16,$tmp1\n";
-	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
-
-	$code.="	and	\$0x000000ff,$out\n";
-	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
-	$code.="	movzb	".&lo($tmp1).",$tmp1\n";
-	$code.="	shr	\$24,$tmp2\n";
-
-	$code.="	mov	0($sbox,$tmp0,8),$tmp0\n";
-	$code.="	mov	0($sbox,$tmp1,8),$tmp1\n";
-	$code.="	mov	2($sbox,$tmp2,8),$tmp2\n";
-
-	$code.="	and	\$0x0000ff00,$tmp0\n";
-	$code.="	and	\$0x00ff0000,$tmp1\n";
-	$code.="	and	\$0xff000000,$tmp2\n";
-
-	$code.="	xor	$tmp0,$out\n";
-	$code.="	mov	$t0,$s[1]\n"		if ($i==3);
-	$code.="	xor	$tmp1,$out\n";
-	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
-	$code.="	xor	$tmp2,$out\n";
-	$code.="	mov	$t2,$s[3]\n"		if ($i==3);
-	$code.="\n";
-}
-
-$code.=<<___;
-.type	_x86_64_AES_encrypt,\@abi-omnipotent
-.align	16
-_x86_64_AES_encrypt:
-	xor	0($key),$s0			# xor with key
-	xor	4($key),$s1
-	xor	8($key),$s2
-	xor	12($key),$s3
-
-	mov	240($key),$rnds			# load key->rounds
-	sub	\$1,$rnds
-	jmp	.Lenc_loop
-.align	16
-.Lenc_loop:
-___
-	if ($verticalspin) { &encvert(); }
-	else {	&encstep(0,$s0,$s1,$s2,$s3);
-		&encstep(1,$s1,$s2,$s3,$s0);
-		&encstep(2,$s2,$s3,$s0,$s1);
-		&encstep(3,$s3,$s0,$s1,$s2);
-	}
-$code.=<<___;
-	sub	\$1,$rnds
-	jnz	.Lenc_loop
-___
-	if ($verticalspin) { &enclastvert(); }
-	else {	&enclast(0,$s0,$s1,$s2,$s3);
-		&enclast(1,$s1,$s2,$s3,$s0);
-		&enclast(2,$s2,$s3,$s0,$s1);
-		&enclast(3,$s3,$s0,$s1,$s2);
-		$code.=<<___;
-		xor	16+0($key),$s0		# xor with key
-		xor	16+4($key),$s1
-		xor	16+8($key),$s2
-		xor	16+12($key),$s3
-___
-	}
-$code.=<<___;
-	.byte	0xf3,0xc3			# rep ret
-.size	_x86_64_AES_encrypt,.-_x86_64_AES_encrypt
-___
-
-# it's possible to implement this by shifting tN by 8, filling the least
-# significant byte with a byte load and finally bswap-ing at the end,
-# but such a partial register load kills Core 2...
-sub enccompactvert()
-{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
-
-$code.=<<___;
-	movzb	`&lo("$s0")`,$t0
-	movzb	`&lo("$s1")`,$t1
-	movzb	`&lo("$s2")`,$t2
-	movzb	($sbox,$t0,1),$t0
-	movzb	($sbox,$t1,1),$t1
-	movzb	($sbox,$t2,1),$t2
-
-	movzb	`&lo("$s3")`,$t3
-	movzb	`&hi("$s1")`,$acc0
-	movzb	`&hi("$s2")`,$acc1
-	movzb	($sbox,$t3,1),$t3
-	movzb	($sbox,$acc0,1),$t4	#$t0
-	movzb	($sbox,$acc1,1),$t5	#$t1
-
-	movzb	`&hi("$s3")`,$acc2
-	movzb	`&hi("$s0")`,$acc0
-	shr	\$16,$s2
-	movzb	($sbox,$acc2,1),$acc2	#$t2
-	movzb	($sbox,$acc0,1),$acc0	#$t3
-	shr	\$16,$s3
-
-	movzb	`&lo("$s2")`,$acc1
-	shl	\$8,$t4
-	shl	\$8,$t5
-	movzb	($sbox,$acc1,1),$acc1	#$t0
-	xor	$t4,$t0
-	xor	$t5,$t1
-
-	movzb	`&lo("$s3")`,$t4
-	shr	\$16,$s0
-	shr	\$16,$s1
-	movzb	`&lo("$s0")`,$t5
-	shl	\$8,$acc2
-	shl	\$8,$acc0
-	movzb	($sbox,$t4,1),$t4	#$t1
-	movzb	($sbox,$t5,1),$t5	#$t2
-	xor	$acc2,$t2
-	xor	$acc0,$t3
-
-	movzb	`&lo("$s1")`,$acc2
-	movzb	`&hi("$s3")`,$acc0
-	shl	\$16,$acc1
-	movzb	($sbox,$acc2,1),$acc2	#$t3
-	movzb	($sbox,$acc0,1),$acc0	#$t0
-	xor	$acc1,$t0
-
-	movzb	`&hi("$s0")`,$acc1
-	shr	\$8,$s2
-	shr	\$8,$s1
-	movzb	($sbox,$acc1,1),$acc1	#$t1
-	movzb	($sbox,$s2,1),$s3	#$t3
-	movzb	($sbox,$s1,1),$s2	#$t2
-	shl	\$16,$t4
-	shl	\$16,$t5
-	shl	\$16,$acc2
-	xor	$t4,$t1
-	xor	$t5,$t2
-	xor	$acc2,$t3
-
-	shl	\$24,$acc0
-	shl	\$24,$acc1
-	shl	\$24,$s3
-	xor	$acc0,$t0
-	shl	\$24,$s2
-	xor	$acc1,$t1
-	mov	$t0,$s0
-	mov	$t1,$s1
-	xor	$t2,$s2
-	xor	$t3,$s3
-___
-}
-
-sub enctransform_ref()
-{ my $sn = shift;
-  my ($acc,$r2,$tmp)=("%r8d","%r9d","%r13d");
-
-$code.=<<___;
-	mov	$sn,$acc
-	and	\$0x80808080,$acc
-	mov	$acc,$tmp
-	shr	\$7,$tmp
-	lea	($sn,$sn),$r2
-	sub	$tmp,$acc
-	and	\$0xfefefefe,$r2
-	and	\$0x1b1b1b1b,$acc
-	mov	$sn,$tmp
-	xor	$acc,$r2
-
-	xor	$r2,$sn
-	rol	\$24,$sn
-	xor	$r2,$sn
-	ror	\$16,$tmp
-	xor	$tmp,$sn
-	ror	\$8,$tmp
-	xor	$tmp,$sn
-___
-}
-
-# unlike the decrypt case, it does not pay off to parallelize enctransform
-sub enctransform()
-{ my ($t3,$r20,$r21)=($acc2,"%r8d","%r9d");
-
-$code.=<<___;
-	mov	$s0,$acc0
-	mov	$s1,$acc1
-	and	\$0x80808080,$acc0
-	and	\$0x80808080,$acc1
-	mov	$acc0,$t0
-	mov	$acc1,$t1
-	shr	\$7,$t0
-	lea	($s0,$s0),$r20
-	shr	\$7,$t1
-	lea	($s1,$s1),$r21
-	sub	$t0,$acc0
-	sub	$t1,$acc1
-	and	\$0xfefefefe,$r20
-	and	\$0xfefefefe,$r21
-	and	\$0x1b1b1b1b,$acc0
-	and	\$0x1b1b1b1b,$acc1
-	mov	$s0,$t0
-	mov	$s1,$t1
-	xor	$acc0,$r20
-	xor	$acc1,$r21
-
-	xor	$r20,$s0
-	xor	$r21,$s1
-	 mov	$s2,$acc0
-	 mov	$s3,$acc1
-	rol	\$24,$s0
-	rol	\$24,$s1
-	 and	\$0x80808080,$acc0
-	 and	\$0x80808080,$acc1
-	xor	$r20,$s0
-	xor	$r21,$s1
-	 mov	$acc0,$t2
-	 mov	$acc1,$t3
-	ror	\$16,$t0
-	ror	\$16,$t1
-	 shr	\$7,$t2
-	 lea	($s2,$s2),$r20
-	xor	$t0,$s0
-	xor	$t1,$s1
-	 shr	\$7,$t3
-	 lea	($s3,$s3),$r21
-	ror	\$8,$t0
-	ror	\$8,$t1
-	 sub	$t2,$acc0
-	 sub	$t3,$acc1
-	xor	$t0,$s0
-	xor	$t1,$s1
-
-	and	\$0xfefefefe,$r20
-	and	\$0xfefefefe,$r21
-	and	\$0x1b1b1b1b,$acc0
-	and	\$0x1b1b1b1b,$acc1
-	mov	$s2,$t2
-	mov	$s3,$t3
-	xor	$acc0,$r20
-	xor	$acc1,$r21
-
-	xor	$r20,$s2
-	xor	$r21,$s3
-	rol	\$24,$s2
-	rol	\$24,$s3
-	xor	$r20,$s2
-	xor	$r21,$s3
-	mov	0($sbox),$acc0			# prefetch Te4
-	ror	\$16,$t2
-	ror	\$16,$t3
-	mov	64($sbox),$acc1
-	xor	$t2,$s2
-	xor	$t3,$s3
-	mov	128($sbox),$r20
-	ror	\$8,$t2
-	ror	\$8,$t3
-	mov	192($sbox),$r21
-	xor	$t2,$s2
-	xor	$t3,$s3
-___
-}
-
-$code.=<<___;
-.type	_x86_64_AES_encrypt_compact,\@abi-omnipotent
-.align	16
-_x86_64_AES_encrypt_compact:
-	lea	128($sbox),$inp			# size optimization
-	mov	0-128($inp),$acc1		# prefetch Te4
-	mov	32-128($inp),$acc2
-	mov	64-128($inp),$t0
-	mov	96-128($inp),$t1
-	mov	128-128($inp),$acc1
-	mov	160-128($inp),$acc2
-	mov	192-128($inp),$t0
-	mov	224-128($inp),$t1
-	jmp	.Lenc_loop_compact
-.align	16
-.Lenc_loop_compact:
-		xor	0($key),$s0		# xor with key
-		xor	4($key),$s1
-		xor	8($key),$s2
-		xor	12($key),$s3
-		lea	16($key),$key
-___
-		&enccompactvert();
-$code.=<<___;
-		cmp	16(%rsp),$key
-		je	.Lenc_compact_done
-___
-		&enctransform();
-$code.=<<___;
-	jmp	.Lenc_loop_compact
-.align	16
-.Lenc_compact_done:
-	xor	0($key),$s0
-	xor	4($key),$s1
-	xor	8($key),$s2
-	xor	12($key),$s3
-	.byte	0xf3,0xc3			# rep ret
-.size	_x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
-___
-
-# void AES_encrypt (const void *inp,void *out,const AES_KEY *key);
-$code.=<<___;
-.globl	AES_encrypt
-.type	AES_encrypt,\@function,3
-.align	16
-.globl	asm_AES_encrypt
-.hidden	asm_AES_encrypt
-asm_AES_encrypt:
-AES_encrypt:
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-
-	# allocate frame "above" key schedule
-	mov	%rsp,%r10
-	lea	-63(%rdx),%rcx	# %rdx is key argument
-	and	\$-64,%rsp
-	sub	%rsp,%rcx
-	neg	%rcx
-	and	\$0x3c0,%rcx
-	sub	%rcx,%rsp
-	sub	\$32,%rsp
-
-	mov	%rsi,16(%rsp)	# save out
-	mov	%r10,24(%rsp)	# save real stack pointer
-.Lenc_prologue:
-
-	mov	%rdx,$key
-	mov	240($key),$rnds	# load rounds
-
-	mov	0(%rdi),$s0	# load input vector
-	mov	4(%rdi),$s1
-	mov	8(%rdi),$s2
-	mov	12(%rdi),$s3
-
-	shl	\$4,$rnds
-	lea	($key,$rnds),%rbp
-	mov	$key,(%rsp)	# key schedule
-	mov	%rbp,8(%rsp)	# end of key schedule
-
-	# pick Te4 copy which can't "overlap" with stack frame or key schedule
-	lea	.LAES_Te+2048(%rip),$sbox
-	lea	768(%rsp),%rbp
-	sub	$sbox,%rbp
-	and	\$0x300,%rbp
-	lea	($sbox,%rbp),$sbox
-
-	call	_x86_64_AES_encrypt_compact
-
-	mov	16(%rsp),$out	# restore out
-	mov	24(%rsp),%rsi	# restore saved stack pointer
-	mov	$s0,0($out)	# write output vector
-	mov	$s1,4($out)
-	mov	$s2,8($out)
-	mov	$s3,12($out)
-
-	mov	(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Lenc_epilogue:
-	ret
-.size	AES_encrypt,.-AES_encrypt
-___
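As a sketch of how this entry point was driven from C (assuming the low-level <openssl/aes.h> API of the 1.0.x tree, which these perlasm modules implement; error handling pared down):

    #include <stdio.h>
    #include <openssl/aes.h>

    /* One-block ECB encrypt through AES_encrypt, as declared above. */
    int main(void)
    {
        unsigned char key[16] = {0};               /* 128-bit all-zero key */
        unsigned char in[16]  = "0123456789abcde"; /* exactly one block    */
        unsigned char out[16];
        AES_KEY ks;

        if (AES_set_encrypt_key(key, 128, &ks) != 0)
            return 1;                              /* bad bits/pointer     */
        AES_encrypt(in, out, &ks);

        for (int i = 0; i < 16; i++)
            printf("%02x", out[i]);
        printf("\n");
        return 0;
    }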
-
-#------------------------------------------------------------------#
-
-sub decvert()
-{ my $t3="%r8d";	# zaps $inp!
-
-$code.=<<___;
-	# favor 3-way issue Opteron pipeline...
-	movzb	`&lo("$s0")`,$acc0
-	movzb	`&lo("$s1")`,$acc1
-	movzb	`&lo("$s2")`,$acc2
-	mov	0($sbox,$acc0,8),$t0
-	mov	0($sbox,$acc1,8),$t1
-	mov	0($sbox,$acc2,8),$t2
-
-	movzb	`&hi("$s3")`,$acc0
-	movzb	`&hi("$s0")`,$acc1
-	movzb	`&lo("$s3")`,$acc2
-	xor	3($sbox,$acc0,8),$t0
-	xor	3($sbox,$acc1,8),$t1
-	mov	0($sbox,$acc2,8),$t3
-
-	movzb	`&hi("$s1")`,$acc0
-	shr	\$16,$s0
-	movzb	`&hi("$s2")`,$acc2
-	xor	3($sbox,$acc0,8),$t2
-	shr	\$16,$s3
-	xor	3($sbox,$acc2,8),$t3
-
-	shr	\$16,$s1
-	lea	16($key),$key
-	shr	\$16,$s2
-
-	movzb	`&lo("$s2")`,$acc0
-	movzb	`&lo("$s3")`,$acc1
-	movzb	`&lo("$s0")`,$acc2
-	xor	2($sbox,$acc0,8),$t0
-	xor	2($sbox,$acc1,8),$t1
-	xor	2($sbox,$acc2,8),$t2
-
-	movzb	`&hi("$s1")`,$acc0
-	movzb	`&hi("$s2")`,$acc1
-	movzb	`&lo("$s1")`,$acc2
-	xor	1($sbox,$acc0,8),$t0
-	xor	1($sbox,$acc1,8),$t1
-	xor	2($sbox,$acc2,8),$t3
-
-	movzb	`&hi("$s3")`,$acc0
-	mov	12($key),$s3
-	movzb	`&hi("$s0")`,$acc2
-	xor	1($sbox,$acc0,8),$t2
-	mov	0($key),$s0
-	xor	1($sbox,$acc2,8),$t3
-
-	xor	$t0,$s0
-	mov	4($key),$s1
-	mov	8($key),$s2
-	xor	$t2,$s2
-	xor	$t1,$s1
-	xor	$t3,$s3
-___
-}
-
-sub declastvert()
-{ my $t3="%r8d";	# zaps $inp!
-
-$code.=<<___;
-	lea	2048($sbox),$sbox	# size optimization
-	movzb	`&lo("$s0")`,$acc0
-	movzb	`&lo("$s1")`,$acc1
-	movzb	`&lo("$s2")`,$acc2
-	movzb	($sbox,$acc0,1),$t0
-	movzb	($sbox,$acc1,1),$t1
-	movzb	($sbox,$acc2,1),$t2
-
-	movzb	`&lo("$s3")`,$acc0
-	movzb	`&hi("$s3")`,$acc1
-	movzb	`&hi("$s0")`,$acc2
-	movzb	($sbox,$acc0,1),$t3
-	movzb	($sbox,$acc1,1),$acc1	#$t0
-	movzb	($sbox,$acc2,1),$acc2	#$t1
-
-	shl	\$8,$acc1
-	shl	\$8,$acc2
-
-	xor	$acc1,$t0
-	xor	$acc2,$t1
-	shr	\$16,$s3
-
-	movzb	`&hi("$s1")`,$acc0
-	movzb	`&hi("$s2")`,$acc1
-	shr	\$16,$s0
-	movzb	($sbox,$acc0,1),$acc0	#$t2
-	movzb	($sbox,$acc1,1),$acc1	#$t3
-
-	shl	\$8,$acc0
-	shl	\$8,$acc1
-	shr	\$16,$s1
-	xor	$acc0,$t2
-	xor	$acc1,$t3
-	shr	\$16,$s2
-
-	movzb	`&lo("$s2")`,$acc0
-	movzb	`&lo("$s3")`,$acc1
-	movzb	`&lo("$s0")`,$acc2
-	movzb	($sbox,$acc0,1),$acc0	#$t0
-	movzb	($sbox,$acc1,1),$acc1	#$t1
-	movzb	($sbox,$acc2,1),$acc2	#$t2
-
-	shl	\$16,$acc0
-	shl	\$16,$acc1
-	shl	\$16,$acc2
-
-	xor	$acc0,$t0
-	xor	$acc1,$t1
-	xor	$acc2,$t2
-
-	movzb	`&lo("$s1")`,$acc0
-	movzb	`&hi("$s1")`,$acc1
-	movzb	`&hi("$s2")`,$acc2
-	movzb	($sbox,$acc0,1),$acc0	#$t3
-	movzb	($sbox,$acc1,1),$acc1	#$t0
-	movzb	($sbox,$acc2,1),$acc2	#$t1
-
-	shl	\$16,$acc0
-	shl	\$24,$acc1
-	shl	\$24,$acc2
-
-	xor	$acc0,$t3
-	xor	$acc1,$t0
-	xor	$acc2,$t1
-
-	movzb	`&hi("$s3")`,$acc0
-	movzb	`&hi("$s0")`,$acc1
-	mov	16+12($key),$s3
-	movzb	($sbox,$acc0,1),$acc0	#$t2
-	movzb	($sbox,$acc1,1),$acc1	#$t3
-	mov	16+0($key),$s0
-
-	shl	\$24,$acc0
-	shl	\$24,$acc1
-
-	xor	$acc0,$t2
-	xor	$acc1,$t3
-
-	mov	16+4($key),$s1
-	mov	16+8($key),$s2
-	lea	-2048($sbox),$sbox
-	xor	$t0,$s0
-	xor	$t1,$s1
-	xor	$t2,$s2
-	xor	$t3,$s3
-___
-}
-
-sub decstep()
-{ my ($i,@s) = @_;
-  my $tmp0=$acc0;
-  my $tmp1=$acc1;
-  my $tmp2=$acc2;
-  my $out=($t0,$t1,$t2,$s[0])[$i];
-
-	$code.="	mov	$s[0],$out\n"		if ($i!=3);
-			$tmp1=$s[2]			if ($i==3);
-	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
-	$code.="	and	\$0xFF,$out\n";
-
-	$code.="	mov	0($sbox,$out,8),$out\n";
-	$code.="	shr	\$16,$tmp1\n";
-			$tmp2=$s[3]			if ($i==3);
-	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
-
-			$tmp0=$s[1]			if ($i==3);
-	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
-	$code.="	and	\$0xFF,$tmp1\n";
-	$code.="	shr	\$24,$tmp2\n";
-
-	$code.="	xor	3($sbox,$tmp0,8),$out\n";
-	$code.="	xor	2($sbox,$tmp1,8),$out\n";
-	$code.="	xor	1($sbox,$tmp2,8),$out\n";
-
-	$code.="	mov	$t2,$s[1]\n"		if ($i==3);
-	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
-	$code.="	mov	$t0,$s[3]\n"		if ($i==3);
-	$code.="\n";
-}
-
-sub declast()
-{ my ($i,@s)=@_;
-  my $tmp0=$acc0;
-  my $tmp1=$acc1;
-  my $tmp2=$acc2;
-  my $out=($t0,$t1,$t2,$s[0])[$i];
-
-	$code.="	mov	$s[0],$out\n"		if ($i!=3);
-			$tmp1=$s[2]			if ($i==3);
-	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
-	$code.="	and	\$0xFF,$out\n";
-
-	$code.="	movzb	2048($sbox,$out,1),$out\n";
-	$code.="	shr	\$16,$tmp1\n";
-			$tmp2=$s[3]			if ($i==3);
-	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
-
-			$tmp0=$s[1]			if ($i==3);
-	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
-	$code.="	and	\$0xFF,$tmp1\n";
-	$code.="	shr	\$24,$tmp2\n";
-
-	$code.="	movzb	2048($sbox,$tmp0,1),$tmp0\n";
-	$code.="	movzb	2048($sbox,$tmp1,1),$tmp1\n";
-	$code.="	movzb	2048($sbox,$tmp2,1),$tmp2\n";
-
-	$code.="	shl	\$8,$tmp0\n";
-	$code.="	shl	\$16,$tmp1\n";
-	$code.="	shl	\$24,$tmp2\n";
-
-	$code.="	xor	$tmp0,$out\n";
-	$code.="	mov	$t2,$s[1]\n"		if ($i==3);
-	$code.="	xor	$tmp1,$out\n";
-	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
-	$code.="	xor	$tmp2,$out\n";
-	$code.="	mov	$t0,$s[3]\n"		if ($i==3);
-	$code.="\n";
-}
-
-$code.=<<___;
-.type	_x86_64_AES_decrypt,\@abi-omnipotent
-.align	16
-_x86_64_AES_decrypt:
-	xor	0($key),$s0			# xor with key
-	xor	4($key),$s1
-	xor	8($key),$s2
-	xor	12($key),$s3
-
-	mov	240($key),$rnds			# load key->rounds
-	sub	\$1,$rnds
-	jmp	.Ldec_loop
-.align	16
-.Ldec_loop:
-___
-	if ($verticalspin) { &decvert(); }
-	else {	&decstep(0,$s0,$s3,$s2,$s1);
-		&decstep(1,$s1,$s0,$s3,$s2);
-		&decstep(2,$s2,$s1,$s0,$s3);
-		&decstep(3,$s3,$s2,$s1,$s0);
-		$code.=<<___;
-		lea	16($key),$key
-		xor	0($key),$s0			# xor with key
-		xor	4($key),$s1
-		xor	8($key),$s2
-		xor	12($key),$s3
-___
-	}
-$code.=<<___;
-	sub	\$1,$rnds
-	jnz	.Ldec_loop
-___
-	if ($verticalspin) { &declastvert(); }
-	else {	&declast(0,$s0,$s3,$s2,$s1);
-		&declast(1,$s1,$s0,$s3,$s2);
-		&declast(2,$s2,$s1,$s0,$s3);
-		&declast(3,$s3,$s2,$s1,$s0);
-		$code.=<<___;
-		xor	16+0($key),$s0			# xor with key
-		xor	16+4($key),$s1
-		xor	16+8($key),$s2
-		xor	16+12($key),$s3
-___
-	}
-$code.=<<___;
-	.byte	0xf3,0xc3			# rep ret
-.size	_x86_64_AES_decrypt,.-_x86_64_AES_decrypt
-___
-
-sub deccompactvert()
-{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
-
-$code.=<<___;
-	movzb	`&lo("$s0")`,$t0
-	movzb	`&lo("$s1")`,$t1
-	movzb	`&lo("$s2")`,$t2
-	movzb	($sbox,$t0,1),$t0
-	movzb	($sbox,$t1,1),$t1
-	movzb	($sbox,$t2,1),$t2
-
-	movzb	`&lo("$s3")`,$t3
-	movzb	`&hi("$s3")`,$acc0
-	movzb	`&hi("$s0")`,$acc1
-	movzb	($sbox,$t3,1),$t3
-	movzb	($sbox,$acc0,1),$t4	#$t0
-	movzb	($sbox,$acc1,1),$t5	#$t1
-
-	movzb	`&hi("$s1")`,$acc2
-	movzb	`&hi("$s2")`,$acc0
-	shr	\$16,$s2
-	movzb	($sbox,$acc2,1),$acc2	#$t2
-	movzb	($sbox,$acc0,1),$acc0	#$t3
-	shr	\$16,$s3
-
-	movzb	`&lo("$s2")`,$acc1
-	shl	\$8,$t4
-	shl	\$8,$t5
-	movzb	($sbox,$acc1,1),$acc1	#$t0
-	xor	$t4,$t0
-	xor	$t5,$t1
-
-	movzb	`&lo("$s3")`,$t4
-	shr	\$16,$s0
-	shr	\$16,$s1
-	movzb	`&lo("$s0")`,$t5
-	shl	\$8,$acc2
-	shl	\$8,$acc0
-	movzb	($sbox,$t4,1),$t4	#$t1
-	movzb	($sbox,$t5,1),$t5	#$t2
-	xor	$acc2,$t2
-	xor	$acc0,$t3
-
-	movzb	`&lo("$s1")`,$acc2
-	movzb	`&hi("$s1")`,$acc0
-	shl	\$16,$acc1
-	movzb	($sbox,$acc2,1),$acc2	#$t3
-	movzb	($sbox,$acc0,1),$acc0	#$t0
-	xor	$acc1,$t0
-
-	movzb	`&hi("$s2")`,$acc1
-	shl	\$16,$t4
-	shl	\$16,$t5
-	movzb	($sbox,$acc1,1),$s1	#$t1
-	xor	$t4,$t1
-	xor	$t5,$t2
-
-	movzb	`&hi("$s3")`,$acc1
-	shr	\$8,$s0
-	shl	\$16,$acc2
-	movzb	($sbox,$acc1,1),$s2	#$t2
-	movzb	($sbox,$s0,1),$s3	#$t3
-	xor	$acc2,$t3
-
-	shl	\$24,$acc0
-	shl	\$24,$s1
-	shl	\$24,$s2
-	xor	$acc0,$t0
-	shl	\$24,$s3
-	xor	$t1,$s1
-	mov	$t0,$s0
-	xor	$t2,$s2
-	xor	$t3,$s3
-___
-}
-
-# parallelized version! input is a pair of 64-bit values: %rax=s1.s0
-# and %rcx=s3.s2, output is four 32-bit values in %eax=s0, %ebx=s1,
-# %ecx=s2 and %edx=s3.
-sub dectransform()
-{ my ($tp10,$tp20,$tp40,$tp80,$acc0)=("%rax","%r8", "%r9", "%r10","%rbx");
-  my ($tp18,$tp28,$tp48,$tp88,$acc8)=("%rcx","%r11","%r12","%r13","%rdx");
-  my $prefetch = shift;
-
-$code.=<<___;
-	mov	$tp10,$acc0
-	mov	$tp18,$acc8
-	and	$mask80,$acc0
-	and	$mask80,$acc8
-	mov	$acc0,$tp40
-	mov	$acc8,$tp48
-	shr	\$7,$tp40
-	lea	($tp10,$tp10),$tp20
-	shr	\$7,$tp48
-	lea	($tp18,$tp18),$tp28
-	sub	$tp40,$acc0
-	sub	$tp48,$acc8
-	and	$maskfe,$tp20
-	and	$maskfe,$tp28
-	and	$mask1b,$acc0
-	and	$mask1b,$acc8
-	xor	$tp20,$acc0
-	xor	$tp28,$acc8
-	mov	$acc0,$tp20
-	mov	$acc8,$tp28
-
-	and	$mask80,$acc0
-	and	$mask80,$acc8
-	mov	$acc0,$tp80
-	mov	$acc8,$tp88
-	shr	\$7,$tp80
-	lea	($tp20,$tp20),$tp40
-	shr	\$7,$tp88
-	lea	($tp28,$tp28),$tp48
-	sub	$tp80,$acc0
-	sub	$tp88,$acc8
-	and	$maskfe,$tp40
-	and	$maskfe,$tp48
-	and	$mask1b,$acc0
-	and	$mask1b,$acc8
-	xor	$tp40,$acc0
-	xor	$tp48,$acc8
-	mov	$acc0,$tp40
-	mov	$acc8,$tp48
-
-	and	$mask80,$acc0
-	and	$mask80,$acc8
-	mov	$acc0,$tp80
-	mov	$acc8,$tp88
-	shr	\$7,$tp80
-	 xor	$tp10,$tp20		# tp2^=tp1
-	shr	\$7,$tp88
-	 xor	$tp18,$tp28		# tp2^=tp1
-	sub	$tp80,$acc0
-	sub	$tp88,$acc8
-	lea	($tp40,$tp40),$tp80
-	lea	($tp48,$tp48),$tp88
-	 xor	$tp10,$tp40		# tp4^=tp1
-	 xor	$tp18,$tp48		# tp4^=tp1
-	and	$maskfe,$tp80
-	and	$maskfe,$tp88
-	and	$mask1b,$acc0
-	and	$mask1b,$acc8
-	xor	$acc0,$tp80
-	xor	$acc8,$tp88
-
-	xor	$tp80,$tp10		# tp1^=tp8
-	xor	$tp88,$tp18		# tp1^=tp8
-	xor	$tp80,$tp20		# tp2^tp1^=tp8
-	xor	$tp88,$tp28		# tp2^tp1^=tp8
-	mov	$tp10,$acc0
-	mov	$tp18,$acc8
-	xor	$tp80,$tp40		# tp4^tp1^=tp8
-	xor	$tp88,$tp48		# tp4^tp1^=tp8
-	shr	\$32,$acc0
-	shr	\$32,$acc8
-	xor	$tp20,$tp80		# tp8^=tp8^tp2^tp1=tp2^tp1
-	xor	$tp28,$tp88		# tp8^=tp8^tp2^tp1=tp2^tp1
-	rol	\$8,`&LO("$tp10")`	# ROTATE(tp1^tp8,8)
-	rol	\$8,`&LO("$tp18")`	# ROTATE(tp1^tp8,8)
-	xor	$tp40,$tp80		# tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
-	xor	$tp48,$tp88		# tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
-
-	rol	\$8,`&LO("$acc0")`	# ROTATE(tp1^tp8,8)
-	rol	\$8,`&LO("$acc8")`	# ROTATE(tp1^tp8,8)
-	xor	`&LO("$tp80")`,`&LO("$tp10")`
-	xor	`&LO("$tp88")`,`&LO("$tp18")`
-	shr	\$32,$tp80
-	shr	\$32,$tp88
-	xor	`&LO("$tp80")`,`&LO("$acc0")`
-	xor	`&LO("$tp88")`,`&LO("$acc8")`
-
-	mov	$tp20,$tp80
-	mov	$tp28,$tp88
-	shr	\$32,$tp80
-	shr	\$32,$tp88
-	rol	\$24,`&LO("$tp20")`	# ROTATE(tp2^tp1^tp8,24)
-	rol	\$24,`&LO("$tp28")`	# ROTATE(tp2^tp1^tp8,24)
-	rol	\$24,`&LO("$tp80")`	# ROTATE(tp2^tp1^tp8,24)
-	rol	\$24,`&LO("$tp88")`	# ROTATE(tp2^tp1^tp8,24)
-	xor	`&LO("$tp20")`,`&LO("$tp10")`
-	xor	`&LO("$tp28")`,`&LO("$tp18")`
-	mov	$tp40,$tp20
-	mov	$tp48,$tp28
-	xor	`&LO("$tp80")`,`&LO("$acc0")`
-	xor	`&LO("$tp88")`,`&LO("$acc8")`
-
-	`"mov	0($sbox),$mask80"	if ($prefetch)`
-	shr	\$32,$tp20
-	shr	\$32,$tp28
-	`"mov	64($sbox),$maskfe"	if ($prefetch)`
-	rol	\$16,`&LO("$tp40")`	# ROTATE(tp4^tp1^tp8,16)
-	rol	\$16,`&LO("$tp48")`	# ROTATE(tp4^tp1^tp8,16)
-	`"mov	128($sbox),$mask1b"	if ($prefetch)`
-	rol	\$16,`&LO("$tp20")`	# ROTATE(tp4^tp1^tp8,16)
-	rol	\$16,`&LO("$tp28")`	# ROTATE(tp4^tp1^tp8,16)
-	`"mov	192($sbox),$tp80"	if ($prefetch)`
-	xor	`&LO("$tp40")`,`&LO("$tp10")`
-	xor	`&LO("$tp48")`,`&LO("$tp18")`
-	`"mov	256($sbox),$tp88"	if ($prefetch)`
-	xor	`&LO("$tp20")`,`&LO("$acc0")`
-	xor	`&LO("$tp28")`,`&LO("$acc8")`
-___
-}
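The mask dance above is the classic branch-free GF(2^8) doubling (xtime) applied to packed bytes; dectransform runs it on two 64-bit registers at once, covering all 16 state bytes. A hedged C sketch of one 64-bit lane (names illustrative, not from the source):

    #include <stdint.h>

    /* xtime on eight packed bytes: for each byte b, (b << 1) ^ 0x1b when
     * b's top bit is set, with no bit leaking across byte lanes. */
    static uint64_t xtime8(uint64_t x)
    {
        uint64_t hi  = x & 0x8080808080808080ULL;     /* top bit per byte  */
        uint64_t red = (hi - (hi >> 7))
                     & 0x1b1b1b1b1b1b1b1bULL;         /* 0x1b where set    */
        uint64_t dbl = (x + x) & 0xfefefefefefefeULL; /* shift, drop carry */
        return dbl ^ red;
    }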
-
-$code.=<<___;
-.type	_x86_64_AES_decrypt_compact,\@abi-omnipotent
-.align	16
-_x86_64_AES_decrypt_compact:
-	lea	128($sbox),$inp			# size optimization
-	mov	0-128($inp),$acc1		# prefetch Td4
-	mov	32-128($inp),$acc2
-	mov	64-128($inp),$t0
-	mov	96-128($inp),$t1
-	mov	128-128($inp),$acc1
-	mov	160-128($inp),$acc2
-	mov	192-128($inp),$t0
-	mov	224-128($inp),$t1
-	jmp	.Ldec_loop_compact
-
-.align	16
-.Ldec_loop_compact:
-		xor	0($key),$s0		# xor with key
-		xor	4($key),$s1
-		xor	8($key),$s2
-		xor	12($key),$s3
-		lea	16($key),$key
-___
-		&deccompactvert();
-$code.=<<___;
-		cmp	16(%rsp),$key
-		je	.Ldec_compact_done
-
-		mov	256+0($sbox),$mask80
-		shl	\$32,%rbx
-		shl	\$32,%rdx
-		mov	256+8($sbox),$maskfe
-		or	%rbx,%rax
-		or	%rdx,%rcx
-		mov	256+16($sbox),$mask1b
-___
-		&dectransform(1);
-$code.=<<___;
-	jmp	.Ldec_loop_compact
-.align	16
-.Ldec_compact_done:
-	xor	0($key),$s0
-	xor	4($key),$s1
-	xor	8($key),$s2
-	xor	12($key),$s3
-	.byte	0xf3,0xc3			# rep ret
-.size	_x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
-___
-
-# void AES_decrypt (const void *inp,void *out,const AES_KEY *key);
-$code.=<<___;
-.globl	AES_decrypt
-.type	AES_decrypt,\@function,3
-.align	16
-.globl	asm_AES_decrypt
-.hidden	asm_AES_decrypt
-asm_AES_decrypt:
-AES_decrypt:
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-
-	# allocate frame "above" key schedule
-	mov	%rsp,%r10
-	lea	-63(%rdx),%rcx	# %rdx is key argument
-	and	\$-64,%rsp
-	sub	%rsp,%rcx
-	neg	%rcx
-	and	\$0x3c0,%rcx
-	sub	%rcx,%rsp
-	sub	\$32,%rsp
-
-	mov	%rsi,16(%rsp)	# save out
-	mov	%r10,24(%rsp)	# save real stack pointer
-.Ldec_prologue:
-
-	mov	%rdx,$key
-	mov	240($key),$rnds	# load rounds
-
-	mov	0(%rdi),$s0	# load input vector
-	mov	4(%rdi),$s1
-	mov	8(%rdi),$s2
-	mov	12(%rdi),$s3
-
-	shl	\$4,$rnds
-	lea	($key,$rnds),%rbp
-	mov	$key,(%rsp)	# key schedule
-	mov	%rbp,8(%rsp)	# end of key schedule
-
-	# pick Td4 copy which can't "overlap" with stack frame or key schedule
-	lea	.LAES_Td+2048(%rip),$sbox
-	lea	768(%rsp),%rbp
-	sub	$sbox,%rbp
-	and	\$0x300,%rbp
-	lea	($sbox,%rbp),$sbox
-	shr	\$3,%rbp	# recall "magic" constants!
-	add	%rbp,$sbox
-
-	call	_x86_64_AES_decrypt_compact
-
-	mov	16(%rsp),$out	# restore out
-	mov	24(%rsp),%rsi	# restore saved stack pointer
-	mov	$s0,0($out)	# write output vector
-	mov	$s1,4($out)
-	mov	$s2,8($out)
-	mov	$s3,12($out)
-
-	mov	(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Ldec_epilogue:
-	ret
-.size	AES_decrypt,.-AES_decrypt
-___
-#------------------------------------------------------------------#
-
-sub enckey()
-{
-$code.=<<___;
-	movz	%dl,%esi		# rk[i]>>0
-	movzb	-128(%rbp,%rsi),%ebx
-	movz	%dh,%esi		# rk[i]>>8
-	shl	\$24,%ebx
-	xor	%ebx,%eax
-
-	movzb	-128(%rbp,%rsi),%ebx
-	shr	\$16,%edx
-	movz	%dl,%esi		# rk[i]>>16
-	xor	%ebx,%eax
-
-	movzb	-128(%rbp,%rsi),%ebx
-	movz	%dh,%esi		# rk[i]>>24
-	shl	\$8,%ebx
-	xor	%ebx,%eax
-
-	movzb	-128(%rbp,%rsi),%ebx
-	shl	\$16,%ebx
-	xor	%ebx,%eax
-
-	xor	1024-128(%rbp,%rcx,4),%eax		# rcon
-___
-}
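In C terms, what enckey() emits is the standard key-schedule core g(w) = SubWord(RotWord(w)) ^ rcon on a little-endian word; the table it indexes via -128(%rbp,%rsi) is the compact Te4 S-box copy. A hedged model (Te4 is an assumed external table):

    #include <stdint.h>

    extern const uint8_t Te4[256];   /* AES S-box (assumed table) */

    static uint32_t key_core(uint32_t w, uint32_t rcon)
    {
        w = (w >> 8) | (w << 24);    /* RotWord on the LE layout  */
        w = (uint32_t)Te4[ w        & 0xff]
          | (uint32_t)Te4[(w >>  8) & 0xff] <<  8
          | (uint32_t)Te4[(w >> 16) & 0xff] << 16
          | (uint32_t)Te4[ w >> 24        ] << 24;   /* SubWord   */
        return w ^ rcon;             /* round constant            */
    }

The .L10loop body then reads as rk[4] = rk[0] ^ key_core(rk[3], rcon[i]), followed by the chained rk[5] = rk[4] ^ rk[1] and so on.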
-
-# int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
-#                        AES_KEY *key)
-$code.=<<___;
-.globl	private_AES_set_encrypt_key
-.type	private_AES_set_encrypt_key,\@function,3
-.align	16
-private_AES_set_encrypt_key:
-	push	%rbx
-	push	%rbp
-	push	%r12			# redundant, but allows sharing the
-	push	%r13			# exception handler...
-	push	%r14
-	push	%r15
-	sub	\$8,%rsp
-.Lenc_key_prologue:
-
-	call	_x86_64_AES_set_encrypt_key
-
-	mov	8(%rsp),%r15
-	mov	16(%rsp),%r14
-	mov	24(%rsp),%r13
-	mov	32(%rsp),%r12
-	mov	40(%rsp),%rbp
-	mov	48(%rsp),%rbx
-	add	\$56,%rsp
-.Lenc_key_epilogue:
-	ret
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
-
-.type	_x86_64_AES_set_encrypt_key,\@abi-omnipotent
-.align	16
-_x86_64_AES_set_encrypt_key:
-	mov	%esi,%ecx			# %ecx=bits
-	mov	%rdi,%rsi			# %rsi=userKey
-	mov	%rdx,%rdi			# %rdi=key
-
-	test	\$-1,%rsi
-	jz	.Lbadpointer
-	test	\$-1,%rdi
-	jz	.Lbadpointer
-
-	lea	.LAES_Te(%rip),%rbp
-	lea	2048+128(%rbp),%rbp
-
-	# prefetch Te4
-	mov	0-128(%rbp),%eax
-	mov	32-128(%rbp),%ebx
-	mov	64-128(%rbp),%r8d
-	mov	96-128(%rbp),%edx
-	mov	128-128(%rbp),%eax
-	mov	160-128(%rbp),%ebx
-	mov	192-128(%rbp),%r8d
-	mov	224-128(%rbp),%edx
-
-	cmp	\$128,%ecx
-	je	.L10rounds
-	cmp	\$192,%ecx
-	je	.L12rounds
-	cmp	\$256,%ecx
-	je	.L14rounds
-	mov	\$-2,%rax			# invalid number of bits
-	jmp	.Lexit
-
-.L10rounds:
-	mov	0(%rsi),%rax			# copy first 4 dwords
-	mov	8(%rsi),%rdx
-	mov	%rax,0(%rdi)
-	mov	%rdx,8(%rdi)
-
-	shr	\$32,%rdx
-	xor	%ecx,%ecx
-	jmp	.L10shortcut
-.align	4
-.L10loop:
-		mov	0(%rdi),%eax			# rk[0]
-		mov	12(%rdi),%edx			# rk[3]
-.L10shortcut:
-___
-		&enckey	();
-$code.=<<___;
-		mov	%eax,16(%rdi)			# rk[4]
-		xor	4(%rdi),%eax
-		mov	%eax,20(%rdi)			# rk[5]
-		xor	8(%rdi),%eax
-		mov	%eax,24(%rdi)			# rk[6]
-		xor	12(%rdi),%eax
-		mov	%eax,28(%rdi)			# rk[7]
-		add	\$1,%ecx
-		lea	16(%rdi),%rdi
-		cmp	\$10,%ecx
-	jl	.L10loop
-
-	movl	\$10,80(%rdi)			# setup number of rounds
-	xor	%rax,%rax
-	jmp	.Lexit
-
-.L12rounds:
-	mov	0(%rsi),%rax			# copy first 6 dwords
-	mov	8(%rsi),%rbx
-	mov	16(%rsi),%rdx
-	mov	%rax,0(%rdi)
-	mov	%rbx,8(%rdi)
-	mov	%rdx,16(%rdi)
-
-	shr	\$32,%rdx
-	xor	%ecx,%ecx
-	jmp	.L12shortcut
-.align	4
-.L12loop:
-		mov	0(%rdi),%eax			# rk[0]
-		mov	20(%rdi),%edx			# rk[5]
-.L12shortcut:
-___
-		&enckey	();
-$code.=<<___;
-		mov	%eax,24(%rdi)			# rk[6]
-		xor	4(%rdi),%eax
-		mov	%eax,28(%rdi)			# rk[7]
-		xor	8(%rdi),%eax
-		mov	%eax,32(%rdi)			# rk[8]
-		xor	12(%rdi),%eax
-		mov	%eax,36(%rdi)			# rk[9]
-
-		cmp	\$7,%ecx
-		je	.L12break
-		add	\$1,%ecx
-
-		xor	16(%rdi),%eax
-		mov	%eax,40(%rdi)			# rk[10]
-		xor	20(%rdi),%eax
-		mov	%eax,44(%rdi)			# rk[11]
-
-		lea	24(%rdi),%rdi
-	jmp	.L12loop
-.L12break:
-	movl	\$12,72(%rdi)		# setup number of rounds
-	xor	%rax,%rax
-	jmp	.Lexit
-
-.L14rounds:		
-	mov	0(%rsi),%rax			# copy first 8 dwords
-	mov	8(%rsi),%rbx
-	mov	16(%rsi),%rcx
-	mov	24(%rsi),%rdx
-	mov	%rax,0(%rdi)
-	mov	%rbx,8(%rdi)
-	mov	%rcx,16(%rdi)
-	mov	%rdx,24(%rdi)
-
-	shr	\$32,%rdx
-	xor	%ecx,%ecx
-	jmp	.L14shortcut
-.align	4
-.L14loop:
-		mov	0(%rdi),%eax			# rk[0]
-		mov	28(%rdi),%edx			# rk[4]
-.L14shortcut:
-___
-		&enckey	();
-$code.=<<___;
-		mov	%eax,32(%rdi)			# rk[8]
-		xor	4(%rdi),%eax
-		mov	%eax,36(%rdi)			# rk[9]
-		xor	8(%rdi),%eax
-		mov	%eax,40(%rdi)			# rk[10]
-		xor	12(%rdi),%eax
-		mov	%eax,44(%rdi)			# rk[11]
-
-		cmp	\$6,%ecx
-		je	.L14break
-		add	\$1,%ecx
-
-		mov	%eax,%edx
-		mov	16(%rdi),%eax			# rk[4]
-		movz	%dl,%esi			# rk[11]>>0
-		movzb	-128(%rbp,%rsi),%ebx
-		movz	%dh,%esi			# rk[11]>>8
-		xor	%ebx,%eax
-
-		movzb	-128(%rbp,%rsi),%ebx
-		shr	\$16,%edx
-		shl	\$8,%ebx
-		movz	%dl,%esi			# rk[11]>>16
-		xor	%ebx,%eax
-
-		movzb	-128(%rbp,%rsi),%ebx
-		movz	%dh,%esi			# rk[11]>>24
-		shl	\$16,%ebx
-		xor	%ebx,%eax
-
-		movzb	-128(%rbp,%rsi),%ebx
-		shl	\$24,%ebx
-		xor	%ebx,%eax
-
-		mov	%eax,48(%rdi)			# rk[12]
-		xor	20(%rdi),%eax
-		mov	%eax,52(%rdi)			# rk[13]
-		xor	24(%rdi),%eax
-		mov	%eax,56(%rdi)			# rk[14]
-		xor	28(%rdi),%eax
-		mov	%eax,60(%rdi)			# rk[15]
-
-		lea	32(%rdi),%rdi
-	jmp	.L14loop
-.L14break:
-	movl	\$14,48(%rdi)		# setup number of rounds
-	xor	%rax,%rax
-	jmp	.Lexit
-
-.Lbadpointer:
-	mov	\$-1,%rax
-.Lexit:
-	.byte	0xf3,0xc3			# rep ret
-.size	_x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
-___
-
-sub deckey_ref()
-{ my ($i,$ptr,$te,$td) = @_;
-  my ($tp1,$tp2,$tp4,$tp8,$acc)=("%eax","%ebx","%edi","%edx","%r8d");
-$code.=<<___;
-	mov	$i($ptr),$tp1
-	mov	$tp1,$acc
-	and	\$0x80808080,$acc
-	mov	$acc,$tp4
-	shr	\$7,$tp4
-	lea	0($tp1,$tp1),$tp2
-	sub	$tp4,$acc
-	and	\$0xfefefefe,$tp2
-	and	\$0x1b1b1b1b,$acc
-	xor	$tp2,$acc
-	mov	$acc,$tp2
-
-	and	\$0x80808080,$acc
-	mov	$acc,$tp8
-	shr	\$7,$tp8
-	lea	0($tp2,$tp2),$tp4
-	sub	$tp8,$acc
-	and	\$0xfefefefe,$tp4
-	and	\$0x1b1b1b1b,$acc
-	 xor	$tp1,$tp2		# tp2^tp1
-	xor	$tp4,$acc
-	mov	$acc,$tp4
-
-	and	\$0x80808080,$acc
-	mov	$acc,$tp8
-	shr	\$7,$tp8
-	sub	$tp8,$acc
-	lea	0($tp4,$tp4),$tp8
-	 xor	$tp1,$tp4		# tp4^tp1
-	and	\$0xfefefefe,$tp8
-	and	\$0x1b1b1b1b,$acc
-	xor	$acc,$tp8
-
-	xor	$tp8,$tp1		# tp1^tp8
-	rol	\$8,$tp1		# ROTATE(tp1^tp8,8)
-	xor	$tp8,$tp2		# tp2^tp1^tp8
-	xor	$tp8,$tp4		# tp4^tp1^tp8
-	xor	$tp2,$tp8
-	xor	$tp4,$tp8		# tp8^(tp8^tp4^tp1)^(tp8^tp2^tp1)=tp8^tp4^tp2
-
-	xor	$tp8,$tp1
-	rol	\$24,$tp2		# ROTATE(tp2^tp1^tp8,24)
-	xor	$tp2,$tp1
-	rol	\$16,$tp4		# ROTATE(tp4^tp1^tp8,16)
-	xor	$tp4,$tp1
-
-	mov	$tp1,$i($ptr)
-___
-}
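The tp1/tp2/tp4/tp8 chain works because the InvMixColumns coefficients decompose over GF(2): 0e = 08^04^02, 0b = 08^02^01, 0d = 08^04^01, 09 = 08^01, so three doublings plus rotates and xors cover a whole word. For comparison, the plain byte-wise reference (a sketch; gmul is the usual xtime-based multiply):

    #include <stdint.h>

    static uint8_t xtime(uint8_t x)            /* GF(2^8) doubling */
    {
        return (uint8_t)((x << 1) ^ ((x >> 7) * 0x1b));
    }

    static uint8_t gmul(uint8_t x, uint8_t y)  /* x times small constant y */
    {
        uint8_t r = 0;
        while (y) {
            if (y & 1) r ^= x;
            x = xtime(x);
            y >>= 1;
        }
        return r;
    }

    /* Reference InvMixColumns on one column (FIPS-197, sec. 5.3.3). */
    static void inv_mix_column(uint8_t c[4])
    {
        uint8_t a0 = c[0], a1 = c[1], a2 = c[2], a3 = c[3];
        c[0] = gmul(a0,0x0e) ^ gmul(a1,0x0b) ^ gmul(a2,0x0d) ^ gmul(a3,0x09);
        c[1] = gmul(a0,0x09) ^ gmul(a1,0x0e) ^ gmul(a2,0x0b) ^ gmul(a3,0x0d);
        c[2] = gmul(a0,0x0d) ^ gmul(a1,0x09) ^ gmul(a2,0x0e) ^ gmul(a3,0x0b);
        c[3] = gmul(a0,0x0b) ^ gmul(a1,0x0d) ^ gmul(a2,0x09) ^ gmul(a3,0x0e);
    }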
-
-# int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
-#                        AES_KEY *key)
-$code.=<<___;
-.globl	private_AES_set_decrypt_key
-.type	private_AES_set_decrypt_key,\@function,3
-.align	16
-private_AES_set_decrypt_key:
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	push	%rdx			# save key schedule
-.Ldec_key_prologue:
-
-	call	_x86_64_AES_set_encrypt_key
-	mov	(%rsp),%r8		# restore key schedule
-	cmp	\$0,%eax
-	jne	.Labort
-
-	mov	240(%r8),%r14d		# pull number of rounds
-	xor	%rdi,%rdi
-	lea	(%rdi,%r14d,4),%rcx
-	mov	%r8,%rsi
-	lea	(%r8,%rcx,4),%rdi	# pointer to last chunk
-.align	4
-.Linvert:
-		mov	0(%rsi),%rax
-		mov	8(%rsi),%rbx
-		mov	0(%rdi),%rcx
-		mov	8(%rdi),%rdx
-		mov	%rax,0(%rdi)
-		mov	%rbx,8(%rdi)
-		mov	%rcx,0(%rsi)
-		mov	%rdx,8(%rsi)
-		lea	16(%rsi),%rsi
-		lea	-16(%rdi),%rdi
-		cmp	%rsi,%rdi
-	jne	.Linvert
-
-	lea	.LAES_Te+2048+1024(%rip),%rax	# rcon
-
-	mov	40(%rax),$mask80
-	mov	48(%rax),$maskfe
-	mov	56(%rax),$mask1b
-
-	mov	%r8,$key
-	sub	\$1,%r14d
-.align	4
-.Lpermute:
-		lea	16($key),$key
-		mov	0($key),%rax
-		mov	8($key),%rcx
-___
-		&dectransform ();
-$code.=<<___;
-		mov	%eax,0($key)
-		mov	%ebx,4($key)
-		mov	%ecx,8($key)
-		mov	%edx,12($key)
-		sub	\$1,%r14d
-	jnz	.Lpermute
-
-	xor	%rax,%rax
-.Labort:
-	mov	8(%rsp),%r15
-	mov	16(%rsp),%r14
-	mov	24(%rsp),%r13
-	mov	32(%rsp),%r12
-	mov	40(%rsp),%rbp
-	mov	48(%rsp),%rbx
-	add	\$56,%rsp
-.Ldec_key_epilogue:
-	ret
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
-___
-
-# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
-#			size_t length, const AES_KEY *key,
-#			unsigned char *ivp,const int enc);
-{
-# stack frame layout
-# -8(%rsp)		return address
-my $keyp="0(%rsp)";		# one to pass as $key
-my $keyend="8(%rsp)";		# &(keyp->rd_key[4*keyp->rounds])
-my $_rsp="16(%rsp)";		# saved %rsp
-my $_inp="24(%rsp)";		# copy of 1st parameter, inp
-my $_out="32(%rsp)";		# copy of 2nd parameter, out
-my $_len="40(%rsp)";		# copy of 3rd parameter, length
-my $_key="48(%rsp)";		# copy of 4th parameter, key
-my $_ivp="56(%rsp)";		# copy of 5th parameter, ivp
-my $ivec="64(%rsp)";		# ivec[16]
-my $aes_key="80(%rsp)";		# copy of aes_key
-my $mark="80+240(%rsp)";	# copy of aes_key->rounds
-
-$code.=<<___;
-.globl	AES_cbc_encrypt
-.type	AES_cbc_encrypt,\@function,6
-.align	16
-.extern	OPENSSL_ia32cap_P
-.globl	asm_AES_cbc_encrypt
-.hidden	asm_AES_cbc_encrypt
-asm_AES_cbc_encrypt:
-AES_cbc_encrypt:
-	cmp	\$0,%rdx	# check length
-	je	.Lcbc_epilogue
-	pushfq
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-.Lcbc_prologue:
-
-	cld
-	mov	%r9d,%r9d	# clear upper half of enc
-
-	lea	.LAES_Te(%rip),$sbox
-	cmp	\$0,%r9
-	jne	.Lcbc_picked_te
-	lea	.LAES_Td(%rip),$sbox
-.Lcbc_picked_te:
-
-	mov	OPENSSL_ia32cap_P(%rip),%r10d
-	cmp	\$$speed_limit,%rdx
-	jb	.Lcbc_slow_prologue
-	test	\$15,%rdx
-	jnz	.Lcbc_slow_prologue
-	bt	\$28,%r10d
-	jc	.Lcbc_slow_prologue
-
-	# allocate aligned stack frame...
-	lea	-88-248(%rsp),$key
-	and	\$-64,$key
-
-	# ... and make sure it doesn't alias with AES_T[ed] modulo 4096
-	mov	$sbox,%r10
-	lea	2304($sbox),%r11
-	mov	$key,%r12
-	and	\$0xFFF,%r10	# s = $sbox&0xfff
-	and	\$0xFFF,%r11	# e = ($sbox+2048)&0xfff
-	and	\$0xFFF,%r12	# p = %rsp&0xfff
-
-	cmp	%r11,%r12	# if (p>=e) %rsp -= (p-e);
-	jb	.Lcbc_te_break_out
-	sub	%r11,%r12
-	sub	%r12,$key
-	jmp	.Lcbc_te_ok
-.Lcbc_te_break_out:		# else %rsp -= (p-s)&0xfff + framesz
-	sub	%r10,%r12
-	and	\$0xFFF,%r12
-	add	\$320,%r12
-	sub	%r12,$key
-.align	4
-.Lcbc_te_ok:
-
-	xchg	%rsp,$key
-	#add	\$8,%rsp	# reserve for return address!
-	mov	$key,$_rsp	# save %rsp
-.Lcbc_fast_body:
-	mov	%rdi,$_inp	# save copy of inp
-	mov	%rsi,$_out	# save copy of out
-	mov	%rdx,$_len	# save copy of len
-	mov	%rcx,$_key	# save copy of key
-	mov	%r8,$_ivp	# save copy of ivp
-	movl	\$0,$mark	# copy of aes_key->rounds = 0;
-	mov	%r8,%rbp	# rearrange input arguments
-	mov	%r9,%rbx
-	mov	%rsi,$out
-	mov	%rdi,$inp
-	mov	%rcx,$key
-
-	mov	240($key),%eax		# key->rounds
-	# do we copy key schedule to stack?
-	mov	$key,%r10
-	sub	$sbox,%r10
-	and	\$0xfff,%r10
-	cmp	\$2304,%r10
-	jb	.Lcbc_do_ecopy
-	cmp	\$4096-248,%r10
-	jb	.Lcbc_skip_ecopy
-.align	4
-.Lcbc_do_ecopy:
-		mov	$key,%rsi
-		lea	$aes_key,%rdi
-		lea	$aes_key,$key
-		mov	\$240/8,%ecx
-		.long	0x90A548F3	# rep movsq
-		mov	%eax,(%rdi)	# copy aes_key->rounds
-.Lcbc_skip_ecopy:
-	mov	$key,$keyp	# save key pointer
-
-	mov	\$18,%ecx
-.align	4
-.Lcbc_prefetch_te:
-		mov	0($sbox),%r10
-		mov	32($sbox),%r11
-		mov	64($sbox),%r12
-		mov	96($sbox),%r13
-		lea	128($sbox),$sbox
-		sub	\$1,%ecx
-	jnz	.Lcbc_prefetch_te
-	lea	-2304($sbox),$sbox
-
-	cmp	\$0,%rbx
-	je	.LFAST_DECRYPT
-
-#----------------------------- ENCRYPT -----------------------------#
-	mov	0(%rbp),$s0		# load iv
-	mov	4(%rbp),$s1
-	mov	8(%rbp),$s2
-	mov	12(%rbp),$s3
-
-.align	4
-.Lcbc_fast_enc_loop:
-		xor	0($inp),$s0
-		xor	4($inp),$s1
-		xor	8($inp),$s2
-		xor	12($inp),$s3
-		mov	$keyp,$key	# restore key
-		mov	$inp,$_inp	# if ($verticalspin) save inp
-
-		call	_x86_64_AES_encrypt
-
-		mov	$_inp,$inp	# if ($verticalspin) restore inp
-		mov	$_len,%r10
-		mov	$s0,0($out)
-		mov	$s1,4($out)
-		mov	$s2,8($out)
-		mov	$s3,12($out)
-
-		lea	16($inp),$inp
-		lea	16($out),$out
-		sub	\$16,%r10
-		test	\$-16,%r10
-		mov	%r10,$_len
-	jnz	.Lcbc_fast_enc_loop
-	mov	$_ivp,%rbp	# restore ivp
-	mov	$s0,0(%rbp)	# save ivec
-	mov	$s1,4(%rbp)
-	mov	$s2,8(%rbp)
-	mov	$s3,12(%rbp)
-
-	jmp	.Lcbc_fast_cleanup
-
-#----------------------------- DECRYPT -----------------------------#
-.align	16
-.LFAST_DECRYPT:
-	cmp	$inp,$out
-	je	.Lcbc_fast_dec_in_place
-
-	mov	%rbp,$ivec
-.align	4
-.Lcbc_fast_dec_loop:
-		mov	0($inp),$s0	# read input
-		mov	4($inp),$s1
-		mov	8($inp),$s2
-		mov	12($inp),$s3
-		mov	$keyp,$key	# restore key
-		mov	$inp,$_inp	# if ($verticalspin) save inp
-
-		call	_x86_64_AES_decrypt
-
-		mov	$ivec,%rbp	# load ivp
-		mov	$_inp,$inp	# if ($verticalspin) restore inp
-		mov	$_len,%r10	# load len
-		xor	0(%rbp),$s0	# xor iv
-		xor	4(%rbp),$s1
-		xor	8(%rbp),$s2
-		xor	12(%rbp),$s3
-		mov	$inp,%rbp	# current input, next iv
-
-		sub	\$16,%r10
-		mov	%r10,$_len	# update len
-		mov	%rbp,$ivec	# update ivp
-
-		mov	$s0,0($out)	# write output
-		mov	$s1,4($out)
-		mov	$s2,8($out)
-		mov	$s3,12($out)
-
-		lea	16($inp),$inp
-		lea	16($out),$out
-	jnz	.Lcbc_fast_dec_loop
-	mov	$_ivp,%r12		# load user ivp
-	mov	0(%rbp),%r10		# load iv
-	mov	8(%rbp),%r11
-	mov	%r10,0(%r12)		# copy back to user
-	mov	%r11,8(%r12)
-	jmp	.Lcbc_fast_cleanup
-
-.align	16
-.Lcbc_fast_dec_in_place:
-	mov	0(%rbp),%r10		# copy iv to stack
-	mov	8(%rbp),%r11
-	mov	%r10,0+$ivec
-	mov	%r11,8+$ivec
-.align	4
-.Lcbc_fast_dec_in_place_loop:
-		mov	0($inp),$s0	# load input
-		mov	4($inp),$s1
-		mov	8($inp),$s2
-		mov	12($inp),$s3
-		mov	$keyp,$key	# restore key
-		mov	$inp,$_inp	# if ($verticalspin) save inp
-
-		call	_x86_64_AES_decrypt
-
-		mov	$_inp,$inp	# if ($verticalspin) restore inp
-		mov	$_len,%r10
-		xor	0+$ivec,$s0
-		xor	4+$ivec,$s1
-		xor	8+$ivec,$s2
-		xor	12+$ivec,$s3
-
-		mov	0($inp),%r11	# load input
-		mov	8($inp),%r12
-		sub	\$16,%r10
-		jz	.Lcbc_fast_dec_in_place_done
-
-		mov	%r11,0+$ivec	# copy input to iv
-		mov	%r12,8+$ivec
-
-		mov	$s0,0($out)	# save output [zaps input]
-		mov	$s1,4($out)
-		mov	$s2,8($out)
-		mov	$s3,12($out)
-
-		lea	16($inp),$inp
-		lea	16($out),$out
-		mov	%r10,$_len
-	jmp	.Lcbc_fast_dec_in_place_loop
-.Lcbc_fast_dec_in_place_done:
-	mov	$_ivp,%rdi
-	mov	%r11,0(%rdi)	# copy iv back to user
-	mov	%r12,8(%rdi)
-
-	mov	$s0,0($out)	# save output [zaps input]
-	mov	$s1,4($out)
-	mov	$s2,8($out)
-	mov	$s3,12($out)
-
-.align	4
-.Lcbc_fast_cleanup:
-	cmpl	\$0,$mark	# was the key schedule copied?
-	lea	$aes_key,%rdi
-	je	.Lcbc_exit
-		mov	\$240/8,%ecx
-		xor	%rax,%rax
-		.long	0x90AB48F3	# rep stosq
-
-	jmp	.Lcbc_exit
-
-#--------------------------- SLOW ROUTINE ---------------------------#
-.align	16
-.Lcbc_slow_prologue:
-	# allocate aligned stack frame...
-	lea	-88(%rsp),%rbp
-	and	\$-64,%rbp
-	# ... just "above" key schedule
-	lea	-88-63(%rcx),%r10
-	sub	%rbp,%r10
-	neg	%r10
-	and	\$0x3c0,%r10
-	sub	%r10,%rbp
-
-	xchg	%rsp,%rbp
-	#add	\$8,%rsp	# reserve for return address!
-	mov	%rbp,$_rsp	# save %rsp
-.Lcbc_slow_body:
-	#mov	%rdi,$_inp	# save copy of inp
-	#mov	%rsi,$_out	# save copy of out
-	#mov	%rdx,$_len	# save copy of len
-	#mov	%rcx,$_key	# save copy of key
-	mov	%r8,$_ivp	# save copy of ivp
-	mov	%r8,%rbp	# rearrange input arguments
-	mov	%r9,%rbx
-	mov	%rsi,$out
-	mov	%rdi,$inp
-	mov	%rcx,$key
-	mov	%rdx,%r10
-
-	mov	240($key),%eax
-	mov	$key,$keyp	# save key pointer
-	shl	\$4,%eax
-	lea	($key,%rax),%rax
-	mov	%rax,$keyend
-
-	# pick Te4 copy which can't "overlap" with stack frame or key schedule
-	lea	2048($sbox),$sbox
-	lea	768-8(%rsp),%rax
-	sub	$sbox,%rax
-	and	\$0x300,%rax
-	lea	($sbox,%rax),$sbox
-
-	cmp	\$0,%rbx
-	je	.LSLOW_DECRYPT
-
-#--------------------------- SLOW ENCRYPT ---------------------------#
-	test	\$-16,%r10		# check length
-	mov	0(%rbp),$s0		# load iv
-	mov	4(%rbp),$s1
-	mov	8(%rbp),$s2
-	mov	12(%rbp),$s3
-	jz	.Lcbc_slow_enc_tail	# short input...
-
-.align	4
-.Lcbc_slow_enc_loop:
-		xor	0($inp),$s0
-		xor	4($inp),$s1
-		xor	8($inp),$s2
-		xor	12($inp),$s3
-		mov	$keyp,$key	# restore key
-		mov	$inp,$_inp	# save inp
-		mov	$out,$_out	# save out
-		mov	%r10,$_len	# save len
-
-		call	_x86_64_AES_encrypt_compact
-
-		mov	$_inp,$inp	# restore inp
-		mov	$_out,$out	# restore out
-		mov	$_len,%r10	# restore len
-		mov	$s0,0($out)
-		mov	$s1,4($out)
-		mov	$s2,8($out)
-		mov	$s3,12($out)
-
-		lea	16($inp),$inp
-		lea	16($out),$out
-		sub	\$16,%r10
-		test	\$-16,%r10
-	jnz	.Lcbc_slow_enc_loop
-	test	\$15,%r10
-	jnz	.Lcbc_slow_enc_tail
-	mov	$_ivp,%rbp	# restore ivp
-	mov	$s0,0(%rbp)	# save ivec
-	mov	$s1,4(%rbp)
-	mov	$s2,8(%rbp)
-	mov	$s3,12(%rbp)
-
-	jmp	.Lcbc_exit
-
-.align	4
-.Lcbc_slow_enc_tail:
-	mov	%rax,%r11
-	mov	%rcx,%r12
-	mov	%r10,%rcx
-	mov	$inp,%rsi
-	mov	$out,%rdi
-	.long	0x9066A4F3		# rep movsb
-	mov	\$16,%rcx		# zero tail
-	sub	%r10,%rcx
-	xor	%rax,%rax
-	.long	0x9066AAF3		# rep stosb
-	mov	$out,$inp		# this is not a mistake!
-	mov	\$16,%r10		# len=16
-	mov	%r11,%rax
-	mov	%r12,%rcx
-	jmp	.Lcbc_slow_enc_loop	# one more spin...
-#--------------------------- SLOW DECRYPT ---------------------------#
-.align	16
-.LSLOW_DECRYPT:
-	shr	\$3,%rax
-	add	%rax,$sbox		# recall "magic" constants!
-
-	mov	0(%rbp),%r11		# copy iv to stack
-	mov	8(%rbp),%r12
-	mov	%r11,0+$ivec
-	mov	%r12,8+$ivec
-
-.align	4
-.Lcbc_slow_dec_loop:
-		mov	0($inp),$s0	# load input
-		mov	4($inp),$s1
-		mov	8($inp),$s2
-		mov	12($inp),$s3
-		mov	$keyp,$key	# restore key
-		mov	$inp,$_inp	# save inp
-		mov	$out,$_out	# save out
-		mov	%r10,$_len	# save len
-
-		call	_x86_64_AES_decrypt_compact
-
-		mov	$_inp,$inp	# restore inp
-		mov	$_out,$out	# restore out
-		mov	$_len,%r10
-		xor	0+$ivec,$s0
-		xor	4+$ivec,$s1
-		xor	8+$ivec,$s2
-		xor	12+$ivec,$s3
-
-		mov	0($inp),%r11	# load input
-		mov	8($inp),%r12
-		sub	\$16,%r10
-		jc	.Lcbc_slow_dec_partial
-		jz	.Lcbc_slow_dec_done
-
-		mov	%r11,0+$ivec	# copy input to iv
-		mov	%r12,8+$ivec
-
-		mov	$s0,0($out)	# save output [can zap input]
-		mov	$s1,4($out)
-		mov	$s2,8($out)
-		mov	$s3,12($out)
-
-		lea	16($inp),$inp
-		lea	16($out),$out
-	jmp	.Lcbc_slow_dec_loop
-.Lcbc_slow_dec_done:
-	mov	$_ivp,%rdi
-	mov	%r11,0(%rdi)		# copy iv back to user
-	mov	%r12,8(%rdi)
-
-	mov	$s0,0($out)		# save output [can zap input]
-	mov	$s1,4($out)
-	mov	$s2,8($out)
-	mov	$s3,12($out)
-
-	jmp	.Lcbc_exit
-
-.align	4
-.Lcbc_slow_dec_partial:
-	mov	$_ivp,%rdi
-	mov	%r11,0(%rdi)		# copy iv back to user
-	mov	%r12,8(%rdi)
-
-	mov	$s0,0+$ivec		# save output to stack
-	mov	$s1,4+$ivec
-	mov	$s2,8+$ivec
-	mov	$s3,12+$ivec
-
-	mov	$out,%rdi
-	lea	$ivec,%rsi
-	lea	16(%r10),%rcx
-	.long	0x9066A4F3	# rep movsb
-	jmp	.Lcbc_exit
-
-.align	16
-.Lcbc_exit:
-	mov	$_rsp,%rsi
-	mov	(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Lcbc_popfq:
-	popfq
-.Lcbc_epilogue:
-	ret
-.size	AES_cbc_encrypt,.-AES_cbc_encrypt
-___
-}
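A matching usage sketch for this CBC entry point (again against the 1.0.x <openssl/aes.h> API; the fast path requires a 16-byte multiple, and the IV buffer is updated in place, so callers keep a copy):

    #include <string.h>
    #include <openssl/aes.h>

    /* Round-trip two blocks through AES_cbc_encrypt; returns 1 on match. */
    static int cbc_roundtrip(void)
    {
        unsigned char key[16] = {0}, iv[16] = {0}, iv2[16] = {0};
        unsigned char pt[32] = "two blocks of plaintext......";
        unsigned char ct[32], back[32];
        AES_KEY enc, dec;

        AES_set_encrypt_key(key, 128, &enc);
        AES_set_decrypt_key(key, 128, &dec);

        AES_cbc_encrypt(pt, ct,  sizeof(pt), &enc, iv,  AES_ENCRYPT);
        AES_cbc_encrypt(ct, back, sizeof(ct), &dec, iv2, AES_DECRYPT);

        return memcmp(pt, back, sizeof(pt)) == 0;
    }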
-
-$code.=<<___;
-.align	64
-.LAES_Te:
-___
-	&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
-	&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
-	&_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
-	&_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
-	&_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
-	&_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
-	&_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
-	&_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
-	&_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
-	&_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
-	&_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
-	&_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
-	&_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
-	&_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
-	&_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
-	&_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
-	&_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
-	&_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
-	&_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
-	&_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
-	&_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
-	&_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
-	&_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
-	&_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
-	&_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
-	&_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
-	&_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
-	&_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
-	&_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
-	&_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
-	&_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
-	&_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
-	&_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
-	&_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
-	&_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
-	&_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
-	&_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
-	&_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
-	&_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
-	&_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
-	&_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
-	&_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
-	&_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
-	&_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
-	&_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
-	&_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
-	&_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
-	&_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
-	&_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
-	&_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
-	&_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
-	&_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
-	&_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
-	&_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
-	&_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
-	&_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
-	&_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
-	&_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
-	&_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
-	&_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
-	&_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
-	&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
-	&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
-	&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
-
-#Te4	# four copies of Te4 to choose from to avoid L1 aliasing
-	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
-	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
-	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
-	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
-	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
-	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
-	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
-	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
-	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
-	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
-	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
-	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
-	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
-	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
-	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
-	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
-	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
-	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
-	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
-	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
-	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
-	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
-	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
-	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
-	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
-	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
-	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
-	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
-	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
-	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
-	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
-	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
-	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
-	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
-	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
-	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
-	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
-	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
-	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
-	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
-	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
-	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
-	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
-	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
-	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
-	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
-	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
-	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
-	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
-	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
-	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
-	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
-	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
-	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
-	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
-	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
-	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
-	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
-	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
-	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
-	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
-	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
-	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
-	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
-	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
-	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
-	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
-	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
-	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
-	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
-	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
-	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
-	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
-	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
-	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
-	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
-	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
-	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
-	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
-	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
-	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
-	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
-	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
-	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
-	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
-	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
-	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
-	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
-	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
-	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
-	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
-	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
-	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
-	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
-	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
-	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
-	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
-	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
-	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
-	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
-	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
-	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
-	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
-	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
-	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
-	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
-	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
-	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
-	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
-	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
-	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
-	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
-	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
-	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
-	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
-	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
-	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
-	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
-	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
-	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
-	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
-	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
-	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
-	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
-	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
-	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
-	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
-	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-#rcon:
-$code.=<<___;
-	.long	0x00000001, 0x00000002, 0x00000004, 0x00000008
-	.long	0x00000010, 0x00000020, 0x00000040, 0x00000080
-	.long	0x0000001b, 0x00000036, 0x80808080, 0x80808080
-	.long	0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b
-___
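The first ten .long values in the #rcon block above are the AES key-schedule round constants; each is the previous one doubled in GF(2^8) (the trailing 0x80808080/0xfefefefe/0x1b1b1b1b words are byte masks, not rcon entries). A minimal Perl sketch, not part of the module, that regenerates them:

    # Regenerate the ten AES round constants via xtime() doubling in
    # GF(2^8) modulo x^8+x^4+x^3+x+1 (reduction constant 0x1b).
    my $rcon = 1;
    for (1 .. 10) {
        printf "0x%08x\n", $rcon;
        $rcon = ($rcon << 1) ^ (($rcon & 0x80) ? 0x11b : 0);   # xtime
    }
    # prints 0x00000001 .. 0x00000080, then 0x0000001b, 0x00000036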
-$code.=<<___;
-.align	64
-.LAES_Td:
-___
-	&_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
-	&_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
-	&_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
-	&_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
-	&_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
-	&_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
-	&_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
-	&_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
-	&_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
-	&_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
-	&_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
-	&_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
-	&_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
-	&_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
-	&_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
-	&_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
-	&_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
-	&_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
-	&_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
-	&_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
-	&_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
-	&_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
-	&_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
-	&_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
-	&_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
-	&_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
-	&_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
-	&_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
-	&_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
-	&_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
-	&_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
-	&_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
-	&_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
-	&_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
-	&_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
-	&_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
-	&_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
-	&_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
-	&_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
-	&_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
-	&_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
-	&_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
-	&_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
-	&_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
-	&_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
-	&_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
-	&_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
-	&_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
-	&_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
-	&_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
-	&_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
-	&_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
-	&_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
-	&_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
-	&_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
-	&_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
-	&_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
-	&_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
-	&_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
-	&_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
-	&_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
-	&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
-	&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
-	&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
-
-#Td4:	# four copies of Td4 to choose from to avoid L1 aliasing
-	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
-	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
-	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
-	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
-	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
-	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
-	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
-	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
-	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
-	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
-	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
-	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
-	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
-	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
-	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
-	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
-	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
-	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
-	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
-	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
-	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
-	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
-	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
-	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
-	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
-	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
-	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
-	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
-	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
-	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
-	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
-	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-$code.=<<___;
-	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
-	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-___
-	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
-	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
-	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
-	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
-	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
-	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
-	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
-	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
-	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
-	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
-	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
-	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
-	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
-	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
-	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
-	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
-	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
-	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
-	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
-	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
-	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
-	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
-	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
-	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
-	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
-	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
-	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
-	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
-	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
-	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
-	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
-	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-$code.=<<___;
-	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
-	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-___
-	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
-	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
-	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
-	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
-	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
-	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
-	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
-	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
-	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
-	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
-	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
-	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
-	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
-	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
-	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
-	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
-	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
-	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
-	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
-	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
-	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
-	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
-	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
-	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
-	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
-	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
-	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
-	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
-	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
-	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
-	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
-	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-$code.=<<___;
-	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
-	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-___
-	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
-	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
-	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
-	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
-	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
-	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
-	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
-	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
-	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
-	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
-	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
-	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
-	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
-	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
-	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
-	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
-	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
-	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
-	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
-	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
-	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
-	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
-	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
-	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
-	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
-	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
-	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
-	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
-	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
-	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
-	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
-	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-$code.=<<___;
-	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
-	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-.asciz  "AES for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
-.align	64
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern	__imp_RtlVirtualUnwind
-.type	block_se_handler,\@abi-omnipotent
-.align	16
-block_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# prologue label
-	cmp	%r10,%rbx		# context->Rip<prologue label
-	jb	.Lin_block_prologue
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lin_block_prologue
-
-	mov	24(%rax),%rax		# pull saved real stack pointer
-	lea	48(%rax),%rax		# adjust...
-
-	mov	-8(%rax),%rbx
-	mov	-16(%rax),%rbp
-	mov	-24(%rax),%r12
-	mov	-32(%rax),%r13
-	mov	-40(%rax),%r14
-	mov	-48(%rax),%r15
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lin_block_prologue:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-	jmp	.Lcommon_seh_exit
-.size	block_se_handler,.-block_se_handler
-
-.type	key_se_handler,\@abi-omnipotent
-.align	16
-key_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# prologue label
-	cmp	%r10,%rbx		# context->Rip<prologue label
-	jb	.Lin_key_prologue
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lin_key_prologue
-
-	lea	56(%rax),%rax
-
-	mov	-8(%rax),%rbx
-	mov	-16(%rax),%rbp
-	mov	-24(%rax),%r12
-	mov	-32(%rax),%r13
-	mov	-40(%rax),%r14
-	mov	-48(%rax),%r15
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lin_key_prologue:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-	jmp	.Lcommon_seh_exit
-.size	key_se_handler,.-key_se_handler
-
-.type	cbc_se_handler,\@abi-omnipotent
-.align	16
-cbc_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	lea	.Lcbc_prologue(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<.Lcbc_prologue
-	jb	.Lin_cbc_prologue
-
-	lea	.Lcbc_fast_body(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<.Lcbc_fast_body
-	jb	.Lin_cbc_frame_setup
-
-	lea	.Lcbc_slow_prologue(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<.Lcbc_slow_prologue
-	jb	.Lin_cbc_body
-
-	lea	.Lcbc_slow_body(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<.Lcbc_slow_body
-	jb	.Lin_cbc_frame_setup
-
-.Lin_cbc_body:
-	mov	152($context),%rax	# pull context->Rsp
-
-	lea	.Lcbc_epilogue(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip>=.Lcbc_epilogue
-	jae	.Lin_cbc_prologue
-
-	lea	8(%rax),%rax
-
-	lea	.Lcbc_popfq(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip>=.Lcbc_popfq
-	jae	.Lin_cbc_prologue
-
-	mov	`16-8`(%rax),%rax	# biased $_rsp
-	lea	56(%rax),%rax
-
-.Lin_cbc_frame_setup:
-	mov	-16(%rax),%rbx
-	mov	-24(%rax),%rbp
-	mov	-32(%rax),%r12
-	mov	-40(%rax),%r13
-	mov	-48(%rax),%r14
-	mov	-56(%rax),%r15
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lin_cbc_prologue:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-.Lcommon_seh_exit:
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	add	\$64,%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	cbc_se_handler,.-cbc_se_handler
-
-.section	.pdata
-.align	4
-	.rva	.LSEH_begin_AES_encrypt
-	.rva	.LSEH_end_AES_encrypt
-	.rva	.LSEH_info_AES_encrypt
-
-	.rva	.LSEH_begin_AES_decrypt
-	.rva	.LSEH_end_AES_decrypt
-	.rva	.LSEH_info_AES_decrypt
-
-	.rva	.LSEH_begin_private_AES_set_encrypt_key
-	.rva	.LSEH_end_private_AES_set_encrypt_key
-	.rva	.LSEH_info_private_AES_set_encrypt_key
-
-	.rva	.LSEH_begin_private_AES_set_decrypt_key
-	.rva	.LSEH_end_private_AES_set_decrypt_key
-	.rva	.LSEH_info_private_AES_set_decrypt_key
-
-	.rva	.LSEH_begin_AES_cbc_encrypt
-	.rva	.LSEH_end_AES_cbc_encrypt
-	.rva	.LSEH_info_AES_cbc_encrypt
-
-.section	.xdata
-.align	8
-.LSEH_info_AES_encrypt:
-	.byte	9,0,0,0
-	.rva	block_se_handler
-	.rva	.Lenc_prologue,.Lenc_epilogue	# HandlerData[]
-.LSEH_info_AES_decrypt:
-	.byte	9,0,0,0
-	.rva	block_se_handler
-	.rva	.Ldec_prologue,.Ldec_epilogue	# HandlerData[]
-.LSEH_info_private_AES_set_encrypt_key:
-	.byte	9,0,0,0
-	.rva	key_se_handler
-	.rva	.Lenc_key_prologue,.Lenc_key_epilogue	# HandlerData[]
-.LSEH_info_private_AES_set_decrypt_key:
-	.byte	9,0,0,0
-	.rva	key_se_handler
-	.rva	.Ldec_key_prologue,.Ldec_key_epilogue	# HandlerData[]
-.LSEH_info_AES_cbc_encrypt:
-	.byte	9,0,0,0
-	.rva	cbc_se_handler
-___
-}
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-
-print $code;
-
-close STDOUT;
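The substitution just above `print $code` is the perlasm post-processing pass: every back-quoted span in the accumulated $code string is replaced by its eval()'d value, which is how expressions like `1232/8` or `104+($win64?10*16:0)` become plain immediates in the emitted assembly. A self-contained sketch of the same idiom (the sample string is made up for illustration):

    # Minimal sketch of the backtick-eval pass used by these scripts.
    my $code = "\tmov\t\$`1232/8`,%ecx\n\tlea\t`104+10*16`(%rsp),%rax\n";
    $code =~ s/\`([^\`]*)\`/eval($1)/gem;
    print $code;    # emits "mov $154,%ecx" and "lea 264(%rsp),%rax"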

+ 0 - 1250
drivers/builtin_openssl2/crypto/aes/asm/aesni-sha1-x86_64.pl

@@ -1,1250 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# June 2011
-#
-# This is AESNI-CBC+SHA1 "stitch" implementation. The idea, as spelled
-# in http://download.intel.com/design/intarch/papers/323686.pdf, is
-# that since AESNI-CBC encrypt exhibits *very* low instruction-level
-# parallelism, interleaving it with another algorithm allows better
-# utilization of processor resources and better overall performance.
-# SHA1 instruction sequences(*) are taken from sha1-x86_64.pl and
-# AESNI code is woven into it. Below are performance numbers in
-# cycles per processed byte (less is better) for standalone AESNI-CBC
-# encrypt, the sum of the latter and standalone SHA1, and the
-# "stitched" subroutine:
-#
-#		AES-128-CBC	+SHA1		stitch      gain
-# Westmere	3.77[+5.6]	9.37		6.65	    +41%
-# Sandy Bridge	5.05[+5.2(6.3)]	10.25(11.35)	6.16(7.08)  +67%(+60%)
-#
-#		AES-192-CBC
-# Westmere	4.51		10.11		6.97	    +45%
-# Sandy Bridge	6.05		11.25(12.35)	6.34(7.27)  +77%(+70%)
-#
-#		AES-256-CBC
-# Westmere	5.25		10.85		7.25	    +50%
-# Sandy Bridge	7.05		12.25(13.35)	7.06(7.70)  +74%(+73%)
-#
-# (*)	There are two code paths: SSSE3 and AVX. See sha1-x86_64.pl for
-#	background information. The numbers in parentheses above are
-#	SSSE3 results collected on an AVX-capable CPU, i.e. they apply
-#	on OSes that don't support AVX.
-#
-# Needless to say, it makes no sense to implement a "stitched"
-# *decrypt* subroutine: *both* AESNI-CBC decrypt and SHA1 already
-# fully utilize instruction-level parallelism, so stitching would not
-# give any gain. Well, there might be some, e.g. because of better cache
-# locality... For reference, here are performance results for
-# standalone AESNI-CBC decrypt:
-#
-#		AES-128-CBC	AES-192-CBC	AES-256-CBC
-# Westmere	1.31		1.55		1.80
-# Sandy Bridge	0.93		1.06		1.22
-
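As a quick sanity check on the encrypt table above, the quoted gain is simply (standalone AESNI-CBC + standalone SHA1) divided by the stitched figure, minus one. A throwaway Perl sketch using the Westmere AES-128-CBC row:

    # Recompute the Westmere AES-128-CBC gain from the cycles/byte above.
    my ($cbc, $sha1, $stitch) = (3.77, 5.6, 6.65);
    printf "+%.0f%%\n", (($cbc + $sha1) / $stitch - 1) * 100;   # +41%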
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-$avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
-		=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
-	   $1>=2.19);
-$avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
-	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
-	   $1>=2.09);
-$avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
-	   `ml64 2>&1` =~ /Version ([0-9]+)\./ &&
-	   $1>=10);
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-# void aesni_cbc_sha1_enc(const void *inp,
-#			void *out,
-#			size_t length,
-#			const AES_KEY *key,
-#			unsigned char *iv,
-#			SHA_CTX *ctx,
-#			const void *in0);
-
-$code.=<<___;
-.text
-.extern	OPENSSL_ia32cap_P
-
-.globl	aesni_cbc_sha1_enc
-.type	aesni_cbc_sha1_enc,\@abi-omnipotent
-.align	16
-aesni_cbc_sha1_enc:
-	# caller should check for SSSE3 and AES-NI bits
-	mov	OPENSSL_ia32cap_P+0(%rip),%r10d
-	mov	OPENSSL_ia32cap_P+4(%rip),%r11d
-___
-$code.=<<___ if ($avx);
-	and	\$`1<<28`,%r11d		# mask AVX bit
-	and	\$`1<<30`,%r10d		# mask "Intel CPU" bit
-	or	%r11d,%r10d
-	cmp	\$`1<<28|1<<30`,%r10d
-	je	aesni_cbc_sha1_enc_avx
-___
-$code.=<<___;
-	jmp	aesni_cbc_sha1_enc_ssse3
-	ret
-.size	aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
-___
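The run-time dispatch above takes the AVX path only when both capability bits are set; under the backtick-eval pass the immediate `1<<28|1<<30` collapses to a single constant. A one-line sketch of the mask arithmetic:

    # The AVX dispatch mask: bit 28 (AVX) and bit 30 ("Intel CPU" bit).
    printf "0x%08x\n", 1<<28 | 1<<30;    # 0x50000000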
-
-my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
-
-my $Xi=4;
-my @X=map("%xmm$_",(4..7,0..3));
-my @Tx=map("%xmm$_",(8..10));
-my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
-my @T=("%esi","%edi");
-my $j=0; my $jj=0; my $r=0; my $sn=0;
-my $K_XX_XX="%r11";
-my ($iv,$in,$rndkey0)=map("%xmm$_",(11..13));
-my @rndkey=("%xmm14","%xmm15");
-
-sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
-{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
-  my $arg = pop;
-    $arg = "\$$arg" if ($arg*1 eq $arg);
-    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
-}
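The AUTOLOAD thunk above is what lets the script call arbitrary instruction mnemonics as Perl subs: any call to an undefined sub is intercepted and printed as one line of AT&T-syntax assembly, with the perlasm-style destination-first arguments reversed and bare numbers turned into immediates. A standalone sketch of the same trick (the two sample calls are illustrative):

    # Standalone demo of the AUTOLOAD thunk: undefined subs become asm text.
    our $AUTOLOAD;
    sub AUTOLOAD {
        my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
        my $arg = pop;
        $arg = "\$$arg" if ($arg*1 eq $arg);    # bare number -> immediate
        print "\t$opcode\t", join(',', $arg, reverse @_), "\n";
    }
    &rol("%eax", 5);         # emits "rol  $5,%eax"
    &mov("%ecx", "%edx");    # emits "mov  %edx,%ecx"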
-
-my $_rol=sub { &rol(@_) };
-my $_ror=sub { &ror(@_) };
-
-$code.=<<___;
-.type	aesni_cbc_sha1_enc_ssse3,\@function,6
-.align	16
-aesni_cbc_sha1_enc_ssse3:
-	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
-	#shr	\$6,$len			# debugging artefact
-	#jz	.Lepilogue_ssse3		# debugging artefact
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
-	#mov	$in0,$inp			# debugging artefact
-	#lea	64(%rsp),$ctx			# debugging artefact
-___
-$code.=<<___ if ($win64);
-	movaps	%xmm6,96+0(%rsp)
-	movaps	%xmm7,96+16(%rsp)
-	movaps	%xmm8,96+32(%rsp)
-	movaps	%xmm9,96+48(%rsp)
-	movaps	%xmm10,96+64(%rsp)
-	movaps	%xmm11,96+80(%rsp)
-	movaps	%xmm12,96+96(%rsp)
-	movaps	%xmm13,96+112(%rsp)
-	movaps	%xmm14,96+128(%rsp)
-	movaps	%xmm15,96+144(%rsp)
-.Lprologue_ssse3:
-___
-$code.=<<___;
-	mov	$in0,%r12			# reassign arguments
-	mov	$out,%r13
-	mov	$len,%r14
-	mov	$key,%r15
-	movdqu	($ivp),$iv			# load IV
-	mov	$ivp,88(%rsp)			# save $ivp
-___
-my ($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
-my $rounds="${ivp}d";
-$code.=<<___;
-	shl	\$6,$len
-	sub	$in0,$out
-	mov	240($key),$rounds
-	add	$inp,$len		# end of input
-
-	lea	K_XX_XX(%rip),$K_XX_XX
-	mov	0($ctx),$A		# load context
-	mov	4($ctx),$B
-	mov	8($ctx),$C
-	mov	12($ctx),$D
-	mov	$B,@T[0]		# magic seed
-	mov	16($ctx),$E
-
-	movdqa	64($K_XX_XX),@X[2]	# pbswap mask
-	movdqa	0($K_XX_XX),@Tx[1]	# K_00_19
-	movdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
-	movdqu	16($inp),@X[-3&7]
-	movdqu	32($inp),@X[-2&7]
-	movdqu	48($inp),@X[-1&7]
-	pshufb	@X[2],@X[-4&7]		# byte swap
-	add	\$64,$inp
-	pshufb	@X[2],@X[-3&7]
-	pshufb	@X[2],@X[-2&7]
-	pshufb	@X[2],@X[-1&7]
-	paddd	@Tx[1],@X[-4&7]		# add K_00_19
-	paddd	@Tx[1],@X[-3&7]
-	paddd	@Tx[1],@X[-2&7]
-	movdqa	@X[-4&7],0(%rsp)	# X[]+K xfer to IALU
-	psubd	@Tx[1],@X[-4&7]		# restore X[]
-	movdqa	@X[-3&7],16(%rsp)
-	psubd	@Tx[1],@X[-3&7]
-	movdqa	@X[-2&7],32(%rsp)
-	psubd	@Tx[1],@X[-2&7]
-	movups	($key),$rndkey0		# $key[0]
-	movups	16($key),$rndkey[0]	# forward reference
-	jmp	.Loop_ssse3
-___
-
-my $aesenc=sub {
-  use integer;
-  my ($n,$k)=($r/10,$r%10);
-    if ($k==0) {
-      $code.=<<___;
-	movups		`16*$n`($in0),$in		# load input
-	xorps		$rndkey0,$in
-___
-      $code.=<<___ if ($n);
-	movups		$iv,`16*($n-1)`($out,$in0)	# write output
-___
-      $code.=<<___;
-	xorps		$in,$iv
-	aesenc		$rndkey[0],$iv
-	movups		`32+16*$k`($key),$rndkey[1]
-___
-    } elsif ($k==9) {
-      $sn++;
-      $code.=<<___;
-	cmp		\$11,$rounds
-	jb		.Laesenclast$sn
-	movups		`32+16*($k+0)`($key),$rndkey[1]
-	aesenc		$rndkey[0],$iv
-	movups		`32+16*($k+1)`($key),$rndkey[0]
-	aesenc		$rndkey[1],$iv
-	je		.Laesenclast$sn
-	movups		`32+16*($k+2)`($key),$rndkey[1]
-	aesenc		$rndkey[0],$iv
-	movups		`32+16*($k+3)`($key),$rndkey[0]
-	aesenc		$rndkey[1],$iv
-.Laesenclast$sn:
-	aesenclast	$rndkey[0],$iv
-	movups		16($key),$rndkey[1]		# forward reference
-___
-    } else {
-      $code.=<<___;
-	aesenc		$rndkey[0],$iv
-	movups		`32+16*$k`($key),$rndkey[1]
-___
-    }
-    $r++;	unshift(@rndkey,pop(@rndkey));
-};
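The $aesenc closure above doles out exactly one AES round per invocation, using the global counter $r to know where it is: every ten calls advance to the next 16-byte block, slot 0 loads fresh input and flushes the previous ciphertext block, and slot 9 handles the extra rounds of 192/256-bit keys before aesenclast. A small sketch of that bookkeeping, illustrative only:

    # Replay the (block, round) bookkeeping of the $aesenc closure.
    use integer;
    for my $r (0 .. 21) {
        my ($n, $k) = ($r/10, $r%10);
        print "r=$r -> block $n, round $k",
              $k == 0 ? "  (load input; flush previous block if any)" :
              $k == 9 ? "  (key-length tail, then aesenclast)" : "",
              "\n";
    }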
-
-sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
-  my ($a,$b,$c,$d,$e);
-
-	&movdqa	(@X[0],@X[-3&7]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&movdqa	(@Tx[0],@X[-1&7]);
-	&palignr(@X[0],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	  &paddd	(@Tx[1],@X[-1&7]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&psrldq	(@Tx[0],4);		# "X[-3]", 3 dwords
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&pxor	(@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&movdqa	(@Tx[2],@X[0]);
-	&movdqa	(@Tx[0],@X[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&pslldq	(@Tx[2],12);		# "X[0]"<<96, extract one dword
-	&paddd	(@X[0],@X[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&psrld	(@Tx[0],31);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&movdqa	(@Tx[1],@Tx[2]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&psrld	(@Tx[2],30);
-	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=1
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&pslld	(@Tx[1],2);
-	&pxor	(@X[0],@Tx[2]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	  &movdqa	(@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)");	# K_XX_XX
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&pxor	(@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2
-
-	 foreach (@insns) { eval; }	# remaining instructions [if any]
-
-  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
-		push(@Tx,shift(@Tx));
-}
-
-sub Xupdate_ssse3_32_79()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 48 instructions
-  my ($a,$b,$c,$d,$e);
-
-	&movdqa	(@Tx[0],@X[-1&7])	if ($Xi==8);
-	 eval(shift(@insns));		# body_20_39
-	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
-	&palignr(@Tx[0],@X[-2&7],8);	# compose "X[-6]"
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-
-	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
-	 eval(shift(@insns));
-	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
-	if ($Xi%5) {
-	  &movdqa	(@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
-	} else {			# ... or load next one
-	  &movdqa	(@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
-	}
-	  &paddd	(@Tx[1],@X[-1&7]);
-	 eval(shift(@insns));		# ror
-	 eval(shift(@insns));
-
-	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
-	 eval(shift(@insns));		# body_20_39
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-
-	&movdqa	(@Tx[0],@X[0]);
-	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# ror
-	 eval(shift(@insns));
-
-	&pslld	(@X[0],2);
-	 eval(shift(@insns));		# body_20_39
-	 eval(shift(@insns));
-	&psrld	(@Tx[0],30);
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# ror
-	 eval(shift(@insns));
-
-	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=2
-	 eval(shift(@insns));		# body_20_39
-	 eval(shift(@insns));
-	  &movdqa	(@Tx[1],@X[0])	if ($Xi<19);
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-	 eval(shift(@insns));
-
-	 foreach (@insns) { eval; }	# remaining instructions
-
-  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
-		push(@Tx,shift(@Tx));
-}
-
-sub Xuplast_ssse3_80()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
-  my ($a,$b,$c,$d,$e);
-
-	 eval(shift(@insns));
-	  &paddd	(@Tx[1],@X[-1&7]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
-
-	 foreach (@insns) { eval; }		# remaining instructions
-
-	&cmp	($inp,$len);
-	&je	(".Ldone_ssse3");
-
-	unshift(@Tx,pop(@Tx));
-
-	&movdqa	(@X[2],"64($K_XX_XX)");		# pbswap mask
-	&movdqa	(@Tx[1],"0($K_XX_XX)");		# K_00_19
-	&movdqu	(@X[-4&7],"0($inp)");		# load input
-	&movdqu	(@X[-3&7],"16($inp)");
-	&movdqu	(@X[-2&7],"32($inp)");
-	&movdqu	(@X[-1&7],"48($inp)");
-	&pshufb	(@X[-4&7],@X[2]);		# byte swap
-	&add	($inp,64);
-
-  $Xi=0;
-}
-
-sub Xloop_ssse3()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
-  my ($a,$b,$c,$d,$e);
-
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&pshufb	(@X[($Xi-3)&7],@X[2]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&paddd	(@X[($Xi-4)&7],@Tx[1]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&movdqa	(eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]);	# X[]+K xfer to IALU
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&psubd	(@X[($Xi-4)&7],@Tx[1]);
-
-	foreach (@insns) { eval; }
-  $Xi++;
-}
-
-sub Xtail_ssse3()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
-  my ($a,$b,$c,$d,$e);
-
-	foreach (@insns) { eval; }
-}
-
-sub body_00_19 () {
-  use integer;
-  my ($k,$n);
-  my @r=(
-	'($a,$b,$c,$d,$e)=@V;'.
-	'&add	($e,eval(4*($j&15))."(%rsp)");',	# X[]+K xfer
-	'&xor	($c,$d);',
-	'&mov	(@T[1],$a);',	# $b in next round
-	'&$_rol	($a,5);',
-	'&and	(@T[0],$c);',	# ($b&($c^$d))
-	'&xor	($c,$d);',	# restore $c
-	'&xor	(@T[0],$d);',
-	'&add	($e,$a);',
-	'&$_ror	($b,$j?7:2);',	# $b>>>2
-	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
-	);
-	$n = scalar(@r);
-	$k = (($jj+1)*12/20)*20*$n/12;	# 12 aesencs per these 20 rounds
-	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
-	$jj++;
-    return @r;
-}
-
-sub body_20_39 () {
-  use integer;
-  my ($k,$n);
-  my @r=(
-	'($a,$b,$c,$d,$e)=@V;'.
-	'&add	($e,eval(4*($j++&15))."(%rsp)");',	# X[]+K xfer
-	'&xor	(@T[0],$d);',	# ($b^$d)
-	'&mov	(@T[1],$a);',	# $b in next round
-	'&$_rol	($a,5);',
-	'&xor	(@T[0],$c);',	# ($b^$d^$c)
-	'&add	($e,$a);',
-	'&$_ror	($b,7);',	# $b>>>2
-	'&add	($e,@T[0]);'	.'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
-	);
-	$n = scalar(@r);
-	$k = (($jj+1)*8/20)*20*$n/8;	# 8 aesencs per these 20 rounds
-	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
-	$jj++;
-    return @r;
-}
-
-sub body_40_59 () {
-  use integer;
-  my ($k,$n);
-  my @r=(
-	'($a,$b,$c,$d,$e)=@V;'.
-	'&mov	(@T[1],$c);',
-	'&xor	($c,$d);',
-	'&add	($e,eval(4*($j++&15))."(%rsp)");',	# X[]+K xfer
-	'&and	(@T[1],$d);',
-	'&and	(@T[0],$c);',	# ($b&($c^$d))
-	'&$_ror	($b,7);',	# $b>>>2
-	'&add	($e,@T[1]);',
-	'&mov	(@T[1],$a);',	# $b in next round
-	'&$_rol	($a,5);',
-	'&add	($e,@T[0]);',
-	'&xor	($c,$d);',	# restore $c
-	'&add	($e,$a);'	.'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
-	);
-	$n = scalar(@r);
-	$k=(($jj+1)*12/20)*20*$n/12;	# 12 aesencs per these 20 rounds
-	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
-	$jj++;
-    return @r;
-}
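The expression `$k = (($jj+1)*12/20)*20*$n/12` above (and its 8-per-20 sibling in body_20_39) picks, under integer arithmetic, the instruction slot within SHA1 round $jj at which the next aesenc is spliced in, spreading the AES rounds evenly across each 20-round stretch. A sketch replaying the `$jj == $k/$n` test for the first 20 rounds, where body_00_19's @r expands to 10 instruction strings:

    # Which of SHA1 rounds 0..19 get one of the 12 interleaved aesencs?
    use integer;
    my $n = 10;                              # slots per body_00_19 round
    for my $jj (0 .. 19) {
        my $k = (($jj+1)*12/20)*20*$n/12;
        print "round $jj: aesenc in slot ", $k % $n, "\n" if ($jj == $k/$n);
    }
    # exactly 12 of the 20 rounds print, one aesenc each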
-$code.=<<___;
-.align	16
-.Loop_ssse3:
-___
-	&Xupdate_ssse3_16_31(\&body_00_19);
-	&Xupdate_ssse3_16_31(\&body_00_19);
-	&Xupdate_ssse3_16_31(\&body_00_19);
-	&Xupdate_ssse3_16_31(\&body_00_19);
-	&Xupdate_ssse3_32_79(\&body_00_19);
-	&Xupdate_ssse3_32_79(\&body_20_39);
-	&Xupdate_ssse3_32_79(\&body_20_39);
-	&Xupdate_ssse3_32_79(\&body_20_39);
-	&Xupdate_ssse3_32_79(\&body_20_39);
-	&Xupdate_ssse3_32_79(\&body_20_39);
-	&Xupdate_ssse3_32_79(\&body_40_59);
-	&Xupdate_ssse3_32_79(\&body_40_59);
-	&Xupdate_ssse3_32_79(\&body_40_59);
-	&Xupdate_ssse3_32_79(\&body_40_59);
-	&Xupdate_ssse3_32_79(\&body_40_59);
-	&Xupdate_ssse3_32_79(\&body_20_39);
-	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"
-
-				$saved_j=$j; @saved_V=@V;
-				$saved_r=$r; @saved_rndkey=@rndkey;
-
-	&Xloop_ssse3(\&body_20_39);
-	&Xloop_ssse3(\&body_20_39);
-	&Xloop_ssse3(\&body_20_39);
-
-$code.=<<___;
-	movups	$iv,48($out,$in0)		# write output
-	lea	64($in0),$in0
-
-	add	0($ctx),$A			# update context
-	add	4($ctx),@T[0]
-	add	8($ctx),$C
-	add	12($ctx),$D
-	mov	$A,0($ctx)
-	add	16($ctx),$E
-	mov	@T[0],4($ctx)
-	mov	@T[0],$B			# magic seed
-	mov	$C,8($ctx)
-	mov	$D,12($ctx)
-	mov	$E,16($ctx)
-	jmp	.Loop_ssse3
-
-.align	16
-.Ldone_ssse3:
-___
-				$jj=$j=$saved_j; @V=@saved_V;
-				$r=$saved_r;     @rndkey=@saved_rndkey;
-
-	&Xtail_ssse3(\&body_20_39);
-	&Xtail_ssse3(\&body_20_39);
-	&Xtail_ssse3(\&body_20_39);
-
-$code.=<<___;
-	movups	$iv,48($out,$in0)		# write output
-	mov	88(%rsp),$ivp			# restore $ivp
-
-	add	0($ctx),$A			# update context
-	add	4($ctx),@T[0]
-	add	8($ctx),$C
-	mov	$A,0($ctx)
-	add	12($ctx),$D
-	mov	@T[0],4($ctx)
-	add	16($ctx),$E
-	mov	$C,8($ctx)
-	mov	$D,12($ctx)
-	mov	$E,16($ctx)
-	movups	$iv,($ivp)			# write IV
-___
-$code.=<<___ if ($win64);
-	movaps	96+0(%rsp),%xmm6
-	movaps	96+16(%rsp),%xmm7
-	movaps	96+32(%rsp),%xmm8
-	movaps	96+48(%rsp),%xmm9
-	movaps	96+64(%rsp),%xmm10
-	movaps	96+80(%rsp),%xmm11
-	movaps	96+96(%rsp),%xmm12
-	movaps	96+112(%rsp),%xmm13
-	movaps	96+128(%rsp),%xmm14
-	movaps	96+144(%rsp),%xmm15
-___
-$code.=<<___;
-	lea	`104+($win64?10*16:0)`(%rsp),%rsi
-	mov	0(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Lepilogue_ssse3:
-	ret
-.size	aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
-___
-
-$j=$jj=$r=$sn=0;
-
-if ($avx) {
-my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
-
-my $Xi=4;
-my @X=map("%xmm$_",(4..7,0..3));
-my @Tx=map("%xmm$_",(8..10));
-my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
-my @T=("%esi","%edi");
-
-my $_rol=sub { &shld(@_[0],@_) };
-my $_ror=sub { &shrd(@_[0],@_) };
-
-$code.=<<___;
-.type	aesni_cbc_sha1_enc_avx,\@function,6
-.align	16
-aesni_cbc_sha1_enc_avx:
-	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
-	#shr	\$6,$len			# debugging artefact
-	#jz	.Lepilogue_avx			# debugging artefact
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
-	#mov	$in0,$inp			# debugging artefact
-	#lea	64(%rsp),$ctx			# debugging artefact
-___
-$code.=<<___ if ($win64);
-	movaps	%xmm6,96+0(%rsp)
-	movaps	%xmm7,96+16(%rsp)
-	movaps	%xmm8,96+32(%rsp)
-	movaps	%xmm9,96+48(%rsp)
-	movaps	%xmm10,96+64(%rsp)
-	movaps	%xmm11,96+80(%rsp)
-	movaps	%xmm12,96+96(%rsp)
-	movaps	%xmm13,96+112(%rsp)
-	movaps	%xmm14,96+128(%rsp)
-	movaps	%xmm15,96+144(%rsp)
-.Lprologue_avx:
-___
-$code.=<<___;
-	vzeroall
-	mov	$in0,%r12			# reassign arguments
-	mov	$out,%r13
-	mov	$len,%r14
-	mov	$key,%r15
-	vmovdqu	($ivp),$iv			# load IV
-	mov	$ivp,88(%rsp)			# save $ivp
-___
-my ($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
-my $rounds="${ivp}d";
-$code.=<<___;
-	shl	\$6,$len
-	sub	$in0,$out
-	mov	240($key),$rounds
-	add	\$112,$key		# size optimization
-	add	$inp,$len		# end of input
-
-	lea	K_XX_XX(%rip),$K_XX_XX
-	mov	0($ctx),$A		# load context
-	mov	4($ctx),$B
-	mov	8($ctx),$C
-	mov	12($ctx),$D
-	mov	$B,@T[0]		# magic seed
-	mov	16($ctx),$E
-
-	vmovdqa	64($K_XX_XX),@X[2]	# pbswap mask
-	vmovdqa	0($K_XX_XX),@Tx[1]	# K_00_19
-	vmovdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
-	vmovdqu	16($inp),@X[-3&7]
-	vmovdqu	32($inp),@X[-2&7]
-	vmovdqu	48($inp),@X[-1&7]
-	vpshufb	@X[2],@X[-4&7],@X[-4&7]	# byte swap
-	add	\$64,$inp
-	vpshufb	@X[2],@X[-3&7],@X[-3&7]
-	vpshufb	@X[2],@X[-2&7],@X[-2&7]
-	vpshufb	@X[2],@X[-1&7],@X[-1&7]
-	vpaddd	@Tx[1],@X[-4&7],@X[0]	# add K_00_19
-	vpaddd	@Tx[1],@X[-3&7],@X[1]
-	vpaddd	@Tx[1],@X[-2&7],@X[2]
-	vmovdqa	@X[0],0(%rsp)		# X[]+K xfer to IALU
-	vmovdqa	@X[1],16(%rsp)
-	vmovdqa	@X[2],32(%rsp)
-	vmovups	-112($key),$rndkey0	# $key[0]
-	vmovups	16-112($key),$rndkey[0]	# forward reference
-	jmp	.Loop_avx
-___
-
-my $aesenc=sub {
-  use integer;
-  my ($n,$k)=($r/10,$r%10);
-    if ($k==0) {
-      $code.=<<___;
-	vmovups		`16*$n`($in0),$in		# load input
-	vxorps		$rndkey0,$in,$in
-___
-      $code.=<<___ if ($n);
-	vmovups		$iv,`16*($n-1)`($out,$in0)	# write output
-___
-      $code.=<<___;
-	vxorps		$in,$iv,$iv
-	vaesenc		$rndkey[0],$iv,$iv
-	vmovups		`32+16*$k-112`($key),$rndkey[1]
-___
-    } elsif ($k==9) {
-      $sn++;
-      $code.=<<___;
-	cmp		\$11,$rounds
-	jb		.Lvaesenclast$sn
-	vaesenc		$rndkey[0],$iv,$iv
-	vmovups		`32+16*($k+0)-112`($key),$rndkey[1]
-	vaesenc		$rndkey[1],$iv,$iv
-	vmovups		`32+16*($k+1)-112`($key),$rndkey[0]
-	je		.Lvaesenclast$sn
-	vaesenc		$rndkey[0],$iv,$iv
-	vmovups		`32+16*($k+2)-112`($key),$rndkey[1]
-	vaesenc		$rndkey[1],$iv,$iv
-	vmovups		`32+16*($k+3)-112`($key),$rndkey[0]
-.Lvaesenclast$sn:
-	vaesenclast	$rndkey[0],$iv,$iv
-	vmovups		16-112($key),$rndkey[1]		# forward reference
-___
-    } else {
-      $code.=<<___;
-	vaesenc		$rndkey[0],$iv,$iv
-	vmovups		`32+16*$k-112`($key),$rndkey[1]
-___
-    }
-    $r++;	unshift(@rndkey,pop(@rndkey));
-};
-
-sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
-  my ($a,$b,$c,$d,$e);
-
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	  &vpaddd	(@Tx[1],@Tx[1],@X[-1&7]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vpsrldq(@Tx[0],@X[-1&7],4);	# "X[-3]", 3 dwords
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&vpsrld	(@Tx[0],@X[0],31);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
-	&vpaddd	(@X[0],@X[0],@X[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&vpsrld	(@Tx[1],@Tx[2],30);
-	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&vpslld	(@Tx[2],@Tx[2],2);
-	&vpxor	(@X[0],@X[0],@Tx[1]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	&vpxor	(@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	  &vmovdqa	(@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)");	# K_XX_XX
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-
-	 foreach (@insns) { eval; }	# remaining instructions [if any]
-
-  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
-		push(@Tx,shift(@Tx));
-}
-
-sub Xupdate_avx_32_79()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 48 instructions
-  my ($a,$b,$c,$d,$e);
-
-	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
-	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
-	 eval(shift(@insns));		# body_20_39
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-
-	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
-	 eval(shift(@insns));
-	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
-	if ($Xi%5) {
-	  &vmovdqa	(@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
-	} else {			# ... or load next one
-	  &vmovdqa	(@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
-	}
-	  &vpaddd	(@Tx[1],@Tx[1],@X[-1&7]);
-	 eval(shift(@insns));		# ror
-	 eval(shift(@insns));
-
-	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
-	 eval(shift(@insns));		# body_20_39
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-
-	&vpsrld	(@Tx[0],@X[0],30);
-	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# ror
-	 eval(shift(@insns));
-
-	&vpslld	(@X[0],@X[0],2);
-	 eval(shift(@insns));		# body_20_39
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# ror
-	 eval(shift(@insns));
-
-	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
-	 eval(shift(@insns));		# body_20_39
-	 eval(shift(@insns));
-	  &vmovdqa	(@Tx[1],@X[0])	if ($Xi<19);
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));		# rol
-	 eval(shift(@insns));
-
-	 foreach (@insns) { eval; }	# remaining instructions
-
-  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
-		push(@Tx,shift(@Tx));
-}
-
-sub Xuplast_avx_80()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
-  my ($a,$b,$c,$d,$e);
-
-	 eval(shift(@insns));
-	  &vpaddd	(@Tx[1],@Tx[1],@X[-1&7]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
-
-	 foreach (@insns) { eval; }		# remaining instructions
-
-	&cmp	($inp,$len);
-	&je	(".Ldone_avx");
-
-	unshift(@Tx,pop(@Tx));
-
-	&vmovdqa(@X[2],"64($K_XX_XX)");		# pbswap mask
-	&vmovdqa(@Tx[1],"0($K_XX_XX)");		# K_00_19
-	&vmovdqu(@X[-4&7],"0($inp)");		# load input
-	&vmovdqu(@X[-3&7],"16($inp)");
-	&vmovdqu(@X[-2&7],"32($inp)");
-	&vmovdqu(@X[-1&7],"48($inp)");
-	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
-	&add	($inp,64);
-
-  $Xi=0;
-}
-
-sub Xloop_avx()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
-  my ($a,$b,$c,$d,$e);
-
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@Tx[1]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vmovdqa(eval(16*$Xi)."(%rsp)",@X[$Xi&7]);	# X[]+K xfer to IALU
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	foreach (@insns) { eval; }
-  $Xi++;
-}
-
-sub Xtail_avx()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
-  my ($a,$b,$c,$d,$e);
-
-	foreach (@insns) { eval; }
-}
-
-$code.=<<___;
-.align	16
-.Loop_avx:
-___
-	&Xupdate_avx_16_31(\&body_00_19);
-	&Xupdate_avx_16_31(\&body_00_19);
-	&Xupdate_avx_16_31(\&body_00_19);
-	&Xupdate_avx_16_31(\&body_00_19);
-	&Xupdate_avx_32_79(\&body_00_19);
-	&Xupdate_avx_32_79(\&body_20_39);
-	&Xupdate_avx_32_79(\&body_20_39);
-	&Xupdate_avx_32_79(\&body_20_39);
-	&Xupdate_avx_32_79(\&body_20_39);
-	&Xupdate_avx_32_79(\&body_20_39);
-	&Xupdate_avx_32_79(\&body_40_59);
-	&Xupdate_avx_32_79(\&body_40_59);
-	&Xupdate_avx_32_79(\&body_40_59);
-	&Xupdate_avx_32_79(\&body_40_59);
-	&Xupdate_avx_32_79(\&body_40_59);
-	&Xupdate_avx_32_79(\&body_20_39);
-	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"
-
-				$saved_j=$j; @saved_V=@V;
-				$saved_r=$r; @saved_rndkey=@rndkey;
-
-	&Xloop_avx(\&body_20_39);
-	&Xloop_avx(\&body_20_39);
-	&Xloop_avx(\&body_20_39);
-
-$code.=<<___;
-	vmovups	$iv,48($out,$in0)		# write output
-	lea	64($in0),$in0
-
-	add	0($ctx),$A			# update context
-	add	4($ctx),@T[0]
-	add	8($ctx),$C
-	add	12($ctx),$D
-	mov	$A,0($ctx)
-	add	16($ctx),$E
-	mov	@T[0],4($ctx)
-	mov	@T[0],$B			# magic seed
-	mov	$C,8($ctx)
-	mov	$D,12($ctx)
-	mov	$E,16($ctx)
-	jmp	.Loop_avx
-
-.align	16
-.Ldone_avx:
-___
-				$jj=$j=$saved_j; @V=@saved_V;
-				$r=$saved_r;     @rndkey=@saved_rndkey;
-
-	&Xtail_avx(\&body_20_39);
-	&Xtail_avx(\&body_20_39);
-	&Xtail_avx(\&body_20_39);
-
-$code.=<<___;
-	vmovups	$iv,48($out,$in0)		# write output
-	mov	88(%rsp),$ivp			# restore $ivp
-
-	add	0($ctx),$A			# update context
-	add	4($ctx),@T[0]
-	add	8($ctx),$C
-	mov	$A,0($ctx)
-	add	12($ctx),$D
-	mov	@T[0],4($ctx)
-	add	16($ctx),$E
-	mov	$C,8($ctx)
-	mov	$D,12($ctx)
-	mov	$E,16($ctx)
-	vmovups	$iv,($ivp)			# write IV
-	vzeroall
-___
-$code.=<<___ if ($win64);
-	movaps	96+0(%rsp),%xmm6
-	movaps	96+16(%rsp),%xmm7
-	movaps	96+32(%rsp),%xmm8
-	movaps	96+48(%rsp),%xmm9
-	movaps	96+64(%rsp),%xmm10
-	movaps	96+80(%rsp),%xmm11
-	movaps	96+96(%rsp),%xmm12
-	movaps	96+112(%rsp),%xmm13
-	movaps	96+128(%rsp),%xmm14
-	movaps	96+144(%rsp),%xmm15
-___
-$code.=<<___;
-	lea	`104+($win64?10*16:0)`(%rsp),%rsi
-	mov	0(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Lepilogue_avx:
-	ret
-.size	aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
-___
-}
-$code.=<<___;
-.align	64
-K_XX_XX:
-.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
-.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
-.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
-.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
-.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
-
-.asciz	"AESNI-CBC+SHA1 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
-.align	64
-___
-
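For reference, the four K_XX_XX constants above are the standard FIPS 180 SHA1 round constants, floor(2^30 * sqrt(n)) for n = 2, 3, 5, 10, broadcast to all four dwords. A Perl one-liner to verify:

    # The SHA1 round constants are floor(2^30 * sqrt(n)), n in {2,3,5,10}.
    printf "0x%08x\n", int(2**30 * sqrt($_)) for (2, 3, 5, 10);
    # prints 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6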
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern	__imp_RtlVirtualUnwind
-.type	ssse3_handler,\@abi-omnipotent
-.align	16
-ssse3_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# prologue label
-	cmp	%r10,%rbx		# context->Rip<prologue label
-	jb	.Lcommon_seh_tail
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lcommon_seh_tail
-
-	lea	96(%rax),%rsi
-	lea	512($context),%rdi	# &context.Xmm6
-	mov	\$20,%ecx
-	.long	0xa548f3fc		# cld; rep movsq
-	lea	`104+10*16`(%rax),%rax	# adjust stack pointer
-
-	mov	0(%rax),%r15
-	mov	8(%rax),%r14
-	mov	16(%rax),%r13
-	mov	24(%rax),%r12
-	mov	32(%rax),%rbp
-	mov	40(%rax),%rbx
-	lea	48(%rax),%rax
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lcommon_seh_tail:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$154,%ecx		# sizeof(CONTEXT)/8, quadwords for rep movsq
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	add	\$64,%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	ssse3_handler,.-ssse3_handler
-
-.section	.pdata
-.align	4
-	.rva	.LSEH_begin_aesni_cbc_sha1_enc_ssse3
-	.rva	.LSEH_end_aesni_cbc_sha1_enc_ssse3
-	.rva	.LSEH_info_aesni_cbc_sha1_enc_ssse3
-___
-$code.=<<___ if ($avx);
-	.rva	.LSEH_begin_aesni_cbc_sha1_enc_avx
-	.rva	.LSEH_end_aesni_cbc_sha1_enc_avx
-	.rva	.LSEH_info_aesni_cbc_sha1_enc_avx
-___
-$code.=<<___;
-.section	.xdata
-.align	8
-.LSEH_info_aesni_cbc_sha1_enc_ssse3:
-	.byte	9,0,0,0
-	.rva	ssse3_handler
-	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
-___
-$code.=<<___ if ($avx);
-.LSEH_info_aesni_cbc_sha1_enc_avx:
-	.byte	9,0,0,0
-	.rva	ssse3_handler
-	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
-___
-}
-
-####################################################################
-sub rex {
-  local *opcode=shift;
-  my ($dst,$src)=@_;
-  my $rex=0;
-
-    $rex|=0x04			if($dst>=8);
-    $rex|=0x01			if($src>=8);
-    push @opcode,$rex|0x40	if($rex);
-}
-
-sub aesni {
-  my $line=shift;
-  my @opcode=(0x66);
-
-    if ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
-	my %opcodelet = (
-		"aesenc" => 0xdc,	"aesenclast" => 0xdd
-	);
-	return undef if (!defined($opcodelet{$1}));
-	rex(\@opcode,$3,$2);
-	push @opcode,0x0f,0x38,$opcodelet{$1};
-	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
-	return ".byte\t".join(',',@opcode);
-    }
-    return $line;
-}
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
-
-print $code;
-close STDOUT;
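The rex()/aesni() pair at the end of the file hand-assembles AES-NI mnemonics into raw .byte sequences so the module still builds with assemblers that predate the extension. As a worked check of that encoding path, `aesenc %xmm1,%xmm2` needs no REX byte and yields 66 0f 38 dc d1; a sketch replaying the computation:

    # Replay sub aesni()'s byte encoding for "aesenc %xmm1,%xmm2".
    my ($src, $dst) = (1, 2);                  # %xmm1 -> %xmm2, both < 8
    my @opcode = (0x66, 0x0f, 0x38, 0xdc);     # 66 prefix + aesenc opcode
    push @opcode, 0xc0 | ($src & 7) | (($dst & 7) << 3);    # ModR/M 0xd1
    print ".byte\t", join(',', map { sprintf "0x%02x", $_ } @opcode), "\n";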

+ 0 - 2189
drivers/builtin_openssl2/crypto/aes/asm/aesni-x86.pl

@@ -1,2189 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# This module implements support for the Intel AES-NI extension. In
-# the OpenSSL context it is used with the Intel engine, but it can
-# also serve as a drop-in replacement for crypto/aes/asm/aes-586.pl
-# [see below for details].
-#
-# Performance.
-#
-# To start with, see the corresponding paragraph in aesni-x86_64.pl...
-# Instead of filling a table similar to the one found there, I've
-# chosen to summarize *comparison* results for raw ECB, CTR and CBC
-# benchmarks. The simplified table below shows 32-bit performance
-# relative to the 64-bit version at each given point. Ratios vary
-# across encryption modes, hence the interval values.
-#
-#	16-byte     64-byte     256-byte    1-KB        8-KB
-#	53-67%      67-84%      91-94%      95-98%      97-99.5%
-#
-# Lower ratios for smaller block sizes are perfectly understandable,
-# because function call overhead is higher in 32-bit mode. Largest
-# 8-KB block performance is virtually the same: 32-bit code is less than
-# 1% slower for ECB, CBC and CCM, and ~3% slower otherwise.
-
-# January 2011
-#
-# See aesni-x86_64.pl for details. Unlike x86_64 version this module
-# interleaves at most 6 aes[enc|dec] instructions, because there are
-# not enough registers for 8x interleave [which should be optimal for
-# Sandy Bridge]. In fact, the performance results for the 6x interleave
-# factor presented in aesni-x86_64.pl (except for CTR) were obtained
-# with this module.
-
-# April 2011
-#
-# Add aesni_xts_[en|de]crypt. Westmere spends 1.50 cycles processing
-# one byte out of 8KB with 128-bit key, Sandy Bridge - 1.09.
-
-$PREFIX="aesni";	# if $PREFIX is set to "AES", the script
-			# generates drop-in replacement for
-			# crypto/aes/asm/aes-586.pl:-)
-$inline=1;		# inline _aesni_[en|de]crypt
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],$0);
-
-if ($PREFIX eq "aesni")	{ $movekey=*movups; }
-else			{ $movekey=*movups; }
-
-$len="eax";
-$rounds="ecx";
-$key="edx";
-$inp="esi";
-$out="edi";
-$rounds_="ebx";	# backup copy for $rounds
-$key_="ebp";	# backup copy for $key
-
-$rndkey0="xmm0";
-$rndkey1="xmm1";
-$inout0="xmm2";
-$inout1="xmm3";
-$inout2="xmm4";
-$inout3="xmm5";	$in1="xmm5";
-$inout4="xmm6";	$in0="xmm6";
-$inout5="xmm7";	$ivec="xmm7";
-
-# AESNI extension
-sub aeskeygenassist
-{ my($dst,$src,$imm)=@_;
-    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
-    {	&data_byte(0x66,0x0f,0x3a,0xdf,0xc0|($1<<3)|$2,$imm);	}
-}
-sub aescommon
-{ my($opcodelet,$dst,$src)=@_;
-    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
-    {	&data_byte(0x66,0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);}
-}
-sub aesimc	{ aescommon(0xdb,@_); }
-sub aesenc	{ aescommon(0xdc,@_); }
-sub aesenclast	{ aescommon(0xdd,@_); }
-sub aesdec	{ aescommon(0xde,@_); }
-sub aesdeclast	{ aescommon(0xdf,@_); }
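
# A standalone sketch of the encoding the helpers above emit (the example
# instruction and register numbers here are illustrative): AES-NI ops are
# SSE-encoded as 0x66,0x0f,0x38,<opcodelet>,<ModR/M>, with ModR/M mod=11,
# reg=dst, rm=src -- hence the 0xc0|($dst<<3)|$src byte.
my ($dst, $src) = (2, 1);     # "aesenc %xmm1,%xmm2" in AT&T syntax
my @bytes = (0x66, 0x0f, 0x38, 0xdc, 0xc0 | ($dst << 3) | $src);
printf ".byte\t%s\t# aesenc\t%%xmm1,%%xmm2\n",
       join(',', map { sprintf "0x%02x", $_ } @bytes);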
-
-# Inline version of internal aesni_[en|de]crypt1
-{ my $sn;
-sub aesni_inline_generate1
-{ my ($p,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
-  $sn++;
-
-    &$movekey		($rndkey0,&QWP(0,$key));
-    &$movekey		($rndkey1,&QWP(16,$key));
-    &xorps		($ivec,$rndkey0)	if (defined($ivec));
-    &lea		($key,&DWP(32,$key));
-    &xorps		($inout,$ivec)		if (defined($ivec));
-    &xorps		($inout,$rndkey0)	if (!defined($ivec));
-    &set_label("${p}1_loop_$sn");
-	eval"&aes${p}	($inout,$rndkey1)";
-	&dec		($rounds);
-	&$movekey	($rndkey1,&QWP(0,$key));
-	&lea		($key,&DWP(16,$key));
-    &jnz		(&label("${p}1_loop_$sn"));
-    eval"&aes${p}last	($inout,$rndkey1)";
-}}
-
-sub aesni_generate1	# fully unrolled loop
-{ my ($p,$inout)=@_; $inout=$inout0 if (!defined($inout));
-
-    &function_begin_B("_aesni_${p}rypt1");
-	&movups		($rndkey0,&QWP(0,$key));
-	&$movekey	($rndkey1,&QWP(0x10,$key));
-	&xorps		($inout,$rndkey0);
-	&$movekey	($rndkey0,&QWP(0x20,$key));
-	&lea		($key,&DWP(0x30,$key));
-	&cmp		($rounds,11);
-	&jb		(&label("${p}128"));
-	&lea		($key,&DWP(0x20,$key));
-	&je		(&label("${p}192"));
-	&lea		($key,&DWP(0x20,$key));
-	eval"&aes${p}	($inout,$rndkey1)";
-	&$movekey	($rndkey1,&QWP(-0x40,$key));
-	eval"&aes${p}	($inout,$rndkey0)";
-	&$movekey	($rndkey0,&QWP(-0x30,$key));
-    &set_label("${p}192");
-	eval"&aes${p}	($inout,$rndkey1)";
-	&$movekey	($rndkey1,&QWP(-0x20,$key));
-	eval"&aes${p}	($inout,$rndkey0)";
-	&$movekey	($rndkey0,&QWP(-0x10,$key));
-    &set_label("${p}128");
-	eval"&aes${p}	($inout,$rndkey1)";
-	&$movekey	($rndkey1,&QWP(0,$key));
-	eval"&aes${p}	($inout,$rndkey0)";
-	&$movekey	($rndkey0,&QWP(0x10,$key));
-	eval"&aes${p}	($inout,$rndkey1)";
-	&$movekey	($rndkey1,&QWP(0x20,$key));
-	eval"&aes${p}	($inout,$rndkey0)";
-	&$movekey	($rndkey0,&QWP(0x30,$key));
-	eval"&aes${p}	($inout,$rndkey1)";
-	&$movekey	($rndkey1,&QWP(0x40,$key));
-	eval"&aes${p}	($inout,$rndkey0)";
-	&$movekey	($rndkey0,&QWP(0x50,$key));
-	eval"&aes${p}	($inout,$rndkey1)";
-	&$movekey	($rndkey1,&QWP(0x60,$key));
-	eval"&aes${p}	($inout,$rndkey0)";
-	&$movekey	($rndkey0,&QWP(0x70,$key));
-	eval"&aes${p}	($inout,$rndkey1)";
-    eval"&aes${p}last	($inout,$rndkey0)";
-    &ret();
-    &function_end_B("_aesni_${p}rypt1");
-}
-
-# void $PREFIX_encrypt (const void *inp,void *out,const AES_KEY *key);
-&aesni_generate1("enc") if (!$inline);
-&function_begin_B("${PREFIX}_encrypt");
-	&mov	("eax",&wparam(0));
-	&mov	($key,&wparam(2));
-	&movups	($inout0,&QWP(0,"eax"));
-	&mov	($rounds,&DWP(240,$key));
-	&mov	("eax",&wparam(1));
-	if ($inline)
-	{   &aesni_inline_generate1("enc");	}
-	else
-	{   &call	("_aesni_encrypt1");	}
-	&movups	(&QWP(0,"eax"),$inout0);
-	&ret	();
-&function_end_B("${PREFIX}_encrypt");
-
-# void $PREFIX_decrypt (const void *inp,void *out,const AES_KEY *key);
-&aesni_generate1("dec") if(!$inline);
-&function_begin_B("${PREFIX}_decrypt");
-	&mov	("eax",&wparam(0));
-	&mov	($key,&wparam(2));
-	&movups	($inout0,&QWP(0,"eax"));
-	&mov	($rounds,&DWP(240,$key));
-	&mov	("eax",&wparam(1));
-	if ($inline)
-	{   &aesni_inline_generate1("dec");	}
-	else
-	{   &call	("_aesni_decrypt1");	}
-	&movups	(&QWP(0,"eax"),$inout0);
-	&ret	();
-&function_end_B("${PREFIX}_decrypt");
-
-# _aesni_[en|de]cryptN are private interfaces, where N denotes the
-# interleave factor. Why were 3x subroutines originally used in loops?
-# Even though aes[enc|dec] latency was originally 6 cycles, the
-# instruction could be scheduled only every *2nd* cycle. 3x interleave
-# was therefore the factor providing optimal utilization, i.e. the
-# subroutine's throughput is virtually the same as that of the
-# non-interleaved subroutine [for up to 3 input blocks]. This is why it
-# makes no sense to implement a 2x subroutine. In the next processor
-# generation aes[enc|dec] latency is 8 cycles, but the instructions can
-# be scheduled every cycle. The optimal interleave for the new processor
-# is therefore 8x, but it's unfeasible to accommodate that many blocks
-# in the XMM registers addressable in 32-bit mode, so 6x is used instead...
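
# The arithmetic behind the paragraph above, as a sketch (latency/issue
# figures restated from the comment; the CPU names are assumed from the
# surrounding January/April 2011 notes): with latency L and one
# aes[enc|dec] issued every T cycles, about L/T independent blocks keep
# the AES unit saturated.
use POSIX qw(ceil);
for my $cpu (["Westmere", 6, 2], ["Sandy Bridge", 8, 1]) {
    my ($name, $lat, $issue) = @$cpu;
    printf "%-12s: latency=%d, issue=1/%d cycles => %dx interleave\n",
           $name, $lat, $issue, ceil($lat / $issue);
}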
-
-sub aesni_generate3
-{ my $p=shift;
-
-    &function_begin_B("_aesni_${p}rypt3");
-	&$movekey	($rndkey0,&QWP(0,$key));
-	&shr		($rounds,1);
-	&$movekey	($rndkey1,&QWP(16,$key));
-	&lea		($key,&DWP(32,$key));
-	&xorps		($inout0,$rndkey0);
-	&pxor		($inout1,$rndkey0);
-	&pxor		($inout2,$rndkey0);
-	&$movekey	($rndkey0,&QWP(0,$key));
-
-    &set_label("${p}3_loop");
-	eval"&aes${p}	($inout0,$rndkey1)";
-	eval"&aes${p}	($inout1,$rndkey1)";
-	&dec		($rounds);
-	eval"&aes${p}	($inout2,$rndkey1)";
-	&$movekey	($rndkey1,&QWP(16,$key));
-	eval"&aes${p}	($inout0,$rndkey0)";
-	eval"&aes${p}	($inout1,$rndkey0)";
-	&lea		($key,&DWP(32,$key));
-	eval"&aes${p}	($inout2,$rndkey0)";
-	&$movekey	($rndkey0,&QWP(0,$key));
-	&jnz		(&label("${p}3_loop"));
-    eval"&aes${p}	($inout0,$rndkey1)";
-    eval"&aes${p}	($inout1,$rndkey1)";
-    eval"&aes${p}	($inout2,$rndkey1)";
-    eval"&aes${p}last	($inout0,$rndkey0)";
-    eval"&aes${p}last	($inout1,$rndkey0)";
-    eval"&aes${p}last	($inout2,$rndkey0)";
-    &ret();
-    &function_end_B("_aesni_${p}rypt3");
-}
-
-# 4x interleave is implemented to improve small-block performance,
-# most notably [and naturally] that of 4-block inputs, by ~30%. One can
-# argue that 5x should have been implemented as well, but the
-# improvement would be <20%, so it's not worth it...
-sub aesni_generate4
-{ my $p=shift;
-
-    &function_begin_B("_aesni_${p}rypt4");
-	&$movekey	($rndkey0,&QWP(0,$key));
-	&$movekey	($rndkey1,&QWP(16,$key));
-	&shr		($rounds,1);
-	&lea		($key,&DWP(32,$key));
-	&xorps		($inout0,$rndkey0);
-	&pxor		($inout1,$rndkey0);
-	&pxor		($inout2,$rndkey0);
-	&pxor		($inout3,$rndkey0);
-	&$movekey	($rndkey0,&QWP(0,$key));
-
-    &set_label("${p}4_loop");
-	eval"&aes${p}	($inout0,$rndkey1)";
-	eval"&aes${p}	($inout1,$rndkey1)";
-	&dec		($rounds);
-	eval"&aes${p}	($inout2,$rndkey1)";
-	eval"&aes${p}	($inout3,$rndkey1)";
-	&$movekey	($rndkey1,&QWP(16,$key));
-	eval"&aes${p}	($inout0,$rndkey0)";
-	eval"&aes${p}	($inout1,$rndkey0)";
-	&lea		($key,&DWP(32,$key));
-	eval"&aes${p}	($inout2,$rndkey0)";
-	eval"&aes${p}	($inout3,$rndkey0)";
-	&$movekey	($rndkey0,&QWP(0,$key));
-    &jnz		(&label("${p}4_loop"));
-
-    eval"&aes${p}	($inout0,$rndkey1)";
-    eval"&aes${p}	($inout1,$rndkey1)";
-    eval"&aes${p}	($inout2,$rndkey1)";
-    eval"&aes${p}	($inout3,$rndkey1)";
-    eval"&aes${p}last	($inout0,$rndkey0)";
-    eval"&aes${p}last	($inout1,$rndkey0)";
-    eval"&aes${p}last	($inout2,$rndkey0)";
-    eval"&aes${p}last	($inout3,$rndkey0)";
-    &ret();
-    &function_end_B("_aesni_${p}rypt4");
-}
-
-sub aesni_generate6
-{ my $p=shift;
-
-    &function_begin_B("_aesni_${p}rypt6");
-    &static_label("_aesni_${p}rypt6_enter");
-	&$movekey	($rndkey0,&QWP(0,$key));
-	&shr		($rounds,1);
-	&$movekey	($rndkey1,&QWP(16,$key));
-	&lea		($key,&DWP(32,$key));
-	&xorps		($inout0,$rndkey0);
-	&pxor		($inout1,$rndkey0);	# pxor does better here
-	eval"&aes${p}	($inout0,$rndkey1)";
-	&pxor		($inout2,$rndkey0);
-	eval"&aes${p}	($inout1,$rndkey1)";
-	&pxor		($inout3,$rndkey0);
-	&dec		($rounds);
-	eval"&aes${p}	($inout2,$rndkey1)";
-	&pxor		($inout4,$rndkey0);
-	eval"&aes${p}	($inout3,$rndkey1)";
-	&pxor		($inout5,$rndkey0);
-	eval"&aes${p}	($inout4,$rndkey1)";
-	&$movekey	($rndkey0,&QWP(0,$key));
-	eval"&aes${p}	($inout5,$rndkey1)";
-	&jmp		(&label("_aesni_${p}rypt6_enter"));
-
-    &set_label("${p}6_loop",16);
-	eval"&aes${p}	($inout0,$rndkey1)";
-	eval"&aes${p}	($inout1,$rndkey1)";
-	&dec		($rounds);
-	eval"&aes${p}	($inout2,$rndkey1)";
-	eval"&aes${p}	($inout3,$rndkey1)";
-	eval"&aes${p}	($inout4,$rndkey1)";
-	eval"&aes${p}	($inout5,$rndkey1)";
-    &set_label("_aesni_${p}rypt6_enter",16);
-	&$movekey	($rndkey1,&QWP(16,$key));
-	eval"&aes${p}	($inout0,$rndkey0)";
-	eval"&aes${p}	($inout1,$rndkey0)";
-	&lea		($key,&DWP(32,$key));
-	eval"&aes${p}	($inout2,$rndkey0)";
-	eval"&aes${p}	($inout3,$rndkey0)";
-	eval"&aes${p}	($inout4,$rndkey0)";
-	eval"&aes${p}	($inout5,$rndkey0)";
-	&$movekey	($rndkey0,&QWP(0,$key));
-    &jnz		(&label("${p}6_loop"));
-
-    eval"&aes${p}	($inout0,$rndkey1)";
-    eval"&aes${p}	($inout1,$rndkey1)";
-    eval"&aes${p}	($inout2,$rndkey1)";
-    eval"&aes${p}	($inout3,$rndkey1)";
-    eval"&aes${p}	($inout4,$rndkey1)";
-    eval"&aes${p}	($inout5,$rndkey1)";
-    eval"&aes${p}last	($inout0,$rndkey0)";
-    eval"&aes${p}last	($inout1,$rndkey0)";
-    eval"&aes${p}last	($inout2,$rndkey0)";
-    eval"&aes${p}last	($inout3,$rndkey0)";
-    eval"&aes${p}last	($inout4,$rndkey0)";
-    eval"&aes${p}last	($inout5,$rndkey0)";
-    &ret();
-    &function_end_B("_aesni_${p}rypt6");
-}
-&aesni_generate3("enc") if ($PREFIX eq "aesni");
-&aesni_generate3("dec");
-&aesni_generate4("enc") if ($PREFIX eq "aesni");
-&aesni_generate4("dec");
-&aesni_generate6("enc") if ($PREFIX eq "aesni");
-&aesni_generate6("dec");
-
-if ($PREFIX eq "aesni") {
-######################################################################
-# void aesni_ecb_encrypt (const void *in, void *out,
-#                         size_t length, const AES_KEY *key,
-#                         int enc);
-&function_begin("aesni_ecb_encrypt");
-	&mov	($inp,&wparam(0));
-	&mov	($out,&wparam(1));
-	&mov	($len,&wparam(2));
-	&mov	($key,&wparam(3));
-	&mov	($rounds_,&wparam(4));
-	&and	($len,-16);
-	&jz	(&label("ecb_ret"));
-	&mov	($rounds,&DWP(240,$key));
-	&test	($rounds_,$rounds_);
-	&jz	(&label("ecb_decrypt"));
-
-	&mov	($key_,$key);		# backup $key
-	&mov	($rounds_,$rounds);	# backup $rounds
-	&cmp	($len,0x60);
-	&jb	(&label("ecb_enc_tail"));
-
-	&movdqu	($inout0,&QWP(0,$inp));
-	&movdqu	($inout1,&QWP(0x10,$inp));
-	&movdqu	($inout2,&QWP(0x20,$inp));
-	&movdqu	($inout3,&QWP(0x30,$inp));
-	&movdqu	($inout4,&QWP(0x40,$inp));
-	&movdqu	($inout5,&QWP(0x50,$inp));
-	&lea	($inp,&DWP(0x60,$inp));
-	&sub	($len,0x60);
-	&jmp	(&label("ecb_enc_loop6_enter"));
-
-&set_label("ecb_enc_loop6",16);
-	&movups	(&QWP(0,$out),$inout0);
-	&movdqu	($inout0,&QWP(0,$inp));
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movdqu	($inout1,&QWP(0x10,$inp));
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movdqu	($inout2,&QWP(0x20,$inp));
-	&movups	(&QWP(0x30,$out),$inout3);
-	&movdqu	($inout3,&QWP(0x30,$inp));
-	&movups	(&QWP(0x40,$out),$inout4);
-	&movdqu	($inout4,&QWP(0x40,$inp));
-	&movups	(&QWP(0x50,$out),$inout5);
-	&lea	($out,&DWP(0x60,$out));
-	&movdqu	($inout5,&QWP(0x50,$inp));
-	&lea	($inp,&DWP(0x60,$inp));
-&set_label("ecb_enc_loop6_enter");
-
-	&call	("_aesni_encrypt6");
-
-	&mov	($key,$key_);		# restore $key
-	&mov	($rounds,$rounds_);	# restore $rounds
-	&sub	($len,0x60);
-	&jnc	(&label("ecb_enc_loop6"));
-
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-	&movups	(&QWP(0x40,$out),$inout4);
-	&movups	(&QWP(0x50,$out),$inout5);
-	&lea	($out,&DWP(0x60,$out));
-	&add	($len,0x60);
-	&jz	(&label("ecb_ret"));
-
-&set_label("ecb_enc_tail");
-	&movups	($inout0,&QWP(0,$inp));
-	&cmp	($len,0x20);
-	&jb	(&label("ecb_enc_one"));
-	&movups	($inout1,&QWP(0x10,$inp));
-	&je	(&label("ecb_enc_two"));
-	&movups	($inout2,&QWP(0x20,$inp));
-	&cmp	($len,0x40);
-	&jb	(&label("ecb_enc_three"));
-	&movups	($inout3,&QWP(0x30,$inp));
-	&je	(&label("ecb_enc_four"));
-	&movups	($inout4,&QWP(0x40,$inp));
-	&xorps	($inout5,$inout5);
-	&call	("_aesni_encrypt6");
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-	&movups	(&QWP(0x40,$out),$inout4);
-	&jmp	(&label("ecb_ret"));
-
-&set_label("ecb_enc_one",16);
-	if ($inline)
-	{   &aesni_inline_generate1("enc");	}
-	else
-	{   &call	("_aesni_encrypt1");	}
-	&movups	(&QWP(0,$out),$inout0);
-	&jmp	(&label("ecb_ret"));
-
-&set_label("ecb_enc_two",16);
-	&xorps	($inout2,$inout2);
-	&call	("_aesni_encrypt3");
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&jmp	(&label("ecb_ret"));
-
-&set_label("ecb_enc_three",16);
-	&call	("_aesni_encrypt3");
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&jmp	(&label("ecb_ret"));
-
-&set_label("ecb_enc_four",16);
-	&call	("_aesni_encrypt4");
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-	&jmp	(&label("ecb_ret"));
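
# How the compare ladder above maps an N-block tail onto the fixed-width
# kernels (sketch; the 2- and 5-block cases zero the unused registers
# and simply never store their outputs):
my %tail_kernel = (
    1 => "_aesni_encrypt1",    # ecb_enc_one
    2 => "_aesni_encrypt3",    # ecb_enc_two, $inout2 zeroed
    3 => "_aesni_encrypt3",    # ecb_enc_three
    4 => "_aesni_encrypt4",    # ecb_enc_four
    5 => "_aesni_encrypt6",    # fall-through, $inout5 zeroed
);
printf "%d block(s) -> %s\n", $_, $tail_kernel{$_} for 1 .. 5;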
-######################################################################
-&set_label("ecb_decrypt",16);
-	&mov	($key_,$key);		# backup $key
-	&mov	($rounds_,$rounds);	# backup $rounds
-	&cmp	($len,0x60);
-	&jb	(&label("ecb_dec_tail"));
-
-	&movdqu	($inout0,&QWP(0,$inp));
-	&movdqu	($inout1,&QWP(0x10,$inp));
-	&movdqu	($inout2,&QWP(0x20,$inp));
-	&movdqu	($inout3,&QWP(0x30,$inp));
-	&movdqu	($inout4,&QWP(0x40,$inp));
-	&movdqu	($inout5,&QWP(0x50,$inp));
-	&lea	($inp,&DWP(0x60,$inp));
-	&sub	($len,0x60);
-	&jmp	(&label("ecb_dec_loop6_enter"));
-
-&set_label("ecb_dec_loop6",16);
-	&movups	(&QWP(0,$out),$inout0);
-	&movdqu	($inout0,&QWP(0,$inp));
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movdqu	($inout1,&QWP(0x10,$inp));
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movdqu	($inout2,&QWP(0x20,$inp));
-	&movups	(&QWP(0x30,$out),$inout3);
-	&movdqu	($inout3,&QWP(0x30,$inp));
-	&movups	(&QWP(0x40,$out),$inout4);
-	&movdqu	($inout4,&QWP(0x40,$inp));
-	&movups	(&QWP(0x50,$out),$inout5);
-	&lea	($out,&DWP(0x60,$out));
-	&movdqu	($inout5,&QWP(0x50,$inp));
-	&lea	($inp,&DWP(0x60,$inp));
-&set_label("ecb_dec_loop6_enter");
-
-	&call	("_aesni_decrypt6");
-
-	&mov	($key,$key_);		# restore $key
-	&mov	($rounds,$rounds_);	# restore $rounds
-	&sub	($len,0x60);
-	&jnc	(&label("ecb_dec_loop6"));
-
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-	&movups	(&QWP(0x40,$out),$inout4);
-	&movups	(&QWP(0x50,$out),$inout5);
-	&lea	($out,&DWP(0x60,$out));
-	&add	($len,0x60);
-	&jz	(&label("ecb_ret"));
-
-&set_label("ecb_dec_tail");
-	&movups	($inout0,&QWP(0,$inp));
-	&cmp	($len,0x20);
-	&jb	(&label("ecb_dec_one"));
-	&movups	($inout1,&QWP(0x10,$inp));
-	&je	(&label("ecb_dec_two"));
-	&movups	($inout2,&QWP(0x20,$inp));
-	&cmp	($len,0x40);
-	&jb	(&label("ecb_dec_three"));
-	&movups	($inout3,&QWP(0x30,$inp));
-	&je	(&label("ecb_dec_four"));
-	&movups	($inout4,&QWP(0x40,$inp));
-	&xorps	($inout5,$inout5);
-	&call	("_aesni_decrypt6");
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-	&movups	(&QWP(0x40,$out),$inout4);
-	&jmp	(&label("ecb_ret"));
-
-&set_label("ecb_dec_one",16);
-	if ($inline)
-	{   &aesni_inline_generate1("dec");	}
-	else
-	{   &call	("_aesni_decrypt1");	}
-	&movups	(&QWP(0,$out),$inout0);
-	&jmp	(&label("ecb_ret"));
-
-&set_label("ecb_dec_two",16);
-	&xorps	($inout2,$inout2);
-	&call	("_aesni_decrypt3");
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&jmp	(&label("ecb_ret"));
-
-&set_label("ecb_dec_three",16);
-	&call	("_aesni_decrypt3");
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&jmp	(&label("ecb_ret"));
-
-&set_label("ecb_dec_four",16);
-	&call	("_aesni_decrypt4");
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-
-&set_label("ecb_ret");
-&function_end("aesni_ecb_encrypt");
-
-######################################################################
-# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
-#                         size_t blocks, const AES_KEY *key,
-#                         const char *ivec,char *cmac);
-#
-# Handles only complete blocks, operates on 64-bit counter and
-# does not update *ivec! Nor does it finalize CMAC value
-# (see engine/eng_aesni.c for details)
-#
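
# Per-block recurrence the encrypt loop below implements, as a sketch
# ($E is a stand-in for the raw block cipher; toy 32-bit values here,
# not real 128-bit AES blocks):
sub ccm64_enc_step {
    my ($E, $ctr_block, $cmac, $pt) = @_;
    my $new_cmac = $E->($cmac ^ $pt);       # CBC-MAC absorbs plaintext
    my $ct       = $pt ^ $E->($ctr_block);  # CTR keystream encrypts it
    return ($ct, $new_cmac);
}
my $E = sub { (($_[0] << 1) ^ 0xdecafbad) & 0xffffffff };  # toy stand-in, NOT AES
my ($ct, $cmac) = ccm64_enc_step($E, 0x01, 0, 0xdeadbeef);
printf "ct=%08x cmac=%08x\n", $ct, $cmac;
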
-{ my $cmac=$inout1;
-&function_begin("aesni_ccm64_encrypt_blocks");
-	&mov	($inp,&wparam(0));
-	&mov	($out,&wparam(1));
-	&mov	($len,&wparam(2));
-	&mov	($key,&wparam(3));
-	&mov	($rounds_,&wparam(4));
-	&mov	($rounds,&wparam(5));
-	&mov	($key_,"esp");
-	&sub	("esp",60);
-	&and	("esp",-16);			# align stack
-	&mov	(&DWP(48,"esp"),$key_);
-
-	&movdqu	($ivec,&QWP(0,$rounds_));	# load ivec
-	&movdqu	($cmac,&QWP(0,$rounds));	# load cmac
-	&mov	($rounds,&DWP(240,$key));
-
-	# compose byte-swap control mask for pshufb on stack
-	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
-	&mov	(&DWP(4,"esp"),0x08090a0b);
-	&mov	(&DWP(8,"esp"),0x04050607);
-	&mov	(&DWP(12,"esp"),0x00010203);
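
# What the four stores above compose, as a sketch (plain-Perl byte
# indexing, not xmm code): pshufb replaces each destination byte i with
# source byte mask[i], so indices 15..0 reverse the 16-byte block
# (big-endian wire counter <-> little-endian lanes).
my @mask = map { unpack "C4", pack "V", $_ }
           (0x0c0d0e0f, 0x08090a0b, 0x04050607, 0x00010203);
my @src = (0 .. 15);
print join(',', map { $src[$_] } @mask), "\n";   # 15,14,13,...,0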
-
-	# compose counter increment vector on stack
-	&mov	($rounds_,1);
-	&xor	($key_,$key_);
-	&mov	(&DWP(16,"esp"),$rounds_);
-	&mov	(&DWP(20,"esp"),$key_);
-	&mov	(&DWP(24,"esp"),$key_);
-	&mov	(&DWP(28,"esp"),$key_);
-
-	&shr	($rounds,1);
-	&lea	($key_,&DWP(0,$key));
-	&movdqa	($inout3,&QWP(0,"esp"));
-	&movdqa	($inout0,$ivec);
-	&mov	($rounds_,$rounds);
-	&pshufb	($ivec,$inout3);
-
-&set_label("ccm64_enc_outer");
-	&$movekey	($rndkey0,&QWP(0,$key_));
-	&mov		($rounds,$rounds_);
-	&movups		($in0,&QWP(0,$inp));
-
-	&xorps		($inout0,$rndkey0);
-	&$movekey	($rndkey1,&QWP(16,$key_));
-	&xorps		($rndkey0,$in0);
-	&lea		($key,&DWP(32,$key_));
-	&xorps		($cmac,$rndkey0);		# cmac^=inp
-	&$movekey	($rndkey0,&QWP(0,$key));
-
-&set_label("ccm64_enc2_loop");
-	&aesenc		($inout0,$rndkey1);
-	&dec		($rounds);
-	&aesenc		($cmac,$rndkey1);
-	&$movekey	($rndkey1,&QWP(16,$key));
-	&aesenc		($inout0,$rndkey0);
-	&lea		($key,&DWP(32,$key));
-	&aesenc		($cmac,$rndkey0);
-	&$movekey	($rndkey0,&QWP(0,$key));
-	&jnz		(&label("ccm64_enc2_loop"));
-	&aesenc		($inout0,$rndkey1);
-	&aesenc		($cmac,$rndkey1);
-	&paddq		($ivec,&QWP(16,"esp"));
-	&aesenclast	($inout0,$rndkey0);
-	&aesenclast	($cmac,$rndkey0);
-
-	&dec	($len);
-	&lea	($inp,&DWP(16,$inp));
-	&xorps	($in0,$inout0);			# inp^=E(ivec)
-	&movdqa	($inout0,$ivec);
-	&movups	(&QWP(0,$out),$in0);		# save output
-	&lea	($out,&DWP(16,$out));
-	&pshufb	($inout0,$inout3);
-	&jnz	(&label("ccm64_enc_outer"));
-
-	&mov	("esp",&DWP(48,"esp"));
-	&mov	($out,&wparam(5));
-	&movups	(&QWP(0,$out),$cmac);
-&function_end("aesni_ccm64_encrypt_blocks");
-
-&function_begin("aesni_ccm64_decrypt_blocks");
-	&mov	($inp,&wparam(0));
-	&mov	($out,&wparam(1));
-	&mov	($len,&wparam(2));
-	&mov	($key,&wparam(3));
-	&mov	($rounds_,&wparam(4));
-	&mov	($rounds,&wparam(5));
-	&mov	($key_,"esp");
-	&sub	("esp",60);
-	&and	("esp",-16);			# align stack
-	&mov	(&DWP(48,"esp"),$key_);
-
-	&movdqu	($ivec,&QWP(0,$rounds_));	# load ivec
-	&movdqu	($cmac,&QWP(0,$rounds));	# load cmac
-	&mov	($rounds,&DWP(240,$key));
-
-	# compose byte-swap control mask for pshufb on stack
-	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
-	&mov	(&DWP(4,"esp"),0x08090a0b);
-	&mov	(&DWP(8,"esp"),0x04050607);
-	&mov	(&DWP(12,"esp"),0x00010203);
-
-	# compose counter increment vector on stack
-	&mov	($rounds_,1);
-	&xor	($key_,$key_);
-	&mov	(&DWP(16,"esp"),$rounds_);
-	&mov	(&DWP(20,"esp"),$key_);
-	&mov	(&DWP(24,"esp"),$key_);
-	&mov	(&DWP(28,"esp"),$key_);
-
-	&movdqa	($inout3,&QWP(0,"esp"));	# bswap mask
-	&movdqa	($inout0,$ivec);
-
-	&mov	($key_,$key);
-	&mov	($rounds_,$rounds);
-
-	&pshufb	($ivec,$inout3);
-	if ($inline)
-	{   &aesni_inline_generate1("enc");	}
-	else
-	{   &call	("_aesni_encrypt1");	}
-	&movups	($in0,&QWP(0,$inp));		# load inp
-	&paddq	($ivec,&QWP(16,"esp"));
-	&lea	($inp,&DWP(16,$inp));
-	&jmp	(&label("ccm64_dec_outer"));
-
-&set_label("ccm64_dec_outer",16);
-	&xorps	($in0,$inout0);			# inp ^= E(ivec)
-	&movdqa	($inout0,$ivec);
-	&mov	($rounds,$rounds_);
-	&movups	(&QWP(0,$out),$in0);		# save output
-	&lea	($out,&DWP(16,$out));
-	&pshufb	($inout0,$inout3);
-
-	&sub	($len,1);
-	&jz	(&label("ccm64_dec_break"));
-
-	&$movekey	($rndkey0,&QWP(0,$key_));
-	&shr		($rounds,1);
-	&$movekey	($rndkey1,&QWP(16,$key_));
-	&xorps		($in0,$rndkey0);
-	&lea		($key,&DWP(32,$key_));
-	&xorps		($inout0,$rndkey0);
-	&xorps		($cmac,$in0);		# cmac^=out
-	&$movekey	($rndkey0,&QWP(0,$key));
-
-&set_label("ccm64_dec2_loop");
-	&aesenc		($inout0,$rndkey1);
-	&dec		($rounds);
-	&aesenc		($cmac,$rndkey1);
-	&$movekey	($rndkey1,&QWP(16,$key));
-	&aesenc		($inout0,$rndkey0);
-	&lea		($key,&DWP(32,$key));
-	&aesenc		($cmac,$rndkey0);
-	&$movekey	($rndkey0,&QWP(0,$key));
-	&jnz		(&label("ccm64_dec2_loop"));
-	&movups		($in0,&QWP(0,$inp));	# load inp
-	&paddq		($ivec,&QWP(16,"esp"));
-	&aesenc		($inout0,$rndkey1);
-	&aesenc		($cmac,$rndkey1);
-	&lea		($inp,&DWP(16,$inp));
-	&aesenclast	($inout0,$rndkey0);
-	&aesenclast	($cmac,$rndkey0);
-	&jmp	(&label("ccm64_dec_outer"));
-
-&set_label("ccm64_dec_break",16);
-	&mov	($key,$key_);
-	if ($inline)
-	{   &aesni_inline_generate1("enc",$cmac,$in0);	}
-	else
-	{   &call	("_aesni_encrypt1",$cmac);	}
-
-	&mov	("esp",&DWP(48,"esp"));
-	&mov	($out,&wparam(5));
-	&movups	(&QWP(0,$out),$cmac);
-&function_end("aesni_ccm64_decrypt_blocks");
-}
-
-######################################################################
-# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
-#                         size_t blocks, const AES_KEY *key,
-#                         const char *ivec);
-#
-# Handles only complete blocks, operates on 32-bit counter and
-# does not update *ivec! (see engine/eng_aesni.c for details)
-#
-# stack layout:
-#	0	pshufb mask
-#	16	vector addend: 0,6,6,6
-# 	32	counter-less ivec
-#	48	1st triplet of counter vector
-#	64	2nd triplet of counter vector
-#	80	saved %esp
-
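# A dword-level sketch of the counter composition used below (toy 32-bit
# arithmetic; the real code keeps these values in xmm lanes and swaps
# them back to big-endian with the pshufb mask): two triplets carry
# counters n..n+2 and n+3..n+5, and the 6,6,6,0 addend advances both
# by six blocks per iteration.
my $n = 0xfffffffe;                          # example 32-bit counter
my @t1 = map { ($n + $_) & 0xffffffff } 0 .. 2;
my @t2 = map { ($n + $_) & 0xffffffff } 3 .. 5;
for my $iter (1, 2) {
    printf "iter %d: %s | %s\n", $iter,
           join(',', map { sprintf "%08x", $_ } @t1),
           join(',', map { sprintf "%08x", $_ } @t2);
    $_ = ($_ + 6) & 0xffffffff for (@t1, @t2);
}
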
-&function_begin("aesni_ctr32_encrypt_blocks");
-	&mov	($inp,&wparam(0));
-	&mov	($out,&wparam(1));
-	&mov	($len,&wparam(2));
-	&mov	($key,&wparam(3));
-	&mov	($rounds_,&wparam(4));
-	&mov	($key_,"esp");
-	&sub	("esp",88);
-	&and	("esp",-16);			# align stack
-	&mov	(&DWP(80,"esp"),$key_);
-
-	&cmp	($len,1);
-	&je	(&label("ctr32_one_shortcut"));
-
-	&movdqu	($inout5,&QWP(0,$rounds_));	# load ivec
-
-	# compose byte-swap control mask for pshufb on stack
-	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
-	&mov	(&DWP(4,"esp"),0x08090a0b);
-	&mov	(&DWP(8,"esp"),0x04050607);
-	&mov	(&DWP(12,"esp"),0x00010203);
-
-	# compose counter increment vector on stack
-	&mov	($rounds,6);
-	&xor	($key_,$key_);
-	&mov	(&DWP(16,"esp"),$rounds);
-	&mov	(&DWP(20,"esp"),$rounds);
-	&mov	(&DWP(24,"esp"),$rounds);
-	&mov	(&DWP(28,"esp"),$key_);
-
-	&pextrd	($rounds_,$inout5,3);		# pull 32-bit counter
-	&pinsrd	($inout5,$key_,3);		# wipe 32-bit counter
-
-	&mov	($rounds,&DWP(240,$key));	# key->rounds
-
-	# compose 2 vectors of 3x32-bit counters
-	&bswap	($rounds_);
-	&pxor	($rndkey1,$rndkey1);
-	&pxor	($rndkey0,$rndkey0);
-	&movdqa	($inout0,&QWP(0,"esp"));	# load byte-swap mask
-	&pinsrd	($rndkey1,$rounds_,0);
-	&lea	($key_,&DWP(3,$rounds_));
-	&pinsrd	($rndkey0,$key_,0);
-	&inc	($rounds_);
-	&pinsrd	($rndkey1,$rounds_,1);
-	&inc	($key_);
-	&pinsrd	($rndkey0,$key_,1);
-	&inc	($rounds_);
-	&pinsrd	($rndkey1,$rounds_,2);
-	&inc	($key_);
-	&pinsrd	($rndkey0,$key_,2);
-	&movdqa	(&QWP(48,"esp"),$rndkey1);	# save 1st triplet
-	&pshufb	($rndkey1,$inout0);		# byte swap
-	&movdqa	(&QWP(64,"esp"),$rndkey0);	# save 2nd triplet
-	&pshufb	($rndkey0,$inout0);		# byte swap
-
-	&pshufd	($inout0,$rndkey1,3<<6);	# place counter to upper dword
-	&pshufd	($inout1,$rndkey1,2<<6);
-	&cmp	($len,6);
-	&jb	(&label("ctr32_tail"));
-	&movdqa	(&QWP(32,"esp"),$inout5);	# save counter-less ivec
-	&shr	($rounds,1);
-	&mov	($key_,$key);			# backup $key
-	&mov	($rounds_,$rounds);		# backup $rounds
-	&sub	($len,6);
-	&jmp	(&label("ctr32_loop6"));
-
-&set_label("ctr32_loop6",16);
-	&pshufd	($inout2,$rndkey1,1<<6);
-	&movdqa	($rndkey1,&QWP(32,"esp"));	# pull counter-less ivec
-	&pshufd	($inout3,$rndkey0,3<<6);
-	&por	($inout0,$rndkey1);		# merge counter-less ivec
-	&pshufd	($inout4,$rndkey0,2<<6);
-	&por	($inout1,$rndkey1);
-	&pshufd	($inout5,$rndkey0,1<<6);
-	&por	($inout2,$rndkey1);
-	&por	($inout3,$rndkey1);
-	&por	($inout4,$rndkey1);
-	&por	($inout5,$rndkey1);
-
-	# inlining _aesni_encrypt6's prologue gives ~4% improvement...
-	&$movekey	($rndkey0,&QWP(0,$key_));
-	&$movekey	($rndkey1,&QWP(16,$key_));
-	&lea		($key,&DWP(32,$key_));
-	&dec		($rounds);
-	&pxor		($inout0,$rndkey0);
-	&pxor		($inout1,$rndkey0);
-	&aesenc		($inout0,$rndkey1);
-	&pxor		($inout2,$rndkey0);
-	&aesenc		($inout1,$rndkey1);
-	&pxor		($inout3,$rndkey0);
-	&aesenc		($inout2,$rndkey1);
-	&pxor		($inout4,$rndkey0);
-	&aesenc		($inout3,$rndkey1);
-	&pxor		($inout5,$rndkey0);
-	&aesenc		($inout4,$rndkey1);
-	&$movekey	($rndkey0,&QWP(0,$key));
-	&aesenc		($inout5,$rndkey1);
-
-	&call		(&label("_aesni_encrypt6_enter"));
-
-	&movups	($rndkey1,&QWP(0,$inp));
-	&movups	($rndkey0,&QWP(0x10,$inp));
-	&xorps	($inout0,$rndkey1);
-	&movups	($rndkey1,&QWP(0x20,$inp));
-	&xorps	($inout1,$rndkey0);
-	&movups	(&QWP(0,$out),$inout0);
-	&movdqa	($rndkey0,&QWP(16,"esp"));	# load increment
-	&xorps	($inout2,$rndkey1);
-	&movdqa	($rndkey1,&QWP(48,"esp"));	# load 1st triplet
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-
-	&paddd	($rndkey1,$rndkey0);		# 1st triplet increment
-	&paddd	($rndkey0,&QWP(64,"esp"));	# 2nd triplet increment
-	&movdqa	($inout0,&QWP(0,"esp"));	# load byte swap mask
-
-	&movups	($inout1,&QWP(0x30,$inp));
-	&movups	($inout2,&QWP(0x40,$inp));
-	&xorps	($inout3,$inout1);
-	&movups	($inout1,&QWP(0x50,$inp));
-	&lea	($inp,&DWP(0x60,$inp));
-	&movdqa	(&QWP(48,"esp"),$rndkey1);	# save 1st triplet
-	&pshufb	($rndkey1,$inout0);		# byte swap
-	&xorps	($inout4,$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-	&xorps	($inout5,$inout1);
-	&movdqa	(&QWP(64,"esp"),$rndkey0);	# save 2nd triplet
-	&pshufb	($rndkey0,$inout0);		# byte swap
-	&movups	(&QWP(0x40,$out),$inout4);
-	&pshufd	($inout0,$rndkey1,3<<6);
-	&movups	(&QWP(0x50,$out),$inout5);
-	&lea	($out,&DWP(0x60,$out));
-
-	&mov	($rounds,$rounds_);
-	&pshufd	($inout1,$rndkey1,2<<6);
-	&sub	($len,6);
-	&jnc	(&label("ctr32_loop6"));
-
-	&add	($len,6);
-	&jz	(&label("ctr32_ret"));
-	&mov	($key,$key_);
-	&lea	($rounds,&DWP(1,"",$rounds,2));	# restore $rounds
-	&movdqa	($inout5,&QWP(32,"esp"));	# pull counter-less ivec
-
-&set_label("ctr32_tail");
-	&por	($inout0,$inout5);
-	&cmp	($len,2);
-	&jb	(&label("ctr32_one"));
-
-	&pshufd	($inout2,$rndkey1,1<<6);
-	&por	($inout1,$inout5);
-	&je	(&label("ctr32_two"));
-
-	&pshufd	($inout3,$rndkey0,3<<6);
-	&por	($inout2,$inout5);
-	&cmp	($len,4);
-	&jb	(&label("ctr32_three"));
-
-	&pshufd	($inout4,$rndkey0,2<<6);
-	&por	($inout3,$inout5);
-	&je	(&label("ctr32_four"));
-
-	&por	($inout4,$inout5);
-	&call	("_aesni_encrypt6");
-	&movups	($rndkey1,&QWP(0,$inp));
-	&movups	($rndkey0,&QWP(0x10,$inp));
-	&xorps	($inout0,$rndkey1);
-	&movups	($rndkey1,&QWP(0x20,$inp));
-	&xorps	($inout1,$rndkey0);
-	&movups	($rndkey0,&QWP(0x30,$inp));
-	&xorps	($inout2,$rndkey1);
-	&movups	($rndkey1,&QWP(0x40,$inp));
-	&xorps	($inout3,$rndkey0);
-	&movups	(&QWP(0,$out),$inout0);
-	&xorps	($inout4,$rndkey1);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-	&movups	(&QWP(0x40,$out),$inout4);
-	&jmp	(&label("ctr32_ret"));
-
-&set_label("ctr32_one_shortcut",16);
-	&movups	($inout0,&QWP(0,$rounds_));	# load ivec
-	&mov	($rounds,&DWP(240,$key));
-
-&set_label("ctr32_one");
-	if ($inline)
-	{   &aesni_inline_generate1("enc");	}
-	else
-	{   &call	("_aesni_encrypt1");	}
-	&movups	($in0,&QWP(0,$inp));
-	&xorps	($in0,$inout0);
-	&movups	(&QWP(0,$out),$in0);
-	&jmp	(&label("ctr32_ret"));
-
-&set_label("ctr32_two",16);
-	&call	("_aesni_encrypt3");
-	&movups	($inout3,&QWP(0,$inp));
-	&movups	($inout4,&QWP(0x10,$inp));
-	&xorps	($inout0,$inout3);
-	&xorps	($inout1,$inout4);
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&jmp	(&label("ctr32_ret"));
-
-&set_label("ctr32_three",16);
-	&call	("_aesni_encrypt3");
-	&movups	($inout3,&QWP(0,$inp));
-	&movups	($inout4,&QWP(0x10,$inp));
-	&xorps	($inout0,$inout3);
-	&movups	($inout5,&QWP(0x20,$inp));
-	&xorps	($inout1,$inout4);
-	&movups	(&QWP(0,$out),$inout0);
-	&xorps	($inout2,$inout5);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&jmp	(&label("ctr32_ret"));
-
-&set_label("ctr32_four",16);
-	&call	("_aesni_encrypt4");
-	&movups	($inout4,&QWP(0,$inp));
-	&movups	($inout5,&QWP(0x10,$inp));
-	&movups	($rndkey1,&QWP(0x20,$inp));
-	&xorps	($inout0,$inout4);
-	&movups	($rndkey0,&QWP(0x30,$inp));
-	&xorps	($inout1,$inout5);
-	&movups	(&QWP(0,$out),$inout0);
-	&xorps	($inout2,$rndkey1);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&xorps	($inout3,$rndkey0);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-
-&set_label("ctr32_ret");
-	&mov	("esp",&DWP(80,"esp"));
-&function_end("aesni_ctr32_encrypt_blocks");
-
-######################################################################
-# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
-#	const AES_KEY *key1, const AES_KEY *key2,
-#	const unsigned char iv[16]);
-#
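
# The pcmpgtd/pshufd/pand sequences in the bodies below multiply the
# tweak by x in GF(2^128) mod x^128+x^7+x^2+x+1; a plain two-limb
# restatement (sketch, assumes a 64-bit perl):
sub xts_next_tweak {
    my ($lo, $hi) = @_;                          # 128-bit tweak, two halves
    my $carry = ($hi >> 63) & 1;                 # bit shifted out of x^127
    $hi = (($hi << 1) | ($lo >> 63)) & 0xffffffffffffffff;
    $lo = (($lo << 1) & 0xffffffffffffffff) ^ ($carry ? 0x87 : 0);
    return ($lo, $hi);                           # 0x87 = the magic constant
}
my ($lo, $hi) = xts_next_tweak(0x8000000000000000, 0x8000000000000000);
printf "%016x %016x\n", $hi, $lo;   # 0000000000000001 0000000000000087
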
-{ my ($tweak,$twtmp,$twres,$twmask)=($rndkey1,$rndkey0,$inout0,$inout1);
-
-&function_begin("aesni_xts_encrypt");
-	&mov	($key,&wparam(4));		# key2
-	&mov	($inp,&wparam(5));		# clear-text tweak
-
-	&mov	($rounds,&DWP(240,$key));	# key2->rounds
-	&movups	($inout0,&QWP(0,$inp));
-	if ($inline)
-	{   &aesni_inline_generate1("enc");	}
-	else
-	{   &call	("_aesni_encrypt1");	}
-
-	&mov	($inp,&wparam(0));
-	&mov	($out,&wparam(1));
-	&mov	($len,&wparam(2));
-	&mov	($key,&wparam(3));		# key1
-
-	&mov	($key_,"esp");
-	&sub	("esp",16*7+8);
-	&mov	($rounds,&DWP(240,$key));	# key1->rounds
-	&and	("esp",-16);			# align stack
-
-	&mov	(&DWP(16*6+0,"esp"),0x87);	# compose the magic constant
-	&mov	(&DWP(16*6+4,"esp"),0);
-	&mov	(&DWP(16*6+8,"esp"),1);
-	&mov	(&DWP(16*6+12,"esp"),0);
-	&mov	(&DWP(16*7+0,"esp"),$len);	# save original $len
-	&mov	(&DWP(16*7+4,"esp"),$key_);	# save original %esp
-
-	&movdqa	($tweak,$inout0);
-	&pxor	($twtmp,$twtmp);
-	&movdqa	($twmask,&QWP(6*16,"esp"));	# 0x0...010...87
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-
-	&and	($len,-16);
-	&mov	($key_,$key);			# backup $key
-	&mov	($rounds_,$rounds);		# backup $rounds
-	&sub	($len,16*6);
-	&jc	(&label("xts_enc_short"));
-
-	&shr	($rounds,1);
-	&mov	($rounds_,$rounds);
-	&jmp	(&label("xts_enc_loop6"));
-
-&set_label("xts_enc_loop6",16);
-	for ($i=0;$i<4;$i++) {
-	    &pshufd	($twres,$twtmp,0x13);
-	    &pxor	($twtmp,$twtmp);
-	    &movdqa	(&QWP(16*$i,"esp"),$tweak);
-	    &paddq	($tweak,$tweak);	# &psllq($tweak,1);
-	    &pand	($twres,$twmask);	# isolate carry and residue
-	    &pcmpgtd	($twtmp,$tweak);	# broadcast upper bits
-	    &pxor	($tweak,$twres);
-	}
-	&pshufd	($inout5,$twtmp,0x13);
-	&movdqa	(&QWP(16*$i++,"esp"),$tweak);
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	 &$movekey	($rndkey0,&QWP(0,$key_));
-	&pand	($inout5,$twmask);		# isolate carry and residue
-	 &movups	($inout0,&QWP(0,$inp));	# load input
-	&pxor	($inout5,$tweak);
-
-	# inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
-	&movdqu	($inout1,&QWP(16*1,$inp));
-	 &xorps		($inout0,$rndkey0);	# input^=rndkey[0]
-	&movdqu	($inout2,&QWP(16*2,$inp));
-	 &pxor		($inout1,$rndkey0);
-	&movdqu	($inout3,&QWP(16*3,$inp));
-	 &pxor		($inout2,$rndkey0);
-	&movdqu	($inout4,&QWP(16*4,$inp));
-	 &pxor		($inout3,$rndkey0);
-	&movdqu	($rndkey1,&QWP(16*5,$inp));
-	 &pxor		($inout4,$rndkey0);
-	&lea	($inp,&DWP(16*6,$inp));
-	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
-	&movdqa	(&QWP(16*$i,"esp"),$inout5);	# save last tweak
-	&pxor	($inout5,$rndkey1);
-
-	 &$movekey	($rndkey1,&QWP(16,$key_));
-	 &lea		($key,&DWP(32,$key_));
-	&pxor	($inout1,&QWP(16*1,"esp"));
-	 &aesenc	($inout0,$rndkey1);
-	&pxor	($inout2,&QWP(16*2,"esp"));
-	 &aesenc	($inout1,$rndkey1);
-	&pxor	($inout3,&QWP(16*3,"esp"));
-	 &dec		($rounds);
-	 &aesenc	($inout2,$rndkey1);
-	&pxor	($inout4,&QWP(16*4,"esp"));
-	 &aesenc	($inout3,$rndkey1);
-	&pxor		($inout5,$rndkey0);
-	 &aesenc	($inout4,$rndkey1);
-	 &$movekey	($rndkey0,&QWP(0,$key));
-	 &aesenc	($inout5,$rndkey1);
-	&call		(&label("_aesni_encrypt6_enter"));
-
-	&movdqa	($tweak,&QWP(16*5,"esp"));	# last tweak
-       &pxor	($twtmp,$twtmp);
-	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
-       &pcmpgtd	($twtmp,$tweak);		# broadcast upper bits
-	&xorps	($inout1,&QWP(16*1,"esp"));
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&xorps	($inout2,&QWP(16*2,"esp"));
-	&movups	(&QWP(16*1,$out),$inout1);
-	&xorps	($inout3,&QWP(16*3,"esp"));
-	&movups	(&QWP(16*2,$out),$inout2);
-	&xorps	($inout4,&QWP(16*4,"esp"));
-	&movups	(&QWP(16*3,$out),$inout3);
-	&xorps	($inout5,$tweak);
-	&movups	(&QWP(16*4,$out),$inout4);
-       &pshufd	($twres,$twtmp,0x13);
-	&movups	(&QWP(16*5,$out),$inout5);
-	&lea	($out,&DWP(16*6,$out));
-       &movdqa	($twmask,&QWP(16*6,"esp"));	# 0x0...010...87
-
-	&pxor	($twtmp,$twtmp);
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($twres,$twmask);		# isolate carry and residue
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&mov	($rounds,$rounds_);		# restore $rounds
-	&pxor	($tweak,$twres);
-
-	&sub	($len,16*6);
-	&jnc	(&label("xts_enc_loop6"));
-
-	&lea	($rounds,&DWP(1,"",$rounds,2));	# restore $rounds
-	&mov	($key,$key_);			# restore $key
-	&mov	($rounds_,$rounds);
-
-&set_label("xts_enc_short");
-	&add	($len,16*6);
-	&jz	(&label("xts_enc_done6x"));
-
-	&movdqa	($inout3,$tweak);		# put aside previous tweak
-	&cmp	($len,0x20);
-	&jb	(&label("xts_enc_one"));
-
-	&pshufd	($twres,$twtmp,0x13);
-	&pxor	($twtmp,$twtmp);
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($twres,$twmask);		# isolate carry and residue
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&pxor	($tweak,$twres);
-	&je	(&label("xts_enc_two"));
-
-	&pshufd	($twres,$twtmp,0x13);
-	&pxor	($twtmp,$twtmp);
-	&movdqa	($inout4,$tweak);		# put aside previous tweak
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($twres,$twmask);		# isolate carry and residue
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&pxor	($tweak,$twres);
-	&cmp	($len,0x40);
-	&jb	(&label("xts_enc_three"));
-
-	&pshufd	($twres,$twtmp,0x13);
-	&pxor	($twtmp,$twtmp);
-	&movdqa	($inout5,$tweak);		# put aside previous tweak
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($twres,$twmask);		# isolate carry and residue
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&pxor	($tweak,$twres);
-	&movdqa	(&QWP(16*0,"esp"),$inout3);
-	&movdqa	(&QWP(16*1,"esp"),$inout4);
-	&je	(&label("xts_enc_four"));
-
-	&movdqa	(&QWP(16*2,"esp"),$inout5);
-	&pshufd	($inout5,$twtmp,0x13);
-	&movdqa	(&QWP(16*3,"esp"),$tweak);
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($inout5,$twmask);		# isolate carry and residue
-	&pxor	($inout5,$tweak);
-
-	&movdqu	($inout0,&QWP(16*0,$inp));	# load input
-	&movdqu	($inout1,&QWP(16*1,$inp));
-	&movdqu	($inout2,&QWP(16*2,$inp));
-	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
-	&movdqu	($inout3,&QWP(16*3,$inp));
-	&pxor	($inout1,&QWP(16*1,"esp"));
-	&movdqu	($inout4,&QWP(16*4,$inp));
-	&pxor	($inout2,&QWP(16*2,"esp"));
-	&lea	($inp,&DWP(16*5,$inp));
-	&pxor	($inout3,&QWP(16*3,"esp"));
-	&movdqa	(&QWP(16*4,"esp"),$inout5);	# save last tweak
-	&pxor	($inout4,$inout5);
-
-	&call	("_aesni_encrypt6");
-
-	&movaps	($tweak,&QWP(16*4,"esp"));	# last tweak
-	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
-	&xorps	($inout1,&QWP(16*1,"esp"));
-	&xorps	($inout2,&QWP(16*2,"esp"));
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&xorps	($inout3,&QWP(16*3,"esp"));
-	&movups	(&QWP(16*1,$out),$inout1);
-	&xorps	($inout4,$tweak);
-	&movups	(&QWP(16*2,$out),$inout2);
-	&movups	(&QWP(16*3,$out),$inout3);
-	&movups	(&QWP(16*4,$out),$inout4);
-	&lea	($out,&DWP(16*5,$out));
-	&jmp	(&label("xts_enc_done"));
-
-&set_label("xts_enc_one",16);
-	&movups	($inout0,&QWP(16*0,$inp));	# load input
-	&lea	($inp,&DWP(16*1,$inp));
-	&xorps	($inout0,$inout3);		# input^=tweak
-	if ($inline)
-	{   &aesni_inline_generate1("enc");	}
-	else
-	{   &call	("_aesni_encrypt1");	}
-	&xorps	($inout0,$inout3);		# output^=tweak
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&lea	($out,&DWP(16*1,$out));
-
-	&movdqa	($tweak,$inout3);		# last tweak
-	&jmp	(&label("xts_enc_done"));
-
-&set_label("xts_enc_two",16);
-	&movaps	($inout4,$tweak);		# put aside last tweak
-
-	&movups	($inout0,&QWP(16*0,$inp));	# load input
-	&movups	($inout1,&QWP(16*1,$inp));
-	&lea	($inp,&DWP(16*2,$inp));
-	&xorps	($inout0,$inout3);		# input^=tweak
-	&xorps	($inout1,$inout4);
-	&xorps	($inout2,$inout2);
-
-	&call	("_aesni_encrypt3");
-
-	&xorps	($inout0,$inout3);		# output^=tweak
-	&xorps	($inout1,$inout4);
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&movups	(&QWP(16*1,$out),$inout1);
-	&lea	($out,&DWP(16*2,$out));
-
-	&movdqa	($tweak,$inout4);		# last tweak
-	&jmp	(&label("xts_enc_done"));
-
-&set_label("xts_enc_three",16);
-	&movaps	($inout5,$tweak);		# put aside last tweak
-	&movups	($inout0,&QWP(16*0,$inp));	# load input
-	&movups	($inout1,&QWP(16*1,$inp));
-	&movups	($inout2,&QWP(16*2,$inp));
-	&lea	($inp,&DWP(16*3,$inp));
-	&xorps	($inout0,$inout3);		# input^=tweak
-	&xorps	($inout1,$inout4);
-	&xorps	($inout2,$inout5);
-
-	&call	("_aesni_encrypt3");
-
-	&xorps	($inout0,$inout3);		# output^=tweak
-	&xorps	($inout1,$inout4);
-	&xorps	($inout2,$inout5);
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&movups	(&QWP(16*1,$out),$inout1);
-	&movups	(&QWP(16*2,$out),$inout2);
-	&lea	($out,&DWP(16*3,$out));
-
-	&movdqa	($tweak,$inout5);		# last tweak
-	&jmp	(&label("xts_enc_done"));
-
-&set_label("xts_enc_four",16);
-	&movaps	($inout4,$tweak);		# put aside last tweak
-
-	&movups	($inout0,&QWP(16*0,$inp));	# load input
-	&movups	($inout1,&QWP(16*1,$inp));
-	&movups	($inout2,&QWP(16*2,$inp));
-	&xorps	($inout0,&QWP(16*0,"esp"));	# input^=tweak
-	&movups	($inout3,&QWP(16*3,$inp));
-	&lea	($inp,&DWP(16*4,$inp));
-	&xorps	($inout1,&QWP(16*1,"esp"));
-	&xorps	($inout2,$inout5);
-	&xorps	($inout3,$inout4);
-
-	&call	("_aesni_encrypt4");
-
-	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
-	&xorps	($inout1,&QWP(16*1,"esp"));
-	&xorps	($inout2,$inout5);
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&xorps	($inout3,$inout4);
-	&movups	(&QWP(16*1,$out),$inout1);
-	&movups	(&QWP(16*2,$out),$inout2);
-	&movups	(&QWP(16*3,$out),$inout3);
-	&lea	($out,&DWP(16*4,$out));
-
-	&movdqa	($tweak,$inout4);		# last tweak
-	&jmp	(&label("xts_enc_done"));
-
-&set_label("xts_enc_done6x",16);		# $tweak is pre-calculated
-	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
-	&and	($len,15);
-	&jz	(&label("xts_enc_ret"));
-	&movdqa	($inout3,$tweak);
-	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
-	&jmp	(&label("xts_enc_steal"));
-
-&set_label("xts_enc_done",16);
-	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
-	&pxor	($twtmp,$twtmp);
-	&and	($len,15);
-	&jz	(&label("xts_enc_ret"));
-
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
-	&pshufd	($inout3,$twtmp,0x13);
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($inout3,&QWP(16*6,"esp"));	# isolate carry and residue
-	&pxor	($inout3,$tweak);
-
-&set_label("xts_enc_steal");
-	&movz	($rounds,&BP(0,$inp));
-	&movz	($key,&BP(-16,$out));
-	&lea	($inp,&DWP(1,$inp));
-	&mov	(&BP(-16,$out),&LB($rounds));
-	&mov	(&BP(0,$out),&LB($key));
-	&lea	($out,&DWP(1,$out));
-	&sub	($len,1);
-	&jnz	(&label("xts_enc_steal"));
-
-	&sub	($out,&DWP(16*7+0,"esp"));	# rewind $out
-	&mov	($key,$key_);			# restore $key
-	&mov	($rounds,$rounds_);		# restore $rounds
-
-	&movups	($inout0,&QWP(-16,$out));	# load input
-	&xorps	($inout0,$inout3);		# input^=tweak
-	if ($inline)
-	{   &aesni_inline_generate1("enc");	}
-	else
-	{   &call	("_aesni_encrypt1");	}
-	&xorps	($inout0,$inout3);		# output^=tweak
-	&movups	(&QWP(-16,$out),$inout0);	# write output
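
# What xts_enc_steal plus the re-encryption above amount to, as a sketch
# ($Enc stands for the whole tweak-xor/AES/tweak-xor step on one block;
# short strings are toy stand-ins for 16-byte blocks):
sub xts_cts_tail {
    my ($Enc, $prev_ct, $pt_tail) = @_;
    my $b = length $pt_tail;                         # 1..15 trailing bytes
    my $short_ct = substr($prev_ct, 0, $b);          # steal head of C[m-1]
    my $full     = $pt_tail . substr($prev_ct, $b);  # pad P[m] with its tail
    return ($Enc->($full), $short_ct);               # new C[m-1], short C[m]
}
my $Enc = sub { scalar reverse $_[0] };              # toy permutation, NOT AES
my ($c_prev, $c_short) = xts_cts_tail($Enc, "0123456789abcdef", "XYZ");
print "$c_prev / $c_short\n";                        # fedcba9876543ZYX / 012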
-
-&set_label("xts_enc_ret");
-	&mov	("esp",&DWP(16*7+4,"esp"));	# restore %esp
-&function_end("aesni_xts_encrypt");
-
-&function_begin("aesni_xts_decrypt");
-	&mov	($key,&wparam(4));		# key2
-	&mov	($inp,&wparam(5));		# clear-text tweak
-
-	&mov	($rounds,&DWP(240,$key));	# key2->rounds
-	&movups	($inout0,&QWP(0,$inp));
-	if ($inline)
-	{   &aesni_inline_generate1("enc");	}
-	else
-	{   &call	("_aesni_encrypt1");	}
-
-	&mov	($inp,&wparam(0));
-	&mov	($out,&wparam(1));
-	&mov	($len,&wparam(2));
-	&mov	($key,&wparam(3));		# key1
-
-	&mov	($key_,"esp");
-	&sub	("esp",16*7+8);
-	&and	("esp",-16);			# align stack
-
-	&xor	($rounds_,$rounds_);		# if(len%16) len-=16;
-	&test	($len,15);
-	&setnz	(&LB($rounds_));
-	&shl	($rounds_,4);
-	&sub	($len,$rounds_);
-
-	&mov	(&DWP(16*6+0,"esp"),0x87);	# compose the magic constant
-	&mov	(&DWP(16*6+4,"esp"),0);
-	&mov	(&DWP(16*6+8,"esp"),1);
-	&mov	(&DWP(16*6+12,"esp"),0);
-	&mov	(&DWP(16*7+0,"esp"),$len);	# save original $len
-	&mov	(&DWP(16*7+4,"esp"),$key_);	# save original %esp
-
-	&mov	($rounds,&DWP(240,$key));	# key1->rounds
-	&mov	($key_,$key);			# backup $key
-	&mov	($rounds_,$rounds);		# backup $rounds
-
-	&movdqa	($tweak,$inout0);
-	&pxor	($twtmp,$twtmp);
-	&movdqa	($twmask,&QWP(6*16,"esp"));	# 0x0...010...87
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-
-	&and	($len,-16);
-	&sub	($len,16*6);
-	&jc	(&label("xts_dec_short"));
-
-	&shr	($rounds,1);
-	&mov	($rounds_,$rounds);
-	&jmp	(&label("xts_dec_loop6"));
-
-&set_label("xts_dec_loop6",16);
-	for ($i=0;$i<4;$i++) {
-	    &pshufd	($twres,$twtmp,0x13);
-	    &pxor	($twtmp,$twtmp);
-	    &movdqa	(&QWP(16*$i,"esp"),$tweak);
-	    &paddq	($tweak,$tweak);	# &psllq($tweak,1);
-	    &pand	($twres,$twmask);	# isolate carry and residue
-	    &pcmpgtd	($twtmp,$tweak);	# broadcast upper bits
-	    &pxor	($tweak,$twres);
-	}
-	&pshufd	($inout5,$twtmp,0x13);
-	&movdqa	(&QWP(16*$i++,"esp"),$tweak);
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	 &$movekey	($rndkey0,&QWP(0,$key_));
-	&pand	($inout5,$twmask);		# isolate carry and residue
-	 &movups	($inout0,&QWP(0,$inp));	# load input
-	&pxor	($inout5,$tweak);
-
-	# inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
-	&movdqu	($inout1,&QWP(16*1,$inp));
-	 &xorps		($inout0,$rndkey0);	# input^=rndkey[0]
-	&movdqu	($inout2,&QWP(16*2,$inp));
-	 &pxor		($inout1,$rndkey0);
-	&movdqu	($inout3,&QWP(16*3,$inp));
-	 &pxor		($inout2,$rndkey0);
-	&movdqu	($inout4,&QWP(16*4,$inp));
-	 &pxor		($inout3,$rndkey0);
-	&movdqu	($rndkey1,&QWP(16*5,$inp));
-	 &pxor		($inout4,$rndkey0);
-	&lea	($inp,&DWP(16*6,$inp));
-	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
-	&movdqa	(&QWP(16*$i,"esp"),$inout5);	# save last tweak
-	&pxor	($inout5,$rndkey1);
-
-	 &$movekey	($rndkey1,&QWP(16,$key_));
-	 &lea		($key,&DWP(32,$key_));
-	&pxor	($inout1,&QWP(16*1,"esp"));
-	 &aesdec	($inout0,$rndkey1);
-	&pxor	($inout2,&QWP(16*2,"esp"));
-	 &aesdec	($inout1,$rndkey1);
-	&pxor	($inout3,&QWP(16*3,"esp"));
-	 &dec		($rounds);
-	 &aesdec	($inout2,$rndkey1);
-	&pxor	($inout4,&QWP(16*4,"esp"));
-	 &aesdec	($inout3,$rndkey1);
-	&pxor		($inout5,$rndkey0);
-	 &aesdec	($inout4,$rndkey1);
-	 &$movekey	($rndkey0,&QWP(0,$key));
-	 &aesdec	($inout5,$rndkey1);
-	&call		(&label("_aesni_decrypt6_enter"));
-
-	&movdqa	($tweak,&QWP(16*5,"esp"));	# last tweak
-       &pxor	($twtmp,$twtmp);
-	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
-       &pcmpgtd	($twtmp,$tweak);		# broadcast upper bits
-	&xorps	($inout1,&QWP(16*1,"esp"));
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&xorps	($inout2,&QWP(16*2,"esp"));
-	&movups	(&QWP(16*1,$out),$inout1);
-	&xorps	($inout3,&QWP(16*3,"esp"));
-	&movups	(&QWP(16*2,$out),$inout2);
-	&xorps	($inout4,&QWP(16*4,"esp"));
-	&movups	(&QWP(16*3,$out),$inout3);
-	&xorps	($inout5,$tweak);
-	&movups	(&QWP(16*4,$out),$inout4);
-       &pshufd	($twres,$twtmp,0x13);
-	&movups	(&QWP(16*5,$out),$inout5);
-	&lea	($out,&DWP(16*6,$out));
-       &movdqa	($twmask,&QWP(16*6,"esp"));	# 0x0...010...87
-
-	&pxor	($twtmp,$twtmp);
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($twres,$twmask);		# isolate carry and residue
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&mov	($rounds,$rounds_);		# restore $rounds
-	&pxor	($tweak,$twres);
-
-	&sub	($len,16*6);
-	&jnc	(&label("xts_dec_loop6"));
-
-	&lea	($rounds,&DWP(1,"",$rounds,2));	# restore $rounds
-	&mov	($key,$key_);			# restore $key
-	&mov	($rounds_,$rounds);
-
-&set_label("xts_dec_short");
-	&add	($len,16*6);
-	&jz	(&label("xts_dec_done6x"));
-
-	&movdqa	($inout3,$tweak);		# put aside previous tweak
-	&cmp	($len,0x20);
-	&jb	(&label("xts_dec_one"));
-
-	&pshufd	($twres,$twtmp,0x13);
-	&pxor	($twtmp,$twtmp);
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($twres,$twmask);		# isolate carry and residue
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&pxor	($tweak,$twres);
-	&je	(&label("xts_dec_two"));
-
-	&pshufd	($twres,$twtmp,0x13);
-	&pxor	($twtmp,$twtmp);
-	&movdqa	($inout4,$tweak);		# put aside previous tweak
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($twres,$twmask);		# isolate carry and residue
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&pxor	($tweak,$twres);
-	&cmp	($len,0x40);
-	&jb	(&label("xts_dec_three"));
-
-	&pshufd	($twres,$twtmp,0x13);
-	&pxor	($twtmp,$twtmp);
-	&movdqa	($inout5,$tweak);		# put aside previous tweak
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($twres,$twmask);		# isolate carry and residue
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&pxor	($tweak,$twres);
-	&movdqa	(&QWP(16*0,"esp"),$inout3);
-	&movdqa	(&QWP(16*1,"esp"),$inout4);
-	&je	(&label("xts_dec_four"));
-
-	&movdqa	(&QWP(16*2,"esp"),$inout5);
-	&pshufd	($inout5,$twtmp,0x13);
-	&movdqa	(&QWP(16*3,"esp"),$tweak);
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($inout5,$twmask);		# isolate carry and residue
-	&pxor	($inout5,$tweak);
-
-	&movdqu	($inout0,&QWP(16*0,$inp));	# load input
-	&movdqu	($inout1,&QWP(16*1,$inp));
-	&movdqu	($inout2,&QWP(16*2,$inp));
-	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
-	&movdqu	($inout3,&QWP(16*3,$inp));
-	&pxor	($inout1,&QWP(16*1,"esp"));
-	&movdqu	($inout4,&QWP(16*4,$inp));
-	&pxor	($inout2,&QWP(16*2,"esp"));
-	&lea	($inp,&DWP(16*5,$inp));
-	&pxor	($inout3,&QWP(16*3,"esp"));
-	&movdqa	(&QWP(16*4,"esp"),$inout5);	# save last tweak
-	&pxor	($inout4,$inout5);
-
-	&call	("_aesni_decrypt6");
-
-	&movaps	($tweak,&QWP(16*4,"esp"));	# last tweak
-	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
-	&xorps	($inout1,&QWP(16*1,"esp"));
-	&xorps	($inout2,&QWP(16*2,"esp"));
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&xorps	($inout3,&QWP(16*3,"esp"));
-	&movups	(&QWP(16*1,$out),$inout1);
-	&xorps	($inout4,$tweak);
-	&movups	(&QWP(16*2,$out),$inout2);
-	&movups	(&QWP(16*3,$out),$inout3);
-	&movups	(&QWP(16*4,$out),$inout4);
-	&lea	($out,&DWP(16*5,$out));
-	&jmp	(&label("xts_dec_done"));
-
-&set_label("xts_dec_one",16);
-	&movups	($inout0,&QWP(16*0,$inp));	# load input
-	&lea	($inp,&DWP(16*1,$inp));
-	&xorps	($inout0,$inout3);		# input^=tweak
-	if ($inline)
-	{   &aesni_inline_generate1("dec");	}
-	else
-	{   &call	("_aesni_decrypt1");	}
-	&xorps	($inout0,$inout3);		# output^=tweak
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&lea	($out,&DWP(16*1,$out));
-
-	&movdqa	($tweak,$inout3);		# last tweak
-	&jmp	(&label("xts_dec_done"));
-
-&set_label("xts_dec_two",16);
-	&movaps	($inout4,$tweak);		# put aside last tweak
-
-	&movups	($inout0,&QWP(16*0,$inp));	# load input
-	&movups	($inout1,&QWP(16*1,$inp));
-	&lea	($inp,&DWP(16*2,$inp));
-	&xorps	($inout0,$inout3);		# input^=tweak
-	&xorps	($inout1,$inout4);
-
-	&call	("_aesni_decrypt3");
-
-	&xorps	($inout0,$inout3);		# output^=tweak
-	&xorps	($inout1,$inout4);
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&movups	(&QWP(16*1,$out),$inout1);
-	&lea	($out,&DWP(16*2,$out));
-
-	&movdqa	($tweak,$inout4);		# last tweak
-	&jmp	(&label("xts_dec_done"));
-
-&set_label("xts_dec_three",16);
-	&movaps	($inout5,$tweak);		# put aside last tweak
-	&movups	($inout0,&QWP(16*0,$inp));	# load input
-	&movups	($inout1,&QWP(16*1,$inp));
-	&movups	($inout2,&QWP(16*2,$inp));
-	&lea	($inp,&DWP(16*3,$inp));
-	&xorps	($inout0,$inout3);		# input^=tweak
-	&xorps	($inout1,$inout4);
-	&xorps	($inout2,$inout5);
-
-	&call	("_aesni_decrypt3");
-
-	&xorps	($inout0,$inout3);		# output^=tweak
-	&xorps	($inout1,$inout4);
-	&xorps	($inout2,$inout5);
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&movups	(&QWP(16*1,$out),$inout1);
-	&movups	(&QWP(16*2,$out),$inout2);
-	&lea	($out,&DWP(16*3,$out));
-
-	&movdqa	($tweak,$inout5);		# last tweak
-	&jmp	(&label("xts_dec_done"));
-
-&set_label("xts_dec_four",16);
-	&movaps	($inout4,$tweak);		# put aside last tweak
-
-	&movups	($inout0,&QWP(16*0,$inp));	# load input
-	&movups	($inout1,&QWP(16*1,$inp));
-	&movups	($inout2,&QWP(16*2,$inp));
-	&xorps	($inout0,&QWP(16*0,"esp"));	# input^=tweak
-	&movups	($inout3,&QWP(16*3,$inp));
-	&lea	($inp,&DWP(16*4,$inp));
-	&xorps	($inout1,&QWP(16*1,"esp"));
-	&xorps	($inout2,$inout5);
-	&xorps	($inout3,$inout4);
-
-	&call	("_aesni_decrypt4");
-
-	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
-	&xorps	($inout1,&QWP(16*1,"esp"));
-	&xorps	($inout2,$inout5);
-	&movups	(&QWP(16*0,$out),$inout0);	# write output
-	&xorps	($inout3,$inout4);
-	&movups	(&QWP(16*1,$out),$inout1);
-	&movups	(&QWP(16*2,$out),$inout2);
-	&movups	(&QWP(16*3,$out),$inout3);
-	&lea	($out,&DWP(16*4,$out));
-
-	&movdqa	($tweak,$inout4);		# last tweak
-	&jmp	(&label("xts_dec_done"));
-
-&set_label("xts_dec_done6x",16);		# $tweak is pre-calculated
-	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
-	&and	($len,15);
-	&jz	(&label("xts_dec_ret"));
-	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
-	&jmp	(&label("xts_dec_only_one_more"));
-
-&set_label("xts_dec_done",16);
-	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
-	&pxor	($twtmp,$twtmp);
-	&and	($len,15);
-	&jz	(&label("xts_dec_ret"));
-
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
-	&pshufd	($twres,$twtmp,0x13);
-	&pxor	($twtmp,$twtmp);
-	&movdqa	($twmask,&QWP(16*6,"esp"));
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($twres,$twmask);		# isolate carry and residue
-	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
-	&pxor	($tweak,$twres);
-
-&set_label("xts_dec_only_one_more");
-	&pshufd	($inout3,$twtmp,0x13);
-	&movdqa	($inout4,$tweak);		# put aside previous tweak
-	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
-	&pand	($inout3,$twmask);		# isolate carry and residue
-	&pxor	($inout3,$tweak);
-
-	&mov	($key,$key_);			# restore $key
-	&mov	($rounds,$rounds_);		# restore $rounds
-
-	&movups	($inout0,&QWP(0,$inp));		# load input
-	&xorps	($inout0,$inout3);		# input^=tweak
-	if ($inline)
-	{   &aesni_inline_generate1("dec");	}
-	else
-	{   &call	("_aesni_decrypt1");	}
-	&xorps	($inout0,$inout3);		# output^=tweak
-	&movups	(&QWP(0,$out),$inout0);		# write output
-
-&set_label("xts_dec_steal");
-	&movz	($rounds,&BP(16,$inp));
-	&movz	($key,&BP(0,$out));
-	&lea	($inp,&DWP(1,$inp));
-	&mov	(&BP(0,$out),&LB($rounds));
-	&mov	(&BP(16,$out),&LB($key));
-	&lea	($out,&DWP(1,$out));
-	&sub	($len,1);
-	&jnz	(&label("xts_dec_steal"));
-
-	&sub	($out,&DWP(16*7+0,"esp"));	# rewind $out
-	&mov	($key,$key_);			# restore $key
-	&mov	($rounds,$rounds_);		# restore $rounds
-
-	&movups	($inout0,&QWP(0,$out));		# load input
-	&xorps	($inout0,$inout4);		# input^=tweak
-	if ($inline)
-	{   &aesni_inline_generate1("dec");	}
-	else
-	{   &call	("_aesni_decrypt1");	}
-	&xorps	($inout0,$inout4);		# output^=tweak
-	&movups	(&QWP(0,$out),$inout0);		# write output
-
-&set_label("xts_dec_ret");
-	&mov	("esp",&DWP(16*7+4,"esp"));	# restore %esp
-&function_end("aesni_xts_decrypt");
-}
-}
-
-######################################################################
-# void $PREFIX_cbc_encrypt (const void *inp, void *out,
-#                           size_t length, const AES_KEY *key,
-#                           unsigned char *ivp,const int enc);
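
# The chaining rule the 6x decrypt pipeline below unrolls, as a sketch
# ($D stands in for AES block decryption; toy integers, not 128-bit
# blocks): P[i] = D(C[i]) ^ C[i-1], with C[-1] = IV.
sub cbc_dec_blocks {
    my ($D, $iv, @ct) = @_;
    my @pt;
    for my $c (@ct) {
        push @pt, $D->($c) ^ $iv;   # decrypt, then xor previous ciphertext
        $iv = $c;                   # this ciphertext chains into the next
    }
    return ($iv, @pt);              # final IV is written back to *ivp
}
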
-&function_begin("${PREFIX}_cbc_encrypt");
-	&mov	($inp,&wparam(0));
-	&mov	($rounds_,"esp");
-	&mov	($out,&wparam(1));
-	&sub	($rounds_,24);
-	&mov	($len,&wparam(2));
-	&and	($rounds_,-16);
-	&mov	($key,&wparam(3));
-	&mov	($key_,&wparam(4));
-	&test	($len,$len);
-	&jz	(&label("cbc_abort"));
-
-	&cmp	(&wparam(5),0);
-	&xchg	($rounds_,"esp");		# alloca
-	&movups	($ivec,&QWP(0,$key_));		# load IV
-	&mov	($rounds,&DWP(240,$key));
-	&mov	($key_,$key);			# backup $key
-	&mov	(&DWP(16,"esp"),$rounds_);	# save original %esp
-	&mov	($rounds_,$rounds);		# backup $rounds
-	&je	(&label("cbc_decrypt"));
-
-	&movaps	($inout0,$ivec);
-	&cmp	($len,16);
-	&jb	(&label("cbc_enc_tail"));
-	&sub	($len,16);
-	&jmp	(&label("cbc_enc_loop"));
-
-&set_label("cbc_enc_loop",16);
-	&movups	($ivec,&QWP(0,$inp));		# input actually
-	&lea	($inp,&DWP(16,$inp));
-	if ($inline)
-	{   &aesni_inline_generate1("enc",$inout0,$ivec);	}
-	else
-	{   &xorps($inout0,$ivec); &call("_aesni_encrypt1");	}
-	&mov	($rounds,$rounds_);	# restore $rounds
-	&mov	($key,$key_);		# restore $key
-	&movups	(&QWP(0,$out),$inout0);	# store output
-	&lea	($out,&DWP(16,$out));
-	&sub	($len,16);
-	&jnc	(&label("cbc_enc_loop"));
-	&add	($len,16);
-	&jnz	(&label("cbc_enc_tail"));
-	&movaps	($ivec,$inout0);
-	&jmp	(&label("cbc_ret"));
-
-&set_label("cbc_enc_tail");
-	&mov	("ecx",$len);		# zaps $rounds
-	&data_word(0xA4F3F689);		# rep movsb
-	&mov	("ecx",16);		# zero tail
-	&sub	("ecx",$len);
-	&xor	("eax","eax");		# zaps $len
-	&data_word(0xAAF3F689);		# rep stosb
-	&lea	($out,&DWP(-16,$out));	# rewind $out by 1 block
-	&mov	($rounds,$rounds_);	# restore $rounds
-	&mov	($inp,$out);		# $inp and $out are the same
-	&mov	($key,$key_);		# restore $key
-	&jmp	(&label("cbc_enc_loop"));
-######################################################################
-&set_label("cbc_decrypt",16);
-	&cmp	($len,0x50);
-	&jbe	(&label("cbc_dec_tail"));
-	&movaps	(&QWP(0,"esp"),$ivec);		# save IV
-	&sub	($len,0x50);
-	&jmp	(&label("cbc_dec_loop6_enter"));
-
-&set_label("cbc_dec_loop6",16);
-	&movaps	(&QWP(0,"esp"),$rndkey0);	# save IV
-	&movups	(&QWP(0,$out),$inout5);
-	&lea	($out,&DWP(0x10,$out));
-&set_label("cbc_dec_loop6_enter");
-	&movdqu	($inout0,&QWP(0,$inp));
-	&movdqu	($inout1,&QWP(0x10,$inp));
-	&movdqu	($inout2,&QWP(0x20,$inp));
-	&movdqu	($inout3,&QWP(0x30,$inp));
-	&movdqu	($inout4,&QWP(0x40,$inp));
-	&movdqu	($inout5,&QWP(0x50,$inp));
-
-	&call	("_aesni_decrypt6");
-
-	&movups	($rndkey1,&QWP(0,$inp));
-	&movups	($rndkey0,&QWP(0x10,$inp));
-	&xorps	($inout0,&QWP(0,"esp"));	# ^=IV
-	&xorps	($inout1,$rndkey1);
-	&movups	($rndkey1,&QWP(0x20,$inp));
-	&xorps	($inout2,$rndkey0);
-	&movups	($rndkey0,&QWP(0x30,$inp));
-	&xorps	($inout3,$rndkey1);
-	&movups	($rndkey1,&QWP(0x40,$inp));
-	&xorps	($inout4,$rndkey0);
-	&movups	($rndkey0,&QWP(0x50,$inp));	# IV
-	&xorps	($inout5,$rndkey1);
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&lea	($inp,&DWP(0x60,$inp));
-	&movups	(&QWP(0x20,$out),$inout2);
-	&mov	($rounds,$rounds_);		# restore $rounds
-	&movups	(&QWP(0x30,$out),$inout3);
-	&mov	($key,$key_);			# restore $key
-	&movups	(&QWP(0x40,$out),$inout4);
-	&lea	($out,&DWP(0x50,$out));
-	&sub	($len,0x60);
-	&ja	(&label("cbc_dec_loop6"));
-
-	&movaps	($inout0,$inout5);
-	&movaps	($ivec,$rndkey0);
-	&add	($len,0x50);
-	&jle	(&label("cbc_dec_tail_collected"));
-	&movups	(&QWP(0,$out),$inout0);
-	&lea	($out,&DWP(0x10,$out));
-&set_label("cbc_dec_tail");
-	&movups	($inout0,&QWP(0,$inp));
-	&movaps	($in0,$inout0);
-	&cmp	($len,0x10);
-	&jbe	(&label("cbc_dec_one"));
-
-	&movups	($inout1,&QWP(0x10,$inp));
-	&movaps	($in1,$inout1);
-	&cmp	($len,0x20);
-	&jbe	(&label("cbc_dec_two"));
-
-	&movups	($inout2,&QWP(0x20,$inp));
-	&cmp	($len,0x30);
-	&jbe	(&label("cbc_dec_three"));
-
-	&movups	($inout3,&QWP(0x30,$inp));
-	&cmp	($len,0x40);
-	&jbe	(&label("cbc_dec_four"));
-
-	&movups	($inout4,&QWP(0x40,$inp));
-	&movaps	(&QWP(0,"esp"),$ivec);		# save IV
-	&movups	($inout0,&QWP(0,$inp));
-	&xorps	($inout5,$inout5);
-	&call	("_aesni_decrypt6");
-	&movups	($rndkey1,&QWP(0,$inp));
-	&movups	($rndkey0,&QWP(0x10,$inp));
-	&xorps	($inout0,&QWP(0,"esp"));	# ^= IV
-	&xorps	($inout1,$rndkey1);
-	&movups	($rndkey1,&QWP(0x20,$inp));
-	&xorps	($inout2,$rndkey0);
-	&movups	($rndkey0,&QWP(0x30,$inp));
-	&xorps	($inout3,$rndkey1);
-	&movups	($ivec,&QWP(0x40,$inp));	# IV
-	&xorps	($inout4,$rndkey0);
-	&movups	(&QWP(0,$out),$inout0);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&movups	(&QWP(0x30,$out),$inout3);
-	&lea	($out,&DWP(0x40,$out));
-	&movaps	($inout0,$inout4);
-	&sub	($len,0x50);
-	&jmp	(&label("cbc_dec_tail_collected"));
-
-&set_label("cbc_dec_one",16);
-	if ($inline)
-	{   &aesni_inline_generate1("dec");	}
-	else
-	{   &call	("_aesni_decrypt1");	}
-	&xorps	($inout0,$ivec);
-	&movaps	($ivec,$in0);
-	&sub	($len,0x10);
-	&jmp	(&label("cbc_dec_tail_collected"));
-
-&set_label("cbc_dec_two",16);
-	&xorps	($inout2,$inout2);
-	&call	("_aesni_decrypt3");
-	&xorps	($inout0,$ivec);
-	&xorps	($inout1,$in0);
-	&movups	(&QWP(0,$out),$inout0);
-	&movaps	($inout0,$inout1);
-	&lea	($out,&DWP(0x10,$out));
-	&movaps	($ivec,$in1);
-	&sub	($len,0x20);
-	&jmp	(&label("cbc_dec_tail_collected"));
-
-&set_label("cbc_dec_three",16);
-	&call	("_aesni_decrypt3");
-	&xorps	($inout0,$ivec);
-	&xorps	($inout1,$in0);
-	&xorps	($inout2,$in1);
-	&movups	(&QWP(0,$out),$inout0);
-	&movaps	($inout0,$inout2);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&lea	($out,&DWP(0x20,$out));
-	&movups	($ivec,&QWP(0x20,$inp));
-	&sub	($len,0x30);
-	&jmp	(&label("cbc_dec_tail_collected"));
-
-&set_label("cbc_dec_four",16);
-	&call	("_aesni_decrypt4");
-	&movups	($rndkey1,&QWP(0x10,$inp));
-	&movups	($rndkey0,&QWP(0x20,$inp));
-	&xorps	($inout0,$ivec);
-	&movups	($ivec,&QWP(0x30,$inp));
-	&xorps	($inout1,$in0);
-	&movups	(&QWP(0,$out),$inout0);
-	&xorps	($inout2,$rndkey1);
-	&movups	(&QWP(0x10,$out),$inout1);
-	&xorps	($inout3,$rndkey0);
-	&movups	(&QWP(0x20,$out),$inout2);
-	&lea	($out,&DWP(0x30,$out));
-	&movaps	($inout0,$inout3);
-	&sub	($len,0x40);
-
-&set_label("cbc_dec_tail_collected");
-	&and	($len,15);
-	&jnz	(&label("cbc_dec_tail_partial"));
-	&movups	(&QWP(0,$out),$inout0);
-	&jmp	(&label("cbc_ret"));
-
-&set_label("cbc_dec_tail_partial",16);
-	&movaps	(&QWP(0,"esp"),$inout0);
-	&mov	("ecx",16);
-	&mov	($inp,"esp");
-	&sub	("ecx",$len);
-	&data_word(0xA4F3F689);		# rep movsb
-
-&set_label("cbc_ret");
-	&mov	("esp",&DWP(16,"esp"));	# pull original %esp
-	&mov	($key_,&wparam(4));
-	&movups	(&QWP(0,$key_),$ivec);	# output IV
-&set_label("cbc_abort");
-&function_end("${PREFIX}_cbc_encrypt");
-
-######################################################################
-# Mechanical port from aesni-x86_64.pl.
-#
-# _aesni_set_encrypt_key is a private interface,
-# input:
-#	"eax"	const unsigned char *userKey
-#	$rounds	int bits
-#	$key	AES_KEY *key
-# output:
-#	"eax"	return code
-#	$rounds	rounds
-
-&function_begin_B("_aesni_set_encrypt_key");
-	&test	("eax","eax");
-	&jz	(&label("bad_pointer"));
-	&test	($key,$key);
-	&jz	(&label("bad_pointer"));
-
-	&movups	("xmm0",&QWP(0,"eax"));	# pull first 128 bits of *userKey
-	&xorps	("xmm4","xmm4");	# low dword of xmm4 is assumed 0
-	&lea	($key,&DWP(16,$key));
-	&cmp	($rounds,256);
-	&je	(&label("14rounds"));
-	&cmp	($rounds,192);
-	&je	(&label("12rounds"));
-	&cmp	($rounds,128);
-	&jne	(&label("bad_keybits"));
-
-&set_label("10rounds",16);
-	&mov		($rounds,9);
-	&$movekey	(&QWP(-16,$key),"xmm0");	# round 0
-	&aeskeygenassist("xmm1","xmm0",0x01);		# round 1
-	&call		(&label("key_128_cold"));
-	&aeskeygenassist("xmm1","xmm0",0x2);		# round 2
-	&call		(&label("key_128"));
-	&aeskeygenassist("xmm1","xmm0",0x04);		# round 3
-	&call		(&label("key_128"));
-	&aeskeygenassist("xmm1","xmm0",0x08);		# round 4
-	&call		(&label("key_128"));
-	&aeskeygenassist("xmm1","xmm0",0x10);		# round 5
-	&call		(&label("key_128"));
-	&aeskeygenassist("xmm1","xmm0",0x20);		# round 6
-	&call		(&label("key_128"));
-	&aeskeygenassist("xmm1","xmm0",0x40);		# round 7
-	&call		(&label("key_128"));
-	&aeskeygenassist("xmm1","xmm0",0x80);		# round 8
-	&call		(&label("key_128"));
-	&aeskeygenassist("xmm1","xmm0",0x1b);		# round 9
-	&call		(&label("key_128"));
-	&aeskeygenassist("xmm1","xmm0",0x36);		# round 10
-	&call		(&label("key_128"));
-	&$movekey	(&QWP(0,$key),"xmm0");
-	&mov		(&DWP(80,$key),$rounds);
-	&xor		("eax","eax");
-	&ret();
-
-&set_label("key_128",16);
-	&$movekey	(&QWP(0,$key),"xmm0");
-	&lea		($key,&DWP(16,$key));
-&set_label("key_128_cold");
-	&shufps		("xmm4","xmm0",0b00010000);
-	&xorps		("xmm0","xmm4");
-	&shufps		("xmm4","xmm0",0b10001100);
-	&xorps		("xmm0","xmm4");
-	&shufps		("xmm1","xmm1",0b11111111);	# critical path
-	&xorps		("xmm0","xmm1");
-	&ret();
-
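The key_128/key_128_cold helper above is the classic AES-128 expansion step. A common C-intrinsics rendering is sketched below (illustrative; expand_step128 is a hypothetical name, and pslldq-style shifts stand in for the zero-register shufps trick the assembly uses):

```c
#include <wmmintrin.h>   /* AES-NI intrinsics; compile with -maes */

/* One AES-128 key-expansion step: xor the previous round key into
 * itself three times, 4 bytes over each time (the shufps/xorps dance
 * above), then fold in the broadcast aeskeygenassist result. */
static __m128i expand_step128(__m128i key, __m128i keygened)
{
    keygened = _mm_shuffle_epi32(keygened, 0xff);  /* broadcast word 3 */
    key = _mm_xor_si128(key, _mm_slli_si128(key, 4));
    key = _mm_xor_si128(key, _mm_slli_si128(key, 4));
    key = _mm_xor_si128(key, _mm_slli_si128(key, 4));
    return _mm_xor_si128(key, keygened);
}

/* e.g. round-1 key from the round-0 key:
 * rk[1] = expand_step128(rk[0], _mm_aeskeygenassist_si128(rk[0], 0x01)); */
```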
-&set_label("12rounds",16);
-	&movq		("xmm2",&QWP(16,"eax"));	# remaining 1/3 of *userKey
-	&mov		($rounds,11);
-	&$movekey	(&QWP(-16,$key),"xmm0");	# round 0
-	&aeskeygenassist("xmm1","xmm2",0x01);		# round 1,2
-	&call		(&label("key_192a_cold"));
-	&aeskeygenassist("xmm1","xmm2",0x02);		# round 2,3
-	&call		(&label("key_192b"));
-	&aeskeygenassist("xmm1","xmm2",0x04);		# round 4,5
-	&call		(&label("key_192a"));
-	&aeskeygenassist("xmm1","xmm2",0x08);		# round 5,6
-	&call		(&label("key_192b"));
-	&aeskeygenassist("xmm1","xmm2",0x10);		# round 7,8
-	&call		(&label("key_192a"));
-	&aeskeygenassist("xmm1","xmm2",0x20);		# round 8,9
-	&call		(&label("key_192b"));
-	&aeskeygenassist("xmm1","xmm2",0x40);		# round 10,11
-	&call		(&label("key_192a"));
-	&aeskeygenassist("xmm1","xmm2",0x80);		# round 11,12
-	&call		(&label("key_192b"));
-	&$movekey	(&QWP(0,$key),"xmm0");
-	&mov		(&DWP(48,$key),$rounds);
-	&xor		("eax","eax");
-	&ret();
-
-&set_label("key_192a",16);
-	&$movekey	(&QWP(0,$key),"xmm0");
-	&lea		($key,&DWP(16,$key));
-&set_label("key_192a_cold",16);
-	&movaps		("xmm5","xmm2");
-&set_label("key_192b_warm");
-	&shufps		("xmm4","xmm0",0b00010000);
-	&movdqa		("xmm3","xmm2");
-	&xorps		("xmm0","xmm4");
-	&shufps		("xmm4","xmm0",0b10001100);
-	&pslldq		("xmm3",4);
-	&xorps		("xmm0","xmm4");
-	&pshufd		("xmm1","xmm1",0b01010101);	# critical path
-	&pxor		("xmm2","xmm3");
-	&pxor		("xmm0","xmm1");
-	&pshufd		("xmm3","xmm0",0b11111111);
-	&pxor		("xmm2","xmm3");
-	&ret();
-
-&set_label("key_192b",16);
-	&movaps		("xmm3","xmm0");
-	&shufps		("xmm5","xmm0",0b01000100);
-	&$movekey	(&QWP(0,$key),"xmm5");
-	&shufps		("xmm3","xmm2",0b01001110);
-	&$movekey	(&QWP(16,$key),"xmm3");
-	&lea		($key,&DWP(32,$key));
-	&jmp		(&label("key_192b_warm"));
-
-&set_label("14rounds",16);
-	&movups		("xmm2",&QWP(16,"eax"));	# remaining half of *userKey
-	&mov		($rounds,13);
-	&lea		($key,&DWP(16,$key));
-	&$movekey	(&QWP(-32,$key),"xmm0");	# round 0
-	&$movekey	(&QWP(-16,$key),"xmm2");	# round 1
-	&aeskeygenassist("xmm1","xmm2",0x01);		# round 2
-	&call		(&label("key_256a_cold"));
-	&aeskeygenassist("xmm1","xmm0",0x01);		# round 3
-	&call		(&label("key_256b"));
-	&aeskeygenassist("xmm1","xmm2",0x02);		# round 4
-	&call		(&label("key_256a"));
-	&aeskeygenassist("xmm1","xmm0",0x02);		# round 5
-	&call		(&label("key_256b"));
-	&aeskeygenassist("xmm1","xmm2",0x04);		# round 6
-	&call		(&label("key_256a"));
-	&aeskeygenassist("xmm1","xmm0",0x04);		# round 7
-	&call		(&label("key_256b"));
-	&aeskeygenassist("xmm1","xmm2",0x08);		# round 8
-	&call		(&label("key_256a"));
-	&aeskeygenassist("xmm1","xmm0",0x08);		# round 9
-	&call		(&label("key_256b"));
-	&aeskeygenassist("xmm1","xmm2",0x10);		# round 10
-	&call		(&label("key_256a"));
-	&aeskeygenassist("xmm1","xmm0",0x10);		# round 11
-	&call		(&label("key_256b"));
-	&aeskeygenassist("xmm1","xmm2",0x20);		# round 12
-	&call		(&label("key_256a"));
-	&aeskeygenassist("xmm1","xmm0",0x20);		# round 13
-	&call		(&label("key_256b"));
-	&aeskeygenassist("xmm1","xmm2",0x40);		# round 14
-	&call		(&label("key_256a"));
-	&$movekey	(&QWP(0,$key),"xmm0");
-	&mov		(&DWP(16,$key),$rounds);
-	&xor		("eax","eax");
-	&ret();
-
-&set_label("key_256a",16);
-	&$movekey	(&QWP(0,$key),"xmm2");
-	&lea		($key,&DWP(16,$key));
-&set_label("key_256a_cold");
-	&shufps		("xmm4","xmm0",0b00010000);
-	&xorps		("xmm0","xmm4");
-	&shufps		("xmm4","xmm0",0b10001100);
-	&xorps		("xmm0","xmm4");
-	&shufps		("xmm1","xmm1",0b11111111);	# critical path
-	&xorps		("xmm0","xmm1");
-	&ret();
-
-&set_label("key_256b",16);
-	&$movekey	(&QWP(0,$key),"xmm0");
-	&lea		($key,&DWP(16,$key));
-
-	&shufps		("xmm4","xmm2",0b00010000);
-	&xorps		("xmm2","xmm4");
-	&shufps		("xmm4","xmm2",0b10001100);
-	&xorps		("xmm2","xmm4");
-	&shufps		("xmm1","xmm1",0b10101010);	# critical path
-	&xorps		("xmm2","xmm1");
-	&ret();
-
-&set_label("bad_pointer",4);
-	&mov	("eax",-1);
-	&ret	();
-&set_label("bad_keybits",4);
-	&mov	("eax",-2);
-	&ret	();
-&function_end_B("_aesni_set_encrypt_key");
-
-# int $PREFIX_set_encrypt_key (const unsigned char *userKey, int bits,
-#                              AES_KEY *key)
-&function_begin_B("${PREFIX}_set_encrypt_key");
-	&mov	("eax",&wparam(0));
-	&mov	($rounds,&wparam(1));
-	&mov	($key,&wparam(2));
-	&call	("_aesni_set_encrypt_key");
-	&ret	();
-&function_end_B("${PREFIX}_set_encrypt_key");
-
-# int $PREFIX_set_decrypt_key (const unsigned char *userKey, int bits,
-#                              AES_KEY *key)
-&function_begin_B("${PREFIX}_set_decrypt_key");
-	&mov	("eax",&wparam(0));
-	&mov	($rounds,&wparam(1));
-	&mov	($key,&wparam(2));
-	&call	("_aesni_set_encrypt_key");
-	&mov	($key,&wparam(2));
-	&shl	($rounds,4);	# rounds-1 after _aesni_set_encrypt_key
-	&test	("eax","eax");
-	&jnz	(&label("dec_key_ret"));
-	&lea	("eax",&DWP(16,$key,$rounds));	# end of key schedule
-
-	&$movekey	("xmm0",&QWP(0,$key));	# just swap
-	&$movekey	("xmm1",&QWP(0,"eax"));
-	&$movekey	(&QWP(0,"eax"),"xmm0");
-	&$movekey	(&QWP(0,$key),"xmm1");
-	&lea		($key,&DWP(16,$key));
-	&lea		("eax",&DWP(-16,"eax"));
-
-&set_label("dec_key_inverse");
-	&$movekey	("xmm0",&QWP(0,$key));	# swap and inverse
-	&$movekey	("xmm1",&QWP(0,"eax"));
-	&aesimc		("xmm0","xmm0");
-	&aesimc		("xmm1","xmm1");
-	&lea		($key,&DWP(16,$key));
-	&lea		("eax",&DWP(-16,"eax"));
-	&$movekey	(&QWP(16,"eax"),"xmm0");
-	&$movekey	(&QWP(-16,$key),"xmm1");
-	&cmp		("eax",$key);
-	&ja		(&label("dec_key_inverse"));
-
-	&$movekey	("xmm0",&QWP(0,$key));	# inverse middle
-	&aesimc		("xmm0","xmm0");
-	&$movekey	(&QWP(0,$key),"xmm0");
-
-	&xor		("eax","eax");		# return success
-&set_label("dec_key_ret");
-	&ret	();
-&function_end_B("${PREFIX}_set_decrypt_key");
-&asciz("AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>");
-
-&asm_finish();
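For orientation, the set_decrypt_key routine above reuses the encryption schedule and converts it in place. A minimal out-of-place sketch of the same transformation, assuming an AES-128 schedule of 11 round keys (the real routine also handles 192/256-bit keys and works in place):

```c
#include <wmmintrin.h>

/* Build the schedule aesdec expects from an encryption schedule:
 * reverse the round-key order and run InvMixColumns (aesimc) on every
 * key except the outermost two, mirroring the swap-and-aesimc pass in
 * dec_key_inverse above. */
static void enc_to_dec_schedule128(const __m128i enc[11], __m128i dec[11])
{
    dec[0] = enc[10];
    for (int i = 1; i < 10; i++)
        dec[i] = _mm_aesimc_si128(enc[10 - i]);
    dec[10] = enc[0];
}
```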

+ 0 - 3071
drivers/builtin_openssl2/crypto/aes/asm/aesni-x86_64.pl

@@ -1,3071 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# This module implements support for the Intel AES-NI extension. In
-# the OpenSSL context it's used with the Intel engine, but it can also
-# be used as a drop-in replacement for crypto/aes/asm/aes-x86_64.pl
-# [see below for details].
-#
-# Performance.
-#
-# Given the aes(enc|dec) instructions' latency, asymptotic performance
-# for non-parallelizable modes such as CBC encrypt is 3.75 cycles per
-# byte processed with a 128-bit key, and given their throughput,
-# asymptotic performance for parallelizable modes is 1.25 cycles per
-# byte. Being an asymptotic limit, it's not something you commonly
-# achieve in reality, but how close does one get? Below are results
-# collected for different modes and block sizes. Pairs of numbers are
-# for en-/
-# decryption.
-#
-#	16-byte     64-byte     256-byte    1-KB        8-KB
-# ECB	4.25/4.25   1.38/1.38   1.28/1.28   1.26/1.26	1.26/1.26
-# CTR	5.42/5.42   1.92/1.92   1.44/1.44   1.28/1.28   1.26/1.26
-# CBC	4.38/4.43   4.15/1.43   4.07/1.32   4.07/1.29   4.06/1.28
-# CCM	5.66/9.42   4.42/5.41   4.16/4.40   4.09/4.15   4.06/4.07   
-# OFB	5.42/5.42   4.64/4.64   4.44/4.44   4.39/4.39   4.38/4.38
-# CFB	5.73/5.85   5.56/5.62   5.48/5.56   5.47/5.55   5.47/5.55
-#
-# ECB, CTR, CBC and CCM results are free from EVP overhead. This means
-# that the otherwise-used 'openssl speed -evp aes-128-??? -engine aesni
-# [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
-# The results were collected with specially crafted speed.c benchmark
-# in order to compare them with results reported in "Intel Advanced
-# Encryption Standard (AES) New Instruction Set" White Paper Revision
-# 3.0 dated May 2010. All above results are consistently better. This
-# module also provides better performance for block sizes smaller than
-# 128 bytes in points *not* represented in the above table.
-#
-# Looking at the results for 8-KB buffer.
-#
-# CFB and OFB results are far from the limit, because the implementation
-# uses the "generic" CRYPTO_[c|o]fb128_encrypt interfaces relying on
-# single-block aesni_encrypt, which is not the most optimal way to go.
-# The CBC encrypt result is unexpectedly high and there is no documented
-# explanation for it. Seemingly there is a small penalty for feeding
-# the result back to the AES unit the way it's done in CBC mode. There is
-# nothing one can do and the result appears optimal. CCM result is
-# identical to CBC, because CBC-MAC is essentially CBC encrypt without
-# saving output. CCM CTR "stays invisible," because it's neatly
-# interleaved wih CBC-MAC. This provides ~30% improvement over
-# "straghtforward" CCM implementation with CTR and CBC-MAC performed
-# disjointly. Parallelizable modes practically achieve the theoretical
-# limit.
-#
-# Looking at how results vary with buffer size.
-#
-# Curves are practically saturated at 1-KB buffer size. In most cases
-# "256-byte" performance is >95%, and "64-byte" is ~90% of "8-KB" one.
-# The CTR curve doesn't follow this pattern and is the slowest-changing
-# one, with the "256-byte" result being 87% of the "8-KB" one. This is
-# because the overhead in CTR mode is the most computationally
-# intensive. Small-block CCM
-# decrypt is slower than encrypt, because first CTR and last CBC-MAC
-# iterations can't be interleaved.
-#
-# Results for 192- and 256-bit keys.
-#
-# EVP-free results were observed to scale perfectly with number of
-# rounds for larger block sizes, i.e. 192-bit result being 10/12 times
-# lower and the 256-bit one 10/14. Well, in the CBC encrypt case the
-# differences are a tad smaller, because the above-mentioned penalty
-# biases all results by the same constant value. In a similar way,
-# function call overhead affects small-block performance, as well as
-# OFB and CFB results. Differences are not large; the most common
-# coefficients are 10/11.7 and 10/13.4 (as opposed to 10/12.0 and
-# 10/14.0), but one observes even 10/11.2 and 10/12.4 (CTR, OFB, CFB)...
-
-# January 2011
-#
-# While the Westmere processor features 6-cycle latency for aes[enc|dec]
-# instructions, which can be scheduled every second cycle, Sandy
-# Bridge spends 8 cycles per instruction but can schedule them
-# every cycle. This means that code targeting Westmere would perform
-# suboptimally on Sandy Bridge. Therefore this update.
-#
-# In addition, non-parallelizable CBC encrypt (as well as CCM) is
-# optimized. Relative improvement might appear modest, 8% on Westmere,
-# but in absolute terms it's 3.77 cycles per byte encrypted with
-# 128-bit key on Westmere, and 5.07 - on Sandy Bridge. These numbers
-# should be compared to asymptotic limits of 3.75 for Westmere and
-# 5.00 for Sandy Bridge. Actually, the fact that they get this close
-# to asymptotic limits is quite amazing. Indeed, the limit is
-# calculated as latency times number of rounds, 10 for 128-bit key,
-# and divided by 16, the number of bytes in a block, or in other words
-# it accounts *solely* for aesenc instructions. But there are extra
-# instructions, and numbers so close to the asymptotic limits mean
-# that it's as if it takes as little as *one* additional cycle to
-# execute all of them. How is it possible? It is possible thanks to
-# out-of-order execution logic, which manages to overlap post-
-# processing of previous block, things like saving the output, with
-# actual encryption of current block, as well as pre-processing of
-# current block, things like fetching input and xor-ing it with
-# 0-round element of the key schedule, with actual encryption of
-# previous block. Keep this in mind...
-#
-# For parallelizable modes, such as ECB, CBC decrypt, CTR, higher
-# performance is achieved by interleaving instructions working on
-# independent blocks, in which case the asymptotic limit for such modes
-# can be obtained by dividing the above-mentioned numbers by the AES
-# instructions' interleave factor. Westmere can execute at most 3
-# instructions at a time, meaning that the optimal interleave factor is
-# 3, and that's where the "magic" number of 1.25 comes from. "Optimal
-# interleave factor" means that increase of interleave factor does
-# not improve performance. The formula has proven to reflect reality
-# pretty well on Westmere... Sandy Bridge on the other hand can
-# execute up to 8 AES instructions at a time, so how does varying
-# interleave factor affect the performance? Here is table for ECB
-# (numbers are cycles per byte processed with 128-bit key):
-#
-# instruction interleave factor		3x	6x	8x
-# theoretical asymptotic limit		1.67	0.83	0.625
-# measured performance for 8KB block	1.05	0.86	0.84
-#
-# "as if" interleave factor		4.7x	5.8x	6.0x
-#
-# Further data for other parallelizable modes:
-#
-# CBC decrypt				1.16	0.93	0.93
-# CTR					1.14	0.91	n/a
-#
-# Well, given the 3x column it's probably inappropriate to call the
-# limit asymptotic if it can be surpassed, isn't it? What happens
-# there? Rewind to the CBC paragraph for the answer. Yes, out-of-order
-# execution magic is responsible for this. The processor overlaps not
-# only the additional instructions with the AES ones, but even AES
-# instructions processing adjacent triplets of independent blocks. In
-# the 6x case the additional instructions still claim a
-# disproportionately small amount of additional cycles, but in the 8x
-# case the number of instructions must be a tad too high for the
-# out-of-order logic to cope with, and the AES unit remains
-# underutilized... As you can see, 8x interleave is hardly justifiable,
-# so there's no need to feel bad that 32-bit aesni-x86.pl utilizes 6x
-# interleave because of limited register bank capacity.
-#
-# Higher interleave factors do have a negative impact on Westmere
-# performance. While for ECB mode it's a negligible ~1.5%, other
-# parallelizable modes perform ~5% worse, which is outweighed by a ~25%
-# improvement on Sandy Bridge. To balance the regression on Westmere,
-# CTR mode was implemented with a 6x aesenc interleave factor.
-
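The limits quoted throughout these notes follow from one model: cycles per byte equals the per-instruction cycle cost times the round count, divided by the 16-byte block. A quick check of the Westmere figures (an illustrative calculation, not part of the module):

```c
#include <stdio.h>

int main(void)
{
    /* Westmere: aesenc latency 6, issue rate one per 2 cycles,
     * 10 rounds with a 128-bit key, 16 bytes per block. */
    printf("serial (CBC encrypt)     : %.2f cpb\n", 6.0 * 10 / 16); /* 3.75 */
    printf("parallel (3x interleave) : %.2f cpb\n", 2.0 * 10 / 16); /* 1.25 */
    return 0;
}
```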
-# April 2011
-#
-# Add aesni_xts_[en|de]crypt. Westmere spends 1.33 cycles processing
-# one byte out of 8KB with 128-bit key, Sandy Bridge - 0.97. Just like
-# in CTR mode AES instruction interleave factor was chosen to be 6x.
-
-$PREFIX="aesni";	# if $PREFIX is set to "AES", the script
-			# generates drop-in replacement for
-			# crypto/aes/asm/aes-x86_64.pl:-)
-
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-$movkey = $PREFIX eq "aesni" ? "movups" : "movups";
-@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
-		("%rdi","%rsi","%rdx","%rcx");	# Unix order
-
-$code=".text\n";
-
-$rounds="%eax";	# input to and changed by aesni_[en|de]cryptN !!!
-# this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
-$inp="%rdi";
-$out="%rsi";
-$len="%rdx";
-$key="%rcx";	# input to and changed by aesni_[en|de]cryptN !!!
-$ivp="%r8";	# cbc, ctr, ...
-
-$rnds_="%r10d";	# backup copy for $rounds
-$key_="%r11";	# backup copy for $key
-
-# %xmm register layout
-$rndkey0="%xmm0";	$rndkey1="%xmm1";
-$inout0="%xmm2";	$inout1="%xmm3";
-$inout2="%xmm4";	$inout3="%xmm5";
-$inout4="%xmm6";	$inout5="%xmm7";
-$inout6="%xmm8";	$inout7="%xmm9";
-
-$in2="%xmm6";		$in1="%xmm7";	# used in CBC decrypt, CTR, ...
-$in0="%xmm8";		$iv="%xmm9";
-
-# Inline version of internal aesni_[en|de]crypt1.
-#
-# Why a folded loop? Because aes[enc|dec] is slow enough to accommodate
-# the cycles which take care of the loop variables...
-{ my $sn;
-sub aesni_generate1 {
-my ($p,$key,$rounds,$inout,$ivec)=@_;	$inout=$inout0 if (!defined($inout));
-++$sn;
-$code.=<<___;
-	$movkey	($key),$rndkey0
-	$movkey	16($key),$rndkey1
-___
-$code.=<<___ if (defined($ivec));
-	xorps	$rndkey0,$ivec
-	lea	32($key),$key
-	xorps	$ivec,$inout
-___
-$code.=<<___ if (!defined($ivec));
-	lea	32($key),$key
-	xorps	$rndkey0,$inout
-___
-$code.=<<___;
-.Loop_${p}1_$sn:
-	aes${p}	$rndkey1,$inout
-	dec	$rounds
-	$movkey	($key),$rndkey1
-	lea	16($key),$key
-	jnz	.Loop_${p}1_$sn	# loop body is 16 bytes
-	aes${p}last	$rndkey1,$inout
-___
-}}
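In intrinsics form the folded loop amounts to the sketch below (hypothetical helper; `rounds` is the usual 10/12/14, while the assembly counts from the rounds-1 value this module stores in the key schedule):

```c
#include <wmmintrin.h>

/* Single-block AES: whiten with round key 0, one aesenc per middle
 * round, aesenclast to finish, exactly the shape of the folded loop
 * generated above. */
static __m128i aes_encrypt_block(__m128i block,
                                 const __m128i *schedule, int rounds)
{
    block = _mm_xor_si128(block, schedule[0]);
    for (int i = 1; i < rounds; i++)
        block = _mm_aesenc_si128(block, schedule[i]);
    return _mm_aesenclast_si128(block, schedule[rounds]);
}
```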
-# void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
-#
-{ my ($inp,$out,$key) = @_4args;
-
-$code.=<<___;
-.globl	${PREFIX}_encrypt
-.type	${PREFIX}_encrypt,\@abi-omnipotent
-.align	16
-${PREFIX}_encrypt:
-	movups	($inp),$inout0		# load input
-	mov	240($key),$rounds	# key->rounds
-___
-	&aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
-	movups	$inout0,($out)		# output
-	ret
-.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt
-
-.globl	${PREFIX}_decrypt
-.type	${PREFIX}_decrypt,\@abi-omnipotent
-.align	16
-${PREFIX}_decrypt:
-	movups	($inp),$inout0		# load input
-	mov	240($key),$rounds	# key->rounds
-___
-	&aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
-	movups	$inout0,($out)		# output
-	ret
-.size	${PREFIX}_decrypt, .-${PREFIX}_decrypt
-___
-}
-
-# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
-# factor. Why was a 3x subroutine originally used in loops? Even though
-# aes[enc|dec] latency was originally 6, it could be scheduled only
-# every *2nd* cycle. Thus 3x interleave was the one providing optimal
-# utilization, i.e. when the subroutine's throughput is virtually the
-# same as that of the non-interleaved subroutine [for up to 3 input
-# blocks]. This is why it makes no sense to implement a 2x subroutine.
-# aes[enc|dec] latency in the next processor generation is 8, but the
-# instructions can be scheduled every cycle. The optimal interleave for
-# the new processor is therefore 8x... A C-level sketch of the 3x idea
-# follows.
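The same idea in C, under the same conventions as the single-block example earlier: three independent aesenc chains per round key, which the out-of-order core overlaps to hide latency.

```c
#include <wmmintrin.h>

/* Encrypt three independent blocks with interleaved rounds: each round
 * key is loaded once and feeds three dependency chains whose latencies
 * overlap, which is the whole point of the Nx subroutines below. */
static void aes_encrypt3(__m128i b[3], const __m128i *schedule, int rounds)
{
    for (int j = 0; j < 3; j++)
        b[j] = _mm_xor_si128(b[j], schedule[0]);
    for (int i = 1; i < rounds; i++)
        for (int j = 0; j < 3; j++)
            b[j] = _mm_aesenc_si128(b[j], schedule[i]);
    for (int j = 0; j < 3; j++)
        b[j] = _mm_aesenclast_si128(b[j], schedule[rounds]);
}
```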
-sub aesni_generate3 {
-my $dir=shift;
-# As already mentioned it takes in $key and $rounds, which are *not*
-# preserved. $inout[0-2] is cipher/clear text...
-$code.=<<___;
-.type	_aesni_${dir}rypt3,\@abi-omnipotent
-.align	16
-_aesni_${dir}rypt3:
-	$movkey	($key),$rndkey0
-	shr	\$1,$rounds
-	$movkey	16($key),$rndkey1
-	lea	32($key),$key
-	xorps	$rndkey0,$inout0
-	xorps	$rndkey0,$inout1
-	xorps	$rndkey0,$inout2
-	$movkey		($key),$rndkey0
-
-.L${dir}_loop3:
-	aes${dir}	$rndkey1,$inout0
-	aes${dir}	$rndkey1,$inout1
-	dec		$rounds
-	aes${dir}	$rndkey1,$inout2
-	$movkey		16($key),$rndkey1
-	aes${dir}	$rndkey0,$inout0
-	aes${dir}	$rndkey0,$inout1
-	lea		32($key),$key
-	aes${dir}	$rndkey0,$inout2
-	$movkey		($key),$rndkey0
-	jnz		.L${dir}_loop3
-
-	aes${dir}	$rndkey1,$inout0
-	aes${dir}	$rndkey1,$inout1
-	aes${dir}	$rndkey1,$inout2
-	aes${dir}last	$rndkey0,$inout0
-	aes${dir}last	$rndkey0,$inout1
-	aes${dir}last	$rndkey0,$inout2
-	ret
-.size	_aesni_${dir}rypt3,.-_aesni_${dir}rypt3
-___
-}
-# 4x interleave is implemented to improve small-block performance,
-# most notably [and naturally] the 4-block case by ~30%. One can argue
-# that one should have implemented 5x as well, but the improvement
-# would be <20%, so it's not worth it...
-sub aesni_generate4 {
-my $dir=shift;
-# As already mentioned it takes in $key and $rounds, which are *not*
-# preserved. $inout[0-3] is cipher/clear text...
-$code.=<<___;
-.type	_aesni_${dir}rypt4,\@abi-omnipotent
-.align	16
-_aesni_${dir}rypt4:
-	$movkey	($key),$rndkey0
-	shr	\$1,$rounds
-	$movkey	16($key),$rndkey1
-	lea	32($key),$key
-	xorps	$rndkey0,$inout0
-	xorps	$rndkey0,$inout1
-	xorps	$rndkey0,$inout2
-	xorps	$rndkey0,$inout3
-	$movkey	($key),$rndkey0
-
-.L${dir}_loop4:
-	aes${dir}	$rndkey1,$inout0
-	aes${dir}	$rndkey1,$inout1
-	dec		$rounds
-	aes${dir}	$rndkey1,$inout2
-	aes${dir}	$rndkey1,$inout3
-	$movkey		16($key),$rndkey1
-	aes${dir}	$rndkey0,$inout0
-	aes${dir}	$rndkey0,$inout1
-	lea		32($key),$key
-	aes${dir}	$rndkey0,$inout2
-	aes${dir}	$rndkey0,$inout3
-	$movkey		($key),$rndkey0
-	jnz		.L${dir}_loop4
-
-	aes${dir}	$rndkey1,$inout0
-	aes${dir}	$rndkey1,$inout1
-	aes${dir}	$rndkey1,$inout2
-	aes${dir}	$rndkey1,$inout3
-	aes${dir}last	$rndkey0,$inout0
-	aes${dir}last	$rndkey0,$inout1
-	aes${dir}last	$rndkey0,$inout2
-	aes${dir}last	$rndkey0,$inout3
-	ret
-.size	_aesni_${dir}rypt4,.-_aesni_${dir}rypt4
-___
-}
-sub aesni_generate6 {
-my $dir=shift;
-# As already mentioned it takes in $key and $rounds, which are *not*
-# preserved. $inout[0-5] is cipher/clear text...
-$code.=<<___;
-.type	_aesni_${dir}rypt6,\@abi-omnipotent
-.align	16
-_aesni_${dir}rypt6:
-	$movkey		($key),$rndkey0
-	shr		\$1,$rounds
-	$movkey		16($key),$rndkey1
-	lea		32($key),$key
-	xorps		$rndkey0,$inout0
-	pxor		$rndkey0,$inout1
-	aes${dir}	$rndkey1,$inout0
-	pxor		$rndkey0,$inout2
-	aes${dir}	$rndkey1,$inout1
-	pxor		$rndkey0,$inout3
-	aes${dir}	$rndkey1,$inout2
-	pxor		$rndkey0,$inout4
-	aes${dir}	$rndkey1,$inout3
-	pxor		$rndkey0,$inout5
-	dec		$rounds
-	aes${dir}	$rndkey1,$inout4
-	$movkey		($key),$rndkey0
-	aes${dir}	$rndkey1,$inout5
-	jmp		.L${dir}_loop6_enter
-.align	16
-.L${dir}_loop6:
-	aes${dir}	$rndkey1,$inout0
-	aes${dir}	$rndkey1,$inout1
-	dec		$rounds
-	aes${dir}	$rndkey1,$inout2
-	aes${dir}	$rndkey1,$inout3
-	aes${dir}	$rndkey1,$inout4
-	aes${dir}	$rndkey1,$inout5
-.L${dir}_loop6_enter:				# happens to be 16-byte aligned
-	$movkey		16($key),$rndkey1
-	aes${dir}	$rndkey0,$inout0
-	aes${dir}	$rndkey0,$inout1
-	lea		32($key),$key
-	aes${dir}	$rndkey0,$inout2
-	aes${dir}	$rndkey0,$inout3
-	aes${dir}	$rndkey0,$inout4
-	aes${dir}	$rndkey0,$inout5
-	$movkey		($key),$rndkey0
-	jnz		.L${dir}_loop6
-
-	aes${dir}	$rndkey1,$inout0
-	aes${dir}	$rndkey1,$inout1
-	aes${dir}	$rndkey1,$inout2
-	aes${dir}	$rndkey1,$inout3
-	aes${dir}	$rndkey1,$inout4
-	aes${dir}	$rndkey1,$inout5
-	aes${dir}last	$rndkey0,$inout0
-	aes${dir}last	$rndkey0,$inout1
-	aes${dir}last	$rndkey0,$inout2
-	aes${dir}last	$rndkey0,$inout3
-	aes${dir}last	$rndkey0,$inout4
-	aes${dir}last	$rndkey0,$inout5
-	ret
-.size	_aesni_${dir}rypt6,.-_aesni_${dir}rypt6
-___
-}
-sub aesni_generate8 {
-my $dir=shift;
-# As already mentioned it takes in $key and $rounds, which are *not*
-# preserved. $inout[0-7] is cipher/clear text...
-$code.=<<___;
-.type	_aesni_${dir}rypt8,\@abi-omnipotent
-.align	16
-_aesni_${dir}rypt8:
-	$movkey		($key),$rndkey0
-	shr		\$1,$rounds
-	$movkey		16($key),$rndkey1
-	lea		32($key),$key
-	xorps		$rndkey0,$inout0
-	xorps		$rndkey0,$inout1
-	aes${dir}	$rndkey1,$inout0
-	pxor		$rndkey0,$inout2
-	aes${dir}	$rndkey1,$inout1
-	pxor		$rndkey0,$inout3
-	aes${dir}	$rndkey1,$inout2
-	pxor		$rndkey0,$inout4
-	aes${dir}	$rndkey1,$inout3
-	pxor		$rndkey0,$inout5
-	dec		$rounds
-	aes${dir}	$rndkey1,$inout4
-	pxor		$rndkey0,$inout6
-	aes${dir}	$rndkey1,$inout5
-	pxor		$rndkey0,$inout7
-	$movkey		($key),$rndkey0
-	aes${dir}	$rndkey1,$inout6
-	aes${dir}	$rndkey1,$inout7
-	$movkey		16($key),$rndkey1
-	jmp		.L${dir}_loop8_enter
-.align	16
-.L${dir}_loop8:
-	aes${dir}	$rndkey1,$inout0
-	aes${dir}	$rndkey1,$inout1
-	dec		$rounds
-	aes${dir}	$rndkey1,$inout2
-	aes${dir}	$rndkey1,$inout3
-	aes${dir}	$rndkey1,$inout4
-	aes${dir}	$rndkey1,$inout5
-	aes${dir}	$rndkey1,$inout6
-	aes${dir}	$rndkey1,$inout7
-	$movkey		16($key),$rndkey1
-.L${dir}_loop8_enter:				# happens to be 16-byte aligned
-	aes${dir}	$rndkey0,$inout0
-	aes${dir}	$rndkey0,$inout1
-	lea		32($key),$key
-	aes${dir}	$rndkey0,$inout2
-	aes${dir}	$rndkey0,$inout3
-	aes${dir}	$rndkey0,$inout4
-	aes${dir}	$rndkey0,$inout5
-	aes${dir}	$rndkey0,$inout6
-	aes${dir}	$rndkey0,$inout7
-	$movkey		($key),$rndkey0
-	jnz		.L${dir}_loop8
-
-	aes${dir}	$rndkey1,$inout0
-	aes${dir}	$rndkey1,$inout1
-	aes${dir}	$rndkey1,$inout2
-	aes${dir}	$rndkey1,$inout3
-	aes${dir}	$rndkey1,$inout4
-	aes${dir}	$rndkey1,$inout5
-	aes${dir}	$rndkey1,$inout6
-	aes${dir}	$rndkey1,$inout7
-	aes${dir}last	$rndkey0,$inout0
-	aes${dir}last	$rndkey0,$inout1
-	aes${dir}last	$rndkey0,$inout2
-	aes${dir}last	$rndkey0,$inout3
-	aes${dir}last	$rndkey0,$inout4
-	aes${dir}last	$rndkey0,$inout5
-	aes${dir}last	$rndkey0,$inout6
-	aes${dir}last	$rndkey0,$inout7
-	ret
-.size	_aesni_${dir}rypt8,.-_aesni_${dir}rypt8
-___
-}
-&aesni_generate3("enc") if ($PREFIX eq "aesni");
-&aesni_generate3("dec");
-&aesni_generate4("enc") if ($PREFIX eq "aesni");
-&aesni_generate4("dec");
-&aesni_generate6("enc") if ($PREFIX eq "aesni");
-&aesni_generate6("dec");
-&aesni_generate8("enc") if ($PREFIX eq "aesni");
-&aesni_generate8("dec");
-
-if ($PREFIX eq "aesni") {
-########################################################################
-# void aesni_ecb_encrypt (const void *in, void *out,
-#			  size_t length, const AES_KEY *key,
-#			  int enc);
-$code.=<<___;
-.globl	aesni_ecb_encrypt
-.type	aesni_ecb_encrypt,\@function,5
-.align	16
-aesni_ecb_encrypt:
-___
-$code.=<<___ if ($win64);
-	lea	-0x58(%rsp),%rsp
-	movaps	%xmm6,(%rsp)
-	movaps	%xmm7,0x10(%rsp)
-	movaps	%xmm8,0x20(%rsp)
-	movaps	%xmm9,0x30(%rsp)
-.Lecb_enc_body:
-___
-$code.=<<___;
-	and	\$-16,$len
-	jz	.Lecb_ret
-
-	mov	240($key),$rounds	# key->rounds
-	$movkey	($key),$rndkey0
-	mov	$key,$key_		# backup $key
-	mov	$rounds,$rnds_		# backup $rounds
-	test	%r8d,%r8d		# 5th argument
-	jz	.Lecb_decrypt
-#--------------------------- ECB ENCRYPT ------------------------------#
-	cmp	\$0x80,$len
-	jb	.Lecb_enc_tail
-
-	movdqu	($inp),$inout0
-	movdqu	0x10($inp),$inout1
-	movdqu	0x20($inp),$inout2
-	movdqu	0x30($inp),$inout3
-	movdqu	0x40($inp),$inout4
-	movdqu	0x50($inp),$inout5
-	movdqu	0x60($inp),$inout6
-	movdqu	0x70($inp),$inout7
-	lea	0x80($inp),$inp
-	sub	\$0x80,$len
-	jmp	.Lecb_enc_loop8_enter
-.align 16
-.Lecb_enc_loop8:
-	movups	$inout0,($out)
-	mov	$key_,$key		# restore $key
-	movdqu	($inp),$inout0
-	mov	$rnds_,$rounds		# restore $rounds
-	movups	$inout1,0x10($out)
-	movdqu	0x10($inp),$inout1
-	movups	$inout2,0x20($out)
-	movdqu	0x20($inp),$inout2
-	movups	$inout3,0x30($out)
-	movdqu	0x30($inp),$inout3
-	movups	$inout4,0x40($out)
-	movdqu	0x40($inp),$inout4
-	movups	$inout5,0x50($out)
-	movdqu	0x50($inp),$inout5
-	movups	$inout6,0x60($out)
-	movdqu	0x60($inp),$inout6
-	movups	$inout7,0x70($out)
-	lea	0x80($out),$out
-	movdqu	0x70($inp),$inout7
-	lea	0x80($inp),$inp
-.Lecb_enc_loop8_enter:
-
-	call	_aesni_encrypt8
-
-	sub	\$0x80,$len
-	jnc	.Lecb_enc_loop8
-
-	movups	$inout0,($out)
-	mov	$key_,$key		# restore $key
-	movups	$inout1,0x10($out)
-	mov	$rnds_,$rounds		# restore $rounds
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	movups	$inout5,0x50($out)
-	movups	$inout6,0x60($out)
-	movups	$inout7,0x70($out)
-	lea	0x80($out),$out
-	add	\$0x80,$len
-	jz	.Lecb_ret
-
-.Lecb_enc_tail:
-	movups	($inp),$inout0
-	cmp	\$0x20,$len
-	jb	.Lecb_enc_one
-	movups	0x10($inp),$inout1
-	je	.Lecb_enc_two
-	movups	0x20($inp),$inout2
-	cmp	\$0x40,$len
-	jb	.Lecb_enc_three
-	movups	0x30($inp),$inout3
-	je	.Lecb_enc_four
-	movups	0x40($inp),$inout4
-	cmp	\$0x60,$len
-	jb	.Lecb_enc_five
-	movups	0x50($inp),$inout5
-	je	.Lecb_enc_six
-	movdqu	0x60($inp),$inout6
-	call	_aesni_encrypt8
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	movups	$inout5,0x50($out)
-	movups	$inout6,0x60($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_enc_one:
-___
-	&aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
-	movups	$inout0,($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_enc_two:
-	xorps	$inout2,$inout2
-	call	_aesni_encrypt3
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_enc_three:
-	call	_aesni_encrypt3
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_enc_four:
-	call	_aesni_encrypt4
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_enc_five:
-	xorps	$inout5,$inout5
-	call	_aesni_encrypt6
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_enc_six:
-	call	_aesni_encrypt6
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	movups	$inout5,0x50($out)
-	jmp	.Lecb_ret
-#--------------------------- ECB DECRYPT ------------------------------#
-.align	16
-.Lecb_decrypt:
-	cmp	\$0x80,$len
-	jb	.Lecb_dec_tail
-
-	movdqu	($inp),$inout0
-	movdqu	0x10($inp),$inout1
-	movdqu	0x20($inp),$inout2
-	movdqu	0x30($inp),$inout3
-	movdqu	0x40($inp),$inout4
-	movdqu	0x50($inp),$inout5
-	movdqu	0x60($inp),$inout6
-	movdqu	0x70($inp),$inout7
-	lea	0x80($inp),$inp
-	sub	\$0x80,$len
-	jmp	.Lecb_dec_loop8_enter
-.align 16
-.Lecb_dec_loop8:
-	movups	$inout0,($out)
-	mov	$key_,$key		# restore $key
-	movdqu	($inp),$inout0
-	mov	$rnds_,$rounds		# restore $rounds
-	movups	$inout1,0x10($out)
-	movdqu	0x10($inp),$inout1
-	movups	$inout2,0x20($out)
-	movdqu	0x20($inp),$inout2
-	movups	$inout3,0x30($out)
-	movdqu	0x30($inp),$inout3
-	movups	$inout4,0x40($out)
-	movdqu	0x40($inp),$inout4
-	movups	$inout5,0x50($out)
-	movdqu	0x50($inp),$inout5
-	movups	$inout6,0x60($out)
-	movdqu	0x60($inp),$inout6
-	movups	$inout7,0x70($out)
-	lea	0x80($out),$out
-	movdqu	0x70($inp),$inout7
-	lea	0x80($inp),$inp
-.Lecb_dec_loop8_enter:
-
-	call	_aesni_decrypt8
-
-	$movkey	($key_),$rndkey0
-	sub	\$0x80,$len
-	jnc	.Lecb_dec_loop8
-
-	movups	$inout0,($out)
-	mov	$key_,$key		# restore $key
-	movups	$inout1,0x10($out)
-	mov	$rnds_,$rounds		# restore $rounds
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	movups	$inout5,0x50($out)
-	movups	$inout6,0x60($out)
-	movups	$inout7,0x70($out)
-	lea	0x80($out),$out
-	add	\$0x80,$len
-	jz	.Lecb_ret
-
-.Lecb_dec_tail:
-	movups	($inp),$inout0
-	cmp	\$0x20,$len
-	jb	.Lecb_dec_one
-	movups	0x10($inp),$inout1
-	je	.Lecb_dec_two
-	movups	0x20($inp),$inout2
-	cmp	\$0x40,$len
-	jb	.Lecb_dec_three
-	movups	0x30($inp),$inout3
-	je	.Lecb_dec_four
-	movups	0x40($inp),$inout4
-	cmp	\$0x60,$len
-	jb	.Lecb_dec_five
-	movups	0x50($inp),$inout5
-	je	.Lecb_dec_six
-	movups	0x60($inp),$inout6
-	$movkey	($key),$rndkey0
-	call	_aesni_decrypt8
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	movups	$inout5,0x50($out)
-	movups	$inout6,0x60($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_dec_one:
-___
-	&aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
-	movups	$inout0,($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_dec_two:
-	xorps	$inout2,$inout2
-	call	_aesni_decrypt3
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_dec_three:
-	call	_aesni_decrypt3
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_dec_four:
-	call	_aesni_decrypt4
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_dec_five:
-	xorps	$inout5,$inout5
-	call	_aesni_decrypt6
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	jmp	.Lecb_ret
-.align	16
-.Lecb_dec_six:
-	call	_aesni_decrypt6
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	movups	$inout5,0x50($out)
-
-.Lecb_ret:
-___
-$code.=<<___ if ($win64);
-	movaps	(%rsp),%xmm6
-	movaps	0x10(%rsp),%xmm7
-	movaps	0x20(%rsp),%xmm8
-	movaps	0x30(%rsp),%xmm9
-	lea	0x58(%rsp),%rsp
-.Lecb_enc_ret:
-___
-$code.=<<___;
-	ret
-.size	aesni_ecb_encrypt,.-aesni_ecb_encrypt
-___
-
-{
-######################################################################
-# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
-#                         size_t blocks, const AES_KEY *key,
-#                         const char *ivec,char *cmac);
-#
-# Handles only complete blocks, operates on 64-bit counter and
-# does not update *ivec! Nor does it finalize CMAC value
-# (see engine/eng_aesni.c for details)
-#
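Each iteration of the loop below computes the following, shown here as a hedged C sketch (hypothetical helpers; CCM framing and CMAC finalization are out of scope): the CTR keystream block and the CBC-MAC update use the same key, which is why the assembly can interleave the two aesenc chains.

```c
#include <wmmintrin.h>

/* Single-block AES helper, as sketched earlier in this diff. */
static __m128i aes_enc(__m128i b, const __m128i *ks, int rounds)
{
    b = _mm_xor_si128(b, ks[0]);
    for (int i = 1; i < rounds; i++)
        b = _mm_aesenc_si128(b, ks[i]);
    return _mm_aesenclast_si128(b, ks[rounds]);
}

/* One CCM64 encrypt step: keystream from the counter, CBC-MAC update
 * from the plaintext. The caller bumps the 64-bit big-endian counter,
 * as the paddq on a byte-swapped copy does below. */
static void ccm64_enc_step(__m128i *ctr_block, __m128i *cmac,
                           __m128i in, __m128i *out,
                           const __m128i *ks, int rounds)
{
    __m128i pad = aes_enc(*ctr_block, ks, rounds);          /* CTR */
    *cmac = aes_enc(_mm_xor_si128(*cmac, in), ks, rounds);  /* MAC */
    *out  = _mm_xor_si128(in, pad);
}
```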
-{
-my $cmac="%r9";	# 6th argument
-
-my $increment="%xmm6";
-my $bswap_mask="%xmm7";
-
-$code.=<<___;
-.globl	aesni_ccm64_encrypt_blocks
-.type	aesni_ccm64_encrypt_blocks,\@function,6
-.align	16
-aesni_ccm64_encrypt_blocks:
-___
-$code.=<<___ if ($win64);
-	lea	-0x58(%rsp),%rsp
-	movaps	%xmm6,(%rsp)
-	movaps	%xmm7,0x10(%rsp)
-	movaps	%xmm8,0x20(%rsp)
-	movaps	%xmm9,0x30(%rsp)
-.Lccm64_enc_body:
-___
-$code.=<<___;
-	mov	240($key),$rounds		# key->rounds
-	movdqu	($ivp),$iv
-	movdqa	.Lincrement64(%rip),$increment
-	movdqa	.Lbswap_mask(%rip),$bswap_mask
-
-	shr	\$1,$rounds
-	lea	0($key),$key_
-	movdqu	($cmac),$inout1
-	movdqa	$iv,$inout0
-	mov	$rounds,$rnds_
-	pshufb	$bswap_mask,$iv
-	jmp	.Lccm64_enc_outer
-.align	16
-.Lccm64_enc_outer:
-	$movkey	($key_),$rndkey0
-	mov	$rnds_,$rounds
-	movups	($inp),$in0			# load inp
-
-	xorps	$rndkey0,$inout0		# counter
-	$movkey	16($key_),$rndkey1
-	xorps	$in0,$rndkey0
-	lea	32($key_),$key
-	xorps	$rndkey0,$inout1		# cmac^=inp
-	$movkey	($key),$rndkey0
-
-.Lccm64_enc2_loop:
-	aesenc	$rndkey1,$inout0
-	dec	$rounds
-	aesenc	$rndkey1,$inout1
-	$movkey	16($key),$rndkey1
-	aesenc	$rndkey0,$inout0
-	lea	32($key),$key
-	aesenc	$rndkey0,$inout1
-	$movkey	0($key),$rndkey0
-	jnz	.Lccm64_enc2_loop
-	aesenc	$rndkey1,$inout0
-	aesenc	$rndkey1,$inout1
-	paddq	$increment,$iv
-	aesenclast	$rndkey0,$inout0
-	aesenclast	$rndkey0,$inout1
-
-	dec	$len
-	lea	16($inp),$inp
-	xorps	$inout0,$in0			# inp ^= E(iv)
-	movdqa	$iv,$inout0
-	movups	$in0,($out)			# save output
-	lea	16($out),$out
-	pshufb	$bswap_mask,$inout0
-	jnz	.Lccm64_enc_outer
-
-	movups	$inout1,($cmac)
-___
-$code.=<<___ if ($win64);
-	movaps	(%rsp),%xmm6
-	movaps	0x10(%rsp),%xmm7
-	movaps	0x20(%rsp),%xmm8
-	movaps	0x30(%rsp),%xmm9
-	lea	0x58(%rsp),%rsp
-.Lccm64_enc_ret:
-___
-$code.=<<___;
-	ret
-.size	aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
-___
-######################################################################
-$code.=<<___;
-.globl	aesni_ccm64_decrypt_blocks
-.type	aesni_ccm64_decrypt_blocks,\@function,6
-.align	16
-aesni_ccm64_decrypt_blocks:
-___
-$code.=<<___ if ($win64);
-	lea	-0x58(%rsp),%rsp
-	movaps	%xmm6,(%rsp)
-	movaps	%xmm7,0x10(%rsp)
-	movaps	%xmm8,0x20(%rsp)
-	movaps	%xmm9,0x30(%rsp)
-.Lccm64_dec_body:
-___
-$code.=<<___;
-	mov	240($key),$rounds		# key->rounds
-	movups	($ivp),$iv
-	movdqu	($cmac),$inout1
-	movdqa	.Lincrement64(%rip),$increment
-	movdqa	.Lbswap_mask(%rip),$bswap_mask
-
-	movaps	$iv,$inout0
-	mov	$rounds,$rnds_
-	mov	$key,$key_
-	pshufb	$bswap_mask,$iv
-___
-	&aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
-	movups	($inp),$in0			# load inp
-	paddq	$increment,$iv
-	lea	16($inp),$inp
-	jmp	.Lccm64_dec_outer
-.align	16
-.Lccm64_dec_outer:
-	xorps	$inout0,$in0			# inp ^= E(iv)
-	movdqa	$iv,$inout0
-	mov	$rnds_,$rounds
-	movups	$in0,($out)			# save output
-	lea	16($out),$out
-	pshufb	$bswap_mask,$inout0
-
-	sub	\$1,$len
-	jz	.Lccm64_dec_break
-
-	$movkey	($key_),$rndkey0
-	shr	\$1,$rounds
-	$movkey	16($key_),$rndkey1
-	xorps	$rndkey0,$in0
-	lea	32($key_),$key
-	xorps	$rndkey0,$inout0
-	xorps	$in0,$inout1			# cmac^=out
-	$movkey	($key),$rndkey0
-
-.Lccm64_dec2_loop:
-	aesenc	$rndkey1,$inout0
-	dec	$rounds
-	aesenc	$rndkey1,$inout1
-	$movkey	16($key),$rndkey1
-	aesenc	$rndkey0,$inout0
-	lea	32($key),$key
-	aesenc	$rndkey0,$inout1
-	$movkey	0($key),$rndkey0
-	jnz	.Lccm64_dec2_loop
-	movups	($inp),$in0			# load inp
-	paddq	$increment,$iv
-	aesenc	$rndkey1,$inout0
-	aesenc	$rndkey1,$inout1
-	lea	16($inp),$inp
-	aesenclast	$rndkey0,$inout0
-	aesenclast	$rndkey0,$inout1
-	jmp	.Lccm64_dec_outer
-
-.align	16
-.Lccm64_dec_break:
-	#xorps	$in0,$inout1			# cmac^=out
-___
-	&aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
-$code.=<<___;
-	movups	$inout1,($cmac)
-___
-$code.=<<___ if ($win64);
-	movaps	(%rsp),%xmm6
-	movaps	0x10(%rsp),%xmm7
-	movaps	0x20(%rsp),%xmm8
-	movaps	0x30(%rsp),%xmm9
-	lea	0x58(%rsp),%rsp
-.Lccm64_dec_ret:
-___
-$code.=<<___;
-	ret
-.size	aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
-___
-}
-######################################################################
-# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
-#                         size_t blocks, const AES_KEY *key,
-#                         const char *ivec);
-#
-# Handles only complete blocks, operates on 32-bit counter and
-# does not update *ivec! (see engine/eng_aesni.c for details)
-#
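The counter bookkeeping below keeps the low 4 bytes of the IV as a big-endian 32-bit counter (the pextrd/bswap/pinsrd sequence). A portable C sketch of the same invariant (hypothetical helper; __builtin_bswap32 assumes GCC/Clang):

```c
#include <stdint.h>
#include <string.h>

/* Bump the big-endian 32-bit counter in the last four bytes of the
 * 16-byte IV, which is what the register-resident counter vectors
 * below track six blocks at a time. */
static void ctr32_increment(unsigned char ivec[16])
{
    uint32_t c;
    memcpy(&c, ivec + 12, sizeof(c));
    c = __builtin_bswap32(__builtin_bswap32(c) + 1);
    memcpy(ivec + 12, &c, sizeof(c));
}
```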
-{
-my $reserved = $win64?0:-0x28;
-my ($in0,$in1,$in2,$in3)=map("%xmm$_",(8..11));
-my ($iv0,$iv1,$ivec)=("%xmm12","%xmm13","%xmm14");
-my $bswap_mask="%xmm15";
-
-$code.=<<___;
-.globl	aesni_ctr32_encrypt_blocks
-.type	aesni_ctr32_encrypt_blocks,\@function,5
-.align	16
-aesni_ctr32_encrypt_blocks:
-___
-$code.=<<___ if ($win64);
-	lea	-0xc8(%rsp),%rsp
-	movaps	%xmm6,0x20(%rsp)
-	movaps	%xmm7,0x30(%rsp)
-	movaps	%xmm8,0x40(%rsp)
-	movaps	%xmm9,0x50(%rsp)
-	movaps	%xmm10,0x60(%rsp)
-	movaps	%xmm11,0x70(%rsp)
-	movaps	%xmm12,0x80(%rsp)
-	movaps	%xmm13,0x90(%rsp)
-	movaps	%xmm14,0xa0(%rsp)
-	movaps	%xmm15,0xb0(%rsp)
-.Lctr32_body:
-___
-$code.=<<___;
-	cmp	\$1,$len
-	je	.Lctr32_one_shortcut
-
-	movdqu	($ivp),$ivec
-	movdqa	.Lbswap_mask(%rip),$bswap_mask
-	xor	$rounds,$rounds
-	pextrd	\$3,$ivec,$rnds_		# pull 32-bit counter
-	pinsrd	\$3,$rounds,$ivec		# wipe 32-bit counter
-
-	mov	240($key),$rounds		# key->rounds
-	bswap	$rnds_
-	pxor	$iv0,$iv0			# vector of 3 32-bit counters
-	pxor	$iv1,$iv1			# vector of 3 32-bit counters
-	pinsrd	\$0,$rnds_,$iv0
-	lea	3($rnds_),$key_
-	pinsrd	\$0,$key_,$iv1
-	inc	$rnds_
-	pinsrd	\$1,$rnds_,$iv0
-	inc	$key_
-	pinsrd	\$1,$key_,$iv1
-	inc	$rnds_
-	pinsrd	\$2,$rnds_,$iv0
-	inc	$key_
-	pinsrd	\$2,$key_,$iv1
-	movdqa	$iv0,$reserved(%rsp)
-	pshufb	$bswap_mask,$iv0
-	movdqa	$iv1,`$reserved+0x10`(%rsp)
-	pshufb	$bswap_mask,$iv1
-
-	pshufd	\$`3<<6`,$iv0,$inout0		# place counter to upper dword
-	pshufd	\$`2<<6`,$iv0,$inout1
-	pshufd	\$`1<<6`,$iv0,$inout2
-	cmp	\$6,$len
-	jb	.Lctr32_tail
-	shr	\$1,$rounds
-	mov	$key,$key_			# backup $key
-	mov	$rounds,$rnds_			# backup $rounds
-	sub	\$6,$len
-	jmp	.Lctr32_loop6
-
-.align	16
-.Lctr32_loop6:
-	pshufd	\$`3<<6`,$iv1,$inout3
-	por	$ivec,$inout0			# merge counter-less ivec
-	 $movkey	($key_),$rndkey0
-	pshufd	\$`2<<6`,$iv1,$inout4
-	por	$ivec,$inout1
-	 $movkey	16($key_),$rndkey1
-	pshufd	\$`1<<6`,$iv1,$inout5
-	por	$ivec,$inout2
-	por	$ivec,$inout3
-	 xorps		$rndkey0,$inout0
-	por	$ivec,$inout4
-	por	$ivec,$inout5
-
-	# inline _aesni_encrypt6 and interleave last rounds
-	# with own code...
-
-	pxor		$rndkey0,$inout1
-	aesenc		$rndkey1,$inout0
-	lea		32($key_),$key
-	pxor		$rndkey0,$inout2
-	aesenc		$rndkey1,$inout1
-	 movdqa		.Lincrement32(%rip),$iv1
-	pxor		$rndkey0,$inout3
-	aesenc		$rndkey1,$inout2
-	 movdqa		$reserved(%rsp),$iv0
-	pxor		$rndkey0,$inout4
-	aesenc		$rndkey1,$inout3
-	pxor		$rndkey0,$inout5
-	$movkey		($key),$rndkey0
-	dec		$rounds
-	aesenc		$rndkey1,$inout4
-	aesenc		$rndkey1,$inout5
-	jmp		.Lctr32_enc_loop6_enter
-.align	16
-.Lctr32_enc_loop6:
-	aesenc		$rndkey1,$inout0
-	aesenc		$rndkey1,$inout1
-	dec		$rounds
-	aesenc		$rndkey1,$inout2
-	aesenc		$rndkey1,$inout3
-	aesenc		$rndkey1,$inout4
-	aesenc		$rndkey1,$inout5
-.Lctr32_enc_loop6_enter:
-	$movkey		16($key),$rndkey1
-	aesenc		$rndkey0,$inout0
-	aesenc		$rndkey0,$inout1
-	lea		32($key),$key
-	aesenc		$rndkey0,$inout2
-	aesenc		$rndkey0,$inout3
-	aesenc		$rndkey0,$inout4
-	aesenc		$rndkey0,$inout5
-	$movkey		($key),$rndkey0
-	jnz		.Lctr32_enc_loop6
-
-	aesenc		$rndkey1,$inout0
-	 paddd		$iv1,$iv0		# increment counter vector
-	aesenc		$rndkey1,$inout1
-	 paddd		`$reserved+0x10`(%rsp),$iv1
-	aesenc		$rndkey1,$inout2
-	 movdqa		$iv0,$reserved(%rsp)	# save counter vector
-	aesenc		$rndkey1,$inout3
-	 movdqa		$iv1,`$reserved+0x10`(%rsp)
-	aesenc		$rndkey1,$inout4
-	 pshufb		$bswap_mask,$iv0	# byte swap
-	aesenc		$rndkey1,$inout5
-	 pshufb		$bswap_mask,$iv1
-
-	aesenclast	$rndkey0,$inout0
-	 movups		($inp),$in0		# load input
-	aesenclast	$rndkey0,$inout1
-	 movups		0x10($inp),$in1
-	aesenclast	$rndkey0,$inout2
-	 movups		0x20($inp),$in2
-	aesenclast	$rndkey0,$inout3
-	 movups		0x30($inp),$in3
-	aesenclast	$rndkey0,$inout4
-	 movups		0x40($inp),$rndkey1
-	aesenclast	$rndkey0,$inout5
-	 movups		0x50($inp),$rndkey0
-	 lea	0x60($inp),$inp
-
-	xorps	$inout0,$in0			# xor
-	 pshufd	\$`3<<6`,$iv0,$inout0
-	xorps	$inout1,$in1
-	 pshufd	\$`2<<6`,$iv0,$inout1
-	movups	$in0,($out)			# store output
-	xorps	$inout2,$in2
-	 pshufd	\$`1<<6`,$iv0,$inout2
-	movups	$in1,0x10($out)
-	xorps	$inout3,$in3
-	movups	$in2,0x20($out)
-	xorps	$inout4,$rndkey1
-	movups	$in3,0x30($out)
-	xorps	$inout5,$rndkey0
-	movups	$rndkey1,0x40($out)
-	movups	$rndkey0,0x50($out)
-	lea	0x60($out),$out
-	mov	$rnds_,$rounds
-	sub	\$6,$len
-	jnc	.Lctr32_loop6
-
-	add	\$6,$len
-	jz	.Lctr32_done
-	mov	$key_,$key			# restore $key
-	lea	1($rounds,$rounds),$rounds	# restore original value
-
-.Lctr32_tail:
-	por	$ivec,$inout0
-	movups	($inp),$in0
-	cmp	\$2,$len
-	jb	.Lctr32_one
-
-	por	$ivec,$inout1
-	movups	0x10($inp),$in1
-	je	.Lctr32_two
-
-	pshufd	\$`3<<6`,$iv1,$inout3
-	por	$ivec,$inout2
-	movups	0x20($inp),$in2
-	cmp	\$4,$len
-	jb	.Lctr32_three
-
-	pshufd	\$`2<<6`,$iv1,$inout4
-	por	$ivec,$inout3
-	movups	0x30($inp),$in3
-	je	.Lctr32_four
-
-	por	$ivec,$inout4
-	xorps	$inout5,$inout5
-
-	call	_aesni_encrypt6
-
-	movups	0x40($inp),$rndkey1
-	xorps	$inout0,$in0
-	xorps	$inout1,$in1
-	movups	$in0,($out)
-	xorps	$inout2,$in2
-	movups	$in1,0x10($out)
-	xorps	$inout3,$in3
-	movups	$in2,0x20($out)
-	xorps	$inout4,$rndkey1
-	movups	$in3,0x30($out)
-	movups	$rndkey1,0x40($out)
-	jmp	.Lctr32_done
-
-.align	16
-.Lctr32_one_shortcut:
-	movups	($ivp),$inout0
-	movups	($inp),$in0
-	mov	240($key),$rounds		# key->rounds
-.Lctr32_one:
-___
-	&aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
-	xorps	$inout0,$in0
-	movups	$in0,($out)
-	jmp	.Lctr32_done
-
-.align	16
-.Lctr32_two:
-	xorps	$inout2,$inout2
-	call	_aesni_encrypt3
-	xorps	$inout0,$in0
-	xorps	$inout1,$in1
-	movups	$in0,($out)
-	movups	$in1,0x10($out)
-	jmp	.Lctr32_done
-
-.align	16
-.Lctr32_three:
-	call	_aesni_encrypt3
-	xorps	$inout0,$in0
-	xorps	$inout1,$in1
-	movups	$in0,($out)
-	xorps	$inout2,$in2
-	movups	$in1,0x10($out)
-	movups	$in2,0x20($out)
-	jmp	.Lctr32_done
-
-.align	16
-.Lctr32_four:
-	call	_aesni_encrypt4
-	xorps	$inout0,$in0
-	xorps	$inout1,$in1
-	movups	$in0,($out)
-	xorps	$inout2,$in2
-	movups	$in1,0x10($out)
-	xorps	$inout3,$in3
-	movups	$in2,0x20($out)
-	movups	$in3,0x30($out)
-
-.Lctr32_done:
-___
-$code.=<<___ if ($win64);
-	movaps	0x20(%rsp),%xmm6
-	movaps	0x30(%rsp),%xmm7
-	movaps	0x40(%rsp),%xmm8
-	movaps	0x50(%rsp),%xmm9
-	movaps	0x60(%rsp),%xmm10
-	movaps	0x70(%rsp),%xmm11
-	movaps	0x80(%rsp),%xmm12
-	movaps	0x90(%rsp),%xmm13
-	movaps	0xa0(%rsp),%xmm14
-	movaps	0xb0(%rsp),%xmm15
-	lea	0xc8(%rsp),%rsp
-.Lctr32_ret:
-___
-$code.=<<___;
-	ret
-.size	aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
-___
-}
-
-######################################################################
-# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
-#	const AES_KEY *key1, const AES_KEY *key2,
-#	const unsigned char iv[16]);
-#
-{
-my @tweak=map("%xmm$_",(10..15));
-my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
-my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
-my $frame_size = 0x68 + ($win64?160:0);
-
-$code.=<<___;
-.globl	aesni_xts_encrypt
-.type	aesni_xts_encrypt,\@function,6
-.align	16
-aesni_xts_encrypt:
-	lea	-$frame_size(%rsp),%rsp
-___
-$code.=<<___ if ($win64);
-	movaps	%xmm6,0x60(%rsp)
-	movaps	%xmm7,0x70(%rsp)
-	movaps	%xmm8,0x80(%rsp)
-	movaps	%xmm9,0x90(%rsp)
-	movaps	%xmm10,0xa0(%rsp)
-	movaps	%xmm11,0xb0(%rsp)
-	movaps	%xmm12,0xc0(%rsp)
-	movaps	%xmm13,0xd0(%rsp)
-	movaps	%xmm14,0xe0(%rsp)
-	movaps	%xmm15,0xf0(%rsp)
-.Lxts_enc_body:
-___
-$code.=<<___;
-	movups	($ivp),@tweak[5]		# load clear-text tweak
-	mov	240(%r8),$rounds		# key2->rounds
-	mov	240($key),$rnds_		# key1->rounds
-___
-	# generate the tweak
-	&aesni_generate1("enc",$key2,$rounds,@tweak[5]);
-$code.=<<___;
-	mov	$key,$key_			# backup $key
-	mov	$rnds_,$rounds			# backup $rounds
-	mov	$len,$len_			# backup $len
-	and	\$-16,$len
-
-	movdqa	.Lxts_magic(%rip),$twmask
-	pxor	$twtmp,$twtmp
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-___
-    for ($i=0;$i<4;$i++) {
-    $code.=<<___;
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[$i]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	pand	$twmask,$twres			# isolate carry and residue
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	pxor	$twres,@tweak[5]
-___
-    }
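Each pshufd/pcmpgtd/paddq/pand/pxor group above multiplies the 128-bit tweak by x in GF(2^128) without a branch. The scalar equivalent, as a sketch (little-endian 64-bit halves assumed):

```c
#include <stdint.h>

/* XTS tweak update: shift the 128-bit tweak left one bit; if a bit
 * carried out of the top, xor the reduction constant 0x87 into the
 * low byte. The SSE code isolates the same carry with pcmpgtd/pand. */
static void xts_double_tweak(uint64_t t[2])   /* t[0] low, t[1] high */
{
    uint64_t carry = t[1] >> 63;
    t[1] = (t[1] << 1) | (t[0] >> 63);
    t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
}
```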
-$code.=<<___;
-	sub	\$16*6,$len
-	jc	.Lxts_enc_short
-
-	shr	\$1,$rounds
-	sub	\$1,$rounds
-	mov	$rounds,$rnds_
-	jmp	.Lxts_enc_grandloop
-
-.align	16
-.Lxts_enc_grandloop:
-	pshufd	\$0x13,$twtmp,$twres
-	movdqa	@tweak[5],@tweak[4]
-	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
-	movdqu	`16*0`($inp),$inout0		# load input
-	pand	$twmask,$twres			# isolate carry and residue
-	movdqu	`16*1`($inp),$inout1
-	pxor	$twres,@tweak[5]
-
-	movdqu	`16*2`($inp),$inout2
-	pxor	@tweak[0],$inout0		# input^=tweak
-	movdqu	`16*3`($inp),$inout3
-	pxor	@tweak[1],$inout1
-	movdqu	`16*4`($inp),$inout4
-	pxor	@tweak[2],$inout2
-	movdqu	`16*5`($inp),$inout5
-	lea	`16*6`($inp),$inp
-	pxor	@tweak[3],$inout3
-	$movkey		($key_),$rndkey0
-	pxor	@tweak[4],$inout4
-	pxor	@tweak[5],$inout5
-
-	# inline _aesni_encrypt6 and interleave first and last rounds
-	# with own code...
-	$movkey		16($key_),$rndkey1
-	pxor		$rndkey0,$inout0
-	pxor		$rndkey0,$inout1
-	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks
-	aesenc		$rndkey1,$inout0
-	lea		32($key_),$key
-	pxor		$rndkey0,$inout2
-	 movdqa	@tweak[1],`16*1`(%rsp)
-	aesenc		$rndkey1,$inout1
-	pxor		$rndkey0,$inout3
-	 movdqa	@tweak[2],`16*2`(%rsp)
-	aesenc		$rndkey1,$inout2
-	pxor		$rndkey0,$inout4
-	 movdqa	@tweak[3],`16*3`(%rsp)
-	aesenc		$rndkey1,$inout3
-	pxor		$rndkey0,$inout5
-	$movkey		($key),$rndkey0
-	dec		$rounds
-	 movdqa	@tweak[4],`16*4`(%rsp)
-	aesenc		$rndkey1,$inout4
-	 movdqa	@tweak[5],`16*5`(%rsp)
-	aesenc		$rndkey1,$inout5
-	pxor	$twtmp,$twtmp
-	pcmpgtd	@tweak[5],$twtmp
-	jmp		.Lxts_enc_loop6_enter
-
-.align	16
-.Lxts_enc_loop6:
-	aesenc		$rndkey1,$inout0
-	aesenc		$rndkey1,$inout1
-	dec		$rounds
-	aesenc		$rndkey1,$inout2
-	aesenc		$rndkey1,$inout3
-	aesenc		$rndkey1,$inout4
-	aesenc		$rndkey1,$inout5
-.Lxts_enc_loop6_enter:
-	$movkey		16($key),$rndkey1
-	aesenc		$rndkey0,$inout0
-	aesenc		$rndkey0,$inout1
-	lea		32($key),$key
-	aesenc		$rndkey0,$inout2
-	aesenc		$rndkey0,$inout3
-	aesenc		$rndkey0,$inout4
-	aesenc		$rndkey0,$inout5
-	$movkey		($key),$rndkey0
-	jnz		.Lxts_enc_loop6
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 aesenc		$rndkey1,$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 aesenc		$rndkey1,$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	 aesenc		$rndkey1,$inout2
-	pxor	$twres,@tweak[5]
-	 aesenc		$rndkey1,$inout3
-	 aesenc		$rndkey1,$inout4
-	 aesenc		$rndkey1,$inout5
-	 $movkey	16($key),$rndkey1
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[0]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 aesenc		$rndkey0,$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 aesenc		$rndkey0,$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	 aesenc		$rndkey0,$inout2
-	pxor	$twres,@tweak[5]
-	 aesenc		$rndkey0,$inout3
-	 aesenc		$rndkey0,$inout4
-	 aesenc		$rndkey0,$inout5
-	 $movkey	32($key),$rndkey0
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[1]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 aesenc		$rndkey1,$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 aesenc		$rndkey1,$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	 aesenc		$rndkey1,$inout2
-	pxor	$twres,@tweak[5]
-	 aesenc		$rndkey1,$inout3
-	 aesenc		$rndkey1,$inout4
-	 aesenc		$rndkey1,$inout5
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[2]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 aesenclast	$rndkey0,$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 aesenclast	$rndkey0,$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	 aesenclast	$rndkey0,$inout2
-	pxor	$twres,@tweak[5]
-	 aesenclast	$rndkey0,$inout3
-	 aesenclast	$rndkey0,$inout4
-	 aesenclast	$rndkey0,$inout5
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[3]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 xorps	`16*0`(%rsp),$inout0		# output^=tweak
-	pand	$twmask,$twres			# isolate carry and residue
-	 xorps	`16*1`(%rsp),$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	pxor	$twres,@tweak[5]
-
-	xorps	`16*2`(%rsp),$inout2
-	movups	$inout0,`16*0`($out)		# write output
-	xorps	`16*3`(%rsp),$inout3
-	movups	$inout1,`16*1`($out)
-	xorps	`16*4`(%rsp),$inout4
-	movups	$inout2,`16*2`($out)
-	xorps	`16*5`(%rsp),$inout5
-	movups	$inout3,`16*3`($out)
-	mov	$rnds_,$rounds			# restore $rounds
-	movups	$inout4,`16*4`($out)
-	movups	$inout5,`16*5`($out)
-	lea	`16*6`($out),$out
-	sub	\$16*6,$len
-	jnc	.Lxts_enc_grandloop
-
-	lea	3($rounds,$rounds),$rounds	# restore original value
-	mov	$key_,$key			# restore $key
-	mov	$rounds,$rnds_			# backup $rounds
-
-.Lxts_enc_short:
-	add	\$16*6,$len
-	jz	.Lxts_enc_done
-
-	cmp	\$0x20,$len
-	jb	.Lxts_enc_one
-	je	.Lxts_enc_two
-
-	cmp	\$0x40,$len
-	jb	.Lxts_enc_three
-	je	.Lxts_enc_four
-
-	pshufd	\$0x13,$twtmp,$twres
-	movdqa	@tweak[5],@tweak[4]
-	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
-	 movdqu	($inp),$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 movdqu	16*1($inp),$inout1
-	pxor	$twres,@tweak[5]
-
-	movdqu	16*2($inp),$inout2
-	pxor	@tweak[0],$inout0
-	movdqu	16*3($inp),$inout3
-	pxor	@tweak[1],$inout1
-	movdqu	16*4($inp),$inout4
-	lea	16*5($inp),$inp
-	pxor	@tweak[2],$inout2
-	pxor	@tweak[3],$inout3
-	pxor	@tweak[4],$inout4
-
-	call	_aesni_encrypt6
-
-	xorps	@tweak[0],$inout0
-	movdqa	@tweak[5],@tweak[0]
-	xorps	@tweak[1],$inout1
-	xorps	@tweak[2],$inout2
-	movdqu	$inout0,($out)
-	xorps	@tweak[3],$inout3
-	movdqu	$inout1,16*1($out)
-	xorps	@tweak[4],$inout4
-	movdqu	$inout2,16*2($out)
-	movdqu	$inout3,16*3($out)
-	movdqu	$inout4,16*4($out)
-	lea	16*5($out),$out
-	jmp	.Lxts_enc_done
-
-.align	16
-.Lxts_enc_one:
-	movups	($inp),$inout0
-	lea	16*1($inp),$inp
-	xorps	@tweak[0],$inout0
-___
-	&aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
-	xorps	@tweak[0],$inout0
-	movdqa	@tweak[1],@tweak[0]
-	movups	$inout0,($out)
-	lea	16*1($out),$out
-	jmp	.Lxts_enc_done
-
-.align	16
-.Lxts_enc_two:
-	movups	($inp),$inout0
-	movups	16($inp),$inout1
-	lea	32($inp),$inp
-	xorps	@tweak[0],$inout0
-	xorps	@tweak[1],$inout1
-
-	call	_aesni_encrypt3
-
-	xorps	@tweak[0],$inout0
-	movdqa	@tweak[2],@tweak[0]
-	xorps	@tweak[1],$inout1
-	movups	$inout0,($out)
-	movups	$inout1,16*1($out)
-	lea	16*2($out),$out
-	jmp	.Lxts_enc_done
-
-.align	16
-.Lxts_enc_three:
-	movups	($inp),$inout0
-	movups	16*1($inp),$inout1
-	movups	16*2($inp),$inout2
-	lea	16*3($inp),$inp
-	xorps	@tweak[0],$inout0
-	xorps	@tweak[1],$inout1
-	xorps	@tweak[2],$inout2
-
-	call	_aesni_encrypt3
-
-	xorps	@tweak[0],$inout0
-	movdqa	@tweak[3],@tweak[0]
-	xorps	@tweak[1],$inout1
-	xorps	@tweak[2],$inout2
-	movups	$inout0,($out)
-	movups	$inout1,16*1($out)
-	movups	$inout2,16*2($out)
-	lea	16*3($out),$out
-	jmp	.Lxts_enc_done
-
-.align	16
-.Lxts_enc_four:
-	movups	($inp),$inout0
-	movups	16*1($inp),$inout1
-	movups	16*2($inp),$inout2
-	xorps	@tweak[0],$inout0
-	movups	16*3($inp),$inout3
-	lea	16*4($inp),$inp
-	xorps	@tweak[1],$inout1
-	xorps	@tweak[2],$inout2
-	xorps	@tweak[3],$inout3
-
-	call	_aesni_encrypt4
-
-	xorps	@tweak[0],$inout0
-	movdqa	@tweak[5],@tweak[0]
-	xorps	@tweak[1],$inout1
-	xorps	@tweak[2],$inout2
-	movups	$inout0,($out)
-	xorps	@tweak[3],$inout3
-	movups	$inout1,16*1($out)
-	movups	$inout2,16*2($out)
-	movups	$inout3,16*3($out)
-	lea	16*4($out),$out
-	jmp	.Lxts_enc_done
-
-.align	16
-.Lxts_enc_done:
-	and	\$15,$len_
-	jz	.Lxts_enc_ret
-	mov	$len_,$len
-
-.Lxts_enc_steal:
-	movzb	($inp),%eax			# borrow $rounds ...
-	movzb	-16($out),%ecx			# ... and $key
-	lea	1($inp),$inp
-	mov	%al,-16($out)
-	mov	%cl,0($out)
-	lea	1($out),$out
-	sub	\$1,$len
-	jnz	.Lxts_enc_steal
-
-	sub	$len_,$out			# rewind $out
-	mov	$key_,$key			# restore $key
-	mov	$rnds_,$rounds			# restore $rounds
-
-	movups	-16($out),$inout0
-	xorps	@tweak[0],$inout0
-___
-	&aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
-	xorps	@tweak[0],$inout0
-	movups	$inout0,-16($out)
-
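The steal loop above is XTS ciphertext stealing done byte by byte. The same shuffle in C (sketch only; as in the assembly, out points just past the last full ciphertext block, and the caller re-encrypts that block afterwards):

```c
#include <stddef.h>

/* XTS ciphertext stealing: each trailing plaintext byte displaces a
 * ciphertext byte from the previous block into the final partial
 * block; the previous block is then re-encrypted with the last tweak. */
static void xts_steal(unsigned char *out, const unsigned char *inp,
                      size_t tail)
{
    unsigned char *prev = out - 16;   /* last full ciphertext block */
    for (size_t i = 0; i < tail; i++) {
        unsigned char p = inp[i];
        out[i]  = prev[i];   /* stolen ciphertext byte moves forward */
        prev[i] = p;         /* plaintext byte awaits re-encryption */
    }
}
```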
-.Lxts_enc_ret:
-___
-$code.=<<___ if ($win64);
-	movaps	0x60(%rsp),%xmm6
-	movaps	0x70(%rsp),%xmm7
-	movaps	0x80(%rsp),%xmm8
-	movaps	0x90(%rsp),%xmm9
-	movaps	0xa0(%rsp),%xmm10
-	movaps	0xb0(%rsp),%xmm11
-	movaps	0xc0(%rsp),%xmm12
-	movaps	0xd0(%rsp),%xmm13
-	movaps	0xe0(%rsp),%xmm14
-	movaps	0xf0(%rsp),%xmm15
-___
-$code.=<<___;
-	lea	$frame_size(%rsp),%rsp
-.Lxts_enc_epilogue:
-	ret
-.size	aesni_xts_encrypt,.-aesni_xts_encrypt
-___
-
-$code.=<<___;
-.globl	aesni_xts_decrypt
-.type	aesni_xts_decrypt,\@function,6
-.align	16
-aesni_xts_decrypt:
-	lea	-$frame_size(%rsp),%rsp
-___
-$code.=<<___ if ($win64);
-	movaps	%xmm6,0x60(%rsp)
-	movaps	%xmm7,0x70(%rsp)
-	movaps	%xmm8,0x80(%rsp)
-	movaps	%xmm9,0x90(%rsp)
-	movaps	%xmm10,0xa0(%rsp)
-	movaps	%xmm11,0xb0(%rsp)
-	movaps	%xmm12,0xc0(%rsp)
-	movaps	%xmm13,0xd0(%rsp)
-	movaps	%xmm14,0xe0(%rsp)
-	movaps	%xmm15,0xf0(%rsp)
-.Lxts_dec_body:
-___
-$code.=<<___;
-	movups	($ivp),@tweak[5]		# load clear-text tweak
-	mov	240($key2),$rounds		# key2->rounds
-	mov	240($key),$rnds_		# key1->rounds
-___
-	# generate the tweak
-	&aesni_generate1("enc",$key2,$rounds,@tweak[5]);
-$code.=<<___;
-	xor	%eax,%eax			# if ($len%16) len-=16;
-	test	\$15,$len
-	setnz	%al
-	shl	\$4,%rax
-	sub	%rax,$len
-
-	mov	$key,$key_			# backup $key
-	mov	$rnds_,$rounds			# backup $rounds
-	mov	$len,$len_			# backup $len
-	and	\$-16,$len
-
-	movdqa	.Lxts_magic(%rip),$twmask
-	pxor	$twtmp,$twtmp
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-___
-    for ($i=0;$i<4;$i++) {
-    $code.=<<___;
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[$i]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	pand	$twmask,$twres			# isolate carry and residue
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	pxor	$twres,@tweak[5]
-___
-    }
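-    # The pshufd/paddq/pand/pxor sequence above doubles the 128-bit tweak
-    # in GF(2^128) modulo x^128+x^7+x^2+x+1 (the .Lxts_magic constant).
-    # A reference sketch of the same update on two 64-bit halves follows;
-    # it is illustrative only and is not used by the generator:
-    sub xts_double_ref {
-        my ($lo, $hi) = @_;
-        my $carry = $hi >> 63;			# bit shifted out at the top
-        $hi = (($hi << 1) | ($lo >> 63)) & 0xffffffffffffffff;
-        $lo = (($lo << 1) & 0xffffffffffffffff) ^ ($carry ? 0x87 : 0);
-        return ($lo, $hi);			# residue folded back into the low byte
-    }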
-$code.=<<___;
-	sub	\$16*6,$len
-	jc	.Lxts_dec_short
-
-	shr	\$1,$rounds
-	sub	\$1,$rounds
-	mov	$rounds,$rnds_
-	jmp	.Lxts_dec_grandloop
-
-.align	16
-.Lxts_dec_grandloop:
-	pshufd	\$0x13,$twtmp,$twres
-	movdqa	@tweak[5],@tweak[4]
-	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
-	movdqu	`16*0`($inp),$inout0		# load input
-	pand	$twmask,$twres			# isolate carry and residue
-	movdqu	`16*1`($inp),$inout1
-	pxor	$twres,@tweak[5]
-
-	movdqu	`16*2`($inp),$inout2
-	pxor	@tweak[0],$inout0		# input^=tweak
-	movdqu	`16*3`($inp),$inout3
-	pxor	@tweak[1],$inout1
-	movdqu	`16*4`($inp),$inout4
-	pxor	@tweak[2],$inout2
-	movdqu	`16*5`($inp),$inout5
-	lea	`16*6`($inp),$inp
-	pxor	@tweak[3],$inout3
-	$movkey		($key_),$rndkey0
-	pxor	@tweak[4],$inout4
-	pxor	@tweak[5],$inout5
-
-	# inline _aesni_decrypt6 and interleave first and last rounds
-	# with our own code...
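-	# (the tweak updates are interleaved with the aesdec rounds below so
-	# their shuffle/ALU work can overlap the AES round latency)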
-	$movkey		16($key_),$rndkey1
-	pxor		$rndkey0,$inout0
-	pxor		$rndkey0,$inout1
-	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks
-	aesdec		$rndkey1,$inout0
-	lea		32($key_),$key
-	pxor		$rndkey0,$inout2
-	 movdqa	@tweak[1],`16*1`(%rsp)
-	aesdec		$rndkey1,$inout1
-	pxor		$rndkey0,$inout3
-	 movdqa	@tweak[2],`16*2`(%rsp)
-	aesdec		$rndkey1,$inout2
-	pxor		$rndkey0,$inout4
-	 movdqa	@tweak[3],`16*3`(%rsp)
-	aesdec		$rndkey1,$inout3
-	pxor		$rndkey0,$inout5
-	$movkey		($key),$rndkey0
-	dec		$rounds
-	 movdqa	@tweak[4],`16*4`(%rsp)
-	aesdec		$rndkey1,$inout4
-	 movdqa	@tweak[5],`16*5`(%rsp)
-	aesdec		$rndkey1,$inout5
-	pxor	$twtmp,$twtmp
-	pcmpgtd	@tweak[5],$twtmp
-	jmp		.Lxts_dec_loop6_enter
-
-.align	16
-.Lxts_dec_loop6:
-	aesdec		$rndkey1,$inout0
-	aesdec		$rndkey1,$inout1
-	dec		$rounds
-	aesdec		$rndkey1,$inout2
-	aesdec		$rndkey1,$inout3
-	aesdec		$rndkey1,$inout4
-	aesdec		$rndkey1,$inout5
-.Lxts_dec_loop6_enter:
-	$movkey		16($key),$rndkey1
-	aesdec		$rndkey0,$inout0
-	aesdec		$rndkey0,$inout1
-	lea		32($key),$key
-	aesdec		$rndkey0,$inout2
-	aesdec		$rndkey0,$inout3
-	aesdec		$rndkey0,$inout4
-	aesdec		$rndkey0,$inout5
-	$movkey		($key),$rndkey0
-	jnz		.Lxts_dec_loop6
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 aesdec		$rndkey1,$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 aesdec		$rndkey1,$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	 aesdec		$rndkey1,$inout2
-	pxor	$twres,@tweak[5]
-	 aesdec		$rndkey1,$inout3
-	 aesdec		$rndkey1,$inout4
-	 aesdec		$rndkey1,$inout5
-	 $movkey	16($key),$rndkey1
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[0]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 aesdec		$rndkey0,$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 aesdec		$rndkey0,$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	 aesdec		$rndkey0,$inout2
-	pxor	$twres,@tweak[5]
-	 aesdec		$rndkey0,$inout3
-	 aesdec		$rndkey0,$inout4
-	 aesdec		$rndkey0,$inout5
-	 $movkey	32($key),$rndkey0
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[1]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 aesdec		$rndkey1,$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 aesdec		$rndkey1,$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	 aesdec		$rndkey1,$inout2
-	pxor	$twres,@tweak[5]
-	 aesdec		$rndkey1,$inout3
-	 aesdec		$rndkey1,$inout4
-	 aesdec		$rndkey1,$inout5
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[2]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 aesdeclast	$rndkey0,$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 aesdeclast	$rndkey0,$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	 aesdeclast	$rndkey0,$inout2
-	pxor	$twres,@tweak[5]
-	 aesdeclast	$rndkey0,$inout3
-	 aesdeclast	$rndkey0,$inout4
-	 aesdeclast	$rndkey0,$inout5
-
-	pshufd	\$0x13,$twtmp,$twres
-	pxor	$twtmp,$twtmp
-	movdqa	@tweak[5],@tweak[3]
-	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
-	 xorps	`16*0`(%rsp),$inout0		# output^=tweak
-	pand	$twmask,$twres			# isolate carry and residue
-	 xorps	`16*1`(%rsp),$inout1
-	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
-	pxor	$twres,@tweak[5]
-
-	xorps	`16*2`(%rsp),$inout2
-	movups	$inout0,`16*0`($out)		# write output
-	xorps	`16*3`(%rsp),$inout3
-	movups	$inout1,`16*1`($out)
-	xorps	`16*4`(%rsp),$inout4
-	movups	$inout2,`16*2`($out)
-	xorps	`16*5`(%rsp),$inout5
-	movups	$inout3,`16*3`($out)
-	mov	$rnds_,$rounds			# restore $rounds
-	movups	$inout4,`16*4`($out)
-	movups	$inout5,`16*5`($out)
-	lea	`16*6`($out),$out
-	sub	\$16*6,$len
-	jnc	.Lxts_dec_grandloop
-
-	lea	3($rounds,$rounds),$rounds	# restore original value
-	mov	$key_,$key			# restore $key
-	mov	$rounds,$rnds_			# backup $rounds
-
-.Lxts_dec_short:
-	add	\$16*6,$len
-	jz	.Lxts_dec_done
-
-	cmp	\$0x20,$len
-	jb	.Lxts_dec_one
-	je	.Lxts_dec_two
-
-	cmp	\$0x40,$len
-	jb	.Lxts_dec_three
-	je	.Lxts_dec_four
-
-	pshufd	\$0x13,$twtmp,$twres
-	movdqa	@tweak[5],@tweak[4]
-	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
-	 movdqu	($inp),$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 movdqu	16*1($inp),$inout1
-	pxor	$twres,@tweak[5]
-
-	movdqu	16*2($inp),$inout2
-	pxor	@tweak[0],$inout0
-	movdqu	16*3($inp),$inout3
-	pxor	@tweak[1],$inout1
-	movdqu	16*4($inp),$inout4
-	lea	16*5($inp),$inp
-	pxor	@tweak[2],$inout2
-	pxor	@tweak[3],$inout3
-	pxor	@tweak[4],$inout4
-
-	call	_aesni_decrypt6
-
-	xorps	@tweak[0],$inout0
-	xorps	@tweak[1],$inout1
-	xorps	@tweak[2],$inout2
-	movdqu	$inout0,($out)
-	xorps	@tweak[3],$inout3
-	movdqu	$inout1,16*1($out)
-	xorps	@tweak[4],$inout4
-	movdqu	$inout2,16*2($out)
-	 pxor		$twtmp,$twtmp
-	movdqu	$inout3,16*3($out)
-	 pcmpgtd	@tweak[5],$twtmp
-	movdqu	$inout4,16*4($out)
-	lea	16*5($out),$out
-	 pshufd		\$0x13,$twtmp,@tweak[1]	# $twres
-	and	\$15,$len_
-	jz	.Lxts_dec_ret
-
-	movdqa	@tweak[5],@tweak[0]
-	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
-	pand	$twmask,@tweak[1]		# isolate carry and residue
-	pxor	@tweak[5],@tweak[1]
-	jmp	.Lxts_dec_done2
-
-.align	16
-.Lxts_dec_one:
-	movups	($inp),$inout0
-	lea	16*1($inp),$inp
-	xorps	@tweak[0],$inout0
-___
-	&aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
-	xorps	@tweak[0],$inout0
-	movdqa	@tweak[1],@tweak[0]
-	movups	$inout0,($out)
-	movdqa	@tweak[2],@tweak[1]
-	lea	16*1($out),$out
-	jmp	.Lxts_dec_done
-
-.align	16
-.Lxts_dec_two:
-	movups	($inp),$inout0
-	movups	16($inp),$inout1
-	lea	32($inp),$inp
-	xorps	@tweak[0],$inout0
-	xorps	@tweak[1],$inout1
-
-	call	_aesni_decrypt3
-
-	xorps	@tweak[0],$inout0
-	movdqa	@tweak[2],@tweak[0]
-	xorps	@tweak[1],$inout1
-	movdqa	@tweak[3],@tweak[1]
-	movups	$inout0,($out)
-	movups	$inout1,16*1($out)
-	lea	16*2($out),$out
-	jmp	.Lxts_dec_done
-
-.align	16
-.Lxts_dec_three:
-	movups	($inp),$inout0
-	movups	16*1($inp),$inout1
-	movups	16*2($inp),$inout2
-	lea	16*3($inp),$inp
-	xorps	@tweak[0],$inout0
-	xorps	@tweak[1],$inout1
-	xorps	@tweak[2],$inout2
-
-	call	_aesni_decrypt3
-
-	xorps	@tweak[0],$inout0
-	movdqa	@tweak[3],@tweak[0]
-	xorps	@tweak[1],$inout1
-	movdqa	@tweak[5],@tweak[1]
-	xorps	@tweak[2],$inout2
-	movups	$inout0,($out)
-	movups	$inout1,16*1($out)
-	movups	$inout2,16*2($out)
-	lea	16*3($out),$out
-	jmp	.Lxts_dec_done
-
-.align	16
-.Lxts_dec_four:
-	pshufd	\$0x13,$twtmp,$twres
-	movdqa	@tweak[5],@tweak[4]
-	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
-	 movups	($inp),$inout0
-	pand	$twmask,$twres			# isolate carry and residue
-	 movups	16*1($inp),$inout1
-	pxor	$twres,@tweak[5]
-
-	movups	16*2($inp),$inout2
-	xorps	@tweak[0],$inout0
-	movups	16*3($inp),$inout3
-	lea	16*4($inp),$inp
-	xorps	@tweak[1],$inout1
-	xorps	@tweak[2],$inout2
-	xorps	@tweak[3],$inout3
-
-	call	_aesni_decrypt4
-
-	xorps	@tweak[0],$inout0
-	movdqa	@tweak[4],@tweak[0]
-	xorps	@tweak[1],$inout1
-	movdqa	@tweak[5],@tweak[1]
-	xorps	@tweak[2],$inout2
-	movups	$inout0,($out)
-	xorps	@tweak[3],$inout3
-	movups	$inout1,16*1($out)
-	movups	$inout2,16*2($out)
-	movups	$inout3,16*3($out)
-	lea	16*4($out),$out
-	jmp	.Lxts_dec_done
-
-.align	16
-.Lxts_dec_done:
-	and	\$15,$len_
-	jz	.Lxts_dec_ret
-.Lxts_dec_done2:
-	mov	$len_,$len
-	mov	$key_,$key			# restore $key
-	mov	$rnds_,$rounds			# restore $rounds
-
-	movups	($inp),$inout0
-	xorps	@tweak[1],$inout0
-___
-	&aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
-	xorps	@tweak[1],$inout0
-	movups	$inout0,($out)
-
-.Lxts_dec_steal:
-	movzb	16($inp),%eax			# borrow $rounds ...
-	movzb	($out),%ecx			# ... and $key
-	lea	1($inp),$inp
-	mov	%al,($out)
-	mov	%cl,16($out)
-	lea	1($out),$out
-	sub	\$1,$len
-	jnz	.Lxts_dec_steal
-
-	sub	$len_,$out			# rewind $out
-	mov	$key_,$key			# restore $key
-	mov	$rnds_,$rounds			# restore $rounds
-
-	movups	($out),$inout0
-	xorps	@tweak[0],$inout0
-___
-	&aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
-	xorps	@tweak[0],$inout0
-	movups	$inout0,($out)
-
-.Lxts_dec_ret:
-___
-$code.=<<___ if ($win64);
-	movaps	0x60(%rsp),%xmm6
-	movaps	0x70(%rsp),%xmm7
-	movaps	0x80(%rsp),%xmm8
-	movaps	0x90(%rsp),%xmm9
-	movaps	0xa0(%rsp),%xmm10
-	movaps	0xb0(%rsp),%xmm11
-	movaps	0xc0(%rsp),%xmm12
-	movaps	0xd0(%rsp),%xmm13
-	movaps	0xe0(%rsp),%xmm14
-	movaps	0xf0(%rsp),%xmm15
-___
-$code.=<<___;
-	lea	$frame_size(%rsp),%rsp
-.Lxts_dec_epilogue:
-	ret
-.size	aesni_xts_decrypt,.-aesni_xts_decrypt
-___
-} }}
-
-########################################################################
-# void $PREFIX_cbc_encrypt (const void *inp, void *out,
-#			    size_t length, const AES_KEY *key,
-#			    unsigned char *ivp,const int enc);
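-#
-# A reference model of the chaining implemented below (encrypt direction);
-# $cipher stands in for single-block AES encryption, and the sub is an
-# illustrative sketch only, never called by the generator:
-sub cbc_encrypt_ref {
-    my ($cipher, $iv, @blocks) = @_;	# $cipher: coderef over one 16-byte block
-    my @out;
-    for my $b (@blocks) {
-        $iv = $cipher->($b ^ $iv);	# chain: each ciphertext is the next IV
-        push @out, $iv;
-    }
-    return @out;
-}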
-{
-my $reserved = $win64?0x40:-0x18;	# used in decrypt
-$code.=<<___;
-.globl	${PREFIX}_cbc_encrypt
-.type	${PREFIX}_cbc_encrypt,\@function,6
-.align	16
-${PREFIX}_cbc_encrypt:
-	test	$len,$len		# check length
-	jz	.Lcbc_ret
-
-	mov	240($key),$rnds_	# key->rounds
-	mov	$key,$key_		# backup $key
-	test	%r9d,%r9d		# 6th argument
-	jz	.Lcbc_decrypt
-#--------------------------- CBC ENCRYPT ------------------------------#
-	movups	($ivp),$inout0		# load iv as initial state
-	mov	$rnds_,$rounds
-	cmp	\$16,$len
-	jb	.Lcbc_enc_tail
-	sub	\$16,$len
-	jmp	.Lcbc_enc_loop
-.align	16
-.Lcbc_enc_loop:
-	movups	($inp),$inout1		# load input
-	lea	16($inp),$inp
-	#xorps	$inout1,$inout0
-___
-	&aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
-$code.=<<___;
-	mov	$rnds_,$rounds		# restore $rounds
-	mov	$key_,$key		# restore $key
-	movups	$inout0,0($out)		# store output
-	lea	16($out),$out
-	sub	\$16,$len
-	jnc	.Lcbc_enc_loop
-	add	\$16,$len
-	jnz	.Lcbc_enc_tail
-	movups	$inout0,($ivp)
-	jmp	.Lcbc_ret
-
-.Lcbc_enc_tail:
-	mov	$len,%rcx	# zaps $key
-	xchg	$inp,$out	# $inp is %rsi and $out is %rdi now
-	.long	0x9066A4F3	# rep movsb
-	mov	\$16,%ecx	# zero tail
-	sub	$len,%rcx
-	xor	%eax,%eax
-	.long	0x9066AAF3	# rep stosb
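-	# (each .long above encodes the string instruction plus a 66 90
-	# two-byte nop: f3 a4 = rep movsb, f3 aa = rep stosb)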
-	lea	-16(%rdi),%rdi	# rewind $out by 1 block
-	mov	$rnds_,$rounds	# restore $rounds
-	mov	%rdi,%rsi	# $inp and $out are the same
-	mov	$key_,$key	# restore $key
-	xor	$len,$len	# len=16
-	jmp	.Lcbc_enc_loop	# one more spin
-#--------------------------- CBC DECRYPT ------------------------------#
-.align	16
-.Lcbc_decrypt:
-___
-$code.=<<___ if ($win64);
-	lea	-0x58(%rsp),%rsp
-	movaps	%xmm6,(%rsp)
-	movaps	%xmm7,0x10(%rsp)
-	movaps	%xmm8,0x20(%rsp)
-	movaps	%xmm9,0x30(%rsp)
-.Lcbc_decrypt_body:
-___
-$code.=<<___;
-	movups	($ivp),$iv
-	mov	$rnds_,$rounds
-	cmp	\$0x70,$len
-	jbe	.Lcbc_dec_tail
-	shr	\$1,$rnds_
-	sub	\$0x70,$len
-	mov	$rnds_,$rounds
-	movaps	$iv,$reserved(%rsp)
-	jmp	.Lcbc_dec_loop8_enter
-.align	16
-.Lcbc_dec_loop8:
-	movaps	$rndkey0,$reserved(%rsp)	# save IV
-	movups	$inout7,($out)
-	lea	0x10($out),$out
-.Lcbc_dec_loop8_enter:
-	$movkey		($key),$rndkey0
-	movups	($inp),$inout0			# load input
-	movups	0x10($inp),$inout1
-	$movkey		16($key),$rndkey1
-
-	lea		32($key),$key
-	movdqu	0x20($inp),$inout2
-	xorps		$rndkey0,$inout0
-	movdqu	0x30($inp),$inout3
-	xorps		$rndkey0,$inout1
-	movdqu	0x40($inp),$inout4
-	aesdec		$rndkey1,$inout0
-	pxor		$rndkey0,$inout2
-	movdqu	0x50($inp),$inout5
-	aesdec		$rndkey1,$inout1
-	pxor		$rndkey0,$inout3
-	movdqu	0x60($inp),$inout6
-	aesdec		$rndkey1,$inout2
-	pxor		$rndkey0,$inout4
-	movdqu	0x70($inp),$inout7
-	aesdec		$rndkey1,$inout3
-	pxor		$rndkey0,$inout5
-	dec		$rounds
-	aesdec		$rndkey1,$inout4
-	pxor		$rndkey0,$inout6
-	aesdec		$rndkey1,$inout5
-	pxor		$rndkey0,$inout7
-	$movkey		($key),$rndkey0
-	aesdec		$rndkey1,$inout6
-	aesdec		$rndkey1,$inout7
-	$movkey		16($key),$rndkey1
-
-	call		.Ldec_loop8_enter
-
-	movups	($inp),$rndkey1		# re-load input
-	movups	0x10($inp),$rndkey0
-	xorps	$reserved(%rsp),$inout0	# ^= IV
-	xorps	$rndkey1,$inout1
-	movups	0x20($inp),$rndkey1
-	xorps	$rndkey0,$inout2
-	movups	0x30($inp),$rndkey0
-	xorps	$rndkey1,$inout3
-	movups	0x40($inp),$rndkey1
-	xorps	$rndkey0,$inout4
-	movups	0x50($inp),$rndkey0
-	xorps	$rndkey1,$inout5
-	movups	0x60($inp),$rndkey1
-	xorps	$rndkey0,$inout6
-	movups	0x70($inp),$rndkey0	# IV
-	xorps	$rndkey1,$inout7
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	mov	$rnds_,$rounds		# restore $rounds
-	movups	$inout4,0x40($out)
-	mov	$key_,$key		# restore $key
-	movups	$inout5,0x50($out)
-	lea	0x80($inp),$inp
-	movups	$inout6,0x60($out)
-	lea	0x70($out),$out
-	sub	\$0x80,$len
-	ja	.Lcbc_dec_loop8
-
-	movaps	$inout7,$inout0
-	movaps	$rndkey0,$iv
-	add	\$0x70,$len
-	jle	.Lcbc_dec_tail_collected
-	movups	$inout0,($out)
-	lea	1($rnds_,$rnds_),$rounds
-	lea	0x10($out),$out
-.Lcbc_dec_tail:
-	movups	($inp),$inout0
-	movaps	$inout0,$in0
-	cmp	\$0x10,$len
-	jbe	.Lcbc_dec_one
-
-	movups	0x10($inp),$inout1
-	movaps	$inout1,$in1
-	cmp	\$0x20,$len
-	jbe	.Lcbc_dec_two
-
-	movups	0x20($inp),$inout2
-	movaps	$inout2,$in2
-	cmp	\$0x30,$len
-	jbe	.Lcbc_dec_three
-
-	movups	0x30($inp),$inout3
-	cmp	\$0x40,$len
-	jbe	.Lcbc_dec_four
-
-	movups	0x40($inp),$inout4
-	cmp	\$0x50,$len
-	jbe	.Lcbc_dec_five
-
-	movups	0x50($inp),$inout5
-	cmp	\$0x60,$len
-	jbe	.Lcbc_dec_six
-
-	movups	0x60($inp),$inout6
-	movaps	$iv,$reserved(%rsp)	# save IV
-	call	_aesni_decrypt8
-	movups	($inp),$rndkey1
-	movups	0x10($inp),$rndkey0
-	xorps	$reserved(%rsp),$inout0	# ^= IV
-	xorps	$rndkey1,$inout1
-	movups	0x20($inp),$rndkey1
-	xorps	$rndkey0,$inout2
-	movups	0x30($inp),$rndkey0
-	xorps	$rndkey1,$inout3
-	movups	0x40($inp),$rndkey1
-	xorps	$rndkey0,$inout4
-	movups	0x50($inp),$rndkey0
-	xorps	$rndkey1,$inout5
-	movups	0x60($inp),$iv		# IV
-	xorps	$rndkey0,$inout6
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	movups	$inout5,0x50($out)
-	lea	0x60($out),$out
-	movaps	$inout6,$inout0
-	sub	\$0x70,$len
-	jmp	.Lcbc_dec_tail_collected
-.align	16
-.Lcbc_dec_one:
-___
-	&aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
-	xorps	$iv,$inout0
-	movaps	$in0,$iv
-	sub	\$0x10,$len
-	jmp	.Lcbc_dec_tail_collected
-.align	16
-.Lcbc_dec_two:
-	xorps	$inout2,$inout2
-	call	_aesni_decrypt3
-	xorps	$iv,$inout0
-	xorps	$in0,$inout1
-	movups	$inout0,($out)
-	movaps	$in1,$iv
-	movaps	$inout1,$inout0
-	lea	0x10($out),$out
-	sub	\$0x20,$len
-	jmp	.Lcbc_dec_tail_collected
-.align	16
-.Lcbc_dec_three:
-	call	_aesni_decrypt3
-	xorps	$iv,$inout0
-	xorps	$in0,$inout1
-	movups	$inout0,($out)
-	xorps	$in1,$inout2
-	movups	$inout1,0x10($out)
-	movaps	$in2,$iv
-	movaps	$inout2,$inout0
-	lea	0x20($out),$out
-	sub	\$0x30,$len
-	jmp	.Lcbc_dec_tail_collected
-.align	16
-.Lcbc_dec_four:
-	call	_aesni_decrypt4
-	xorps	$iv,$inout0
-	movups	0x30($inp),$iv
-	xorps	$in0,$inout1
-	movups	$inout0,($out)
-	xorps	$in1,$inout2
-	movups	$inout1,0x10($out)
-	xorps	$in2,$inout3
-	movups	$inout2,0x20($out)
-	movaps	$inout3,$inout0
-	lea	0x30($out),$out
-	sub	\$0x40,$len
-	jmp	.Lcbc_dec_tail_collected
-.align	16
-.Lcbc_dec_five:
-	xorps	$inout5,$inout5
-	call	_aesni_decrypt6
-	movups	0x10($inp),$rndkey1
-	movups	0x20($inp),$rndkey0
-	xorps	$iv,$inout0
-	xorps	$in0,$inout1
-	xorps	$rndkey1,$inout2
-	movups	0x30($inp),$rndkey1
-	xorps	$rndkey0,$inout3
-	movups	0x40($inp),$iv
-	xorps	$rndkey1,$inout4
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	lea	0x40($out),$out
-	movaps	$inout4,$inout0
-	sub	\$0x50,$len
-	jmp	.Lcbc_dec_tail_collected
-.align	16
-.Lcbc_dec_six:
-	call	_aesni_decrypt6
-	movups	0x10($inp),$rndkey1
-	movups	0x20($inp),$rndkey0
-	xorps	$iv,$inout0
-	xorps	$in0,$inout1
-	xorps	$rndkey1,$inout2
-	movups	0x30($inp),$rndkey1
-	xorps	$rndkey0,$inout3
-	movups	0x40($inp),$rndkey0
-	xorps	$rndkey1,$inout4
-	movups	0x50($inp),$iv
-	xorps	$rndkey0,$inout5
-	movups	$inout0,($out)
-	movups	$inout1,0x10($out)
-	movups	$inout2,0x20($out)
-	movups	$inout3,0x30($out)
-	movups	$inout4,0x40($out)
-	lea	0x50($out),$out
-	movaps	$inout5,$inout0
-	sub	\$0x60,$len
-	jmp	.Lcbc_dec_tail_collected
-.align	16
-.Lcbc_dec_tail_collected:
-	and	\$15,$len
-	movups	$iv,($ivp)
-	jnz	.Lcbc_dec_tail_partial
-	movups	$inout0,($out)
-	jmp	.Lcbc_dec_ret
-.align	16
-.Lcbc_dec_tail_partial:
-	movaps	$inout0,$reserved(%rsp)
-	mov	\$16,%rcx
-	mov	$out,%rdi
-	sub	$len,%rcx
-	lea	$reserved(%rsp),%rsi
-	.long	0x9066A4F3	# rep movsb
-
-.Lcbc_dec_ret:
-___
-$code.=<<___ if ($win64);
-	movaps	(%rsp),%xmm6
-	movaps	0x10(%rsp),%xmm7
-	movaps	0x20(%rsp),%xmm8
-	movaps	0x30(%rsp),%xmm9
-	lea	0x58(%rsp),%rsp
-___
-$code.=<<___;
-.Lcbc_ret:
-	ret
-.size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
-___
-} 
-# int $PREFIX_set_[en|de]crypt_key (const unsigned char *userKey,
-#				int bits, AES_KEY *key)
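-#
-# The decrypt-key path derives the "equivalent inverse cipher" schedule:
-# run the encrypt key expansion, swap the first and last round keys, and
-# apply InvMixColumns (aesimc) to every round key in between. In outline,
-# assuming an aesimc() primitive:
-#
-#	@rk = reverse(@rk);
-#	$rk[$_] = aesimc($rk[$_]) for (1 .. $#rk-1);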
-{ my ($inp,$bits,$key) = @_4args;
-  $bits =~ s/%r/%e/;
-
-$code.=<<___;
-.globl	${PREFIX}_set_decrypt_key
-.type	${PREFIX}_set_decrypt_key,\@abi-omnipotent
-.align	16
-${PREFIX}_set_decrypt_key:
-	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
-	call	__aesni_set_encrypt_key
-	shl	\$4,$bits		# rounds-1 after _aesni_set_encrypt_key
-	test	%eax,%eax
-	jnz	.Ldec_key_ret
-	lea	16($key,$bits),$inp	# points at the end of key schedule
-
-	$movkey	($key),%xmm0		# just swap
-	$movkey	($inp),%xmm1
-	$movkey	%xmm0,($inp)
-	$movkey	%xmm1,($key)
-	lea	16($key),$key
-	lea	-16($inp),$inp
-
-.Ldec_key_inverse:
-	$movkey	($key),%xmm0		# swap and inverse
-	$movkey	($inp),%xmm1
-	aesimc	%xmm0,%xmm0
-	aesimc	%xmm1,%xmm1
-	lea	16($key),$key
-	lea	-16($inp),$inp
-	$movkey	%xmm0,16($inp)
-	$movkey	%xmm1,-16($key)
-	cmp	$key,$inp
-	ja	.Ldec_key_inverse
-
-	$movkey	($key),%xmm0		# inverse middle
-	aesimc	%xmm0,%xmm0
-	$movkey	%xmm0,($inp)
-.Ldec_key_ret:
-	add	\$8,%rsp
-	ret
-.LSEH_end_set_decrypt_key:
-.size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
-___
-
-# This is based on a submission by
-#
-#	Huang Ying <[email protected]>
-#	Vinodh Gopal <[email protected]>
-#	Kahraman Akdemir
-#
-# Aggressively optimized with respect to aeskeygenassist's critical path;
-# the code is contained in %xmm0-5 to meet the Win64 ABI requirement.
-#
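-# The shufps/xorps sequences in .Lkey_expansion_128* below implement the
-# classic word recurrence, computing the prefix-xor via %xmm4 (whose low
-# dword stays zero). A scalar reference sketch, illustrative only, where
-# $t stands for SubWord(RotWord(w3))^rcon as produced by aeskeygenassist:
-sub expand128_step_ref {
-    my ($t, @w) = @_;			# @w: four 32-bit words of previous key
-    my @n = ($w[0] ^ $t);		# n0 = w0 ^ t
-    push @n, $w[$_] ^ $n[$_-1] for (1 .. 3);	# ni = wi ^ n(i-1)
-    return @n;
-}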
-$code.=<<___;
-.globl	${PREFIX}_set_encrypt_key
-.type	${PREFIX}_set_encrypt_key,\@abi-omnipotent
-.align	16
-${PREFIX}_set_encrypt_key:
-__aesni_set_encrypt_key:
-	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
-	mov	\$-1,%rax
-	test	$inp,$inp
-	jz	.Lenc_key_ret
-	test	$key,$key
-	jz	.Lenc_key_ret
-
-	movups	($inp),%xmm0		# pull first 128 bits of *userKey
-	xorps	%xmm4,%xmm4		# low dword of xmm4 is assumed 0
-	lea	16($key),%rax
-	cmp	\$256,$bits
-	je	.L14rounds
-	cmp	\$192,$bits
-	je	.L12rounds
-	cmp	\$128,$bits
-	jne	.Lbad_keybits
-
-.L10rounds:
-	mov	\$9,$bits			# 10 rounds for 128-bit key
-	$movkey	%xmm0,($key)			# round 0
-	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 1
-	call		.Lkey_expansion_128_cold
-	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 2
-	call		.Lkey_expansion_128
-	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 3
-	call		.Lkey_expansion_128
-	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 4
-	call		.Lkey_expansion_128
-	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 5
-	call		.Lkey_expansion_128
-	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 6
-	call		.Lkey_expansion_128
-	aeskeygenassist	\$0x40,%xmm0,%xmm1	# round 7
-	call		.Lkey_expansion_128
-	aeskeygenassist	\$0x80,%xmm0,%xmm1	# round 8
-	call		.Lkey_expansion_128
-	aeskeygenassist	\$0x1b,%xmm0,%xmm1	# round 9
-	call		.Lkey_expansion_128
-	aeskeygenassist	\$0x36,%xmm0,%xmm1	# round 10
-	call		.Lkey_expansion_128
-	$movkey	%xmm0,(%rax)
-	mov	$bits,80(%rax)	# 240(%rdx)
-	xor	%eax,%eax
-	jmp	.Lenc_key_ret
-
-.align	16
-.L12rounds:
-	movq	16($inp),%xmm2			# remaining 1/3 of *userKey
-	mov	\$11,$bits			# 12 rounds for 192
-	$movkey	%xmm0,($key)			# round 0
-	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 1,2
-	call		.Lkey_expansion_192a_cold
-	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 2,3
-	call		.Lkey_expansion_192b
-	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 4,5
-	call		.Lkey_expansion_192a
-	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 5,6
-	call		.Lkey_expansion_192b
-	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 7,8
-	call		.Lkey_expansion_192a
-	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 8,9
-	call		.Lkey_expansion_192b
-	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 10,11
-	call		.Lkey_expansion_192a
-	aeskeygenassist	\$0x80,%xmm2,%xmm1	# round 11,12
-	call		.Lkey_expansion_192b
-	$movkey	%xmm0,(%rax)
-	mov	$bits,48(%rax)	# 240(%rdx)
-	xor	%rax, %rax
-	jmp	.Lenc_key_ret
-
-.align	16
-.L14rounds:
-	movups	16($inp),%xmm2			# remaining half of *userKey
-	mov	\$13,$bits			# 14 rounds for 256
-	lea	16(%rax),%rax
-	$movkey	%xmm0,($key)			# round 0
-	$movkey	%xmm2,16($key)			# round 1
-	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 2
-	call		.Lkey_expansion_256a_cold
-	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 3
-	call		.Lkey_expansion_256b
-	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 4
-	call		.Lkey_expansion_256a
-	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 5
-	call		.Lkey_expansion_256b
-	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 6
-	call		.Lkey_expansion_256a
-	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 7
-	call		.Lkey_expansion_256b
-	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 8
-	call		.Lkey_expansion_256a
-	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 9
-	call		.Lkey_expansion_256b
-	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 10
-	call		.Lkey_expansion_256a
-	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 11
-	call		.Lkey_expansion_256b
-	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 12
-	call		.Lkey_expansion_256a
-	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 13
-	call		.Lkey_expansion_256b
-	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 14
-	call		.Lkey_expansion_256a
-	$movkey	%xmm0,(%rax)
-	mov	$bits,16(%rax)	# 240(%rdx)
-	xor	%rax,%rax
-	jmp	.Lenc_key_ret
-
-.align	16
-.Lbad_keybits:
-	mov	\$-2,%rax
-.Lenc_key_ret:
-	add	\$8,%rsp
-	ret
-.LSEH_end_set_encrypt_key:
-
-.align	16
-.Lkey_expansion_128:
-	$movkey	%xmm0,(%rax)
-	lea	16(%rax),%rax
-.Lkey_expansion_128_cold:
-	shufps	\$0b00010000,%xmm0,%xmm4
-	xorps	%xmm4, %xmm0
-	shufps	\$0b10001100,%xmm0,%xmm4
-	xorps	%xmm4, %xmm0
-	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
-	xorps	%xmm1,%xmm0
-	ret
-
-.align 16
-.Lkey_expansion_192a:
-	$movkey	%xmm0,(%rax)
-	lea	16(%rax),%rax
-.Lkey_expansion_192a_cold:
-	movaps	%xmm2, %xmm5
-.Lkey_expansion_192b_warm:
-	shufps	\$0b00010000,%xmm0,%xmm4
-	movdqa	%xmm2,%xmm3
-	xorps	%xmm4,%xmm0
-	shufps	\$0b10001100,%xmm0,%xmm4
-	pslldq	\$4,%xmm3
-	xorps	%xmm4,%xmm0
-	pshufd	\$0b01010101,%xmm1,%xmm1	# critical path
-	pxor	%xmm3,%xmm2
-	pxor	%xmm1,%xmm0
-	pshufd	\$0b11111111,%xmm0,%xmm3
-	pxor	%xmm3,%xmm2
-	ret
-
-.align 16
-.Lkey_expansion_192b:
-	movaps	%xmm0,%xmm3
-	shufps	\$0b01000100,%xmm0,%xmm5
-	$movkey	%xmm5,(%rax)
-	shufps	\$0b01001110,%xmm2,%xmm3
-	$movkey	%xmm3,16(%rax)
-	lea	32(%rax),%rax
-	jmp	.Lkey_expansion_192b_warm
-
-.align	16
-.Lkey_expansion_256a:
-	$movkey	%xmm2,(%rax)
-	lea	16(%rax),%rax
-.Lkey_expansion_256a_cold:
-	shufps	\$0b00010000,%xmm0,%xmm4
-	xorps	%xmm4,%xmm0
-	shufps	\$0b10001100,%xmm0,%xmm4
-	xorps	%xmm4,%xmm0
-	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
-	xorps	%xmm1,%xmm0
-	ret
-
-.align 16
-.Lkey_expansion_256b:
-	$movkey	%xmm0,(%rax)
-	lea	16(%rax),%rax
-
-	shufps	\$0b00010000,%xmm2,%xmm4
-	xorps	%xmm4,%xmm2
-	shufps	\$0b10001100,%xmm2,%xmm4
-	xorps	%xmm4,%xmm2
-	shufps	\$0b10101010,%xmm1,%xmm1	# critical path
-	xorps	%xmm1,%xmm2
-	ret
-.size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
-.size	__aesni_set_encrypt_key,.-__aesni_set_encrypt_key
-___
-}
-
-$code.=<<___;
-.align	64
-.Lbswap_mask:
-	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
-.Lincrement32:
-	.long	6,6,6,0
-.Lincrement64:
-	.long	1,0,0,0
-.Lxts_magic:
-	.long	0x87,0,1,0
-
-.asciz  "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
-.align	64
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
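-#
-# Each handler locates the faulting RIP relative to the routine's
-# prologue/epilogue labels, copies the saved %xmm registers from the
-# stack frame back into the CONTEXT record, adjusts Rsp past the frame,
-# and chains to RtlVirtualUnwind (see .Lcommon_seh_tail).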
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern	__imp_RtlVirtualUnwind
-___
-$code.=<<___ if ($PREFIX eq "aesni");
-.type	ecb_ccm64_se_handler,\@abi-omnipotent
-.align	16
-ecb_ccm64_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# prologue label
-	cmp	%r10,%rbx		# context->Rip<prologue label
-	jb	.Lcommon_seh_tail
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lcommon_seh_tail
-
-	lea	0(%rax),%rsi		# %xmm save area
-	lea	512($context),%rdi	# &context.Xmm6
-	mov	\$8,%ecx		# 4*sizeof(%xmm0)/sizeof(%rax)
-	.long	0xa548f3fc		# cld; rep movsq
-	lea	0x58(%rax),%rax		# adjust stack pointer
-
-	jmp	.Lcommon_seh_tail
-.size	ecb_ccm64_se_handler,.-ecb_ccm64_se_handler
-
-.type	ctr32_se_handler,\@abi-omnipotent
-.align	16
-ctr32_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	lea	.Lctr32_body(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<"prologue" label
-	jb	.Lcommon_seh_tail
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	lea	.Lctr32_ret(%rip),%r10
-	cmp	%r10,%rbx
-	jae	.Lcommon_seh_tail
-
-	lea	0x20(%rax),%rsi		# %xmm save area
-	lea	512($context),%rdi	# &context.Xmm6
-	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
-	.long	0xa548f3fc		# cld; rep movsq
-	lea	0xc8(%rax),%rax		# adjust stack pointer
-
-	jmp	.Lcommon_seh_tail
-.size	ctr32_se_handler,.-ctr32_se_handler
-
-.type	xts_se_handler,\@abi-omnipotent
-.align	16
-xts_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# prologue label
-	cmp	%r10,%rbx		# context->Rip<prologue label
-	jb	.Lcommon_seh_tail
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lcommon_seh_tail
-
-	lea	0x60(%rax),%rsi		# %xmm save area
-	lea	512($context),%rdi	# &context.Xmm6
-	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
-	.long	0xa548f3fc		# cld; rep movsq
-	lea	0x68+160(%rax),%rax	# adjust stack pointer
-
-	jmp	.Lcommon_seh_tail
-.size	xts_se_handler,.-xts_se_handler
-___
-$code.=<<___;
-.type	cbc_se_handler,\@abi-omnipotent
-.align	16
-cbc_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	152($context),%rax	# pull context->Rsp
-	mov	248($context),%rbx	# pull context->Rip
-
-	lea	.Lcbc_decrypt(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<"prologue" label
-	jb	.Lcommon_seh_tail
-
-	lea	.Lcbc_decrypt_body(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<cbc_decrypt_body
-	jb	.Lrestore_cbc_rax
-
-	lea	.Lcbc_ret(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip>="epilogue" label
-	jae	.Lcommon_seh_tail
-
-	lea	0(%rax),%rsi		# top of stack
-	lea	512($context),%rdi	# &context.Xmm6
-	mov	\$8,%ecx		# 4*sizeof(%xmm0)/sizeof(%rax)
-	.long	0xa548f3fc		# cld; rep movsq
-	lea	0x58(%rax),%rax		# adjust stack pointer
-	jmp	.Lcommon_seh_tail
-
-.Lrestore_cbc_rax:
-	mov	120($context),%rax
-
-.Lcommon_seh_tail:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$154,%ecx		# sizeof(CONTEXT)
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	add	\$64,%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	cbc_se_handler,.-cbc_se_handler
-
-.section	.pdata
-.align	4
-___
-$code.=<<___ if ($PREFIX eq "aesni");
-	.rva	.LSEH_begin_aesni_ecb_encrypt
-	.rva	.LSEH_end_aesni_ecb_encrypt
-	.rva	.LSEH_info_ecb
-
-	.rva	.LSEH_begin_aesni_ccm64_encrypt_blocks
-	.rva	.LSEH_end_aesni_ccm64_encrypt_blocks
-	.rva	.LSEH_info_ccm64_enc
-
-	.rva	.LSEH_begin_aesni_ccm64_decrypt_blocks
-	.rva	.LSEH_end_aesni_ccm64_decrypt_blocks
-	.rva	.LSEH_info_ccm64_dec
-
-	.rva	.LSEH_begin_aesni_ctr32_encrypt_blocks
-	.rva	.LSEH_end_aesni_ctr32_encrypt_blocks
-	.rva	.LSEH_info_ctr32
-
-	.rva	.LSEH_begin_aesni_xts_encrypt
-	.rva	.LSEH_end_aesni_xts_encrypt
-	.rva	.LSEH_info_xts_enc
-
-	.rva	.LSEH_begin_aesni_xts_decrypt
-	.rva	.LSEH_end_aesni_xts_decrypt
-	.rva	.LSEH_info_xts_dec
-___
-$code.=<<___;
-	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
-	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
-	.rva	.LSEH_info_cbc
-
-	.rva	${PREFIX}_set_decrypt_key
-	.rva	.LSEH_end_set_decrypt_key
-	.rva	.LSEH_info_key
-
-	.rva	${PREFIX}_set_encrypt_key
-	.rva	.LSEH_end_set_encrypt_key
-	.rva	.LSEH_info_key
-.section	.xdata
-.align	8
-___
-$code.=<<___ if ($PREFIX eq "aesni");
-.LSEH_info_ecb:
-	.byte	9,0,0,0
-	.rva	ecb_ccm64_se_handler
-	.rva	.Lecb_enc_body,.Lecb_enc_ret		# HandlerData[]
-.LSEH_info_ccm64_enc:
-	.byte	9,0,0,0
-	.rva	ecb_ccm64_se_handler
-	.rva	.Lccm64_enc_body,.Lccm64_enc_ret	# HandlerData[]
-.LSEH_info_ccm64_dec:
-	.byte	9,0,0,0
-	.rva	ecb_ccm64_se_handler
-	.rva	.Lccm64_dec_body,.Lccm64_dec_ret	# HandlerData[]
-.LSEH_info_ctr32:
-	.byte	9,0,0,0
-	.rva	ctr32_se_handler
-.LSEH_info_xts_enc:
-	.byte	9,0,0,0
-	.rva	xts_se_handler
-	.rva	.Lxts_enc_body,.Lxts_enc_epilogue	# HandlerData[]
-.LSEH_info_xts_dec:
-	.byte	9,0,0,0
-	.rva	xts_se_handler
-	.rva	.Lxts_dec_body,.Lxts_dec_epilogue	# HandlerData[]
-___
-$code.=<<___;
-.LSEH_info_cbc:
-	.byte	9,0,0,0
-	.rva	cbc_se_handler
-.LSEH_info_key:
-	.byte	0x01,0x04,0x01,0x00
-	.byte	0x04,0x02,0x00,0x00	# sub rsp,8
-___
-}
-
-sub rex {
-  local *opcode=shift;
-  my ($dst,$src)=@_;
-  my $rex=0;
-
-    $rex|=0x04			if($dst>=8);
-    $rex|=0x01			if($src>=8);
-    push @opcode,$rex|0x40	if($rex);
-}
-
-sub aesni {
-  my $line=shift;
-  my @opcode=(0x66);
-
-    if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
-	rex(\@opcode,$4,$3);
-	push @opcode,0x0f,0x3a,0xdf;
-	push @opcode,0xc0|($3&7)|(($4&7)<<3);	# ModR/M
-	my $c=$2;
-	push @opcode,$c=~/^0/?oct($c):$c;
-	return ".byte\t".join(',',@opcode);
-    }
-    elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
-	my %opcodelet = (
-		"aesimc" => 0xdb,
-		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
-		"aesdec" => 0xde,	"aesdeclast" => 0xdf
-	);
-	return undef if (!defined($opcodelet{$1}));
-	rex(\@opcode,$3,$2);
-	push @opcode,0x0f,0x38,$opcodelet{$1};
-	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
-	return ".byte\t".join(',',@opcode);
-    }
-    return $line;
-}
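-# e.g. aesni("aesenc\t%xmm1,%xmm0") returns ".byte\t102,15,56,220,193",
-# i.e. 0x66,0x0f,0x38,0xdc (the AESENC opcode) plus ModR/M 0xc1 for the
-# xmm0,xmm1 pair, so the module assembles on pre-AES-NI toolchains.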
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
-
-print $code;
-
-close STDOUT;

+ 0 - 3108
drivers/builtin_openssl2/crypto/aes/asm/bsaes-x86_64.pl

@@ -1,3108 +0,0 @@
-#!/usr/bin/env perl
-
-###################################################################
-### AES-128 [originally in CTR mode]				###
-### bitsliced implementation for Intel Core 2 processors	###
-### requires support of SSE extensions up to SSSE3		###
-### Author: Emilia Käsper and Peter Schwabe			###
-### Date: 2009-03-19						###
-### Public domain						###
-###								###
-### See http://homes.esat.kuleuven.be/~ekasper/#software for	###
-### further information.					###
-###################################################################
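-#
-# In the bitsliced representation eight 16-byte blocks are transposed so
-# that each %xmm register holds one bit position of every byte; the AES
-# S-box then reduces to boolean algebra on 128-bit registers (see
-# bitslice() and swapmove() below).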
-#
-# September 2011.
-#
-# Started as a transliteration to "perlasm", the original code has
-# undergone the following changes:
-#
-# - code was made position-independent;
-# - rounds were folded into a loop, resulting in a >5x size reduction
-#   from 12.5KB to 2.2KB;
-# - the above was possible thanks to a mixcolumns() modification that
-#   allowed its output to be fed back to aesenc[last]; this was
-#   achieved at the cost of two additional inter-register moves;
-# - some instruction reordering and interleaving;
-# - this module doesn't implement a key setup subroutine; instead it
-#   relies on conversion of the "conventional" key schedule as returned
-#   by AES_set_encrypt_key (see discussion below);
-# - first and last round keys are treated differently, which made it
-#   possible to skip one shiftrows(), reduce the bit-sliced key schedule
-#   and speed up conversion by 22%;
-# - support for 192- and 256-bit keys was added;
-#
-# The resulting performance, in CPU cycles spent to encrypt one byte
-# of a 4096-byte buffer with a 128-bit key, is:
-#
-#		Emilia's	this(*)		difference
-#
-# Core 2    	9.30		8.69		+7%
-# Nehalem(**) 	7.63		6.98		+9%
-# Atom	    	17.1		17.4		-2%(***)
-#
-# (*)	The comparison is not completely fair, because "this" is ECB,
-#	i.e. no extra processing such as counter-value calculation and
-#	xor-ing of the input, as in Emilia's CTR implementation, is
-#	performed. However, the CTR calculations account for no more
-#	than 1% of total time, so the comparison is *rather* fair.
-#
-# (**)	Results were collected on Westmere, which is considered to
-#	be equivalent to Nehalem for this code.
-#
-# (***)	The slowdown on Atom is rather strange per se, because the
-#	original implementation has a number of 9+-byte instructions,
-#	which are bad for the Atom front-end and which I eliminated
-#	completely. In an attempt to address the deterioration, sbox()
-#	was tested in the FP SIMD "domain" (movaps instead of movdqa,
-#	xorps instead of pxor, etc.). While this resulted in a nominal
-#	4% improvement on Atom, it hurt Westmere by a factor of more than 2.
-#
-# As for the key schedule conversion subroutine: the interface to
-# OpenSSL relies on per-invocation on-the-fly conversion, which
-# naturally has an impact on performance, especially for short inputs.
-# Conversion time in CPU cycles, and its ratio to the CPU cycles spent
-# in the 8x block function, is:
-#
-# 		conversion	conversion/8x block
-# Core 2	240		0.22
-# Nehalem	180		0.20
-# Atom		430		0.19
-#
-# The ratio values mean that 128-byte blocks will be processed
-# 16-18% slower, 256-byte blocks 9-10%, 384-byte blocks 6-7%,
-# etc. Keep in mind also that input sizes not divisible by 128 are
-# *effectively* slower, especially the shortest ones, e.g. consecutive
-# 144-byte blocks are processed 44% slower than one would expect,
-# 272-byte ones 29%, 400-byte ones 22%, etc. Yet, despite all these
-# "shortcomings", it's still faster than [the "hyper-threading-safe"
-# code path in] aes-x86_64.pl on all lengths above 64 bytes...
-#
-# October 2011.
-#
-# Add a decryption procedure. Performance, in CPU cycles spent to decrypt
-# one byte of a 4096-byte buffer with a 128-bit key, is:
-#
-# Core 2	9.83
-# Nehalem	7.74
-# Atom		19.0
-#
-# November 2011.
-#
-# Add bsaes_xts_[en|de]crypt. Performance on inputs shorter than 80
-# bytes is suboptimal, but XTS is meant to be used with larger blocks...
-#
-#						<appro@openssl.org>
-
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-my ($inp,$out,$len,$key,$ivp)=("%rdi","%rsi","%rdx","%rcx");
-my @XMM=map("%xmm$_",(15,0..14));	# best on Atom, +10% over (0..15)
-my $ecb=0;	# suppress unreferenced ECB subroutines, spare some space...
-
-{
-my ($key,$rounds,$const)=("%rax","%r10d","%r11");
-
-sub Sbox {
-# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
-my @b=@_[0..7];
-my @t=@_[8..11];
-my @s=@_[12..15];
-	&InBasisChange	(@b);
-	&Inv_GF256	(@b[6,5,0,3,7,1,4,2],@t,@s);
-	&OutBasisChange	(@b[7,1,4,2,6,5,0,3]);
-}
-
-sub InBasisChange {
-# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb 
-my @b=@_[0..7];
-$code.=<<___;
-	pxor	@b[6], @b[5]
-	pxor	@b[1], @b[2]
-	pxor	@b[0], @b[3]
-	pxor	@b[2], @b[6]
-	pxor 	@b[0], @b[5]
-
-	pxor	@b[3], @b[6]
-	pxor	@b[7], @b[3]
-	pxor	@b[5], @b[7]
-	pxor	@b[4], @b[3]
-	pxor	@b[5], @b[4]
-	pxor	@b[1], @b[3]
-
-	pxor	@b[7], @b[2]
-	pxor	@b[5], @b[1]
-___
-}
-
-sub OutBasisChange {
-# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
-my @b=@_[0..7];
-$code.=<<___;
-	pxor	@b[6], @b[0]
-	pxor	@b[4], @b[1]
-	pxor	@b[0], @b[2]
-	pxor	@b[6], @b[4]
-	pxor	@b[1], @b[6]
-
-	pxor	@b[5], @b[1]
-	pxor	@b[3], @b[5]
-	pxor	@b[7], @b[3]
-	pxor	@b[5], @b[7]
-	pxor	@b[5], @b[2]
-
-	pxor	@b[7], @b[4]
-___
-}
-
-sub InvSbox {
-# input in lsb 	> [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb	> [b0, b1, b6, b4, b2, b7, b3, b5] < msb
-my @b=@_[0..7];
-my @t=@_[8..11];
-my @s=@_[12..15];
-	&InvInBasisChange	(@b);
-	&Inv_GF256		(@b[5,1,2,6,3,7,0,4],@t,@s);
-	&InvOutBasisChange	(@b[3,7,0,4,5,1,2,6]);
-}
-
-sub InvInBasisChange {		# OutBasisChange in reverse
-my @b=@_[5,1,2,6,3,7,0,4];
-$code.=<<___
-	pxor	@b[7], @b[4]
-
-	pxor	@b[5], @b[7]
-	pxor	@b[5], @b[2]
-	pxor	@b[7], @b[3]
-	pxor	@b[3], @b[5]
-	pxor	@b[5], @b[1]
-
-	pxor	@b[1], @b[6]
-	pxor	@b[0], @b[2]
-	pxor	@b[6], @b[4]
-	pxor	@b[6], @b[0]
-	pxor	@b[4], @b[1]
-___
-}
-
-sub InvOutBasisChange {		# InBasisChange in reverse
-my @b=@_[2,5,7,3,6,1,0,4];
-$code.=<<___;
-	pxor	@b[5], @b[1]
-	pxor	@b[7], @b[2]
-
-	pxor	@b[1], @b[3]
-	pxor	@b[5], @b[4]
-	pxor	@b[5], @b[7]
-	pxor	@b[4], @b[3]
-	 pxor 	@b[0], @b[5]
-	pxor	@b[7], @b[3]
-	 pxor	@b[2], @b[6]
-	 pxor	@b[1], @b[2]
-	pxor	@b[3], @b[6]
-
-	pxor	@b[0], @b[3]
-	pxor	@b[6], @b[5]
-___
-}
-
-sub Mul_GF4 {
-#;*************************************************************
-#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
-#;*************************************************************
-my ($x0,$x1,$y0,$y1,$t0)=@_;
-$code.=<<___;
-	movdqa	$y0, $t0
-	pxor 	$y1, $t0
-	pand	$x0, $t0
-	pxor	$x1, $x0
-	pand	$y0, $x1
-	pand	$y1, $x0
-	pxor	$x1, $x0
-	pxor	$t0, $x1
-___
-}
-
-sub Mul_GF4_N {				# not used, see next subroutine
-# multiply and scale by N
-my ($x0,$x1,$y0,$y1,$t0)=@_;
-$code.=<<___;
-	movdqa	$y0, $t0
-	pxor	$y1, $t0
-	pand	$x0, $t0
-	pxor	$x1, $x0
-	pand	$y0, $x1
-	pand	$y1, $x0
-	pxor	$x0, $x1
-	pxor	$t0, $x0
-___
-}
-
-sub Mul_GF4_N_GF4 {
-# interleaved Mul_GF4_N and Mul_GF4
-my ($x0,$x1,$y0,$y1,$t0,
-    $x2,$x3,$y2,$y3,$t1)=@_;
-$code.=<<___;
-	movdqa	$y0, $t0
-	 movdqa	$y2, $t1
-	pxor	$y1, $t0
-	 pxor 	$y3, $t1
-	pand	$x0, $t0
-	 pand	$x2, $t1
-	pxor	$x1, $x0
-	 pxor	$x3, $x2
-	pand	$y0, $x1
-	 pand	$y2, $x3
-	pand	$y1, $x0
-	 pand	$y3, $x2
-	pxor	$x0, $x1
-	 pxor	$x3, $x2
-	pxor	$t0, $x0
-	 pxor	$t1, $x3
-___
-}
-sub Mul_GF16_2 {
-my @x=@_[0..7];
-my @y=@_[8..11];
-my @t=@_[12..15];
-$code.=<<___;
-	movdqa	@x[0], @t[0]
-	movdqa	@x[1], @t[1]
-___
-	&Mul_GF4  	(@x[0], @x[1], @y[0], @y[1], @t[2]);
-$code.=<<___;
-	pxor	@x[2], @t[0]
-	pxor	@x[3], @t[1]
-	pxor	@y[2], @y[0]
-	pxor	@y[3], @y[1]
-___
-	Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
-			 @x[2], @x[3], @y[2], @y[3], @t[2]);
-$code.=<<___;
-	pxor	@t[0], @x[0]
-	pxor	@t[0], @x[2]
-	pxor	@t[1], @x[1]
-	pxor	@t[1], @x[3]
-
-	movdqa	@x[4], @t[0]
-	movdqa	@x[5], @t[1]
-	pxor	@x[6], @t[0]
-	pxor	@x[7], @t[1]
-___
-	&Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
-			 @x[6], @x[7], @y[2], @y[3], @t[2]);
-$code.=<<___;
-	pxor	@y[2], @y[0]
-	pxor	@y[3], @y[1]
-___
-	&Mul_GF4  	(@x[4], @x[5], @y[0], @y[1], @t[3]);
-$code.=<<___;
-	pxor	@t[0], @x[4]
-	pxor	@t[0], @x[6]
-	pxor	@t[1], @x[5]
-	pxor	@t[1], @x[7]
-___
-}
-sub Inv_GF256 {
-#;********************************************************************
-#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
-#;********************************************************************
-my @x=@_[0..7];
-my @t=@_[8..11];
-my @s=@_[12..15];
-# direct optimizations from hardware
-$code.=<<___;
-	movdqa	@x[4], @t[3]
-	movdqa	@x[5], @t[2]
-	movdqa	@x[1], @t[1]
-	movdqa	@x[7], @s[1]
-	movdqa	@x[0], @s[0]
-
-	pxor	@x[6], @t[3]
-	pxor	@x[7], @t[2]
-	pxor	@x[3], @t[1]
-	 movdqa	@t[3], @s[2]
-	pxor	@x[6], @s[1]
-	 movdqa	@t[2], @t[0]
-	pxor	@x[2], @s[0]
-	 movdqa	@t[3], @s[3]
-
-	por	@t[1], @t[2]
-	por	@s[0], @t[3]
-	pxor	@t[0], @s[3]
-	pand	@s[0], @s[2]
-	pxor	@t[1], @s[0]
-	pand	@t[1], @t[0]
-	pand	@s[0], @s[3]
-	movdqa	@x[3], @s[0]
-	pxor	@x[2], @s[0]
-	pand	@s[0], @s[1]
-	pxor	@s[1], @t[3]
-	pxor	@s[1], @t[2]
-	movdqa	@x[4], @s[1]
-	movdqa	@x[1], @s[0]
-	pxor	@x[5], @s[1]
-	pxor	@x[0], @s[0]
-	movdqa	@s[1], @t[1]
-	pand	@s[0], @s[1]
-	por	@s[0], @t[1]
-	pxor	@s[1], @t[0]
-	pxor	@s[3], @t[3]
-	pxor	@s[2], @t[2]
-	pxor	@s[3], @t[1]
-	movdqa	@x[7], @s[0]
-	pxor	@s[2], @t[0]
-	movdqa	@x[6], @s[1]
-	pxor	@s[2], @t[1]
-	movdqa	@x[5], @s[2]
-	pand	@x[3], @s[0]
-	movdqa	@x[4], @s[3]
-	pand	@x[2], @s[1]
-	pand	@x[1], @s[2]
-	por	@x[0], @s[3]
-	pxor	@s[0], @t[3]
-	pxor	@s[1], @t[2]
-	pxor	@s[2], @t[1]
-	pxor	@s[3], @t[0] 
-
-	#Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
-
-	# new smaller inversion
-
-	movdqa	@t[3], @s[0]
-	pand	@t[1], @t[3]
-	pxor	@t[2], @s[0]
-
-	movdqa	@t[0], @s[2]
-	movdqa	@s[0], @s[3]
-	pxor	@t[3], @s[2]
-	pand	@s[2], @s[3]
-
-	movdqa	@t[1], @s[1]
-	pxor	@t[2], @s[3]
-	pxor	@t[0], @s[1]
-
-	pxor	@t[2], @t[3]
-
-	pand	@t[3], @s[1]
-
-	movdqa	@s[2], @t[2]
-	pxor	@t[0], @s[1]
-
-	pxor	@s[1], @t[2]
-	pxor	@s[1], @t[1]
-
-	pand	@t[0], @t[2]
-
-	pxor	@t[2], @s[2]
-	pxor	@t[2], @t[1]
-
-	pand	@s[3], @s[2]
-
-	pxor	@s[0], @s[2]
-___
-# output in s3, s2, s1, t1
-
-# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
-
-# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
-	&Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
-
-### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
-}
-
-# AES linear components
-
-sub ShiftRows {
-my @x=@_[0..7];
-my $mask=pop;
-$code.=<<___;
-	pxor	0x00($key),@x[0]
-	pxor	0x10($key),@x[1]
-	pshufb	$mask,@x[0]
-	pxor	0x20($key),@x[2]
-	pshufb	$mask,@x[1]
-	pxor	0x30($key),@x[3]
-	pshufb	$mask,@x[2]
-	pxor	0x40($key),@x[4]
-	pshufb	$mask,@x[3]
-	pxor	0x50($key),@x[5]
-	pshufb	$mask,@x[4]
-	pxor	0x60($key),@x[6]
-	pshufb	$mask,@x[5]
-	pxor	0x70($key),@x[7]
-	pshufb	$mask,@x[6]
-	lea	0x80($key),$key
-	pshufb	$mask,@x[7]
-___
-}
-
-sub MixColumns {
-# modified to emit output in order suitable for feeding back to aesenc[last]
-my @x=@_[0..7];
-my @t=@_[8..15];
-my $inv=@_[16];	# optional
-$code.=<<___;
-	pshufd	\$0x93, @x[0], @t[0]	# x0 <<< 32
-	pshufd	\$0x93, @x[1], @t[1]
-	 pxor	@t[0], @x[0]		# x0 ^ (x0 <<< 32)
-	pshufd	\$0x93, @x[2], @t[2]
-	 pxor	@t[1], @x[1]
-	pshufd	\$0x93, @x[3], @t[3]
-	 pxor	@t[2], @x[2]
-	pshufd	\$0x93, @x[4], @t[4]
-	 pxor	@t[3], @x[3]
-	pshufd	\$0x93, @x[5], @t[5]
-	 pxor	@t[4], @x[4]
-	pshufd	\$0x93, @x[6], @t[6]
-	 pxor	@t[5], @x[5]
-	pshufd	\$0x93, @x[7], @t[7]
-	 pxor	@t[6], @x[6]
-	 pxor	@t[7], @x[7]
-
-	pxor	@x[0], @t[1]
-	pxor	@x[7], @t[0]
-	pxor	@x[7], @t[1]
-	 pshufd	\$0x4E, @x[0], @x[0] 	# (x0 ^ (x0 <<< 32)) <<< 64
-	pxor	@x[1], @t[2]
-	 pshufd	\$0x4E, @x[1], @x[1]
-	pxor	@x[4], @t[5]
-	 pxor	@t[0], @x[0]
-	pxor	@x[5], @t[6]
-	 pxor	@t[1], @x[1]
-	pxor	@x[3], @t[4]
-	 pshufd	\$0x4E, @x[4], @t[0]
-	pxor	@x[6], @t[7]
-	 pshufd	\$0x4E, @x[5], @t[1]
-	pxor	@x[2], @t[3]
-	 pshufd	\$0x4E, @x[3], @x[4]
-	pxor	@x[7], @t[3]
-	 pshufd	\$0x4E, @x[7], @x[5]
-	pxor	@x[7], @t[4]
-	 pshufd	\$0x4E, @x[6], @x[3]
-	pxor	@t[4], @t[0]
-	 pshufd	\$0x4E, @x[2], @x[6]
-	pxor	@t[5], @t[1]
-___
-$code.=<<___ if (!$inv);
-	pxor	@t[3], @x[4]
-	pxor	@t[7], @x[5]
-	pxor	@t[6], @x[3]
-	 movdqa	@t[0], @x[2]
-	pxor	@t[2], @x[6]
-	 movdqa	@t[1], @x[7]
-___
-$code.=<<___ if ($inv);
-	pxor	@x[4], @t[3]
-	pxor	@t[7], @x[5]
-	pxor	@x[3], @t[6]
-	 movdqa	@t[0], @x[3]
-	pxor	@t[2], @x[6]
-	 movdqa	@t[6], @x[2]
-	 movdqa	@t[1], @x[7]
-	 movdqa	@x[6], @x[4]
-	 movdqa	@t[3], @x[6]
-___
-}
-
-sub InvMixColumns_orig {
-my @x=@_[0..7];
-my @t=@_[8..15];
-
-$code.=<<___;
-	# multiplication by 0x0e
-	pshufd	\$0x93, @x[7], @t[7]
-	movdqa	@x[2], @t[2]
-	pxor	@x[5], @x[7]		# 7 5
-	pxor	@x[5], @x[2]		# 2 5
-	pshufd	\$0x93, @x[0], @t[0]
-	movdqa	@x[5], @t[5]
-	pxor	@x[0], @x[5]		# 5 0		[1]
-	pxor	@x[1], @x[0]		# 0 1
-	pshufd	\$0x93, @x[1], @t[1]
-	pxor	@x[2], @x[1]		# 1 25
-	pxor	@x[6], @x[0]		# 01 6		[2]
-	pxor	@x[3], @x[1]		# 125 3		[4]
-	pshufd	\$0x93, @x[3], @t[3]
-	pxor	@x[0], @x[2]		# 25 016	[3]
-	pxor	@x[7], @x[3]		# 3 75
-	pxor	@x[6], @x[7]		# 75 6		[0]
-	pshufd	\$0x93, @x[6], @t[6]
-	movdqa	@x[4], @t[4]
-	pxor	@x[4], @x[6]		# 6 4
-	pxor	@x[3], @x[4]		# 4 375		[6]
-	pxor	@x[7], @x[3]		# 375 756=36
-	pxor	@t[5], @x[6]		# 64 5		[7]
-	pxor	@t[2], @x[3]		# 36 2
-	pxor	@t[4], @x[3]		# 362 4		[5]
-	pshufd	\$0x93, @t[5], @t[5]
-___
-					my @y = @x[7,5,0,2,1,3,4,6];
-$code.=<<___;
-	# multiplication by 0x0b
-	pxor	@y[0], @y[1]
-	pxor	@t[0], @y[0]
-	pxor	@t[1], @y[1]
-	pshufd	\$0x93, @t[2], @t[2]
-	pxor	@t[5], @y[0]
-	pxor	@t[6], @y[1]
-	pxor	@t[7], @y[0]
-	pshufd	\$0x93, @t[4], @t[4]
-	pxor	@t[6], @t[7]		# clobber t[7]
-	pxor	@y[0], @y[1]
-
-	pxor	@t[0], @y[3]
-	pshufd	\$0x93, @t[0], @t[0]
-	pxor	@t[1], @y[2]
-	pxor	@t[1], @y[4]
-	pxor	@t[2], @y[2]
-	pshufd	\$0x93, @t[1], @t[1]
-	pxor	@t[2], @y[3]
-	pxor	@t[2], @y[5]
-	pxor	@t[7], @y[2]
-	pshufd	\$0x93, @t[2], @t[2]
-	pxor	@t[3], @y[3]
-	pxor	@t[3], @y[6]
-	pxor	@t[3], @y[4]
-	pshufd	\$0x93, @t[3], @t[3]
-	pxor	@t[4], @y[7]
-	pxor	@t[4], @y[5]
-	pxor	@t[7], @y[7]
-	pxor	@t[5], @y[3]
-	pxor	@t[4], @y[4]
-	pxor	@t[5], @t[7]		# clobber t[7] even more
-
-	pxor	@t[7], @y[5]
-	pshufd	\$0x93, @t[4], @t[4]
-	pxor	@t[7], @y[6]
-	pxor	@t[7], @y[4]
-
-	pxor	@t[5], @t[7]
-	pshufd	\$0x93, @t[5], @t[5]
-	pxor	@t[6], @t[7]		# restore t[7]
-
-	# multiplication by 0x0d
-	pxor	@y[7], @y[4]
-	pxor	@t[4], @y[7]
-	pshufd	\$0x93, @t[6], @t[6]
-	pxor	@t[0], @y[2]
-	pxor	@t[5], @y[7]
-	pxor	@t[2], @y[2]
-	pshufd	\$0x93, @t[7], @t[7]
-
-	pxor	@y[1], @y[3]
-	pxor	@t[1], @y[1]
-	pxor	@t[0], @y[0]
-	pxor	@t[0], @y[3]
-	pxor	@t[5], @y[1]
-	pxor	@t[5], @y[0]
-	pxor	@t[7], @y[1]
-	pshufd	\$0x93, @t[0], @t[0]
-	pxor	@t[6], @y[0]
-	pxor	@y[1], @y[3]
-	pxor	@t[1], @y[4]
-	pshufd	\$0x93, @t[1], @t[1]
-
-	pxor	@t[7], @y[7]
-	pxor	@t[2], @y[4]
-	pxor	@t[2], @y[5]
-	pshufd	\$0x93, @t[2], @t[2]
-	pxor	@t[6], @y[2]
-	pxor	@t[3], @t[6]		# clobber t[6]
-	pxor	@y[7], @y[4]
-	pxor	@t[6], @y[3]
-
-	pxor	@t[6], @y[6]
-	pxor	@t[5], @y[5]
-	pxor	@t[4], @y[6]
-	pshufd	\$0x93, @t[4], @t[4]
-	pxor	@t[6], @y[5]
-	pxor	@t[7], @y[6]
-	pxor	@t[3], @t[6]		# restore t[6]
-
-	pshufd	\$0x93, @t[5], @t[5]
-	pshufd	\$0x93, @t[6], @t[6]
-	pshufd	\$0x93, @t[7], @t[7]
-	pshufd	\$0x93, @t[3], @t[3]
-
-	# multiplication by 0x09
-	pxor	@y[1], @y[4]
-	pxor	@y[1], @t[1]		# t[1]=y[1]
-	pxor	@t[5], @t[0]		# clobber t[0]
-	pxor	@t[5], @t[1]
-	pxor	@t[0], @y[3]
-	pxor	@y[0], @t[0]		# t[0]=y[0]
-	pxor	@t[6], @t[1]
-	pxor	@t[7], @t[6]		# clobber t[6]
-	pxor	@t[1], @y[4]
-	pxor	@t[4], @y[7]
-	pxor	@y[4], @t[4]		# t[4]=y[4]
-	pxor	@t[3], @y[6]
-	pxor	@y[3], @t[3]		# t[3]=y[3]
-	pxor	@t[2], @y[5]
-	pxor	@y[2], @t[2]		# t[2]=y[2]
-	pxor	@t[7], @t[3]
-	pxor	@y[5], @t[5]		# t[5]=y[5]
-	pxor	@t[6], @t[2]
-	pxor	@t[6], @t[5]
-	pxor	@y[6], @t[6]		# t[6]=y[6]
-	pxor	@y[7], @t[7]		# t[7]=y[7]
-
-	movdqa	@t[0],@XMM[0]
-	movdqa	@t[1],@XMM[1]
-	movdqa	@t[2],@XMM[2]
-	movdqa	@t[3],@XMM[3]
-	movdqa	@t[4],@XMM[4]
-	movdqa	@t[5],@XMM[5]
-	movdqa	@t[6],@XMM[6]
-	movdqa	@t[7],@XMM[7]
-___
-}
-
-sub InvMixColumns {
-my @x=@_[0..7];
-my @t=@_[8..15];
-
-# Thanks to Jussi Kivilinna for providing a pointer to
-#
-# | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
-# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
-# | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
-# | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
-
-$code.=<<___;
-	# multiplication by 0x05-0x00-0x04-0x00
-	pshufd	\$0x4E, @x[0], @t[0]
-	pshufd	\$0x4E, @x[6], @t[6]
-	pxor	@x[0], @t[0]
-	pshufd	\$0x4E, @x[7], @t[7]
-	pxor	@x[6], @t[6]
-	pshufd	\$0x4E, @x[1], @t[1]
-	pxor	@x[7], @t[7]
-	pshufd	\$0x4E, @x[2], @t[2]
-	pxor	@x[1], @t[1]
-	pshufd	\$0x4E, @x[3], @t[3]
-	pxor	@x[2], @t[2]
-	 pxor	@t[6], @x[0]
-	 pxor	@t[6], @x[1]
-	pshufd	\$0x4E, @x[4], @t[4]
-	pxor	@x[3], @t[3]
-	 pxor	@t[0], @x[2]
-	 pxor	@t[1], @x[3]
-	pshufd	\$0x4E, @x[5], @t[5]
-	pxor	@x[4], @t[4]
-	 pxor	@t[7], @x[1]
-	 pxor	@t[2], @x[4]
-	pxor	@x[5], @t[5]
-
-	 pxor	@t[7], @x[2]
-	 pxor	@t[6], @x[3]
-	 pxor	@t[6], @x[4]
-	 pxor	@t[3], @x[5]
-	 pxor	@t[4], @x[6]
-	 pxor	@t[7], @x[4]
-	 pxor	@t[7], @x[5]
-	 pxor	@t[5], @x[7]
-___
-	&MixColumns	(@x,@t,1);	# flipped 2<->3 and 4<->6
-}
-
-sub aesenc {				# not used
-my @b=@_[0..7];
-my @t=@_[8..15];
-$code.=<<___;
-	movdqa	0x30($const),@t[0]	# .LSR
-___
-	&ShiftRows	(@b,@t[0]);
-	&Sbox		(@b,@t);
-	&MixColumns	(@b[0,1,4,6,3,7,2,5],@t);
-}
-
-sub aesenclast {			# not used
-my @b=@_[0..7];
-my @t=@_[8..15];
-$code.=<<___;
-	movdqa	0x40($const),@t[0]	# .LSRM0
-___
-	&ShiftRows	(@b,@t[0]);
-	&Sbox		(@b,@t);
-$code.=<<___
-	pxor	0x00($key),@b[0]
-	pxor	0x10($key),@b[1]
-	pxor	0x20($key),@b[4]
-	pxor	0x30($key),@b[6]
-	pxor	0x40($key),@b[3]
-	pxor	0x50($key),@b[7]
-	pxor	0x60($key),@b[2]
-	pxor	0x70($key),@b[5]
-___
-}
-
-sub swapmove {
-my ($a,$b,$n,$mask,$t)=@_;
-$code.=<<___;
-	movdqa	$b,$t
-	psrlq	\$$n,$b
-	pxor  	$a,$b
-	pand	$mask,$b
-	pxor	$b,$a
-	psllq	\$$n,$b
-	pxor	$t,$b
-___
-}
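-
-# swapmove() emits a "delta swap": the bit groups selected by $mask are
-# exchanged between $a and the bits $n positions higher in $b. A scalar
-# reference sketch, illustrative only:
-sub swapmove_ref {
-    my ($a, $b, $n, $mask) = @_;
-    my $t = (($b >> $n) ^ $a) & $mask;	# differing bits, aligned to $a
-    return ($a ^ $t, $b ^ ($t << $n));	# flip them in both operands
-}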
-sub swapmove2x {
-my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
-$code.=<<___;
-	movdqa	$b0,$t0
-	psrlq	\$$n,$b0
-	 movdqa	$b1,$t1
-	 psrlq	\$$n,$b1
-	pxor  	$a0,$b0
-	 pxor  	$a1,$b1
-	pand	$mask,$b0
-	 pand	$mask,$b1
-	pxor	$b0,$a0
-	psllq	\$$n,$b0
-	 pxor	$b1,$a1
-	 psllq	\$$n,$b1
-	pxor	$t0,$b0
-	 pxor	$t1,$b1
-___
-}
-
-sub bitslice {
-my @x=reverse(@_[0..7]);
-my ($t0,$t1,$t2,$t3)=@_[8..11];
-$code.=<<___;
-	movdqa	0x00($const),$t0	# .LBS0
-	movdqa	0x10($const),$t1	# .LBS1
-___
-	&swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
-	&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
-$code.=<<___;
-	movdqa	0x20($const),$t0	# .LBS2
-___
-	&swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
-	&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
-
-	&swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
-	&swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
-}
-
-$code.=<<___;
-.text
-
-.extern	asm_AES_encrypt
-.extern	asm_AES_decrypt
-
-.type	_bsaes_encrypt8,\@abi-omnipotent
-.align	64
-_bsaes_encrypt8:
-	lea	.LBS0(%rip), $const	# constants table
-
-	movdqa	($key), @XMM[9]		# round 0 key
-	lea	0x10($key), $key
-	movdqa	0x50($const), @XMM[8]	# .LM0SR
-	pxor	@XMM[9], @XMM[0]	# xor with round0 key
-	pxor	@XMM[9], @XMM[1]
-	 pshufb	@XMM[8], @XMM[0]
-	pxor	@XMM[9], @XMM[2]
-	 pshufb	@XMM[8], @XMM[1]
-	pxor	@XMM[9], @XMM[3]
-	 pshufb	@XMM[8], @XMM[2]
-	pxor	@XMM[9], @XMM[4]
-	 pshufb	@XMM[8], @XMM[3]
-	pxor	@XMM[9], @XMM[5]
-	 pshufb	@XMM[8], @XMM[4]
-	pxor	@XMM[9], @XMM[6]
-	 pshufb	@XMM[8], @XMM[5]
-	pxor	@XMM[9], @XMM[7]
-	 pshufb	@XMM[8], @XMM[6]
-	 pshufb	@XMM[8], @XMM[7]
-_bsaes_encrypt8_bitslice:
-___
-	&bitslice	(@XMM[0..7, 8..11]);
-$code.=<<___;
-	dec	$rounds
-	jmp	.Lenc_sbox
-.align	16
-.Lenc_loop:
-___
-	&ShiftRows	(@XMM[0..7, 8]);
-$code.=".Lenc_sbox:\n";
-	&Sbox		(@XMM[0..7, 8..15]);
-$code.=<<___;
-	dec	$rounds
-	jl	.Lenc_done
-___
-	&MixColumns	(@XMM[0,1,4,6,3,7,2,5, 8..15]);
-$code.=<<___;
-	movdqa	0x30($const), @XMM[8]	# .LSR
-	jnz	.Lenc_loop
-	movdqa	0x40($const), @XMM[8]	# .LSRM0
-	jmp	.Lenc_loop
-.align	16
-.Lenc_done:
-___
-	# output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
-	&bitslice	(@XMM[0,1,4,6,3,7,2,5, 8..11]);
-$code.=<<___;
-	movdqa	($key), @XMM[8]		# last round key
-	pxor	@XMM[8], @XMM[4]
-	pxor	@XMM[8], @XMM[6]
-	pxor	@XMM[8], @XMM[3]
-	pxor	@XMM[8], @XMM[7]
-	pxor	@XMM[8], @XMM[2]
-	pxor	@XMM[8], @XMM[5]
-	pxor	@XMM[8], @XMM[0]
-	pxor	@XMM[8], @XMM[1]
-	ret
-.size	_bsaes_encrypt8,.-_bsaes_encrypt8
-
-.type	_bsaes_decrypt8,\@abi-omnipotent
-.align	64
-_bsaes_decrypt8:
-	lea	.LBS0(%rip), $const	# constants table
-
-	movdqa	($key), @XMM[9]		# round 0 key
-	lea	0x10($key), $key
-	movdqa	-0x30($const), @XMM[8]	# .LM0ISR
-	pxor	@XMM[9], @XMM[0]	# xor with round0 key
-	pxor	@XMM[9], @XMM[1]
-	 pshufb	@XMM[8], @XMM[0]
-	pxor	@XMM[9], @XMM[2]
-	 pshufb	@XMM[8], @XMM[1]
-	pxor	@XMM[9], @XMM[3]
-	 pshufb	@XMM[8], @XMM[2]
-	pxor	@XMM[9], @XMM[4]
-	 pshufb	@XMM[8], @XMM[3]
-	pxor	@XMM[9], @XMM[5]
-	 pshufb	@XMM[8], @XMM[4]
-	pxor	@XMM[9], @XMM[6]
-	 pshufb	@XMM[8], @XMM[5]
-	pxor	@XMM[9], @XMM[7]
-	 pshufb	@XMM[8], @XMM[6]
-	 pshufb	@XMM[8], @XMM[7]
-___
-	&bitslice	(@XMM[0..7, 8..11]);
-$code.=<<___;
-	dec	$rounds
-	jmp	.Ldec_sbox
-.align	16
-.Ldec_loop:
-___
-	&ShiftRows	(@XMM[0..7, 8]);
-$code.=".Ldec_sbox:\n";
-	&InvSbox	(@XMM[0..7, 8..15]);
-$code.=<<___;
-	dec	$rounds
-	jl	.Ldec_done
-___
-	&InvMixColumns	(@XMM[0,1,6,4,2,7,3,5, 8..15]);
-$code.=<<___;
-	movdqa	-0x10($const), @XMM[8]	# .LISR
-	jnz	.Ldec_loop
-	movdqa	-0x20($const), @XMM[8]	# .LISRM0
-	jmp	.Ldec_loop
-.align	16
-.Ldec_done:
-___
-	&bitslice	(@XMM[0,1,6,4,2,7,3,5, 8..11]);
-$code.=<<___;
-	movdqa	($key), @XMM[8]		# last round key
-	pxor	@XMM[8], @XMM[6]
-	pxor	@XMM[8], @XMM[4]
-	pxor	@XMM[8], @XMM[2]
-	pxor	@XMM[8], @XMM[7]
-	pxor	@XMM[8], @XMM[3]
-	pxor	@XMM[8], @XMM[5]
-	pxor	@XMM[8], @XMM[0]
-	pxor	@XMM[8], @XMM[1]
-	ret
-.size	_bsaes_decrypt8,.-_bsaes_decrypt8
-___
-}
-{
-my ($out,$inp,$rounds,$const)=("%rax","%rcx","%r10d","%r11");
-
-sub bitslice_key {
-my @x=reverse(@_[0..7]);
-my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
-
-	&swapmove	(@x[0,1],1,$bs0,$t2,$t3);
-$code.=<<___;
-	#&swapmove(@x[2,3],1,$t0,$t2,$t3);
-	movdqa	@x[0], @x[2]
-	movdqa	@x[1], @x[3]
-___
-	#&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
-
-	&swapmove2x	(@x[0,2,1,3],2,$bs1,$t2,$t3);
-$code.=<<___;
-	#&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
-	movdqa	@x[0], @x[4]
-	movdqa	@x[2], @x[6]
-	movdqa	@x[1], @x[5]
-	movdqa	@x[3], @x[7]
-___
-	&swapmove2x	(@x[0,4,1,5],4,$bs2,$t2,$t3);
-	&swapmove2x	(@x[2,6,3,7],4,$bs2,$t2,$t3);
-}
-
-$code.=<<___;
-.type	_bsaes_key_convert,\@abi-omnipotent
-.align	16
-_bsaes_key_convert:
-	lea	.Lmasks(%rip), $const
-	movdqu	($inp), %xmm7		# load round 0 key
-	lea	0x10($inp), $inp
-	movdqa	0x00($const), %xmm0	# 0x01...
-	movdqa	0x10($const), %xmm1	# 0x02...
-	movdqa	0x20($const), %xmm2	# 0x04...
-	movdqa	0x30($const), %xmm3	# 0x08...
-	movdqa	0x40($const), %xmm4	# .LM0
-	pcmpeqd	%xmm5, %xmm5		# .LNOT
-
-	movdqu	($inp), %xmm6		# load round 1 key
-	movdqa	%xmm7, ($out)		# save round 0 key
-	lea	0x10($out), $out
-	dec	$rounds
-	jmp	.Lkey_loop
-.align	16
-.Lkey_loop:
-	pshufb	%xmm4, %xmm6		# .LM0
-
-	movdqa	%xmm0,	%xmm8
-	movdqa	%xmm1,	%xmm9
-
-	pand	%xmm6,	%xmm8
-	pand	%xmm6,	%xmm9
-	movdqa	%xmm2,	%xmm10
-	pcmpeqb	%xmm0,	%xmm8
-	psllq	\$4,	%xmm0		# 0x10...
-	movdqa	%xmm3,	%xmm11
-	pcmpeqb	%xmm1,	%xmm9
-	psllq	\$4,	%xmm1		# 0x20...
-
-	pand	%xmm6,	%xmm10
-	pand	%xmm6,	%xmm11
-	movdqa	%xmm0,	%xmm12
-	pcmpeqb	%xmm2,	%xmm10
-	psllq	\$4,	%xmm2		# 0x40...
-	movdqa	%xmm1,	%xmm13
-	pcmpeqb	%xmm3,	%xmm11
-	psllq	\$4,	%xmm3		# 0x80...
-
-	movdqa	%xmm2,	%xmm14
-	movdqa	%xmm3,	%xmm15
-	 pxor	%xmm5,	%xmm8		# "pnot"
-	 pxor	%xmm5,	%xmm9
-
-	pand	%xmm6,	%xmm12
-	pand	%xmm6,	%xmm13
-	 movdqa	%xmm8, 0x00($out)	# write bit-sliced round key
-	pcmpeqb	%xmm0,	%xmm12
-	psrlq	\$4,	%xmm0		# 0x01...
-	 movdqa	%xmm9, 0x10($out)
-	pcmpeqb	%xmm1,	%xmm13
-	psrlq	\$4,	%xmm1		# 0x02...
-	 lea	0x10($inp), $inp
-
-	pand	%xmm6,	%xmm14
-	pand	%xmm6,	%xmm15
-	 movdqa	%xmm10, 0x20($out)
-	pcmpeqb	%xmm2,	%xmm14
-	psrlq	\$4,	%xmm2		# 0x04...
-	 movdqa	%xmm11, 0x30($out)
-	pcmpeqb	%xmm3,	%xmm15
-	psrlq	\$4,	%xmm3		# 0x08...
-	 movdqu	($inp), %xmm6		# load next round key
-
-	pxor	%xmm5, %xmm13		# "pnot"
-	pxor	%xmm5, %xmm14
-	movdqa	%xmm12, 0x40($out)
-	movdqa	%xmm13, 0x50($out)
-	movdqa	%xmm14, 0x60($out)
-	movdqa	%xmm15, 0x70($out)
-	lea	0x80($out),$out
-	dec	$rounds
-	jnz	.Lkey_loop
-
-	movdqa	0x50($const), %xmm7	# .L63
-	#movdqa	%xmm6, ($out)		# don't save last round key
-	ret
-.size	_bsaes_key_convert,.-_bsaes_key_convert
-___
-}
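_bsaes_key_convert expands each 16-byte inner round key into eight 128-bit masks, one per bit position, by comparing the permuted key bytes against the 0x01/0x02/.../0x04/0x08 patterns from .Lmasks and their shifted variants (selected planes are then complemented via the pcmpeqd-generated all-ones register, a detail tied to the S-box representation and omitted here). A rough scalar sketch of the underlying bit-slicing idea, using a hypothetical 16-byte key:

    # Rough sketch (not part of the generator): spread bit $bit of every
    # byte of a round key into its own plane, so plane $bit holds one bit
    # per byte -- the scalar analogue of the eight pcmpeqb masks written
    # to 0x00($out)..0x70($out) above.
    my @key_bytes = map { ord } split //, 'sixteen byte key';   # 16 bytes
    my @plane = (0) x 8;
    for my $byte (0 .. 15) {
        for my $bit (0 .. 7) {
            $plane[$bit] |= (($key_bytes[$byte] >> $bit) & 1) << $byte;
        }
    }
    printf "plane %d: %04x\n", $_, $plane[$_] for 0 .. 7;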
-
-if (0 && !$win64) {	# following four functions are unsupported interface
-			# used for benchmarking...
-$code.=<<___;
-.globl	bsaes_enc_key_convert
-.type	bsaes_enc_key_convert,\@function,2
-.align	16
-bsaes_enc_key_convert:
-	mov	240($inp),%r10d		# pass rounds
-	mov	$inp,%rcx		# pass key
-	mov	$out,%rax		# pass key schedule
-	call	_bsaes_key_convert
-	pxor	%xmm6,%xmm7		# fix up last round key
-	movdqa	%xmm7,(%rax)		# save last round key
-	ret
-.size	bsaes_enc_key_convert,.-bsaes_enc_key_convert
-
-.globl	bsaes_encrypt_128
-.type	bsaes_encrypt_128,\@function,4
-.align	16
-bsaes_encrypt_128:
-.Lenc128_loop:
-	movdqu	0x00($inp), @XMM[0]	# load input
-	movdqu	0x10($inp), @XMM[1]
-	movdqu	0x20($inp), @XMM[2]
-	movdqu	0x30($inp), @XMM[3]
-	movdqu	0x40($inp), @XMM[4]
-	movdqu	0x50($inp), @XMM[5]
-	movdqu	0x60($inp), @XMM[6]
-	movdqu	0x70($inp), @XMM[7]
-	mov	$key, %rax		# pass the $key
-	lea	0x80($inp), $inp
-	mov	\$10,%r10d
-
-	call	_bsaes_encrypt8
-
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[4], 0x20($out)
-	movdqu	@XMM[6], 0x30($out)
-	movdqu	@XMM[3], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[2], 0x60($out)
-	movdqu	@XMM[5], 0x70($out)
-	lea	0x80($out), $out
-	sub	\$0x80,$len
-	ja	.Lenc128_loop
-	ret
-.size	bsaes_encrypt_128,.-bsaes_encrypt_128
-
-.globl	bsaes_dec_key_convert
-.type	bsaes_dec_key_convert,\@function,2
-.align	16
-bsaes_dec_key_convert:
-	mov	240($inp),%r10d		# pass rounds
-	mov	$inp,%rcx		# pass key
-	mov	$out,%rax		# pass key schedule
-	call	_bsaes_key_convert
-	pxor	($out),%xmm7		# fix up round 0 key
-	movdqa	%xmm6,(%rax)		# save last round key
-	movdqa	%xmm7,($out)
-	ret
-.size	bsaes_dec_key_convert,.-bsaes_dec_key_convert
-
-.globl	bsaes_decrypt_128
-.type	bsaes_decrypt_128,\@function,4
-.align	16
-bsaes_decrypt_128:
-.Ldec128_loop:
-	movdqu	0x00($inp), @XMM[0]	# load input
-	movdqu	0x10($inp), @XMM[1]
-	movdqu	0x20($inp), @XMM[2]
-	movdqu	0x30($inp), @XMM[3]
-	movdqu	0x40($inp), @XMM[4]
-	movdqu	0x50($inp), @XMM[5]
-	movdqu	0x60($inp), @XMM[6]
-	movdqu	0x70($inp), @XMM[7]
-	mov	$key, %rax		# pass the $key
-	lea	0x80($inp), $inp
-	mov	\$10,%r10d
-
-	call	_bsaes_decrypt8
-
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[3], 0x60($out)
-	movdqu	@XMM[5], 0x70($out)
-	lea	0x80($out), $out
-	sub	\$0x80,$len
-	ja	.Ldec128_loop
-	ret
-.size	bsaes_decrypt_128,.-bsaes_decrypt_128
-___
-}
-{
-######################################################################
-#
-# OpenSSL interface
-#
-my ($arg1,$arg2,$arg3,$arg4,$arg5,$arg6)=$win64	? ("%rcx","%rdx","%r8","%r9","%r10","%r11d")
-						: ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
-my ($inp,$out,$len,$key)=("%r12","%r13","%r14","%r15");
-
-if ($ecb) {
-$code.=<<___;
-.globl	bsaes_ecb_encrypt_blocks
-.type	bsaes_ecb_encrypt_blocks,\@abi-omnipotent
-.align	16
-bsaes_ecb_encrypt_blocks:
-	mov	%rsp, %rax
-.Lecb_enc_prologue:
-	push	%rbp
-	push	%rbx
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	lea	-0x48(%rsp),%rsp
-___
-$code.=<<___ if ($win64);
-	lea	-0xa0(%rsp), %rsp
-	movaps	%xmm6, 0x40(%rsp)
-	movaps	%xmm7, 0x50(%rsp)
-	movaps	%xmm8, 0x60(%rsp)
-	movaps	%xmm9, 0x70(%rsp)
-	movaps	%xmm10, 0x80(%rsp)
-	movaps	%xmm11, 0x90(%rsp)
-	movaps	%xmm12, 0xa0(%rsp)
-	movaps	%xmm13, 0xb0(%rsp)
-	movaps	%xmm14, 0xc0(%rsp)
-	movaps	%xmm15, 0xd0(%rsp)
-.Lecb_enc_body:
-___
-$code.=<<___;
-	mov	%rsp,%rbp		# backup %rsp
-	mov	240($arg4),%eax		# rounds
-	mov	$arg1,$inp		# backup arguments
-	mov	$arg2,$out
-	mov	$arg3,$len
-	mov	$arg4,$key
-	cmp	\$8,$arg3
-	jb	.Lecb_enc_short
-
-	mov	%eax,%ebx		# backup rounds
-	shl	\$7,%rax		# 128 bytes per inner round key
-	sub	\$`128-32`,%rax		# size of bit-sliced key schedule
-	sub	%rax,%rsp
-	mov	%rsp,%rax		# pass key schedule
-	mov	$key,%rcx		# pass key
-	mov	%ebx,%r10d		# pass rounds
-	call	_bsaes_key_convert
-	pxor	%xmm6,%xmm7		# fix up last round key
-	movdqa	%xmm7,(%rax)		# save last round key
-
-	sub	\$8,$len
-.Lecb_enc_loop:
-	movdqu	0x00($inp), @XMM[0]	# load input
-	movdqu	0x10($inp), @XMM[1]
-	movdqu	0x20($inp), @XMM[2]
-	movdqu	0x30($inp), @XMM[3]
-	movdqu	0x40($inp), @XMM[4]
-	movdqu	0x50($inp), @XMM[5]
-	mov	%rsp, %rax		# pass key schedule
-	movdqu	0x60($inp), @XMM[6]
-	mov	%ebx,%r10d		# pass rounds
-	movdqu	0x70($inp), @XMM[7]
-	lea	0x80($inp), $inp
-
-	call	_bsaes_encrypt8
-
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[4], 0x20($out)
-	movdqu	@XMM[6], 0x30($out)
-	movdqu	@XMM[3], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[2], 0x60($out)
-	movdqu	@XMM[5], 0x70($out)
-	lea	0x80($out), $out
-	sub	\$8,$len
-	jnc	.Lecb_enc_loop
-
-	add	\$8,$len
-	jz	.Lecb_enc_done
-
-	movdqu	0x00($inp), @XMM[0]	# load input
-	mov	%rsp, %rax		# pass key schedule
-	mov	%ebx,%r10d		# pass rounds
-	cmp	\$2,$len
-	jb	.Lecb_enc_one
-	movdqu	0x10($inp), @XMM[1]
-	je	.Lecb_enc_two
-	movdqu	0x20($inp), @XMM[2]
-	cmp	\$4,$len
-	jb	.Lecb_enc_three
-	movdqu	0x30($inp), @XMM[3]
-	je	.Lecb_enc_four
-	movdqu	0x40($inp), @XMM[4]
-	cmp	\$6,$len
-	jb	.Lecb_enc_five
-	movdqu	0x50($inp), @XMM[5]
-	je	.Lecb_enc_six
-	movdqu	0x60($inp), @XMM[6]
-	call	_bsaes_encrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[4], 0x20($out)
-	movdqu	@XMM[6], 0x30($out)
-	movdqu	@XMM[3], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[2], 0x60($out)
-	jmp	.Lecb_enc_done
-.align	16
-.Lecb_enc_six:
-	call	_bsaes_encrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[4], 0x20($out)
-	movdqu	@XMM[6], 0x30($out)
-	movdqu	@XMM[3], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	jmp	.Lecb_enc_done
-.align	16
-.Lecb_enc_five:
-	call	_bsaes_encrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[4], 0x20($out)
-	movdqu	@XMM[6], 0x30($out)
-	movdqu	@XMM[3], 0x40($out)
-	jmp	.Lecb_enc_done
-.align	16
-.Lecb_enc_four:
-	call	_bsaes_encrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[4], 0x20($out)
-	movdqu	@XMM[6], 0x30($out)
-	jmp	.Lecb_enc_done
-.align	16
-.Lecb_enc_three:
-	call	_bsaes_encrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[4], 0x20($out)
-	jmp	.Lecb_enc_done
-.align	16
-.Lecb_enc_two:
-	call	_bsaes_encrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	jmp	.Lecb_enc_done
-.align	16
-.Lecb_enc_one:
-	call	_bsaes_encrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	jmp	.Lecb_enc_done
-.align	16
-.Lecb_enc_short:
-	lea	($inp), $arg1
-	lea	($out), $arg2
-	lea	($key), $arg3
-	call	asm_AES_encrypt
-	lea	16($inp), $inp
-	lea	16($out), $out
-	dec	$len
-	jnz	.Lecb_enc_short
-
-.Lecb_enc_done:
-	lea	(%rsp),%rax
-	pxor	%xmm0, %xmm0
-.Lecb_enc_bzero:			# wipe key schedule [if any]
-	movdqa	%xmm0, 0x00(%rax)
-	movdqa	%xmm0, 0x10(%rax)
-	lea	0x20(%rax), %rax
-	cmp	%rax, %rbp
-	jb	.Lecb_enc_bzero
-
-	lea	(%rbp),%rsp		# restore %rsp
-___
-$code.=<<___ if ($win64);
-	movaps	0x40(%rbp), %xmm6
-	movaps	0x50(%rbp), %xmm7
-	movaps	0x60(%rbp), %xmm8
-	movaps	0x70(%rbp), %xmm9
-	movaps	0x80(%rbp), %xmm10
-	movaps	0x90(%rbp), %xmm11
-	movaps	0xa0(%rbp), %xmm12
-	movaps	0xb0(%rbp), %xmm13
-	movaps	0xc0(%rbp), %xmm14
-	movaps	0xd0(%rbp), %xmm15
-	lea	0xa0(%rbp), %rsp
-___
-$code.=<<___;
-	mov	0x48(%rsp), %r15
-	mov	0x50(%rsp), %r14
-	mov	0x58(%rsp), %r13
-	mov	0x60(%rsp), %r12
-	mov	0x68(%rsp), %rbx
-	mov	0x70(%rsp), %rax
-	lea	0x78(%rsp), %rsp
-	mov	%rax, %rbp
-.Lecb_enc_epilogue:
-	ret
-.size	bsaes_ecb_encrypt_blocks,.-bsaes_ecb_encrypt_blocks
-
-.globl	bsaes_ecb_decrypt_blocks
-.type	bsaes_ecb_decrypt_blocks,\@abi-omnipotent
-.align	16
-bsaes_ecb_decrypt_blocks:
-	mov	%rsp, %rax
-.Lecb_dec_prologue:
-	push	%rbp
-	push	%rbx
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	lea	-0x48(%rsp),%rsp
-___
-$code.=<<___ if ($win64);
-	lea	-0xa0(%rsp), %rsp
-	movaps	%xmm6, 0x40(%rsp)
-	movaps	%xmm7, 0x50(%rsp)
-	movaps	%xmm8, 0x60(%rsp)
-	movaps	%xmm9, 0x70(%rsp)
-	movaps	%xmm10, 0x80(%rsp)
-	movaps	%xmm11, 0x90(%rsp)
-	movaps	%xmm12, 0xa0(%rsp)
-	movaps	%xmm13, 0xb0(%rsp)
-	movaps	%xmm14, 0xc0(%rsp)
-	movaps	%xmm15, 0xd0(%rsp)
-.Lecb_dec_body:
-___
-$code.=<<___;
-	mov	%rsp,%rbp		# backup %rsp
-	mov	240($arg4),%eax		# rounds
-	mov	$arg1,$inp		# backup arguments
-	mov	$arg2,$out
-	mov	$arg3,$len
-	mov	$arg4,$key
-	cmp	\$8,$arg3
-	jb	.Lecb_dec_short
-
-	mov	%eax,%ebx		# backup rounds
-	shl	\$7,%rax		# 128 bytes per inner round key
-	sub	\$`128-32`,%rax		# size of bit-sliced key schedule
-	sub	%rax,%rsp
-	mov	%rsp,%rax		# pass key schedule
-	mov	$key,%rcx		# pass key
-	mov	%ebx,%r10d		# pass rounds
-	call	_bsaes_key_convert
-	pxor	(%rsp),%xmm7		# fix up round 0 key
-	movdqa	%xmm6,(%rax)		# save last round key
-	movdqa	%xmm7,(%rsp)
-
-	sub	\$8,$len
-.Lecb_dec_loop:
-	movdqu	0x00($inp), @XMM[0]	# load input
-	movdqu	0x10($inp), @XMM[1]
-	movdqu	0x20($inp), @XMM[2]
-	movdqu	0x30($inp), @XMM[3]
-	movdqu	0x40($inp), @XMM[4]
-	movdqu	0x50($inp), @XMM[5]
-	mov	%rsp, %rax		# pass key schedule
-	movdqu	0x60($inp), @XMM[6]
-	mov	%ebx,%r10d		# pass rounds
-	movdqu	0x70($inp), @XMM[7]
-	lea	0x80($inp), $inp
-
-	call	_bsaes_decrypt8
-
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[3], 0x60($out)
-	movdqu	@XMM[5], 0x70($out)
-	lea	0x80($out), $out
-	sub	\$8,$len
-	jnc	.Lecb_dec_loop
-
-	add	\$8,$len
-	jz	.Lecb_dec_done
-
-	movdqu	0x00($inp), @XMM[0]	# load input
-	mov	%rsp, %rax		# pass key schedule
-	mov	%ebx,%r10d		# pass rounds
-	cmp	\$2,$len
-	jb	.Lecb_dec_one
-	movdqu	0x10($inp), @XMM[1]
-	je	.Lecb_dec_two
-	movdqu	0x20($inp), @XMM[2]
-	cmp	\$4,$len
-	jb	.Lecb_dec_three
-	movdqu	0x30($inp), @XMM[3]
-	je	.Lecb_dec_four
-	movdqu	0x40($inp), @XMM[4]
-	cmp	\$6,$len
-	jb	.Lecb_dec_five
-	movdqu	0x50($inp), @XMM[5]
-	je	.Lecb_dec_six
-	movdqu	0x60($inp), @XMM[6]
-	call	_bsaes_decrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[3], 0x60($out)
-	jmp	.Lecb_dec_done
-.align	16
-.Lecb_dec_six:
-	call	_bsaes_decrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	jmp	.Lecb_dec_done
-.align	16
-.Lecb_dec_five:
-	call	_bsaes_decrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	jmp	.Lecb_dec_done
-.align	16
-.Lecb_dec_four:
-	call	_bsaes_decrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	jmp	.Lecb_dec_done
-.align	16
-.Lecb_dec_three:
-	call	_bsaes_decrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	jmp	.Lecb_dec_done
-.align	16
-.Lecb_dec_two:
-	call	_bsaes_decrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	jmp	.Lecb_dec_done
-.align	16
-.Lecb_dec_one:
-	call	_bsaes_decrypt8
-	movdqu	@XMM[0], 0x00($out)	# write output
-	jmp	.Lecb_dec_done
-.align	16
-.Lecb_dec_short:
-	lea	($inp), $arg1
-	lea	($out), $arg2
-	lea	($key), $arg3
-	call	asm_AES_decrypt
-	lea	16($inp), $inp
-	lea	16($out), $out
-	dec	$len
-	jnz	.Lecb_dec_short
-
-.Lecb_dec_done:
-	lea	(%rsp),%rax
-	pxor	%xmm0, %xmm0
-.Lecb_dec_bzero:			# wipe key schedule [if any]
-	movdqa	%xmm0, 0x00(%rax)
-	movdqa	%xmm0, 0x10(%rax)
-	lea	0x20(%rax), %rax
-	cmp	%rax, %rbp
-	jb	.Lecb_dec_bzero
-
-	lea	(%rbp),%rsp		# restore %rsp
-___
-$code.=<<___ if ($win64);
-	movaps	0x40(%rbp), %xmm6
-	movaps	0x50(%rbp), %xmm7
-	movaps	0x60(%rbp), %xmm8
-	movaps	0x70(%rbp), %xmm9
-	movaps	0x80(%rbp), %xmm10
-	movaps	0x90(%rbp), %xmm11
-	movaps	0xa0(%rbp), %xmm12
-	movaps	0xb0(%rbp), %xmm13
-	movaps	0xc0(%rbp), %xmm14
-	movaps	0xd0(%rbp), %xmm15
-	lea	0xa0(%rbp), %rsp
-___
-$code.=<<___;
-	mov	0x48(%rsp), %r15
-	mov	0x50(%rsp), %r14
-	mov	0x58(%rsp), %r13
-	mov	0x60(%rsp), %r12
-	mov	0x68(%rsp), %rbx
-	mov	0x70(%rsp), %rax
-	lea	0x78(%rsp), %rsp
-	mov	%rax, %rbp
-.Lecb_dec_epilogue:
-	ret
-.size	bsaes_ecb_decrypt_blocks,.-bsaes_ecb_decrypt_blocks
-___
-}
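Both ECB entry points size the on-stack key schedule the same way, and the CBC/CTR/XTS paths below repeat the pattern: `shl \$7` multiplies the round count by 128 because each inner round key expands to eight 16-byte masks, and the `sub \$`128-32`` correction accounts for the first and last round keys, which stay 16 bytes each. A sketch of the arithmetic, assuming the usual 10/12/14 round counts:

    # Sketch of the stack reservation above: round 0 and the final round
    # key remain 16 bytes each, while every inner round key is bit-sliced
    # into 8 x 16 = 128 bytes.
    sub bsaes_schedule_bytes {
        my ($rounds) = @_;                     # 10, 12 or 14
        return 16 + ($rounds - 1) * 128 + 16;  # == ($rounds << 7) - (128 - 32)
    }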
-$code.=<<___;
-.extern	asm_AES_cbc_encrypt
-.globl	bsaes_cbc_encrypt
-.type	bsaes_cbc_encrypt,\@abi-omnipotent
-.align	16
-bsaes_cbc_encrypt:
-___
-$code.=<<___ if ($win64);
-	mov	48(%rsp),$arg6		# pull direction flag
-___
-$code.=<<___;
-	cmp	\$0,$arg6
-	jne	asm_AES_cbc_encrypt
-	cmp	\$128,$arg3
-	jb	asm_AES_cbc_encrypt
-
-	mov	%rsp, %rax
-.Lcbc_dec_prologue:
-	push	%rbp
-	push	%rbx
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	lea	-0x48(%rsp), %rsp
-___
-$code.=<<___ if ($win64);
-	mov	0xa0(%rsp),$arg5	# pull ivp
-	lea	-0xa0(%rsp), %rsp
-	movaps	%xmm6, 0x40(%rsp)
-	movaps	%xmm7, 0x50(%rsp)
-	movaps	%xmm8, 0x60(%rsp)
-	movaps	%xmm9, 0x70(%rsp)
-	movaps	%xmm10, 0x80(%rsp)
-	movaps	%xmm11, 0x90(%rsp)
-	movaps	%xmm12, 0xa0(%rsp)
-	movaps	%xmm13, 0xb0(%rsp)
-	movaps	%xmm14, 0xc0(%rsp)
-	movaps	%xmm15, 0xd0(%rsp)
-.Lcbc_dec_body:
-___
-$code.=<<___;
-	mov	%rsp, %rbp		# backup %rsp
-	mov	240($arg4), %eax	# rounds
-	mov	$arg1, $inp		# backup arguments
-	mov	$arg2, $out
-	mov	$arg3, $len
-	mov	$arg4, $key
-	mov	$arg5, %rbx
-	shr	\$4, $len		# bytes to blocks
-
-	mov	%eax, %edx		# rounds
-	shl	\$7, %rax		# 128 bytes per inner round key
-	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
-	sub	%rax, %rsp
-
-	mov	%rsp, %rax		# pass key schedule
-	mov	$key, %rcx		# pass key
-	mov	%edx, %r10d		# pass rounds
-	call	_bsaes_key_convert
-	pxor	(%rsp),%xmm7		# fix up round 0 key
-	movdqa	%xmm6,(%rax)		# save last round key
-	movdqa	%xmm7,(%rsp)
-
-	movdqu	(%rbx), @XMM[15]	# load IV
-	sub	\$8,$len
-.Lcbc_dec_loop:
-	movdqu	0x00($inp), @XMM[0]	# load input
-	movdqu	0x10($inp), @XMM[1]
-	movdqu	0x20($inp), @XMM[2]
-	movdqu	0x30($inp), @XMM[3]
-	movdqu	0x40($inp), @XMM[4]
-	movdqu	0x50($inp), @XMM[5]
-	mov	%rsp, %rax		# pass key schedule
-	movdqu	0x60($inp), @XMM[6]
-	mov	%edx,%r10d		# pass rounds
-	movdqu	0x70($inp), @XMM[7]
-	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
-
-	call	_bsaes_decrypt8
-
-	pxor	0x20(%rbp), @XMM[0]	# ^= IV
-	movdqu	0x00($inp), @XMM[8]	# re-load input
-	movdqu	0x10($inp), @XMM[9]
-	pxor	@XMM[8], @XMM[1]
-	movdqu	0x20($inp), @XMM[10]
-	pxor	@XMM[9], @XMM[6]
-	movdqu	0x30($inp), @XMM[11]
-	pxor	@XMM[10], @XMM[4]
-	movdqu	0x40($inp), @XMM[12]
-	pxor	@XMM[11], @XMM[2]
-	movdqu	0x50($inp), @XMM[13]
-	pxor	@XMM[12], @XMM[7]
-	movdqu	0x60($inp), @XMM[14]
-	pxor	@XMM[13], @XMM[3]
-	movdqu	0x70($inp), @XMM[15]	# IV
-	pxor	@XMM[14], @XMM[5]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	lea	0x80($inp), $inp
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[3], 0x60($out)
-	movdqu	@XMM[5], 0x70($out)
-	lea	0x80($out), $out
-	sub	\$8,$len
-	jnc	.Lcbc_dec_loop
-
-	add	\$8,$len
-	jz	.Lcbc_dec_done
-
-	movdqu	0x00($inp), @XMM[0]	# load input
-	mov	%rsp, %rax		# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-	cmp	\$2,$len
-	jb	.Lcbc_dec_one
-	movdqu	0x10($inp), @XMM[1]
-	je	.Lcbc_dec_two
-	movdqu	0x20($inp), @XMM[2]
-	cmp	\$4,$len
-	jb	.Lcbc_dec_three
-	movdqu	0x30($inp), @XMM[3]
-	je	.Lcbc_dec_four
-	movdqu	0x40($inp), @XMM[4]
-	cmp	\$6,$len
-	jb	.Lcbc_dec_five
-	movdqu	0x50($inp), @XMM[5]
-	je	.Lcbc_dec_six
-	movdqu	0x60($inp), @XMM[6]
-	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
-	call	_bsaes_decrypt8
-	pxor	0x20(%rbp), @XMM[0]	# ^= IV
-	movdqu	0x00($inp), @XMM[8]	# re-load input
-	movdqu	0x10($inp), @XMM[9]
-	pxor	@XMM[8], @XMM[1]
-	movdqu	0x20($inp), @XMM[10]
-	pxor	@XMM[9], @XMM[6]
-	movdqu	0x30($inp), @XMM[11]
-	pxor	@XMM[10], @XMM[4]
-	movdqu	0x40($inp), @XMM[12]
-	pxor	@XMM[11], @XMM[2]
-	movdqu	0x50($inp), @XMM[13]
-	pxor	@XMM[12], @XMM[7]
-	movdqu	0x60($inp), @XMM[15]	# IV
-	pxor	@XMM[13], @XMM[3]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[3], 0x60($out)
-	jmp	.Lcbc_dec_done
-.align	16
-.Lcbc_dec_six:
-	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
-	call	_bsaes_decrypt8
-	pxor	0x20(%rbp), @XMM[0]	# ^= IV
-	movdqu	0x00($inp), @XMM[8]	# re-load input
-	movdqu	0x10($inp), @XMM[9]
-	pxor	@XMM[8], @XMM[1]
-	movdqu	0x20($inp), @XMM[10]
-	pxor	@XMM[9], @XMM[6]
-	movdqu	0x30($inp), @XMM[11]
-	pxor	@XMM[10], @XMM[4]
-	movdqu	0x40($inp), @XMM[12]
-	pxor	@XMM[11], @XMM[2]
-	movdqu	0x50($inp), @XMM[15]	# IV
-	pxor	@XMM[12], @XMM[7]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	jmp	.Lcbc_dec_done
-.align	16
-.Lcbc_dec_five:
-	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
-	call	_bsaes_decrypt8
-	pxor	0x20(%rbp), @XMM[0]	# ^= IV
-	movdqu	0x00($inp), @XMM[8]	# re-load input
-	movdqu	0x10($inp), @XMM[9]
-	pxor	@XMM[8], @XMM[1]
-	movdqu	0x20($inp), @XMM[10]
-	pxor	@XMM[9], @XMM[6]
-	movdqu	0x30($inp), @XMM[11]
-	pxor	@XMM[10], @XMM[4]
-	movdqu	0x40($inp), @XMM[15]	# IV
-	pxor	@XMM[11], @XMM[2]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	jmp	.Lcbc_dec_done
-.align	16
-.Lcbc_dec_four:
-	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
-	call	_bsaes_decrypt8
-	pxor	0x20(%rbp), @XMM[0]	# ^= IV
-	movdqu	0x00($inp), @XMM[8]	# re-load input
-	movdqu	0x10($inp), @XMM[9]
-	pxor	@XMM[8], @XMM[1]
-	movdqu	0x20($inp), @XMM[10]
-	pxor	@XMM[9], @XMM[6]
-	movdqu	0x30($inp), @XMM[15]	# IV
-	pxor	@XMM[10], @XMM[4]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	jmp	.Lcbc_dec_done
-.align	16
-.Lcbc_dec_three:
-	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
-	call	_bsaes_decrypt8
-	pxor	0x20(%rbp), @XMM[0]	# ^= IV
-	movdqu	0x00($inp), @XMM[8]	# re-load input
-	movdqu	0x10($inp), @XMM[9]
-	pxor	@XMM[8], @XMM[1]
-	movdqu	0x20($inp), @XMM[15]	# IV
-	pxor	@XMM[9], @XMM[6]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	jmp	.Lcbc_dec_done
-.align	16
-.Lcbc_dec_two:
-	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
-	call	_bsaes_decrypt8
-	pxor	0x20(%rbp), @XMM[0]	# ^= IV
-	movdqu	0x00($inp), @XMM[8]	# re-load input
-	movdqu	0x10($inp), @XMM[15]	# IV
-	pxor	@XMM[8], @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	jmp	.Lcbc_dec_done
-.align	16
-.Lcbc_dec_one:
-	lea	($inp), $arg1
-	lea	0x20(%rbp), $arg2	# buffer output
-	lea	($key), $arg3
-	call	asm_AES_decrypt		# doesn't touch %xmm
-	pxor	0x20(%rbp), @XMM[15]	# ^= IV
-	movdqu	@XMM[15], ($out)	# write output
-	movdqa	@XMM[0], @XMM[15]	# IV
-
-.Lcbc_dec_done:
-	movdqu	@XMM[15], (%rbx)	# return IV
-	lea	(%rsp), %rax
-	pxor	%xmm0, %xmm0
-.Lcbc_dec_bzero:			# wipe key schedule [if any]
-	movdqa	%xmm0, 0x00(%rax)
-	movdqa	%xmm0, 0x10(%rax)
-	lea	0x20(%rax), %rax
-	cmp	%rax, %rbp
-	ja	.Lcbc_dec_bzero
-
-	lea	(%rbp),%rsp		# restore %rsp
-___
-$code.=<<___ if ($win64);
-	movaps	0x40(%rbp), %xmm6
-	movaps	0x50(%rbp), %xmm7
-	movaps	0x60(%rbp), %xmm8
-	movaps	0x70(%rbp), %xmm9
-	movaps	0x80(%rbp), %xmm10
-	movaps	0x90(%rbp), %xmm11
-	movaps	0xa0(%rbp), %xmm12
-	movaps	0xb0(%rbp), %xmm13
-	movaps	0xc0(%rbp), %xmm14
-	movaps	0xd0(%rbp), %xmm15
-	lea	0xa0(%rbp), %rsp
-___
-$code.=<<___;
-	mov	0x48(%rsp), %r15
-	mov	0x50(%rsp), %r14
-	mov	0x58(%rsp), %r13
-	mov	0x60(%rsp), %r12
-	mov	0x68(%rsp), %rbx
-	mov	0x70(%rsp), %rax
-	lea	0x78(%rsp), %rsp
-	mov	%rax, %rbp
-.Lcbc_dec_epilogue:
-	ret
-.size	bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
-
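bsaes_cbc_encrypt only takes the bit-sliced path for decryption; encryption and short inputs tail-call asm_AES_cbc_encrypt. The eight-block loop above implements standard CBC chaining: every decrypted block is XORed with the preceding ciphertext block, and the final ciphertext block is written back through %rbx as the next IV. A scalar sketch, where $decrypt_block stands for a hypothetical raw one-block AES^-1 callback:

    # Minimal sketch of the chaining in .Lcbc_dec_loop; $decrypt_block is
    # a hypothetical one-block decryption callback, not part of this file.
    sub cbc_decrypt_blocks {
        my ($decrypt_block, $iv, @ciphertext) = @_;    # 16-byte strings
        my @plaintext;
        for my $c (@ciphertext) {
            push @plaintext, $decrypt_block->($c) ^ $iv;  # P[i] = D(C[i]) ^ C[i-1]
            $iv = $c;                                     # carry ciphertext forward
        }
        return ($iv, @plaintext);                         # IV is returned, as above
    }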
-.globl	bsaes_ctr32_encrypt_blocks
-.type	bsaes_ctr32_encrypt_blocks,\@abi-omnipotent
-.align	16
-bsaes_ctr32_encrypt_blocks:
-	mov	%rsp, %rax
-.Lctr_enc_prologue:
-	push	%rbp
-	push	%rbx
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	lea	-0x48(%rsp), %rsp
-___
-$code.=<<___ if ($win64);
-	mov	0xa0(%rsp),$arg5	# pull ivp
-	lea	-0xa0(%rsp), %rsp
-	movaps	%xmm6, 0x40(%rsp)
-	movaps	%xmm7, 0x50(%rsp)
-	movaps	%xmm8, 0x60(%rsp)
-	movaps	%xmm9, 0x70(%rsp)
-	movaps	%xmm10, 0x80(%rsp)
-	movaps	%xmm11, 0x90(%rsp)
-	movaps	%xmm12, 0xa0(%rsp)
-	movaps	%xmm13, 0xb0(%rsp)
-	movaps	%xmm14, 0xc0(%rsp)
-	movaps	%xmm15, 0xd0(%rsp)
-.Lctr_enc_body:
-___
-$code.=<<___;
-	mov	%rsp, %rbp		# backup %rsp
-	movdqu	($arg5), %xmm0		# load counter
-	mov	240($arg4), %eax	# rounds
-	mov	$arg1, $inp		# backup arguments
-	mov	$arg2, $out
-	mov	$arg3, $len
-	mov	$arg4, $key
-	movdqa	%xmm0, 0x20(%rbp)	# copy counter
-	cmp	\$8, $arg3
-	jb	.Lctr_enc_short
-
-	mov	%eax, %ebx		# rounds
-	shl	\$7, %rax		# 128 bytes per inner round key
-	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
-	sub	%rax, %rsp
-
-	mov	%rsp, %rax		# pass key schedule
-	mov	$key, %rcx		# pass key
-	mov	%ebx, %r10d		# pass rounds
-	call	_bsaes_key_convert
-	pxor	%xmm6,%xmm7		# fix up last round key
-	movdqa	%xmm7,(%rax)		# save last round key
-
-	movdqa	(%rsp), @XMM[9]		# load round0 key
-	lea	.LADD1(%rip), %r11
-	movdqa	0x20(%rbp), @XMM[0]	# counter copy
-	movdqa	-0x20(%r11), @XMM[8]	# .LSWPUP
-	pshufb	@XMM[8], @XMM[9]	# byte swap upper part
-	pshufb	@XMM[8], @XMM[0]
-	movdqa	@XMM[9], (%rsp)		# save adjusted round0 key
-	jmp	.Lctr_enc_loop
-.align	16
-.Lctr_enc_loop:
-	movdqa	@XMM[0], 0x20(%rbp)	# save counter
-	movdqa	@XMM[0], @XMM[1]	# prepare 8 counter values
-	movdqa	@XMM[0], @XMM[2]
-	paddd	0x00(%r11), @XMM[1]	# .LADD1
-	movdqa	@XMM[0], @XMM[3]
-	paddd	0x10(%r11), @XMM[2]	# .LADD2
-	movdqa	@XMM[0], @XMM[4]
-	paddd	0x20(%r11), @XMM[3]	# .LADD3
-	movdqa	@XMM[0], @XMM[5]
-	paddd	0x30(%r11), @XMM[4]	# .LADD4
-	movdqa	@XMM[0], @XMM[6]
-	paddd	0x40(%r11), @XMM[5]	# .LADD5
-	movdqa	@XMM[0], @XMM[7]
-	paddd	0x50(%r11), @XMM[6]	# .LADD6
-	paddd	0x60(%r11), @XMM[7]	# .LADD7
-
-	# Borrow prologue from _bsaes_encrypt8 to use the opportunity
-	# to flip byte order in 32-bit counter
-	movdqa	(%rsp), @XMM[9]		# round 0 key
-	lea	0x10(%rsp), %rax	# pass key schedule
-	movdqa	-0x10(%r11), @XMM[8]	# .LSWPUPM0SR
-	pxor	@XMM[9], @XMM[0]	# xor with round0 key
-	pxor	@XMM[9], @XMM[1]
-	 pshufb	@XMM[8], @XMM[0]
-	pxor	@XMM[9], @XMM[2]
-	 pshufb	@XMM[8], @XMM[1]
-	pxor	@XMM[9], @XMM[3]
-	 pshufb	@XMM[8], @XMM[2]
-	pxor	@XMM[9], @XMM[4]
-	 pshufb	@XMM[8], @XMM[3]
-	pxor	@XMM[9], @XMM[5]
-	 pshufb	@XMM[8], @XMM[4]
-	pxor	@XMM[9], @XMM[6]
-	 pshufb	@XMM[8], @XMM[5]
-	pxor	@XMM[9], @XMM[7]
-	 pshufb	@XMM[8], @XMM[6]
-	lea	.LBS0(%rip), %r11	# constants table
-	 pshufb	@XMM[8], @XMM[7]
-	mov	%ebx,%r10d		# pass rounds
-
-	call	_bsaes_encrypt8_bitslice
-
-	sub	\$8,$len
-	jc	.Lctr_enc_loop_done
-
-	movdqu	0x00($inp), @XMM[8]	# load input
-	movdqu	0x10($inp), @XMM[9]
-	movdqu	0x20($inp), @XMM[10]
-	movdqu	0x30($inp), @XMM[11]
-	movdqu	0x40($inp), @XMM[12]
-	movdqu	0x50($inp), @XMM[13]
-	movdqu	0x60($inp), @XMM[14]
-	movdqu	0x70($inp), @XMM[15]
-	lea	0x80($inp),$inp
-	pxor	@XMM[0], @XMM[8]
-	movdqa	0x20(%rbp), @XMM[0]	# load counter
-	pxor	@XMM[9], @XMM[1]
-	movdqu	@XMM[8], 0x00($out)	# write output
-	pxor	@XMM[10], @XMM[4]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	@XMM[11], @XMM[6]
-	movdqu	@XMM[4], 0x20($out)
-	pxor	@XMM[12], @XMM[3]
-	movdqu	@XMM[6], 0x30($out)
-	pxor	@XMM[13], @XMM[7]
-	movdqu	@XMM[3], 0x40($out)
-	pxor	@XMM[14], @XMM[2]
-	movdqu	@XMM[7], 0x50($out)
-	pxor	@XMM[15], @XMM[5]
-	movdqu	@XMM[2], 0x60($out)
-	lea	.LADD1(%rip), %r11
-	movdqu	@XMM[5], 0x70($out)
-	lea	0x80($out), $out
-	paddd	0x70(%r11), @XMM[0]	# .LADD8
-	jnz	.Lctr_enc_loop
-
-	jmp	.Lctr_enc_done
-.align	16
-.Lctr_enc_loop_done:
-	add	\$8, $len
-	movdqu	0x00($inp), @XMM[8]	# load input
-	pxor	@XMM[8], @XMM[0]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	cmp	\$2,$len
-	jb	.Lctr_enc_done
-	movdqu	0x10($inp), @XMM[9]
-	pxor	@XMM[9], @XMM[1]
-	movdqu	@XMM[1], 0x10($out)
-	je	.Lctr_enc_done
-	movdqu	0x20($inp), @XMM[10]
-	pxor	@XMM[10], @XMM[4]
-	movdqu	@XMM[4], 0x20($out)
-	cmp	\$4,$len
-	jb	.Lctr_enc_done
-	movdqu	0x30($inp), @XMM[11]
-	pxor	@XMM[11], @XMM[6]
-	movdqu	@XMM[6], 0x30($out)
-	je	.Lctr_enc_done
-	movdqu	0x40($inp), @XMM[12]
-	pxor	@XMM[12], @XMM[3]
-	movdqu	@XMM[3], 0x40($out)
-	cmp	\$6,$len
-	jb	.Lctr_enc_done
-	movdqu	0x50($inp), @XMM[13]
-	pxor	@XMM[13], @XMM[7]
-	movdqu	@XMM[7], 0x50($out)
-	je	.Lctr_enc_done
-	movdqu	0x60($inp), @XMM[14]
-	pxor	@XMM[14], @XMM[2]
-	movdqu	@XMM[2], 0x60($out)
-	jmp	.Lctr_enc_done
-
-.align	16
-.Lctr_enc_short:
-	lea	0x20(%rbp), $arg1
-	lea	0x30(%rbp), $arg2
-	lea	($key), $arg3
-	call	asm_AES_encrypt
-	movdqu	($inp), @XMM[1]
-	lea	16($inp), $inp
-	mov	0x2c(%rbp), %eax	# load 32-bit counter
-	bswap	%eax
-	pxor	0x30(%rbp), @XMM[1]
-	inc	%eax			# increment
-	movdqu	@XMM[1], ($out)
-	bswap	%eax
-	lea	16($out), $out
-	mov	%eax, 0x2c(%rsp)	# save 32-bit counter
-	dec	$len
-	jnz	.Lctr_enc_short
-
-.Lctr_enc_done:
-	lea	(%rsp), %rax
-	pxor	%xmm0, %xmm0
-.Lctr_enc_bzero:			# wipe key schedule [if any]
-	movdqa	%xmm0, 0x00(%rax)
-	movdqa	%xmm0, 0x10(%rax)
-	lea	0x20(%rax), %rax
-	cmp	%rax, %rbp
-	ja	.Lctr_enc_bzero
-
-	lea	(%rbp),%rsp		# restore %rsp
-___
-$code.=<<___ if ($win64);
-	movaps	0x40(%rbp), %xmm6
-	movaps	0x50(%rbp), %xmm7
-	movaps	0x60(%rbp), %xmm8
-	movaps	0x70(%rbp), %xmm9
-	movaps	0x80(%rbp), %xmm10
-	movaps	0x90(%rbp), %xmm11
-	movaps	0xa0(%rbp), %xmm12
-	movaps	0xb0(%rbp), %xmm13
-	movaps	0xc0(%rbp), %xmm14
-	movaps	0xd0(%rbp), %xmm15
-	lea	0xa0(%rbp), %rsp
-___
-$code.=<<___;
-	mov	0x48(%rsp), %r15
-	mov	0x50(%rsp), %r14
-	mov	0x58(%rsp), %r13
-	mov	0x60(%rsp), %r12
-	mov	0x68(%rsp), %rbx
-	mov	0x70(%rsp), %rax
-	lea	0x78(%rsp), %rsp
-	mov	%rax, %rbp
-.Lctr_enc_epilogue:
-	ret
-.size	bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
-___
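In the CTR code above, the counter block is kept byte-swapped (via .LSWPUP) so that the eight per-block counters can be produced with plain paddd against .LADD1..LADD7, while the single-block .Lctr_enc_short path updates the last dword of the IV directly with bswap/inc/bswap. Either way the arithmetic amounts to a big-endian 32-bit increment; a sketch:

    # Sketch of the counter update in .Lctr_enc_short: the last four
    # bytes of the 16-byte counter block are a big-endian 32-bit counter.
    sub ctr32_increment {
        my ($block) = @_;                               # 16-byte string
        my $ctr = unpack 'N', substr($block, 12, 4);    # big-endian load
        substr($block, 12, 4) = pack 'N', ($ctr + 1) & 0xffffffff;
        return $block;
    }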
-######################################################################
-# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
-#	const AES_KEY *key1, const AES_KEY *key2,
-#	const unsigned char iv[16]);
-#
-my ($twmask,$twres,$twtmp)=@XMM[13..15];
-$arg6=~s/d$//;
-
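Each XTS tweak is the previous tweak multiplied by x in GF(2^128) with reduction polynomial x^128 + x^7 + x^2 + x + 1, which is where the 0x87 in .Lxts_magic comes from. The pshufd/pand/paddq/pxor sequence in the loops below performs this doubling branch-free; a scalar model on two 64-bit little-endian halves:

    # Scalar model of one tweak update (tweak *= x in GF(2^128)); the
    # SIMD code below computes the same thing without branches.
    sub xts_double {
        my ($lo, $hi) = @_;                            # little-endian halves
        my $carry = $hi >> 63;                         # bit shifted off the top
        $hi = (($hi & 0x7fffffffffffffff) << 1) | ($lo >> 63);
        $lo = (($lo & 0x7fffffffffffffff) << 1) ^ ($carry ? 0x87 : 0);
        return ($lo, $hi);
    }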
-$code.=<<___;
-.globl	bsaes_xts_encrypt
-.type	bsaes_xts_encrypt,\@abi-omnipotent
-.align	16
-bsaes_xts_encrypt:
-	mov	%rsp, %rax
-.Lxts_enc_prologue:
-	push	%rbp
-	push	%rbx
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	lea	-0x48(%rsp), %rsp
-___
-$code.=<<___ if ($win64);
-	mov	0xa0(%rsp),$arg5	# pull key2
-	mov	0xa8(%rsp),$arg6	# pull ivp
-	lea	-0xa0(%rsp), %rsp
-	movaps	%xmm6, 0x40(%rsp)
-	movaps	%xmm7, 0x50(%rsp)
-	movaps	%xmm8, 0x60(%rsp)
-	movaps	%xmm9, 0x70(%rsp)
-	movaps	%xmm10, 0x80(%rsp)
-	movaps	%xmm11, 0x90(%rsp)
-	movaps	%xmm12, 0xa0(%rsp)
-	movaps	%xmm13, 0xb0(%rsp)
-	movaps	%xmm14, 0xc0(%rsp)
-	movaps	%xmm15, 0xd0(%rsp)
-.Lxts_enc_body:
-___
-$code.=<<___;
-	mov	%rsp, %rbp		# backup %rsp
-	mov	$arg1, $inp		# backup arguments
-	mov	$arg2, $out
-	mov	$arg3, $len
-	mov	$arg4, $key
-
-	lea	($arg6), $arg1
-	lea	0x20(%rbp), $arg2
-	lea	($arg5), $arg3
-	call	asm_AES_encrypt		# generate initial tweak
-
-	mov	240($key), %eax		# rounds
-	mov	$len, %rbx		# backup $len
-
-	mov	%eax, %edx		# rounds
-	shl	\$7, %rax		# 128 bytes per inner round key
-	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
-	sub	%rax, %rsp
-
-	mov	%rsp, %rax		# pass key schedule
-	mov	$key, %rcx		# pass key
-	mov	%edx, %r10d		# pass rounds
-	call	_bsaes_key_convert
-	pxor	%xmm6, %xmm7		# fix up last round key
-	movdqa	%xmm7, (%rax)		# save last round key
-
-	and	\$-16, $len
-	sub	\$0x80, %rsp		# place for tweak[8]
-	movdqa	0x20(%rbp), @XMM[7]	# initial tweak
-
-	pxor	$twtmp, $twtmp
-	movdqa	.Lxts_magic(%rip), $twmask
-	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
-
-	sub	\$0x80, $len
-	jc	.Lxts_enc_short
-	jmp	.Lxts_enc_loop
-
-.align	16
-.Lxts_enc_loop:
-___
-    for ($i=0;$i<7;$i++) {
-    $code.=<<___;
-	pshufd	\$0x13, $twtmp, $twres
-	pxor	$twtmp, $twtmp
-	movdqa	@XMM[7], @XMM[$i]
-	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
-	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
-	pand	$twmask, $twres		# isolate carry and residue
-	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
-	pxor	$twres, @XMM[7]
-___
-    $code.=<<___ if ($i>=1);
-	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
-___
-    $code.=<<___ if ($i>=2);
-	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
-___
-    }
-$code.=<<___;
-	movdqu	0x60($inp), @XMM[8+6]
-	pxor	@XMM[8+5], @XMM[5]
-	movdqu	0x70($inp), @XMM[8+7]
-	lea	0x80($inp), $inp
-	movdqa	@XMM[7], 0x70(%rsp)
-	pxor	@XMM[8+6], @XMM[6]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	pxor	@XMM[8+7], @XMM[7]
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_encrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[4]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[6]
-	movdqu	@XMM[4], 0x20($out)
-	pxor	0x40(%rsp), @XMM[3]
-	movdqu	@XMM[6], 0x30($out)
-	pxor	0x50(%rsp), @XMM[7]
-	movdqu	@XMM[3], 0x40($out)
-	pxor	0x60(%rsp), @XMM[2]
-	movdqu	@XMM[7], 0x50($out)
-	pxor	0x70(%rsp), @XMM[5]
-	movdqu	@XMM[2], 0x60($out)
-	movdqu	@XMM[5], 0x70($out)
-	lea	0x80($out), $out
-
-	movdqa	0x70(%rsp), @XMM[7]	# prepare next iteration tweak
-	pxor	$twtmp, $twtmp
-	movdqa	.Lxts_magic(%rip), $twmask
-	pcmpgtd	@XMM[7], $twtmp
-	pshufd	\$0x13, $twtmp, $twres
-	pxor	$twtmp, $twtmp
-	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
-	pand	$twmask, $twres		# isolate carry and residue
-	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
-	pxor	$twres, @XMM[7]
-
-	sub	\$0x80,$len
-	jnc	.Lxts_enc_loop
-
-.Lxts_enc_short:
-	add	\$0x80, $len
-	jz	.Lxts_enc_done
-___
-    for ($i=0;$i<7;$i++) {
-    $code.=<<___;
-	pshufd	\$0x13, $twtmp, $twres
-	pxor	$twtmp, $twtmp
-	movdqa	@XMM[7], @XMM[$i]
-	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
-	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
-	pand	$twmask, $twres		# isolate carry and residue
-	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
-	pxor	$twres, @XMM[7]
-___
-    $code.=<<___ if ($i>=1);
-	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
-	cmp	\$`0x10*$i`,$len
-	je	.Lxts_enc_$i
-___
-    $code.=<<___ if ($i>=2);
-	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
-___
-    }
-$code.=<<___;
-	movdqu	0x60($inp), @XMM[8+6]
-	pxor	@XMM[8+5], @XMM[5]
-	movdqa	@XMM[7], 0x70(%rsp)
-	lea	0x70($inp), $inp
-	pxor	@XMM[8+6], @XMM[6]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_encrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[4]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[6]
-	movdqu	@XMM[4], 0x20($out)
-	pxor	0x40(%rsp), @XMM[3]
-	movdqu	@XMM[6], 0x30($out)
-	pxor	0x50(%rsp), @XMM[7]
-	movdqu	@XMM[3], 0x40($out)
-	pxor	0x60(%rsp), @XMM[2]
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[2], 0x60($out)
-	lea	0x70($out), $out
-
-	movdqa	0x70(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_enc_done
-.align	16
-.Lxts_enc_6:
-	pxor	@XMM[8+4], @XMM[4]
-	lea	0x60($inp), $inp
-	pxor	@XMM[8+5], @XMM[5]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_encrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[4]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[6]
-	movdqu	@XMM[4], 0x20($out)
-	pxor	0x40(%rsp), @XMM[3]
-	movdqu	@XMM[6], 0x30($out)
-	pxor	0x50(%rsp), @XMM[7]
-	movdqu	@XMM[3], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	lea	0x60($out), $out
-
-	movdqa	0x60(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_enc_done
-.align	16
-.Lxts_enc_5:
-	pxor	@XMM[8+3], @XMM[3]
-	lea	0x50($inp), $inp
-	pxor	@XMM[8+4], @XMM[4]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_encrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[4]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[6]
-	movdqu	@XMM[4], 0x20($out)
-	pxor	0x40(%rsp), @XMM[3]
-	movdqu	@XMM[6], 0x30($out)
-	movdqu	@XMM[3], 0x40($out)
-	lea	0x50($out), $out
-
-	movdqa	0x50(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_enc_done
-.align	16
-.Lxts_enc_4:
-	pxor	@XMM[8+2], @XMM[2]
-	lea	0x40($inp), $inp
-	pxor	@XMM[8+3], @XMM[3]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_encrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[4]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[6]
-	movdqu	@XMM[4], 0x20($out)
-	movdqu	@XMM[6], 0x30($out)
-	lea	0x40($out), $out
-
-	movdqa	0x40(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_enc_done
-.align	16
-.Lxts_enc_3:
-	pxor	@XMM[8+1], @XMM[1]
-	lea	0x30($inp), $inp
-	pxor	@XMM[8+2], @XMM[2]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_encrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[4]
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[4], 0x20($out)
-	lea	0x30($out), $out
-
-	movdqa	0x30(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_enc_done
-.align	16
-.Lxts_enc_2:
-	pxor	@XMM[8+0], @XMM[0]
-	lea	0x20($inp), $inp
-	pxor	@XMM[8+1], @XMM[1]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_encrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	lea	0x20($out), $out
-
-	movdqa	0x20(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_enc_done
-.align	16
-.Lxts_enc_1:
-	pxor	@XMM[0], @XMM[8]
-	lea	0x10($inp), $inp
-	movdqa	@XMM[8], 0x20(%rbp)
-	lea	0x20(%rbp), $arg1
-	lea	0x20(%rbp), $arg2
-	lea	($key), $arg3
-	call	asm_AES_encrypt		# doesn't touch %xmm
-	pxor	0x20(%rbp), @XMM[0]	# ^= tweak[]
-	#pxor	@XMM[8], @XMM[0]
-	#lea	0x80(%rsp), %rax	# pass key schedule
-	#mov	%edx, %r10d		# pass rounds
-	#call	_bsaes_encrypt8
-	#pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	lea	0x10($out), $out
-
-	movdqa	0x10(%rsp), @XMM[7]	# next iteration tweak
-
-.Lxts_enc_done:
-	and	\$15, %ebx
-	jz	.Lxts_enc_ret
-	mov	$out, %rdx
-
-.Lxts_enc_steal:
-	movzb	($inp), %eax
-	movzb	-16(%rdx), %ecx
-	lea	1($inp), $inp
-	mov	%al, -16(%rdx)
-	mov	%cl, 0(%rdx)
-	lea	1(%rdx), %rdx
-	sub	\$1,%ebx
-	jnz	.Lxts_enc_steal
-
-	movdqu	-16($out), @XMM[0]
-	lea	0x20(%rbp), $arg1
-	pxor	@XMM[7], @XMM[0]
-	lea	0x20(%rbp), $arg2
-	movdqa	@XMM[0], 0x20(%rbp)
-	lea	($key), $arg3
-	call	asm_AES_encrypt		# doesn't touch %xmm
-	pxor	0x20(%rbp), @XMM[7]
-	movdqu	@XMM[7], -16($out)
-
-.Lxts_enc_ret:
-	lea	(%rsp), %rax
-	pxor	%xmm0, %xmm0
-.Lxts_enc_bzero:			# wipe key schedule [if any]
-	movdqa	%xmm0, 0x00(%rax)
-	movdqa	%xmm0, 0x10(%rax)
-	lea	0x20(%rax), %rax
-	cmp	%rax, %rbp
-	ja	.Lxts_enc_bzero
-
-	lea	(%rbp),%rsp		# restore %rsp
-___
-$code.=<<___ if ($win64);
-	movaps	0x40(%rbp), %xmm6
-	movaps	0x50(%rbp), %xmm7
-	movaps	0x60(%rbp), %xmm8
-	movaps	0x70(%rbp), %xmm9
-	movaps	0x80(%rbp), %xmm10
-	movaps	0x90(%rbp), %xmm11
-	movaps	0xa0(%rbp), %xmm12
-	movaps	0xb0(%rbp), %xmm13
-	movaps	0xc0(%rbp), %xmm14
-	movaps	0xd0(%rbp), %xmm15
-	lea	0xa0(%rbp), %rsp
-___
-$code.=<<___;
-	mov	0x48(%rsp), %r15
-	mov	0x50(%rsp), %r14
-	mov	0x58(%rsp), %r13
-	mov	0x60(%rsp), %r12
-	mov	0x68(%rsp), %rbx
-	mov	0x70(%rsp), %rax
-	lea	0x78(%rsp), %rsp
-	mov	%rax, %rbp
-.Lxts_enc_epilogue:
-	ret
-.size	bsaes_xts_encrypt,.-bsaes_xts_encrypt
-
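When the input length is not a multiple of 16, .Lxts_enc_steal above applies standard XTS ciphertext stealing: the trailing partial plaintext is padded out with the leading bytes of the last full ciphertext block, re-encrypted under the final tweak, and the borrowed bytes become the short final ciphertext. A sketch, where $encrypt_last is a hypothetical callback encrypting one block under the final tweak (the asm_AES_encrypt call bracketed by tweak XORs):

    # Sketch of .Lxts_enc_steal for a trailing partial block of $t bytes.
    sub xts_steal_encrypt {
        my ($encrypt_last, $last_ct, $tail_pt) = @_;
        my $t = length $tail_pt;                        # 1..15
        my $short_ct = substr($last_ct, 0, $t);         # stolen bytes -> C[m]
        my $block    = $tail_pt . substr($last_ct, $t); # refilled to 16 bytes
        return ($encrypt_last->($block), $short_ct);    # (C[m-1], C[m])
    }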
-.globl	bsaes_xts_decrypt
-.type	bsaes_xts_decrypt,\@abi-omnipotent
-.align	16
-bsaes_xts_decrypt:
-	mov	%rsp, %rax
-.Lxts_dec_prologue:
-	push	%rbp
-	push	%rbx
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	lea	-0x48(%rsp), %rsp
-___
-$code.=<<___ if ($win64);
-	mov	0xa0(%rsp),$arg5	# pull key2
-	mov	0xa8(%rsp),$arg6	# pull ivp
-	lea	-0xa0(%rsp), %rsp
-	movaps	%xmm6, 0x40(%rsp)
-	movaps	%xmm7, 0x50(%rsp)
-	movaps	%xmm8, 0x60(%rsp)
-	movaps	%xmm9, 0x70(%rsp)
-	movaps	%xmm10, 0x80(%rsp)
-	movaps	%xmm11, 0x90(%rsp)
-	movaps	%xmm12, 0xa0(%rsp)
-	movaps	%xmm13, 0xb0(%rsp)
-	movaps	%xmm14, 0xc0(%rsp)
-	movaps	%xmm15, 0xd0(%rsp)
-.Lxts_dec_body:
-___
-$code.=<<___;
-	mov	%rsp, %rbp		# backup %rsp
-	mov	$arg1, $inp		# backup arguments
-	mov	$arg2, $out
-	mov	$arg3, $len
-	mov	$arg4, $key
-
-	lea	($arg6), $arg1
-	lea	0x20(%rbp), $arg2
-	lea	($arg5), $arg3
-	call	asm_AES_encrypt		# generate initial tweak
-
-	mov	240($key), %eax		# rounds
-	mov	$len, %rbx		# backup $len
-
-	mov	%eax, %edx		# rounds
-	shl	\$7, %rax		# 128 bytes per inner round key
-	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
-	sub	%rax, %rsp
-
-	mov	%rsp, %rax		# pass key schedule
-	mov	$key, %rcx		# pass key
-	mov	%edx, %r10d		# pass rounds
-	call	_bsaes_key_convert
-	pxor	(%rsp), %xmm7		# fix up round 0 key
-	movdqa	%xmm6, (%rax)		# save last round key
-	movdqa	%xmm7, (%rsp)
-
-	xor	%eax, %eax		# if ($len%16) len-=16;
-	and	\$-16, $len
-	test	\$15, %ebx
-	setnz	%al
-	shl	\$4, %rax
-	sub	%rax, $len
-
-	sub	\$0x80, %rsp		# place for tweak[8]
-	movdqa	0x20(%rbp), @XMM[7]	# initial tweak
-
-	pxor	$twtmp, $twtmp
-	movdqa	.Lxts_magic(%rip), $twmask
-	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
-
-	sub	\$0x80, $len
-	jc	.Lxts_dec_short
-	jmp	.Lxts_dec_loop
-
-.align	16
-.Lxts_dec_loop:
-___
-    for ($i=0;$i<7;$i++) {
-    $code.=<<___;
-	pshufd	\$0x13, $twtmp, $twres
-	pxor	$twtmp, $twtmp
-	movdqa	@XMM[7], @XMM[$i]
-	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
-	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
-	pand	$twmask, $twres		# isolate carry and residue
-	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
-	pxor	$twres, @XMM[7]
-___
-    $code.=<<___ if ($i>=1);
-	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
-___
-    $code.=<<___ if ($i>=2);
-	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
-___
-    }
-$code.=<<___;
-	movdqu	0x60($inp), @XMM[8+6]
-	pxor	@XMM[8+5], @XMM[5]
-	movdqu	0x70($inp), @XMM[8+7]
-	lea	0x80($inp), $inp
-	movdqa	@XMM[7], 0x70(%rsp)
-	pxor	@XMM[8+6], @XMM[6]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	pxor	@XMM[8+7], @XMM[7]
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_decrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[6]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[4]
-	movdqu	@XMM[6], 0x20($out)
-	pxor	0x40(%rsp), @XMM[2]
-	movdqu	@XMM[4], 0x30($out)
-	pxor	0x50(%rsp), @XMM[7]
-	movdqu	@XMM[2], 0x40($out)
-	pxor	0x60(%rsp), @XMM[3]
-	movdqu	@XMM[7], 0x50($out)
-	pxor	0x70(%rsp), @XMM[5]
-	movdqu	@XMM[3], 0x60($out)
-	movdqu	@XMM[5], 0x70($out)
-	lea	0x80($out), $out
-
-	movdqa	0x70(%rsp), @XMM[7]	# prepare next iteration tweak
-	pxor	$twtmp, $twtmp
-	movdqa	.Lxts_magic(%rip), $twmask
-	pcmpgtd	@XMM[7], $twtmp
-	pshufd	\$0x13, $twtmp, $twres
-	pxor	$twtmp, $twtmp
-	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
-	pand	$twmask, $twres		# isolate carry and residue
-	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
-	pxor	$twres, @XMM[7]
-
-	sub	\$0x80,$len
-	jnc	.Lxts_dec_loop
-
-.Lxts_dec_short:
-	add	\$0x80, $len
-	jz	.Lxts_dec_done
-___
-    for ($i=0;$i<7;$i++) {
-    $code.=<<___;
-	pshufd	\$0x13, $twtmp, $twres
-	pxor	$twtmp, $twtmp
-	movdqa	@XMM[7], @XMM[$i]
-	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
-	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
-	pand	$twmask, $twres		# isolate carry and residue
-	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
-	pxor	$twres, @XMM[7]
-___
-    $code.=<<___ if ($i>=1);
-	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
-	cmp	\$`0x10*$i`,$len
-	je	.Lxts_dec_$i
-___
-    $code.=<<___ if ($i>=2);
-	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
-___
-    }
-$code.=<<___;
-	movdqu	0x60($inp), @XMM[8+6]
-	pxor	@XMM[8+5], @XMM[5]
-	movdqa	@XMM[7], 0x70(%rsp)
-	lea	0x70($inp), $inp
-	pxor	@XMM[8+6], @XMM[6]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_decrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[6]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[4]
-	movdqu	@XMM[6], 0x20($out)
-	pxor	0x40(%rsp), @XMM[2]
-	movdqu	@XMM[4], 0x30($out)
-	pxor	0x50(%rsp), @XMM[7]
-	movdqu	@XMM[2], 0x40($out)
-	pxor	0x60(%rsp), @XMM[3]
-	movdqu	@XMM[7], 0x50($out)
-	movdqu	@XMM[3], 0x60($out)
-	lea	0x70($out), $out
-
-	movdqa	0x70(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_dec_done
-.align	16
-.Lxts_dec_6:
-	pxor	@XMM[8+4], @XMM[4]
-	lea	0x60($inp), $inp
-	pxor	@XMM[8+5], @XMM[5]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_decrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[6]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[4]
-	movdqu	@XMM[6], 0x20($out)
-	pxor	0x40(%rsp), @XMM[2]
-	movdqu	@XMM[4], 0x30($out)
-	pxor	0x50(%rsp), @XMM[7]
-	movdqu	@XMM[2], 0x40($out)
-	movdqu	@XMM[7], 0x50($out)
-	lea	0x60($out), $out
-
-	movdqa	0x60(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_dec_done
-.align	16
-.Lxts_dec_5:
-	pxor	@XMM[8+3], @XMM[3]
-	lea	0x50($inp), $inp
-	pxor	@XMM[8+4], @XMM[4]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_decrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[6]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[4]
-	movdqu	@XMM[6], 0x20($out)
-	pxor	0x40(%rsp), @XMM[2]
-	movdqu	@XMM[4], 0x30($out)
-	movdqu	@XMM[2], 0x40($out)
-	lea	0x50($out), $out
-
-	movdqa	0x50(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_dec_done
-.align	16
-.Lxts_dec_4:
-	pxor	@XMM[8+2], @XMM[2]
-	lea	0x40($inp), $inp
-	pxor	@XMM[8+3], @XMM[3]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_decrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[6]
-	movdqu	@XMM[1], 0x10($out)
-	pxor	0x30(%rsp), @XMM[4]
-	movdqu	@XMM[6], 0x20($out)
-	movdqu	@XMM[4], 0x30($out)
-	lea	0x40($out), $out
-
-	movdqa	0x40(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_dec_done
-.align	16
-.Lxts_dec_3:
-	pxor	@XMM[8+1], @XMM[1]
-	lea	0x30($inp), $inp
-	pxor	@XMM[8+2], @XMM[2]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_decrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	pxor	0x20(%rsp), @XMM[6]
-	movdqu	@XMM[1], 0x10($out)
-	movdqu	@XMM[6], 0x20($out)
-	lea	0x30($out), $out
-
-	movdqa	0x30(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_dec_done
-.align	16
-.Lxts_dec_2:
-	pxor	@XMM[8+0], @XMM[0]
-	lea	0x20($inp), $inp
-	pxor	@XMM[8+1], @XMM[1]
-	lea	0x80(%rsp), %rax	# pass key schedule
-	mov	%edx, %r10d		# pass rounds
-
-	call	_bsaes_decrypt8
-
-	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	pxor	0x10(%rsp), @XMM[1]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	movdqu	@XMM[1], 0x10($out)
-	lea	0x20($out), $out
-
-	movdqa	0x20(%rsp), @XMM[7]	# next iteration tweak
-	jmp	.Lxts_dec_done
-.align	16
-.Lxts_dec_1:
-	pxor	@XMM[0], @XMM[8]
-	lea	0x10($inp), $inp
-	movdqa	@XMM[8], 0x20(%rbp)
-	lea	0x20(%rbp), $arg1
-	lea	0x20(%rbp), $arg2
-	lea	($key), $arg3
-	call	asm_AES_decrypt		# doesn't touch %xmm
-	pxor	0x20(%rbp), @XMM[0]	# ^= tweak[]
-	#pxor	@XMM[8], @XMM[0]
-	#lea	0x80(%rsp), %rax	# pass key schedule
-	#mov	%edx, %r10d		# pass rounds
-	#call	_bsaes_decrypt8
-	#pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
-	movdqu	@XMM[0], 0x00($out)	# write output
-	lea	0x10($out), $out
-
-	movdqa	0x10(%rsp), @XMM[7]	# next iteration tweak
-
-.Lxts_dec_done:
-	and	\$15, %ebx
-	jz	.Lxts_dec_ret
-
-	pxor	$twtmp, $twtmp
-	movdqa	.Lxts_magic(%rip), $twmask
-	pcmpgtd	@XMM[7], $twtmp
-	pshufd	\$0x13, $twtmp, $twres
-	movdqa	@XMM[7], @XMM[6]
-	paddq	@XMM[7], @XMM[7]	# psllq 1,$tweak
-	pand	$twmask, $twres		# isolate carry and residue
-	movdqu	($inp), @XMM[0]
-	pxor	$twres, @XMM[7]
-
-	lea	0x20(%rbp), $arg1
-	pxor	@XMM[7], @XMM[0]
-	lea	0x20(%rbp), $arg2
-	movdqa	@XMM[0], 0x20(%rbp)
-	lea	($key), $arg3
-	call	asm_AES_decrypt		# doesn't touch %xmm
-	pxor	0x20(%rbp), @XMM[7]
-	mov	$out, %rdx
-	movdqu	@XMM[7], ($out)
-
-.Lxts_dec_steal:
-	movzb	16($inp), %eax
-	movzb	(%rdx), %ecx
-	lea	1($inp), $inp
-	mov	%al, (%rdx)
-	mov	%cl, 16(%rdx)
-	lea	1(%rdx), %rdx
-	sub	\$1,%ebx
-	jnz	.Lxts_dec_steal
-
-	movdqu	($out), @XMM[0]
-	lea	0x20(%rbp), $arg1
-	pxor	@XMM[6], @XMM[0]
-	lea	0x20(%rbp), $arg2
-	movdqa	@XMM[0], 0x20(%rbp)
-	lea	($key), $arg3
-	call	asm_AES_decrypt		# doesn't touch %xmm
-	pxor	0x20(%rbp), @XMM[6]
-	movdqu	@XMM[6], ($out)
-
-.Lxts_dec_ret:
-	lea	(%rsp), %rax
-	pxor	%xmm0, %xmm0
-.Lxts_dec_bzero:			# wipe key schedule [if any]
-	movdqa	%xmm0, 0x00(%rax)
-	movdqa	%xmm0, 0x10(%rax)
-	lea	0x20(%rax), %rax
-	cmp	%rax, %rbp
-	ja	.Lxts_dec_bzero
-
-	lea	(%rbp),%rsp		# restore %rsp
-___
-$code.=<<___ if ($win64);
-	movaps	0x40(%rbp), %xmm6
-	movaps	0x50(%rbp), %xmm7
-	movaps	0x60(%rbp), %xmm8
-	movaps	0x70(%rbp), %xmm9
-	movaps	0x80(%rbp), %xmm10
-	movaps	0x90(%rbp), %xmm11
-	movaps	0xa0(%rbp), %xmm12
-	movaps	0xb0(%rbp), %xmm13
-	movaps	0xc0(%rbp), %xmm14
-	movaps	0xd0(%rbp), %xmm15
-	lea	0xa0(%rbp), %rsp
-___
-$code.=<<___;
-	mov	0x48(%rsp), %r15
-	mov	0x50(%rsp), %r14
-	mov	0x58(%rsp), %r13
-	mov	0x60(%rsp), %r12
-	mov	0x68(%rsp), %rbx
-	mov	0x70(%rsp), %rax
-	lea	0x78(%rsp), %rsp
-	mov	%rax, %rbp
-.Lxts_dec_epilogue:
-	ret
-.size	bsaes_xts_decrypt,.-bsaes_xts_decrypt
-___
-}
-$code.=<<___;
-.type	_bsaes_const,\@object
-.align	64
-_bsaes_const:
-.LM0ISR:	# InvShiftRows constants
-	.quad	0x0a0e0206070b0f03, 0x0004080c0d010509
-.LISRM0:
-	.quad	0x01040b0e0205080f, 0x0306090c00070a0d
-.LISR:
-	.quad	0x0504070602010003, 0x0f0e0d0c080b0a09
-.LBS0:		# bit-slice constants
-	.quad	0x5555555555555555, 0x5555555555555555
-.LBS1:
-	.quad	0x3333333333333333, 0x3333333333333333
-.LBS2:
-	.quad	0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
-.LSR:		# shiftrows constants
-	.quad	0x0504070600030201, 0x0f0e0d0c0a09080b
-.LSRM0:
-	.quad	0x0304090e00050a0f, 0x01060b0c0207080d
-.LM0SR:
-	.quad	0x0a0e02060f03070b, 0x0004080c05090d01
-.LSWPUP:	# byte-swap upper dword
-	.quad	0x0706050403020100, 0x0c0d0e0f0b0a0908
-.LSWPUPM0SR:
-	.quad	0x0a0d02060c03070b, 0x0004080f05090e01
-.LADD1:		# counter increment constants
-	.quad	0x0000000000000000, 0x0000000100000000
-.LADD2:
-	.quad	0x0000000000000000, 0x0000000200000000
-.LADD3:
-	.quad	0x0000000000000000, 0x0000000300000000
-.LADD4:
-	.quad	0x0000000000000000, 0x0000000400000000
-.LADD5:
-	.quad	0x0000000000000000, 0x0000000500000000
-.LADD6:
-	.quad	0x0000000000000000, 0x0000000600000000
-.LADD7:
-	.quad	0x0000000000000000, 0x0000000700000000
-.LADD8:
-	.quad	0x0000000000000000, 0x0000000800000000
-.Lxts_magic:
-	.long	0x87,0,1,0
-.Lmasks:
-	.quad	0x0101010101010101, 0x0101010101010101
-	.quad	0x0202020202020202, 0x0202020202020202
-	.quad	0x0404040404040404, 0x0404040404040404
-	.quad	0x0808080808080808, 0x0808080808080808
-.LM0:
-	.quad	0x02060a0e03070b0f, 0x0004080c0105090d
-.L63:
-	.quad	0x6363636363636363, 0x6363636363636363
-.asciz	"Bit-sliced AES for x86_64/SSSE3, Emilia Käsper, Peter Schwabe, Andy Polyakov"
-.align	64
-.size	_bsaes_const,.-_bsaes_const
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern	__imp_RtlVirtualUnwind
-.type	se_handler,\@abi-omnipotent
-.align	16
-se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# prologue label
-	cmp	%r10,%rbx		# context->Rip<prologue label
-	jb	.Lin_prologue
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lin_prologue
-
-	mov	160($context),%rax	# pull context->Rbp
-
-	lea	0x40(%rax),%rsi		# %xmm save area
-	lea	512($context),%rdi	# &context.Xmm6
-	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
-	.long	0xa548f3fc		# cld; rep movsq
-	lea	0xa0(%rax),%rax		# adjust stack pointer
-
-	mov	0x70(%rax),%rbp
-	mov	0x68(%rax),%rbx
-	mov	0x60(%rax),%r12
-	mov	0x58(%rax),%r13
-	mov	0x50(%rax),%r14
-	mov	0x48(%rax),%r15
-	lea	0x78(%rax),%rax		# adjust stack pointer
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lin_prologue:
-	mov	%rax,152($context)	# restore context->Rsp
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	add	\$64,%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	se_handler,.-se_handler
-
-.section	.pdata
-.align	4
-___
-$code.=<<___ if ($ecb);
-	.rva	.Lecb_enc_prologue
-	.rva	.Lecb_enc_epilogue
-	.rva	.Lecb_enc_info
-
-	.rva	.Lecb_dec_prologue
-	.rva	.Lecb_dec_epilogue
-	.rva	.Lecb_dec_info
-___
-$code.=<<___;
-	.rva	.Lcbc_dec_prologue
-	.rva	.Lcbc_dec_epilogue
-	.rva	.Lcbc_dec_info
-
-	.rva	.Lctr_enc_prologue
-	.rva	.Lctr_enc_epilogue
-	.rva	.Lctr_enc_info
-
-	.rva	.Lxts_enc_prologue
-	.rva	.Lxts_enc_epilogue
-	.rva	.Lxts_enc_info
-
-	.rva	.Lxts_dec_prologue
-	.rva	.Lxts_dec_epilogue
-	.rva	.Lxts_dec_info
-
-.section	.xdata
-.align	8
-___
-$code.=<<___ if ($ecb);
-.Lecb_enc_info:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Lecb_enc_body,.Lecb_enc_epilogue	# HandlerData[]
-.Lecb_dec_info:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Lecb_dec_body,.Lecb_dec_epilogue	# HandlerData[]
-___
-$code.=<<___;
-.Lcbc_dec_info:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Lcbc_dec_body,.Lcbc_dec_epilogue	# HandlerData[]
-.Lctr_enc_info:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Lctr_enc_body,.Lctr_enc_epilogue	# HandlerData[]
-.Lxts_enc_info:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Lxts_enc_body,.Lxts_enc_epilogue	# HandlerData[]
-.Lxts_dec_info:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Lxts_dec_body,.Lxts_dec_epilogue	# HandlerData[]
-___
-}
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-
-print $code;
-
-close STDOUT;
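The final substitution above is the perlasm idiom that makes templates like `128-32` and `0x10*$i` work: any backtick-delimited span in the accumulated assembly text is evaluated as Perl and replaced by its value before printing. A self-contained illustration (the instruction and offset are arbitrary):

    # Minimal sketch of the backtick-eval pass; $i is visible to the
    # eval'd expression, so 0x10*$i is computed at generation time.
    my $i = 3;
    my $code = "\tmovdqu\t`0x10*\$i`(%rsp), %xmm0\n";
    $code =~ s/\`([^\`]*)\`/eval($1)/gem;
    print $code;    # prints "movdqu  48(%rsp), %xmm0"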

0 additions, 903 deletions
drivers/builtin_openssl2/crypto/aes/asm/vpaes-x86.pl

@@ -1,903 +0,0 @@
-#!/usr/bin/env perl
-
-######################################################################
-## Constant-time SSSE3 AES core implementation.
-## version 0.1
-##
-## By Mike Hamburg (Stanford University), 2009
-## Public domain.
-##
-## For details see http://shiftleft.org/papers/vector_aes/ and
-## http://crypto.stanford.edu/vpaes/.
-
-######################################################################
-# September 2011.
-#
-# Port of vpaes-x86_64.pl as a 32-bit "almost" drop-in replacement for
-# aes-586.pl. "Almost" refers to the fact that AES_cbc_encrypt
-# doesn't handle partial vectors (it doesn't have to when called from
-# EVP only). "Drop-in" implies that this module neither shares key
-# schedule structure with the original nor makes assumptions
-# about its alignment...
-#
-# Performance summary. aes-586.pl column lists large-block CBC
-# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
-# byte processed with 128-bit key, and vpaes-x86.pl column - [also
-# large-block CBC] encrypt/decrypt.
-#
-#		aes-586.pl		vpaes-x86.pl
-#
-# Core 2(**)	29.1/42.3/18.3		22.0/25.6(***)
-# Nehalem	27.9/40.4/18.1		10.3/12.0
-# Atom		102./119./60.1		64.5/85.3(***)
-#
-# (*)	"Hyper-threading" in this context refers to cache shared
-#	among multiple cores rather than to Intel HTT specifically. As the
-#	vast majority of contemporary cores share cache, the slower code
-#	path is commonplace. In other words, "with-hyper-threading-off"
-#	results are presented mostly for reference purposes.
-#
-# (**)	"Core 2" refers to initial 65nm design, a.k.a. Conroe.
-#
-# (***)	The less impressive improvement on Core 2 and Atom is due to slow
-#	pshufb,	yet it's a respectable +32%/65% improvement on Core 2
-#	and +58%/40% on Atom (as implied, over the "hyper-threading-safe"
-#	code path).
-#
-#						<[email protected]>
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],"vpaes-x86.pl",$x86only = $ARGV[$#ARGV] eq "386");
-
-$PREFIX="vpaes";
-
-my  ($round, $base, $magic, $key, $const, $inp, $out)=
-    ("eax",  "ebx", "ecx",  "edx","ebp",  "esi","edi");
-
-&static_label("_vpaes_consts");
-&static_label("_vpaes_schedule_low_round");
-
-&set_label("_vpaes_consts",64);
-$k_inv=-0x30;		# inv, inva
-	&data_word(0x0D080180,0x0E05060F,0x0A0B0C02,0x04070309);
-	&data_word(0x0F0B0780,0x01040A06,0x02050809,0x030D0E0C);
-
-$k_s0F=-0x10;		# s0F
-	&data_word(0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F);
-
-$k_ipt=0x00;		# input transform (lo, hi)
-	&data_word(0x5A2A7000,0xC2B2E898,0x52227808,0xCABAE090);
-	&data_word(0x317C4D00,0x4C01307D,0xB0FDCC81,0xCD80B1FC);
-
-$k_sb1=0x20;		# sb1u, sb1t
-	&data_word(0xCB503E00,0xB19BE18F,0x142AF544,0xA5DF7A6E);
-	&data_word(0xFAE22300,0x3618D415,0x0D2ED9EF,0x3BF7CCC1);
-$k_sb2=0x40;		# sb2u, sb2t
-	&data_word(0x0B712400,0xE27A93C6,0xBC982FCD,0x5EB7E955);
-	&data_word(0x0AE12900,0x69EB8840,0xAB82234A,0xC2A163C8);
-$k_sbo=0x60;		# sbou, sbot
-	&data_word(0x6FBDC700,0xD0D26D17,0xC502A878,0x15AABF7A);
-	&data_word(0x5FBB6A00,0xCFE474A5,0x412B35FA,0x8E1E90D1);
-
-$k_mc_forward=0x80;	# mc_forward
-	&data_word(0x00030201,0x04070605,0x080B0A09,0x0C0F0E0D);
-	&data_word(0x04070605,0x080B0A09,0x0C0F0E0D,0x00030201);
-	&data_word(0x080B0A09,0x0C0F0E0D,0x00030201,0x04070605);
-	&data_word(0x0C0F0E0D,0x00030201,0x04070605,0x080B0A09);
-
-$k_mc_backward=0xc0;	# mc_backward
-	&data_word(0x02010003,0x06050407,0x0A09080B,0x0E0D0C0F);
-	&data_word(0x0E0D0C0F,0x02010003,0x06050407,0x0A09080B);
-	&data_word(0x0A09080B,0x0E0D0C0F,0x02010003,0x06050407);
-	&data_word(0x06050407,0x0A09080B,0x0E0D0C0F,0x02010003);
-
-$k_sr=0x100;		# sr
-	&data_word(0x03020100,0x07060504,0x0B0A0908,0x0F0E0D0C);
-	&data_word(0x0F0A0500,0x030E0904,0x07020D08,0x0B06010C);
-	&data_word(0x0B020900,0x0F060D04,0x030A0108,0x070E050C);
-	&data_word(0x070A0D00,0x0B0E0104,0x0F020508,0x0306090C);
-
-$k_rcon=0x140;		# rcon
-	&data_word(0xAF9DEEB6,0x1F8391B9,0x4D7C7D81,0x702A9808);
-
-$k_s63=0x150;		# s63: all equal to 0x63 transformed
-	&data_word(0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B);
-
-$k_opt=0x160;		# output transform
-	&data_word(0xD6B66000,0xFF9F4929,0xDEBE6808,0xF7974121);
-	&data_word(0x50BCEC00,0x01EDBD51,0xB05C0CE0,0xE10D5DB1);
-
-$k_deskew=0x180;	# deskew tables: inverts the sbox's "skew"
-	&data_word(0x47A4E300,0x07E4A340,0x5DBEF91A,0x1DFEB95A);
-	&data_word(0x83EA6900,0x5F36B5DC,0xF49D1E77,0x2841C2AB);
-##
-##  Decryption stuff
-##  Key schedule constants
-##
-$k_dksd=0x1a0;		# decryption key schedule: invskew x*D
-	&data_word(0xA3E44700,0xFEB91A5D,0x5A1DBEF9,0x0740E3A4);
-	&data_word(0xB5368300,0x41C277F4,0xAB289D1E,0x5FDC69EA);
-$k_dksb=0x1c0;		# decryption key schedule: invskew x*B
-	&data_word(0x8550D500,0x9A4FCA1F,0x1CC94C99,0x03D65386);
-	&data_word(0xB6FC4A00,0x115BEDA7,0x7E3482C8,0xD993256F);
-$k_dkse=0x1e0;		# decryption key schedule: invskew x*E + 0x63
-	&data_word(0x1FC9D600,0xD5031CCA,0x994F5086,0x53859A4C);
-	&data_word(0x4FDC7BE8,0xA2319605,0x20B31487,0xCD5EF96A);
-$k_dks9=0x200;		# decryption key schedule: invskew x*9
-	&data_word(0x7ED9A700,0xB6116FC8,0x82255BFC,0x4AED9334);
-	&data_word(0x27143300,0x45765162,0xE9DAFDCE,0x8BB89FAC);
-
-##
-##  Decryption stuff
-##  Round function constants
-##
-$k_dipt=0x220;		# decryption input transform
-	&data_word(0x0B545F00,0x0F505B04,0x114E451A,0x154A411E);
-	&data_word(0x60056500,0x86E383E6,0xF491F194,0x12771772);
-
-$k_dsb9=0x240;		# decryption sbox output *9*u, *9*t
-	&data_word(0x9A86D600,0x851C0353,0x4F994CC9,0xCAD51F50);
-	&data_word(0xECD74900,0xC03B1789,0xB2FBA565,0x725E2C9E);
-$k_dsbd=0x260;		# decryption sbox output *D*u, *D*t
-	&data_word(0xE6B1A200,0x7D57CCDF,0x882A4439,0xF56E9B13);
-	&data_word(0x24C6CB00,0x3CE2FAF7,0x15DEEFD3,0x2931180D);
-$k_dsbb=0x280;		# decryption sbox output *B*u, *B*t
-	&data_word(0x96B44200,0xD0226492,0xB0F2D404,0x602646F6);
-	&data_word(0xCD596700,0xC19498A6,0x3255AA6B,0xF3FF0C3E);
-$k_dsbe=0x2a0;		# decryption sbox output *E*u, *E*t
-	&data_word(0x26D4D000,0x46F29296,0x64B4F6B0,0x22426004);
-	&data_word(0xFFAAC100,0x0C55A6CD,0x98593E32,0x9467F36B);
-$k_dsbo=0x2c0;		# decryption sbox final output
-	&data_word(0x7EF94000,0x1387EA53,0xD4943E2D,0xC7AA6DB9);
-	&data_word(0x93441D00,0x12D7560F,0xD8C58E9C,0xCA4B8159);
-&asciz	("Vector Permutation AES for x86/SSSE3, Mike Hamburg (Stanford University)");
-&align	(64);
-
-&function_begin_B("_vpaes_preheat");
-	&add	($const,&DWP(0,"esp"));
-	&movdqa	("xmm7",&QWP($k_inv,$const));
-	&movdqa	("xmm6",&QWP($k_s0F,$const));
-	&ret	();
-&function_end_B("_vpaes_preheat");
-
-##
-##  _aes_encrypt_core
-##
-##  AES-encrypt %xmm0.
-##
-##  Inputs:
-##     %xmm0 = input
-##     %xmm6-%xmm7 as in _vpaes_preheat
-##    (%edx) = scheduled keys
-##
-##  Output in %xmm0
-##  Clobbers  %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
-##
-##
-&function_begin_B("_vpaes_encrypt_core");
-	&mov	($magic,16);
-	&mov	($round,&DWP(240,$key));
-	&movdqa	("xmm1","xmm6");
-	&movdqa	("xmm2",&QWP($k_ipt,$const));
-	&pandn	("xmm1","xmm0");
-	&movdqu	("xmm5",&QWP(0,$key));
-	&psrld	("xmm1",4);
-	&pand	("xmm0","xmm6");
-	&pshufb	("xmm2","xmm0");
-	&movdqa	("xmm0",&QWP($k_ipt+16,$const));
-	&pshufb	("xmm0","xmm1");
-	&pxor	("xmm2","xmm5");
-	&pxor	("xmm0","xmm2");
-	&add	($key,16);
-	&lea	($base,&DWP($k_mc_backward,$const));
-	&jmp	(&label("enc_entry"));
-
-
-&set_label("enc_loop",16);
-	# middle of middle round
-	&movdqa	("xmm4",&QWP($k_sb1,$const));	# 4 : sb1u
-	&pshufb	("xmm4","xmm2");		# 4 = sb1u
-	&pxor	("xmm4","xmm5");		# 4 = sb1u + k
-	&movdqa	("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t
-	&pshufb	("xmm0","xmm3");		# 0 = sb1t
-	&pxor	("xmm0","xmm4");		# 0 = A
-	&movdqa	("xmm5",&QWP($k_sb2,$const));	# 4 : sb2u
-	&pshufb	("xmm5","xmm2");		# 4 = sb2u
-	&movdqa	("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[]
-	&movdqa	("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2t
-	&pshufb	("xmm2","xmm3");		# 2 = sb2t
-	&pxor	("xmm2","xmm5");		# 2 = 2A
-	&movdqa	("xmm4",&QWP(0,$base,$magic));	# .Lk_mc_backward[]
-	&movdqa	("xmm3","xmm0");		# 3 = A
-	&pshufb	("xmm0","xmm1");		# 0 = B
-	&add	($key,16);			# next key
-	&pxor	("xmm0","xmm2");		# 0 = 2A+B
-	&pshufb	("xmm3","xmm4");		# 3 = D
-	&add	($magic,16);			# next mc
-	&pxor	("xmm3","xmm0");		# 3 = 2A+B+D
-	&pshufb	("xmm0","xmm1");		# 0 = 2B+C
-	&and	($magic,0x30);			# ... mod 4
-	&pxor	("xmm0","xmm3");		# 0 = 2A+3B+C+D
-	&sub	($round,1);			# nr--
-
-&set_label("enc_entry");
-	# top of round
-	&movdqa	("xmm1","xmm6");		# 1 : i
-	&pandn	("xmm1","xmm0");		# 1 = i<<4
-	&psrld	("xmm1",4);			# 1 = i
-	&pand	("xmm0","xmm6");		# 0 = k
-	&movdqa	("xmm5",&QWP($k_inv+16,$const));# 2 : a/k
-	&pshufb	("xmm5","xmm0");		# 2 = a/k
-	&pxor	("xmm0","xmm1");		# 0 = j
-	&movdqa	("xmm3","xmm7");		# 3 : 1/i
-	&pshufb	("xmm3","xmm1");		# 3 = 1/i
-	&pxor	("xmm3","xmm5");		# 3 = iak = 1/i + a/k
-	&movdqa	("xmm4","xmm7");		# 4 : 1/j
-	&pshufb	("xmm4","xmm0");		# 4 = 1/j
-	&pxor	("xmm4","xmm5");		# 4 = jak = 1/j + a/k
-	&movdqa	("xmm2","xmm7");		# 2 : 1/iak
-	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
-	&pxor	("xmm2","xmm0");		# 2 = io
-	&movdqa	("xmm3","xmm7");		# 3 : 1/jak
-	&movdqu	("xmm5",&QWP(0,$key));
-	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
-	&pxor	("xmm3","xmm1");		# 3 = jo
-	&jnz	(&label("enc_loop"));
-
-	# middle of last round
-	&movdqa	("xmm4",&QWP($k_sbo,$const));	# 3 : sbou      .Lk_sbo
-	&movdqa	("xmm0",&QWP($k_sbo+16,$const));# 3 : sbot      .Lk_sbo+16
-	&pshufb	("xmm4","xmm2");		# 4 = sbou
-	&pxor	("xmm4","xmm5");		# 4 = sb1u + k
-	&pshufb	("xmm0","xmm3");		# 0 = sb1t
-	&movdqa	("xmm1",&QWP(0x40,$base,$magic));# .Lk_sr[]
-	&pxor	("xmm0","xmm4");		# 0 = A
-	&pshufb	("xmm0","xmm1");
-	&ret	();
-&function_end_B("_vpaes_encrypt_core");
-
-##
-##  Decryption core
-##
-##  Same API as encryption core.
-##
-&function_begin_B("_vpaes_decrypt_core");
-	&mov	($round,&DWP(240,$key));
-	&lea	($base,&DWP($k_dsbd,$const));
-	&movdqa	("xmm1","xmm6");
-	&movdqa	("xmm2",&QWP($k_dipt-$k_dsbd,$base));
-	&pandn	("xmm1","xmm0");
-	&mov	($magic,$round);
-	&psrld	("xmm1",4);
-	&movdqu	("xmm5",&QWP(0,$key));
-	&shl	($magic,4);
-	&pand	("xmm0","xmm6");
-	&pshufb	("xmm2","xmm0");
-	&movdqa	("xmm0",&QWP($k_dipt-$k_dsbd+16,$base));
-	&xor	($magic,0x30);
-	&pshufb	("xmm0","xmm1");
-	&and	($magic,0x30);
-	&pxor	("xmm2","xmm5");
-	&movdqa	("xmm5",&QWP($k_mc_forward+48,$const));
-	&pxor	("xmm0","xmm2");
-	&add	($key,16);
-	&lea	($magic,&DWP($k_sr-$k_dsbd,$base,$magic));
-	&jmp	(&label("dec_entry"));
-
-&set_label("dec_loop",16);
-##
-##  Inverse mix columns
-##
-	&movdqa	("xmm4",&QWP(-0x20,$base));	# 4 : sb9u
-	&pshufb	("xmm4","xmm2");		# 4 = sb9u
-	&pxor	("xmm4","xmm0");
-	&movdqa	("xmm0",&QWP(-0x10,$base));	# 0 : sb9t
-	&pshufb	("xmm0","xmm3");		# 0 = sb9t
-	&pxor	("xmm0","xmm4");		# 0 = ch
-	&add	($key,16);			# next round key
-
-	&pshufb	("xmm0","xmm5");		# MC ch
-	&movdqa	("xmm4",&QWP(0,$base));		# 4 : sbdu
-	&pshufb	("xmm4","xmm2");		# 4 = sbdu
-	&pxor	("xmm4","xmm0");		# 4 = ch
-	&movdqa	("xmm0",&QWP(0x10,$base));	# 0 : sbdt
-	&pshufb	("xmm0","xmm3");		# 0 = sbdt
-	&pxor	("xmm0","xmm4");		# 0 = ch
-	&sub	($round,1);			# nr--
-
-	&pshufb	("xmm0","xmm5");		# MC ch
-	&movdqa	("xmm4",&QWP(0x20,$base));	# 4 : sbbu
-	&pshufb	("xmm4","xmm2");		# 4 = sbbu
-	&pxor	("xmm4","xmm0");		# 4 = ch
-	&movdqa	("xmm0",&QWP(0x30,$base));	# 0 : sbbt
-	&pshufb	("xmm0","xmm3");		# 0 = sbbt
-	&pxor	("xmm0","xmm4");		# 0 = ch
-
-	&pshufb	("xmm0","xmm5");		# MC ch
-	&movdqa	("xmm4",&QWP(0x40,$base));	# 4 : sbeu
-	&pshufb	("xmm4","xmm2");		# 4 = sbeu
-	&pxor	("xmm4","xmm0");		# 4 = ch
-	&movdqa	("xmm0",&QWP(0x50,$base));	# 0 : sbet
-	&pshufb	("xmm0","xmm3");		# 0 = sbet
-	&pxor	("xmm0","xmm4");		# 0 = ch
-
-	&palignr("xmm5","xmm5",12);
-
-&set_label("dec_entry");
-	# top of round
-	&movdqa	("xmm1","xmm6");		# 1 : i
-	&pandn	("xmm1","xmm0");		# 1 = i<<4
-	&psrld	("xmm1",4);			# 1 = i
-	&pand	("xmm0","xmm6");		# 0 = k
-	&movdqa	("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
-	&pshufb	("xmm2","xmm0");		# 2 = a/k
-	&pxor	("xmm0","xmm1");		# 0 = j
-	&movdqa	("xmm3","xmm7");		# 3 : 1/i
-	&pshufb	("xmm3","xmm1");		# 3 = 1/i
-	&pxor	("xmm3","xmm2");		# 3 = iak = 1/i + a/k
-	&movdqa	("xmm4","xmm7");		# 4 : 1/j
-	&pshufb	("xmm4","xmm0");		# 4 = 1/j
-	&pxor	("xmm4","xmm2");		# 4 = jak = 1/j + a/k
-	&movdqa	("xmm2","xmm7");		# 2 : 1/iak
-	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
-	&pxor	("xmm2","xmm0");		# 2 = io
-	&movdqa	("xmm3","xmm7");		# 3 : 1/jak
-	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
-	&pxor	("xmm3","xmm1");		# 3 = jo
-	&movdqu	("xmm0",&QWP(0,$key));
-	&jnz	(&label("dec_loop"));
-
-	# middle of last round
-	&movdqa	("xmm4",&QWP(0x60,$base));	# 3 : sbou
-	&pshufb	("xmm4","xmm2");		# 4 = sbou
-	&pxor	("xmm4","xmm0");		# 4 = sb1u + k
-	&movdqa	("xmm0",&QWP(0x70,$base));	# 0 : sbot
-	&movdqa	("xmm2",&QWP(0,$magic));
-	&pshufb	("xmm0","xmm3");		# 0 = sb1t
-	&pxor	("xmm0","xmm4");		# 0 = A
-	&pshufb	("xmm0","xmm2");
-	&ret	();
-&function_end_B("_vpaes_decrypt_core");
-
-########################################################
-##                                                    ##
-##                  AES key schedule                  ##
-##                                                    ##
-########################################################
-&function_begin_B("_vpaes_schedule_core");
-	&add	($const,&DWP(0,"esp"));
-	&movdqu	("xmm0",&QWP(0,$inp));		# load key (unaligned)
-	&movdqa	("xmm2",&QWP($k_rcon,$const));	# load rcon
-
-	# input transform
-	&movdqa	("xmm3","xmm0");
-	&lea	($base,&DWP($k_ipt,$const));
-	&movdqa	(&QWP(4,"esp"),"xmm2");		# xmm8
-	&call	("_vpaes_schedule_transform");
-	&movdqa	("xmm7","xmm0");
-
-	&test	($out,$out);
-	&jnz	(&label("schedule_am_decrypting"));
-
-	# encrypting, output zeroth round key after transform
-	&movdqu	(&QWP(0,$key),"xmm0");
-	&jmp	(&label("schedule_go"));
-
-&set_label("schedule_am_decrypting");
-	# decrypting, output zeroth round key after shiftrows
-	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
-	&pshufb	("xmm3","xmm1");
-	&movdqu	(&QWP(0,$key),"xmm3");
-	&xor	($magic,0x30);
-
-&set_label("schedule_go");
-	&cmp	($round,192);
-	&ja	(&label("schedule_256"));
-	&je	(&label("schedule_192"));
-	# 128: fall through
-
-##
-##  .schedule_128
-##
-##  128-bit specific part of key schedule.
-##
-##  This schedule is really simple, because all its parts
-##  are accomplished by the subroutines.
-##
-&set_label("schedule_128");
-	&mov	($round,10);
-
-&set_label("loop_schedule_128");
-	&call	("_vpaes_schedule_round");
-	&dec	($round);
-	&jz	(&label("schedule_mangle_last"));
-	&call	("_vpaes_schedule_mangle");	# write output
-	&jmp	(&label("loop_schedule_128"));
-
-##
-##  .aes_schedule_192
-##
-##  192-bit specific part of key schedule.
-##
-##  The main body of this schedule is the same as the 128-bit
-##  schedule, but with more smearing.  The long, high side is
-##  stored in %xmm7 as before, and the short, low side is in
-##  the high bits of %xmm6.
-##
-##  This schedule is somewhat nastier, however, because each
-##  round produces 192 bits of key material, or 1.5 round keys.
-##  Therefore, on each cycle we do 2 rounds and produce 3 round
-##  keys.
-##
-&set_label("schedule_192",16);
-	&movdqu	("xmm0",&QWP(8,$inp));		# load key part 2 (very unaligned)
-	&call	("_vpaes_schedule_transform");	# input transform	
-	&movdqa	("xmm6","xmm0");		# save short part
-	&pxor	("xmm4","xmm4");		# clear 4
-	&movhlps("xmm6","xmm4");		# clobber low side with zeros
-	&mov	($round,4);
-
-&set_label("loop_schedule_192");
-	&call	("_vpaes_schedule_round");
-	&palignr("xmm0","xmm6",8);
-	&call	("_vpaes_schedule_mangle");	# save key n
-	&call	("_vpaes_schedule_192_smear");
-	&call	("_vpaes_schedule_mangle");	# save key n+1
-	&call	("_vpaes_schedule_round");
-	&dec	($round);
-	&jz	(&label("schedule_mangle_last"));
-	&call	("_vpaes_schedule_mangle");	# save key n+2
-	&call	("_vpaes_schedule_192_smear");
-	&jmp	(&label("loop_schedule_192"));
-
-##
-##  .aes_schedule_256
-##
-##  256-bit specific part of key schedule.
-##
-##  The structure here is very similar to the 128-bit
-##  schedule, but with an additional "low side" in
-##  %xmm6.  The low side's rounds are the same as the
-##  high side's, except no rcon and no rotation.
-##
-&set_label("schedule_256",16);
-	&movdqu	("xmm0",&QWP(16,$inp));		# load key part 2 (unaligned)
-	&call	("_vpaes_schedule_transform");	# input transform	
-	&mov	($round,7);
-
-&set_label("loop_schedule_256");
-	&call	("_vpaes_schedule_mangle");	# output low result
-	&movdqa	("xmm6","xmm0");		# save cur_lo in xmm6
-
-	# high round
-	&call	("_vpaes_schedule_round");
-	&dec	($round);
-	&jz	(&label("schedule_mangle_last"));
-	&call	("_vpaes_schedule_mangle");	
-
-	# low round. swap xmm7 and xmm6
-	&pshufd	("xmm0","xmm0",0xFF);
-	&movdqa	(&QWP(20,"esp"),"xmm7");
-	&movdqa	("xmm7","xmm6");
-	&call	("_vpaes_schedule_low_round");
-	&movdqa	("xmm7",&QWP(20,"esp"));
-
-	&jmp	(&label("loop_schedule_256"));
-
-##
-##  .aes_schedule_mangle_last
-##
-##  Mangler for last round of key schedule
-##  Mangles %xmm0
-##    when encrypting, outputs out(%xmm0) ^ 63
-##    when decrypting, outputs unskew(%xmm0)
-##
-##  Always called right before return... jumps to cleanup and exits
-##
-&set_label("schedule_mangle_last",16);
-	# schedule last round key from xmm0
-	&lea	($base,&DWP($k_deskew,$const));
-	&test	($out,$out);
-	&jnz	(&label("schedule_mangle_last_dec"));
-
-	# encrypting
-	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
-	&pshufb	("xmm0","xmm1");		# output permute
-	&lea	($base,&DWP($k_opt,$const));	# prepare to output transform
-	&add	($key,32);
-
-&set_label("schedule_mangle_last_dec");
-	&add	($key,-16);
-	&pxor	("xmm0",&QWP($k_s63,$const));
-	&call	("_vpaes_schedule_transform");	# output transform
-	&movdqu	(&QWP(0,$key),"xmm0");		# save last key
-
-	# cleanup
-	&pxor	("xmm0","xmm0");
-	&pxor	("xmm1","xmm1");
-	&pxor	("xmm2","xmm2");
-	&pxor	("xmm3","xmm3");
-	&pxor	("xmm4","xmm4");
-	&pxor	("xmm5","xmm5");
-	&pxor	("xmm6","xmm6");
-	&pxor	("xmm7","xmm7");
-	&ret	();
-&function_end_B("_vpaes_schedule_core");
-
-##
-##  .aes_schedule_192_smear
-##
-##  Smear the short, low side in the 192-bit key schedule.
-##
-##  Inputs:
-##    %xmm7: high side, b  a  x  y
-##    %xmm6:  low side, d  c  0  0
-##    %xmm13: 0
-##
-##  Outputs:
-##    %xmm6: b+c+d  b+c  0  0
-##    %xmm0: b+c+d  b+c  b  a
-##
-&function_begin_B("_vpaes_schedule_192_smear");
-	&pshufd	("xmm0","xmm6",0x80);		# d c 0 0 -> c 0 0 0
-	&pxor	("xmm6","xmm0");		# -> c+d c 0 0
-	&pshufd	("xmm0","xmm7",0xFE);		# b a _ _ -> b b b a
-	&pxor	("xmm6","xmm0");		# -> b+c+d b+c b a
-	&movdqa	("xmm0","xmm6");
-	&pxor	("xmm1","xmm1");
-	&movhlps("xmm6","xmm1");		# clobber low side with zeros
-	&ret	();
-&function_end_B("_vpaes_schedule_192_smear");
-
-##
-##  .aes_schedule_round
-##
-##  Runs one main round of the key schedule on %xmm0, %xmm7
-##
-##  Specifically, runs subbytes on the high dword of %xmm0
-##  then rotates it by one byte and xors into the low dword of
-##  %xmm7.
-##
-##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
-##  next rcon.
-##
-##  Smears the dwords of %xmm7 by xoring the low into the
-##  second low, result into third, result into highest.
-##
-##  Returns results in %xmm7 = %xmm0.
-##  Clobbers %xmm1-%xmm5.
-##
-&function_begin_B("_vpaes_schedule_round");
-	# extract rcon from xmm8
-	&movdqa	("xmm2",&QWP(8,"esp"));		# xmm8
-	&pxor	("xmm1","xmm1");
-	&palignr("xmm1","xmm2",15);
-	&palignr("xmm2","xmm2",15);
-	&pxor	("xmm7","xmm1");
-
-	# rotate
-	&pshufd	("xmm0","xmm0",0xFF);
-	&palignr("xmm0","xmm0",1);
-
-	# fall through...
-	&movdqa	(&QWP(8,"esp"),"xmm2");		# xmm8
-
-	# low round: same as high round, but no rotation and no rcon.
-&set_label("_vpaes_schedule_low_round");
-	# smear xmm7
-	&movdqa	("xmm1","xmm7");
-	&pslldq	("xmm7",4);
-	&pxor	("xmm7","xmm1");
-	&movdqa	("xmm1","xmm7");
-	&pslldq	("xmm7",8);
-	&pxor	("xmm7","xmm1");
-	&pxor	("xmm7",&QWP($k_s63,$const));
-
-	# subbyte
-	&movdqa	("xmm4",&QWP($k_s0F,$const));
-	&movdqa	("xmm5",&QWP($k_inv,$const));	# 4 : 1/j
-	&movdqa	("xmm1","xmm4");	
-	&pandn	("xmm1","xmm0");
-	&psrld	("xmm1",4);			# 1 = i
-	&pand	("xmm0","xmm4");		# 0 = k
-	&movdqa	("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
-	&pshufb	("xmm2","xmm0");		# 2 = a/k
-	&pxor	("xmm0","xmm1");		# 0 = j
-	&movdqa	("xmm3","xmm5");		# 3 : 1/i
-	&pshufb	("xmm3","xmm1");		# 3 = 1/i
-	&pxor	("xmm3","xmm2");		# 3 = iak = 1/i + a/k
-	&movdqa	("xmm4","xmm5");		# 4 : 1/j
-	&pshufb	("xmm4","xmm0");		# 4 = 1/j
-	&pxor	("xmm4","xmm2");		# 4 = jak = 1/j + a/k
-	&movdqa	("xmm2","xmm5");		# 2 : 1/iak
-	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
-	&pxor	("xmm2","xmm0");		# 2 = io
-	&movdqa	("xmm3","xmm5");		# 3 : 1/jak
-	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
-	&pxor	("xmm3","xmm1");		# 3 = jo
-	&movdqa	("xmm4",&QWP($k_sb1,$const));	# 4 : sbou
-	&pshufb	("xmm4","xmm2");		# 4 = sbou
-	&movdqa	("xmm0",&QWP($k_sb1+16,$const));# 0 : sbot
-	&pshufb	("xmm0","xmm3");		# 0 = sb1t
-	&pxor	("xmm0","xmm4");		# 0 = sbox output
-
-	# add in smeared stuff
-	&pxor	("xmm0","xmm7");
-	&movdqa	("xmm7","xmm0");
-	&ret	();
-&function_end_B("_vpaes_schedule_round");
-
-##
-##  .aes_schedule_transform
-##
-##  Linear-transform %xmm0 according to tables at (%ebx)
-##
-##  Output in %xmm0
-##  Clobbers %xmm1, %xmm2
-##
-&function_begin_B("_vpaes_schedule_transform");
-	&movdqa	("xmm2",&QWP($k_s0F,$const));
-	&movdqa	("xmm1","xmm2");
-	&pandn	("xmm1","xmm0");
-	&psrld	("xmm1",4);
-	&pand	("xmm0","xmm2");
-	&movdqa	("xmm2",&QWP(0,$base));
-	&pshufb	("xmm2","xmm0");
-	&movdqa	("xmm0",&QWP(16,$base));
-	&pshufb	("xmm0","xmm1");
-	&pxor	("xmm0","xmm2");
-	&ret	();
-&function_end_B("_vpaes_schedule_transform");
-
-##
-##  .aes_schedule_mangle
-##
-##  Mangle xmm0 from (basis-transformed) standard version
-##  to our version.
-##
-##  On encrypt,
-##    xor with 0x63
-##    multiply by circulant 0,1,1,1
-##    apply shiftrows transform
-##
-##  On decrypt,
-##    xor with 0x63
-##    multiply by "inverse mixcolumns" circulant E,B,D,9
-##    deskew
-##    apply shiftrows transform
-##
-##
-##  Writes out to (%edx), and increments or decrements it
-##  Keeps track of round number mod 4 in %ecx
-##  Preserves xmm0
-##  Clobbers xmm1-xmm5
-##
-&function_begin_B("_vpaes_schedule_mangle");
-	&movdqa	("xmm4","xmm0");	# save xmm0 for later
-	&movdqa	("xmm5",&QWP($k_mc_forward,$const));
-	&test	($out,$out);
-	&jnz	(&label("schedule_mangle_dec"));
-
-	# encrypting
-	&add	($key,16);
-	&pxor	("xmm4",&QWP($k_s63,$const));
-	&pshufb	("xmm4","xmm5");
-	&movdqa	("xmm3","xmm4");
-	&pshufb	("xmm4","xmm5");
-	&pxor	("xmm3","xmm4");
-	&pshufb	("xmm4","xmm5");
-	&pxor	("xmm3","xmm4");
-
-	&jmp	(&label("schedule_mangle_both"));
-
-&set_label("schedule_mangle_dec",16);
-	# inverse mix columns
-	&movdqa	("xmm2",&QWP($k_s0F,$const));
-	&lea	($inp,&DWP($k_dksd,$const));
-	&movdqa	("xmm1","xmm2");
-	&pandn	("xmm1","xmm4");
-	&psrld	("xmm1",4);			# 1 = hi
-	&pand	("xmm4","xmm2");		# 4 = lo
-
-	&movdqa	("xmm2",&QWP(0,$inp));
-	&pshufb	("xmm2","xmm4");
-	&movdqa	("xmm3",&QWP(0x10,$inp));
-	&pshufb	("xmm3","xmm1");
-	&pxor	("xmm3","xmm2");
-	&pshufb	("xmm3","xmm5");
-
-	&movdqa	("xmm2",&QWP(0x20,$inp));
-	&pshufb	("xmm2","xmm4");
-	&pxor	("xmm2","xmm3");
-	&movdqa	("xmm3",&QWP(0x30,$inp));
-	&pshufb	("xmm3","xmm1");
-	&pxor	("xmm3","xmm2");
-	&pshufb	("xmm3","xmm5");
-
-	&movdqa	("xmm2",&QWP(0x40,$inp));
-	&pshufb	("xmm2","xmm4");
-	&pxor	("xmm2","xmm3");
-	&movdqa	("xmm3",&QWP(0x50,$inp));
-	&pshufb	("xmm3","xmm1");
-	&pxor	("xmm3","xmm2");
-	&pshufb	("xmm3","xmm5");
-
-	&movdqa	("xmm2",&QWP(0x60,$inp));
-	&pshufb	("xmm2","xmm4");
-	&pxor	("xmm2","xmm3");
-	&movdqa	("xmm3",&QWP(0x70,$inp));
-	&pshufb	("xmm3","xmm1");
-	&pxor	("xmm3","xmm2");
-
-	&add	($key,-16);
-
-&set_label("schedule_mangle_both");
-	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
-	&pshufb	("xmm3","xmm1");
-	&add	($magic,-16);
-	&and	($magic,0x30);
-	&movdqu	(&QWP(0,$key),"xmm3");
-	&ret	();
-&function_end_B("_vpaes_schedule_mangle");
-
-#
-# Interface to OpenSSL
-#
-&function_begin("${PREFIX}_set_encrypt_key");
-	&mov	($inp,&wparam(0));		# inp
-	&lea	($base,&DWP(-56,"esp"));
-	&mov	($round,&wparam(1));		# bits
-	&and	($base,-16);
-	&mov	($key,&wparam(2));		# key
-	&xchg	($base,"esp");			# alloca
-	&mov	(&DWP(48,"esp"),$base);
-
-	&mov	($base,$round);
-	&shr	($base,5);
-	&add	($base,5);
-	&mov	(&DWP(240,$key),$base);		# AES_KEY->rounds = nbits/32+5;
-	&mov	($magic,0x30);
-	&mov	($out,0);
-
-	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
-	&call	("_vpaes_schedule_core");
-&set_label("pic_point");
-
-	&mov	("esp",&DWP(48,"esp"));
-	&xor	("eax","eax");
-&function_end("${PREFIX}_set_encrypt_key");
-
-&function_begin("${PREFIX}_set_decrypt_key");
-	&mov	($inp,&wparam(0));		# inp
-	&lea	($base,&DWP(-56,"esp"));
-	&mov	($round,&wparam(1));		# bits
-	&and	($base,-16);
-	&mov	($key,&wparam(2));		# key
-	&xchg	($base,"esp");			# alloca
-	&mov	(&DWP(48,"esp"),$base);
-
-	&mov	($base,$round);
-	&shr	($base,5);
-	&add	($base,5);
-	&mov	(&DWP(240,$key),$base);	# AES_KEY->rounds = nbits/32+5;
-	&shl	($base,4);
-	&lea	($key,&DWP(16,$key,$base));
-
-	&mov	($out,1);
-	&mov	($magic,$round);
-	&shr	($magic,1);
-	&and	($magic,32);
-	&xor	($magic,32);			# nbits==192?0:32;
-
-	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
-	&call	("_vpaes_schedule_core");
-&set_label("pic_point");
-
-	&mov	("esp",&DWP(48,"esp"));
-	&xor	("eax","eax");
-&function_end("${PREFIX}_set_decrypt_key");
-
-&function_begin("${PREFIX}_encrypt");
-	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
-	&call	("_vpaes_preheat");
-&set_label("pic_point");
-	&mov	($inp,&wparam(0));		# inp
-	&lea	($base,&DWP(-56,"esp"));
-	&mov	($out,&wparam(1));		# out
-	&and	($base,-16);
-	&mov	($key,&wparam(2));		# key
-	&xchg	($base,"esp");			# alloca
-	&mov	(&DWP(48,"esp"),$base);
-
-	&movdqu	("xmm0",&QWP(0,$inp));
-	&call	("_vpaes_encrypt_core");
-	&movdqu	(&QWP(0,$out),"xmm0");
-
-	&mov	("esp",&DWP(48,"esp"));
-&function_end("${PREFIX}_encrypt");
-
-&function_begin("${PREFIX}_decrypt");
-	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
-	&call	("_vpaes_preheat");
-&set_label("pic_point");
-	&mov	($inp,&wparam(0));		# inp
-	&lea	($base,&DWP(-56,"esp"));
-	&mov	($out,&wparam(1));		# out
-	&and	($base,-16);
-	&mov	($key,&wparam(2));		# key
-	&xchg	($base,"esp");			# alloca
-	&mov	(&DWP(48,"esp"),$base);
-
-	&movdqu	("xmm0",&QWP(0,$inp));
-	&call	("_vpaes_decrypt_core");
-	&movdqu	(&QWP(0,$out),"xmm0");
-
-	&mov	("esp",&DWP(48,"esp"));
-&function_end("${PREFIX}_decrypt");
-
-&function_begin("${PREFIX}_cbc_encrypt");
-	&mov	($inp,&wparam(0));		# inp
-	&mov	($out,&wparam(1));		# out
-	&mov	($round,&wparam(2));		# len
-	&mov	($key,&wparam(3));		# key
-	&sub	($round,16);
-	&jc	(&label("cbc_abort"));
-	&lea	($base,&DWP(-56,"esp"));
-	&mov	($const,&wparam(4));		# ivp
-	&and	($base,-16);
-	&mov	($magic,&wparam(5));		# enc
-	&xchg	($base,"esp");			# alloca
-	&movdqu	("xmm1",&QWP(0,$const));	# load IV
-	&sub	($out,$inp);
-	&mov	(&DWP(48,"esp"),$base);
-
-	&mov	(&DWP(0,"esp"),$out);		# save out
-	&mov	(&DWP(4,"esp"),$key);		# save key
-	&mov	(&DWP(8,"esp"),$const);		# save ivp
-	&mov	($out,$round);			# $out works as $len
-
-	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
-	&call	("_vpaes_preheat");
-&set_label("pic_point");
-	&cmp	($magic,0);
-	&je	(&label("cbc_dec_loop"));
-	&jmp	(&label("cbc_enc_loop"));
-
-&set_label("cbc_enc_loop",16);
-	&movdqu	("xmm0",&QWP(0,$inp));		# load input
-	&pxor	("xmm0","xmm1");		# inp^=iv
-	&call	("_vpaes_encrypt_core");
-	&mov	($base,&DWP(0,"esp"));		# restore out
-	&mov	($key,&DWP(4,"esp"));		# restore key
-	&movdqa	("xmm1","xmm0");
-	&movdqu	(&QWP(0,$base,$inp),"xmm0");	# write output
-	&lea	($inp,&DWP(16,$inp));
-	&sub	($out,16);
-	&jnc	(&label("cbc_enc_loop"));
-	&jmp	(&label("cbc_done"));
-
-&set_label("cbc_dec_loop",16);
-	&movdqu	("xmm0",&QWP(0,$inp));		# load input
-	&movdqa	(&QWP(16,"esp"),"xmm1");	# save IV
-	&movdqa	(&QWP(32,"esp"),"xmm0");	# save future IV
-	&call	("_vpaes_decrypt_core");
-	&mov	($base,&DWP(0,"esp"));		# restore out
-	&mov	($key,&DWP(4,"esp"));		# restore key
-	&pxor	("xmm0",&QWP(16,"esp"));	# out^=iv
-	&movdqa	("xmm1",&QWP(32,"esp"));	# load next IV
-	&movdqu	(&QWP(0,$base,$inp),"xmm0");	# write output
-	&lea	($inp,&DWP(16,$inp));
-	&sub	($out,16);
-	&jnc	(&label("cbc_dec_loop"));
-
-&set_label("cbc_done");
-	&mov	($base,&DWP(8,"esp"));		# restore ivp
-	&mov	("esp",&DWP(48,"esp"));
-	&movdqu	(&QWP(0,$base),"xmm1");		# write IV
-&set_label("cbc_abort");
-&function_end("${PREFIX}_cbc_encrypt");
-
-&asm_finish();

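Every round in the deleted vpaes code, both in this file and in the x86_64 version that follows, starts from the same nibble split: mask the state into low and high 4-bit halves, push each half through a 16-entry pshufb table, and xor the two lookups. A scalar C model of `_vpaes_schedule_transform`, one byte instead of 16 XMM lanes, with placeholder tables rather than the real `.Lk_ipt` constants:

    #include <stdint.h>

    /* lo/hi play the role of the two pshufb operands; on x86 this
     * transform runs on all 16 bytes of an XMM register at once. */
    static uint8_t vpaes_transform_byte(uint8_t x,
                                        const uint8_t lo[16],
                                        const uint8_t hi[16])
    {
        uint8_t k = x & 0x0F;        /* pand  with s0F  -> low nibble  */
        uint8_t i = (x >> 4) & 0x0F; /* pandn + psrld 4 -> high nibble */
        return lo[k] ^ hi[i];        /* two table lookups, then pxor   */
    }

The scalar arrays here are for exposition only: the point of the SSSE3 version is that the 16-entry tables live in registers, so the lookups form no data-dependent addresses and leave no cache footprint, which is what makes the implementation constant-time.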
+ 0 - 1207
drivers/builtin_openssl2/crypto/aes/asm/vpaes-x86_64.pl

@@ -1,1207 +0,0 @@
-#!/usr/bin/env perl
-
-######################################################################
-## Constant-time SSSE3 AES core implementation.
-## version 0.1
-##
-## By Mike Hamburg (Stanford University), 2009
-## Public domain.
-##
-## For details see http://shiftleft.org/papers/vector_aes/ and
-## http://crypto.stanford.edu/vpaes/.
-
-######################################################################
-# September 2011.
-#
-# Interface to OpenSSL as "almost" drop-in replacement for
-# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
-# doesn't handle partial vectors (doesn't have to if called from
-# EVP only). "Drop-in" implies that this module doesn't share key
-# schedule structure with the original nor does it make assumptions
-# about its alignment...
-#
-# Performance summary. aes-x86_64.pl column lists large-block CBC
-# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
-# byte processed with 128-bit key, and vpaes-x86_64.pl column -
-# [also large-block CBC] encrypt/decrypt.
-#
-#		aes-x86_64.pl		vpaes-x86_64.pl
-#
-# Core 2(**)	30.5/43.7/14.3		21.8/25.7(***)
-# Nehalem	30.5/42.2/14.6		 9.8/11.8
-# Atom		63.9/79.0/32.1		64.0/84.8(***)
-#
-# (*)	"Hyper-threading" in this context refers to cache shared
-#	among multiple cores rather than to Intel HTT specifically.
-#	As the vast majority of contemporary cores share cache, the
-#	slower code path is commonplace. In other words, the
-#	"with-hyper-threading-off" results are mostly for reference.
-#
-# (**)	"Core 2" refers to initial 65nm design, a.k.a. Conroe.
-#
-# (***)	The less impressive improvement on Core 2 and Atom is due
-#	to slow pshufb; still a respectable +40%/78% on Core 2 (as
-#	implied, over the "hyper-threading-safe" code path).
-#
-#						<[email protected]>
-
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-$PREFIX="vpaes";
-
-$code.=<<___;
-.text
-
-##
-##  _aes_encrypt_core
-##
-##  AES-encrypt %xmm0.
-##
-##  Inputs:
-##     %xmm0 = input
-##     %xmm9-%xmm15 as in _vpaes_preheat
-##    (%rdx) = scheduled keys
-##
-##  Output in %xmm0
-##  Clobbers  %xmm1-%xmm5, %r9, %r10, %r11, %rax
-##  Preserves %xmm6 - %xmm8 so you get some local vectors
-##
-##
-.type	_vpaes_encrypt_core,\@abi-omnipotent
-.align 16
-_vpaes_encrypt_core:
-	mov	%rdx,	%r9
-	mov	\$16,	%r11
-	mov	240(%rdx),%eax
-	movdqa	%xmm9,	%xmm1
-	movdqa	.Lk_ipt(%rip), %xmm2	# iptlo
-	pandn	%xmm0,	%xmm1
-	movdqu	(%r9),	%xmm5		# round0 key
-	psrld	\$4,	%xmm1
-	pand	%xmm9,	%xmm0
-	pshufb	%xmm0,	%xmm2
-	movdqa	.Lk_ipt+16(%rip), %xmm0	# ipthi
-	pshufb	%xmm1,	%xmm0
-	pxor	%xmm5,	%xmm2
-	pxor	%xmm2,	%xmm0
-	add	\$16,	%r9
-	lea	.Lk_mc_backward(%rip),%r10
-	jmp	.Lenc_entry
-
-.align 16
-.Lenc_loop:
-	# middle of middle round
-	movdqa  %xmm13,	%xmm4	# 4 : sb1u
-	pshufb  %xmm2,	%xmm4	# 4 = sb1u
-	pxor	%xmm5,	%xmm4	# 4 = sb1u + k
-	movdqa  %xmm12,	%xmm0	# 0 : sb1t
-	pshufb  %xmm3,	%xmm0	# 0 = sb1t
-	pxor	%xmm4,	%xmm0	# 0 = A
-	movdqa  %xmm15,	%xmm5	# 4 : sb2u
-	pshufb	%xmm2,	%xmm5	# 4 = sb2u
-	movdqa	-0x40(%r11,%r10), %xmm1		# .Lk_mc_forward[]
-	movdqa	%xmm14, %xmm2	# 2 : sb2t
-	pshufb	%xmm3,  %xmm2	# 2 = sb2t
-	pxor	%xmm5,	%xmm2	# 2 = 2A
-	movdqa	(%r11,%r10), %xmm4		# .Lk_mc_backward[]
-	movdqa	%xmm0,  %xmm3	# 3 = A
-	pshufb  %xmm1,  %xmm0	# 0 = B
-	add	\$16,	%r9	# next key
-	pxor	%xmm2,  %xmm0	# 0 = 2A+B
-	pshufb	%xmm4,	%xmm3	# 3 = D
-	add	\$16,	%r11	# next mc
-	pxor	%xmm0,	%xmm3	# 3 = 2A+B+D
-	pshufb  %xmm1,	%xmm0	# 0 = 2B+C
-	and	\$0x30,	%r11	# ... mod 4
-	pxor	%xmm3,	%xmm0	# 0 = 2A+3B+C+D
-	sub	\$1,%rax	# nr--
-
-.Lenc_entry:
-	# top of round
-	movdqa  %xmm9, 	%xmm1	# 1 : i
-	pandn	%xmm0, 	%xmm1	# 1 = i<<4
-	psrld	\$4,   	%xmm1   # 1 = i
-	pand	%xmm9, 	%xmm0   # 0 = k
-	movdqa	%xmm11, %xmm5	# 2 : a/k
-	pshufb  %xmm0,  %xmm5	# 2 = a/k
-	pxor	%xmm1,	%xmm0	# 0 = j
-	movdqa	%xmm10,	%xmm3  	# 3 : 1/i
-	pshufb  %xmm1, 	%xmm3  	# 3 = 1/i
-	pxor	%xmm5, 	%xmm3  	# 3 = iak = 1/i + a/k
-	movdqa	%xmm10,	%xmm4  	# 4 : 1/j
-	pshufb	%xmm0, 	%xmm4  	# 4 = 1/j
-	pxor	%xmm5, 	%xmm4  	# 4 = jak = 1/j + a/k
-	movdqa	%xmm10,	%xmm2  	# 2 : 1/iak
-	pshufb  %xmm3,	%xmm2  	# 2 = 1/iak
-	pxor	%xmm0, 	%xmm2  	# 2 = io
-	movdqa	%xmm10, %xmm3   # 3 : 1/jak
-	movdqu	(%r9),	%xmm5
-	pshufb  %xmm4,  %xmm3   # 3 = 1/jak
-	pxor	%xmm1,  %xmm3   # 3 = jo
-	jnz	.Lenc_loop
-
-	# middle of last round
-	movdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
-	movdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
-	pshufb  %xmm2,  %xmm4	# 4 = sbou
-	pxor	%xmm5,  %xmm4	# 4 = sb1u + k
-	pshufb  %xmm3,	%xmm0	# 0 = sb1t
-	movdqa	0x40(%r11,%r10), %xmm1		# .Lk_sr[]
-	pxor	%xmm4,	%xmm0	# 0 = A
-	pshufb	%xmm1,	%xmm0
-	ret
-.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core
-	
-##
-##  Decryption core
-##
-##  Same API as encryption core.
-##
-.type	_vpaes_decrypt_core,\@abi-omnipotent
-.align	16
-_vpaes_decrypt_core:
-	mov	%rdx,	%r9		# load key
-	mov	240(%rdx),%eax
-	movdqa	%xmm9,	%xmm1
-	movdqa	.Lk_dipt(%rip), %xmm2	# iptlo
-	pandn	%xmm0,	%xmm1
-	mov	%rax,	%r11
-	psrld	\$4,	%xmm1
-	movdqu	(%r9),	%xmm5		# round0 key
-	shl	\$4,	%r11
-	pand	%xmm9,	%xmm0
-	pshufb	%xmm0,	%xmm2
-	movdqa	.Lk_dipt+16(%rip), %xmm0 # ipthi
-	xor	\$0x30,	%r11
-	lea	.Lk_dsbd(%rip),%r10
-	pshufb	%xmm1,	%xmm0
-	and	\$0x30,	%r11
-	pxor	%xmm5,	%xmm2
-	movdqa	.Lk_mc_forward+48(%rip), %xmm5
-	pxor	%xmm2,	%xmm0
-	add	\$16,	%r9
-	add	%r10,	%r11
-	jmp	.Ldec_entry
-
-.align 16
-.Ldec_loop:
-##
-##  Inverse mix columns
-##
-	movdqa  -0x20(%r10),%xmm4	# 4 : sb9u
-	pshufb	%xmm2,	%xmm4		# 4 = sb9u
-	pxor	%xmm0,	%xmm4
-	movdqa  -0x10(%r10),%xmm0	# 0 : sb9t
-	pshufb	%xmm3,	%xmm0		# 0 = sb9t
-	pxor	%xmm4,	%xmm0		# 0 = ch
-	add	\$16, %r9		# next round key
-
-	pshufb	%xmm5,	%xmm0		# MC ch
-	movdqa  0x00(%r10),%xmm4	# 4 : sbdu
-	pshufb	%xmm2,	%xmm4		# 4 = sbdu
-	pxor	%xmm0,	%xmm4		# 4 = ch
-	movdqa  0x10(%r10),%xmm0	# 0 : sbdt
-	pshufb	%xmm3,	%xmm0		# 0 = sbdt
-	pxor	%xmm4,	%xmm0		# 0 = ch
-	sub	\$1,%rax		# nr--
-	
-	pshufb	%xmm5,	%xmm0		# MC ch
-	movdqa  0x20(%r10),%xmm4	# 4 : sbbu
-	pshufb	%xmm2,	%xmm4		# 4 = sbbu
-	pxor	%xmm0,	%xmm4		# 4 = ch
-	movdqa  0x30(%r10),%xmm0	# 0 : sbbt
-	pshufb	%xmm3,	%xmm0		# 0 = sbbt
-	pxor	%xmm4,	%xmm0		# 0 = ch
-	
-	pshufb	%xmm5,	%xmm0		# MC ch
-	movdqa  0x40(%r10),%xmm4	# 4 : sbeu
-	pshufb	%xmm2,	%xmm4		# 4 = sbeu
-	pxor	%xmm0,	%xmm4		# 4 = ch
-	movdqa  0x50(%r10),%xmm0	# 0 : sbet
-	pshufb	%xmm3,	%xmm0		# 0 = sbet
-	pxor	%xmm4,	%xmm0		# 0 = ch
-
-	palignr	\$12,	%xmm5,	%xmm5
-	
-.Ldec_entry:
-	# top of round
-	movdqa  %xmm9, 	%xmm1	# 1 : i
-	pandn	%xmm0, 	%xmm1	# 1 = i<<4
-	psrld	\$4,    %xmm1	# 1 = i
-	pand	%xmm9, 	%xmm0	# 0 = k
-	movdqa	%xmm11, %xmm2	# 2 : a/k
-	pshufb  %xmm0,  %xmm2	# 2 = a/k
-	pxor	%xmm1,	%xmm0	# 0 = j
-	movdqa	%xmm10,	%xmm3	# 3 : 1/i
-	pshufb  %xmm1, 	%xmm3	# 3 = 1/i
-	pxor	%xmm2, 	%xmm3	# 3 = iak = 1/i + a/k
-	movdqa	%xmm10,	%xmm4	# 4 : 1/j
-	pshufb	%xmm0, 	%xmm4	# 4 = 1/j
-	pxor	%xmm2, 	%xmm4	# 4 = jak = 1/j + a/k
-	movdqa	%xmm10,	%xmm2	# 2 : 1/iak
-	pshufb  %xmm3,	%xmm2	# 2 = 1/iak
-	pxor	%xmm0, 	%xmm2	# 2 = io
-	movdqa	%xmm10, %xmm3	# 3 : 1/jak
-	pshufb  %xmm4,  %xmm3	# 3 = 1/jak
-	pxor	%xmm1,  %xmm3	# 3 = jo
-	movdqu	(%r9),	%xmm0
-	jnz	.Ldec_loop
-
-	# middle of last round
-	movdqa	0x60(%r10), %xmm4	# 3 : sbou
-	pshufb  %xmm2,  %xmm4	# 4 = sbou
-	pxor	%xmm0,  %xmm4	# 4 = sb1u + k
-	movdqa	0x70(%r10), %xmm0	# 0 : sbot
-	movdqa	-0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
-	pshufb  %xmm3,	%xmm0	# 0 = sb1t
-	pxor	%xmm4,	%xmm0	# 0 = A
-	pshufb	%xmm2,	%xmm0
-	ret
-.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core
-
-########################################################
-##                                                    ##
-##                  AES key schedule                  ##
-##                                                    ##
-########################################################
-.type	_vpaes_schedule_core,\@abi-omnipotent
-.align	16
-_vpaes_schedule_core:
-	# rdi = key
-	# rsi = size in bits
-	# rdx = buffer
-	# rcx = direction.  0=encrypt, 1=decrypt
-
-	call	_vpaes_preheat		# load the tables
-	movdqa	.Lk_rcon(%rip), %xmm8	# load rcon
-	movdqu	(%rdi),	%xmm0		# load key (unaligned)
-
-	# input transform
-	movdqa	%xmm0,	%xmm3
-	lea	.Lk_ipt(%rip), %r11
-	call	_vpaes_schedule_transform
-	movdqa	%xmm0,	%xmm7
-
-	lea	.Lk_sr(%rip),%r10
-	test	%rcx,	%rcx
-	jnz	.Lschedule_am_decrypting
-
-	# encrypting, output zeroth round key after transform
-	movdqu	%xmm0,	(%rdx)
-	jmp	.Lschedule_go
-
-.Lschedule_am_decrypting:
-	# decrypting, output zeroth round key after shiftrows
-	movdqa	(%r8,%r10),%xmm1
-	pshufb  %xmm1,	%xmm3
-	movdqu	%xmm3,	(%rdx)
-	xor	\$0x30, %r8
-
-.Lschedule_go:
-	cmp	\$192,	%esi
-	ja	.Lschedule_256
-	je	.Lschedule_192
-	# 128: fall through
-
-##
-##  .schedule_128
-##
-##  128-bit specific part of key schedule.
-##
-##  This schedule is really simple, because all its parts
-##  are accomplished by the subroutines.
-##
-.Lschedule_128:
-	mov	\$10, %esi
-	
-.Loop_schedule_128:
-	call 	_vpaes_schedule_round
-	dec	%rsi
-	jz 	.Lschedule_mangle_last
-	call	_vpaes_schedule_mangle	# write output
-	jmp 	.Loop_schedule_128
-
-##
-##  .aes_schedule_192
-##
-##  192-bit specific part of key schedule.
-##
-##  The main body of this schedule is the same as the 128-bit
-##  schedule, but with more smearing.  The long, high side is
-##  stored in %xmm7 as before, and the short, low side is in
-##  the high bits of %xmm6.
-##
-##  This schedule is somewhat nastier, however, because each
-##  round produces 192 bits of key material, or 1.5 round keys.
-##  Therefore, on each cycle we do 2 rounds and produce 3 round
-##  keys.
-##
-.align	16
-.Lschedule_192:
-	movdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
-	call	_vpaes_schedule_transform	# input transform
-	movdqa	%xmm0,	%xmm6		# save short part
-	pxor	%xmm4,	%xmm4		# clear 4
-	movhlps	%xmm4,	%xmm6		# clobber low side with zeros
-	mov	\$4,	%esi
-
-.Loop_schedule_192:
-	call	_vpaes_schedule_round
-	palignr	\$8,%xmm6,%xmm0	
-	call	_vpaes_schedule_mangle	# save key n
-	call	_vpaes_schedule_192_smear
-	call	_vpaes_schedule_mangle	# save key n+1
-	call	_vpaes_schedule_round
-	dec	%rsi
-	jz 	.Lschedule_mangle_last
-	call	_vpaes_schedule_mangle	# save key n+2
-	call	_vpaes_schedule_192_smear
-	jmp	.Loop_schedule_192
-
-##
-##  .aes_schedule_256
-##
-##  256-bit specific part of key schedule.
-##
-##  The structure here is very similar to the 128-bit
-##  schedule, but with an additional "low side" in
-##  %xmm6.  The low side's rounds are the same as the
-##  high side's, except no rcon and no rotation.
-##
-.align	16
-.Lschedule_256:
-	movdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
-	call	_vpaes_schedule_transform	# input transform
-	mov	\$7, %esi
-	
-.Loop_schedule_256:
-	call	_vpaes_schedule_mangle	# output low result
-	movdqa	%xmm0,	%xmm6		# save cur_lo in xmm6
-
-	# high round
-	call	_vpaes_schedule_round
-	dec	%rsi
-	jz 	.Lschedule_mangle_last
-	call	_vpaes_schedule_mangle	
-
-	# low round. swap xmm7 and xmm6
-	pshufd	\$0xFF,	%xmm0,	%xmm0
-	movdqa	%xmm7,	%xmm5
-	movdqa	%xmm6,	%xmm7
-	call	_vpaes_schedule_low_round
-	movdqa	%xmm5,	%xmm7
-	
-	jmp	.Loop_schedule_256
-
-	
-##
-##  .aes_schedule_mangle_last
-##
-##  Mangler for last round of key schedule
-##  Mangles %xmm0
-##    when encrypting, outputs out(%xmm0) ^ 63
-##    when decrypting, outputs unskew(%xmm0)
-##
-##  Always called right before return... jumps to cleanup and exits
-##
-.align	16
-.Lschedule_mangle_last:
-	# schedule last round key from xmm0
-	lea	.Lk_deskew(%rip),%r11	# prepare to deskew
-	test	%rcx, 	%rcx
-	jnz	.Lschedule_mangle_last_dec
-
-	# encrypting
-	movdqa	(%r8,%r10),%xmm1
-	pshufb	%xmm1,	%xmm0		# output permute
-	lea	.Lk_opt(%rip),	%r11	# prepare to output transform
-	add	\$32,	%rdx
-
-.Lschedule_mangle_last_dec:
-	add	\$-16,	%rdx
-	pxor	.Lk_s63(%rip),	%xmm0
-	call	_vpaes_schedule_transform # output transform
-	movdqu	%xmm0,	(%rdx)		# save last key
-
-	# cleanup
-	pxor	%xmm0,  %xmm0
-	pxor	%xmm1,  %xmm1
-	pxor	%xmm2,  %xmm2
-	pxor	%xmm3,  %xmm3
-	pxor	%xmm4,  %xmm4
-	pxor	%xmm5,  %xmm5
-	pxor	%xmm6,  %xmm6
-	pxor	%xmm7,  %xmm7
-	ret
-.size	_vpaes_schedule_core,.-_vpaes_schedule_core
-
-##
-##  .aes_schedule_192_smear
-##
-##  Smear the short, low side in the 192-bit key schedule.
-##
-##  Inputs:
-##    %xmm7: high side, b  a  x  y
-##    %xmm6:  low side, d  c  0  0
-##    %xmm13: 0
-##
-##  Outputs:
-##    %xmm6: b+c+d  b+c  0  0
-##    %xmm0: b+c+d  b+c  b  a
-##
-.type	_vpaes_schedule_192_smear,\@abi-omnipotent
-.align	16
-_vpaes_schedule_192_smear:
-	pshufd	\$0x80,	%xmm6,	%xmm0	# d c 0 0 -> c 0 0 0
-	pxor	%xmm0,	%xmm6		# -> c+d c 0 0
-	pshufd	\$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
-	pxor	%xmm0,	%xmm6		# -> b+c+d b+c b a
-	movdqa	%xmm6,	%xmm0
-	pxor	%xmm1,	%xmm1
-	movhlps	%xmm1,	%xmm6		# clobber low side with zeros
-	ret
-.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
-
-##
-##  .aes_schedule_round
-##
-##  Runs one main round of the key schedule on %xmm0, %xmm7
-##
-##  Specifically, runs subbytes on the high dword of %xmm0
-##  then rotates it by one byte and xors into the low dword of
-##  %xmm7.
-##
-##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
-##  next rcon.
-##
-##  Smears the dwords of %xmm7 by xoring the low into the
-##  second low, result into third, result into highest.
-##
-##  Returns results in %xmm7 = %xmm0.
-##  Clobbers %xmm1-%xmm4, %r11.
-##
-.type	_vpaes_schedule_round,\@abi-omnipotent
-.align	16
-_vpaes_schedule_round:
-	# extract rcon from xmm8
-	pxor	%xmm1,	%xmm1
-	palignr	\$15,	%xmm8,	%xmm1
-	palignr	\$15,	%xmm8,	%xmm8
-	pxor	%xmm1,	%xmm7
-
-	# rotate
-	pshufd	\$0xFF,	%xmm0,	%xmm0
-	palignr	\$1,	%xmm0,	%xmm0
-	
-	# fall through...
-	
-	# low round: same as high round, but no rotation and no rcon.
-_vpaes_schedule_low_round:
-	# smear xmm7
-	movdqa	%xmm7,	%xmm1
-	pslldq	\$4,	%xmm7
-	pxor	%xmm1,	%xmm7
-	movdqa	%xmm7,	%xmm1
-	pslldq	\$8,	%xmm7
-	pxor	%xmm1,	%xmm7
-	pxor	.Lk_s63(%rip), %xmm7
-
-	# subbytes
-	movdqa  %xmm9, 	%xmm1
-	pandn	%xmm0, 	%xmm1
-	psrld	\$4,    %xmm1		# 1 = i
-	pand	%xmm9, 	%xmm0		# 0 = k
-	movdqa	%xmm11, %xmm2		# 2 : a/k
-	pshufb  %xmm0,  %xmm2		# 2 = a/k
-	pxor	%xmm1,	%xmm0		# 0 = j
-	movdqa	%xmm10,	%xmm3		# 3 : 1/i
-	pshufb  %xmm1, 	%xmm3		# 3 = 1/i
-	pxor	%xmm2, 	%xmm3		# 3 = iak = 1/i + a/k
-	movdqa	%xmm10,	%xmm4		# 4 : 1/j
-	pshufb	%xmm0, 	%xmm4		# 4 = 1/j
-	pxor	%xmm2, 	%xmm4		# 4 = jak = 1/j + a/k
-	movdqa	%xmm10,	%xmm2		# 2 : 1/iak
-	pshufb  %xmm3,	%xmm2		# 2 = 1/iak
-	pxor	%xmm0, 	%xmm2		# 2 = io
-	movdqa	%xmm10, %xmm3		# 3 : 1/jak
-	pshufb  %xmm4,  %xmm3		# 3 = 1/jak
-	pxor	%xmm1,  %xmm3		# 3 = jo
-	movdqa	%xmm13, %xmm4		# 4 : sbou
-	pshufb  %xmm2,  %xmm4		# 4 = sbou
-	movdqa	%xmm12, %xmm0		# 0 : sbot
-	pshufb  %xmm3,	%xmm0		# 0 = sb1t
-	pxor	%xmm4, 	%xmm0		# 0 = sbox output
-
-	# add in smeared stuff
-	pxor	%xmm7,	%xmm0	
-	movdqa	%xmm0,	%xmm7
-	ret
-.size	_vpaes_schedule_round,.-_vpaes_schedule_round
-
-##
-##  .aes_schedule_transform
-##
-##  Linear-transform %xmm0 according to tables at (%r11)
-##
-##  Requires that %xmm9 = 0x0F0F... as in preheat
-##  Output in %xmm0
-##  Clobbers %xmm1, %xmm2
-##
-.type	_vpaes_schedule_transform,\@abi-omnipotent
-.align	16
-_vpaes_schedule_transform:
-	movdqa	%xmm9,	%xmm1
-	pandn	%xmm0,	%xmm1
-	psrld	\$4,	%xmm1
-	pand	%xmm9,	%xmm0
-	movdqa	(%r11), %xmm2 	# lo
-	pshufb	%xmm0,	%xmm2
-	movdqa	16(%r11), %xmm0 # hi
-	pshufb	%xmm1,	%xmm0
-	pxor	%xmm2,	%xmm0
-	ret
-.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform
-
-##
-##  .aes_schedule_mangle
-##
-##  Mangle xmm0 from (basis-transformed) standard version
-##  to our version.
-##
-##  On encrypt,
-##    xor with 0x63
-##    multiply by circulant 0,1,1,1
-##    apply shiftrows transform
-##
-##  On decrypt,
-##    xor with 0x63
-##    multiply by "inverse mixcolumns" circulant E,B,D,9
-##    deskew
-##    apply shiftrows transform
-##
-##
-##  Writes out to (%rdx), and increments or decrements it
-##  Keeps track of round number mod 4 in %r8
-##  Preserves xmm0
-##  Clobbers xmm1-xmm5
-##
-.type	_vpaes_schedule_mangle,\@abi-omnipotent
-.align	16
-_vpaes_schedule_mangle:
-	movdqa	%xmm0,	%xmm4	# save xmm0 for later
-	movdqa	.Lk_mc_forward(%rip),%xmm5
-	test	%rcx, 	%rcx
-	jnz	.Lschedule_mangle_dec
-
-	# encrypting
-	add	\$16,	%rdx
-	pxor	.Lk_s63(%rip),%xmm4
-	pshufb	%xmm5,	%xmm4
-	movdqa	%xmm4,	%xmm3
-	pshufb	%xmm5,	%xmm4
-	pxor	%xmm4,	%xmm3
-	pshufb	%xmm5,	%xmm4
-	pxor	%xmm4,	%xmm3
-
-	jmp	.Lschedule_mangle_both
-.align	16
-.Lschedule_mangle_dec:
-	# inverse mix columns
-	lea	.Lk_dksd(%rip),%r11
-	movdqa	%xmm9,	%xmm1
-	pandn	%xmm4,	%xmm1
-	psrld	\$4,	%xmm1	# 1 = hi
-	pand	%xmm9,	%xmm4	# 4 = lo
-
-	movdqa	0x00(%r11), %xmm2
-	pshufb	%xmm4,	%xmm2
-	movdqa	0x10(%r11), %xmm3
-	pshufb	%xmm1,	%xmm3
-	pxor	%xmm2,	%xmm3
-	pshufb	%xmm5,	%xmm3
-
-	movdqa	0x20(%r11), %xmm2
-	pshufb	%xmm4,	%xmm2
-	pxor	%xmm3,	%xmm2
-	movdqa	0x30(%r11), %xmm3
-	pshufb	%xmm1,	%xmm3
-	pxor	%xmm2,	%xmm3
-	pshufb	%xmm5,	%xmm3
-
-	movdqa	0x40(%r11), %xmm2
-	pshufb	%xmm4,	%xmm2
-	pxor	%xmm3,	%xmm2
-	movdqa	0x50(%r11), %xmm3
-	pshufb	%xmm1,	%xmm3
-	pxor	%xmm2,	%xmm3
-	pshufb	%xmm5,	%xmm3
-
-	movdqa	0x60(%r11), %xmm2
-	pshufb	%xmm4,	%xmm2
-	pxor	%xmm3,	%xmm2
-	movdqa	0x70(%r11), %xmm3
-	pshufb	%xmm1,	%xmm3
-	pxor	%xmm2,	%xmm3
-
-	add	\$-16,	%rdx
-
-.Lschedule_mangle_both:
-	movdqa	(%r8,%r10),%xmm1
-	pshufb	%xmm1,%xmm3
-	add	\$-16,	%r8
-	and	\$0x30,	%r8
-	movdqu	%xmm3,	(%rdx)
-	ret
-.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle
-
-#
-# Interface to OpenSSL
-#
-.globl	${PREFIX}_set_encrypt_key
-.type	${PREFIX}_set_encrypt_key,\@function,3
-.align	16
-${PREFIX}_set_encrypt_key:
-___
-$code.=<<___ if ($win64);
-	lea	-0xb8(%rsp),%rsp
-	movaps	%xmm6,0x10(%rsp)
-	movaps	%xmm7,0x20(%rsp)
-	movaps	%xmm8,0x30(%rsp)
-	movaps	%xmm9,0x40(%rsp)
-	movaps	%xmm10,0x50(%rsp)
-	movaps	%xmm11,0x60(%rsp)
-	movaps	%xmm12,0x70(%rsp)
-	movaps	%xmm13,0x80(%rsp)
-	movaps	%xmm14,0x90(%rsp)
-	movaps	%xmm15,0xa0(%rsp)
-.Lenc_key_body:
-___
-$code.=<<___;
-	mov	%esi,%eax
-	shr	\$5,%eax
-	add	\$5,%eax
-	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
-
-	mov	\$0,%ecx
-	mov	\$0x30,%r8d
-	call	_vpaes_schedule_core
-___
-$code.=<<___ if ($win64);
-	movaps	0x10(%rsp),%xmm6
-	movaps	0x20(%rsp),%xmm7
-	movaps	0x30(%rsp),%xmm8
-	movaps	0x40(%rsp),%xmm9
-	movaps	0x50(%rsp),%xmm10
-	movaps	0x60(%rsp),%xmm11
-	movaps	0x70(%rsp),%xmm12
-	movaps	0x80(%rsp),%xmm13
-	movaps	0x90(%rsp),%xmm14
-	movaps	0xa0(%rsp),%xmm15
-	lea	0xb8(%rsp),%rsp
-.Lenc_key_epilogue:
-___
-$code.=<<___;
-	xor	%eax,%eax
-	ret
-.size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
-
-.globl	${PREFIX}_set_decrypt_key
-.type	${PREFIX}_set_decrypt_key,\@function,3
-.align	16
-${PREFIX}_set_decrypt_key:
-___
-$code.=<<___ if ($win64);
-	lea	-0xb8(%rsp),%rsp
-	movaps	%xmm6,0x10(%rsp)
-	movaps	%xmm7,0x20(%rsp)
-	movaps	%xmm8,0x30(%rsp)
-	movaps	%xmm9,0x40(%rsp)
-	movaps	%xmm10,0x50(%rsp)
-	movaps	%xmm11,0x60(%rsp)
-	movaps	%xmm12,0x70(%rsp)
-	movaps	%xmm13,0x80(%rsp)
-	movaps	%xmm14,0x90(%rsp)
-	movaps	%xmm15,0xa0(%rsp)
-.Ldec_key_body:
-___
-$code.=<<___;
-	mov	%esi,%eax
-	shr	\$5,%eax
-	add	\$5,%eax
-	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
-	shl	\$4,%eax
-	lea	16(%rdx,%rax),%rdx
-
-	mov	\$1,%ecx
-	mov	%esi,%r8d
-	shr	\$1,%r8d
-	and	\$32,%r8d
-	xor	\$32,%r8d	# nbits==192?0:32
-	call	_vpaes_schedule_core
-___
-$code.=<<___ if ($win64);
-	movaps	0x10(%rsp),%xmm6
-	movaps	0x20(%rsp),%xmm7
-	movaps	0x30(%rsp),%xmm8
-	movaps	0x40(%rsp),%xmm9
-	movaps	0x50(%rsp),%xmm10
-	movaps	0x60(%rsp),%xmm11
-	movaps	0x70(%rsp),%xmm12
-	movaps	0x80(%rsp),%xmm13
-	movaps	0x90(%rsp),%xmm14
-	movaps	0xa0(%rsp),%xmm15
-	lea	0xb8(%rsp),%rsp
-.Ldec_key_epilogue:
-___
-$code.=<<___;
-	xor	%eax,%eax
-	ret
-.size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
-
-.globl	${PREFIX}_encrypt
-.type	${PREFIX}_encrypt,\@function,3
-.align	16
-${PREFIX}_encrypt:
-___
-$code.=<<___ if ($win64);
-	lea	-0xb8(%rsp),%rsp
-	movaps	%xmm6,0x10(%rsp)
-	movaps	%xmm7,0x20(%rsp)
-	movaps	%xmm8,0x30(%rsp)
-	movaps	%xmm9,0x40(%rsp)
-	movaps	%xmm10,0x50(%rsp)
-	movaps	%xmm11,0x60(%rsp)
-	movaps	%xmm12,0x70(%rsp)
-	movaps	%xmm13,0x80(%rsp)
-	movaps	%xmm14,0x90(%rsp)
-	movaps	%xmm15,0xa0(%rsp)
-.Lenc_body:
-___
-$code.=<<___;
-	movdqu	(%rdi),%xmm0
-	call	_vpaes_preheat
-	call	_vpaes_encrypt_core
-	movdqu	%xmm0,(%rsi)
-___
-$code.=<<___ if ($win64);
-	movaps	0x10(%rsp),%xmm6
-	movaps	0x20(%rsp),%xmm7
-	movaps	0x30(%rsp),%xmm8
-	movaps	0x40(%rsp),%xmm9
-	movaps	0x50(%rsp),%xmm10
-	movaps	0x60(%rsp),%xmm11
-	movaps	0x70(%rsp),%xmm12
-	movaps	0x80(%rsp),%xmm13
-	movaps	0x90(%rsp),%xmm14
-	movaps	0xa0(%rsp),%xmm15
-	lea	0xb8(%rsp),%rsp
-.Lenc_epilogue:
-___
-$code.=<<___;
-	ret
-.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt
-
-.globl	${PREFIX}_decrypt
-.type	${PREFIX}_decrypt,\@function,3
-.align	16
-${PREFIX}_decrypt:
-___
-$code.=<<___ if ($win64);
-	lea	-0xb8(%rsp),%rsp
-	movaps	%xmm6,0x10(%rsp)
-	movaps	%xmm7,0x20(%rsp)
-	movaps	%xmm8,0x30(%rsp)
-	movaps	%xmm9,0x40(%rsp)
-	movaps	%xmm10,0x50(%rsp)
-	movaps	%xmm11,0x60(%rsp)
-	movaps	%xmm12,0x70(%rsp)
-	movaps	%xmm13,0x80(%rsp)
-	movaps	%xmm14,0x90(%rsp)
-	movaps	%xmm15,0xa0(%rsp)
-.Ldec_body:
-___
-$code.=<<___;
-	movdqu	(%rdi),%xmm0
-	call	_vpaes_preheat
-	call	_vpaes_decrypt_core
-	movdqu	%xmm0,(%rsi)
-___
-$code.=<<___ if ($win64);
-	movaps	0x10(%rsp),%xmm6
-	movaps	0x20(%rsp),%xmm7
-	movaps	0x30(%rsp),%xmm8
-	movaps	0x40(%rsp),%xmm9
-	movaps	0x50(%rsp),%xmm10
-	movaps	0x60(%rsp),%xmm11
-	movaps	0x70(%rsp),%xmm12
-	movaps	0x80(%rsp),%xmm13
-	movaps	0x90(%rsp),%xmm14
-	movaps	0xa0(%rsp),%xmm15
-	lea	0xb8(%rsp),%rsp
-.Ldec_epilogue:
-___
-$code.=<<___;
-	ret
-.size	${PREFIX}_decrypt,.-${PREFIX}_decrypt
-___
-{
-my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
-# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
-#                       size_t length, const AES_KEY *key,
-#                       unsigned char *ivp,const int enc);
-$code.=<<___;
-.globl	${PREFIX}_cbc_encrypt
-.type	${PREFIX}_cbc_encrypt,\@function,6
-.align	16
-${PREFIX}_cbc_encrypt:
-	xchg	$key,$len
-___
-($len,$key)=($key,$len);
-$code.=<<___;
-	sub	\$16,$len
-	jc	.Lcbc_abort
-___
-$code.=<<___ if ($win64);
-	lea	-0xb8(%rsp),%rsp
-	movaps	%xmm6,0x10(%rsp)
-	movaps	%xmm7,0x20(%rsp)
-	movaps	%xmm8,0x30(%rsp)
-	movaps	%xmm9,0x40(%rsp)
-	movaps	%xmm10,0x50(%rsp)
-	movaps	%xmm11,0x60(%rsp)
-	movaps	%xmm12,0x70(%rsp)
-	movaps	%xmm13,0x80(%rsp)
-	movaps	%xmm14,0x90(%rsp)
-	movaps	%xmm15,0xa0(%rsp)
-.Lcbc_body:
-___
-$code.=<<___;
-	movdqu	($ivp),%xmm6		# load IV
-	sub	$inp,$out
-	call	_vpaes_preheat
-	cmp	\$0,${enc}d
-	je	.Lcbc_dec_loop
-	jmp	.Lcbc_enc_loop
-.align	16
-.Lcbc_enc_loop:
-	movdqu	($inp),%xmm0
-	pxor	%xmm6,%xmm0
-	call	_vpaes_encrypt_core
-	movdqa	%xmm0,%xmm6
-	movdqu	%xmm0,($out,$inp)
-	lea	16($inp),$inp
-	sub	\$16,$len
-	jnc	.Lcbc_enc_loop
-	jmp	.Lcbc_done
-.align	16
-.Lcbc_dec_loop:
-	movdqu	($inp),%xmm0
-	movdqa	%xmm0,%xmm7
-	call	_vpaes_decrypt_core
-	pxor	%xmm6,%xmm0
-	movdqa	%xmm7,%xmm6
-	movdqu	%xmm0,($out,$inp)
-	lea	16($inp),$inp
-	sub	\$16,$len
-	jnc	.Lcbc_dec_loop
-.Lcbc_done:
-	movdqu	%xmm6,($ivp)		# save IV
-___
-$code.=<<___ if ($win64);
-	movaps	0x10(%rsp),%xmm6
-	movaps	0x20(%rsp),%xmm7
-	movaps	0x30(%rsp),%xmm8
-	movaps	0x40(%rsp),%xmm9
-	movaps	0x50(%rsp),%xmm10
-	movaps	0x60(%rsp),%xmm11
-	movaps	0x70(%rsp),%xmm12
-	movaps	0x80(%rsp),%xmm13
-	movaps	0x90(%rsp),%xmm14
-	movaps	0xa0(%rsp),%xmm15
-	lea	0xb8(%rsp),%rsp
-.Lcbc_epilogue:
-___
-$code.=<<___;
-.Lcbc_abort:
-	ret
-.size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
-___
-}
-$code.=<<___;
-##
-##  _aes_preheat
-##
-##  Fills register %r10 -> .aes_consts (so you can -fPIC)
-##  and %xmm9-%xmm15 as specified below.
-##
-.type	_vpaes_preheat,\@abi-omnipotent
-.align	16
-_vpaes_preheat:
-	lea	.Lk_s0F(%rip), %r10
-	movdqa	-0x20(%r10), %xmm10	# .Lk_inv
-	movdqa	-0x10(%r10), %xmm11	# .Lk_inv+16
-	movdqa	0x00(%r10), %xmm9	# .Lk_s0F
-	movdqa	0x30(%r10), %xmm13	# .Lk_sb1
-	movdqa	0x40(%r10), %xmm12	# .Lk_sb1+16
-	movdqa	0x50(%r10), %xmm15	# .Lk_sb2
-	movdqa	0x60(%r10), %xmm14	# .Lk_sb2+16
-	ret
-.size	_vpaes_preheat,.-_vpaes_preheat
-########################################################
-##                                                    ##
-##                     Constants                      ##
-##                                                    ##
-########################################################
-.type	_vpaes_consts,\@object
-.align	64
-_vpaes_consts:
-.Lk_inv:	# inv, inva
-	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
-	.quad	0x01040A060F0B0780, 0x030D0E0C02050809
-
-.Lk_s0F:	# s0F
-	.quad	0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
-
-.Lk_ipt:	# input transform (lo, hi)
-	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
-	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
-
-.Lk_sb1:	# sb1u, sb1t
-	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
-	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
-.Lk_sb2:	# sb2u, sb2t
-	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD
-	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
-.Lk_sbo:	# sbou, sbot
-	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
-	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
-
-.Lk_mc_forward:	# mc_forward
-	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
-	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
-	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
-	.quad	0x000302010C0F0E0D, 0x080B0A0904070605
-
-.Lk_mc_backward:# mc_backward
-	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
-	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
-	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
-	.quad	0x0A09080B06050407, 0x020100030E0D0C0F
-
-.Lk_sr:		# sr
-	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
-	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
-	.quad	0x0F060D040B020900, 0x070E050C030A0108
-	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508
-
-.Lk_rcon:	# rcon
-	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
-
-.Lk_s63:	# s63: all equal to 0x63 transformed
-	.quad	0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
-
-.Lk_opt:	# output transform
-	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
-	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
-
-.Lk_deskew:	# deskew tables: inverts the sbox's "skew"
-	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
-	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
-
-##
-##  Decryption stuff
-##  Key schedule constants
-##
-.Lk_dksd:	# decryption key schedule: invskew x*D
-	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
-	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
-.Lk_dksb:	# decryption key schedule: invskew x*B
-	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
-	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
-.Lk_dkse:	# decryption key schedule: invskew x*E + 0x63
-	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
-	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
-.Lk_dks9:	# decryption key schedule: invskew x*9
-	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
-	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE
-
-##
-##  Decryption stuff
-##  Round function constants
-##
-.Lk_dipt:	# decryption input transform
-	.quad	0x0F505B040B545F00, 0x154A411E114E451A
-	.quad	0x86E383E660056500, 0x12771772F491F194
-
-.Lk_dsb9:	# decryption sbox output *9*u, *9*t
-	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
-	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
-.Lk_dsbd:	# decryption sbox output *D*u, *D*t
-	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
-	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
-.Lk_dsbb:	# decryption sbox output *B*u, *B*t
-	.quad	0xD022649296B44200, 0x602646F6B0F2D404
-	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
-.Lk_dsbe:	# decryption sbox output *E*u, *E*t
-	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
-	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32
-.Lk_dsbo:	# decryption sbox final output
-	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
-	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
-.asciz	"Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
-.align	64
-.size	_vpaes_consts,.-_vpaes_consts
-___
-
-if ($win64) {
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern	__imp_RtlVirtualUnwind
-.type	se_handler,\@abi-omnipotent
-.align	16
-se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# prologue label
-	cmp	%r10,%rbx		# context->Rip<prologue label
-	jb	.Lin_prologue
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lin_prologue
-
-	lea	16(%rax),%rsi		# %xmm save area
-	lea	512($context),%rdi	# &context.Xmm6
-	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
-	.long	0xa548f3fc		# cld; rep movsq
-	lea	0xb8(%rax),%rax		# adjust stack pointer
-
-.Lin_prologue:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	add	\$64,%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	se_handler,.-se_handler
-
-.section	.pdata
-.align	4
-	.rva	.LSEH_begin_${PREFIX}_set_encrypt_key
-	.rva	.LSEH_end_${PREFIX}_set_encrypt_key
-	.rva	.LSEH_info_${PREFIX}_set_encrypt_key
-
-	.rva	.LSEH_begin_${PREFIX}_set_decrypt_key
-	.rva	.LSEH_end_${PREFIX}_set_decrypt_key
-	.rva	.LSEH_info_${PREFIX}_set_decrypt_key
-
-	.rva	.LSEH_begin_${PREFIX}_encrypt
-	.rva	.LSEH_end_${PREFIX}_encrypt
-	.rva	.LSEH_info_${PREFIX}_encrypt
-
-	.rva	.LSEH_begin_${PREFIX}_decrypt
-	.rva	.LSEH_end_${PREFIX}_decrypt
-	.rva	.LSEH_info_${PREFIX}_decrypt
-
-	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
-	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
-	.rva	.LSEH_info_${PREFIX}_cbc_encrypt
-
-.section	.xdata
-.align	8
-.LSEH_info_${PREFIX}_set_encrypt_key:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Lenc_key_body,.Lenc_key_epilogue	# HandlerData[]
-.LSEH_info_${PREFIX}_set_decrypt_key:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Ldec_key_body,.Ldec_key_epilogue	# HandlerData[]
-.LSEH_info_${PREFIX}_encrypt:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Lenc_body,.Lenc_epilogue		# HandlerData[]
-.LSEH_info_${PREFIX}_decrypt:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Ldec_body,.Ldec_epilogue		# HandlerData[]
-.LSEH_info_${PREFIX}_cbc_encrypt:
-	.byte	9,0,0,0
-	.rva	se_handler
-	.rva	.Lcbc_body,.Lcbc_epilogue		# HandlerData[]
-___
-}
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-
-print $code;
-
-close STDOUT;

+ 0 - 126
drivers/builtin_openssl2/crypto/alphacpuid.pl

@@ -1,126 +0,0 @@
-#!/usr/bin/env perl
-print <<'___';
-.text
-
-.set	noat
-
-.globl	OPENSSL_cpuid_setup
-.ent	OPENSSL_cpuid_setup
-OPENSSL_cpuid_setup:
-	.frame	$30,0,$26
-	.prologue 0
-	ret	($26)
-.end	OPENSSL_cpuid_setup
-
-.globl	OPENSSL_wipe_cpu
-.ent	OPENSSL_wipe_cpu
-OPENSSL_wipe_cpu:
-	.frame	$30,0,$26
-	.prologue 0
-	clr	$1
-	clr	$2
-	clr	$3
-	clr	$4
-	clr	$5
-	clr	$6
-	clr	$7
-	clr	$8
-	clr	$16
-	clr	$17
-	clr	$18
-	clr	$19
-	clr	$20
-	clr	$21
-	clr	$22
-	clr	$23
-	clr	$24
-	clr	$25
-	clr	$27
-	clr	$at
-	clr	$29
-	fclr	$f0
-	fclr	$f1
-	fclr	$f10
-	fclr	$f11
-	fclr	$f12
-	fclr	$f13
-	fclr	$f14
-	fclr	$f15
-	fclr	$f16
-	fclr	$f17
-	fclr	$f18
-	fclr	$f19
-	fclr	$f20
-	fclr	$f21
-	fclr	$f22
-	fclr	$f23
-	fclr	$f24
-	fclr	$f25
-	fclr	$f26
-	fclr	$f27
-	fclr	$f28
-	fclr	$f29
-	fclr	$f30
-	mov	$sp,$0
-	ret	($26)
-.end	OPENSSL_wipe_cpu
-
-.globl	OPENSSL_atomic_add
-.ent	OPENSSL_atomic_add
-OPENSSL_atomic_add:
-	.frame	$30,0,$26
-	.prologue 0
-1:	ldl_l	$0,0($16)
-	addl	$0,$17,$1
-	stl_c	$1,0($16)
-	beq	$1,1b
-	addl	$0,$17,$0
-	ret	($26)
-.end	OPENSSL_atomic_add
-
-.globl	OPENSSL_rdtsc
-.ent	OPENSSL_rdtsc
-OPENSSL_rdtsc:
-	.frame	$30,0,$26
-	.prologue 0
-	rpcc	$0
-	ret	($26)
-.end	OPENSSL_rdtsc
-
-.globl	OPENSSL_cleanse
-.ent	OPENSSL_cleanse
-OPENSSL_cleanse:
-	.frame	$30,0,$26
-	.prologue 0
-	beq	$17,.Ldone
-	and	$16,7,$0
-	bic	$17,7,$at
-	beq	$at,.Little
-	beq	$0,.Laligned
-
-.Little:
-	subq	$0,8,$0
-	ldq_u	$1,0($16)
-	mov	$16,$2
-.Lalign:
-	mskbl	$1,$16,$1
-	lda	$16,1($16)
-	subq	$17,1,$17
-	addq	$0,1,$0
-	beq	$17,.Lout
-	bne	$0,.Lalign
-.Lout:	stq_u	$1,0($2)
-	beq	$17,.Ldone
-	bic	$17,7,$at
-	beq	$at,.Little
-
-.Laligned:
-	stq	$31,0($16)
-	subq	$17,8,$17
-	lda	$16,8($16)
-	bic	$17,7,$at
-	bne	$at,.Laligned
-	bne	$17,.Little
-.Ldone: ret	($26)
-.end	OPENSSL_cleanse
-___
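
The deleted Alpha routine above implements OPENSSL_atomic_add as a load-locked/store-conditional loop: ldl_l reads the word, stl_c attempts the store, and the beq retries when another CPU intervened. For reference, a minimal C sketch of the same retry semantics using GCC/Clang __atomic builtins — the atomic_add helper is hypothetical, not part of this patch:

```c
/* Sketch of the ldl_l/stl_c retry loop from the deleted Alpha code,
 * expressed with compiler builtins. Hypothetical helper, not an
 * OpenSSL API. */
#include <stdio.h>

static int atomic_add(volatile int *p, int amount)
{
    int old, updated;
    do {
        old = __atomic_load_n(p, __ATOMIC_RELAXED);         /* ldl_l        */
        updated = old + amount;
    } while (!__atomic_compare_exchange_n(p, &old, updated, /* stl_c + beq  */
                                          0, __ATOMIC_SEQ_CST,
                                          __ATOMIC_RELAXED));
    return updated;                                         /* addl into $0 */
}

int main(void)
{
    volatile int counter = 40;
    printf("%d\n", atomic_add(&counter, 2)); /* prints 42 */
    return 0;
}
```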

+ 31 - 4
drivers/builtin_openssl2/crypto/arm_arch.h

@@ -10,13 +10,24 @@
 #    define __ARMEL__
 #   endif
 #  elif defined(__GNUC__)
+#   if   defined(__aarch64__)
+#    define __ARM_ARCH__ 8
+#    if __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
+#     define __ARMEB__
+#    else
+#     define __ARMEL__
+#    endif
   /*
    * Why doesn't gcc define __ARM_ARCH__? Instead it defines
   * a bunch of the macros below. See the all_architectures[] table in
    * gcc/config/arm/arm.c. On a side note it defines
    * __ARMEL__/__ARMEB__ for little-/big-endian.
    */
-#   if   defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)     || \
+#   elif defined(__ARM_ARCH)
+#    define __ARM_ARCH__ __ARM_ARCH
+#   elif defined(__ARM_ARCH_8A__)
+#    define __ARM_ARCH__ 8
+#   elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)     || \
         defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__)     || \
         defined(__ARM_ARCH_7EM__)
 #    define __ARM_ARCH__ 7
@@ -41,11 +52,27 @@
 #  include <openssl/fipssyms.h>
 # endif
 
+# if !defined(__ARM_MAX_ARCH__)
+#  define __ARM_MAX_ARCH__ __ARM_ARCH__
+# endif
+
+# if __ARM_MAX_ARCH__<__ARM_ARCH__
+#  error "__ARM_MAX_ARCH__ can't be less than __ARM_ARCH__"
+# elif __ARM_MAX_ARCH__!=__ARM_ARCH__
+#  if __ARM_ARCH__<7 && __ARM_MAX_ARCH__>=7 && defined(__ARMEB__)
+#   error "can't build universal big-endian binary"
+#  endif
+# endif
+
 # if !__ASSEMBLER__
 extern unsigned int OPENSSL_armcap_P;
-
-#  define ARMV7_NEON      (1<<0)
-#  define ARMV7_TICK      (1<<1)
 # endif
 
+# define ARMV7_NEON      (1<<0)
+# define ARMV7_TICK      (1<<1)
+# define ARMV8_AES       (1<<2)
+# define ARMV8_SHA1      (1<<3)
+# define ARMV8_SHA256    (1<<4)
+# define ARMV8_PMULL     (1<<5)
+
 #endif
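
The revised header resolves __ARM_ARCH__ by preferring __aarch64__, then GCC/Clang's unified __ARM_ARCH, then the per-architecture macros, and lets __ARM_MAX_ARCH__ exceed the baseline for "universal" builds that probe newer features at run time. A standalone sketch of that #if chain, handy for checking what a given cross-compiler resolves — the MY_* names are illustrative and deliberately do not collide with the real header:

```c
/* Compile with a target toolchain to see which branch of the
 * detection logic it would take. Re-implements the #if chain in
 * miniature; it does not include the OpenSSL header. */
#include <stdio.h>

#if defined(__aarch64__)
# define MY_ARM_ARCH 8
#elif defined(__ARM_ARCH)             /* unified macro on modern GCC/Clang */
# define MY_ARM_ARCH __ARM_ARCH
#elif defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
# define MY_ARM_ARCH 7
#else
# define MY_ARM_ARCH 0                /* unknown or non-ARM target */
#endif

#ifndef MY_ARM_MAX_ARCH
# define MY_ARM_MAX_ARCH MY_ARM_ARCH  /* default: no run-time dispatch */
#endif

int main(void)
{
    printf("baseline arch: %d, max (probed) arch: %d\n",
           MY_ARM_ARCH, MY_ARM_MAX_ARCH);
    return 0;
}
```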

+ 90 - 7
drivers/builtin_openssl2/crypto/armcap.c

@@ -7,8 +7,18 @@
 
 #include "arm_arch.h"
 
-unsigned int OPENSSL_armcap_P;
+unsigned int OPENSSL_armcap_P = 0;
 
+#if __ARM_MAX_ARCH__<7
+void OPENSSL_cpuid_setup(void)
+{
+}
+
+unsigned long OPENSSL_rdtsc(void)
+{
+    return 0;
+}
+#else
 static sigset_t all_masked;
 
 static sigjmp_buf ill_jmp;
@@ -22,9 +32,13 @@ static void ill_handler(int sig)
  * ARM compilers support inline assembler...
  */
 void _armv7_neon_probe(void);
-unsigned int _armv7_tick(void);
+void _armv8_aes_probe(void);
+void _armv8_sha1_probe(void);
+void _armv8_sha256_probe(void);
+void _armv8_pmull_probe(void);
+unsigned long _armv7_tick(void);
 
-unsigned int OPENSSL_rdtsc(void)
+unsigned long OPENSSL_rdtsc(void)
 {
     if (OPENSSL_armcap_P & ARMV7_TICK)
         return _armv7_tick();
@@ -32,9 +46,44 @@ unsigned int OPENSSL_rdtsc(void)
         return 0;
 }
 
-#if defined(__GNUC__) && __GNUC__>=2
+/*
+ * Use a weak reference to getauxval() so we can use it if it is available but
+ * don't break the build if it is not.
+ */
+# if defined(__GNUC__) && __GNUC__>=2
 void OPENSSL_cpuid_setup(void) __attribute__ ((constructor));
-#endif
+extern unsigned long getauxval(unsigned long type) __attribute__ ((weak));
+# else
+static unsigned long (*getauxval) (unsigned long) = NULL;
+# endif
+
+/*
+ * ARM puts the feature bits for the Crypto Extensions in AT_HWCAP2, whereas
+ * AArch64 uses AT_HWCAP.
+ */
+# if defined(__arm__) || defined (__arm)
+#  define HWCAP                  16
+                                  /* AT_HWCAP */
+#  define HWCAP_NEON             (1 << 12)
+
+#  define HWCAP_CE               26
+                                  /* AT_HWCAP2 */
+#  define HWCAP_CE_AES           (1 << 0)
+#  define HWCAP_CE_PMULL         (1 << 1)
+#  define HWCAP_CE_SHA1          (1 << 2)
+#  define HWCAP_CE_SHA256        (1 << 3)
+# elif defined(__aarch64__)
+#  define HWCAP                  16
+                                  /* AT_HWCAP */
+#  define HWCAP_NEON             (1 << 1)
+
+#  define HWCAP_CE               HWCAP
+#  define HWCAP_CE_AES           (1 << 3)
+#  define HWCAP_CE_PMULL         (1 << 4)
+#  define HWCAP_CE_SHA1          (1 << 5)
+#  define HWCAP_CE_SHA256        (1 << 6)
+# endif
+
 void OPENSSL_cpuid_setup(void)
 {
     char *e;
@@ -47,7 +96,7 @@ void OPENSSL_cpuid_setup(void)
     trigger = 1;
 
     if ((e = getenv("OPENSSL_armcap"))) {
-        OPENSSL_armcap_P = strtoul(e, NULL, 0);
+        OPENSSL_armcap_P = (unsigned int)strtoul(e, NULL, 0);
         return;
     }
 
@@ -67,9 +116,42 @@ void OPENSSL_cpuid_setup(void)
     sigprocmask(SIG_SETMASK, &ill_act.sa_mask, &oset);
     sigaction(SIGILL, &ill_act, &ill_oact);
 
-    if (sigsetjmp(ill_jmp, 1) == 0) {
+    if (getauxval != NULL) {
+        if (getauxval(HWCAP) & HWCAP_NEON) {
+            unsigned long hwcap = getauxval(HWCAP_CE);
+
+            OPENSSL_armcap_P |= ARMV7_NEON;
+
+            if (hwcap & HWCAP_CE_AES)
+                OPENSSL_armcap_P |= ARMV8_AES;
+
+            if (hwcap & HWCAP_CE_PMULL)
+                OPENSSL_armcap_P |= ARMV8_PMULL;
+
+            if (hwcap & HWCAP_CE_SHA1)
+                OPENSSL_armcap_P |= ARMV8_SHA1;
+
+            if (hwcap & HWCAP_CE_SHA256)
+                OPENSSL_armcap_P |= ARMV8_SHA256;
+        }
+    } else if (sigsetjmp(ill_jmp, 1) == 0) {
         _armv7_neon_probe();
         OPENSSL_armcap_P |= ARMV7_NEON;
+        if (sigsetjmp(ill_jmp, 1) == 0) {
+            _armv8_pmull_probe();
+            OPENSSL_armcap_P |= ARMV8_PMULL | ARMV8_AES;
+        } else if (sigsetjmp(ill_jmp, 1) == 0) {
+            _armv8_aes_probe();
+            OPENSSL_armcap_P |= ARMV8_AES;
+        }
+        if (sigsetjmp(ill_jmp, 1) == 0) {
+            _armv8_sha1_probe();
+            OPENSSL_armcap_P |= ARMV8_SHA1;
+        }
+        if (sigsetjmp(ill_jmp, 1) == 0) {
+            _armv8_sha256_probe();
+            OPENSSL_armcap_P |= ARMV8_SHA256;
+        }
     }
     if (sigsetjmp(ill_jmp, 1) == 0) {
         _armv7_tick();
@@ -79,3 +161,4 @@ void OPENSSL_cpuid_setup(void)
     sigaction(SIGILL, &ill_oact, NULL);
     sigprocmask(SIG_SETMASK, &oset, NULL);
 }
+#endif
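
The key technique in the new armcap.c is the weak reference to getauxval(): where the C library provides it (glibc 2.16 and later), the kernel-supplied HWCAP bits are consulted directly, and where it does not, the symbol resolves to NULL at run time and the code falls back to the SIGILL-guarded instruction probes. A minimal standalone sketch of that pattern — the MY_* constants copy the 32-bit ARM values from the diff but are illustrative only:

```c
/* Weak-getauxval pattern: if libc lacks the symbol, the weak
 * reference is NULL at run time and we can fall back gracefully. */
#include <stdio.h>

extern unsigned long getauxval(unsigned long type) __attribute__((weak));

#define MY_AT_HWCAP   16           /* AT_HWCAP, as in the diff above       */
#define MY_HWCAP_NEON (1 << 12)    /* NEON bit; meaning is 32-bit-ARM only */

int main(void)
{
    if (getauxval != NULL) {
        unsigned long hwcap = getauxval(MY_AT_HWCAP);
        printf("getauxval present, NEON: %s\n",
               (hwcap & MY_HWCAP_NEON) ? "yes" : "no");
    } else {
        printf("no getauxval; would fall back to SIGILL probing\n");
    }
    return 0;
}
```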

+ 0 - 154
drivers/builtin_openssl2/crypto/armv4cpuid.S

@@ -1,154 +0,0 @@
-#include "arm_arch.h"
-
-.text
-.code	32
-
-.align	5
-.global	_armv7_neon_probe
-.type	_armv7_neon_probe,%function
-_armv7_neon_probe:
-	.word	0xf26ee1fe	@ vorr	q15,q15,q15
-	.word	0xe12fff1e	@ bx	lr
-.size	_armv7_neon_probe,.-_armv7_neon_probe
-
-.global	_armv7_tick
-.type	_armv7_tick,%function
-_armv7_tick:
-	mrc	p15,0,r0,c9,c13,0
-	.word	0xe12fff1e	@ bx	lr
-.size	_armv7_tick,.-_armv7_tick
-
-.global	OPENSSL_atomic_add
-.type	OPENSSL_atomic_add,%function
-OPENSSL_atomic_add:
-#if __ARM_ARCH__>=6
-.Ladd:	ldrex	r2,[r0]
-	add	r3,r2,r1
-	strex	r2,r3,[r0]
-	cmp	r2,#0
-	bne	.Ladd
-	mov	r0,r3
-	.word	0xe12fff1e	@ bx	lr
-#else
-	stmdb	sp!,{r4-r6,lr}
-	ldr	r2,.Lspinlock
-	adr	r3,.Lspinlock
-	mov	r4,r0
-	mov	r5,r1
-	add	r6,r3,r2	@ &spinlock
-	b	.+8
-.Lspin:	bl	sched_yield
-	mov	r0,#-1
-	swp	r0,r0,[r6]
-	cmp	r0,#0
-	bne	.Lspin
-
-	ldr	r2,[r4]
-	add	r2,r2,r5
-	str	r2,[r4]
-	str	r0,[r6]		@ release spinlock
-	ldmia	sp!,{r4-r6,lr}
-	tst	lr,#1
-	moveq	pc,lr
-	.word	0xe12fff1e	@ bx	lr
-#endif
-.size	OPENSSL_atomic_add,.-OPENSSL_atomic_add
-
-.global	OPENSSL_cleanse
-.type	OPENSSL_cleanse,%function
-OPENSSL_cleanse:
-	eor	ip,ip,ip
-	cmp	r1,#7
-	subhs	r1,r1,#4
-	bhs	.Lot
-	cmp	r1,#0
-	beq	.Lcleanse_done
-.Little:
-	strb	ip,[r0],#1
-	subs	r1,r1,#1
-	bhi	.Little
-	b	.Lcleanse_done
-
-.Lot:	tst	r0,#3
-	beq	.Laligned
-	strb	ip,[r0],#1
-	sub	r1,r1,#1
-	b	.Lot
-.Laligned:
-	str	ip,[r0],#4
-	subs	r1,r1,#4
-	bhs	.Laligned
-	adds	r1,r1,#4
-	bne	.Little
-.Lcleanse_done:
-	tst	lr,#1
-	moveq	pc,lr
-	.word	0xe12fff1e	@ bx	lr
-.size	OPENSSL_cleanse,.-OPENSSL_cleanse
-
-.global	OPENSSL_wipe_cpu
-.type	OPENSSL_wipe_cpu,%function
-OPENSSL_wipe_cpu:
-	ldr	r0,.LOPENSSL_armcap
-	adr	r1,.LOPENSSL_armcap
-	ldr	r0,[r1,r0]
-	eor	r2,r2,r2
-	eor	r3,r3,r3
-	eor	ip,ip,ip
-	tst	r0,#1
-	beq	.Lwipe_done
-	.word	0xf3000150	@ veor    q0, q0, q0
-	.word	0xf3022152	@ veor    q1, q1, q1
-	.word	0xf3044154	@ veor    q2, q2, q2
-	.word	0xf3066156	@ veor    q3, q3, q3
-	.word	0xf34001f0	@ veor    q8, q8, q8
-	.word	0xf34221f2	@ veor    q9, q9, q9
-	.word	0xf34441f4	@ veor    q10, q10, q10
-	.word	0xf34661f6	@ veor    q11, q11, q11
-	.word	0xf34881f8	@ veor    q12, q12, q12
-	.word	0xf34aa1fa	@ veor    q13, q13, q13
-	.word	0xf34cc1fc	@ veor    q14, q14, q14
-	.word	0xf34ee1fe	@ veor    q15, q15, q15
-.Lwipe_done:
-	mov	r0,sp
-	tst	lr,#1
-	moveq	pc,lr
-	.word	0xe12fff1e	@ bx	lr
-.size	OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu
-
-.global	OPENSSL_instrument_bus
-.type	OPENSSL_instrument_bus,%function
-OPENSSL_instrument_bus:
-	eor	r0,r0,r0
-	tst	lr,#1
-	moveq	pc,lr
-	.word	0xe12fff1e	@ bx	lr
-.size	OPENSSL_instrument_bus,.-OPENSSL_instrument_bus
-
-.global	OPENSSL_instrument_bus2
-.type	OPENSSL_instrument_bus2,%function
-OPENSSL_instrument_bus2:
-	eor	r0,r0,r0
-	tst	lr,#1
-	moveq	pc,lr
-	.word	0xe12fff1e	@ bx	lr
-.size	OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2
-
-.align	5
-.LOPENSSL_armcap:
-.word	OPENSSL_armcap_P-.LOPENSSL_armcap
-#if __ARM_ARCH__>=6
-.align	5
-#else
-.Lspinlock:
-.word	atomic_add_spinlock-.Lspinlock
-.align	5
-
-.data
-.align	2
-atomic_add_spinlock:
-.word	0
-#endif
-
-.comm	OPENSSL_armcap_P,4,4
-.hidden	OPENSSL_armcap_P

+ 44 - 2
drivers/builtin_openssl2/crypto/asn1/a_gentm.c

@@ -65,6 +65,7 @@
 #include "cryptlib.h"
 #include "o_time.h"
 #include <openssl/asn1.h>
+#include "asn1_locl.h"
 
 #if 0
 
@@ -117,7 +118,7 @@ ASN1_GENERALIZEDTIME *d2i_ASN1_GENERALIZEDTIME(ASN1_GENERALIZEDTIME **a,
 
 #endif
 
-int ASN1_GENERALIZEDTIME_check(ASN1_GENERALIZEDTIME *d)
+int asn1_generalizedtime_to_tm(struct tm *tm, const ASN1_GENERALIZEDTIME *d)
 {
     static const int min[9] = { 0, 0, 1, 1, 0, 0, 0, 0, 0 };
     static const int max[9] = { 99, 99, 12, 31, 23, 59, 59, 12, 59 };
@@ -139,6 +140,8 @@ int ASN1_GENERALIZEDTIME_check(ASN1_GENERALIZEDTIME *d)
     for (i = 0; i < 7; i++) {
         if ((i == 6) && ((a[o] == 'Z') || (a[o] == '+') || (a[o] == '-'))) {
             i++;
+            if (tm)
+                tm->tm_sec = 0;
             break;
         }
         if ((a[o] < '0') || (a[o] > '9'))
@@ -155,6 +158,31 @@ int ASN1_GENERALIZEDTIME_check(ASN1_GENERALIZEDTIME *d)
 
         if ((n < min[i]) || (n > max[i]))
             goto err;
+        if (tm) {
+            switch (i) {
+            case 0:
+                tm->tm_year = n * 100 - 1900;
+                break;
+            case 1:
+                tm->tm_year += n;
+                break;
+            case 2:
+                tm->tm_mon = n - 1;
+                break;
+            case 3:
+                tm->tm_mday = n;
+                break;
+            case 4:
+                tm->tm_hour = n;
+                break;
+            case 5:
+                tm->tm_min = n;
+                break;
+            case 6:
+                tm->tm_sec = n;
+                break;
+            }
+        }
     }
     /*
      * Optional fractional seconds: decimal point followed by one or more
@@ -174,6 +202,7 @@ int ASN1_GENERALIZEDTIME_check(ASN1_GENERALIZEDTIME *d)
     if (a[o] == 'Z')
         o++;
     else if ((a[o] == '+') || (a[o] == '-')) {
+        int offsign = a[o] == '-' ? -1 : 1, offset = 0;
         o++;
         if (o + 4 > l)
             goto err;
@@ -187,9 +216,17 @@ int ASN1_GENERALIZEDTIME_check(ASN1_GENERALIZEDTIME *d)
             n = (n * 10) + a[o] - '0';
             if ((n < min[i]) || (n > max[i]))
                 goto err;
+            if (tm) {
+                if (i == 7)
+                    offset = n * 3600;
+                else if (i == 8)
+                    offset += n * 60;
+            }
             o++;
         }
-    } else {
+        if (offset && !OPENSSL_gmtime_adj(tm, 0, offset * offsign))
+            return 0;
+    } else if (a[o]) {
         /* Missing time zone information. */
         goto err;
     }
@@ -198,6 +235,11 @@ int ASN1_GENERALIZEDTIME_check(ASN1_GENERALIZEDTIME *d)
     return (0);
 }
 
+int ASN1_GENERALIZEDTIME_check(const ASN1_GENERALIZEDTIME *d)
+{
+    return asn1_generalizedtime_to_tm(NULL, d);
+}
+
 int ASN1_GENERALIZEDTIME_set_string(ASN1_GENERALIZEDTIME *s, const char *str)
 {
     ASN1_GENERALIZEDTIME t;
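
ASN1_GENERALIZEDTIME_check() is now a thin wrapper that calls asn1_generalizedtime_to_tm() with a NULL struct tm, so validation and parsing share one code path (including the new timezone-offset handling). A short usage sketch; it links against libcrypto, and the timestamp is arbitrary:

```c
/* Validate a GeneralizedTime through the rewritten check path. */
#include <stdio.h>
#include <openssl/asn1.h>

int main(void)
{
    ASN1_GENERALIZEDTIME *t = ASN1_GENERALIZEDTIME_new();

    /* set_string runs ASN1_GENERALIZEDTIME_check internally and
     * rejects malformed input */
    if (!ASN1_GENERALIZEDTIME_set_string(t, "20160301123000Z"))
        fprintf(stderr, "rejected\n");
    else
        printf("valid: %s\n", (const char *)t->data);

    ASN1_GENERALIZEDTIME_free(t);
    return 0;
}
```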

+ 30 - 0
drivers/builtin_openssl2/crypto/asn1/a_time.c

@@ -66,6 +66,7 @@
 #include "cryptlib.h"
 #include "o_time.h"
 #include <openssl/asn1t.h>
+#include "asn1_locl.h"
 
 IMPLEMENT_ASN1_MSTRING(ASN1_TIME, B_ASN1_TIME)
 
@@ -196,3 +197,32 @@ int ASN1_TIME_set_string(ASN1_TIME *s, const char *str)
 
     return 1;
 }
+
+static int asn1_time_to_tm(struct tm *tm, const ASN1_TIME *t)
+{
+    if (t == NULL) {
+        time_t now_t;
+        time(&now_t);
+        if (OPENSSL_gmtime(&now_t, tm))
+            return 1;
+        return 0;
+    }
+
+    if (t->type == V_ASN1_UTCTIME)
+        return asn1_utctime_to_tm(tm, t);
+    else if (t->type == V_ASN1_GENERALIZEDTIME)
+        return asn1_generalizedtime_to_tm(tm, t);
+
+    return 0;
+}
+
+int ASN1_TIME_diff(int *pday, int *psec,
+                   const ASN1_TIME *from, const ASN1_TIME *to)
+{
+    struct tm tm_from, tm_to;
+    if (!asn1_time_to_tm(&tm_from, from))
+        return 0;
+    if (!asn1_time_to_tm(&tm_to, to))
+        return 0;
+    return OPENSSL_gmtime_diff(pday, psec, &tm_from, &tm_to);
+}
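
ASN1_TIME_diff() is new in this update. Since asn1_time_to_tm() treats a NULL ASN1_TIME as "now", either argument may be NULL. A usage sketch computing the distance from the current time to a point one hour ahead:

```c
/* Days/seconds between two ASN.1 times; links against libcrypto. */
#include <stdio.h>
#include <time.h>
#include <openssl/asn1.h>

int main(void)
{
    int day = 0, sec = 0;
    ASN1_TIME *to = ASN1_TIME_set(NULL, time(NULL) + 3600);

    /* NULL 'from' means the current time (see asn1_time_to_tm) */
    if (!ASN1_TIME_diff(&day, &sec, NULL, to)) {
        fprintf(stderr, "diff failed\n");
        return 1;
    }
    printf("delta: %d day(s), %d second(s)\n", day, sec); /* ~0, ~3600 */
    ASN1_TIME_free(to);
    return 0;
}
```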

+ 57 - 35
drivers/builtin_openssl2/crypto/asn1/a_utctm.c

@@ -61,6 +61,7 @@
 #include "cryptlib.h"
 #include "o_time.h"
 #include <openssl/asn1.h>
+#include "asn1_locl.h"
 
 #if 0
 int i2d_ASN1_UTCTIME(ASN1_UTCTIME *a, unsigned char **pp)
@@ -109,7 +110,7 @@ ASN1_UTCTIME *d2i_ASN1_UTCTIME(ASN1_UTCTIME **a, unsigned char **pp,
 
 #endif
 
-int ASN1_UTCTIME_check(ASN1_UTCTIME *d)
+int asn1_utctime_to_tm(struct tm *tm, const ASN1_UTCTIME *d)
 {
     static const int min[8] = { 0, 1, 1, 0, 0, 0, 0, 0 };
     static const int max[8] = { 99, 12, 31, 23, 59, 59, 12, 59 };
@@ -127,6 +128,8 @@ int ASN1_UTCTIME_check(ASN1_UTCTIME *d)
     for (i = 0; i < 6; i++) {
         if ((i == 5) && ((a[o] == 'Z') || (a[o] == '+') || (a[o] == '-'))) {
             i++;
+            if (tm)
+                tm->tm_sec = 0;
             break;
         }
         if ((a[o] < '0') || (a[o] > '9'))
@@ -143,10 +146,33 @@ int ASN1_UTCTIME_check(ASN1_UTCTIME *d)
 
         if ((n < min[i]) || (n > max[i]))
             goto err;
+        if (tm) {
+            switch (i) {
+            case 0:
+                tm->tm_year = n < 50 ? n + 100 : n;
+                break;
+            case 1:
+                tm->tm_mon = n - 1;
+                break;
+            case 2:
+                tm->tm_mday = n;
+                break;
+            case 3:
+                tm->tm_hour = n;
+                break;
+            case 4:
+                tm->tm_min = n;
+                break;
+            case 5:
+                tm->tm_sec = n;
+                break;
+            }
+        }
     }
     if (a[o] == 'Z')
         o++;
     else if ((a[o] == '+') || (a[o] == '-')) {
+        int offsign = a[o] == '-' ? -1 : 1, offset = 0;
         o++;
         if (o + 4 > l)
             goto err;
@@ -160,12 +186,25 @@ int ASN1_UTCTIME_check(ASN1_UTCTIME *d)
             n = (n * 10) + a[o] - '0';
             if ((n < min[i]) || (n > max[i]))
                 goto err;
+            if (tm) {
+                if (i == 6)
+                    offset = n * 3600;
+                else if (i == 7)
+                    offset += n * 60;
+            }
             o++;
         }
+        if (offset && !OPENSSL_gmtime_adj(tm, 0, offset * offsign))
+            return 0;
     }
-    return (o == l);
+    return o == l;
  err:
-    return (0);
+    return 0;
+}
+
+int ASN1_UTCTIME_check(const ASN1_UTCTIME *d)
+{
+    return asn1_utctime_to_tm(NULL, d);
 }
 
 int ASN1_UTCTIME_set_string(ASN1_UTCTIME *s, const char *str)
@@ -249,43 +288,26 @@ ASN1_UTCTIME *ASN1_UTCTIME_adj(ASN1_UTCTIME *s, time_t t,
 
 int ASN1_UTCTIME_cmp_time_t(const ASN1_UTCTIME *s, time_t t)
 {
-    struct tm *tm;
-    struct tm data;
-    int offset;
-    int year;
-
-#define g2(p) (((p)[0]-'0')*10+(p)[1]-'0')
-
-    if (s->data[12] == 'Z')
-        offset = 0;
-    else {
-        offset = g2(s->data + 13) * 60 + g2(s->data + 15);
-        if (s->data[12] == '-')
-            offset = -offset;
-    }
+    struct tm stm, ttm;
+    int day, sec;
 
-    t -= offset * 60;           /* FIXME: may overflow in extreme cases */
+    if (!asn1_utctime_to_tm(&stm, s))
+        return -2;
 
-    tm = OPENSSL_gmtime(&t, &data);
-    /*
-     * NB: -1, 0, 1 already valid return values so use -2 to indicate error.
-     */
-    if (tm == NULL)
+    if (!OPENSSL_gmtime(&t, &ttm))
         return -2;
 
-#define return_cmp(a,b) if ((a)<(b)) return -1; else if ((a)>(b)) return 1
-    year = g2(s->data);
-    if (year < 50)
-        year += 100;
-    return_cmp(year, tm->tm_year);
-    return_cmp(g2(s->data + 2) - 1, tm->tm_mon);
-    return_cmp(g2(s->data + 4), tm->tm_mday);
-    return_cmp(g2(s->data + 6), tm->tm_hour);
-    return_cmp(g2(s->data + 8), tm->tm_min);
-    return_cmp(g2(s->data + 10), tm->tm_sec);
-#undef g2
-#undef return_cmp
+    if (!OPENSSL_gmtime_diff(&day, &sec, &ttm, &stm))
+        return -2;
 
+    if (day > 0)
+        return 1;
+    if (day < 0)
+        return -1;
+    if (sec > 0)
+        return 1;
+    if (sec < 0)
+        return -1;
     return 0;
 }
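
The rewritten ASN1_UTCTIME_cmp_time_t() converts both sides to struct tm and compares via OPENSSL_gmtime_diff(), replacing the overflow-prone `t -= offset * 60` arithmetic; -2 still signals a parse or conversion error, since -1/0/1 are all valid comparison results. Usage sketch:

```c
/* -1: stored time earlier than t; 1: later; 0: equal; -2: error. */
#include <stdio.h>
#include <time.h>
#include <openssl/asn1.h>

int main(void)
{
    ASN1_UTCTIME *u = ASN1_UTCTIME_set(NULL, time(NULL) - 60);
    int r = ASN1_UTCTIME_cmp_time_t(u, time(NULL));

    printf("cmp = %d\n", r);   /* expect -1: one minute in the past */
    ASN1_UTCTIME_free(u);
    return 0;
}
```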
 

+ 23 - 1
drivers/builtin_openssl2/crypto/asn1/ameth_lib.c

@@ -68,6 +68,7 @@
 extern const EVP_PKEY_ASN1_METHOD rsa_asn1_meths[];
 extern const EVP_PKEY_ASN1_METHOD dsa_asn1_meths[];
 extern const EVP_PKEY_ASN1_METHOD dh_asn1_meth;
+extern const EVP_PKEY_ASN1_METHOD dhx_asn1_meth;
 extern const EVP_PKEY_ASN1_METHOD eckey_asn1_meth;
 extern const EVP_PKEY_ASN1_METHOD hmac_asn1_meth;
 extern const EVP_PKEY_ASN1_METHOD cmac_asn1_meth;
@@ -92,7 +93,10 @@ static const EVP_PKEY_ASN1_METHOD *standard_methods[] = {
     &eckey_asn1_meth,
 #endif
     &hmac_asn1_meth,
-    &cmac_asn1_meth
+    &cmac_asn1_meth,
+#ifndef OPENSSL_NO_DH
+    &dhx_asn1_meth
+#endif
 };
 
 typedef int sk_cmp_fn_type(const char *const *a, const char *const *b);
@@ -460,3 +464,21 @@ void EVP_PKEY_asn1_set_ctrl(EVP_PKEY_ASN1_METHOD *ameth,
 {
     ameth->pkey_ctrl = pkey_ctrl;
 }
+
+void EVP_PKEY_asn1_set_item(EVP_PKEY_ASN1_METHOD *ameth,
+                            int (*item_verify) (EVP_MD_CTX *ctx,
+                                                const ASN1_ITEM *it,
+                                                void *asn,
+                                                X509_ALGOR *a,
+                                                ASN1_BIT_STRING *sig,
+                                                EVP_PKEY *pkey),
+                            int (*item_sign) (EVP_MD_CTX *ctx,
+                                              const ASN1_ITEM *it,
+                                              void *asn,
+                                              X509_ALGOR *alg1,
+                                              X509_ALGOR *alg2,
+                                              ASN1_BIT_STRING *sig))
+{
+    ameth->item_sign = item_sign;
+    ameth->item_verify = item_verify;
+}
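
EVP_PKEY_asn1_set_item() lets a custom EVP_PKEY_ASN1_METHOD install whole-item sign/verify callbacks; note the setter takes the verify callback first. A stub sketch with the signatures from the declaration above — the bodies are placeholders, and a real method must follow libcrypto's return-value conventions for these hooks:

```c
/* Registering item_sign/item_verify stubs on a custom method. */
#include <openssl/asn1.h>
#include <openssl/evp.h>
#include <openssl/x509.h>

static int my_item_verify(EVP_MD_CTX *ctx, const ASN1_ITEM *it, void *asn,
                          X509_ALGOR *a, ASN1_BIT_STRING *sig, EVP_PKEY *pkey)
{
    return 0;   /* placeholder: report failure */
}

static int my_item_sign(EVP_MD_CTX *ctx, const ASN1_ITEM *it, void *asn,
                        X509_ALGOR *alg1, X509_ALGOR *alg2,
                        ASN1_BIT_STRING *sig)
{
    return 0;   /* placeholder: report failure */
}

void register_item_callbacks(EVP_PKEY_ASN1_METHOD *ameth)
{
    /* verify callback first, then sign, matching the setter above */
    EVP_PKEY_asn1_set_item(ameth, my_item_verify, my_item_sign);
}
```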

+ 3 - 0
drivers/builtin_openssl2/crypto/asn1/asn1_locl.h

@@ -59,6 +59,9 @@
 
 /* Internal ASN1 structures and functions: not for application use */
 
+int asn1_utctime_to_tm(struct tm *tm, const ASN1_UTCTIME *d);
+int asn1_generalizedtime_to_tm(struct tm *tm, const ASN1_GENERALIZEDTIME *d);
+
 /* ASN1 print context structure */
 
 struct asn1_pctx_st {

+ 0 - 83
drivers/builtin_openssl2/crypto/asn1/charmap.pl

@@ -1,83 +0,0 @@
-#!/usr/local/bin/perl -w
-
-# Written by Dr Stephen N Henson ([email protected]).
-# Licensed under the terms of the OpenSSL license.
-
-use strict;
-
-my ($i, @arr);
-
-# Set up an array with the type of ASCII characters
-# Each set bit represents a character property.
-
-# RFC2253 character properties
-my $RFC2253_ESC = 1;	# Character escaped with \
-my $ESC_CTRL	= 2;	# Escaped control character
-# These are used with RFC1779 quoting using "
-my $NOESC_QUOTE	= 8;	# Not escaped if quoted
-my $PSTRING_CHAR = 0x10;	# Valid PrintableString character
-my $RFC2253_FIRST_ESC = 0x20; # Escaped with \ if first character
-my $RFC2253_LAST_ESC = 0x40;  # Escaped with \ if last character
-
-for($i = 0; $i < 128; $i++) {
-	# Set the RFC2253 escape characters (control)
-	$arr[$i] = 0;
-	if(($i < 32) || ($i > 126)) {
-		$arr[$i] |= $ESC_CTRL;
-	}
-
-	# Some PrintableString characters
-	if(		   ( ( $i >= ord("a")) && ( $i <= ord("z")) )
-			|| (  ( $i >= ord("A")) && ( $i <= ord("Z")) )
-			|| (  ( $i >= ord("0")) && ( $i <= ord("9")) )  ) {
-		$arr[$i] |= $PSTRING_CHAR;
-	}
-}
-
-# Now setup the rest
-
-# Remaining RFC2253 escaped characters
-
-$arr[ord(" ")] |= $NOESC_QUOTE | $RFC2253_FIRST_ESC | $RFC2253_LAST_ESC;
-$arr[ord("#")] |= $NOESC_QUOTE | $RFC2253_FIRST_ESC;
-
-$arr[ord(",")] |= $NOESC_QUOTE | $RFC2253_ESC;
-$arr[ord("+")] |= $NOESC_QUOTE | $RFC2253_ESC;
-$arr[ord("\"")] |= $RFC2253_ESC;
-$arr[ord("\\")] |= $RFC2253_ESC;
-$arr[ord("<")] |= $NOESC_QUOTE | $RFC2253_ESC;
-$arr[ord(">")] |= $NOESC_QUOTE | $RFC2253_ESC;
-$arr[ord(";")] |= $NOESC_QUOTE | $RFC2253_ESC;
-
-# Remaining PrintableString characters
-
-$arr[ord(" ")] |= $PSTRING_CHAR;
-$arr[ord("'")] |= $PSTRING_CHAR;
-$arr[ord("(")] |= $PSTRING_CHAR;
-$arr[ord(")")] |= $PSTRING_CHAR;
-$arr[ord("+")] |= $PSTRING_CHAR;
-$arr[ord(",")] |= $PSTRING_CHAR;
-$arr[ord("-")] |= $PSTRING_CHAR;
-$arr[ord(".")] |= $PSTRING_CHAR;
-$arr[ord("/")] |= $PSTRING_CHAR;
-$arr[ord(":")] |= $PSTRING_CHAR;
-$arr[ord("=")] |= $PSTRING_CHAR;
-$arr[ord("?")] |= $PSTRING_CHAR;
-
-# Now generate the C code
-
-print <<EOF;
-/* Auto generated with chartype.pl script.
- * Mask of various character properties
- */
-
-static unsigned char char_type[] = {
-EOF
-
-for($i = 0; $i < 128; $i++) {
-	print("\n") if($i && (($i % 16) == 0));
-	printf("%2d", $arr[$i]);
-	print(",") if ($i != 127);
-}
-print("\n};\n\n");
-

+ 15 - 0
drivers/builtin_openssl2/crypto/asn1/t_x509.c

@@ -228,6 +228,21 @@ int X509_print_ex(BIO *bp, X509 *x, unsigned long nmflags,
         }
     }
 
+    if (!(cflag & X509_FLAG_NO_IDS)) {
+        if (ci->issuerUID) {
+            if (BIO_printf(bp, "%8sIssuer Unique ID: ", "") <= 0)
+                goto err;
+            if (!X509_signature_dump(bp, ci->issuerUID, 12))
+                goto err;
+        }
+        if (ci->subjectUID) {
+            if (BIO_printf(bp, "%8sSubject Unique ID: ", "") <= 0)
+                goto err;
+            if (!X509_signature_dump(bp, ci->subjectUID, 12))
+                goto err;
+        }
+    }
+
     if (!(cflag & X509_FLAG_NO_EXTENSIONS))
         X509V3_extensions_print(bp, "X509v3 extensions",
                                 ci->extensions, cflag, 8);
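
X509_print_ex() now also prints the issuer and subject unique IDs (seldom-used X.509 v2 fields) unless the caller passes X509_FLAG_NO_IDS in cflag. A usage sketch suppressing the new output:

```c
/* Print a certificate without the new unique-ID blocks. */
#include <openssl/bio.h>
#include <openssl/x509.h>

int print_cert(BIO *out, X509 *cert)
{
    /* nmflags controls name formatting; cflag skips sections */
    return X509_print_ex(out, cert, XN_FLAG_ONELINE, X509_FLAG_NO_IDS);
}
```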

+ 6 - 8
drivers/builtin_openssl2/crypto/asn1/tasn_dec.c

@@ -717,7 +717,7 @@ static int asn1_d2i_ex_primitive(ASN1_VALUE **pval,
     long plen;
     char cst, inf, free_cont = 0;
     const unsigned char *p;
-    BUF_MEM buf;
+    BUF_MEM buf = { 0, NULL, 0 };
     const unsigned char *cont = NULL;
     long len;
     if (!pval) {
@@ -793,7 +793,6 @@ static int asn1_d2i_ex_primitive(ASN1_VALUE **pval,
         } else {
             len = p - cont + plen;
             p += plen;
-            buf.data = NULL;
         }
     } else if (cst) {
         if (utype == V_ASN1_NULL || utype == V_ASN1_BOOLEAN
@@ -802,9 +801,9 @@ static int asn1_d2i_ex_primitive(ASN1_VALUE **pval,
             ASN1err(ASN1_F_ASN1_D2I_EX_PRIMITIVE, ASN1_R_TYPE_NOT_PRIMITIVE);
             return 0;
         }
-        buf.length = 0;
-        buf.max = 0;
-        buf.data = NULL;
+
+        /* Free any returned 'buf' content */
+        free_cont = 1;
         /*
          * Should really check the internal tags are correct but some things
          * may get this wrong. The relevant specs say that constructed string
@@ -812,18 +811,16 @@ static int asn1_d2i_ex_primitive(ASN1_VALUE **pval,
          * So instead just check for UNIVERSAL class and ignore the tag.
          */
         if (!asn1_collect(&buf, &p, plen, inf, -1, V_ASN1_UNIVERSAL, 0)) {
-            free_cont = 1;
             goto err;
         }
         len = buf.length;
         /* Append a final null to string */
         if (!BUF_MEM_grow_clean(&buf, len + 1)) {
             ASN1err(ASN1_F_ASN1_D2I_EX_PRIMITIVE, ERR_R_MALLOC_FAILURE);
-            return 0;
+            goto err;
         }
         buf.data[len] = 0;
         cont = (const unsigned char *)buf.data;
-        free_cont = 1;
     } else {
         cont = p;
         len = plen;
@@ -831,6 +828,7 @@ static int asn1_d2i_ex_primitive(ASN1_VALUE **pval,
     }
 
     /* We now have content length and type: translate into a structure */
+    /* asn1_ex_c2i may reuse the allocated buffer; if it does, it sets free_cont to 0 */
     if (!asn1_ex_c2i(pval, cont, len, utype, &free_cont, it))
         goto err;
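
These tasn_dec.c changes close a leak: buf is zero-initialized as an aggregate and free_cont is set before asn1_collect() can allocate, so every failure path — including the BUF_MEM_grow_clean() case that previously did a bare `return 0` — reaches a single cleanup. The pattern in isolation, as a hedged standalone sketch:

```c
/* Zero-init the buffer, mark ownership before the first allocation,
 * free unconditionally on exit unless a consumer took the pointer. */
#include <openssl/buffer.h>
#include <openssl/crypto.h>

int collect_example(void)
{
    BUF_MEM buf = { 0, NULL, 0 };   /* length, data, max */
    int free_cont = 1, ok = 0;      /* error path now owns buf.data */

    if (!BUF_MEM_grow_clean(&buf, 16))
        goto err;                   /* was 'return 0': leaked buf.data */
    buf.data[0] = '\0';

    /* ... a consumer may take ownership of buf.data and clear
     * free_cont, as asn1_ex_c2i does ... */
    ok = 1;
 err:
    if (free_cont && buf.data != NULL)
        OPENSSL_free(buf.data);
    return ok;
}
```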
 

+ 3 - 1
drivers/builtin_openssl2/crypto/asn1/x_crl.c

@@ -58,8 +58,8 @@
 
 #include <stdio.h>
 #include "cryptlib.h"
-#include "asn1_locl.h"
 #include <openssl/asn1t.h>
+#include "asn1_locl.h"
 #include <openssl/x509.h>
 #include <openssl/x509v3.h>
 
@@ -341,6 +341,8 @@ ASN1_SEQUENCE_ref(X509_CRL, crl_cb, CRYPTO_LOCK_X509_CRL) = {
 
 IMPLEMENT_ASN1_FUNCTIONS(X509_REVOKED)
 
+IMPLEMENT_ASN1_DUP_FUNCTION(X509_REVOKED)
+
 IMPLEMENT_ASN1_FUNCTIONS(X509_CRL_INFO)
 
 IMPLEMENT_ASN1_FUNCTIONS(X509_CRL)
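
IMPLEMENT_ASN1_DUP_FUNCTION(X509_REVOKED) generates an X509_REVOKED_dup() that deep-copies through an i2d/d2i round trip. Trivial usage sketch:

```c
/* Deep-copy a CRL entry; the caller frees the copy. */
#include <openssl/x509.h>

X509_REVOKED *copy_entry(X509_REVOKED *rev)
{
    return X509_REVOKED_dup(rev);   /* independent copy, or NULL */
}
```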

+ 20 - 0
drivers/builtin_openssl2/crypto/asn1/x_x509.c

@@ -207,3 +207,23 @@ int i2d_X509_AUX(X509 *a, unsigned char **pp)
         length += i2d_X509_CERT_AUX(a->aux, pp);
     return length;
 }
+
+int i2d_re_X509_tbs(X509 *x, unsigned char **pp)
+{
+    x->cert_info->enc.modified = 1;
+    return i2d_X509_CINF(x->cert_info, pp);
+}
+
+void X509_get0_signature(ASN1_BIT_STRING **psig, X509_ALGOR **palg,
+                         const X509 *x)
+{
+    if (psig)
+        *psig = x->signature;
+    if (palg)
+        *palg = x->sig_alg;
+}
+
+int X509_get_signature_nid(const X509 *x)
+{
+    return OBJ_obj2nid(x->sig_alg->algorithm);
+}
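
Three small accessors arrive here: i2d_re_X509_tbs() forces re-encoding of the TBSCertificate, X509_get0_signature() hands out non-owning pointers to the signature and its algorithm, and X509_get_signature_nid() maps the signature OID to a NID. A usage sketch:

```c
/* Inspect a certificate's signature via the new accessors. */
#include <stdio.h>
#include <openssl/objects.h>
#include <openssl/x509.h>

void show_signature_info(const X509 *cert)
{
    ASN1_BIT_STRING *sig = NULL;
    X509_ALGOR *alg = NULL;

    printf("algorithm: %s\n", OBJ_nid2ln(X509_get_signature_nid(cert)));

    /* "get0": pointers remain owned by the X509, do not free them */
    X509_get0_signature(&sig, &alg, cert);
    printf("signature: %d bytes\n", ASN1_STRING_length(sig));
}
```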

+ 5 - 2
drivers/builtin_openssl2/crypto/asn1/x_x509a.c

@@ -163,10 +163,13 @@ int X509_add1_reject_object(X509 *x, ASN1_OBJECT *obj)
     if (!(objtmp = OBJ_dup(obj)))
         return 0;
     if (!(aux = aux_get(x)))
-        return 0;
+        goto err;
     if (!aux->reject && !(aux->reject = sk_ASN1_OBJECT_new_null()))
-        return 0;
+        goto err;
     return sk_ASN1_OBJECT_push(aux->reject, objtmp);
+ err:
+    ASN1_OBJECT_free(objtmp);
+    return 0;
 }
 
 void X509_trust_clear(X509 *x)

+ 0 - 137
drivers/builtin_openssl2/crypto/bf/asm/bf-586.pl

@@ -1,137 +0,0 @@
-#!/usr/local/bin/perl
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-require "cbc.pl";
-
-&asm_init($ARGV[0],"bf-586.pl",$ARGV[$#ARGV] eq "386");
-
-$BF_ROUNDS=16;
-$BF_OFF=($BF_ROUNDS+2)*4;
-$L="edi";
-$R="esi";
-$P="ebp";
-$tmp1="eax";
-$tmp2="ebx";
-$tmp3="ecx";
-$tmp4="edx";
-
-&BF_encrypt("BF_encrypt",1);
-&BF_encrypt("BF_decrypt",0);
-&cbc("BF_cbc_encrypt","BF_encrypt","BF_decrypt",1,4,5,3,-1,-1);
-&asm_finish();
-
-sub BF_encrypt
-	{
-	local($name,$enc)=@_;
-
-	&function_begin_B($name,"");
-
-	&comment("");
-
-	&push("ebp");
-	&push("ebx");
-	&mov($tmp2,&wparam(0));
-	&mov($P,&wparam(1));
-	&push("esi");
-	&push("edi");
-
-	&comment("Load the 2 words");
-	&mov($L,&DWP(0,$tmp2,"",0));
-	&mov($R,&DWP(4,$tmp2,"",0));
-
-	&xor(	$tmp1,	$tmp1);
-
-	# encrypting part
-
-	if ($enc)
-		{
-		 &mov($tmp2,&DWP(0,$P,"",0));
-		&xor(	$tmp3,	$tmp3);
-
-		&xor($L,$tmp2);
-		for ($i=0; $i<$BF_ROUNDS; $i+=2)
-			{
-			&comment("");
-			&comment("Round $i");
-			&BF_ENCRYPT($i+1,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
-
-			&comment("");
-			&comment("Round ".sprintf("%d",$i+1));
-			&BF_ENCRYPT($i+2,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
-			}
-		# &mov($tmp1,&wparam(0)); In last loop
-		&mov($tmp4,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
-		}
-	else
-		{
-		 &mov($tmp2,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
-		&xor(	$tmp3,	$tmp3);
-
-		&xor($L,$tmp2);
-		for ($i=$BF_ROUNDS; $i>0; $i-=2)
-			{
-			&comment("");
-			&comment("Round $i");
-			&BF_ENCRYPT($i,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
-			&comment("");
-			&comment("Round ".sprintf("%d",$i-1));
-			&BF_ENCRYPT($i-1,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
-			}
-		# &mov($tmp1,&wparam(0)); In last loop
-		&mov($tmp4,&DWP(0,$P,"",0));
-		}
-
-	&xor($R,$tmp4);
-	&mov(&DWP(4,$tmp1,"",0),$L);
-
-	&mov(&DWP(0,$tmp1,"",0),$R);
-	&function_end($name);
-	}
-
-sub BF_ENCRYPT
-	{
-	local($i,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,$enc)=@_;
-
-	&mov(	$tmp4,		&DWP(&n2a($i*4),$P,"",0)); # for next round
-
-	&mov(	$tmp2,		$R);
-	&xor(	$L,		$tmp4);
-
-	&shr(	$tmp2,		16);
-	&mov(	$tmp4,		$R);
-
-	&movb(	&LB($tmp1),	&HB($tmp2));	# A
-	&and(	$tmp2,		0xff);		# B
-
-	&movb(	&LB($tmp3),	&HB($tmp4));	# C
-	&and(	$tmp4,		0xff);		# D
-
-	&mov(	$tmp1,		&DWP(&n2a($BF_OFF+0x0000),$P,$tmp1,4));
-	&mov(	$tmp2,		&DWP(&n2a($BF_OFF+0x0400),$P,$tmp2,4));
-
-	&add(	$tmp2,		$tmp1);
-	&mov(	$tmp1,		&DWP(&n2a($BF_OFF+0x0800),$P,$tmp3,4));
-
-	&xor(	$tmp2,		$tmp1);
-	&mov(	$tmp4,		&DWP(&n2a($BF_OFF+0x0C00),$P,$tmp4,4));
-
-	&add(	$tmp2,		$tmp4);
-	if (($enc && ($i != 16)) || ((!$enc) && ($i != 1)))
-		{ &xor(	$tmp1,		$tmp1); }
-	else
-		{
-		&comment("Load parameter 0 ($i) enc=$enc");
-		&mov($tmp1,&wparam(0));
-		} # In last loop
-
-	&xor(	$L,		$tmp2);
-	# delay
-	}
-
-sub n2a
-	{
-	sprintf("%d",$_[0]);
-	}
-

+ 0 - 127
drivers/builtin_openssl2/crypto/bf/asm/bf-686.pl

@@ -1,127 +0,0 @@
-#!/usr/local/bin/perl
-
-push(@INC,"perlasm","../../perlasm");
-require "x86asm.pl";
-require "cbc.pl";
-
-&asm_init($ARGV[0],"bf-686.pl");
-
-$BF_ROUNDS=16;
-$BF_OFF=($BF_ROUNDS+2)*4;
-$L="ecx";
-$R="edx";
-$P="edi";
-$tot="esi";
-$tmp1="eax";
-$tmp2="ebx";
-$tmp3="ebp";
-
-&des_encrypt("BF_encrypt",1);
-&des_encrypt("BF_decrypt",0);
-&cbc("BF_cbc_encrypt","BF_encrypt","BF_decrypt",1,4,5,3,-1,-1);
-
-&asm_finish();
-
-&file_end();
-
-sub des_encrypt
-	{
-	local($name,$enc)=@_;
-
-	&function_begin($name,"");
-
-	&comment("");
-	&comment("Load the 2 words");
-	&mov("eax",&wparam(0));
-	&mov($L,&DWP(0,"eax","",0));
-	&mov($R,&DWP(4,"eax","",0));
-
-	&comment("");
-	&comment("P pointer, s and enc flag");
-	&mov($P,&wparam(1));
-
-	&xor(	$tmp1,	$tmp1);
-	&xor(	$tmp2,	$tmp2);
-
-	# encrypting part
-
-	if ($enc)
-		{
-		&xor($L,&DWP(0,$P,"",0));
-		for ($i=0; $i<$BF_ROUNDS; $i+=2)
-			{
-			&comment("");
-			&comment("Round $i");
-			&BF_ENCRYPT($i+1,$R,$L,$P,$tot,$tmp1,$tmp2,$tmp3);
-
-			&comment("");
-			&comment("Round ".sprintf("%d",$i+1));
-			&BF_ENCRYPT($i+2,$L,$R,$P,$tot,$tmp1,$tmp2,$tmp3);
-			}
-		&xor($R,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
-
-		&mov("eax",&wparam(0));
-		&mov(&DWP(0,"eax","",0),$R);
-		&mov(&DWP(4,"eax","",0),$L);
-		&function_end_A($name);
-		}
-	else
-		{
-		&xor($L,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
-		for ($i=$BF_ROUNDS; $i>0; $i-=2)
-			{
-			&comment("");
-			&comment("Round $i");
-			&BF_ENCRYPT($i,$R,$L,$P,$tot,$tmp1,$tmp2,$tmp3);
-			&comment("");
-			&comment("Round ".sprintf("%d",$i-1));
-			&BF_ENCRYPT($i-1,$L,$R,$P,$tot,$tmp1,$tmp2,$tmp3);
-			}
-		&xor($R,&DWP(0,$P,"",0));
-
-		&mov("eax",&wparam(0));
-		&mov(&DWP(0,"eax","",0),$R);
-		&mov(&DWP(4,"eax","",0),$L);
-		&function_end_A($name);
-		}
-
-	&function_end_B($name);
-	}
-
-sub BF_ENCRYPT
-	{
-	local($i,$L,$R,$P,$tot,$tmp1,$tmp2,$tmp3)=@_;
-
-	&rotr(	$R,		16);
-	&mov(	$tot,		&DWP(&n2a($i*4),$P,"",0));
-
-	&movb(	&LB($tmp1),	&HB($R));
-	&movb(	&LB($tmp2),	&LB($R));
-
-	&rotr(	$R,		16);
-	&xor(	$L,		$tot);
-
-	&mov(	$tot,		&DWP(&n2a($BF_OFF+0x0000),$P,$tmp1,4));
-	&mov(	$tmp3,		&DWP(&n2a($BF_OFF+0x0400),$P,$tmp2,4));
-
-	&movb(	&LB($tmp1),	&HB($R));
-	&movb(	&LB($tmp2),	&LB($R));
-
-	&add(	$tot,		$tmp3);
-	&mov(	$tmp1,		&DWP(&n2a($BF_OFF+0x0800),$P,$tmp1,4)); # delay
-
-	&xor(	$tot,		$tmp1);
-	&mov(	$tmp3,		&DWP(&n2a($BF_OFF+0x0C00),$P,$tmp2,4));
-
-	&add(	$tot,		$tmp3);
-	&xor(	$tmp1,		$tmp1);
-
-	&xor(	$L,		$tot);					
-	# delay
-	}
-
-sub n2a
-	{
-	sprintf("%d",$_[0]);
-	}
-

+ 0 - 538
drivers/builtin_openssl2/crypto/bf/bftest.c

@@ -1,538 +0,0 @@
-/* crypto/bf/bftest.c */
-/* Copyright (C) 1995-1998 Eric Young ([email protected])
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young ([email protected]).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson ([email protected]).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young ([email protected])"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson ([email protected])"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-/*
- * This has been a quickly hacked 'ideatest.c'.  When I add tests for other
- * RC2 modes, more of the code will be uncommented.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <openssl/opensslconf.h> /* To see if OPENSSL_NO_BF is defined */
-
-#include "../e_os.h"
-
-#ifdef OPENSSL_NO_BF
-int main(int argc, char *argv[])
-{
-    printf("No BF support\n");
-    return (0);
-}
-#else
-# include <openssl/blowfish.h>
-
-# ifdef CHARSET_EBCDIC
-#  include <openssl/ebcdic.h>
-# endif
-
-static char *bf_key[2] = {
-    "abcdefghijklmnopqrstuvwxyz",
-    "Who is John Galt?"
-};
-
-/* big endian */
-static BF_LONG bf_plain[2][2] = {
-    {0x424c4f57L, 0x46495348L},
-    {0xfedcba98L, 0x76543210L}
-};
-
-static BF_LONG bf_cipher[2][2] = {
-    {0x324ed0feL, 0xf413a203L},
-    {0xcc91732bL, 0x8022f684L}
-};
-
-/************/
-
-/* Lets use the DES test vectors :-) */
-# define NUM_TESTS 34
-static unsigned char ecb_data[NUM_TESTS][8] = {
-    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
-    {0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-    {0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
-    {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
-    {0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
-    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-    {0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
-    {0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57},
-    {0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E},
-    {0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86},
-    {0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E},
-    {0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6},
-    {0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE},
-    {0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6},
-    {0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE},
-    {0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16},
-    {0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F},
-    {0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46},
-    {0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E},
-    {0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76},
-    {0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07},
-    {0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F},
-    {0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7},
-    {0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF},
-    {0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6},
-    {0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF},
-    {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
-    {0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
-    {0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
-    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
-    {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
-    {0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10}
-};
-
-static unsigned char plain_data[NUM_TESTS][8] = {
-    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
-    {0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
-    {0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
-    {0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
-    {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
-    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-    {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
-    {0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42},
-    {0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA},
-    {0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72},
-    {0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A},
-    {0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2},
-    {0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A},
-    {0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2},
-    {0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A},
-    {0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02},
-    {0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A},
-    {0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32},
-    {0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA},
-    {0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62},
-    {0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2},
-    {0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA},
-    {0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92},
-    {0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A},
-    {0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2},
-    {0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A},
-    {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
-    {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
-    {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
-    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
-    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
-};
-
-static unsigned char cipher_data[NUM_TESTS][8] = {
-    {0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78},
-    {0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A},
-    {0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2},
-    {0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D},
-    {0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96},
-    {0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7},
-    {0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78},
-    {0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D},
-    {0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B},
-    {0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0},
-    {0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4},
-    {0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB},
-    {0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A},
-    {0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18},
-    {0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98},
-    {0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5},
-    {0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79},
-    {0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3},
-    {0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69},
-    {0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B},
-    {0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E},
-    {0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD},
-    {0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19},
-    {0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3},
-    {0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5},
-    {0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78},
-    {0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01},
-    {0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2},
-    {0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE},
-    {0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D},
-    {0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4},
-    {0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC},
-    {0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A},
-    {0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A},
-};
-
-static unsigned char cbc_key[16] = {
-    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
-    0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87
-};
-static unsigned char cbc_iv[8] =
-    { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 };
-static char cbc_data[40] = "7654321 Now is the time for ";
-static unsigned char cbc_ok[32] = {
-    0x6B, 0x77, 0xB4, 0xD6, 0x30, 0x06, 0xDE, 0xE6,
-    0x05, 0xB1, 0x56, 0xE2, 0x74, 0x03, 0x97, 0x93,
-    0x58, 0xDE, 0xB9, 0xE7, 0x15, 0x46, 0x16, 0xD9,
-    0x59, 0xF1, 0x65, 0x2B, 0xD5, 0xFF, 0x92, 0xCC
-};
-
-static unsigned char cfb64_ok[] = {
-    0xE7, 0x32, 0x14, 0xA2, 0x82, 0x21, 0x39, 0xCA,
-    0xF2, 0x6E, 0xCF, 0x6D, 0x2E, 0xB9, 0xE7, 0x6E,
-    0x3D, 0xA3, 0xDE, 0x04, 0xD1, 0x51, 0x72, 0x00,
-    0x51, 0x9D, 0x57, 0xA6, 0xC3
-};
-
-static unsigned char ofb64_ok[] = {
-    0xE7, 0x32, 0x14, 0xA2, 0x82, 0x21, 0x39, 0xCA,
-    0x62, 0xB3, 0x43, 0xCC, 0x5B, 0x65, 0x58, 0x73,
-    0x10, 0xDD, 0x90, 0x8D, 0x0C, 0x24, 0x1B, 0x22,
-    0x63, 0xC2, 0xCF, 0x80, 0xDA
-};
-
-# define KEY_TEST_NUM    25
-static unsigned char key_test[KEY_TEST_NUM] = {
-    0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87,
-    0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f,
-    0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
-    0x88
-};
-
-static unsigned char key_data[8] =
-    { 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10 };
-
-static unsigned char key_out[KEY_TEST_NUM][8] = {
-    {0xF9, 0xAD, 0x59, 0x7C, 0x49, 0xDB, 0x00, 0x5E},
-    {0xE9, 0x1D, 0x21, 0xC1, 0xD9, 0x61, 0xA6, 0xD6},
-    {0xE9, 0xC2, 0xB7, 0x0A, 0x1B, 0xC6, 0x5C, 0xF3},
-    {0xBE, 0x1E, 0x63, 0x94, 0x08, 0x64, 0x0F, 0x05},
-    {0xB3, 0x9E, 0x44, 0x48, 0x1B, 0xDB, 0x1E, 0x6E},
-    {0x94, 0x57, 0xAA, 0x83, 0xB1, 0x92, 0x8C, 0x0D},
-    {0x8B, 0xB7, 0x70, 0x32, 0xF9, 0x60, 0x62, 0x9D},
-    {0xE8, 0x7A, 0x24, 0x4E, 0x2C, 0xC8, 0x5E, 0x82},
-    {0x15, 0x75, 0x0E, 0x7A, 0x4F, 0x4E, 0xC5, 0x77},
-    {0x12, 0x2B, 0xA7, 0x0B, 0x3A, 0xB6, 0x4A, 0xE0},
-    {0x3A, 0x83, 0x3C, 0x9A, 0xFF, 0xC5, 0x37, 0xF6},
-    {0x94, 0x09, 0xDA, 0x87, 0xA9, 0x0F, 0x6B, 0xF2},
-    {0x88, 0x4F, 0x80, 0x62, 0x50, 0x60, 0xB8, 0xB4},
-    {0x1F, 0x85, 0x03, 0x1C, 0x19, 0xE1, 0x19, 0x68},
-    {0x79, 0xD9, 0x37, 0x3A, 0x71, 0x4C, 0xA3, 0x4F},
-    {0x93, 0x14, 0x28, 0x87, 0xEE, 0x3B, 0xE1, 0x5C},
-    {0x03, 0x42, 0x9E, 0x83, 0x8C, 0xE2, 0xD1, 0x4B},
-    {0xA4, 0x29, 0x9E, 0x27, 0x46, 0x9F, 0xF6, 0x7B},
-    {0xAF, 0xD5, 0xAE, 0xD1, 0xC1, 0xBC, 0x96, 0xA8},
-    {0x10, 0x85, 0x1C, 0x0E, 0x38, 0x58, 0xDA, 0x9F},
-    {0xE6, 0xF5, 0x1E, 0xD7, 0x9B, 0x9D, 0xB2, 0x1F},
-    {0x64, 0xA6, 0xE1, 0x4A, 0xFD, 0x36, 0xB4, 0x6F},
-    {0x80, 0xC7, 0xD7, 0xD4, 0x5A, 0x54, 0x79, 0xAD},
-    {0x05, 0x04, 0x4B, 0x62, 0xFA, 0x52, 0xD0, 0x80},
-};
-
-static int test(void);
-static int print_test_data(void);
-int main(int argc, char *argv[])
-{
-    int ret;
-
-    if (argc > 1)
-        ret = print_test_data();
-    else
-        ret = test();
-
-# ifdef OPENSSL_SYS_NETWARE
-    if (ret)
-        printf("ERROR: %d\n", ret);
-# endif
-    EXIT(ret);
-    return (0);
-}
-
-static int print_test_data(void)
-{
-    unsigned int i, j;
-
-    printf("ecb test data\n");
-    printf("key bytes\t\tclear bytes\t\tcipher bytes\n");
-    for (i = 0; i < NUM_TESTS; i++) {
-        for (j = 0; j < 8; j++)
-            printf("%02X", ecb_data[i][j]);
-        printf("\t");
-        for (j = 0; j < 8; j++)
-            printf("%02X", plain_data[i][j]);
-        printf("\t");
-        for (j = 0; j < 8; j++)
-            printf("%02X", cipher_data[i][j]);
-        printf("\n");
-    }
-
-    printf("set_key test data\n");
-    printf("data[8]= ");
-    for (j = 0; j < 8; j++)
-        printf("%02X", key_data[j]);
-    printf("\n");
-    for (i = 0; i < KEY_TEST_NUM - 1; i++) {
-        printf("c=");
-        for (j = 0; j < 8; j++)
-            printf("%02X", key_out[i][j]);
-        printf(" k[%2u]=", i + 1);
-        for (j = 0; j < i + 1; j++)
-            printf("%02X", key_test[j]);
-        printf("\n");
-    }
-
-    printf("\nchaining mode test data\n");
-    printf("key[16]   = ");
-    for (j = 0; j < 16; j++)
-        printf("%02X", cbc_key[j]);
-    printf("\niv[8]     = ");
-    for (j = 0; j < 8; j++)
-        printf("%02X", cbc_iv[j]);
-    printf("\ndata[%d]  = '%s'", (int)strlen(cbc_data) + 1, cbc_data);
-    printf("\ndata[%d]  = ", (int)strlen(cbc_data) + 1);
-    for (j = 0; j < strlen(cbc_data) + 1; j++)
-        printf("%02X", cbc_data[j]);
-    printf("\n");
-    printf("cbc cipher text\n");
-    printf("cipher[%d]= ", 32);
-    for (j = 0; j < 32; j++)
-        printf("%02X", cbc_ok[j]);
-    printf("\n");
-
-    printf("cfb64 cipher text\n");
-    printf("cipher[%d]= ", (int)strlen(cbc_data) + 1);
-    for (j = 0; j < strlen(cbc_data) + 1; j++)
-        printf("%02X", cfb64_ok[j]);
-    printf("\n");
-
-    printf("ofb64 cipher text\n");
-    printf("cipher[%d]= ", (int)strlen(cbc_data) + 1);
-    for (j = 0; j < strlen(cbc_data) + 1; j++)
-        printf("%02X", ofb64_ok[j]);
-    printf("\n");
-    return (0);
-}
-
-static int test(void)
-{
-    unsigned char cbc_in[40], cbc_out[40], iv[8];
-    int i, n, err = 0;
-    BF_KEY key;
-    BF_LONG data[2];
-    unsigned char out[8];
-    BF_LONG len;
-
-# ifdef CHARSET_EBCDIC
-    ebcdic2ascii(cbc_data, cbc_data, strlen(cbc_data));
-# endif
-
-    printf("testing blowfish in raw ecb mode\n");
-    for (n = 0; n < 2; n++) {
-# ifdef CHARSET_EBCDIC
-        ebcdic2ascii(bf_key[n], bf_key[n], strlen(bf_key[n]));
-# endif
-        BF_set_key(&key, strlen(bf_key[n]), (unsigned char *)bf_key[n]);
-
-        data[0] = bf_plain[n][0];
-        data[1] = bf_plain[n][1];
-        BF_encrypt(data, &key);
-        if (memcmp(&(bf_cipher[n][0]), &(data[0]), 8) != 0) {
-            printf("BF_encrypt error encrypting\n");
-            printf("got     :");
-            for (i = 0; i < 2; i++)
-                printf("%08lX ", (unsigned long)data[i]);
-            printf("\n");
-            printf("expected:");
-            for (i = 0; i < 2; i++)
-                printf("%08lX ", (unsigned long)bf_cipher[n][i]);
-            err = 1;
-            printf("\n");
-        }
-
-        BF_decrypt(&(data[0]), &key);
-        if (memcmp(&(bf_plain[n][0]), &(data[0]), 8) != 0) {
-            printf("BF_encrypt error decrypting\n");
-            printf("got     :");
-            for (i = 0; i < 2; i++)
-                printf("%08lX ", (unsigned long)data[i]);
-            printf("\n");
-            printf("expected:");
-            for (i = 0; i < 2; i++)
-                printf("%08lX ", (unsigned long)bf_plain[n][i]);
-            printf("\n");
-            err = 1;
-        }
-    }
-
-    printf("testing blowfish in ecb mode\n");
-
-    for (n = 0; n < NUM_TESTS; n++) {
-        BF_set_key(&key, 8, ecb_data[n]);
-
-        BF_ecb_encrypt(&(plain_data[n][0]), out, &key, BF_ENCRYPT);
-        if (memcmp(&(cipher_data[n][0]), out, 8) != 0) {
-            printf("BF_ecb_encrypt blowfish error encrypting\n");
-            printf("got     :");
-            for (i = 0; i < 8; i++)
-                printf("%02X ", out[i]);
-            printf("\n");
-            printf("expected:");
-            for (i = 0; i < 8; i++)
-                printf("%02X ", cipher_data[n][i]);
-            err = 1;
-            printf("\n");
-        }
-
-        BF_ecb_encrypt(out, out, &key, BF_DECRYPT);
-        if (memcmp(&(plain_data[n][0]), out, 8) != 0) {
-            printf("BF_ecb_encrypt error decrypting\n");
-            printf("got     :");
-            for (i = 0; i < 8; i++)
-                printf("%02X ", out[i]);
-            printf("\n");
-            printf("expected:");
-            for (i = 0; i < 8; i++)
-                printf("%02X ", plain_data[n][i]);
-            printf("\n");
-            err = 1;
-        }
-    }
-
-    printf("testing blowfish set_key\n");
-    for (n = 1; n < KEY_TEST_NUM; n++) {
-        BF_set_key(&key, n, key_test);
-        BF_ecb_encrypt(key_data, out, &key, BF_ENCRYPT);
-        /* mips-sgi-irix6.5-gcc  vv  -mabi=64 bug workaround */
-        if (memcmp(out, &(key_out[i = n - 1][0]), 8) != 0) {
-            printf("blowfish setkey error\n");
-            err = 1;
-        }
-    }
-
-    printf("testing blowfish in cbc mode\n");
-    len = strlen(cbc_data) + 1;
-
-    BF_set_key(&key, 16, cbc_key);
-    memset(cbc_in, 0, sizeof cbc_in);
-    memset(cbc_out, 0, sizeof cbc_out);
-    memcpy(iv, cbc_iv, sizeof iv);
-    BF_cbc_encrypt((unsigned char *)cbc_data, cbc_out, len,
-                   &key, iv, BF_ENCRYPT);
-    if (memcmp(cbc_out, cbc_ok, 32) != 0) {
-        err = 1;
-        printf("BF_cbc_encrypt encrypt error\n");
-        for (i = 0; i < 32; i++)
-            printf("0x%02X,", cbc_out[i]);
-    }
-    memcpy(iv, cbc_iv, 8);
-    BF_cbc_encrypt(cbc_out, cbc_in, len, &key, iv, BF_DECRYPT);
-    if (memcmp(cbc_in, cbc_data, strlen(cbc_data) + 1) != 0) {
-        printf("BF_cbc_encrypt decrypt error\n");
-        err = 1;
-    }
-
-    printf("testing blowfish in cfb64 mode\n");
-
-    BF_set_key(&key, 16, cbc_key);
-    memset(cbc_in, 0, 40);
-    memset(cbc_out, 0, 40);
-    memcpy(iv, cbc_iv, 8);
-    n = 0;
-    BF_cfb64_encrypt((unsigned char *)cbc_data, cbc_out, (long)13,
-                     &key, iv, &n, BF_ENCRYPT);
-    BF_cfb64_encrypt((unsigned char *)&(cbc_data[13]), &(cbc_out[13]),
-                     len - 13, &key, iv, &n, BF_ENCRYPT);
-    if (memcmp(cbc_out, cfb64_ok, (int)len) != 0) {
-        err = 1;
-        printf("BF_cfb64_encrypt encrypt error\n");
-        for (i = 0; i < (int)len; i++)
-            printf("0x%02X,", cbc_out[i]);
-    }
-    n = 0;
-    memcpy(iv, cbc_iv, 8);
-    BF_cfb64_encrypt(cbc_out, cbc_in, 17, &key, iv, &n, BF_DECRYPT);
-    BF_cfb64_encrypt(&(cbc_out[17]), &(cbc_in[17]), len - 17,
-                     &key, iv, &n, BF_DECRYPT);
-    if (memcmp(cbc_in, cbc_data, (int)len) != 0) {
-        printf("BF_cfb64_encrypt decrypt error\n");
-        err = 1;
-    }
-
-    printf("testing blowfish in ofb64\n");
-
-    BF_set_key(&key, 16, cbc_key);
-    memset(cbc_in, 0, 40);
-    memset(cbc_out, 0, 40);
-    memcpy(iv, cbc_iv, 8);
-    n = 0;
-    BF_ofb64_encrypt((unsigned char *)cbc_data, cbc_out, (long)13, &key, iv,
-                     &n);
-    BF_ofb64_encrypt((unsigned char *)&(cbc_data[13]), &(cbc_out[13]),
-                     len - 13, &key, iv, &n);
-    if (memcmp(cbc_out, ofb64_ok, (int)len) != 0) {
-        err = 1;
-        printf("BF_ofb64_encrypt encrypt error\n");
-        for (i = 0; i < (int)len; i++)
-            printf("0x%02X,", cbc_out[i]);
-    }
-    n = 0;
-    memcpy(iv, cbc_iv, 8);
-    BF_ofb64_encrypt(cbc_out, cbc_in, 17, &key, iv, &n);
-    BF_ofb64_encrypt(&(cbc_out[17]), &(cbc_in[17]), len - 17, &key, iv, &n);
-    if (memcmp(cbc_in, cbc_data, (int)len) != 0) {
-        printf("BF_ofb64_encrypt decrypt error\n");
-        err = 1;
-    }
-
-    return (err);
-}
-#endif

+ 25 - 0
drivers/builtin_openssl2/crypto/bio/b_dump.c

@@ -181,3 +181,28 @@ int BIO_dump_indent(BIO *bp, const char *s, int len, int indent)
 {
     return BIO_dump_indent_cb(write_bio, bp, s, len, indent);
 }
+
+int BIO_hex_string(BIO *out, int indent, int width, unsigned char *data,
+                   int datalen)
+{
+    int i, j = 0;
+
+    if (datalen < 1)
+        return 1;
+
+    for (i = 0; i < datalen - 1; i++) {
+        if (i && !j)
+            BIO_printf(out, "%*s", indent, "");
+
+        BIO_printf(out, "%02X:", data[i]);
+
+        j = (j + 1) % width;
+        if (!j)
+            BIO_printf(out, "\n");
+    }
+
+    if (i && !j)
+        BIO_printf(out, "%*s", indent, "");
+    BIO_printf(out, "%02X", data[datalen - 1]);
+    return 1;
+}
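
BIO_hex_string() prints colon-separated hex, indenting wrapped lines by `indent` spaces and breaking every `width` bytes, with no colon after the final byte. A runnable usage sketch:

```c
/* Dump 20 bytes as wrapped, colon-separated hex. */
#include <stdio.h>
#include <openssl/bio.h>

int main(void)
{
    unsigned char data[20];
    int i;
    BIO *out = BIO_new_fp(stdout, BIO_NOCLOSE);

    for (i = 0; i < (int)sizeof(data); i++)
        data[i] = (unsigned char)i;

    /* 4-space indent on continuation lines, 8 bytes per line */
    BIO_hex_string(out, 4, 8, data, (int)sizeof(data));
    BIO_printf(out, "\n");
    BIO_free(out);
    return 0;
}
```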

+ 6 - 2
drivers/builtin_openssl2/crypto/bio/b_sock.c

@@ -225,13 +225,17 @@ int BIO_get_port(const char *str, unsigned short *port_ptr)
 int BIO_sock_error(int sock)
 {
     int j, i;
-    int size;
+    union {
+        size_t s;
+        int i;
+    } size;
 
 # if defined(OPENSSL_SYS_BEOS_R5)
     return 0;
 # endif
 
-    size = sizeof(int);
+    /* heuristic way to adapt for platforms that expect 64-bit optlen */
+    size.s = 0, size.i = sizeof(j);
     /*
      * Note: under Windows the third parameter is of type (char *) whereas
      * under other systems it is (void *) if you don't have a cast it will
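
The union replaces a plain int so that platforms whose getsockopt() writes a 64-bit optlen (the "heuristic" the new comment refers to) have room to do so without overrunning the variable: the whole size_t-sized object is zeroed, then sizeof(int) is stored through the int member. A standalone sketch of the same trick; POSIX sockets, with the cast mirroring what the patched code does:

```c
/* Report SO_ERROR robustly against 64-bit optlen implementations. */
#include <stddef.h>
#include <sys/socket.h>

int sock_error(int sock)
{
    int err = 0;
    union {
        size_t s;
        int i;
    } size;

    size.s = 0;               /* zero the whole object...        */
    size.i = sizeof(err);     /* ...then store the 32-bit length */

    if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (void *)&err,
                   (socklen_t *)&size) != 0)
        return 1;             /* getsockopt itself failed        */
    return err;               /* pending error, 0 if none        */
}
```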

+ 1 - 1
drivers/builtin_openssl2/crypto/bio/bio_err.c

@@ -1,6 +1,6 @@
 /* crypto/bio/bio_err.c */
 /* ====================================================================
- * Copyright (c) 1999-2011 The OpenSSL Project.  All rights reserved.
+ * Copyright (c) 1999-2015 The OpenSSL Project.  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions

+ 1 - 1
drivers/builtin_openssl2/crypto/bio/bss_acpt.c

@@ -445,7 +445,7 @@ static int acpt_puts(BIO *bp, const char *str)
     return (ret);
 }
 
-BIO *BIO_new_accept(char *str)
+BIO *BIO_new_accept(const char *str)
 {
     BIO *ret;
 

+ 1 - 1
drivers/builtin_openssl2/crypto/bio/bss_conn.c

@@ -594,7 +594,7 @@ static int conn_puts(BIO *bp, const char *str)
     return (ret);
 }
 
-BIO *BIO_new_connect(char *str)
+BIO *BIO_new_connect(const char *str)
 {
     BIO *ret;
 

+ 77 - 7
drivers/builtin_openssl2/crypto/bio/bss_dgram.c

@@ -65,7 +65,7 @@
 #include <openssl/bio.h>
 #ifndef OPENSSL_NO_DGRAM
 
-# if defined(OPENSSL_SYS_WIN32) || defined(OPENSSL_SYS_VMS)
+# if defined(OPENSSL_SYS_VMS)
 #  include <sys/timeb.h>
 # endif
 
@@ -80,6 +80,10 @@
 #  define IP_MTU      14        /* linux is lame */
 # endif
 
+# if OPENSSL_USE_IPV6 && !defined(IPPROTO_IPV6)
+#  define IPPROTO_IPV6 41       /* windows is lame */
+# endif
+
 # if defined(__FreeBSD__) && defined(IN6_IS_ADDR_V4MAPPED)
 /* Standard definition causes type-punning problems. */
 #  undef IN6_IS_ADDR_V4MAPPED
@@ -496,8 +500,8 @@ static long dgram_ctrl(BIO *b, int cmd, long num, void *ptr)
     int *ip;
     struct sockaddr *to = NULL;
     bio_dgram_data *data = NULL;
-# if defined(OPENSSL_SYS_LINUX) && (defined(IP_MTU_DISCOVER) || defined(IP_MTU))
     int sockopt_val = 0;
+# if defined(OPENSSL_SYS_LINUX) && (defined(IP_MTU_DISCOVER) || defined(IP_MTU))
     socklen_t sockopt_len;      /* assume that system supporting IP_MTU is
                                  * modern enough to define socklen_t */
     socklen_t addr_len;
@@ -880,6 +884,61 @@ static long dgram_ctrl(BIO *b, int cmd, long num, void *ptr)
             ret = 0;
         break;
 # endif
+    case BIO_CTRL_DGRAM_SET_DONT_FRAG:
+        sockopt_val = num ? 1 : 0;
+
+        switch (data->peer.sa.sa_family) {
+        case AF_INET:
+# if defined(IP_DONTFRAG)
+            if ((ret = setsockopt(b->num, IPPROTO_IP, IP_DONTFRAG,
+                                  &sockopt_val, sizeof(sockopt_val))) < 0) {
+                perror("setsockopt");
+                ret = -1;
+            }
+# elif defined(OPENSSL_SYS_LINUX) && defined(IP_MTU_DISCOVER) && defined (IP_PMTUDISC_PROBE)
+            if ((sockopt_val = num ? IP_PMTUDISC_PROBE : IP_PMTUDISC_DONT),
+                (ret = setsockopt(b->num, IPPROTO_IP, IP_MTU_DISCOVER,
+                                  &sockopt_val, sizeof(sockopt_val))) < 0) {
+                perror("setsockopt");
+                ret = -1;
+            }
+# elif defined(OPENSSL_SYS_WINDOWS) && defined(IP_DONTFRAGMENT)
+            if ((ret = setsockopt(b->num, IPPROTO_IP, IP_DONTFRAGMENT,
+                                  (const char *)&sockopt_val,
+                                  sizeof(sockopt_val))) < 0) {
+                perror("setsockopt");
+                ret = -1;
+            }
+# else
+            ret = -1;
+# endif
+            break;
+# if OPENSSL_USE_IPV6
+        case AF_INET6:
+#  if defined(IPV6_DONTFRAG)
+            if ((ret = setsockopt(b->num, IPPROTO_IPV6, IPV6_DONTFRAG,
+                                  (const void *)&sockopt_val,
+                                  sizeof(sockopt_val))) < 0) {
+                perror("setsockopt");
+                ret = -1;
+            }
+#  elif defined(OPENSSL_SYS_LINUX) && defined(IPV6_MTU_DISCOVER)
+            if ((sockopt_val = num ? IP_PMTUDISC_PROBE : IP_PMTUDISC_DONT),
+                (ret = setsockopt(b->num, IPPROTO_IPV6, IPV6_MTU_DISCOVER,
+                                  &sockopt_val, sizeof(sockopt_val))) < 0) {
+                perror("setsockopt");
+                ret = -1;
+            }
+#  else
+            ret = -1;
+#  endif
+            break;
+# endif
+        default:
+            ret = -1;
+            break;
+        }
+        break;
     case BIO_CTRL_DGRAM_GET_MTU_OVERHEAD:
         ret = dgram_get_mtu_overhead(data);
         break;
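Applications reach the new case through BIO_ctrl on a datagram BIO. A hedged sketch follows; the address and port are placeholders, the peer must be recorded first so that dgram_ctrl can branch on the address family, and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <openssl/bio.h>

int main(void)
{
    struct sockaddr_in peer;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    BIO *bio = BIO_new_dgram(fd, BIO_CLOSE);

    memset(&peer, 0, sizeof(peer));
    peer.sin_family = AF_INET;
    peer.sin_port = htons(4433);
    inet_pton(AF_INET, "127.0.0.1", &peer.sin_addr);

    /* Record the peer so the DONT_FRAG case can pick AF_INET vs AF_INET6. */
    BIO_ctrl(bio, BIO_CTRL_DGRAM_SET_CONNECTED, 0, &peer);

    /* num != 0 requests the don't-fragment bit, 0 clears it again; the
     * ctrl returns -1 where no platform mechanism exists. */
    if (BIO_ctrl(bio, BIO_CTRL_DGRAM_SET_DONT_FRAG, 1, NULL) < 0)
        fprintf(stderr, "DF bit not supported on this platform\n");

    BIO_free(bio);
    return 0;
}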
@@ -1993,11 +2052,22 @@ int BIO_dgram_non_fatal_error(int err)
 
 static void get_current_time(struct timeval *t)
 {
-# ifdef OPENSSL_SYS_WIN32
-    struct _timeb tb;
-    _ftime(&tb);
-    t->tv_sec = (long)tb.time;
-    t->tv_usec = (long)tb.millitm * 1000;
+# if defined(_WIN32)
+    SYSTEMTIME st;
+    union {
+        unsigned __int64 ul;
+        FILETIME ft;
+    } now;
+
+    GetSystemTime(&st);
+    SystemTimeToFileTime(&st, &now.ft);
+#  ifdef  __MINGW32__
+    now.ul -= 116444736000000000ULL;
+#  else
+    now.ul -= 116444736000000000UI64; /* re-bias to 1/1/1970 */
+#  endif
+    t->tv_sec = (long)(now.ul / 10000000);
+    t->tv_usec = ((int)(now.ul % 10000000)) / 10;
 # elif defined(OPENSSL_SYS_VMS)
     struct timeb tb;
     ftime(&tb);
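The constant subtracted above is the count of 100 ns FILETIME ticks between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01). A portable sanity check of that arithmetic:

#include <stdio.h>

int main(void)
{
    /* 369 calendar years from 1601 to 1970, of which 89 contain a Feb 29
     * (every fourth year, minus the non-leap centuries 1700, 1800, 1900). */
    unsigned long long days = 369ULL * 365 + 89;
    unsigned long long ticks = days * 86400 * 10000000ULL; /* 100 ns units */

    printf("%llu\n", ticks); /* 116444736000000000, the re-bias constant */
    return ticks == 116444736000000000ULL ? 0 : 1;
}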

+ 20 - 2
drivers/builtin_openssl2/crypto/bio/bss_fd.c

@@ -63,9 +63,27 @@
 
 #if defined(OPENSSL_NO_POSIX_IO)
 /*
- * One can argue that one should implement dummy placeholder for
- * BIO_s_fd here...
+ * Dummy placeholder for BIO_s_fd...
  */
+BIO *BIO_new_fd(int fd, int close_flag)
+{
+    return NULL;
+}
+
+int BIO_fd_non_fatal_error(int err)
+{
+    return 0;
+}
+
+int BIO_fd_should_retry(int i)
+{
+    return 0;
+}
+
+BIO_METHOD *BIO_s_fd(void)
+{
+    return NULL;
+}
 #else
 /*
  * As for unconditional usage of "UPLINK" interface in this module.

+ 4 - 2
drivers/builtin_openssl2/crypto/bio/bss_mem.c

@@ -91,7 +91,8 @@ BIO_METHOD *BIO_s_mem(void)
     return (&mem_method);
 }
 
-BIO *BIO_new_mem_buf(void *buf, int len)
+
+BIO *BIO_new_mem_buf(const void *buf, int len)
 {
     BIO *ret;
     BUF_MEM *b;
@@ -105,7 +106,8 @@ BIO *BIO_new_mem_buf(void *buf, int len)
     if (!(ret = BIO_new(BIO_s_mem())))
         return NULL;
     b = (BUF_MEM *)ret->ptr;
-    b->data = buf;
+    /* Cast away const and trust in the MEM_RDONLY flag. */
+    b->data = (void *)buf;
     b->length = sz;
     b->max = sz;
     ret->flags |= BIO_FLAGS_MEM_RDONLY;
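With the const-qualified prototype a string literal can now be handed over without a cast, and the read-only flag set at the end of this hunk is what makes the const-stripping cast safe. A minimal sketch:

#include <stdio.h>
#include <openssl/bio.h>

int main(void)
{
    char line[32];
    /* len == -1 means "buf is a NUL-terminated string, measure it". */
    BIO *bio = BIO_new_mem_buf("hello, bio\n", -1);

    /* Reads succeed; writes would fail because BIO_FLAGS_MEM_RDONLY is set. */
    while (BIO_gets(bio, line, (int)sizeof(line)) > 0)
        fputs(line, stdout);
    BIO_free(bio);
    return 0;
}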

+ 0 - 321
drivers/builtin_openssl2/crypto/bn/asm/alpha-mont.pl

@@ -1,321 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# On 21264 RSA sign performance improves by 70/35/20/15 percent for
-# 512/1024/2048/4096 bit key lengths. This is against vendor compiler
-# instructed to '-tune host' code with in-line assembler. Other
-# benchmarks improve by 15-20%. To anchor it to something else, the
-# code provides approximately the same performance per GHz as AMD64.
-# I.e. if you compare 1GHz 21264 and 2GHz Opteron, you'll observe ~2x
-# difference.
-
-# int bn_mul_mont(
-$rp="a0";	# BN_ULONG *rp,
-$ap="a1";	# const BN_ULONG *ap,
-$bp="a2";	# const BN_ULONG *bp,
-$np="a3";	# const BN_ULONG *np,
-$n0="a4";	# const BN_ULONG *n0,
-$num="a5";	# int num);
-
-$lo0="t0";
-$hi0="t1";
-$lo1="t2";
-$hi1="t3";
-$aj="t4";
-$bi="t5";
-$nj="t6";
-$tp="t7";
-$alo="t8";
-$ahi="t9";
-$nlo="t10";
-$nhi="t11";
-$tj="t12";
-$i="s3";
-$j="s4";
-$m1="s5";
-
-$code=<<___;
-#ifdef __linux__
-#include <asm/regdef.h>
-#else
-#include <asm.h>
-#include <regdef.h>
-#endif
-
-.text
-
-.set	noat
-.set	noreorder
-
-.globl	bn_mul_mont
-.align	5
-.ent	bn_mul_mont
-bn_mul_mont:
-	lda	sp,-48(sp)
-	stq	ra,0(sp)
-	stq	s3,8(sp)
-	stq	s4,16(sp)
-	stq	s5,24(sp)
-	stq	fp,32(sp)
-	mov	sp,fp
-	.mask	0x0400f000,-48
-	.frame	fp,48,ra
-	.prologue 0
-
-	.align	4
-	.set	reorder
-	sextl	$num,$num
-	mov	0,v0
-	cmplt	$num,4,AT
-	bne	AT,.Lexit
-
-	ldq	$hi0,0($ap)	# ap[0]
-	s8addq	$num,16,AT
-	ldq	$aj,8($ap)
-	subq	sp,AT,sp
-	ldq	$bi,0($bp)	# bp[0]
-	lda	AT,-4096(zero)	# mov	-4096,AT
-	ldq	$n0,0($n0)
-	and	sp,AT,sp
-
-	mulq	$hi0,$bi,$lo0
-	ldq	$hi1,0($np)	# np[0]
-	umulh	$hi0,$bi,$hi0
-	ldq	$nj,8($np)
-
-	mulq	$lo0,$n0,$m1
-
-	mulq	$hi1,$m1,$lo1
-	umulh	$hi1,$m1,$hi1
-
-	addq	$lo1,$lo0,$lo1
-	cmpult	$lo1,$lo0,AT
-	addq	$hi1,AT,$hi1
-
-	mulq	$aj,$bi,$alo
-	mov	2,$j
-	umulh	$aj,$bi,$ahi
-	mov	sp,$tp
-
-	mulq	$nj,$m1,$nlo
-	s8addq	$j,$ap,$aj
-	umulh	$nj,$m1,$nhi
-	s8addq	$j,$np,$nj
-.align	4
-.L1st:
-	.set	noreorder
-	ldq	$aj,0($aj)
-	addl	$j,1,$j
-	ldq	$nj,0($nj)
-	lda	$tp,8($tp)
-
-	addq	$alo,$hi0,$lo0
-	mulq	$aj,$bi,$alo
-	cmpult	$lo0,$hi0,AT
-	addq	$nlo,$hi1,$lo1
-
-	mulq	$nj,$m1,$nlo
-	addq	$ahi,AT,$hi0
-	cmpult	$lo1,$hi1,v0
-	cmplt	$j,$num,$tj
-
-	umulh	$aj,$bi,$ahi
-	addq	$nhi,v0,$hi1
-	addq	$lo1,$lo0,$lo1
-	s8addq	$j,$ap,$aj
-
-	umulh	$nj,$m1,$nhi
-	cmpult	$lo1,$lo0,v0
-	addq	$hi1,v0,$hi1
-	s8addq	$j,$np,$nj
-
-	stq	$lo1,-8($tp)
-	nop
-	unop
-	bne	$tj,.L1st
-	.set	reorder
-
-	addq	$alo,$hi0,$lo0
-	addq	$nlo,$hi1,$lo1
-	cmpult	$lo0,$hi0,AT
-	cmpult	$lo1,$hi1,v0
-	addq	$ahi,AT,$hi0
-	addq	$nhi,v0,$hi1
-
-	addq	$lo1,$lo0,$lo1
-	cmpult	$lo1,$lo0,v0
-	addq	$hi1,v0,$hi1
-
-	stq	$lo1,0($tp)
-
-	addq	$hi1,$hi0,$hi1
-	cmpult	$hi1,$hi0,AT
-	stq	$hi1,8($tp)
-	stq	AT,16($tp)
-
-	mov	1,$i
-.align	4
-.Louter:
-	s8addq	$i,$bp,$bi
-	ldq	$hi0,0($ap)
-	ldq	$aj,8($ap)
-	ldq	$bi,0($bi)
-	ldq	$hi1,0($np)
-	ldq	$nj,8($np)
-	ldq	$tj,0(sp)
-
-	mulq	$hi0,$bi,$lo0
-	umulh	$hi0,$bi,$hi0
-
-	addq	$lo0,$tj,$lo0
-	cmpult	$lo0,$tj,AT
-	addq	$hi0,AT,$hi0
-
-	mulq	$lo0,$n0,$m1
-
-	mulq	$hi1,$m1,$lo1
-	umulh	$hi1,$m1,$hi1
-
-	addq	$lo1,$lo0,$lo1
-	cmpult	$lo1,$lo0,AT
-	mov	2,$j
-	addq	$hi1,AT,$hi1
-
-	mulq	$aj,$bi,$alo
-	mov	sp,$tp
-	umulh	$aj,$bi,$ahi
-
-	mulq	$nj,$m1,$nlo
-	s8addq	$j,$ap,$aj
-	umulh	$nj,$m1,$nhi
-.align	4
-.Linner:
-	.set	noreorder
-	ldq	$tj,8($tp)	#L0
-	nop			#U1
-	ldq	$aj,0($aj)	#L1
-	s8addq	$j,$np,$nj	#U0
-
-	ldq	$nj,0($nj)	#L0
-	nop			#U1
-	addq	$alo,$hi0,$lo0	#L1
-	lda	$tp,8($tp)
-
-	mulq	$aj,$bi,$alo	#U1
-	cmpult	$lo0,$hi0,AT	#L0
-	addq	$nlo,$hi1,$lo1	#L1
-	addl	$j,1,$j
-
-	mulq	$nj,$m1,$nlo	#U1
-	addq	$ahi,AT,$hi0	#L0
-	addq	$lo0,$tj,$lo0	#L1
-	cmpult	$lo1,$hi1,v0	#U0
-
-	umulh	$aj,$bi,$ahi	#U1
-	cmpult	$lo0,$tj,AT	#L0
-	addq	$lo1,$lo0,$lo1	#L1
-	addq	$nhi,v0,$hi1	#U0
-
-	umulh	$nj,$m1,$nhi	#U1
-	s8addq	$j,$ap,$aj	#L0
-	cmpult	$lo1,$lo0,v0	#L1
-	cmplt	$j,$num,$tj	#U0	# borrow $tj
-
-	addq	$hi0,AT,$hi0	#L0
-	addq	$hi1,v0,$hi1	#U1
-	stq	$lo1,-8($tp)	#L1
-	bne	$tj,.Linner	#U0
-	.set	reorder
-
-	ldq	$tj,8($tp)
-	addq	$alo,$hi0,$lo0
-	addq	$nlo,$hi1,$lo1
-	cmpult	$lo0,$hi0,AT
-	cmpult	$lo1,$hi1,v0
-	addq	$ahi,AT,$hi0
-	addq	$nhi,v0,$hi1
-
-	addq	$lo0,$tj,$lo0
-	cmpult	$lo0,$tj,AT
-	addq	$hi0,AT,$hi0
-
-	ldq	$tj,16($tp)
-	addq	$lo1,$lo0,$j
-	cmpult	$j,$lo0,v0
-	addq	$hi1,v0,$hi1
-
-	addq	$hi1,$hi0,$lo1
-	stq	$j,0($tp)
-	cmpult	$lo1,$hi0,$hi1
-	addq	$lo1,$tj,$lo1
-	cmpult	$lo1,$tj,AT
-	addl	$i,1,$i
-	addq	$hi1,AT,$hi1
-	stq	$lo1,8($tp)
-	cmplt	$i,$num,$tj	# borrow $tj
-	stq	$hi1,16($tp)
-	bne	$tj,.Louter
-
-	s8addq	$num,sp,$tj	# &tp[num]
-	mov	$rp,$bp		# put rp aside
-	mov	sp,$tp
-	mov	sp,$ap
-	mov	0,$hi0		# clear borrow bit
-
-.align	4
-.Lsub:	ldq	$lo0,0($tp)
-	ldq	$lo1,0($np)
-	lda	$tp,8($tp)
-	lda	$np,8($np)
-	subq	$lo0,$lo1,$lo1	# tp[i]-np[i]
-	cmpult	$lo0,$lo1,AT
-	subq	$lo1,$hi0,$lo0
-	cmpult	$lo1,$lo0,$hi0
-	or	$hi0,AT,$hi0
-	stq	$lo0,0($rp)
-	cmpult	$tp,$tj,v0
-	lda	$rp,8($rp)
-	bne	v0,.Lsub
-
-	subq	$hi1,$hi0,$hi0	# handle upmost overflow bit
-	mov	sp,$tp
-	mov	$bp,$rp		# restore rp
-
-	and	sp,$hi0,$ap
-	bic	$bp,$hi0,$bp
-	bis	$bp,$ap,$ap	# ap=borrow?tp:rp
-
-.align	4
-.Lcopy:	ldq	$aj,0($ap)	# copy or in-place refresh
-	lda	$tp,8($tp)
-	lda	$rp,8($rp)
-	lda	$ap,8($ap)
-	stq	zero,-8($tp)	# zap tp
-	cmpult	$tp,$tj,AT
-	stq	$aj,-8($rp)
-	bne	AT,.Lcopy
-	mov	1,v0
-
-.Lexit:
-	.set	noreorder
-	mov	fp,sp
-	/*ldq	ra,0(sp)*/
-	ldq	s3,8(sp)
-	ldq	s4,16(sp)
-	ldq	s5,24(sp)
-	ldq	fp,32(sp)
-	lda	sp,48(sp)
-	ret	(ra)
-.end	bn_mul_mont
-.ascii	"Montgomery Multiplication for Alpha, CRYPTOGAMS by <appro\@openssl.org>"
-.align	2
-___
-
-print $code;
-close STDOUT;
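This file and the other bn_mul_mont generators removed below all emit the same word-serial Montgomery multiplication; dropping them leaves the bundled build on OpenSSL's generic C path. As a reference for the algorithm only, an illustrative sketch with 32-bit words and not the library's actual fallback, here is the textbook CIOS loop, plus a one-word demo that derives n0 by Newton iteration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* r = a * b * R^-1 mod n, R = 2^(32*num), for odd n and a, b < n.
 * Textbook CIOS: per word of b, accumulate a*b[i] into t, add the multiple
 * m*n that zeroes t's low word, then shift t down one word. */
static void mont_mul(uint32_t *r, const uint32_t *a, const uint32_t *b,
                     const uint32_t *n, uint32_t n0, int num)
{
    uint32_t t[num + 2];                       /* C99 VLA accumulator */
    int i, j;

    for (i = 0; i < num + 2; i++)
        t[i] = 0;

    for (i = 0; i < num; i++) {
        uint64_t c = 0;
        uint32_t m;

        for (j = 0; j < num; j++) {            /* t += a * b[i] */
            c += (uint64_t)a[j] * b[i] + t[j];
            t[j] = (uint32_t)c;
            c >>= 32;
        }
        c += t[num];
        t[num] = (uint32_t)c;
        t[num + 1] = (uint32_t)(c >> 32);

        m = t[0] * n0;                         /* n0 = -n^-1 mod 2^32 */
        c = 0;
        for (j = 0; j < num; j++) {            /* t += m * n; t[0] becomes 0 */
            c += (uint64_t)m * n[j] + t[j];
            t[j] = (uint32_t)c;
            c >>= 32;
        }
        c += t[num];
        t[num] = (uint32_t)c;
        t[num + 1] += (uint32_t)(c >> 32);

        for (j = 0; j <= num; j++)             /* t /= 2^32 */
            t[j] = t[j + 1];
        t[num + 1] = 0;
    }

    /* Here t < 2n, so at most one subtraction of n is needed. */
    {
        int ge = t[num] != 0;

        if (!ge) {
            ge = 1;                            /* treat t == n as "subtract" */
            for (i = num - 1; i >= 0; i--)
                if (t[i] != n[i]) {
                    ge = t[i] > n[i];
                    break;
                }
        }
        if (ge) {
            uint64_t d, borrow = 0;

            for (i = 0; i < num; i++) {        /* final borrow cancels t[num] */
                d = (uint64_t)t[i] - n[i] - borrow;
                t[i] = (uint32_t)d;
                borrow = (d >> 32) & 1;
            }
        }
    }
    for (i = 0; i < num; i++)
        r[i] = t[i];
}

int main(void)
{
    uint32_t n = 13, a = 7, b = 9, r, inv = n, n0;
    int k;

    for (k = 0; k < 5; k++)          /* Newton: inv = n^-1 mod 2^32, n odd */
        inv *= 2 - n * inv;
    n0 = 0u - inv;

    mont_mul(&r, &a, &b, &n, n0, 1); /* 7 * 9 * 2^-32 mod 13 */
    printf("%" PRIu32 "\n", r);      /* prints 7: 63 * 2^-32 == 7 (mod 13) */
    return 0;
}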

+ 0 - 278
drivers/builtin_openssl2/crypto/bn/asm/armv4-gf2m.pl

@@ -1,278 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# May 2011
-#
-# The module implements bn_GF2m_mul_2x2 polynomial multiplication
-# used in bn_gf2m.c. It's kind of low-hanging mechanical port from
-# C for the time being... Except that it has two code paths: pure
-# integer code suitable for any ARMv4 and later CPU and NEON code
-# suitable for ARMv7. Pure integer 1x1 multiplication subroutine runs
-# in ~45 cycles on dual-issue core such as Cortex A8, which is ~50%
-# faster than compiler-generated code. For ECDH and ECDSA verify (but
-# not for ECDSA sign) it means 25%-45% improvement depending on key
-# length, more for longer keys. Even though NEON 1x1 multiplication
-# runs in even less cycles, ~30, improvement is measurable only on
-# longer keys. One has to optimize code elsewhere to get NEON glow...
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
-sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
-sub Q()     { shift=~m|d([1-3]?[02468])|?"q".($1/2):""; }
-
-$code=<<___;
-#include "arm_arch.h"
-
-.text
-.code	32
-
-#if __ARM_ARCH__>=7
-.fpu	neon
-
-.type	mul_1x1_neon,%function
-.align	5
-mul_1x1_neon:
-	vshl.u64	`&Dlo("q1")`,d16,#8	@ q1-q3 are slided $a
-	vmull.p8	`&Q("d0")`,d16,d17	@ a·bb
-	vshl.u64	`&Dlo("q2")`,d16,#16
-	vmull.p8	q1,`&Dlo("q1")`,d17	@ a<<8·bb
-	vshl.u64	`&Dlo("q3")`,d16,#24
-	vmull.p8	q2,`&Dlo("q2")`,d17	@ a<<16·bb
-	vshr.u64	`&Dlo("q1")`,#8
-	vmull.p8	q3,`&Dlo("q3")`,d17	@ a<<24·bb
-	vshl.u64	`&Dhi("q1")`,#24
-	veor		d0,`&Dlo("q1")`
-	vshr.u64	`&Dlo("q2")`,#16
-	veor		d0,`&Dhi("q1")`
-	vshl.u64	`&Dhi("q2")`,#16
-	veor		d0,`&Dlo("q2")`
-	vshr.u64	`&Dlo("q3")`,#24
-	veor		d0,`&Dhi("q2")`
-	vshl.u64	`&Dhi("q3")`,#8
-	veor		d0,`&Dlo("q3")`
-	veor		d0,`&Dhi("q3")`
-	bx	lr
-.size	mul_1x1_neon,.-mul_1x1_neon
-#endif
-___
-################
-# private interface to mul_1x1_ialu
-#
-$a="r1";
-$b="r0";
-
-($a0,$a1,$a2,$a12,$a4,$a14)=
-($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);
-
-$mask="r12";
-
-$code.=<<___;
-.type	mul_1x1_ialu,%function
-.align	5
-mul_1x1_ialu:
-	mov	$a0,#0
-	bic	$a1,$a,#3<<30		@ a1=a&0x3fffffff
-	str	$a0,[sp,#0]		@ tab[0]=0
-	add	$a2,$a1,$a1		@ a2=a1<<1
-	str	$a1,[sp,#4]		@ tab[1]=a1
-	eor	$a12,$a1,$a2		@ a1^a2
-	str	$a2,[sp,#8]		@ tab[2]=a2
-	mov	$a4,$a1,lsl#2		@ a4=a1<<2
-	str	$a12,[sp,#12]		@ tab[3]=a1^a2
-	eor	$a14,$a1,$a4		@ a1^a4
-	str	$a4,[sp,#16]		@ tab[4]=a4
-	eor	$a0,$a2,$a4		@ a2^a4
-	str	$a14,[sp,#20]		@ tab[5]=a1^a4
-	eor	$a12,$a12,$a4		@ a1^a2^a4
-	str	$a0,[sp,#24]		@ tab[6]=a2^a4
-	and	$i0,$mask,$b,lsl#2
-	str	$a12,[sp,#28]		@ tab[7]=a1^a2^a4
-
-	and	$i1,$mask,$b,lsr#1
-	ldr	$lo,[sp,$i0]		@ tab[b       & 0x7]
-	and	$i0,$mask,$b,lsr#4
-	ldr	$t1,[sp,$i1]		@ tab[b >>  3 & 0x7]
-	and	$i1,$mask,$b,lsr#7
-	ldr	$t0,[sp,$i0]		@ tab[b >>  6 & 0x7]
-	eor	$lo,$lo,$t1,lsl#3	@ stall
-	mov	$hi,$t1,lsr#29
-	ldr	$t1,[sp,$i1]		@ tab[b >>  9 & 0x7]
-
-	and	$i0,$mask,$b,lsr#10
-	eor	$lo,$lo,$t0,lsl#6
-	eor	$hi,$hi,$t0,lsr#26
-	ldr	$t0,[sp,$i0]		@ tab[b >> 12 & 0x7]
-
-	and	$i1,$mask,$b,lsr#13
-	eor	$lo,$lo,$t1,lsl#9
-	eor	$hi,$hi,$t1,lsr#23
-	ldr	$t1,[sp,$i1]		@ tab[b >> 15 & 0x7]
-
-	and	$i0,$mask,$b,lsr#16
-	eor	$lo,$lo,$t0,lsl#12
-	eor	$hi,$hi,$t0,lsr#20
-	ldr	$t0,[sp,$i0]		@ tab[b >> 18 & 0x7]
-
-	and	$i1,$mask,$b,lsr#19
-	eor	$lo,$lo,$t1,lsl#15
-	eor	$hi,$hi,$t1,lsr#17
-	ldr	$t1,[sp,$i1]		@ tab[b >> 21 & 0x7]
-
-	and	$i0,$mask,$b,lsr#22
-	eor	$lo,$lo,$t0,lsl#18
-	eor	$hi,$hi,$t0,lsr#14
-	ldr	$t0,[sp,$i0]		@ tab[b >> 24 & 0x7]
-
-	and	$i1,$mask,$b,lsr#25
-	eor	$lo,$lo,$t1,lsl#21
-	eor	$hi,$hi,$t1,lsr#11
-	ldr	$t1,[sp,$i1]		@ tab[b >> 27 & 0x7]
-
-	tst	$a,#1<<30
-	and	$i0,$mask,$b,lsr#28
-	eor	$lo,$lo,$t0,lsl#24
-	eor	$hi,$hi,$t0,lsr#8
-	ldr	$t0,[sp,$i0]		@ tab[b >> 30      ]
-
-	eorne	$lo,$lo,$b,lsl#30
-	eorne	$hi,$hi,$b,lsr#2
-	tst	$a,#1<<31
-	eor	$lo,$lo,$t1,lsl#27
-	eor	$hi,$hi,$t1,lsr#5
-	eorne	$lo,$lo,$b,lsl#31
-	eorne	$hi,$hi,$b,lsr#1
-	eor	$lo,$lo,$t0,lsl#30
-	eor	$hi,$hi,$t0,lsr#2
-
-	mov	pc,lr
-.size	mul_1x1_ialu,.-mul_1x1_ialu
-___
-################
-# void	bn_GF2m_mul_2x2(BN_ULONG *r,
-#	BN_ULONG a1,BN_ULONG a0,
-#	BN_ULONG b1,BN_ULONG b0);	# r[3..0]=a1a0·b1b0
-
-($A1,$B1,$A0,$B0,$A1B1,$A0B0)=map("d$_",(18..23));
-
-$code.=<<___;
-.global	bn_GF2m_mul_2x2
-.type	bn_GF2m_mul_2x2,%function
-.align	5
-bn_GF2m_mul_2x2:
-#if __ARM_ARCH__>=7
-	ldr	r12,.LOPENSSL_armcap
-.Lpic:	ldr	r12,[pc,r12]
-	tst	r12,#1
-	beq	.Lialu
-
-	veor	$A1,$A1
-	vmov.32	$B1,r3,r3		@ two copies of b1
-	vmov.32	${A1}[0],r1		@ a1
-
-	veor	$A0,$A0
-	vld1.32	${B0}[],[sp,:32]	@ two copies of b0
-	vmov.32	${A0}[0],r2		@ a0
-	mov	r12,lr
-
-	vmov	d16,$A1
-	vmov	d17,$B1
-	bl	mul_1x1_neon		@ a1·b1
-	vmov	$A1B1,d0
-
-	vmov	d16,$A0
-	vmov	d17,$B0
-	bl	mul_1x1_neon		@ a0·b0
-	vmov	$A0B0,d0
-
-	veor	d16,$A0,$A1
-	veor	d17,$B0,$B1
-	veor	$A0,$A0B0,$A1B1
-	bl	mul_1x1_neon		@ (a0+a1)·(b0+b1)
-
-	veor	d0,$A0			@ (a0+a1)·(b0+b1)-a0·b0-a1·b1
-	vshl.u64 d1,d0,#32
-	vshr.u64 d0,d0,#32
-	veor	$A0B0,d1
-	veor	$A1B1,d0
-	vst1.32	{${A0B0}[0]},[r0,:32]!
-	vst1.32	{${A0B0}[1]},[r0,:32]!
-	vst1.32	{${A1B1}[0]},[r0,:32]!
-	vst1.32	{${A1B1}[1]},[r0,:32]
-	bx	r12
-.align	4
-.Lialu:
-#endif
-___
-$ret="r10";	# reassigned 1st argument
-$code.=<<___;
-	stmdb	sp!,{r4-r10,lr}
-	mov	$ret,r0			@ reassign 1st argument
-	mov	$b,r3			@ $b=b1
-	ldr	r3,[sp,#32]		@ load b0
-	mov	$mask,#7<<2
-	sub	sp,sp,#32		@ allocate tab[8]
-
-	bl	mul_1x1_ialu		@ a1·b1
-	str	$lo,[$ret,#8]
-	str	$hi,[$ret,#12]
-
-	eor	$b,$b,r3		@ flip b0 and b1
-	 eor	$a,$a,r2		@ flip a0 and a1
-	eor	r3,r3,$b
-	 eor	r2,r2,$a
-	eor	$b,$b,r3
-	 eor	$a,$a,r2
-	bl	mul_1x1_ialu		@ a0·b0
-	str	$lo,[$ret]
-	str	$hi,[$ret,#4]
-
-	eor	$a,$a,r2
-	eor	$b,$b,r3
-	bl	mul_1x1_ialu		@ (a1+a0)·(b1+b0)
-___
-@r=map("r$_",(6..9));
-$code.=<<___;
-	ldmia	$ret,{@r[0]-@r[3]}
-	eor	$lo,$lo,$hi
-	eor	$hi,$hi,@r[1]
-	eor	$lo,$lo,@r[0]
-	eor	$hi,$hi,@r[2]
-	eor	$lo,$lo,@r[3]
-	eor	$hi,$hi,@r[3]
-	str	$hi,[$ret,#8]
-	eor	$lo,$lo,$hi
-	add	sp,sp,#32		@ destroy tab[8]
-	str	$lo,[$ret,#4]
-
-#if __ARM_ARCH__>=5
-	ldmia	sp!,{r4-r10,pc}
-#else
-	ldmia	sp!,{r4-r10,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	bx	lr			@ interoperable with Thumb ISA:-)
-#endif
-.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
-#if __ARM_ARCH__>=7
-.align	5
-.LOPENSSL_armcap:
-.word	OPENSSL_armcap_P-(.Lpic+8)
-#endif
-.asciz	"GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
-.align	5
-
-.comm	OPENSSL_armcap_P,4,4
-___
-
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;    # make it possible to compile with -march=armv4
-print $code;
-close STDOUT;   # enforce flush
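Stripped of the NEON path and the 3-bit lookup table, what this module computes is plain carry-less multiplication in GF(2)[x]. A bit-serial reference sketch, illustrative only and nowhere near the deleted code's speed:

#include <stdint.h>
#include <stdio.h>

/* Carry-less 32x32 -> 64-bit multiply: XOR a shifted copy of a into the
 * accumulator for every set bit of b; mul_1x1_ialu above does the same
 * thing three bits of b at a time via its stack table. */
static uint64_t clmul32(uint32_t a, uint32_t b)
{
    uint64_t acc = 0;
    int i;

    for (i = 0; i < 32; i++)
        if (b & (1u << i))
            acc ^= (uint64_t)a << i;
    return acc;
}

int main(void)
{
    /* (x + 1)^2 = x^2 + 1 over GF(2): the middle terms cancel, no carries. */
    printf("0x%llx\n", (unsigned long long)clmul32(3, 3)); /* prints 0x5 */
    return 0;
}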

+ 0 - 204
drivers/builtin_openssl2/crypto/bn/asm/armv4-mont.pl

@@ -1,204 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# January 2007.
-
-# Montgomery multiplication for ARMv4.
-#
-# Performance improvement naturally varies among CPU implementations
-# and compilers. The code was observed to provide +65-35% improvement
-# [depending on key length, less for longer keys] on ARM920T, and
-# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code
-# base and compiler generated code with in-lined umull and even umlal
-# instructions. The latter means that this code didn't really have an 
-# "advantage" of utilizing some "secret" instruction.
-#
-# The code is interoperable with Thumb ISA and is rather compact, less
-# than 1/2KB. Windows CE port would be trivial, as it's exclusively
-# about decorations, ABI and instruction syntax are identical.
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-$num="r0";	# starts as num argument, but holds &tp[num-1]
-$ap="r1";
-$bp="r2"; $bi="r2"; $rp="r2";
-$np="r3";
-$tp="r4";
-$aj="r5";
-$nj="r6";
-$tj="r7";
-$n0="r8";
-###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
-$alo="r10";	# sl, gcc uses it to keep @GOT
-$ahi="r11";	# fp
-$nlo="r12";	# ip
-###########	# r13 is stack pointer
-$nhi="r14";	# lr
-###########	# r15 is program counter
-
-#### argument block layout relative to &tp[num-1], a.k.a. $num
-$_rp="$num,#12*4";
-# ap permanently resides in r1
-$_bp="$num,#13*4";
-# np permanently resides in r3
-$_n0="$num,#14*4";
-$_num="$num,#15*4";	$_bpend=$_num;
-
-$code=<<___;
-.text
-
-.global	bn_mul_mont
-.type	bn_mul_mont,%function
-
-.align	2
-bn_mul_mont:
-	stmdb	sp!,{r0,r2}		@ sp points at argument block
-	ldr	$num,[sp,#3*4]		@ load num
-	cmp	$num,#2
-	movlt	r0,#0
-	addlt	sp,sp,#2*4
-	blt	.Labrt
-
-	stmdb	sp!,{r4-r12,lr}		@ save 10 registers
-
-	mov	$num,$num,lsl#2		@ rescale $num for byte count
-	sub	sp,sp,$num		@ alloca(4*num)
-	sub	sp,sp,#4		@ +extra dword
-	sub	$num,$num,#4		@ "num=num-1"
-	add	$tp,$bp,$num		@ &bp[num-1]
-
-	add	$num,sp,$num		@ $num to point at &tp[num-1]
-	ldr	$n0,[$_n0]		@ &n0
-	ldr	$bi,[$bp]		@ bp[0]
-	ldr	$aj,[$ap],#4		@ ap[0],ap++
-	ldr	$nj,[$np],#4		@ np[0],np++
-	ldr	$n0,[$n0]		@ *n0
-	str	$tp,[$_bpend]		@ save &bp[num]
-
-	umull	$alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
-	str	$n0,[$_n0]		@ save n0 value
-	mul	$n0,$alo,$n0		@ "tp[0]"*n0
-	mov	$nlo,#0
-	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"t[0]"
-	mov	$tp,sp
-
-.L1st:
-	ldr	$aj,[$ap],#4		@ ap[j],ap++
-	mov	$alo,$ahi
-	ldr	$nj,[$np],#4		@ np[j],np++
-	mov	$ahi,#0
-	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
-	mov	$nhi,#0
-	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
-	adds	$nlo,$nlo,$alo
-	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
-	adc	$nlo,$nhi,#0
-	cmp	$tp,$num
-	bne	.L1st
-
-	adds	$nlo,$nlo,$ahi
-	ldr	$tp,[$_bp]		@ restore bp
-	mov	$nhi,#0
-	ldr	$n0,[$_n0]		@ restore n0
-	adc	$nhi,$nhi,#0
-	str	$nlo,[$num]		@ tp[num-1]=
-	str	$nhi,[$num,#4]		@ tp[num]=
-
-.Louter:
-	sub	$tj,$num,sp		@ "original" $num-1 value
-	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
-	ldr	$bi,[$tp,#4]!		@ *(++bp)
-	sub	$np,$np,$tj		@ "rewind" np to &np[1]
-	ldr	$aj,[$ap,#-4]		@ ap[0]
-	ldr	$alo,[sp]		@ tp[0]
-	ldr	$nj,[$np,#-4]		@ np[0]
-	ldr	$tj,[sp,#4]		@ tp[1]
-
-	mov	$ahi,#0
-	umlal	$alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
-	str	$tp,[$_bp]		@ save bp
-	mul	$n0,$alo,$n0
-	mov	$nlo,#0
-	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
-	mov	$tp,sp
-
-.Linner:
-	ldr	$aj,[$ap],#4		@ ap[j],ap++
-	adds	$alo,$ahi,$tj		@ +=tp[j]
-	ldr	$nj,[$np],#4		@ np[j],np++
-	mov	$ahi,#0
-	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
-	mov	$nhi,#0
-	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
-	adc	$ahi,$ahi,#0
-	ldr	$tj,[$tp,#8]		@ tp[j+1]
-	adds	$nlo,$nlo,$alo
-	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
-	adc	$nlo,$nhi,#0
-	cmp	$tp,$num
-	bne	.Linner
-
-	adds	$nlo,$nlo,$ahi
-	mov	$nhi,#0
-	ldr	$tp,[$_bp]		@ restore bp
-	adc	$nhi,$nhi,#0
-	ldr	$n0,[$_n0]		@ restore n0
-	adds	$nlo,$nlo,$tj
-	ldr	$tj,[$_bpend]		@ restore &bp[num]
-	adc	$nhi,$nhi,#0
-	str	$nlo,[$num]		@ tp[num-1]=
-	str	$nhi,[$num,#4]		@ tp[num]=
-
-	cmp	$tp,$tj
-	bne	.Louter
-
-	ldr	$rp,[$_rp]		@ pull rp
-	add	$num,$num,#4		@ $num to point at &tp[num]
-	sub	$aj,$num,sp		@ "original" num value
-	mov	$tp,sp			@ "rewind" $tp
-	mov	$ap,$tp			@ "borrow" $ap
-	sub	$np,$np,$aj		@ "rewind" $np to &np[0]
-
-	subs	$tj,$tj,$tj		@ "clear" carry flag
-.Lsub:	ldr	$tj,[$tp],#4
-	ldr	$nj,[$np],#4
-	sbcs	$tj,$tj,$nj		@ tp[j]-np[j]
-	str	$tj,[$rp],#4		@ rp[j]=
-	teq	$tp,$num		@ preserve carry
-	bne	.Lsub
-	sbcs	$nhi,$nhi,#0		@ upmost carry
-	mov	$tp,sp			@ "rewind" $tp
-	sub	$rp,$rp,$aj		@ "rewind" $rp
-
-	and	$ap,$tp,$nhi
-	bic	$np,$rp,$nhi
-	orr	$ap,$ap,$np		@ ap=borrow?tp:rp
-
-.Lcopy:	ldr	$tj,[$ap],#4		@ copy or in-place refresh
-	str	sp,[$tp],#4		@ zap tp
-	str	$tj,[$rp],#4
-	cmp	$tp,$num
-	bne	.Lcopy
-
-	add	sp,$num,#4		@ skip over tp[num+1]
-	ldmia	sp!,{r4-r12,lr}		@ restore registers
-	add	sp,sp,#2*4		@ skip over {r0,r2}
-	mov	r0,#1
-.Labrt:	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	bx	lr			@ interoperable with Thumb ISA:-)
-.size	bn_mul_mont,.-bn_mul_mont
-.asciz	"Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
-.align	2
-___
-
-$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
-print $code;
-close STDOUT;

+ 0 - 774
drivers/builtin_openssl2/crypto/bn/asm/bn-586.pl

@@ -1,774 +0,0 @@
-#!/usr/local/bin/perl
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],$0);
-
-$sse2=0;
-for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
-
-&external_label("OPENSSL_ia32cap_P") if ($sse2);
-
-&bn_mul_add_words("bn_mul_add_words");
-&bn_mul_words("bn_mul_words");
-&bn_sqr_words("bn_sqr_words");
-&bn_div_words("bn_div_words");
-&bn_add_words("bn_add_words");
-&bn_sub_words("bn_sub_words");
-&bn_sub_part_words("bn_sub_part_words");
-
-&asm_finish();
-
-sub bn_mul_add_words
-	{
-	local($name)=@_;
-
-	&function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
-
-	$r="eax";
-	$a="edx";
-	$c="ecx";
-
-	if ($sse2) {
-		&picmeup("eax","OPENSSL_ia32cap_P");
-		&bt(&DWP(0,"eax"),26);
-		&jnc(&label("maw_non_sse2"));
-
-		&mov($r,&wparam(0));
-		&mov($a,&wparam(1));
-		&mov($c,&wparam(2));
-		&movd("mm0",&wparam(3));	# mm0 = w
-		&pxor("mm1","mm1");		# mm1 = carry_in
-		&jmp(&label("maw_sse2_entry"));
-		
-	&set_label("maw_sse2_unrolled",16);
-		&movd("mm3",&DWP(0,$r,"",0));	# mm3 = r[0]
-		&paddq("mm1","mm3");		# mm1 = carry_in + r[0]
-		&movd("mm2",&DWP(0,$a,"",0));	# mm2 = a[0]
-		&pmuludq("mm2","mm0");		# mm2 = w*a[0]
-		&movd("mm4",&DWP(4,$a,"",0));	# mm4 = a[1]
-		&pmuludq("mm4","mm0");		# mm4 = w*a[1]
-		&movd("mm6",&DWP(8,$a,"",0));	# mm6 = a[2]
-		&pmuludq("mm6","mm0");		# mm6 = w*a[2]
-		&movd("mm7",&DWP(12,$a,"",0));	# mm7 = a[3]
-		&pmuludq("mm7","mm0");		# mm7 = w*a[3]
-		&paddq("mm1","mm2");		# mm1 = carry_in + r[0] + w*a[0]
-		&movd("mm3",&DWP(4,$r,"",0));	# mm3 = r[1]
-		&paddq("mm3","mm4");		# mm3 = r[1] + w*a[1]
-		&movd("mm5",&DWP(8,$r,"",0));	# mm5 = r[2]
-		&paddq("mm5","mm6");		# mm5 = r[2] + w*a[2]
-		&movd("mm4",&DWP(12,$r,"",0));	# mm4 = r[3]
-		&paddq("mm7","mm4");		# mm7 = r[3] + w*a[3]
-		&movd(&DWP(0,$r,"",0),"mm1");
-		&movd("mm2",&DWP(16,$a,"",0));	# mm2 = a[4]
-		&pmuludq("mm2","mm0");		# mm2 = w*a[4]
-		&psrlq("mm1",32);		# mm1 = carry0
-		&movd("mm4",&DWP(20,$a,"",0));	# mm4 = a[5]
-		&pmuludq("mm4","mm0");		# mm4 = w*a[5]
-		&paddq("mm1","mm3");		# mm1 = carry0 + r[1] + w*a[1]
-		&movd("mm6",&DWP(24,$a,"",0));	# mm6 = a[6]
-		&pmuludq("mm6","mm0");		# mm6 = w*a[6]
-		&movd(&DWP(4,$r,"",0),"mm1");
-		&psrlq("mm1",32);		# mm1 = carry1
-		&movd("mm3",&DWP(28,$a,"",0));	# mm3 = a[7]
-		&add($a,32);
-		&pmuludq("mm3","mm0");		# mm3 = w*a[7]
-		&paddq("mm1","mm5");		# mm1 = carry1 + r[2] + w*a[2]
-		&movd("mm5",&DWP(16,$r,"",0));	# mm5 = r[4]
-		&paddq("mm2","mm5");		# mm2 = r[4] + w*a[4]
-		&movd(&DWP(8,$r,"",0),"mm1");
-		&psrlq("mm1",32);		# mm1 = carry2
-		&paddq("mm1","mm7");		# mm1 = carry2 + r[3] + w*a[3]
-		&movd("mm5",&DWP(20,$r,"",0));	# mm5 = r[5]
-		&paddq("mm4","mm5");		# mm4 = r[5] + w*a[5]
-		&movd(&DWP(12,$r,"",0),"mm1");
-		&psrlq("mm1",32);		# mm1 = carry3
-		&paddq("mm1","mm2");		# mm1 = carry3 + r[4] + w*a[4]
-		&movd("mm5",&DWP(24,$r,"",0));	# mm5 = r[6]
-		&paddq("mm6","mm5");		# mm6 = r[6] + w*a[6]
-		&movd(&DWP(16,$r,"",0),"mm1");
-		&psrlq("mm1",32);		# mm1 = carry4
-		&paddq("mm1","mm4");		# mm1 = carry4 + r[5] + w*a[5]
-		&movd("mm5",&DWP(28,$r,"",0));	# mm5 = r[7]
-		&paddq("mm3","mm5");		# mm3 = r[7] + w*a[7]
-		&movd(&DWP(20,$r,"",0),"mm1");
-		&psrlq("mm1",32);		# mm1 = carry5
-		&paddq("mm1","mm6");		# mm1 = carry5 + r[6] + w*a[6]
-		&movd(&DWP(24,$r,"",0),"mm1");
-		&psrlq("mm1",32);		# mm1 = carry6
-		&paddq("mm1","mm3");		# mm1 = carry6 + r[7] + w*a[7]
-		&movd(&DWP(28,$r,"",0),"mm1");
-		&lea($r,&DWP(32,$r));
-		&psrlq("mm1",32);		# mm1 = carry_out
-
-		&sub($c,8);
-		&jz(&label("maw_sse2_exit"));
-	&set_label("maw_sse2_entry");
-		&test($c,0xfffffff8);
-		&jnz(&label("maw_sse2_unrolled"));
-
-	&set_label("maw_sse2_loop",4);
-		&movd("mm2",&DWP(0,$a));	# mm2 = a[i]
-		&movd("mm3",&DWP(0,$r));	# mm3 = r[i]
-		&pmuludq("mm2","mm0");		# a[i] *= w
-		&lea($a,&DWP(4,$a));
-		&paddq("mm1","mm3");		# carry += r[i]
-		&paddq("mm1","mm2");		# carry += a[i]*w
-		&movd(&DWP(0,$r),"mm1");	# r[i] = carry_low
-		&sub($c,1);
-		&psrlq("mm1",32);		# carry = carry_high
-		&lea($r,&DWP(4,$r));
-		&jnz(&label("maw_sse2_loop"));
-	&set_label("maw_sse2_exit");
-		&movd("eax","mm1");		# c = carry_out
-		&emms();
-		&ret();
-
-	&set_label("maw_non_sse2",16);
-	}
-
-	# function_begin prologue
-	&push("ebp");
-	&push("ebx");
-	&push("esi");
-	&push("edi");
-
-	&comment("");
-	$Low="eax";
-	$High="edx";
-	$a="ebx";
-	$w="ebp";
-	$r="edi";
-	$c="esi";
-
-	&xor($c,$c);		# clear carry
-	&mov($r,&wparam(0));	#
-
-	&mov("ecx",&wparam(2));	#
-	&mov($a,&wparam(1));	#
-
-	&and("ecx",0xfffffff8);	# num / 8
-	&mov($w,&wparam(3));	#
-
-	&push("ecx");		# Up the stack for a tmp variable
-
-	&jz(&label("maw_finish"));
-
-	&set_label("maw_loop",16);
-
-	for ($i=0; $i<32; $i+=4)
-		{
-		&comment("Round $i");
-
-		 &mov("eax",&DWP($i,$a)); 	# *a
-		&mul($w);			# *a * w
-		&add("eax",$c);			# L(t)+= c
-		&adc("edx",0);			# H(t)+=carry
-		 &add("eax",&DWP($i,$r));	# L(t)+= *r
-		&adc("edx",0);			# H(t)+=carry
-		 &mov(&DWP($i,$r),"eax");	# *r= L(t);
-		&mov($c,"edx");			# c=  H(t);
-		}
-
-	&comment("");
-	&sub("ecx",8);
-	&lea($a,&DWP(32,$a));
-	&lea($r,&DWP(32,$r));
-	&jnz(&label("maw_loop"));
-
-	&set_label("maw_finish",0);
-	&mov("ecx",&wparam(2));	# get num
-	&and("ecx",7);
-	&jnz(&label("maw_finish2"));	# helps branch prediction
-	&jmp(&label("maw_end"));
-
-	&set_label("maw_finish2",1);
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		 &mov("eax",&DWP($i*4,$a));	# *a
-		&mul($w);			# *a * w
-		&add("eax",$c);			# L(t)+=c
-		&adc("edx",0);			# H(t)+=carry
-		 &add("eax",&DWP($i*4,$r));	# L(t)+= *r
-		&adc("edx",0);			# H(t)+=carry
-		 &dec("ecx") if ($i != 7-1);
-		&mov(&DWP($i*4,$r),"eax");	# *r= L(t);
-		 &mov($c,"edx");		# c=  H(t);
-		&jz(&label("maw_end")) if ($i != 7-1);
-		}
-	&set_label("maw_end",0);
-	&mov("eax",$c);
-
-	&pop("ecx");	# clear variable from
-
-	&function_end($name);
-	}
-
-sub bn_mul_words
-	{
-	local($name)=@_;
-
-	&function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
-
-	$r="eax";
-	$a="edx";
-	$c="ecx";
-
-	if ($sse2) {
-		&picmeup("eax","OPENSSL_ia32cap_P");
-		&bt(&DWP(0,"eax"),26);
-		&jnc(&label("mw_non_sse2"));
-
-		&mov($r,&wparam(0));
-		&mov($a,&wparam(1));
-		&mov($c,&wparam(2));
-		&movd("mm0",&wparam(3));	# mm0 = w
-		&pxor("mm1","mm1");		# mm1 = carry = 0
-
-	&set_label("mw_sse2_loop",16);
-		&movd("mm2",&DWP(0,$a));	# mm2 = a[i]
-		&pmuludq("mm2","mm0");		# a[i] *= w
-		&lea($a,&DWP(4,$a));
-		&paddq("mm1","mm2");		# carry += a[i]*w
-		&movd(&DWP(0,$r),"mm1");	# r[i] = carry_low
-		&sub($c,1);
-		&psrlq("mm1",32);		# carry = carry_high
-		&lea($r,&DWP(4,$r));
-		&jnz(&label("mw_sse2_loop"));
-
-		&movd("eax","mm1");		# return carry
-		&emms();
-		&ret();
-	&set_label("mw_non_sse2",16);
-	}
-
-	# function_begin prologue
-	&push("ebp");
-	&push("ebx");
-	&push("esi");
-	&push("edi");
-
-	&comment("");
-	$Low="eax";
-	$High="edx";
-	$a="ebx";
-	$w="ecx";
-	$r="edi";
-	$c="esi";
-	$num="ebp";
-
-	&xor($c,$c);		# clear carry
-	&mov($r,&wparam(0));	#
-	&mov($a,&wparam(1));	#
-	&mov($num,&wparam(2));	#
-	&mov($w,&wparam(3));	#
-
-	&and($num,0xfffffff8);	# num / 8
-	&jz(&label("mw_finish"));
-
-	&set_label("mw_loop",0);
-	for ($i=0; $i<32; $i+=4)
-		{
-		&comment("Round $i");
-
-		 &mov("eax",&DWP($i,$a,"",0)); 	# *a
-		&mul($w);			# *a * w
-		&add("eax",$c);			# L(t)+=c
-		 # XXX
-
-		&adc("edx",0);			# H(t)+=carry
-		 &mov(&DWP($i,$r,"",0),"eax");	# *r= L(t);
-
-		&mov($c,"edx");			# c=  H(t);
-		}
-
-	&comment("");
-	&add($a,32);
-	&add($r,32);
-	&sub($num,8);
-	&jz(&label("mw_finish"));
-	&jmp(&label("mw_loop"));
-
-	&set_label("mw_finish",0);
-	&mov($num,&wparam(2));	# get num
-	&and($num,7);
-	&jnz(&label("mw_finish2"));
-	&jmp(&label("mw_end"));
-
-	&set_label("mw_finish2",1);
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		 &mov("eax",&DWP($i*4,$a,"",0));# *a
-		&mul($w);			# *a * w
-		&add("eax",$c);			# L(t)+=c
-		 # XXX
-		&adc("edx",0);			# H(t)+=carry
-		 &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t);
-		&mov($c,"edx");			# c=  H(t);
-		 &dec($num) if ($i != 7-1);
-		&jz(&label("mw_end")) if ($i != 7-1);
-		}
-	&set_label("mw_end",0);
-	&mov("eax",$c);
-
-	&function_end($name);
-	}
-
-sub bn_sqr_words
-	{
-	local($name)=@_;
-
-	&function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
-
-	$r="eax";
-	$a="edx";
-	$c="ecx";
-
-	if ($sse2) {
-		&picmeup("eax","OPENSSL_ia32cap_P");
-		&bt(&DWP(0,"eax"),26);
-		&jnc(&label("sqr_non_sse2"));
-
-		&mov($r,&wparam(0));
-		&mov($a,&wparam(1));
-		&mov($c,&wparam(2));
-
-	&set_label("sqr_sse2_loop",16);
-		&movd("mm0",&DWP(0,$a));	# mm0 = a[i]
-		&pmuludq("mm0","mm0");		# a[i] *= a[i]
-		&lea($a,&DWP(4,$a));		# a++
-		&movq(&QWP(0,$r),"mm0");	# r[i] = a[i]*a[i]
-		&sub($c,1);
-		&lea($r,&DWP(8,$r));		# r += 2
-		&jnz(&label("sqr_sse2_loop"));
-
-		&emms();
-		&ret();
-	&set_label("sqr_non_sse2",16);
-	}
-
-	# function_begin prologue
-	&push("ebp");
-	&push("ebx");
-	&push("esi");
-	&push("edi");
-
-	&comment("");
-	$r="esi";
-	$a="edi";
-	$num="ebx";
-
-	&mov($r,&wparam(0));	#
-	&mov($a,&wparam(1));	#
-	&mov($num,&wparam(2));	#
-
-	&and($num,0xfffffff8);	# num / 8
-	&jz(&label("sw_finish"));
-
-	&set_label("sw_loop",0);
-	for ($i=0; $i<32; $i+=4)
-		{
-		&comment("Round $i");
-		&mov("eax",&DWP($i,$a,"",0)); 	# *a
-		 # XXX
-		&mul("eax");			# *a * *a
-		&mov(&DWP($i*2,$r,"",0),"eax");	#
-		 &mov(&DWP($i*2+4,$r,"",0),"edx");#
-		}
-
-	&comment("");
-	&add($a,32);
-	&add($r,64);
-	&sub($num,8);
-	&jnz(&label("sw_loop"));
-
-	&set_label("sw_finish",0);
-	&mov($num,&wparam(2));	# get num
-	&and($num,7);
-	&jz(&label("sw_end"));
-
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		&mov("eax",&DWP($i*4,$a,"",0));	# *a
-		 # XXX
-		&mul("eax");			# *a * *a
-		&mov(&DWP($i*8,$r,"",0),"eax");	#
-		 &dec($num) if ($i != 7-1);
-		&mov(&DWP($i*8+4,$r,"",0),"edx");
-		 &jz(&label("sw_end")) if ($i != 7-1);
-		}
-	&set_label("sw_end",0);
-
-	&function_end($name);
-	}
-
-sub bn_div_words
-	{
-	local($name)=@_;
-
-	&function_begin_B($name,"");
-	&mov("edx",&wparam(0));	#
-	&mov("eax",&wparam(1));	#
-	&mov("ecx",&wparam(2));	#
-	&div("ecx");
-	&ret();
-	&function_end_B($name);
-	}
-
-sub bn_add_words
-	{
-	local($name)=@_;
-
-	&function_begin($name,"");
-
-	&comment("");
-	$a="esi";
-	$b="edi";
-	$c="eax";
-	$r="ebx";
-	$tmp1="ecx";
-	$tmp2="edx";
-	$num="ebp";
-
-	&mov($r,&wparam(0));	# get r
-	 &mov($a,&wparam(1));	# get a
-	&mov($b,&wparam(2));	# get b
-	 &mov($num,&wparam(3));	# get num
-	&xor($c,$c);		# clear carry
-	 &and($num,0xfffffff8);	# num / 8
-
-	&jz(&label("aw_finish"));
-
-	&set_label("aw_loop",0);
-	for ($i=0; $i<8; $i++)
-		{
-		&comment("Round $i");
-
-		&mov($tmp1,&DWP($i*4,$a,"",0)); 	# *a
-		 &mov($tmp2,&DWP($i*4,$b,"",0)); 	# *b
-		&add($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &add($tmp1,$tmp2);
-		&adc($c,0);
-		 &mov(&DWP($i*4,$r,"",0),$tmp1); 	# *r
-		}
-
-	&comment("");
-	&add($a,32);
-	 &add($b,32);
-	&add($r,32);
-	 &sub($num,8);
-	&jnz(&label("aw_loop"));
-
-	&set_label("aw_finish",0);
-	&mov($num,&wparam(3));	# get num
-	&and($num,7);
-	 &jz(&label("aw_end"));
-
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
-		 &mov($tmp2,&DWP($i*4,$b,"",0));# *b
-		&add($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &add($tmp1,$tmp2);
-		&adc($c,0);
-		 &dec($num) if ($i != 6);
-		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
-		 &jz(&label("aw_end")) if ($i != 6);
-		}
-	&set_label("aw_end",0);
-
-#	&mov("eax",$c);		# $c is "eax"
-
-	&function_end($name);
-	}
-
-sub bn_sub_words
-	{
-	local($name)=@_;
-
-	&function_begin($name,"");
-
-	&comment("");
-	$a="esi";
-	$b="edi";
-	$c="eax";
-	$r="ebx";
-	$tmp1="ecx";
-	$tmp2="edx";
-	$num="ebp";
-
-	&mov($r,&wparam(0));	# get r
-	 &mov($a,&wparam(1));	# get a
-	&mov($b,&wparam(2));	# get b
-	 &mov($num,&wparam(3));	# get num
-	&xor($c,$c);		# clear carry
-	 &and($num,0xfffffff8);	# num / 8
-
-	&jz(&label("aw_finish"));
-
-	&set_label("aw_loop",0);
-	for ($i=0; $i<8; $i++)
-		{
-		&comment("Round $i");
-
-		&mov($tmp1,&DWP($i*4,$a,"",0)); 	# *a
-		 &mov($tmp2,&DWP($i*4,$b,"",0)); 	# *b
-		&sub($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &sub($tmp1,$tmp2);
-		&adc($c,0);
-		 &mov(&DWP($i*4,$r,"",0),$tmp1); 	# *r
-		}
-
-	&comment("");
-	&add($a,32);
-	 &add($b,32);
-	&add($r,32);
-	 &sub($num,8);
-	&jnz(&label("aw_loop"));
-
-	&set_label("aw_finish",0);
-	&mov($num,&wparam(3));	# get num
-	&and($num,7);
-	 &jz(&label("aw_end"));
-
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
-		 &mov($tmp2,&DWP($i*4,$b,"",0));# *b
-		&sub($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &sub($tmp1,$tmp2);
-		&adc($c,0);
-		 &dec($num) if ($i != 6);
-		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
-		 &jz(&label("aw_end")) if ($i != 6);
-		}
-	&set_label("aw_end",0);
-
-#	&mov("eax",$c);		# $c is "eax"
-
-	&function_end($name);
-	}
-
-sub bn_sub_part_words
-	{
-	local($name)=@_;
-
-	&function_begin($name,"");
-
-	&comment("");
-	$a="esi";
-	$b="edi";
-	$c="eax";
-	$r="ebx";
-	$tmp1="ecx";
-	$tmp2="edx";
-	$num="ebp";
-
-	&mov($r,&wparam(0));	# get r
-	 &mov($a,&wparam(1));	# get a
-	&mov($b,&wparam(2));	# get b
-	 &mov($num,&wparam(3));	# get num
-	&xor($c,$c);		# clear carry
-	 &and($num,0xfffffff8);	# num / 8
-
-	&jz(&label("aw_finish"));
-
-	&set_label("aw_loop",0);
-	for ($i=0; $i<8; $i++)
-		{
-		&comment("Round $i");
-
-		&mov($tmp1,&DWP($i*4,$a,"",0)); 	# *a
-		 &mov($tmp2,&DWP($i*4,$b,"",0)); 	# *b
-		&sub($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &sub($tmp1,$tmp2);
-		&adc($c,0);
-		 &mov(&DWP($i*4,$r,"",0),$tmp1); 	# *r
-		}
-
-	&comment("");
-	&add($a,32);
-	 &add($b,32);
-	&add($r,32);
-	 &sub($num,8);
-	&jnz(&label("aw_loop"));
-
-	&set_label("aw_finish",0);
-	&mov($num,&wparam(3));	# get num
-	&and($num,7);
-	 &jz(&label("aw_end"));
-
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		&mov($tmp1,&DWP(0,$a,"",0));	# *a
-		 &mov($tmp2,&DWP(0,$b,"",0));# *b
-		&sub($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &sub($tmp1,$tmp2);
-		&adc($c,0);
-		&mov(&DWP(0,$r,"",0),$tmp1);	# *r
-		&add($a, 4);
-		&add($b, 4);
-		&add($r, 4);
-		 &dec($num) if ($i != 6);
-		 &jz(&label("aw_end")) if ($i != 6);
-		}
-	&set_label("aw_end",0);
-
-	&cmp(&wparam(4),0);
-	&je(&label("pw_end"));
-
-	&mov($num,&wparam(4));	# get dl
-	&cmp($num,0);
-	&je(&label("pw_end"));
-	&jge(&label("pw_pos"));
-
-	&comment("pw_neg");
-	&mov($tmp2,0);
-	&sub($tmp2,$num);
-	&mov($num,$tmp2);
-	&and($num,0xfffffff8);	# num / 8
-	&jz(&label("pw_neg_finish"));
-
-	&set_label("pw_neg_loop",0);
-	for ($i=0; $i<8; $i++)
-	{
-	    &comment("dl<0 Round $i");
-
-	    &mov($tmp1,0);
-	    &mov($tmp2,&DWP($i*4,$b,"",0)); 	# *b
-	    &sub($tmp1,$c);
-	    &mov($c,0);
-	    &adc($c,$c);
-	    &sub($tmp1,$tmp2);
-	    &adc($c,0);
-	    &mov(&DWP($i*4,$r,"",0),$tmp1); 	# *r
-	}
-	    
-	&comment("");
-	&add($b,32);
-	&add($r,32);
-	&sub($num,8);
-	&jnz(&label("pw_neg_loop"));
-	    
-	&set_label("pw_neg_finish",0);
-	&mov($tmp2,&wparam(4));	# get dl
-	&mov($num,0);
-	&sub($num,$tmp2);
-	&and($num,7);
-	&jz(&label("pw_end"));
-	    
-	for ($i=0; $i<7; $i++)
-	{
-	    &comment("dl<0 Tail Round $i");
-	    &mov($tmp1,0);
-	    &mov($tmp2,&DWP($i*4,$b,"",0));# *b
-	    &sub($tmp1,$c);
-	    &mov($c,0);
-	    &adc($c,$c);
-	    &sub($tmp1,$tmp2);
-	    &adc($c,0);
-	    &dec($num) if ($i != 6);
-	    &mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
-	    &jz(&label("pw_end")) if ($i != 6);
-	}
-
-	&jmp(&label("pw_end"));
-	
-	&set_label("pw_pos",0);
-	
-	&and($num,0xfffffff8);	# num / 8
-	&jz(&label("pw_pos_finish"));
-
-	&set_label("pw_pos_loop",0);
-
-	for ($i=0; $i<8; $i++)
-	{
-	    &comment("dl>0 Round $i");
-
-	    &mov($tmp1,&DWP($i*4,$a,"",0));	# *a
-	    &sub($tmp1,$c);
-	    &mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
-	    &jnc(&label("pw_nc".$i));
-	}
-	    
-	&comment("");
-	&add($a,32);
-	&add($r,32);
-	&sub($num,8);
-	&jnz(&label("pw_pos_loop"));
-	    
-	&set_label("pw_pos_finish",0);
-	&mov($num,&wparam(4));	# get dl
-	&and($num,7);
-	&jz(&label("pw_end"));
-	    
-	for ($i=0; $i<7; $i++)
-	{
-	    &comment("dl>0 Tail Round $i");
-	    &mov($tmp1,&DWP($i*4,$a,"",0));	# *a
-	    &sub($tmp1,$c);
-	    &mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
-	    &jnc(&label("pw_tail_nc".$i));
-	    &dec($num) if ($i != 6);
-	    &jz(&label("pw_end")) if ($i != 6);
-	}
-	&mov($c,1);
-	&jmp(&label("pw_end"));
-
-	&set_label("pw_nc_loop",0);
-	for ($i=0; $i<8; $i++)
-	{
-	    &mov($tmp1,&DWP($i*4,$a,"",0));	# *a
-	    &mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
-	    &set_label("pw_nc".$i,0);
-	}
-	    
-	&comment("");
-	&add($a,32);
-	&add($r,32);
-	&sub($num,8);
-	&jnz(&label("pw_nc_loop"));
-	    
-	&mov($num,&wparam(4));	# get dl
-	&and($num,7);
-	&jz(&label("pw_nc_end"));
-	    
-	for ($i=0; $i<7; $i++)
-	{
-	    &mov($tmp1,&DWP($i*4,$a,"",0));	# *a
-	    &mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
-	    &set_label("pw_tail_nc".$i,0);
-	    &dec($num) if ($i != 6);
-	    &jz(&label("pw_nc_end")) if ($i != 6);
-	}
-
-	&set_label("pw_nc_end",0);
-	&mov($c,0);
-
-	&set_label("pw_end",0);
-
-#	&mov("eax",$c);		# $c is "eax"
-
-	&function_end($name);
-	}
-
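The routines this generator emitted are short word loops; the central one, bn_mul_add_words, is the multiply-accumulate primitive behind BN multiplication. A portable C rendering of its contract, a sketch with 32-bit words and a 64-bit intermediate rather than OpenSSL's actual bn_asm.c:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* r[i] += a[i] * w plus carry, over num words; returns the final carry.
 * The 64-bit sum cannot overflow: max is (2^32-1)^2 + 2*(2^32-1) = 2^64-1. */
static uint32_t mul_add_words(uint32_t *r, const uint32_t *a, int num,
                              uint32_t w)
{
    uint32_t carry = 0;

    while (num--) {
        uint64_t t = (uint64_t)*a++ * w + *r + carry;

        *r++ = (uint32_t)t;            /* *r = L(t) */
        carry = (uint32_t)(t >> 32);   /* c  = H(t) */
    }
    return carry;
}

int main(void)
{
    uint32_t r[2] = { 0xffffffffu, 0 };
    uint32_t a[2] = { 2, 0 };
    uint32_t c;

    /* 0xffffffff + 2*3 overflows the low word; the carry lands in r[1]. */
    c = mul_add_words(r, a, 2, 3);
    printf("r = {0x%" PRIx32 ", 0x%" PRIx32 "}, carry = %" PRIu32 "\n",
           r[0], r[1], c);             /* r = {0x5, 0x1}, carry = 0 */
    return 0;
}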

+ 0 - 287
drivers/builtin_openssl2/crypto/bn/asm/co-586.pl

@@ -1,287 +0,0 @@
-#!/usr/local/bin/perl
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],$0);
-
-&bn_mul_comba("bn_mul_comba8",8);
-&bn_mul_comba("bn_mul_comba4",4);
-&bn_sqr_comba("bn_sqr_comba8",8);
-&bn_sqr_comba("bn_sqr_comba4",4);
-
-&asm_finish();
-
-sub mul_add_c
-	{
-	local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
-
-	# pos == -1 if eax and edx are pre-loaded, 0 to load from next
-	# words, and 1 if load return value
-
-	&comment("mul a[$ai]*b[$bi]");
-
-	# "eax" and "edx" will always be pre-loaded.
-	# &mov("eax",&DWP($ai*4,$a,"",0)) ;
-	# &mov("edx",&DWP($bi*4,$b,"",0));
-
-	&mul("edx");
-	&add($c0,"eax");
-		 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
-	 &mov("eax",&wparam(0)) if $pos > 0;			# load r[]
-	 ###
-	&adc($c1,"edx");
-		 &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0;	# load next b
-		 &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1;	# load next b
-	 ###
-	&adc($c2,0);
-		 # if pos > 1, it means it is the last loop
-	 &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0;		# save r[];
-	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;		# load next a
-	}
-
-sub sqr_add_c
-	{
-	local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
-
-	# pos == -1 if eax and edx are pre-loaded, 0 to load from next
-	# words, and 1 if load return value
-
-	&comment("sqr a[$ai]*a[$bi]");
-
-	# "eax" and "edx" will always be pre-loaded.
-	# &mov("eax",&DWP($ai*4,$a,"",0)) ;
-	# &mov("edx",&DWP($bi*4,$b,"",0));
-
-	if ($ai == $bi)
-		{ &mul("eax");}
-	else
-		{ &mul("edx");}
-	&add($c0,"eax");
-	 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
-	 ###
-	&adc($c1,"edx");
-	 &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb);
-	 ###
-	&adc($c2,0);
-	 # if pos > 1, it means it is the last loop
-	 &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0;		# save r[];
-	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;		# load next b
-	}
-
-sub sqr_add_c2
-	{
-	local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
-
-	# pos == -1 if eax and edx are pre-loaded, 0 to load from next
-	# words, and 1 if load return value
-
-	&comment("sqr a[$ai]*a[$bi]");
-
-	# "eax" and "edx" will always be pre-loaded.
-	# &mov("eax",&DWP($ai*4,$a,"",0)) ;
-	# &mov("edx",&DWP($bi*4,$a,"",0));
-
-	if ($ai == $bi)
-		{ &mul("eax");}
-	else
-		{ &mul("edx");}
-	&add("eax","eax");
-	 ###
-	&adc("edx","edx");
-	 ###
-	&adc($c2,0);
-	 &add($c0,"eax");
-	&adc($c1,"edx");
-	 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
-	 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next b
-	&adc($c2,0);
-	&mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0;		# save r[];
-	 &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb);
-	 ###
-	}
-
-sub bn_mul_comba
-	{
-	local($name,$num)=@_;
-	local($a,$b,$c0,$c1,$c2);
-	local($i,$as,$ae,$bs,$be,$ai,$bi);
-	local($tot,$end);
-
-	&function_begin_B($name,"");
-
-	$c0="ebx";
-	$c1="ecx";
-	$c2="ebp";
-	$a="esi";
-	$b="edi";
-	
-	$as=0;
-	$ae=0;
-	$bs=0;
-	$be=0;
-	$tot=$num+$num-1;
-
-	&push("esi");
-	 &mov($a,&wparam(1));
-	&push("edi");
-	 &mov($b,&wparam(2));
-	&push("ebp");
-	 &push("ebx");
-
-	&xor($c0,$c0);
-	 &mov("eax",&DWP(0,$a,"",0));	# load the first word 
-	&xor($c1,$c1);
-	 &mov("edx",&DWP(0,$b,"",0));	# load the first second 
-
-	for ($i=0; $i<$tot; $i++)
-		{
-		$ai=$as;
-		$bi=$bs;
-		$end=$be+1;
-
-		&comment("################## Calculate word $i"); 
-
-		for ($j=$bs; $j<$end; $j++)
-			{
-			&xor($c2,$c2) if ($j == $bs);
-			if (($j+1) == $end)
-				{
-				$v=1;
-				$v=2 if (($i+1) == $tot);
-				}
-			else
-				{ $v=0; }
-			if (($j+1) != $end)
-				{
-				$na=($ai-1);
-				$nb=($bi+1);
-				}
-			else
-				{
-				$na=$as+($i < ($num-1));
-				$nb=$bs+($i >= ($num-1));
-				}
-#printf STDERR "[$ai,$bi] -> [$na,$nb]\n";
-			&mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb);
-			if ($v)
-				{
-				&comment("saved r[$i]");
-				# &mov("eax",&wparam(0));
-				# &mov(&DWP($i*4,"eax","",0),$c0);
-				($c0,$c1,$c2)=($c1,$c2,$c0);
-				}
-			$ai--;
-			$bi++;
-			}
-		$as++ if ($i < ($num-1));
-		$ae++ if ($i >= ($num-1));
-
-		$bs++ if ($i >= ($num-1));
-		$be++ if ($i < ($num-1));
-		}
-	&comment("save r[$i]");
-	# &mov("eax",&wparam(0));
-	&mov(&DWP($i*4,"eax","",0),$c0);
-
-	&pop("ebx");
-	&pop("ebp");
-	&pop("edi");
-	&pop("esi");
-	&ret();
-	&function_end_B($name);
-	}
-
-sub bn_sqr_comba
-	{
-	local($name,$num)=@_;
-	local($r,$a,$c0,$c1,$c2)=@_;
-	local($i,$as,$ae,$bs,$be,$ai,$bi);
-	local($b,$tot,$end,$half);
-
-	&function_begin_B($name,"");
-
-	$c0="ebx";
-	$c1="ecx";
-	$c2="ebp";
-	$a="esi";
-	$r="edi";
-
-	&push("esi");
-	 &push("edi");
-	&push("ebp");
-	 &push("ebx");
-	&mov($r,&wparam(0));
-	 &mov($a,&wparam(1));
-	&xor($c0,$c0);
-	 &xor($c1,$c1);
-	&mov("eax",&DWP(0,$a,"",0)); # load the first word
-
-	$as=0;
-	$ae=0;
-	$bs=0;
-	$be=0;
-	$tot=$num+$num-1;
-
-	for ($i=0; $i<$tot; $i++)
-		{
-		$ai=$as;
-		$bi=$bs;
-		$end=$be+1;
-
-		&comment("############### Calculate word $i");
-		for ($j=$bs; $j<$end; $j++)
-			{
-			&xor($c2,$c2) if ($j == $bs);
-			if (($ai-1) < ($bi+1))
-				{
-				$v=1;
-				$v=2 if ($i+1) == $tot;
-				}
-			else
-				{ $v=0; }
-			if (!$v)
-				{
-				$na=$ai-1;
-				$nb=$bi+1;
-				}
-			else
-				{
-				$na=$as+($i < ($num-1));
-				$nb=$bs+($i >= ($num-1));
-				}
-			if ($ai == $bi)
-				{
-				&sqr_add_c($r,$a,$ai,$bi,
-					$c0,$c1,$c2,$v,$i,$na,$nb);
-				}
-			else
-				{
-				&sqr_add_c2($r,$a,$ai,$bi,
-					$c0,$c1,$c2,$v,$i,$na,$nb);
-				}
-			if ($v)
-				{
-				&comment("saved r[$i]");
-				#&mov(&DWP($i*4,$r,"",0),$c0);
-				($c0,$c1,$c2)=($c1,$c2,$c0);
-				last;
-				}
-			$ai--;
-			$bi++;
-			}
-		$as++ if ($i < ($num-1));
-		$ae++ if ($i >= ($num-1));
-
-		$bs++ if ($i >= ($num-1));
-		$be++ if ($i < ($num-1));
-		}
-	&mov(&DWP($i*4,$r,"",0),$c0);
-	&pop("ebx");
-	&pop("ebp");
-	&pop("edi");
-	&pop("esi");
-	&ret();
-	&function_end_B($name);
-	}

+ 0 - 851
drivers/builtin_openssl2/crypto/bn/asm/ia64-mont.pl

@@ -1,851 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# January 2010
-#
-# "Teaser" Montgomery multiplication module for IA-64. There are
-# several possibilities for improvement:
-#
-# - modulo-scheduling outer loop would eliminate quite a number of
-#   stalls after ldf8, xma and getf.sig outside inner loop and
-#   improve shorter key performance;
-# - shorter vector support [with input vectors being fetched only
-#   once] should be added;
-# - 2x unroll with help of n0[1] would make the code scalable on
-#   "wider" IA-64, "wider" than Itanium 2 that is, which is not of
-#   acute interest, because upcoming Tukwila's individual cores are
-#   reportedly based on Itanium 2 design;
-# - dedicated squaring procedure(?);
-#
-# January 2010
-#
-# Shorter vector support is implemented by zero-padding ap and np
-# vectors up to 8 elements, or 512 bits. This means that 256-bit
-# inputs will be processed only 2 times faster than 512-bit inputs,
-# not 4 [as one would expect, because algorithm complexity is n^2].
-# The reason for padding is that inputs shorter than 512 bits won't
-# be processed faster anyway, because minimal critical path of the
-# core loop happens to match 512-bit timing. Either way, it resulted
-# in >100% improvement of 512-bit RSA sign benchmark and 50% - of
-# 1024-bit one [in comparison to original version of *this* module].
-#
-# So far 'openssl speed rsa dsa' output on 900MHz Itanium 2 *with*
-# this module is:
-#                   sign    verify    sign/s verify/s
-# rsa  512 bits 0.000290s 0.000024s   3452.8  42031.4
-# rsa 1024 bits 0.000793s 0.000058s   1261.7  17172.0
-# rsa 2048 bits 0.005908s 0.000148s    169.3   6754.0
-# rsa 4096 bits 0.033456s 0.000469s     29.9   2133.6
-# dsa  512 bits 0.000253s 0.000198s   3949.9   5057.0
-# dsa 1024 bits 0.000585s 0.000607s   1708.4   1647.4
-# dsa 2048 bits 0.001453s 0.001703s    688.1    587.4
-#
-# ... and *without* (but still with ia64.S):
-#
-# rsa  512 bits 0.000670s 0.000041s   1491.8  24145.5
-# rsa 1024 bits 0.001988s 0.000080s    502.9  12499.3
-# rsa 2048 bits 0.008702s 0.000189s    114.9   5293.9
-# rsa 4096 bits 0.043860s 0.000533s     22.8   1875.9
-# dsa  512 bits 0.000441s 0.000427s   2265.3   2340.6
-# dsa 1024 bits 0.000823s 0.000867s   1215.6   1153.2
-# dsa 2048 bits 0.001894s 0.002179s    528.1    458.9
-#
-# As it can be seen, RSA sign performance improves by 130-30%,
-# hereafter less for longer keys, while verify - by 74-13%.
-# DSA performance improves by 115-30%.
-
-if ($^O eq "hpux") {
-    $ADDP="addp4";
-    for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
-} else { $ADDP="add"; }
-
-$code=<<___;
-.explicit
-.text
-
-// int bn_mul_mont (BN_ULONG *rp,const BN_ULONG *ap,
-//		    const BN_ULONG *bp,const BN_ULONG *np,
-//		    const BN_ULONG *n0p,int num);			
-.align	64
-.global	bn_mul_mont#
-.proc	bn_mul_mont#
-bn_mul_mont:
-	.prologue
-	.body
-{ .mmi;	cmp4.le		p6,p7=2,r37;;
-(p6)	cmp4.lt.unc	p8,p9=8,r37
-	mov		ret0=r0		};;
-{ .bbb;
-(p9)	br.cond.dptk.many	bn_mul_mont_8
-(p8)	br.cond.dpnt.many	bn_mul_mont_general
-(p7)	br.ret.spnt.many	b0	};;
-.endp	bn_mul_mont#
-
-prevfs=r2;	prevpr=r3;	prevlc=r10;	prevsp=r11;
-
-rptr=r8;	aptr=r9;	bptr=r14;	nptr=r15;
-tptr=r16;	// &tp[0]
-tp_1=r17;	// &tp[-1]
-num=r18;	len=r19;	lc=r20;
-topbit=r21;	// carry bit from tmp[num]
-
-n0=f6;
-m0=f7;
-bi=f8;
-
-.align	64
-.local	bn_mul_mont_general#
-.proc	bn_mul_mont_general#
-bn_mul_mont_general:
-	.prologue
-{ .mmi;	.save	ar.pfs,prevfs
-	alloc	prevfs=ar.pfs,6,2,0,8
-	$ADDP	aptr=0,in1
-	.save	ar.lc,prevlc
-	mov	prevlc=ar.lc		}
-{ .mmi;	.vframe	prevsp
-	mov	prevsp=sp
-	$ADDP	bptr=0,in2
-	.save	pr,prevpr
-	mov	prevpr=pr		};;
-
-	.body
-	.rotf		alo[6],nlo[4],ahi[8],nhi[6]
-	.rotr		a[3],n[3],t[2]
-
-{ .mmi;	ldf8		bi=[bptr],8		// (*bp++)
-	ldf8		alo[4]=[aptr],16	// ap[0]
-	$ADDP		r30=8,in1	};;
-{ .mmi;	ldf8		alo[3]=[r30],16		// ap[1]
-	ldf8		alo[2]=[aptr],16	// ap[2]
-	$ADDP		in4=0,in4	};;
-{ .mmi;	ldf8		alo[1]=[r30]		// ap[3]
-	ldf8		n0=[in4]		// n0
-	$ADDP		rptr=0,in0		}
-{ .mmi;	$ADDP		nptr=0,in3
-	mov		r31=16
-	zxt4		num=in5		};;
-{ .mmi;	ldf8		nlo[2]=[nptr],8		// np[0]
-	shladd		len=num,3,r0
-	shladd		r31=num,3,r31	};;
-{ .mmi;	ldf8		nlo[1]=[nptr],8		// np[1]
-	add		lc=-5,num
-	sub		r31=sp,r31	};;
-{ .mfb;	and		sp=-16,r31		// alloca
-	xmpy.hu		ahi[2]=alo[4],bi	// ap[0]*bp[0]
-	nop.b		0		}
-{ .mfb;	nop.m		0
-	xmpy.lu		alo[4]=alo[4],bi
-	brp.loop.imp	.L1st_ctop,.L1st_cend-16
-					};;
-{ .mfi;	nop.m		0
-	xma.hu		ahi[1]=alo[3],bi,ahi[2]	// ap[1]*bp[0]
-	add		tp_1=8,sp	}
-{ .mfi;	nop.m		0
-	xma.lu		alo[3]=alo[3],bi,ahi[2]
-	mov		pr.rot=0x20001f<<16
-			// ------^----- (p40) at first (p23)
-			// ----------^^ p[16:20]=1
-					};;
-{ .mfi;	nop.m		0
-	xmpy.lu		m0=alo[4],n0		// (ap[0]*bp[0])*n0
-	mov		ar.lc=lc	}
-{ .mfi;	nop.m		0
-	fcvt.fxu.s1	nhi[1]=f0
-	mov		ar.ec=8		};;
-
-.align	32
-.L1st_ctop:
-.pred.rel	"mutex",p40,p42
-{ .mfi;	(p16)	ldf8		alo[0]=[aptr],8		    // *(aptr++)
-	(p18)	xma.hu		ahi[0]=alo[2],bi,ahi[1]
-	(p40)	add		n[2]=n[2],a[2]		}   // (p23)					}
-{ .mfi;	(p18)	ldf8		nlo[0]=[nptr],8		    // *(nptr++)(p16)
-	(p18)	xma.lu		alo[2]=alo[2],bi,ahi[1]
-	(p42)	add		n[2]=n[2],a[2],1	};; // (p23)
-{ .mfi;	(p21)	getf.sig	a[0]=alo[5]
-	(p20)	xma.hu		nhi[0]=nlo[2],m0,nhi[1]
-	(p42)	cmp.leu		p41,p39=n[2],a[2]   	}   // (p23)
-{ .mfi;	(p23)	st8		[tp_1]=n[2],8
-	(p20)	xma.lu		nlo[2]=nlo[2],m0,nhi[1]
-	(p40)	cmp.ltu		p41,p39=n[2],a[2]	}   // (p23)
-{ .mmb;	(p21)	getf.sig	n[0]=nlo[3]
-	(p16)	nop.m		0
-	br.ctop.sptk	.L1st_ctop			};;
-.L1st_cend:
-
-{ .mmi;	getf.sig	a[0]=ahi[6]		// (p24)
-	getf.sig	n[0]=nhi[4]
-	add		num=-1,num	};;	// num--
-{ .mmi;	.pred.rel	"mutex",p40,p42
-(p40)	add		n[0]=n[0],a[0]
-(p42)	add		n[0]=n[0],a[0],1
-	sub		aptr=aptr,len	};;	// rewind
-{ .mmi;	.pred.rel	"mutex",p40,p42
-(p40)	cmp.ltu		p41,p39=n[0],a[0]
-(p42)	cmp.leu		p41,p39=n[0],a[0]
-	sub		nptr=nptr,len	};;
-{ .mmi;	.pred.rel	"mutex",p39,p41
-(p39)	add		topbit=r0,r0
-(p41)	add		topbit=r0,r0,1
-	nop.i		0		}	
-{ .mmi;	st8		[tp_1]=n[0]
-	add		tptr=16,sp
-	add		tp_1=8,sp	};;
-
-.Louter:
-{ .mmi;	ldf8		bi=[bptr],8		// (*bp++)
-	ldf8		ahi[3]=[tptr]		// tp[0]
-	add		r30=8,aptr	};;
-{ .mmi;	ldf8		alo[4]=[aptr],16	// ap[0]
-	ldf8		alo[3]=[r30],16		// ap[1]
-	add		r31=8,nptr	};;
-{ .mfb;	ldf8		alo[2]=[aptr],16	// ap[2]
-	xma.hu		ahi[2]=alo[4],bi,ahi[3]	// ap[0]*bp[i]+tp[0]
-	brp.loop.imp	.Linner_ctop,.Linner_cend-16
-					}
-{ .mfb;	ldf8		alo[1]=[r30]		// ap[3]
-	xma.lu		alo[4]=alo[4],bi,ahi[3]
-	clrrrb.pr			};;
-{ .mfi;	ldf8		nlo[2]=[nptr],16	// np[0]
-	xma.hu		ahi[1]=alo[3],bi,ahi[2]	// ap[1]*bp[i]
-	nop.i		0		}
-{ .mfi;	ldf8		nlo[1]=[r31]		// np[1]
-	xma.lu		alo[3]=alo[3],bi,ahi[2]
-	mov		pr.rot=0x20101f<<16
-			// ------^----- (p40) at first (p23)
-			// --------^--- (p30) at first (p22)
-			// ----------^^ p[16:20]=1
-					};;
-{ .mfi;	st8		[tptr]=r0		// tp[0] is already accounted
-	xmpy.lu		m0=alo[4],n0		// (ap[0]*bp[i]+tp[0])*n0
-	mov		ar.lc=lc	}
-{ .mfi;
-	fcvt.fxu.s1	nhi[1]=f0
-	mov		ar.ec=8		};;
-
-// This loop spins in 4*(n+7) ticks on Itanium 2 and should spin in
-// 7*(n+7) ticks on Itanium (the one codenamed Merced). Factor of 7
-// in latter case accounts for two-tick pipeline stall, which means
-// that its performance would be ~20% lower than optimal one. No
-// attempt was made to address this, because original Itanium is
-// hardly represented out in the wild...
-.align	32
-.Linner_ctop:
-.pred.rel	"mutex",p40,p42
-.pred.rel	"mutex",p30,p32
-{ .mfi;	(p16)	ldf8		alo[0]=[aptr],8		    // *(aptr++)
-	(p18)	xma.hu		ahi[0]=alo[2],bi,ahi[1]
-	(p40)	add		n[2]=n[2],a[2]		}   // (p23)
-{ .mfi;	(p16)	nop.m		0
-	(p18)	xma.lu		alo[2]=alo[2],bi,ahi[1]
-	(p42)	add		n[2]=n[2],a[2],1	};; // (p23)
-{ .mfi;	(p21)	getf.sig	a[0]=alo[5]
-	(p16)	nop.f		0
-	(p40)	cmp.ltu		p41,p39=n[2],a[2]	}   // (p23)
-{ .mfi;	(p21)	ld8		t[0]=[tptr],8
-	(p16)	nop.f		0
-	(p42)	cmp.leu		p41,p39=n[2],a[2]	};; // (p23)
-{ .mfi;	(p18)	ldf8		nlo[0]=[nptr],8		    // *(nptr++)
-	(p20)	xma.hu		nhi[0]=nlo[2],m0,nhi[1]
-	(p30)	add		a[1]=a[1],t[1]		}   // (p22)
-{ .mfi;	(p16)	nop.m		0
-	(p20)	xma.lu		nlo[2]=nlo[2],m0,nhi[1]
-	(p32)	add		a[1]=a[1],t[1],1	};; // (p22)
-{ .mmi;	(p21)	getf.sig	n[0]=nlo[3]
-	(p16)	nop.m		0
-	(p30)	cmp.ltu		p31,p29=a[1],t[1]	}   // (p22)
-{ .mmb;	(p23)	st8		[tp_1]=n[2],8
-	(p32)	cmp.leu		p31,p29=a[1],t[1]	    // (p22)
-	br.ctop.sptk	.Linner_ctop			};;
-.Linner_cend:
-
-{ .mmi;	getf.sig	a[0]=ahi[6]		// (p24)
-	getf.sig	n[0]=nhi[4]
-	nop.i		0		};;
-
-{ .mmi;	.pred.rel	"mutex",p31,p33
-(p31)	add		a[0]=a[0],topbit
-(p33)	add		a[0]=a[0],topbit,1
-	mov		topbit=r0	};;
-{ .mfi; .pred.rel	"mutex",p31,p33
-(p31)	cmp.ltu		p32,p30=a[0],topbit
-(p33)	cmp.leu		p32,p30=a[0],topbit
-					}
-{ .mfi;	.pred.rel	"mutex",p40,p42
-(p40)	add		n[0]=n[0],a[0]
-(p42)	add		n[0]=n[0],a[0],1
-					};;
-{ .mmi;	.pred.rel	"mutex",p44,p46
-(p40)	cmp.ltu		p41,p39=n[0],a[0]
-(p42)	cmp.leu		p41,p39=n[0],a[0]
-(p32)	add		topbit=r0,r0,1	}
-
-{ .mmi;	st8		[tp_1]=n[0],8
-	cmp4.ne		p6,p0=1,num
-	sub		aptr=aptr,len	};;	// rewind
-{ .mmi;	sub		nptr=nptr,len
-(p41)	add		topbit=r0,r0,1
-	add		tptr=16,sp	}
-{ .mmb;	add		tp_1=8,sp
-	add		num=-1,num		// num--
-(p6)	br.cond.sptk.many	.Louter	};;
-
-{ .mbb;	add		lc=4,lc
-	brp.loop.imp	.Lsub_ctop,.Lsub_cend-16
-	clrrrb.pr			};;
-{ .mii;	nop.m		0
-	mov		pr.rot=0x10001<<16
-			// ------^---- (p33) at first (p17)
-	mov		ar.lc=lc	}
-{ .mii;	nop.m		0
-	mov		ar.ec=3
-	nop.i		0		};;
-
-.Lsub_ctop:
-.pred.rel	"mutex",p33,p35
-{ .mfi;	(p16)	ld8		t[0]=[tptr],8		    // t=*(tp++)
-	(p16)	nop.f		0
-	(p33)	sub		n[1]=t[1],n[1]		}   // (p17)
-{ .mfi;	(p16)	ld8		n[0]=[nptr],8		    // n=*(np++)
-	(p16)	nop.f		0
-	(p35)	sub		n[1]=t[1],n[1],1	};; // (p17)
-{ .mib;	(p18)	st8		[rptr]=n[2],8		    // *(rp++)=r
-	(p33)	cmp.gtu		p34,p32=n[1],t[1]	    // (p17)
-	(p18)	nop.b		0			}
-{ .mib;	(p18)	nop.m		0
-	(p35)	cmp.geu		p34,p32=n[1],t[1]	    // (p17)
-	br.ctop.sptk	.Lsub_ctop			};;
-.Lsub_cend:
-
-{ .mmb;	.pred.rel	"mutex",p34,p36
-(p34)	sub	topbit=topbit,r0	// (p19)
-(p36)	sub	topbit=topbit,r0,1
-	brp.loop.imp	.Lcopy_ctop,.Lcopy_cend-16
-					}
-{ .mmb;	sub	rptr=rptr,len		// rewind
-	sub	tptr=tptr,len
-	clrrrb.pr			};;
-{ .mmi;	and	aptr=tptr,topbit
-	andcm	bptr=rptr,topbit
-	mov	pr.rot=1<<16		};;
-{ .mii;	or	nptr=aptr,bptr
-	mov	ar.lc=lc
-	mov	ar.ec=3			};;
-
-.Lcopy_ctop:
-{ .mmb;	(p16)	ld8	n[0]=[nptr],8
-	(p18)	st8	[tptr]=r0,8
-	(p16)	nop.b	0		}
-{ .mmb;	(p16)	nop.m	0
-	(p18)	st8	[rptr]=n[2],8
-	br.ctop.sptk	.Lcopy_ctop	};;
-.Lcopy_cend:
-
-{ .mmi;	mov		ret0=1			// signal "handled"
-	rum		1<<5			// clear um.mfh
-	mov		ar.lc=prevlc	}
-{ .mib;	.restore	sp
-	mov		sp=prevsp
-	mov		pr=prevpr,0x1ffff
-	br.ret.sptk.many	b0	};;
-.endp	bn_mul_mont_general#
-
-a1=r16;  a2=r17;  a3=r18;  a4=r19;  a5=r20;  a6=r21;  a7=r22;  a8=r23;
-n1=r24;  n2=r25;  n3=r26;  n4=r27;  n5=r28;  n6=r29;  n7=r30;  n8=r31;
-t0=r15;
-
-ai0=f8;  ai1=f9;  ai2=f10; ai3=f11; ai4=f12; ai5=f13; ai6=f14; ai7=f15;
-ni0=f16; ni1=f17; ni2=f18; ni3=f19; ni4=f20; ni5=f21; ni6=f22; ni7=f23;
-
-.align	64
-.skip	48		// aligns loop body
-.local	bn_mul_mont_8#
-.proc	bn_mul_mont_8#
-bn_mul_mont_8:
-	.prologue
-{ .mmi;	.save		ar.pfs,prevfs
-	alloc		prevfs=ar.pfs,6,2,0,8
-	.vframe		prevsp
-	mov		prevsp=sp
-	.save		ar.lc,prevlc
-	mov		prevlc=ar.lc	}
-{ .mmi;	add		r17=-6*16,sp
-	add		sp=-7*16,sp
-	.save		pr,prevpr
-	mov		prevpr=pr	};;
-
-{ .mmi;	.save.gf	0,0x10
-	stf.spill	[sp]=f16,-16
-	.save.gf	0,0x20
-	stf.spill	[r17]=f17,32
-	add		r16=-5*16,prevsp};;
-{ .mmi;	.save.gf	0,0x40
-	stf.spill	[r16]=f18,32
-	.save.gf	0,0x80
-	stf.spill	[r17]=f19,32
-	$ADDP		aptr=0,in1	};;
-{ .mmi;	.save.gf	0,0x100
-	stf.spill	[r16]=f20,32
-	.save.gf	0,0x200
-	stf.spill	[r17]=f21,32
-	$ADDP		r29=8,in1	};;
-{ .mmi;	.save.gf	0,0x400
-	stf.spill	[r16]=f22
-	.save.gf	0,0x800
-	stf.spill	[r17]=f23
-	$ADDP		rptr=0,in0	};;
-
-	.body
-	.rotf		bj[8],mj[2],tf[2],alo[10],ahi[10],nlo[10],nhi[10]
-	.rotr		t[8]
-
-// load input vectors padding them to 8 elements
-{ .mmi;	ldf8		ai0=[aptr],16		// ap[0]
-	ldf8		ai1=[r29],16		// ap[1]
-	$ADDP		bptr=0,in2	}
-{ .mmi;	$ADDP		r30=8,in2
-	$ADDP		nptr=0,in3
-	$ADDP		r31=8,in3	};;
-{ .mmi;	ldf8		bj[7]=[bptr],16		// bp[0]
-	ldf8		bj[6]=[r30],16		// bp[1]
-	cmp4.le		p4,p5=3,in5	}
-{ .mmi;	ldf8		ni0=[nptr],16		// np[0]
-	ldf8		ni1=[r31],16		// np[1]
-	cmp4.le		p6,p7=4,in5	};;
-
-{ .mfi;	(p4)ldf8	ai2=[aptr],16		// ap[2]
-	(p5)fcvt.fxu	ai2=f0
-	cmp4.le		p8,p9=5,in5	}
-{ .mfi;	(p6)ldf8	ai3=[r29],16		// ap[3]
-	(p7)fcvt.fxu	ai3=f0
-	cmp4.le		p10,p11=6,in5	}
-{ .mfi;	(p4)ldf8	bj[5]=[bptr],16		// bp[2]
-	(p5)fcvt.fxu	bj[5]=f0
-	cmp4.le		p12,p13=7,in5	}
-{ .mfi;	(p6)ldf8	bj[4]=[r30],16		// bp[3]
-	(p7)fcvt.fxu	bj[4]=f0
-	cmp4.le		p14,p15=8,in5	}
-{ .mfi;	(p4)ldf8	ni2=[nptr],16		// np[2]
-	(p5)fcvt.fxu	ni2=f0
-	addp4		r28=-1,in5	}
-{ .mfi;	(p6)ldf8	ni3=[r31],16		// np[3]
-	(p7)fcvt.fxu	ni3=f0
-	$ADDP		in4=0,in4	};;
-
-{ .mfi;	ldf8		n0=[in4]
-	fcvt.fxu	tf[1]=f0
-	nop.i		0		}
-
-{ .mfi;	(p8)ldf8	ai4=[aptr],16		// ap[4]
-	(p9)fcvt.fxu	ai4=f0
-	mov		t[0]=r0		}
-{ .mfi;	(p10)ldf8	ai5=[r29],16		// ap[5]
-	(p11)fcvt.fxu	ai5=f0
-	mov		t[1]=r0		}
-{ .mfi;	(p8)ldf8	bj[3]=[bptr],16		// bp[4]
-	(p9)fcvt.fxu	bj[3]=f0
-	mov		t[2]=r0		}
-{ .mfi;	(p10)ldf8	bj[2]=[r30],16		// bp[5]
-	(p11)fcvt.fxu	bj[2]=f0
-	mov		t[3]=r0		}
-{ .mfi;	(p8)ldf8	ni4=[nptr],16		// np[4]
-	(p9)fcvt.fxu	ni4=f0
-	mov		t[4]=r0		}
-{ .mfi;	(p10)ldf8	ni5=[r31],16		// np[5]
-	(p11)fcvt.fxu	ni5=f0
-	mov		t[5]=r0		};;
-
-{ .mfi;	(p12)ldf8	ai6=[aptr],16		// ap[6]
-	(p13)fcvt.fxu	ai6=f0
-	mov		t[6]=r0		}
-{ .mfi;	(p14)ldf8	ai7=[r29],16		// ap[7]
-	(p15)fcvt.fxu	ai7=f0
-	mov		t[7]=r0		}
-{ .mfi;	(p12)ldf8	bj[1]=[bptr],16		// bp[6]
-	(p13)fcvt.fxu	bj[1]=f0
-	mov		ar.lc=r28	}
-{ .mfi;	(p14)ldf8	bj[0]=[r30],16		// bp[7]
-	(p15)fcvt.fxu	bj[0]=f0
-	mov		ar.ec=1		}
-{ .mfi;	(p12)ldf8	ni6=[nptr],16		// np[6]
-	(p13)fcvt.fxu	ni6=f0
-	mov		pr.rot=1<<16	}
-{ .mfb;	(p14)ldf8	ni7=[r31],16		// np[7]
-	(p15)fcvt.fxu	ni7=f0
-	brp.loop.imp	.Louter_8_ctop,.Louter_8_cend-16
-					};;
-
-// The loop is scheduled for 32*n ticks on Itanium 2. An actual attempt
-// to measure with the help of the Interval Time Counter indicated that
-// the factor is a tad higher: 33 or 34, if not 35. Exact measurement and
-// addressing the issue is problematic, because I don't have access to a
-// platform-specific instruction-level profiler. On Itanium it should
-// run in 56*n ticks, because of the higher xma latency...
-.Louter_8_ctop:
-	.pred.rel		"mutex",p40,p42
-	.pred.rel		"mutex",p48,p50
-{ .mfi;	(p16)	nop.m		0			// 0:
-	(p16)	xma.hu		ahi[0]=ai0,bj[7],tf[1]	//	ap[0]*b[i]+t[0]
-	(p40)	add		a3=a3,n3	}	//	(p17) a3+=n3
-{ .mfi;	(p42)	add		a3=a3,n3,1
-	(p16)	xma.lu		alo[0]=ai0,bj[7],tf[1]
-	(p16)	nop.i		0		};;
-{ .mii;	(p17)	getf.sig	a7=alo[8]		// 1:
-	(p48)	add		t[6]=t[6],a3		//	(p17) t[6]+=a3
-	(p50)	add		t[6]=t[6],a3,1	};;
-{ .mfi;	(p17)	getf.sig	a8=ahi[8]		// 2:
-	(p17)	xma.hu		nhi[7]=ni6,mj[1],nhi[6]	//	np[6]*m0
-	(p40)	cmp.ltu		p43,p41=a3,n3	}
-{ .mfi;	(p42)	cmp.leu		p43,p41=a3,n3
-	(p17)	xma.lu		nlo[7]=ni6,mj[1],nhi[6]
-	(p16)	nop.i		0		};;
-{ .mii;	(p17)	getf.sig	n5=nlo[6]		// 3:
-	(p48)	cmp.ltu		p51,p49=t[6],a3
-	(p50)	cmp.leu		p51,p49=t[6],a3	};;
-	.pred.rel		"mutex",p41,p43
-	.pred.rel		"mutex",p49,p51
-{ .mfi;	(p16)	nop.m		0			// 4:
-	(p16)	xma.hu		ahi[1]=ai1,bj[7],ahi[0]	//	ap[1]*b[i]
-	(p41)	add		a4=a4,n4	}	//	(p17) a4+=n4
-{ .mfi;	(p43)	add		a4=a4,n4,1
-	(p16)	xma.lu		alo[1]=ai1,bj[7],ahi[0]
-	(p16)	nop.i		0		};;
-{ .mfi;	(p49)	add		t[5]=t[5],a4		// 5:	(p17) t[5]+=a4
-	(p16)	xmpy.lu		mj[0]=alo[0],n0		//	(ap[0]*b[i]+t[0])*n0
-	(p51)	add		t[5]=t[5],a4,1	};;
-{ .mfi;	(p16)	nop.m		0			// 6:
-	(p17)	xma.hu		nhi[8]=ni7,mj[1],nhi[7]	//	np[7]*m0
-	(p41)	cmp.ltu		p42,p40=a4,n4	}
-{ .mfi;	(p43)	cmp.leu		p42,p40=a4,n4
-	(p17)	xma.lu		nlo[8]=ni7,mj[1],nhi[7]
-	(p16)	nop.i		0		};;
-{ .mii;	(p17)	getf.sig	n6=nlo[7]		// 7:
-	(p49)	cmp.ltu		p50,p48=t[5],a4
-	(p51)	cmp.leu		p50,p48=t[5],a4	};;
-	.pred.rel		"mutex",p40,p42
-	.pred.rel		"mutex",p48,p50
-{ .mfi;	(p16)	nop.m		0			// 8:
-	(p16)	xma.hu		ahi[2]=ai2,bj[7],ahi[1]	//	ap[2]*b[i]
-	(p40)	add		a5=a5,n5	}	//	(p17) a5+=n5
-{ .mfi;	(p42)	add		a5=a5,n5,1
-	(p16)	xma.lu		alo[2]=ai2,bj[7],ahi[1]
-	(p16)	nop.i		0		};;
-{ .mii;	(p16)	getf.sig	a1=alo[1]		// 9:
-	(p48)	add		t[4]=t[4],a5		//	(p17) t[4]+=a5
-	(p50)	add		t[4]=t[4],a5,1	};;
-{ .mfi;	(p16)	nop.m		0			// 10:
-	(p16)	xma.hu		nhi[0]=ni0,mj[0],alo[0]	//	np[0]*m0
-	(p40)	cmp.ltu		p43,p41=a5,n5	}
-{ .mfi;	(p42)	cmp.leu		p43,p41=a5,n5
-	(p16)	xma.lu		nlo[0]=ni0,mj[0],alo[0]
-	(p16)	nop.i		0		};;
-{ .mii;	(p17)	getf.sig	n7=nlo[8]		// 11:
-	(p48)	cmp.ltu		p51,p49=t[4],a5
-	(p50)	cmp.leu		p51,p49=t[4],a5	};;
-	.pred.rel		"mutex",p41,p43
-	.pred.rel		"mutex",p49,p51
-{ .mfi;	(p17)	getf.sig	n8=nhi[8]		// 12:
-	(p16)	xma.hu		ahi[3]=ai3,bj[7],ahi[2]	//	ap[3]*b[i]
-	(p41)	add		a6=a6,n6	}	//	(p17) a6+=n6
-{ .mfi;	(p43)	add		a6=a6,n6,1
-	(p16)	xma.lu		alo[3]=ai3,bj[7],ahi[2]
-	(p16)	nop.i		0		};;
-{ .mii;	(p16)	getf.sig	a2=alo[2]		// 13:
-	(p49)	add		t[3]=t[3],a6		//	(p17) t[3]+=a6
-	(p51)	add		t[3]=t[3],a6,1	};;
-{ .mfi;	(p16)	nop.m		0			// 14:
-	(p16)	xma.hu		nhi[1]=ni1,mj[0],nhi[0]	//	np[1]*m0
-	(p41)	cmp.ltu		p42,p40=a6,n6	}
-{ .mfi;	(p43)	cmp.leu		p42,p40=a6,n6
-	(p16)	xma.lu		nlo[1]=ni1,mj[0],nhi[0]
-	(p16)	nop.i		0		};;
-{ .mii;	(p16)	nop.m		0			// 15:
-	(p49)	cmp.ltu		p50,p48=t[3],a6
-	(p51)	cmp.leu		p50,p48=t[3],a6	};;
-	.pred.rel		"mutex",p40,p42
-	.pred.rel		"mutex",p48,p50
-{ .mfi;	(p16)	nop.m		0			// 16:
-	(p16)	xma.hu		ahi[4]=ai4,bj[7],ahi[3]	//	ap[4]*b[i]
-	(p40)	add		a7=a7,n7	}	//	(p17) a7+=n7
-{ .mfi;	(p42)	add		a7=a7,n7,1
-	(p16)	xma.lu		alo[4]=ai4,bj[7],ahi[3]
-	(p16)	nop.i		0		};;
-{ .mii;	(p16)	getf.sig	a3=alo[3]		// 17:
-	(p48)	add		t[2]=t[2],a7		//	(p17) t[2]+=a7
-	(p50)	add		t[2]=t[2],a7,1	};;
-{ .mfi;	(p16)	nop.m		0			// 18:
-	(p16)	xma.hu		nhi[2]=ni2,mj[0],nhi[1]	//	np[2]*m0
-	(p40)	cmp.ltu		p43,p41=a7,n7	}
-{ .mfi;	(p42)	cmp.leu		p43,p41=a7,n7
-	(p16)	xma.lu		nlo[2]=ni2,mj[0],nhi[1]
-	(p16)	nop.i		0		};;
-{ .mii;	(p16)	getf.sig	n1=nlo[1]		// 19:
-	(p48)	cmp.ltu		p51,p49=t[2],a7
-	(p50)	cmp.leu		p51,p49=t[2],a7	};;
-	.pred.rel		"mutex",p41,p43
-	.pred.rel		"mutex",p49,p51
-{ .mfi;	(p16)	nop.m		0			// 20:
-	(p16)	xma.hu		ahi[5]=ai5,bj[7],ahi[4]	//	ap[5]*b[i]
-	(p41)	add		a8=a8,n8	}	//	(p17) a8+=n8
-{ .mfi;	(p43)	add		a8=a8,n8,1
-	(p16)	xma.lu		alo[5]=ai5,bj[7],ahi[4]
-	(p16)	nop.i		0		};;
-{ .mii;	(p16)	getf.sig	a4=alo[4]		// 21:
-	(p49)	add		t[1]=t[1],a8		//	(p17) t[1]+=a8
-	(p51)	add		t[1]=t[1],a8,1	};;
-{ .mfi;	(p16)	nop.m		0			// 22:
-	(p16)	xma.hu		nhi[3]=ni3,mj[0],nhi[2]	//	np[3]*m0
-	(p41)	cmp.ltu		p42,p40=a8,n8	}
-{ .mfi;	(p43)	cmp.leu		p42,p40=a8,n8
-	(p16)	xma.lu		nlo[3]=ni3,mj[0],nhi[2]
-	(p16)	nop.i		0		};;
-{ .mii;	(p16)	getf.sig	n2=nlo[2]		// 23:
-	(p49)	cmp.ltu		p50,p48=t[1],a8
-	(p51)	cmp.leu		p50,p48=t[1],a8	};;
-{ .mfi;	(p16)	nop.m		0			// 24:
-	(p16)	xma.hu		ahi[6]=ai6,bj[7],ahi[5]	//	ap[6]*b[i]
-	(p16)	add		a1=a1,n1	}	//	(p16) a1+=n1
-{ .mfi;	(p16)	nop.m		0
-	(p16)	xma.lu		alo[6]=ai6,bj[7],ahi[5]
-	(p17)	mov		t[0]=r0		};;
-{ .mii;	(p16)	getf.sig	a5=alo[5]		// 25:
-	(p16)	add		t0=t[7],a1		//	(p16) t[7]+=a1
-	(p42)	add		t[0]=t[0],r0,1	};;
-{ .mfi;	(p16)	setf.sig	tf[0]=t0		// 26:
-	(p16)	xma.hu		nhi[4]=ni4,mj[0],nhi[3]	//	np[4]*m0
-	(p50)	add		t[0]=t[0],r0,1	}
-{ .mfi;	(p16)	cmp.ltu.unc	p42,p40=a1,n1
-	(p16)	xma.lu		nlo[4]=ni4,mj[0],nhi[3]
-	(p16)	nop.i		0		};;
-{ .mii;	(p16)	getf.sig	n3=nlo[3]		// 27:
-	(p16)	cmp.ltu.unc	p50,p48=t0,a1
-	(p16)	nop.i		0		};;
-	.pred.rel		"mutex",p40,p42
-	.pred.rel		"mutex",p48,p50
-{ .mfi;	(p16)	nop.m		0			// 28:
-	(p16)	xma.hu		ahi[7]=ai7,bj[7],ahi[6]	//	ap[7]*b[i]
-	(p40)	add		a2=a2,n2	}	//	(p16) a2+=n2
-{ .mfi;	(p42)	add		a2=a2,n2,1
-	(p16)	xma.lu		alo[7]=ai7,bj[7],ahi[6]
-	(p16)	nop.i		0		};;
-{ .mii;	(p16)	getf.sig	a6=alo[6]		// 29:
-	(p48)	add		t[6]=t[6],a2		//	(p16) t[6]+=a2
-	(p50)	add		t[6]=t[6],a2,1	};;
-{ .mfi;	(p16)	nop.m		0			// 30:
-	(p16)	xma.hu		nhi[5]=ni5,mj[0],nhi[4]	//	np[5]*m0
-	(p40)	cmp.ltu		p41,p39=a2,n2	}
-{ .mfi;	(p42)	cmp.leu		p41,p39=a2,n2
-	(p16)	xma.lu		nlo[5]=ni5,mj[0],nhi[4]
-	(p16)	nop.i		0		};;
-{ .mfi;	(p16)	getf.sig	n4=nlo[4]		// 31:
-	(p16)	nop.f		0
-	(p48)	cmp.ltu		p49,p47=t[6],a2	}
-{ .mfb;	(p50)	cmp.leu		p49,p47=t[6],a2
-	(p16)	nop.f		0
-	br.ctop.sptk.many	.Louter_8_ctop	};;
-.Louter_8_cend:
-
-// The above loop has to execute one more time, without (p16), which is
-// replaced with a merged move of np[8] to the GPR bank
-	.pred.rel		"mutex",p40,p42
-	.pred.rel		"mutex",p48,p50
-{ .mmi;	(p0)	getf.sig	n1=ni0			// 0:
-	(p40)	add		a3=a3,n3		//	(p17) a3+=n3
-	(p42)	add		a3=a3,n3,1	};;
-{ .mii;	(p17)	getf.sig	a7=alo[8]		// 1:
-	(p48)	add		t[6]=t[6],a3		//	(p17) t[6]+=a3
-	(p50)	add		t[6]=t[6],a3,1	};;
-{ .mfi;	(p17)	getf.sig	a8=ahi[8]		// 2:
-	(p17)	xma.hu		nhi[7]=ni6,mj[1],nhi[6]	//	np[6]*m0
-	(p40)	cmp.ltu		p43,p41=a3,n3	}
-{ .mfi;	(p42)	cmp.leu		p43,p41=a3,n3
-	(p17)	xma.lu		nlo[7]=ni6,mj[1],nhi[6]
-	(p0)	nop.i		0		};;
-{ .mii;	(p17)	getf.sig	n5=nlo[6]		// 3:
-	(p48)	cmp.ltu		p51,p49=t[6],a3
-	(p50)	cmp.leu		p51,p49=t[6],a3	};;
-	.pred.rel		"mutex",p41,p43
-	.pred.rel		"mutex",p49,p51
-{ .mmi;	(p0)	getf.sig	n2=ni1			// 4:
-	(p41)	add		a4=a4,n4		//	(p17) a4+=n4
-	(p43)	add		a4=a4,n4,1	};;
-{ .mfi;	(p49)	add		t[5]=t[5],a4		// 5:	(p17) t[5]+=a4
-	(p0)	nop.f		0
-	(p51)	add		t[5]=t[5],a4,1	};;
-{ .mfi;	(p0)	getf.sig	n3=ni2			// 6:
-	(p17)	xma.hu		nhi[8]=ni7,mj[1],nhi[7]	//	np[7]*m0
-	(p41)	cmp.ltu		p42,p40=a4,n4	}
-{ .mfi;	(p43)	cmp.leu		p42,p40=a4,n4
-	(p17)	xma.lu		nlo[8]=ni7,mj[1],nhi[7]
-	(p0)	nop.i		0		};;
-{ .mii;	(p17)	getf.sig	n6=nlo[7]		// 7:
-	(p49)	cmp.ltu		p50,p48=t[5],a4
-	(p51)	cmp.leu		p50,p48=t[5],a4	};;
-	.pred.rel		"mutex",p40,p42
-	.pred.rel		"mutex",p48,p50
-{ .mii;	(p0)	getf.sig	n4=ni3			// 8:
-	(p40)	add		a5=a5,n5		//	(p17) a5+=n5
-	(p42)	add		a5=a5,n5,1	};;
-{ .mii;	(p0)	nop.m		0			// 9:
-	(p48)	add		t[4]=t[4],a5		//	(p17) t[4]+=a5
-	(p50)	add		t[4]=t[4],a5,1	};;
-{ .mii;	(p0)	nop.m		0			// 10:
-	(p40)	cmp.ltu		p43,p41=a5,n5
-	(p42)	cmp.leu		p43,p41=a5,n5	};;
-{ .mii;	(p17)	getf.sig	n7=nlo[8]		// 11:
-	(p48)	cmp.ltu		p51,p49=t[4],a5
-	(p50)	cmp.leu		p51,p49=t[4],a5	};;
-	.pred.rel		"mutex",p41,p43
-	.pred.rel		"mutex",p49,p51
-{ .mii;	(p17)	getf.sig	n8=nhi[8]		// 12:
-	(p41)	add		a6=a6,n6		//	(p17) a6+=n6
-	(p43)	add		a6=a6,n6,1	};;
-{ .mii;	(p0)	getf.sig	n5=ni4			// 13:
-	(p49)	add		t[3]=t[3],a6		//	(p17) t[3]+=a6
-	(p51)	add		t[3]=t[3],a6,1	};;
-{ .mii;	(p0)	nop.m		0			// 14:
-	(p41)	cmp.ltu		p42,p40=a6,n6
-	(p43)	cmp.leu		p42,p40=a6,n6	};;
-{ .mii;	(p0)	getf.sig	n6=ni5			// 15:
-	(p49)	cmp.ltu		p50,p48=t[3],a6
-	(p51)	cmp.leu		p50,p48=t[3],a6	};;
-	.pred.rel		"mutex",p40,p42
-	.pred.rel		"mutex",p48,p50
-{ .mii;	(p0)	nop.m		0			// 16:
-	(p40)	add		a7=a7,n7		//	(p17) a7+=n7
-	(p42)	add		a7=a7,n7,1	};;
-{ .mii;	(p0)	nop.m		0			// 17:
-	(p48)	add		t[2]=t[2],a7		//	(p17) t[2]+=a7
-	(p50)	add		t[2]=t[2],a7,1	};;
-{ .mii;	(p0)	nop.m		0			// 18:
-	(p40)	cmp.ltu		p43,p41=a7,n7
-	(p42)	cmp.leu		p43,p41=a7,n7	};;
-{ .mii;	(p0)	getf.sig	n7=ni6			// 19:
-	(p48)	cmp.ltu		p51,p49=t[2],a7
-	(p50)	cmp.leu		p51,p49=t[2],a7	};;
-	.pred.rel		"mutex",p41,p43
-	.pred.rel		"mutex",p49,p51
-{ .mii;	(p0)	nop.m		0			// 20:
-	(p41)	add		a8=a8,n8		//	(p17) a8+=n8
-	(p43)	add		a8=a8,n8,1	};;
-{ .mmi;	(p0)	nop.m		0			// 21:
-	(p49)	add		t[1]=t[1],a8		//	(p17) t[1]+=a8
-	(p51)	add		t[1]=t[1],a8,1	}
-{ .mmi;	(p17)	mov		t[0]=r0
-	(p41)	cmp.ltu		p42,p40=a8,n8
-	(p43)	cmp.leu		p42,p40=a8,n8	};;
-{ .mmi;	(p0)	getf.sig	n8=ni7			// 22:
-	(p49)	cmp.ltu		p50,p48=t[1],a8
-	(p51)	cmp.leu		p50,p48=t[1],a8	}
-{ .mmi;	(p42)	add		t[0]=t[0],r0,1
-	(p0)	add		r16=-7*16,prevsp
-	(p0)	add		r17=-6*16,prevsp	};;
-
-// subtract np[8] from carrybit|tmp[8]
-// the carrybit|tmp[8] layout upon exit from the above loop is:
-//	t[0]|t[1]|t[2]|t[3]|t[4]|t[5]|t[6]|t[7]|t0 (least significant)
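-//
-// In C terms, the borrow chain below amounts to the usual final
-// conditional-subtract step of Montgomery reduction. A hedged sketch,
-// not part of this file's interface; t[], np[] and borrow below are
-// illustrative names for the register contents:
-//
-//	BN_ULONG borrow=0, d[8];
-//	for (i=0; i<8; i++)
-//		{
-//		BN_ULONG x=t[i]-borrow;		/* may wrap */
-//		BN_ULONG b=(x>t[i]);		/* borrow from carry-in */
-//		d[i]=x-np[i];
-//		borrow=b|(d[i]>x);		/* borrow from the subtract */
-//		}
-//	/* keep d[] if carrybit-borrow doesn't underflow, else t[] */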
-{ .mmi;	(p50)add	t[0]=t[0],r0,1
-	add		r18=-5*16,prevsp
-	sub		n1=t0,n1	};;
-{ .mmi;	cmp.gtu		p34,p32=n1,t0;;
-	.pred.rel	"mutex",p32,p34
-	(p32)sub	n2=t[7],n2
-	(p34)sub	n2=t[7],n2,1	};;
-{ .mii;	(p32)cmp.gtu	p35,p33=n2,t[7]
-	(p34)cmp.geu	p35,p33=n2,t[7];;
-	.pred.rel	"mutex",p33,p35
-	(p33)sub	n3=t[6],n3	}
-{ .mmi;	(p35)sub	n3=t[6],n3,1;;
-	(p33)cmp.gtu	p34,p32=n3,t[6]
-	(p35)cmp.geu	p34,p32=n3,t[6]	};;
-	.pred.rel	"mutex",p32,p34
-{ .mii;	(p32)sub	n4=t[5],n4
-	(p34)sub	n4=t[5],n4,1;;
-	(p32)cmp.gtu	p35,p33=n4,t[5]	}
-{ .mmi;	(p34)cmp.geu	p35,p33=n4,t[5];;
-	.pred.rel	"mutex",p33,p35
-	(p33)sub	n5=t[4],n5
-	(p35)sub	n5=t[4],n5,1	};;
-{ .mii;	(p33)cmp.gtu	p34,p32=n5,t[4]
-	(p35)cmp.geu	p34,p32=n5,t[4];;
-	.pred.rel	"mutex",p32,p34
-	(p32)sub	n6=t[3],n6	}
-{ .mmi;	(p34)sub	n6=t[3],n6,1;;
-	(p32)cmp.gtu	p35,p33=n6,t[3]
-	(p34)cmp.geu	p35,p33=n6,t[3]	};;
-	.pred.rel	"mutex",p33,p35
-{ .mii;	(p33)sub	n7=t[2],n7
-	(p35)sub	n7=t[2],n7,1;;
-	(p33)cmp.gtu	p34,p32=n7,t[2]	}
-{ .mmi;	(p35)cmp.geu	p34,p32=n7,t[2];;
-	.pred.rel	"mutex",p32,p34
-	(p32)sub	n8=t[1],n8
-	(p34)sub	n8=t[1],n8,1	};;
-{ .mii;	(p32)cmp.gtu	p35,p33=n8,t[1]
-	(p34)cmp.geu	p35,p33=n8,t[1];;
-	.pred.rel	"mutex",p33,p35
-	(p33)sub	a8=t[0],r0	}
-{ .mmi;	(p35)sub	a8=t[0],r0,1;;
-	(p33)cmp.gtu	p34,p32=a8,t[0]
-	(p35)cmp.geu	p34,p32=a8,t[0]	};;
-
-// save the result, either tmp[num] or tmp[num]-np[num]
-	.pred.rel	"mutex",p32,p34
-{ .mmi;	(p32)st8	[rptr]=n1,8
-	(p34)st8	[rptr]=t0,8
-	add		r19=-4*16,prevsp};;
-{ .mmb;	(p32)st8	[rptr]=n2,8
-	(p34)st8	[rptr]=t[7],8
-	(p5)br.cond.dpnt.few	.Ldone	};;
-{ .mmb;	(p32)st8	[rptr]=n3,8
-	(p34)st8	[rptr]=t[6],8
-	(p7)br.cond.dpnt.few	.Ldone	};;
-{ .mmb;	(p32)st8	[rptr]=n4,8
-	(p34)st8	[rptr]=t[5],8
-	(p9)br.cond.dpnt.few	.Ldone	};;
-{ .mmb;	(p32)st8	[rptr]=n5,8
-	(p34)st8	[rptr]=t[4],8
-	(p11)br.cond.dpnt.few	.Ldone	};;
-{ .mmb;	(p32)st8	[rptr]=n6,8
-	(p34)st8	[rptr]=t[3],8
-	(p13)br.cond.dpnt.few	.Ldone	};;
-{ .mmb;	(p32)st8	[rptr]=n7,8
-	(p34)st8	[rptr]=t[2],8
-	(p15)br.cond.dpnt.few	.Ldone	};;
-{ .mmb;	(p32)st8	[rptr]=n8,8
-	(p34)st8	[rptr]=t[1],8
-	nop.b		0		};;
-.Ldone:						// epilogue
-{ .mmi;	ldf.fill	f16=[r16],64
-	ldf.fill	f17=[r17],64
-	nop.i		0		}
-{ .mmi;	ldf.fill	f18=[r18],64
-	ldf.fill	f19=[r19],64
-	mov		pr=prevpr,0x1ffff	};;
-{ .mmi;	ldf.fill	f20=[r16]
-	ldf.fill	f21=[r17]
-	mov		ar.lc=prevlc	}
-{ .mmi;	ldf.fill	f22=[r18]
-	ldf.fill	f23=[r19]
-	mov		ret0=1		}	// signal "handled"
-{ .mib;	rum		1<<5
-	.restore	sp
-	mov		sp=prevsp
-	br.ret.sptk.many	b0	};;
-.endp	bn_mul_mont_8#
-
-.type	copyright#,\@object
-copyright:
-stringz	"Montgomery multiplication for IA-64, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-$output=shift and open STDOUT,">$output";
-print $code;
-close STDOUT;

+ 0 - 1555
drivers/builtin_openssl2/crypto/bn/asm/ia64.S

@@ -1,1555 +0,0 @@
-.explicit
-.text
-.ident	"ia64.S, Version 2.1"
-.ident	"IA-64 ISA artwork by Andy Polyakov <[email protected]>"
-
-//
-// ====================================================================
-// Written by Andy Polyakov <[email protected]> for the OpenSSL
-// project.
-//
-// Rights for redistribution and usage in source and binary forms are
-// granted according to the OpenSSL license. Warranty of any kind is
-// disclaimed.
-// ====================================================================
-//
-// Version 2.x is an Itanium2 re-tune. A few words about how Itanium2
-// differs from Itanium from this module's viewpoint. Most notably, is
-// it "wider" than Itanium? Does it show the loop scalability discussed
-// in the commentary sections? Not really:-( Itanium2 has 6 integer ALU
-// ports, i.e. it's 2 ports wider, but that's not enough to spin twice
-// as fast, as I need 8 IALU ports. The number of floating point ports
-// is the same, i.e. 2, while I need 4. In other words, to this module
-// Itanium2 remains effectively as "wide" as Itanium. Yet it's
-// essentially different with respect to this module, and a re-tune was
-// required, because some instruction latencies have changed. Most
-// noticeable are those of the intensively used instructions:
-//
-//			Itanium	Itanium2
-//	ldf8		9	6		L2 hit
-//	ld8		2	1		L1 hit
-//	getf		2	5
-//	xma[->getf]	7[+1]	4[+0]
-//	add[->st8]	1[+1]	1[+0]
-//
-// What does it mean? You might ratiocinate that the original code
-// should simply run faster, because the sum of latencies is smaller...
-// Wrong! Note that the getf latency increased. This means that if a
-// loop is scheduled for lower latency (as they were), then it will
-// suffer from a stall condition and the code will therefore turn
-// anti-scalable; e.g. the original bn_mul_words spun at 5*n, or 2.5
-// times slower than expected, on Itanium2! What to do? Reschedule the
-// loops for Itanium2? But then Itanium would exhibit anti-scalability.
-// So I've chosen to reschedule for the worst-case latency of every
-// instruction, aiming for the best *all-round* performance.
-
-// Q.	How much faster does it get?
-// A.	Here is the output from 'openssl speed rsa dsa' for vanilla
-//	0.9.6a compiled with gcc version 2.96 20000731 (Red Hat
-//	Linux 7.1 2.96-81):
-//
-//	                  sign    verify    sign/s verify/s
-//	rsa  512 bits   0.0036s   0.0003s    275.3   2999.2
-//	rsa 1024 bits   0.0203s   0.0011s     49.3    894.1
-//	rsa 2048 bits   0.1331s   0.0040s      7.5    250.9
-//	rsa 4096 bits   0.9270s   0.0147s      1.1     68.1
-//	                  sign    verify    sign/s verify/s
-//	dsa  512 bits   0.0035s   0.0043s    288.3    234.8
-//	dsa 1024 bits   0.0111s   0.0135s     90.0     74.2
-//
-//	And here is similar output but for this assembler
-//	implementation:-)
-//
-//	                  sign    verify    sign/s verify/s
-//	rsa  512 bits   0.0021s   0.0001s    549.4   9638.5
-//	rsa 1024 bits   0.0055s   0.0002s    183.8   4481.1
-//	rsa 2048 bits   0.0244s   0.0006s     41.4   1726.3
-//	rsa 4096 bits   0.1295s   0.0018s      7.7    561.5
-//	                  sign    verify    sign/s verify/s
-//	dsa  512 bits   0.0012s   0.0013s    891.9    756.6
-//	dsa 1024 bits   0.0023s   0.0028s    440.4    376.2
-//	
-//	Yes, you may argue that it's not fair comparison as it's
-//	possible to craft the C implementation with BN_UMULT_HIGH
-//	inline assembler macro. But of course! Here is the output
-//	with the macro:
-//
-//	                  sign    verify    sign/s verify/s
-//	rsa  512 bits   0.0020s   0.0002s    495.0   6561.0
-//	rsa 1024 bits   0.0086s   0.0004s    116.2   2235.7
-//	rsa 2048 bits   0.0519s   0.0015s     19.3    667.3
-//	rsa 4096 bits   0.3464s   0.0053s      2.9    187.7
-//	                  sign    verify    sign/s verify/s
-//	dsa  512 bits   0.0016s   0.0020s    613.1    510.5
-//	dsa 1024 bits   0.0045s   0.0054s    221.0    183.9
-//
-//	My code is still way faster, huh:-) And I believe that even
-//	higher performance can be achieved. Note that as keys get
-//	longer, the performance gain is larger. Why? According to the
-//	profiler there is another player in the field, namely
-//	BN_from_montgomery, consuming a larger and larger portion of
-//	CPU time as the key size decreases. I therefore consider
-//	putting effort into an assembler implementation of the
-//	following routine:
-//
-//	void bn_mul_add_mont (BN_ULONG *rp,BN_ULONG *np,int nl,BN_ULONG n0)
-//	{
-//	int      i,j;
-//	BN_ULONG v,*nrp=&rp[nl];	/* nrp runs nl words ahead of rp */
-//
-//	for (i=0; i<nl; i++)
-//		{
-//		v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
-//		nrp++;
-//		rp++;
-//		if (((nrp[-1]+=v)&BN_MASK2) < v)
-//			for (j=0; ((++nrp[j])&BN_MASK2) == 0; j++) ;
-//		}
-//	}
-//
-//	It might also be beneficial to implement even the combaX
-//	variants, as it appears they could literally unleash the
-//	performance (see the comment section of bn_mul_comba8 below).
-//
-//	And finally, for your reference, the output for 0.9.6a compiled
-//	with SGIcc version 0.01.0-12 (keep in mind that at the moment
-//	of this writing it's not possible to convince SGIcc to use the
-//	BN_UMULT_HIGH inline assembler macro, yet the code is fast,
-//	i.e. for a compiler-generated one:-):
-//
-//	                  sign    verify    sign/s verify/s
-//	rsa  512 bits   0.0022s   0.0002s    452.7   5894.3
-//	rsa 1024 bits   0.0097s   0.0005s    102.7   2002.9
-//	rsa 2048 bits   0.0578s   0.0017s     17.3    600.2
-//	rsa 4096 bits   0.3838s   0.0061s      2.6    164.5
-//	                  sign    verify    sign/s verify/s
-//	dsa  512 bits   0.0018s   0.0022s    547.3    459.6
-//	dsa 1024 bits   0.0051s   0.0062s    196.6    161.3
-//
-//	Oh! Benchmarks were performed on a 733MHz Lion-class Itanium
-//	system running Redhat Linux 7.1 (very special thanks to Ray
-//	McCaffity of Williams Communications for providing an account).
-//
-// Q.	What the heck is 'rum 1<<5' at the end of every function?
-// A.	Well, by clearing the "upper FP registers written" bit of the
-//	User Mask I want to excuse the kernel from preserving the upper
-//	(f32-f128) FP register bank over a process context switch, thus
-//	minimizing bus bandwidth consumption during the switch (i.e.
-//	after a PKI operation completes and the program is off doing
-//	something else like bulk symmetric encryption). Having said
-//	this, I also want to point out that it might be a good idea
-//	to compile the whole toolkit (as well as the majority of the
-//	programs for that matter) with the -mfixed-range=f32-f127
-//	command line option. No, it doesn't prevent the compiler from
-//	writing to the upper bank, but it at least discourages it from
-//	doing so. If you don't like the idea you have the option to
-//	compile the module with -Drum=nop.m on the command line.
-//
-
-#if defined(_HPUX_SOURCE) && !defined(_LP64)
-#define	ADDP	addp4
-#else
-#define	ADDP	add
-#endif
-
-#if 1
-//
-// bn_[add|sub]_words routines.
-//
-// Loops are spinning in 2*(n+5) ticks on Itanium (provided that the
-// data reside in L1 cache, i.e. 2 ticks away). It's possible to
-// compress the epilogue and get down to 2*n+6, but at the cost of
-// scalability (the neat feature of this implementation is that it
-// shall automagically spin in n+5 on "wider" IA-64 implementations:-)
-// I consider the epilogue short enough as it is to trade a tiny
-// performance loss on Itanium for scalability.
-//
-// BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
-//
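-// For reference, a plain C model of what the loop computes -- a hedged
-// sketch; only the prototype above is taken from this file, and the
-// carry recurrence is spelled out for clarity:
-//
-//	BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp, int num)
-//	{
-//	BN_ULONG carry=0;
-//	while (num-- > 0)
-//		{
-//		BN_ULONG a=*ap++, s=a+*bp++ +carry;
-//		carry=(s<a)|(carry&(s==a));	/* wrap-around detection */
-//		*rp++=s;
-//		}
-//	return carry;	/* bn_sub_words below is symmetric, returning
-//			   the final borrow instead */
-//	}
-//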
-.global	bn_add_words#
-.proc	bn_add_words#
-.align	64
-.skip	32	// makes the loop body aligned at 64-byte boundary
-bn_add_words:
-	.prologue
-	.save	ar.pfs,r2
-{ .mii;	alloc		r2=ar.pfs,4,12,0,16
-	cmp4.le		p6,p0=r35,r0	};;
-{ .mfb;	mov		r8=r0			// return value
-(p6)	br.ret.spnt.many	b0	};;
-
-{ .mib;	sub		r10=r35,r0,1
-	.save	ar.lc,r3
-	mov		r3=ar.lc
-	brp.loop.imp	.L_bn_add_words_ctop,.L_bn_add_words_cend-16
-					}
-{ .mib;	ADDP		r14=0,r32		// rp
-	.save	pr,r9
-	mov		r9=pr		};;
-	.body
-{ .mii;	ADDP		r15=0,r33		// ap
-	mov		ar.lc=r10
-	mov		ar.ec=6		}
-{ .mib;	ADDP		r16=0,r34		// bp
-	mov		pr.rot=1<<16	};;
-
-.L_bn_add_words_ctop:
-{ .mii;	(p16)	ld8		r32=[r16],8	  // b=*(bp++)
-	(p18)	add		r39=r37,r34
-	(p19)	cmp.ltu.unc	p56,p0=r40,r38	}
-{ .mfb;	(p0)	nop.m		0x0
-	(p0)	nop.f		0x0
-	(p0)	nop.b		0x0		}
-{ .mii;	(p16)	ld8		r35=[r15],8	  // a=*(ap++)
-	(p58)	cmp.eq.or	p57,p0=-1,r41	  // (p20)
-	(p58)	add		r41=1,r41	} // (p20)
-{ .mfb;	(p21)	st8		[r14]=r42,8	  // *(rp++)=r
-	(p0)	nop.f		0x0
-	br.ctop.sptk	.L_bn_add_words_ctop	};;
-.L_bn_add_words_cend:
-
-{ .mii;
-(p59)	add		r8=1,r8		// return value
-	mov		pr=r9,0x1ffff
-	mov		ar.lc=r3	}
-{ .mbb;	nop.b		0x0
-	br.ret.sptk.many	b0	};;
-.endp	bn_add_words#
-
-//
-// BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
-//
-.global	bn_sub_words#
-.proc	bn_sub_words#
-.align	64
-.skip	32	// makes the loop body aligned at 64-byte boundary
-bn_sub_words:
-	.prologue
-	.save	ar.pfs,r2
-{ .mii;	alloc		r2=ar.pfs,4,12,0,16
-	cmp4.le		p6,p0=r35,r0	};;
-{ .mfb;	mov		r8=r0			// return value
-(p6)	br.ret.spnt.many	b0	};;
-
-{ .mib;	sub		r10=r35,r0,1
-	.save	ar.lc,r3
-	mov		r3=ar.lc
-	brp.loop.imp	.L_bn_sub_words_ctop,.L_bn_sub_words_cend-16
-					}
-{ .mib;	ADDP		r14=0,r32		// rp
-	.save	pr,r9
-	mov		r9=pr		};;
-	.body
-{ .mii;	ADDP		r15=0,r33		// ap
-	mov		ar.lc=r10
-	mov		ar.ec=6		}
-{ .mib;	ADDP		r16=0,r34		// bp
-	mov		pr.rot=1<<16	};;
-
-.L_bn_sub_words_ctop:
-{ .mii;	(p16)	ld8		r32=[r16],8	  // b=*(bp++)
-	(p18)	sub		r39=r37,r34
-	(p19)	cmp.gtu.unc	p56,p0=r40,r38	}
-{ .mfb;	(p0)	nop.m		0x0
-	(p0)	nop.f		0x0
-	(p0)	nop.b		0x0		}
-{ .mii;	(p16)	ld8		r35=[r15],8	  // a=*(ap++)
-	(p58)	cmp.eq.or	p57,p0=0,r41	  // (p20)
-	(p58)	add		r41=-1,r41	} // (p20)
-{ .mbb;	(p21)	st8		[r14]=r42,8	  // *(rp++)=r
-	(p0)	nop.b		0x0
-	br.ctop.sptk	.L_bn_sub_words_ctop	};;
-.L_bn_sub_words_cend:
-
-{ .mii;
-(p59)	add		r8=1,r8		// return value
-	mov		pr=r9,0x1ffff
-	mov		ar.lc=r3	}
-{ .mbb;	nop.b		0x0
-	br.ret.sptk.many	b0	};;
-.endp	bn_sub_words#
-#endif
-
-#if 0
-#define XMA_TEMPTATION
-#endif
-
-#if 1
-//
-// BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
-//
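-// In C terms the routine computes rp[]=ap[]*w plus a running carry and
-// returns the final carry word. A hedged sketch, assuming a 64-bit
-// BN_ULONG and a 128-bit intermediate such as gcc's unsigned __int128:
-//
-//	BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
-//	{
-//	BN_ULONG carry=0;
-//	while (num-- > 0)
-//		{
-//		unsigned __int128 t=(unsigned __int128)(*ap++)*w+carry;
-//		*rp++=(BN_ULONG)t;		/* low word  */
-//		carry=(BN_ULONG)(t>>64);	/* high word */
-//		}
-//	return carry;
-//	}
-//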
-.global	bn_mul_words#
-.proc	bn_mul_words#
-.align	64
-.skip	32	// makes the loop body aligned at 64-byte boundary
-bn_mul_words:
-	.prologue
-	.save	ar.pfs,r2
-#ifdef XMA_TEMPTATION
-{ .mfi;	alloc		r2=ar.pfs,4,0,0,0	};;
-#else
-{ .mfi;	alloc		r2=ar.pfs,4,12,0,16	};;
-#endif
-{ .mib;	mov		r8=r0			// return value
-	cmp4.le		p6,p0=r34,r0
-(p6)	br.ret.spnt.many	b0		};;
-
-{ .mii;	sub	r10=r34,r0,1
-	.save	ar.lc,r3
-	mov	r3=ar.lc
-	.save	pr,r9
-	mov	r9=pr			};;
-
-	.body
-{ .mib;	setf.sig	f8=r35	// w
-	mov		pr.rot=0x800001<<16
-			// ------^----- serves as (p50) at first (p27)
-	brp.loop.imp	.L_bn_mul_words_ctop,.L_bn_mul_words_cend-16
-					}
-
-#ifndef XMA_TEMPTATION
-
-{ .mmi;	ADDP		r14=0,r32	// rp
-	ADDP		r15=0,r33	// ap
-	mov		ar.lc=r10	}
-{ .mmi;	mov		r40=0		// serves as r35 at first (p27)
-	mov		ar.ec=13	};;
-
-// This loop spins in 2*(n+12) ticks. It's scheduled for data in the
-// Itanium L2 cache (i.e. 9 ticks away), as floating point load/store
-// instructions bypass the L1 cache and L2 latency is actually the
-// best-case scenario for ldf8. The loop is not scalable and shall run
-// in 2*(n+12) even on "wider" IA-64 implementations. It's a trade-off
-// here. An n+24 loop would give us ~5% in *overall* performance
-// improvement on "wider" IA-64, but would hurt Itanium by about the
-// same because of the longer epilogue. As it's a matter of a few
-// percent in either case, I've chosen to trade scalability for
-// development time (you can see this very instruction sequence in the
-// bn_mul_add_words loop, which in turn is scalable).
-.L_bn_mul_words_ctop:
-{ .mfi;	(p25)	getf.sig	r36=f52			// low
-	(p21)	xmpy.lu		f48=f37,f8
-	(p28)	cmp.ltu		p54,p50=r41,r39	}
-{ .mfi;	(p16)	ldf8		f32=[r15],8
-	(p21)	xmpy.hu		f40=f37,f8
-	(p0)	nop.i		0x0		};;
-{ .mii;	(p25)	getf.sig	r32=f44			// high
-	.pred.rel	"mutex",p50,p54
-	(p50)	add		r40=r38,r35		// (p27)
-	(p54)	add		r40=r38,r35,1	}	// (p27)
-{ .mfb;	(p28)	st8		[r14]=r41,8
-	(p0)	nop.f		0x0
-	br.ctop.sptk	.L_bn_mul_words_ctop	};;
-.L_bn_mul_words_cend:
-
-{ .mii;	nop.m		0x0
-.pred.rel	"mutex",p51,p55
-(p51)	add		r8=r36,r0
-(p55)	add		r8=r36,r0,1	}
-{ .mfb;	nop.m	0x0
-	nop.f	0x0
-	nop.b	0x0			}
-
-#else	// XMA_TEMPTATION
-
-	setf.sig	f37=r0	// serves as carry at (p18) tick
-	mov		ar.lc=r10
-	mov		ar.ec=5;;
-
-// Most of you examining this code very likely wonder why in the name
-// of Intel the following loop is commented out. Indeed, it looks so
-// neat that you find it hard to believe that there's something wrong
-// with it, right? The catch is that every iteration depends on the
-// result from the previous one, and the latter isn't available
-// instantly. The loop therefore spins at the latency of xma minus 1,
-// or in other words at 6*(n+4) ticks:-( Compare to the "production"
-// loop above that runs in 2*(n+11), where the low-latency problem is
-// worked around by moving the dependency to the one-tick-latency
-// integer ALU. Note that the "distance" between ldf8 and xma is not
-// the latency of ldf8, but the *difference* between the xma and ldf8
-// latencies.
-.L_bn_mul_words_ctop:
-{ .mfi;	(p16)	ldf8		f32=[r33],8
-	(p18)	xma.hu		f38=f34,f8,f39	}
-{ .mfb;	(p20)	stf8		[r32]=f37,8
-	(p18)	xma.lu		f35=f34,f8,f39
-	br.ctop.sptk	.L_bn_mul_words_ctop	};;
-.L_bn_mul_words_cend:
-
-	getf.sig	r8=f41		// the return value
-
-#endif	// XMA_TEMPTATION
-
-{ .mii;	nop.m		0x0
-	mov		pr=r9,0x1ffff
-	mov		ar.lc=r3	}
-{ .mfb;	rum		1<<5		// clear um.mfh
-	nop.f		0x0
-	br.ret.sptk.many	b0	};;
-.endp	bn_mul_words#
-#endif
-
-#if 1
-//
-// BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
-//
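-// The C-level contract is rp[i]+=ap[i]*w with carry propagation -- a
-// hedged sketch under the same 64-bit BN_ULONG/__int128 assumptions as
-// the bn_mul_words model above (the sum below cannot overflow 128 bits):
-//
-//	BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
-//	{
-//	BN_ULONG carry=0;
-//	while (num-- > 0)
-//		{
-//		unsigned __int128 t=(unsigned __int128)(*ap++)*w+*rp+carry;
-//		*rp++=(BN_ULONG)t;
-//		carry=(BN_ULONG)(t>>64);
-//		}
-//	return carry;
-//	}
-//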
-.global	bn_mul_add_words#
-.proc	bn_mul_add_words#
-.align	64
-.skip	48	// makes the loop body aligned at 64-byte boundary
-bn_mul_add_words:
-	.prologue
-	.save	ar.pfs,r2
-{ .mmi;	alloc		r2=ar.pfs,4,4,0,8
-	cmp4.le		p6,p0=r34,r0
-	.save	ar.lc,r3
-	mov		r3=ar.lc	};;
-{ .mib;	mov		r8=r0		// return value
-	sub		r10=r34,r0,1
-(p6)	br.ret.spnt.many	b0	};;
-
-{ .mib;	setf.sig	f8=r35		// w
-	.save	pr,r9
-	mov		r9=pr
-	brp.loop.imp	.L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16
-					}
-	.body
-{ .mmi;	ADDP		r14=0,r32	// rp
-	ADDP		r15=0,r33	// ap
-	mov		ar.lc=r10	}
-{ .mii;	ADDP		r16=0,r32	// rp copy
-	mov		pr.rot=0x2001<<16
-			// ------^----- serves as (p40) at first (p27)
-	mov		ar.ec=11	};;
-
-// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on
-// Itanium 2. Yes, unlike previous versions it scales:-) The previous
-// version was performing *all* additions in the IALU and was starving
-// for those even on Itanium 2. In this version one addition is moved
-// to the FPU and is folded with the multiplication. This is at the
-// cost of propagating the result of the previous call to this
-// subroutine through the L2 cache... In other words, negligible even
-// for shorter keys. The *overall* performance improvement [over the
-// previous version] varies from 11 to 22 percent depending on key
-// length.
-.L_bn_mul_add_words_ctop:
-.pred.rel	"mutex",p40,p42
-{ .mfi;	(p23)	getf.sig	r36=f45			// low
-	(p20)	xma.lu		f42=f36,f8,f50		// low
-	(p40)	add		r39=r39,r35	}	// (p27)
-{ .mfi;	(p16)	ldf8		f32=[r15],8		// *(ap++)
-	(p20)	xma.hu		f36=f36,f8,f50		// high
-	(p42)	add		r39=r39,r35,1	};;	// (p27)
-{ .mmi;	(p24)	getf.sig	r32=f40			// high
-	(p16)	ldf8		f46=[r16],8		// *(rp1++)
-	(p40)	cmp.ltu		p41,p39=r39,r35	}	// (p27)
-{ .mib;	(p26)	st8		[r14]=r39,8		// *(rp2++)
-	(p42)	cmp.leu		p41,p39=r39,r35		// (p27)
-	br.ctop.sptk	.L_bn_mul_add_words_ctop};;
-.L_bn_mul_add_words_cend:
-
-{ .mmi;	.pred.rel	"mutex",p40,p42
-(p40)	add		r8=r35,r0
-(p42)	add		r8=r35,r0,1
-	mov		pr=r9,0x1ffff	}
-{ .mib;	rum		1<<5		// clear um.mfh
-	mov		ar.lc=r3
-	br.ret.sptk.many	b0	};;
-.endp	bn_mul_add_words#
-#endif
-
-#if 1
-//
-// void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
-//
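-// Each input word yields one 128-bit square stored as two output words.
-// A hedged C sketch, under the same BN_ULONG/__int128 assumptions as
-// the models above:
-//
-//	void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
-//	{
-//	while (num-- > 0)
-//		{
-//		unsigned __int128 t=(unsigned __int128)(*ap)*(*ap);
-//		ap++;
-//		*rp++=(BN_ULONG)t;		/* low word  */
-//		*rp++=(BN_ULONG)(t>>64);	/* high word */
-//		}
-//	}
-//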
-.global	bn_sqr_words#
-.proc	bn_sqr_words#
-.align	64
-.skip	32	// makes the loop body aligned at 64-byte boundary 
-bn_sqr_words:
-	.prologue
-	.save	ar.pfs,r2
-{ .mii;	alloc		r2=ar.pfs,3,0,0,0
-	sxt4		r34=r34		};;
-{ .mii;	cmp.le		p6,p0=r34,r0
-	mov		r8=r0		}	// return value
-{ .mfb;	ADDP		r32=0,r32
-	nop.f		0x0
-(p6)	br.ret.spnt.many	b0	};;
-
-{ .mii;	sub	r10=r34,r0,1
-	.save	ar.lc,r3
-	mov	r3=ar.lc
-	.save	pr,r9
-	mov	r9=pr			};;
-
-	.body
-{ .mib;	ADDP		r33=0,r33
-	mov		pr.rot=1<<16
-	brp.loop.imp	.L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16
-					}
-{ .mii;	add		r34=8,r32
-	mov		ar.lc=r10
-	mov		ar.ec=18	};;
-
-// 2*(n+17) on Itanium, (n+17) on "wider" IA-64 implementations. It's
-// possible to compress the epilogue (I'm getting tired of writing this
-// comment over and over) and get down to 2*n+16 at the cost of
-// scalability. The decision will very likely be reconsidered after the
-// benchmark program is profiled. I.e. if the performance gain on
-// Itanium appears larger than the loss on "wider" IA-64, then the loop
-// should be explicitly split and the epilogue compressed.
-.L_bn_sqr_words_ctop:
-{ .mfi;	(p16)	ldf8		f32=[r33],8
-	(p25)	xmpy.lu		f42=f41,f41
-	(p0)	nop.i		0x0		}
-{ .mib;	(p33)	stf8		[r32]=f50,16
-	(p0)	nop.i		0x0
-	(p0)	nop.b		0x0		}
-{ .mfi;	(p0)	nop.m		0x0
-	(p25)	xmpy.hu		f52=f41,f41
-	(p0)	nop.i		0x0		}
-{ .mib;	(p33)	stf8		[r34]=f60,16
-	(p0)	nop.i		0x0
-	br.ctop.sptk	.L_bn_sqr_words_ctop	};;
-.L_bn_sqr_words_cend:
-
-{ .mii;	nop.m		0x0
-	mov		pr=r9,0x1ffff
-	mov		ar.lc=r3	}
-{ .mfb;	rum		1<<5		// clear um.mfh
-	nop.f		0x0
-	br.ret.sptk.many	b0	};;
-.endp	bn_sqr_words#
-#endif
-
-#if 1
-// Apparently we win nothing by implementing a special bn_sqr_comba8.
-// Yes, it is possible to reduce the number of multiplications by
-// almost a factor of two, but then the number of additions would
-// increase by a factor of two (as we would have to perform ourselves
-// those otherwise performed by xma). Normally we would trade anyway,
-// as multiplications are way more expensive, but not this time... The
-// multiplication kernel is fully pipelined, and as we drain one
-// 128-bit multiplication result per clock cycle, multiplications are
-// effectively as inexpensive as additions. A special implementation
-// might become of interest for "wider" IA-64 implementations, as
-// you'll be able to get through the multiplication phase faster (there
-// won't be any stall issues as discussed in the commentary section
-// below, and you'll therefore be able to employ all 4 FP units)... But
-// in these Itanium days it's simply too hard to justify the effort, so
-// I just drop down to the bn_mul_comba8 code:-)
-//
-// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
-//
-.global	bn_sqr_comba8#
-.proc	bn_sqr_comba8#
-.align	64
-bn_sqr_comba8:
-	.prologue
-	.save	ar.pfs,r2
-#if defined(_HPUX_SOURCE) && !defined(_LP64)
-{ .mii;	alloc	r2=ar.pfs,2,1,0,0
-	addp4	r33=0,r33
-	addp4	r32=0,r32		};;
-{ .mii;
-#else
-{ .mii;	alloc	r2=ar.pfs,2,1,0,0
-#endif
-	mov	r34=r33
-	add	r14=8,r33		};;
-	.body
-{ .mii;	add	r17=8,r34
-	add	r15=16,r33
-	add	r18=16,r34		}
-{ .mfb;	add	r16=24,r33
-	br	.L_cheat_entry_point8	};;
-.endp	bn_sqr_comba8#
-#endif
-
-#if 1
-// I've estimated this routine to run in ~120 ticks, but in reality
-// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra
-// cycles consumed by instruction fetch? Or did I misinterpret some
-// clause in the Itanium µ-architecture manual? Comments are welcomed
-// and highly appreciated.
-//
-// On Itanium 2 it takes ~190 ticks. This is because of stalls on the
-// result from getf.sig. I do nothing about it at this point, for
-// reasons depicted below.
-//
-// However! It should be noted that even 160 ticks is a darn good
-// result, as it's over 10 (yes, ten, spelled as t-e-n) times faster
-// than the C version (compiled with gcc with the inline assembler
-// macro). I really kicked the compiler's butt here, didn't I? Yeah!
-// This brings us to the following statement. It's a damn shame that
-// this routine isn't called very often nowadays! According to the
-// profiler most CPU time is consumed by bn_mul_add_words called from
-// BN_from_montgomery. In order to estimate what we're missing, I've
-// compared the performance of this routine against a "traditional"
-// implementation, i.e. against the following routine:
-//
-// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
-// {	r[ 8]=bn_mul_words(    &(r[0]),a,8,b[0]);
-//	r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
-//	r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
-//	r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
-//	r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
-//	r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
-//	r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
-//	r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
-// }
-//
-// The one below is over 8 times faster than the one above:-( Even
-// more reason to "combafy" bn_mul_add_mont...
-//
-// And yes, this routine really made me wish there were an optimizing
-// assembler! It also feels like it deserves a dedication.
-//
-//	To my wife for being there and to my kids...
-//
-// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
-//
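-// The bundles below implement the comba pattern: result word k collects
-// every partial product a[i]*b[j] with i+j==k, and the spill beyond 128
-// bits is tracked in a third carry word -- carry1/carry2/carry3 in the
-// register mapping below. A hedged C model, under the same __int128
-// assumption as above:
-//
-//	int i,k;
-//	unsigned __int128 acc=0;
-//	BN_ULONG c2=0;
-//	for (k=0; k<15; k++)
-//		{
-//		for (i=0; i<8; i++)
-//			{
-//			if (k-i<0 || k-i>7) continue;
-//			unsigned __int128 p=(unsigned __int128)a[i]*b[k-i];
-//			acc+=p;
-//			c2+=(acc<p);		/* 128-bit wrap-around */
-//			}
-//		r[k]=(BN_ULONG)acc;
-//		acc=(acc>>64)|((unsigned __int128)c2<<64);
-//		c2=0;
-//		}
-//	r[15]=(BN_ULONG)acc;
-//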
-#define	carry1	r14
-#define	carry2	r15
-#define	carry3	r34
-.global	bn_mul_comba8#
-.proc	bn_mul_comba8#
-.align	64
-bn_mul_comba8:
-	.prologue
-	.save	ar.pfs,r2
-#if defined(_HPUX_SOURCE) && !defined(_LP64)
-{ .mii;	alloc	r2=ar.pfs,3,0,0,0
-	addp4	r33=0,r33
-	addp4	r34=0,r34		};;
-{ .mii;	addp4	r32=0,r32
-#else
-{ .mii;	alloc   r2=ar.pfs,3,0,0,0
-#endif
-	add	r14=8,r33
-	add	r17=8,r34		}
-	.body
-{ .mii;	add	r15=16,r33
-	add	r18=16,r34
-	add	r16=24,r33		}
-.L_cheat_entry_point8:
-{ .mmi;	add	r19=24,r34
-
-	ldf8	f32=[r33],32		};;
-
-{ .mmi;	ldf8	f120=[r34],32
-	ldf8	f121=[r17],32		}
-{ .mmi;	ldf8	f122=[r18],32
-	ldf8	f123=[r19],32		};;
-{ .mmi;	ldf8	f124=[r34]
-	ldf8	f125=[r17]		}
-{ .mmi;	ldf8	f126=[r18]
-	ldf8	f127=[r19]		}
-
-{ .mmi;	ldf8	f33=[r14],32
-	ldf8	f34=[r15],32		}
-{ .mmi;	ldf8	f35=[r16],32;;
-	ldf8	f36=[r33]		}
-{ .mmi;	ldf8	f37=[r14]
-	ldf8	f38=[r15]		}
-{ .mfi;	ldf8	f39=[r16]
-// -------\ Entering multiplier's heaven /-------
-// ------------\                    /------------
-// -----------------\          /-----------------
-// ----------------------\/----------------------
-		xma.hu	f41=f32,f120,f0		}
-{ .mfi;		xma.lu	f40=f32,f120,f0		};; // (*)
-{ .mfi;		xma.hu	f51=f32,f121,f0		}
-{ .mfi;		xma.lu	f50=f32,f121,f0		};;
-{ .mfi;		xma.hu	f61=f32,f122,f0		}
-{ .mfi;		xma.lu	f60=f32,f122,f0		};;
-{ .mfi;		xma.hu	f71=f32,f123,f0		}
-{ .mfi;		xma.lu	f70=f32,f123,f0		};;
-{ .mfi;		xma.hu	f81=f32,f124,f0		}
-{ .mfi;		xma.lu	f80=f32,f124,f0		};;
-{ .mfi;		xma.hu	f91=f32,f125,f0		}
-{ .mfi;		xma.lu	f90=f32,f125,f0		};;
-{ .mfi;		xma.hu	f101=f32,f126,f0	}
-{ .mfi;		xma.lu	f100=f32,f126,f0	};;
-{ .mfi;		xma.hu	f111=f32,f127,f0	}
-{ .mfi;		xma.lu	f110=f32,f127,f0	};;//
-// (*)	You can argue that splitting at every second bundle would
-//	prevent "wider" IA-64 implementations from achieving the peak
-//	performance. Well, not really... The catch is that if you
-//	intend to keep 4 FP units busy by splitting at every fourth
-//	bundle and thus perform these 16 multiplications in 4 ticks,
-//	the first bundle *below* would stall because the result from
-//	the first xma bundle *above* won't be available for another 3
-//	ticks (if not more; being an optimist, I assume that "wider"
-//	implementations will have the same latency:-). This stall will hold
-//	you back and the performance would be as if every second bundle
-//	were split *anyway*...
-{ .mfi;	getf.sig	r16=f40
-		xma.hu	f42=f33,f120,f41
-	add		r33=8,r32		}
-{ .mfi;		xma.lu	f41=f33,f120,f41	};;
-{ .mfi;	getf.sig	r24=f50
-		xma.hu	f52=f33,f121,f51	}
-{ .mfi;		xma.lu	f51=f33,f121,f51	};;
-{ .mfi;	st8		[r32]=r16,16
-		xma.hu	f62=f33,f122,f61	}
-{ .mfi;		xma.lu	f61=f33,f122,f61	};;
-{ .mfi;		xma.hu	f72=f33,f123,f71	}
-{ .mfi;		xma.lu	f71=f33,f123,f71	};;
-{ .mfi;		xma.hu	f82=f33,f124,f81	}
-{ .mfi;		xma.lu	f81=f33,f124,f81	};;
-{ .mfi;		xma.hu	f92=f33,f125,f91	}
-{ .mfi;		xma.lu	f91=f33,f125,f91	};;
-{ .mfi;		xma.hu	f102=f33,f126,f101	}
-{ .mfi;		xma.lu	f101=f33,f126,f101	};;
-{ .mfi;		xma.hu	f112=f33,f127,f111	}
-{ .mfi;		xma.lu	f111=f33,f127,f111	};;//
-//-------------------------------------------------//
-{ .mfi;	getf.sig	r25=f41
-		xma.hu	f43=f34,f120,f42	}
-{ .mfi;		xma.lu	f42=f34,f120,f42	};;
-{ .mfi;	getf.sig	r16=f60
-		xma.hu	f53=f34,f121,f52	}
-{ .mfi;		xma.lu	f52=f34,f121,f52	};;
-{ .mfi;	getf.sig	r17=f51
-		xma.hu	f63=f34,f122,f62
-	add		r25=r25,r24		}
-{ .mfi;		xma.lu	f62=f34,f122,f62
-	mov		carry1=0		};;
-{ .mfi;	cmp.ltu		p6,p0=r25,r24
-		xma.hu	f73=f34,f123,f72	}
-{ .mfi;		xma.lu	f72=f34,f123,f72	};;
-{ .mfi;	st8		[r33]=r25,16
-		xma.hu	f83=f34,f124,f82
-(p6)	add		carry1=1,carry1		}
-{ .mfi;		xma.lu	f82=f34,f124,f82	};;
-{ .mfi;		xma.hu	f93=f34,f125,f92	}
-{ .mfi;		xma.lu	f92=f34,f125,f92	};;
-{ .mfi;		xma.hu	f103=f34,f126,f102	}
-{ .mfi;		xma.lu	f102=f34,f126,f102	};;
-{ .mfi;		xma.hu	f113=f34,f127,f112	}
-{ .mfi;		xma.lu	f112=f34,f127,f112	};;//
-//-------------------------------------------------//
-{ .mfi;	getf.sig	r18=f42
-		xma.hu	f44=f35,f120,f43
-	add		r17=r17,r16		}
-{ .mfi;		xma.lu	f43=f35,f120,f43	};;
-{ .mfi;	getf.sig	r24=f70
-		xma.hu	f54=f35,f121,f53	}
-{ .mfi;	mov		carry2=0
-		xma.lu	f53=f35,f121,f53	};;
-{ .mfi;	getf.sig	r25=f61
-		xma.hu	f64=f35,f122,f63
-	cmp.ltu		p7,p0=r17,r16		}
-{ .mfi;	add		r18=r18,r17
-		xma.lu	f63=f35,f122,f63	};;
-{ .mfi;	getf.sig	r26=f52
-		xma.hu	f74=f35,f123,f73
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r18,r17
-		xma.lu	f73=f35,f123,f73
-	add		r18=r18,carry1		};;
-{ .mfi;
-		xma.hu	f84=f35,f124,f83
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r18,carry1
-		xma.lu	f83=f35,f124,f83	};;
-{ .mfi;	st8		[r32]=r18,16
-		xma.hu	f94=f35,f125,f93
-(p7)	add		carry2=1,carry2		}
-{ .mfi;		xma.lu	f93=f35,f125,f93	};;
-{ .mfi;		xma.hu	f104=f35,f126,f103	}
-{ .mfi;		xma.lu	f103=f35,f126,f103	};;
-{ .mfi;		xma.hu	f114=f35,f127,f113	}
-{ .mfi;	mov		carry1=0
-		xma.lu	f113=f35,f127,f113
-	add		r25=r25,r24		};;//
-//-------------------------------------------------//
-{ .mfi;	getf.sig	r27=f43
-		xma.hu	f45=f36,f120,f44
-	cmp.ltu		p6,p0=r25,r24		}
-{ .mfi;		xma.lu	f44=f36,f120,f44	
-	add		r26=r26,r25		};;
-{ .mfi;	getf.sig	r16=f80
-		xma.hu	f55=f36,f121,f54
-(p6)	add		carry1=1,carry1		}
-{ .mfi;		xma.lu	f54=f36,f121,f54	};;
-{ .mfi;	getf.sig	r17=f71
-		xma.hu	f65=f36,f122,f64
-	cmp.ltu		p6,p0=r26,r25		}
-{ .mfi;		xma.lu	f64=f36,f122,f64
-	add		r27=r27,r26		};;
-{ .mfi;	getf.sig	r18=f62
-		xma.hu	f75=f36,f123,f74
-(p6)	add		carry1=1,carry1		}
-{ .mfi;	cmp.ltu		p6,p0=r27,r26
-		xma.lu	f74=f36,f123,f74
-	add		r27=r27,carry2		};;
-{ .mfi;	getf.sig	r19=f53
-		xma.hu	f85=f36,f124,f84
-(p6)	add		carry1=1,carry1		}
-{ .mfi;		xma.lu	f84=f36,f124,f84
-	cmp.ltu		p6,p0=r27,carry2	};;
-{ .mfi;	st8		[r33]=r27,16
-		xma.hu	f95=f36,f125,f94
-(p6)	add		carry1=1,carry1		}
-{ .mfi;		xma.lu	f94=f36,f125,f94	};;
-{ .mfi;		xma.hu	f105=f36,f126,f104	}
-{ .mfi;	mov		carry2=0
-		xma.lu	f104=f36,f126,f104
-	add		r17=r17,r16		};;
-{ .mfi;		xma.hu	f115=f36,f127,f114
-	cmp.ltu		p7,p0=r17,r16		}
-{ .mfi;		xma.lu	f114=f36,f127,f114
-	add		r18=r18,r17		};;//
-//-------------------------------------------------//
-{ .mfi;	getf.sig	r20=f44
-		xma.hu	f46=f37,f120,f45
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r18,r17
-		xma.lu	f45=f37,f120,f45
-	add		r19=r19,r18		};;
-{ .mfi;	getf.sig	r24=f90
-		xma.hu	f56=f37,f121,f55	}
-{ .mfi;		xma.lu	f55=f37,f121,f55	};;
-{ .mfi;	getf.sig	r25=f81
-		xma.hu	f66=f37,f122,f65
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r19,r18
-		xma.lu	f65=f37,f122,f65
-	add		r20=r20,r19		};;
-{ .mfi;	getf.sig	r26=f72
-		xma.hu	f76=f37,f123,f75
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r20,r19
-		xma.lu	f75=f37,f123,f75
-	add		r20=r20,carry1		};;
-{ .mfi;	getf.sig	r27=f63
-		xma.hu	f86=f37,f124,f85
-(p7)	add		carry2=1,carry2		}
-{ .mfi;		xma.lu	f85=f37,f124,f85
-	cmp.ltu		p7,p0=r20,carry1	};;
-{ .mfi;	getf.sig	r28=f54
-		xma.hu	f96=f37,f125,f95
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	st8		[r32]=r20,16
-		xma.lu	f95=f37,f125,f95	};;
-{ .mfi;		xma.hu	f106=f37,f126,f105	}
-{ .mfi;	mov		carry1=0
-		xma.lu	f105=f37,f126,f105
-	add		r25=r25,r24		};;
-{ .mfi;		xma.hu	f116=f37,f127,f115
-	cmp.ltu		p6,p0=r25,r24		}
-{ .mfi;		xma.lu	f115=f37,f127,f115
-	add		r26=r26,r25		};;//
-//-------------------------------------------------//
-{ .mfi;	getf.sig	r29=f45
-		xma.hu	f47=f38,f120,f46
-(p6)	add		carry1=1,carry1		}
-{ .mfi;	cmp.ltu		p6,p0=r26,r25
-		xma.lu	f46=f38,f120,f46
-	add		r27=r27,r26		};;
-{ .mfi;	getf.sig	r16=f100
-		xma.hu	f57=f38,f121,f56
-(p6)	add		carry1=1,carry1		}
-{ .mfi;	cmp.ltu		p6,p0=r27,r26
-		xma.lu	f56=f38,f121,f56
-	add		r28=r28,r27		};;
-{ .mfi;	getf.sig	r17=f91
-		xma.hu	f67=f38,f122,f66
-(p6)	add		carry1=1,carry1		}
-{ .mfi;	cmp.ltu		p6,p0=r28,r27
-		xma.lu	f66=f38,f122,f66
-	add		r29=r29,r28		};;
-{ .mfi;	getf.sig	r18=f82
-		xma.hu	f77=f38,f123,f76
-(p6)	add		carry1=1,carry1		}
-{ .mfi;	cmp.ltu		p6,p0=r29,r28
-		xma.lu	f76=f38,f123,f76
-	add		r29=r29,carry2		};;
-{ .mfi;	getf.sig	r19=f73
-		xma.hu	f87=f38,f124,f86
-(p6)	add		carry1=1,carry1		}
-{ .mfi;		xma.lu	f86=f38,f124,f86
-	cmp.ltu		p6,p0=r29,carry2	};;
-{ .mfi;	getf.sig	r20=f64
-		xma.hu	f97=f38,f125,f96
-(p6)	add		carry1=1,carry1		}
-{ .mfi;	st8		[r33]=r29,16
-		xma.lu	f96=f38,f125,f96	};;
-{ .mfi;	getf.sig	r21=f55
-		xma.hu	f107=f38,f126,f106	}
-{ .mfi;	mov		carry2=0
-		xma.lu	f106=f38,f126,f106
-	add		r17=r17,r16		};;
-{ .mfi;		xma.hu	f117=f38,f127,f116
-	cmp.ltu		p7,p0=r17,r16		}
-{ .mfi;		xma.lu	f116=f38,f127,f116
-	add		r18=r18,r17		};;//
-//-------------------------------------------------//
-{ .mfi;	getf.sig	r22=f46
-		xma.hu	f48=f39,f120,f47
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r18,r17
-		xma.lu	f47=f39,f120,f47
-	add		r19=r19,r18		};;
-{ .mfi;	getf.sig	r24=f110
-		xma.hu	f58=f39,f121,f57
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r19,r18
-		xma.lu	f57=f39,f121,f57
-	add		r20=r20,r19		};;
-{ .mfi;	getf.sig	r25=f101
-		xma.hu	f68=f39,f122,f67
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r20,r19
-		xma.lu	f67=f39,f122,f67
-	add		r21=r21,r20		};;
-{ .mfi;	getf.sig	r26=f92
-		xma.hu	f78=f39,f123,f77
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r21,r20
-		xma.lu	f77=f39,f123,f77
-	add		r22=r22,r21		};;
-{ .mfi;	getf.sig	r27=f83
-		xma.hu	f88=f39,f124,f87
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	cmp.ltu		p7,p0=r22,r21
-		xma.lu	f87=f39,f124,f87
-	add		r22=r22,carry1		};;
-{ .mfi;	getf.sig	r28=f74
-		xma.hu	f98=f39,f125,f97
-(p7)	add		carry2=1,carry2		}
-{ .mfi;		xma.lu	f97=f39,f125,f97
-	cmp.ltu		p7,p0=r22,carry1	};;
-{ .mfi;	getf.sig	r29=f65
-		xma.hu	f108=f39,f126,f107
-(p7)	add		carry2=1,carry2		}
-{ .mfi;	st8		[r32]=r22,16
-		xma.lu	f107=f39,f126,f107	};;
-{ .mfi;	getf.sig	r30=f56
-		xma.hu	f118=f39,f127,f117	}
-{ .mfi;		xma.lu	f117=f39,f127,f117	};;//
-//-------------------------------------------------//
-// Leaving multiplier's heaven... Quite a ride, huh?
-
-{ .mii;	getf.sig	r31=f47
-	add		r25=r25,r24
-	mov		carry1=0		};;
-{ .mii;		getf.sig	r16=f111
-	cmp.ltu		p6,p0=r25,r24
-	add		r26=r26,r25		};;
-{ .mfb;		getf.sig	r17=f102	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r26,r25
-	add		r27=r27,r26		};;
-{ .mfb;	nop.m	0x0				}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r27,r26
-	add		r28=r28,r27		};;
-{ .mii;		getf.sig	r18=f93
-		add		r17=r17,r16
-		mov		carry3=0	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r28,r27
-	add		r29=r29,r28		};;
-{ .mii;		getf.sig	r19=f84
-		cmp.ltu		p7,p0=r17,r16	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r29,r28
-	add		r30=r30,r29		};;
-{ .mii;		getf.sig	r20=f75
-		add		r18=r18,r17	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r30,r29
-	add		r31=r31,r30		};;
-{ .mfb;		getf.sig	r21=f66		}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r18,r17
-		add		r19=r19,r18	}
-{ .mfb;	nop.m	0x0				}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r31,r30
-	add		r31=r31,carry2		};;
-{ .mfb;		getf.sig	r22=f57		}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r19,r18
-		add		r20=r20,r19	}
-{ .mfb;	nop.m	0x0				}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r31,carry2	};;
-{ .mfb;		getf.sig	r23=f48		}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r20,r19
-		add		r21=r21,r20	}
-{ .mii;
-(p6)	add		carry1=1,carry1		}
-{ .mfb;	st8		[r33]=r31,16		};;
-
-{ .mfb;	getf.sig	r24=f112		}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r21,r20
-		add		r22=r22,r21	};;
-{ .mfb;	getf.sig	r25=f103		}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r22,r21
-		add		r23=r23,r22	};;
-{ .mfb;	getf.sig	r26=f94			}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r23,r22
-		add		r23=r23,carry1	};;
-{ .mfb;	getf.sig	r27=f85			}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p8=r23,carry1};;
-{ .mii;	getf.sig	r28=f76
-	add		r25=r25,r24
-	mov		carry1=0		}
-{ .mii;		st8		[r32]=r23,16
-	(p7)	add		carry2=1,carry3
-	(p8)	add		carry2=0,carry3	};;
-
-{ .mfb;	nop.m	0x0				}
-{ .mii;	getf.sig	r29=f67
-	cmp.ltu		p6,p0=r25,r24
-	add		r26=r26,r25		};;
-{ .mfb;	getf.sig	r30=f58			}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r26,r25
-	add		r27=r27,r26		};;
-{ .mfb;		getf.sig	r16=f113	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r27,r26
-	add		r28=r28,r27		};;
-{ .mfb;		getf.sig	r17=f104	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r28,r27
-	add		r29=r29,r28		};;
-{ .mfb;		getf.sig	r18=f95		}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r29,r28
-	add		r30=r30,r29		};;
-{ .mii;		getf.sig	r19=f86
-		add		r17=r17,r16
-		mov		carry3=0	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r30,r29
-	add		r30=r30,carry2		};;
-{ .mii;		getf.sig	r20=f77
-		cmp.ltu		p7,p0=r17,r16
-		add		r18=r18,r17	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r30,carry2	};;
-{ .mfb;		getf.sig	r21=f68		}
-{ .mii;	st8		[r33]=r30,16
-(p6)	add		carry1=1,carry1		};;
-
-{ .mfb;	getf.sig	r24=f114		}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r18,r17
-		add		r19=r19,r18	};;
-{ .mfb;	getf.sig	r25=f105		}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r19,r18
-		add		r20=r20,r19	};;
-{ .mfb;	getf.sig	r26=f96			}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r20,r19
-		add		r21=r21,r20	};;
-{ .mfb;	getf.sig	r27=f87			}
-{ .mii;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p0=r21,r20
-		add		r21=r21,carry1	};;
-{ .mib;	getf.sig	r28=f78			
-	add		r25=r25,r24		}
-{ .mib;	(p7)	add		carry3=1,carry3
-		cmp.ltu		p7,p8=r21,carry1};;
-{ .mii;		st8		[r32]=r21,16
-	(p7)	add		carry2=1,carry3
-	(p8)	add		carry2=0,carry3	}
-
-{ .mii;	mov		carry1=0
-	cmp.ltu		p6,p0=r25,r24
-	add		r26=r26,r25		};;
-{ .mfb;		getf.sig	r16=f115	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r26,r25
-	add		r27=r27,r26		};;
-{ .mfb;		getf.sig	r17=f106	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r27,r26
-	add		r28=r28,r27		};;
-{ .mfb;		getf.sig	r18=f97		}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r28,r27
-	add		r28=r28,carry2		};;
-{ .mib;		getf.sig	r19=f88
-		add		r17=r17,r16	}
-{ .mib;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r28,carry2	};;
-{ .mii;	st8		[r33]=r28,16
-(p6)	add		carry1=1,carry1		}
-
-{ .mii;		mov		carry2=0
-		cmp.ltu		p7,p0=r17,r16
-		add		r18=r18,r17	};;
-{ .mfb;	getf.sig	r24=f116		}
-{ .mii;	(p7)	add		carry2=1,carry2
-		cmp.ltu		p7,p0=r18,r17
-		add		r19=r19,r18	};;
-{ .mfb;	getf.sig	r25=f107		}
-{ .mii;	(p7)	add		carry2=1,carry2
-		cmp.ltu		p7,p0=r19,r18
-		add		r19=r19,carry1	};;
-{ .mfb;	getf.sig	r26=f98			}
-{ .mii;	(p7)	add		carry2=1,carry2
-		cmp.ltu		p7,p0=r19,carry1};;
-{ .mii;		st8		[r32]=r19,16
-	(p7)	add		carry2=1,carry2	}
-
-{ .mfb;	add		r25=r25,r24		};;
-
-{ .mfb;		getf.sig	r16=f117	}
-{ .mii;	mov		carry1=0
-	cmp.ltu		p6,p0=r25,r24
-	add		r26=r26,r25		};;
-{ .mfb;		getf.sig	r17=f108	}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r26,r25
-	add		r26=r26,carry2		};;
-{ .mfb;	nop.m	0x0				}
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r26,carry2	};;
-{ .mii;	st8		[r33]=r26,16
-(p6)	add		carry1=1,carry1		}
-
-{ .mfb;		add		r17=r17,r16	};;
-{ .mfb;	getf.sig	r24=f118		}
-{ .mii;		mov		carry2=0
-		cmp.ltu		p7,p0=r17,r16
-		add		r17=r17,carry1	};;
-{ .mii;	(p7)	add		carry2=1,carry2
-		cmp.ltu		p7,p0=r17,carry1};;
-{ .mii;		st8		[r32]=r17
-	(p7)	add		carry2=1,carry2	};;
-{ .mfb;	add		r24=r24,carry2		};;
-{ .mib;	st8		[r33]=r24		}
-
-{ .mib;	rum		1<<5		// clear um.mfh
-	br.ret.sptk.many	b0	};;
-.endp	bn_mul_comba8#
-#undef	carry3
-#undef	carry2
-#undef	carry1
-#endif
-
-#if 1
-// It's possible to make it faster (see the comment to bn_sqr_comba8),
-// but I reckon it isn't worth the effort, basically because the routine
-// (actually both of them) is practically never called... So I just play
-// the same trick as with bn_sqr_comba8.
-//
-// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
-//
-.global	bn_sqr_comba4#
-.proc	bn_sqr_comba4#
-.align	64
-bn_sqr_comba4:
-	.prologue
-	.save	ar.pfs,r2
-#if defined(_HPUX_SOURCE) && !defined(_LP64)
-{ .mii;	alloc   r2=ar.pfs,2,1,0,0
-	addp4	r32=0,r32
-	addp4	r33=0,r33		};;
-{ .mii;
-#else
-{ .mii;	alloc	r2=ar.pfs,2,1,0,0
-#endif
-	mov	r34=r33
-	add	r14=8,r33		};;
-	.body
-{ .mii;	add	r17=8,r34
-	add	r15=16,r33
-	add	r18=16,r34		}
-{ .mfb;	add	r16=24,r33
-	br	.L_cheat_entry_point4	};;
-.endp	bn_sqr_comba4#
-#endif
-
-#if 1
-// Runs in ~115 cycles and is ~4.5 times faster than C. Well, whatever...
-//
-// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
-//
-#define	carry1	r14
-#define	carry2	r15
-.global	bn_mul_comba4#
-.proc	bn_mul_comba4#
-.align	64
-bn_mul_comba4:
-	.prologue
-	.save	ar.pfs,r2
-#if defined(_HPUX_SOURCE) && !defined(_LP64)
-{ .mii;	alloc   r2=ar.pfs,3,0,0,0
-	addp4	r33=0,r33
-	addp4	r34=0,r34		};;
-{ .mii;	addp4	r32=0,r32
-#else
-{ .mii;	alloc	r2=ar.pfs,3,0,0,0
-#endif
-	add	r14=8,r33
-	add	r17=8,r34		}
-	.body
-{ .mii;	add	r15=16,r33
-	add	r18=16,r34
-	add	r16=24,r33		};;
-.L_cheat_entry_point4:
-{ .mmi;	add	r19=24,r34
-
-	ldf8	f32=[r33]		}
-
-{ .mmi;	ldf8	f120=[r34]
-	ldf8	f121=[r17]		};;
-{ .mmi;	ldf8	f122=[r18]
-	ldf8	f123=[r19]		}
-
-{ .mmi;	ldf8	f33=[r14]
-	ldf8	f34=[r15]		}
-{ .mfi;	ldf8	f35=[r16]
-
-		xma.hu	f41=f32,f120,f0		}
-{ .mfi;		xma.lu	f40=f32,f120,f0		};;
-{ .mfi;		xma.hu	f51=f32,f121,f0		}
-{ .mfi;		xma.lu	f50=f32,f121,f0		};;
-{ .mfi;		xma.hu	f61=f32,f122,f0		}
-{ .mfi;		xma.lu	f60=f32,f122,f0		};;
-{ .mfi;		xma.hu	f71=f32,f123,f0		}
-{ .mfi;		xma.lu	f70=f32,f123,f0		};;//
-// A major stall takes place here, and in 3 more places below. The
-// result from the first xma is not available for another 3 ticks.
-{ .mfi;	getf.sig	r16=f40
-		xma.hu	f42=f33,f120,f41
-	add		r33=8,r32		}
-{ .mfi;		xma.lu	f41=f33,f120,f41	};;
-{ .mfi;	getf.sig	r24=f50
-		xma.hu	f52=f33,f121,f51	}
-{ .mfi;		xma.lu	f51=f33,f121,f51	};;
-{ .mfi;	st8		[r32]=r16,16
-		xma.hu	f62=f33,f122,f61	}
-{ .mfi;		xma.lu	f61=f33,f122,f61	};;
-{ .mfi;		xma.hu	f72=f33,f123,f71	}
-{ .mfi;		xma.lu	f71=f33,f123,f71	};;//
-//-------------------------------------------------//
-{ .mfi;	getf.sig	r25=f41
-		xma.hu	f43=f34,f120,f42	}
-{ .mfi;		xma.lu	f42=f34,f120,f42	};;
-{ .mfi;	getf.sig	r16=f60
-		xma.hu	f53=f34,f121,f52	}
-{ .mfi;		xma.lu	f52=f34,f121,f52	};;
-{ .mfi;	getf.sig	r17=f51
-		xma.hu	f63=f34,f122,f62
-	add		r25=r25,r24		}
-{ .mfi;	mov		carry1=0
-		xma.lu	f62=f34,f122,f62	};;
-{ .mfi;	st8		[r33]=r25,16
-		xma.hu	f73=f34,f123,f72
-	cmp.ltu		p6,p0=r25,r24		}
-{ .mfi;		xma.lu	f72=f34,f123,f72	};;//
-//-------------------------------------------------//
-{ .mfi;	getf.sig	r18=f42
-		xma.hu	f44=f35,f120,f43
-(p6)	add		carry1=1,carry1		}
-{ .mfi;	add		r17=r17,r16
-		xma.lu	f43=f35,f120,f43
-	mov		carry2=0		};;
-{ .mfi;	getf.sig	r24=f70
-		xma.hu	f54=f35,f121,f53
-	cmp.ltu		p7,p0=r17,r16		}
-{ .mfi;		xma.lu	f53=f35,f121,f53	};;
-{ .mfi;	getf.sig	r25=f61
-		xma.hu	f64=f35,f122,f63
-	add		r18=r18,r17		}
-{ .mfi;		xma.lu	f63=f35,f122,f63
-(p7)	add		carry2=1,carry2		};;
-{ .mfi;	getf.sig	r26=f52
-		xma.hu	f74=f35,f123,f73
-	cmp.ltu		p7,p0=r18,r17		}
-{ .mfi;		xma.lu	f73=f35,f123,f73
-	add		r18=r18,carry1		};;
-//-------------------------------------------------//
-{ .mii;	st8		[r32]=r18,16
-(p7)	add		carry2=1,carry2
-	cmp.ltu		p7,p0=r18,carry1	};;
-
-{ .mfi;	getf.sig	r27=f43	// last major stall
-(p7)	add		carry2=1,carry2		};;
-{ .mii;		getf.sig	r16=f71
-	add		r25=r25,r24
-	mov		carry1=0		};;
-{ .mii;		getf.sig	r17=f62	
-	cmp.ltu		p6,p0=r25,r24
-	add		r26=r26,r25		};;
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r26,r25
-	add		r27=r27,r26		};;
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r27,r26
-	add		r27=r27,carry2		};;
-{ .mii;		getf.sig	r18=f53
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r27,carry2	};;
-{ .mfi;	st8		[r33]=r27,16
-(p6)	add		carry1=1,carry1		}
-
-{ .mii;		getf.sig	r19=f44
-		add		r17=r17,r16
-		mov		carry2=0	};;
-{ .mii;	getf.sig	r24=f72
-		cmp.ltu		p7,p0=r17,r16
-		add		r18=r18,r17	};;
-{ .mii;	(p7)	add		carry2=1,carry2
-		cmp.ltu		p7,p0=r18,r17
-		add		r19=r19,r18	};;
-{ .mii;	(p7)	add		carry2=1,carry2
-		cmp.ltu		p7,p0=r19,r18
-		add		r19=r19,carry1	};;
-{ .mii;	getf.sig	r25=f63
-	(p7)	add		carry2=1,carry2
-		cmp.ltu		p7,p0=r19,carry1};;
-{ .mii;		st8		[r32]=r19,16
-	(p7)	add		carry2=1,carry2	}
-
-{ .mii;	getf.sig	r26=f54
-	add		r25=r25,r24
-	mov		carry1=0		};;
-{ .mii;		getf.sig	r16=f73
-	cmp.ltu		p6,p0=r25,r24
-	add		r26=r26,r25		};;
-{ .mii;
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r26,r25
-	add		r26=r26,carry2		};;
-{ .mii;		getf.sig	r17=f64
-(p6)	add		carry1=1,carry1
-	cmp.ltu		p6,p0=r26,carry2	};;
-{ .mii;	st8		[r33]=r26,16
-(p6)	add		carry1=1,carry1		}
-
-{ .mii;	getf.sig	r24=f74
-		add		r17=r17,r16	
-		mov		carry2=0	};;
-{ .mii;		cmp.ltu		p7,p0=r17,r16
-		add		r17=r17,carry1	};;
-
-{ .mii;	(p7)	add		carry2=1,carry2
-		cmp.ltu		p7,p0=r17,carry1};;
-{ .mii;		st8		[r32]=r17,16
-	(p7)	add		carry2=1,carry2	};;
-
-{ .mii;	add		r24=r24,carry2		};;
-{ .mii;	st8		[r33]=r24		}
-
-{ .mib;	rum		1<<5		// clear um.mfh
-	br.ret.sptk.many	b0	};;
-.endp	bn_mul_comba4#
-#undef	carry2
-#undef	carry1
-#endif
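Throughout both comba routines the carry bookkeeping follows one idiom: after an unsigned add, the sum is smaller than an addend exactly when the add wrapped, which is what each cmp.ltu / predicated "add carryN=1,carryN" pair checks. A minimal C illustration of the same idiom (names are ours):

    #include <stdint.h>

    /* s = a + b; *carry accumulates the unsigned overflow, mirroring the
       'add r19=r19,r18' / 'cmp.ltu p7,p0=r19,r18' / '(p7) add carry2=1,carry2'
       pattern in the assembly above. */
    static inline uint64_t add_carry(uint64_t a, uint64_t b, unsigned *carry)
    {
        uint64_t s = a + b;
        *carry += (s < a);   /* wrapped iff sum is below an addend */
        return s;
    }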
-
-#if 1
-//
-// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
-//
-// In a nutshell it's a port of my MIPS III/IV implementation.
-//
-#define	AT	r14
-#define	H	r16
-#define	HH	r20
-#define	L	r17
-#define	D	r18
-#define	DH	r22
-#define	I	r21
-
-#if 0
-// Some preprocessors (most notably HP-UX) appear to be allergic to
-// macros enclosed in parentheses [as these three were].
-#define	cont	p16
-#define	break	p0	// p20
-#define	equ	p24
-#else
-cont=p16
-break=p0
-equ=p24
-#endif
-
-.global	abort#
-.global	bn_div_words#
-.proc	bn_div_words#
-.align	64
-bn_div_words:
-	.prologue
-	.save	ar.pfs,r2
-{ .mii;	alloc		r2=ar.pfs,3,5,0,8
-	.save	b0,r3
-	mov		r3=b0
-	.save	pr,r10
-	mov		r10=pr		};;
-{ .mmb;	cmp.eq		p6,p0=r34,r0
-	mov		r8=-1
-(p6)	br.ret.spnt.many	b0	};;
-
-	.body
-{ .mii;	mov		H=r32		// save h
-	mov		ar.ec=0		// don't rotate at exit
-	mov		pr.rot=0	}
-{ .mii;	mov		L=r33		// save l
-	mov		r36=r0		};;
-
-.L_divw_shift:	// -vv- note signed comparison
-{ .mfi;	(p0)	cmp.lt		p16,p0=r0,r34	// d
-	(p0)	shladd		r33=r34,1,r0	}
-{ .mfb;	(p0)	add		r35=1,r36
-	(p0)	nop.f		0x0
-(p16)	br.wtop.dpnt		.L_divw_shift	};;
-
-{ .mii;	mov		D=r34
-	shr.u		DH=r34,32
-	sub		r35=64,r36		};;
-{ .mii;	setf.sig	f7=DH
-	shr.u		AT=H,r35
-	mov		I=r36			};;
-{ .mib;	cmp.ne		p6,p0=r0,AT
-	shl		H=H,r36
-(p6)	br.call.spnt.clr	b0=abort	};;	// overflow, die...
-
-{ .mfi;	fcvt.xuf.s1	f7=f7
-	shr.u		AT=L,r35		};;
-{ .mii;	shl		L=L,r36
-	or		H=H,AT			};;
-
-{ .mii;	nop.m		0x0
-	cmp.leu		p6,p0=D,H;;
-(p6)	sub		H=H,D			}
-
-{ .mlx;	setf.sig	f14=D
-	movl		AT=0xffffffff		};;
-///////////////////////////////////////////////////////////
-{ .mii;	setf.sig	f6=H
-	shr.u		HH=H,32;;
-	cmp.eq		p6,p7=HH,DH		};;
-{ .mfb;
-(p6)	setf.sig	f8=AT
-(p7)	fcvt.xuf.s1	f6=f6
-(p7)	br.call.sptk	b6=.L_udiv64_32_b6	};;
-
-{ .mfi;	getf.sig	r33=f8				// q
-	xmpy.lu		f9=f8,f14		}
-{ .mfi;	xmpy.hu		f10=f8,f14
-	shrp		H=H,L,32		};;
-
-{ .mmi;	getf.sig	r35=f9				// tl
-	getf.sig	r31=f10			};;	// th
-
-.L_divw_1st_iter:
-{ .mii;	(p0)	add		r32=-1,r33
-	(p0)	cmp.eq		equ,cont=HH,r31		};;
-{ .mii;	(p0)	cmp.ltu		p8,p0=r35,D
-	(p0)	sub		r34=r35,D
-	(equ)	cmp.leu		break,cont=r35,H	};;
-{ .mib;	(cont)	cmp.leu		cont,break=HH,r31
-	(p8)	add		r31=-1,r31
-(cont)	br.wtop.spnt		.L_divw_1st_iter	};;
-///////////////////////////////////////////////////////////
-{ .mii;	sub		H=H,r35
-	shl		r8=r33,32
-	shl		L=L,32			};;
-///////////////////////////////////////////////////////////
-{ .mii;	setf.sig	f6=H
-	shr.u		HH=H,32;;
-	cmp.eq		p6,p7=HH,DH		};;
-{ .mfb;
-(p6)	setf.sig	f8=AT
-(p7)	fcvt.xuf.s1	f6=f6
-(p7)	br.call.sptk	b6=.L_udiv64_32_b6	};;
-
-{ .mfi;	getf.sig	r33=f8				// q
-	xmpy.lu		f9=f8,f14		}
-{ .mfi;	xmpy.hu		f10=f8,f14
-	shrp		H=H,L,32		};;
-
-{ .mmi;	getf.sig	r35=f9				// tl
-	getf.sig	r31=f10			};;	// th
-
-.L_divw_2nd_iter:
-{ .mii;	(p0)	add		r32=-1,r33
-	(p0)	cmp.eq		equ,cont=HH,r31		};;
-{ .mii;	(p0)	cmp.ltu		p8,p0=r35,D
-	(p0)	sub		r34=r35,D
-	(equ)	cmp.leu		break,cont=r35,H	};;
-{ .mib;	(cont)	cmp.leu		cont,break=HH,r31
-	(p8)	add		r31=-1,r31
-(cont)	br.wtop.spnt		.L_divw_2nd_iter	};;
-///////////////////////////////////////////////////////////
-{ .mii;	sub	H=H,r35
-	or	r8=r8,r33
-	mov	ar.pfs=r2		};;
-{ .mii;	shr.u	r9=H,I			// remainder if anybody wants it
-	mov	pr=r10,0x1ffff		}
-{ .mfb;	br.ret.sptk.many	b0	};;
-
-// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division
-// procedure.
-//
-// inputs:	f6 = (double)a, f7 = (double)b
-// output:	f8 = (int)(a/b)
-// clobbered:	f8,f9,f10,f11,pred
-pred=p15
-// One can argue that this snippet is copyrighted to Intel
-// Corporation, as it's essentially identical to one of those
-// found in the "Divide, Square Root and Remainder" section at
-// http://www.intel.com/software/products/opensource/libraries/num.htm.
-// Yes, I admit that the referred code was used as a template, but
-// only after I realized that there is hardly any other instruction
-// sequence which would perform this operation. I mean, I figure that
-// any independent attempt to implement high-performance division
-// will result in code virtually identical to the Intel code. It
-// should be noted though that the division kernel below is 1 cycle
-// faster than Intel's (note the commented splits:-), not to mention
-// the original prologue (rather the lack of one) and epilogue.
-.align	32
-.skip	16
-.L_udiv64_32_b6:
-	frcpa.s1	f8,pred=f6,f7;;		// [0]  y0 = 1 / b
-
-(pred)	fnma.s1		f9=f7,f8,f1		// [5]  e0 = 1 - b * y0
-(pred)	fmpy.s1		f10=f6,f8;;		// [5]  q0 = a * y0
-(pred)	fmpy.s1		f11=f9,f9		// [10] e1 = e0 * e0
-(pred)	fma.s1		f10=f9,f10,f10;;	// [10] q1 = q0 + e0 * q0
-(pred)	fma.s1		f8=f9,f8,f8	//;;	// [15] y1 = y0 + e0 * y0
-(pred)	fma.s1		f9=f11,f10,f10;;	// [15] q2 = q1 + e1 * q1
-(pred)	fma.s1		f8=f11,f8,f8	//;;	// [20] y2 = y1 + e1 * y1
-(pred)	fnma.s1		f10=f7,f9,f6;;		// [20] r2 = a - b * q2
-(pred)	fma.s1		f8=f10,f8,f9;;		// [25] q3 = q2 + r2 * y2
-
-	fcvt.fxu.trunc.s1	f8=f8		// [30] q = trunc(q3)
-	br.ret.sptk.many	b6;;
-.endp	bn_div_words#
-#endif
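The bracketed cycle counts above trace a Newton-Raphson style refinement of the frcpa reciprocal seed. Rendered in double precision purely for illustration (the real kernel runs in IA-64's 82-bit register format, so this is a conceptual sketch, not a bit-exact reimplementation):

    #include <math.h>

    /* Conceptual rendering of the .L_udiv64_32_b6 refinement ladder. */
    double udiv_sketch(double a, double b)
    {
        double y0 = 1.0 / b;        /* stand-in for the frcpa seed */
        double e0 = 1.0 - b * y0;   /* e0 = 1 - b*y0               */
        double q0 = a * y0;         /* q0 = a*y0                   */
        double e1 = e0 * e0;        /* e1 = e0*e0                  */
        double q1 = q0 + e0 * q0;   /* q1 = q0 + e0*q0             */
        double y1 = y0 + e0 * y0;   /* y1 = y0 + e0*y0             */
        double q2 = q1 + e1 * q1;   /* q2 = q1 + e1*q1             */
        double y2 = y1 + e1 * y1;   /* y2 = y1 + e1*y1             */
        double r2 = a - b * q2;     /* r2 = a - b*q2               */
        double q3 = q2 + r2 * y2;   /* q3 = q2 + r2*y2             */
        return trunc(q3);           /* q = trunc(q3)               */
    }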

+ 0 - 426
drivers/builtin_openssl2/crypto/bn/asm/mips-mont.pl

@@ -1,426 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# This module is not of direct interest to OpenSSL, because it
-# doesn't provide better performance for longer keys, at least not on
-# in-order-execution cores. While 512-bit RSA sign operations can be
-# 65% faster in 64-bit mode, 1024-bit ones are only 15% faster, and
-# 4096-bit ones are up to 15% slower. In 32-bit mode it varies from
-# 16% improvement for 512-bit RSA sign to -33% for 4096-bit RSA
-# verify:-( All comparisons are against bn_mul_mont-free assembler.
-# The module might be of interest to embedded system developers, as
-# the code is smaller than 1KB, yet offers >3x improvement on MIPS64
-# and 75-30% [less for longer keys] on MIPS32 over compiler-generated
-# code.
-
-######################################################################
-# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
-# widely used. Then there is a new contender: NUBI. It appears that if
-# one picks the latter, it's possible to arrange the code in an
-# ABI-neutral manner. Therefore let's stick to the NUBI register layout:
-#
-($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
-($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
-($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
-($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
-#
-# The return value is placed in $a0. The following coding rules
-# facilitate interoperability:
-#
-# - never ever touch $tp, "thread pointer", former $gp;
-# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
-#   old code];
-# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
-#
-# For reference here is register layout for N32/64 MIPS ABIs:
-#
-# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
-# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
-# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
-# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
-# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
-#
-$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
-
-if ($flavour =~ /64|n32/i) {
-	$PTR_ADD="dadd";	# incidentally works even on n32
-	$PTR_SUB="dsub";	# incidentally works even on n32
-	$REG_S="sd";
-	$REG_L="ld";
-	$SZREG=8;
-} else {
-	$PTR_ADD="add";
-	$PTR_SUB="sub";
-	$REG_S="sw";
-	$REG_L="lw";
-	$SZREG=4;
-}
-$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0x00fff000 : 0x00ff0000;
-#
-# <[email protected]>
-#
-######################################################################
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-if ($flavour =~ /64|n32/i) {
-	$LD="ld";
-	$ST="sd";
-	$MULTU="dmultu";
-	$ADDU="daddu";
-	$SUBU="dsubu";
-	$BNSZ=8;
-} else {
-	$LD="lw";
-	$ST="sw";
-	$MULTU="multu";
-	$ADDU="addu";
-	$SUBU="subu";
-	$BNSZ=4;
-}
-
-# int bn_mul_mont(
-$rp=$a0;	# BN_ULONG *rp,
-$ap=$a1;	# const BN_ULONG *ap,
-$bp=$a2;	# const BN_ULONG *bp,
-$np=$a3;	# const BN_ULONG *np,
-$n0=$a4;	# const BN_ULONG *n0,
-$num=$a5;	# int num);
-
-$lo0=$a6;
-$hi0=$a7;
-$lo1=$t1;
-$hi1=$t2;
-$aj=$s0;
-$bi=$s1;
-$nj=$s2;
-$tp=$s3;
-$alo=$s4;
-$ahi=$s5;
-$nlo=$s6;
-$nhi=$s7;
-$tj=$s8;
-$i=$s9;
-$j=$s10;
-$m1=$s11;
-
-$FRAMESIZE=14;
-
-$code=<<___;
-.text
-
-.set	noat
-.set	noreorder
-
-.align	5
-.globl	bn_mul_mont
-.ent	bn_mul_mont
-bn_mul_mont:
-___
-$code.=<<___ if ($flavour =~ /o32/i);
-	lw	$n0,16($sp)
-	lw	$num,20($sp)
-___
-$code.=<<___;
-	slt	$at,$num,4
-	bnez	$at,1f
-	li	$t0,0
-	slt	$at,$num,17	# on in-order CPU
-	bnez	$at,bn_mul_mont_internal
-	nop
-1:	jr	$ra
-	li	$a0,0
-.end	bn_mul_mont
-
-.align	5
-.ent	bn_mul_mont_internal
-bn_mul_mont_internal:
-	.frame	$fp,$FRAMESIZE*$SZREG,$ra
-	.mask	0x40000000|$SAVED_REGS_MASK,-$SZREG
-	$PTR_SUB $sp,$FRAMESIZE*$SZREG
-	$REG_S	$fp,($FRAMESIZE-1)*$SZREG($sp)
-	$REG_S	$s11,($FRAMESIZE-2)*$SZREG($sp)
-	$REG_S	$s10,($FRAMESIZE-3)*$SZREG($sp)
-	$REG_S	$s9,($FRAMESIZE-4)*$SZREG($sp)
-	$REG_S	$s8,($FRAMESIZE-5)*$SZREG($sp)
-	$REG_S	$s7,($FRAMESIZE-6)*$SZREG($sp)
-	$REG_S	$s6,($FRAMESIZE-7)*$SZREG($sp)
-	$REG_S	$s5,($FRAMESIZE-8)*$SZREG($sp)
-	$REG_S	$s4,($FRAMESIZE-9)*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_S	$s3,($FRAMESIZE-10)*$SZREG($sp)
-	$REG_S	$s2,($FRAMESIZE-11)*$SZREG($sp)
-	$REG_S	$s1,($FRAMESIZE-12)*$SZREG($sp)
-	$REG_S	$s0,($FRAMESIZE-13)*$SZREG($sp)
-___
-$code.=<<___;
-	move	$fp,$sp
-
-	.set	reorder
-	$LD	$n0,0($n0)
-	$LD	$bi,0($bp)	# bp[0]
-	$LD	$aj,0($ap)	# ap[0]
-	$LD	$nj,0($np)	# np[0]
-
-	$PTR_SUB $sp,2*$BNSZ	# place for two extra words
-	sll	$num,`log($BNSZ)/log(2)`
-	li	$at,-4096
-	$PTR_SUB $sp,$num
-	and	$sp,$at
-
-	$MULTU	$aj,$bi
-	$LD	$alo,$BNSZ($ap)
-	$LD	$nlo,$BNSZ($np)
-	mflo	$lo0
-	mfhi	$hi0
-	$MULTU	$lo0,$n0
-	mflo	$m1
-
-	$MULTU	$alo,$bi
-	mflo	$alo
-	mfhi	$ahi
-
-	$MULTU	$nj,$m1
-	mflo	$lo1
-	mfhi	$hi1
-	$MULTU	$nlo,$m1
-	$ADDU	$lo1,$lo0
-	sltu	$at,$lo1,$lo0
-	$ADDU	$hi1,$at
-	mflo	$nlo
-	mfhi	$nhi
-
-	move	$tp,$sp
-	li	$j,2*$BNSZ
-.align	4
-.L1st:
-	.set	noreorder
-	$PTR_ADD $aj,$ap,$j
-	$PTR_ADD $nj,$np,$j
-	$LD	$aj,($aj)
-	$LD	$nj,($nj)
-
-	$MULTU	$aj,$bi
-	$ADDU	$lo0,$alo,$hi0
-	$ADDU	$lo1,$nlo,$hi1
-	sltu	$at,$lo0,$hi0
-	sltu	$t0,$lo1,$hi1
-	$ADDU	$hi0,$ahi,$at
-	$ADDU	$hi1,$nhi,$t0
-	mflo	$alo
-	mfhi	$ahi
-
-	$ADDU	$lo1,$lo0
-	sltu	$at,$lo1,$lo0
-	$MULTU	$nj,$m1
-	$ADDU	$hi1,$at
-	addu	$j,$BNSZ
-	$ST	$lo1,($tp)
-	sltu	$t0,$j,$num
-	mflo	$nlo
-	mfhi	$nhi
-
-	bnez	$t0,.L1st
-	$PTR_ADD $tp,$BNSZ
-	.set	reorder
-
-	$ADDU	$lo0,$alo,$hi0
-	sltu	$at,$lo0,$hi0
-	$ADDU	$hi0,$ahi,$at
-
-	$ADDU	$lo1,$nlo,$hi1
-	sltu	$t0,$lo1,$hi1
-	$ADDU	$hi1,$nhi,$t0
-	$ADDU	$lo1,$lo0
-	sltu	$at,$lo1,$lo0
-	$ADDU	$hi1,$at
-
-	$ST	$lo1,($tp)
-
-	$ADDU	$hi1,$hi0
-	sltu	$at,$hi1,$hi0
-	$ST	$hi1,$BNSZ($tp)
-	$ST	$at,2*$BNSZ($tp)
-
-	li	$i,$BNSZ
-.align	4
-.Louter:
-	$PTR_ADD $bi,$bp,$i
-	$LD	$bi,($bi)
-	$LD	$aj,($ap)
-	$LD	$alo,$BNSZ($ap)
-	$LD	$tj,($sp)
-
-	$MULTU	$aj,$bi
-	$LD	$nj,($np)
-	$LD	$nlo,$BNSZ($np)
-	mflo	$lo0
-	mfhi	$hi0
-	$ADDU	$lo0,$tj
-	$MULTU	$lo0,$n0
-	sltu	$at,$lo0,$tj
-	$ADDU	$hi0,$at
-	mflo	$m1
-
-	$MULTU	$alo,$bi
-	mflo	$alo
-	mfhi	$ahi
-
-	$MULTU	$nj,$m1
-	mflo	$lo1
-	mfhi	$hi1
-
-	$MULTU	$nlo,$m1
-	$ADDU	$lo1,$lo0
-	sltu	$at,$lo1,$lo0
-	$ADDU	$hi1,$at
-	mflo	$nlo
-	mfhi	$nhi
-
-	move	$tp,$sp
-	li	$j,2*$BNSZ
-	$LD	$tj,$BNSZ($tp)
-.align	4
-.Linner:
-	.set	noreorder
-	$PTR_ADD $aj,$ap,$j
-	$PTR_ADD $nj,$np,$j
-	$LD	$aj,($aj)
-	$LD	$nj,($nj)
-
-	$MULTU	$aj,$bi
-	$ADDU	$lo0,$alo,$hi0
-	$ADDU	$lo1,$nlo,$hi1
-	sltu	$at,$lo0,$hi0
-	sltu	$t0,$lo1,$hi1
-	$ADDU	$hi0,$ahi,$at
-	$ADDU	$hi1,$nhi,$t0
-	mflo	$alo
-	mfhi	$ahi
-
-	$ADDU	$lo0,$tj
-	addu	$j,$BNSZ
-	$MULTU	$nj,$m1
-	sltu	$at,$lo0,$tj
-	$ADDU	$lo1,$lo0
-	$ADDU	$hi0,$at
-	sltu	$t0,$lo1,$lo0
-	$LD	$tj,2*$BNSZ($tp)
-	$ADDU	$hi1,$t0
-	sltu	$at,$j,$num
-	mflo	$nlo
-	mfhi	$nhi
-	$ST	$lo1,($tp)
-	bnez	$at,.Linner
-	$PTR_ADD $tp,$BNSZ
-	.set	reorder
-
-	$ADDU	$lo0,$alo,$hi0
-	sltu	$at,$lo0,$hi0
-	$ADDU	$hi0,$ahi,$at
-	$ADDU	$lo0,$tj
-	sltu	$t0,$lo0,$tj
-	$ADDU	$hi0,$t0
-
-	$LD	$tj,2*$BNSZ($tp)
-	$ADDU	$lo1,$nlo,$hi1
-	sltu	$at,$lo1,$hi1
-	$ADDU	$hi1,$nhi,$at
-	$ADDU	$lo1,$lo0
-	sltu	$t0,$lo1,$lo0
-	$ADDU	$hi1,$t0
-	$ST	$lo1,($tp)
-
-	$ADDU	$lo1,$hi1,$hi0
-	sltu	$hi1,$lo1,$hi0
-	$ADDU	$lo1,$tj
-	sltu	$at,$lo1,$tj
-	$ADDU	$hi1,$at
-	$ST	$lo1,$BNSZ($tp)
-	$ST	$hi1,2*$BNSZ($tp)
-
-	addu	$i,$BNSZ
-	sltu	$t0,$i,$num
-	bnez	$t0,.Louter
-
-	.set	noreorder
-	$PTR_ADD $tj,$sp,$num	# &tp[num]
-	move	$tp,$sp
-	move	$ap,$sp
-	li	$hi0,0		# clear borrow bit
-
-.align	4
-.Lsub:	$LD	$lo0,($tp)
-	$LD	$lo1,($np)
-	$PTR_ADD $tp,$BNSZ
-	$PTR_ADD $np,$BNSZ
-	$SUBU	$lo1,$lo0,$lo1	# tp[i]-np[i]
-	sgtu	$at,$lo1,$lo0
-	$SUBU	$lo0,$lo1,$hi0
-	sgtu	$hi0,$lo0,$lo1
-	$ST	$lo0,($rp)
-	or	$hi0,$at
-	sltu	$at,$tp,$tj
-	bnez	$at,.Lsub
-	$PTR_ADD $rp,$BNSZ
-
-	$SUBU	$hi0,$hi1,$hi0	# handle upmost overflow bit
-	move	$tp,$sp
-	$PTR_SUB $rp,$num	# restore rp
-	not	$hi1,$hi0
-
-	and	$ap,$hi0,$sp
-	and	$bp,$hi1,$rp
-	or	$ap,$ap,$bp	# ap=borrow?tp:rp
-
-.align	4
-.Lcopy:	$LD	$aj,($ap)
-	$PTR_ADD $ap,$BNSZ
-	$ST	$zero,($tp)
-	$PTR_ADD $tp,$BNSZ
-	sltu	$at,$tp,$tj
-	$ST	$aj,($rp)
-	bnez	$at,.Lcopy
-	$PTR_ADD $rp,$BNSZ
-
-	li	$a0,1
-	li	$t0,1
-
-	.set	noreorder
-	move	$sp,$fp
-	$REG_L	$fp,($FRAMESIZE-1)*$SZREG($sp)
-	$REG_L	$s11,($FRAMESIZE-2)*$SZREG($sp)
-	$REG_L	$s10,($FRAMESIZE-3)*$SZREG($sp)
-	$REG_L	$s9,($FRAMESIZE-4)*$SZREG($sp)
-	$REG_L	$s8,($FRAMESIZE-5)*$SZREG($sp)
-	$REG_L	$s7,($FRAMESIZE-6)*$SZREG($sp)
-	$REG_L	$s6,($FRAMESIZE-7)*$SZREG($sp)
-	$REG_L	$s5,($FRAMESIZE-8)*$SZREG($sp)
-	$REG_L	$s4,($FRAMESIZE-9)*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$s3,($FRAMESIZE-10)*$SZREG($sp)
-	$REG_L	$s2,($FRAMESIZE-11)*$SZREG($sp)
-	$REG_L	$s1,($FRAMESIZE-12)*$SZREG($sp)
-	$REG_L	$s0,($FRAMESIZE-13)*$SZREG($sp)
-___
-$code.=<<___;
-	jr	$ra
-	$PTR_ADD $sp,$FRAMESIZE*$SZREG
-.end	bn_mul_mont_internal
-.rdata
-.asciiz	"Montgomery Multiplication for MIPS, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-
-print $code;
-close STDOUT;
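For orientation, bn_mul_mont above computes rp = ap*bp*R^-1 mod np with R = 2^(BNSZ*8*num), interleaving reduction with multiplication just as the .L1st/.Louter/.Linner loops do, with the scratch area tp carved off the stack. A simplified word-serial reference in C for the 32-bit flavour, under the usual preconditions (np odd, n0 = -np^-1 mod 2^32, tp of num+2 words); this is a sketch of the algorithm, not the deleted assembly:

    #include <stdint.h>

    typedef uint32_t BN_ULONG;

    /* Sketch of word-serial Montgomery multiplication:
       rp = ap*bp*R^-1 mod np, R = 2^(32*num). */
    static int mont_mul_sketch(BN_ULONG *rp, const BN_ULONG *ap,
                               const BN_ULONG *bp, const BN_ULONG *np,
                               BN_ULONG n0, int num, BN_ULONG *tp /* num+2 */)
    {
        int i, j;
        for (i = 0; i < num + 2; i++) tp[i] = 0;

        for (i = 0; i < num; i++) {
            uint64_t t;
            BN_ULONG c, m, bi = bp[i];

            for (c = 0, j = 0; j < num; j++) {       /* tp += ap*bp[i]     */
                t = (uint64_t)ap[j] * bi + tp[j] + c;
                tp[j] = (BN_ULONG)t; c = (BN_ULONG)(t >> 32);
            }
            t = (uint64_t)tp[num] + c;
            tp[num] = (BN_ULONG)t; tp[num + 1] = (BN_ULONG)(t >> 32);

            m = tp[0] * n0;                          /* zeroes tp's low word */
            t = (uint64_t)np[0] * m + tp[0]; c = (BN_ULONG)(t >> 32);
            for (j = 1; j < num; j++) {              /* tp += np*m, then >>32 */
                t = (uint64_t)np[j] * m + tp[j] + c;
                tp[j - 1] = (BN_ULONG)t; c = (BN_ULONG)(t >> 32);
            }
            t = (uint64_t)tp[num] + c;
            tp[num - 1] = (BN_ULONG)t;
            tp[num] = tp[num + 1] + (BN_ULONG)(t >> 32);
        }

        BN_ULONG borrow = 0;                         /* rp = tp - np       */
        for (j = 0; j < num; j++) {
            uint64_t t = (uint64_t)tp[j] - np[j] - borrow;
            rp[j] = (BN_ULONG)t;
            borrow = (BN_ULONG)(t >> 32) & 1;
        }
        int geq = (tp[num] != 0) || (borrow == 0);   /* tp >= np?          */
        for (j = 0; j < num; j++)
            rp[j] = geq ? rp[j] : tp[j];
        return 1;
    }

Note that the final selection above is a data-dependent branch for clarity only; the deleted code selects between tp and rp with a bitmask ("ap=borrow?tp:rp" in the .Lsub/.Lcopy tail) rather than a branch.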

+ 0 - 2234
drivers/builtin_openssl2/crypto/bn/asm/mips.pl

@@ -1,2234 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project.
-#
-# Rights for redistribution and usage in source and binary forms are
-# granted according to the OpenSSL license. Warranty of any kind is
-# disclaimed.
-# ====================================================================
-
-
-# July 1999
-#
-# This is drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c.
-#
-# The module is designed to work with either of the "new" MIPS ABI(5),
-# namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
-# IRIX 5.x, not only because it doesn't support the new ABIs, but also
-# because 5.x kernels put the R4x00 CPU into 32-bit mode, and all those
-# 64-bit instructions (daddu, dmultu, etc.) found below would only
-# cause illegal instruction exceptions:-(
-#
-# In addition, the code depends on preprocessor flags set up by the
-# MIPSpro compiler driver (either as or cc) and therefore (probably?)
-# can't be compiled by the GNU assembler. The GNU C driver manages fine,
-# though, as long as -mmips-as is specified or is the default option,
-# because then it simply invokes /usr/bin/as, which in turn takes
-# perfect care of the preprocessor definitions. Another neat feature
-# offered by the MIPSpro assembler is an optimization pass. This gave
-# me the opportunity to make the code look more regular, as all those
-# architecture-dependent instruction rescheduling details were left to
-# the assembler. Cool, huh?
-#
-# Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
-# goes way over 3 times faster!
-#
-#					<[email protected]>
-
-# October 2010
-#
-# Adapt the module even for 32-bit ABIs and other OSes. The former was
-# achieved by mechanical replacement of 64-bit arithmetic instructions
-# such as dmultu, daddu, etc. with their 32-bit counterparts and by
-# adjusting offsets denoting multiples of BN_ULONG. The above-mentioned
-# >3x performance improvement naturally does not apply to 32-bit code
-# [because there is no instruction a 32-bit compiler can't use]; one
-# has to be content with a 40-85% improvement depending on benchmark and
-# key length, more for longer keys.
-
-$flavour = shift;
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-if ($flavour =~ /64|n32/i) {
-	$LD="ld";
-	$ST="sd";
-	$MULTU="dmultu";
-	$DIVU="ddivu";
-	$ADDU="daddu";
-	$SUBU="dsubu";
-	$SRL="dsrl";
-	$SLL="dsll";
-	$BNSZ=8;
-	$PTR_ADD="daddu";
-	$PTR_SUB="dsubu";
-	$SZREG=8;
-	$REG_S="sd";
-	$REG_L="ld";
-} else {
-	$LD="lw";
-	$ST="sw";
-	$MULTU="multu";
-	$DIVU="divu";
-	$ADDU="addu";
-	$SUBU="subu";
-	$SRL="srl";
-	$SLL="sll";
-	$BNSZ=4;
-	$PTR_ADD="addu";
-	$PTR_SUB="subu";
-	$SZREG=4;
-	$REG_S="sw";
-	$REG_L="lw";
-	$code=".set	mips2\n";
-}
-
-# Below is N32/64 register layout used in the original module.
-#
-($zero,$at,$v0,$v1)=map("\$$_",(0..3));
-($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
-($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
-($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
-($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
-($ta0,$ta1,$ta2,$ta3)=($a4,$a5,$a6,$a7);
-#
-# No special adaptation is required for O32. NUBI on the other hand
-# is treated by saving/restoring ($v1,$t0..$t3).
-
-$gp=$v1 if ($flavour =~ /nubi/i);
-
-$minus4=$v1;
-
-$code.=<<___;
-.rdata
-.asciiz	"mips3.s, Version 1.2"
-.asciiz	"MIPS II/III/IV ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>"
-
-.text
-.set	noat
-
-.align	5
-.globl	bn_mul_add_words
-.ent	bn_mul_add_words
-bn_mul_add_words:
-	.set	noreorder
-	bgtz	$a2,bn_mul_add_words_internal
-	move	$v0,$zero
-	jr	$ra
-	move	$a0,$v0
-.end	bn_mul_add_words
-
-.align	5
-.ent	bn_mul_add_words_internal
-bn_mul_add_words_internal:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	.set	reorder
-	li	$minus4,-4
-	and	$ta0,$a2,$minus4
-	beqz	$ta0,.L_bn_mul_add_words_tail
-
-.L_bn_mul_add_words_loop:
-	$LD	$t0,0($a1)
-	$MULTU	$t0,$a3
-	$LD	$t1,0($a0)
-	$LD	$t2,$BNSZ($a1)
-	$LD	$t3,$BNSZ($a0)
-	$LD	$ta0,2*$BNSZ($a1)
-	$LD	$ta1,2*$BNSZ($a0)
-	$ADDU	$t1,$v0
-	sltu	$v0,$t1,$v0	# All manuals say it "compares 32-bit
-				# values", but it seems to work fine
-				# even on 64-bit registers.
-	mflo	$at
-	mfhi	$t0
-	$ADDU	$t1,$at
-	$ADDU	$v0,$t0
-	 $MULTU	$t2,$a3
-	sltu	$at,$t1,$at
-	$ST	$t1,0($a0)
-	$ADDU	$v0,$at
-
-	$LD	$ta2,3*$BNSZ($a1)
-	$LD	$ta3,3*$BNSZ($a0)
-	$ADDU	$t3,$v0
-	sltu	$v0,$t3,$v0
-	mflo	$at
-	mfhi	$t2
-	$ADDU	$t3,$at
-	$ADDU	$v0,$t2
-	 $MULTU	$ta0,$a3
-	sltu	$at,$t3,$at
-	$ST	$t3,$BNSZ($a0)
-	$ADDU	$v0,$at
-
-	subu	$a2,4
-	$PTR_ADD $a0,4*$BNSZ
-	$PTR_ADD $a1,4*$BNSZ
-	$ADDU	$ta1,$v0
-	sltu	$v0,$ta1,$v0
-	mflo	$at
-	mfhi	$ta0
-	$ADDU	$ta1,$at
-	$ADDU	$v0,$ta0
-	 $MULTU	$ta2,$a3
-	sltu	$at,$ta1,$at
-	$ST	$ta1,-2*$BNSZ($a0)
-	$ADDU	$v0,$at
-
-
-	and	$ta0,$a2,$minus4
-	$ADDU	$ta3,$v0
-	sltu	$v0,$ta3,$v0
-	mflo	$at
-	mfhi	$ta2
-	$ADDU	$ta3,$at
-	$ADDU	$v0,$ta2
-	sltu	$at,$ta3,$at
-	$ST	$ta3,-$BNSZ($a0)
-	.set	noreorder
-	bgtz	$ta0,.L_bn_mul_add_words_loop
-	$ADDU	$v0,$at
-
-	beqz	$a2,.L_bn_mul_add_words_return
-	nop
-
-.L_bn_mul_add_words_tail:
-	.set	reorder
-	$LD	$t0,0($a1)
-	$MULTU	$t0,$a3
-	$LD	$t1,0($a0)
-	subu	$a2,1
-	$ADDU	$t1,$v0
-	sltu	$v0,$t1,$v0
-	mflo	$at
-	mfhi	$t0
-	$ADDU	$t1,$at
-	$ADDU	$v0,$t0
-	sltu	$at,$t1,$at
-	$ST	$t1,0($a0)
-	$ADDU	$v0,$at
-	beqz	$a2,.L_bn_mul_add_words_return
-
-	$LD	$t0,$BNSZ($a1)
-	$MULTU	$t0,$a3
-	$LD	$t1,$BNSZ($a0)
-	subu	$a2,1
-	$ADDU	$t1,$v0
-	sltu	$v0,$t1,$v0
-	mflo	$at
-	mfhi	$t0
-	$ADDU	$t1,$at
-	$ADDU	$v0,$t0
-	sltu	$at,$t1,$at
-	$ST	$t1,$BNSZ($a0)
-	$ADDU	$v0,$at
-	beqz	$a2,.L_bn_mul_add_words_return
-
-	$LD	$t0,2*$BNSZ($a1)
-	$MULTU	$t0,$a3
-	$LD	$t1,2*$BNSZ($a0)
-	$ADDU	$t1,$v0
-	sltu	$v0,$t1,$v0
-	mflo	$at
-	mfhi	$t0
-	$ADDU	$t1,$at
-	$ADDU	$v0,$t0
-	sltu	$at,$t1,$at
-	$ST	$t1,2*$BNSZ($a0)
-	$ADDU	$v0,$at
-
-.L_bn_mul_add_words_return:
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	move	$a0,$v0
-.end	bn_mul_add_words_internal
-
-.align	5
-.globl	bn_mul_words
-.ent	bn_mul_words
-bn_mul_words:
-	.set	noreorder
-	bgtz	$a2,bn_mul_words_internal
-	move	$v0,$zero
-	jr	$ra
-	move	$a0,$v0
-.end	bn_mul_words
-
-.align	5
-.ent	bn_mul_words_internal
-bn_mul_words_internal:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	.set	reorder
-	li	$minus4,-4
-	and	$ta0,$a2,$minus4
-	beqz	$ta0,.L_bn_mul_words_tail
-
-.L_bn_mul_words_loop:
-	$LD	$t0,0($a1)
-	$MULTU	$t0,$a3
-	$LD	$t2,$BNSZ($a1)
-	$LD	$ta0,2*$BNSZ($a1)
-	$LD	$ta2,3*$BNSZ($a1)
-	mflo	$at
-	mfhi	$t0
-	$ADDU	$v0,$at
-	sltu	$t1,$v0,$at
-	 $MULTU	$t2,$a3
-	$ST	$v0,0($a0)
-	$ADDU	$v0,$t1,$t0
-
-	subu	$a2,4
-	$PTR_ADD $a0,4*$BNSZ
-	$PTR_ADD $a1,4*$BNSZ
-	mflo	$at
-	mfhi	$t2
-	$ADDU	$v0,$at
-	sltu	$t3,$v0,$at
-	 $MULTU	$ta0,$a3
-	$ST	$v0,-3*$BNSZ($a0)
-	$ADDU	$v0,$t3,$t2
-
-	mflo	$at
-	mfhi	$ta0
-	$ADDU	$v0,$at
-	sltu	$ta1,$v0,$at
-	 $MULTU	$ta2,$a3
-	$ST	$v0,-2*$BNSZ($a0)
-	$ADDU	$v0,$ta1,$ta0
-
-	and	$ta0,$a2,$minus4
-	mflo	$at
-	mfhi	$ta2
-	$ADDU	$v0,$at
-	sltu	$ta3,$v0,$at
-	$ST	$v0,-$BNSZ($a0)
-	.set	noreorder
-	bgtz	$ta0,.L_bn_mul_words_loop
-	$ADDU	$v0,$ta3,$ta2
-
-	beqz	$a2,.L_bn_mul_words_return
-	nop
-
-.L_bn_mul_words_tail:
-	.set	reorder
-	$LD	$t0,0($a1)
-	$MULTU	$t0,$a3
-	subu	$a2,1
-	mflo	$at
-	mfhi	$t0
-	$ADDU	$v0,$at
-	sltu	$t1,$v0,$at
-	$ST	$v0,0($a0)
-	$ADDU	$v0,$t1,$t0
-	beqz	$a2,.L_bn_mul_words_return
-
-	$LD	$t0,$BNSZ($a1)
-	$MULTU	$t0,$a3
-	subu	$a2,1
-	mflo	$at
-	mfhi	$t0
-	$ADDU	$v0,$at
-	sltu	$t1,$v0,$at
-	$ST	$v0,$BNSZ($a0)
-	$ADDU	$v0,$t1,$t0
-	beqz	$a2,.L_bn_mul_words_return
-
-	$LD	$t0,2*$BNSZ($a1)
-	$MULTU	$t0,$a3
-	mflo	$at
-	mfhi	$t0
-	$ADDU	$v0,$at
-	sltu	$t1,$v0,$at
-	$ST	$v0,2*$BNSZ($a0)
-	$ADDU	$v0,$t1,$t0
-
-.L_bn_mul_words_return:
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	move	$a0,$v0
-.end	bn_mul_words_internal
-
-.align	5
-.globl	bn_sqr_words
-.ent	bn_sqr_words
-bn_sqr_words:
-	.set	noreorder
-	bgtz	$a2,bn_sqr_words_internal
-	move	$v0,$zero
-	jr	$ra
-	move	$a0,$v0
-.end	bn_sqr_words
-
-.align	5
-.ent	bn_sqr_words_internal
-bn_sqr_words_internal:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	.set	reorder
-	li	$minus4,-4
-	and	$ta0,$a2,$minus4
-	beqz	$ta0,.L_bn_sqr_words_tail
-
-.L_bn_sqr_words_loop:
-	$LD	$t0,0($a1)
-	$MULTU	$t0,$t0
-	$LD	$t2,$BNSZ($a1)
-	$LD	$ta0,2*$BNSZ($a1)
-	$LD	$ta2,3*$BNSZ($a1)
-	mflo	$t1
-	mfhi	$t0
-	$ST	$t1,0($a0)
-	$ST	$t0,$BNSZ($a0)
-
-	$MULTU	$t2,$t2
-	subu	$a2,4
-	$PTR_ADD $a0,8*$BNSZ
-	$PTR_ADD $a1,4*$BNSZ
-	mflo	$t3
-	mfhi	$t2
-	$ST	$t3,-6*$BNSZ($a0)
-	$ST	$t2,-5*$BNSZ($a0)
-
-	$MULTU	$ta0,$ta0
-	mflo	$ta1
-	mfhi	$ta0
-	$ST	$ta1,-4*$BNSZ($a0)
-	$ST	$ta0,-3*$BNSZ($a0)
-
-
-	$MULTU	$ta2,$ta2
-	and	$ta0,$a2,$minus4
-	mflo	$ta3
-	mfhi	$ta2
-	$ST	$ta3,-2*$BNSZ($a0)
-
-	.set	noreorder
-	bgtz	$ta0,.L_bn_sqr_words_loop
-	$ST	$ta2,-$BNSZ($a0)
-
-	beqz	$a2,.L_bn_sqr_words_return
-	nop
-
-.L_bn_sqr_words_tail:
-	.set	reorder
-	$LD	$t0,0($a1)
-	$MULTU	$t0,$t0
-	subu	$a2,1
-	mflo	$t1
-	mfhi	$t0
-	$ST	$t1,0($a0)
-	$ST	$t0,$BNSZ($a0)
-	beqz	$a2,.L_bn_sqr_words_return
-
-	$LD	$t0,$BNSZ($a1)
-	$MULTU	$t0,$t0
-	subu	$a2,1
-	mflo	$t1
-	mfhi	$t0
-	$ST	$t1,2*$BNSZ($a0)
-	$ST	$t0,3*$BNSZ($a0)
-	beqz	$a2,.L_bn_sqr_words_return
-
-	$LD	$t0,2*$BNSZ($a1)
-	$MULTU	$t0,$t0
-	mflo	$t1
-	mfhi	$t0
-	$ST	$t1,4*$BNSZ($a0)
-	$ST	$t0,5*$BNSZ($a0)
-
-.L_bn_sqr_words_return:
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	move	$a0,$v0
-
-.end	bn_sqr_words_internal
-
-.align	5
-.globl	bn_add_words
-.ent	bn_add_words
-bn_add_words:
-	.set	noreorder
-	bgtz	$a3,bn_add_words_internal
-	move	$v0,$zero
-	jr	$ra
-	move	$a0,$v0
-.end	bn_add_words
-
-.align	5
-.ent	bn_add_words_internal
-bn_add_words_internal:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	.set	reorder
-	li	$minus4,-4
-	and	$at,$a3,$minus4
-	beqz	$at,.L_bn_add_words_tail
-
-.L_bn_add_words_loop:
-	$LD	$t0,0($a1)
-	$LD	$ta0,0($a2)
-	subu	$a3,4
-	$LD	$t1,$BNSZ($a1)
-	and	$at,$a3,$minus4
-	$LD	$t2,2*$BNSZ($a1)
-	$PTR_ADD $a2,4*$BNSZ
-	$LD	$t3,3*$BNSZ($a1)
-	$PTR_ADD $a0,4*$BNSZ
-	$LD	$ta1,-3*$BNSZ($a2)
-	$PTR_ADD $a1,4*$BNSZ
-	$LD	$ta2,-2*$BNSZ($a2)
-	$LD	$ta3,-$BNSZ($a2)
-	$ADDU	$ta0,$t0
-	sltu	$t8,$ta0,$t0
-	$ADDU	$t0,$ta0,$v0
-	sltu	$v0,$t0,$ta0
-	$ST	$t0,-4*$BNSZ($a0)
-	$ADDU	$v0,$t8
-
-	$ADDU	$ta1,$t1
-	sltu	$t9,$ta1,$t1
-	$ADDU	$t1,$ta1,$v0
-	sltu	$v0,$t1,$ta1
-	$ST	$t1,-3*$BNSZ($a0)
-	$ADDU	$v0,$t9
-
-	$ADDU	$ta2,$t2
-	sltu	$t8,$ta2,$t2
-	$ADDU	$t2,$ta2,$v0
-	sltu	$v0,$t2,$ta2
-	$ST	$t2,-2*$BNSZ($a0)
-	$ADDU	$v0,$t8
-	
-	$ADDU	$ta3,$t3
-	sltu	$t9,$ta3,$t3
-	$ADDU	$t3,$ta3,$v0
-	sltu	$v0,$t3,$ta3
-	$ST	$t3,-$BNSZ($a0)
-	
-	.set	noreorder
-	bgtz	$at,.L_bn_add_words_loop
-	$ADDU	$v0,$t9
-
-	beqz	$a3,.L_bn_add_words_return
-	nop
-
-.L_bn_add_words_tail:
-	.set	reorder
-	$LD	$t0,0($a1)
-	$LD	$ta0,0($a2)
-	$ADDU	$ta0,$t0
-	subu	$a3,1
-	sltu	$t8,$ta0,$t0
-	$ADDU	$t0,$ta0,$v0
-	sltu	$v0,$t0,$ta0
-	$ST	$t0,0($a0)
-	$ADDU	$v0,$t8
-	beqz	$a3,.L_bn_add_words_return
-
-	$LD	$t1,$BNSZ($a1)
-	$LD	$ta1,$BNSZ($a2)
-	$ADDU	$ta1,$t1
-	subu	$a3,1
-	sltu	$t9,$ta1,$t1
-	$ADDU	$t1,$ta1,$v0
-	sltu	$v0,$t1,$ta1
-	$ST	$t1,$BNSZ($a0)
-	$ADDU	$v0,$t9
-	beqz	$a3,.L_bn_add_words_return
-
-	$LD	$t2,2*$BNSZ($a1)
-	$LD	$ta2,2*$BNSZ($a2)
-	$ADDU	$ta2,$t2
-	sltu	$t8,$ta2,$t2
-	$ADDU	$t2,$ta2,$v0
-	sltu	$v0,$t2,$ta2
-	$ST	$t2,2*$BNSZ($a0)
-	$ADDU	$v0,$t8
-
-.L_bn_add_words_return:
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	move	$a0,$v0
-
-.end	bn_add_words_internal
-
-.align	5
-.globl	bn_sub_words
-.ent	bn_sub_words
-bn_sub_words:
-	.set	noreorder
-	bgtz	$a3,bn_sub_words_internal
-	move	$v0,$zero
-	jr	$ra
-	move	$a0,$zero
-.end	bn_sub_words
-
-.align	5
-.ent	bn_sub_words_internal
-bn_sub_words_internal:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	.set	reorder
-	li	$minus4,-4
-	and	$at,$a3,$minus4
-	beqz	$at,.L_bn_sub_words_tail
-
-.L_bn_sub_words_loop:
-	$LD	$t0,0($a1)
-	$LD	$ta0,0($a2)
-	subu	$a3,4
-	$LD	$t1,$BNSZ($a1)
-	and	$at,$a3,$minus4
-	$LD	$t2,2*$BNSZ($a1)
-	$PTR_ADD $a2,4*$BNSZ
-	$LD	$t3,3*$BNSZ($a1)
-	$PTR_ADD $a0,4*$BNSZ
-	$LD	$ta1,-3*$BNSZ($a2)
-	$PTR_ADD $a1,4*$BNSZ
-	$LD	$ta2,-2*$BNSZ($a2)
-	$LD	$ta3,-$BNSZ($a2)
-	sltu	$t8,$t0,$ta0
-	$SUBU	$ta0,$t0,$ta0
-	$SUBU	$t0,$ta0,$v0
-	sgtu	$v0,$t0,$ta0
-	$ST	$t0,-4*$BNSZ($a0)
-	$ADDU	$v0,$t8
-
-	sltu	$t9,$t1,$ta1
-	$SUBU	$ta1,$t1,$ta1
-	$SUBU	$t1,$ta1,$v0
-	sgtu	$v0,$t1,$ta1
-	$ST	$t1,-3*$BNSZ($a0)
-	$ADDU	$v0,$t9
-
-
-	sltu	$t8,$t2,$ta2
-	$SUBU	$ta2,$t2,$ta2
-	$SUBU	$t2,$ta2,$v0
-	sgtu	$v0,$t2,$ta2
-	$ST	$t2,-2*$BNSZ($a0)
-	$ADDU	$v0,$t8
-
-	sltu	$t9,$t3,$ta3
-	$SUBU	$ta3,$t3,$ta3
-	$SUBU	$t3,$ta3,$v0
-	sgtu	$v0,$t3,$ta3
-	$ST	$t3,-$BNSZ($a0)
-
-	.set	noreorder
-	bgtz	$at,.L_bn_sub_words_loop
-	$ADDU	$v0,$t9
-
-	beqz	$a3,.L_bn_sub_words_return
-	nop
-
-.L_bn_sub_words_tail:
-	.set	reorder
-	$LD	$t0,0($a1)
-	$LD	$ta0,0($a2)
-	subu	$a3,1
-	sltu	$t8,$t0,$ta0
-	$SUBU	$ta0,$t0,$ta0
-	$SUBU	$t0,$ta0,$v0
-	sgtu	$v0,$t0,$ta0
-	$ST	$t0,0($a0)
-	$ADDU	$v0,$t8
-	beqz	$a3,.L_bn_sub_words_return
-
-	$LD	$t1,$BNSZ($a1)
-	subu	$a3,1
-	$LD	$ta1,$BNSZ($a2)
-	sltu	$t9,$t1,$ta1
-	$SUBU	$ta1,$t1,$ta1
-	$SUBU	$t1,$ta1,$v0
-	sgtu	$v0,$t1,$ta1
-	$ST	$t1,$BNSZ($a0)
-	$ADDU	$v0,$t9
-	beqz	$a3,.L_bn_sub_words_return
-
-	$LD	$t2,2*$BNSZ($a1)
-	$LD	$ta2,2*$BNSZ($a2)
-	sltu	$t8,$t2,$ta2
-	$SUBU	$ta2,$t2,$ta2
-	$SUBU	$t2,$ta2,$v0
-	sgtu	$v0,$t2,$ta2
-	$ST	$t2,2*$BNSZ($a0)
-	$ADDU	$v0,$t8
-
-.L_bn_sub_words_return:
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	move	$a0,$v0
-.end	bn_sub_words_internal
-
-.align 5
-.globl	bn_div_3_words
-.ent	bn_div_3_words
-bn_div_3_words:
-	.set	noreorder
-	move	$a3,$a0		# we know that bn_div_words does not
-				# touch $a3, $ta2, $ta3 and preserves $a2,
-				# so that we can keep two arguments and
-				# the return address in registers
-				# instead of on the stack:-)
-				
-	$LD	$a0,($a3)
-	move	$ta2,$a1
-	bne	$a0,$a2,bn_div_3_words_internal
-	$LD	$a1,-$BNSZ($a3)
-	li	$v0,-1
-	jr	$ra
-	move	$a0,$v0
-.end	bn_div_3_words
-
-.align	5
-.ent	bn_div_3_words_internal
-bn_div_3_words_internal:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	.set	reorder
-	move	$ta3,$ra
-	bal	bn_div_words_internal
-	move	$ra,$ta3
-	$MULTU	$ta2,$v0
-	$LD	$t2,-2*$BNSZ($a3)
-	move	$ta0,$zero
-	mfhi	$t1
-	mflo	$t0
-	sltu	$t8,$t1,$a1
-.L_bn_div_3_words_inner_loop:
-	bnez	$t8,.L_bn_div_3_words_inner_loop_done
-	sgeu	$at,$t2,$t0
-	seq	$t9,$t1,$a1
-	and	$at,$t9
-	sltu	$t3,$t0,$ta2
-	$ADDU	$a1,$a2
-	$SUBU	$t1,$t3
-	$SUBU	$t0,$ta2
-	sltu	$t8,$t1,$a1
-	sltu	$ta0,$a1,$a2
-	or	$t8,$ta0
-	.set	noreorder
-	beqz	$at,.L_bn_div_3_words_inner_loop
-	$SUBU	$v0,1
-	$ADDU	$v0,1
-	.set	reorder
-.L_bn_div_3_words_inner_loop_done:
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	move	$a0,$v0
-.end	bn_div_3_words_internal
-
-.align	5
-.globl	bn_div_words
-.ent	bn_div_words
-bn_div_words:
-	.set	noreorder
-	bnez	$a2,bn_div_words_internal
-	li	$v0,-1		# I would rather signal div-by-zero
-				# which can be done with 'break 7'
-	jr	$ra
-	move	$a0,$v0
-.end	bn_div_words
-
-.align	5
-.ent	bn_div_words_internal
-bn_div_words_internal:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	move	$v1,$zero
-	bltz	$a2,.L_bn_div_words_body
-	move	$t9,$v1
-	$SLL	$a2,1
-	bgtz	$a2,.-4
-	addu	$t9,1
-
-	.set	reorder
-	negu	$t1,$t9
-	li	$t2,-1
-	$SLL	$t2,$t1
-	and	$t2,$a0
-	$SRL	$at,$a1,$t1
-	.set	noreorder
-	beqz	$t2,.+12
-	nop
-	break	6		# signal overflow
-	.set	reorder
-	$SLL	$a0,$t9
-	$SLL	$a1,$t9
-	or	$a0,$at
-___
-$QT=$ta0;
-$HH=$ta1;
-$DH=$v1;
-$code.=<<___;
-.L_bn_div_words_body:
-	$SRL	$DH,$a2,4*$BNSZ	# bits
-	sgeu	$at,$a0,$a2
-	.set	noreorder
-	beqz	$at,.+12
-	nop
-	$SUBU	$a0,$a2
-	.set	reorder
-
-	li	$QT,-1
-	$SRL	$HH,$a0,4*$BNSZ	# bits
-	$SRL	$QT,4*$BNSZ	# q=0xffffffff
-	beq	$DH,$HH,.L_bn_div_words_skip_div1
-	$DIVU	$zero,$a0,$DH
-	mflo	$QT
-.L_bn_div_words_skip_div1:
-	$MULTU	$a2,$QT
-	$SLL	$t3,$a0,4*$BNSZ	# bits
-	$SRL	$at,$a1,4*$BNSZ	# bits
-	or	$t3,$at
-	mflo	$t0
-	mfhi	$t1
-.L_bn_div_words_inner_loop1:
-	sltu	$t2,$t3,$t0
-	seq	$t8,$HH,$t1
-	sltu	$at,$HH,$t1
-	and	$t2,$t8
-	sltu	$v0,$t0,$a2
-	or	$at,$t2
-	.set	noreorder
-	beqz	$at,.L_bn_div_words_inner_loop1_done
-	$SUBU	$t1,$v0
-	$SUBU	$t0,$a2
-	b	.L_bn_div_words_inner_loop1
-	$SUBU	$QT,1
-	.set	reorder
-.L_bn_div_words_inner_loop1_done:
-
-	$SLL	$a1,4*$BNSZ	# bits
-	$SUBU	$a0,$t3,$t0
-	$SLL	$v0,$QT,4*$BNSZ	# bits
-
-	li	$QT,-1
-	$SRL	$HH,$a0,4*$BNSZ	# bits
-	$SRL	$QT,4*$BNSZ	# q=0xffffffff
-	beq	$DH,$HH,.L_bn_div_words_skip_div2
-	$DIVU	$zero,$a0,$DH
-	mflo	$QT
-.L_bn_div_words_skip_div2:
-	$MULTU	$a2,$QT
-	$SLL	$t3,$a0,4*$BNSZ	# bits
-	$SRL	$at,$a1,4*$BNSZ	# bits
-	or	$t3,$at
-	mflo	$t0
-	mfhi	$t1
-.L_bn_div_words_inner_loop2:
-	sltu	$t2,$t3,$t0
-	seq	$t8,$HH,$t1
-	sltu	$at,$HH,$t1
-	and	$t2,$t8
-	sltu	$v1,$t0,$a2
-	or	$at,$t2
-	.set	noreorder
-	beqz	$at,.L_bn_div_words_inner_loop2_done
-	$SUBU	$t1,$v1
-	$SUBU	$t0,$a2
-	b	.L_bn_div_words_inner_loop2
-	$SUBU	$QT,1
-	.set	reorder
-.L_bn_div_words_inner_loop2_done:
-
-	$SUBU	$a0,$t3,$t0
-	or	$v0,$QT
-	$SRL	$v1,$a0,$t9	# $v1 contains remainder if anybody wants it
-	$SRL	$a2,$t9		# restore $a2
-
-	.set	noreorder
-	move	$a1,$v1
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	move	$a0,$v0
-.end	bn_div_words_internal
-___
-undef $HH; undef $QT; undef $DH;
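When a double-width type is available, the value bn_div_words computes has a one-line description. The following is a reference for the semantics only (32-bit flavour, with the precondition h < d that the callers guarantee), not for the normalize-and-correct implementation above:

    #include <stdint.h>

    typedef uint32_t BN_ULONG;

    /* Semantics of bn_div_words: quotient of the double word h:l by d.
       Callers ensure h < d, so the quotient fits in a single word. */
    static BN_ULONG bn_div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
    {
        uint64_t n = ((uint64_t)h << 32) | l;
        return (BN_ULONG)(n / d);
    }

The assembly has no double-width divide to lean on, which is why it normalizes d and derives the quotient as two estimated-and-corrected half-word digits (the .L_bn_div_words_inner_loop1/2 passes above).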
-
-($a_0,$a_1,$a_2,$a_3)=($t0,$t1,$t2,$t3);
-($b_0,$b_1,$b_2,$b_3)=($ta0,$ta1,$ta2,$ta3);
-
-($a_4,$a_5,$a_6,$a_7)=($s0,$s2,$s4,$a1); # once we load a[7], no use for $a1
-($b_4,$b_5,$b_6,$b_7)=($s1,$s3,$s5,$a2); # once we load b[7], no use for $a2
-
-($t_1,$t_2,$c_1,$c_2,$c_3)=($t8,$t9,$v0,$v1,$a3);
-
-$code.=<<___;
-
-.align	5
-.globl	bn_mul_comba8
-.ent	bn_mul_comba8
-bn_mul_comba8:
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,12*$SZREG,$ra
-	.mask	0x803ff008,-$SZREG
-	$PTR_SUB $sp,12*$SZREG
-	$REG_S	$ra,11*$SZREG($sp)
-	$REG_S	$s5,10*$SZREG($sp)
-	$REG_S	$s4,9*$SZREG($sp)
-	$REG_S	$s3,8*$SZREG($sp)
-	$REG_S	$s2,7*$SZREG($sp)
-	$REG_S	$s1,6*$SZREG($sp)
-	$REG_S	$s0,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___ if ($flavour !~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x003f0000,-$SZREG
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$s5,5*$SZREG($sp)
-	$REG_S	$s4,4*$SZREG($sp)
-	$REG_S	$s3,3*$SZREG($sp)
-	$REG_S	$s2,2*$SZREG($sp)
-	$REG_S	$s1,1*$SZREG($sp)
-	$REG_S	$s0,0*$SZREG($sp)
-___
-$code.=<<___;
-
-	.set	reorder
-	$LD	$a_0,0($a1)	# If compiled with the -mips3 option
-				# on an R5000 box, the assembler barks
-				# on this line with a "should not have
-				# mult/div as last instruction in bb
-				# (R10K bug)" warning. If anybody out
-				# there has a clue about how to
-				# circumvent this, do send me a note.
-				#		<appro\@fy.chalmers.se>
-
-	$LD	$b_0,0($a2)
-	$LD	$a_1,$BNSZ($a1)
-	$LD	$a_2,2*$BNSZ($a1)
-	$MULTU	$a_0,$b_0		# mul_add_c(a[0],b[0],c1,c2,c3);
-	$LD	$a_3,3*$BNSZ($a1)
-	$LD	$b_1,$BNSZ($a2)
-	$LD	$b_2,2*$BNSZ($a2)
-	$LD	$b_3,3*$BNSZ($a2)
-	mflo	$c_1
-	mfhi	$c_2
-
-	$LD	$a_4,4*$BNSZ($a1)
-	$LD	$a_5,5*$BNSZ($a1)
-	$MULTU	$a_0,$b_1		# mul_add_c(a[0],b[1],c2,c3,c1);
-	$LD	$a_6,6*$BNSZ($a1)
-	$LD	$a_7,7*$BNSZ($a1)
-	$LD	$b_4,4*$BNSZ($a2)
-	$LD	$b_5,5*$BNSZ($a2)
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_1,$b_0		# mul_add_c(a[1],b[0],c2,c3,c1);
-	$ADDU	$c_3,$t_2,$at
-	$LD	$b_6,6*$BNSZ($a2)
-	$LD	$b_7,7*$BNSZ($a2)
-	$ST	$c_1,0($a0)	# r[0]=c1;
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	 $MULTU	$a_2,$b_0		# mul_add_c(a[2],b[0],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$c_1,$c_3,$t_2
-	$ST	$c_2,$BNSZ($a0)	# r[1]=c2;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_1,$b_1		# mul_add_c(a[1],b[1],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_0,$b_2		# mul_add_c(a[0],b[2],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$c_2,$c_1,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	 $MULTU	$a_0,$b_3		# mul_add_c(a[0],b[3],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	$ST	$c_3,2*$BNSZ($a0)	# r[2]=c3;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_1,$b_2		# mul_add_c(a[1],b[2],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$c_3,$c_2,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_2,$b_1		# mul_add_c(a[2],b[1],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_3,$b_0		# mul_add_c(a[3],b[0],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	 $MULTU	$a_4,$b_0		# mul_add_c(a[4],b[0],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	$ST	$c_1,3*$BNSZ($a0)	# r[3]=c1;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_3,$b_1		# mul_add_c(a[3],b[1],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$c_1,$c_3,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_2,$b_2		# mul_add_c(a[2],b[2],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_1,$b_3		# mul_add_c(a[1],b[3],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_0,$b_4		# mul_add_c(a[0],b[4],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	 $MULTU	$a_0,$b_5		# mul_add_c(a[0],b[5],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	$ST	$c_2,4*$BNSZ($a0)	# r[4]=c2;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_1,$b_4		# mul_add_c(a[1],b[4],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$c_2,$c_1,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_2,$b_3		# mul_add_c(a[2],b[3],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_3,$b_2		# mul_add_c(a[3],b[2],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_4,$b_1		# mul_add_c(a[4],b[1],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_5,$b_0		# mul_add_c(a[5],b[0],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	 $MULTU	$a_6,$b_0		# mul_add_c(a[6],b[0],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	$ST	$c_3,5*$BNSZ($a0)	# r[5]=c3;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_5,$b_1		# mul_add_c(a[5],b[1],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$c_3,$c_2,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_4,$b_2		# mul_add_c(a[4],b[2],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_3,$b_3		# mul_add_c(a[3],b[3],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_2,$b_4		# mul_add_c(a[2],b[4],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_1,$b_5		# mul_add_c(a[1],b[5],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_0,$b_6		# mul_add_c(a[0],b[6],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	 $MULTU	$a_0,$b_7		# mul_add_c(a[0],b[7],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	$ST	$c_1,6*$BNSZ($a0)	# r[6]=c1;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_1,$b_6		# mul_add_c(a[1],b[6],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$c_1,$c_3,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_2,$b_5		# mul_add_c(a[2],b[5],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_3,$b_4		# mul_add_c(a[3],b[4],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_4,$b_3		# mul_add_c(a[4],b[3],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_5,$b_2		# mul_add_c(a[5],b[2],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_6,$b_1		# mul_add_c(a[6],b[1],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_7,$b_0		# mul_add_c(a[7],b[0],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	 $MULTU	$a_7,$b_1		# mul_add_c(a[7],b[1],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	$ST	$c_2,7*$BNSZ($a0)	# r[7]=c2;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_6,$b_2		# mul_add_c(a[6],b[2],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$c_2,$c_1,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_5,$b_3		# mul_add_c(a[5],b[3],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_4,$b_4		# mul_add_c(a[4],b[4],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_3,$b_5		# mul_add_c(a[3],b[5],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_2,$b_6		# mul_add_c(a[2],b[6],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_1,$b_7		# mul_add_c(a[1],b[7],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	 $MULTU	$a_2,$b_7		# mul_add_c(a[2],b[7],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	$ST	$c_3,8*$BNSZ($a0)	# r[8]=c3;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_3,$b_6		# mul_add_c(a[3],b[6],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$c_3,$c_2,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_4,$b_5		# mul_add_c(a[4],b[5],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_5,$b_4		# mul_add_c(a[5],b[4],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_6,$b_3		# mul_add_c(a[6],b[3],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_7,$b_2		# mul_add_c(a[7],b[2],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	 $MULTU	$a_7,$b_3		# mul_add_c(a[7],b[3],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	$ST	$c_1,9*$BNSZ($a0)	# r[9]=c1;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_6,$b_4		# mul_add_c(a[6],b[4],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$c_1,$c_3,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_5,$b_5		# mul_add_c(a[5],b[5],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_4,$b_6		# mul_add_c(a[4],b[6],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_3,$b_7		# mul_add_c(a[3],b[7],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_4,$b_7		# mul_add_c(a[4],b[7],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	$ST	$c_2,10*$BNSZ($a0)	# r[10]=c2;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_5,$b_6		# mul_add_c(a[5],b[6],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$c_2,$c_1,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_6,$b_5		# mul_add_c(a[6],b[5],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_7,$b_4		# mul_add_c(a[7],b[4],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	 $MULTU	$a_7,$b_5		# mul_add_c(a[7],b[5],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	$ST	$c_3,11*$BNSZ($a0)	# r[11]=c3;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_6,$b_6		# mul_add_c(a[6],b[6],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$c_3,$c_2,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_5,$b_7		# mul_add_c(a[5],b[7],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	 $MULTU	$a_6,$b_7		# mul_add_c(a[6],b[7],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	$ST	$c_1,12*$BNSZ($a0)	# r[12]=c1;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_7,$b_6		# mul_add_c(a[7],b[6],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$c_1,$c_3,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_7,$b_7		# mul_add_c(a[7],b[7],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	$ST	$c_2,13*$BNSZ($a0)	# r[13]=c2;
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	$ST	$c_3,14*$BNSZ($a0)	# r[14]=c3;
-	$ST	$c_1,15*$BNSZ($a0)	# r[15]=c1;
-
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$s5,10*$SZREG($sp)
-	$REG_L	$s4,9*$SZREG($sp)
-	$REG_L	$s3,8*$SZREG($sp)
-	$REG_L	$s2,7*$SZREG($sp)
-	$REG_L	$s1,6*$SZREG($sp)
-	$REG_L	$s0,5*$SZREG($sp)
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	jr	$ra
-	$PTR_ADD $sp,12*$SZREG
-___
-$code.=<<___ if ($flavour !~ /nubi/i);
-	$REG_L	$s5,5*$SZREG($sp)
-	$REG_L	$s4,4*$SZREG($sp)
-	$REG_L	$s3,3*$SZREG($sp)
-	$REG_L	$s2,2*$SZREG($sp)
-	$REG_L	$s1,1*$SZREG($sp)
-	$REG_L	$s0,0*$SZREG($sp)
-	jr	$ra
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-.end	bn_mul_comba8
-
-.align	5
-.globl	bn_mul_comba4
-.ent	bn_mul_comba4
-bn_mul_comba4:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	.set	reorder
-	$LD	$a_0,0($a1)
-	$LD	$b_0,0($a2)
-	$LD	$a_1,$BNSZ($a1)
-	$LD	$a_2,2*$BNSZ($a1)
-	$MULTU	$a_0,$b_0		# mul_add_c(a[0],b[0],c1,c2,c3);
-	$LD	$a_3,3*$BNSZ($a1)
-	$LD	$b_1,$BNSZ($a2)
-	$LD	$b_2,2*$BNSZ($a2)
-	$LD	$b_3,3*$BNSZ($a2)
-	mflo	$c_1
-	mfhi	$c_2
-	$ST	$c_1,0($a0)
-
-	$MULTU	$a_0,$b_1		# mul_add_c(a[0],b[1],c2,c3,c1);
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_1,$b_0		# mul_add_c(a[1],b[0],c2,c3,c1);
-	$ADDU	$c_3,$t_2,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	 $MULTU	$a_2,$b_0		# mul_add_c(a[2],b[0],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$c_1,$c_3,$t_2
-	$ST	$c_2,$BNSZ($a0)
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_1,$b_1		# mul_add_c(a[1],b[1],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_0,$b_2		# mul_add_c(a[0],b[2],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$c_2,$c_1,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	 $MULTU	$a_0,$b_3		# mul_add_c(a[0],b[3],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	$ST	$c_3,2*$BNSZ($a0)
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_1,$b_2		# mul_add_c(a[1],b[2],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$c_3,$c_2,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_2,$b_1		# mul_add_c(a[2],b[1],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$MULTU	$a_3,$b_0		# mul_add_c(a[3],b[0],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	 $MULTU	$a_3,$b_1		# mul_add_c(a[3],b[1],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	$ST	$c_1,3*$BNSZ($a0)
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_2,$b_2		# mul_add_c(a[2],b[2],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$c_1,$c_3,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$MULTU	$a_1,$b_3		# mul_add_c(a[1],b[3],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	 $MULTU	$a_2,$b_3		# mul_add_c(a[2],b[3],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	$ST	$c_2,4*$BNSZ($a0)
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$MULTU	$a_3,$b_2		# mul_add_c(a[3],b[2],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$c_2,$c_1,$t_2
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	 $MULTU	$a_3,$b_3		# mul_add_c(a[3],b[3],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	$ST	$c_3,5*$BNSZ($a0)
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	$ST	$c_1,6*$BNSZ($a0)
-	$ST	$c_2,7*$BNSZ($a0)
-
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	nop
-.end	bn_mul_comba4
-___
-
-($a_4,$a_5,$a_6,$a_7)=($b_0,$b_1,$b_2,$b_3);
-
-sub add_c2 () {
-my ($hi,$lo,$c0,$c1,$c2,
-    $warm,      # !$warm denotes the first call with a specific sequence
-                # of $c_[XYZ], when there is no Z-carry to accumulate yet;
-    $an,$bn     # these two are the arguments for the multiplication whose
-                # result is used in the *next* step [which is why it's
-                # commented as "forward multiplication" below];
-    )=@_;
-$code.=<<___;
-	mflo	$lo
-	mfhi	$hi
-	$ADDU	$c0,$lo
-	sltu	$at,$c0,$lo
-	 $MULTU	$an,$bn			# forward multiplication
-	$ADDU	$c0,$lo
-	$ADDU	$at,$hi
-	sltu	$lo,$c0,$lo
-	$ADDU	$c1,$at
-	$ADDU	$hi,$lo
-___
-$code.=<<___	if (!$warm);
-	sltu	$c2,$c1,$at
-	$ADDU	$c1,$hi
-	sltu	$hi,$c1,$hi
-	$ADDU	$c2,$hi
-___
-$code.=<<___	if ($warm);
-	sltu	$at,$c1,$at
-	$ADDU	$c1,$hi
-	$ADDU	$c2,$at
-	sltu	$hi,$c1,$hi
-	$ADDU	$c2,$hi
-___
-}
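add_c2 above emits the doubled variant, mul_add_c2, used for the off-diagonal terms of squaring (a[i]*a[j] appears twice); it also issues the *next* MULTU early, the "forward multiplication", so the multiplier works in parallel with the carry resolution. Doubling a double-word product can drop a bit at either end, which the straight-line sequences below catch by saving each top bit (slt against $zero) before the 1-bit shift. An arithmetically equivalent C sketch that simply folds the product in twice (hypothetical helper, assuming unsigned __int128):

#include <stdint.h>

/* Accumulate 2*a*b into (c0,c1,c2) by adding the product twice,
 * which sidesteps the top-bit bookkeeping the shifted form needs. */
static void mul_add_c2(uint64_t a, uint64_t b,
                       uint64_t *c0, uint64_t *c1, uint64_t *c2)
{
    unsigned __int128 t = (unsigned __int128)a * b;
    uint64_t lo = (uint64_t)t, hi = (uint64_t)(t >> 64), tt;

    *c0 += lo; tt = hi + (*c0 < lo);
    *c1 += tt; *c2 += (*c1 < tt);
    *c0 += lo; hi += (*c0 < lo);
    *c1 += hi; *c2 += (*c1 < hi);
}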
-
-$code.=<<___;
-
-.align	5
-.globl	bn_sqr_comba8
-.ent	bn_sqr_comba8
-bn_sqr_comba8:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	.set	reorder
-	$LD	$a_0,0($a1)
-	$LD	$a_1,$BNSZ($a1)
-	$LD	$a_2,2*$BNSZ($a1)
-	$LD	$a_3,3*$BNSZ($a1)
-
-	$MULTU	$a_0,$a_0		# mul_add_c(a[0],b[0],c1,c2,c3);
-	$LD	$a_4,4*$BNSZ($a1)
-	$LD	$a_5,5*$BNSZ($a1)
-	$LD	$a_6,6*$BNSZ($a1)
-	$LD	$a_7,7*$BNSZ($a1)
-	mflo	$c_1
-	mfhi	$c_2
-	$ST	$c_1,0($a0)
-
-	$MULTU	$a_0,$a_1		# mul_add_c2(a[0],b[1],c2,c3,c1);
-	mflo	$t_1
-	mfhi	$t_2
-	slt	$c_1,$t_2,$zero
-	$SLL	$t_2,1
-	 $MULTU	$a_2,$a_0		# mul_add_c2(a[2],b[0],c3,c1,c2);
-	slt	$a2,$t_1,$zero
-	$ADDU	$t_2,$a2
-	$SLL	$t_1,1
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$ADDU	$c_3,$t_2,$at
-	$ST	$c_2,$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
-		$a_1,$a_1);		# mul_add_c(a[1],b[1],c3,c1,c2);
-$code.=<<___;
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	 $MULTU	$a_0,$a_3		# mul_add_c2(a[0],b[3],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	$ST	$c_3,2*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
-		$a_1,$a_2);		# mul_add_c2(a[1],b[2],c1,c2,c3);
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
-		$a_4,$a_0);		# mul_add_c2(a[4],b[0],c2,c3,c1);
-$code.=<<___;
-	$ST	$c_1,3*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
-		$a_3,$a_1);		# mul_add_c2(a[3],b[1],c2,c3,c1);
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
-		$a_2,$a_2);		# mul_add_c(a[2],b[2],c2,c3,c1);
-$code.=<<___;
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	 $MULTU	$a_0,$a_5		# mul_add_c2(a[0],b[5],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	$ST	$c_2,4*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
-		$a_1,$a_4);		# mul_add_c2(a[1],b[4],c3,c1,c2);
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
-		$a_2,$a_3);		# mul_add_c2(a[2],b[3],c3,c1,c2);
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
-		$a_6,$a_0);		# mul_add_c2(a[6],b[0],c1,c2,c3);
-$code.=<<___;
-	$ST	$c_3,5*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
-		$a_5,$a_1);		# mul_add_c2(a[5],b[1],c1,c2,c3);
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
-		$a_4,$a_2);		# mul_add_c2(a[4],b[2],c1,c2,c3);
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
-		$a_3,$a_3);		# mul_add_c(a[3],b[3],c1,c2,c3);
-$code.=<<___;
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	 $MULTU	$a_0,$a_7		# mul_add_c2(a[0],b[7],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	$ST	$c_1,6*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
-		$a_1,$a_6);		# mul_add_c2(a[1],b[6],c2,c3,c1);
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
-		$a_2,$a_5);		# mul_add_c2(a[2],b[5],c2,c3,c1);
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
-		$a_3,$a_4);		# mul_add_c2(a[3],b[4],c2,c3,c1);
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
-		$a_7,$a_1);		# mul_add_c2(a[7],b[1],c3,c1,c2);
-$code.=<<___;
-	$ST	$c_2,7*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
-		$a_6,$a_2);		# mul_add_c2(a[6],b[2],c3,c1,c2);
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
-		$a_5,$a_3);		# mul_add_c2(a[5],b[3],c3,c1,c2);
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
-		$a_4,$a_4);		# mul_add_c(a[4],b[4],c3,c1,c2);
-$code.=<<___;
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	 $MULTU	$a_2,$a_7		# mul_add_c2(a[2],b[7],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	$ST	$c_3,8*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
-		$a_3,$a_6);		# mul_add_c2(a[3],b[6],c1,c2,c3);
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
-		$a_4,$a_5);		# mul_add_c2(a[4],b[5],c1,c2,c3);
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
-		$a_7,$a_3);		# mul_add_c2(a[7],b[3],c2,c3,c1);
-$code.=<<___;
-	$ST	$c_1,9*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
-		$a_6,$a_4);		# mul_add_c2(a[6],b[4],c2,c3,c1);
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
-		$a_5,$a_5);		# mul_add_c(a[5],b[5],c2,c3,c1);
-$code.=<<___;
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	 $MULTU	$a_4,$a_7		# mul_add_c2(a[4],b[7],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	$ST	$c_2,10*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
-		$a_5,$a_6);		# mul_add_c2(a[5],b[6],c3,c1,c2);
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
-		$a_7,$a_5);		# mul_add_c2(a[7],b[5],c1,c2,c3);
-$code.=<<___;
-	$ST	$c_3,11*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
-		$a_6,$a_6);		# mul_add_c(a[6],b[6],c1,c2,c3);
-$code.=<<___;
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	 $MULTU	$a_6,$a_7		# mul_add_c2(a[6],b[7],c2,c3,c1);
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	sltu	$at,$c_2,$t_2
-	$ADDU	$c_3,$at
-	$ST	$c_1,12*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
-		$a_7,$a_7);		# mul_add_c(a[7],b[7],c3,c1,c2);
-$code.=<<___;
-	$ST	$c_2,13*$BNSZ($a0)
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	$ST	$c_3,14*$BNSZ($a0)
-	$ST	$c_1,15*$BNSZ($a0)
-
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	nop
-.end	bn_sqr_comba8
-
-.align	5
-.globl	bn_sqr_comba4
-.ent	bn_sqr_comba4
-bn_sqr_comba4:
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	.frame	$sp,6*$SZREG,$ra
-	.mask	0x8000f008,-$SZREG
-	.set	noreorder
-	$PTR_SUB $sp,6*$SZREG
-	$REG_S	$ra,5*$SZREG($sp)
-	$REG_S	$t3,4*$SZREG($sp)
-	$REG_S	$t2,3*$SZREG($sp)
-	$REG_S	$t1,2*$SZREG($sp)
-	$REG_S	$t0,1*$SZREG($sp)
-	$REG_S	$gp,0*$SZREG($sp)
-___
-$code.=<<___;
-	.set	reorder
-	$LD	$a_0,0($a1)
-	$LD	$a_1,$BNSZ($a1)
-	$MULTU	$a_0,$a_0		# mul_add_c(a[0],b[0],c1,c2,c3);
-	$LD	$a_2,2*$BNSZ($a1)
-	$LD	$a_3,3*$BNSZ($a1)
-	mflo	$c_1
-	mfhi	$c_2
-	$ST	$c_1,0($a0)
-
-	$MULTU	$a_0,$a_1		# mul_add_c2(a[0],b[1],c2,c3,c1);
-	mflo	$t_1
-	mfhi	$t_2
-	slt	$c_1,$t_2,$zero
-	$SLL	$t_2,1
-	 $MULTU	$a_2,$a_0		# mul_add_c2(a[2],b[0],c3,c1,c2);
-	slt	$a2,$t_1,$zero
-	$ADDU	$t_2,$a2
-	$SLL	$t_1,1
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	$ADDU	$c_3,$t_2,$at
-	$ST	$c_2,$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
-		$a_1,$a_1);		# mul_add_c(a[1],b[1],c3,c1,c2);
-$code.=<<___;
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_3,$t_1
-	sltu	$at,$c_3,$t_1
-	 $MULTU	$a_0,$a_3		# mul_add_c2(a[0],b[3],c1,c2,c3);
-	$ADDU	$t_2,$at
-	$ADDU	$c_1,$t_2
-	sltu	$at,$c_1,$t_2
-	$ADDU	$c_2,$at
-	$ST	$c_3,2*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
-		$a_1,$a_2);		# mul_add_c2(a[1],b[2],c1,c2,c3);
-	&add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
-		$a_3,$a_1);		# mul_add_c2(a[3],b[1],c2,c3,c1);
-$code.=<<___;
-	$ST	$c_1,3*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
-		$a_2,$a_2);		# mul_add_c(a[2],b[2],c2,c3,c1);
-$code.=<<___;
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_2,$t_1
-	sltu	$at,$c_2,$t_1
-	 $MULTU	$a_2,$a_3		# mul_add_c2(a[2],b[3],c3,c1,c2);
-	$ADDU	$t_2,$at
-	$ADDU	$c_3,$t_2
-	sltu	$at,$c_3,$t_2
-	$ADDU	$c_1,$at
-	$ST	$c_2,4*$BNSZ($a0)
-___
-	&add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
-		$a_3,$a_3);		# mul_add_c(a[3],b[3],c1,c2,c3);
-$code.=<<___;
-	$ST	$c_3,5*$BNSZ($a0)
-
-	mflo	$t_1
-	mfhi	$t_2
-	$ADDU	$c_1,$t_1
-	sltu	$at,$c_1,$t_1
-	$ADDU	$t_2,$at
-	$ADDU	$c_2,$t_2
-	$ST	$c_1,6*$BNSZ($a0)
-	$ST	$c_2,7*$BNSZ($a0)
-
-	.set	noreorder
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
-	$REG_L	$t3,4*$SZREG($sp)
-	$REG_L	$t2,3*$SZREG($sp)
-	$REG_L	$t1,2*$SZREG($sp)
-	$REG_L	$t0,1*$SZREG($sp)
-	$REG_L	$gp,0*$SZREG($sp)
-	$PTR_ADD $sp,6*$SZREG
-___
-$code.=<<___;
-	jr	$ra
-	nop
-.end	bn_sqr_comba4
-___
-print $code;
-close STDOUT;

+ 0 - 327
drivers/builtin_openssl2/crypto/bn/asm/mips3-mont.pl

@@ -1,327 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# This module is of no direct interest to OpenSSL, because it
-# doesn't provide better performance for longer keys. While 512-bit
-# RSA private key operations are 40% faster, 1024-bit ones are hardly
-# faster at all, and longer key operations are slower by up to 20%.
-# It might be of interest to embedded system developers though, as
-# it's smaller than 1KB, yet offers ~3x improvement over
-# compiler-generated code.
-#
-# The module targets N32 and N64 MIPS ABIs and currently is a bit
-# IRIX-centric, i.e. is likely to require adaptation for other OSes.
-
-# int bn_mul_mont(
-$rp="a0";	# BN_ULONG *rp,
-$ap="a1";	# const BN_ULONG *ap,
-$bp="a2";	# const BN_ULONG *bp,
-$np="a3";	# const BN_ULONG *np,
-$n0="a4";	# const BN_ULONG *n0,
-$num="a5";	# int num);
-
-$lo0="a6";
-$hi0="a7";
-$lo1="v0";
-$hi1="v1";
-$aj="t0";
-$bi="t1";
-$nj="t2";
-$tp="t3";
-$alo="s0";
-$ahi="s1";
-$nlo="s2";
-$nhi="s3";
-$tj="s4";
-$i="s5";
-$j="s6";
-$fp="t8";
-$m1="t9";
-
-$FRAME=8*(2+8);
-
-$code=<<___;
-#include <asm.h>
-#include <regdef.h>
-
-.text
-
-.set	noat
-.set	reorder
-
-.align	5
-.globl	bn_mul_mont
-.ent	bn_mul_mont
-bn_mul_mont:
-	.set	noreorder
-	PTR_SUB	sp,64
-	move	$fp,sp
-	.frame	$fp,64,ra
-	slt	AT,$num,4
-	li	v0,0
-	beqzl	AT,.Lproceed
-	nop
-	jr	ra
-	PTR_ADD	sp,$fp,64
-	.set	reorder
-.align	5
-.Lproceed:
-	ld	$n0,0($n0)
-	ld	$bi,0($bp)	# bp[0]
-	ld	$aj,0($ap)	# ap[0]
-	ld	$nj,0($np)	# np[0]
-	PTR_SUB	sp,16		# place for two extra words
-	sll	$num,3
-	li	AT,-4096
-	PTR_SUB	sp,$num
-	and	sp,AT
-
-	sd	s0,0($fp)
-	sd	s1,8($fp)
-	sd	s2,16($fp)
-	sd	s3,24($fp)
-	sd	s4,32($fp)
-	sd	s5,40($fp)
-	sd	s6,48($fp)
-	sd	s7,56($fp)
-
-	dmultu	$aj,$bi
-	ld	$alo,8($ap)
-	ld	$nlo,8($np)
-	mflo	$lo0
-	mfhi	$hi0
-	dmultu	$lo0,$n0
-	mflo	$m1
-
-	dmultu	$alo,$bi
-	mflo	$alo
-	mfhi	$ahi
-
-	dmultu	$nj,$m1
-	mflo	$lo1
-	mfhi	$hi1
-	dmultu	$nlo,$m1
-	daddu	$lo1,$lo0
-	sltu	AT,$lo1,$lo0
-	daddu	$hi1,AT
-	mflo	$nlo
-	mfhi	$nhi
-
-	move	$tp,sp
-	li	$j,16
-.align	4
-.L1st:
-	.set	noreorder
-	PTR_ADD	$aj,$ap,$j
-	ld	$aj,($aj)
-	PTR_ADD	$nj,$np,$j
-	ld	$nj,($nj)
-
-	dmultu	$aj,$bi
-	daddu	$lo0,$alo,$hi0
-	daddu	$lo1,$nlo,$hi1
-	sltu	AT,$lo0,$hi0
-	sltu	s7,$lo1,$hi1
-	daddu	$hi0,$ahi,AT
-	daddu	$hi1,$nhi,s7
-	mflo	$alo
-	mfhi	$ahi
-
-	daddu	$lo1,$lo0
-	sltu	AT,$lo1,$lo0
-	dmultu	$nj,$m1
-	daddu	$hi1,AT
-	addu	$j,8
-	sd	$lo1,($tp)
-	sltu	s7,$j,$num
-	mflo	$nlo
-	mfhi	$nhi
-
-	bnez	s7,.L1st
-	PTR_ADD	$tp,8
-	.set	reorder
-
-	daddu	$lo0,$alo,$hi0
-	sltu	AT,$lo0,$hi0
-	daddu	$hi0,$ahi,AT
-
-	daddu	$lo1,$nlo,$hi1
-	sltu	s7,$lo1,$hi1
-	daddu	$hi1,$nhi,s7
-	daddu	$lo1,$lo0
-	sltu	AT,$lo1,$lo0
-	daddu	$hi1,AT
-
-	sd	$lo1,($tp)
-
-	daddu	$hi1,$hi0
-	sltu	AT,$hi1,$hi0
-	sd	$hi1,8($tp)
-	sd	AT,16($tp)
-
-	li	$i,8
-.align	4
-.Louter:
-	PTR_ADD	$bi,$bp,$i
-	ld	$bi,($bi)
-	ld	$aj,($ap)
-	ld	$alo,8($ap)
-	ld	$tj,(sp)
-
-	dmultu	$aj,$bi
-	ld	$nj,($np)
-	ld	$nlo,8($np)
-	mflo	$lo0
-	mfhi	$hi0
-	daddu	$lo0,$tj
-	dmultu	$lo0,$n0
-	sltu	AT,$lo0,$tj
-	daddu	$hi0,AT
-	mflo	$m1
-
-	dmultu	$alo,$bi
-	mflo	$alo
-	mfhi	$ahi
-
-	dmultu	$nj,$m1
-	mflo	$lo1
-	mfhi	$hi1
-
-	dmultu	$nlo,$m1
-	daddu	$lo1,$lo0
-	sltu	AT,$lo1,$lo0
-	daddu	$hi1,AT
-	mflo	$nlo
-	mfhi	$nhi
-
-	move	$tp,sp
-	li	$j,16
-	ld	$tj,8($tp)
-.align	4
-.Linner:
-	.set	noreorder
-	PTR_ADD	$aj,$ap,$j
-	ld	$aj,($aj)
-	PTR_ADD	$nj,$np,$j
-	ld	$nj,($nj)
-
-	dmultu	$aj,$bi
-	daddu	$lo0,$alo,$hi0
-	daddu	$lo1,$nlo,$hi1
-	sltu	AT,$lo0,$hi0
-	sltu	s7,$lo1,$hi1
-	daddu	$hi0,$ahi,AT
-	daddu	$hi1,$nhi,s7
-	mflo	$alo
-	mfhi	$ahi
-
-	daddu	$lo0,$tj
-	addu	$j,8
-	dmultu	$nj,$m1
-	sltu	AT,$lo0,$tj
-	daddu	$lo1,$lo0
-	daddu	$hi0,AT
-	sltu	s7,$lo1,$lo0
-	ld	$tj,16($tp)
-	daddu	$hi1,s7
-	sltu	AT,$j,$num
-	mflo	$nlo
-	mfhi	$nhi
-	sd	$lo1,($tp)
-	bnez	AT,.Linner
-	PTR_ADD	$tp,8
-	.set	reorder
-
-	daddu	$lo0,$alo,$hi0
-	sltu	AT,$lo0,$hi0
-	daddu	$hi0,$ahi,AT
-	daddu	$lo0,$tj
-	sltu	s7,$lo0,$tj
-	daddu	$hi0,s7
-
-	ld	$tj,16($tp)
-	daddu	$lo1,$nlo,$hi1
-	sltu	AT,$lo1,$hi1
-	daddu	$hi1,$nhi,AT
-	daddu	$lo1,$lo0
-	sltu	s7,$lo1,$lo0
-	daddu	$hi1,s7
-	sd	$lo1,($tp)
-
-	daddu	$lo1,$hi1,$hi0
-	sltu	$hi1,$lo1,$hi0
-	daddu	$lo1,$tj
-	sltu	AT,$lo1,$tj
-	daddu	$hi1,AT
-	sd	$lo1,8($tp)
-	sd	$hi1,16($tp)
-
-	addu	$i,8
-	sltu	s7,$i,$num
-	bnez	s7,.Louter
-
-	.set	noreorder
-	PTR_ADD	$tj,sp,$num	# &tp[num]
-	move	$tp,sp
-	move	$ap,sp
-	li	$hi0,0		# clear borrow bit
-
-.align	4
-.Lsub:	ld	$lo0,($tp)
-	ld	$lo1,($np)
-	PTR_ADD	$tp,8
-	PTR_ADD	$np,8
-	dsubu	$lo1,$lo0,$lo1	# tp[i]-np[i]
-	sgtu	AT,$lo1,$lo0
-	dsubu	$lo0,$lo1,$hi0
-	sgtu	$hi0,$lo0,$lo1
-	sd	$lo0,($rp)
-	or	$hi0,AT
-	sltu	AT,$tp,$tj
-	bnez	AT,.Lsub
-	PTR_ADD	$rp,8
-
-	dsubu	$hi0,$hi1,$hi0	# handle upmost overflow bit
-	move	$tp,sp
-	PTR_SUB	$rp,$num	# restore rp
-	not	$hi1,$hi0
-
-	and	$ap,$hi0,sp
-	and	$bp,$hi1,$rp
-	or	$ap,$ap,$bp	# ap=borrow?tp:rp
-
-.align	4
-.Lcopy:	ld	$aj,($ap)
-	PTR_ADD	$ap,8
-	PTR_ADD	$tp,8
-	sd	zero,-8($tp)
-	sltu	AT,$tp,$tj
-	sd	$aj,($rp)
-	bnez	AT,.Lcopy
-	PTR_ADD	$rp,8
-
-	ld	s0,0($fp)
-	ld	s1,8($fp)
-	ld	s2,16($fp)
-	ld	s3,24($fp)
-	ld	s4,32($fp)
-	ld	s5,40($fp)
-	ld	s6,48($fp)
-	ld	s7,56($fp)
-	li	v0,1
-	jr	ra
-	PTR_ADD	sp,$fp,64
-	.set	reorder
-END(bn_mul_mont)
-.rdata
-.asciiz	"Montgomery Multiplication for MIPS III/IV, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-print $code;
-close STDOUT;
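For reference, bn_mul_mont above is word-serial Montgomery multiplication with the classic structure: the .L1st/.Linner loops interleave the multiply pass with the reduction pass, and the .Lsub/.Lcopy tail performs the final conditional subtraction. A hedged C sketch of the same structure, not this module's code: the helper name is hypothetical, unsigned __int128 (GCC/Clang) is assumed, and n0 is -np[0]^-1 mod 2^64 as everywhere in OpenSSL (the assembly additionally refuses num < 4).

#include <stdint.h>

/* rp = ap*bp*R^-1 mod np, R = 2^(64*num); minimal sketch, not tuned. */
static void mont_mul(uint64_t *rp, const uint64_t *ap, const uint64_t *bp,
                     const uint64_t *np, uint64_t n0, int num)
{
    uint64_t tp[num + 1];                    /* running sum (C99 VLA) */
    for (int i = 0; i <= num; i++) tp[i] = 0;

    for (int i = 0; i < num; i++) {
        /* multiply pass: tp += ap * bp[i] */
        unsigned __int128 c = 0;
        for (int j = 0; j < num; j++) {
            c += (unsigned __int128)ap[j] * bp[i] + tp[j];
            tp[j] = (uint64_t)c; c >>= 64;
        }
        c += tp[num];
        tp[num] = (uint64_t)c;
        uint64_t top = (uint64_t)(c >> 64);  /* 0 or 1 */

        /* reduction pass: tp = (tp + m*np) / 2^64, m kills the low word */
        uint64_t m = tp[0] * n0;
        c = ((unsigned __int128)m * np[0] + tp[0]) >> 64;
        for (int j = 1; j < num; j++) {
            c += (unsigned __int128)m * np[j] + tp[j];
            tp[j - 1] = (uint64_t)c; c >>= 64;
        }
        c += tp[num];
        tp[num - 1] = (uint64_t)c;
        tp[num] = top + (uint64_t)(c >> 64);
    }

    /* conditional subtraction: rp = tp - np if tp >= np, else tp */
    uint64_t borrow = 0;
    for (int i = 0; i < num; i++) {
        uint64_t t = tp[i] - np[i] - borrow;
        borrow = (tp[i] < np[i]) || (borrow && tp[i] == np[i]);
        rp[i] = t;
    }
    if (tp[num] < borrow)
        for (int i = 0; i < num; i++) rp[i] = tp[i];
}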

+ 0 - 2201
drivers/builtin_openssl2/crypto/bn/asm/mips3.s

@@ -1,2201 +0,0 @@
-.rdata
-.asciiz	"mips3.s, Version 1.1"
-.asciiz	"MIPS III/IV ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
-
-/*
- * ====================================================================
- * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
- * project.
- *
- * Rights for redistribution and usage in source and binary forms are
- * granted according to the OpenSSL license. Warranty of any kind is
- * disclaimed.
- * ====================================================================
- */
-
-/*
- * This is my modest contribution to the OpenSSL project (see
- * http://www.openssl.org/ for more information about it) and is
- * a drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c
- * module. For updates see http://fy.chalmers.se/~appro/hpe/.
- *
- * The module is designed to work with either of the "new" MIPS ABI(5),
- * namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
- * IRIX 5.x, not only because it doesn't support the new ABIs, but also
- * because 5.x kernels put the R4x00 CPU into 32-bit mode, and all those
- * 64-bit instructions (daddu, dmultu, etc.) found below would only
- * cause an illegal instruction exception:-(
- *
- * In addition the code depends on preprocessor flags set up by MIPSpro
- * compiler driver (either as or cc) and therefore (probably?) can't be
- * compiled by the GNU assembler. GNU C driver manages fine though...
- * I mean as long as -mmips-as is specified or is the default option,
- * because then it simply invokes /usr/bin/as which in turn takes
- * perfect care of the preprocessor definitions. Another neat feature
- * offered by the MIPSpro assembler is an optimization pass. This gave
- * me the opportunity to have the code looking more regular as all those
- * architecture dependent instruction rescheduling details were left to
- * the assembler. Cool, huh?
- *
- * Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
- * goes way over 3 times faster!
- *
- *					<appro@fy.chalmers.se>
- */
-#include <asm.h>
-#include <regdef.h>
-
-#if _MIPS_ISA>=4
-#define	MOVNZ(cond,dst,src)	\
-	movn	dst,src,cond
-#else
-#define	MOVNZ(cond,dst,src)	\
-	.set	noreorder;	\
-	bnezl	cond,.+8;	\
-	move	dst,src;	\
-	.set	reorder
-#endif
-
-.text
-
-.set	noat
-.set	reorder
-
-#define	MINUS4	v1
-
-.align	5
-LEAF(bn_mul_add_words)
-	.set	noreorder
-	bgtzl	a2,.L_bn_mul_add_words_proceed
-	ld	t0,0(a1)
-	jr	ra
-	move	v0,zero
-	.set	reorder
-
-.L_bn_mul_add_words_proceed:
-	li	MINUS4,-4
-	and	ta0,a2,MINUS4
-	move	v0,zero
-	beqz	ta0,.L_bn_mul_add_words_tail
-
-.L_bn_mul_add_words_loop:
-	dmultu	t0,a3
-	ld	t1,0(a0)
-	ld	t2,8(a1)
-	ld	t3,8(a0)
-	ld	ta0,16(a1)
-	ld	ta1,16(a0)
-	daddu	t1,v0
-	sltu	v0,t1,v0	/* All manuals say it "compares 32-bit
-				 * values", but it seems to work fine
-				 * even on 64-bit registers. */
-	mflo	AT
-	mfhi	t0
-	daddu	t1,AT
-	daddu	v0,t0
-	sltu	AT,t1,AT
-	sd	t1,0(a0)
-	daddu	v0,AT
-
-	dmultu	t2,a3
-	ld	ta2,24(a1)
-	ld	ta3,24(a0)
-	daddu	t3,v0
-	sltu	v0,t3,v0
-	mflo	AT
-	mfhi	t2
-	daddu	t3,AT
-	daddu	v0,t2
-	sltu	AT,t3,AT
-	sd	t3,8(a0)
-	daddu	v0,AT
-
-	dmultu	ta0,a3
-	subu	a2,4
-	PTR_ADD	a0,32
-	PTR_ADD	a1,32
-	daddu	ta1,v0
-	sltu	v0,ta1,v0
-	mflo	AT
-	mfhi	ta0
-	daddu	ta1,AT
-	daddu	v0,ta0
-	sltu	AT,ta1,AT
-	sd	ta1,-16(a0)
-	daddu	v0,AT
-
-
-	dmultu	ta2,a3
-	and	ta0,a2,MINUS4
-	daddu	ta3,v0
-	sltu	v0,ta3,v0
-	mflo	AT
-	mfhi	ta2
-	daddu	ta3,AT
-	daddu	v0,ta2
-	sltu	AT,ta3,AT
-	sd	ta3,-8(a0)
-	daddu	v0,AT
-	.set	noreorder
-	bgtzl	ta0,.L_bn_mul_add_words_loop
-	ld	t0,0(a1)
-
-	bnezl	a2,.L_bn_mul_add_words_tail
-	ld	t0,0(a1)
-	.set	reorder
-
-.L_bn_mul_add_words_return:
-	jr	ra
-
-.L_bn_mul_add_words_tail:
-	dmultu	t0,a3
-	ld	t1,0(a0)
-	subu	a2,1
-	daddu	t1,v0
-	sltu	v0,t1,v0
-	mflo	AT
-	mfhi	t0
-	daddu	t1,AT
-	daddu	v0,t0
-	sltu	AT,t1,AT
-	sd	t1,0(a0)
-	daddu	v0,AT
-	beqz	a2,.L_bn_mul_add_words_return
-
-	ld	t0,8(a1)
-	dmultu	t0,a3
-	ld	t1,8(a0)
-	subu	a2,1
-	daddu	t1,v0
-	sltu	v0,t1,v0
-	mflo	AT
-	mfhi	t0
-	daddu	t1,AT
-	daddu	v0,t0
-	sltu	AT,t1,AT
-	sd	t1,8(a0)
-	daddu	v0,AT
-	beqz	a2,.L_bn_mul_add_words_return
-
-	ld	t0,16(a1)
-	dmultu	t0,a3
-	ld	t1,16(a0)
-	daddu	t1,v0
-	sltu	v0,t1,v0
-	mflo	AT
-	mfhi	t0
-	daddu	t1,AT
-	daddu	v0,t0
-	sltu	AT,t1,AT
-	sd	t1,16(a0)
-	daddu	v0,AT
-	jr	ra
-END(bn_mul_add_words)
-
-.align	5
-LEAF(bn_mul_words)
-	.set	noreorder
-	bgtzl	a2,.L_bn_mul_words_proceed
-	ld	t0,0(a1)
-	jr	ra
-	move	v0,zero
-	.set	reorder
-
-.L_bn_mul_words_proceed:
-	li	MINUS4,-4
-	and	ta0,a2,MINUS4
-	move	v0,zero
-	beqz	ta0,.L_bn_mul_words_tail
-
-.L_bn_mul_words_loop:
-	dmultu	t0,a3
-	ld	t2,8(a1)
-	ld	ta0,16(a1)
-	ld	ta2,24(a1)
-	mflo	AT
-	mfhi	t0
-	daddu	v0,AT
-	sltu	t1,v0,AT
-	sd	v0,0(a0)
-	daddu	v0,t1,t0
-
-	dmultu	t2,a3
-	subu	a2,4
-	PTR_ADD	a0,32
-	PTR_ADD	a1,32
-	mflo	AT
-	mfhi	t2
-	daddu	v0,AT
-	sltu	t3,v0,AT
-	sd	v0,-24(a0)
-	daddu	v0,t3,t2
-
-	dmultu	ta0,a3
-	mflo	AT
-	mfhi	ta0
-	daddu	v0,AT
-	sltu	ta1,v0,AT
-	sd	v0,-16(a0)
-	daddu	v0,ta1,ta0
-
-
-	dmultu	ta2,a3
-	and	ta0,a2,MINUS4
-	mflo	AT
-	mfhi	ta2
-	daddu	v0,AT
-	sltu	ta3,v0,AT
-	sd	v0,-8(a0)
-	daddu	v0,ta3,ta2
-	.set	noreorder
-	bgtzl	ta0,.L_bn_mul_words_loop
-	ld	t0,0(a1)
-
-	bnezl	a2,.L_bn_mul_words_tail
-	ld	t0,0(a1)
-	.set	reorder
-
-.L_bn_mul_words_return:
-	jr	ra
-
-.L_bn_mul_words_tail:
-	dmultu	t0,a3
-	subu	a2,1
-	mflo	AT
-	mfhi	t0
-	daddu	v0,AT
-	sltu	t1,v0,AT
-	sd	v0,0(a0)
-	daddu	v0,t1,t0
-	beqz	a2,.L_bn_mul_words_return
-
-	ld	t0,8(a1)
-	dmultu	t0,a3
-	subu	a2,1
-	mflo	AT
-	mfhi	t0
-	daddu	v0,AT
-	sltu	t1,v0,AT
-	sd	v0,8(a0)
-	daddu	v0,t1,t0
-	beqz	a2,.L_bn_mul_words_return
-
-	ld	t0,16(a1)
-	dmultu	t0,a3
-	mflo	AT
-	mfhi	t0
-	daddu	v0,AT
-	sltu	t1,v0,AT
-	sd	v0,16(a0)
-	daddu	v0,t1,t0
-	jr	ra
-END(bn_mul_words)
-
-.align	5
-LEAF(bn_sqr_words)
-	.set	noreorder
-	bgtzl	a2,.L_bn_sqr_words_proceed
-	ld	t0,0(a1)
-	jr	ra
-	move	v0,zero
-	.set	reorder
-
-.L_bn_sqr_words_proceed:
-	li	MINUS4,-4
-	and	ta0,a2,MINUS4
-	move	v0,zero
-	beqz	ta0,.L_bn_sqr_words_tail
-
-.L_bn_sqr_words_loop:
-	dmultu	t0,t0
-	ld	t2,8(a1)
-	ld	ta0,16(a1)
-	ld	ta2,24(a1)
-	mflo	t1
-	mfhi	t0
-	sd	t1,0(a0)
-	sd	t0,8(a0)
-
-	dmultu	t2,t2
-	subu	a2,4
-	PTR_ADD	a0,64
-	PTR_ADD	a1,32
-	mflo	t3
-	mfhi	t2
-	sd	t3,-48(a0)
-	sd	t2,-40(a0)
-
-	dmultu	ta0,ta0
-	mflo	ta1
-	mfhi	ta0
-	sd	ta1,-32(a0)
-	sd	ta0,-24(a0)
-
-
-	dmultu	ta2,ta2
-	and	ta0,a2,MINUS4
-	mflo	ta3
-	mfhi	ta2
-	sd	ta3,-16(a0)
-	sd	ta2,-8(a0)
-
-	.set	noreorder
-	bgtzl	ta0,.L_bn_sqr_words_loop
-	ld	t0,0(a1)
-
-	bnezl	a2,.L_bn_sqr_words_tail
-	ld	t0,0(a1)
-	.set	reorder
-
-.L_bn_sqr_words_return:
-	move	v0,zero
-	jr	ra
-
-.L_bn_sqr_words_tail:
-	dmultu	t0,t0
-	subu	a2,1
-	mflo	t1
-	mfhi	t0
-	sd	t1,0(a0)
-	sd	t0,8(a0)
-	beqz	a2,.L_bn_sqr_words_return
-
-	ld	t0,8(a1)
-	dmultu	t0,t0
-	subu	a2,1
-	mflo	t1
-	mfhi	t0
-	sd	t1,16(a0)
-	sd	t0,24(a0)
-	beqz	a2,.L_bn_sqr_words_return
-
-	ld	t0,16(a1)
-	dmultu	t0,t0
-	mflo	t1
-	mfhi	t0
-	sd	t1,32(a0)
-	sd	t0,40(a0)
-	jr	ra
-END(bn_sqr_words)
-
-.align	5
-LEAF(bn_add_words)
-	.set	noreorder
-	bgtzl	a3,.L_bn_add_words_proceed
-	ld	t0,0(a1)
-	jr	ra
-	move	v0,zero
-	.set	reorder
-
-.L_bn_add_words_proceed:
-	li	MINUS4,-4
-	and	AT,a3,MINUS4
-	move	v0,zero
-	beqz	AT,.L_bn_add_words_tail
-
-.L_bn_add_words_loop:
-	ld	ta0,0(a2)
-	subu	a3,4
-	ld	t1,8(a1)
-	and	AT,a3,MINUS4
-	ld	t2,16(a1)
-	PTR_ADD	a2,32
-	ld	t3,24(a1)
-	PTR_ADD	a0,32
-	ld	ta1,-24(a2)
-	PTR_ADD	a1,32
-	ld	ta2,-16(a2)
-	ld	ta3,-8(a2)
-	daddu	ta0,t0
-	sltu	t8,ta0,t0
-	daddu	t0,ta0,v0
-	sltu	v0,t0,ta0
-	sd	t0,-32(a0)
-	daddu	v0,t8
-
-	daddu	ta1,t1
-	sltu	t9,ta1,t1
-	daddu	t1,ta1,v0
-	sltu	v0,t1,ta1
-	sd	t1,-24(a0)
-	daddu	v0,t9
-
-	daddu	ta2,t2
-	sltu	t8,ta2,t2
-	daddu	t2,ta2,v0
-	sltu	v0,t2,ta2
-	sd	t2,-16(a0)
-	daddu	v0,t8
-	
-	daddu	ta3,t3
-	sltu	t9,ta3,t3
-	daddu	t3,ta3,v0
-	sltu	v0,t3,ta3
-	sd	t3,-8(a0)
-	daddu	v0,t9
-	
-	.set	noreorder
-	bgtzl	AT,.L_bn_add_words_loop
-	ld	t0,0(a1)
-
-	bnezl	a3,.L_bn_add_words_tail
-	ld	t0,0(a1)
-	.set	reorder
-
-.L_bn_add_words_return:
-	jr	ra
-
-.L_bn_add_words_tail:
-	ld	ta0,0(a2)
-	daddu	ta0,t0
-	subu	a3,1
-	sltu	t8,ta0,t0
-	daddu	t0,ta0,v0
-	sltu	v0,t0,ta0
-	sd	t0,0(a0)
-	daddu	v0,t8
-	beqz	a3,.L_bn_add_words_return
-
-	ld	t1,8(a1)
-	ld	ta1,8(a2)
-	daddu	ta1,t1
-	subu	a3,1
-	sltu	t9,ta1,t1
-	daddu	t1,ta1,v0
-	sltu	v0,t1,ta1
-	sd	t1,8(a0)
-	daddu	v0,t9
-	beqz	a3,.L_bn_add_words_return
-
-	ld	t2,16(a1)
-	ld	ta2,16(a2)
-	daddu	ta2,t2
-	sltu	t8,ta2,t2
-	daddu	t2,ta2,v0
-	sltu	v0,t2,ta2
-	sd	t2,16(a0)
-	daddu	v0,t8
-	jr	ra
-END(bn_add_words)
-
-.align	5
-LEAF(bn_sub_words)
-	.set	noreorder
-	bgtzl	a3,.L_bn_sub_words_proceed
-	ld	t0,0(a1)
-	jr	ra
-	move	v0,zero
-	.set	reorder
-
-.L_bn_sub_words_proceed:
-	li	MINUS4,-4
-	and	AT,a3,MINUS4
-	move	v0,zero
-	beqz	AT,.L_bn_sub_words_tail
-
-.L_bn_sub_words_loop:
-	ld	ta0,0(a2)
-	subu	a3,4
-	ld	t1,8(a1)
-	and	AT,a3,MINUS4
-	ld	t2,16(a1)
-	PTR_ADD	a2,32
-	ld	t3,24(a1)
-	PTR_ADD	a0,32
-	ld	ta1,-24(a2)
-	PTR_ADD	a1,32
-	ld	ta2,-16(a2)
-	ld	ta3,-8(a2)
-	sltu	t8,t0,ta0
-	dsubu	t0,ta0
-	dsubu	ta0,t0,v0
-	sd	ta0,-32(a0)
-	MOVNZ	(t0,v0,t8)
-
-	sltu	t9,t1,ta1
-	dsubu	t1,ta1
-	dsubu	ta1,t1,v0
-	sd	ta1,-24(a0)
-	MOVNZ	(t1,v0,t9)
-
-
-	sltu	t8,t2,ta2
-	dsubu	t2,ta2
-	dsubu	ta2,t2,v0
-	sd	ta2,-16(a0)
-	MOVNZ	(t2,v0,t8)
-
-	sltu	t9,t3,ta3
-	dsubu	t3,ta3
-	dsubu	ta3,t3,v0
-	sd	ta3,-8(a0)
-	MOVNZ	(t3,v0,t9)
-
-	.set	noreorder
-	bgtzl	AT,.L_bn_sub_words_loop
-	ld	t0,0(a1)
-
-	bnezl	a3,.L_bn_sub_words_tail
-	ld	t0,0(a1)
-	.set	reorder
-
-.L_bn_sub_words_return:
-	jr	ra
-
-.L_bn_sub_words_tail:
-	ld	ta0,0(a2)
-	subu	a3,1
-	sltu	t8,t0,ta0
-	dsubu	t0,ta0
-	dsubu	ta0,t0,v0
-	MOVNZ	(t0,v0,t8)
-	sd	ta0,0(a0)
-	beqz	a3,.L_bn_sub_words_return
-
-	ld	t1,8(a1)
-	subu	a3,1
-	ld	ta1,8(a2)
-	sltu	t9,t1,ta1
-	dsubu	t1,ta1
-	dsubu	ta1,t1,v0
-	MOVNZ	(t1,v0,t9)
-	sd	ta1,8(a0)
-	beqz	a3,.L_bn_sub_words_return
-
-	ld	t2,16(a1)
-	ld	ta2,16(a2)
-	sltu	t8,t2,ta2
-	dsubu	t2,ta2
-	dsubu	ta2,t2,v0
-	MOVNZ	(t2,v0,t8)
-	sd	ta2,16(a0)
-	jr	ra
-END(bn_sub_words)
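bn_sub_words above keeps the running borrow in v0; with no flags register, each borrow is derived from an unsigned compare, and MOVNZ (a real movn on MIPS IV, a bnezl/move pair on MIPS III) updates the borrow only when the raw difference is nonzero, so an all-zero difference correctly propagates the incoming borrow. A C sketch of the same borrow chain (hypothetical helper, assuming 64-bit words):

#include <stdint.h>

/* r = a - b over n words; returns the final borrow, like bn_sub_words.
 * The (t == 0) select plays the role of the MOVNZ conditional move. */
static uint64_t sub_words(uint64_t *r, const uint64_t *a,
                          const uint64_t *b, int n)
{
    uint64_t borrow = 0;
    for (int i = 0; i < n; i++) {
        uint64_t t = a[i] - b[i];                  /* may wrap */
        r[i] = t - borrow;
        borrow = (a[i] < b[i]) | (borrow & (t == 0));
    }
    return borrow;
}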
-
-#undef	MINUS4
-
-.align 5
-LEAF(bn_div_3_words)
-	.set	reorder
-	move	a3,a0		/* we know that bn_div_words doesn't
-				 * touch a3, ta2, ta3 and preserves a2
-				 * so that we can keep two arguments
-				 * and the return address in registers
-				 * instead of on the stack:-)
-				 */
-	ld	a0,(a3)
-	move	ta2,a1
-	ld	a1,-8(a3)
-	bne	a0,a2,.L_bn_div_3_words_proceed
-	li	v0,-1
-	jr	ra
-.L_bn_div_3_words_proceed:
-	move	ta3,ra
-	bal	bn_div_words
-	move	ra,ta3
-	dmultu	ta2,v0
-	ld	t2,-16(a3)
-	move	ta0,zero
-	mfhi	t1
-	mflo	t0
-	sltu	t8,t1,v1
-.L_bn_div_3_words_inner_loop:
-	bnez	t8,.L_bn_div_3_words_inner_loop_done
-	sgeu	AT,t2,t0
-	seq	t9,t1,v1
-	and	AT,t9
-	sltu	t3,t0,ta2
-	daddu	v1,a2
-	dsubu	t1,t3
-	dsubu	t0,ta2
-	sltu	t8,t1,v1
-	sltu	ta0,v1,a2
-	or	t8,ta0
-	.set	noreorder
-	beqzl	AT,.L_bn_div_3_words_inner_loop
-	dsubu	v0,1
-	.set	reorder
-.L_bn_div_3_words_inner_loop_done:
-	jr	ra
-END(bn_div_3_words)
-
-.align	5
-LEAF(bn_div_words)
-	.set	noreorder
-	bnezl	a2,.L_bn_div_words_proceed
-	move	v1,zero
-	jr	ra
-	li	v0,-1		/* I'd rather signal div-by-zero
-				 * which can be done with 'break 7' */
-
-.L_bn_div_words_proceed:
-	bltz	a2,.L_bn_div_words_body
-	move	t9,v1
-	dsll	a2,1
-	bgtz	a2,.-4
-	addu	t9,1
-
-	.set	reorder
-	negu	t1,t9
-	li	t2,-1
-	dsll	t2,t1
-	and	t2,a0
-	dsrl	AT,a1,t1
-	.set	noreorder
-	bnezl	t2,.+8
-	break	6		/* signal overflow */
-	.set	reorder
-	dsll	a0,t9
-	dsll	a1,t9
-	or	a0,AT
-
-#define	QT	ta0
-#define	HH	ta1
-#define	DH	v1
-.L_bn_div_words_body:
-	dsrl	DH,a2,32
-	sgeu	AT,a0,a2
-	.set	noreorder
-	bnezl	AT,.+8
-	dsubu	a0,a2
-	.set	reorder
-
-	li	QT,-1
-	dsrl	HH,a0,32
-	dsrl	QT,32	/* q=0xffffffff */
-	beq	DH,HH,.L_bn_div_words_skip_div1
-	ddivu	zero,a0,DH
-	mflo	QT
-.L_bn_div_words_skip_div1:
-	dmultu	a2,QT
-	dsll	t3,a0,32
-	dsrl	AT,a1,32
-	or	t3,AT
-	mflo	t0
-	mfhi	t1
-.L_bn_div_words_inner_loop1:
-	sltu	t2,t3,t0
-	seq	t8,HH,t1
-	sltu	AT,HH,t1
-	and	t2,t8
-	sltu	v0,t0,a2
-	or	AT,t2
-	.set	noreorder
-	beqz	AT,.L_bn_div_words_inner_loop1_done
-	dsubu	t1,v0
-	dsubu	t0,a2
-	b	.L_bn_div_words_inner_loop1
-	dsubu	QT,1
-	.set	reorder
-.L_bn_div_words_inner_loop1_done:
-
-	dsll	a1,32
-	dsubu	a0,t3,t0
-	dsll	v0,QT,32
-
-	li	QT,-1
-	dsrl	HH,a0,32
-	dsrl	QT,32	/* q=0xffffffff */
-	beq	DH,HH,.L_bn_div_words_skip_div2
-	ddivu	zero,a0,DH
-	mflo	QT
-.L_bn_div_words_skip_div2:
-#undef	DH
-	dmultu	a2,QT
-	dsll	t3,a0,32
-	dsrl	AT,a1,32
-	or	t3,AT
-	mflo	t0
-	mfhi	t1
-.L_bn_div_words_inner_loop2:
-	sltu	t2,t3,t0
-	seq	t8,HH,t1
-	sltu	AT,HH,t1
-	and	t2,t8
-	sltu	v1,t0,a2
-	or	AT,t2
-	.set	noreorder
-	beqz	AT,.L_bn_div_words_inner_loop2_done
-	dsubu	t1,v1
-	dsubu	t0,a2
-	b	.L_bn_div_words_inner_loop2
-	dsubu	QT,1
-	.set	reorder
-.L_bn_div_words_inner_loop2_done:	
-#undef	HH
-
-	dsubu	a0,t3,t0
-	or	v0,QT
-	dsrl	v1,a0,t9	/* v1 contains remainder if anybody wants it */
-	dsrl	a2,t9		/* restore a2 */
-	jr	ra
-#undef	QT
-END(bn_div_words)
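bn_div_words above first normalizes the divisor so its top bit is set (shifting h:l to match, and signalling overflow with break 6 if high bits would be lost), then forms the 64-bit quotient as two 32-bit digits, each estimated by dividing by the upper half of the divisor and corrected downward in the inner loops. Where a 128-bit type exists, the contract it implements collapses to a one-liner, which makes a handy cross-check (hypothetical helper; assumes the usual precondition h < d so the quotient fits one word):

#include <stdint.h>

/* floor((h:l) / d), h < d; the assembly also leaves the remainder in v1. */
static uint64_t div_words(uint64_t h, uint64_t l, uint64_t d)
{
    unsigned __int128 n = ((unsigned __int128)h << 64) | l;
    return (uint64_t)(n / d);
}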
-
-#define	a_0	t0
-#define	a_1	t1
-#define	a_2	t2
-#define	a_3	t3
-#define	b_0	ta0
-#define	b_1	ta1
-#define	b_2	ta2
-#define	b_3	ta3
-
-#define	a_4	s0
-#define	a_5	s2
-#define	a_6	s4
-#define	a_7	a1	/* once we load a[7] we don't need a anymore */
-#define	b_4	s1
-#define	b_5	s3
-#define	b_6	s5
-#define	b_7	a2	/* once we load b[7] we don't need b anymore */
-
-#define	t_1	t8
-#define	t_2	t9
-
-#define	c_1	v0
-#define	c_2	v1
-#define	c_3	a3
-
-#define	FRAME_SIZE	48
-
-.align	5
-LEAF(bn_mul_comba8)
-	.set	noreorder
-	PTR_SUB	sp,FRAME_SIZE
-	.frame	sp,64,ra
-	.set	reorder
-	ld	a_0,0(a1)	/* If compiled with the -mips3 option
-				 * on an R5000 box, the assembler barks
-				 * on this line with a "shouldn't have
-				 * mult/div as last instruction in bb
-				 * (R10K bug)" warning. If anybody out
-				 * there has a clue about how to
-				 * circumvent this, do send me a note.
-				 *		<appro@fy.chalmers.se>
-				 */
-	ld	b_0,0(a2)
-	ld	a_1,8(a1)
-	ld	a_2,16(a1)
-	ld	a_3,24(a1)
-	ld	b_1,8(a2)
-	ld	b_2,16(a2)
-	ld	b_3,24(a2)
-	dmultu	a_0,b_0		/* mul_add_c(a[0],b[0],c1,c2,c3); */
-	sd	s0,0(sp)
-	sd	s1,8(sp)
-	sd	s2,16(sp)
-	sd	s3,24(sp)
-	sd	s4,32(sp)
-	sd	s5,40(sp)
-	mflo	c_1
-	mfhi	c_2
-
-	dmultu	a_0,b_1		/* mul_add_c(a[0],b[1],c2,c3,c1); */
-	ld	a_4,32(a1)
-	ld	a_5,40(a1)
-	ld	a_6,48(a1)
-	ld	a_7,56(a1)
-	ld	b_4,32(a2)
-	ld	b_5,40(a2)
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	c_3,t_2,AT
-	dmultu	a_1,b_0		/* mul_add_c(a[1],b[0],c2,c3,c1); */
-	ld	b_6,48(a2)
-	ld	b_7,56(a2)
-	sd	c_1,0(a0)	/* r[0]=c1; */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	c_1,c_3,t_2
-	sd	c_2,8(a0)	/* r[1]=c2; */
-
-	dmultu	a_2,b_0		/* mul_add_c(a[2],b[0],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	dmultu	a_1,b_1		/* mul_add_c(a[1],b[1],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	c_2,c_1,t_2
-	dmultu	a_0,b_2		/* mul_add_c(a[0],b[2],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,16(a0)	/* r[2]=c3; */
-
-	dmultu	a_0,b_3		/* mul_add_c(a[0],b[3],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	c_3,c_2,t_2
-	dmultu	a_1,b_2		/* mul_add_c(a[1],b[2],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_2,b_1		/* mul_add_c(a[2],b[1],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_3,b_0		/* mul_add_c(a[3],b[0],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,24(a0)	/* r[3]=c1; */
-
-	dmultu	a_4,b_0		/* mul_add_c(a[4],b[0],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	c_1,c_3,t_2
-	dmultu	a_3,b_1		/* mul_add_c(a[3],b[1],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_2,b_2		/* mul_add_c(a[2],b[2],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_1,b_3		/* mul_add_c(a[1],b[3],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_0,b_4		/* mul_add_c(a[0],b[4],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,32(a0)	/* r[4]=c2; */
-
-	dmultu	a_0,b_5		/* mul_add_c(a[0],b[5],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	c_2,c_1,t_2
-	dmultu	a_1,b_4		/* mul_add_c(a[1],b[4],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_2,b_3		/* mul_add_c(a[2],b[3],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_3,b_2		/* mul_add_c(a[3],b[2],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_4,b_1		/* mul_add_c(a[4],b[1],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_5,b_0		/* mul_add_c(a[5],b[0],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,40(a0)	/* r[5]=c3; */
-
-	dmultu	a_6,b_0		/* mul_add_c(a[6],b[0],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	c_3,c_2,t_2
-	dmultu	a_5,b_1		/* mul_add_c(a[5],b[1],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_4,b_2		/* mul_add_c(a[4],b[2],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_3,b_3		/* mul_add_c(a[3],b[3],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_2,b_4		/* mul_add_c(a[2],b[4],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_1,b_5		/* mul_add_c(a[1],b[5],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_0,b_6		/* mul_add_c(a[0],b[6],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,48(a0)	/* r[6]=c1; */
-
-	dmultu	a_0,b_7		/* mul_add_c(a[0],b[7],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	c_1,c_3,t_2
-	dmultu	a_1,b_6		/* mul_add_c(a[1],b[6],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_2,b_5		/* mul_add_c(a[2],b[5],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_3,b_4		/* mul_add_c(a[3],b[4],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_4,b_3		/* mul_add_c(a[4],b[3],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_5,b_2		/* mul_add_c(a[5],b[2],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_6,b_1		/* mul_add_c(a[6],b[1],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_7,b_0		/* mul_add_c(a[7],b[0],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,56(a0)	/* r[7]=c2; */
-
-	dmultu	a_7,b_1		/* mul_add_c(a[7],b[1],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	c_2,c_1,t_2
-	dmultu	a_6,b_2		/* mul_add_c(a[6],b[2],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_5,b_3		/* mul_add_c(a[5],b[3],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_4,b_4		/* mul_add_c(a[4],b[4],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_3,b_5		/* mul_add_c(a[3],b[5],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_2,b_6		/* mul_add_c(a[2],b[6],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_1,b_7		/* mul_add_c(a[1],b[7],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,64(a0)	/* r[8]=c3; */
-
-	dmultu	a_2,b_7		/* mul_add_c(a[2],b[7],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	c_3,c_2,t_2
-	dmultu	a_3,b_6		/* mul_add_c(a[3],b[6],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_4,b_5		/* mul_add_c(a[4],b[5],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_5,b_4		/* mul_add_c(a[5],b[4],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_6,b_3		/* mul_add_c(a[6],b[3],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_7,b_2		/* mul_add_c(a[7],b[2],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,72(a0)	/* r[9]=c1; */
-
-	dmultu	a_7,b_3		/* mul_add_c(a[7],b[3],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	c_1,c_3,t_2
-	dmultu	a_6,b_4		/* mul_add_c(a[6],b[4],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_5,b_5		/* mul_add_c(a[5],b[5],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_4,b_6		/* mul_add_c(a[4],b[6],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_3,b_7		/* mul_add_c(a[3],b[7],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,80(a0)	/* r[10]=c2; */
-
-	dmultu	a_4,b_7		/* mul_add_c(a[4],b[7],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	c_2,c_1,t_2
-	dmultu	a_5,b_6		/* mul_add_c(a[5],b[6],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_6,b_5		/* mul_add_c(a[6],b[5],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_7,b_4		/* mul_add_c(a[7],b[4],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,88(a0)	/* r[11]=c3; */
-
-	dmultu	a_7,b_5		/* mul_add_c(a[7],b[5],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	c_3,c_2,t_2
-	dmultu	a_6,b_6		/* mul_add_c(a[6],b[6],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_5,b_7		/* mul_add_c(a[5],b[7],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,96(a0)	/* r[12]=c1; */
-
-	dmultu	a_6,b_7		/* mul_add_c(a[6],b[7],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	c_1,c_3,t_2
-	dmultu	a_7,b_6		/* mul_add_c(a[7],b[6],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,104(a0)	/* r[13]=c2; */
-
-	dmultu	a_7,b_7		/* mul_add_c(a[7],b[7],c3,c1,c2); */
-	ld	s0,0(sp)
-	ld	s1,8(sp)
-	ld	s2,16(sp)
-	ld	s3,24(sp)
-	ld	s4,32(sp)
-	ld	s5,40(sp)
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sd	c_3,112(a0)	/* r[14]=c3; */
-	sd	c_1,120(a0)	/* r[15]=c1; */
-
-	PTR_ADD	sp,FRAME_SIZE
-
-	jr	ra
-END(bn_mul_comba8)
-
-.align	5
-LEAF(bn_mul_comba4)
-	.set	reorder
-	ld	a_0,0(a1)
-	ld	b_0,0(a2)
-	ld	a_1,8(a1)
-	ld	a_2,16(a1)
-	dmultu	a_0,b_0		/* mul_add_c(a[0],b[0],c1,c2,c3); */
-	ld	a_3,24(a1)
-	ld	b_1,8(a2)
-	ld	b_2,16(a2)
-	ld	b_3,24(a2)
-	mflo	c_1
-	mfhi	c_2
-	sd	c_1,0(a0)
-
-	dmultu	a_0,b_1		/* mul_add_c(a[0],b[1],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	c_3,t_2,AT
-	dmultu	a_1,b_0		/* mul_add_c(a[1],b[0],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	c_1,c_3,t_2
-	sd	c_2,8(a0)
-
-	dmultu	a_2,b_0		/* mul_add_c(a[2],b[0],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	dmultu	a_1,b_1		/* mul_add_c(a[1],b[1],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	c_2,c_1,t_2
-	dmultu	a_0,b_2		/* mul_add_c(a[0],b[2],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,16(a0)
-
-	dmultu	a_0,b_3		/* mul_add_c(a[0],b[3],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	c_3,c_2,t_2
-	dmultu	a_1,b_2		/* mul_add_c(a[1],b[2],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_2,b_1		/* mul_add_c(a[2],b[1],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_3,b_0		/* mul_add_c(a[3],b[0],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,24(a0)
-
-	dmultu	a_3,b_1		/* mul_add_c(a[3],b[1],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	c_1,c_3,t_2
-	dmultu	a_2,b_2		/* mul_add_c(a[2],b[2],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_1,b_3		/* mul_add_c(a[1],b[3],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,32(a0)
-
-	dmultu	a_2,b_3		/* mul_add_c(a[2],b[3],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	c_2,c_1,t_2
-	dmultu	a_3,b_2		/* mul_add_c(a[3],b[2],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,40(a0)
-
-	dmultu	a_3,b_3		/* mul_add_c(a[3],b[3],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sd	c_1,48(a0)
-	sd	c_2,56(a0)
-
-	jr	ra
-END(bn_mul_comba4)
-
-#undef	a_4
-#undef	a_5
-#undef	a_6
-#undef	a_7
-#define	a_4	b_0
-#define	a_5	b_1
-#define	a_6	b_2
-#define	a_7	b_3
-
-.align	5
-LEAF(bn_sqr_comba8)
-	.set	reorder
-	ld	a_0,0(a1)
-	ld	a_1,8(a1)
-	ld	a_2,16(a1)
-	ld	a_3,24(a1)
-
-	dmultu	a_0,a_0		/* mul_add_c(a[0],b[0],c1,c2,c3); */
-	ld	a_4,32(a1)
-	ld	a_5,40(a1)
-	ld	a_6,48(a1)
-	ld	a_7,56(a1)
-	mflo	c_1
-	mfhi	c_2
-	sd	c_1,0(a0)
-
-	dmultu	a_0,a_1		/* mul_add_c2(a[0],b[1],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_1,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	c_3,t_2,AT
-	sd	c_2,8(a0)
-
-	dmultu	a_2,a_0		/* mul_add_c2(a[2],b[0],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_2,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_1,a_1		/* mul_add_c(a[1],b[1],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,16(a0)
-
-	dmultu	a_0,a_3		/* mul_add_c2(a[0],b[3],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_3,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_1,a_2		/* mul_add_c2(a[1],b[2],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_3,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,24(a0)
-
-	dmultu	a_4,a_0		/* mul_add_c2(a[4],b[0],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_1,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_3,a_1		/* mul_add_c2(a[3],b[1],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_1,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_2,a_2		/* mul_add_c(a[2],b[2],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,32(a0)
-
-	dmultu	a_0,a_5		/* mul_add_c2(a[0],b[5],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_2,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_1,a_4		/* mul_add_c2(a[1],b[4],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_2,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_2,a_3		/* mul_add_c2(a[2],b[3],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_2,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,40(a0)
-
-	dmultu	a_6,a_0		/* mul_add_c2(a[6],b[0],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_3,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_5,a_1		/* mul_add_c2(a[5],b[1],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_3,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_4,a_2		/* mul_add_c2(a[4],b[2],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_3,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_3,a_3		/* mul_add_c(a[3],b[3],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,48(a0)
-
-	dmultu	a_0,a_7		/* mul_add_c2(a[0],b[7],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_1,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_1,a_6		/* mul_add_c2(a[1],b[6],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_1,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_2,a_5		/* mul_add_c2(a[2],b[5],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_1,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_3,a_4		/* mul_add_c2(a[3],b[4],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_1,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,56(a0)
-
-	dmultu	a_7,a_1		/* mul_add_c2(a[7],b[1],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_2,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_6,a_2		/* mul_add_c2(a[6],b[2],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_2,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_5,a_3		/* mul_add_c2(a[5],b[3],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_2,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_4,a_4		/* mul_add_c(a[4],b[4],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,64(a0)
-
-	dmultu	a_2,a_7		/* mul_add_c2(a[2],b[7],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_3,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_3,a_6		/* mul_add_c2(a[3],b[6],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_3,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_4,a_5		/* mul_add_c2(a[4],b[5],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_3,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,72(a0)
-
-	dmultu	a_7,a_3		/* mul_add_c2(a[7],b[3],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_1,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_6,a_4		/* mul_add_c2(a[6],b[4],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_1,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_5,a_5		/* mul_add_c(a[5],b[5],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,80(a0)
-
-	dmultu	a_4,a_7		/* mul_add_c2(a[4],b[7],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_2,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_5,a_6		/* mul_add_c2(a[5],b[6],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_2,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,88(a0)
-
-	dmultu	a_7,a_5		/* mul_add_c2(a[7],b[5],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_3,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_6,a_6		/* mul_add_c(a[6],b[6],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,96(a0)
-
-	dmultu	a_6,a_7		/* mul_add_c2(a[6],b[7],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_1,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,104(a0)
-
-	dmultu	a_7,a_7		/* mul_add_c(a[7],b[7],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sd	c_3,112(a0)
-	sd	c_1,120(a0)
-
-	jr	ra
-END(bn_sqr_comba8)
-
-.align	5
-LEAF(bn_sqr_comba4)
-	.set	reorder
-	ld	a_0,0(a1)
-	ld	a_1,8(a1)
-	ld	a_2,16(a1)
-	ld	a_3,24(a1)
-	dmultu	a_0,a_0		/* mul_add_c(a[0],b[0],c1,c2,c3); */
-	mflo	c_1
-	mfhi	c_2
-	sd	c_1,0(a0)
-
-	dmultu	a_0,a_1		/* mul_add_c2(a[0],b[1],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_1,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	c_3,t_2,AT
-	sd	c_2,8(a0)
-
-	dmultu	a_2,a_0		/* mul_add_c2(a[2],b[0],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_2,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	dmultu	a_1,a_1		/* mul_add_c(a[1],b[1],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,16(a0)
-
-	dmultu	a_0,a_3		/* mul_add_c2(a[0],b[3],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_3,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	dmultu	a_1,a_2		/* mul_add_c2(a[1],b[2],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	slt	AT,t_2,zero
-	daddu	c_3,AT
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sltu	AT,c_2,t_2
-	daddu	c_3,AT
-	sd	c_1,24(a0)
-
-	dmultu	a_3,a_1		/* mul_add_c2(a[3],b[1],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_1,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	dmultu	a_2,a_2		/* mul_add_c(a[2],b[2],c2,c3,c1); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_2,t_1
-	sltu	AT,c_2,t_1
-	daddu	t_2,AT
-	daddu	c_3,t_2
-	sltu	AT,c_3,t_2
-	daddu	c_1,AT
-	sd	c_2,32(a0)
-
-	dmultu	a_2,a_3		/* mul_add_c2(a[2],b[3],c3,c1,c2); */
-	mflo	t_1
-	mfhi	t_2
-	slt	c_2,t_2,zero
-	dsll	t_2,1
-	slt	a2,t_1,zero
-	daddu	t_2,a2
-	dsll	t_1,1
-	daddu	c_3,t_1
-	sltu	AT,c_3,t_1
-	daddu	t_2,AT
-	daddu	c_1,t_2
-	sltu	AT,c_1,t_2
-	daddu	c_2,AT
-	sd	c_3,40(a0)
-
-	dmultu	a_3,a_3		/* mul_add_c(a[3],b[3],c1,c2,c3); */
-	mflo	t_1
-	mfhi	t_2
-	daddu	c_1,t_1
-	sltu	AT,c_1,t_1
-	daddu	t_2,AT
-	daddu	c_2,t_2
-	sd	c_1,48(a0)
-	sd	c_2,56(a0)
-
-	jr	ra
-END(bn_sqr_comba4)

+ 0 - 1497
drivers/builtin_openssl2/crypto/bn/asm/modexp512-x86_64.pl

@@ -1,1497 +0,0 @@
-#!/usr/bin/env perl
-#
-# Copyright (c) 2010-2011 Intel Corp.
-#   Author: Vinodh.Gopal@intel.com
-#           Jim Guilford
-#           Erdinc.Ozturk@intel.com
-#           Maxim.Perminov@intel.com
-#
-# More information about the algorithm used can be found at:
-#   http://www.cse.buffalo.edu/srds2009/escs2009_submission_Gopal.pdf
-#
-# ====================================================================
-# Copyright (c) 2011 The OpenSSL Project.  All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in
-#    the documentation and/or other materials provided with the
-#    distribution.
-#
-# 3. All advertising materials mentioning features or use of this
-#    software must display the following acknowledgment:
-#    "This product includes software developed by the OpenSSL Project
-#    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
-#
-# 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
-#    endorse or promote products derived from this software without
-#    prior written permission. For written permission, please contact
-#    openssl-core@openssl.org.
-#
-# 5. Products derived from this software may not be called "OpenSSL"
-#    nor may "OpenSSL" appear in their names without prior written
-#    permission of the OpenSSL Project.
-#
-# 6. Redistributions of any form whatsoever must retain the following
-#    acknowledgment:
-#    "This product includes software developed by the OpenSSL Project
-#    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
-#
-# THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
-# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
-# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-# OF THE POSSIBILITY OF SUCH DAMAGE.
-# ====================================================================
-
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-use strict;
-my $code=".text\n\n";
-my $m=0;
-
-#
-# Define x512 macros
-#
-
-#MULSTEP_512_ADD	MACRO	x7, x6, x5, x4, x3, x2, x1, x0, dst, src1, src2, add_src, tmp1, tmp2
-#
-# uses rax, rdx, and args
-sub MULSTEP_512_ADD
-{
- my ($x, $DST, $SRC2, $ASRC, $OP, $TMP)=@_;
- my @X=@$x;	# make a copy
-$code.=<<___;
-	 mov	(+8*0)($SRC2), %rax
-	 mul	$OP			# rdx:rax = %OP * [0]
-	 mov	($ASRC), $X[0]
-	 add	%rax, $X[0]
-	 adc	\$0, %rdx
-	 mov	$X[0], $DST
-___
-for(my $i=1;$i<8;$i++) {
-$code.=<<___;
-	 mov	%rdx, $TMP
-
-	 mov	(+8*$i)($SRC2), %rax
-	 mul	$OP			# rdx:rax = %OP * [$i]
-	 mov	(+8*$i)($ASRC), $X[$i]
-	 add	%rax, $X[$i]
-	 adc	\$0, %rdx
-	 add	$TMP, $X[$i]
-	 adc	\$0, %rdx
-___
-}
-$code.=<<___;
-	 mov	%rdx, $X[0]
-___
-}
-
-#MULSTEP_512	MACRO	x7, x6, x5, x4, x3, x2, x1, x0, dst, src2, src1_val, tmp
-#
-# uses rax, rdx, and args
-sub MULSTEP_512
-{
- my ($x, $DST, $SRC2, $OP, $TMP)=@_;
- my @X=@$x;	# make a copy
-$code.=<<___;
-	 mov	(+8*0)($SRC2), %rax
-	 mul	$OP			# rdx:rax = %OP * [0]
-	 add	%rax, $X[0]
-	 adc	\$0, %rdx
-	 mov	$X[0], $DST
-___
-for(my $i=1;$i<8;$i++) {
-$code.=<<___;
-	 mov	%rdx, $TMP
-
-	 mov	(+8*$i)($SRC2), %rax
-	 mul	$OP			# rdx:rax = %OP * [$i]
-	 add	%rax, $X[$i]
-	 adc	\$0, %rdx
-	 add	$TMP, $X[$i]
-	 adc	\$0, %rdx
-___
-}
-$code.=<<___;
-	 mov	%rdx, $X[0]
-___
-}
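
The two MULSTEP variants above are the inner kernel of this file: one 64-bit limb of the multiplier is multiplied against all eight limbs of a 512-bit operand, with rdx chaining the high half of each product into the next limb. A minimal model of the same limb arithmetic, in Python with explicit 64-bit limbs (names are illustrative, not from the OpenSSL sources):

    MASK64 = (1 << 64) - 1

    def mulstep_512(x, src2, op):
        # x (8 limbs) += op * src2 (8 limbs); returns the lowest limb of the
        # 576-bit result (the limb MULSTEP_512 stores to $DST) plus the upper
        # 8 limbs, which stay live in registers for the next step.
        carry = 0
        out = []
        for i in range(8):
            t = x[i] + op * src2[i] + carry
            out.append(t & MASK64)
            carry = t >> 64
        return out[0], out[1:] + [carry]

MULSTEP_512_ADD is the same chain, except each x[i] is first loaded from the extra add-source operand instead of the live registers.
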
-
-#
-# Swizzle Macros
-#
-
-# macro to copy data from flat space to swizzled table
-#MACRO swizzle	pDst, pSrc, tmp1, tmp2
-# pDst and pSrc are modified
-sub swizzle
-{
- my ($pDst, $pSrc, $cnt, $d0)=@_;
-$code.=<<___;
-	 mov	\$8, $cnt
-loop_$m:
-	 mov	($pSrc), $d0
-	 mov	$d0#w, ($pDst)
-	 shr	\$16, $d0
-	 mov	$d0#w, (+64*1)($pDst)
-	 shr	\$16, $d0
-	 mov	$d0#w, (+64*2)($pDst)
-	 shr	\$16, $d0
-	 mov	$d0#w, (+64*3)($pDst)
-	 lea	8($pSrc), $pSrc
-	 lea	64*4($pDst), $pDst
-	 dec	$cnt
-	 jnz	loop_$m
-___
-
- $m++;
-}
-
-# macro to copy data from swizzled table to flat space
-#MACRO unswizzle	pDst, pSrc, tmp*3
-sub unswizzle
-{
- my ($pDst, $pSrc, $cnt, $d0, $d1)=@_;
-$code.=<<___;
-	 mov	\$4, $cnt
-loop_$m:
-	 movzxw	(+64*3+256*0)($pSrc), $d0
-	 movzxw	(+64*3+256*1)($pSrc), $d1
-	 shl	\$16, $d0
-	 shl	\$16, $d1
-	 mov	(+64*2+256*0)($pSrc), $d0#w
-	 mov	(+64*2+256*1)($pSrc), $d1#w
-	 shl	\$16, $d0
-	 shl	\$16, $d1
-	 mov	(+64*1+256*0)($pSrc), $d0#w
-	 mov	(+64*1+256*1)($pSrc), $d1#w
-	 shl	\$16, $d0
-	 shl	\$16, $d1
-	 mov	(+64*0+256*0)($pSrc), $d0#w
-	 mov	(+64*0+256*1)($pSrc), $d1#w
-	 mov	$d0, (+8*0)($pDst)
-	 mov	$d1, (+8*1)($pDst)
-	 lea	256*2($pSrc), $pSrc
-	 lea	8*2($pDst), $pDst
-	 sub	\$1, $cnt
-	 jnz	loop_$m
-___
-
- $m++;
-}
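
The swizzle layout spreads each precomputed table entry across cache lines: every 64-bit limb is split into four 16-bit slices stored 64 bytes apart, and the 32 entries are interleaved 2 bytes apart, so a lookup walks the same sequence of cache lines whatever the (secret) index; this is the usual scatter/gather defence against cache-timing leaks. A toy byte-array model of the layout (illustrative only, not the OpenSSL code):

    def swizzle(table, idx, limbs):
        # scatter one 512-bit entry (8 limbs); slice s of limb i for entry
        # idx lives at byte offset 2*idx + 64*(4*i + s)
        for i, limb in enumerate(limbs):
            for s in range(4):
                off = 2 * idx + 64 * (4 * i + s)
                table[off:off + 2] = ((limb >> (16 * s)) & 0xFFFF).to_bytes(2, "little")

    def unswizzle(table, idx):
        # gather an entry back out of the interleaved table
        limbs = []
        for i in range(8):
            limb = 0
            for s in range(4):
                off = 2 * idx + 64 * (4 * i + s)
                limb |= int.from_bytes(table[off:off + 2], "little") << (16 * s)
            limbs.append(limb)
        return limbs

    table = bytearray(64 * 32)                 # 32 entries, 64 bytes each
    swizzle(table, 7, list(range(8)))
    assert unswizzle(table, 7) == list(range(8))
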
-
-#
-# Data Structures
-#
-
-# Reduce Data
-#
-#
-# Offset  Value
-# 0C0     Carries
-# 0B8     X2[10]
-# 0B0     X2[9]
-# 0A8     X2[8]
-# 0A0     X2[7]
-# 098     X2[6]
-# 090     X2[5]
-# 088     X2[4]
-# 080     X2[3]
-# 078     X2[2]
-# 070     X2[1]
-# 068     X2[0]
-# 060     X1[12]  P[10]
-# 058     X1[11]  P[9]  Z[8]
-# 050     X1[10]  P[8]  Z[7]
-# 048     X1[9]   P[7]  Z[6]
-# 040     X1[8]   P[6]  Z[5]
-# 038     X1[7]   P[5]  Z[4]
-# 030     X1[6]   P[4]  Z[3]
-# 028     X1[5]   P[3]  Z[2]
-# 020     X1[4]   P[2]  Z[1]
-# 018     X1[3]   P[1]  Z[0]
-# 010     X1[2]   P[0]  Y[2]
-# 008     X1[1]   Q[1]  Y[1]
-# 000     X1[0]   Q[0]  Y[0]
-
-my $X1_offset           =  0;			# 13 qwords
-my $X2_offset           =  $X1_offset + 13*8;			# 11 qwords
-my $Carries_offset      =  $X2_offset + 11*8;			# 1 qword
-my $Q_offset            =  0;			# 2 qwords
-my $P_offset            =  $Q_offset + 2*8;			# 11 qwords
-my $Y_offset            =  0;			# 3 qwords
-my $Z_offset            =  $Y_offset + 3*8;			# 9 qwords
-
-my $Red_Data_Size       =  $Carries_offset + 1*8;			# (25 qwords)
-
-#
-# Stack Frame
-#
-#
-# offset	value
-# ...		<old stack contents>
-# ...
-# 280		Garray
-
-# 278		tmp16[15]
-# ...		...
-# 200		tmp16[0]
-
-# 1F8		tmp[7]
-# ...		...
-# 1C0		tmp[0]
-
-# 1B8		GT[7]
-# ...		...
-# 180		GT[0]
-
-# 178		Reduce Data
-# ...		...
-# 0B8		Reduce Data
-# 0B0		reserved
-# 0A8		reserved
-# 0A0		reserved
-# 098		reserved
-# 090		reserved
-# 088		reduce result addr
-# 080		exp[8]
-
-# ...
-# 048		exp[1]
-# 040		exp[0]
-
-# 038		reserved
-# 030		loop_idx
-# 028		pg
-# 020		i
-# 018		pData	; arg 4
-# 010		pG	; arg 2
-# 008		pResult	; arg 1
-# 000		rsp	; stack pointer before subtract
-
-my $rsp_offset          =  0;
-my $pResult_offset      =  8*1 + $rsp_offset;
-my $pG_offset           =  8*1 + $pResult_offset;
-my $pData_offset        =  8*1 + $pG_offset;
-my $i_offset            =  8*1 + $pData_offset;
-my $pg_offset           =  8*1 + $i_offset;
-my $loop_idx_offset     =  8*1 + $pg_offset;
-my $reserved1_offset    =  8*1 + $loop_idx_offset;
-my $exp_offset          =  8*1 + $reserved1_offset;
-my $red_result_addr_offset=  8*9 + $exp_offset;
-my $reserved2_offset    =  8*1 + $red_result_addr_offset;
-my $Reduce_Data_offset  =  8*5 + $reserved2_offset;
-my $GT_offset           =  $Red_Data_Size + $Reduce_Data_offset;
-my $tmp_offset          =  8*8 + $GT_offset;
-my $tmp16_offset        =  8*8 + $tmp_offset;
-my $garray_offset       =  8*16 + $tmp16_offset;
-my $mem_size            =  8*8*32 + $garray_offset;
-
-#
-# Offsets within Reduce Data
-#
-#
-#	struct MODF_2FOLD_MONT_512_C1_DATA {
-#	UINT64 t[8][8];
-#	UINT64 m[8];
-#	UINT64 m1[8]; /* 2^768 % m */
-#	UINT64 m2[8]; /* 2^640 % m */
-#	UINT64 k1[2]; /* (- 1/m) % 2^128 */
-#	};
-
-my $T                   =  0;
-my $M                   =  512;			# = 8 * 8 * 8
-my $M1                  =  576;			# = 8 * 8 * 9 /* += 8 * 8 */
-my $M2                  =  640;			# = 8 * 8 * 10 /* += 8 * 8 */
-my $K1                  =  704;			# = 8 * 8 * 11 /* += 8 * 8 */
-
-#
-#   FUNCTIONS
-#
-
-{{{
-#
-# MULADD_128x512 : Function to multiply 128-bits (2 qwords) by 512-bits (8 qwords)
-#                       and add 512-bits (8 qwords)
-#                       to get 640 bits (10 qwords)
-# Input: 128-bit mul source: [rdi+8*1], rbp
-#        512-bit mul source: [rsi+8*n]
-#        512-bit add source: r15, r14, ..., r9, r8
-# Output: r9, r8, r15, r14, r13, r12, r11, r10, [rcx+8*1], [rcx+8*0]
-# Clobbers all regs except: rcx, rsi, rdi
-$code.=<<___;
-.type	MULADD_128x512,\@abi-omnipotent
-.align	16
-MULADD_128x512:
-___
-	&MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx");
-$code.=<<___;
-	 mov	(+8*1)(%rdi), %rbp
-___
-	&MULSTEP_512([map("%r$_",(9..15,8))], "(+8*1)(%rcx)", "%rsi", "%rbp", "%rbx");
-$code.=<<___;
-	 ret
-.size	MULADD_128x512,.-MULADD_128x512
-___
-}}}
-
-{{{
-#MULADD_256x512	MACRO	pDst, pA, pB, OP, TMP, X7, X6, X5, X4, X3, X2, X1, X0
-#
-# Inputs: pDst: Destination  (768 bits, 12 qwords)
-#         pA:   Multiplicand (1024 bits, 16 qwords)
-#         pB:   Multiplicand (512 bits, 8 qwords)
-# Dst = Ah * B + Al
-# where Ah is (in qwords) A[15:12] (256 bits) and Al is A[7:0] (512 bits)
-# Results in X3 X2 X1 X0 X7 X6 X5 X4 Dst[3:0]
-# Uses registers: arguments, RAX, RDX
-sub MULADD_256x512
-{
- my ($pDst, $pA, $pB, $OP, $TMP, $X)=@_;
-$code.=<<___;
-	mov	(+8*12)($pA), $OP
-___
-	&MULSTEP_512_ADD($X, "(+8*0)($pDst)", $pB, $pA, $OP, $TMP);
-	push(@$X,shift(@$X));
-
-$code.=<<___;
-	 mov	(+8*13)($pA), $OP
-___
-	&MULSTEP_512($X, "(+8*1)($pDst)", $pB, $OP, $TMP);
-	push(@$X,shift(@$X));
-
-$code.=<<___;
-	 mov	(+8*14)($pA), $OP
-___
-	&MULSTEP_512($X, "(+8*2)($pDst)", $pB, $OP, $TMP);
-	push(@$X,shift(@$X));
-
-$code.=<<___;
-	 mov	(+8*15)($pA), $OP
-___
-	&MULSTEP_512($X, "(+8*3)($pDst)", $pB, $OP, $TMP);
-	push(@$X,shift(@$X));
-}
-
-#
-# mont_reduce(UINT64 *x,  /* 1024 bits, 16 qwords */
-#	       UINT64 *m,  /*  512 bits,  8 qwords */
-#	       MODF_2FOLD_MONT_512_C1_DATA *data,
-#             UINT64 *r)  /*  512 bits,  8 qwords */
-# Input:  x (number to be reduced): tmp16 (Implicit)
-#         m (modulus):              [pM]  (Implicit)
-#         data (reduce data):       [pData] (Implicit)
-# Output: r (result):		     Address in [red_res_addr]
-#         result also in: r9, r8, r15, r14, r13, r12, r11, r10
-
-my @X=map("%r$_",(8..15));
-
-$code.=<<___;
-.type	mont_reduce,\@abi-omnipotent
-.align	16
-mont_reduce:
-___
-
-my $STACK_DEPTH         =  8;
-	#
-	# X1 = Xh * M1 + Xl
-$code.=<<___;
-	 lea	(+$Reduce_Data_offset+$X1_offset+$STACK_DEPTH)(%rsp), %rdi			# pX1 (Dst) 769 bits, 13 qwords
-	 mov	(+$pData_offset+$STACK_DEPTH)(%rsp), %rsi			# pM1 (Bsrc) 512 bits, 8 qwords
-	 add	\$$M1, %rsi
-	 lea	(+$tmp16_offset+$STACK_DEPTH)(%rsp), %rcx			# X (Asrc) 1024 bits, 16 qwords
-
-___
-
-	&MULADD_256x512("%rdi", "%rcx", "%rsi", "%rbp", "%rbx", \@X);	# rotates @X 4 times
-	# results in r11, r10, r9, r8, r15, r14, r13, r12, X1[3:0]
-
-$code.=<<___;
-	 xor	%rax, %rax
-	# X1 += xl
-	 add	(+8*8)(%rcx), $X[4]
-	 adc	(+8*9)(%rcx), $X[5]
-	 adc	(+8*10)(%rcx), $X[6]
-	 adc	(+8*11)(%rcx), $X[7]
-	 adc	\$0, %rax
-	# X1 is now rax, r11-r8, r15-r12, tmp16[3:0]
-
-	#
-	# check for carry ;; carry stored in rax
-	 mov	$X[4], (+8*8)(%rdi)			# rdi points to X1
-	 mov	$X[5], (+8*9)(%rdi)
-	 mov	$X[6], %rbp
-	 mov	$X[7], (+8*11)(%rdi)
-
-	 mov	%rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)
-
-	 mov	(+8*0)(%rdi), $X[4]
-	 mov	(+8*1)(%rdi), $X[5]
-	 mov	(+8*2)(%rdi), $X[6]
-	 mov	(+8*3)(%rdi), $X[7]
-
-	# X1 is now stored in: X1[11], rbp, X1[9:8], r15-r8
-	# rdi -> X1
-	# rsi -> M1
-
-	#
-	# X2 = Xh * M2 + Xl
-	# do first part (X2 = Xh * M2)
-	 add	\$8*10, %rdi			# rdi -> pXh ; 128 bits, 2 qwords
-				#        Xh is actually { [rdi+8*1], rbp }
-	 add	\$`$M2-$M1`, %rsi			# rsi -> M2
-	 lea	(+$Reduce_Data_offset+$X2_offset+$STACK_DEPTH)(%rsp), %rcx			# rcx -> pX2 ; 641 bits, 11 qwords
-___
-	unshift(@X,pop(@X));	unshift(@X,pop(@X));
-$code.=<<___;
-
-	 call	MULADD_128x512			# args in rcx, rdi / rbp, rsi, r15-r8
-	# result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]
-	 mov	(+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rax
-
-	# X2 += Xl
-	 add	(+8*8-8*10)(%rdi), $X[6]		# (-8*10) is to adjust rdi -> Xh to Xl
-	 adc	(+8*9-8*10)(%rdi), $X[7]
-	 mov	$X[6], (+8*8)(%rcx)
-	 mov	$X[7], (+8*9)(%rcx)
-
-	 adc	%rax, %rax
-	 mov	%rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)
-
-	 lea	(+$Reduce_Data_offset+$Q_offset+$STACK_DEPTH)(%rsp), %rdi			# rdi -> pQ ; 128 bits, 2 qwords
-	 add	\$`$K1-$M2`, %rsi			# rsi -> pK1 ; 128 bits, 2 qwords
-
-	# MUL_128x128t128	rdi, rcx, rsi	; Q = X2 * K1 (bottom half)
-	# B1:B0 = rsi[1:0] = K1[1:0]
-	# A1:A0 = rcx[1:0] = X2[1:0]
-	# Result = rdi[1],rbp = Q[1],rbp
-	 mov	(%rsi), %r8			# B0
-	 mov	(+8*1)(%rsi), %rbx			# B1
-
-	 mov	(%rcx), %rax			# A0
-	 mul	%r8			# B0
-	 mov	%rax, %rbp
-	 mov	%rdx, %r9
-
-	 mov	(+8*1)(%rcx), %rax			# A1
-	 mul	%r8			# B0
-	 add	%rax, %r9
-
-	 mov	(%rcx), %rax			# A0
-	 mul	%rbx			# B1
-	 add	%rax, %r9
-
-	 mov	%r9, (+8*1)(%rdi)
-	# end MUL_128x128t128
-
-	 sub	\$`$K1-$M`, %rsi
-
-	 mov	(%rcx), $X[6]
-	 mov	(+8*1)(%rcx), $X[7]			# r9:r8 = X2[1:0]
-
-	 call	MULADD_128x512			# args in rcx, rdi / rbp, rsi, r15-r8
-	# result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]
-
-	# load first half of m to rdx, rdi, rbx, rax
-	# moved this here for efficiency
-	 mov	(+8*0)(%rsi), %rax
-	 mov	(+8*1)(%rsi), %rbx
-	 mov	(+8*2)(%rsi), %rdi
-	 mov	(+8*3)(%rsi), %rdx
-
-	# continue with reduction
-	 mov	(+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rbp
-
-	 add	(+8*8)(%rcx), $X[6]
-	 adc	(+8*9)(%rcx), $X[7]
-
-	#accumulate the final carry to rbp
-	 adc	%rbp, %rbp
-
-	# Add in overflow corrections: R = (X2>>128) += T[overflow]
-	# R = {r9, r8, r15, r14, ..., r10}
-	 shl	\$3, %rbp
-	 mov	(+$pData_offset+$STACK_DEPTH)(%rsp), %rcx			# rcx -> Data (and points to T)
-	 add	%rcx, %rbp			# pT ; 512 bits, 8 qwords, spread out
-
-	# rsi will be used to generate a mask after the addition
-	 xor	%rsi, %rsi
-
-	 add	(+8*8*0)(%rbp), $X[0]
-	 adc	(+8*8*1)(%rbp), $X[1]
-	 adc	(+8*8*2)(%rbp), $X[2]
-	 adc	(+8*8*3)(%rbp), $X[3]
-	 adc	(+8*8*4)(%rbp), $X[4]
-	 adc	(+8*8*5)(%rbp), $X[5]
-	 adc	(+8*8*6)(%rbp), $X[6]
-	 adc	(+8*8*7)(%rbp), $X[7]
-
-	# if there is a carry:	rsi = 0xFFFFFFFFFFFFFFFF
-	# if carry is clear:	rsi = 0x0000000000000000
-	 sbb	\$0, %rsi
-
-	# if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
-	 and	%rsi, %rax
-	 and	%rsi, %rbx
-	 and	%rsi, %rdi
-	 and	%rsi, %rdx
-
-	 mov	\$1, %rbp
-	 sub	%rax, $X[0]
-	 sbb	%rbx, $X[1]
-	 sbb	%rdi, $X[2]
-	 sbb	%rdx, $X[3]
-
-	# if there is a borrow:		rbp = 0
-	# if there is no borrow:	rbp = 1
-	# this is used to save the borrows in between the first half and the 2nd half of the subtraction of m
-	 sbb	\$0, %rbp
-
-	#load second half of m to rdx, rdi, rbx, rax
-
-	 add	\$$M, %rcx
-	 mov	(+8*4)(%rcx), %rax
-	 mov	(+8*5)(%rcx), %rbx
-	 mov	(+8*6)(%rcx), %rdi
-	 mov	(+8*7)(%rcx), %rdx
-
-	# use the rsi mask as before
-	# if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
-	 and	%rsi, %rax
-	 and	%rsi, %rbx
-	 and	%rsi, %rdi
-	 and	%rsi, %rdx
-
-	# if rbp = 0, there was a borrow before, it is moved to the carry flag
-	# if rbp = 1, there was not a borrow before, carry flag is cleared
-	 sub	\$1, %rbp
-
-	 sbb	%rax, $X[4]
-	 sbb	%rbx, $X[5]
-	 sbb	%rdi, $X[6]
-	 sbb	%rdx, $X[7]
-
-	# write R back to memory
-
-	 mov	(+$red_result_addr_offset+$STACK_DEPTH)(%rsp), %rsi
-	 mov	$X[0], (+8*0)(%rsi)
-	 mov	$X[1], (+8*1)(%rsi)
-	 mov	$X[2], (+8*2)(%rsi)
-	 mov	$X[3], (+8*3)(%rsi)
-	 mov	$X[4], (+8*4)(%rsi)
-	 mov	$X[5], (+8*5)(%rsi)
-	 mov	$X[6], (+8*6)(%rsi)
-	 mov	$X[7], (+8*7)(%rsi)
-
-	 ret
-.size	mont_reduce,.-mont_reduce
-___
-}}}
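
mont_reduce is the folding reduction from the Gopal et al. paper cited in the header: the 1024-bit input is folded twice using the precomputed residues m1 = 2^768 mod m and m2 = 2^640 mod m from the data structure above, then finished with a single 128-bit Montgomery step using k1 = -1/m mod 2^128, a lookup in the T[] table for overflow corrections, and one conditional subtract. A sketch of the arithmetic in Python (big-int operations stand in for the carry chains; pow(m, -1, ...) needs Python 3.8+):

    def fold_reduce(x, m):
        # x up to 1024 bits, m an odd 512-bit modulus; returns
        # x * 2**-128 mod m, i.e. the Montgomery-scaled residue
        m1 = pow(2, 768, m)                        # precomputed constants
        m2 = pow(2, 640, m)
        k1 = (-pow(m, -1, 1 << 128)) % (1 << 128)
        x1 = (x >> 768) * m1 + (x % (1 << 768))    # first fold, <= 769 bits
        x2 = (x1 >> 640) * m2 + (x1 % (1 << 640))  # second fold, <= 641 bits
        q = ((x2 % (1 << 128)) * k1) % (1 << 128)  # zero the low 128 bits
        r = (x2 + q * m) >> 128                    # exact: low bits vanish
        while r >= m:                              # the asm replaces this loop
            r -= m                                 # with T[] plus one subtract
        return r

With this in hand, mont_mul_a3b below is essentially fold_reduce(src1 * src2, m), which is why it can tail-jump straight into mont_reduce.
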
-
-{{{
-#MUL_512x512	MACRO	pDst, pA, pB, x7, x6, x5, x4, x3, x2, x1, x0, tmp*2
-#
-# Inputs: pDst: Destination  (1024 bits, 16 qwords)
-#         pA:   Multiplicand (512 bits, 8 qwords)
-#         pB:   Multiplicand (512 bits, 8 qwords)
-# Uses registers rax, rdx, args
-#   B operand in [pB] and also in x7...x0
-sub MUL_512x512
-{
- my ($pDst, $pA, $pB, $x, $OP, $TMP, $pDst_o)=@_;
- my ($pDst,  $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
- my @X=@$x;	# make a copy
-
-$code.=<<___;
-	 mov	(+8*0)($pA), $OP
-
-	 mov	$X[0], %rax
-	 mul	$OP			# rdx:rax = %OP * [0]
-	 mov	%rax, (+$pDst_o+8*0)($pDst)
-	 mov	%rdx, $X[0]
-___
-for(my $i=1;$i<8;$i++) {
-$code.=<<___;
-	 mov	$X[$i], %rax
-	 mul	$OP			# rdx:rax = %OP * [$i]
-	 add	%rax, $X[$i-1]
-	 adc	\$0, %rdx
-	 mov	%rdx, $X[$i]
-___
-}
-
-for(my $i=1;$i<8;$i++) {
-$code.=<<___;
-	 mov	(+8*$i)($pA), $OP
-___
-
-	&MULSTEP_512(\@X, "(+$pDst_o+8*$i)($pDst)", $pB, $OP, $TMP);
-	push(@X,shift(@X));
-}
-
-$code.=<<___;
-	 mov	$X[0], (+$pDst_o+8*8)($pDst)
-	 mov	$X[1], (+$pDst_o+8*9)($pDst)
-	 mov	$X[2], (+$pDst_o+8*10)($pDst)
-	 mov	$X[3], (+$pDst_o+8*11)($pDst)
-	 mov	$X[4], (+$pDst_o+8*12)($pDst)
-	 mov	$X[5], (+$pDst_o+8*13)($pDst)
-	 mov	$X[6], (+$pDst_o+8*14)($pDst)
-	 mov	$X[7], (+$pDst_o+8*15)($pDst)
-___
-}
-
-#
-# mont_mul_a3b : subroutine to compute (Src1 * Src2) % M (all 512-bits)
-# Input:  src1: Address of source 1: rdi
-#         src2: Address of source 2: rsi
-# Output: dst:  Address of destination: [red_res_addr]
-#    src2 and result also in: r9, r8, r15, r14, r13, r12, r11, r10
-# Temp:   Clobbers [tmp16], all registers
-$code.=<<___;
-.type	mont_mul_a3b,\@abi-omnipotent
-.align	16
-mont_mul_a3b:
-	#
-	# multiply tmp = src1 * src2
-	# For multiply: dst = rcx, src1 = rdi, src2 = rsi
-	# stack depth is extra 8 from call
-___
-	&MUL_512x512("%rsp+$tmp16_offset+8", "%rdi", "%rsi", [map("%r$_",(10..15,8..9))], "%rbp", "%rbx");
-$code.=<<___;
-	#
-	# Dst = tmp % m
-	# Call reduce(tmp, m, data, dst)
-
-	# tail recursion optimization: jmp to mont_reduce and return from there
-	 jmp	mont_reduce
-	# call	mont_reduce
-	# ret
-.size	mont_mul_a3b,.-mont_mul_a3b
-___
-}}}
-
-{{{
-#SQR_512 MACRO pDest, pA, x7, x6, x5, x4, x3, x2, x1, x0, tmp*4
-#
-# Input in memory [pA] and also in x7...x0
-# Uses all argument registers plus rax and rdx
-#
-# This version computes all of the off-diagonal terms into memory,
-# and then it adds in the diagonal terms
-
-sub SQR_512
-{
- my ($pDst, $pA, $x, $A, $tmp, $x7, $x6, $pDst_o)=@_;
- my ($pDst,  $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
- my @X=@$x;	# make a copy
-$code.=<<___;
-	# ------------------
-	# first pass 01...07
-	# ------------------
-	 mov	$X[0], $A
-
-	 mov	$X[1],%rax
-	 mul	$A
-	 mov	%rax, (+$pDst_o+8*1)($pDst)
-___
-for(my $i=2;$i<8;$i++) {
-$code.=<<___;
-	 mov	%rdx, $X[$i-2]
-	 mov	$X[$i],%rax
-	 mul	$A
-	 add	%rax, $X[$i-2]
-	 adc	\$0, %rdx
-___
-}
-$code.=<<___;
-	 mov	%rdx, $x7
-
-	 mov	$X[0], (+$pDst_o+8*2)($pDst)
-
-	# ------------------
-	# second pass 12...17
-	# ------------------
-
-	 mov	(+8*1)($pA), $A
-
-	 mov	(+8*2)($pA),%rax
-	 mul	$A
-	 add	%rax, $X[1]
-	 adc	\$0, %rdx
-	 mov	$X[1], (+$pDst_o+8*3)($pDst)
-
-	 mov	%rdx, $X[0]
-	 mov	(+8*3)($pA),%rax
-	 mul	$A
-	 add	%rax, $X[2]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[2]
-	 adc	\$0, %rdx
-	 mov	$X[2], (+$pDst_o+8*4)($pDst)
-
-	 mov	%rdx, $X[0]
-	 mov	(+8*4)($pA),%rax
-	 mul	$A
-	 add	%rax, $X[3]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[3]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[0]
-	 mov	(+8*5)($pA),%rax
-	 mul	$A
-	 add	%rax, $X[4]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[4]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[0]
-	 mov	$X[6],%rax
-	 mul	$A
-	 add	%rax, $X[5]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[5]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[0]
-	 mov	$X[7],%rax
-	 mul	$A
-	 add	%rax, $x7
-	 adc	\$0, %rdx
-	 add	$X[0], $x7
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[1]
-
-	# ------------------
-	# third pass 23...27
-	# ------------------
-	 mov	(+8*2)($pA), $A
-
-	 mov	(+8*3)($pA),%rax
-	 mul	$A
-	 add	%rax, $X[3]
-	 adc	\$0, %rdx
-	 mov	$X[3], (+$pDst_o+8*5)($pDst)
-
-	 mov	%rdx, $X[0]
-	 mov	(+8*4)($pA),%rax
-	 mul	$A
-	 add	%rax, $X[4]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[4]
-	 adc	\$0, %rdx
-	 mov	$X[4], (+$pDst_o+8*6)($pDst)
-
-	 mov	%rdx, $X[0]
-	 mov	(+8*5)($pA),%rax
-	 mul	$A
-	 add	%rax, $X[5]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[5]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[0]
-	 mov	$X[6],%rax
-	 mul	$A
-	 add	%rax, $x7
-	 adc	\$0, %rdx
-	 add	$X[0], $x7
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[0]
-	 mov	$X[7],%rax
-	 mul	$A
-	 add	%rax, $X[1]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[1]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[2]
-
-	# ------------------
-	# fourth pass 34...37
-	# ------------------
-
-	 mov	(+8*3)($pA), $A
-
-	 mov	(+8*4)($pA),%rax
-	 mul	$A
-	 add	%rax, $X[5]
-	 adc	\$0, %rdx
-	 mov	$X[5], (+$pDst_o+8*7)($pDst)
-
-	 mov	%rdx, $X[0]
-	 mov	(+8*5)($pA),%rax
-	 mul	$A
-	 add	%rax, $x7
-	 adc	\$0, %rdx
-	 add	$X[0], $x7
-	 adc	\$0, %rdx
-	 mov	$x7, (+$pDst_o+8*8)($pDst)
-
-	 mov	%rdx, $X[0]
-	 mov	$X[6],%rax
-	 mul	$A
-	 add	%rax, $X[1]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[1]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[0]
-	 mov	$X[7],%rax
-	 mul	$A
-	 add	%rax, $X[2]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[2]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[5]
-
-	# ------------------
-	# fifth pass 45...47
-	# ------------------
-	 mov	(+8*4)($pA), $A
-
-	 mov	(+8*5)($pA),%rax
-	 mul	$A
-	 add	%rax, $X[1]
-	 adc	\$0, %rdx
-	 mov	$X[1], (+$pDst_o+8*9)($pDst)
-
-	 mov	%rdx, $X[0]
-	 mov	$X[6],%rax
-	 mul	$A
-	 add	%rax, $X[2]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[2]
-	 adc	\$0, %rdx
-	 mov	$X[2], (+$pDst_o+8*10)($pDst)
-
-	 mov	%rdx, $X[0]
-	 mov	$X[7],%rax
-	 mul	$A
-	 add	%rax, $X[5]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[5]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $X[1]
-
-	# ------------------
-	# sixth pass 56...57
-	# ------------------
-	 mov	(+8*5)($pA), $A
-
-	 mov	$X[6],%rax
-	 mul	$A
-	 add	%rax, $X[5]
-	 adc	\$0, %rdx
-	 mov	$X[5], (+$pDst_o+8*11)($pDst)
-
-	 mov	%rdx, $X[0]
-	 mov	$X[7],%rax
-	 mul	$A
-	 add	%rax, $X[1]
-	 adc	\$0, %rdx
-	 add	$X[0], $X[1]
-	 adc	\$0, %rdx
-	 mov	$X[1], (+$pDst_o+8*12)($pDst)
-
-	 mov	%rdx, $X[2]
-
-	# ------------------
-	# seventh pass 67
-	# ------------------
-	 mov	$X[6], $A
-
-	 mov	$X[7],%rax
-	 mul	$A
-	 add	%rax, $X[2]
-	 adc	\$0, %rdx
-	 mov	$X[2], (+$pDst_o+8*13)($pDst)
-
-	 mov	%rdx, (+$pDst_o+8*14)($pDst)
-
-	# start finalize (add	in squares, and double off-terms)
-	 mov	(+$pDst_o+8*1)($pDst), $X[0]
-	 mov	(+$pDst_o+8*2)($pDst), $X[1]
-	 mov	(+$pDst_o+8*3)($pDst), $X[2]
-	 mov	(+$pDst_o+8*4)($pDst), $X[3]
-	 mov	(+$pDst_o+8*5)($pDst), $X[4]
-	 mov	(+$pDst_o+8*6)($pDst), $X[5]
-
-	 mov	(+8*3)($pA), %rax
-	 mul	%rax
-	 mov	%rax, $x6
-	 mov	%rdx, $X[6]
-
-	 add	$X[0], $X[0]
-	 adc	$X[1], $X[1]
-	 adc	$X[2], $X[2]
-	 adc	$X[3], $X[3]
-	 adc	$X[4], $X[4]
-	 adc	$X[5], $X[5]
-	 adc	\$0, $X[6]
-
-	 mov	(+8*0)($pA), %rax
-	 mul	%rax
-	 mov	%rax, (+$pDst_o+8*0)($pDst)
-	 mov	%rdx, $A
-
-	 mov	(+8*1)($pA), %rax
-	 mul	%rax
-
-	 add	$A, $X[0]
-	 adc	%rax, $X[1]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $A
-	 mov	$X[0], (+$pDst_o+8*1)($pDst)
-	 mov	$X[1], (+$pDst_o+8*2)($pDst)
-
-	 mov	(+8*2)($pA), %rax
-	 mul	%rax
-
-	 add	$A, $X[2]
-	 adc	%rax, $X[3]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $A
-
-	 mov	$X[2], (+$pDst_o+8*3)($pDst)
-	 mov	$X[3], (+$pDst_o+8*4)($pDst)
-
-	 xor	$tmp, $tmp
-	 add	$A, $X[4]
-	 adc	$x6, $X[5]
-	 adc	\$0, $tmp
-
-	 mov	$X[4], (+$pDst_o+8*5)($pDst)
-	 mov	$X[5], (+$pDst_o+8*6)($pDst)
-
-	# %%tmp has 0/1 in column 7
-	# %%A6 has a full value in column 7
-
-	 mov	(+$pDst_o+8*7)($pDst), $X[0]
-	 mov	(+$pDst_o+8*8)($pDst), $X[1]
-	 mov	(+$pDst_o+8*9)($pDst), $X[2]
-	 mov	(+$pDst_o+8*10)($pDst), $X[3]
-	 mov	(+$pDst_o+8*11)($pDst), $X[4]
-	 mov	(+$pDst_o+8*12)($pDst), $X[5]
-	 mov	(+$pDst_o+8*13)($pDst), $x6
-	 mov	(+$pDst_o+8*14)($pDst), $x7
-
-	 mov	$X[7], %rax
-	 mul	%rax
-	 mov	%rax, $X[7]
-	 mov	%rdx, $A
-
-	 add	$X[0], $X[0]
-	 adc	$X[1], $X[1]
-	 adc	$X[2], $X[2]
-	 adc	$X[3], $X[3]
-	 adc	$X[4], $X[4]
-	 adc	$X[5], $X[5]
-	 adc	$x6, $x6
-	 adc	$x7, $x7
-	 adc	\$0, $A
-
-	 add	$tmp, $X[0]
-
-	 mov	(+8*4)($pA), %rax
-	 mul	%rax
-
-	 add	$X[6], $X[0]
-	 adc	%rax, $X[1]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $tmp
-
-	 mov	$X[0], (+$pDst_o+8*7)($pDst)
-	 mov	$X[1], (+$pDst_o+8*8)($pDst)
-
-	 mov	(+8*5)($pA), %rax
-	 mul	%rax
-
-	 add	$tmp, $X[2]
-	 adc	%rax, $X[3]
-	 adc	\$0, %rdx
-
-	 mov	%rdx, $tmp
-
-	 mov	$X[2], (+$pDst_o+8*9)($pDst)
-	 mov	$X[3], (+$pDst_o+8*10)($pDst)
-
-	 mov	(+8*6)($pA), %rax
-	 mul	%rax
-
-	 add	$tmp, $X[4]
-	 adc	%rax, $X[5]
-	 adc	\$0, %rdx
-
-	 mov	$X[4], (+$pDst_o+8*11)($pDst)
-	 mov	$X[5], (+$pDst_o+8*12)($pDst)
-
-	 add	%rdx, $x6
-	 adc	$X[7], $x7
-	 adc	\$0, $A
-
-	 mov	$x6, (+$pDst_o+8*13)($pDst)
-	 mov	$x7, (+$pDst_o+8*14)($pDst)
-	 mov	$A, (+$pDst_o+8*15)($pDst)
-___
-}
-
-#
-# sqr_reduce: subroutine to compute Result = reduce(Result * Result)
-#
-# input and result also in: r9, r8, r15, r14, r13, r12, r11, r10
-#
-$code.=<<___;
-.type	sqr_reduce,\@abi-omnipotent
-.align	16
-sqr_reduce:
-	 mov	(+$pResult_offset+8)(%rsp), %rcx
-___
-	&SQR_512("%rsp+$tmp16_offset+8", "%rcx", [map("%r$_",(10..15,8..9))], "%rbx", "%rbp", "%rsi", "%rdi");
-$code.=<<___;
-	# tail recursion optimization: jmp to mont_reduce and return from there
-	 jmp	mont_reduce
-	# call	mont_reduce
-	# ret
-.size	sqr_reduce,.-sqr_reduce
-___
-}}}
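
SQR_512 exploits the symmetry of squaring: the 28 off-diagonal products a[i]*a[j] with i < j are computed once and staged in memory, the whole partial result is doubled, and only then are the 8 diagonal squares a[i]^2 folded in, roughly halving the multiply count relative to MUL_512x512. The same structure in a few lines of Python (big-int arithmetic in place of the explicit carry chains):

    def sqr_512(a):
        # a is a list of 8 64-bit limbs; returns the 1024-bit square
        off_diagonal = 0
        for i in range(8):
            for j in range(i + 1, 8):
                off_diagonal += (a[i] * a[j]) << (64 * (i + j))
        square = 2 * off_diagonal                  # double the cross terms
        for i in range(8):
            square += (a[i] * a[i]) << (128 * i)   # add the diagonal squares
        return square
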
-
-#
-# MAIN FUNCTION
-#
-
-#mod_exp_512(UINT64 *result, /* 512 bits, 8 qwords */
-#           UINT64 *g,   /* 512 bits, 8 qwords */
-#           UINT64 *exp, /* 512 bits, 8 qwords */
-#           struct mod_ctx_512 *data)
-
-# window size = 5
-# table size = 2^5 = 32
-#table_entries	equ	32
-#table_size	equ	table_entries * 8
-$code.=<<___;
-.globl	mod_exp_512
-.type	mod_exp_512,\@function,4
-mod_exp_512:
-	 push	%rbp
-	 push	%rbx
-	 push	%r12
-	 push	%r13
-	 push	%r14
-	 push	%r15
-
-	# adjust stack down and then align it with cache boundary
-	 mov	%rsp, %r8
-	 sub	\$$mem_size, %rsp
-	 and	\$-64, %rsp
-
-	# store previous stack pointer and arguments
-	 mov	%r8, (+$rsp_offset)(%rsp)
-	 mov	%rdi, (+$pResult_offset)(%rsp)
-	 mov	%rsi, (+$pG_offset)(%rsp)
-	 mov	%rcx, (+$pData_offset)(%rsp)
-.Lbody:
-	# transform g into montgomery space
-	# GT = reduce(g * C2) = reduce(g * (2^256))
-	# reduce expects to have the input in [tmp16]
-	 pxor	%xmm4, %xmm4
-	 movdqu	(+16*0)(%rsi), %xmm0
-	 movdqu	(+16*1)(%rsi), %xmm1
-	 movdqu	(+16*2)(%rsi), %xmm2
-	 movdqu	(+16*3)(%rsi), %xmm3
-	 movdqa	%xmm4, (+$tmp16_offset+16*0)(%rsp)
-	 movdqa	%xmm4, (+$tmp16_offset+16*1)(%rsp)
-	 movdqa	%xmm4, (+$tmp16_offset+16*6)(%rsp)
-	 movdqa	%xmm4, (+$tmp16_offset+16*7)(%rsp)
-	 movdqa	%xmm0, (+$tmp16_offset+16*2)(%rsp)
-	 movdqa	%xmm1, (+$tmp16_offset+16*3)(%rsp)
-	 movdqa	%xmm2, (+$tmp16_offset+16*4)(%rsp)
-	 movdqa	%xmm3, (+$tmp16_offset+16*5)(%rsp)
-
-	# load pExp before rdx gets blown away
-	 movdqu	(+16*0)(%rdx), %xmm0
-	 movdqu	(+16*1)(%rdx), %xmm1
-	 movdqu	(+16*2)(%rdx), %xmm2
-	 movdqu	(+16*3)(%rdx), %xmm3
-
-	 lea	(+$GT_offset)(%rsp), %rbx
-	 mov	%rbx, (+$red_result_addr_offset)(%rsp)
-	 call	mont_reduce
-
-	# Initialize tmp = C
-	 lea	(+$tmp_offset)(%rsp), %rcx
-	 xor	%rax, %rax
-	 mov	%rax, (+8*0)(%rcx)
-	 mov	%rax, (+8*1)(%rcx)
-	 mov	%rax, (+8*3)(%rcx)
-	 mov	%rax, (+8*4)(%rcx)
-	 mov	%rax, (+8*5)(%rcx)
-	 mov	%rax, (+8*6)(%rcx)
-	 mov	%rax, (+8*7)(%rcx)
-	 mov	%rax, (+$exp_offset+8*8)(%rsp)
-	 movq	\$1, (+8*2)(%rcx)
-
-	 lea	(+$garray_offset)(%rsp), %rbp
-	 mov	%rcx, %rsi			# pTmp
-	 mov	%rbp, %rdi			# Garray[][0]
-___
-
-	&swizzle("%rdi", "%rcx", "%rax", "%rbx");
-
-	# for (rax = 31; rax != 0; rax--) {
-	#     tmp = reduce(tmp * G)
-	#     swizzle(pg, tmp);
-	#     pg += 2; }
-$code.=<<___;
-	 mov	\$31, %rax
-	 mov	%rax, (+$i_offset)(%rsp)
-	 mov	%rbp, (+$pg_offset)(%rsp)
-	# rsi -> pTmp
-	 mov	%rsi, (+$red_result_addr_offset)(%rsp)
-	 mov	(+8*0)(%rsi), %r10
-	 mov	(+8*1)(%rsi), %r11
-	 mov	(+8*2)(%rsi), %r12
-	 mov	(+8*3)(%rsi), %r13
-	 mov	(+8*4)(%rsi), %r14
-	 mov	(+8*5)(%rsi), %r15
-	 mov	(+8*6)(%rsi), %r8
-	 mov	(+8*7)(%rsi), %r9
-init_loop:
-	 lea	(+$GT_offset)(%rsp), %rdi
-	 call	mont_mul_a3b
-	 lea	(+$tmp_offset)(%rsp), %rsi
-	 mov	(+$pg_offset)(%rsp), %rbp
-	 add	\$2, %rbp
-	 mov	%rbp, (+$pg_offset)(%rsp)
-	 mov	%rsi, %rcx			# rcx = rsi = addr of tmp
-___
-
-	&swizzle("%rbp", "%rcx", "%rax", "%rbx");
-$code.=<<___;
-	 mov	(+$i_offset)(%rsp), %rax
-	 sub	\$1, %rax
-	 mov	%rax, (+$i_offset)(%rsp)
-	 jne	init_loop
-
-	#
-	# Copy exponent onto stack
-	 movdqa	%xmm0, (+$exp_offset+16*0)(%rsp)
-	 movdqa	%xmm1, (+$exp_offset+16*1)(%rsp)
-	 movdqa	%xmm2, (+$exp_offset+16*2)(%rsp)
-	 movdqa	%xmm3, (+$exp_offset+16*3)(%rsp)
-
-
-	#
-	# Do exponentiation
-	# Initialize result to G[exp{511:507}]
-	 mov	(+$exp_offset+62)(%rsp), %eax
-	 mov	%rax, %rdx
-	 shr	\$11, %rax
-	 and	\$0x07FF, %edx
-	 mov	%edx, (+$exp_offset+62)(%rsp)
-	 lea	(+$garray_offset)(%rsp,%rax,2), %rsi
-	 mov	(+$pResult_offset)(%rsp), %rdx
-___
-
-	&unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");
-
-	#
-	# Loop variables
-	# rcx = [loop_idx] = index: 510-5 to 0 by 5
-$code.=<<___;
-	 movq	\$505, (+$loop_idx_offset)(%rsp)
-
-	 mov	(+$pResult_offset)(%rsp), %rcx
-	 mov	%rcx, (+$red_result_addr_offset)(%rsp)
-	 mov	(+8*0)(%rcx), %r10
-	 mov	(+8*1)(%rcx), %r11
-	 mov	(+8*2)(%rcx), %r12
-	 mov	(+8*3)(%rcx), %r13
-	 mov	(+8*4)(%rcx), %r14
-	 mov	(+8*5)(%rcx), %r15
-	 mov	(+8*6)(%rcx), %r8
-	 mov	(+8*7)(%rcx), %r9
-	 jmp	sqr_2
-
-main_loop_a3b:
-	 call	sqr_reduce
-	 call	sqr_reduce
-	 call	sqr_reduce
-sqr_2:
-	 call	sqr_reduce
-	 call	sqr_reduce
-
-	#
-	# Do multiply, first look up proper value in Garray
-	 mov	(+$loop_idx_offset)(%rsp), %rcx			# bit index
-	 mov	%rcx, %rax
-	 shr	\$4, %rax			# rax is word pointer
-	 mov	(+$exp_offset)(%rsp,%rax,2), %edx
-	 and	\$15, %rcx
-	 shrq	%cl, %rdx
-	 and	\$0x1F, %rdx
-
-	 lea	(+$garray_offset)(%rsp,%rdx,2), %rsi
-	 lea	(+$tmp_offset)(%rsp), %rdx
-	 mov	%rdx, %rdi
-___
-
-	&unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");
-	# rdi = tmp = pG
-
-	#
-	# Call mod_mul_a1(pDst,  pSrc1, pSrc2, pM, pData)
-	#                 result result pG     M   Data
-$code.=<<___;
-	 mov	(+$pResult_offset)(%rsp), %rsi
-	 call	mont_mul_a3b
-
-	#
-	# finish loop
-	 mov	(+$loop_idx_offset)(%rsp), %rcx
-	 sub	\$5, %rcx
-	 mov	%rcx, (+$loop_idx_offset)(%rsp)
-	 jge	main_loop_a3b
-
-	#
-
-end_main_loop_a3b:
-	# transform result out of Montgomery space
-	# result = reduce(result)
-	 mov	(+$pResult_offset)(%rsp), %rdx
-	 pxor	%xmm4, %xmm4
-	 movdqu	(+16*0)(%rdx), %xmm0
-	 movdqu	(+16*1)(%rdx), %xmm1
-	 movdqu	(+16*2)(%rdx), %xmm2
-	 movdqu	(+16*3)(%rdx), %xmm3
-	 movdqa	%xmm4, (+$tmp16_offset+16*4)(%rsp)
-	 movdqa	%xmm4, (+$tmp16_offset+16*5)(%rsp)
-	 movdqa	%xmm4, (+$tmp16_offset+16*6)(%rsp)
-	 movdqa	%xmm4, (+$tmp16_offset+16*7)(%rsp)
-	 movdqa	%xmm0, (+$tmp16_offset+16*0)(%rsp)
-	 movdqa	%xmm1, (+$tmp16_offset+16*1)(%rsp)
-	 movdqa	%xmm2, (+$tmp16_offset+16*2)(%rsp)
-	 movdqa	%xmm3, (+$tmp16_offset+16*3)(%rsp)
-	 call	mont_reduce
-
-	# If result > m, subtract m
-	# load result into r15:r8
-	 mov	(+$pResult_offset)(%rsp), %rax
-	 mov	(+8*0)(%rax), %r8
-	 mov	(+8*1)(%rax), %r9
-	 mov	(+8*2)(%rax), %r10
-	 mov	(+8*3)(%rax), %r11
-	 mov	(+8*4)(%rax), %r12
-	 mov	(+8*5)(%rax), %r13
-	 mov	(+8*6)(%rax), %r14
-	 mov	(+8*7)(%rax), %r15
-
-	# subtract m
-	 mov	(+$pData_offset)(%rsp), %rbx
-	 add	\$$M, %rbx
-
-	 sub	(+8*0)(%rbx), %r8
-	 sbb	(+8*1)(%rbx), %r9
-	 sbb	(+8*2)(%rbx), %r10
-	 sbb	(+8*3)(%rbx), %r11
-	 sbb	(+8*4)(%rbx), %r12
-	 sbb	(+8*5)(%rbx), %r13
-	 sbb	(+8*6)(%rbx), %r14
-	 sbb	(+8*7)(%rbx), %r15
-
-	# if Carry is clear, replace result with difference
-	 mov	(+8*0)(%rax), %rsi
-	 mov	(+8*1)(%rax), %rdi
-	 mov	(+8*2)(%rax), %rcx
-	 mov	(+8*3)(%rax), %rdx
-	 cmovnc	%r8, %rsi
-	 cmovnc	%r9, %rdi
-	 cmovnc	%r10, %rcx
-	 cmovnc	%r11, %rdx
-	 mov	%rsi, (+8*0)(%rax)
-	 mov	%rdi, (+8*1)(%rax)
-	 mov	%rcx, (+8*2)(%rax)
-	 mov	%rdx, (+8*3)(%rax)
-
-	 mov	(+8*4)(%rax), %rsi
-	 mov	(+8*5)(%rax), %rdi
-	 mov	(+8*6)(%rax), %rcx
-	 mov	(+8*7)(%rax), %rdx
-	 cmovnc	%r12, %rsi
-	 cmovnc	%r13, %rdi
-	 cmovnc	%r14, %rcx
-	 cmovnc	%r15, %rdx
-	 mov	%rsi, (+8*4)(%rax)
-	 mov	%rdi, (+8*5)(%rax)
-	 mov	%rcx, (+8*6)(%rax)
-	 mov	%rdx, (+8*7)(%rax)
-
-	 mov	(+$rsp_offset)(%rsp), %rsi
-	 mov	0(%rsi),%r15
-	 mov	8(%rsi),%r14
-	 mov	16(%rsi),%r13
-	 mov	24(%rsi),%r12
-	 mov	32(%rsi),%rbx
-	 mov	40(%rsi),%rbp
-	 lea	48(%rsi),%rsp
-.Lepilogue:
-	 ret
-.size mod_exp_512, . - mod_exp_512
-___
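
The driver is a fixed left-to-right windowed exponentiation. The result is seeded with G[exp{511:507}]; the consumed top bits are then masked out of the stored exponent, and since loop_idx starts at 505 the first multiply effectively uses only a 2-bit window (hence the jump into the middle of the squaring block at sqr_2), after which every iteration does five square-reduces and one multiply by a 5-bit table entry. The schedule, modelled in plain modular arithmetic (a sketch; the real code keeps everything in Montgomery form and reads G through the swizzled table):

    def mod_exp_512(g, e, m):
        # e is a 512-bit exponent, m an odd 512-bit modulus
        table = [pow(g, k, m) for k in range(32)]      # G[k] = g**k mod m
        result = table[e >> 507]                       # exp{511:507}
        result = pow(result, 4, m) * table[(e >> 505) & 0x3] % m  # 2-bit window
        for idx in range(500, -1, -5):                 # then 5-bit windows
            result = pow(result, 32, m) * table[(e >> idx) & 0x1F] % m
        return result
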
-
-if ($win64) {
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
-my $rec="%rcx";
-my $frame="%rdx";
-my $context="%r8";
-my $disp="%r9";
-
-$code.=<<___;
-.extern	__imp_RtlVirtualUnwind
-.type	mod_exp_512_se_handler,\@abi-omnipotent
-.align	16
-mod_exp_512_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	lea	.Lbody(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<prologue label
-	jb	.Lin_prologue
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	lea	.Lepilogue(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lin_prologue
-
-	mov	$rsp_offset(%rax),%rax	# pull saved Rsp
-
-	mov	32(%rax),%rbx
-	mov	40(%rax),%rbp
-	mov	24(%rax),%r12
-	mov	16(%rax),%r13
-	mov	8(%rax),%r14
-	mov	0(%rax),%r15
-	lea	48(%rax),%rax
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lin_prologue:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$154,%ecx		# sizeof(CONTEXT)
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	add	\$64,%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	mod_exp_512_se_handler,.-mod_exp_512_se_handler
-
-.section	.pdata
-.align	4
-	.rva	.LSEH_begin_mod_exp_512
-	.rva	.LSEH_end_mod_exp_512
-	.rva	.LSEH_info_mod_exp_512
-
-.section	.xdata
-.align	8
-.LSEH_info_mod_exp_512:
-	.byte	9,0,0,0
-	.rva	mod_exp_512_se_handler
-___
-}
-
-sub reg_part {
-my ($reg,$conv)=@_;
-    if ($reg =~ /%r[0-9]+/)	{ $reg .= $conv; }
-    elsif ($conv eq "b")	{ $reg =~ s/%[er]([^x]+)x?/%$1l/;	}
-    elsif ($conv eq "w")	{ $reg =~ s/%[er](.+)/%$1/;		}
-    elsif ($conv eq "d")	{ $reg =~ s/%[er](.+)/%e$1/;		}
-    return $reg;
-}
-
-$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-$code =~ s/(\(\+[^)]+\))/eval $1/gem;
-print $code;
-close STDOUT;

+ 0 - 1618
drivers/builtin_openssl2/crypto/bn/asm/pa-risc2.s

@@ -1,1618 +0,0 @@
-;
-; PA-RISC 2.0 implementation of bn_asm code, based on the
-; 64-bit version of the code.  This code is effectively the
-; same as the 64-bit version except the register model is
-; slightly different given all values must be 32-bit between
-; function calls.  Thus the 64-bit return values are returned
-; in %ret0 and %ret1 vs just %ret0 as is done in 64-bit
-;
-;
-; This code is approximately 2x faster than the C version
-; for RSA/DSA.
-;
-; See http://devresource.hp.com/  for more details on the PA-RISC
-; architecture.  Also see the book "PA-RISC 2.0 Architecture"
-; by Gerry Kane for information on the instruction set architecture.
-;
-; Code written by Chris Ruemmler (with some help from the HP C
-; compiler).
-;
-; The code compiles with HP's assembler
-;
-
-	.level	2.0N
-	.space	$TEXT$
-	.subspa	$CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
-
-;
-; Global Register definitions used for the routines.
-;
-; Some information about HP's runtime architecture for 32-bits.
-;
-; "Caller save" means the calling function must save the register
-; if it wants the register to be preserved.
-; "Callee save" means if a function uses the register, it must save
-; the value before using it.
-;
-; For the floating point registers 
-;
-;    "caller save" registers: fr4-fr11, fr22-fr31
-;    "callee save" registers: fr12-fr21
-;    "special" registers: fr0-fr3 (status and exception registers)
-;
-; For the integer registers
-;     value zero             :  r0
-;     "caller save" registers: r1,r19-r26
-;     "callee save" registers: r3-r18
-;     return register        :  r2  (rp)
-;     return values          ; r28,r29  (ret0,ret1)
-;     Stack pointer          ; r30  (sp) 
-;     millicode return ptr   ; r31  (also a caller save register)
-
-
-;
-; Arguments to the routines
-;
-r_ptr       .reg %r26
-a_ptr       .reg %r25
-b_ptr       .reg %r24
-num         .reg %r24
-n           .reg %r23
-
-;
-; Note that the "w" argument for bn_mul_add_words and bn_mul_words
-; is passed on the stack at a delta of -56 from the top of stack
-; as the routine is entered.
-;
-
-;
-; Globals used in some routines
-;
-
-top_overflow .reg %r23
-high_mask    .reg %r22    ; value 0xffffffff80000000L
-
-
-;------------------------------------------------------------------------------
-;
-; bn_mul_add_words
-;
-;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr, 
-;								int num, BN_ULONG w)
-;
-; arg0 = r_ptr
-; arg1 = a_ptr
-; arg3 = num
-; -56(sp) =  w
-;
-; Local register definitions
-;
-
-fm1          .reg %fr22
-fm           .reg %fr23
-ht_temp      .reg %fr24
-ht_temp_1    .reg %fr25
-lt_temp      .reg %fr26
-lt_temp_1    .reg %fr27
-fm1_1        .reg %fr28
-fm_1         .reg %fr29
-
-fw_h         .reg %fr7L
-fw_l         .reg %fr7R
-fw           .reg %fr7
-
-fht_0        .reg %fr8L
-flt_0        .reg %fr8R
-t_float_0    .reg %fr8
-
-fht_1        .reg %fr9L
-flt_1        .reg %fr9R
-t_float_1    .reg %fr9
-
-tmp_0        .reg %r31
-tmp_1        .reg %r21
-m_0          .reg %r20 
-m_1          .reg %r19 
-ht_0         .reg %r1  
-ht_1         .reg %r3
-lt_0         .reg %r4
-lt_1         .reg %r5
-m1_0         .reg %r6 
-m1_1         .reg %r7 
-rp_val       .reg %r8
-rp_val_1     .reg %r9
-
-bn_mul_add_words
-	.export	bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
-	.proc
-	.callinfo frame=128
-    .entry
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3  
-    STD     %r4,8(%sp)          ; save r4  
-	NOP                         ; Needed to make the loop 16-byte aligned
-	NOP                         ; needed to make the loop 16-byte aligned
-
-    STD     %r5,16(%sp)         ; save r5  
-	NOP
-    STD     %r6,24(%sp)         ; save r6  
-    STD     %r7,32(%sp)         ; save r7  
-
-    STD     %r8,40(%sp)         ; save r8  
-    STD     %r9,48(%sp)         ; save r9  
-    COPY    %r0,%ret1           ; return 0 by default
-    DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32    
-
-    CMPIB,>= 0,num,bn_mul_add_words_exit  ; if (num <= 0) then exit
-	LDO     128(%sp),%sp        ; bump stack
-
-	;
-	; The loop is unrolled twice, so if there is only 1 number
-    ; then go straight to the cleanup code.
-	;
-	CMPIB,= 1,num,bn_mul_add_words_single_top
-	FLDD    -184(%sp),fw        ; (-56-128) load up w into fw (fw_h/fw_l)
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-	; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
-    ; two 32-bit multiplies can be issued per cycle.
-    ; 
-bn_mul_add_words_unroll2
-
-    FLDD    0(a_ptr),t_float_0       ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    FLDD    8(a_ptr),t_float_1       ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    LDD     0(r_ptr),rp_val          ; rp[0]
-    LDD     8(r_ptr),rp_val_1        ; rp[1]
-
-    XMPYU   fht_0,fw_l,fm1           ; m1[0] = fht_0*fw_l
-    XMPYU   fht_1,fw_l,fm1_1         ; m1[1] = fht_1*fw_l
-    FSTD    fm1,-16(%sp)             ; -16(sp) = m1[0]
-    FSTD    fm1_1,-48(%sp)           ; -48(sp) = m1[1]
-
-    XMPYU   flt_0,fw_h,fm            ; m[0] = flt_0*fw_h
-    XMPYU   flt_1,fw_h,fm_1          ; m[1] = flt_1*fw_h
-    FSTD    fm,-8(%sp)               ; -8(sp) = m[0]
-    FSTD    fm_1,-40(%sp)            ; -40(sp) = m[1]
-
-    XMPYU   fht_0,fw_h,ht_temp       ; ht_temp   = fht_0*fw_h
-    XMPYU   fht_1,fw_h,ht_temp_1     ; ht_temp_1 = fht_1*fw_h
-    FSTD    ht_temp,-24(%sp)         ; -24(sp)   = ht_temp
-    FSTD    ht_temp_1,-56(%sp)       ; -56(sp)   = ht_temp_1
-
-    XMPYU   flt_0,fw_l,lt_temp       ; lt_temp = lt*fw_l
-    XMPYU   flt_1,fw_l,lt_temp_1     ; lt_temp_1 = lt_1*fw_l
-    FSTD    lt_temp,-32(%sp)         ; -32(sp) = lt_temp 
-    FSTD    lt_temp_1,-64(%sp)       ; -64(sp) = lt_temp_1 
-
-    LDD     -8(%sp),m_0              ; m[0] 
-    LDD     -40(%sp),m_1             ; m[1]
-    LDD     -16(%sp),m1_0            ; m1[0]
-    LDD     -48(%sp),m1_1            ; m1[1]
-
-    LDD     -24(%sp),ht_0            ; ht[0]
-    LDD     -56(%sp),ht_1            ; ht[1]
-    ADD,L   m1_0,m_0,tmp_0           ; tmp_0 = m[0] + m1[0]; 
-    ADD,L   m1_1,m_1,tmp_1           ; tmp_1 = m[1] + m1[1]; 
-
-    LDD     -32(%sp),lt_0            
-    LDD     -64(%sp),lt_1            
-    CMPCLR,*>>= tmp_0,m1_0, %r0      ; if (m[0] < m1[0])
-    ADD,L   ht_0,top_overflow,ht_0   ; ht[0] += (1<<32)
-
-    CMPCLR,*>>= tmp_1,m1_1,%r0       ; if (m[1] < m1[1])
-    ADD,L   ht_1,top_overflow,ht_1   ; ht[1] += (1<<32)
-    EXTRD,U tmp_0,31,32,m_0          ; m[0]>>32  
-    DEPD,Z  tmp_0,31,32,m1_0         ; m1[0] = m[0]<<32 
-
-    EXTRD,U tmp_1,31,32,m_1          ; m[1]>>32  
-    DEPD,Z  tmp_1,31,32,m1_1         ; m1[1] = m[1]<<32 
-    ADD,L   ht_0,m_0,ht_0            ; ht[0]+= (m[0]>>32)
-    ADD,L   ht_1,m_1,ht_1            ; ht[1]+= (m[1]>>32)
-
-    ADD     lt_0,m1_0,lt_0           ; lt[0] = lt[0]+m1[0];
-	ADD,DC  ht_0,%r0,ht_0            ; ht[0]++
-    ADD     lt_1,m1_1,lt_1           ; lt[1] = lt[1]+m1[1];
-    ADD,DC  ht_1,%r0,ht_1            ; ht[1]++
-
-    ADD    %ret1,lt_0,lt_0           ; lt[0] = lt[0] + c;
-	ADD,DC  ht_0,%r0,ht_0            ; ht[0]++
-    ADD     lt_0,rp_val,lt_0         ; lt[0] = lt[0]+rp[0]
-    ADD,DC  ht_0,%r0,ht_0            ; ht[0]++
-
-	LDO    -2(num),num               ; num = num - 2;
-    ADD     ht_0,lt_1,lt_1           ; lt[1] = lt[1] + ht_0 (c);
-    ADD,DC  ht_1,%r0,ht_1            ; ht[1]++
-    STD     lt_0,0(r_ptr)            ; rp[0] = lt[0]
-
-    ADD     lt_1,rp_val_1,lt_1       ; lt[1] = lt[1]+rp[1]
-    ADD,DC  ht_1,%r0,%ret1           ; ht[1]++
-    LDO     16(a_ptr),a_ptr          ; a_ptr += 2
-
-    STD     lt_1,8(r_ptr)            ; rp[1] = lt[1]
-	CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
-    LDO     16(r_ptr),r_ptr          ; r_ptr += 2
-
-    CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
-
-	;
-	; Top of loop aligned on 64-byte boundary
-	;
-bn_mul_add_words_single_top
-    FLDD    0(a_ptr),t_float_0        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    LDD     0(r_ptr),rp_val           ; rp[0]
-    LDO     8(a_ptr),a_ptr            ; a_ptr++
-    XMPYU   fht_0,fw_l,fm1            ; m1 = ht*fw_l
-    FSTD    fm1,-16(%sp)              ; -16(sp) = m1
-    XMPYU   flt_0,fw_h,fm             ; m = lt*fw_h
-    FSTD    fm,-8(%sp)                ; -8(sp) = m
-    XMPYU   fht_0,fw_h,ht_temp        ; ht_temp = ht*fw_h
-    FSTD    ht_temp,-24(%sp)          ; -24(sp) = ht
-    XMPYU   flt_0,fw_l,lt_temp        ; lt_temp = lt*fw_l
-    FSTD    lt_temp,-32(%sp)          ; -32(sp) = lt 
-
-    LDD     -8(%sp),m_0               
-    LDD    -16(%sp),m1_0              ; m1 = temp1 
-    ADD,L   m_0,m1_0,tmp_0            ; tmp_0 = m + m1; 
-    LDD     -24(%sp),ht_0             
-    LDD     -32(%sp),lt_0             
-
-    CMPCLR,*>>= tmp_0,m1_0,%r0        ; if (m < m1)
-    ADD,L   ht_0,top_overflow,ht_0    ; ht += (1<<32)
-
-    EXTRD,U tmp_0,31,32,m_0           ; m>>32  
-    DEPD,Z  tmp_0,31,32,m1_0          ; m1 = m<<32 
-
-    ADD,L   ht_0,m_0,ht_0             ; ht+= (m>>32)
-    ADD     lt_0,m1_0,tmp_0           ; tmp_0 = lt+m1;
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-    ADD     %ret1,tmp_0,lt_0          ; lt = lt + c;
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-    ADD     lt_0,rp_val,lt_0          ; lt = lt+rp[0]
-    ADD,DC  ht_0,%r0,%ret1            ; ht++
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt
-
-bn_mul_add_words_exit
-    .EXIT
-	
-    EXTRD,U %ret1,31,32,%ret0         ; for 32-bit, return in ret0/ret1
-    LDD     -80(%sp),%r9              ; restore r9  
-    LDD     -88(%sp),%r8              ; restore r8  
-    LDD     -96(%sp),%r7              ; restore r7  
-    LDD     -104(%sp),%r6             ; restore r6  
-    LDD     -112(%sp),%r5             ; restore r5  
-    LDD     -120(%sp),%r4             ; restore r4  
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3             ; restore r3
-	.PROCEND	;in=23,24,25,26,29;out=28;
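
PA-RISC 2.0 only multiplies 32-bit quantities (XMPYU, in the FP unit, hence the FSTD/LDD round trips through the stack), so each 64x64->128 product above is composed from four 32x32->64 half products: the two cross terms are summed, their carry is detected with the CMPCLR compare, and the halves are recombined. A Python model of one such product (names follow the register comments; illustrative only):

    MASK64 = (1 << 64) - 1

    def mul64x64(a, w):
        ah, al = a >> 32, a & 0xFFFFFFFF
        wh, wl = w >> 32, w & 0xFFFFFFFF
        ht, lt = ah * wh, al * wl            # fht*fw_h, flt*fw_l
        m, m1 = al * wh, ah * wl             # the two cross terms
        tmp = (m + m1) & MASK64
        if tmp < m1:                         # CMPCLR,*>>=: carry out of m + m1
            ht += 1 << 32                    # ht += (1<<32)
        ht += tmp >> 32                      # ht += (m>>32)
        lo = lt + ((tmp << 32) & MASK64)     # lt += (m<<32)
        ht += lo >> 64                       # ADD,DC: ripple the final carry
        return ht, lo & MASK64

    a, w = 0xDEADBEEFDEADBEEF, 0x0123456789ABCDEF
    hi, lo = mul64x64(a, w)
    assert (hi << 64) + lo == a * w
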
-
-;----------------------------------------------------------------------------
-;
-;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
-;
-; arg0 = rp
-; arg1 = ap
-; arg3 = num
-; w on stack at -56(sp)
-
-bn_mul_words
-	.proc
-	.callinfo frame=128
-    .entry
-	.EXPORT	bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3  
-    STD     %r4,8(%sp)          ; save r4  
-	NOP
-    STD     %r5,16(%sp)         ; save r5  
-
-    STD     %r6,24(%sp)         ; save r6  
-    STD     %r7,32(%sp)         ; save r7  
-    COPY    %r0,%ret1           ; return 0 by default
-    DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32    
-
-    CMPIB,>= 0,num,bn_mul_words_exit
-	LDO     128(%sp),%sp    ; bump stack
-
-	;
-	; See if only 1 word to do, thus just do cleanup
-	;
-	CMPIB,= 1,num,bn_mul_words_single_top
-	FLDD    -184(%sp),fw        ; (-56-128) load up w into fw (fw_h/fw_l)
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-	; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
-    ; two 32-bit multiplies can be issued per cycle.
-    ; 
-bn_mul_words_unroll2
-
-    FLDD    0(a_ptr),t_float_0        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    FLDD    8(a_ptr),t_float_1        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    XMPYU   fht_0,fw_l,fm1            ; m1[0] = fht_0*fw_l
-    XMPYU   fht_1,fw_l,fm1_1          ; m1[1] = ht*fw_l
-
-    FSTD    fm1,-16(%sp)              ; -16(sp) = m1
-    FSTD    fm1_1,-48(%sp)            ; -48(sp) = m1
-    XMPYU   flt_0,fw_h,fm             ; m = lt*fw_h
-    XMPYU   flt_1,fw_h,fm_1           ; m = lt*fw_h
-
-    FSTD    fm,-8(%sp)                ; -8(sp) = m
-    FSTD    fm_1,-40(%sp)             ; -40(sp) = m
-    XMPYU   fht_0,fw_h,ht_temp        ; ht_temp = fht_0*fw_h
-    XMPYU   fht_1,fw_h,ht_temp_1      ; ht_temp = ht*fw_h
-
-    FSTD    ht_temp,-24(%sp)          ; -24(sp) = ht
-    FSTD    ht_temp_1,-56(%sp)        ; -56(sp) = ht
-    XMPYU   flt_0,fw_l,lt_temp        ; lt_temp = lt*fw_l
-    XMPYU   flt_1,fw_l,lt_temp_1      ; lt_temp = lt*fw_l
-
-    FSTD    lt_temp,-32(%sp)          ; -32(sp) = lt 
-    FSTD    lt_temp_1,-64(%sp)        ; -64(sp) = lt 
-    LDD     -8(%sp),m_0               
-    LDD     -40(%sp),m_1              
-
-    LDD    -16(%sp),m1_0              
-    LDD    -48(%sp),m1_1              
-    LDD     -24(%sp),ht_0             
-    LDD     -56(%sp),ht_1             
-
-    ADD,L   m1_0,m_0,tmp_0            ; tmp_0 = m + m1; 
-    ADD,L   m1_1,m_1,tmp_1            ; tmp_1 = m + m1; 
-    LDD     -32(%sp),lt_0             
-    LDD     -64(%sp),lt_1             
-
-    CMPCLR,*>>= tmp_0,m1_0, %r0       ; if (m < m1)
-    ADD,L   ht_0,top_overflow,ht_0    ; ht += (1<<32)
-    CMPCLR,*>>= tmp_1,m1_1,%r0        ; if (m < m1)
-    ADD,L   ht_1,top_overflow,ht_1    ; ht += (1<<32)
-
-    EXTRD,U tmp_0,31,32,m_0           ; m>>32  
-    DEPD,Z  tmp_0,31,32,m1_0          ; m1 = m<<32 
-    EXTRD,U tmp_1,31,32,m_1           ; m>>32  
-    DEPD,Z  tmp_1,31,32,m1_1          ; m1 = m<<32 
-
-    ADD,L   ht_0,m_0,ht_0             ; ht+= (m>>32)
-    ADD,L   ht_1,m_1,ht_1             ; ht+= (m>>32)
-    ADD     lt_0,m1_0,lt_0            ; lt = lt+m1;
-	ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    ADD     lt_1,m1_1,lt_1            ; lt = lt+m1;
-    ADD,DC  ht_1,%r0,ht_1             ; ht++
-    ADD    %ret1,lt_0,lt_0            ; lt = lt + c (ret1);
-	ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    ADD     ht_0,lt_1,lt_1            ; lt = lt + c (ht_0)
-    ADD,DC  ht_1,%r0,ht_1             ; ht++
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt
-    STD     lt_1,8(r_ptr)             ; rp[1] = lt
-
-	COPY    ht_1,%ret1                ; carry = ht
-	LDO    -2(num),num                ; num = num - 2;
-    LDO     16(a_ptr),a_ptr           ; ap += 2
-	CMPIB,<= 2,num,bn_mul_words_unroll2
-    LDO     16(r_ptr),r_ptr           ; rp++
-
-    CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
-
-	;
-	; Top of loop aligned on 64-byte boundary
-	;
-bn_mul_words_single_top
-    FLDD    0(a_ptr),t_float_0        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-
-    XMPYU   fht_0,fw_l,fm1            ; m1 = ht*fw_l
-    FSTD    fm1,-16(%sp)              ; -16(sp) = m1
-    XMPYU   flt_0,fw_h,fm             ; m = lt*fw_h
-    FSTD    fm,-8(%sp)                ; -8(sp) = m
-    XMPYU   fht_0,fw_h,ht_temp        ; ht_temp = ht*fw_h
-    FSTD    ht_temp,-24(%sp)          ; -24(sp) = ht
-    XMPYU   flt_0,fw_l,lt_temp        ; lt_temp = lt*fw_l
-    FSTD    lt_temp,-32(%sp)          ; -32(sp) = lt 
-
-    LDD     -8(%sp),m_0               
-    LDD    -16(%sp),m1_0              
-    ADD,L   m_0,m1_0,tmp_0            ; tmp_0 = m + m1; 
-    LDD     -24(%sp),ht_0             
-    LDD     -32(%sp),lt_0             
-
-    CMPCLR,*>>= tmp_0,m1_0,%r0        ; if (m < m1)
-    ADD,L   ht_0,top_overflow,ht_0    ; ht += (1<<32)
-
-    EXTRD,U tmp_0,31,32,m_0           ; m>>32  
-    DEPD,Z  tmp_0,31,32,m1_0          ; m1 = m<<32 
-
-    ADD,L   ht_0,m_0,ht_0             ; ht+= (m>>32)
-    ADD     lt_0,m1_0,lt_0            ; lt= lt+m1;
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    ADD     %ret1,lt_0,lt_0           ; lt = lt + c;
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    COPY    ht_0,%ret1                ; copy carry
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt
-
-bn_mul_words_exit
-    .EXIT
-    EXTRD,U %ret1,31,32,%ret0           ; for 32-bit, return in ret0/ret1
-    LDD     -96(%sp),%r7              ; restore r7  
-    LDD     -104(%sp),%r6             ; restore r6  
-    LDD     -112(%sp),%r5             ; restore r5  
-    LDD     -120(%sp),%r4             ; restore r4  
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3             ; restore r3
-	.PROCEND	
-
-;----------------------------------------------------------------------------
-;
-;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
-;
-; arg0 = rp
-; arg1 = ap
-; arg2 = num
-;
-
-bn_sqr_words
-	.proc
-	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3  
-    STD     %r4,8(%sp)          ; save r4  
-	NOP
-    STD     %r5,16(%sp)         ; save r5  
-
-    CMPIB,>= 0,num,bn_sqr_words_exit
-	LDO     128(%sp),%sp       ; bump stack
-
-	;
-	; If only 1, then go straight to cleanup
-	;
-	CMPIB,= 1,num,bn_sqr_words_single_top
-    DEPDI,Z -1,32,33,high_mask   ; Create Mask 0xffffffff80000000L
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-
-bn_sqr_words_unroll2
-    FLDD    0(a_ptr),t_float_0        ; a[0]
-    FLDD    8(a_ptr),t_float_1        ; a[1]
-    XMPYU   fht_0,flt_0,fm            ; m[0]
-    XMPYU   fht_1,flt_1,fm_1          ; m[1]
-
-    FSTD    fm,-24(%sp)               ; store m[0]
-    FSTD    fm_1,-56(%sp)             ; store m[1]
-    XMPYU   flt_0,flt_0,lt_temp       ; lt[0]
-    XMPYU   flt_1,flt_1,lt_temp_1     ; lt[1]
-
-    FSTD    lt_temp,-16(%sp)          ; store lt[0]
-    FSTD    lt_temp_1,-48(%sp)        ; store lt[1]
-    XMPYU   fht_0,fht_0,ht_temp       ; ht[0]
-    XMPYU   fht_1,fht_1,ht_temp_1     ; ht[1]
-
-    FSTD    ht_temp,-8(%sp)           ; store ht[0]
-    FSTD    ht_temp_1,-40(%sp)        ; store ht[1]
-    LDD     -24(%sp),m_0             
-    LDD     -56(%sp),m_1              
-
-    AND     m_0,high_mask,tmp_0       ; m[0] & Mask
-    AND     m_1,high_mask,tmp_1       ; m[1] & Mask
-    DEPD,Z  m_0,30,31,m_0             ; m[0] << 32+1
-    DEPD,Z  m_1,30,31,m_1             ; m[1] << 32+1
-
-    LDD     -16(%sp),lt_0        
-    LDD     -48(%sp),lt_1        
-    EXTRD,U tmp_0,32,33,tmp_0         ; tmp_0 = m[0]&Mask >> 32-1
-    EXTRD,U tmp_1,32,33,tmp_1         ; tmp_1 = m[1]&Mask >> 32-1
-
-    LDD     -8(%sp),ht_0            
-    LDD     -40(%sp),ht_1           
-    ADD,L   ht_0,tmp_0,ht_0           ; ht[0] += tmp_0
-    ADD,L   ht_1,tmp_1,ht_1           ; ht[1] += tmp_1
-
-    ADD     lt_0,m_0,lt_0             ; lt = lt+m
-    ADD,DC  ht_0,%r0,ht_0             ; ht[0]++
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt[0]
-    STD     ht_0,8(r_ptr)             ; rp[1] = ht[0]
-
-    ADD     lt_1,m_1,lt_1             ; lt = lt+m
-    ADD,DC  ht_1,%r0,ht_1             ; ht[1]++
-    STD     lt_1,16(r_ptr)            ; rp[2] = lt[1]
-    STD     ht_1,24(r_ptr)            ; rp[3] = ht[1]
-
-	LDO    -2(num),num                ; num = num - 2;
-    LDO     16(a_ptr),a_ptr           ; ap += 2
-	CMPIB,<= 2,num,bn_sqr_words_unroll2
-    LDO     32(r_ptr),r_ptr           ; rp += 4
-
-    CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
-
-	;
-	; Top of loop aligned on 64-byte boundary
-	;
-bn_sqr_words_single_top
-    FLDD    0(a_ptr),t_float_0        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-
-    XMPYU   fht_0,flt_0,fm            ; m
-    FSTD    fm,-24(%sp)               ; store m
-
-    XMPYU   flt_0,flt_0,lt_temp       ; lt
-    FSTD    lt_temp,-16(%sp)          ; store lt
-
-    XMPYU   fht_0,fht_0,ht_temp       ; ht
-    FSTD    ht_temp,-8(%sp)           ; store ht
-
-    LDD     -24(%sp),m_0              ; load m
-    AND     m_0,high_mask,tmp_0       ; m & Mask
-    DEPD,Z  m_0,30,31,m_0             ; m << 32+1
-    LDD     -16(%sp),lt_0             ; lt
-
-    LDD     -8(%sp),ht_0              ; ht
-    EXTRD,U tmp_0,32,33,tmp_0         ; tmp_0 = m&Mask >> 32-1
-    ADD     m_0,lt_0,lt_0             ; lt = lt+m
-    ADD,L   ht_0,tmp_0,ht_0           ; ht += tmp_0
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt
-    STD     ht_0,8(r_ptr)             ; rp[1] = ht
-
-bn_sqr_words_exit
-    .EXIT
-    LDD     -112(%sp),%r5       ; restore r5  
-    LDD     -120(%sp),%r4       ; restore r4  
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3 
-	.PROCEND	;in=23,24,25,26,29;out=28;
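
bn_sqr_words needs only three 32-bit multiplies per word instead of four: with a = h*2^32 + l, the square is h^2*2^64 + 2*h*l*2^32 + l^2, and the doubled cross term is folded in by shifting m = h*l left by 33 bits for the low word and taking the masked high bits shifted right by 31 for the high word (which is what the DEPD,Z/EXTRD,U pair with high_mask computes). In Python:

    MASK64 = (1 << 64) - 1

    def sqr64(a):
        h, l = a >> 32, a & 0xFFFFFFFF
        ht, lt, m = h * h, l * l, h * l
        lo = lt + ((m << 33) & MASK64)               # DEPD,Z: m << (32+1)
        hi = ht + ((m & 0xFFFFFFFF80000000) >> 31)   # EXTRD,U of m & high_mask
        hi += lo >> 64                               # ADD,DC carry
        return hi, lo & MASK64

    for x in (0, 1, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF):
        hi, lo = sqr64(x)
        assert (hi << 64) + lo == x * x
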
-
-
-;----------------------------------------------------------------------------
-;
-;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
-;
-; arg0 = rp 
-; arg1 = ap
-; arg2 = bp 
-; arg3 = n
-
-t  .reg %r22
-b  .reg %r21
-l  .reg %r20
-
-bn_add_words
-	.proc
-    .entry
-	.callinfo
-	.EXPORT	bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-	.align 64
-
-    CMPIB,>= 0,n,bn_add_words_exit
-    COPY    %r0,%ret1           ; return 0 by default
-
-	;
-	; If 2 or more numbers do the loop
-	;
-	CMPIB,= 1,n,bn_add_words_single_top
-	NOP
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-bn_add_words_unroll2
-	LDD     0(a_ptr),t
-	LDD     0(b_ptr),b
-	ADD     t,%ret1,t                    ; t = t+c;
-	ADD,DC  %r0,%r0,%ret1                ; set c to carry
-	ADD     t,b,l                        ; l = t + b[0]
-	ADD,DC  %ret1,%r0,%ret1              ; c+= carry
-	STD     l,0(r_ptr)
-
-	LDD     8(a_ptr),t
-	LDD     8(b_ptr),b
-	ADD     t,%ret1,t                     ; t = t+c;
-	ADD,DC  %r0,%r0,%ret1                 ; set c to carry
-	ADD     t,b,l                         ; l = t + b[0]
-	ADD,DC  %ret1,%r0,%ret1               ; c+= carry
-	STD     l,8(r_ptr)
-
-	LDO     -2(n),n
-	LDO     16(a_ptr),a_ptr
-	LDO     16(b_ptr),b_ptr
-
-	CMPIB,<= 2,n,bn_add_words_unroll2
-	LDO     16(r_ptr),r_ptr
-
-    CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
-
-bn_add_words_single_top
-	LDD     0(a_ptr),t
-	LDD     0(b_ptr),b
-
-	ADD     t,%ret1,t                 ; t = t+c;
-	ADD,DC  %r0,%r0,%ret1             ; set c to carry (could use CMPCLR??)
-	ADD     t,b,l                     ; l = t + b[0]
-	ADD,DC  %ret1,%r0,%ret1           ; c+= carry
-	STD     l,0(r_ptr)
-
-bn_add_words_exit
-    .EXIT
-    BVE     (%rp)
-    EXTRD,U %ret1,31,32,%ret0           ; for 32-bit, return in ret0/ret1
-	.PROCEND	;in=23,24,25,26,29;out=28;
-
-;----------------------------------------------------------------------------
-;
-;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
-;
-; arg0 = rp 
-; arg1 = ap
-; arg2 = bp 
-; arg3 = n
-
-t1       .reg %r22
-t2       .reg %r21
-sub_tmp1 .reg %r20
-sub_tmp2 .reg %r19
-
-
-bn_sub_words
-	.proc
-	.callinfo 
-	.EXPORT	bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-
-    CMPIB,>=  0,n,bn_sub_words_exit
-    COPY    %r0,%ret1           ; return 0 by default
-
-	;
-	; If 2 or more numbers do the loop
-	;
-	CMPIB,= 1,n,bn_sub_words_single_top
-	NOP
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-bn_sub_words_unroll2
-	LDD     0(a_ptr),t1
-	LDD     0(b_ptr),t2
-	SUB     t1,t2,sub_tmp1           ; t3 = t1-t2; 
-	SUB     sub_tmp1,%ret1,sub_tmp1  ; t3 = t3- c; 
-
-	CMPCLR,*>> t1,t2,sub_tmp2        ; clear if t1 > t2
-	LDO      1(%r0),sub_tmp2
-	
-	CMPCLR,*= t1,t2,%r0
-	COPY    sub_tmp2,%ret1
-	STD     sub_tmp1,0(r_ptr)
-
-	LDD     8(a_ptr),t1
-	LDD     8(b_ptr),t2
-	SUB     t1,t2,sub_tmp1            ; t3 = t1-t2; 
-	SUB     sub_tmp1,%ret1,sub_tmp1   ; t3 = t3- c; 
-	CMPCLR,*>> t1,t2,sub_tmp2         ; clear if t1 > t2
-	LDO      1(%r0),sub_tmp2
-	
-	CMPCLR,*= t1,t2,%r0
-	COPY    sub_tmp2,%ret1
-	STD     sub_tmp1,8(r_ptr)
-
-	LDO     -2(n),n
-	LDO     16(a_ptr),a_ptr
-	LDO     16(b_ptr),b_ptr
-
-	CMPIB,<= 2,n,bn_sub_words_unroll2
-	LDO     16(r_ptr),r_ptr
-
-    CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
-
-bn_sub_words_single_top
-	LDD     0(a_ptr),t1
-	LDD     0(b_ptr),t2
-	SUB     t1,t2,sub_tmp1            ; t3 = t1-t2; 
-	SUB     sub_tmp1,%ret1,sub_tmp1   ; t3 = t3- c; 
-	CMPCLR,*>> t1,t2,sub_tmp2         ; clear if t1 > t2
-	LDO      1(%r0),sub_tmp2
-	
-	CMPCLR,*= t1,t2,%r0
-	COPY    sub_tmp2,%ret1
-
-	STD     sub_tmp1,0(r_ptr)
-
-bn_sub_words_exit
-    .EXIT
-    BVE     (%rp)
-    EXTRD,U %ret1,31,32,%ret0           ; for 32-bit, return in ret0/ret1
-	.PROCEND	;in=23,24,25,26,29;out=28;
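
bn_add_words and bn_sub_words are plain carry/borrow ripples; the one subtlety is the borrow recurrence, which the CMPCLR pair implements branch-free: the next borrow is 1 unless t1 > t2, and when t1 == t2 the previous borrow is kept. The same recurrence in Python (illustrative sketch):

    MASK64 = (1 << 64) - 1

    def bn_sub_words(a, b):
        # limbwise a - b; returns (result limbs, final borrow out)
        r, c = [], 0
        for x, y in zip(a, b):
            r.append((x - y - c) & MASK64)
            if x != y:                   # CMPCLR,*=: keep old c on equality
                c = 0 if x > y else 1    # CMPCLR,*>>: borrow unless x > y
        return r, c

    r, c = bn_sub_words([5, 0], [7, 0])  # 5 - 7 borrows through limb 1
    assert r == [MASK64 - 1, MASK64] and c == 1
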
-
-;------------------------------------------------------------------------------
-;
-; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
-;
-; arg0 = h
-; arg1 = l
-; arg2 = d
-;
-; This is mainly just output from the HP C compiler.  
-;
-;------------------------------------------------------------------------------
-bn_div_words
-	.PROC
-	.EXPORT	bn_div_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR,LONG_RETURN
-	.IMPORT	BN_num_bits_word,CODE
-	;--- not PIC	.IMPORT	__iob,DATA
-	;--- not PIC	.IMPORT	fprintf,CODE
-	.IMPORT	abort,CODE
-	.IMPORT	$$div2U,MILLICODE
-	.CALLINFO CALLER,FRAME=144,ENTRY_GR=%r9,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
-        .ENTRY
-        STW     %r2,-20(%r30)   ;offset 0x8ec
-        STW,MA  %r3,192(%r30)   ;offset 0x8f0
-        STW     %r4,-188(%r30)  ;offset 0x8f4
-        DEPD    %r5,31,32,%r6   ;offset 0x8f8
-        STD     %r6,-184(%r30)  ;offset 0x8fc
-        DEPD    %r7,31,32,%r8   ;offset 0x900
-        STD     %r8,-176(%r30)  ;offset 0x904
-        STW     %r9,-168(%r30)  ;offset 0x908
-        LDD     -248(%r30),%r3  ;offset 0x90c
-        COPY    %r26,%r4        ;offset 0x910
-        COPY    %r24,%r5        ;offset 0x914
-        DEPD    %r25,31,32,%r4  ;offset 0x918
-        CMPB,*<>        %r3,%r0,$0006000C       ;offset 0x91c
-        DEPD    %r23,31,32,%r5  ;offset 0x920
-        MOVIB,TR        -1,%r29,$00060002       ;offset 0x924
-        EXTRD,U %r29,31,32,%r28 ;offset 0x928
-$0006002A
-        LDO     -1(%r29),%r29   ;offset 0x92c
-        SUB     %r23,%r7,%r23   ;offset 0x930
-$00060024
-        SUB     %r4,%r31,%r25   ;offset 0x934
-        AND     %r25,%r19,%r26  ;offset 0x938
-        CMPB,*<>,N      %r0,%r26,$00060046      ;offset 0x93c
-        DEPD,Z  %r25,31,32,%r20 ;offset 0x940
-        OR      %r20,%r24,%r21  ;offset 0x944
-        CMPB,*<<,N      %r21,%r23,$0006002A     ;offset 0x948
-        SUB     %r31,%r2,%r31   ;offset 0x94c
-$00060046
-$0006002E
-        DEPD,Z  %r23,31,32,%r25 ;offset 0x950
-        EXTRD,U %r23,31,32,%r26 ;offset 0x954
-        AND     %r25,%r19,%r24  ;offset 0x958
-        ADD,L   %r31,%r26,%r31  ;offset 0x95c
-        CMPCLR,*>>=     %r5,%r24,%r0    ;offset 0x960
-        LDO     1(%r31),%r31    ;offset 0x964
-$00060032
-        CMPB,*<<=,N     %r31,%r4,$00060036      ;offset 0x968
-        LDO     -1(%r29),%r29   ;offset 0x96c
-        ADD,L   %r4,%r3,%r4     ;offset 0x970
-$00060036
-        ADDIB,=,N       -1,%r8,$D0      ;offset 0x974
-        SUB     %r5,%r24,%r28   ;offset 0x978
-$0006003A
-        SUB     %r4,%r31,%r24   ;offset 0x97c
-        SHRPD   %r24,%r28,32,%r4        ;offset 0x980
-        DEPD,Z  %r29,31,32,%r9  ;offset 0x984
-        DEPD,Z  %r28,31,32,%r5  ;offset 0x988
-$0006001C
-        EXTRD,U %r4,31,32,%r31  ;offset 0x98c
-        CMPB,*<>,N      %r31,%r2,$00060020      ;offset 0x990
-        MOVB,TR %r6,%r29,$D1    ;offset 0x994
-        STD     %r29,-152(%r30) ;offset 0x998
-$0006000C
-        EXTRD,U %r3,31,32,%r25  ;offset 0x99c
-        COPY    %r3,%r26        ;offset 0x9a0
-        EXTRD,U %r3,31,32,%r9   ;offset 0x9a4
-        EXTRD,U %r4,31,32,%r8   ;offset 0x9a8
-        .CALL   ARGW0=GR,ARGW1=GR,RTNVAL=GR     ;in=25,26;out=28;
-        B,L     BN_num_bits_word,%r2    ;offset 0x9ac
-        EXTRD,U %r5,31,32,%r7   ;offset 0x9b0
-        LDI     64,%r20 ;offset 0x9b4
-        DEPD    %r7,31,32,%r5   ;offset 0x9b8
-        DEPD    %r8,31,32,%r4   ;offset 0x9bc
-        DEPD    %r9,31,32,%r3   ;offset 0x9c0
-        CMPB,=  %r28,%r20,$00060012     ;offset 0x9c4
-        COPY    %r28,%r24       ;offset 0x9c8
-        MTSARCM %r24    ;offset 0x9cc
-        DEPDI,Z -1,%sar,1,%r19  ;offset 0x9d0
-        CMPB,*>>,N      %r4,%r19,$D2    ;offset 0x9d4
-$00060012
-        SUBI    64,%r24,%r31    ;offset 0x9d8
-        CMPCLR,*<<      %r4,%r3,%r0     ;offset 0x9dc
-        SUB     %r4,%r3,%r4     ;offset 0x9e0
-$00060016
-        CMPB,=  %r31,%r0,$0006001A      ;offset 0x9e4
-        COPY    %r0,%r9 ;offset 0x9e8
-        MTSARCM %r31    ;offset 0x9ec
-        DEPD,Z  %r3,%sar,64,%r3 ;offset 0x9f0
-        SUBI    64,%r31,%r26    ;offset 0x9f4
-        MTSAR   %r26    ;offset 0x9f8
-        SHRPD   %r4,%r5,%sar,%r4        ;offset 0x9fc
-        MTSARCM %r31    ;offset 0xa00
-        DEPD,Z  %r5,%sar,64,%r5 ;offset 0xa04
-$0006001A
-        DEPDI,Z -1,31,32,%r19   ;offset 0xa08
-        AND     %r3,%r19,%r29   ;offset 0xa0c
-        EXTRD,U %r29,31,32,%r2  ;offset 0xa10
-        DEPDI,Z -1,63,32,%r6    ;offset 0xa14
-        MOVIB,TR        2,%r8,$0006001C ;offset 0xa18
-        EXTRD,U %r3,63,32,%r7   ;offset 0xa1c
-$D2
-        ;--- not PIC	ADDIL   LR'__iob-$global$,%r27,%r1      ;offset 0xa20
-        ;--- not PIC	LDIL    LR'C$7,%r21     ;offset 0xa24
-        ;--- not PIC	LDO     RR'__iob-$global$+32(%r1),%r26  ;offset 0xa28
-        ;--- not PIC	.CALL   ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR    ;in=24,25,26;out=28;
-        ;--- not PIC	B,L     fprintf,%r2     ;offset 0xa2c
-        ;--- not PIC	LDO     RR'C$7(%r21),%r25       ;offset 0xa30
-        .CALL           ;
-        B,L     abort,%r2       ;offset 0xa34
-        NOP             ;offset 0xa38
-        B       $D3     ;offset 0xa3c
-        LDW     -212(%r30),%r2  ;offset 0xa40
-$00060020
-        COPY    %r4,%r26        ;offset 0xa44
-        EXTRD,U %r4,31,32,%r25  ;offset 0xa48
-        COPY    %r2,%r24        ;offset 0xa4c
-        .CALL   ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
-        B,L     $$div2U,%r31    ;offset 0xa50
-        EXTRD,U %r2,31,32,%r23  ;offset 0xa54
-        DEPD    %r28,31,32,%r29 ;offset 0xa58
-$00060022
-        STD     %r29,-152(%r30) ;offset 0xa5c
-$D1
-        AND     %r5,%r19,%r24   ;offset 0xa60
-        EXTRD,U %r24,31,32,%r24 ;offset 0xa64
-        STW     %r2,-160(%r30)  ;offset 0xa68
-        STW     %r7,-128(%r30)  ;offset 0xa6c
-        FLDD    -152(%r30),%fr4 ;offset 0xa70
-        FLDD    -152(%r30),%fr7 ;offset 0xa74
-        FLDW    -160(%r30),%fr8L        ;offset 0xa78
-        FLDW    -128(%r30),%fr5L        ;offset 0xa7c
-        XMPYU   %fr8L,%fr7L,%fr10       ;offset 0xa80
-        FSTD    %fr10,-136(%r30)        ;offset 0xa84
-        XMPYU   %fr8L,%fr7R,%fr22       ;offset 0xa88
-        FSTD    %fr22,-144(%r30)        ;offset 0xa8c
-        XMPYU   %fr5L,%fr4L,%fr11       ;offset 0xa90
-        XMPYU   %fr5L,%fr4R,%fr23       ;offset 0xa94
-        FSTD    %fr11,-112(%r30)        ;offset 0xa98
-        FSTD    %fr23,-120(%r30)        ;offset 0xa9c
-        LDD     -136(%r30),%r28 ;offset 0xaa0
-        DEPD,Z  %r28,31,32,%r31 ;offset 0xaa4
-        LDD     -144(%r30),%r20 ;offset 0xaa8
-        ADD,L   %r20,%r31,%r31  ;offset 0xaac
-        LDD     -112(%r30),%r22 ;offset 0xab0
-        DEPD,Z  %r22,31,32,%r22 ;offset 0xab4
-        LDD     -120(%r30),%r21 ;offset 0xab8
-        B       $00060024       ;offset 0xabc
-        ADD,L   %r21,%r22,%r23  ;offset 0xac0
-$D0
-        OR      %r9,%r29,%r29   ;offset 0xac4
-$00060040
-        EXTRD,U %r29,31,32,%r28 ;offset 0xac8
-$00060002
-$L2
-        LDW     -212(%r30),%r2  ;offset 0xacc
-$D3
-        LDW     -168(%r30),%r9  ;offset 0xad0
-        LDD     -176(%r30),%r8  ;offset 0xad4
-        EXTRD,U %r8,31,32,%r7   ;offset 0xad8
-        LDD     -184(%r30),%r6  ;offset 0xadc
-        EXTRD,U %r6,31,32,%r5   ;offset 0xae0
-        LDW     -188(%r30),%r4  ;offset 0xae4
-        BVE     (%r2)   ;offset 0xae8
-        .EXIT
-        LDW,MB  -192(%r30),%r3  ;offset 0xaec
-	.PROCEND	;in=23,25;out=28,29;fpin=105,107;
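-
-; At the C level this compiler output computes the one-word quotient
-; of a two-word dividend (contract sketch only; bn_div_words_spec is
-; a hypothetical name, BN_ULONG a 64-bit unsigned type):
-;
-;   /* returns ((h << 64) | l) / d, assuming the quotient fits in
-;      one word (h < d); otherwise the code above calls abort()   */
-;   BN_ULONG bn_div_words_spec(BN_ULONG h, BN_ULONG l, BN_ULONG d);
-;
-; It first normalizes d so its top bit is set (the BN_num_bits_word
-; call plus the shifts), then extracts the quotient as two 32-bit
-; digits per pass through $0006001C, estimating each digit with the
-; $$div2U millicode call and correcting it with the XMPYU partial
-; products.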
-
-
-
-
-;----------------------------------------------------------------------------
-;
-; Registers to hold 64-bit values to manipulate.  The "L" part
-; of the register corresponds to the upper 32-bits, while the "R"
-; part corresponds to the lower 32-bits
-; 
-; Note, that when using b6 and b7, the code must save these before
-; using them because they are callee save registers 
-; 
-;
-; Floating point registers to use to save values that
-; are manipulated.  These don't collide with ftemp1-6 and
-; are all caller save registers
-;
-a0        .reg %fr22
-a0L       .reg %fr22L
-a0R       .reg %fr22R
-
-a1        .reg %fr23
-a1L       .reg %fr23L
-a1R       .reg %fr23R
-
-a2        .reg %fr24
-a2L       .reg %fr24L
-a2R       .reg %fr24R
-
-a3        .reg %fr25
-a3L       .reg %fr25L
-a3R       .reg %fr25R
-
-a4        .reg %fr26
-a4L       .reg %fr26L
-a4R       .reg %fr26R
-
-a5        .reg %fr27
-a5L       .reg %fr27L
-a5R       .reg %fr27R
-
-a6        .reg %fr28
-a6L       .reg %fr28L
-a6R       .reg %fr28R
-
-a7        .reg %fr29
-a7L       .reg %fr29L
-a7R       .reg %fr29R
-
-b0        .reg %fr30
-b0L       .reg %fr30L
-b0R       .reg %fr30R
-
-b1        .reg %fr31
-b1L       .reg %fr31L
-b1R       .reg %fr31R
-
-;
-; Temporary floating point variables, these are all caller save
-; registers
-;
-ftemp1    .reg %fr4
-ftemp2    .reg %fr5
-ftemp3    .reg %fr6
-ftemp4    .reg %fr7
-
-;
-; The B set of registers when used.
-;
-
-b2        .reg %fr8
-b2L       .reg %fr8L
-b2R       .reg %fr8R
-
-b3        .reg %fr9
-b3L       .reg %fr9L
-b3R       .reg %fr9R
-
-b4        .reg %fr10
-b4L       .reg %fr10L
-b4R       .reg %fr10R
-
-b5        .reg %fr11
-b5L       .reg %fr11L
-b5R       .reg %fr11R
-
-b6        .reg %fr12
-b6L       .reg %fr12L
-b6R       .reg %fr12R
-
-b7        .reg %fr13
-b7L       .reg %fr13L
-b7R       .reg %fr13R
-
-c1           .reg %r21   ; only reg
-temp1        .reg %r20   ; only reg
-temp2        .reg %r19   ; only reg
-temp3        .reg %r31   ; only reg
-
-m1           .reg %r28   
-c2           .reg %r23   
-high_one     .reg %r1
-ht           .reg %r6
-lt           .reg %r5
-m            .reg %r4
-c3           .reg %r3
-
-SQR_ADD_C  .macro  A0L,A0R,C1,C2,C3
-    XMPYU   A0L,A0R,ftemp1       ; m
-    FSTD    ftemp1,-24(%sp)      ; store m
-
-    XMPYU   A0R,A0R,ftemp2       ; lt
-    FSTD    ftemp2,-16(%sp)      ; store lt
-
-    XMPYU   A0L,A0L,ftemp3       ; ht
-    FSTD    ftemp3,-8(%sp)       ; store ht
-
-    LDD     -24(%sp),m           ; load m
-    AND     m,high_mask,temp2    ; m & Mask
-    DEPD,Z  m,30,31,temp3        ; m << 32+1
-    LDD     -16(%sp),lt          ; lt
-
-    LDD     -8(%sp),ht           ; ht
-    EXTRD,U temp2,32,33,temp1    ; temp1 = m&Mask >> 32-1
-    ADD     temp3,lt,lt          ; lt = lt+m
-    ADD,L   ht,temp1,ht          ; ht += temp1
-    ADD,DC  ht,%r0,ht            ; ht++
-
-    ADD     C1,lt,C1             ; c1=c1+lt
-    ADD,DC  ht,%r0,ht            ; ht++
-
-    ADD     C2,ht,C2             ; c2=c2+ht
-    ADD,DC  C3,%r0,C3            ; c3++
-.endm
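-
-; SQR_ADD_C folds a^2 into the running column sum c3:c2:c1, using
-; a^2 = (aH*aH)<<64 + (2*aH*aL)<<32 + aL*aL with three XMPYU 32x32
-; multiplies.  A C sketch of the macro body (reference only;
-; sqr_add_c is a hypothetical name, BN_ULONG a 64-bit unsigned type):
-;
-;   void sqr_add_c(BN_ULONG a, BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
-;   {
-;       BN_ULONG aL = a & 0xffffffffUL, aH = a >> 32;
-;       BN_ULONG m  = aH * aL;          /* cross product            */
-;       BN_ULONG lt = aL * aL, ht = aH * aH;
-;       ht += m >> 31;                  /* high half of 2*m << 32   */
-;       BN_ULONG t = lt + (m << 33);    /* low half of 2*m << 32    */
-;       ht += (t < lt);                 /* carry into ht            */
-;       lt  = t;
-;       *c1 += lt;  ht  += (*c1 < lt);  /* c1 += lt, carry into ht  */
-;       *c2 += ht;  *c3 += (*c2 < ht);  /* c2 += ht, carry into c3  */
-;   }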
-
-SQR_ADD_C2 .macro  A0L,A0R,A1L,A1R,C1,C2,C3
-    XMPYU   A0L,A1R,ftemp1          ; m1 = bl*ht
-    FSTD    ftemp1,-16(%sp)         ;
-    XMPYU   A0R,A1L,ftemp2          ; m = bh*lt
-    FSTD    ftemp2,-8(%sp)          ;
-    XMPYU   A0R,A1R,ftemp3          ; lt = bl*lt
-    FSTD    ftemp3,-32(%sp)
-    XMPYU   A0L,A1L,ftemp4          ; ht = bh*ht
-    FSTD    ftemp4,-24(%sp)         ;
-
-    LDD     -8(%sp),m               ; r21 = m
-    LDD     -16(%sp),m1             ; r19 = m1
-    ADD,L   m,m1,m                  ; m+m1
-
-    DEPD,Z  m,31,32,temp3           ; (m+m1<<32)
-    LDD     -24(%sp),ht             ; r24 = ht
-
-    CMPCLR,*>>= m,m1,%r0            ; if (m < m1)
-    ADD,L   ht,high_one,ht          ; ht+=high_one
-
-    EXTRD,U m,31,32,temp1           ; m >> 32
-    LDD     -32(%sp),lt             ; lt
-    ADD,L   ht,temp1,ht             ; ht+= m>>32
-    ADD     lt,temp3,lt             ; lt = lt+m1
-    ADD,DC  ht,%r0,ht               ; ht++
-
-    ADD     ht,ht,ht                ; ht=ht+ht;
-    ADD,DC  C3,%r0,C3               ; add in carry (c3++)
-
-    ADD     lt,lt,lt                ; lt=lt+lt;
-    ADD,DC  ht,%r0,ht               ; add in carry (ht++)
-
-    ADD     C1,lt,C1                ; c1=c1+lt
-    ADD,DC,*NUV ht,%r0,ht           ; add in carry (ht++)
-    LDO     1(C3),C3              ; bump c3 if overflow,nullify otherwise
-
-    ADD     C2,ht,C2                ; c2 = c2 + ht
-    ADD,DC  C3,%r0,C3             ; add in carry (c3++)
-.endm
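-
-; In a square every cross product a[i]*a[j] (i != j) occurs twice, so
-; SQR_ADD_C2 folds 2*a*b into c3:c2:c1, doubling ht:lt with the
-; ADD / ADD,DC pairs and catching each shifted-out bit.  A C sketch
-; (reference only; the 128-bit product stands in for the four XMPYU
-; partial products, and sqr_add_c2 is a hypothetical name):
-;
-;   void sqr_add_c2(BN_ULONG a, BN_ULONG b,
-;                   BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
-;   {
-;       unsigned __int128 p = (unsigned __int128)a * b;
-;       BN_ULONG lo = (BN_ULONG)p, hi = (BN_ULONG)(p >> 64);
-;       *c3 += hi >> 63;                   /* bit shifted out of ht */
-;       hi <<= 1;
-;       hi += lo >> 63;                    /* bit shifted out of lt */
-;       lo <<= 1;
-;       *c1 += lo;
-;       BN_ULONG k = (*c1 < lo);           /* carry out of c1       */
-;       hi += k;
-;       if (hi < k)                        /* ht wrapped: bump c3   */
-;           *c3 += 1;
-;       *c2 += hi;
-;       *c3 += (*c2 < hi);
-;   }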
-
-;
-;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
-; arg0 = r_ptr
-; arg1 = a_ptr
-;
-
-bn_sqr_comba8
-	.PROC
-	.CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .ENTRY
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3
-    STD     %r4,8(%sp)          ; save r4
-    STD     %r5,16(%sp)         ; save r5
-    STD     %r6,24(%sp)         ; save r6
-
-	;
-	; Zero out carries
-	;
-	COPY     %r0,c1
-	COPY     %r0,c2
-	COPY     %r0,c3
-
-	LDO      128(%sp),%sp       ; bump stack
-    DEPDI,Z -1,32,33,high_mask   ; Create Mask 0xffffffff80000000L
-    DEPDI,Z  1,31,1,high_one     ; Create Value  1 << 32
-
-	;
-	; Load up all of the values we are going to use
-	;
-    FLDD     0(a_ptr),a0       
-    FLDD     8(a_ptr),a1       
-    FLDD    16(a_ptr),a2       
-    FLDD    24(a_ptr),a3       
-    FLDD    32(a_ptr),a4       
-    FLDD    40(a_ptr),a5       
-    FLDD    48(a_ptr),a6       
-    FLDD    56(a_ptr),a7       
-
-	SQR_ADD_C a0L,a0R,c1,c2,c3
-	STD     c1,0(r_ptr)          ; r[0] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
-	STD     c2,8(r_ptr)          ; r[1] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C a1L,a1R,c3,c1,c2
-	SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
-	STD     c3,16(r_ptr)            ; r[2] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
-	SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
-	STD     c1,24(r_ptr)           ; r[3] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C a2L,a2R,c2,c3,c1
-	SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
-	SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
-	STD     c2,32(r_ptr)          ; r[4] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
-	SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
-	SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
-	STD     c3,40(r_ptr)          ; r[5] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C a3L,a3R,c1,c2,c3
-	SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
-	SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
-	SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
-	STD     c1,48(r_ptr)          ; r[6] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
-	SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
-	SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
-	SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
-	STD     c2,56(r_ptr)          ; r[7] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C a4L,a4R,c3,c1,c2
-	SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
-	SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
-	SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
-	STD     c3,64(r_ptr)          ; r[8] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
-	SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
-	SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
-	STD     c1,72(r_ptr)          ; r[9] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C a5L,a5R,c2,c3,c1
-	SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
-	SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
-	STD     c2,80(r_ptr)          ; r[10] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
-	SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
-	STD     c3,88(r_ptr)          ; r[11] = c3;
-	COPY    %r0,c3
-	
-	SQR_ADD_C a6L,a6R,c1,c2,c3
-	SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
-	STD     c1,96(r_ptr)          ; r[12] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
-	STD     c2,104(r_ptr)         ; r[13] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C a7L,a7R,c3,c1,c2
-	STD     c3, 112(r_ptr)       ; r[14] = c3
-	STD     c1, 120(r_ptr)       ; r[15] = c1
-
-    .EXIT
-    LDD     -104(%sp),%r6        ; restore r6
-    LDD     -112(%sp),%r5        ; restore r5
-    LDD     -120(%sp),%r4        ; restore r4
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3
-
-	.PROCEND	
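-
-; The sequence above is a comba (column-wise) square: result column k
-; collects every a[i]*a[j] with i + j == k, using SQR_ADD_C for the
-; diagonal and SQR_ADD_C2 for the doubled cross terms, while c1,c2,c3
-; rotate as the column accumulator.  Reference schedule in C (sketch
-; only; bn_sqr_comba8_ref is hypothetical and builds on the sketches
-; above):
-;
-;   void bn_sqr_comba8_ref(BN_ULONG *r, const BN_ULONG *a)
-;   {
-;       BN_ULONG c[3] = { 0, 0, 0 };    /* rotating c1,c2,c3        */
-;       int i, j, k;
-;       for (k = 0; k < 15; k++) {
-;           for (i = 0; i <= k && i < 8; i++) {
-;               j = k - i;
-;               if (j >= 8 || i > j)
-;                   continue;           /* each pair counted once   */
-;               if (i == j)
-;                   sqr_add_c(a[i],
-;                             &c[k % 3], &c[(k + 1) % 3], &c[(k + 2) % 3]);
-;               else
-;                   sqr_add_c2(a[i], a[j],
-;                              &c[k % 3], &c[(k + 1) % 3], &c[(k + 2) % 3]);
-;           }
-;           r[k] = c[k % 3];            /* emit column, clear slot  */
-;           c[k % 3] = 0;
-;       }
-;       r[15] = c[15 % 3];              /* final carry word         */
-;   }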
-
-;-----------------------------------------------------------------------------
-;
-;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
-; arg0 = r_ptr
-; arg1 = a_ptr
-;
-
-bn_sqr_comba4
-	.proc
-	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-    STD     %r3,0(%sp)          ; save r3
-    STD     %r4,8(%sp)          ; save r4
-    STD     %r5,16(%sp)         ; save r5
-    STD     %r6,24(%sp)         ; save r6
-
-	;
-	; Zero out carries
-	;
-	COPY     %r0,c1
-	COPY     %r0,c2
-	COPY     %r0,c3
-
-	LDO      128(%sp),%sp       ; bump stack
-    DEPDI,Z -1,32,33,high_mask   ; Create Mask 0xffffffff80000000L
-    DEPDI,Z  1,31,1,high_one     ; Create Value  1 << 32
-
-	;
-	; Load up all of the values we are going to use
-	;
-    FLDD     0(a_ptr),a0       
-    FLDD     8(a_ptr),a1       
-    FLDD    16(a_ptr),a2       
-    FLDD    24(a_ptr),a3       
-
-	SQR_ADD_C a0L,a0R,c1,c2,c3
-
-	STD     c1,0(r_ptr)          ; r[0] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
-
-	STD     c2,8(r_ptr)          ; r[1] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C a1L,a1R,c3,c1,c2
-	SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
-
-	STD     c3,16(r_ptr)            ; r[2] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
-	SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
-
-	STD     c1,24(r_ptr)           ; r[3] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C a2L,a2R,c2,c3,c1
-	SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
-
-	STD     c2,32(r_ptr)           ; r[4] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
-	STD     c3,40(r_ptr)           ; r[5] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C a3L,a3R,c1,c2,c3
-	STD     c1,48(r_ptr)           ; r[6] = c1;
-	STD     c2,56(r_ptr)           ; r[7] = c2;
-
-    .EXIT
-    LDD     -104(%sp),%r6        ; restore r6
-    LDD     -112(%sp),%r5        ; restore r5
-    LDD     -120(%sp),%r4        ; restore r4
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3
-
-	.PROCEND	
-
-
-;---------------------------------------------------------------------------
-
-MUL_ADD_C  .macro  A0L,A0R,B0L,B0R,C1,C2,C3
-    XMPYU   A0L,B0R,ftemp1        ; m1 = bl*ht
-    FSTD    ftemp1,-16(%sp)       ;
-    XMPYU   A0R,B0L,ftemp2        ; m = bh*lt
-    FSTD    ftemp2,-8(%sp)        ;
-    XMPYU   A0R,B0R,ftemp3        ; lt = bl*lt
-    FSTD    ftemp3,-32(%sp)
-    XMPYU   A0L,B0L,ftemp4        ; ht = bh*ht
-    FSTD    ftemp4,-24(%sp)       ;
-
-    LDD     -8(%sp),m             ; r21 = m
-    LDD     -16(%sp),m1           ; r19 = m1
-    ADD,L   m,m1,m                ; m+m1
-
-    DEPD,Z  m,31,32,temp3         ; (m+m1<<32)
-    LDD     -24(%sp),ht           ; r24 = ht
-
-    CMPCLR,*>>= m,m1,%r0          ; if (m < m1)
-    ADD,L   ht,high_one,ht        ; ht+=high_one
-
-    EXTRD,U m,31,32,temp1         ; m >> 32
-    LDD     -32(%sp),lt           ; lt
-    ADD,L   ht,temp1,ht           ; ht+= m>>32
-    ADD     lt,temp3,lt           ; lt = lt+m1
-    ADD,DC  ht,%r0,ht             ; ht++
-
-    ADD     C1,lt,C1              ; c1=c1+lt
-    ADD,DC  ht,%r0,ht             ; add in carry (ht++)
-
-    ADD     C2,ht,C2              ; c2 = c2 + ht
-    ADD,DC  C3,%r0,C3             ; add in carry (c3++)
-.endm
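-
-; MUL_ADD_C folds the 128-bit product a*b into c3:c2:c1, building it
-; from four XMPYU 32x32 multiplies.  A C sketch of the macro body
-; (reference only; mul_add_c is a hypothetical name, BN_ULONG a
-; 64-bit unsigned type):
-;
-;   void mul_add_c(BN_ULONG a, BN_ULONG b,
-;                  BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
-;   {
-;       BN_ULONG aL = a & 0xffffffffUL, aH = a >> 32;
-;       BN_ULONG bL = b & 0xffffffffUL, bH = b >> 32;
-;       BN_ULONG lt = aL * bL, ht = aH * bH;
-;       BN_ULONG m1 = aH * bL, m = aL * bH;
-;       m += m1;
-;       if (m < m1) ht += 1UL << 32;    /* cross sum wrapped: +2^64 */
-;       ht += m >> 32;                  /* high half of cross terms */
-;       BN_ULONG t = lt + (m << 32);    /* low half of cross terms  */
-;       ht += (t < lt);                 /* carry into ht            */
-;       lt  = t;
-;       *c1 += lt;  ht  += (*c1 < lt);  /* ht never wraps here      */
-;       *c2 += ht;  *c3 += (*c2 < ht);
-;   }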
-
-
-;
-;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
-; arg0 = r_ptr
-; arg1 = a_ptr
-; arg2 = b_ptr
-;
-
-bn_mul_comba8
-	.proc
-	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3
-    STD     %r4,8(%sp)          ; save r4
-    STD     %r5,16(%sp)         ; save r5
-    STD     %r6,24(%sp)         ; save r6
-    FSTD    %fr12,32(%sp)       ; save fr12
-    FSTD    %fr13,40(%sp)       ; save fr13
-
-	;
-	; Zero out carries
-	;
-	COPY     %r0,c1
-	COPY     %r0,c2
-	COPY     %r0,c3
-
-	LDO      128(%sp),%sp       ; bump stack
-    DEPDI,Z  1,31,1,high_one     ; Create Value  1 << 32
-
-	;
-	; Load up all of the values we are going to use
-	;
-    FLDD      0(a_ptr),a0       
-    FLDD      8(a_ptr),a1       
-    FLDD     16(a_ptr),a2       
-    FLDD     24(a_ptr),a3       
-    FLDD     32(a_ptr),a4       
-    FLDD     40(a_ptr),a5       
-    FLDD     48(a_ptr),a6       
-    FLDD     56(a_ptr),a7       
-
-    FLDD      0(b_ptr),b0       
-    FLDD      8(b_ptr),b1       
-    FLDD     16(b_ptr),b2       
-    FLDD     24(b_ptr),b3       
-    FLDD     32(b_ptr),b4       
-    FLDD     40(b_ptr),b5       
-    FLDD     48(b_ptr),b6       
-    FLDD     56(b_ptr),b7       
-
-	MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
-	STD       c1,0(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
-	STD       c2,8(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
-	MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
-	MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
-	STD       c3,16(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
-	MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
-	MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
-	MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
-	STD       c1,24(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
-	MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
-	MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
-	STD       c2,32(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
-	MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
-	MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
-	MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
-	MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
-	MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
-	STD       c3,40(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
-	MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
-	MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
-	MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
-	MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
-	MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
-	MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
-	STD       c1,48(r_ptr)
-	COPY      %r0,c1
-	
-	MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
-	MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
-	MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
-	MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
-	MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
-	MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
-	STD       c2,56(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
-	MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
-	MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
-	MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
-	MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
-	MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
-	MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
-	STD       c3,64(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
-	MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
-	MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
-	MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
-	MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
-	MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
-	STD       c1,72(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
-	MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
-	MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
-	MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
-	MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
-	STD       c2,80(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
-	MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
-	MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
-	MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
-	STD       c3,88(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
-	MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
-	MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
-	STD       c1,96(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
-	MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
-	STD       c2,104(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
-	STD       c3,112(r_ptr)
-	STD       c1,120(r_ptr)
-
-    .EXIT
-    FLDD    -88(%sp),%fr13 
-    FLDD    -96(%sp),%fr12 
-    LDD     -104(%sp),%r6        ; restore r6
-    LDD     -112(%sp),%r5        ; restore r5
-    LDD     -120(%sp),%r4        ; restore r4
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3
-
-	.PROCEND	
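-
-; Same column-wise (comba) schedule as the squaring routines, but
-; with two operands and no doubled terms.  Reference schedule in C
-; (sketch only; mul_add_c is the hypothetical counterpart of the
-; macro above):
-;
-;   void bn_mul_comba8_ref(BN_ULONG *r, const BN_ULONG *a,
-;                          const BN_ULONG *b)
-;   {
-;       BN_ULONG c[3] = { 0, 0, 0 };    /* rotating c1,c2,c3        */
-;       int i, k;
-;       for (k = 0; k < 15; k++) {
-;           for (i = (k < 8 ? 0 : k - 7); i <= k && i < 8; i++)
-;               mul_add_c(a[i], b[k - i],
-;                         &c[k % 3], &c[(k + 1) % 3], &c[(k + 2) % 3]);
-;           r[k] = c[k % 3];            /* emit column, clear slot  */
-;           c[k % 3] = 0;
-;       }
-;       r[15] = c[15 % 3];              /* final carry word         */
-;   }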
-
-;-----------------------------------------------------------------------------
-;
-;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
-; arg0 = r_ptr
-; arg1 = a_ptr
-; arg2 = b_ptr
-;
-
-bn_mul_comba4
-	.proc
-	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3
-    STD     %r4,8(%sp)          ; save r4
-    STD     %r5,16(%sp)         ; save r5
-    STD     %r6,24(%sp)         ; save r6
-    FSTD    %fr12,32(%sp)       ; save fr12
-    FSTD    %fr13,40(%sp)       ; save fr13
-
-	;
-	; Zero out carries
-	;
-	COPY     %r0,c1
-	COPY     %r0,c2
-	COPY     %r0,c3
-
-	LDO      128(%sp),%sp       ; bump stack
-    DEPDI,Z  1,31,1,high_one     ; Create Value  1 << 32
-
-	;
-	; Load up all of the values we are going to use
-	;
-    FLDD      0(a_ptr),a0       
-    FLDD      8(a_ptr),a1       
-    FLDD     16(a_ptr),a2       
-    FLDD     24(a_ptr),a3       
-
-    FLDD      0(b_ptr),b0       
-    FLDD      8(b_ptr),b1       
-    FLDD     16(b_ptr),b2       
-    FLDD     24(b_ptr),b3       
-
-	MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
-	STD       c1,0(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
-	STD       c2,8(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
-	MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
-	MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
-	STD       c3,16(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
-	MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
-	MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
-	MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
-	STD       c1,24(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
-	STD       c2,32(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
-	MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
-	STD       c3,40(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
-	STD       c1,48(r_ptr)
-	STD       c2,56(r_ptr)
-
-    .EXIT
-    FLDD    -88(%sp),%fr13 
-    FLDD    -96(%sp),%fr12 
-    LDD     -104(%sp),%r6        ; restore r6
-    LDD     -112(%sp),%r5        ; restore r5
-    LDD     -120(%sp),%r4        ; restore r4
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3
-
-	.PROCEND	
-
-
-;--- not PIC	.SPACE	$TEXT$
-;--- not PIC	.SUBSPA	$CODE$
-;--- not PIC	.SPACE	$PRIVATE$,SORT=16
-;--- not PIC	.IMPORT	$global$,DATA
-;--- not PIC	.SPACE	$TEXT$
-;--- not PIC	.SUBSPA	$CODE$
-;--- not PIC	.SUBSPA	$LIT$,ACCESS=0x2c
-;--- not PIC	C$7
-;--- not PIC	.ALIGN	8
-;--- not PIC	.STRINGZ	"Division would overflow (%d)\n"
-	.END

+ 0 - 1605
drivers/builtin_openssl2/crypto/bn/asm/pa-risc2W.s

@@ -1,1605 +0,0 @@
-;
-; PA-RISC 64-bit implementation of bn_asm code
-;
-; This code is approximately 2x faster than the C version
-; for RSA/DSA.
-;
-; See http://devresource.hp.com/  for more details on the PA-RISC
-; architecture.  Also see the book "PA-RISC 2.0 Architecture"
-; by Gerry Kane for information on the instruction set architecture.
-;
-; Code written by Chris Ruemmler (with some help from the HP C
-; compiler).
-;
-; The code compiles with HP's assembler
-;
-
-	.level	2.0W
-	.space	$TEXT$
-	.subspa	$CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
-
-;
-; Global Register definitions used for the routines.
-;
-; Some information about HP's runtime architecture for 64-bits.
-;
-; "Caller save" means the calling function must save the register
-; if it wants the register to be preserved.
-; "Callee save" means if a function uses the register, it must save
-; the value before using it.
-;
-; For the floating point registers 
-;
-;    "caller save" registers: fr4-fr11, fr22-fr31
-;    "callee save" registers: fr12-fr21
-;    "special" registers: fr0-fr3 (status and exception registers)
-;
-; For the integer registers
-;     value zero             :  r0
-;     "caller save" registers: r1,r19-r26
-;     "callee save" registers: r3-r18
-;     return register        :  r2  (rp)
-;     return values          ; r28  (ret0,ret1)
-;     Stack pointer          ; r30  (sp) 
-;     global data pointer    ; r27  (dp)
-;     argument pointer       ; r29  (ap)
-;     millicode return ptr   ; r31  (also a caller save register)
-
-
-;
-; Arguments to the routines
-;
-r_ptr       .reg %r26
-a_ptr       .reg %r25
-b_ptr       .reg %r24
-num         .reg %r24
-w           .reg %r23
-n           .reg %r23
-
-
-;
-; Globals used in some routines
-;
-
-top_overflow .reg %r29
-high_mask    .reg %r22    ; value 0xffffffff80000000L
-
-
-;------------------------------------------------------------------------------
-;
-; bn_mul_add_words
-;
-;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr, 
-;								int num, BN_ULONG w)
-;
-; arg0 = r_ptr
-; arg1 = a_ptr
-; arg2 = num
-; arg3 = w
-;
-; Local register definitions
-;
-
-fm1          .reg %fr22
-fm           .reg %fr23
-ht_temp      .reg %fr24
-ht_temp_1    .reg %fr25
-lt_temp      .reg %fr26
-lt_temp_1    .reg %fr27
-fm1_1        .reg %fr28
-fm_1         .reg %fr29
-
-fw_h         .reg %fr7L
-fw_l         .reg %fr7R
-fw           .reg %fr7
-
-fht_0        .reg %fr8L
-flt_0        .reg %fr8R
-t_float_0    .reg %fr8
-
-fht_1        .reg %fr9L
-flt_1        .reg %fr9R
-t_float_1    .reg %fr9
-
-tmp_0        .reg %r31
-tmp_1        .reg %r21
-m_0          .reg %r20 
-m_1          .reg %r19 
-ht_0         .reg %r1  
-ht_1         .reg %r3
-lt_0         .reg %r4
-lt_1         .reg %r5
-m1_0         .reg %r6 
-m1_1         .reg %r7 
-rp_val       .reg %r8
-rp_val_1     .reg %r9
-
-bn_mul_add_words
-	.export	bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
-	.proc
-	.callinfo frame=128
-    .entry
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3  
-    STD     %r4,8(%sp)          ; save r4  
-	NOP                         ; Needed to make the loop 16-byte aligned
-	NOP                         ; Needed to make the loop 16-byte aligned
-
-    STD     %r5,16(%sp)         ; save r5  
-    STD     %r6,24(%sp)         ; save r6  
-    STD     %r7,32(%sp)         ; save r7  
-    STD     %r8,40(%sp)         ; save r8  
-
-    STD     %r9,48(%sp)         ; save r9  
-    COPY    %r0,%ret0           ; return 0 by default
-    DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32    
-	STD     w,56(%sp)           ; store w on stack
-
-    CMPIB,>= 0,num,bn_mul_add_words_exit  ; if (num <= 0) then exit
-	LDO     128(%sp),%sp       ; bump stack
-
-	;
-	; The loop is unrolled twice, so if there is only 1 number
-    ; then go straight to the cleanup code.
-	;
-	CMPIB,= 1,num,bn_mul_add_words_single_top
-	FLDD    -72(%sp),fw     ; load up w into fp register fw (fw_h/fw_l)
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-	; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
-    ; two 32-bit multiplies can be issued per cycle.
-    ; 
-bn_mul_add_words_unroll2
-
-    FLDD    0(a_ptr),t_float_0       ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    FLDD    8(a_ptr),t_float_1       ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    LDD     0(r_ptr),rp_val          ; rp[0]
-    LDD     8(r_ptr),rp_val_1        ; rp[1]
-
-    XMPYU   fht_0,fw_l,fm1           ; m1[0] = fht_0*fw_l
-    XMPYU   fht_1,fw_l,fm1_1         ; m1[1] = fht_1*fw_l
-    FSTD    fm1,-16(%sp)             ; -16(sp) = m1[0]
-    FSTD    fm1_1,-48(%sp)           ; -48(sp) = m1[1]
-
-    XMPYU   flt_0,fw_h,fm            ; m[0] = flt_0*fw_h
-    XMPYU   flt_1,fw_h,fm_1          ; m[1] = flt_1*fw_h
-    FSTD    fm,-8(%sp)               ; -8(sp) = m[0]
-    FSTD    fm_1,-40(%sp)            ; -40(sp) = m[1]
-
-    XMPYU   fht_0,fw_h,ht_temp       ; ht_temp   = fht_0*fw_h
-    XMPYU   fht_1,fw_h,ht_temp_1     ; ht_temp_1 = fht_1*fw_h
-    FSTD    ht_temp,-24(%sp)         ; -24(sp)   = ht_temp
-    FSTD    ht_temp_1,-56(%sp)       ; -56(sp)   = ht_temp_1
-
-    XMPYU   flt_0,fw_l,lt_temp       ; lt_temp = lt*fw_l
-    XMPYU   flt_1,fw_l,lt_temp_1     ; lt_temp = lt*fw_l
-    FSTD    lt_temp,-32(%sp)         ; -32(sp) = lt_temp 
-    FSTD    lt_temp_1,-64(%sp)       ; -64(sp) = lt_temp_1 
-
-    LDD     -8(%sp),m_0              ; m[0] 
-    LDD     -40(%sp),m_1             ; m[1]
-    LDD     -16(%sp),m1_0            ; m1[0]
-    LDD     -48(%sp),m1_1            ; m1[1]
-
-    LDD     -24(%sp),ht_0            ; ht[0]
-    LDD     -56(%sp),ht_1            ; ht[1]
-    ADD,L   m1_0,m_0,tmp_0           ; tmp_0 = m[0] + m1[0]; 
-    ADD,L   m1_1,m_1,tmp_1           ; tmp_1 = m[1] + m1[1]; 
-
-    LDD     -32(%sp),lt_0            
-    LDD     -64(%sp),lt_1            
-    CMPCLR,*>>= tmp_0,m1_0, %r0      ; if (m[0] < m1[0])
-    ADD,L   ht_0,top_overflow,ht_0   ; ht[0] += (1<<32)
-
-    CMPCLR,*>>= tmp_1,m1_1,%r0       ; if (m[1] < m1[1])
-    ADD,L   ht_1,top_overflow,ht_1   ; ht[1] += (1<<32)
-    EXTRD,U tmp_0,31,32,m_0          ; m[0]>>32  
-    DEPD,Z  tmp_0,31,32,m1_0         ; m1[0] = m[0]<<32 
-
-    EXTRD,U tmp_1,31,32,m_1          ; m[1]>>32  
-    DEPD,Z  tmp_1,31,32,m1_1         ; m1[1] = m[1]<<32 
-    ADD,L   ht_0,m_0,ht_0            ; ht[0]+= (m[0]>>32)
-    ADD,L   ht_1,m_1,ht_1            ; ht[1]+= (m[1]>>32)
-
-    ADD     lt_0,m1_0,lt_0           ; lt[0] = lt[0]+m1[0];
-	ADD,DC  ht_0,%r0,ht_0            ; ht[0]++
-    ADD     lt_1,m1_1,lt_1           ; lt[1] = lt[1]+m1[1];
-    ADD,DC  ht_1,%r0,ht_1            ; ht[1]++
-
-    ADD    %ret0,lt_0,lt_0           ; lt[0] = lt[0] + c;
-	ADD,DC  ht_0,%r0,ht_0            ; ht[0]++
-    ADD     lt_0,rp_val,lt_0         ; lt[0] = lt[0]+rp[0]
-    ADD,DC  ht_0,%r0,ht_0            ; ht[0]++
-
-	LDO    -2(num),num               ; num = num - 2;
-    ADD     ht_0,lt_1,lt_1           ; lt[1] = lt[1] + ht_0 (c);
-    ADD,DC  ht_1,%r0,ht_1            ; ht[1]++
-    STD     lt_0,0(r_ptr)            ; rp[0] = lt[0]
-
-    ADD     lt_1,rp_val_1,lt_1       ; lt[1] = lt[1]+rp[1]
-    ADD,DC  ht_1,%r0,%ret0           ; ht[1]++
-    LDO     16(a_ptr),a_ptr          ; a_ptr += 2
-
-    STD     lt_1,8(r_ptr)            ; rp[1] = lt[1]
-	CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
-    LDO     16(r_ptr),r_ptr          ; r_ptr += 2
-
-    CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
-
-	;
-	; Top of loop aligned on 64-byte boundary
-	;
-bn_mul_add_words_single_top
-    FLDD    0(a_ptr),t_float_0        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    LDD     0(r_ptr),rp_val           ; rp[0]
-    LDO     8(a_ptr),a_ptr            ; a_ptr++
-    XMPYU   fht_0,fw_l,fm1            ; m1 = ht*fw_l
-    FSTD    fm1,-16(%sp)              ; -16(sp) = m1
-    XMPYU   flt_0,fw_h,fm             ; m = lt*fw_h
-    FSTD    fm,-8(%sp)                ; -8(sp) = m
-    XMPYU   fht_0,fw_h,ht_temp        ; ht_temp = ht*fw_h
-    FSTD    ht_temp,-24(%sp)          ; -24(sp) = ht
-    XMPYU   flt_0,fw_l,lt_temp        ; lt_temp = lt*fw_l
-    FSTD    lt_temp,-32(%sp)          ; -32(sp) = lt 
-
-    LDD     -8(%sp),m_0               
-    LDD    -16(%sp),m1_0              ; m1 = temp1 
-    ADD,L   m_0,m1_0,tmp_0            ; tmp_0 = m + m1; 
-    LDD     -24(%sp),ht_0             
-    LDD     -32(%sp),lt_0             
-
-    CMPCLR,*>>= tmp_0,m1_0,%r0        ; if (m < m1)
-    ADD,L   ht_0,top_overflow,ht_0    ; ht += (1<<32)
-
-    EXTRD,U tmp_0,31,32,m_0           ; m>>32  
-    DEPD,Z  tmp_0,31,32,m1_0          ; m1 = m<<32 
-
-    ADD,L   ht_0,m_0,ht_0             ; ht+= (m>>32)
-    ADD     lt_0,m1_0,tmp_0           ; tmp_0 = lt+m1;
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-    ADD     %ret0,tmp_0,lt_0          ; lt = lt + c;
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-    ADD     lt_0,rp_val,lt_0          ; lt = lt+rp[0]
-    ADD,DC  ht_0,%r0,%ret0            ; ht++
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt
-
-bn_mul_add_words_exit
-    .EXIT
-    LDD     -80(%sp),%r9              ; restore r9  
-    LDD     -88(%sp),%r8              ; restore r8  
-    LDD     -96(%sp),%r7              ; restore r7  
-    LDD     -104(%sp),%r6             ; restore r6  
-    LDD     -112(%sp),%r5             ; restore r5  
-    LDD     -120(%sp),%r4             ; restore r4  
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3             ; restore r3
-	.PROCEND	;in=23,24,25,26,29;out=28;
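-
-; Semantics of the routine above in C (reference only;
-; bn_mul_add_words_ref is a hypothetical name, and the 128-bit
-; product stands in for the XMPYU 32x32 partial products):
-;
-;   BN_ULONG bn_mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
-;                                 int num, BN_ULONG w)
-;   {
-;       BN_ULONG c = 0;                       /* running carry      */
-;       while (num-- > 0) {
-;           unsigned __int128 t = (unsigned __int128)(*ap++) * w;
-;           t += c;                           /* add carry in       */
-;           t += *rp;                         /* add existing word  */
-;           *rp++ = (BN_ULONG)t;
-;           c = (BN_ULONG)(t >> 64);          /* carry out          */
-;       }
-;       return c;
-;   }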
-
-;----------------------------------------------------------------------------
-;
-;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
-;
-; arg0 = rp
-; arg1 = ap
-; arg2 = num
-; arg3 = w
-
-bn_mul_words
-	.proc
-	.callinfo frame=128
-    .entry
-	.EXPORT	bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3  
-    STD     %r4,8(%sp)          ; save r4  
-    STD     %r5,16(%sp)         ; save r5  
-    STD     %r6,24(%sp)         ; save r6  
-
-    STD     %r7,32(%sp)         ; save r7  
-    COPY    %r0,%ret0           ; return 0 by default
-    DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32    
-	STD     w,56(%sp)           ; w on stack
-
-    CMPIB,>= 0,num,bn_mul_words_exit
-	LDO     128(%sp),%sp       ; bump stack
-
-	;
-	; If only one word remains, just do the cleanup
-	;
-	CMPIB,= 1,num,bn_mul_words_single_top
-	FLDD    -72(%sp),fw     ; load up w into fp register fw (fw_h/fw_l)
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-	; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
-    ; two 32-bit multiplies can be issued per cycle.
-    ; 
-bn_mul_words_unroll2
-
-    FLDD    0(a_ptr),t_float_0        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    FLDD    8(a_ptr),t_float_1        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-    XMPYU   fht_0,fw_l,fm1            ; m1[0] = fht_0*fw_l
-    XMPYU   fht_1,fw_l,fm1_1          ; m1[1] = ht*fw_l
-
-    FSTD    fm1,-16(%sp)              ; -16(sp) = m1
-    FSTD    fm1_1,-48(%sp)            ; -48(sp) = m1
-    XMPYU   flt_0,fw_h,fm             ; m = lt*fw_h
-    XMPYU   flt_1,fw_h,fm_1           ; m = lt*fw_h
-
-    FSTD    fm,-8(%sp)                ; -8(sp) = m
-    FSTD    fm_1,-40(%sp)             ; -40(sp) = m
-    XMPYU   fht_0,fw_h,ht_temp        ; ht_temp = fht_0*fw_h
-    XMPYU   fht_1,fw_h,ht_temp_1      ; ht_temp = ht*fw_h
-
-    FSTD    ht_temp,-24(%sp)          ; -24(sp) = ht
-    FSTD    ht_temp_1,-56(%sp)        ; -56(sp) = ht
-    XMPYU   flt_0,fw_l,lt_temp        ; lt_temp = lt*fw_l
-    XMPYU   flt_1,fw_l,lt_temp_1      ; lt_temp = lt*fw_l
-
-    FSTD    lt_temp,-32(%sp)          ; -32(sp) = lt 
-    FSTD    lt_temp_1,-64(%sp)        ; -64(sp) = lt 
-    LDD     -8(%sp),m_0               
-    LDD     -40(%sp),m_1              
-
-    LDD    -16(%sp),m1_0              
-    LDD    -48(%sp),m1_1              
-    LDD     -24(%sp),ht_0             
-    LDD     -56(%sp),ht_1             
-
-    ADD,L   m1_0,m_0,tmp_0            ; tmp_0 = m + m1; 
-    ADD,L   m1_1,m_1,tmp_1            ; tmp_1 = m + m1; 
-    LDD     -32(%sp),lt_0             
-    LDD     -64(%sp),lt_1             
-
-    CMPCLR,*>>= tmp_0,m1_0, %r0       ; if (m < m1)
-    ADD,L   ht_0,top_overflow,ht_0    ; ht += (1<<32)
-    CMPCLR,*>>= tmp_1,m1_1,%r0        ; if (m < m1)
-    ADD,L   ht_1,top_overflow,ht_1    ; ht += (1<<32)
-
-    EXTRD,U tmp_0,31,32,m_0           ; m>>32  
-    DEPD,Z  tmp_0,31,32,m1_0          ; m1 = m<<32 
-    EXTRD,U tmp_1,31,32,m_1           ; m>>32  
-    DEPD,Z  tmp_1,31,32,m1_1          ; m1 = m<<32 
-
-    ADD,L   ht_0,m_0,ht_0             ; ht+= (m>>32)
-    ADD,L   ht_1,m_1,ht_1             ; ht+= (m>>32)
-    ADD     lt_0,m1_0,lt_0            ; lt = lt+m1;
-	ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    ADD     lt_1,m1_1,lt_1            ; lt = lt+m1;
-    ADD,DC  ht_1,%r0,ht_1             ; ht++
-    ADD    %ret0,lt_0,lt_0            ; lt = lt + c (ret0);
-	ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    ADD     ht_0,lt_1,lt_1            ; lt = lt + c (ht_0)
-    ADD,DC  ht_1,%r0,ht_1             ; ht++
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt
-    STD     lt_1,8(r_ptr)             ; rp[1] = lt
-
-	COPY    ht_1,%ret0                ; carry = ht
-	LDO    -2(num),num                ; num = num - 2;
-    LDO     16(a_ptr),a_ptr           ; ap += 2
-	CMPIB,<= 2,num,bn_mul_words_unroll2
-    LDO     16(r_ptr),r_ptr           ; rp += 2
-
-    CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
-
-	;
-	; Top of loop aligned on 64-byte boundary
-	;
-bn_mul_words_single_top
-    FLDD    0(a_ptr),t_float_0        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-
-    XMPYU   fht_0,fw_l,fm1            ; m1 = ht*fw_l
-    FSTD    fm1,-16(%sp)              ; -16(sp) = m1
-    XMPYU   flt_0,fw_h,fm             ; m = lt*fw_h
-    FSTD    fm,-8(%sp)                ; -8(sp) = m
-    XMPYU   fht_0,fw_h,ht_temp        ; ht_temp = ht*fw_h
-    FSTD    ht_temp,-24(%sp)          ; -24(sp) = ht
-    XMPYU   flt_0,fw_l,lt_temp        ; lt_temp = lt*fw_l
-    FSTD    lt_temp,-32(%sp)          ; -32(sp) = lt 
-
-    LDD     -8(%sp),m_0               
-    LDD    -16(%sp),m1_0              
-    ADD,L   m_0,m1_0,tmp_0            ; tmp_0 = m + m1; 
-    LDD     -24(%sp),ht_0             
-    LDD     -32(%sp),lt_0             
-
-    CMPCLR,*>>= tmp_0,m1_0,%r0        ; if (m < m1)
-    ADD,L   ht_0,top_overflow,ht_0    ; ht += (1<<32)
-
-    EXTRD,U tmp_0,31,32,m_0           ; m>>32  
-    DEPD,Z  tmp_0,31,32,m1_0          ; m1 = m<<32 
-
-    ADD,L   ht_0,m_0,ht_0             ; ht+= (m>>32)
-    ADD     lt_0,m1_0,lt_0            ; lt= lt+m1;
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    ADD     %ret0,lt_0,lt_0           ; lt = lt + c;
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    COPY    ht_0,%ret0                ; copy carry
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt
-
-bn_mul_words_exit
-    .EXIT
-    LDD     -96(%sp),%r7              ; restore r7  
-    LDD     -104(%sp),%r6             ; restore r6  
-    LDD     -112(%sp),%r5             ; restore r5  
-    LDD     -120(%sp),%r4             ; restore r4  
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3             ; restore r3
-	.PROCEND	;in=23,24,25,26,29;out=28;
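-
-; Semantics in C (reference only; bn_mul_words_ref is a hypothetical
-; name, the 128-bit product standing in for the XMPYU partials):
-;
-;   BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
-;                             int num, BN_ULONG w)
-;   {
-;       BN_ULONG c = 0;                       /* running carry      */
-;       while (num-- > 0) {
-;           unsigned __int128 t = (unsigned __int128)(*ap++) * w + c;
-;           *rp++ = (BN_ULONG)t;              /* low word           */
-;           c = (BN_ULONG)(t >> 64);          /* carry out          */
-;       }
-;       return c;
-;   }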
-
-;----------------------------------------------------------------------------
-;
-;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
-;
-; arg0 = rp
-; arg1 = ap
-; arg2 = num
-;
-
-bn_sqr_words
-	.proc
-	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3  
-    STD     %r4,8(%sp)          ; save r4  
-	NOP
-    STD     %r5,16(%sp)         ; save r5  
-
-    CMPIB,>= 0,num,bn_sqr_words_exit
-	LDO     128(%sp),%sp       ; bump stack
-
-	;
-	; If only one word remains, go straight to cleanup
-	;
-	CMPIB,= 1,num,bn_sqr_words_single_top
-    DEPDI,Z -1,32,33,high_mask   ; Create Mask 0xffffffff80000000L
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-
-bn_sqr_words_unroll2
-    FLDD    0(a_ptr),t_float_0        ; a[0]
-    FLDD    8(a_ptr),t_float_1        ; a[1]
-    XMPYU   fht_0,flt_0,fm            ; m[0]
-    XMPYU   fht_1,flt_1,fm_1          ; m[1]
-
-    FSTD    fm,-24(%sp)               ; store m[0]
-    FSTD    fm_1,-56(%sp)             ; store m[1]
-    XMPYU   flt_0,flt_0,lt_temp       ; lt[0]
-    XMPYU   flt_1,flt_1,lt_temp_1     ; lt[1]
-
-    FSTD    lt_temp,-16(%sp)          ; store lt[0]
-    FSTD    lt_temp_1,-48(%sp)        ; store lt[1]
-    XMPYU   fht_0,fht_0,ht_temp       ; ht[0]
-    XMPYU   fht_1,fht_1,ht_temp_1     ; ht[1]
-
-    FSTD    ht_temp,-8(%sp)           ; store ht[0]
-    FSTD    ht_temp_1,-40(%sp)        ; store ht[1]
-    LDD     -24(%sp),m_0             
-    LDD     -56(%sp),m_1              
-
-    AND     m_0,high_mask,tmp_0       ; m[0] & Mask
-    AND     m_1,high_mask,tmp_1       ; m[1] & Mask
-    DEPD,Z  m_0,30,31,m_0             ; m[0] << 32+1
-    DEPD,Z  m_1,30,31,m_1             ; m[1] << 32+1
-
-    LDD     -16(%sp),lt_0        
-    LDD     -48(%sp),lt_1        
-    EXTRD,U tmp_0,32,33,tmp_0         ; tmp_0 = m[0]&Mask >> 32-1
-    EXTRD,U tmp_1,32,33,tmp_1         ; tmp_1 = m[1]&Mask >> 32-1
-
-    LDD     -8(%sp),ht_0            
-    LDD     -40(%sp),ht_1           
-    ADD,L   ht_0,tmp_0,ht_0           ; ht[0] += tmp_0
-    ADD,L   ht_1,tmp_1,ht_1           ; ht[1] += tmp_1
-
-    ADD     lt_0,m_0,lt_0             ; lt = lt+m
-    ADD,DC  ht_0,%r0,ht_0             ; ht[0]++
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt[0]
-    STD     ht_0,8(r_ptr)             ; rp[1] = ht[0]
-
-    ADD     lt_1,m_1,lt_1             ; lt = lt+m
-    ADD,DC  ht_1,%r0,ht_1             ; ht[1]++
-    STD     lt_1,16(r_ptr)            ; rp[2] = lt[1]
-    STD     ht_1,24(r_ptr)            ; rp[3] = ht[1]
-
-	LDO    -2(num),num                ; num = num - 2;
-    LDO     16(a_ptr),a_ptr           ; ap += 2
-	CMPIB,<= 2,num,bn_sqr_words_unroll2
-    LDO     32(r_ptr),r_ptr           ; rp += 4
-
-    CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
-
-	;
-	; Top of loop aligned on 64-byte boundary
-	;
-bn_sqr_words_single_top
-    FLDD    0(a_ptr),t_float_0        ; load up 64-bit value (fr8L) ht(L)/lt(R)
-
-    XMPYU   fht_0,flt_0,fm            ; m
-    FSTD    fm,-24(%sp)               ; store m
-
-    XMPYU   flt_0,flt_0,lt_temp       ; lt
-    FSTD    lt_temp,-16(%sp)          ; store lt
-
-    XMPYU   fht_0,fht_0,ht_temp       ; ht
-    FSTD    ht_temp,-8(%sp)           ; store ht
-
-    LDD     -24(%sp),m_0              ; load m
-    AND     m_0,high_mask,tmp_0       ; m & Mask
-    DEPD,Z  m_0,30,31,m_0             ; m << 32+1
-    LDD     -16(%sp),lt_0             ; lt
-
-    LDD     -8(%sp),ht_0              ; ht
-    EXTRD,U tmp_0,32,33,tmp_0         ; tmp_0 = m&Mask >> 32-1
-    ADD     m_0,lt_0,lt_0             ; lt = lt+m
-    ADD,L   ht_0,tmp_0,ht_0           ; ht += tmp_0
-    ADD,DC  ht_0,%r0,ht_0             ; ht++
-
-    STD     lt_0,0(r_ptr)             ; rp[0] = lt
-    STD     ht_0,8(r_ptr)             ; rp[1] = ht
-
-bn_sqr_words_exit
-    .EXIT
-    LDD     -112(%sp),%r5       ; restore r5  
-    LDD     -120(%sp),%r4       ; restore r4  
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3 
-	.PROCEND	;in=23,24,25,26,29;out=28;
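-
-; Semantics in C (reference only; bn_sqr_words_ref is a hypothetical
-; name): each input word becomes two output words.
-;
-;   void bn_sqr_words_ref(BN_ULONG *rp, const BN_ULONG *ap, int num)
-;   {
-;       while (num-- > 0) {
-;           unsigned __int128 t = (unsigned __int128)(*ap) * (*ap);
-;           *rp++ = (BN_ULONG)t;              /* low word of a^2    */
-;           *rp++ = (BN_ULONG)(t >> 64);      /* high word of a^2   */
-;           ap++;
-;       }
-;   }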
-
-
-;----------------------------------------------------------------------------
-;
-;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
-;
-; arg0 = rp 
-; arg1 = ap
-; arg2 = bp 
-; arg3 = n
-
-t  .reg %r22
-b  .reg %r21
-l  .reg %r20
-
-bn_add_words
-	.proc
-    .entry
-	.callinfo
-	.EXPORT	bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-	.align 64
-
-    CMPIB,>= 0,n,bn_add_words_exit
-    COPY    %r0,%ret0           ; return 0 by default
-
-	;
-	; If 2 or more words remain, run the loop
-	;
-	CMPIB,= 1,n,bn_add_words_single_top
-	NOP
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-bn_add_words_unroll2
-	LDD     0(a_ptr),t
-	LDD     0(b_ptr),b
-	ADD     t,%ret0,t                    ; t = t+c;
-	ADD,DC  %r0,%r0,%ret0                ; set c to carry
-	ADD     t,b,l                        ; l = t + b[0]
-	ADD,DC  %ret0,%r0,%ret0              ; c+= carry
-	STD     l,0(r_ptr)
-
-	LDD     8(a_ptr),t
-	LDD     8(b_ptr),b
-	ADD     t,%ret0,t                     ; t = t+c;
-	ADD,DC  %r0,%r0,%ret0                 ; set c to carry
-	ADD     t,b,l                         ; l = t + b[0]
-	ADD,DC  %ret0,%r0,%ret0               ; c+= carry
-	STD     l,8(r_ptr)
-
-	LDO     -2(n),n
-	LDO     16(a_ptr),a_ptr
-	LDO     16(b_ptr),b_ptr
-
-	CMPIB,<= 2,n,bn_add_words_unroll2
-	LDO     16(r_ptr),r_ptr
-
-    CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
-
-bn_add_words_single_top
-	LDD     0(a_ptr),t
-	LDD     0(b_ptr),b
-
-	ADD     t,%ret0,t                 ; t = t+c;
-	ADD,DC  %r0,%r0,%ret0             ; set c to carry (could use CMPCLR??)
-	ADD     t,b,l                     ; l = t + b[0]
-	ADD,DC  %ret0,%r0,%ret0           ; c+= carry
-	STD     l,0(r_ptr)
-
-bn_add_words_exit
-    .EXIT
-    BVE     (%rp)
-	NOP
-	.PROCEND	;in=23,24,25,26,29;out=28;
-
-;----------------------------------------------------------------------------
-;
-;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
-;
-; arg0 = rp 
-; arg1 = ap
-; arg2 = bp 
-; arg3 = n
-
-t1       .reg %r22
-t2       .reg %r21
-sub_tmp1 .reg %r20
-sub_tmp2 .reg %r19
-
-
-bn_sub_words
-	.proc
-	.callinfo 
-	.EXPORT	bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-
-    CMPIB,>=  0,n,bn_sub_words_exit
-    COPY    %r0,%ret0           ; return 0 by default
-
-	;
-	; If 2 or more words remain, run the loop
-	;
-	CMPIB,= 1,n,bn_sub_words_single_top
-	NOP
-
-	;
-	; This loop is unrolled 2 times (64-byte aligned as well)
-	;
-bn_sub_words_unroll2
-	LDD     0(a_ptr),t1
-	LDD     0(b_ptr),t2
-	SUB     t1,t2,sub_tmp1           ; t3 = t1-t2; 
-	SUB     sub_tmp1,%ret0,sub_tmp1  ; t3 = t3- c; 
-
-	CMPCLR,*>> t1,t2,sub_tmp2        ; clear if t1 > t2
-	LDO      1(%r0),sub_tmp2
-	
-	CMPCLR,*= t1,t2,%r0
-	COPY    sub_tmp2,%ret0
-	STD     sub_tmp1,0(r_ptr)
-
-	LDD     8(a_ptr),t1
-	LDD     8(b_ptr),t2
-	SUB     t1,t2,sub_tmp1            ; t3 = t1-t2; 
-	SUB     sub_tmp1,%ret0,sub_tmp1   ; t3 = t3- c; 
-	CMPCLR,*>> t1,t2,sub_tmp2         ; clear if t1 > t2
-	LDO      1(%r0),sub_tmp2
-	
-	CMPCLR,*= t1,t2,%r0
-	COPY    sub_tmp2,%ret0
-	STD     sub_tmp1,8(r_ptr)
-
-	LDO     -2(n),n
-	LDO     16(a_ptr),a_ptr
-	LDO     16(b_ptr),b_ptr
-
-	CMPIB,<= 2,n,bn_sub_words_unroll2
-	LDO     16(r_ptr),r_ptr
-
-    CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
-
-bn_sub_words_single_top
-	LDD     0(a_ptr),t1
-	LDD     0(b_ptr),t2
-	SUB     t1,t2,sub_tmp1            ; t3 = t1-t2; 
-	SUB     sub_tmp1,%ret0,sub_tmp1   ; t3 = t3- c; 
-	CMPCLR,*>> t1,t2,sub_tmp2         ; clear if t1 > t2
-	LDO      1(%r0),sub_tmp2
-	
-	CMPCLR,*= t1,t2,%r0
-	COPY    sub_tmp2,%ret0
-
-	STD     sub_tmp1,0(r_ptr)
-
-bn_sub_words_exit
-    .EXIT
-    BVE     (%rp)
-	NOP
-	.PROCEND	;in=23,24,25,26,29;out=28;
-
-;------------------------------------------------------------------------------
-;
-; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
-;
-; arg0 = h
-; arg1 = l
-; arg2 = d
-;
-; This is mainly just modified assembly from the compiler, thus the
-; lack of variable names.
-;
-;------------------------------------------------------------------------------
-bn_div_words
-	.proc
-	.callinfo CALLER,FRAME=272,ENTRY_GR=%r10,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_div_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-	.IMPORT	BN_num_bits_word,CODE,NO_RELOCATION
-	.IMPORT	__iob,DATA
-	.IMPORT	fprintf,CODE,NO_RELOCATION
-	.IMPORT	abort,CODE,NO_RELOCATION
-	.IMPORT	$$div2U,MILLICODE
-    .entry
-    STD     %r2,-16(%r30)   
-    STD,MA  %r3,352(%r30)   
-    STD     %r4,-344(%r30)  
-    STD     %r5,-336(%r30)  
-    STD     %r6,-328(%r30)  
-    STD     %r7,-320(%r30)  
-    STD     %r8,-312(%r30)  
-    STD     %r9,-304(%r30)  
-    STD     %r10,-296(%r30)
-
-    STD     %r27,-288(%r30)             ; save gp
-
-    COPY    %r24,%r3           ; save d 
-    COPY    %r26,%r4           ; save h (high 64-bits)
-    LDO      -1(%r0),%ret0     ; return -1 by default	
-
-    CMPB,*=  %r0,%arg2,$D3     ; if (d == 0)
-    COPY    %r25,%r5           ; save l (low 64-bits)
-
-    LDO     -48(%r30),%r29     ; create ap 
-    .CALL   ;in=26,29;out=28;
-    B,L     BN_num_bits_word,%r2 
-    COPY    %r3,%r26        
-    LDD     -288(%r30),%r27    ; restore gp 
-    LDI     64,%r21 
-
-    CMPB,=  %r21,%ret0,$00000012   ;if (i == 64) (forward) 
-    COPY    %ret0,%r24             ; i   
-    MTSARCM %r24    
-    DEPDI,Z -1,%sar,1,%r29  
-    CMPB,*<<,N %r29,%r4,bn_div_err_case ; if (h > 1<<i) (forward) 
-
-$00000012
-    SUBI    64,%r24,%r31                       ; i = 64 - i;
-    CMPCLR,*<< %r4,%r3,%r0                     ; if (h >= d)
-    SUB     %r4,%r3,%r4                        ; h -= d
-    CMPB,=  %r31,%r0,$0000001A                 ; if (i)
-    COPY    %r0,%r10                           ; ret = 0
-    MTSARCM %r31                               ; i to shift
-    DEPD,Z  %r3,%sar,64,%r3                    ; d <<= i;
-    SUBI    64,%r31,%r19                       ; 64 - i; redundant
-    MTSAR   %r19                               ; (64 -i) to shift
-    SHRPD   %r4,%r5,%sar,%r4                   ; l>> (64-i)
-    MTSARCM %r31                               ; i to shift
-    DEPD,Z  %r5,%sar,64,%r5                    ; l <<= i;
-
-$0000001A
-    DEPDI,Z -1,31,32,%r19                      
-    EXTRD,U %r3,31,32,%r6                      ; dh = d >> 32
-    EXTRD,U %r3,63,32,%r8                      ; dl = d & 0xffffffff
-    LDO     2(%r0),%r9
-    STD    %r3,-280(%r30)                      ; "d" to stack
-
-$0000001C
-    DEPDI,Z -1,63,32,%r29                      ; 
-    EXTRD,U %r4,31,32,%r31                     ; h >> 32
-    CMPB,*=,N  %r31,%r6,$D2     	       ; if ((h>>32) != dh)(forward) div
-    COPY    %r4,%r26       
-    EXTRD,U %r4,31,32,%r25 
-    COPY    %r6,%r24      
-    .CALL   ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
-    B,L     $$div2U,%r2     
-    EXTRD,U %r6,31,32,%r23  
-    DEPD    %r28,31,32,%r29 
-$D2
-    STD     %r29,-272(%r30)                   ; q
-    AND     %r5,%r19,%r24                   ; t & 0xffffffff00000000;
-    EXTRD,U %r24,31,32,%r24                 ; ??? 
-    FLDD    -272(%r30),%fr7                 ; q
-    FLDD    -280(%r30),%fr8                 ; d
-    XMPYU   %fr8L,%fr7L,%fr10  
-    FSTD    %fr10,-256(%r30)   
-    XMPYU   %fr8L,%fr7R,%fr22  
-    FSTD    %fr22,-264(%r30)   
-    XMPYU   %fr8R,%fr7L,%fr11 
-    XMPYU   %fr8R,%fr7R,%fr23
-    FSTD    %fr11,-232(%r30)
-    FSTD    %fr23,-240(%r30)
-    LDD     -256(%r30),%r28
-    DEPD,Z  %r28,31,32,%r2 
-    LDD     -264(%r30),%r20
-    ADD,L   %r20,%r2,%r31   
-    LDD     -232(%r30),%r22 
-    DEPD,Z  %r22,31,32,%r22 
-    LDD     -240(%r30),%r21 
-    B       $00000024       ; enter loop  
-    ADD,L   %r21,%r22,%r23 
-
-$0000002A
-    LDO     -1(%r29),%r29   
-    SUB     %r23,%r8,%r23   
-$00000024
-    SUB     %r4,%r31,%r25   
-    AND     %r25,%r19,%r26  
-    CMPB,*<>,N      %r0,%r26,$00000046  ; (forward)
-    DEPD,Z  %r25,31,32,%r20 
-    OR      %r20,%r24,%r21  
-    CMPB,*<<,N  %r21,%r23,$0000002A ;(backward) 
-    SUB     %r31,%r6,%r31   
-;-------------Break path---------------------
-
-$00000046
-    DEPD,Z  %r23,31,32,%r25              ;tl
-    EXTRD,U %r23,31,32,%r26              ;t
-    AND     %r25,%r19,%r24               ;tl = (tl<<32) & 0xffffffff00000000L
-    ADD,L   %r31,%r26,%r31               ;th += t; 
-    CMPCLR,*>>=     %r5,%r24,%r0         ;if (l<tl)
-    LDO     1(%r31),%r31                 ; th++;
-    CMPB,*<<=,N     %r31,%r4,$00000036   ;if (n < th) (forward)
-    LDO     -1(%r29),%r29                ;q--; 
-    ADD,L   %r4,%r3,%r4                  ;h += d;
-$00000036
-    ADDIB,=,N       -1,%r9,$D1 ;if (--count == 0) break (forward) 
-    SUB     %r5,%r24,%r28                ; l -= tl;
-    SUB     %r4,%r31,%r24                ; h -= th;
-    SHRPD   %r24,%r28,32,%r4             ; h = ((h<<32)|(l>>32));
-    DEPD,Z  %r29,31,32,%r10              ; ret = q<<32
-    b      $0000001C
-    DEPD,Z  %r28,31,32,%r5               ; l = l << 32 
-
-$D1
-    OR      %r10,%r29,%r28           ; ret |= q
-$D3
-    LDD     -368(%r30),%r2  
-$D0
-    LDD     -296(%r30),%r10 
-    LDD     -304(%r30),%r9  
-    LDD     -312(%r30),%r8  
-    LDD     -320(%r30),%r7  
-    LDD     -328(%r30),%r6  
-    LDD     -336(%r30),%r5  
-    LDD     -344(%r30),%r4  
-    BVE     (%r2)   
-        .EXIT
-    LDD,MB  -352(%r30),%r3 
-
-bn_div_err_case
-    MFIA    %r6     
-    ADDIL   L'bn_div_words-bn_div_err_case,%r6,%r1 
-    LDO     R'bn_div_words-bn_div_err_case(%r1),%r6  
-    ADDIL   LT'__iob,%r27,%r1       
-    LDD     RT'__iob(%r1),%r26      
-    ADDIL   L'C$4-bn_div_words,%r6,%r1    
-    LDO     R'C$4-bn_div_words(%r1),%r25  
-    LDO     64(%r26),%r26   
-    .CALL           ;in=24,25,26,29;out=28;
-    B,L     fprintf,%r2    
-    LDO     -48(%r30),%r29 
-    LDD     -288(%r30),%r27
-    .CALL           ;in=29;
-    B,L     abort,%r2      
-    LDO     -48(%r30),%r29 
-    LDD     -288(%r30),%r27
-    B       $D0         
-    LDD     -368(%r30),%r2  
-	.PROCEND	;in=24,25,26,29;out=28;
-
-;----------------------------------------------------------------------------
-;
-; Registers to hold 64-bit values to manipulate.  The "L" part
-; of the register corresponds to the upper 32-bits, while the "R"
-; part corresponds to the lower 32-bits
-; 
-; Note, that when using b6 and b7, the code must save these before
-; using them because they are callee save registers 
-; 
-;
-; Floating point registers to use to save values that
-; are manipulated.  These don't collide with ftemp1-6 and
-; are all caller save registers
-;
-a0        .reg %fr22
-a0L       .reg %fr22L
-a0R       .reg %fr22R
-
-a1        .reg %fr23
-a1L       .reg %fr23L
-a1R       .reg %fr23R
-
-a2        .reg %fr24
-a2L       .reg %fr24L
-a2R       .reg %fr24R
-
-a3        .reg %fr25
-a3L       .reg %fr25L
-a3R       .reg %fr25R
-
-a4        .reg %fr26
-a4L       .reg %fr26L
-a4R       .reg %fr26R
-
-a5        .reg %fr27
-a5L       .reg %fr27L
-a5R       .reg %fr27R
-
-a6        .reg %fr28
-a6L       .reg %fr28L
-a6R       .reg %fr28R
-
-a7        .reg %fr29
-a7L       .reg %fr29L
-a7R       .reg %fr29R
-
-b0        .reg %fr30
-b0L       .reg %fr30L
-b0R       .reg %fr30R
-
-b1        .reg %fr31
-b1L       .reg %fr31L
-b1R       .reg %fr31R
-
-;
-; Temporary floating-point variables; these are all caller-save
-; registers
-;
-ftemp1    .reg %fr4
-ftemp2    .reg %fr5
-ftemp3    .reg %fr6
-ftemp4    .reg %fr7
-
-;
-; The B set of registers when used.
-;
-
-b2        .reg %fr8
-b2L       .reg %fr8L
-b2R       .reg %fr8R
-
-b3        .reg %fr9
-b3L       .reg %fr9L
-b3R       .reg %fr9R
-
-b4        .reg %fr10
-b4L       .reg %fr10L
-b4R       .reg %fr10R
-
-b5        .reg %fr11
-b5L       .reg %fr11L
-b5R       .reg %fr11R
-
-b6        .reg %fr12
-b6L       .reg %fr12L
-b6R       .reg %fr12R
-
-b7        .reg %fr13
-b7L       .reg %fr13L
-b7R       .reg %fr13R
-
-c1           .reg %r21   ; only reg
-temp1        .reg %r20   ; only reg
-temp2        .reg %r19   ; only reg
-temp3        .reg %r31   ; only reg
-
-m1           .reg %r28   
-c2           .reg %r23   
-high_one     .reg %r1
-ht           .reg %r6
-lt           .reg %r5
-m            .reg %r4
-c3           .reg %r3
-
-SQR_ADD_C  .macro  A0L,A0R,C1,C2,C3
-    XMPYU   A0L,A0R,ftemp1       ; m
-    FSTD    ftemp1,-24(%sp)      ; store m
-
-    XMPYU   A0R,A0R,ftemp2       ; lt
-    FSTD    ftemp2,-16(%sp)      ; store lt
-
-    XMPYU   A0L,A0L,ftemp3       ; ht
-    FSTD    ftemp3,-8(%sp)       ; store ht
-
-    LDD     -24(%sp),m           ; load m
-    AND     m,high_mask,temp2    ; m & Mask
-    DEPD,Z  m,30,31,temp3        ; m << 32+1
-    LDD     -16(%sp),lt          ; lt
-
-    LDD     -8(%sp),ht           ; ht
-    EXTRD,U temp2,32,33,temp1    ; temp1 = m&Mask >> 32-1
-    ADD     temp3,lt,lt          ; lt = lt+m
-    ADD,L   ht,temp1,ht          ; ht += temp1
-    ADD,DC  ht,%r0,ht            ; ht++
-
-    ADD     C1,lt,C1             ; c1=c1+lt
-    ADD,DC  ht,%r0,ht            ; ht++
-
-    ADD     C2,ht,C2             ; c2=c2+ht
-    ADD,DC  C3,%r0,C3            ; c3++
-.endm
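-
-; Rough semantics of SQR_ADD_C (an illustrative C-like sketch, not
-; code from this file): accumulate one diagonal comba term,
-; (c3:c2:c1) += a*a.  The 128-bit square is assembled from the three
-; XMPYU half-products m=aL*aR, lt=aR*aR, ht=aL*aL as
-; ht:lt = (ht << 64) + (2*m << 32) + lt, and then
-;
-;     c1 += lt;  if (c1 < lt) ht++;
-;     c2 += ht;  if (c2 < ht) c3++;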
-
-SQR_ADD_C2 .macro  A0L,A0R,A1L,A1R,C1,C2,C3
-    XMPYU   A0L,A1R,ftemp1          ; m1 = bl*ht
-    FSTD    ftemp1,-16(%sp)         ;
-    XMPYU   A0R,A1L,ftemp2          ; m = bh*lt
-    FSTD    ftemp2,-8(%sp)          ;
-    XMPYU   A0R,A1R,ftemp3          ; lt = bl*lt
-    FSTD    ftemp3,-32(%sp)
-    XMPYU   A0L,A1L,ftemp4          ; ht = bh*ht
-    FSTD    ftemp4,-24(%sp)         ;
-
-    LDD     -8(%sp),m               ; r21 = m
-    LDD     -16(%sp),m1             ; r19 = m1
-    ADD,L   m,m1,m                  ; m+m1
-
-    DEPD,Z  m,31,32,temp3           ; (m+m1<<32)
-    LDD     -24(%sp),ht             ; r24 = ht
-
-    CMPCLR,*>>= m,m1,%r0            ; if (m < m1)
-    ADD,L   ht,high_one,ht          ; ht+=high_one
-
-    EXTRD,U m,31,32,temp1           ; m >> 32
-    LDD     -32(%sp),lt             ; lt
-    ADD,L   ht,temp1,ht             ; ht+= m>>32
-    ADD     lt,temp3,lt             ; lt = lt+m1
-    ADD,DC  ht,%r0,ht               ; ht++
-
-    ADD     ht,ht,ht                ; ht=ht+ht;
-    ADD,DC  C3,%r0,C3               ; add in carry (c3++)
-
-    ADD     lt,lt,lt                ; lt=lt+lt;
-    ADD,DC  ht,%r0,ht               ; add in carry (ht++)
-
-    ADD     C1,lt,C1                ; c1=c1+lt
-    ADD,DC,*NUV ht,%r0,ht           ; add in carry (ht++)
-    LDO     1(C3),C3              ; bump c3 if overflow, nullify otherwise
-
-    ADD     C2,ht,C2                ; c2 = c2 + ht
-    ADD,DC  C3,%r0,C3             ; add in carry (c3++)
-.endm
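-
-; Rough semantics of SQR_ADD_C2 (an illustrative C-like sketch, not
-; code from this file): accumulate one doubled off-diagonal comba
-; term, (c3:c2:c1) += 2*a*b.  The doubling is done on the 64-bit
-; halves of the product, with each carry propagated explicitly:
-;
-;     ht = ht + ht;  c3 += carry;    /* double the high half */
-;     lt = lt + lt;  ht += carry;    /* double the low half  */
-;     c1 += lt;      ht += carry;    /* may ripple on into c3 */
-;     c2 += ht;      c3 += carry;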
-
-;
-;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
-; arg0 = r_ptr
-; arg1 = a_ptr
-;
-
-bn_sqr_comba8
-	.PROC
-	.CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .ENTRY
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3
-    STD     %r4,8(%sp)          ; save r4
-    STD     %r5,16(%sp)         ; save r5
-    STD     %r6,24(%sp)         ; save r6
-
-	;
-	; Zero out carries
-	;
-	COPY     %r0,c1
-	COPY     %r0,c2
-	COPY     %r0,c3
-
-	LDO      128(%sp),%sp       ; bump stack
-    DEPDI,Z -1,32,33,high_mask   ; Create Mask 0xffffffff80000000L
-    DEPDI,Z  1,31,1,high_one     ; Create Value  1 << 32
-
-	;
-	; Load up all of the values we are going to use
-	;
-    FLDD     0(a_ptr),a0       
-    FLDD     8(a_ptr),a1       
-    FLDD    16(a_ptr),a2       
-    FLDD    24(a_ptr),a3       
-    FLDD    32(a_ptr),a4       
-    FLDD    40(a_ptr),a5       
-    FLDD    48(a_ptr),a6       
-    FLDD    56(a_ptr),a7       
-
-	SQR_ADD_C a0L,a0R,c1,c2,c3
-	STD     c1,0(r_ptr)          ; r[0] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
-	STD     c2,8(r_ptr)          ; r[1] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C a1L,a1R,c3,c1,c2
-	SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
-	STD     c3,16(r_ptr)            ; r[2] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
-	SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
-	STD     c1,24(r_ptr)           ; r[3] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C a2L,a2R,c2,c3,c1
-	SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
-	SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
-	STD     c2,32(r_ptr)          ; r[4] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
-	SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
-	SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
-	STD     c3,40(r_ptr)          ; r[5] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C a3L,a3R,c1,c2,c3
-	SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
-	SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
-	SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
-	STD     c1,48(r_ptr)          ; r[6] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
-	SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
-	SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
-	SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
-	STD     c2,56(r_ptr)          ; r[7] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C a4L,a4R,c3,c1,c2
-	SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
-	SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
-	SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
-	STD     c3,64(r_ptr)          ; r[8] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
-	SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
-	SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
-	STD     c1,72(r_ptr)          ; r[9] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C a5L,a5R,c2,c3,c1
-	SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
-	SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
-	STD     c2,80(r_ptr)          ; r[10] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
-	SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
-	STD     c3,88(r_ptr)          ; r[11] = c3;
-	COPY    %r0,c3
-	
-	SQR_ADD_C a6L,a6R,c1,c2,c3
-	SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
-	STD     c1,96(r_ptr)          ; r[12] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
-	STD     c2,104(r_ptr)         ; r[13] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C a7L,a7R,c3,c1,c2
-	STD     c3, 112(r_ptr)       ; r[14] = c3
-	STD     c1, 120(r_ptr)       ; r[15] = c1
-
-    .EXIT
-    LDD     -104(%sp),%r6        ; restore r6
-    LDD     -112(%sp),%r5        ; restore r5
-    LDD     -120(%sp),%r4        ; restore r4
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3
-
-	.PROCEND	
-
-;-----------------------------------------------------------------------------
-;
-;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
-; arg0 = r_ptr
-; arg1 = a_ptr
-;
-
-bn_sqr_comba4
-	.proc
-	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-    STD     %r3,0(%sp)          ; save r3
-    STD     %r4,8(%sp)          ; save r4
-    STD     %r5,16(%sp)         ; save r5
-    STD     %r6,24(%sp)         ; save r6
-
-	;
-	; Zero out carries
-	;
-	COPY     %r0,c1
-	COPY     %r0,c2
-	COPY     %r0,c3
-
-	LDO      128(%sp),%sp       ; bump stack
-    DEPDI,Z -1,32,33,high_mask   ; Create Mask 0xffffffff80000000L
-    DEPDI,Z  1,31,1,high_one     ; Create Value  1 << 32
-
-	;
-	; Load up all of the values we are going to use
-	;
-    FLDD     0(a_ptr),a0       
-    FLDD     8(a_ptr),a1       
-    FLDD    16(a_ptr),a2       
-    FLDD    24(a_ptr),a3       
-    FLDD    32(a_ptr),a4       
-    FLDD    40(a_ptr),a5       
-    FLDD    48(a_ptr),a6       
-    FLDD    56(a_ptr),a7       
-
-	SQR_ADD_C a0L,a0R,c1,c2,c3
-
-	STD     c1,0(r_ptr)          ; r[0] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
-
-	STD     c2,8(r_ptr)          ; r[1] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C a1L,a1R,c3,c1,c2
-	SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
-
-	STD     c3,16(r_ptr)            ; r[2] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
-	SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
-
-	STD     c1,24(r_ptr)           ; r[3] = c1;
-	COPY    %r0,c1
-
-	SQR_ADD_C a2L,a2R,c2,c3,c1
-	SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
-
-	STD     c2,32(r_ptr)           ; r[4] = c2;
-	COPY    %r0,c2
-
-	SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
-	STD     c3,40(r_ptr)           ; r[5] = c3;
-	COPY    %r0,c3
-
-	SQR_ADD_C a3L,a3R,c1,c2,c3
-	STD     c1,48(r_ptr)           ; r[6] = c1;
-	STD     c2,56(r_ptr)           ; r[7] = c2;
-
-    .EXIT
-    LDD     -104(%sp),%r6        ; restore r6
-    LDD     -112(%sp),%r5        ; restore r5
-    LDD     -120(%sp),%r4        ; restore r4
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3
-
-	.PROCEND	
-
-
-;---------------------------------------------------------------------------
-
-MUL_ADD_C  .macro  A0L,A0R,B0L,B0R,C1,C2,C3
-    XMPYU   A0L,B0R,ftemp1        ; m1 = bl*ht
-    FSTD    ftemp1,-16(%sp)       ;
-    XMPYU   A0R,B0L,ftemp2        ; m = bh*lt
-    FSTD    ftemp2,-8(%sp)        ;
-    XMPYU   A0R,B0R,ftemp3        ; lt = bl*lt
-    FSTD    ftemp3,-32(%sp)
-    XMPYU   A0L,B0L,ftemp4        ; ht = bh*ht
-    FSTD    ftemp4,-24(%sp)       ;
-
-    LDD     -8(%sp),m             ; r21 = m
-    LDD     -16(%sp),m1           ; r19 = m1
-    ADD,L   m,m1,m                ; m+m1
-
-    DEPD,Z  m,31,32,temp3         ; (m+m1<<32)
-    LDD     -24(%sp),ht           ; r24 = ht
-
-    CMPCLR,*>>= m,m1,%r0          ; if (m < m1)
-    ADD,L   ht,high_one,ht        ; ht+=high_one
-
-    EXTRD,U m,31,32,temp1         ; m >> 32
-    LDD     -32(%sp),lt           ; lt
-    ADD,L   ht,temp1,ht           ; ht+= m>>32
-    ADD     lt,temp3,lt           ; lt = lt+m1
-    ADD,DC  ht,%r0,ht             ; ht++
-
-    ADD     C1,lt,C1              ; c1=c1+lt
-    ADD,DC  ht,%r0,ht             ; add in carry (ht++)
-
-    ADD     C2,ht,C2              ; c2 = c2 + ht
-    ADD,DC  C3,%r0,C3             ; add in carry (c3++)
-.endm
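-
-; Rough semantics of MUL_ADD_C (an illustrative C-like sketch, not
-; code from this file): accumulate one comba product term,
-; (c3:c2:c1) += a*b, with the 128-bit product assembled into ht:lt
-; from four XMPYU half-products exactly as in SQR_ADD_C2, then
-;
-;     c1 += lt;  if (c1 < lt) ht++;
-;     c2 += ht;  if (c2 < ht) c3++;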
-
-
-;
-;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
-; arg0 = r_ptr
-; arg1 = a_ptr
-; arg2 = b_ptr
-;
-
-bn_mul_comba8
-	.proc
-	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3
-    STD     %r4,8(%sp)          ; save r4
-    STD     %r5,16(%sp)         ; save r5
-    STD     %r6,24(%sp)         ; save r6
-    FSTD    %fr12,32(%sp)       ; save fr12
-    FSTD    %fr13,40(%sp)       ; save fr13
-
-	;
-	; Zero out carries
-	;
-	COPY     %r0,c1
-	COPY     %r0,c2
-	COPY     %r0,c3
-
-	LDO      128(%sp),%sp       ; bump stack
-    DEPDI,Z  1,31,1,high_one     ; Create Value  1 << 32
-
-	;
-	; Load up all of the values we are going to use
-	;
-    FLDD      0(a_ptr),a0       
-    FLDD      8(a_ptr),a1       
-    FLDD     16(a_ptr),a2       
-    FLDD     24(a_ptr),a3       
-    FLDD     32(a_ptr),a4       
-    FLDD     40(a_ptr),a5       
-    FLDD     48(a_ptr),a6       
-    FLDD     56(a_ptr),a7       
-
-    FLDD      0(b_ptr),b0       
-    FLDD      8(b_ptr),b1       
-    FLDD     16(b_ptr),b2       
-    FLDD     24(b_ptr),b3       
-    FLDD     32(b_ptr),b4       
-    FLDD     40(b_ptr),b5       
-    FLDD     48(b_ptr),b6       
-    FLDD     56(b_ptr),b7       
-
-	MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
-	STD       c1,0(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
-	STD       c2,8(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
-	MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
-	MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
-	STD       c3,16(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
-	MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
-	MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
-	MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
-	STD       c1,24(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
-	MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
-	MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
-	STD       c2,32(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
-	MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
-	MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
-	MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
-	MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
-	MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
-	STD       c3,40(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
-	MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
-	MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
-	MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
-	MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
-	MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
-	MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
-	STD       c1,48(r_ptr)
-	COPY      %r0,c1
-	
-	MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
-	MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
-	MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
-	MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
-	MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
-	MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
-	STD       c2,56(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
-	MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
-	MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
-	MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
-	MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
-	MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
-	MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
-	STD       c3,64(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
-	MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
-	MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
-	MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
-	MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
-	MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
-	STD       c1,72(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
-	MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
-	MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
-	MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
-	MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
-	STD       c2,80(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
-	MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
-	MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
-	MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
-	STD       c3,88(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
-	MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
-	MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
-	STD       c1,96(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
-	MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
-	STD       c2,104(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
-	STD       c3,112(r_ptr)
-	STD       c1,120(r_ptr)
-
-    .EXIT
-    FLDD    -88(%sp),%fr13 
-    FLDD    -96(%sp),%fr12 
-    LDD     -104(%sp),%r6        ; restore r6
-    LDD     -112(%sp),%r5        ; restore r5
-    LDD     -120(%sp),%r4        ; restore r4
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3
-
-	.PROCEND	
-
-;-----------------------------------------------------------------------------
-;
-;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
-; arg0 = r_ptr
-; arg1 = a_ptr
-; arg2 = b_ptr
-;
-
-bn_mul_comba4
-	.proc
-	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
-	.EXPORT	bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
-    .entry
-	.align 64
-
-    STD     %r3,0(%sp)          ; save r3
-    STD     %r4,8(%sp)          ; save r4
-    STD     %r5,16(%sp)         ; save r5
-    STD     %r6,24(%sp)         ; save r6
-    FSTD    %fr12,32(%sp)       ; save fr12
-    FSTD    %fr13,40(%sp)       ; save fr13
-
-	;
-	; Zero out carries
-	;
-	COPY     %r0,c1
-	COPY     %r0,c2
-	COPY     %r0,c3
-
-	LDO      128(%sp),%sp       ; bump stack
-    DEPDI,Z  1,31,1,high_one     ; Create Value  1 << 32
-
-	;
-	; Load up all of the values we are going to use
-	;
-    FLDD      0(a_ptr),a0       
-    FLDD      8(a_ptr),a1       
-    FLDD     16(a_ptr),a2       
-    FLDD     24(a_ptr),a3       
-
-    FLDD      0(b_ptr),b0       
-    FLDD      8(b_ptr),b1       
-    FLDD     16(b_ptr),b2       
-    FLDD     24(b_ptr),b3       
-
-	MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
-	STD       c1,0(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
-	STD       c2,8(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
-	MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
-	MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
-	STD       c3,16(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
-	MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
-	MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
-	MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
-	STD       c1,24(r_ptr)
-	COPY      %r0,c1
-
-	MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
-	MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
-	MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
-	STD       c2,32(r_ptr)
-	COPY      %r0,c2
-
-	MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
-	MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
-	STD       c3,40(r_ptr)
-	COPY      %r0,c3
-
-	MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
-	STD       c1,48(r_ptr)
-	STD       c2,56(r_ptr)
-
-    .EXIT
-    FLDD    -88(%sp),%fr13 
-    FLDD    -96(%sp),%fr12 
-    LDD     -104(%sp),%r6        ; restore r6
-    LDD     -112(%sp),%r5        ; restore r5
-    LDD     -120(%sp),%r4        ; restore r4
-    BVE     (%rp)
-    LDD,MB  -128(%sp),%r3
-
-	.PROCEND	
-
-
-	.SPACE	$TEXT$
-	.SUBSPA	$CODE$
-	.SPACE	$PRIVATE$,SORT=16
-	.IMPORT	$global$,DATA
-	.SPACE	$TEXT$
-	.SUBSPA	$CODE$
-	.SUBSPA	$LIT$,ACCESS=0x2c
-C$4
-	.ALIGN	8
-	.STRINGZ	"Division would overflow (%d)\n"
-	.END

+ 0 - 995
drivers/builtin_openssl2/crypto/bn/asm/parisc-mont.pl

@@ -1,995 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# On PA-7100LC this module performs ~90-50% better, less for longer
-# keys, than code generated by gcc 3.2 for PA-RISC 1.1. The latter means
-# that the compiler utilized the xmpyu instruction to perform 32x32=64-bit
-# multiplication, which in turn means that the "baseline" performance was
-# optimal with respect to instruction set capabilities. Fair comparison
-# with the vendor compiler is problematic, because OpenSSL doesn't define
-# BN_LLONG [presumably] for historical reasons, which drives the compiler
-# toward 4 times 16x16=32-bit multiplications [plus complementary
-# shifts and additions] instead. This means that you should observe a
-# several-times improvement over code generated by the vendor compiler
-# for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
-# improvement coefficient was never collected on PA-7100LC, or any
-# other 1.1 CPU, because I don't have access to such a machine with
-# the vendor compiler. But to give you a taste, the PA-RISC 1.1 code path
-# reportedly outperformed code generated by cc +DA1.1 +O3 by a factor
-# of ~5x on PA-8600.
-#
-# On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
-# reportedly ~2x faster than vendor-compiler-generated code [according
-# to a comment in pa-risc2[W].s]. Here comes a catch. The execution core
-# of this implementation is actually a 32-bit one, in the sense that it
-# operates on 32-bit values. But pa-risc2[W].s operates on arrays of
-# 64-bit BN_LONGs... How do they interoperate then? No problem. This
-# module picks halves of 64-bit values in reverse order and pretends
-# they were 32-bit BN_LONGs. But can a 32-bit core compete with "pure"
-# 64-bit code such as pa-risc2[W].s then? Well, the thing is that
-# 32x32=64-bit multiplication is the best even PA-RISC 2.0 can do,
-# i.e. there is no "wider" multiplication like on most other 64-bit
-# platforms. This means that even being effectively 32-bit, this
-# implementation performs the "64-bit" computational task in the same
-# amount of arithmetic operations, most notably multiplications. It
-# requires more memory references, most notably to tp[num], but this
-# doesn't seem to exhaust memory port capacity. And indeed, the dedicated
-# PA-RISC 2.0 code path provides virtually the same performance as
-# pa-risc2[W].s: it's ~10% better for the shortest key length and ~10%
-# worse for the longest one.
-#
-# In case it wasn't clear: the module has two distinct code paths,
-# PA-RISC 1.1 and PA-RISC 2.0. The latter features carry-free 64-bit
-# additions and 64-bit integer loads, not to mention specific
-# instruction scheduling. In a 64-bit build naturally only the 2.0 code
-# path is assembled. In a 32-bit application context both code paths
-# are assembled, a PA-RISC 2.0 CPU is detected at run-time and the
-# proper path is taken automatically. Also, in a 32-bit build the module
-# imposes a couple of limitations: vector lengths have to be even and
-# vector addresses have to be 64-bit aligned. Normally neither is a
-# problem: most common key lengths are even and vectors are commonly
-# malloc-ed, which ensures alignment.
-#
-# Special thanks to polarhome.com for providing HP-UX account on
-# PA-RISC 1.1 machine, and to correspondent who chose to remain
-# anonymous for testing the code on PA-RISC 2.0 machine.
-
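-# To make the "flipped word order" trick above concrete, here is a tiny
-# reference helper (an illustrative sketch, never called by this module;
-# assumes a 64-bit perl):
-sub flip_words {
-    my $x = shift;			# one 64-bit BN_LONG
-    # swap the 32-bit halves, so the core sees the low half first
-    return (($x & 0xffffffff) << 32) | (($x >> 32) & 0xffffffff);
-}
-# The 64-bit subtraction path below undoes this with
-# "shrpd $ti0,$ti0,32,$ti0" before the final store.
-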
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-
-$flavour = shift;
-$output = shift;
-
-open STDOUT,">$output";
-
-if ($flavour =~ /64/) {
-	$LEVEL		="2.0W";
-	$SIZE_T		=8;
-	$FRAME_MARKER	=80;
-	$SAVED_RP	=16;
-	$PUSH		="std";
-	$PUSHMA		="std,ma";
-	$POP		="ldd";
-	$POPMB		="ldd,mb";
-	$BN_SZ		=$SIZE_T;
-} else {
-	$LEVEL		="1.1";	#$LEVEL.="\n\t.ALLOW\t2.0";
-	$SIZE_T		=4;
-	$FRAME_MARKER	=48;
-	$SAVED_RP	=20;
-	$PUSH		="stw";
-	$PUSHMA		="stwm";
-	$POP		="ldw";
-	$POPMB		="ldwm";
-	$BN_SZ		=$SIZE_T;
-	if (open CONF,"<${dir}../../opensslconf.h") {
-	    while(<CONF>) {
-		if (m/#\s*define\s+SIXTY_FOUR_BIT/) {
-		    $BN_SZ=8;
-		    $LEVEL="2.0";
-		    last;
-		}
-	    }
-	    close CONF;
-	}
-}
-
-$FRAME=8*$SIZE_T+$FRAME_MARKER;	# 8 saved regs + frame marker
-				#                [+ argument transfer]
-$LOCALS=$FRAME-$FRAME_MARKER;
-$FRAME+=32;			# local variables
-
-$tp="%r31";
-$ti1="%r29";
-$ti0="%r28";
-
-$rp="%r26";
-$ap="%r25";
-$bp="%r24";
-$np="%r23";
-$n0="%r22";	# passed through stack in 32-bit
-$num="%r21";	# passed through stack in 32-bit
-$idx="%r20";
-$arrsz="%r19";
-
-$nm1="%r7";
-$nm0="%r6";
-$ab1="%r5";
-$ab0="%r4";
-
-$fp="%r3";
-$hi1="%r2";
-$hi0="%r1";
-
-$xfer=$n0;	# accommodates [-16..15] offset in fld[dw]s
-
-$fm0="%fr4";	$fti=$fm0;
-$fbi="%fr5L";
-$fn0="%fr5R";
-$fai="%fr6";	$fab0="%fr7";	$fab1="%fr8";
-$fni="%fr9";	$fnm0="%fr10";	$fnm1="%fr11";
-
-$code=<<___;
-	.LEVEL	$LEVEL
-	.SPACE	\$TEXT\$
-	.SUBSPA	\$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
-
-	.EXPORT	bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
-	.ALIGN	64
-bn_mul_mont
-	.PROC
-	.CALLINFO	FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
-	.ENTRY
-	$PUSH	%r2,-$SAVED_RP(%sp)		; standard prologue
-	$PUSHMA	%r3,$FRAME(%sp)
-	$PUSH	%r4,`-$FRAME+1*$SIZE_T`(%sp)
-	$PUSH	%r5,`-$FRAME+2*$SIZE_T`(%sp)
-	$PUSH	%r6,`-$FRAME+3*$SIZE_T`(%sp)
-	$PUSH	%r7,`-$FRAME+4*$SIZE_T`(%sp)
-	$PUSH	%r8,`-$FRAME+5*$SIZE_T`(%sp)
-	$PUSH	%r9,`-$FRAME+6*$SIZE_T`(%sp)
-	$PUSH	%r10,`-$FRAME+7*$SIZE_T`(%sp)
-	ldo	-$FRAME(%sp),$fp
-___
-$code.=<<___ if ($SIZE_T==4);
-	ldw	`-$FRAME_MARKER-4`($fp),$n0
-	ldw	`-$FRAME_MARKER-8`($fp),$num
-	nop
-	nop					; alignment
-___
-$code.=<<___ if ($BN_SZ==4);
-	comiclr,<=	6,$num,%r0		; are vectors long enough?
-	b		L\$abort
-	ldi		0,%r28			; signal "unhandled"
-	add,ev		%r0,$num,$num		; is $num even?
-	b		L\$abort
-	nop
-	or		$ap,$np,$ti1
-	extru,=		$ti1,31,3,%r0		; are ap and np 64-bit aligned?
-	b		L\$abort
-	nop
-	nop					; alignment
-	nop
-
-	fldws		0($n0),${fn0}
-	fldws,ma	4($bp),${fbi}		; bp[0]
-___
-$code.=<<___ if ($BN_SZ==8);
-	comib,>		3,$num,L\$abort		; are vectors long enough?
-	ldi		0,%r28			; signal "unhandled"
-	addl		$num,$num,$num		; I operate on 32-bit values
-
-	fldws		4($n0),${fn0}		; only low part of n0
-	fldws		4($bp),${fbi}		; bp[0] in flipped word order
-___
-$code.=<<___;
-	fldds		0($ap),${fai}		; ap[0,1]
-	fldds		0($np),${fni}		; np[0,1]
-
-	sh2addl		$num,%r0,$arrsz
-	ldi		31,$hi0
-	ldo		36($arrsz),$hi1		; space for tp[num+1]
-	andcm		$hi1,$hi0,$hi1		; align
-	addl		$hi1,%sp,%sp
-	$PUSH		$fp,-$SIZE_T(%sp)
-
-	ldo		`$LOCALS+16`($fp),$xfer
-	ldo		`$LOCALS+32+4`($fp),$tp
-
-	xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[0]
-	xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[0]
-	xmpyu		${fn0},${fab0}R,${fm0}
-
-	addl		$arrsz,$ap,$ap		; point at the end
-	addl		$arrsz,$np,$np
-	subi		0,$arrsz,$idx		; j=0
-	ldo		8($idx),$idx		; j++++
-
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[0]*m
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[1]*m
-	fstds		${fab0},-16($xfer)
-	fstds		${fnm0},-8($xfer)
-	fstds		${fab1},0($xfer)
-	fstds		${fnm1},8($xfer)
-	 flddx		$idx($ap),${fai}	; ap[2,3]
-	 flddx		$idx($np),${fni}	; np[2,3]
-___
-$code.=<<___ if ($BN_SZ==4);
-	mtctl		$hi0,%cr11		; $hi0 still holds 31
-	extrd,u,*=	$hi0,%sar,1,$hi0	; executes on PA-RISC 1.0
-	b		L\$parisc11
-	nop
-___
-$code.=<<___;					# PA-RISC 2.0 code-path
-	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
-	ldd		-16($xfer),$ab0
-	fstds		${fab0},-16($xfer)
-
-	extrd,u		$ab0,31,32,$hi0
-	extrd,u		$ab0,63,32,$ab0
-	ldd		-8($xfer),$nm0
-	fstds		${fnm0},-8($xfer)
-	 ldo		8($idx),$idx		; j++++
-	 addl		$ab0,$nm0,$nm0		; low part is discarded
-	 extrd,u	$nm0,31,32,$hi1
-
-L\$1st
-	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[0]
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
-	ldd		0($xfer),$ab1
-	fstds		${fab1},0($xfer)
-	 addl		$hi0,$ab1,$ab1
-	 extrd,u	$ab1,31,32,$hi0
-	ldd		8($xfer),$nm1
-	fstds		${fnm1},8($xfer)
-	 extrd,u	$ab1,63,32,$ab1
-	 addl		$hi1,$nm1,$nm1
-	flddx		$idx($ap),${fai}	; ap[j,j+1]
-	flddx		$idx($np),${fni}	; np[j,j+1]
-	 addl		$ab1,$nm1,$nm1
-	 extrd,u	$nm1,31,32,$hi1
-
-	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
-	ldd		-16($xfer),$ab0
-	fstds		${fab0},-16($xfer)
-	 addl		$hi0,$ab0,$ab0
-	 extrd,u	$ab0,31,32,$hi0
-	ldd		-8($xfer),$nm0
-	fstds		${fnm0},-8($xfer)
-	 extrd,u	$ab0,63,32,$ab0
-	 addl		$hi1,$nm0,$nm0
-	stw		$nm1,-4($tp)		; tp[j-1]
-	 addl		$ab0,$nm0,$nm0
-	 stw,ma		$nm0,8($tp)		; tp[j-1]
-	addib,<>	8,$idx,L\$1st		; j++++
-	 extrd,u	$nm0,31,32,$hi1
-
-	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[0]
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
-	ldd		0($xfer),$ab1
-	fstds		${fab1},0($xfer)
-	 addl		$hi0,$ab1,$ab1
-	 extrd,u	$ab1,31,32,$hi0
-	ldd		8($xfer),$nm1
-	fstds		${fnm1},8($xfer)
-	 extrd,u	$ab1,63,32,$ab1
-	 addl		$hi1,$nm1,$nm1
-	ldd		-16($xfer),$ab0
-	 addl		$ab1,$nm1,$nm1
-	ldd		-8($xfer),$nm0
-	 extrd,u	$nm1,31,32,$hi1
-
-	 addl		$hi0,$ab0,$ab0
-	 extrd,u	$ab0,31,32,$hi0
-	stw		$nm1,-4($tp)		; tp[j-1]
-	 extrd,u	$ab0,63,32,$ab0
-	 addl		$hi1,$nm0,$nm0
-	ldd		0($xfer),$ab1
-	 addl		$ab0,$nm0,$nm0
-	ldd,mb		8($xfer),$nm1
-	 extrd,u	$nm0,31,32,$hi1
-	stw,ma		$nm0,8($tp)		; tp[j-1]
-
-	ldo		-1($num),$num		; i--
-	subi		0,$arrsz,$idx		; j=0
-___
-$code.=<<___ if ($BN_SZ==4);
-	fldws,ma	4($bp),${fbi}		; bp[1]
-___
-$code.=<<___ if ($BN_SZ==8);
-	fldws		0($bp),${fbi}		; bp[1] in flipped word order
-___
-$code.=<<___;
-	 flddx		$idx($ap),${fai}	; ap[0,1]
-	 flddx		$idx($np),${fni}	; np[0,1]
-	 fldws		8($xfer),${fti}R	; tp[0]
-	addl		$hi0,$ab1,$ab1
-	 extrd,u	$ab1,31,32,$hi0
-	 extrd,u	$ab1,63,32,$ab1
-	 ldo		8($idx),$idx		; j++++
-	 xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[1]
-	 xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[1]
-	addl		$hi1,$nm1,$nm1
-	addl		$ab1,$nm1,$nm1
-	extrd,u		$nm1,31,32,$hi1
-	 fstws,mb	${fab0}L,-8($xfer)	; save high part
-	stw		$nm1,-4($tp)		; tp[j-1]
-
-	 fcpy,sgl	%fr0,${fti}L		; zero high part
-	 fcpy,sgl	%fr0,${fab0}L
-	addl		$hi1,$hi0,$hi0
-	extrd,u		$hi0,31,32,$hi1
-	 fcnvxf,dbl,dbl	${fti},${fti}		; 32-bit unsigned int -> double
-	 fcnvxf,dbl,dbl	${fab0},${fab0}
-	stw		$hi0,0($tp)
-	stw		$hi1,4($tp)
-
-	fadd,dbl	${fti},${fab0},${fab0}	; add tp[0]
-	fcnvfx,dbl,dbl	${fab0},${fab0}		; double -> 33-bit unsigned int
-	xmpyu		${fn0},${fab0}R,${fm0}
-	ldo		`$LOCALS+32+4`($fp),$tp
-L\$outer
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[0]*m
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[1]*m
-	fstds		${fab0},-16($xfer)	; 33-bit value
-	fstds		${fnm0},-8($xfer)
-	 flddx		$idx($ap),${fai}	; ap[2]
-	 flddx		$idx($np),${fni}	; np[2]
-	 ldo		8($idx),$idx		; j++++
-	ldd		-16($xfer),$ab0		; 33-bit value
-	ldd		-8($xfer),$nm0
-	ldw		0($xfer),$hi0		; high part
-
-	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
-	 extrd,u	$ab0,31,32,$ti0		; carry bit
-	 extrd,u	$ab0,63,32,$ab0
-	fstds		${fab1},0($xfer)
-	 addl		$ti0,$hi0,$hi0		; account carry bit
-	fstds		${fnm1},8($xfer)
-	 addl		$ab0,$nm0,$nm0		; low part is discarded
-	ldw		0($tp),$ti1		; tp[1]
-	 extrd,u	$nm0,31,32,$hi1
-	fstds		${fab0},-16($xfer)
-	fstds		${fnm0},-8($xfer)
-
-L\$inner
-	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[i]
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
-	ldd		0($xfer),$ab1
-	fstds		${fab1},0($xfer)
-	 addl		$hi0,$ti1,$ti1
-	 addl		$ti1,$ab1,$ab1
-	ldd		8($xfer),$nm1
-	fstds		${fnm1},8($xfer)
-	 extrd,u	$ab1,31,32,$hi0
-	 extrd,u	$ab1,63,32,$ab1
-	flddx		$idx($ap),${fai}	; ap[j,j+1]
-	flddx		$idx($np),${fni}	; np[j,j+1]
-	 addl		$hi1,$nm1,$nm1
-	 addl		$ab1,$nm1,$nm1
-	ldw		4($tp),$ti0		; tp[j]
-	stw		$nm1,-4($tp)		; tp[j-1]
-
-	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
-	ldd		-16($xfer),$ab0
-	fstds		${fab0},-16($xfer)
-	 addl		$hi0,$ti0,$ti0
-	 addl		$ti0,$ab0,$ab0
-	ldd		-8($xfer),$nm0
-	fstds		${fnm0},-8($xfer)
-	 extrd,u	$ab0,31,32,$hi0
-	 extrd,u	$nm1,31,32,$hi1
-	ldw		8($tp),$ti1		; tp[j]
-	 extrd,u	$ab0,63,32,$ab0
-	 addl		$hi1,$nm0,$nm0
-	 addl		$ab0,$nm0,$nm0
-	 stw,ma		$nm0,8($tp)		; tp[j-1]
-	addib,<>	8,$idx,L\$inner		; j++++
-	 extrd,u	$nm0,31,32,$hi1
-
-	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[i]
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
-	ldd		0($xfer),$ab1
-	fstds		${fab1},0($xfer)
-	 addl		$hi0,$ti1,$ti1
-	 addl		$ti1,$ab1,$ab1
-	ldd		8($xfer),$nm1
-	fstds		${fnm1},8($xfer)
-	 extrd,u	$ab1,31,32,$hi0
-	 extrd,u	$ab1,63,32,$ab1
-	ldw		4($tp),$ti0		; tp[j]
-	 addl		$hi1,$nm1,$nm1
-	 addl		$ab1,$nm1,$nm1
-	ldd		-16($xfer),$ab0
-	ldd		-8($xfer),$nm0
-	 extrd,u	$nm1,31,32,$hi1
-
-	addl		$hi0,$ab0,$ab0
-	 addl		$ti0,$ab0,$ab0
-	 stw		$nm1,-4($tp)		; tp[j-1]
-	 extrd,u	$ab0,31,32,$hi0
-	ldw		8($tp),$ti1		; tp[j]
-	 extrd,u	$ab0,63,32,$ab0
-	 addl		$hi1,$nm0,$nm0
-	ldd		0($xfer),$ab1
-	 addl		$ab0,$nm0,$nm0
-	ldd,mb		8($xfer),$nm1
-	 extrd,u	$nm0,31,32,$hi1
-	 stw,ma		$nm0,8($tp)		; tp[j-1]
-
-	addib,=		-1,$num,L\$outerdone	; i--
-	subi		0,$arrsz,$idx		; j=0
-___
-$code.=<<___ if ($BN_SZ==4);
-	fldws,ma	4($bp),${fbi}		; bp[i]
-___
-$code.=<<___ if ($BN_SZ==8);
-	ldi		12,$ti0			; bp[i] in flipped word order
-	addl,ev		%r0,$num,$num
-	ldi		-4,$ti0
-	addl		$ti0,$bp,$bp
-	fldws		0($bp),${fbi}
-___
-$code.=<<___;
-	 flddx		$idx($ap),${fai}	; ap[0]
-	addl		$hi0,$ab1,$ab1
-	 flddx		$idx($np),${fni}	; np[0]
-	 fldws		8($xfer),${fti}R	; tp[0]
-	addl		$ti1,$ab1,$ab1
-	extrd,u		$ab1,31,32,$hi0
-	extrd,u		$ab1,63,32,$ab1
-
-	 ldo		8($idx),$idx		; j++++
-	 xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[i]
-	 xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[i]
-	ldw		4($tp),$ti0		; tp[j]
-
-	addl		$hi1,$nm1,$nm1
-	 fstws,mb	${fab0}L,-8($xfer)	; save high part
-	addl		$ab1,$nm1,$nm1
-	extrd,u		$nm1,31,32,$hi1
-	 fcpy,sgl	%fr0,${fti}L		; zero high part
-	 fcpy,sgl	%fr0,${fab0}L
-	stw		$nm1,-4($tp)		; tp[j-1]
-
-	 fcnvxf,dbl,dbl	${fti},${fti}		; 32-bit unsigned int -> double
-	 fcnvxf,dbl,dbl	${fab0},${fab0}
-	addl		$hi1,$hi0,$hi0
-	 fadd,dbl	${fti},${fab0},${fab0}	; add tp[0]
-	addl		$ti0,$hi0,$hi0
-	extrd,u		$hi0,31,32,$hi1
-	 fcnvfx,dbl,dbl	${fab0},${fab0}		; double -> 33-bit unsigned int
-	stw		$hi0,0($tp)
-	stw		$hi1,4($tp)
-	 xmpyu		${fn0},${fab0}R,${fm0}
-
-	b		L\$outer
-	ldo		`$LOCALS+32+4`($fp),$tp
-
-L\$outerdone
-	addl		$hi0,$ab1,$ab1
-	addl		$ti1,$ab1,$ab1
-	extrd,u		$ab1,31,32,$hi0
-	extrd,u		$ab1,63,32,$ab1
-
-	ldw		4($tp),$ti0		; tp[j]
-
-	addl		$hi1,$nm1,$nm1
-	addl		$ab1,$nm1,$nm1
-	extrd,u		$nm1,31,32,$hi1
-	stw		$nm1,-4($tp)		; tp[j-1]
-
-	addl		$hi1,$hi0,$hi0
-	addl		$ti0,$hi0,$hi0
-	extrd,u		$hi0,31,32,$hi1
-	stw		$hi0,0($tp)
-	stw		$hi1,4($tp)
-
-	ldo		`$LOCALS+32`($fp),$tp
-	sub		%r0,%r0,%r0		; clear borrow
-___
-$code.=<<___ if ($BN_SZ==4);
-	ldws,ma		4($tp),$ti0
-	extru,=		$rp,31,3,%r0		; is rp 64-bit aligned?
-	b		L\$sub_pa11
-	addl		$tp,$arrsz,$tp
-L\$sub
-	ldwx		$idx($np),$hi0
-	subb		$ti0,$hi0,$hi1
-	ldwx		$idx($tp),$ti0
-	addib,<>	4,$idx,L\$sub
-	stws,ma		$hi1,4($rp)
-
-	subb		$ti0,%r0,$hi1
-	ldo		-4($tp),$tp
-___
-$code.=<<___ if ($BN_SZ==8);
-	ldd,ma		8($tp),$ti0
-L\$sub
-	ldd		$idx($np),$hi0
-	shrpd		$ti0,$ti0,32,$ti0	; flip word order
-	std		$ti0,-8($tp)		; save flipped value
-	sub,db		$ti0,$hi0,$hi1
-	ldd,ma		8($tp),$ti0
-	addib,<>	8,$idx,L\$sub
-	std,ma		$hi1,8($rp)
-
-	extrd,u		$ti0,31,32,$ti0		; carry in flipped word order
-	sub,db		$ti0,%r0,$hi1
-	ldo		-8($tp),$tp
-___
-$code.=<<___;
-	and		$tp,$hi1,$ap
-	andcm		$rp,$hi1,$bp
-	or		$ap,$bp,$np
-
-	sub		$rp,$arrsz,$rp		; rewind rp
-	subi		0,$arrsz,$idx
-	ldo		`$LOCALS+32`($fp),$tp
-L\$copy
-	ldd		$idx($np),$hi0
-	std,ma		%r0,8($tp)
-	addib,<>	8,$idx,.-8		; L\$copy
-	std,ma		$hi0,8($rp)	
-___
-
-if ($BN_SZ==4) {				# PA-RISC 1.1 code-path
-$ablo=$ab0;
-$abhi=$ab1;
-$nmlo0=$nm0;
-$nmhi0=$nm1;
-$nmlo1="%r9";
-$nmhi1="%r8";
-
-$code.=<<___;
-	b		L\$done
-	nop
-
-	.ALIGN		8
-L\$parisc11
-	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
-	ldw		-12($xfer),$ablo
-	ldw		-16($xfer),$hi0
-	ldw		-4($xfer),$nmlo0
-	ldw		-8($xfer),$nmhi0
-	fstds		${fab0},-16($xfer)
-	fstds		${fnm0},-8($xfer)
-
-	 ldo		8($idx),$idx		; j++++
-	 add		$ablo,$nmlo0,$nmlo0	; discarded
-	 addc		%r0,$nmhi0,$hi1
-	ldw		4($xfer),$ablo
-	ldw		0($xfer),$abhi
-	nop
-
-L\$1st_pa11
-	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[0]
-	flddx		$idx($ap),${fai}	; ap[j,j+1]
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
-	flddx		$idx($np),${fni}	; np[j,j+1]
-	 add		$hi0,$ablo,$ablo
-	ldw		12($xfer),$nmlo1
-	 addc		%r0,$abhi,$hi0
-	ldw		8($xfer),$nmhi1
-	 add		$ablo,$nmlo1,$nmlo1
-	fstds		${fab1},0($xfer)
-	 addc		%r0,$nmhi1,$nmhi1
-	fstds		${fnm1},8($xfer)
-	 add		$hi1,$nmlo1,$nmlo1
-	ldw		-12($xfer),$ablo
-	 addc		%r0,$nmhi1,$hi1
-	ldw		-16($xfer),$abhi
-
-	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
-	ldw		-4($xfer),$nmlo0
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
-	ldw		-8($xfer),$nmhi0
-	 add		$hi0,$ablo,$ablo
-	stw		$nmlo1,-4($tp)		; tp[j-1]
-	 addc		%r0,$abhi,$hi0
-	fstds		${fab0},-16($xfer)
-	 add		$ablo,$nmlo0,$nmlo0
-	fstds		${fnm0},-8($xfer)
-	 addc		%r0,$nmhi0,$nmhi0
-	ldw		0($xfer),$abhi
-	 add		$hi1,$nmlo0,$nmlo0
-	ldw		4($xfer),$ablo
-	 stws,ma	$nmlo0,8($tp)		; tp[j-1]
-	addib,<>	8,$idx,L\$1st_pa11	; j++++
-	 addc		%r0,$nmhi0,$hi1
-
-	 ldw		8($xfer),$nmhi1
-	 ldw		12($xfer),$nmlo1
-	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[0]
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
-	 add		$hi0,$ablo,$ablo
-	fstds		${fab1},0($xfer)
-	 addc		%r0,$abhi,$hi0
-	fstds		${fnm1},8($xfer)
-	 add		$ablo,$nmlo1,$nmlo1
-	ldw		-16($xfer),$abhi
-	 addc		%r0,$nmhi1,$nmhi1
-	ldw		-12($xfer),$ablo
-	 add		$hi1,$nmlo1,$nmlo1
-	ldw		-8($xfer),$nmhi0
-	 addc		%r0,$nmhi1,$hi1
-	ldw		-4($xfer),$nmlo0
-
-	 add		$hi0,$ablo,$ablo
-	stw		$nmlo1,-4($tp)		; tp[j-1]
-	 addc		%r0,$abhi,$hi0
-	ldw		0($xfer),$abhi
-	 add		$ablo,$nmlo0,$nmlo0
-	ldw		4($xfer),$ablo
-	 addc		%r0,$nmhi0,$nmhi0
-	ldws,mb		8($xfer),$nmhi1
-	 add		$hi1,$nmlo0,$nmlo0
-	ldw		4($xfer),$nmlo1
-	 addc		%r0,$nmhi0,$hi1
-	stws,ma		$nmlo0,8($tp)		; tp[j-1]
-
-	ldo		-1($num),$num		; i--
-	subi		0,$arrsz,$idx		; j=0
-
-	 fldws,ma	4($bp),${fbi}		; bp[1]
-	 flddx		$idx($ap),${fai}	; ap[0,1]
-	 flddx		$idx($np),${fni}	; np[0,1]
-	 fldws		8($xfer),${fti}R	; tp[0]
-	add		$hi0,$ablo,$ablo
-	addc		%r0,$abhi,$hi0
-	 ldo		8($idx),$idx		; j++++
-	 xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[1]
-	 xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[1]
-	add		$hi1,$nmlo1,$nmlo1
-	addc		%r0,$nmhi1,$nmhi1
-	add		$ablo,$nmlo1,$nmlo1
-	addc		%r0,$nmhi1,$hi1
-	 fstws,mb	${fab0}L,-8($xfer)	; save high part
-	stw		$nmlo1,-4($tp)		; tp[j-1]
-
-	 fcpy,sgl	%fr0,${fti}L		; zero high part
-	 fcpy,sgl	%fr0,${fab0}L
-	add		$hi1,$hi0,$hi0
-	addc		%r0,%r0,$hi1
-	 fcnvxf,dbl,dbl	${fti},${fti}		; 32-bit unsigned int -> double
-	 fcnvxf,dbl,dbl	${fab0},${fab0}
-	stw		$hi0,0($tp)
-	stw		$hi1,4($tp)
-
-	fadd,dbl	${fti},${fab0},${fab0}	; add tp[0]
-	fcnvfx,dbl,dbl	${fab0},${fab0}		; double -> 33-bit unsigned int
-	xmpyu		${fn0},${fab0}R,${fm0}
-	ldo		`$LOCALS+32+4`($fp),$tp
-L\$outer_pa11
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[0]*m
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[1]*m
-	fstds		${fab0},-16($xfer)	; 33-bit value
-	fstds		${fnm0},-8($xfer)
-	 flddx		$idx($ap),${fai}	; ap[2,3]
-	 flddx		$idx($np),${fni}	; np[2,3]
-	ldw		-16($xfer),$abhi	; carry bit actually
-	 ldo		8($idx),$idx		; j++++
-	ldw		-12($xfer),$ablo
-	ldw		-8($xfer),$nmhi0
-	ldw		-4($xfer),$nmlo0
-	ldw		0($xfer),$hi0		; high part
-
-	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
-	fstds		${fab1},0($xfer)
-	 addl		$abhi,$hi0,$hi0		; account carry bit
-	fstds		${fnm1},8($xfer)
-	 add		$ablo,$nmlo0,$nmlo0	; discarded
-	ldw		0($tp),$ti1		; tp[1]
-	 addc		%r0,$nmhi0,$hi1
-	fstds		${fab0},-16($xfer)
-	fstds		${fnm0},-8($xfer)
-	ldw		4($xfer),$ablo
-	ldw		0($xfer),$abhi
-
-L\$inner_pa11
-	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[i]
-	flddx		$idx($ap),${fai}	; ap[j,j+1]
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
-	flddx		$idx($np),${fni}	; np[j,j+1]
-	 add		$hi0,$ablo,$ablo
-	ldw		4($tp),$ti0		; tp[j]
-	 addc		%r0,$abhi,$abhi
-	ldw		12($xfer),$nmlo1
-	 add		$ti1,$ablo,$ablo
-	ldw		8($xfer),$nmhi1
-	 addc		%r0,$abhi,$hi0
-	fstds		${fab1},0($xfer)
-	 add		$ablo,$nmlo1,$nmlo1
-	fstds		${fnm1},8($xfer)
-	 addc		%r0,$nmhi1,$nmhi1
-	ldw		-12($xfer),$ablo
-	 add		$hi1,$nmlo1,$nmlo1
-	ldw		-16($xfer),$abhi
-	 addc		%r0,$nmhi1,$hi1
-
-	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
-	ldw		8($tp),$ti1		; tp[j]
-	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
-	ldw		-4($xfer),$nmlo0
-	 add		$hi0,$ablo,$ablo
-	ldw		-8($xfer),$nmhi0
-	 addc		%r0,$abhi,$abhi
-	stw		$nmlo1,-4($tp)		; tp[j-1]
-	 add		$ti0,$ablo,$ablo
-	fstds		${fab0},-16($xfer)
-	 addc		%r0,$abhi,$hi0
-	fstds		${fnm0},-8($xfer)
-	 add		$ablo,$nmlo0,$nmlo0
-	ldw		4($xfer),$ablo
-	 addc		%r0,$nmhi0,$nmhi0
-	ldw		0($xfer),$abhi
-	 add		$hi1,$nmlo0,$nmlo0
-	 stws,ma	$nmlo0,8($tp)		; tp[j-1]
-	addib,<>	8,$idx,L\$inner_pa11	; j++++
-	 addc		%r0,$nmhi0,$hi1
-
-	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[i]
-	ldw		12($xfer),$nmlo1
-	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
-	ldw		8($xfer),$nmhi1
-	 add		$hi0,$ablo,$ablo
-	ldw		4($tp),$ti0		; tp[j]
-	 addc		%r0,$abhi,$abhi
-	fstds		${fab1},0($xfer)
-	 add		$ti1,$ablo,$ablo
-	fstds		${fnm1},8($xfer)
-	 addc		%r0,$abhi,$hi0
-	ldw		-16($xfer),$abhi
-	 add		$ablo,$nmlo1,$nmlo1
-	ldw		-12($xfer),$ablo
-	 addc		%r0,$nmhi1,$nmhi1
-	ldw		-8($xfer),$nmhi0
-	 add		$hi1,$nmlo1,$nmlo1
-	ldw		-4($xfer),$nmlo0
-	 addc		%r0,$nmhi1,$hi1
-
-	add		$hi0,$ablo,$ablo
-	 stw		$nmlo1,-4($tp)		; tp[j-1]
-	addc		%r0,$abhi,$abhi
-	 add		$ti0,$ablo,$ablo
-	ldw		8($tp),$ti1		; tp[j]
-	 addc		%r0,$abhi,$hi0
-	ldw		0($xfer),$abhi
-	 add		$ablo,$nmlo0,$nmlo0
-	ldw		4($xfer),$ablo
-	 addc		%r0,$nmhi0,$nmhi0
-	ldws,mb		8($xfer),$nmhi1
-	 add		$hi1,$nmlo0,$nmlo0
-	ldw		4($xfer),$nmlo1
-	 addc		%r0,$nmhi0,$hi1
-	 stws,ma	$nmlo0,8($tp)		; tp[j-1]
-
-	addib,=		-1,$num,L\$outerdone_pa11; i--
-	subi		0,$arrsz,$idx		; j=0
-
-	 fldws,ma	4($bp),${fbi}		; bp[i]
-	 flddx		$idx($ap),${fai}	; ap[0]
-	add		$hi0,$ablo,$ablo
-	addc		%r0,$abhi,$abhi
-	 flddx		$idx($np),${fni}	; np[0]
-	 fldws		8($xfer),${fti}R	; tp[0]
-	add		$ti1,$ablo,$ablo
-	addc		%r0,$abhi,$hi0
-
-	 ldo		8($idx),$idx		; j++++
-	 xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[i]
-	 xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[i]
-	ldw		4($tp),$ti0		; tp[j]
-
-	add		$hi1,$nmlo1,$nmlo1
-	addc		%r0,$nmhi1,$nmhi1
-	 fstws,mb	${fab0}L,-8($xfer)	; save high part
-	add		$ablo,$nmlo1,$nmlo1
-	addc		%r0,$nmhi1,$hi1
-	 fcpy,sgl	%fr0,${fti}L		; zero high part
-	 fcpy,sgl	%fr0,${fab0}L
-	stw		$nmlo1,-4($tp)		; tp[j-1]
-
-	 fcnvxf,dbl,dbl	${fti},${fti}		; 32-bit unsigned int -> double
-	 fcnvxf,dbl,dbl	${fab0},${fab0}
-	add		$hi1,$hi0,$hi0
-	addc		%r0,%r0,$hi1
-	 fadd,dbl	${fti},${fab0},${fab0}	; add tp[0]
-	add		$ti0,$hi0,$hi0
-	addc		%r0,$hi1,$hi1
-	 fcnvfx,dbl,dbl	${fab0},${fab0}		; double -> 33-bit unsigned int
-	stw		$hi0,0($tp)
-	stw		$hi1,4($tp)
-	 xmpyu		${fn0},${fab0}R,${fm0}
-
-	b		L\$outer_pa11
-	ldo		`$LOCALS+32+4`($fp),$tp
-
-L\$outerdone_pa11
-	add		$hi0,$ablo,$ablo
-	addc		%r0,$abhi,$abhi
-	add		$ti1,$ablo,$ablo
-	addc		%r0,$abhi,$hi0
-
-	ldw		4($tp),$ti0		; tp[j]
-
-	add		$hi1,$nmlo1,$nmlo1
-	addc		%r0,$nmhi1,$nmhi1
-	add		$ablo,$nmlo1,$nmlo1
-	addc		%r0,$nmhi1,$hi1
-	stw		$nmlo1,-4($tp)		; tp[j-1]
-
-	add		$hi1,$hi0,$hi0
-	addc		%r0,%r0,$hi1
-	add		$ti0,$hi0,$hi0
-	addc		%r0,$hi1,$hi1
-	stw		$hi0,0($tp)
-	stw		$hi1,4($tp)
-
-	ldo		`$LOCALS+32+4`($fp),$tp
-	sub		%r0,%r0,%r0		; clear borrow
-	ldw		-4($tp),$ti0
-	addl		$tp,$arrsz,$tp
-L\$sub_pa11
-	ldwx		$idx($np),$hi0
-	subb		$ti0,$hi0,$hi1
-	ldwx		$idx($tp),$ti0
-	addib,<>	4,$idx,L\$sub_pa11
-	stws,ma		$hi1,4($rp)
-
-	subb		$ti0,%r0,$hi1
-	ldo		-4($tp),$tp
-	and		$tp,$hi1,$ap
-	andcm		$rp,$hi1,$bp
-	or		$ap,$bp,$np
-
-	sub		$rp,$arrsz,$rp		; rewind rp
-	subi		0,$arrsz,$idx
-	ldo		`$LOCALS+32`($fp),$tp
-L\$copy_pa11
-	ldwx		$idx($np),$hi0
-	stws,ma		%r0,4($tp)
-	addib,<>	4,$idx,L\$copy_pa11
-	stws,ma		$hi0,4($rp)	
-
-	nop					; alignment
-L\$done
-___
-}
-
-$code.=<<___;
-	ldi		1,%r28			; signal "handled"
-	ldo		$FRAME($fp),%sp		; destroy tp[num+1]
-
-	$POP	`-$FRAME-$SAVED_RP`(%sp),%r2	; standard epilogue
-	$POP	`-$FRAME+1*$SIZE_T`(%sp),%r4
-	$POP	`-$FRAME+2*$SIZE_T`(%sp),%r5
-	$POP	`-$FRAME+3*$SIZE_T`(%sp),%r6
-	$POP	`-$FRAME+4*$SIZE_T`(%sp),%r7
-	$POP	`-$FRAME+5*$SIZE_T`(%sp),%r8
-	$POP	`-$FRAME+6*$SIZE_T`(%sp),%r9
-	$POP	`-$FRAME+7*$SIZE_T`(%sp),%r10
-L\$abort
-	bv	(%r2)
-	.EXIT
-	$POPMB	-$FRAME(%sp),%r3
-	.PROCEND
-	.STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-# Explicitly encode PA-RISC 2.0 instructions used in this module, so
-# that it can be compiled with .LEVEL 1.0. It should be noted that I
-# wouldn't have to do this, if GNU assembler understood .ALLOW 2.0
-# directive...
-
-my $ldd = sub {
-  my ($mod,$args) = @_;
-  my $orig = "ldd$mod\t$args";
-
-    if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/)		# format 4
-    {	my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3;
-	sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
-    }
-    elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/)	# format 5
-    {	my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3;
-	$opcode|=(($1&0xF)<<17)|(($1&0x10)<<12);		# encode offset
-	$opcode|=(1<<5)  if ($mod =~ /^,m/);
-	$opcode|=(1<<13) if ($mod =~ /^,mb/);
-	sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
-    }
-    else { "\t".$orig; }
-};
-
-my $std = sub {
-  my ($mod,$args) = @_;
-  my $orig = "std$mod\t$args";
-
-    if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/)	# format 6
-    {	my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6);
-	$opcode|=(($2&0xF)<<1)|(($2&0x10)>>4);			# encode offset
-	$opcode|=(1<<5)  if ($mod =~ /^,m/);
-	$opcode|=(1<<13) if ($mod =~ /^,mb/);
-	sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
-    }
-    else { "\t".$orig; }
-};
-
-my $extrd = sub {
-  my ($mod,$args) = @_;
-  my $orig = "extrd$mod\t$args";
-
-    # I only have ",u" completer, it's implicitly encoded...
-    if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/)	# format 15
-    {	my $opcode=(0x36<<26)|($1<<21)|($4<<16);
-	my $len=32-$3;
-	$opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5);		# encode pos
-	$opcode |= (($len&0x20)<<7)|($len&0x1f);		# encode len
-	sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
-    }
-    elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/)	# format 12
-    {	my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9);
-	my $len=32-$2;
-	$opcode |= (($len&0x20)<<3)|($len&0x1f);		# encode len
-	$opcode |= (1<<13) if ($mod =~ /,\**=/);
-	sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
-    }
-    else { "\t".$orig; }
-};
-
-my $shrpd = sub {
-  my ($mod,$args) = @_;
-  my $orig = "shrpd$mod\t$args";
-
-    if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/)	# format 14
-    {	my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4;
-	my $cpos=63-$3;
-	$opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5);		# encode sa
-	sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
-    }
-    else { "\t".$orig; }
-};
-
-my $sub = sub {
-  my ($mod,$args) = @_;
-  my $orig = "sub$mod\t$args";
-
-    if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) {
-	my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3;
-	$opcode|=(1<<10);	# e1
-	$opcode|=(1<<8);	# e2
-	$opcode|=(1<<5);	# d
-	sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig
-    }
-    else { "\t".$orig; }
-};
-
-sub assemble {
-  my ($mnemonic,$mod,$args)=@_;
-  my $opcode = eval("\$$mnemonic");
-
-    ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args";
-}
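-
-# For instance, "extrd,u %r19,31,32,%r20" is dispatched to the $extrd
-# encoder above and comes back as a "\t.WORD\t0x%08x" line, while any
-# mnemonic without an encoder passes through verbatim.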
-
-foreach (split("\n",$code)) {
-	s/\`([^\`]*)\`/eval $1/ge;
-	# flip word order in 64-bit mode...
-	s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8);
-	# assemble 2.0 instructions in 32-bit mode...
-	s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4);
-
-	s/\bbv\b/bve/gm	if ($SIZE_T==8);
-
-	print $_,"\n";
-}
-close STDOUT;

+ 0 - 334
drivers/builtin_openssl2/crypto/bn/asm/ppc-mont.pl

@@ -1,334 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# April 2006
-
-# "Teaser" Montgomery multiplication module for PowerPC. It's possible
-# to gain a bit more by modulo-scheduling outer loop, then dedicated
-# squaring procedure should give further 20% and code can be adapted
-# for 32-bit application running on 64-bit CPU. As for the latter.
-# It won't be able to achieve "native" 64-bit performance, because in
-# 32-bit application context every addc instruction will have to be
-# expanded as addc, twice right shift by 32 and finally adde, etc.
-# So far RSA *sign* performance improvement over pre-bn_mul_mont asm
-# for 64-bit application running on PPC970/G5 is:
-#
-# 512-bit	+65%	
-# 1024-bit	+35%
-# 2048-bit	+18%
-# 4096-bit	+4%
-
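-# For reference, bn_mul_mont computes rp = ap*bp*R^-1 mod np with
-# R = 2^(BITS*num).  A whole-operand model of that contract (an
-# illustrative sketch, never called by the build; it takes
-# N' = -np^-1 mod R, whose low word is the n0[0] the real code uses):
-use Math::BigInt;
-sub mont_mul_ref {
-	my ($ap,$bp,$np,$nprime,$R) = @_;	# Math::BigInt operands
-	my $t = $ap*$bp;
-	my $m = ($t*$nprime) % $R;		# reduction multiplier
-	my $u = ($t + $m*$np)/$R;		# exact division by R
-	$u -= $np if $u >= $np;			# single conditional subtract
-	return $u;				# == ap*bp*R^-1 mod np
-}
-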
-$flavour = shift;
-
-if ($flavour =~ /32/) {
-	$BITS=	32;
-	$BNSZ=	$BITS/8;
-	$SIZE_T=4;
-	$RZONE=	224;
-
-	$LD=	"lwz";		# load
-	$LDU=	"lwzu";		# load and update
-	$LDX=	"lwzx";		# load indexed
-	$ST=	"stw";		# store
-	$STU=	"stwu";		# store and update
-	$STX=	"stwx";		# store indexed
-	$STUX=	"stwux";	# store indexed and update
-	$UMULL=	"mullw";	# unsigned multiply low
-	$UMULH=	"mulhwu";	# unsigned multiply high
-	$UCMP=	"cmplw";	# unsigned compare
-	$SHRI=	"srwi";		# unsigned shift right by immediate	
-	$PUSH=	$ST;
-	$POP=	$LD;
-} elsif ($flavour =~ /64/) {
-	$BITS=	64;
-	$BNSZ=	$BITS/8;
-	$SIZE_T=8;
-	$RZONE=	288;
-
-	# same as above, but 64-bit mnemonics...
-	$LD=	"ld";		# load
-	$LDU=	"ldu";		# load and update
-	$LDX=	"ldx";		# load indexed
-	$ST=	"std";		# store
-	$STU=	"stdu";		# store and update
-	$STX=	"stdx";		# store indexed
-	$STUX=	"stdux";	# store indexed and update
-	$UMULL=	"mulld";	# unsigned multiply low
-	$UMULH=	"mulhdu";	# unsigned multiply high
-	$UCMP=	"cmpld";	# unsigned compare
-	$SHRI=	"srdi";		# unsigned shift right by immediate	
-	$PUSH=	$ST;
-	$POP=	$LD;
-} else { die "nonsense $flavour"; }
-
-$FRAME=8*$SIZE_T+$RZONE;
-$LOCALS=8*$SIZE_T;
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
-die "can't locate ppc-xlate.pl";
-
-open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
-
-$sp="r1";
-$toc="r2";
-$rp="r3";	$ovf="r3";
-$ap="r4";
-$bp="r5";
-$np="r6";
-$n0="r7";
-$num="r8";
-$rp="r9";	# $rp is reassigned
-$aj="r10";
-$nj="r11";
-$tj="r12";
-# non-volatile registers
-$i="r20";
-$j="r21";
-$tp="r22";
-$m0="r23";
-$m1="r24";
-$lo0="r25";
-$hi0="r26";
-$lo1="r27";
-$hi1="r28";
-$alo="r29";
-$ahi="r30";
-$nlo="r31";
-#
-$nhi="r0";
-
-$code=<<___;
-.machine "any"
-.text
-
-.globl	.bn_mul_mont_int
-.align	4
-.bn_mul_mont_int:
-	cmpwi	$num,4
-	mr	$rp,r3		; $rp is reassigned
-	li	r3,0
-	bltlr
-___
-$code.=<<___ if ($BNSZ==4);
-	cmpwi	$num,32		; performance is not better for longer keys
-	bgelr
-___
-$code.=<<___;
-	slwi	$num,$num,`log($BNSZ)/log(2)`
-	li	$tj,-4096
-	addi	$ovf,$num,$FRAME
-	subf	$ovf,$ovf,$sp	; $sp-$ovf
-	and	$ovf,$ovf,$tj	; minimize TLB usage
-	subf	$ovf,$sp,$ovf	; $ovf-$sp
-	mr	$tj,$sp
-	srwi	$num,$num,`log($BNSZ)/log(2)`
-	$STUX	$sp,$sp,$ovf
-
-	$PUSH	r20,`-12*$SIZE_T`($tj)
-	$PUSH	r21,`-11*$SIZE_T`($tj)
-	$PUSH	r22,`-10*$SIZE_T`($tj)
-	$PUSH	r23,`-9*$SIZE_T`($tj)
-	$PUSH	r24,`-8*$SIZE_T`($tj)
-	$PUSH	r25,`-7*$SIZE_T`($tj)
-	$PUSH	r26,`-6*$SIZE_T`($tj)
-	$PUSH	r27,`-5*$SIZE_T`($tj)
-	$PUSH	r28,`-4*$SIZE_T`($tj)
-	$PUSH	r29,`-3*$SIZE_T`($tj)
-	$PUSH	r30,`-2*$SIZE_T`($tj)
-	$PUSH	r31,`-1*$SIZE_T`($tj)
-
-	$LD	$n0,0($n0)	; pull n0[0] value
-	addi	$num,$num,-2	; adjust $num for counter register
-
-	$LD	$m0,0($bp)	; m0=bp[0]
-	$LD	$aj,0($ap)	; ap[0]
-	addi	$tp,$sp,$LOCALS
-	$UMULL	$lo0,$aj,$m0	; ap[0]*bp[0]
-	$UMULH	$hi0,$aj,$m0
-
-	$LD	$aj,$BNSZ($ap)	; ap[1]
-	$LD	$nj,0($np)	; np[0]
-
-	$UMULL	$m1,$lo0,$n0	; "tp[0]"*n0
-
-	$UMULL	$alo,$aj,$m0	; ap[1]*bp[0]
-	$UMULH	$ahi,$aj,$m0
-
-	$UMULL	$lo1,$nj,$m1	; np[0]*m1
-	$UMULH	$hi1,$nj,$m1
-	$LD	$nj,$BNSZ($np)	; np[1]
-	addc	$lo1,$lo1,$lo0
-	addze	$hi1,$hi1
-
-	$UMULL	$nlo,$nj,$m1	; np[1]*m1
-	$UMULH	$nhi,$nj,$m1
-
-	mtctr	$num
-	li	$j,`2*$BNSZ`
-.align	4
-L1st:
-	$LDX	$aj,$ap,$j	; ap[j]
-	addc	$lo0,$alo,$hi0
-	$LDX	$nj,$np,$j	; np[j]
-	addze	$hi0,$ahi
-	$UMULL	$alo,$aj,$m0	; ap[j]*bp[0]
-	addc	$lo1,$nlo,$hi1
-	$UMULH	$ahi,$aj,$m0
-	addze	$hi1,$nhi
-	$UMULL	$nlo,$nj,$m1	; np[j]*m1
-	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[0]
-	$UMULH	$nhi,$nj,$m1
-	addze	$hi1,$hi1
-	$ST	$lo1,0($tp)	; tp[j-1]
-
-	addi	$j,$j,$BNSZ	; j++
-	addi	$tp,$tp,$BNSZ	; tp++
-	bdnz-	L1st
-;L1st
-	addc	$lo0,$alo,$hi0
-	addze	$hi0,$ahi
-
-	addc	$lo1,$nlo,$hi1
-	addze	$hi1,$nhi
-	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[0]
-	addze	$hi1,$hi1
-	$ST	$lo1,0($tp)	; tp[j-1]
-
-	li	$ovf,0
-	addc	$hi1,$hi1,$hi0
-	addze	$ovf,$ovf	; upmost overflow bit
-	$ST	$hi1,$BNSZ($tp)
-
-	li	$i,$BNSZ
-.align	4
-Louter:
-	$LDX	$m0,$bp,$i	; m0=bp[i]
-	$LD	$aj,0($ap)	; ap[0]
-	addi	$tp,$sp,$LOCALS
-	$LD	$tj,$LOCALS($sp); tp[0]
-	$UMULL	$lo0,$aj,$m0	; ap[0]*bp[i]
-	$UMULH	$hi0,$aj,$m0
-	$LD	$aj,$BNSZ($ap)	; ap[1]
-	$LD	$nj,0($np)	; np[0]
-	addc	$lo0,$lo0,$tj	; ap[0]*bp[i]+tp[0]
-	$UMULL	$alo,$aj,$m0	; ap[j]*bp[i]
-	addze	$hi0,$hi0
-	$UMULL	$m1,$lo0,$n0	; tp[0]*n0
-	$UMULH	$ahi,$aj,$m0
-	$UMULL	$lo1,$nj,$m1	; np[0]*m1
-	$UMULH	$hi1,$nj,$m1
-	$LD	$nj,$BNSZ($np)	; np[1]
-	addc	$lo1,$lo1,$lo0
-	$UMULL	$nlo,$nj,$m1	; np[1]*m1
-	addze	$hi1,$hi1
-	$UMULH	$nhi,$nj,$m1
-
-	mtctr	$num
-	li	$j,`2*$BNSZ`
-.align	4
-Linner:
-	$LDX	$aj,$ap,$j	; ap[j]
-	addc	$lo0,$alo,$hi0
-	$LD	$tj,$BNSZ($tp)	; tp[j]
-	addze	$hi0,$ahi
-	$LDX	$nj,$np,$j	; np[j]
-	addc	$lo1,$nlo,$hi1
-	$UMULL	$alo,$aj,$m0	; ap[j]*bp[i]
-	addze	$hi1,$nhi
-	$UMULH	$ahi,$aj,$m0
-	addc	$lo0,$lo0,$tj	; ap[j]*bp[i]+tp[j]
-	$UMULL	$nlo,$nj,$m1	; np[j]*m1
-	addze	$hi0,$hi0
-	$UMULH	$nhi,$nj,$m1
-	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[i]+tp[j]
-	addi	$j,$j,$BNSZ	; j++
-	addze	$hi1,$hi1
-	$ST	$lo1,0($tp)	; tp[j-1]
-	addi	$tp,$tp,$BNSZ	; tp++
-	bdnz-	Linner
-;Linner
-	$LD	$tj,$BNSZ($tp)	; tp[j]
-	addc	$lo0,$alo,$hi0
-	addze	$hi0,$ahi
-	addc	$lo0,$lo0,$tj	; ap[j]*bp[i]+tp[j]
-	addze	$hi0,$hi0
-
-	addc	$lo1,$nlo,$hi1
-	addze	$hi1,$nhi
-	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[i]+tp[j]
-	addze	$hi1,$hi1
-	$ST	$lo1,0($tp)	; tp[j-1]
-
-	addic	$ovf,$ovf,-1	; move upmost overflow to XER[CA]
-	li	$ovf,0
-	adde	$hi1,$hi1,$hi0
-	addze	$ovf,$ovf
-	$ST	$hi1,$BNSZ($tp)
-;
-	slwi	$tj,$num,`log($BNSZ)/log(2)`
-	$UCMP	$i,$tj
-	addi	$i,$i,$BNSZ
-	ble-	Louter
-
-	addi	$num,$num,2	; restore $num
-	subfc	$j,$j,$j	; j=0 and "clear" XER[CA]
-	addi	$tp,$sp,$LOCALS
-	mtctr	$num
-
-.align	4
-Lsub:	$LDX	$tj,$tp,$j
-	$LDX	$nj,$np,$j
-	subfe	$aj,$nj,$tj	; tp[j]-np[j]
-	$STX	$aj,$rp,$j
-	addi	$j,$j,$BNSZ
-	bdnz-	Lsub
-
-	li	$j,0
-	mtctr	$num
-	subfe	$ovf,$j,$ovf	; handle upmost overflow bit
-	and	$ap,$tp,$ovf
-	andc	$np,$rp,$ovf
-	or	$ap,$ap,$np	; ap=borrow?tp:rp
-
-.align	4
-Lcopy:				; copy or in-place refresh
-	$LDX	$tj,$ap,$j
-	$STX	$tj,$rp,$j
-	$STX	$j,$tp,$j	; zap at once
-	addi	$j,$j,$BNSZ
-	bdnz-	Lcopy
-
-	$POP	$tj,0($sp)
-	li	r3,1
-	$POP	r20,`-12*$SIZE_T`($tj)
-	$POP	r21,`-11*$SIZE_T`($tj)
-	$POP	r22,`-10*$SIZE_T`($tj)
-	$POP	r23,`-9*$SIZE_T`($tj)
-	$POP	r24,`-8*$SIZE_T`($tj)
-	$POP	r25,`-7*$SIZE_T`($tj)
-	$POP	r26,`-6*$SIZE_T`($tj)
-	$POP	r27,`-5*$SIZE_T`($tj)
-	$POP	r28,`-4*$SIZE_T`($tj)
-	$POP	r29,`-3*$SIZE_T`($tj)
-	$POP	r30,`-2*$SIZE_T`($tj)
-	$POP	r31,`-1*$SIZE_T`($tj)
-	mr	$sp,$tj
-	blr
-	.long	0
-	.byte	0,12,4,0,0x80,12,6,0
-	.long	0
-
-.asciz  "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
-close STDOUT;

+ 0 - 1998
drivers/builtin_openssl2/crypto/bn/asm/ppc.pl

@@ -1,1998 +0,0 @@
-#!/usr/bin/env perl
-#
-# Implemented as a Perl wrapper as we want to support several different
-# architectures with a single file. We pick the target based on the
-# file name we are asked to generate.
-#
-# It should be noted though that this Perl code is nothing like
-# <openssl>/crypto/perlasm/x86*. In this case Perl is used pretty much
-# as a pre-processor to cover for platform differences in name decoration,
-# linker tables, 32-/64-bit instruction sets...
-#
-# As you might know, there are several PowerPC ABIs in use. Most notably,
-# Linux and AIX use different 32-bit ABIs. The good news is that these
-# ABIs are similar enough to implement leaf(!) functions, which would be
-# ABI neutral. And that's what you find here: ABI-neutral leaf functions.
-# In case you wonder what that is...
-#
-#       AIX performance
-#
-#	MEASUREMENTS WITH cc ON a 200 MHz PowerPC 604e.
-#
-#	The following is the performance of 32-bit compiler
-#	generated code:
-#
-#	OpenSSL 0.9.6c 21 dec 2001
-#	built on: Tue Jun 11 11:06:51 EDT 2002
-#	options:bn(64,32) ...
-#compiler: cc -DTHREADS  -DAIX -DB_ENDIAN -DBN_LLONG -O3
-#                  sign    verify    sign/s verify/s
-#rsa  512 bits   0.0098s   0.0009s    102.0   1170.6
-#rsa 1024 bits   0.0507s   0.0026s     19.7    387.5
-#rsa 2048 bits   0.3036s   0.0085s      3.3    117.1
-#rsa 4096 bits   2.0040s   0.0299s      0.5     33.4
-#dsa  512 bits   0.0087s   0.0106s    114.3     94.5
-#dsa 1024 bits   0.0256s   0.0313s     39.0     32.0	
-#
-#	Same benchmark with this assembler code:
-#
-#rsa  512 bits   0.0056s   0.0005s    178.6   2049.2
-#rsa 1024 bits   0.0283s   0.0015s     35.3    674.1
-#rsa 2048 bits   0.1744s   0.0050s      5.7    201.2
-#rsa 4096 bits   1.1644s   0.0179s      0.9     55.7
-#dsa  512 bits   0.0052s   0.0062s    191.6    162.0
-#dsa 1024 bits   0.0149s   0.0180s     67.0     55.5
-#
-#	Number of operations increases by almost 75%
-#
-#	Here are performance numbers for 64-bit compiler
-#	generated code:
-#
-#	OpenSSL 0.9.6g [engine] 9 Aug 2002
-#	built on: Fri Apr 18 16:59:20 EDT 2003
-#	options:bn(64,64) ...
-#	compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
-#                  sign    verify    sign/s verify/s
-#rsa  512 bits   0.0028s   0.0003s    357.1   3844.4
-#rsa 1024 bits   0.0148s   0.0008s     67.5   1239.7
-#rsa 2048 bits   0.0963s   0.0028s     10.4    353.0
-#rsa 4096 bits   0.6538s   0.0102s      1.5     98.1
-#dsa  512 bits   0.0026s   0.0032s    382.5    313.7
-#dsa 1024 bits   0.0081s   0.0099s    122.8    100.6
-#
-#	Same benchmark with this assembler code:
-#
-#rsa  512 bits   0.0020s   0.0002s    510.4   6273.7
-#rsa 1024 bits   0.0088s   0.0005s    114.1   2128.3
-#rsa 2048 bits   0.0540s   0.0016s     18.5    622.5
-#rsa 4096 bits   0.3700s   0.0058s      2.7    171.0
-#dsa  512 bits   0.0016s   0.0020s    610.7    507.1
-#dsa 1024 bits   0.0047s   0.0058s    212.5    173.2
-#	
-#	Again, performance increases by about 75%
-#
-#       Mac OS X, Apple G5 1.8GHz (Note this is 32 bit code)
-#       OpenSSL 0.9.7c 30 Sep 2003
-#
-#       Original code.
-#
-#rsa  512 bits   0.0011s   0.0001s    906.1  11012.5
-#rsa 1024 bits   0.0060s   0.0003s    166.6   3363.1
-#rsa 2048 bits   0.0370s   0.0010s     27.1    982.4
-#rsa 4096 bits   0.2426s   0.0036s      4.1    280.4
-#dsa  512 bits   0.0010s   0.0012s   1038.1    841.5
-#dsa 1024 bits   0.0030s   0.0037s    329.6    269.7
-#dsa 2048 bits   0.0101s   0.0127s     98.9     78.6
-#
-#       Same benchmark with this assembler code:
-#
-#rsa  512 bits   0.0007s   0.0001s   1416.2  16645.9
-#rsa 1024 bits   0.0036s   0.0002s    274.4   5380.6
-#rsa 2048 bits   0.0222s   0.0006s     45.1   1589.5
-#rsa 4096 bits   0.1469s   0.0022s      6.8    449.6
-#dsa  512 bits   0.0006s   0.0007s   1664.2   1376.2
-#dsa 1024 bits   0.0018s   0.0023s    545.0    442.2
-#dsa 2048 bits   0.0061s   0.0075s    163.5    132.8
-#
-#        Performance increase of ~60%
-#
-#	If you have comments or suggestions to improve code send
-#	me a note at [email protected]
-#
-
-$flavour = shift;
-
-if ($flavour =~ /32/) {
-	$BITS=	32;
-	$BNSZ=	$BITS/8;
-	$ISA=	"\"ppc\"";
-
-	$LD=	"lwz";		# load
-	$LDU=	"lwzu";		# load and update
-	$ST=	"stw";		# store
-	$STU=	"stwu";		# store and update
-	$UMULL=	"mullw";	# unsigned multiply low
-	$UMULH=	"mulhwu";	# unsigned multiply high
-	$UDIV=	"divwu";	# unsigned divide
-	$UCMPI=	"cmplwi";	# unsigned compare with immediate
-	$UCMP=	"cmplw";	# unsigned compare
-	$CNTLZ=	"cntlzw";	# count leading zeros
-	$SHL=	"slw";		# shift left
-	$SHR=	"srw";		# unsigned shift right
-	$SHRI=	"srwi";		# unsigned shift right by immediate	
-	$SHLI=	"slwi";		# shift left by immediate
-	$CLRU=	"clrlwi";	# clear upper bits
-	$INSR=	"insrwi";	# insert right
-	$ROTL=	"rotlwi";	# rotate left by immediate
-	$TR=	"tw";		# conditional trap
-} elsif ($flavour =~ /64/) {
-	$BITS=	64;
-	$BNSZ=	$BITS/8;
-	$ISA=	"\"ppc64\"";
-
-	# same as above, but 64-bit mnemonics...
-	$LD=	"ld";		# load
-	$LDU=	"ldu";		# load and update
-	$ST=	"std";		# store
-	$STU=	"stdu";		# store and update
-	$UMULL=	"mulld";	# unsigned multiply low
-	$UMULH=	"mulhdu";	# unsigned multiply high
-	$UDIV=	"divdu";	# unsigned divide
-	$UCMPI=	"cmpldi";	# unsigned compare with immediate
-	$UCMP=	"cmpld";	# unsigned compare
-	$CNTLZ=	"cntlzd";	# count leading zeros
-	$SHL=	"sld";		# shift left
-	$SHR=	"srd";		# unsigned shift right
-	$SHRI=	"srdi";		# unsigned shift right by immediate	
-	$SHLI=	"sldi";		# shift left by immediate
-	$CLRU=	"clrldi";	# clear upper bits
-	$INSR=	"insrdi";	# insert right 
-	$ROTL=	"rotldi";	# rotate left by immediate
-	$TR=	"td";		# conditional trap
-} else { die "nonsense $flavour"; }
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
-die "can't locate ppc-xlate.pl";
-
-open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
-
-$data=<<EOF;
-#--------------------------------------------------------------------
-#
-#
-#
-#
-#	File:		ppc32.s
-#
-#	Created by:	Suresh Chari
-#			IBM Thomas J. Watson Research Library
-#			Hawthorne, NY
-#
-#
-#	Description:	Optimized assembly routines for OpenSSL crypto
-#			on the 32-bit PowerPC platform.
-#
-#
-#	Version History
-#
-#	2. Fixed bn_add,bn_sub and bn_div_words, added comments,
-#	   cleaned up code. Also made a single version which can
-#	   be used for both the AIX and Linux compilers. See NOTE
-#	   below.
-#				12/05/03		Suresh Chari
-#			(with lots of help from)        Andy Polyakov
-##	
-#	1. Initial version	10/20/02		Suresh Chari
-#
-#
-#	The following file works for the xlc,cc
-#	and gcc compilers.
-#
-#	NOTE:	To get the file to link correctly with the gcc compiler
-#	        you have to change the names of the routines and remove
-#		the first .(dot) character. This should automatically
-#		be done in the build process.
-#
-#	Hand optimized assembly code for the following routines
-#	
-#	bn_sqr_comba4
-#	bn_sqr_comba8
-#	bn_mul_comba4
-#	bn_mul_comba8
-#	bn_sub_words
-#	bn_add_words
-#	bn_div_words
-#	bn_sqr_words
-#	bn_mul_words
-#	bn_mul_add_words
-#
-#	NOTE:	It is possible to optimize this code more for
-#	specific PowerPC or Power architectures. On the Northstar
-#	architecture the optimizations in this file do
-#	 NOT provide much improvement.
-#
-#	If you have comments or suggestions to improve code send
-#	me a note at schari\@us.ibm.com
-#
-#--------------------------------------------------------------------------
-#
-#	Defines to be used in the assembly code.
-#	
-#.set r0,0	# we use it as storage for value of 0
-#.set SP,1	# preserved
-#.set RTOC,2	# preserved 
-#.set r3,3	# 1st argument/return value
-#.set r4,4	# 2nd argument/volatile register
-#.set r5,5	# 3rd argument/volatile register
-#.set r6,6	# ...
-#.set r7,7
-#.set r8,8
-#.set r9,9
-#.set r10,10
-#.set r11,11
-#.set r12,12
-#.set r13,13	# not used, nor any other "below" it...
-
-#	Declare function names to be global
-#	NOTE:	For gcc these names MUST be changed to remove
-#	        the first . i.e. for example change ".bn_sqr_comba4"
-#		to "bn_sqr_comba4". This should be automatically done
-#		in the build.
-	
-	.globl	.bn_sqr_comba4
-	.globl	.bn_sqr_comba8
-	.globl	.bn_mul_comba4
-	.globl	.bn_mul_comba8
-	.globl	.bn_sub_words
-	.globl	.bn_add_words
-	.globl	.bn_div_words
-	.globl	.bn_sqr_words
-	.globl	.bn_mul_words
-	.globl	.bn_mul_add_words
-	
-# .text section
-	
-	.machine	"any"
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_sqr_comba4" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-
-.align	4
-.bn_sqr_comba4:
-#
-# Optimized version of bn_sqr_comba4.
-#
-# void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
-# r3 contains r
-# r4 contains a
-#
-# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:	
-# 
-# r5,r6 are the two BN_ULONGs being multiplied.
-# r7,r8 hold the result of the 32x32 -> 64-bit multiply.
-# r9,r10,r11 are the equivalents of c1,c2,c3.
-# Here's the assembly
-#
-#
-	xor		r0,r0,r0		# set r0 = 0. Used in the addze
-						# instructions below
-	
-						#sqr_add_c(a,0,c1,c2,c3)
-	$LD		r5,`0*$BNSZ`(r4)		
-	$UMULL		r9,r5,r5		
-	$UMULH		r10,r5,r5		#in first iteration. No need
-						#to add since c1=c2=c3=0.
-						# Note c3(r11) is NOT set to 0
-						# but will be.
-
-	$ST		r9,`0*$BNSZ`(r3)	# r[0]=c1;
-						# sqr_add_c2(a,1,0,c2,c3,c1);
-	$LD		r6,`1*$BNSZ`(r4)		
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-					
-	addc		r7,r7,r7		# compute (r7,r8)=2*(r7,r8)
-	adde		r8,r8,r8
-	addze		r9,r0			# catch carry if any.
-						# r9= r0(=0) and carry 
-	
-	addc		r10,r7,r10		# now add to temp result.
-	addze		r11,r8                  # r8 added to r11 which is 0 
-	addze		r9,r9
-	
-	$ST		r10,`1*$BNSZ`(r3)	#r[1]=c2; 
-						#sqr_add_c(a,1,c3,c1,c2)
-	$UMULL		r7,r6,r6
-	$UMULH		r8,r6,r6
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r0
-						#sqr_add_c2(a,2,0,c3,c1,c2)
-	$LD		r6,`2*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r7,r7,r7
-	adde		r8,r8,r8
-	addze		r10,r10
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	$ST		r11,`2*$BNSZ`(r3)	#r[2]=c3 
-						#sqr_add_c2(a,3,0,c1,c2,c3);
-	$LD		r6,`3*$BNSZ`(r4)		
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r7,r7,r7
-	adde		r8,r8,r8
-	addze		r11,r0
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-						#sqr_add_c2(a,2,1,c1,c2,c3);
-	$LD		r5,`1*$BNSZ`(r4)
-	$LD		r6,`2*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r7,r7,r7
-	adde		r8,r8,r8
-	addze		r11,r11
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	$ST		r9,`3*$BNSZ`(r3)	#r[3]=c1
-						#sqr_add_c(a,2,c2,c3,c1);
-	$UMULL		r7,r6,r6
-	$UMULH		r8,r6,r6
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r0
-						#sqr_add_c2(a,3,1,c2,c3,c1);
-	$LD		r6,`3*$BNSZ`(r4)		
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r7,r7,r7
-	adde		r8,r8,r8
-	addze		r9,r9
-	
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	$ST		r10,`4*$BNSZ`(r3)	#r[4]=c2
-						#sqr_add_c2(a,3,2,c3,c1,c2);
-	$LD		r5,`2*$BNSZ`(r4)		
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r7,r7,r7
-	adde		r8,r8,r8
-	addze		r10,r0
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	$ST		r11,`5*$BNSZ`(r3)	#r[5] = c3
-						#sqr_add_c(a,3,c1,c2,c3);
-	$UMULL		r7,r6,r6		
-	$UMULH		r8,r6,r6
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-
-	$ST		r9,`6*$BNSZ`(r3)	#r[6]=c1
-	$ST		r10,`7*$BNSZ`(r3)	#r[7]=c2
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,2,0
-	.long	0
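The c1/c2/c3 discipline described in the register comments above is easier to follow in C. Below is a minimal sketch of the two primitives the comments name, sqr_add_c and sqr_add_c2, assuming a 32-bit BN_ULONG and a 64-bit intermediate type; it mirrors the semantics of the $UMULL/$UMULH + addc/adde/addze groups, not OpenSSL's actual macros.

    typedef unsigned int BN_ULONG;      /* 32-bit limb, as in the 32-bit flavour */
    typedef unsigned long long u64;     /* 64-bit intermediate */

    /* (c3:c2:c1) += a[i]*a[i] -- one $UMULL/$UMULH + addc/adde/addze group */
    static void sqr_add_c(const BN_ULONG *a, int i,
                          BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
    {
        u64 t = (u64)a[i] * a[i];
        u64 s = (u64)*c1 + (BN_ULONG)t;            /* addc: add low half */
        *c1 = (BN_ULONG)s;
        s = (s >> 32) + *c2 + (BN_ULONG)(t >> 32); /* adde: high half + carry */
        *c2 = (BN_ULONG)s;
        *c3 += (BN_ULONG)(s >> 32);                /* addze: absorb final carry */
    }

    /* (c3:c2:c1) += 2*a[i]*a[j] -- the addc r7,r7,r7 / adde r8,r8,r8 pair is
     * the doubling, and the doubling itself can carry out into c3 */
    static void sqr_add_c2(const BN_ULONG *a, int i, int j,
                           BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
    {
        u64 t = (u64)a[i] * a[j];
        *c3 += (BN_ULONG)(t >> 63);                /* carry out of the doubling */
        t += t;
        u64 s = (u64)*c1 + (BN_ULONG)t;
        *c1 = (BN_ULONG)s;
        s = (s >> 32) + *c2 + (BN_ULONG)(t >> 32);
        *c2 = (BN_ULONG)s;
        *c3 += (BN_ULONG)(s >> 32);
    }

bn_sqr_comba4 is then just a fixed schedule of these calls, storing one r[k] each time a column of the product triangle is complete.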
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_sqr_comba8" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-	
-.align	4
-.bn_sqr_comba8:
-#
-# This is an optimized version of the bn_sqr_comba8 routine.
-# Tightly uses the adde instruction
-#
-#
-# void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
-# r3 contains r
-# r4 contains a
-#
-# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:	
-# 
-# r5,r6 are the two BN_ULONGs being multiplied.
-# r7,r8 hold the result of the 32x32 -> 64-bit multiply.
-# r9,r10,r11 are the equivalents of c1,c2,c3.
-#
-# A possible optimization, loading all 8 longs of a into registers,
-# doesn't provide any speedup.
-# 
-
-	xor		r0,r0,r0		#set r0 = 0.Used in addze
-						#instructions below.
-
-						#sqr_add_c(a,0,c1,c2,c3);
-	$LD		r5,`0*$BNSZ`(r4)
-	$UMULL		r9,r5,r5		#1st iteration:	no carries.
-	$UMULH		r10,r5,r5
-	$ST		r9,`0*$BNSZ`(r3)	# r[0]=c1;
-						#sqr_add_c2(a,1,0,c2,c3,c1);
-	$LD		r6,`1*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6	
-	
-	addc		r10,r7,r10		#add the two-register number
-	adde		r11,r8,r0 		# (r8,r7) to the three-register
-	addze		r9,r0			# number (r9,r11,r10). NOTE: r0=0
-	
-	addc		r10,r7,r10		#add the two-register number
-	adde		r11,r8,r11 		# (r8,r7) to the three-register
-	addze		r9,r9			# number (r9,r11,r10).
-	
-	$ST		r10,`1*$BNSZ`(r3)	# r[1]=c2
-				
-						#sqr_add_c(a,1,c3,c1,c2);
-	$UMULL		r7,r6,r6
-	$UMULH		r8,r6,r6
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r0
-						#sqr_add_c2(a,2,0,c3,c1,c2);
-	$LD		r6,`2*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	
-	$ST		r11,`2*$BNSZ`(r3)	#r[2]=c3
-						#sqr_add_c2(a,3,0,c1,c2,c3);
-	$LD		r6,`3*$BNSZ`(r4)	#r6 = a[3]. r5 is already a[0].
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r0
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-						#sqr_add_c2(a,2,1,c1,c2,c3);
-	$LD		r5,`1*$BNSZ`(r4)
-	$LD		r6,`2*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	
-	$ST		r9,`3*$BNSZ`(r3)	#r[3]=c1;
-						#sqr_add_c(a,2,c2,c3,c1);
-	$UMULL		r7,r6,r6
-	$UMULH		r8,r6,r6
-	
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r0
-						#sqr_add_c2(a,3,1,c2,c3,c1);
-	$LD		r6,`3*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-						#sqr_add_c2(a,4,0,c2,c3,c1);
-	$LD		r5,`0*$BNSZ`(r4)
-	$LD		r6,`4*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	$ST		r10,`4*$BNSZ`(r3)	#r[4]=c2;
-						#sqr_add_c2(a,5,0,c3,c1,c2);
-	$LD		r6,`5*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r0
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-						#sqr_add_c2(a,4,1,c3,c1,c2);
-	$LD		r5,`1*$BNSZ`(r4)
-	$LD		r6,`4*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-						#sqr_add_c2(a,3,2,c3,c1,c2);
-	$LD		r5,`2*$BNSZ`(r4)
-	$LD		r6,`3*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	$ST		r11,`5*$BNSZ`(r3)	#r[5]=c3;
-						#sqr_add_c(a,3,c1,c2,c3);
-	$UMULL		r7,r6,r6
-	$UMULH		r8,r6,r6
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r0
-						#sqr_add_c2(a,4,2,c1,c2,c3);
-	$LD		r6,`4*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-						#sqr_add_c2(a,5,1,c1,c2,c3);
-	$LD		r5,`1*$BNSZ`(r4)
-	$LD		r6,`5*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-						#sqr_add_c2(a,6,0,c1,c2,c3);
-	$LD		r5,`0*$BNSZ`(r4)
-	$LD		r6,`6*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	$ST		r9,`6*$BNSZ`(r3)	#r[6]=c1;
-						#sqr_add_c2(a,7,0,c2,c3,c1);
-	$LD		r6,`7*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r0
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-						#sqr_add_c2(a,6,1,c2,c3,c1);
-	$LD		r5,`1*$BNSZ`(r4)
-	$LD		r6,`6*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-						#sqr_add_c2(a,5,2,c2,c3,c1);
-	$LD		r5,`2*$BNSZ`(r4)
-	$LD		r6,`5*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-						#sqr_add_c2(a,4,3,c2,c3,c1);
-	$LD		r5,`3*$BNSZ`(r4)
-	$LD		r6,`4*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	$ST		r10,`7*$BNSZ`(r3)	#r[7]=c2;
-						#sqr_add_c(a,4,c3,c1,c2);
-	$UMULL		r7,r6,r6
-	$UMULH		r8,r6,r6
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r0
-						#sqr_add_c2(a,5,3,c3,c1,c2);
-	$LD		r6,`5*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-						#sqr_add_c2(a,6,2,c3,c1,c2);
-	$LD		r5,`2*$BNSZ`(r4)
-	$LD		r6,`6*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-						#sqr_add_c2(a,7,1,c3,c1,c2);
-	$LD		r5,`1*$BNSZ`(r4)
-	$LD		r6,`7*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	$ST		r11,`8*$BNSZ`(r3)	#r[8]=c3;
-						#sqr_add_c2(a,7,2,c1,c2,c3);
-	$LD		r5,`2*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r0
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-						#sqr_add_c2(a,6,3,c1,c2,c3);
-	$LD		r5,`3*$BNSZ`(r4)
-	$LD		r6,`6*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-						#sqr_add_c2(a,5,4,c1,c2,c3);
-	$LD		r5,`4*$BNSZ`(r4)
-	$LD		r6,`5*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	$ST		r9,`9*$BNSZ`(r3)	#r[9]=c1;
-						#sqr_add_c(a,5,c2,c3,c1);
-	$UMULL		r7,r6,r6
-	$UMULH		r8,r6,r6
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r0
-						#sqr_add_c2(a,6,4,c2,c3,c1);
-	$LD		r6,`6*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-						#sqr_add_c2(a,7,3,c2,c3,c1);
-	$LD		r5,`3*$BNSZ`(r4)
-	$LD		r6,`7*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	$ST		r10,`10*$BNSZ`(r3)	#r[10]=c2;
-						#sqr_add_c2(a,7,4,c3,c1,c2);
-	$LD		r5,`4*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r0
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-						#sqr_add_c2(a,6,5,c3,c1,c2);
-	$LD		r5,`5*$BNSZ`(r4)
-	$LD		r6,`6*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	addze		r10,r10
-	$ST		r11,`11*$BNSZ`(r3)	#r[11]=c3;
-						#sqr_add_c(a,6,c1,c2,c3);
-	$UMULL		r7,r6,r6
-	$UMULH		r8,r6,r6
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r0
-						#sqr_add_c2(a,7,5,c1,c2,c3)
-	$LD		r6,`7*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	addc		r9,r7,r9
-	adde		r10,r8,r10
-	addze		r11,r11
-	$ST		r9,`12*$BNSZ`(r3)	#r[12]=c1;
-	
-						#sqr_add_c2(a,7,6,c2,c3,c1)
-	$LD		r5,`6*$BNSZ`(r4)
-	$UMULL		r7,r5,r6
-	$UMULH		r8,r5,r6
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r0
-	addc		r10,r7,r10
-	adde		r11,r8,r11
-	addze		r9,r9
-	$ST		r10,`13*$BNSZ`(r3)	#r[13]=c2;
-						#sqr_add_c(a,7,c3,c1,c2);
-	$UMULL		r7,r6,r6
-	$UMULH		r8,r6,r6
-	addc		r11,r7,r11
-	adde		r9,r8,r9
-	$ST		r11,`14*$BNSZ`(r3)	#r[14]=c3;
-	$ST		r9, `15*$BNSZ`(r3)	#r[15]=c1;
-
-
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,2,0
-	.long	0
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_mul_comba4" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-
-.align	4
-.bn_mul_comba4:
-#
-# This is an optimized version of the bn_mul_comba4 routine.
-#
-# void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
-# r3 contains r
-# r4 contains a
-# r5 contains b
-# r6, r7 are the 2 BN_ULONGs being multiplied.
-# r8, r9 hold the result of the 32x32 -> 64-bit multiply.
-# r10, r11, r12 are the equivalents of c1, c2, and c3.
-#
-	xor	r0,r0,r0		#r0=0. Used in addze below.
-					#mul_add_c(a[0],b[0],c1,c2,c3);
-	$LD	r6,`0*$BNSZ`(r4)		
-	$LD	r7,`0*$BNSZ`(r5)		
-	$UMULL	r10,r6,r7		
-	$UMULH	r11,r6,r7		
-	$ST	r10,`0*$BNSZ`(r3)	#r[0]=c1
-					#mul_add_c(a[0],b[1],c2,c3,c1);
-	$LD	r7,`1*$BNSZ`(r5)		
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r8,r11
-	adde	r12,r9,r0
-	addze	r10,r0
-					#mul_add_c(a[1],b[0],c2,c3,c1);
-	$LD	r6, `1*$BNSZ`(r4)		
-	$LD	r7, `0*$BNSZ`(r5)		
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r8,r11
-	adde	r12,r9,r12
-	addze	r10,r10
-	$ST	r11,`1*$BNSZ`(r3)	#r[1]=c2
-					#mul_add_c(a[2],b[0],c3,c1,c2);
-	$LD	r6,`2*$BNSZ`(r4)		
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r8,r12
-	adde	r10,r9,r10
-	addze	r11,r0
-					#mul_add_c(a[1],b[1],c3,c1,c2);
-	$LD	r6,`1*$BNSZ`(r4)		
-	$LD	r7,`1*$BNSZ`(r5)		
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r8,r12
-	adde	r10,r9,r10
-	addze	r11,r11
-					#mul_add_c(a[0],b[2],c3,c1,c2);
-	$LD	r6,`0*$BNSZ`(r4)		
-	$LD	r7,`2*$BNSZ`(r5)		
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r8,r12
-	adde	r10,r9,r10
-	addze	r11,r11
-	$ST	r12,`2*$BNSZ`(r3)	#r[2]=c3
-					#mul_add_c(a[0],b[3],c1,c2,c3);
-	$LD	r7,`3*$BNSZ`(r5)		
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r8,r10
-	adde	r11,r9,r11
-	addze	r12,r0
-					#mul_add_c(a[1],b[2],c1,c2,c3);
-	$LD	r6,`1*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r8,r10
-	adde	r11,r9,r11
-	addze	r12,r12
-					#mul_add_c(a[2],b[1],c1,c2,c3);
-	$LD	r6,`2*$BNSZ`(r4)
-	$LD	r7,`1*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r8,r10
-	adde	r11,r9,r11
-	addze	r12,r12
-					#mul_add_c(a[3],b[0],c1,c2,c3);
-	$LD	r6,`3*$BNSZ`(r4)
-	$LD	r7,`0*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r8,r10
-	adde	r11,r9,r11
-	addze	r12,r12
-	$ST	r10,`3*$BNSZ`(r3)	#r[3]=c1
-					#mul_add_c(a[3],b[1],c2,c3,c1);
-	$LD	r7,`1*$BNSZ`(r5)		
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r8,r11
-	adde	r12,r9,r12
-	addze	r10,r0
-					#mul_add_c(a[2],b[2],c2,c3,c1);
-	$LD	r6,`2*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r8,r11
-	adde	r12,r9,r12
-	addze	r10,r10
-					#mul_add_c(a[1],b[3],c2,c3,c1);
-	$LD	r6,`1*$BNSZ`(r4)
-	$LD	r7,`3*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r8,r11
-	adde	r12,r9,r12
-	addze	r10,r10
-	$ST	r11,`4*$BNSZ`(r3)	#r[4]=c2
-					#mul_add_c(a[2],b[3],c3,c1,c2);
-	$LD	r6,`2*$BNSZ`(r4)		
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r8,r12
-	adde	r10,r9,r10
-	addze	r11,r0
-					#mul_add_c(a[3],b[2],c3,c1,c2);
-	$LD	r6,`3*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r8,r12
-	adde	r10,r9,r10
-	addze	r11,r11
-	$ST	r12,`5*$BNSZ`(r3)	#r[5]=c3
-					#mul_add_c(a[3],b[3],c1,c2,c3);
-	$LD	r7,`3*$BNSZ`(r5)		
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r8,r10
-	adde	r11,r9,r11
-
-	$ST	r10,`6*$BNSZ`(r3)	#r[6]=c1
-	$ST	r11,`7*$BNSZ`(r3)	#r[7]=c2
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,3,0
-	.long	0
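mul_add_c is the asymmetric counterpart of sqr_add_c above: the same three-limb accumulate, but of a[i]*b[j] and with no doubling. A sketch under the same assumptions (32-bit BN_ULONG, 64-bit u64 intermediate), again only illustrating the semantics of each $UMULL/$UMULH + addc/adde/addze group:

    /* (c3:c2:c1) += a[i]*b[j] */
    static void mul_add_c(const BN_ULONG *a, int i, const BN_ULONG *b, int j,
                          BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
    {
        u64 t = (u64)a[i] * b[j];
        u64 s = (u64)*c1 + (BN_ULONG)t;            /* addc */
        *c1 = (BN_ULONG)s;
        s = (s >> 32) + *c2 + (BN_ULONG)(t >> 32); /* adde */
        *c2 = (BN_ULONG)s;
        *c3 += (BN_ULONG)(s >> 32);                /* addze */
    }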
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_mul_comba8" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-	
-.align	4
-.bn_mul_comba8:
-#
-# Optimized version of the bn_mul_comba8 routine.
-#
-# void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
-# r3 contains r
-# r4 contains a
-# r5 contains b
-# r6, r7 are the 2 BN_ULONGs being multiplied.
-# r8, r9 hold the result of the 32x32 -> 64-bit multiply.
-# r10, r11, r12 are the equivalents of c1, c2, and c3.
-#
-	xor	r0,r0,r0		#r0=0. Used in addze below.
-	
-					#mul_add_c(a[0],b[0],c1,c2,c3);
-	$LD	r6,`0*$BNSZ`(r4)	#a[0]
-	$LD	r7,`0*$BNSZ`(r5)	#b[0]
-	$UMULL	r10,r6,r7
-	$UMULH	r11,r6,r7
-	$ST	r10,`0*$BNSZ`(r3)	#r[0]=c1;
-					#mul_add_c(a[0],b[1],c2,c3,c1);
-	$LD	r7,`1*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	addze	r12,r9			# since we didn't set r12 to zero before.
-	addze	r10,r0
-					#mul_add_c(a[1],b[0],c2,c3,c1);
-	$LD	r6,`1*$BNSZ`(r4)
-	$LD	r7,`0*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-	$ST	r11,`1*$BNSZ`(r3)	#r[1]=c2;
-					#mul_add_c(a[2],b[0],c3,c1,c2);
-	$LD	r6,`2*$BNSZ`(r4)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r0
-					#mul_add_c(a[1],b[1],c3,c1,c2);
-	$LD	r6,`1*$BNSZ`(r4)
-	$LD	r7,`1*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[0],b[2],c3,c1,c2);
-	$LD	r6,`0*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-	$ST	r12,`2*$BNSZ`(r3)	#r[2]=c3;
-					#mul_add_c(a[0],b[3],c1,c2,c3);
-	$LD	r7,`3*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r0
-					#mul_add_c(a[1],b[2],c1,c2,c3);
-	$LD	r6,`1*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-		
-					#mul_add_c(a[2],b[1],c1,c2,c3);
-	$LD	r6,`2*$BNSZ`(r4)
-	$LD	r7,`1*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[3],b[0],c1,c2,c3);
-	$LD	r6,`3*$BNSZ`(r4)
-	$LD	r7,`0*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-	$ST	r10,`3*$BNSZ`(r3)	#r[3]=c1;
-					#mul_add_c(a[4],b[0],c2,c3,c1);
-	$LD	r6,`4*$BNSZ`(r4)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r0
-					#mul_add_c(a[3],b[1],c2,c3,c1);
-	$LD	r6,`3*$BNSZ`(r4)
-	$LD	r7,`1*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[2],b[2],c2,c3,c1);
-	$LD	r6,`2*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[1],b[3],c2,c3,c1);
-	$LD	r6,`1*$BNSZ`(r4)
-	$LD	r7,`3*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[0],b[4],c2,c3,c1);
-	$LD	r6,`0*$BNSZ`(r4)
-	$LD	r7,`4*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-	$ST	r11,`4*$BNSZ`(r3)	#r[4]=c2;
-					#mul_add_c(a[0],b[5],c3,c1,c2);
-	$LD	r7,`5*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r0
-					#mul_add_c(a[1],b[4],c3,c1,c2);
-	$LD	r6,`1*$BNSZ`(r4)		
-	$LD	r7,`4*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[2],b[3],c3,c1,c2);
-	$LD	r6,`2*$BNSZ`(r4)		
-	$LD	r7,`3*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[3],b[2],c3,c1,c2);
-	$LD	r6,`3*$BNSZ`(r4)		
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[4],b[1],c3,c1,c2);
-	$LD	r6,`4*$BNSZ`(r4)		
-	$LD	r7,`1*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[5],b[0],c3,c1,c2);
-	$LD	r6,`5*$BNSZ`(r4)		
-	$LD	r7,`0*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-	$ST	r12,`5*$BNSZ`(r3)	#r[5]=c3;
-					#mul_add_c(a[6],b[0],c1,c2,c3);
-	$LD	r6,`6*$BNSZ`(r4)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r0
-					#mul_add_c(a[5],b[1],c1,c2,c3);
-	$LD	r6,`5*$BNSZ`(r4)
-	$LD	r7,`1*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[4],b[2],c1,c2,c3);
-	$LD	r6,`4*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[3],b[3],c1,c2,c3);
-	$LD	r6,`3*$BNSZ`(r4)
-	$LD	r7,`3*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[2],b[4],c1,c2,c3);
-	$LD	r6,`2*$BNSZ`(r4)
-	$LD	r7,`4*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[1],b[5],c1,c2,c3);
-	$LD	r6,`1*$BNSZ`(r4)
-	$LD	r7,`5*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[0],b[6],c1,c2,c3);
-	$LD	r6,`0*$BNSZ`(r4)
-	$LD	r7,`6*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-	$ST	r10,`6*$BNSZ`(r3)	#r[6]=c1;
-					#mul_add_c(a[0],b[7],c2,c3,c1);
-	$LD	r7,`7*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r0
-					#mul_add_c(a[1],b[6],c2,c3,c1);
-	$LD	r6,`1*$BNSZ`(r4)
-	$LD	r7,`6*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[2],b[5],c2,c3,c1);
-	$LD	r6,`2*$BNSZ`(r4)
-	$LD	r7,`5*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[3],b[4],c2,c3,c1);
-	$LD	r6,`3*$BNSZ`(r4)
-	$LD	r7,`4*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[4],b[3],c2,c3,c1);
-	$LD	r6,`4*$BNSZ`(r4)
-	$LD	r7,`3*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[5],b[2],c2,c3,c1);
-	$LD	r6,`5*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[6],b[1],c2,c3,c1);
-	$LD	r6,`6*$BNSZ`(r4)
-	$LD	r7,`1*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[7],b[0],c2,c3,c1);
-	$LD	r6,`7*$BNSZ`(r4)
-	$LD	r7,`0*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-	$ST	r11,`7*$BNSZ`(r3)	#r[7]=c2;
-					#mul_add_c(a[7],b[1],c3,c1,c2);
-	$LD	r7,`1*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r0
-					#mul_add_c(a[6],b[2],c3,c1,c2);
-	$LD	r6,`6*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[5],b[3],c3,c1,c2);
-	$LD	r6,`5*$BNSZ`(r4)
-	$LD	r7,`3*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[4],b[4],c3,c1,c2);
-	$LD	r6,`4*$BNSZ`(r4)
-	$LD	r7,`4*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[3],b[5],c3,c1,c2);
-	$LD	r6,`3*$BNSZ`(r4)
-	$LD	r7,`5*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[2],b[6],c3,c1,c2);
-	$LD	r6,`2*$BNSZ`(r4)
-	$LD	r7,`6*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[1],b[7],c3,c1,c2);
-	$LD	r6,`1*$BNSZ`(r4)
-	$LD	r7,`7*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-	$ST	r12,`8*$BNSZ`(r3)	#r[8]=c3;
-					#mul_add_c(a[2],b[7],c1,c2,c3);
-	$LD	r6,`2*$BNSZ`(r4)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r0
-					#mul_add_c(a[3],b[6],c1,c2,c3);
-	$LD	r6,`3*$BNSZ`(r4)
-	$LD	r7,`6*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[4],b[5],c1,c2,c3);
-	$LD	r6,`4*$BNSZ`(r4)
-	$LD	r7,`5*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[5],b[4],c1,c2,c3);
-	$LD	r6,`5*$BNSZ`(r4)
-	$LD	r7,`4*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[6],b[3],c1,c2,c3);
-	$LD	r6,`6*$BNSZ`(r4)
-	$LD	r7,`3*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[7],b[2],c1,c2,c3);
-	$LD	r6,`7*$BNSZ`(r4)
-	$LD	r7,`2*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-	$ST	r10,`9*$BNSZ`(r3)	#r[9]=c1;
-					#mul_add_c(a[7],b[3],c2,c3,c1);
-	$LD	r7,`3*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r0
-					#mul_add_c(a[6],b[4],c2,c3,c1);
-	$LD	r6,`6*$BNSZ`(r4)
-	$LD	r7,`4*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[5],b[5],c2,c3,c1);
-	$LD	r6,`5*$BNSZ`(r4)
-	$LD	r7,`5*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[4],b[6],c2,c3,c1);
-	$LD	r6,`4*$BNSZ`(r4)
-	$LD	r7,`6*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-					#mul_add_c(a[3],b[7],c2,c3,c1);
-	$LD	r6,`3*$BNSZ`(r4)
-	$LD	r7,`7*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-	$ST	r11,`10*$BNSZ`(r3)	#r[10]=c2;
-					#mul_add_c(a[4],b[7],c3,c1,c2);
-	$LD	r6,`4*$BNSZ`(r4)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r0
-					#mul_add_c(a[5],b[6],c3,c1,c2);
-	$LD	r6,`5*$BNSZ`(r4)
-	$LD	r7,`6*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[6],b[5],c3,c1,c2);
-	$LD	r6,`6*$BNSZ`(r4)
-	$LD	r7,`5*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-					#mul_add_c(a[7],b[4],c3,c1,c2);
-	$LD	r6,`7*$BNSZ`(r4)
-	$LD	r7,`4*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	addze	r11,r11
-	$ST	r12,`11*$BNSZ`(r3)	#r[11]=c3;
-					#mul_add_c(a[7],b[5],c1,c2,c3);
-	$LD	r7,`5*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r0
-					#mul_add_c(a[6],b[6],c1,c2,c3);
-	$LD	r6,`6*$BNSZ`(r4)
-	$LD	r7,`6*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-					#mul_add_c(a[5],b[7],c1,c2,c3);
-	$LD	r6,`5*$BNSZ`(r4)
-	$LD	r7,`7*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r10,r10,r8
-	adde	r11,r11,r9
-	addze	r12,r12
-	$ST	r10,`12*$BNSZ`(r3)	#r[12]=c1;
-					#mul_add_c(a[6],b[7],c2,c3,c1);
-	$LD	r6,`6*$BNSZ`(r4)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r0
-					#mul_add_c(a[7],b[6],c2,c3,c1);
-	$LD	r6,`7*$BNSZ`(r4)
-	$LD	r7,`6*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r11,r11,r8
-	adde	r12,r12,r9
-	addze	r10,r10
-	$ST	r11,`13*$BNSZ`(r3)	#r[13]=c2;
-					#mul_add_c(a[7],b[7],c3,c1,c2);
-	$LD	r7,`7*$BNSZ`(r5)
-	$UMULL	r8,r6,r7
-	$UMULH	r9,r6,r7
-	addc	r12,r12,r8
-	adde	r10,r10,r9
-	$ST	r12,`14*$BNSZ`(r3)	#r[14]=c3;
-	$ST	r10,`15*$BNSZ`(r3)	#r[15]=c1;
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,3,0
-	.long	0
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_sub_words" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-#
-.align	4
-.bn_sub_words:
-#
-#	Handcoded version of bn_sub_words
-#
-#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
-#
-#	r3 = r
-#	r4 = a
-#	r5 = b
-#	r6 = n
-#
-#       Note:	No loop unrolling done since this is not a performance
-#               critical loop.
-
-	xor	r0,r0,r0	#set r0 = 0
-#
-#	check for r6 = 0 AND set carry bit.
-#
-	subfc.	r7,r0,r6        # If r6 is 0 then result is 0.
-				# if r6 > 0 then result !=0
-				# In either case carry bit is set.
-	beq	Lppcasm_sub_adios
-	addi	r4,r4,-$BNSZ
-	addi	r3,r3,-$BNSZ
-	addi	r5,r5,-$BNSZ
-	mtctr	r6
-Lppcasm_sub_mainloop:	
-	$LDU	r7,$BNSZ(r4)
-	$LDU	r8,$BNSZ(r5)
-	subfe	r6,r8,r7	# r6 = r7+carry bit + onescomplement(r8)
-				# if carry = 1 this is r7-r8. Else it
-				# is r7-r8 -1 as we need.
-	$STU	r6,$BNSZ(r3)
-	bdnz-	Lppcasm_sub_mainloop
-Lppcasm_sub_adios:	
-	subfze	r3,r0		# if carry bit is set then r3 = 0 else -1
-	andi.	r3,r3,1         # keep only last bit.
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,4,0
-	.long	0
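The subfc/subfe idiom above keeps the borrow in the carry bit (inverted, PowerPC-style: CA=1 means no borrow) across the whole loop, and subfze/andi. turn it back into a 0/1 return value. The same loop in C, as a sketch rather than OpenSSL's actual bn_sub_words:

    typedef unsigned int BN_ULONG;

    /* r[i] = a[i] - b[i] - borrow; returns the final borrow (1 iff a < b) */
    BN_ULONG bn_sub_words_ref(BN_ULONG *r, const BN_ULONG *a,
                              const BN_ULONG *b, int n)
    {
        BN_ULONG borrow = 0;
        for (int i = 0; i < n; i++) {
            BN_ULONG ai = a[i], bi = b[i];
            r[i] = ai - bi - borrow;
            /* borrow out: a[i] < b[i], or equal while already owing one */
            borrow = (ai < bi) || (ai == bi && borrow);
        }
        return borrow;
    }

bn_add_words below is the mirror image, with adde propagating a carry instead of a borrow.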
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_add_words" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-
-.align	4
-.bn_add_words:
-#
-#	Handcoded version of bn_add_words
-#
-#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
-#
-#	r3 = r
-#	r4 = a
-#	r5 = b
-#	r6 = n
-#
-#       Note:	No loop unrolling done since this is not a performance
-#               critical loop.
-
-	xor	r0,r0,r0
-#
-#	check for r6 = 0. Is this needed?
-#
-	addic.	r6,r6,0		#test r6 and clear carry bit.
-	beq	Lppcasm_add_adios
-	addi	r4,r4,-$BNSZ
-	addi	r3,r3,-$BNSZ
-	addi	r5,r5,-$BNSZ
-	mtctr	r6
-Lppcasm_add_mainloop:	
-	$LDU	r7,$BNSZ(r4)
-	$LDU	r8,$BNSZ(r5)
-	adde	r8,r7,r8
-	$STU	r8,$BNSZ(r3)
-	bdnz-	Lppcasm_add_mainloop
-Lppcasm_add_adios:	
-	addze	r3,r0			#return carry bit.
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,4,0
-	.long	0
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_div_words" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-
-.align	4
-.bn_div_words:
-#
-#	This is a cleaned up version of code generated by
-#	the AIX compiler. The only optimization is to use
-#	the PPC instruction to count leading zeros instead
-#	of call to num_bits_word. Since this was compiled
-#	only at level -O2 we can possibly squeeze it more?
-#	
-#	r3 = h
-#	r4 = l
-#	r5 = d
-	
-	$UCMPI	0,r5,0			# compare r5 and 0
-	bne	Lppcasm_div1		# proceed if d!=0
-	li	r3,-1			# d=0 return -1
-	blr
-Lppcasm_div1:
-	xor	r0,r0,r0		#r0=0
-	li	r8,$BITS
-	$CNTLZ.	r7,r5			#r7 = num leading 0s in d.
-	beq	Lppcasm_div2		#proceed if no leading zeros
-	subf	r8,r7,r8		#r8 = BN_num_bits_word(d)
-	$SHR.	r9,r3,r8		#are there any bits above r8'th?
-	$TR	16,r9,r0		#if there are, signal to dump core...
-Lppcasm_div2:
-	$UCMP	0,r3,r5			#h>=d?
-	blt	Lppcasm_div3		#goto Lppcasm_div3 if not
-	subf	r3,r5,r3		#h-=d ; 
-Lppcasm_div3:				#r7 = BN_BITS2-i. so r7=i
-	cmpi	0,0,r7,0		# is (i == 0)?
-	beq	Lppcasm_div4
-	$SHL	r3,r3,r7		# h = (h<< i)
-	$SHR	r8,r4,r8		# r8 = (l >> BN_BITS2 -i)
-	$SHL	r5,r5,r7		# d<<=i
-	or	r3,r3,r8		# h = (h<<i)|(l>>(BN_BITS2-i))
-	$SHL	r4,r4,r7		# l <<=i
-Lppcasm_div4:
-	$SHRI	r9,r5,`$BITS/2`		# r9 = dh
-					# dl will be computed when needed
-					# as it saves registers.
-	li	r6,2			#r6=2
-	mtctr	r6			#counter will be in count.
-Lppcasm_divouterloop: 
-	$SHRI	r8,r3,`$BITS/2`		#r8 = (h>>BN_BITS4)
-	$SHRI	r11,r4,`$BITS/2`	#r11= (l&BN_MASK2h)>>BN_BITS4
-					# compute here for innerloop.
-	$UCMP	0,r8,r9			# is (h>>BN_BITS4)==dh
-	bne	Lppcasm_div5		# goto Lppcasm_div5 if not
-
-	li	r8,-1
-	$CLRU	r8,r8,`$BITS/2`		#q = BN_MASK2l 
-	b	Lppcasm_div6
-Lppcasm_div5:
-	$UDIV	r8,r3,r9		#q = h/dh
-Lppcasm_div6:
-	$UMULL	r12,r9,r8		#th = q*dh
-	$CLRU	r10,r5,`$BITS/2`	#r10=dl
-	$UMULL	r6,r8,r10		#tl = q*dl
-	
-Lppcasm_divinnerloop:
-	subf	r10,r12,r3		#t = h -th
-	$SHRI	r7,r10,`$BITS/2`	#r7= (t &BN_MASK2H), sort of...
-	addic.	r7,r7,0			#test if r7 == 0. used below.
-					# now want to compute
-					# r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
-					# the following 2 instructions do that
-	$SHLI	r7,r10,`$BITS/2`	# r7 = (t<<BN_BITS4)
-	or	r7,r7,r11		# r7|=((l&BN_MASK2h)>>BN_BITS4)
-	$UCMP	cr1,r6,r7		# compare (tl <= r7)
-	bne	Lppcasm_divinnerexit
-	ble	cr1,Lppcasm_divinnerexit
-	addi	r8,r8,-1		#q--
-	subf	r12,r9,r12		#th -=dh
-	$CLRU	r10,r5,`$BITS/2`	#r10=dl. t is no longer needed in loop.
-	subf	r6,r10,r6		#tl -=dl
-	b	Lppcasm_divinnerloop
-Lppcasm_divinnerexit:
-	$SHRI	r10,r6,`$BITS/2`	#t=(tl>>BN_BITS4)
-	$SHLI	r11,r6,`$BITS/2`	#tl=(tl<<BN_BITS4)&BN_MASK2h;
-	$UCMP	cr1,r4,r11		# compare l and tl
-	add	r12,r12,r10		# th+=t
-	bge	cr1,Lppcasm_div7	# if (l>=tl) goto Lppcasm_div7
-	addi	r12,r12,1		# th++
-Lppcasm_div7:
-	subf	r11,r11,r4		#r11=l-tl
-	$UCMP	cr1,r3,r12		#compare h and th
-	bge	cr1,Lppcasm_div8	#if (h>=th) goto Lppcasm_div8
-	addi	r8,r8,-1		# q--
-	add	r3,r5,r3		# h+=d
-Lppcasm_div8:
-	subf	r12,r12,r3		#r12 = h-th
-	$SHLI	r4,r11,`$BITS/2`	#l=(l&BN_MASK2l)<<BN_BITS4
-					# want to compute
-					# h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
-					# the following 2 instructions will do this.
-	$INSR	r11,r12,`$BITS/2`,`$BITS/2`	# r11 is the value we want rotated $BITS/2.
-	$ROTL	r3,r11,`$BITS/2`	# rotate by $BITS/2 and store in r3
-	bdz	Lppcasm_div9		#if (count==0) break ;
-	$SHLI	r0,r8,`$BITS/2`		#ret =q<<BN_BITS4
-	b	Lppcasm_divouterloop
-Lppcasm_div9:
-	or	r3,r8,r0
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,3,0
-	.long	0
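The structure above is the classic two-digit schoolbook division: the divisor is normalized (cntlz plus shifts), the outer loop runs exactly twice (mtctr with r6=2) producing one BN_BITS4-wide quotient digit per pass, and Lppcasm_divinnerloop is the Knuth-style fixup that decrements the trial quotient q at most a few times. A C sketch of the same algorithm, assuming a 32-bit BN_ULONG and a divisor d that is already normalized with h < d (which is what the prologue arranges); this illustrates the control flow, it is not OpenSSL's actual C fallback:

    typedef unsigned int BN_ULONG;
    #define BN_BITS4  16
    #define BN_MASK2l ((BN_ULONG)0x0000ffff)
    #define BN_MASK2h ((BN_ULONG)0xffff0000)

    static BN_ULONG div_2words_by_1(BN_ULONG h, BN_ULONG l, BN_ULONG d)
    {
        BN_ULONG dh = d >> BN_BITS4, dl = d & BN_MASK2l;
        BN_ULONG ret = 0;

        for (int count = 2; count > 0; count--) {
            /* trial digit: all-ones when the top halves match */
            BN_ULONG q = (h >> BN_BITS4) == dh ? BN_MASK2l : h / dh;
            BN_ULONG th = q * dh, tl = q * dl;

            for (;;) {                        /* Lppcasm_divinnerloop */
                BN_ULONG t = h - th;
                if ((t & BN_MASK2h) ||
                    (((t << BN_BITS4) | (l >> BN_BITS4)) >= tl))
                    break;
                q--; th -= dh; tl -= dl;      /* shrink the trial quotient */
            }
            th += tl >> BN_BITS4;             /* align the partial products */
            tl <<= BN_BITS4;
            if (l < tl) th++;                 /* Lppcasm_div7 */
            l -= tl;
            if (h < th) { q--; h += d; }      /* Lppcasm_div8 */
            h -= th;

            h = (h << BN_BITS4) | (l >> BN_BITS4); /* the insrdi/rotldi step */
            l <<= BN_BITS4;
            ret = (ret << BN_BITS4) | q;
        }
        return ret;
    }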
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_sqr_words" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-.align	4
-.bn_sqr_words:
-#
-#	Optimized version of bn_sqr_words
-#
-#	void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
-#
-#	r3 = r
-#	r4 = a
-#	r5 = n
-#
-#	r6 = a[i].
-#	r7,r8 = product.
-#
-#	No unrolling done here. Not performance critical.
-
-	addic.	r5,r5,0			#test r5.
-	beq	Lppcasm_sqr_adios
-	addi	r4,r4,-$BNSZ
-	addi	r3,r3,-$BNSZ
-	mtctr	r5
-Lppcasm_sqr_mainloop:	
-					#sqr(r[0],r[1],a[0]);
-	$LDU	r6,$BNSZ(r4)
-	$UMULL	r7,r6,r6
-	$UMULH  r8,r6,r6
-	$STU	r7,$BNSZ(r3)
-	$STU	r8,$BNSZ(r3)
-	bdnz-	Lppcasm_sqr_mainloop
-Lppcasm_sqr_adios:	
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,3,0
-	.long	0
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_mul_words" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-
-.align	4	
-.bn_mul_words:
-#
-# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
-#
-# r3 = rp
-# r4 = ap
-# r5 = num
-# r6 = w
-	xor	r0,r0,r0
-	xor	r12,r12,r12		# used for carry
-	rlwinm.	r7,r5,30,2,31		# num >> 2
-	beq	Lppcasm_mw_REM
-	mtctr	r7
-Lppcasm_mw_LOOP:	
-					#mul(rp[0],ap[0],w,c1);
-	$LD	r8,`0*$BNSZ`(r4)
-	$UMULL	r9,r6,r8
-	$UMULH  r10,r6,r8
-	addc	r9,r9,r12
-	#addze	r10,r10			#carry is NOT ignored.
-					#will be taken care of
-					#in second spin below
-					#using adde.
-	$ST	r9,`0*$BNSZ`(r3)
-					#mul(rp[1],ap[1],w,c1);
-	$LD	r8,`1*$BNSZ`(r4)	
-	$UMULL	r11,r6,r8
-	$UMULH  r12,r6,r8
-	adde	r11,r11,r10
-	#addze	r12,r12
-	$ST	r11,`1*$BNSZ`(r3)
-					#mul(rp[2],ap[2],w,c1);
-	$LD	r8,`2*$BNSZ`(r4)
-	$UMULL	r9,r6,r8
-	$UMULH  r10,r6,r8
-	adde	r9,r9,r12
-	#addze	r10,r10
-	$ST	r9,`2*$BNSZ`(r3)
-					#mul_add(rp[3],ap[3],w,c1);
-	$LD	r8,`3*$BNSZ`(r4)
-	$UMULL	r11,r6,r8
-	$UMULH  r12,r6,r8
-	adde	r11,r11,r10
-	addze	r12,r12			#this spin we collect carry into
-					#r12
-	$ST	r11,`3*$BNSZ`(r3)
-	
-	addi	r3,r3,`4*$BNSZ`
-	addi	r4,r4,`4*$BNSZ`
-	bdnz-	Lppcasm_mw_LOOP
-
-Lppcasm_mw_REM:
-	andi.	r5,r5,0x3
-	beq	Lppcasm_mw_OVER
-					#mul(rp[0],ap[0],w,c1);
-	$LD	r8,`0*$BNSZ`(r4)
-	$UMULL	r9,r6,r8
-	$UMULH  r10,r6,r8
-	addc	r9,r9,r12
-	addze	r10,r10
-	$ST	r9,`0*$BNSZ`(r3)
-	addi	r12,r10,0
-	
-	addi	r5,r5,-1
-	cmpli	0,0,r5,0
-	beq	Lppcasm_mw_OVER
-
-	
-					#mul(rp[1],ap[1],w,c1);
-	$LD	r8,`1*$BNSZ`(r4)	
-	$UMULL	r9,r6,r8
-	$UMULH  r10,r6,r8
-	addc	r9,r9,r12
-	addze	r10,r10
-	$ST	r9,`1*$BNSZ`(r3)
-	addi	r12,r10,0
-	
-	addi	r5,r5,-1
-	cmpli	0,0,r5,0
-	beq	Lppcasm_mw_OVER
-	
-					#mul_add(rp[2],ap[2],w,c1);
-	$LD	r8,`2*$BNSZ`(r4)
-	$UMULL	r9,r6,r8
-	$UMULH  r10,r6,r8
-	addc	r9,r9,r12
-	addze	r10,r10
-	$ST	r9,`2*$BNSZ`(r3)
-	addi	r12,r10,0
-		
-Lppcasm_mw_OVER:	
-	addi	r3,r12,0
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,4,0
-	.long	0
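Stripped of the 4-way unrolling (whose point is to alternate the carry between r10 and r12 so that only one addze is needed per spin), bn_mul_words is a plain multiply-and-carry loop; bn_mul_add_words below is identical except that it also adds in the existing rp[i]. A C sketch under the same 32-bit assumptions:

    typedef unsigned int BN_ULONG;
    typedef unsigned long long u64;

    /* rp[i] = ap[i]*w + carry; returns the final carry word */
    BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                              int num, BN_ULONG w)
    {
        BN_ULONG carry = 0;
        for (int i = 0; i < num; i++) {
            u64 t = (u64)ap[i] * w + carry; /* fits: (2^32-1)^2 + 2^32-1 < 2^64 */
            rp[i] = (BN_ULONG)t;
            carry = (BN_ULONG)(t >> 32);
        }
        return carry;
    }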
-
-#
-#	NOTE:	The following label name should be changed to
-#		"bn_mul_add_words" i.e. remove the first dot
-#		for the gcc compiler. This should be automatically
-#		done in the build
-#
-
-.align	4
-.bn_mul_add_words:
-#
-# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
-#
-# r3 = rp
-# r4 = ap
-# r5 = num
-# r6 = w
-#
-# empirical evidence suggests that the unrolled version performs best!!
-#
-	xor	r0,r0,r0		#r0 = 0
-	xor	r12,r12,r12  		#r12 = 0 . used for carry		
-	rlwinm.	r7,r5,30,2,31		# num >> 2
-	beq	Lppcasm_maw_leftover	# if (num < 4) go LPPCASM_maw_leftover
-	mtctr	r7
-Lppcasm_maw_mainloop:	
-					#mul_add(rp[0],ap[0],w,c1);
-	$LD	r8,`0*$BNSZ`(r4)
-	$LD	r11,`0*$BNSZ`(r3)
-	$UMULL	r9,r6,r8
-	$UMULH  r10,r6,r8
-	addc	r9,r9,r12		#r12 is carry.
-	addze	r10,r10
-	addc	r9,r9,r11
-	#addze	r10,r10
-					#the above instruction addze
-					#is NOT needed. Carry will NOT
-					#be ignored. It's not affected
-					#by multiply and will be collected
-					#in the next spin
-	$ST	r9,`0*$BNSZ`(r3)
-	
-					#mul_add(rp[1],ap[1],w,c1);
-	$LD	r8,`1*$BNSZ`(r4)	
-	$LD	r9,`1*$BNSZ`(r3)
-	$UMULL	r11,r6,r8
-	$UMULH  r12,r6,r8
-	adde	r11,r11,r10		#r10 is carry.
-	addze	r12,r12
-	addc	r11,r11,r9
-	#addze	r12,r12
-	$ST	r11,`1*$BNSZ`(r3)
-	
-					#mul_add(rp[2],ap[2],w,c1);
-	$LD	r8,`2*$BNSZ`(r4)
-	$UMULL	r9,r6,r8
-	$LD	r11,`2*$BNSZ`(r3)
-	$UMULH  r10,r6,r8
-	adde	r9,r9,r12
-	addze	r10,r10
-	addc	r9,r9,r11
-	#addze	r10,r10
-	$ST	r9,`2*$BNSZ`(r3)
-	
-					#mul_add(rp[3],ap[3],w,c1);
-	$LD	r8,`3*$BNSZ`(r4)
-	$UMULL	r11,r6,r8
-	$LD	r9,`3*$BNSZ`(r3)
-	$UMULH  r12,r6,r8
-	adde	r11,r11,r10
-	addze	r12,r12
-	addc	r11,r11,r9
-	addze	r12,r12
-	$ST	r11,`3*$BNSZ`(r3)
-	addi	r3,r3,`4*$BNSZ`
-	addi	r4,r4,`4*$BNSZ`
-	bdnz-	Lppcasm_maw_mainloop
-	
-Lppcasm_maw_leftover:
-	andi.	r5,r5,0x3
-	beq	Lppcasm_maw_adios
-	addi	r3,r3,-$BNSZ
-	addi	r4,r4,-$BNSZ
-					#mul_add(rp[0],ap[0],w,c1);
-	mtctr	r5
-	$LDU	r8,$BNSZ(r4)
-	$UMULL	r9,r6,r8
-	$UMULH  r10,r6,r8
-	$LDU	r11,$BNSZ(r3)
-	addc	r9,r9,r11
-	addze	r10,r10
-	addc	r9,r9,r12
-	addze	r12,r10
-	$ST	r9,0(r3)
-	
-	bdz	Lppcasm_maw_adios
-					#mul_add(rp[1],ap[1],w,c1);
-	$LDU	r8,$BNSZ(r4)	
-	$UMULL	r9,r6,r8
-	$UMULH  r10,r6,r8
-	$LDU	r11,$BNSZ(r3)
-	addc	r9,r9,r11
-	addze	r10,r10
-	addc	r9,r9,r12
-	addze	r12,r10
-	$ST	r9,0(r3)
-	
-	bdz	Lppcasm_maw_adios
-					#mul_add(rp[2],ap[2],w,c1);
-	$LDU	r8,$BNSZ(r4)
-	$UMULL	r9,r6,r8
-	$UMULH  r10,r6,r8
-	$LDU	r11,$BNSZ(r3)
-	addc	r9,r9,r11
-	addze	r10,r10
-	addc	r9,r9,r12
-	addze	r12,r10
-	$ST	r9,0(r3)
-		
-Lppcasm_maw_adios:	
-	addi	r3,r12,0
-	blr
-	.long	0
-	.byte	0,12,0x14,0,0,0,4,0
-	.long	0
-	.align	4
-EOF
-$data =~ s/\`([^\`]*)\`/eval $1/gem;
-print $data;
-close STDOUT;

+ 0 - 1088
drivers/builtin_openssl2/crypto/bn/asm/ppc64-mont.pl

@@ -1,1088 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# December 2007
-
-# The reason for the undertaken effort is basically the following. Even though
-# the Power 6 CPU operates at an incredible 4.7GHz clock frequency, its PKI
-# performance was observed to be less than impressive, essentially as
-# fast as 1.8GHz PPC970, or 2.6 times(!) slower than one would hope.
-# Well, it's not surprising that IBM had to make some sacrifices to
-# boost the clock frequency that much, but no overall improvement?
-# Having observed how much difference switching to the FPU made on
-# UltraSPARC, playing the same stunt on Power 6 appeared appropriate...
-# Unfortunately the resulting performance improvement is not as
-# impressive, ~30%, and in absolute terms is still very far from what
-# one would expect from 4.7GHz CPU. There is a chance that I'm doing
-# something wrong, but in the absence of assembler-level micro-profiling
-# data or at least a decent platform guide I can't tell... Or better
-# results might be achieved with VMX... Anyway, this module provides
-# *worse* performance on other PowerPC implementations, ~40-15% slower
-# on PPC970 depending on key length and ~40% slower on Power 5 for all
-# key lengths. As it's obviously inappropriate as "best all-round"
-# alternative, it has to be complemented with run-time CPU family
-# detection. Oh! It should also be noted that unlike other PowerPC
-# implementations, the IALU ppc-mont.pl module performs *suboptimally* on
-# >=1024-bit key lengths on Power 6. It should also be noted that
-# *everything* said so far applies to 64-bit builds! As far as 32-bit
-# application executed on 64-bit CPU goes, this module is likely to
-# become the preferred choice, because it's easy to adapt it for such a
-# case and it *is* faster than 32-bit ppc-mont.pl on *all* processors.
-
-# February 2008
-
-# Micro-profiling assisted optimization results in ~15% improvement
-# over original ppc64-mont.pl version, or overall ~50% improvement
-# over ppc.pl module on Power 6. If compared to ppc-mont.pl on same
-# Power 6 CPU, this module is 5-150% faster depending on key length,
-# [hereafter] more for longer keys. But if compared to ppc-mont.pl
-# on 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive
-# in absolute terms, but it's apparently the way Power 6 is...
-
-# December 2009
-
-# Adapted for the 32-bit build, this module delivers a 25-120% (yes, more
-# than *twice* for longer keys) performance improvement over 32-bit
-# ppc-mont.pl on 1.8GHz PPC970. However! This implementation utilizes
-# even 64-bit integer operations and the trouble is that most PPC
-# operating systems don't preserve upper halves of general purpose
-# registers upon 32-bit signal delivery. They do preserve them upon
-# context switch, but not signalling:-( This means that asynchronous
-# signals have to be blocked upon entry to this subroutine. Signal
-# masking (and of course complementary unmasking) has quite an impact
-# on performance, naturally larger for shorter keys. It's so severe
-# that 512-bit key performance can be as low as 1/3 of expected one.
-# This is why this routine can be engaged for longer key operations
-# only on these OSes, see crypto/ppccap.c for further details. MacOS X
-# is an exception from this and doesn't require signal masking, and
-# that's where the above improvement coefficients were collected. For
-# others the alternative would be to break the dependence on upper halves of
-# GPRs by sticking to 32-bit integer operations...
-
-$flavour = shift;
-
-if ($flavour =~ /32/) {
-	$SIZE_T=4;
-	$RZONE=	224;
-	$fname=	"bn_mul_mont_fpu64";
-
-	$STUX=	"stwux";	# store indexed and update
-	$PUSH=	"stw";
-	$POP=	"lwz";
-} elsif ($flavour =~ /64/) {
-	$SIZE_T=8;
-	$RZONE=	288;
-	$fname=	"bn_mul_mont_fpu64";
-
-	# same as above, but 64-bit mnemonics...
-	$STUX=	"stdux";	# store indexed and update
-	$PUSH=	"std";
-	$POP=	"ld";
-} else { die "nonsense $flavour"; }
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
-die "can't locate ppc-xlate.pl";
-
-open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
-
-$FRAME=64;	# padded frame header
-$TRANSFER=16*8;
-
-$carry="r0";
-$sp="r1";
-$toc="r2";
-$rp="r3";	$ovf="r3";
-$ap="r4";
-$bp="r5";
-$np="r6";
-$n0="r7";
-$num="r8";
-$rp="r9";	# $rp is reassigned
-$tp="r10";
-$j="r11";
-$i="r12";
-# non-volatile registers
-$nap_d="r22";	# interleaved ap and np in double format
-$a0="r23";	# ap[0]
-$t0="r24";	# temporary registers
-$t1="r25";
-$t2="r26";
-$t3="r27";
-$t4="r28";
-$t5="r29";
-$t6="r30";
-$t7="r31";
-
-# PPC offers enough register bank capacity to unroll inner loops twice
-#
-#     ..A3A2A1A0
-#           dcba
-#    -----------
-#            A0a
-#           A0b
-#          A0c
-#         A0d
-#          A1a
-#         A1b
-#        A1c
-#       A1d
-#        A2a
-#       A2b
-#      A2c
-#     A2d
-#      A3a
-#     A3b
-#    A3c
-#   A3d
-#    ..a
-#   ..b
-#
-$ba="f0";	$bb="f1";	$bc="f2";	$bd="f3";
-$na="f4";	$nb="f5";	$nc="f6";	$nd="f7";
-$dota="f8";	$dotb="f9";
-$A0="f10";	$A1="f11";	$A2="f12";	$A3="f13";
-$N0="f20";	$N1="f21";	$N2="f22";	$N3="f23";
-$T0a="f24";	$T0b="f25";
-$T1a="f26";	$T1b="f27";
-$T2a="f28";	$T2b="f29";
-$T3a="f30";	$T3b="f31";
-
-# sp----------->+-------------------------------+
-#		| saved sp			|
-#		+-------------------------------+
-#		.				.
-#   +64		+-------------------------------+
-#		| 16 gpr<->fpr transfer zone	|
-#		.				.
-#		.				.
-#   +16*8	+-------------------------------+
-#		| __int64 tmp[-1]		|
-#		+-------------------------------+
-#		| __int64 tmp[num]		|
-#		.				.
-#		.				.
-#		.				.
-#   +(num+1)*8	+-------------------------------+
-#		| padding to 64 byte boundary	|
-#		.				.
-#   +X		+-------------------------------+
-#		| double nap_d[4*num]		|
-#		.				.
-#		.				.
-#		.				.
-#		+-------------------------------+
-#		.				.
-#   -12*size_t	+-------------------------------+
-#		| 10 saved gpr, r22-r31		|
-#		.				.
-#		.				.
-#   -12*8	+-------------------------------+
-#		| 12 saved fpr, f20-f31		|
-#		.				.
-#		.				.
-#		+-------------------------------+
-
-$code=<<___;
-.machine "any"
-.text
-
-.globl	.$fname
-.align	5
-.$fname:
-	cmpwi	$num,`3*8/$SIZE_T`
-	mr	$rp,r3		; $rp is reassigned
-	li	r3,0		; possible "not handled" return code
-	bltlr-
-	andi.	r0,$num,`16/$SIZE_T-1`		; $num has to be "even"
-	bnelr-
-
-	slwi	$num,$num,`log($SIZE_T)/log(2)`	; num*=sizeof(BN_LONG)
-	li	$i,-4096
-	slwi	$tp,$num,2	; place for {an}p_{lh}[num], i.e. 4*num
-	add	$tp,$tp,$num	; place for tp[num+1]
-	addi	$tp,$tp,`$FRAME+$TRANSFER+8+64+$RZONE`
-	subf	$tp,$tp,$sp	; $sp-$tp
-	and	$tp,$tp,$i	; minimize TLB usage
-	subf	$tp,$sp,$tp	; $tp-$sp
-	mr	$i,$sp
-	$STUX	$sp,$sp,$tp	; alloca
-
-	$PUSH	r22,`-12*8-10*$SIZE_T`($i)
-	$PUSH	r23,`-12*8-9*$SIZE_T`($i)
-	$PUSH	r24,`-12*8-8*$SIZE_T`($i)
-	$PUSH	r25,`-12*8-7*$SIZE_T`($i)
-	$PUSH	r26,`-12*8-6*$SIZE_T`($i)
-	$PUSH	r27,`-12*8-5*$SIZE_T`($i)
-	$PUSH	r28,`-12*8-4*$SIZE_T`($i)
-	$PUSH	r29,`-12*8-3*$SIZE_T`($i)
-	$PUSH	r30,`-12*8-2*$SIZE_T`($i)
-	$PUSH	r31,`-12*8-1*$SIZE_T`($i)
-	stfd	f20,`-12*8`($i)
-	stfd	f21,`-11*8`($i)
-	stfd	f22,`-10*8`($i)
-	stfd	f23,`-9*8`($i)
-	stfd	f24,`-8*8`($i)
-	stfd	f25,`-7*8`($i)
-	stfd	f26,`-6*8`($i)
-	stfd	f27,`-5*8`($i)
-	stfd	f28,`-4*8`($i)
-	stfd	f29,`-3*8`($i)
-	stfd	f30,`-2*8`($i)
-	stfd	f31,`-1*8`($i)
-___
-$code.=<<___ if ($SIZE_T==8);
-	ld	$a0,0($ap)	; pull ap[0] value
-	ld	$n0,0($n0)	; pull n0[0] value
-	ld	$t3,0($bp)	; bp[0]
-___
-$code.=<<___ if ($SIZE_T==4);
-	mr	$t1,$n0
-	lwz	$a0,0($ap)	; pull ap[0,1] value
-	lwz	$t0,4($ap)
-	lwz	$n0,0($t1)	; pull n0[0,1] value
-	lwz	$t1,4($t1)
-	lwz	$t3,0($bp)	; bp[0,1]
-	lwz	$t2,4($bp)
-	insrdi	$a0,$t0,32,0
-	insrdi	$n0,$t1,32,0
-	insrdi	$t3,$t2,32,0
-___
-$code.=<<___;
-	addi	$tp,$sp,`$FRAME+$TRANSFER+8+64`
-	li	$i,-64
-	add	$nap_d,$tp,$num
-	and	$nap_d,$nap_d,$i	; align to 64 bytes
-
-	mulld	$t7,$a0,$t3	; ap[0]*bp[0]
-	; nap_d is off by 1, because it's used with stfdu/lfdu
-	addi	$nap_d,$nap_d,-8
-	srwi	$j,$num,`3+1`	; counter register, num/2
-	mulld	$t7,$t7,$n0	; tp[0]*n0
-	addi	$j,$j,-1
-	addi	$tp,$sp,`$FRAME+$TRANSFER-8`
-	li	$carry,0
-	mtctr	$j
-
-	; transfer bp[0] to FPU as 4x16-bit values
-	extrdi	$t0,$t3,16,48
-	extrdi	$t1,$t3,16,32
-	extrdi	$t2,$t3,16,16
-	extrdi	$t3,$t3,16,0
-	std	$t0,`$FRAME+0`($sp)
-	std	$t1,`$FRAME+8`($sp)
-	std	$t2,`$FRAME+16`($sp)
-	std	$t3,`$FRAME+24`($sp)
-	; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values
-	extrdi	$t4,$t7,16,48
-	extrdi	$t5,$t7,16,32
-	extrdi	$t6,$t7,16,16
-	extrdi	$t7,$t7,16,0
-	std	$t4,`$FRAME+32`($sp)
-	std	$t5,`$FRAME+40`($sp)
-	std	$t6,`$FRAME+48`($sp)
-	std	$t7,`$FRAME+56`($sp)
-___
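The extrdi/std runs above are the heart of the FPU trick: each 64-bit operand is split into four 16-bit digits before being converted to double by fcfid, so that digit products (below 2^32) and a bounded number of their sums stay well inside the 53-bit double mantissa, making every fmadd exact. A C sketch of the decomposition and of the carry-propagating recombination performed by the later add/srdi/insrdi interludes, as an illustration only:

    #include <stdint.h>

    /* split a 64-bit limb into four 16-bit digits (the four extrdi) */
    static void to_digits(uint64_t x, double d[4])
    {
        for (int i = 0; i < 4; i++) {
            d[i] = (double)(uint16_t)x;     /* exact: value < 2^16 */
            x >>= 16;
        }
    }

    /* recombine four accumulated digit sums (each possibly wider than 16
     * bits after fctid) into a 64-bit word plus a running carry */
    static uint64_t from_digit_sums(const uint64_t t[4], uint64_t *carry)
    {
        uint64_t x = 0, c = *carry;
        for (int i = 0; i < 4; i++) {
            c += t[i];
            x |= (c & 0xffff) << (16 * i);  /* insrdi slot i */
            c >>= 16;                       /* srdi: carry to next digit */
        }
        *carry = c;
        return x;
    }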
-$code.=<<___ if ($SIZE_T==8);
-	lwz	$t0,4($ap)		; load a[j] as 32-bit word pair
-	lwz	$t1,0($ap)
-	lwz	$t2,12($ap)		; load a[j+1] as 32-bit word pair
-	lwz	$t3,8($ap)
-	lwz	$t4,4($np)		; load n[j] as 32-bit word pair
-	lwz	$t5,0($np)
-	lwz	$t6,12($np)		; load n[j+1] as 32-bit word pair
-	lwz	$t7,8($np)
-___
-$code.=<<___ if ($SIZE_T==4);
-	lwz	$t0,0($ap)		; load a[j..j+3] as 32-bit word pairs
-	lwz	$t1,4($ap)
-	lwz	$t2,8($ap)
-	lwz	$t3,12($ap)
-	lwz	$t4,0($np)		; load n[j..j+3] as 32-bit word pairs
-	lwz	$t5,4($np)
-	lwz	$t6,8($np)
-	lwz	$t7,12($np)
-___
-$code.=<<___;
-	lfd	$ba,`$FRAME+0`($sp)
-	lfd	$bb,`$FRAME+8`($sp)
-	lfd	$bc,`$FRAME+16`($sp)
-	lfd	$bd,`$FRAME+24`($sp)
-	lfd	$na,`$FRAME+32`($sp)
-	lfd	$nb,`$FRAME+40`($sp)
-	lfd	$nc,`$FRAME+48`($sp)
-	lfd	$nd,`$FRAME+56`($sp)
-	std	$t0,`$FRAME+64`($sp)
-	std	$t1,`$FRAME+72`($sp)
-	std	$t2,`$FRAME+80`($sp)
-	std	$t3,`$FRAME+88`($sp)
-	std	$t4,`$FRAME+96`($sp)
-	std	$t5,`$FRAME+104`($sp)
-	std	$t6,`$FRAME+112`($sp)
-	std	$t7,`$FRAME+120`($sp)
-	fcfid	$ba,$ba
-	fcfid	$bb,$bb
-	fcfid	$bc,$bc
-	fcfid	$bd,$bd
-	fcfid	$na,$na
-	fcfid	$nb,$nb
-	fcfid	$nc,$nc
-	fcfid	$nd,$nd
-
-	lfd	$A0,`$FRAME+64`($sp)
-	lfd	$A1,`$FRAME+72`($sp)
-	lfd	$A2,`$FRAME+80`($sp)
-	lfd	$A3,`$FRAME+88`($sp)
-	lfd	$N0,`$FRAME+96`($sp)
-	lfd	$N1,`$FRAME+104`($sp)
-	lfd	$N2,`$FRAME+112`($sp)
-	lfd	$N3,`$FRAME+120`($sp)
-	fcfid	$A0,$A0
-	fcfid	$A1,$A1
-	fcfid	$A2,$A2
-	fcfid	$A3,$A3
-	fcfid	$N0,$N0
-	fcfid	$N1,$N1
-	fcfid	$N2,$N2
-	fcfid	$N3,$N3
-	addi	$ap,$ap,16
-	addi	$np,$np,16
-
-	fmul	$T1a,$A1,$ba
-	fmul	$T1b,$A1,$bb
-	stfd	$A0,8($nap_d)		; save a[j] in double format
-	stfd	$A1,16($nap_d)
-	fmul	$T2a,$A2,$ba
-	fmul	$T2b,$A2,$bb
-	stfd	$A2,24($nap_d)		; save a[j+1] in double format
-	stfd	$A3,32($nap_d)
-	fmul	$T3a,$A3,$ba
-	fmul	$T3b,$A3,$bb
-	stfd	$N0,40($nap_d)		; save n[j] in double format
-	stfd	$N1,48($nap_d)
-	fmul	$T0a,$A0,$ba
-	fmul	$T0b,$A0,$bb
-	stfd	$N2,56($nap_d)		; save n[j+1] in double format
-	stfdu	$N3,64($nap_d)
-
-	fmadd	$T1a,$A0,$bc,$T1a
-	fmadd	$T1b,$A0,$bd,$T1b
-	fmadd	$T2a,$A1,$bc,$T2a
-	fmadd	$T2b,$A1,$bd,$T2b
-	fmadd	$T3a,$A2,$bc,$T3a
-	fmadd	$T3b,$A2,$bd,$T3b
-	fmul	$dota,$A3,$bc
-	fmul	$dotb,$A3,$bd
-
-	fmadd	$T1a,$N1,$na,$T1a
-	fmadd	$T1b,$N1,$nb,$T1b
-	fmadd	$T2a,$N2,$na,$T2a
-	fmadd	$T2b,$N2,$nb,$T2b
-	fmadd	$T3a,$N3,$na,$T3a
-	fmadd	$T3b,$N3,$nb,$T3b
-	fmadd	$T0a,$N0,$na,$T0a
-	fmadd	$T0b,$N0,$nb,$T0b
-
-	fmadd	$T1a,$N0,$nc,$T1a
-	fmadd	$T1b,$N0,$nd,$T1b
-	fmadd	$T2a,$N1,$nc,$T2a
-	fmadd	$T2b,$N1,$nd,$T2b
-	fmadd	$T3a,$N2,$nc,$T3a
-	fmadd	$T3b,$N2,$nd,$T3b
-	fmadd	$dota,$N3,$nc,$dota
-	fmadd	$dotb,$N3,$nd,$dotb
-
-	fctid	$T0a,$T0a
-	fctid	$T0b,$T0b
-	fctid	$T1a,$T1a
-	fctid	$T1b,$T1b
-	fctid	$T2a,$T2a
-	fctid	$T2b,$T2b
-	fctid	$T3a,$T3a
-	fctid	$T3b,$T3b
-
-	stfd	$T0a,`$FRAME+0`($sp)
-	stfd	$T0b,`$FRAME+8`($sp)
-	stfd	$T1a,`$FRAME+16`($sp)
-	stfd	$T1b,`$FRAME+24`($sp)
-	stfd	$T2a,`$FRAME+32`($sp)
-	stfd	$T2b,`$FRAME+40`($sp)
-	stfd	$T3a,`$FRAME+48`($sp)
-	stfd	$T3b,`$FRAME+56`($sp)
-
-.align	5
-L1st:
-___
-$code.=<<___ if ($SIZE_T==8);
-	lwz	$t0,4($ap)		; load a[j] as 32-bit word pair
-	lwz	$t1,0($ap)
-	lwz	$t2,12($ap)		; load a[j+1] as 32-bit word pair
-	lwz	$t3,8($ap)
-	lwz	$t4,4($np)		; load n[j] as 32-bit word pair
-	lwz	$t5,0($np)
-	lwz	$t6,12($np)		; load n[j+1] as 32-bit word pair
-	lwz	$t7,8($np)
-___
-$code.=<<___ if ($SIZE_T==4);
-	lwz	$t0,0($ap)		; load a[j..j+3] as 32-bit word pairs
-	lwz	$t1,4($ap)
-	lwz	$t2,8($ap)
-	lwz	$t3,12($ap)
-	lwz	$t4,0($np)		; load n[j..j+3] as 32-bit word pairs
-	lwz	$t5,4($np)
-	lwz	$t6,8($np)
-	lwz	$t7,12($np)
-___
-$code.=<<___;
-	std	$t0,`$FRAME+64`($sp)
-	std	$t1,`$FRAME+72`($sp)
-	std	$t2,`$FRAME+80`($sp)
-	std	$t3,`$FRAME+88`($sp)
-	std	$t4,`$FRAME+96`($sp)
-	std	$t5,`$FRAME+104`($sp)
-	std	$t6,`$FRAME+112`($sp)
-	std	$t7,`$FRAME+120`($sp)
-	ld	$t0,`$FRAME+0`($sp)
-	ld	$t1,`$FRAME+8`($sp)
-	ld	$t2,`$FRAME+16`($sp)
-	ld	$t3,`$FRAME+24`($sp)
-	ld	$t4,`$FRAME+32`($sp)
-	ld	$t5,`$FRAME+40`($sp)
-	ld	$t6,`$FRAME+48`($sp)
-	ld	$t7,`$FRAME+56`($sp)
-	lfd	$A0,`$FRAME+64`($sp)
-	lfd	$A1,`$FRAME+72`($sp)
-	lfd	$A2,`$FRAME+80`($sp)
-	lfd	$A3,`$FRAME+88`($sp)
-	lfd	$N0,`$FRAME+96`($sp)
-	lfd	$N1,`$FRAME+104`($sp)
-	lfd	$N2,`$FRAME+112`($sp)
-	lfd	$N3,`$FRAME+120`($sp)
-	fcfid	$A0,$A0
-	fcfid	$A1,$A1
-	fcfid	$A2,$A2
-	fcfid	$A3,$A3
-	fcfid	$N0,$N0
-	fcfid	$N1,$N1
-	fcfid	$N2,$N2
-	fcfid	$N3,$N3
-	addi	$ap,$ap,16
-	addi	$np,$np,16
-
-	fmul	$T1a,$A1,$ba
-	fmul	$T1b,$A1,$bb
-	fmul	$T2a,$A2,$ba
-	fmul	$T2b,$A2,$bb
-	stfd	$A0,8($nap_d)		; save a[j] in double format
-	stfd	$A1,16($nap_d)
-	fmul	$T3a,$A3,$ba
-	fmul	$T3b,$A3,$bb
-	fmadd	$T0a,$A0,$ba,$dota
-	fmadd	$T0b,$A0,$bb,$dotb
-	stfd	$A2,24($nap_d)		; save a[j+1] in double format
-	stfd	$A3,32($nap_d)
-
-	fmadd	$T1a,$A0,$bc,$T1a
-	fmadd	$T1b,$A0,$bd,$T1b
-	fmadd	$T2a,$A1,$bc,$T2a
-	fmadd	$T2b,$A1,$bd,$T2b
-	stfd	$N0,40($nap_d)		; save n[j] in double format
-	stfd	$N1,48($nap_d)
-	fmadd	$T3a,$A2,$bc,$T3a
-	fmadd	$T3b,$A2,$bd,$T3b
-	 add	$t0,$t0,$carry		; can not overflow
-	fmul	$dota,$A3,$bc
-	fmul	$dotb,$A3,$bd
-	stfd	$N2,56($nap_d)		; save n[j+1] in double format
-	stfdu	$N3,64($nap_d)
-	 srdi	$carry,$t0,16
-	 add	$t1,$t1,$carry
-	 srdi	$carry,$t1,16
-
-	fmadd	$T1a,$N1,$na,$T1a
-	fmadd	$T1b,$N1,$nb,$T1b
-	 insrdi	$t0,$t1,16,32
-	fmadd	$T2a,$N2,$na,$T2a
-	fmadd	$T2b,$N2,$nb,$T2b
-	 add	$t2,$t2,$carry
-	fmadd	$T3a,$N3,$na,$T3a
-	fmadd	$T3b,$N3,$nb,$T3b
-	 srdi	$carry,$t2,16
-	fmadd	$T0a,$N0,$na,$T0a
-	fmadd	$T0b,$N0,$nb,$T0b
-	 insrdi	$t0,$t2,16,16
-	 add	$t3,$t3,$carry
-	 srdi	$carry,$t3,16
-
-	fmadd	$T1a,$N0,$nc,$T1a
-	fmadd	$T1b,$N0,$nd,$T1b
-	 insrdi	$t0,$t3,16,0		; 0..63 bits
-	fmadd	$T2a,$N1,$nc,$T2a
-	fmadd	$T2b,$N1,$nd,$T2b
-	 add	$t4,$t4,$carry
-	fmadd	$T3a,$N2,$nc,$T3a
-	fmadd	$T3b,$N2,$nd,$T3b
-	 srdi	$carry,$t4,16
-	fmadd	$dota,$N3,$nc,$dota
-	fmadd	$dotb,$N3,$nd,$dotb
-	 add	$t5,$t5,$carry
-	 srdi	$carry,$t5,16
-	 insrdi	$t4,$t5,16,32
-
-	fctid	$T0a,$T0a
-	fctid	$T0b,$T0b
-	 add	$t6,$t6,$carry
-	fctid	$T1a,$T1a
-	fctid	$T1b,$T1b
-	 srdi	$carry,$t6,16
-	fctid	$T2a,$T2a
-	fctid	$T2b,$T2b
-	 insrdi	$t4,$t6,16,16
-	fctid	$T3a,$T3a
-	fctid	$T3b,$T3b
-	 add	$t7,$t7,$carry
-	 insrdi	$t4,$t7,16,0		; 64..127 bits
-	 srdi	$carry,$t7,16		; upper 33 bits
-
-	stfd	$T0a,`$FRAME+0`($sp)
-	stfd	$T0b,`$FRAME+8`($sp)
-	stfd	$T1a,`$FRAME+16`($sp)
-	stfd	$T1b,`$FRAME+24`($sp)
-	stfd	$T2a,`$FRAME+32`($sp)
-	stfd	$T2b,`$FRAME+40`($sp)
-	stfd	$T3a,`$FRAME+48`($sp)
-	stfd	$T3b,`$FRAME+56`($sp)
-	 std	$t0,8($tp)		; tp[j-1]
-	 stdu	$t4,16($tp)		; tp[j]
-	bdnz-	L1st
-
-	fctid	$dota,$dota
-	fctid	$dotb,$dotb
-
-	ld	$t0,`$FRAME+0`($sp)
-	ld	$t1,`$FRAME+8`($sp)
-	ld	$t2,`$FRAME+16`($sp)
-	ld	$t3,`$FRAME+24`($sp)
-	ld	$t4,`$FRAME+32`($sp)
-	ld	$t5,`$FRAME+40`($sp)
-	ld	$t6,`$FRAME+48`($sp)
-	ld	$t7,`$FRAME+56`($sp)
-	stfd	$dota,`$FRAME+64`($sp)
-	stfd	$dotb,`$FRAME+72`($sp)
-
-	add	$t0,$t0,$carry		; can not overflow
-	srdi	$carry,$t0,16
-	add	$t1,$t1,$carry
-	srdi	$carry,$t1,16
-	insrdi	$t0,$t1,16,32
-	add	$t2,$t2,$carry
-	srdi	$carry,$t2,16
-	insrdi	$t0,$t2,16,16
-	add	$t3,$t3,$carry
-	srdi	$carry,$t3,16
-	insrdi	$t0,$t3,16,0		; 0..63 bits
-	add	$t4,$t4,$carry
-	srdi	$carry,$t4,16
-	add	$t5,$t5,$carry
-	srdi	$carry,$t5,16
-	insrdi	$t4,$t5,16,32
-	add	$t6,$t6,$carry
-	srdi	$carry,$t6,16
-	insrdi	$t4,$t6,16,16
-	add	$t7,$t7,$carry
-	insrdi	$t4,$t7,16,0		; 64..127 bits
-	srdi	$carry,$t7,16		; upper 33 bits
-	ld	$t6,`$FRAME+64`($sp)
-	ld	$t7,`$FRAME+72`($sp)
-
-	std	$t0,8($tp)		; tp[j-1]
-	stdu	$t4,16($tp)		; tp[j]
-
-	add	$t6,$t6,$carry		; can not overflow
-	srdi	$carry,$t6,16
-	add	$t7,$t7,$carry
-	insrdi	$t6,$t7,48,0
-	srdi	$ovf,$t7,48
-	std	$t6,8($tp)		; tp[num-1]
-
-	slwi	$t7,$num,2
-	subf	$nap_d,$t7,$nap_d	; rewind pointer
-
-	li	$i,8			; i=1
-.align	5
-Louter:
-___
-$code.=<<___ if ($SIZE_T==8);
-	ldx	$t3,$bp,$i	; bp[i]
-___
-$code.=<<___ if ($SIZE_T==4);
-	add	$t0,$bp,$i
-	lwz	$t3,0($t0)		; bp[i,i+1]
-	lwz	$t0,4($t0)
-	insrdi	$t3,$t0,32,0
-___
-$code.=<<___;
-	ld	$t6,`$FRAME+$TRANSFER+8`($sp)	; tp[0]
-	mulld	$t7,$a0,$t3	; ap[0]*bp[i]
-
-	addi	$tp,$sp,`$FRAME+$TRANSFER`
-	add	$t7,$t7,$t6	; ap[0]*bp[i]+tp[0]
-	li	$carry,0
-	mulld	$t7,$t7,$n0	; tp[0]*n0
-	mtctr	$j
-
-	; transfer bp[i] to FPU as 4x16-bit values
-	extrdi	$t0,$t3,16,48
-	extrdi	$t1,$t3,16,32
-	extrdi	$t2,$t3,16,16
-	extrdi	$t3,$t3,16,0
-	std	$t0,`$FRAME+0`($sp)
-	std	$t1,`$FRAME+8`($sp)
-	std	$t2,`$FRAME+16`($sp)
-	std	$t3,`$FRAME+24`($sp)
-	; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values
-	extrdi	$t4,$t7,16,48
-	extrdi	$t5,$t7,16,32
-	extrdi	$t6,$t7,16,16
-	extrdi	$t7,$t7,16,0
-	std	$t4,`$FRAME+32`($sp)
-	std	$t5,`$FRAME+40`($sp)
-	std	$t6,`$FRAME+48`($sp)
-	std	$t7,`$FRAME+56`($sp)
-
-	lfd	$A0,8($nap_d)		; load a[j] in double format
-	lfd	$A1,16($nap_d)
-	lfd	$A2,24($nap_d)		; load a[j+1] in double format
-	lfd	$A3,32($nap_d)
-	lfd	$N0,40($nap_d)		; load n[j] in double format
-	lfd	$N1,48($nap_d)
-	lfd	$N2,56($nap_d)		; load n[j+1] in double format
-	lfdu	$N3,64($nap_d)
-
-	lfd	$ba,`$FRAME+0`($sp)
-	lfd	$bb,`$FRAME+8`($sp)
-	lfd	$bc,`$FRAME+16`($sp)
-	lfd	$bd,`$FRAME+24`($sp)
-	lfd	$na,`$FRAME+32`($sp)
-	lfd	$nb,`$FRAME+40`($sp)
-	lfd	$nc,`$FRAME+48`($sp)
-	lfd	$nd,`$FRAME+56`($sp)
-
-	fcfid	$ba,$ba
-	fcfid	$bb,$bb
-	fcfid	$bc,$bc
-	fcfid	$bd,$bd
-	fcfid	$na,$na
-	fcfid	$nb,$nb
-	fcfid	$nc,$nc
-	fcfid	$nd,$nd
-
-	fmul	$T1a,$A1,$ba
-	fmul	$T1b,$A1,$bb
-	fmul	$T2a,$A2,$ba
-	fmul	$T2b,$A2,$bb
-	fmul	$T3a,$A3,$ba
-	fmul	$T3b,$A3,$bb
-	fmul	$T0a,$A0,$ba
-	fmul	$T0b,$A0,$bb
-
-	fmadd	$T1a,$A0,$bc,$T1a
-	fmadd	$T1b,$A0,$bd,$T1b
-	fmadd	$T2a,$A1,$bc,$T2a
-	fmadd	$T2b,$A1,$bd,$T2b
-	fmadd	$T3a,$A2,$bc,$T3a
-	fmadd	$T3b,$A2,$bd,$T3b
-	fmul	$dota,$A3,$bc
-	fmul	$dotb,$A3,$bd
-
-	fmadd	$T1a,$N1,$na,$T1a
-	fmadd	$T1b,$N1,$nb,$T1b
-	 lfd	$A0,8($nap_d)		; load a[j] in double format
-	 lfd	$A1,16($nap_d)
-	fmadd	$T2a,$N2,$na,$T2a
-	fmadd	$T2b,$N2,$nb,$T2b
-	 lfd	$A2,24($nap_d)		; load a[j+1] in double format
-	 lfd	$A3,32($nap_d)
-	fmadd	$T3a,$N3,$na,$T3a
-	fmadd	$T3b,$N3,$nb,$T3b
-	fmadd	$T0a,$N0,$na,$T0a
-	fmadd	$T0b,$N0,$nb,$T0b
-
-	fmadd	$T1a,$N0,$nc,$T1a
-	fmadd	$T1b,$N0,$nd,$T1b
-	fmadd	$T2a,$N1,$nc,$T2a
-	fmadd	$T2b,$N1,$nd,$T2b
-	fmadd	$T3a,$N2,$nc,$T3a
-	fmadd	$T3b,$N2,$nd,$T3b
-	fmadd	$dota,$N3,$nc,$dota
-	fmadd	$dotb,$N3,$nd,$dotb
-
-	fctid	$T0a,$T0a
-	fctid	$T0b,$T0b
-	fctid	$T1a,$T1a
-	fctid	$T1b,$T1b
-	fctid	$T2a,$T2a
-	fctid	$T2b,$T2b
-	fctid	$T3a,$T3a
-	fctid	$T3b,$T3b
-
-	stfd	$T0a,`$FRAME+0`($sp)
-	stfd	$T0b,`$FRAME+8`($sp)
-	stfd	$T1a,`$FRAME+16`($sp)
-	stfd	$T1b,`$FRAME+24`($sp)
-	stfd	$T2a,`$FRAME+32`($sp)
-	stfd	$T2b,`$FRAME+40`($sp)
-	stfd	$T3a,`$FRAME+48`($sp)
-	stfd	$T3b,`$FRAME+56`($sp)
-
-.align	5
-Linner:
-	fmul	$T1a,$A1,$ba
-	fmul	$T1b,$A1,$bb
-	fmul	$T2a,$A2,$ba
-	fmul	$T2b,$A2,$bb
-	lfd	$N0,40($nap_d)		; load n[j] in double format
-	lfd	$N1,48($nap_d)
-	fmul	$T3a,$A3,$ba
-	fmul	$T3b,$A3,$bb
-	fmadd	$T0a,$A0,$ba,$dota
-	fmadd	$T0b,$A0,$bb,$dotb
-	lfd	$N2,56($nap_d)		; load n[j+1] in double format
-	lfdu	$N3,64($nap_d)
-
-	fmadd	$T1a,$A0,$bc,$T1a
-	fmadd	$T1b,$A0,$bd,$T1b
-	fmadd	$T2a,$A1,$bc,$T2a
-	fmadd	$T2b,$A1,$bd,$T2b
-	 lfd	$A0,8($nap_d)		; load a[j] in double format
-	 lfd	$A1,16($nap_d)
-	fmadd	$T3a,$A2,$bc,$T3a
-	fmadd	$T3b,$A2,$bd,$T3b
-	fmul	$dota,$A3,$bc
-	fmul	$dotb,$A3,$bd
-	 lfd	$A2,24($nap_d)		; load a[j+1] in double format
-	 lfd	$A3,32($nap_d)
-
-	fmadd	$T1a,$N1,$na,$T1a
-	fmadd	$T1b,$N1,$nb,$T1b
-	 ld	$t0,`$FRAME+0`($sp)
-	 ld	$t1,`$FRAME+8`($sp)
-	fmadd	$T2a,$N2,$na,$T2a
-	fmadd	$T2b,$N2,$nb,$T2b
-	 ld	$t2,`$FRAME+16`($sp)
-	 ld	$t3,`$FRAME+24`($sp)
-	fmadd	$T3a,$N3,$na,$T3a
-	fmadd	$T3b,$N3,$nb,$T3b
-	 add	$t0,$t0,$carry		; can not overflow
-	 ld	$t4,`$FRAME+32`($sp)
-	 ld	$t5,`$FRAME+40`($sp)
-	fmadd	$T0a,$N0,$na,$T0a
-	fmadd	$T0b,$N0,$nb,$T0b
-	 srdi	$carry,$t0,16
-	 add	$t1,$t1,$carry
-	 srdi	$carry,$t1,16
-	 ld	$t6,`$FRAME+48`($sp)
-	 ld	$t7,`$FRAME+56`($sp)
-
-	fmadd	$T1a,$N0,$nc,$T1a
-	fmadd	$T1b,$N0,$nd,$T1b
-	 insrdi	$t0,$t1,16,32
-	 ld	$t1,8($tp)		; tp[j]
-	fmadd	$T2a,$N1,$nc,$T2a
-	fmadd	$T2b,$N1,$nd,$T2b
-	 add	$t2,$t2,$carry
-	fmadd	$T3a,$N2,$nc,$T3a
-	fmadd	$T3b,$N2,$nd,$T3b
-	 srdi	$carry,$t2,16
-	 insrdi	$t0,$t2,16,16
-	fmadd	$dota,$N3,$nc,$dota
-	fmadd	$dotb,$N3,$nd,$dotb
-	 add	$t3,$t3,$carry
-	 ldu	$t2,16($tp)		; tp[j+1]
-	 srdi	$carry,$t3,16
-	 insrdi	$t0,$t3,16,0		; 0..63 bits
-	 add	$t4,$t4,$carry
-
-	fctid	$T0a,$T0a
-	fctid	$T0b,$T0b
-	 srdi	$carry,$t4,16
-	fctid	$T1a,$T1a
-	fctid	$T1b,$T1b
-	 add	$t5,$t5,$carry
-	fctid	$T2a,$T2a
-	fctid	$T2b,$T2b
-	 srdi	$carry,$t5,16
-	 insrdi	$t4,$t5,16,32
-	fctid	$T3a,$T3a
-	fctid	$T3b,$T3b
-	 add	$t6,$t6,$carry
-	 srdi	$carry,$t6,16
-	 insrdi	$t4,$t6,16,16
-
-	stfd	$T0a,`$FRAME+0`($sp)
-	stfd	$T0b,`$FRAME+8`($sp)
-	 add	$t7,$t7,$carry
-	 addc	$t3,$t0,$t1
-___
-$code.=<<___ if ($SIZE_T==4);		# adjust XER[CA]
-	extrdi	$t0,$t0,32,0
-	extrdi	$t1,$t1,32,0
-	adde	$t0,$t0,$t1
-___
-$code.=<<___;
-	stfd	$T1a,`$FRAME+16`($sp)
-	stfd	$T1b,`$FRAME+24`($sp)
-	 insrdi	$t4,$t7,16,0		; 64..127 bits
-	 srdi	$carry,$t7,16		; upper 33 bits
-	stfd	$T2a,`$FRAME+32`($sp)
-	stfd	$T2b,`$FRAME+40`($sp)
-	 adde	$t5,$t4,$t2
-___
-$code.=<<___ if ($SIZE_T==4);		# adjust XER[CA]
-	extrdi	$t4,$t4,32,0
-	extrdi	$t2,$t2,32,0
-	adde	$t4,$t4,$t2
-___
-$code.=<<___;
-	stfd	$T3a,`$FRAME+48`($sp)
-	stfd	$T3b,`$FRAME+56`($sp)
-	 addze	$carry,$carry
-	 std	$t3,-16($tp)		; tp[j-1]
-	 std	$t5,-8($tp)		; tp[j]
-	bdnz-	Linner
-
-	fctid	$dota,$dota
-	fctid	$dotb,$dotb
-	ld	$t0,`$FRAME+0`($sp)
-	ld	$t1,`$FRAME+8`($sp)
-	ld	$t2,`$FRAME+16`($sp)
-	ld	$t3,`$FRAME+24`($sp)
-	ld	$t4,`$FRAME+32`($sp)
-	ld	$t5,`$FRAME+40`($sp)
-	ld	$t6,`$FRAME+48`($sp)
-	ld	$t7,`$FRAME+56`($sp)
-	stfd	$dota,`$FRAME+64`($sp)
-	stfd	$dotb,`$FRAME+72`($sp)
-
-	add	$t0,$t0,$carry		; can not overflow
-	srdi	$carry,$t0,16
-	add	$t1,$t1,$carry
-	srdi	$carry,$t1,16
-	insrdi	$t0,$t1,16,32
-	add	$t2,$t2,$carry
-	ld	$t1,8($tp)		; tp[j]
-	srdi	$carry,$t2,16
-	insrdi	$t0,$t2,16,16
-	add	$t3,$t3,$carry
-	ldu	$t2,16($tp)		; tp[j+1]
-	srdi	$carry,$t3,16
-	insrdi	$t0,$t3,16,0		; 0..63 bits
-	add	$t4,$t4,$carry
-	srdi	$carry,$t4,16
-	add	$t5,$t5,$carry
-	srdi	$carry,$t5,16
-	insrdi	$t4,$t5,16,32
-	add	$t6,$t6,$carry
-	srdi	$carry,$t6,16
-	insrdi	$t4,$t6,16,16
-	add	$t7,$t7,$carry
-	insrdi	$t4,$t7,16,0		; 64..127 bits
-	srdi	$carry,$t7,16		; upper 33 bits
-	ld	$t6,`$FRAME+64`($sp)
-	ld	$t7,`$FRAME+72`($sp)
-
-	addc	$t3,$t0,$t1
-___
-$code.=<<___ if ($SIZE_T==4);		# adjust XER[CA]
-	extrdi	$t0,$t0,32,0
-	extrdi	$t1,$t1,32,0
-	adde	$t0,$t0,$t1
-___
-$code.=<<___;
-	adde	$t5,$t4,$t2
-___
-$code.=<<___ if ($SIZE_T==4);		# adjust XER[CA]
-	extrdi	$t4,$t4,32,0
-	extrdi	$t2,$t2,32,0
-	adde	$t4,$t4,$t2
-___
-$code.=<<___;
-	addze	$carry,$carry
-
-	std	$t3,-16($tp)		; tp[j-1]
-	std	$t5,-8($tp)		; tp[j]
-
-	add	$carry,$carry,$ovf	; consume upmost overflow
-	add	$t6,$t6,$carry		; can not overflow
-	srdi	$carry,$t6,16
-	add	$t7,$t7,$carry
-	insrdi	$t6,$t7,48,0
-	srdi	$ovf,$t7,48
-	std	$t6,0($tp)		; tp[num-1]
-
-	slwi	$t7,$num,2
-	addi	$i,$i,8
-	subf	$nap_d,$t7,$nap_d	; rewind pointer
-	cmpw	$i,$num
-	blt-	Louter
-___
-
-$code.=<<___ if ($SIZE_T==8);
-	subf	$np,$num,$np	; rewind np
-	addi	$j,$j,1		; restore counter
-	subfc	$i,$i,$i	; j=0 and "clear" XER[CA]
-	addi	$tp,$sp,`$FRAME+$TRANSFER+8`
-	addi	$t4,$sp,`$FRAME+$TRANSFER+16`
-	addi	$t5,$np,8
-	addi	$t6,$rp,8
-	mtctr	$j
-
-.align	4
-Lsub:	ldx	$t0,$tp,$i
-	ldx	$t1,$np,$i
-	ldx	$t2,$t4,$i
-	ldx	$t3,$t5,$i
-	subfe	$t0,$t1,$t0	; tp[j]-np[j]
-	subfe	$t2,$t3,$t2	; tp[j+1]-np[j+1]
-	stdx	$t0,$rp,$i
-	stdx	$t2,$t6,$i
-	addi	$i,$i,16
-	bdnz-	Lsub
-
-	li	$i,0
-	subfe	$ovf,$i,$ovf	; handle upmost overflow bit
-	and	$ap,$tp,$ovf
-	andc	$np,$rp,$ovf
-	or	$ap,$ap,$np	; ap=borrow?tp:rp
-	addi	$t7,$ap,8
-	mtctr	$j
-
-.align	4
-Lcopy:				; copy or in-place refresh
-	ldx	$t0,$ap,$i
-	ldx	$t1,$t7,$i
-	std	$i,8($nap_d)	; zap nap_d
-	std	$i,16($nap_d)
-	std	$i,24($nap_d)
-	std	$i,32($nap_d)
-	std	$i,40($nap_d)
-	std	$i,48($nap_d)
-	std	$i,56($nap_d)
-	stdu	$i,64($nap_d)
-	stdx	$t0,$rp,$i
-	stdx	$t1,$t6,$i
-	stdx	$i,$tp,$i	; zap tp at once
-	stdx	$i,$t4,$i
-	addi	$i,$i,16
-	bdnz-	Lcopy
-___
-$code.=<<___ if ($SIZE_T==4);
-	subf	$np,$num,$np	; rewind np
-	addi	$j,$j,1		; restore counter
-	subfc	$i,$i,$i	; j=0 and "clear" XER[CA]
-	addi	$tp,$sp,`$FRAME+$TRANSFER`
-	addi	$np,$np,-4
-	addi	$rp,$rp,-4
-	addi	$ap,$sp,`$FRAME+$TRANSFER+4`
-	mtctr	$j
-
-.align	4
-Lsub:	ld	$t0,8($tp)	; load tp[j..j+3] in 64-bit word order
-	ldu	$t2,16($tp)
-	lwz	$t4,4($np)	; load np[j..j+3] in 32-bit word order
-	lwz	$t5,8($np)
-	lwz	$t6,12($np)
-	lwzu	$t7,16($np)
-	extrdi	$t1,$t0,32,0
-	extrdi	$t3,$t2,32,0
-	subfe	$t4,$t4,$t0	; tp[j]-np[j]
-	 stw	$t0,4($ap)	; save tp[j..j+3] in 32-bit word order
-	subfe	$t5,$t5,$t1	; tp[j+1]-np[j+1]
-	 stw	$t1,8($ap)
-	subfe	$t6,$t6,$t2	; tp[j+2]-np[j+2]
-	 stw	$t2,12($ap)
-	subfe	$t7,$t7,$t3	; tp[j+3]-np[j+3]
-	 stwu	$t3,16($ap)
-	stw	$t4,4($rp)
-	stw	$t5,8($rp)
-	stw	$t6,12($rp)
-	stwu	$t7,16($rp)
-	bdnz-	Lsub
-
-	li	$i,0
-	subfe	$ovf,$i,$ovf	; handle upmost overflow bit
-	addi	$tp,$sp,`$FRAME+$TRANSFER+4`
-	subf	$rp,$num,$rp	; rewind rp
-	and	$ap,$tp,$ovf
-	andc	$np,$rp,$ovf
-	or	$ap,$ap,$np	; ap=borrow?tp:rp
-	addi	$tp,$sp,`$FRAME+$TRANSFER`
-	mtctr	$j
-
-.align	4
-Lcopy:				; copy or in-place refresh
-	lwz	$t0,4($ap)
-	lwz	$t1,8($ap)
-	lwz	$t2,12($ap)
-	lwzu	$t3,16($ap)
-	std	$i,8($nap_d)	; zap nap_d
-	std	$i,16($nap_d)
-	std	$i,24($nap_d)
-	std	$i,32($nap_d)
-	std	$i,40($nap_d)
-	std	$i,48($nap_d)
-	std	$i,56($nap_d)
-	stdu	$i,64($nap_d)
-	stw	$t0,4($rp)
-	stw	$t1,8($rp)
-	stw	$t2,12($rp)
-	stwu	$t3,16($rp)
-	std	$i,8($tp)	; zap tp at once
-	stdu	$i,16($tp)
-	bdnz-	Lcopy
-___
-
-$code.=<<___;
-	$POP	$i,0($sp)
-	li	r3,1	; signal "handled"
-	$POP	r22,`-12*8-10*$SIZE_T`($i)
-	$POP	r23,`-12*8-9*$SIZE_T`($i)
-	$POP	r24,`-12*8-8*$SIZE_T`($i)
-	$POP	r25,`-12*8-7*$SIZE_T`($i)
-	$POP	r26,`-12*8-6*$SIZE_T`($i)
-	$POP	r27,`-12*8-5*$SIZE_T`($i)
-	$POP	r28,`-12*8-4*$SIZE_T`($i)
-	$POP	r29,`-12*8-3*$SIZE_T`($i)
-	$POP	r30,`-12*8-2*$SIZE_T`($i)
-	$POP	r31,`-12*8-1*$SIZE_T`($i)
-	lfd	f20,`-12*8`($i)
-	lfd	f21,`-11*8`($i)
-	lfd	f22,`-10*8`($i)
-	lfd	f23,`-9*8`($i)
-	lfd	f24,`-8*8`($i)
-	lfd	f25,`-7*8`($i)
-	lfd	f26,`-6*8`($i)
-	lfd	f27,`-5*8`($i)
-	lfd	f28,`-4*8`($i)
-	lfd	f29,`-3*8`($i)
-	lfd	f30,`-2*8`($i)
-	lfd	f31,`-1*8`($i)
-	mr	$sp,$i
-	blr
-	.long	0
-	.byte	0,12,4,0,0x8c,10,6,0
-	.long	0
-
-.asciz  "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
-close STDOUT;
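
The module above carries its limbs through the FPU: each 64-bit word is split into 16-bit pieces, converted to doubles with fcfid, multiplied and accumulated with fmul/fmadd, and converted back with fctid. The scheme works because products of 16-bit limbs stay far below the 53-bit double mantissa, so the accumulation never rounds. A minimal C sketch of that invariant (illustrative only, not part of the module):

    #include <assert.h>
    #include <stdint.h>

    /* Sums of 16x16-bit limb products stay exact in a double: each
     * product is < 2^32, so even thousands of fmadd-style
     * accumulations remain far below the 53-bit mantissa. */
    int main(void)
    {
        uint16_t a = 0xffff, b = 0xffff;       /* worst-case limbs */
        double acc = 0.0;
        for (int i = 0; i < 1000; i++)
            acc += (double)a * (double)b;      /* mirrors an fmadd chain */
        assert(acc == 1000.0 * 65535.0 * 65535.0);  /* ~2^42, still exact */
        return 0;
    }

Keeping limbs at 16 bits leaves roughly 2^21 exact accumulations of headroom, comfortably more than any supported key length needs.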

+ 0 - 221
drivers/builtin_openssl2/crypto/bn/asm/s390x-gf2m.pl

@@ -1,221 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# May 2011
-#
-# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
-# in bn_gf2m.c. It's a kind of low-hanging mechanical port from C for
-# the time being... gcc 4.3 appeared to generate poor code, therefore
-# the effort. And indeed, the module delivers 55%-90%(*) improvement
-# on the heaviest ECDSA verify and ECDH benchmarks for 163- and
-# 571-bit key lengths on z990, 30%-55%(*) on z10, and 70%-110%(*) on
-# z196. This is for a 64-bit build. In the 32-bit "highgprs" case the
-# improvement is even higher, for example on z990 it was measured at
-# 80%-150%. ECDSA sign is a modest 9%-12% faster. Keep in mind that
-# these coefficients are not the ones for bn_GF2m_mul_2x2 itself, as
-# not all CPU time is burnt in it...
-#
-# (*)	gcc 4.1 was observed to deliver better results than gcc 4.3,
-#	so that improvement coefficients can vary from one specific
-#	setup to another.
-
-$flavour = shift;
-
-if ($flavour =~ /3[12]/) {
-        $SIZE_T=4;
-        $g="";
-} else {
-        $SIZE_T=8;
-        $g="g";
-}
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-$stdframe=16*$SIZE_T+4*8;
-
-$rp="%r2";
-$a1="%r3";
-$a0="%r4";
-$b1="%r5";
-$b0="%r6";
-
-$ra="%r14";
-$sp="%r15";
-
-@T=("%r0","%r1");
-@i=("%r12","%r13");
-
-($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(6..11));
-($lo,$hi,$b)=map("%r$_",(3..5)); $a=$lo; $mask=$a8;
-
-$code.=<<___;
-.text
-
-.type	_mul_1x1,\@function
-.align	16
-_mul_1x1:
-	lgr	$a1,$a
-	sllg	$a2,$a,1
-	sllg	$a4,$a,2
-	sllg	$a8,$a,3
-
-	srag	$lo,$a1,63			# broadcast 63rd bit
-	nihh	$a1,0x1fff
-	srag	@i[0],$a2,63			# broadcast 62nd bit
-	nihh	$a2,0x3fff
-	srag	@i[1],$a4,63			# broadcast 61st bit
-	nihh	$a4,0x7fff
-	ngr	$lo,$b
-	ngr	@i[0],$b
-	ngr	@i[1],$b
-
-	lghi	@T[0],0
-	lgr	$a12,$a1
-	stg	@T[0],`$stdframe+0*8`($sp)	# tab[0]=0
-	xgr	$a12,$a2
-	stg	$a1,`$stdframe+1*8`($sp)	# tab[1]=a1
-	 lgr	$a48,$a4
-	stg	$a2,`$stdframe+2*8`($sp)	# tab[2]=a2
-	 xgr	$a48,$a8
-	stg	$a12,`$stdframe+3*8`($sp)	# tab[3]=a1^a2
-	 xgr	$a1,$a4
-
-	stg	$a4,`$stdframe+4*8`($sp)	# tab[4]=a4
-	xgr	$a2,$a4
-	stg	$a1,`$stdframe+5*8`($sp)	# tab[5]=a1^a4
-	xgr	$a12,$a4
-	stg	$a2,`$stdframe+6*8`($sp)	# tab[6]=a2^a4
-	 xgr	$a1,$a48
-	stg	$a12,`$stdframe+7*8`($sp)	# tab[7]=a1^a2^a4
-	 xgr	$a2,$a48
-
-	stg	$a8,`$stdframe+8*8`($sp)	# tab[8]=a8
-	xgr	$a12,$a48
-	stg	$a1,`$stdframe+9*8`($sp)	# tab[9]=a1^a8
-	 xgr	$a1,$a4
-	stg	$a2,`$stdframe+10*8`($sp)	# tab[10]=a2^a8
-	 xgr	$a2,$a4
-	stg	$a12,`$stdframe+11*8`($sp)	# tab[11]=a1^a2^a8
-
-	xgr	$a12,$a4
-	stg	$a48,`$stdframe+12*8`($sp)	# tab[12]=a4^a8
-	 srlg	$hi,$lo,1
-	stg	$a1,`$stdframe+13*8`($sp)	# tab[13]=a1^a4^a8
-	 sllg	$lo,$lo,63
-	stg	$a2,`$stdframe+14*8`($sp)	# tab[14]=a2^a4^a8
-	 srlg	@T[0],@i[0],2
-	stg	$a12,`$stdframe+15*8`($sp)	# tab[15]=a1^a2^a4^a8
-
-	lghi	$mask,`0xf<<3`
-	sllg	$a1,@i[0],62
-	 sllg	@i[0],$b,3
-	srlg	@T[1],@i[1],3
-	 ngr	@i[0],$mask
-	sllg	$a2,@i[1],61
-	 srlg	@i[1],$b,4-3
-	xgr	$hi,@T[0]
-	 ngr	@i[1],$mask
-	xgr	$lo,$a1
-	xgr	$hi,@T[1]
-	xgr	$lo,$a2
-
-	xg	$lo,$stdframe(@i[0],$sp)
-	srlg	@i[0],$b,8-3
-	ngr	@i[0],$mask
-___
-for($n=1;$n<14;$n++) {
-$code.=<<___;
-	lg	@T[1],$stdframe(@i[1],$sp)
-	srlg	@i[1],$b,`($n+2)*4`-3
-	sllg	@T[0],@T[1],`$n*4`
-	ngr	@i[1],$mask
-	srlg	@T[1],@T[1],`64-$n*4`
-	xgr	$lo,@T[0]
-	xgr	$hi,@T[1]
-___
-	push(@i,shift(@i)); push(@T,shift(@T));
-}
-$code.=<<___;
-	lg	@T[1],$stdframe(@i[1],$sp)
-	sllg	@T[0],@T[1],`$n*4`
-	srlg	@T[1],@T[1],`64-$n*4`
-	xgr	$lo,@T[0]
-	xgr	$hi,@T[1]
-
-	lg	@T[0],$stdframe(@i[0],$sp)
-	sllg	@T[1],@T[0],`($n+1)*4`
-	srlg	@T[0],@T[0],`64-($n+1)*4`
-	xgr	$lo,@T[1]
-	xgr	$hi,@T[0]
-
-	br	$ra
-.size	_mul_1x1,.-_mul_1x1
-
-.globl	bn_GF2m_mul_2x2
-.type	bn_GF2m_mul_2x2,\@function
-.align	16
-bn_GF2m_mul_2x2:
-	stm${g}	%r3,%r15,3*$SIZE_T($sp)
-
-	lghi	%r1,-$stdframe-128
-	la	%r0,0($sp)
-	la	$sp,0(%r1,$sp)			# alloca
-	st${g}	%r0,0($sp)			# back chain
-___
-if ($SIZE_T==8) {
-my @r=map("%r$_",(6..9));
-$code.=<<___;
-	bras	$ra,_mul_1x1			# a1·b1
-	stmg	$lo,$hi,16($rp)
-
-	lg	$a,`$stdframe+128+4*$SIZE_T`($sp)
-	lg	$b,`$stdframe+128+6*$SIZE_T`($sp)
-	bras	$ra,_mul_1x1			# a0·b0
-	stmg	$lo,$hi,0($rp)
-
-	lg	$a,`$stdframe+128+3*$SIZE_T`($sp)
-	lg	$b,`$stdframe+128+5*$SIZE_T`($sp)
-	xg	$a,`$stdframe+128+4*$SIZE_T`($sp)
-	xg	$b,`$stdframe+128+6*$SIZE_T`($sp)
-	bras	$ra,_mul_1x1			# (a0+a1)·(b0+b1)
-	lmg	@r[0],@r[3],0($rp)
-
-	xgr	$lo,$hi
-	xgr	$hi,@r[1]
-	xgr	$lo,@r[0]
-	xgr	$hi,@r[2]
-	xgr	$lo,@r[3]	
-	xgr	$hi,@r[3]
-	xgr	$lo,$hi
-	stg	$hi,16($rp)
-	stg	$lo,8($rp)
-___
-} else {
-$code.=<<___;
-	sllg	%r3,%r3,32
-	sllg	%r5,%r5,32
-	or	%r3,%r4
-	or	%r5,%r6
-	bras	$ra,_mul_1x1
-	rllg	$lo,$lo,32
-	rllg	$hi,$hi,32
-	stmg	$lo,$hi,0($rp)
-___
-}
-$code.=<<___;
-	lm${g}	%r6,%r15,`$stdframe+128+6*$SIZE_T`($sp)
-	br	$ra
-.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
-.string	"GF(2^m) Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-print $code;
-close STDOUT;
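
_mul_1x1 above is the classic 4-bit-window carry-less multiply: it materializes the sixteen GF(2) multiples tab[0..15] on the stack (every XOR combination of a1, a2, a4, a8), then folds b one nibble at a time. A minimal C sketch of the low half of that scheme (hypothetical names; the top-bit corrections that the srag/broadcast masks perform for the high half are deliberately left out):

    #include <stdint.h>

    /* Low 64 bits of a GF(2)[x] (carry-less) 64x64 multiply, built the
     * same way _mul_1x1 builds its 16-entry nibble table. */
    static uint64_t gf2m_mul_lo(uint64_t a, uint64_t b)
    {
        uint64_t tab[16];
        tab[0] = 0;
        tab[1] = a;
        for (int i = 2; i < 16; i += 2) {
            tab[i]     = tab[i >> 1] << 1;   /* even entry: previous * x */
            tab[i + 1] = tab[i] ^ a;         /* odd entry:  ... + a      */
        }
        uint64_t lo = 0;
        for (int n = 60; n >= 0; n -= 4)     /* fold b, nibble at a time */
            lo = (lo << 4) ^ tab[(b >> n) & 0xf];
        return lo;
    }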

+ 0 - 277
drivers/builtin_openssl2/crypto/bn/asm/s390x-mont.pl

@@ -1,277 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# April 2007.
-#
-# Performance improvement over vanilla C code varies from 85% to 45%
-# depending on key length and benchmark. Unfortunately in this context
-# these are not very impressive results [for code that utilizes "wide"
-# 64x64=128-bit multiplication, which is not commonly available to C
-# programmers], at least a hand-coded bn_asm.c replacement is known to
-# provide 30-40% better results for the longest keys. Well, on second
-# thought it's not very surprising, because z-CPUs are single-issue
-# and execute _strictly_ in order, while bn_mul_mont is more or less
-# dependent on the CPU's ability to pipeline instructions and keep
-# several of them "in flight" at the same time. While other methods,
-# for example Karatsuba, aim to minimize the number of multiplications
-# at the cost of an increase in other operations, bn_mul_mont aims to
-# neatly "overlap" multiplications and the other operations [and on
-# most platforms even to minimize the amount of the other operations,
-# in particular references to memory]. But it's possible to improve
-# this module's performance by implementing a dedicated squaring
-# code path and possibly by unrolling loops...
-
-# January 2009.
-#
-# Reschedule to minimize/avoid Address Generation Interlock hazard,
-# make inner loops counter-based.
-
-# November 2010.
-#
-# Adapt for -m31 build. If the kernel supports what's called the
-# "highgprs" feature on Linux [see /proc/cpuinfo], it's possible to
-# use 64-bit instructions and achieve "64-bit" performance even in a
-# 31-bit legacy application context. The feature is not specific to
-# any particular processor, as long as it's a "z-CPU". The latter
-# implies that the code remains z/Architecture specific. Compatibility
-# with 32-bit BN_ULONG is achieved by swapping words after 64-bit
-# loads; follow the _dswap-s. On z990 it was measured to perform
-# 2.6-2.2 times better than compiler-generated code, less for longer
-# keys...
-
-$flavour = shift;
-
-if ($flavour =~ /3[12]/) {
-	$SIZE_T=4;
-	$g="";
-} else {
-	$SIZE_T=8;
-	$g="g";
-}
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-$stdframe=16*$SIZE_T+4*8;
-
-$mn0="%r0";
-$num="%r1";
-
-# int bn_mul_mont(
-$rp="%r2";		# BN_ULONG *rp,
-$ap="%r3";		# const BN_ULONG *ap,
-$bp="%r4";		# const BN_ULONG *bp,
-$np="%r5";		# const BN_ULONG *np,
-$n0="%r6";		# const BN_ULONG *n0,
-#$num="160(%r15)"	# int num);
-
-$bi="%r2";	# zaps rp
-$j="%r7";
-
-$ahi="%r8";
-$alo="%r9";
-$nhi="%r10";
-$nlo="%r11";
-$AHI="%r12";
-$NHI="%r13";
-$count="%r14";
-$sp="%r15";
-
-$code.=<<___;
-.text
-.globl	bn_mul_mont
-.type	bn_mul_mont,\@function
-bn_mul_mont:
-	lgf	$num,`$stdframe+$SIZE_T-4`($sp)	# pull $num
-	sla	$num,`log($SIZE_T)/log(2)`	# $num to enumerate bytes
-	la	$bp,0($num,$bp)
-
-	st${g}	%r2,2*$SIZE_T($sp)
-
-	cghi	$num,16		#
-	lghi	%r2,0		#
-	blr	%r14		# if($num<16) return 0;
-___
-$code.=<<___ if ($flavour =~ /3[12]/);
-	tmll	$num,4
-	bnzr	%r14		# if ($num&1) return 0;
-___
-$code.=<<___ if ($flavour !~ /3[12]/);
-	cghi	$num,96		#
-	bhr	%r14		# if($num>96) return 0;
-___
-$code.=<<___;
-	stm${g}	%r3,%r15,3*$SIZE_T($sp)
-
-	lghi	$rp,-$stdframe-8	# leave room for carry bit
-	lcgr	$j,$num		# -$num
-	lgr	%r0,$sp
-	la	$rp,0($rp,$sp)
-	la	$sp,0($j,$rp)	# alloca
-	st${g}	%r0,0($sp)	# back chain
-
-	sra	$num,3		# restore $num
-	la	$bp,0($j,$bp)	# restore $bp
-	ahi	$num,-1		# adjust $num for inner loop
-	lg	$n0,0($n0)	# pull n0
-	_dswap	$n0
-
-	lg	$bi,0($bp)
-	_dswap	$bi
-	lg	$alo,0($ap)
-	_dswap	$alo
-	mlgr	$ahi,$bi	# ap[0]*bp[0]
-	lgr	$AHI,$ahi
-
-	lgr	$mn0,$alo	# "tp[0]"*n0
-	msgr	$mn0,$n0
-
-	lg	$nlo,0($np)	#
-	_dswap	$nlo
-	mlgr	$nhi,$mn0	# np[0]*m1
-	algr	$nlo,$alo	# +="tp[0]"
-	lghi	$NHI,0
-	alcgr	$NHI,$nhi
-
-	la	$j,8(%r0)	# j=1
-	lr	$count,$num
-
-.align	16
-.L1st:
-	lg	$alo,0($j,$ap)
-	_dswap	$alo
-	mlgr	$ahi,$bi	# ap[j]*bp[0]
-	algr	$alo,$AHI
-	lghi	$AHI,0
-	alcgr	$AHI,$ahi
-
-	lg	$nlo,0($j,$np)
-	_dswap	$nlo
-	mlgr	$nhi,$mn0	# np[j]*m1
-	algr	$nlo,$NHI
-	lghi	$NHI,0
-	alcgr	$nhi,$NHI	# +="tp[j]"
-	algr	$nlo,$alo
-	alcgr	$NHI,$nhi
-
-	stg	$nlo,$stdframe-8($j,$sp)	# tp[j-1]=
-	la	$j,8($j)	# j++
-	brct	$count,.L1st
-
-	algr	$NHI,$AHI
-	lghi	$AHI,0
-	alcgr	$AHI,$AHI	# upmost overflow bit
-	stg	$NHI,$stdframe-8($j,$sp)
-	stg	$AHI,$stdframe($j,$sp)
-	la	$bp,8($bp)	# bp++
-
-.Louter:
-	lg	$bi,0($bp)	# bp[i]
-	_dswap	$bi
-	lg	$alo,0($ap)
-	_dswap	$alo
-	mlgr	$ahi,$bi	# ap[0]*bp[i]
-	alg	$alo,$stdframe($sp)	# +=tp[0]
-	lghi	$AHI,0
-	alcgr	$AHI,$ahi
-
-	lgr	$mn0,$alo
-	msgr	$mn0,$n0	# tp[0]*n0
-
-	lg	$nlo,0($np)	# np[0]
-	_dswap	$nlo
-	mlgr	$nhi,$mn0	# np[0]*m1
-	algr	$nlo,$alo	# +="tp[0]"
-	lghi	$NHI,0
-	alcgr	$NHI,$nhi
-
-	la	$j,8(%r0)	# j=1
-	lr	$count,$num
-
-.align	16
-.Linner:
-	lg	$alo,0($j,$ap)
-	_dswap	$alo
-	mlgr	$ahi,$bi	# ap[j]*bp[i]
-	algr	$alo,$AHI
-	lghi	$AHI,0
-	alcgr	$ahi,$AHI
-	alg	$alo,$stdframe($j,$sp)# +=tp[j]
-	alcgr	$AHI,$ahi
-
-	lg	$nlo,0($j,$np)
-	_dswap	$nlo
-	mlgr	$nhi,$mn0	# np[j]*m1
-	algr	$nlo,$NHI
-	lghi	$NHI,0
-	alcgr	$nhi,$NHI
-	algr	$nlo,$alo	# +="tp[j]"
-	alcgr	$NHI,$nhi
-
-	stg	$nlo,$stdframe-8($j,$sp)	# tp[j-1]=
-	la	$j,8($j)	# j++
-	brct	$count,.Linner
-
-	algr	$NHI,$AHI
-	lghi	$AHI,0
-	alcgr	$AHI,$AHI
-	alg	$NHI,$stdframe($j,$sp)# accumulate previous upmost overflow bit
-	lghi	$ahi,0
-	alcgr	$AHI,$ahi	# new upmost overflow bit
-	stg	$NHI,$stdframe-8($j,$sp)
-	stg	$AHI,$stdframe($j,$sp)
-
-	la	$bp,8($bp)	# bp++
-	cl${g}	$bp,`$stdframe+8+4*$SIZE_T`($j,$sp)	# compare to &bp[num]
-	jne	.Louter
-
-	l${g}	$rp,`$stdframe+8+2*$SIZE_T`($j,$sp)	# reincarnate rp
-	la	$ap,$stdframe($sp)
-	ahi	$num,1		# restore $num, incidentally clears "borrow"
-
-	la	$j,0(%r0)
-	lr	$count,$num
-.Lsub:	lg	$alo,0($j,$ap)
-	lg	$nlo,0($j,$np)
-	_dswap	$nlo
-	slbgr	$alo,$nlo
-	stg	$alo,0($j,$rp)
-	la	$j,8($j)
-	brct	$count,.Lsub
-	lghi	$ahi,0
-	slbgr	$AHI,$ahi	# handle upmost carry
-
-	ngr	$ap,$AHI
-	lghi	$np,-1
-	xgr	$np,$AHI
-	ngr	$np,$rp
-	ogr	$ap,$np		# ap=borrow?tp:rp
-
-	la	$j,0(%r0)
-	lgr	$count,$num
-.Lcopy:	lg	$alo,0($j,$ap)		# copy or in-place refresh
-	_dswap	$alo
-	stg	$j,$stdframe($j,$sp)	# zap tp
-	stg	$alo,0($j,$rp)
-	la	$j,8($j)
-	brct	$count,.Lcopy
-
-	la	%r1,`$stdframe+8+6*$SIZE_T`($j,$sp)
-	lm${g}	%r6,%r15,0(%r1)
-	lghi	%r2,1		# signal "processed"
-	br	%r14
-.size	bn_mul_mont,.-bn_mul_mont
-.string	"Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-foreach (split("\n",$code)) {
-	s/\`([^\`]*)\`/eval $1/ge;
-	s/_dswap\s+(%r[0-9]+)/sprintf("rllg\t%s,%s,32",$1,$1) if($SIZE_T==4)/e;
-	print $_,"\n";
-}
-close STDOUT;
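
The loops above are the standard word-serial Montgomery multiplication: for each b[i], pick m = (tp[0] + a[0]*b[i])*n0 mod 2^64 (n0 being -n^-1 mod 2^64) so that the lowest word of the running sum cancels, then shift everything down one word. A rough C sketch under those conventions (illustrative names; unsigned __int128 is a GCC/Clang extension standing in for mlgr; the two separate carry chains mirror $AHI and $NHI, and the final conditional subtraction, the .Lsub/.Lcopy tail, is omitted):

    #include <stdint.h>

    /* Rough C model of the bn_mul_mont loops above.  n0 is -n^-1 mod
     * 2^64; tp[] is the running (num+1)-word sum.  C99 VLA, sketch only. */
    static void mont_mul(uint64_t *rp, const uint64_t *ap, const uint64_t *bp,
                         const uint64_t *np, uint64_t n0, int num)
    {
        uint64_t tp[num + 1];
        for (int j = 0; j <= num; j++) tp[j] = 0;

        for (int i = 0; i < num; i++) {
            uint64_t bi = bp[i], acarry = 0, ncarry = 0;
            uint64_t m = (tp[0] + ap[0] * bi) * n0;    /* "tp[0]"*n0 */
            for (int j = 0; j < num; j++) {
                /* two carry chains, as the assembly keeps in $AHI/$NHI */
                unsigned __int128 ta = (unsigned __int128)ap[j] * bi
                                     + acarry + tp[j];
                unsigned __int128 tn = (unsigned __int128)np[j] * m
                                     + ncarry + (uint64_t)ta;
                acarry = (uint64_t)(ta >> 64);
                ncarry = (uint64_t)(tn >> 64);
                if (j) tp[j - 1] = (uint64_t)tn;       /* j==0 word is 0 */
            }
            unsigned __int128 t = (unsigned __int128)acarry + ncarry + tp[num];
            tp[num - 1] = (uint64_t)t;
            tp[num]     = (uint64_t)(t >> 64);         /* upmost overflow */
        }
        for (int j = 0; j < num; j++) rp[j] = tp[j];   /* .Lsub tail omitted */
    }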

+ 0 - 678
drivers/builtin_openssl2/crypto/bn/asm/s390x.S

@@ -1,678 +0,0 @@
-.ident "s390x.S, version 1.1"
-// ====================================================================
-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// project.
-//
-// Rights for redistribution and usage in source and binary forms are
-// granted according to the OpenSSL license. Warranty of any kind is
-// disclaimed.
-// ====================================================================
-
-.text
-
-#define zero	%r0
-
-// BN_ULONG bn_mul_add_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
-.globl	bn_mul_add_words
-.type	bn_mul_add_words,@function
-.align	4
-bn_mul_add_words:
-	lghi	zero,0		// zero = 0
-	la	%r1,0(%r2)	// put rp aside
-	lghi	%r2,0		// i=0;
-	ltgfr	%r4,%r4
-	bler	%r14		// if (len<=0) return 0;
-
-	stmg	%r6,%r10,48(%r15)
-	lghi	%r10,3
-	lghi	%r8,0		// carry = 0
-	nr	%r10,%r4	// len%4
-	sra	%r4,2		// cnt=len/4
-	jz	.Loop1_madd	// carry is incidentally cleared if branch taken
-	algr	zero,zero	// clear carry
-
-.Loop4_madd:
-	lg	%r7,0(%r2,%r3)	// ap[i]
-	mlgr	%r6,%r5		// *=w
-	alcgr	%r7,%r8		// +=carry
-	alcgr	%r6,zero
-	alg	%r7,0(%r2,%r1)	// +=rp[i]
-	stg	%r7,0(%r2,%r1)	// rp[i]=
-
-	lg	%r9,8(%r2,%r3)
-	mlgr	%r8,%r5
-	alcgr	%r9,%r6
-	alcgr	%r8,zero
-	alg	%r9,8(%r2,%r1)
-	stg	%r9,8(%r2,%r1)
-
-	lg	%r7,16(%r2,%r3)
-	mlgr	%r6,%r5
-	alcgr	%r7,%r8
-	alcgr	%r6,zero
-	alg	%r7,16(%r2,%r1)
-	stg	%r7,16(%r2,%r1)
-
-	lg	%r9,24(%r2,%r3)
-	mlgr	%r8,%r5
-	alcgr	%r9,%r6
-	alcgr	%r8,zero
-	alg	%r9,24(%r2,%r1)
-	stg	%r9,24(%r2,%r1)
-
-	la	%r2,32(%r2)	// i+=4
-	brct	%r4,.Loop4_madd
-
-	la	%r10,1(%r10)		// see if len%4 is zero ...
-	brct	%r10,.Loop1_madd	// without touching condition code:-)
-
-.Lend_madd:
-	alcgr	%r8,zero	// collect carry bit
-	lgr	%r2,%r8
-	lmg	%r6,%r10,48(%r15)
-	br	%r14
-
-.Loop1_madd:
-	lg	%r7,0(%r2,%r3)	// ap[i]
-	mlgr	%r6,%r5		// *=w
-	alcgr	%r7,%r8		// +=carry
-	alcgr	%r6,zero
-	alg	%r7,0(%r2,%r1)	// +=rp[i]
-	stg	%r7,0(%r2,%r1)	// rp[i]=
-
-	lgr	%r8,%r6
-	la	%r2,8(%r2)	// i++
-	brct	%r10,.Loop1_madd
-
-	j	.Lend_madd
-.size	bn_mul_add_words,.-bn_mul_add_words
-
-// BN_ULONG bn_mul_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
-.globl	bn_mul_words
-.type	bn_mul_words,@function
-.align	4
-bn_mul_words:
-	lghi	zero,0		// zero = 0
-	la	%r1,0(%r2)	// put rp aside
-	lghi	%r2,0		// i=0;
-	ltgfr	%r4,%r4
-	bler	%r14		// if (len<=0) return 0;
-
-	stmg	%r6,%r10,48(%r15)
-	lghi	%r10,3
-	lghi	%r8,0		// carry = 0
-	nr	%r10,%r4	// len%4
-	sra	%r4,2		// cnt=len/4
-	jz	.Loop1_mul	// carry is incidentally cleared if branch taken
-	algr	zero,zero	// clear carry
-
-.Loop4_mul:
-	lg	%r7,0(%r2,%r3)	// ap[i]
-	mlgr	%r6,%r5		// *=w
-	alcgr	%r7,%r8		// +=carry
-	stg	%r7,0(%r2,%r1)	// rp[i]=
-
-	lg	%r9,8(%r2,%r3)
-	mlgr	%r8,%r5
-	alcgr	%r9,%r6
-	stg	%r9,8(%r2,%r1)
-
-	lg	%r7,16(%r2,%r3)
-	mlgr	%r6,%r5
-	alcgr	%r7,%r8
-	stg	%r7,16(%r2,%r1)
-
-	lg	%r9,24(%r2,%r3)
-	mlgr	%r8,%r5
-	alcgr	%r9,%r6
-	stg	%r9,24(%r2,%r1)
-
-	la	%r2,32(%r2)	// i+=4
-	brct	%r4,.Loop4_mul
-
-	la	%r10,1(%r10)		// see if len%4 is zero ...
-	brct	%r10,.Loop1_mul		// without touching condition code:-)
-
-.Lend_mul:
-	alcgr	%r8,zero	// collect carry bit
-	lgr	%r2,%r8
-	lmg	%r6,%r10,48(%r15)
-	br	%r14
-
-.Loop1_mul:
-	lg	%r7,0(%r2,%r3)	// ap[i]
-	mlgr	%r6,%r5		// *=w
-	alcgr	%r7,%r8		// +=carry
-	stg	%r7,0(%r2,%r1)	// rp[i]=
-
-	lgr	%r8,%r6
-	la	%r2,8(%r2)	// i++
-	brct	%r10,.Loop1_mul
-
-	j	.Lend_mul
-.size	bn_mul_words,.-bn_mul_words
-
-// void bn_sqr_words(BN_ULONG *r2,BN_ULONG *r3,int r4)
-.globl	bn_sqr_words
-.type	bn_sqr_words,@function
-.align	4
-bn_sqr_words:
-	ltgfr	%r4,%r4
-	bler	%r14
-
-	stmg	%r6,%r7,48(%r15)
-	srag	%r1,%r4,2	// cnt=len/4
-	jz	.Loop1_sqr
-
-.Loop4_sqr:
-	lg	%r7,0(%r3)
-	mlgr	%r6,%r7
-	stg	%r7,0(%r2)
-	stg	%r6,8(%r2)
-
-	lg	%r7,8(%r3)
-	mlgr	%r6,%r7
-	stg	%r7,16(%r2)
-	stg	%r6,24(%r2)
-
-	lg	%r7,16(%r3)
-	mlgr	%r6,%r7
-	stg	%r7,32(%r2)
-	stg	%r6,40(%r2)
-
-	lg	%r7,24(%r3)
-	mlgr	%r6,%r7
-	stg	%r7,48(%r2)
-	stg	%r6,56(%r2)
-
-	la	%r3,32(%r3)
-	la	%r2,64(%r2)
-	brct	%r1,.Loop4_sqr
-
-	lghi	%r1,3
-	nr	%r4,%r1		// cnt=len%4
-	jz	.Lend_sqr
-
-.Loop1_sqr:
-	lg	%r7,0(%r3)
-	mlgr	%r6,%r7
-	stg	%r7,0(%r2)
-	stg	%r6,8(%r2)
-
-	la	%r3,8(%r3)
-	la	%r2,16(%r2)
-	brct	%r4,.Loop1_sqr
-
-.Lend_sqr:
-	lmg	%r6,%r7,48(%r15)
-	br	%r14
-.size	bn_sqr_words,.-bn_sqr_words
-
-// BN_ULONG bn_div_words(BN_ULONG h,BN_ULONG l,BN_ULONG d);
-.globl	bn_div_words
-.type	bn_div_words,@function
-.align	4
-bn_div_words:
-	dlgr	%r2,%r4
-	lgr	%r2,%r3
-	br	%r14
-.size	bn_div_words,.-bn_div_words
-
-// BN_ULONG bn_add_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
-.globl	bn_add_words
-.type	bn_add_words,@function
-.align	4
-bn_add_words:
-	la	%r1,0(%r2)	// put rp aside
-	lghi	%r2,0		// i=0
-	ltgfr	%r5,%r5
-	bler	%r14		// if (len<=0) return 0;
-
-	stg	%r6,48(%r15)
-	lghi	%r6,3
-	nr	%r6,%r5		// len%4
-	sra	%r5,2		// len/4, use sra because it sets condition code
-	jz	.Loop1_add	// carry is incidentally cleared if branch taken
-	algr	%r2,%r2		// clear carry
-
-.Loop4_add:
-	lg	%r0,0(%r2,%r3)
-	alcg	%r0,0(%r2,%r4)
-	stg	%r0,0(%r2,%r1)
-	lg	%r0,8(%r2,%r3)
-	alcg	%r0,8(%r2,%r4)
-	stg	%r0,8(%r2,%r1)
-	lg	%r0,16(%r2,%r3)
-	alcg	%r0,16(%r2,%r4)
-	stg	%r0,16(%r2,%r1)
-	lg	%r0,24(%r2,%r3)
-	alcg	%r0,24(%r2,%r4)
-	stg	%r0,24(%r2,%r1)
-
-	la	%r2,32(%r2)	// i+=4
-	brct	%r5,.Loop4_add
-
-	la	%r6,1(%r6)	// see if len%4 is zero ...
-	brct	%r6,.Loop1_add	// without touching condition code:-)
-
-.Lexit_add:
-	lghi	%r2,0
-	alcgr	%r2,%r2
-	lg	%r6,48(%r15)
-	br	%r14
-
-.Loop1_add:
-	lg	%r0,0(%r2,%r3)
-	alcg	%r0,0(%r2,%r4)
-	stg	%r0,0(%r2,%r1)
-
-	la	%r2,8(%r2)	// i++
-	brct	%r6,.Loop1_add
-
-	j	.Lexit_add
-.size	bn_add_words,.-bn_add_words
-
-// BN_ULONG bn_sub_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
-.globl	bn_sub_words
-.type	bn_sub_words,@function
-.align	4
-bn_sub_words:
-	la	%r1,0(%r2)	// put rp aside
-	lghi	%r2,0		// i=0
-	ltgfr	%r5,%r5
-	bler	%r14		// if (len<=0) return 0;
-
-	stg	%r6,48(%r15)
-	lghi	%r6,3
-	nr	%r6,%r5		// len%4
-	sra	%r5,2		// len/4, use sra because it sets condition code
-	jnz	.Loop4_sub	// borrow is incidentally cleared if branch taken
-	slgr	%r2,%r2		// clear borrow
-
-.Loop1_sub:
-	lg	%r0,0(%r2,%r3)
-	slbg	%r0,0(%r2,%r4)
-	stg	%r0,0(%r2,%r1)
-
-	la	%r2,8(%r2)	// i++
-	brct	%r6,.Loop1_sub
-	j	.Lexit_sub
-
-.Loop4_sub:
-	lg	%r0,0(%r2,%r3)
-	slbg	%r0,0(%r2,%r4)
-	stg	%r0,0(%r2,%r1)
-	lg	%r0,8(%r2,%r3)
-	slbg	%r0,8(%r2,%r4)
-	stg	%r0,8(%r2,%r1)
-	lg	%r0,16(%r2,%r3)
-	slbg	%r0,16(%r2,%r4)
-	stg	%r0,16(%r2,%r1)
-	lg	%r0,24(%r2,%r3)
-	slbg	%r0,24(%r2,%r4)
-	stg	%r0,24(%r2,%r1)
-
-	la	%r2,32(%r2)	// i+=4
-	brct	%r5,.Loop4_sub
-
-	la	%r6,1(%r6)	// see if len%4 is zero ...
-	brct	%r6,.Loop1_sub	// without touching condition code:-)
-
-.Lexit_sub:
-	lghi	%r2,0
-	slbgr	%r2,%r2
-	lcgr	%r2,%r2
-	lg	%r6,48(%r15)
-	br	%r14
-.size	bn_sub_words,.-bn_sub_words
-
-#define c1	%r1
-#define c2	%r5
-#define c3	%r8
-
-#define mul_add_c(ai,bi,c1,c2,c3)	\
-	lg	%r7,ai*8(%r3);		\
-	mlg	%r6,bi*8(%r4);		\
-	algr	c1,%r7;			\
-	alcgr	c2,%r6;			\
-	alcgr	c3,zero
-
-// void bn_mul_comba8(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
-.globl	bn_mul_comba8
-.type	bn_mul_comba8,@function
-.align	4
-bn_mul_comba8:
-	stmg	%r6,%r8,48(%r15)
-
-	lghi	c1,0
-	lghi	c2,0
-	lghi	c3,0
-	lghi	zero,0
-
-	mul_add_c(0,0,c1,c2,c3);
-	stg	c1,0*8(%r2)
-	lghi	c1,0
-
-	mul_add_c(0,1,c2,c3,c1);
-	mul_add_c(1,0,c2,c3,c1);
-	stg	c2,1*8(%r2)
-	lghi	c2,0
-
-	mul_add_c(2,0,c3,c1,c2);
-	mul_add_c(1,1,c3,c1,c2);
-	mul_add_c(0,2,c3,c1,c2);
-	stg	c3,2*8(%r2)
-	lghi	c3,0
-
-	mul_add_c(0,3,c1,c2,c3);
-	mul_add_c(1,2,c1,c2,c3);
-	mul_add_c(2,1,c1,c2,c3);
-	mul_add_c(3,0,c1,c2,c3);
-	stg	c1,3*8(%r2)
-	lghi	c1,0
-
-	mul_add_c(4,0,c2,c3,c1);
-	mul_add_c(3,1,c2,c3,c1);
-	mul_add_c(2,2,c2,c3,c1);
-	mul_add_c(1,3,c2,c3,c1);
-	mul_add_c(0,4,c2,c3,c1);
-	stg	c2,4*8(%r2)
-	lghi	c2,0
-
-	mul_add_c(0,5,c3,c1,c2);
-	mul_add_c(1,4,c3,c1,c2);
-	mul_add_c(2,3,c3,c1,c2);
-	mul_add_c(3,2,c3,c1,c2);
-	mul_add_c(4,1,c3,c1,c2);
-	mul_add_c(5,0,c3,c1,c2);
-	stg	c3,5*8(%r2)
-	lghi	c3,0
-
-	mul_add_c(6,0,c1,c2,c3);
-	mul_add_c(5,1,c1,c2,c3);
-	mul_add_c(4,2,c1,c2,c3);
-	mul_add_c(3,3,c1,c2,c3);
-	mul_add_c(2,4,c1,c2,c3);
-	mul_add_c(1,5,c1,c2,c3);
-	mul_add_c(0,6,c1,c2,c3);
-	stg	c1,6*8(%r2)
-	lghi	c1,0
-
-	mul_add_c(0,7,c2,c3,c1);
-	mul_add_c(1,6,c2,c3,c1);
-	mul_add_c(2,5,c2,c3,c1);
-	mul_add_c(3,4,c2,c3,c1);
-	mul_add_c(4,3,c2,c3,c1);
-	mul_add_c(5,2,c2,c3,c1);
-	mul_add_c(6,1,c2,c3,c1);
-	mul_add_c(7,0,c2,c3,c1);
-	stg	c2,7*8(%r2)
-	lghi	c2,0
-
-	mul_add_c(7,1,c3,c1,c2);
-	mul_add_c(6,2,c3,c1,c2);
-	mul_add_c(5,3,c3,c1,c2);
-	mul_add_c(4,4,c3,c1,c2);
-	mul_add_c(3,5,c3,c1,c2);
-	mul_add_c(2,6,c3,c1,c2);
-	mul_add_c(1,7,c3,c1,c2);
-	stg	c3,8*8(%r2)
-	lghi	c3,0
-
-	mul_add_c(2,7,c1,c2,c3);
-	mul_add_c(3,6,c1,c2,c3);
-	mul_add_c(4,5,c1,c2,c3);
-	mul_add_c(5,4,c1,c2,c3);
-	mul_add_c(6,3,c1,c2,c3);
-	mul_add_c(7,2,c1,c2,c3);
-	stg	c1,9*8(%r2)
-	lghi	c1,0
-
-	mul_add_c(7,3,c2,c3,c1);
-	mul_add_c(6,4,c2,c3,c1);
-	mul_add_c(5,5,c2,c3,c1);
-	mul_add_c(4,6,c2,c3,c1);
-	mul_add_c(3,7,c2,c3,c1);
-	stg	c2,10*8(%r2)
-	lghi	c2,0
-
-	mul_add_c(4,7,c3,c1,c2);
-	mul_add_c(5,6,c3,c1,c2);
-	mul_add_c(6,5,c3,c1,c2);
-	mul_add_c(7,4,c3,c1,c2);
-	stg	c3,11*8(%r2)
-	lghi	c3,0
-
-	mul_add_c(7,5,c1,c2,c3);
-	mul_add_c(6,6,c1,c2,c3);
-	mul_add_c(5,7,c1,c2,c3);
-	stg	c1,12*8(%r2)
-	lghi	c1,0
-
-
-	mul_add_c(6,7,c2,c3,c1);
-	mul_add_c(7,6,c2,c3,c1);
-	stg	c2,13*8(%r2)
-	lghi	c2,0
-
-	mul_add_c(7,7,c3,c1,c2);
-	stg	c3,14*8(%r2)
-	stg	c1,15*8(%r2)
-
-	lmg	%r6,%r8,48(%r15)
-	br	%r14
-.size	bn_mul_comba8,.-bn_mul_comba8
-
-// void bn_mul_comba4(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
-.globl	bn_mul_comba4
-.type	bn_mul_comba4,@function
-.align	4
-bn_mul_comba4:
-	stmg	%r6,%r8,48(%r15)
-
-	lghi	c1,0
-	lghi	c2,0
-	lghi	c3,0
-	lghi	zero,0
-
-	mul_add_c(0,0,c1,c2,c3);
-	stg	c1,0*8(%r2)
-	lghi	c1,0
-
-	mul_add_c(0,1,c2,c3,c1);
-	mul_add_c(1,0,c2,c3,c1);
-	stg	c2,1*8(%r2)
-	lghi	c2,0
-
-	mul_add_c(2,0,c3,c1,c2);
-	mul_add_c(1,1,c3,c1,c2);
-	mul_add_c(0,2,c3,c1,c2);
-	stg	c3,2*8(%r2)
-	lghi	c3,0
-
-	mul_add_c(0,3,c1,c2,c3);
-	mul_add_c(1,2,c1,c2,c3);
-	mul_add_c(2,1,c1,c2,c3);
-	mul_add_c(3,0,c1,c2,c3);
-	stg	c1,3*8(%r2)
-	lghi	c1,0
-
-	mul_add_c(3,1,c2,c3,c1);
-	mul_add_c(2,2,c2,c3,c1);
-	mul_add_c(1,3,c2,c3,c1);
-	stg	c2,4*8(%r2)
-	lghi	c2,0
-
-	mul_add_c(2,3,c3,c1,c2);
-	mul_add_c(3,2,c3,c1,c2);
-	stg	c3,5*8(%r2)
-	lghi	c3,0
-
-	mul_add_c(3,3,c1,c2,c3);
-	stg	c1,6*8(%r2)
-	stg	c2,7*8(%r2)
-
-	lmg	%r6,%r8,48(%r15)
-	br	%r14
-.size	bn_mul_comba4,.-bn_mul_comba4
-
-#define sqr_add_c(ai,c1,c2,c3)		\
-	lg	%r7,ai*8(%r3);		\
-	mlgr	%r6,%r7;		\
-	algr	c1,%r7;			\
-	alcgr	c2,%r6;			\
-	alcgr	c3,zero
-
-#define sqr_add_c2(ai,aj,c1,c2,c3)	\
-	lg	%r7,ai*8(%r3);		\
-	mlg	%r6,aj*8(%r3);		\
-	algr	c1,%r7;			\
-	alcgr	c2,%r6;			\
-	alcgr	c3,zero;		\
-	algr	c1,%r7;			\
-	alcgr	c2,%r6;			\
-	alcgr	c3,zero
-
-// void bn_sqr_comba8(BN_ULONG *r2,BN_ULONG *r3);
-.globl	bn_sqr_comba8
-.type	bn_sqr_comba8,@function
-.align	4
-bn_sqr_comba8:
-	stmg	%r6,%r8,48(%r15)
-
-	lghi	c1,0
-	lghi	c2,0
-	lghi	c3,0
-	lghi	zero,0
-
-	sqr_add_c(0,c1,c2,c3);
-	stg	c1,0*8(%r2)
-	lghi	c1,0
-
-	sqr_add_c2(1,0,c2,c3,c1);
-	stg	c2,1*8(%r2)
-	lghi	c2,0
-
-	sqr_add_c(1,c3,c1,c2);
-	sqr_add_c2(2,0,c3,c1,c2);
-	stg	c3,2*8(%r2)
-	lghi	c3,0
-
-	sqr_add_c2(3,0,c1,c2,c3);
-	sqr_add_c2(2,1,c1,c2,c3);
-	stg	c1,3*8(%r2)
-	lghi	c1,0
-
-	sqr_add_c(2,c2,c3,c1);
-	sqr_add_c2(3,1,c2,c3,c1);
-	sqr_add_c2(4,0,c2,c3,c1);
-	stg	c2,4*8(%r2)
-	lghi	c2,0
-
-	sqr_add_c2(5,0,c3,c1,c2);
-	sqr_add_c2(4,1,c3,c1,c2);
-	sqr_add_c2(3,2,c3,c1,c2);
-	stg	c3,5*8(%r2)
-	lghi	c3,0
-
-	sqr_add_c(3,c1,c2,c3);
-	sqr_add_c2(4,2,c1,c2,c3);
-	sqr_add_c2(5,1,c1,c2,c3);
-	sqr_add_c2(6,0,c1,c2,c3);
-	stg	c1,6*8(%r2)
-	lghi	c1,0
-
-	sqr_add_c2(7,0,c2,c3,c1);
-	sqr_add_c2(6,1,c2,c3,c1);
-	sqr_add_c2(5,2,c2,c3,c1);
-	sqr_add_c2(4,3,c2,c3,c1);
-	stg	c2,7*8(%r2)
-	lghi	c2,0
-
-	sqr_add_c(4,c3,c1,c2);
-	sqr_add_c2(5,3,c3,c1,c2);
-	sqr_add_c2(6,2,c3,c1,c2);
-	sqr_add_c2(7,1,c3,c1,c2);
-	stg	c3,8*8(%r2)
-	lghi	c3,0
-
-	sqr_add_c2(7,2,c1,c2,c3);
-	sqr_add_c2(6,3,c1,c2,c3);
-	sqr_add_c2(5,4,c1,c2,c3);
-	stg	c1,9*8(%r2)
-	lghi	c1,0
-
-	sqr_add_c(5,c2,c3,c1);
-	sqr_add_c2(6,4,c2,c3,c1);
-	sqr_add_c2(7,3,c2,c3,c1);
-	stg	c2,10*8(%r2)
-	lghi	c2,0
-
-	sqr_add_c2(7,4,c3,c1,c2);
-	sqr_add_c2(6,5,c3,c1,c2);
-	stg	c3,11*8(%r2)
-	lghi	c3,0
-
-	sqr_add_c(6,c1,c2,c3);
-	sqr_add_c2(7,5,c1,c2,c3);
-	stg	c1,12*8(%r2)
-	lghi	c1,0
-
-	sqr_add_c2(7,6,c2,c3,c1);
-	stg	c2,13*8(%r2)
-	lghi	c2,0
-
-	sqr_add_c(7,c3,c1,c2);
-	stg	c3,14*8(%r2)
-	stg	c1,15*8(%r2)
-
-	lmg	%r6,%r8,48(%r15)
-	br	%r14
-.size	bn_sqr_comba8,.-bn_sqr_comba8
-
-// void bn_sqr_comba4(BN_ULONG *r2,BN_ULONG *r3);
-.globl bn_sqr_comba4
-.type	bn_sqr_comba4,@function
-.align	4
-bn_sqr_comba4:
-	stmg	%r6,%r8,48(%r15)
-
-	lghi	c1,0
-	lghi	c2,0
-	lghi	c3,0
-	lghi	zero,0
-
-	sqr_add_c(0,c1,c2,c3);
-	stg	c1,0*8(%r2)
-	lghi	c1,0
-
-	sqr_add_c2(1,0,c2,c3,c1);
-	stg	c2,1*8(%r2)
-	lghi	c2,0
-
-	sqr_add_c(1,c3,c1,c2);
-	sqr_add_c2(2,0,c3,c1,c2);
-	stg	c3,2*8(%r2)
-	lghi	c3,0
-
-	sqr_add_c2(3,0,c1,c2,c3);
-	sqr_add_c2(2,1,c1,c2,c3);
-	stg	c1,3*8(%r2)
-	lghi	c1,0
-
-	sqr_add_c(2,c2,c3,c1);
-	sqr_add_c2(3,1,c2,c3,c1);
-	stg	c2,4*8(%r2)
-	lghi	c2,0
-
-	sqr_add_c2(3,2,c3,c1,c2);
-	stg	c3,5*8(%r2)
-	lghi	c3,0
-
-	sqr_add_c(3,c1,c2,c3);
-	stg	c1,6*8(%r2)
-	stg	c2,7*8(%r2)
-
-	lmg	%r6,%r8,48(%r15)
-	br	%r14
-.size	bn_sqr_comba4,.-bn_sqr_comba4
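-
The mul_add_c macro above implements one column step of comba (column-wise) multiplication: a 192-bit accumulator spread across three registers takes the lo:hi halves of a[i]*b[j], with alcgr rippling the carries upward. The same step in C, roughly (unsigned __int128, a GCC/Clang extension, stands in for mlg/mlgr; names are illustrative):

    #include <stdint.h>

    /* One comba column step, (c3:c2:c1) += ai * bj, mirroring the
     * algr/alcgr carry chain of the mul_add_c macro above. */
    static void mul_add_c(uint64_t ai, uint64_t bj,
                          uint64_t *c1, uint64_t *c2, uint64_t *c3)
    {
        unsigned __int128 p = (unsigned __int128)ai * bj;
        uint64_t lo = (uint64_t)p, hi = (uint64_t)(p >> 64);

        *c1 += lo;               /* algr  c1,lo                     */
        hi  += (*c1 < lo);       /* carry into the high word; the   */
        *c2 += hi;               /* product high word is <= 2^64-2, */
        *c3 += (*c2 < hi);       /* so hi+carry cannot wrap         */
    }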

+ 0 - 1458
drivers/builtin_openssl2/crypto/bn/asm/sparcv8.S

@@ -1,1458 +0,0 @@
-.ident	"sparcv8.s, Version 1.4"
-.ident	"SPARC v8 ISA artwork by Andy Polyakov <appro@openssl.org>"
-
-/*
- * ====================================================================
- * Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
- * project.
- *
- * Rights for redistribution and usage in source and binary forms are
- * granted according to the OpenSSL license. Warranty of any kind is
- * disclaimed.
- * ====================================================================
- */
-
-/*
- * This is my modest contribution to the OpenSSL project (see
 * http://www.openssl.org/ for more information about it) and is
- * a drop-in SuperSPARC ISA replacement for the crypto/bn/bn_asm.c
- * module. For updates see http://fy.chalmers.se/~appro/hpe/.
- *
- * See bn_asm.sparc.v8plus.S for more details.
- */
-
-/*
- * Revision history.
- *
- * 1.1	- new loop unrolling model(*);
- * 1.2	- made gas friendly;
- * 1.3	- fixed problem with /usr/ccs/lib/cpp;
- * 1.4	- some retunes;
- *
- * (*)	see bn_asm.sparc.v8plus.S for details
- */
-
-.section	".text",#alloc,#execinstr
-.file		"bn_asm.sparc.v8.S"
-
-.align	32
-
-.global bn_mul_add_words
-/*
- * BN_ULONG bn_mul_add_words(rp,ap,num,w)
- * BN_ULONG *rp,*ap;
- * int num;
- * BN_ULONG w;
- */
-bn_mul_add_words:
-	cmp	%o2,0
-	bg,a	.L_bn_mul_add_words_proceed
-	ld	[%o1],%g2
-	retl
-	clr	%o0
-
-.L_bn_mul_add_words_proceed:
-	andcc	%o2,-4,%g0
-	bz	.L_bn_mul_add_words_tail
-	clr	%o5
-
-.L_bn_mul_add_words_loop:
-	ld	[%o0],%o4
-	ld	[%o1+4],%g3
-	umul	%o3,%g2,%g2
-	rd	%y,%g1
-	addcc	%o4,%o5,%o4
-	addx	%g1,0,%g1
-	addcc	%o4,%g2,%o4
-	st	%o4,[%o0]
-	addx	%g1,0,%o5
-
-	ld	[%o0+4],%o4
-	ld	[%o1+8],%g2
-	umul	%o3,%g3,%g3
-	dec	4,%o2
-	rd	%y,%g1
-	addcc	%o4,%o5,%o4
-	addx	%g1,0,%g1
-	addcc	%o4,%g3,%o4
-	st	%o4,[%o0+4]
-	addx	%g1,0,%o5
-
-	ld	[%o0+8],%o4
-	ld	[%o1+12],%g3
-	umul	%o3,%g2,%g2
-	inc	16,%o1
-	rd	%y,%g1
-	addcc	%o4,%o5,%o4
-	addx	%g1,0,%g1
-	addcc	%o4,%g2,%o4
-	st	%o4,[%o0+8]
-	addx	%g1,0,%o5
-
-	ld	[%o0+12],%o4
-	umul	%o3,%g3,%g3
-	inc	16,%o0
-	rd	%y,%g1
-	addcc	%o4,%o5,%o4
-	addx	%g1,0,%g1
-	addcc	%o4,%g3,%o4
-	st	%o4,[%o0-4]
-	addx	%g1,0,%o5
-	andcc	%o2,-4,%g0
-	bnz,a	.L_bn_mul_add_words_loop
-	ld	[%o1],%g2
-
-	tst	%o2
-	bnz,a	.L_bn_mul_add_words_tail
-	ld	[%o1],%g2
-.L_bn_mul_add_words_return:
-	retl
-	mov	%o5,%o0
-	nop
-
-.L_bn_mul_add_words_tail:
-	ld	[%o0],%o4
-	umul	%o3,%g2,%g2
-	addcc	%o4,%o5,%o4
-	rd	%y,%g1
-	addx	%g1,0,%g1
-	addcc	%o4,%g2,%o4
-	addx	%g1,0,%o5
-	deccc	%o2
-	bz	.L_bn_mul_add_words_return
-	st	%o4,[%o0]
-
-	ld	[%o1+4],%g2
-	ld	[%o0+4],%o4
-	umul	%o3,%g2,%g2
-	rd	%y,%g1
-	addcc	%o4,%o5,%o4
-	addx	%g1,0,%g1
-	addcc	%o4,%g2,%o4
-	addx	%g1,0,%o5
-	deccc	%o2
-	bz	.L_bn_mul_add_words_return
-	st	%o4,[%o0+4]
-
-	ld	[%o1+8],%g2
-	ld	[%o0+8],%o4
-	umul	%o3,%g2,%g2
-	rd	%y,%g1
-	addcc	%o4,%o5,%o4
-	addx	%g1,0,%g1
-	addcc	%o4,%g2,%o4
-	st	%o4,[%o0+8]
-	retl
-	addx	%g1,0,%o0
-
-.type	bn_mul_add_words,#function
-.size	bn_mul_add_words,(.-bn_mul_add_words)
-
-.align	32
-
-.global bn_mul_words
-/*
- * BN_ULONG bn_mul_words(rp,ap,num,w)
- * BN_ULONG *rp,*ap;
- * int num;
- * BN_ULONG w;
- */
-bn_mul_words:
-	cmp	%o2,0
-	bg,a	.L_bn_mul_words_proceeed
-	ld	[%o1],%g2
-	retl
-	clr	%o0
-
-.L_bn_mul_words_proceeed:
-	andcc	%o2,-4,%g0
-	bz	.L_bn_mul_words_tail
-	clr	%o5
-
-.L_bn_mul_words_loop:
-	ld	[%o1+4],%g3
-	umul	%o3,%g2,%g2
-	addcc	%g2,%o5,%g2
-	rd	%y,%g1
-	addx	%g1,0,%o5
-	st	%g2,[%o0]
-
-	ld	[%o1+8],%g2
-	umul	%o3,%g3,%g3
-	addcc	%g3,%o5,%g3
-	rd	%y,%g1
-	dec	4,%o2
-	addx	%g1,0,%o5
-	st	%g3,[%o0+4]
-
-	ld	[%o1+12],%g3
-	umul	%o3,%g2,%g2
-	addcc	%g2,%o5,%g2
-	rd	%y,%g1
-	inc	16,%o1
-	st	%g2,[%o0+8]
-	addx	%g1,0,%o5
-
-	umul	%o3,%g3,%g3
-	addcc	%g3,%o5,%g3
-	rd	%y,%g1
-	inc	16,%o0
-	addx	%g1,0,%o5
-	st	%g3,[%o0-4]
-	andcc	%o2,-4,%g0
-	nop
-	bnz,a	.L_bn_mul_words_loop
-	ld	[%o1],%g2
-
-	tst	%o2
-	bnz,a	.L_bn_mul_words_tail
-	ld	[%o1],%g2
-.L_bn_mul_words_return:
-	retl
-	mov	%o5,%o0
-	nop
-
-.L_bn_mul_words_tail:
-	umul	%o3,%g2,%g2
-	addcc	%g2,%o5,%g2
-	rd	%y,%g1
-	addx	%g1,0,%o5
-	deccc	%o2
-	bz	.L_bn_mul_words_return
-	st	%g2,[%o0]
-	nop
-
-	ld	[%o1+4],%g2
-	umul	%o3,%g2,%g2
-	addcc	%g2,%o5,%g2
-	rd	%y,%g1
-	addx	%g1,0,%o5
-	deccc	%o2
-	bz	.L_bn_mul_words_return
-	st	%g2,[%o0+4]
-
-	ld	[%o1+8],%g2
-	umul	%o3,%g2,%g2
-	addcc	%g2,%o5,%g2
-	rd	%y,%g1
-	st	%g2,[%o0+8]
-	retl
-	addx	%g1,0,%o0
-
-.type	bn_mul_words,#function
-.size	bn_mul_words,(.-bn_mul_words)
-
-.align  32
-.global	bn_sqr_words
-/*
- * void bn_sqr_words(r,a,n)
- * BN_ULONG *r,*a;
- * int n;
- */
-bn_sqr_words:
-	cmp	%o2,0
-	bg,a	.L_bn_sqr_words_proceeed
-	ld	[%o1],%g2
-	retl
-	clr	%o0
-
-.L_bn_sqr_words_proceeed:
-	andcc	%o2,-4,%g0
-	bz	.L_bn_sqr_words_tail
-	clr	%o5
-
-.L_bn_sqr_words_loop:
-	ld	[%o1+4],%g3
-	umul	%g2,%g2,%o4
-	st	%o4,[%o0]
-	rd	%y,%o5
-	st	%o5,[%o0+4]
-
-	ld	[%o1+8],%g2
-	umul	%g3,%g3,%o4
-	dec	4,%o2
-	st	%o4,[%o0+8]
-	rd	%y,%o5
-	st	%o5,[%o0+12]
-	nop
-
-	ld	[%o1+12],%g3
-	umul	%g2,%g2,%o4
-	st	%o4,[%o0+16]
-	rd	%y,%o5
-	inc	16,%o1
-	st	%o5,[%o0+20]
-
-	umul	%g3,%g3,%o4
-	inc	32,%o0
-	st	%o4,[%o0-8]
-	rd	%y,%o5
-	st	%o5,[%o0-4]
-	andcc	%o2,-4,%g2
-	bnz,a	.L_bn_sqr_words_loop
-	ld	[%o1],%g2
-
-	tst	%o2
-	nop
-	bnz,a	.L_bn_sqr_words_tail
-	ld	[%o1],%g2
-.L_bn_sqr_words_return:
-	retl
-	clr	%o0
-
-.L_bn_sqr_words_tail:
-	umul	%g2,%g2,%o4
-	st	%o4,[%o0]
-	deccc	%o2
-	rd	%y,%o5
-	bz	.L_bn_sqr_words_return
-	st	%o5,[%o0+4]
-
-	ld	[%o1+4],%g2
-	umul	%g2,%g2,%o4
-	st	%o4,[%o0+8]
-	deccc	%o2
-	rd	%y,%o5
-	nop
-	bz	.L_bn_sqr_words_return
-	st	%o5,[%o0+12]
-
-	ld	[%o1+8],%g2
-	umul	%g2,%g2,%o4
-	st	%o4,[%o0+16]
-	rd	%y,%o5
-	st	%o5,[%o0+20]
-	retl
-	clr	%o0
-
-.type	bn_sqr_words,#function
-.size	bn_sqr_words,(.-bn_sqr_words)
-
-.align	32
-
-.global bn_div_words
-/*
- * BN_ULONG bn_div_words(h,l,d)
- * BN_ULONG h,l,d;
- */
-bn_div_words:
-	wr	%o0,%y
-	udiv	%o1,%o2,%o0
-	retl
-	nop
-
-.type	bn_div_words,#function
-.size	bn_div_words,(.-bn_div_words)
-
-.align	32
-
-.global bn_add_words
-/*
- * BN_ULONG bn_add_words(rp,ap,bp,n)
- * BN_ULONG *rp,*ap,*bp;
- * int n;
- */
-bn_add_words:
-	cmp	%o3,0
-	bg,a	.L_bn_add_words_proceed
-	ld	[%o1],%o4
-	retl
-	clr	%o0
-
-.L_bn_add_words_proceed:
-	andcc	%o3,-4,%g0
-	bz	.L_bn_add_words_tail
-	clr	%g1
-	ba	.L_bn_add_words_warn_loop
-	addcc	%g0,0,%g0	! clear carry flag
-
-.L_bn_add_words_loop:
-	ld	[%o1],%o4
-.L_bn_add_words_warn_loop:
-	ld	[%o2],%o5
-	ld	[%o1+4],%g3
-	ld	[%o2+4],%g4
-	dec	4,%o3
-	addxcc	%o5,%o4,%o5
-	st	%o5,[%o0]
-
-	ld	[%o1+8],%o4
-	ld	[%o2+8],%o5
-	inc	16,%o1
-	addxcc	%g3,%g4,%g3
-	st	%g3,[%o0+4]
-	
-	ld	[%o1-4],%g3
-	ld	[%o2+12],%g4
-	inc	16,%o2
-	addxcc	%o5,%o4,%o5
-	st	%o5,[%o0+8]
-
-	inc	16,%o0
-	addxcc	%g3,%g4,%g3
-	st	%g3,[%o0-4]
-	addx	%g0,0,%g1
-	andcc	%o3,-4,%g0
-	bnz,a	.L_bn_add_words_loop
-	addcc	%g1,-1,%g0
-
-	tst	%o3
-	bnz,a	.L_bn_add_words_tail
-	ld	[%o1],%o4
-.L_bn_add_words_return:
-	retl
-	mov	%g1,%o0
-
-.L_bn_add_words_tail:
-	addcc	%g1,-1,%g0
-	ld	[%o2],%o5
-	addxcc	%o5,%o4,%o5
-	addx	%g0,0,%g1
-	deccc	%o3
-	bz	.L_bn_add_words_return
-	st	%o5,[%o0]
-
-	ld	[%o1+4],%o4
-	addcc	%g1,-1,%g0
-	ld	[%o2+4],%o5
-	addxcc	%o5,%o4,%o5
-	addx	%g0,0,%g1
-	deccc	%o3
-	bz	.L_bn_add_words_return
-	st	%o5,[%o0+4]
-
-	ld	[%o1+8],%o4
-	addcc	%g1,-1,%g0
-	ld	[%o2+8],%o5
-	addxcc	%o5,%o4,%o5
-	st	%o5,[%o0+8]
-	retl
-	addx	%g0,0,%o0
-
-.type	bn_add_words,#function
-.size	bn_add_words,(.-bn_add_words)
-
-.align	32
-
-.global bn_sub_words
-/*
- * BN_ULONG bn_sub_words(rp,ap,bp,n)
- * BN_ULONG *rp,*ap,*bp;
- * int n;
- */
-bn_sub_words:
-	cmp	%o3,0
-	bg,a	.L_bn_sub_words_proceed
-	ld	[%o1],%o4
-	retl
-	clr	%o0
-
-.L_bn_sub_words_proceed:
-	andcc	%o3,-4,%g0
-	bz	.L_bn_sub_words_tail
-	clr	%g1
-	ba	.L_bn_sub_words_warm_loop
-	addcc	%g0,0,%g0	! clear carry flag
-
-.L_bn_sub_words_loop:
-	ld	[%o1],%o4
-.L_bn_sub_words_warm_loop:
-	ld	[%o2],%o5
-	ld	[%o1+4],%g3
-	ld	[%o2+4],%g4
-	dec	4,%o3
-	subxcc	%o4,%o5,%o5
-	st	%o5,[%o0]
-
-	ld	[%o1+8],%o4
-	ld	[%o2+8],%o5
-	inc	16,%o1
-	subxcc	%g3,%g4,%g4
-	st	%g4,[%o0+4]
-	
-	ld	[%o1-4],%g3
-	ld	[%o2+12],%g4
-	inc	16,%o2
-	subxcc	%o4,%o5,%o5
-	st	%o5,[%o0+8]
-
-	inc	16,%o0
-	subxcc	%g3,%g4,%g4
-	st	%g4,[%o0-4]
-	addx	%g0,0,%g1
-	andcc	%o3,-4,%g0
-	bnz,a	.L_bn_sub_words_loop
-	addcc	%g1,-1,%g0
-
-	tst	%o3
-	nop
-	bnz,a	.L_bn_sub_words_tail
-	ld	[%o1],%o4
-.L_bn_sub_words_return:
-	retl
-	mov	%g1,%o0
-
-.L_bn_sub_words_tail:
-	addcc	%g1,-1,%g0
-	ld	[%o2],%o5
-	subxcc	%o4,%o5,%o5
-	addx	%g0,0,%g1
-	deccc	%o3
-	bz	.L_bn_sub_words_return
-	st	%o5,[%o0]
-	nop
-
-	ld	[%o1+4],%o4
-	addcc	%g1,-1,%g0
-	ld	[%o2+4],%o5
-	subxcc	%o4,%o5,%o5
-	addx	%g0,0,%g1
-	deccc	%o3
-	bz	.L_bn_sub_words_return
-	st	%o5,[%o0+4]
-
-	ld	[%o1+8],%o4
-	addcc	%g1,-1,%g0
-	ld	[%o2+8],%o5
-	subxcc	%o4,%o5,%o5
-	st	%o5,[%o0+8]
-	retl
-	addx	%g0,0,%o0
-
-.type	bn_sub_words,#function
-.size	bn_sub_words,(.-bn_sub_words)
-
-#define FRAME_SIZE	-96
-
-/*
- * Here is register usage map for *all* routines below.
- */
-#define t_1	%o0
-#define	t_2	%o1
-#define c_1	%o2
-#define c_2	%o3
-#define c_3	%o4
-
-#define ap(I)	[%i1+4*I]
-#define bp(I)	[%i2+4*I]
-#define rp(I)	[%i0+4*I]
-
-#define	a_0	%l0
-#define	a_1	%l1
-#define	a_2	%l2
-#define	a_3	%l3
-#define	a_4	%l4
-#define	a_5	%l5
-#define	a_6	%l6
-#define	a_7	%l7
-
-#define	b_0	%i3
-#define	b_1	%i4
-#define	b_2	%i5
-#define	b_3	%o5
-#define	b_4	%g1
-#define	b_5	%g2
-#define	b_6	%g3
-#define	b_7	%g4
-
-.align	32
-.global bn_mul_comba8
-/*
- * void bn_mul_comba8(r,a,b)
- * BN_ULONG *r,*a,*b;
- */
-bn_mul_comba8:
-	save	%sp,FRAME_SIZE,%sp
-	ld	ap(0),a_0
-	ld	bp(0),b_0
-	umul	a_0,b_0,c_1	!=!mul_add_c(a[0],b[0],c1,c2,c3);
-	ld	bp(1),b_1
-	rd	%y,c_2
-	st	c_1,rp(0)	!r[0]=c1;
-
-	umul	a_0,b_1,t_1	!=!mul_add_c(a[0],b[1],c2,c3,c1);
-	ld	ap(1),a_1
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	%g0,t_2,c_3	!=
-	addx	%g0,%g0,c_1
-	ld	ap(2),a_2
-	umul	a_1,b_0,t_1	!mul_add_c(a[1],b[0],c2,c3,c1);
-	addcc	c_2,t_1,c_2	!=
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	st	c_2,rp(1)	!r[1]=c2;
-	addx	c_1,%g0,c_1	!=
-
-	umul	a_2,b_0,t_1	!mul_add_c(a[2],b[0],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	addx	%g0,%g0,c_2
-	ld	bp(2),b_2
-	umul	a_1,b_1,t_1	!mul_add_c(a[1],b[1],c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	ld	bp(3),b_3
-	addx	c_2,%g0,c_2	!=
-	umul	a_0,b_2,t_1	!mul_add_c(a[0],b[2],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	addx	c_2,%g0,c_2
-	st	c_3,rp(2)	!r[2]=c3;
-
-	umul	a_0,b_3,t_1	!mul_add_c(a[0],b[3],c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	%g0,%g0,c_3
-	umul	a_1,b_2,t_1	!=!mul_add_c(a[1],b[2],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	ld	ap(3),a_3
-	umul	a_2,b_1,t_1	!mul_add_c(a[2],b[1],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2		!=
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	ld	ap(4),a_4
-	umul	a_3,b_0,t_1	!mul_add_c(a[3],b[0],c1,c2,c3);!=
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	st	c_1,rp(3)	!r[3]=c1;
-
-	umul	a_4,b_0,t_1	!mul_add_c(a[4],b[0],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	%g0,%g0,c_1
-	umul	a_3,b_1,t_1	!mul_add_c(a[3],b[1],c2,c3,c1);
-	addcc	c_2,t_1,c_2	!=
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	umul	a_2,b_2,t_1	!=!mul_add_c(a[2],b[2],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1	!=
-	ld	bp(4),b_4
-	umul	a_1,b_3,t_1	!mul_add_c(a[1],b[3],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	ld	bp(5),b_5
-	umul	a_0,b_4,t_1	!=!mul_add_c(a[0],b[4],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1	!=
-	st	c_2,rp(4)	!r[4]=c2;
-
-	umul	a_0,b_5,t_1	!mul_add_c(a[0],b[5],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2		!=
-	addxcc	c_1,t_2,c_1
-	addx	%g0,%g0,c_2
-	umul	a_1,b_4,t_1	!mul_add_c(a[1],b[4],c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	umul	a_2,b_3,t_1	!=!mul_add_c(a[2],b[3],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2	!=
-	umul	a_3,b_2,t_1	!mul_add_c(a[3],b[2],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	addx	c_2,%g0,c_2
-	ld	ap(5),a_5
-	umul	a_4,b_1,t_1	!mul_add_c(a[4],b[1],c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	ld	ap(6),a_6
-	addx	c_2,%g0,c_2	!=
-	umul	a_5,b_0,t_1	!mul_add_c(a[5],b[0],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	addx	c_2,%g0,c_2
-	st	c_3,rp(5)	!r[5]=c3;
-
-	umul	a_6,b_0,t_1	!mul_add_c(a[6],b[0],c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	%g0,%g0,c_3
-	umul	a_5,b_1,t_1	!=!mul_add_c(a[5],b[1],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	umul	a_4,b_2,t_1	!mul_add_c(a[4],b[2],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	c_3,%g0,c_3
-	umul	a_3,b_3,t_1	!mul_add_c(a[3],b[3],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2		!=
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	umul	a_2,b_4,t_1	!mul_add_c(a[2],b[4],c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	ld	bp(6),b_6
-	addx	c_3,%g0,c_3	!=
-	umul	a_1,b_5,t_1	!mul_add_c(a[1],b[5],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	c_3,%g0,c_3
-	ld	bp(7),b_7
-	umul	a_0,b_6,t_1	!mul_add_c(a[0],b[6],c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	st	c_1,rp(6)	!r[6]=c1;
-	addx	c_3,%g0,c_3	!=
-
-	umul	a_0,b_7,t_1	!mul_add_c(a[0],b[7],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	%g0,%g0,c_1
-	umul	a_1,b_6,t_1	!mul_add_c(a[1],b[6],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	umul	a_2,b_5,t_1	!mul_add_c(a[2],b[5],c2,c3,c1);
-	addcc	c_2,t_1,c_2	!=
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	umul	a_3,b_4,t_1	!=!mul_add_c(a[3],b[4],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1	!=
-	umul	a_4,b_3,t_1	!mul_add_c(a[4],b[3],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	umul	a_5,b_2,t_1	!mul_add_c(a[5],b[2],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	ld	ap(7),a_7
-	umul	a_6,b_1,t_1	!=!mul_add_c(a[6],b[1],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1	!=
-	umul	a_7,b_0,t_1	!mul_add_c(a[7],b[0],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	st	c_2,rp(7)	!r[7]=c2;
-
-	umul	a_7,b_1,t_1	!mul_add_c(a[7],b[1],c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	%g0,%g0,c_2
-	umul	a_6,b_2,t_1	!=!mul_add_c(a[6],b[2],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2	!=
-	umul	a_5,b_3,t_1	!mul_add_c(a[5],b[3],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	addx	c_2,%g0,c_2
-	umul	a_4,b_4,t_1	!mul_add_c(a[4],b[4],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2		!=
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	umul	a_3,b_5,t_1	!mul_add_c(a[3],b[5],c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	umul	a_2,b_6,t_1	!=!mul_add_c(a[2],b[6],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2	!=
-	umul	a_1,b_7,t_1	!mul_add_c(a[1],b[7],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!
-	addx	c_2,%g0,c_2
-	st	c_3,rp(8)	!r[8]=c3;
-
-	umul	a_2,b_7,t_1	!mul_add_c(a[2],b[7],c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	%g0,%g0,c_3
-	umul	a_3,b_6,t_1	!=!mul_add_c(a[3],b[6],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	umul	a_4,b_5,t_1	!mul_add_c(a[4],b[5],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	c_3,%g0,c_3
-	umul	a_5,b_4,t_1	!mul_add_c(a[5],b[4],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2		!=
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	umul	a_6,b_3,t_1	!mul_add_c(a[6],b[3],c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	umul	a_7,b_2,t_1	!=!mul_add_c(a[7],b[2],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	st	c_1,rp(9)	!r[9]=c1;
-
-	umul	a_7,b_3,t_1	!mul_add_c(a[7],b[3],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	%g0,%g0,c_1
-	umul	a_6,b_4,t_1	!mul_add_c(a[6],b[4],c2,c3,c1);
-	addcc	c_2,t_1,c_2	!=
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	umul	a_5,b_5,t_1	!=!mul_add_c(a[5],b[5],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1	!=
-	umul	a_4,b_6,t_1	!mul_add_c(a[4],b[6],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	umul	a_3,b_7,t_1	!mul_add_c(a[3],b[7],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	st	c_2,rp(10)	!r[10]=c2;
-
-	umul	a_4,b_7,t_1	!=!mul_add_c(a[4],b[7],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	%g0,%g0,c_2	!=
-	umul	a_5,b_6,t_1	!mul_add_c(a[5],b[6],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	addx	c_2,%g0,c_2
-	umul	a_6,b_5,t_1	!mul_add_c(a[6],b[5],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2		!=
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	umul	a_7,b_4,t_1	!mul_add_c(a[7],b[4],c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	st	c_3,rp(11)	!r[11]=c3;
-	addx	c_2,%g0,c_2	!=
-
-	umul	a_7,b_5,t_1	!mul_add_c(a[7],b[5],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	%g0,%g0,c_3
-	umul	a_6,b_6,t_1	!mul_add_c(a[6],b[6],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2		!=
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	umul	a_5,b_7,t_1	!mul_add_c(a[5],b[7],c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	st	c_1,rp(12)	!r[12]=c1;
-	addx	c_3,%g0,c_3	!=
-
-	umul	a_6,b_7,t_1	!mul_add_c(a[6],b[7],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	%g0,%g0,c_1
-	umul	a_7,b_6,t_1	!mul_add_c(a[7],b[6],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	st	c_2,rp(13)	!r[13]=c2;
-
-	umul	a_7,b_7,t_1	!=!mul_add_c(a[7],b[7],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	nop			!=
-	st	c_3,rp(14)	!r[14]=c3;
-	st	c_1,rp(15)	!r[15]=c1;
-
-	ret
-	restore	%g0,%g0,%o0
-
-.type	bn_mul_comba8,#function
-.size	bn_mul_comba8,(.-bn_mul_comba8)
-
-.align	32
-
-.global bn_mul_comba4
-/*
- * void bn_mul_comba4(r,a,b)
- * BN_ULONG *r,*a,*b;
- */
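-/*
- * A note on the pattern below (and above): each !mul_add_c(a[i],b[j],
- * c0,c1,c2) annotation marks one comba column step. umul yields the
- * low 32 bits of the product and rd %y the high 32; the addcc/addxcc/
- * addx triple then ripples both halves into a three-word column
- * accumulator. Roughly, in C -- a sketch for the reader, with names
- * mirroring the annotations rather than any real header:
- *
- *	unsigned long long t, s;
- *	t  = (unsigned long long)a[i] * b[j];		! umul + rd %y
- *	s  = (unsigned long long)c0 + (unsigned int)t;	! addcc
- *	c0 = (unsigned int)s;
- *	s  = (unsigned long long)c1 + (t >> 32) + (s >> 32);
- *	c1 = (unsigned int)s;				! addxcc
- *	c2 += (unsigned int)(s >> 32);			! addx
- */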
-bn_mul_comba4:
-	save	%sp,FRAME_SIZE,%sp
-	ld	ap(0),a_0
-	ld	bp(0),b_0
-	umul	a_0,b_0,c_1	!=!mul_add_c(a[0],b[0],c1,c2,c3);
-	ld	bp(1),b_1
-	rd	%y,c_2
-	st	c_1,rp(0)	!r[0]=c1;
-
-	umul	a_0,b_1,t_1	!=!mul_add_c(a[0],b[1],c2,c3,c1);
-	ld	ap(1),a_1
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	%g0,t_2,c_3
-	addx	%g0,%g0,c_1
-	ld	ap(2),a_2
-	umul	a_1,b_0,t_1	!=!mul_add_c(a[1],b[0],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1	!=
-	st	c_2,rp(1)	!r[1]=c2;
-
-	umul	a_2,b_0,t_1	!mul_add_c(a[2],b[0],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2		!=
-	addxcc	c_1,t_2,c_1
-	addx	%g0,%g0,c_2
-	ld	bp(2),b_2
-	umul	a_1,b_1,t_1	!=!mul_add_c(a[1],b[1],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2	!=
-	ld	bp(3),b_3
-	umul	a_0,b_2,t_1	!mul_add_c(a[0],b[2],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2		!=
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	st	c_3,rp(2)	!r[2]=c3;
-
-	umul	a_0,b_3,t_1	!=!mul_add_c(a[0],b[3],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	%g0,%g0,c_3	!=
-	umul	a_1,b_2,t_1	!mul_add_c(a[1],b[2],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	c_3,%g0,c_3
-	ld	ap(3),a_3
-	umul	a_2,b_1,t_1	!mul_add_c(a[2],b[1],c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	umul	a_3,b_0,t_1	!=!mul_add_c(a[3],b[0],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	st	c_1,rp(3)	!r[3]=c1;
-
-	umul	a_3,b_1,t_1	!mul_add_c(a[3],b[1],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	%g0,%g0,c_1
-	umul	a_2,b_2,t_1	!mul_add_c(a[2],b[2],c2,c3,c1);
-	addcc	c_2,t_1,c_2	!=
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	umul	a_1,b_3,t_1	!=!mul_add_c(a[1],b[3],c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1	!=
-	st	c_2,rp(4)	!r[4]=c2;
-
-	umul	a_2,b_3,t_1	!mul_add_c(a[2],b[3],c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2		!=
-	addxcc	c_1,t_2,c_1
-	addx	%g0,%g0,c_2
-	umul	a_3,b_2,t_1	!mul_add_c(a[3],b[2],c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	st	c_3,rp(5)	!r[5]=c3;
-	addx	c_2,%g0,c_2	!=
-
-	umul	a_3,b_3,t_1	!mul_add_c(a[3],b[3],c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	st	c_1,rp(6)	!r[6]=c1;
-	st	c_2,rp(7)	!r[7]=c2;
-	
-	ret
-	restore	%g0,%g0,%o0
-
-.type	bn_mul_comba4,#function
-.size	bn_mul_comba4,(.-bn_mul_comba4)
-
-.align	32
-
-.global bn_sqr_comba8
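-/*
- * void bn_sqr_comba8(r,a)
- * BN_ULONG *r,*a;
- *
- * Each !sqr_add_c2(a,i,j,...) annotation below doubles the cross
- * product a[i]*a[j] simply by running the same addcc/addxcc/addx
- * ripple twice with the identical t_1/t_2 halves.
- */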
-bn_sqr_comba8:
-	save	%sp,FRAME_SIZE,%sp
-	ld	ap(0),a_0
-	ld	ap(1),a_1
-	umul	a_0,a_0,c_1	!=!sqr_add_c(a,0,c1,c2,c3);
-	rd	%y,c_2
-	st	c_1,rp(0)	!r[0]=c1;
-
-	ld	ap(2),a_2
-	umul	a_0,a_1,t_1	!=!sqr_add_c2(a,1,0,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	%g0,t_2,c_3
-	addx	%g0,%g0,c_1	!=
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3
-	st	c_2,rp(1)	!r[1]=c2;
-	addx	c_1,%g0,c_1	!=
-
-	umul	a_2,a_0,t_1	!sqr_add_c2(a,2,0,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	addx	%g0,%g0,c_2
-	addcc	c_3,t_1,c_3
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2	!=
-	ld	ap(3),a_3
-	umul	a_1,a_1,t_1	!sqr_add_c(a,1,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2		!=
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	st	c_3,rp(2)	!r[2]=c3;
-
-	umul	a_0,a_3,t_1	!=!sqr_add_c2(a,3,0,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	%g0,%g0,c_3	!=
-	addcc	c_1,t_1,c_1
-	addxcc	c_2,t_2,c_2
-	ld	ap(4),a_4
-	addx	c_3,%g0,c_3	!=
-	umul	a_1,a_2,t_1	!sqr_add_c2(a,2,1,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	c_3,%g0,c_3
-	addcc	c_1,t_1,c_1
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	st	c_1,rp(3)	!r[3]=c1;
-
-	umul	a_4,a_0,t_1	!sqr_add_c2(a,4,0,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	%g0,%g0,c_1
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	umul	a_3,a_1,t_1	!sqr_add_c2(a,3,1,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	ld	ap(5),a_5
-	umul	a_2,a_2,t_1	!sqr_add_c(a,2,c2,c3,c1);
-	addcc	c_2,t_1,c_2	!=
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	st	c_2,rp(4)	!r[4]=c2;
-	addx	c_1,%g0,c_1	!=
-
-	umul	a_0,a_5,t_1	!sqr_add_c2(a,5,0,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	addx	%g0,%g0,c_2
-	addcc	c_3,t_1,c_3
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2	!=
-	umul	a_1,a_4,t_1	!sqr_add_c2(a,4,1,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	addx	c_2,%g0,c_2
-	addcc	c_3,t_1,c_3
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2	!=
-	ld	ap(6),a_6
-	umul	a_2,a_3,t_1	!sqr_add_c2(a,3,2,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2		!=
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	addcc	c_3,t_1,c_3
-	addxcc	c_1,t_2,c_1	!=
-	addx	c_2,%g0,c_2
-	st	c_3,rp(5)	!r[5]=c3;
-
-	umul	a_6,a_0,t_1	!sqr_add_c2(a,6,0,c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	%g0,%g0,c_3
-	addcc	c_1,t_1,c_1	!=
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	umul	a_5,a_1,t_1	!sqr_add_c2(a,5,1,c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	addcc	c_1,t_1,c_1	!=
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	umul	a_4,a_2,t_1	!sqr_add_c2(a,4,2,c1,c2,c3);
-	addcc	c_1,t_1,c_1	!=
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	addcc	c_1,t_1,c_1	!=
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3
-	ld	ap(7),a_7
-	umul	a_3,a_3,t_1	!=!sqr_add_c(a,3,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	st	c_1,rp(6)	!r[6]=c1;
-
-	umul	a_0,a_7,t_1	!sqr_add_c2(a,7,0,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	%g0,%g0,c_1
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	umul	a_1,a_6,t_1	!sqr_add_c2(a,6,1,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	umul	a_2,a_5,t_1	!sqr_add_c2(a,5,2,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	umul	a_3,a_4,t_1	!sqr_add_c2(a,4,3,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	st	c_2,rp(7)	!r[7]=c2;
-
-	umul	a_7,a_1,t_1	!sqr_add_c2(a,7,1,c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	%g0,%g0,c_2
-	addcc	c_3,t_1,c_3	!=
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	umul	a_6,a_2,t_1	!sqr_add_c2(a,6,2,c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	addcc	c_3,t_1,c_3	!=
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	umul	a_5,a_3,t_1	!sqr_add_c2(a,5,3,c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	addcc	c_3,t_1,c_3	!=
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	umul	a_4,a_4,t_1	!sqr_add_c(a,4,c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	st	c_3,rp(8)	!r[8]=c3;
-	addx	c_2,%g0,c_2	!=
-
-	umul	a_2,a_7,t_1	!sqr_add_c2(a,7,2,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	%g0,%g0,c_3
-	addcc	c_1,t_1,c_1
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	umul	a_3,a_6,t_1	!sqr_add_c2(a,6,3,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	c_3,%g0,c_3
-	addcc	c_1,t_1,c_1
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	umul	a_4,a_5,t_1	!sqr_add_c2(a,5,4,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	c_3,%g0,c_3
-	addcc	c_1,t_1,c_1
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	st	c_1,rp(9)	!r[9]=c1;
-
-	umul	a_7,a_3,t_1	!sqr_add_c2(a,7,3,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	%g0,%g0,c_1
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	umul	a_6,a_4,t_1	!sqr_add_c2(a,6,4,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	umul	a_5,a_5,t_1	!sqr_add_c(a,5,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	st	c_2,rp(10)	!r[10]=c2;
-
-	umul	a_4,a_7,t_1	!=!sqr_add_c2(a,7,4,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	%g0,%g0,c_2	!=
-	addcc	c_3,t_1,c_3
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2
-	umul	a_5,a_6,t_1	!=!sqr_add_c2(a,6,5,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	c_2,%g0,c_2	!=
-	addcc	c_3,t_1,c_3
-	addxcc	c_1,t_2,c_1
-	st	c_3,rp(11)	!r[11]=c3;
-	addx	c_2,%g0,c_2	!=
-
-	umul	a_7,a_5,t_1	!sqr_add_c2(a,7,5,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	%g0,%g0,c_3
-	addcc	c_1,t_1,c_1
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	umul	a_6,a_6,t_1	!sqr_add_c(a,6,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	c_3,%g0,c_3
-	st	c_1,rp(12)	!r[12]=c1;
-
-	umul	a_6,a_7,t_1	!sqr_add_c2(a,7,6,c2,c3,c1);
-	addcc	c_2,t_1,c_2	!=
-	rd	%y,t_2
-	addxcc	c_3,t_2,c_3
-	addx	%g0,%g0,c_1
-	addcc	c_2,t_1,c_2	!=
-	addxcc	c_3,t_2,c_3
-	st	c_2,rp(13)	!r[13]=c2;
-	addx	c_1,%g0,c_1	!=
-
-	umul	a_7,a_7,t_1	!sqr_add_c(a,7,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1	!=
-	st	c_3,rp(14)	!r[14]=c3;
-	st	c_1,rp(15)	!r[15]=c1;
-
-	ret
-	restore	%g0,%g0,%o0
-
-.type	bn_sqr_comba8,#function
-.size	bn_sqr_comba8,(.-bn_sqr_comba8)
-
-.align	32
-
-.global bn_sqr_comba4
-/*
- * void bn_sqr_comba4(r,a)
- * BN_ULONG *r,*a;
- */
-bn_sqr_comba4:
-	save	%sp,FRAME_SIZE,%sp
-	ld	ap(0),a_0
-	umul	a_0,a_0,c_1	!sqr_add_c(a,0,c1,c2,c3);
-	ld	ap(1),a_1	!=
-	rd	%y,c_2
-	st	c_1,rp(0)	!r[0]=c1;
-
-	ld	ap(2),a_2
-	umul	a_0,a_1,t_1	!=!sqr_add_c2(a,1,0,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2
-	addxcc	%g0,t_2,c_3
-	addx	%g0,%g0,c_1	!=
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1	!=
-	st	c_2,rp(1)	!r[1]=c2;
-
-	umul	a_2,a_0,t_1	!sqr_add_c2(a,2,0,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2		!=
-	addxcc	c_1,t_2,c_1
-	addx	%g0,%g0,c_2
-	addcc	c_3,t_1,c_3
-	addxcc	c_1,t_2,c_1	!=
-	addx	c_2,%g0,c_2
-	ld	ap(3),a_3
-	umul	a_1,a_1,t_1	!sqr_add_c(a,1,c3,c1,c2);
-	addcc	c_3,t_1,c_3	!=
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	st	c_3,rp(2)	!r[2]=c3;
-	addx	c_2,%g0,c_2	!=
-
-	umul	a_0,a_3,t_1	!sqr_add_c2(a,3,0,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	%g0,%g0,c_3
-	addcc	c_1,t_1,c_1
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	umul	a_1,a_2,t_1	!sqr_add_c2(a,2,1,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	addx	c_3,%g0,c_3
-	addcc	c_1,t_1,c_1
-	addxcc	c_2,t_2,c_2
-	addx	c_3,%g0,c_3	!=
-	st	c_1,rp(3)	!r[3]=c1;
-
-	umul	a_3,a_1,t_1	!sqr_add_c2(a,3,1,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	%g0,%g0,c_1
-	addcc	c_2,t_1,c_2
-	addxcc	c_3,t_2,c_3	!=
-	addx	c_1,%g0,c_1
-	umul	a_2,a_2,t_1	!sqr_add_c(a,2,c2,c3,c1);
-	addcc	c_2,t_1,c_2
-	rd	%y,t_2		!=
-	addxcc	c_3,t_2,c_3
-	addx	c_1,%g0,c_1
-	st	c_2,rp(4)	!r[4]=c2;
-
-	umul	a_2,a_3,t_1	!=!sqr_add_c2(a,3,2,c3,c1,c2);
-	addcc	c_3,t_1,c_3
-	rd	%y,t_2
-	addxcc	c_1,t_2,c_1
-	addx	%g0,%g0,c_2	!=
-	addcc	c_3,t_1,c_3
-	addxcc	c_1,t_2,c_1
-	st	c_3,rp(5)	!r[5]=c3;
-	addx	c_2,%g0,c_2	!=
-
-	umul	a_3,a_3,t_1	!sqr_add_c(a,3,c1,c2,c3);
-	addcc	c_1,t_1,c_1
-	rd	%y,t_2
-	addxcc	c_2,t_2,c_2	!=
-	st	c_1,rp(6)	!r[6]=c1;
-	st	c_2,rp(7)	!r[7]=c2;
-	
-	ret
-	restore	%g0,%g0,%o0
-
-.type	bn_sqr_comba4,#function
-.size	bn_sqr_comba4,(.-bn_sqr_comba4)
-
-.align	32

+ 0 - 1558
drivers/builtin_openssl2/crypto/bn/asm/sparcv8plus.S

@@ -1,1558 +0,0 @@
-.ident	"sparcv8plus.s, Version 1.4"
-.ident	"SPARC v9 ISA artwork by Andy Polyakov <[email protected]>"
-
-/*
- * ====================================================================
- * Written by Andy Polyakov <[email protected]> for the OpenSSL
- * project.
- *
- * Rights for redistribution and usage in source and binary forms are
- * granted according to the OpenSSL license. Warranty of any kind is
- * disclaimed.
- * ====================================================================
- */
-
-/*
- * This is my modest contribution to the OpenSSL project (see
- * http://www.openssl.org/ for more information about it) and is
- * a drop-in UltraSPARC ISA replacement for the crypto/bn/bn_asm.c
- * module. For updates see http://fy.chalmers.se/~appro/hpe/.
- *
- * Questions-n-answers.
- *
- * Q. How to compile?
- * A. With SC4.x/SC5.x:
- *
- *	cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
- *
- *    and with gcc:
- *
- *	gcc -mcpu=ultrasparc -c bn_asm.sparc.v8plus.S -o bn_asm.o
- *
- *    or if the above fails (it does if you have gas installed):
- *
- *	gcc -E bn_asm.sparc.v8plus.S | as -xarch=v8plus /dev/fd/0 -o bn_asm.o
- *
- *    Quick-n-dirty way to fuse the module into the library.
- *    Provided that the library is already configured and built
- *    (in the 0.9.2 case, with the no-asm option):
- *
- *	# cd crypto/bn
- *	# cp /some/place/bn_asm.sparc.v8plus.S .
- *	# cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
- *	# make
- *	# cd ../..
- *	# make; make test
- *
- *    Quick-n-dirty way to get rid of it:
- *
- *	# cd crypto/bn
- *	# touch bn_asm.c
- *	# make
- *	# cd ../..
- *	# make; make test
- *
- * Q. V8plus architecture? What kind of beast is that?
- * A. Well, it's rather a programming model than an architecture...
- *    It's actually v9-compliant, i.e. it runs on *any* UltraSPARC CPU,
- *    under special conditions, namely when the kernel doesn't preserve
- *    the upper 32 bits of otherwise 64-bit registers during a context
- *    switch.
- *
- * Q. Why just UltraSPARC? What about SuperSPARC?
- * A. The original release targeted UltraSPARC only. Now a SuperSPARC
- *    version is provided alongside. Both versions share the
- *    bn_*comba[48] implementations (see the comment later in the code
- *    for an explanation). But what's so special about this UltraSPARC
- *    implementation? Why didn't I let the compiler do the job? The
- *    trouble is that most available compilers (well, SC5.0 is the only
- *    exception) don't attempt to take advantage of UltraSPARC's
- *    64-bitness under 32-bit kernels even though it's perfectly
- *    possible (see next question).
- *
- * Q. 64-bit registers under 32-bit kernels? Didn't you just say it
- *    doesn't work?
- * A. You can't address *all* registers as 64-bit wide:-( The catch is
- *    that you actually may rely upon %o0-%o5 and %g1-%g4 being fully
- *    preserved if you're in a leaf function, i.e. one that never calls
- *    any other function. All functions in this module are leaf and
- *    10 registers are a handful. And as a matter of fact the
- *    non-"comba" routines don't require even that much, and I could
- *    even afford not to allocate a stack frame of my own for 'em:-)
- *
- * Q. What about 64-bit kernels?
- * A. What about 'em? Just kidding:-) A pure 64-bit version is
- *    currently under evaluation and development...
- *
- * Q. What about shared libraries?
- * A. What about 'em? Kidding again:-) The code does *not* contain any
- *    position-dependent code and it's safe to include it in a
- *    shared library as is.
- *
- * Q. How much faster does it go?
- * A. Do you have a good benchmark? In either case, below is what I
- *    experience with the crypto/bn/expspeed.c test program:
- *
- *	v8plus module on U10/300MHz against bn_asm.c compiled with:
- *
- *	cc-5.0 -xarch=v8plus -xO5 -xdepend	+7-12%
- *	cc-4.2 -xarch=v8plus -xO5 -xdepend	+25-35%
- *	egcs-1.1.2 -mcpu=ultrasparc -O3		+35-45%
- *
- *	v8 module on SS10/60MHz against bn_asm.c compiled with:
- *
- *	cc-5.0 -xarch=v8 -xO5 -xdepend		+7-10%
- *	cc-4.2 -xarch=v8 -xO5 -xdepend		+10%
- *	egcs-1.1.2 -mv8 -O3			+35-45%
- *
- *    As you can see it's damn hard to beat the new Sun C compiler,
- *    and it's first and foremost GNU C users who will appreciate this
- *    assembler implementation:-)
- */
-
-/*
- * Revision history.
- *
- * 1.0	- initial release;
- * 1.1	- new loop unrolling model(*);
- *	- some more fine tuning;
- * 1.2	- made gas friendly;
- *	- updates to documentation concerning v9;
- *	- new performance comparison matrix;
- * 1.3	- fixed problem with /usr/ccs/lib/cpp;
- * 1.4	- native V9 bn_*_comba[48] implementation (15% more efficient)
- *	  resulting in slight overall performance kick;
- *	- some retunes;
- *	- support for GNU as added;
- *
- * (*)	Originally unrolled loop looked like this:
- *	    for (;;) {
- *		op(p+0); if (--n==0) break;
- *		op(p+1); if (--n==0) break;
- *		op(p+2); if (--n==0) break;
- *		op(p+3); if (--n==0) break;
- *		p+=4;
- *	    }
- *	I unroll according to the following:
- *	    while (n&~3) {
- *		op(p+0); op(p+1); op(p+2); op(p+3);
- *		p+=4; n-=4;
- *	    }
- *	    if (n) {
- *		op(p+0); if (--n==0) return;
- *		op(p+1); if (--n==0) return;
- *		op(p+2); return;
- *	    }
- */
-
-#if defined(__SUNPRO_C) && defined(__sparcv9)
-  /* They've said -xarch=v9 at command line */
-  .register	%g2,#scratch
-  .register	%g3,#scratch
-# define	FRAME_SIZE	-192
-#elif defined(__GNUC__) && defined(__arch64__)
-  /* They've said -m64 at command line */
-  .register	%g2,#scratch
-  .register	%g3,#scratch
-# define	FRAME_SIZE	-192
-#else 
-# define	FRAME_SIZE	-96
-#endif 
-/*
- * GNU assembler can't stand stuw:-(
- */
-#define stuw st
-
-.section	".text",#alloc,#execinstr
-.file		"bn_asm.sparc.v8plus.S"
-
-.align	32
-
-.global bn_mul_add_words
-/*
- * BN_ULONG bn_mul_add_words(rp,ap,num,w)
- * BN_ULONG *rp,*ap;
- * int num;
- * BN_ULONG w;
- */
-bn_mul_add_words:
-	sra	%o2,%g0,%o2	! signx %o2
-	brgz,a	%o2,.L_bn_mul_add_words_proceed
-	lduw	[%o1],%g2
-	retl
-	clr	%o0
-	nop
-	nop
-	nop
-
-.L_bn_mul_add_words_proceed:
-	srl	%o3,%g0,%o3	! clruw	%o3
-	andcc	%o2,-4,%g0
-	bz,pn	%icc,.L_bn_mul_add_words_tail
-	clr	%o5
-
-.L_bn_mul_add_words_loop:	! wow! 32 aligned!
-	lduw	[%o0],%g1
-	lduw	[%o1+4],%g3
-	mulx	%o3,%g2,%g2
-	add	%g1,%o5,%o4
-	nop
-	add	%o4,%g2,%o4
-	stuw	%o4,[%o0]
-	srlx	%o4,32,%o5
-
-	lduw	[%o0+4],%g1
-	lduw	[%o1+8],%g2
-	mulx	%o3,%g3,%g3
-	add	%g1,%o5,%o4
-	dec	4,%o2
-	add	%o4,%g3,%o4
-	stuw	%o4,[%o0+4]
-	srlx	%o4,32,%o5
-
-	lduw	[%o0+8],%g1
-	lduw	[%o1+12],%g3
-	mulx	%o3,%g2,%g2
-	add	%g1,%o5,%o4
-	inc	16,%o1
-	add	%o4,%g2,%o4
-	stuw	%o4,[%o0+8]
-	srlx	%o4,32,%o5
-
-	lduw	[%o0+12],%g1
-	mulx	%o3,%g3,%g3
-	add	%g1,%o5,%o4
-	inc	16,%o0
-	add	%o4,%g3,%o4
-	andcc	%o2,-4,%g0
-	stuw	%o4,[%o0-4]
-	srlx	%o4,32,%o5
-	bnz,a,pt	%icc,.L_bn_mul_add_words_loop
-	lduw	[%o1],%g2
-
-	brnz,a,pn	%o2,.L_bn_mul_add_words_tail
-	lduw	[%o1],%g2
-.L_bn_mul_add_words_return:
-	retl
-	mov	%o5,%o0
-
-.L_bn_mul_add_words_tail:
-	lduw	[%o0],%g1
-	mulx	%o3,%g2,%g2
-	add	%g1,%o5,%o4
-	dec	%o2
-	add	%o4,%g2,%o4
-	srlx	%o4,32,%o5
-	brz,pt	%o2,.L_bn_mul_add_words_return
-	stuw	%o4,[%o0]
-
-	lduw	[%o1+4],%g2
-	lduw	[%o0+4],%g1
-	mulx	%o3,%g2,%g2
-	add	%g1,%o5,%o4
-	dec	%o2
-	add	%o4,%g2,%o4
-	srlx	%o4,32,%o5
-	brz,pt	%o2,.L_bn_mul_add_words_return
-	stuw	%o4,[%o0+4]
-
-	lduw	[%o1+8],%g2
-	lduw	[%o0+8],%g1
-	mulx	%o3,%g2,%g2
-	add	%g1,%o5,%o4
-	add	%o4,%g2,%o4
-	stuw	%o4,[%o0+8]
-	retl
-	srlx	%o4,32,%o0
-
-.type	bn_mul_add_words,#function
-.size	bn_mul_add_words,(.-bn_mul_add_words)
-
-.align	32
-
-.global bn_mul_words
-/*
- * BN_ULONG bn_mul_words(rp,ap,num,w)
- * BN_ULONG *rp,*ap;
- * int num;
- * BN_ULONG w;
- */
-bn_mul_words:
-	sra	%o2,%g0,%o2	! signx %o2
-	brgz,a	%o2,.L_bn_mul_words_proceed
-	lduw	[%o1],%g2
-	retl
-	clr	%o0
-	nop
-	nop
-	nop
-
-.L_bn_mul_words_proceed:
-	srl	%o3,%g0,%o3	! clruw	%o3
-	andcc	%o2,-4,%g0
-	bz,pn	%icc,.L_bn_mul_words_tail
-	clr	%o5
-
-.L_bn_mul_words_loop:		! wow! 32 aligned!
-	lduw	[%o1+4],%g3
-	mulx	%o3,%g2,%g2
-	add	%g2,%o5,%o4
-	nop
-	stuw	%o4,[%o0]
-	srlx	%o4,32,%o5
-
-	lduw	[%o1+8],%g2
-	mulx	%o3,%g3,%g3
-	add	%g3,%o5,%o4
-	dec	4,%o2
-	stuw	%o4,[%o0+4]
-	srlx	%o4,32,%o5
-
-	lduw	[%o1+12],%g3
-	mulx	%o3,%g2,%g2
-	add	%g2,%o5,%o4
-	inc	16,%o1
-	stuw	%o4,[%o0+8]
-	srlx	%o4,32,%o5
-
-	mulx	%o3,%g3,%g3
-	add	%g3,%o5,%o4
-	inc	16,%o0
-	stuw	%o4,[%o0-4]
-	srlx	%o4,32,%o5
-	andcc	%o2,-4,%g0
-	bnz,a,pt	%icc,.L_bn_mul_words_loop
-	lduw	[%o1],%g2
-	nop
-	nop
-
-	brnz,a,pn	%o2,.L_bn_mul_words_tail
-	lduw	[%o1],%g2
-.L_bn_mul_words_return:
-	retl
-	mov	%o5,%o0
-
-.L_bn_mul_words_tail:
-	mulx	%o3,%g2,%g2
-	add	%g2,%o5,%o4
-	dec	%o2
-	srlx	%o4,32,%o5
-	brz,pt	%o2,.L_bn_mul_words_return
-	stuw	%o4,[%o0]
-
-	lduw	[%o1+4],%g2
-	mulx	%o3,%g2,%g2
-	add	%g2,%o5,%o4
-	dec	%o2
-	srlx	%o4,32,%o5
-	brz,pt	%o2,.L_bn_mul_words_return
-	stuw	%o4,[%o0+4]
-
-	lduw	[%o1+8],%g2
-	mulx	%o3,%g2,%g2
-	add	%g2,%o5,%o4
-	stuw	%o4,[%o0+8]
-	retl
-	srlx	%o4,32,%o0
-
-.type	bn_mul_words,#function
-.size	bn_mul_words,(.-bn_mul_words)
-
-.align  32
-.global	bn_sqr_words
-/*
- * void bn_sqr_words(r,a,n)
- * BN_ULONG *r,*a;
- * int n;
- */
-bn_sqr_words:
-	sra	%o2,%g0,%o2	! signx %o2
-	brgz,a	%o2,.L_bn_sqr_words_proceed
-	lduw	[%o1],%g2
-	retl
-	clr	%o0
-	nop
-	nop
-	nop
-
-.L_bn_sqr_words_proceed:
-	andcc	%o2,-4,%g0
-	nop
-	bz,pn	%icc,.L_bn_sqr_words_tail
-	nop
-
-.L_bn_sqr_words_loop:		! wow! 32 aligned!
-	lduw	[%o1+4],%g3
-	mulx	%g2,%g2,%o4
-	stuw	%o4,[%o0]
-	srlx	%o4,32,%o5
-	stuw	%o5,[%o0+4]
-	nop
-
-	lduw	[%o1+8],%g2
-	mulx	%g3,%g3,%o4
-	dec	4,%o2
-	stuw	%o4,[%o0+8]
-	srlx	%o4,32,%o5
-	stuw	%o5,[%o0+12]
-
-	lduw	[%o1+12],%g3
-	mulx	%g2,%g2,%o4
-	srlx	%o4,32,%o5
-	stuw	%o4,[%o0+16]
-	inc	16,%o1
-	stuw	%o5,[%o0+20]
-
-	mulx	%g3,%g3,%o4
-	inc	32,%o0
-	stuw	%o4,[%o0-8]
-	srlx	%o4,32,%o5
-	andcc	%o2,-4,%g2
-	stuw	%o5,[%o0-4]
-	bnz,a,pt	%icc,.L_bn_sqr_words_loop
-	lduw	[%o1],%g2
-	nop
-
-	brnz,a,pn	%o2,.L_bn_sqr_words_tail
-	lduw	[%o1],%g2
-.L_bn_sqr_words_return:
-	retl
-	clr	%o0
-
-.L_bn_sqr_words_tail:
-	mulx	%g2,%g2,%o4
-	dec	%o2
-	stuw	%o4,[%o0]
-	srlx	%o4,32,%o5
-	brz,pt	%o2,.L_bn_sqr_words_return
-	stuw	%o5,[%o0+4]
-
-	lduw	[%o1+4],%g2
-	mulx	%g2,%g2,%o4
-	dec	%o2
-	stuw	%o4,[%o0+8]
-	srlx	%o4,32,%o5
-	brz,pt	%o2,.L_bn_sqr_words_return
-	stuw	%o5,[%o0+12]
-
-	lduw	[%o1+8],%g2
-	mulx	%g2,%g2,%o4
-	srlx	%o4,32,%o5
-	stuw	%o4,[%o0+16]
-	stuw	%o5,[%o0+20]
-	retl
-	clr	%o0
-
-.type	bn_sqr_words,#function
-.size	bn_sqr_words,(.-bn_sqr_words)
-
-.align	32
-.global bn_div_words
-/*
- * BN_ULONG bn_div_words(h,l,d)
- * BN_ULONG h,l,d;
- */
-bn_div_words:
-	sllx	%o0,32,%o0
-	or	%o0,%o1,%o0
-	udivx	%o0,%o2,%o0
-	retl
-	srl	%o0,%g0,%o0	! clruw	%o0
-
-.type	bn_div_words,#function
-.size	bn_div_words,(.-bn_div_words)
-
-.align	32
-
-.global bn_add_words
-/*
- * BN_ULONG bn_add_words(rp,ap,bp,n)
- * BN_ULONG *rp,*ap,*bp;
- * int n;
- */
-bn_add_words:
-	sra	%o3,%g0,%o3	! signx %o3
-	brgz,a	%o3,.L_bn_add_words_proceed
-	lduw	[%o1],%o4
-	retl
-	clr	%o0
-
-.L_bn_add_words_proceed:
-	andcc	%o3,-4,%g0
-	bz,pn	%icc,.L_bn_add_words_tail
-	addcc	%g0,0,%g0	! clear carry flag
-
-.L_bn_add_words_loop:		! wow! 32 aligned!
-	dec	4,%o3
-	lduw	[%o2],%o5
-	lduw	[%o1+4],%g1
-	lduw	[%o2+4],%g2
-	lduw	[%o1+8],%g3
-	lduw	[%o2+8],%g4
-	addccc	%o5,%o4,%o5
-	stuw	%o5,[%o0]
-
-	lduw	[%o1+12],%o4
-	lduw	[%o2+12],%o5
-	inc	16,%o1
-	addccc	%g1,%g2,%g1
-	stuw	%g1,[%o0+4]
-	
-	inc	16,%o2
-	addccc	%g3,%g4,%g3
-	stuw	%g3,[%o0+8]
-
-	inc	16,%o0
-	addccc	%o5,%o4,%o5
-	stuw	%o5,[%o0-4]
-	and	%o3,-4,%g1
-	brnz,a,pt	%g1,.L_bn_add_words_loop
-	lduw	[%o1],%o4
-
-	brnz,a,pn	%o3,.L_bn_add_words_tail
-	lduw	[%o1],%o4
-.L_bn_add_words_return:
-	clr	%o0
-	retl
-	movcs	%icc,1,%o0
-	nop
-
-.L_bn_add_words_tail:
-	lduw	[%o2],%o5
-	dec	%o3
-	addccc	%o5,%o4,%o5
-	brz,pt	%o3,.L_bn_add_words_return
-	stuw	%o5,[%o0]
-
-	lduw	[%o1+4],%o4
-	lduw	[%o2+4],%o5
-	dec	%o3
-	addccc	%o5,%o4,%o5
-	brz,pt	%o3,.L_bn_add_words_return
-	stuw	%o5,[%o0+4]
-
-	lduw	[%o1+8],%o4
-	lduw	[%o2+8],%o5
-	addccc	%o5,%o4,%o5
-	stuw	%o5,[%o0+8]
-	clr	%o0
-	retl
-	movcs	%icc,1,%o0
-
-.type	bn_add_words,#function
-.size	bn_add_words,(.-bn_add_words)
-
-.global bn_sub_words
-/*
- * BN_ULONG bn_sub_words(rp,ap,bp,n)
- * BN_ULONG *rp,*ap,*bp;
- * int n;
- */
-bn_sub_words:
-	sra	%o3,%g0,%o3	! signx %o3
-	brgz,a	%o3,.L_bn_sub_words_proceed
-	lduw	[%o1],%o4
-	retl
-	clr	%o0
-
-.L_bn_sub_words_proceed:
-	andcc	%o3,-4,%g0
-	bz,pn	%icc,.L_bn_sub_words_tail
-	addcc	%g0,0,%g0	! clear carry flag
-
-.L_bn_sub_words_loop:		! wow! 32 aligned!
-	dec	4,%o3
-	lduw	[%o2],%o5
-	lduw	[%o1+4],%g1
-	lduw	[%o2+4],%g2
-	lduw	[%o1+8],%g3
-	lduw	[%o2+8],%g4
-	subccc	%o4,%o5,%o5
-	stuw	%o5,[%o0]
-
-	lduw	[%o1+12],%o4
-	lduw	[%o2+12],%o5
-	inc	16,%o1
-	subccc	%g1,%g2,%g2
-	stuw	%g2,[%o0+4]
-
-	inc	16,%o2
-	subccc	%g3,%g4,%g4
-	stuw	%g4,[%o0+8]
-
-	inc	16,%o0
-	subccc	%o4,%o5,%o5
-	stuw	%o5,[%o0-4]
-	and	%o3,-4,%g1
-	brnz,a,pt	%g1,.L_bn_sub_words_loop
-	lduw	[%o1],%o4
-
-	brnz,a,pn	%o3,.L_bn_sub_words_tail
-	lduw	[%o1],%o4
-.L_bn_sub_words_return:
-	clr	%o0
-	retl
-	movcs	%icc,1,%o0
-	nop
-
-.L_bn_sub_words_tail:		! wow! 32 aligned!
-	lduw	[%o2],%o5
-	dec	%o3
-	subccc	%o4,%o5,%o5
-	brz,pt	%o3,.L_bn_sub_words_return
-	stuw	%o5,[%o0]
-
-	lduw	[%o1+4],%o4
-	lduw	[%o2+4],%o5
-	dec	%o3
-	subccc	%o4,%o5,%o5
-	brz,pt	%o3,.L_bn_sub_words_return
-	stuw	%o5,[%o0+4]
-
-	lduw	[%o1+8],%o4
-	lduw	[%o2+8],%o5
-	subccc	%o4,%o5,%o5
-	stuw	%o5,[%o0+8]
-	clr	%o0
-	retl
-	movcs	%icc,1,%o0
-
-.type	bn_sub_words,#function
-.size	bn_sub_words,(.-bn_sub_words)
-
-/*
- * The code below depends on the fact that the upper parts of %l0-%l7
- * and %i0-%i7 are zeroed by the kernel after a context switch. In
- * previous versions this comment stated that "the trouble is that
- * it's not feasible to implement the mumbo-jumbo in less V9
- * instructions:-(", which apparently isn't true thanks to the
- * 'bcs,a %xcc,.+8; inc %rd' pair. But the performance improvement
- * results not from the shorter code, but from the elimination of
- * multicycle, non-pairable 'rd %y,%rd' instructions.
- *
- *							Andy.
- */
-
-/*
- * Here is register usage map for *all* routines below.
- */
-#define t_1	%o0
-#define	t_2	%o1
-#define c_12	%o2
-#define c_3	%o3
-
-#define ap(I)	[%i1+4*I]
-#define bp(I)	[%i2+4*I]
-#define rp(I)	[%i0+4*I]
-
-#define	a_0	%l0
-#define	a_1	%l1
-#define	a_2	%l2
-#define	a_3	%l3
-#define	a_4	%l4
-#define	a_5	%l5
-#define	a_6	%l6
-#define	a_7	%l7
-
-#define	b_0	%i3
-#define	b_1	%i4
-#define	b_2	%i5
-#define	b_3	%o4
-#define	b_4	%o5
-#define	b_5	%o7
-#define	b_6	%g1
-#define	b_7	%g4
-
-.align	32
-.global bn_mul_comba8
-/*
- * void bn_mul_comba8(r,a,b)
- * BN_ULONG *r,*a,*b;
- */
-bn_mul_comba8:
-	save	%sp,FRAME_SIZE,%sp
-	mov	1,t_2
-	lduw	ap(0),a_0
-	sllx	t_2,32,t_2
-	lduw	bp(0),b_0	!=
-	lduw	bp(1),b_1
-	mulx	a_0,b_0,t_1	!mul_add_c(a[0],b[0],c1,c2,c3);
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(0)	!=!r[0]=c1;
-
-	lduw	ap(1),a_1
-	mulx	a_0,b_1,t_1	!mul_add_c(a[0],b[1],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3		!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(2),a_2
-	mulx	a_1,b_0,t_1	!=!mul_add_c(a[1],b[0],c2,c3,c1);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12	!=
-	stuw	t_1,rp(1)	!r[1]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_2,b_0,t_1	!mul_add_c(a[2],b[0],c3,c1,c2);
-	addcc	c_12,t_1,c_12	!=
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	bp(2),b_2	!=
-	mulx	a_1,b_1,t_1	!mul_add_c(a[1],b[1],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	lduw	bp(3),b_3
-	mulx	a_0,b_2,t_1	!mul_add_c(a[0],b[2],c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(2)	!r[2]=c3;
-	or	c_12,c_3,c_12	!=
-
-	mulx	a_0,b_3,t_1	!mul_add_c(a[0],b[3],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_1,b_2,t_1	!=!mul_add_c(a[1],b[2],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	lduw	ap(3),a_3
-	mulx	a_2,b_1,t_1	!mul_add_c(a[2],b[1],c1,c2,c3);
-	addcc	c_12,t_1,c_12	!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(4),a_4
-	mulx	a_3,b_0,t_1	!=!mul_add_c(a[3],b[0],c1,c2,c3);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12	!=
-	stuw	t_1,rp(3)	!r[3]=c1;
-	or	c_12,c_3,c_12
-
-	mulx	a_4,b_0,t_1	!mul_add_c(a[4],b[0],c2,c3,c1);
-	addcc	c_12,t_1,c_12	!=
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_3,b_1,t_1	!=!mul_add_c(a[3],b[1],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_2,b_2,t_1	!=!mul_add_c(a[2],b[2],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	bp(4),b_4	!=
-	mulx	a_1,b_3,t_1	!mul_add_c(a[1],b[3],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	lduw	bp(5),b_5
-	mulx	a_0,b_4,t_1	!mul_add_c(a[0],b[4],c2,c3,c1);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(4)	!r[4]=c2;
-	or	c_12,c_3,c_12	!=
-
-	mulx	a_0,b_5,t_1	!mul_add_c(a[0],b[5],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_1,b_4,t_1	!mul_add_c(a[1],b[4],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_2,b_3,t_1	!mul_add_c(a[2],b[3],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_3,b_2,t_1	!mul_add_c(a[3],b[2],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	lduw	ap(5),a_5
-	mulx	a_4,b_1,t_1	!mul_add_c(a[4],b[1],c3,c1,c2);
-	addcc	c_12,t_1,c_12	!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(6),a_6
-	mulx	a_5,b_0,t_1	!=!mul_add_c(a[5],b[0],c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12	!=
-	stuw	t_1,rp(5)	!r[5]=c3;
-	or	c_12,c_3,c_12
-
-	mulx	a_6,b_0,t_1	!mul_add_c(a[6],b[0],c1,c2,c3);
-	addcc	c_12,t_1,c_12	!=
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_5,b_1,t_1	!=!mul_add_c(a[5],b[1],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_4,b_2,t_1	!=!mul_add_c(a[4],b[2],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_3,b_3,t_1	!=!mul_add_c(a[3],b[3],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_2,b_4,t_1	!=!mul_add_c(a[2],b[4],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	bp(6),b_6	!=
-	mulx	a_1,b_5,t_1	!mul_add_c(a[1],b[5],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	lduw	bp(7),b_7
-	mulx	a_0,b_6,t_1	!mul_add_c(a[0],b[6],c1,c2,c3);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(6)	!r[6]=c1;
-	or	c_12,c_3,c_12	!=
-
-	mulx	a_0,b_7,t_1	!mul_add_c(a[0],b[7],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_1,b_6,t_1	!mul_add_c(a[1],b[6],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_2,b_5,t_1	!mul_add_c(a[2],b[5],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_3,b_4,t_1	!mul_add_c(a[3],b[4],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_4,b_3,t_1	!mul_add_c(a[4],b[3],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_5,b_2,t_1	!mul_add_c(a[5],b[2],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	lduw	ap(7),a_7
-	mulx	a_6,b_1,t_1	!=!mul_add_c(a[6],b[1],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_7,b_0,t_1	!=!mul_add_c(a[7],b[0],c2,c3,c1);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12	!=
-	stuw	t_1,rp(7)	!r[7]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_7,b_1,t_1	!=!mul_add_c(a[7],b[1],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	mulx	a_6,b_2,t_1	!mul_add_c(a[6],b[2],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	mulx	a_5,b_3,t_1	!mul_add_c(a[5],b[3],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	mulx	a_4,b_4,t_1	!mul_add_c(a[4],b[4],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	mulx	a_3,b_5,t_1	!mul_add_c(a[3],b[5],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	mulx	a_2,b_6,t_1	!mul_add_c(a[2],b[6],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	mulx	a_1,b_7,t_1	!mul_add_c(a[1],b[7],c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(8)	!r[8]=c3;
-	or	c_12,c_3,c_12
-
-	mulx	a_2,b_7,t_1	!=!mul_add_c(a[2],b[7],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	mulx	a_3,b_6,t_1	!mul_add_c(a[3],b[6],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_4,b_5,t_1	!mul_add_c(a[4],b[5],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_5,b_4,t_1	!mul_add_c(a[5],b[4],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_6,b_3,t_1	!mul_add_c(a[6],b[3],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_7,b_2,t_1	!mul_add_c(a[7],b[2],c1,c2,c3);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(9)	!r[9]=c1;
-	or	c_12,c_3,c_12	!=
-
-	mulx	a_7,b_3,t_1	!mul_add_c(a[7],b[3],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_6,b_4,t_1	!mul_add_c(a[6],b[4],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_5,b_5,t_1	!mul_add_c(a[5],b[5],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_4,b_6,t_1	!mul_add_c(a[4],b[6],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_3,b_7,t_1	!mul_add_c(a[3],b[7],c2,c3,c1);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(10)	!r[10]=c2;
-	or	c_12,c_3,c_12	!=
-
-	mulx	a_4,b_7,t_1	!mul_add_c(a[4],b[7],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_5,b_6,t_1	!mul_add_c(a[5],b[6],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_6,b_5,t_1	!mul_add_c(a[6],b[5],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_7,b_4,t_1	!mul_add_c(a[7],b[4],c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(11)	!r[11]=c3;
-	or	c_12,c_3,c_12	!=
-
-	mulx	a_7,b_5,t_1	!mul_add_c(a[7],b[5],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_6,b_6,t_1	!mul_add_c(a[6],b[6],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_5,b_7,t_1	!mul_add_c(a[5],b[7],c1,c2,c3);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(12)	!r[12]=c1;
-	or	c_12,c_3,c_12	!=
-
-	mulx	a_6,b_7,t_1	!mul_add_c(a[6],b[7],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_7,b_6,t_1	!mul_add_c(a[7],b[6],c2,c3,c1);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	st	t_1,rp(13)	!r[13]=c2;
-	or	c_12,c_3,c_12	!=
-
-	mulx	a_7,b_7,t_1	!mul_add_c(a[7],b[7],c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	srlx	t_1,32,c_12	!=
-	stuw	t_1,rp(14)	!r[14]=c3;
-	stuw	c_12,rp(15)	!r[15]=c1;
-
-	ret
-	restore	%g0,%g0,%o0	!=
-
-.type	bn_mul_comba8,#function
-.size	bn_mul_comba8,(.-bn_mul_comba8)
-
-.align	32
-
-.global bn_mul_comba4
-/*
- * void bn_mul_comba4(r,a,b)
- * BN_ULONG *r,*a,*b;
- */
-bn_mul_comba4:
-	save	%sp,FRAME_SIZE,%sp
-	lduw	ap(0),a_0
-	mov	1,t_2
-	lduw	bp(0),b_0
-	sllx	t_2,32,t_2	!=
-	lduw	bp(1),b_1
-	mulx	a_0,b_0,t_1	!mul_add_c(a[0],b[0],c1,c2,c3);
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(0)	!=!r[0]=c1;
-
-	lduw	ap(1),a_1
-	mulx	a_0,b_1,t_1	!mul_add_c(a[0],b[1],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3		!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(2),a_2
-	mulx	a_1,b_0,t_1	!=!mul_add_c(a[1],b[0],c2,c3,c1);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12	!=
-	stuw	t_1,rp(1)	!r[1]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_2,b_0,t_1	!mul_add_c(a[2],b[0],c3,c1,c2);
-	addcc	c_12,t_1,c_12	!=
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	bp(2),b_2	!=
-	mulx	a_1,b_1,t_1	!mul_add_c(a[1],b[1],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3	!=
-	lduw	bp(3),b_3
-	mulx	a_0,b_2,t_1	!mul_add_c(a[0],b[2],c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(2)	!r[2]=c3;
-	or	c_12,c_3,c_12	!=
-
-	mulx	a_0,b_3,t_1	!mul_add_c(a[0],b[3],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	mulx	a_1,b_2,t_1	!mul_add_c(a[1],b[2],c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8	!=
-	add	c_3,t_2,c_3
-	lduw	ap(3),a_3
-	mulx	a_2,b_1,t_1	!mul_add_c(a[2],b[1],c1,c2,c3);
-	addcc	c_12,t_1,c_12	!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_3,b_0,t_1	!mul_add_c(a[3],b[0],c1,c2,c3);
-	addcc	c_12,t_1,t_1	!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(3)	!=!r[3]=c1;
-	or	c_12,c_3,c_12
-
-	mulx	a_3,b_1,t_1	!mul_add_c(a[3],b[1],c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3		!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_2,b_2,t_1	!mul_add_c(a[2],b[2],c2,c3,c1);
-	addcc	c_12,t_1,c_12	!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_1,b_3,t_1	!mul_add_c(a[1],b[3],c2,c3,c1);
-	addcc	c_12,t_1,t_1	!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(4)	!=!r[4]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_2,b_3,t_1	!mul_add_c(a[2],b[3],c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3		!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_3,b_2,t_1	!mul_add_c(a[3],b[2],c3,c1,c2);
-	addcc	c_12,t_1,t_1	!=
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(5)	!=!r[5]=c3;
-	or	c_12,c_3,c_12
-
-	mulx	a_3,b_3,t_1	!mul_add_c(a[3],b[3],c1,c2,c3);
-	addcc	c_12,t_1,t_1
-	srlx	t_1,32,c_12	!=
-	stuw	t_1,rp(6)	!r[6]=c1;
-	stuw	c_12,rp(7)	!r[7]=c2;
-	
-	ret
-	restore	%g0,%g0,%o0
-
-.type	bn_mul_comba4,#function
-.size	bn_mul_comba4,(.-bn_mul_comba4)
-
-.align	32
-
-.global bn_sqr_comba8
-bn_sqr_comba8:
-	save	%sp,FRAME_SIZE,%sp
-	mov	1,t_2
-	lduw	ap(0),a_0
-	sllx	t_2,32,t_2
-	lduw	ap(1),a_1
-	mulx	a_0,a_0,t_1	!sqr_add_c(a,0,c1,c2,c3);
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(0)	!r[0]=c1;
-
-	lduw	ap(2),a_2
-	mulx	a_0,a_1,t_1	!=!sqr_add_c2(a,1,0,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(1)	!r[1]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_2,a_0,t_1	!sqr_add_c2(a,2,0,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(3),a_3
-	mulx	a_1,a_1,t_1	!sqr_add_c(a,1,c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(2)	!r[2]=c3;
-	or	c_12,c_3,c_12
-
-	mulx	a_0,a_3,t_1	!sqr_add_c2(a,3,0,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(4),a_4
-	mulx	a_1,a_2,t_1	!sqr_add_c2(a,2,1,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	st	t_1,rp(3)	!r[3]=c1;
-	or	c_12,c_3,c_12
-
-	mulx	a_4,a_0,t_1	!sqr_add_c2(a,4,0,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_3,a_1,t_1	!sqr_add_c2(a,3,1,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(5),a_5
-	mulx	a_2,a_2,t_1	!sqr_add_c(a,2,c2,c3,c1);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(4)	!r[4]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_0,a_5,t_1	!sqr_add_c2(a,5,0,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_1,a_4,t_1	!sqr_add_c2(a,4,1,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(6),a_6
-	mulx	a_2,a_3,t_1	!sqr_add_c2(a,3,2,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(5)	!r[5]=c3;
-	or	c_12,c_3,c_12
-
-	mulx	a_6,a_0,t_1	!sqr_add_c2(a,6,0,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_5,a_1,t_1	!sqr_add_c2(a,5,1,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_4,a_2,t_1	!sqr_add_c2(a,4,2,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(7),a_7
-	mulx	a_3,a_3,t_1	!=!sqr_add_c(a,3,c1,c2,c3);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(6)	!r[6]=c1;
-	or	c_12,c_3,c_12
-
-	mulx	a_0,a_7,t_1	!sqr_add_c2(a,7,0,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_1,a_6,t_1	!sqr_add_c2(a,6,1,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_2,a_5,t_1	!sqr_add_c2(a,5,2,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_3,a_4,t_1	!sqr_add_c2(a,4,3,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(7)	!r[7]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_7,a_1,t_1	!sqr_add_c2(a,7,1,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_6,a_2,t_1	!sqr_add_c2(a,6,2,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_5,a_3,t_1	!sqr_add_c2(a,5,3,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_4,a_4,t_1	!sqr_add_c(a,4,c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(8)	!r[8]=c3;
-	or	c_12,c_3,c_12
-
-	mulx	a_2,a_7,t_1	!sqr_add_c2(a,7,2,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_3,a_6,t_1	!sqr_add_c2(a,6,3,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_4,a_5,t_1	!sqr_add_c2(a,5,4,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(9)	!r[9]=c1;
-	or	c_12,c_3,c_12
-
-	mulx	a_7,a_3,t_1	!sqr_add_c2(a,7,3,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_6,a_4,t_1	!sqr_add_c2(a,6,4,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_5,a_5,t_1	!sqr_add_c(a,5,c2,c3,c1);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(10)	!r[10]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_4,a_7,t_1	!sqr_add_c2(a,7,4,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_5,a_6,t_1	!sqr_add_c2(a,6,5,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(11)	!r[11]=c3;
-	or	c_12,c_3,c_12
-
-	mulx	a_7,a_5,t_1	!sqr_add_c2(a,7,5,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_6,a_6,t_1	!sqr_add_c(a,6,c1,c2,c3);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(12)	!r[12]=c1;
-	or	c_12,c_3,c_12
-
-	mulx	a_6,a_7,t_1	!sqr_add_c2(a,7,6,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(13)	!r[13]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_7,a_7,t_1	!sqr_add_c(a,7,c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(14)	!r[14]=c3;
-	stuw	c_12,rp(15)	!r[15]=c1;
-
-	ret
-	restore	%g0,%g0,%o0
-
-.type	bn_sqr_comba8,#function
-.size	bn_sqr_comba8,(.-bn_sqr_comba8)
-
-.align	32
-
-.global bn_sqr_comba4
-/*
- * void bn_sqr_comba4(r,a)
- * BN_ULONG *r,*a;
- */
-bn_sqr_comba4:
-	save	%sp,FRAME_SIZE,%sp
-	mov	1,t_2
-	lduw	ap(0),a_0
-	sllx	t_2,32,t_2
-	lduw	ap(1),a_1
-	mulx	a_0,a_0,t_1	!sqr_add_c(a,0,c1,c2,c3);
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(0)	!r[0]=c1;
-
-	lduw	ap(2),a_2
-	mulx	a_0,a_1,t_1	!sqr_add_c2(a,1,0,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(1)	!r[1]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_2,a_0,t_1	!sqr_add_c2(a,2,0,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	lduw	ap(3),a_3
-	mulx	a_1,a_1,t_1	!sqr_add_c(a,1,c3,c1,c2);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(2)	!r[2]=c3;
-	or	c_12,c_3,c_12
-
-	mulx	a_0,a_3,t_1	!sqr_add_c2(a,3,0,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_1,a_2,t_1	!sqr_add_c2(a,2,1,c1,c2,c3);
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(3)	!r[3]=c1;
-	or	c_12,c_3,c_12
-
-	mulx	a_3,a_1,t_1	!sqr_add_c2(a,3,1,c2,c3,c1);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,c_12
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	mulx	a_2,a_2,t_1	!sqr_add_c(a,2,c2,c3,c1);
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(4)	!r[4]=c2;
-	or	c_12,c_3,c_12
-
-	mulx	a_2,a_3,t_1	!sqr_add_c2(a,3,2,c3,c1,c2);
-	addcc	c_12,t_1,c_12
-	clr	c_3
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	addcc	c_12,t_1,t_1
-	bcs,a	%xcc,.+8
-	add	c_3,t_2,c_3
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(5)	!r[5]=c3;
-	or	c_12,c_3,c_12
-
-	mulx	a_3,a_3,t_1	!sqr_add_c(a,3,c1,c2,c3);
-	addcc	c_12,t_1,t_1
-	srlx	t_1,32,c_12
-	stuw	t_1,rp(6)	!r[6]=c1;
-	stuw	c_12,rp(7)	!r[7]=c2;
-	
-	ret
-	restore	%g0,%g0,%o0
-
-.type	bn_sqr_comba4,#function
-.size	bn_sqr_comba4,(.-bn_sqr_comba4)
-
-.align	32

+ 0 - 606
drivers/builtin_openssl2/crypto/bn/asm/sparcv9-mont.pl

@@ -1,606 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# December 2005
-#
-# Pure SPARCv9/8+ and IALU-only bn_mul_mont implementation. The reasons
-# for the undertaken effort are multiple. First of all, UltraSPARC is
-# not the whole SPARCv9 universe and other VIS-free implementations
-# deserve optimized code as much. Secondly, the newly introduced
-# UltraSPARC T1, a.k.a. Niagara, has a shared FPU, and concurrent
-# FPU-intensive paths, such as sparcv9a-mont, will simply sink it. Yes,
-# T1 is equipped with several integrated RSA/DSA accelerator circuits
-# accessible through a kernel driver [only(*)], but having a decent
-# user-land software implementation is important too. Finally, there
-# was the desire to experiment with a dedicated squaring procedure.
-# Yes, this module implements one, because it was easiest to draft in
-# SPARCv9 instructions...
-
-# (*)	An engine accessing the driver in question is on my TODO list.
-#	For reference, the accelerator is estimated to give a 6 to 10
-#	times improvement on single-threaded RSA sign. It should be
-#	noted that a 6-10x improvement coefficient does not actually
-#	mean something extraordinary in terms of absolute
-#	[single-threaded] performance, as the SPARCv9 instruction set
-#	is by all means the least suitable for high-performance crypto
-#	among 64-bit platforms. A 6-10x factor simply places T1 in the
-#	same performance domain as, say, AMD64 and IA-64. The
-#	improvement in RSA verify doesn't appear impressive at all, but
-#	it's the sign operation which is far more critical/interesting.
-
-# You might notice that the inner loops are modulo-scheduled:-) This
-# has an essentially negligible impact on UltraSPARC performance; it's
-# Fujitsu SPARC64 V users who should notice and hopefully appreciate
-# the advantage... Currently this module surpasses sparcv9a-mont.pl
-# by ~20% on UltraSPARC-III and later cores, but recall that the
-# sparcv9a module still has hidden potential [see the TODO list there],
-# which is estimated to be larger than 20%...
-
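-# In C terms, each pass through the .L1st/.Linner loops below performs
-# the classic word-serial Montgomery step t = (t + ap[]*bp[i] +
-# np[]*m) / 2^32. A rough 32-bit sketch (u32/u64 and the variable
-# names are ad hoc, not OpenSSL's):
-#
-#	m = ((tp[0] + ap[0]*bp[i]) * n0) & 0xffffffff;
-#	c0 = c1 = 0;
-#	for (j = 0; j < num; j++) {
-#		t0 = (u64)ap[j]*bp[i] + tp[j] + c0;  c0 = t0 >> 32;
-#		t1 = (u64)np[j]*m + (u32)t0 + c1;    c1 = t1 >> 32;
-#		if (j) tp[j-1] = (u32)t1;   # (u32)t1 == 0 when j == 0
-#	}
-#	t = c0 + c1;  tp[num-1] = (u32)t;  carry = t >> 32;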
-# int bn_mul_mont(
-$rp="%i0";	# BN_ULONG *rp,
-$ap="%i1";	# const BN_ULONG *ap,
-$bp="%i2";	# const BN_ULONG *bp,
-$np="%i3";	# const BN_ULONG *np,
-$n0="%i4";	# const BN_ULONG *n0,
-$num="%i5";	# int num);
-
-$bits=32;
-for (@ARGV)	{ $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
-if ($bits==64)	{ $bias=2047; $frame=192; }
-else		{ $bias=0;    $frame=128; }
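-# (The 64-bit SPARC ABI biases the stack pointer: %sp points 2047
-# bytes below the real top of the frame, which is why the 64-bit path
-# sets $bias=2047 and every direct stack reference below adds it back
-# in; the 32-bit path needs no bias and a smaller 128-byte frame.)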
-
-$car0="%o0";
-$car1="%o1";
-$car2="%o2";	# 1 bit
-$acc0="%o3";
-$acc1="%o4";
-$mask="%g1";	# 32 bits, what a waste...
-$tmp0="%g4";
-$tmp1="%g5";
-
-$i="%l0";
-$j="%l1";
-$mul0="%l2";
-$mul1="%l3";
-$tp="%l4";
-$apj="%l5";
-$npj="%l6";
-$tpj="%l7";
-
-$fname="bn_mul_mont_int";
-
-$code=<<___;
-.section	".text",#alloc,#execinstr
-
-.global	$fname
-.align	32
-$fname:
-	cmp	%o5,4			! 128 bits minimum
-	bge,pt	%icc,.Lenter
-	sethi	%hi(0xffffffff),$mask
-	retl
-	clr	%o0
-.align	32
-.Lenter:
-	save	%sp,-$frame,%sp
-	sll	$num,2,$num		! num*=4
-	or	$mask,%lo(0xffffffff),$mask
-	ld	[$n0],$n0
-	cmp	$ap,$bp
-	and	$num,$mask,$num
-	ld	[$bp],$mul0		! bp[0]
-	nop
-
-	add	%sp,$bias,%o7		! real top of stack
-	ld	[$ap],$car0		! ap[0] ! redundant in squaring context
-	sub	%o7,$num,%o7
-	ld	[$ap+4],$apj		! ap[1]
-	and	%o7,-1024,%o7
-	ld	[$np],$car1		! np[0]
-	sub	%o7,$bias,%sp		! alloca
-	ld	[$np+4],$npj		! np[1]
-	be,pt	`$bits==32?"%icc":"%xcc"`,.Lbn_sqr_mont
-	mov	12,$j
-
-	mulx	$car0,$mul0,$car0	! ap[0]*bp[0]
-	mulx	$apj,$mul0,$tmp0	!prologue! ap[1]*bp[0]
-	and	$car0,$mask,$acc0
-	add	%sp,$bias+$frame,$tp
-	ld	[$ap+8],$apj		!prologue!
-
-	mulx	$n0,$acc0,$mul1		! "t[0]"*n0
-	and	$mul1,$mask,$mul1
-
-	mulx	$car1,$mul1,$car1	! np[0]*"t[0]"*n0
-	mulx	$npj,$mul1,$acc1	!prologue! np[1]*"t[0]"*n0
-	srlx	$car0,32,$car0
-	add	$acc0,$car1,$car1
-	ld	[$np+8],$npj		!prologue!
-	srlx	$car1,32,$car1
-	mov	$tmp0,$acc0		!prologue!
-
-.L1st:
-	mulx	$apj,$mul0,$tmp0
-	mulx	$npj,$mul1,$tmp1
-	add	$acc0,$car0,$car0
-	ld	[$ap+$j],$apj		! ap[j]
-	and	$car0,$mask,$acc0
-	add	$acc1,$car1,$car1
-	ld	[$np+$j],$npj		! np[j]
-	srlx	$car0,32,$car0
-	add	$acc0,$car1,$car1
-	add	$j,4,$j			! j++
-	mov	$tmp0,$acc0
-	st	$car1,[$tp]
-	cmp	$j,$num
-	mov	$tmp1,$acc1
-	srlx	$car1,32,$car1
-	bl	%icc,.L1st
-	add	$tp,4,$tp		! tp++
-!.L1st
-
-	mulx	$apj,$mul0,$tmp0	!epilogue!
-	mulx	$npj,$mul1,$tmp1
-	add	$acc0,$car0,$car0
-	and	$car0,$mask,$acc0
-	add	$acc1,$car1,$car1
-	srlx	$car0,32,$car0
-	add	$acc0,$car1,$car1
-	st	$car1,[$tp]
-	srlx	$car1,32,$car1
-
-	add	$tmp0,$car0,$car0
-	and	$car0,$mask,$acc0
-	add	$tmp1,$car1,$car1
-	srlx	$car0,32,$car0
-	add	$acc0,$car1,$car1
-	st	$car1,[$tp+4]
-	srlx	$car1,32,$car1
-
-	add	$car0,$car1,$car1
-	st	$car1,[$tp+8]
-	srlx	$car1,32,$car2
-
-	mov	4,$i			! i++
-	ld	[$bp+4],$mul0		! bp[1]
-.Louter:
-	add	%sp,$bias+$frame,$tp
-	ld	[$ap],$car0		! ap[0]
-	ld	[$ap+4],$apj		! ap[1]
-	ld	[$np],$car1		! np[0]
-	ld	[$np+4],$npj		! np[1]
-	ld	[$tp],$tmp1		! tp[0]
-	ld	[$tp+4],$tpj		! tp[1]
-	mov	12,$j
-
-	mulx	$car0,$mul0,$car0
-	mulx	$apj,$mul0,$tmp0	!prologue!
-	add	$tmp1,$car0,$car0
-	ld	[$ap+8],$apj		!prologue!
-	and	$car0,$mask,$acc0
-
-	mulx	$n0,$acc0,$mul1
-	and	$mul1,$mask,$mul1
-
-	mulx	$car1,$mul1,$car1
-	mulx	$npj,$mul1,$acc1	!prologue!
-	srlx	$car0,32,$car0
-	add	$acc0,$car1,$car1
-	ld	[$np+8],$npj		!prologue!
-	srlx	$car1,32,$car1
-	mov	$tmp0,$acc0		!prologue!
-
-.Linner:
-	mulx	$apj,$mul0,$tmp0
-	mulx	$npj,$mul1,$tmp1
-	add	$tpj,$car0,$car0
-	ld	[$ap+$j],$apj		! ap[j]
-	add	$acc0,$car0,$car0
-	add	$acc1,$car1,$car1
-	ld	[$np+$j],$npj		! np[j]
-	and	$car0,$mask,$acc0
-	ld	[$tp+8],$tpj		! tp[j]
-	srlx	$car0,32,$car0
-	add	$acc0,$car1,$car1
-	add	$j,4,$j			! j++
-	mov	$tmp0,$acc0
-	st	$car1,[$tp]		! tp[j-1]
-	srlx	$car1,32,$car1
-	mov	$tmp1,$acc1
-	cmp	$j,$num
-	bl	%icc,.Linner
-	add	$tp,4,$tp		! tp++
-!.Linner
-
-	mulx	$apj,$mul0,$tmp0	!epilogue!
-	mulx	$npj,$mul1,$tmp1
-	add	$tpj,$car0,$car0
-	add	$acc0,$car0,$car0
-	ld	[$tp+8],$tpj		! tp[j]
-	and	$car0,$mask,$acc0
-	add	$acc1,$car1,$car1
-	srlx	$car0,32,$car0
-	add	$acc0,$car1,$car1
-	st	$car1,[$tp]		! tp[j-1]
-	srlx	$car1,32,$car1
-
-	add	$tpj,$car0,$car0
-	add	$tmp0,$car0,$car0
-	and	$car0,$mask,$acc0
-	add	$tmp1,$car1,$car1
-	add	$acc0,$car1,$car1
-	st	$car1,[$tp+4]		! tp[j-1]
-	srlx	$car0,32,$car0
-	add	$i,4,$i			! i++
-	srlx	$car1,32,$car1
-
-	add	$car0,$car1,$car1
-	cmp	$i,$num
-	add	$car2,$car1,$car1
-	st	$car1,[$tp+8]
-
-	srlx	$car1,32,$car2
-	bl,a	%icc,.Louter
-	ld	[$bp+$i],$mul0		! bp[i]
-!.Louter
-
-	add	$tp,12,$tp
-
-.Ltail:
-	add	$np,$num,$np
-	add	$rp,$num,$rp
-	mov	$tp,$ap
-	sub	%g0,$num,%o7		! k=-num
-	ba	.Lsub
-	subcc	%g0,%g0,%g0		! clear %icc.c
-.align	16
-.Lsub:
-	ld	[$tp+%o7],%o0
-	ld	[$np+%o7],%o1
-	subccc	%o0,%o1,%o1		! tp[j]-np[j]
-	add	$rp,%o7,$i
-	add	%o7,4,%o7
-	brnz	%o7,.Lsub
-	st	%o1,[$i]
-	subc	$car2,0,$car2		! handle upmost overflow bit
-	and	$tp,$car2,$ap
-	andn	$rp,$car2,$np
-	or	$ap,$np,$ap
-	sub	%g0,$num,%o7
-
-.Lcopy:
-	ld	[$ap+%o7],%o0		! copy or in-place refresh
-	st	%g0,[$tp+%o7]		! zap tp
-	st	%o0,[$rp+%o7]
-	add	%o7,4,%o7
-	brnz	%o7,.Lcopy
-	nop
-	mov	1,%i0
-	ret
-	restore
-___
-
-########
-######## .Lbn_sqr_mont gives up to 20% *overall* improvement over
-######## code without the dedicated squaring procedure that follows.
-########
-$sbit="%i2";		# re-use $bp!
-
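-# Squaring wins because each cross product ap[i]*ap[j], i > j, is
-# computed once and doubled instead of being computed twice. In the
-# generated loops the doubling is the 'add acc0,acc0 / or sbit' pair:
-# the 32-bit partial is shifted left one bit and the bit shifted out
-# is carried over in $sbit to the next word. Very roughly (a sketch,
-# not the modulo-scheduled code; u64 is ad hoc):
-#
-#	x = (u64)ap[j]*mul0 + c0;         c0   = x >> 32;
-#	x = 2*(x & 0xffffffff) | sbit;    sbit = x >> 32;
-#	c1 += (x & 0xffffffff) + (u64)np[j]*mul1;  ...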
-$code.=<<___;
-.align	32
-.Lbn_sqr_mont:
-	mulx	$mul0,$mul0,$car0		! ap[0]*ap[0]
-	mulx	$apj,$mul0,$tmp0		!prologue!
-	and	$car0,$mask,$acc0
-	add	%sp,$bias+$frame,$tp
-	ld	[$ap+8],$apj			!prologue!
-
-	mulx	$n0,$acc0,$mul1			! "t[0]"*n0
-	srlx	$car0,32,$car0
-	and	$mul1,$mask,$mul1
-
-	mulx	$car1,$mul1,$car1		! np[0]*"t[0]"*n0
-	mulx	$npj,$mul1,$acc1		!prologue!
-	and	$car0,1,$sbit
-	ld	[$np+8],$npj			!prologue!
-	srlx	$car0,1,$car0
-	add	$acc0,$car1,$car1
-	srlx	$car1,32,$car1
-	mov	$tmp0,$acc0			!prologue!
-
-.Lsqr_1st:
-	mulx	$apj,$mul0,$tmp0
-	mulx	$npj,$mul1,$tmp1
-	add	$acc0,$car0,$car0		! ap[j]*a0+c0
-	add	$acc1,$car1,$car1
-	ld	[$ap+$j],$apj			! ap[j]
-	and	$car0,$mask,$acc0
-	ld	[$np+$j],$npj			! np[j]
-	srlx	$car0,32,$car0
-	add	$acc0,$acc0,$acc0
-	or	$sbit,$acc0,$acc0
-	mov	$tmp1,$acc1
-	srlx	$acc0,32,$sbit
-	add	$j,4,$j				! j++
-	and	$acc0,$mask,$acc0
-	cmp	$j,$num
-	add	$acc0,$car1,$car1
-	st	$car1,[$tp]
-	mov	$tmp0,$acc0
-	srlx	$car1,32,$car1
-	bl	%icc,.Lsqr_1st
-	add	$tp,4,$tp			! tp++
-!.Lsqr_1st
-
-	mulx	$apj,$mul0,$tmp0		! epilogue
-	mulx	$npj,$mul1,$tmp1
-	add	$acc0,$car0,$car0		! ap[j]*a0+c0
-	add	$acc1,$car1,$car1
-	and	$car0,$mask,$acc0
-	srlx	$car0,32,$car0
-	add	$acc0,$acc0,$acc0
-	or	$sbit,$acc0,$acc0
-	srlx	$acc0,32,$sbit
-	and	$acc0,$mask,$acc0
-	add	$acc0,$car1,$car1
-	st	$car1,[$tp]
-	srlx	$car1,32,$car1
-
-	add	$tmp0,$car0,$car0		! ap[j]*a0+c0
-	add	$tmp1,$car1,$car1
-	and	$car0,$mask,$acc0
-	srlx	$car0,32,$car0
-	add	$acc0,$acc0,$acc0
-	or	$sbit,$acc0,$acc0
-	srlx	$acc0,32,$sbit
-	and	$acc0,$mask,$acc0
-	add	$acc0,$car1,$car1
-	st	$car1,[$tp+4]
-	srlx	$car1,32,$car1
-
-	add	$car0,$car0,$car0
-	or	$sbit,$car0,$car0
-	add	$car0,$car1,$car1
-	st	$car1,[$tp+8]
-	srlx	$car1,32,$car2
-
-	ld	[%sp+$bias+$frame],$tmp0	! tp[0]
-	ld	[%sp+$bias+$frame+4],$tmp1	! tp[1]
-	ld	[%sp+$bias+$frame+8],$tpj	! tp[2]
-	ld	[$ap+4],$mul0			! ap[1]
-	ld	[$ap+8],$apj			! ap[2]
-	ld	[$np],$car1			! np[0]
-	ld	[$np+4],$npj			! np[1]
-	mulx	$n0,$tmp0,$mul1
-
-	mulx	$mul0,$mul0,$car0
-	and	$mul1,$mask,$mul1
-
-	mulx	$car1,$mul1,$car1
-	mulx	$npj,$mul1,$acc1
-	add	$tmp0,$car1,$car1
-	and	$car0,$mask,$acc0
-	ld	[$np+8],$npj			! np[2]
-	srlx	$car1,32,$car1
-	add	$tmp1,$car1,$car1
-	srlx	$car0,32,$car0
-	add	$acc0,$car1,$car1
-	and	$car0,1,$sbit
-	add	$acc1,$car1,$car1
-	srlx	$car0,1,$car0
-	mov	12,$j
-	st	$car1,[%sp+$bias+$frame]	! tp[0]=
-	srlx	$car1,32,$car1
-	add	%sp,$bias+$frame+4,$tp
-
-.Lsqr_2nd:
-	mulx	$apj,$mul0,$acc0
-	mulx	$npj,$mul1,$acc1
-	add	$acc0,$car0,$car0
-	add	$tpj,$car1,$car1
-	ld	[$ap+$j],$apj			! ap[j]
-	and	$car0,$mask,$acc0
-	ld	[$np+$j],$npj			! np[j]
-	srlx	$car0,32,$car0
-	add	$acc1,$car1,$car1
-	ld	[$tp+8],$tpj			! tp[j]
-	add	$acc0,$acc0,$acc0
-	add	$j,4,$j				! j++
-	or	$sbit,$acc0,$acc0
-	srlx	$acc0,32,$sbit
-	and	$acc0,$mask,$acc0
-	cmp	$j,$num
-	add	$acc0,$car1,$car1
-	st	$car1,[$tp]			! tp[j-1]
-	srlx	$car1,32,$car1
-	bl	%icc,.Lsqr_2nd
-	add	$tp,4,$tp			! tp++
-!.Lsqr_2nd
-
-	mulx	$apj,$mul0,$acc0
-	mulx	$npj,$mul1,$acc1
-	add	$acc0,$car0,$car0
-	add	$tpj,$car1,$car1
-	and	$car0,$mask,$acc0
-	srlx	$car0,32,$car0
-	add	$acc1,$car1,$car1
-	add	$acc0,$acc0,$acc0
-	or	$sbit,$acc0,$acc0
-	srlx	$acc0,32,$sbit
-	and	$acc0,$mask,$acc0
-	add	$acc0,$car1,$car1
-	st	$car1,[$tp]			! tp[j-1]
-	srlx	$car1,32,$car1
-
-	add	$car0,$car0,$car0
-	or	$sbit,$car0,$car0
-	add	$car0,$car1,$car1
-	add	$car2,$car1,$car1
-	st	$car1,[$tp+4]
-	srlx	$car1,32,$car2
-
-	ld	[%sp+$bias+$frame],$tmp1	! tp[0]
-	ld	[%sp+$bias+$frame+4],$tpj	! tp[1]
-	ld	[$ap+8],$mul0			! ap[2]
-	ld	[$np],$car1			! np[0]
-	ld	[$np+4],$npj			! np[1]
-	mulx	$n0,$tmp1,$mul1
-	and	$mul1,$mask,$mul1
-	mov	8,$i
-
-	mulx	$mul0,$mul0,$car0
-	mulx	$car1,$mul1,$car1
-	and	$car0,$mask,$acc0
-	add	$tmp1,$car1,$car1
-	srlx	$car0,32,$car0
-	add	%sp,$bias+$frame,$tp
-	srlx	$car1,32,$car1
-	and	$car0,1,$sbit
-	srlx	$car0,1,$car0
-	mov	4,$j
-
-.Lsqr_outer:
-.Lsqr_inner1:
-	mulx	$npj,$mul1,$acc1
-	add	$tpj,$car1,$car1
-	add	$j,4,$j
-	ld	[$tp+8],$tpj
-	cmp	$j,$i
-	add	$acc1,$car1,$car1
-	ld	[$np+$j],$npj
-	st	$car1,[$tp]
-	srlx	$car1,32,$car1
-	bl	%icc,.Lsqr_inner1
-	add	$tp,4,$tp
-!.Lsqr_inner1
-
-	add	$j,4,$j
-	ld	[$ap+$j],$apj			! ap[j]
-	mulx	$npj,$mul1,$acc1
-	add	$tpj,$car1,$car1
-	ld	[$np+$j],$npj			! np[j]
-	add	$acc0,$car1,$car1
-	ld	[$tp+8],$tpj			! tp[j]
-	add	$acc1,$car1,$car1
-	st	$car1,[$tp]
-	srlx	$car1,32,$car1
-
-	add	$j,4,$j
-	cmp	$j,$num
-	be,pn	%icc,.Lsqr_no_inner2
-	add	$tp,4,$tp
-
-.Lsqr_inner2:
-	mulx	$apj,$mul0,$acc0
-	mulx	$npj,$mul1,$acc1
-	add	$tpj,$car1,$car1
-	add	$acc0,$car0,$car0
-	ld	[$ap+$j],$apj			! ap[j]
-	and	$car0,$mask,$acc0
-	ld	[$np+$j],$npj			! np[j]
-	srlx	$car0,32,$car0
-	add	$acc0,$acc0,$acc0
-	ld	[$tp+8],$tpj			! tp[j]
-	or	$sbit,$acc0,$acc0
-	add	$j,4,$j				! j++
-	srlx	$acc0,32,$sbit
-	and	$acc0,$mask,$acc0
-	cmp	$j,$num
-	add	$acc0,$car1,$car1
-	add	$acc1,$car1,$car1
-	st	$car1,[$tp]			! tp[j-1]
-	srlx	$car1,32,$car1
-	bl	%icc,.Lsqr_inner2
-	add	$tp,4,$tp			! tp++
-
-.Lsqr_no_inner2:
-	mulx	$apj,$mul0,$acc0
-	mulx	$npj,$mul1,$acc1
-	add	$tpj,$car1,$car1
-	add	$acc0,$car0,$car0
-	and	$car0,$mask,$acc0
-	srlx	$car0,32,$car0
-	add	$acc0,$acc0,$acc0
-	or	$sbit,$acc0,$acc0
-	srlx	$acc0,32,$sbit
-	and	$acc0,$mask,$acc0
-	add	$acc0,$car1,$car1
-	add	$acc1,$car1,$car1
-	st	$car1,[$tp]			! tp[j-1]
-	srlx	$car1,32,$car1
-
-	add	$car0,$car0,$car0
-	or	$sbit,$car0,$car0
-	add	$car0,$car1,$car1
-	add	$car2,$car1,$car1
-	st	$car1,[$tp+4]
-	srlx	$car1,32,$car2
-
-	add	$i,4,$i				! i++
-	ld	[%sp+$bias+$frame],$tmp1	! tp[0]
-	ld	[%sp+$bias+$frame+4],$tpj	! tp[1]
-	ld	[$ap+$i],$mul0			! ap[j]
-	ld	[$np],$car1			! np[0]
-	ld	[$np+4],$npj			! np[1]
-	mulx	$n0,$tmp1,$mul1
-	and	$mul1,$mask,$mul1
-	add	$i,4,$tmp0
-
-	mulx	$mul0,$mul0,$car0
-	mulx	$car1,$mul1,$car1
-	and	$car0,$mask,$acc0
-	add	$tmp1,$car1,$car1
-	srlx	$car0,32,$car0
-	add	%sp,$bias+$frame,$tp
-	srlx	$car1,32,$car1
-	and	$car0,1,$sbit
-	srlx	$car0,1,$car0
-
-	cmp	$tmp0,$num			! i<num-1
-	bl	%icc,.Lsqr_outer
-	mov	4,$j
-
-.Lsqr_last:
-	mulx	$npj,$mul1,$acc1
-	add	$tpj,$car1,$car1
-	add	$j,4,$j
-	ld	[$tp+8],$tpj
-	cmp	$j,$i
-	add	$acc1,$car1,$car1
-	ld	[$np+$j],$npj
-	st	$car1,[$tp]
-	srlx	$car1,32,$car1
-	bl	%icc,.Lsqr_last
-	add	$tp,4,$tp
-!.Lsqr_last
-
-	mulx	$npj,$mul1,$acc1
-	add	$tpj,$car1,$car1
-	add	$acc0,$car1,$car1
-	add	$acc1,$car1,$car1
-	st	$car1,[$tp]
-	srlx	$car1,32,$car1
-
-	add	$car0,$car0,$car0		! recover $car0
-	or	$sbit,$car0,$car0
-	add	$car0,$car1,$car1
-	add	$car2,$car1,$car1
-	st	$car1,[$tp+4]
-	srlx	$car1,32,$car2
-
-	ba	.Ltail
-	add	$tp,8,$tp
-.type	$fname,#function
-.size	$fname,(.-$fname)
-.asciz	"Montgomery Multiplication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
-.align	32
-___
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-print $code;
-close STDOUT;

+ 0 - 882
drivers/builtin_openssl2/crypto/bn/asm/sparcv9a-mont.pl

@@ -1,882 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# October 2005
-#
-# "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
-# Because unlike integer multiplier, which simply stalls whole CPU,
-# FPU is fully pipelined and can effectively emit 48 bit partial
-# product every cycle. Why not blended SPARC v9? One can argue that
-# making this module dependent on UltraSPARC VIS extension limits its
-# binary compatibility. Well yes, it does exclude SPARC64 prior-V(!)
-# implementations from compatibility matrix. But the rest, whole Sun
-# UltraSPARC family and brand new Fujitsu's SPARC64 V, all support
-# VIS extension instructions used in this module. This is considered
-# good enough to not care about HAL SPARC64 users [if any] who have
-# integer-only pure SPARCv9 module to "fall down" to.
-
-# USI&II cores currently exhibit uniform 2x improvement [over pre-
-# bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
-# performance improves few percents for shorter keys and worsens few
-# percents for longer keys. This is because USIII integer multiplier
-# is >3x faster than USI&II one, which is harder to match [but see
-# TODO list below]. It should also be noted that SPARC64 V features
-# out-of-order execution, which *might* mean that integer multiplier
-# is pipelined, which in turn *might* be impossible to match... On
-# additional note, SPARC64 V implements FP Multiply-Add instruction,
-# which is perfectly usable in this context... In other words, as far
-# as Fujitsu SPARC64 V goes, talk to the author:-)
-
-# The implementation implies the following "non-natural" limitations
-# on input arguments:
-# - num may not be less than 4;
-# - num has to be even;
-# Failure to meet either condition has no fatal effects; it simply
-# doesn't give any performance gain.
-
-# TODO:
-# - modulo-schedule inner loop for better performance (on in-order
-#   execution core such as UltraSPARC this shall result in further
-#   noticeable(!) improvement);
-# - dedicated squaring procedure[?];
-
-######################################################################
-# November 2006
-#
-# Modulo-scheduled inner loops allow interleaving of floating point
-# and integer instructions and minimize Read-After-Write penalties.
-# This results in a *further* 20-50% performance improvement
-# [depending on key length, more for longer keys] on USI&II cores and
-# 30-80% on USIII&IV.
-
-$fname="bn_mul_mont_fpu";
-$bits=32;
-for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
-
-if ($bits==64) {
-	$bias=2047;
-	$frame=192;
-} else {
-	$bias=0;
-	$frame=128;	# 96 rounded up to largest known cache-line
-}
-$locals=64;
-
-# In order to provide for 32-/64-bit ABI duality, I keep integers wider
-# than 32 bits in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used
-# exclusively for pointers, indices and other small values...
-# int bn_mul_mont(
-$rp="%i0";	# BN_ULONG *rp,
-$ap="%i1";	# const BN_ULONG *ap,
-$bp="%i2";	# const BN_ULONG *bp,
-$np="%i3";	# const BN_ULONG *np,
-$n0="%i4";	# const BN_ULONG *n0,
-$num="%i5";	# int num);
-
-$tp="%l0";	# t[num]
-$ap_l="%l1";	# a[num],n[num] are smashed to 32-bit words and saved
-$ap_h="%l2";	# to these four vectors as double-precision FP values.
-$np_l="%l3";	# This way a bunch of fxtods are eliminated in second
-$np_h="%l4";	# loop and L1-cache aliasing is minimized...
-$i="%l5";
-$j="%l6";
-$mask="%l7";	# 16-bit mask, 0xffff
-
-$n0="%g4";	# reassigned(!) to "64-bit" register
-$carry="%i4";	# %i4 reused(!) for a carry bit
-
-# FP register naming chart
-#
-#     ..HILO
-#       dcba
-#   --------
-#        LOa
-#       LOb
-#      LOc
-#     LOd
-#      HIa
-#     HIb
-#    HIc
-#   HId
-#    ..a
-#   ..b
-$ba="%f0";    $bb="%f2";    $bc="%f4";    $bd="%f6";
-$na="%f8";    $nb="%f10";   $nc="%f12";   $nd="%f14";
-$alo="%f16";  $alo_="%f17"; $ahi="%f18";  $ahi_="%f19";
-$nlo="%f20";  $nlo_="%f21"; $nhi="%f22";  $nhi_="%f23";
-
-$dota="%f24"; $dotb="%f26";
-
-$aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38";
-$ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46";
-$nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54";
-$nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62";
-
-$ASI_FL16_P=0xD2;	# magic ASI value to engage 16-bit FP load
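
The choice of 16-bit digits (the 0xffff mask and the ASI_FL16_P loads) is what makes the FP path exact: a 32x16-bit partial product stays below 2^48, and the faddd accumulation keeps the sums within the 53-bit mantissa of an IEEE double, so no rounding ever occurs. A tiny illustrative check of that headroom (not part of the module):

    #include <stdint.h>

    /* Headroom behind the 16-bit-digit trick: one 32x16-bit partial
     * product is below 2^48, and even a few dozen of them summed
     * together stay below 2^53, the largest range an IEEE double
     * represents exactly. */
    static int double_mantissa_headroom(void)
    {
        double word    = 4294967295.0;         /* 2^32 - 1, via fxtod   */
        double digit   = 65535.0;              /* 2^16 - 1, 16-bit load */
        double partial = word * digit;         /* < 2^48, hence exact   */
        double two_p53 = 9007199254740992.0;   /* 2^53                  */
        return partial * 32.0 < two_p53;       /* room to accumulate    */
    }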
-
-$code=<<___;
-.section	".text",#alloc,#execinstr
-
-.global $fname
-.align  32
-$fname:
-	save	%sp,-$frame-$locals,%sp
-
-	cmp	$num,4
-	bl,a,pn %icc,.Lret
-	clr	%i0
-	andcc	$num,1,%g0		! $num has to be even...
-	bnz,a,pn %icc,.Lret
-	clr	%i0			! signal "unsupported input value"
-
-	srl	$num,1,$num
-	sethi	%hi(0xffff),$mask
-	ld	[%i4+0],$n0		! $n0 reassigned, remember?
-	or	$mask,%lo(0xffff),$mask
-	ld	[%i4+4],%o0
-	sllx	%o0,32,%o0
-	or	%o0,$n0,$n0		! $n0=n0[1].n0[0]
-
-	sll	$num,3,$num		! num*=8
-
-	add	%sp,$bias,%o0		! real top of stack
-	sll	$num,2,%o1
-	add	%o1,$num,%o1		! %o1=num*5
-	sub	%o0,%o1,%o0
-	and	%o0,-2048,%o0		! optimize TLB utilization
-	sub	%o0,$bias,%sp		! alloca(5*num*8)
-
-	rd	%asi,%o7		! save %asi
-	add	%sp,$bias+$frame+$locals,$tp
-	add	$tp,$num,$ap_l
-	add	$ap_l,$num,$ap_l	! [an]p_[lh] point at the vectors' ends !
-	add	$ap_l,$num,$ap_h
-	add	$ap_h,$num,$np_l
-	add	$np_l,$num,$np_h
-
-	wr	%g0,$ASI_FL16_P,%asi	! setup %asi for 16-bit FP loads
-
-	add	$rp,$num,$rp		! readjust input pointers to point
-	add	$ap,$num,$ap		! at the ends too...
-	add	$bp,$num,$bp
-	add	$np,$num,$np
-
-	stx	%o7,[%sp+$bias+$frame+48]	! save %asi
-
-	sub	%g0,$num,$i		! i=-num
-	sub	%g0,$num,$j		! j=-num
-
-	add	$ap,$j,%o3
-	add	$bp,$i,%o4
-
-	ld	[%o3+4],%g1		! bp[0]
-	ld	[%o3+0],%o0
-	ld	[%o4+4],%g5		! ap[0]
-	sllx	%g1,32,%g1
-	ld	[%o4+0],%o1
-	sllx	%g5,32,%g5
-	or	%g1,%o0,%o0
-	or	%g5,%o1,%o1
-
-	add	$np,$j,%o5
-
-	mulx	%o1,%o0,%o0		! ap[0]*bp[0]
-	mulx	$n0,%o0,%o0		! ap[0]*bp[0]*n0
-	stx	%o0,[%sp+$bias+$frame+0]
-
-	ld	[%o3+0],$alo_	! load a[j] as pair of 32-bit words
-	fzeros	$alo
-	ld	[%o3+4],$ahi_
-	fzeros	$ahi
-	ld	[%o5+0],$nlo_	! load n[j] as pair of 32-bit words
-	fzeros	$nlo
-	ld	[%o5+4],$nhi_
-	fzeros	$nhi
-
-	! transfer b[i] to FPU as 4x16-bit values
-	ldda	[%o4+2]%asi,$ba
-	fxtod	$alo,$alo
-	ldda	[%o4+0]%asi,$bb
-	fxtod	$ahi,$ahi
-	ldda	[%o4+6]%asi,$bc
-	fxtod	$nlo,$nlo
-	ldda	[%o4+4]%asi,$bd
-	fxtod	$nhi,$nhi
-
-	! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
-	ldda	[%sp+$bias+$frame+6]%asi,$na
-	fxtod	$ba,$ba
-	ldda	[%sp+$bias+$frame+4]%asi,$nb
-	fxtod	$bb,$bb
-	ldda	[%sp+$bias+$frame+2]%asi,$nc
-	fxtod	$bc,$bc
-	ldda	[%sp+$bias+$frame+0]%asi,$nd
-	fxtod	$bd,$bd
-
-	std	$alo,[$ap_l+$j]		! save smashed ap[j] in double format
-	fxtod	$na,$na
-	std	$ahi,[$ap_h+$j]
-	fxtod	$nb,$nb
-	std	$nlo,[$np_l+$j]		! save smashed np[j] in double format
-	fxtod	$nc,$nc
-	std	$nhi,[$np_h+$j]
-	fxtod	$nd,$nd
-
-		fmuld	$alo,$ba,$aloa
-		fmuld	$nlo,$na,$nloa
-		fmuld	$alo,$bb,$alob
-		fmuld	$nlo,$nb,$nlob
-		fmuld	$alo,$bc,$aloc
-	faddd	$aloa,$nloa,$nloa
-		fmuld	$nlo,$nc,$nloc
-		fmuld	$alo,$bd,$alod
-	faddd	$alob,$nlob,$nlob
-		fmuld	$nlo,$nd,$nlod
-		fmuld	$ahi,$ba,$ahia
-	faddd	$aloc,$nloc,$nloc
-		fmuld	$nhi,$na,$nhia
-		fmuld	$ahi,$bb,$ahib
-	faddd	$alod,$nlod,$nlod
-		fmuld	$nhi,$nb,$nhib
-		fmuld	$ahi,$bc,$ahic
-	faddd	$ahia,$nhia,$nhia
-		fmuld	$nhi,$nc,$nhic
-		fmuld	$ahi,$bd,$ahid
-	faddd	$ahib,$nhib,$nhib
-		fmuld	$nhi,$nd,$nhid
-
-	faddd	$ahic,$nhic,$dota	! $nhic
-	faddd	$ahid,$nhid,$dotb	! $nhid
-
-	faddd	$nloc,$nhia,$nloc
-	faddd	$nlod,$nhib,$nlod
-
-	fdtox	$nloa,$nloa
-	fdtox	$nlob,$nlob
-	fdtox	$nloc,$nloc
-	fdtox	$nlod,$nlod
-
-	std	$nloa,[%sp+$bias+$frame+0]
-	add	$j,8,$j
-	std	$nlob,[%sp+$bias+$frame+8]
-	add	$ap,$j,%o4
-	std	$nloc,[%sp+$bias+$frame+16]
-	add	$np,$j,%o5
-	std	$nlod,[%sp+$bias+$frame+24]
-
-	ld	[%o4+0],$alo_	! load a[j] as pair of 32-bit words
-	fzeros	$alo
-	ld	[%o4+4],$ahi_
-	fzeros	$ahi
-	ld	[%o5+0],$nlo_	! load n[j] as pair of 32-bit words
-	fzeros	$nlo
-	ld	[%o5+4],$nhi_
-	fzeros	$nhi
-
-	fxtod	$alo,$alo
-	fxtod	$ahi,$ahi
-	fxtod	$nlo,$nlo
-	fxtod	$nhi,$nhi
-
-	ldx	[%sp+$bias+$frame+0],%o0
-		fmuld	$alo,$ba,$aloa
-	ldx	[%sp+$bias+$frame+8],%o1
-		fmuld	$nlo,$na,$nloa
-	ldx	[%sp+$bias+$frame+16],%o2
-		fmuld	$alo,$bb,$alob
-	ldx	[%sp+$bias+$frame+24],%o3
-		fmuld	$nlo,$nb,$nlob
-
-	srlx	%o0,16,%o7
-	std	$alo,[$ap_l+$j]		! save smashed ap[j] in double format
-		fmuld	$alo,$bc,$aloc
-	add	%o7,%o1,%o1
-	std	$ahi,[$ap_h+$j]
-		faddd	$aloa,$nloa,$nloa
-		fmuld	$nlo,$nc,$nloc
-	srlx	%o1,16,%o7
-	std	$nlo,[$np_l+$j]		! save smashed np[j] in double format
-		fmuld	$alo,$bd,$alod
-	add	%o7,%o2,%o2
-	std	$nhi,[$np_h+$j]
-		faddd	$alob,$nlob,$nlob
-		fmuld	$nlo,$nd,$nlod
-	srlx	%o2,16,%o7
-		fmuld	$ahi,$ba,$ahia
-	add	%o7,%o3,%o3		! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
-		faddd	$aloc,$nloc,$nloc
-		fmuld	$nhi,$na,$nhia
-	!and	%o0,$mask,%o0
-	!and	%o1,$mask,%o1
-	!and	%o2,$mask,%o2
-	!sllx	%o1,16,%o1
-	!sllx	%o2,32,%o2
-	!sllx	%o3,48,%o7
-	!or	%o1,%o0,%o0
-	!or	%o2,%o0,%o0
-	!or	%o7,%o0,%o0		! 64-bit result
-	srlx	%o3,16,%g1		! 34-bit carry
-		fmuld	$ahi,$bb,$ahib
-
-	faddd	$alod,$nlod,$nlod
-		fmuld	$nhi,$nb,$nhib
-		fmuld	$ahi,$bc,$ahic
-	faddd	$ahia,$nhia,$nhia
-		fmuld	$nhi,$nc,$nhic
-		fmuld	$ahi,$bd,$ahid
-	faddd	$ahib,$nhib,$nhib
-		fmuld	$nhi,$nd,$nhid
-
-	faddd	$dota,$nloa,$nloa
-	faddd	$dotb,$nlob,$nlob
-	faddd	$ahic,$nhic,$dota	! $nhic
-	faddd	$ahid,$nhid,$dotb	! $nhid
-
-	faddd	$nloc,$nhia,$nloc
-	faddd	$nlod,$nhib,$nlod
-
-	fdtox	$nloa,$nloa
-	fdtox	$nlob,$nlob
-	fdtox	$nloc,$nloc
-	fdtox	$nlod,$nlod
-
-	std	$nloa,[%sp+$bias+$frame+0]
-	std	$nlob,[%sp+$bias+$frame+8]
-	addcc	$j,8,$j
-	std	$nloc,[%sp+$bias+$frame+16]
-	bz,pn	%icc,.L1stskip
-	std	$nlod,[%sp+$bias+$frame+24]
-
-.align	32			! incidentally already aligned !
-.L1st:
-	add	$ap,$j,%o4
-	add	$np,$j,%o5
-	ld	[%o4+0],$alo_	! load a[j] as pair of 32-bit words
-	fzeros	$alo
-	ld	[%o4+4],$ahi_
-	fzeros	$ahi
-	ld	[%o5+0],$nlo_	! load n[j] as pair of 32-bit words
-	fzeros	$nlo
-	ld	[%o5+4],$nhi_
-	fzeros	$nhi
-
-	fxtod	$alo,$alo
-	fxtod	$ahi,$ahi
-	fxtod	$nlo,$nlo
-	fxtod	$nhi,$nhi
-
-	ldx	[%sp+$bias+$frame+0],%o0
-		fmuld	$alo,$ba,$aloa
-	ldx	[%sp+$bias+$frame+8],%o1
-		fmuld	$nlo,$na,$nloa
-	ldx	[%sp+$bias+$frame+16],%o2
-		fmuld	$alo,$bb,$alob
-	ldx	[%sp+$bias+$frame+24],%o3
-		fmuld	$nlo,$nb,$nlob
-
-	srlx	%o0,16,%o7
-	std	$alo,[$ap_l+$j]		! save smashed ap[j] in double format
-		fmuld	$alo,$bc,$aloc
-	add	%o7,%o1,%o1
-	std	$ahi,[$ap_h+$j]
-		faddd	$aloa,$nloa,$nloa
-		fmuld	$nlo,$nc,$nloc
-	srlx	%o1,16,%o7
-	std	$nlo,[$np_l+$j]		! save smashed np[j] in double format
-		fmuld	$alo,$bd,$alod
-	add	%o7,%o2,%o2
-	std	$nhi,[$np_h+$j]
-		faddd	$alob,$nlob,$nlob
-		fmuld	$nlo,$nd,$nlod
-	srlx	%o2,16,%o7
-		fmuld	$ahi,$ba,$ahia
-	add	%o7,%o3,%o3		! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
-	and	%o0,$mask,%o0
-		faddd	$aloc,$nloc,$nloc
-		fmuld	$nhi,$na,$nhia
-	and	%o1,$mask,%o1
-	and	%o2,$mask,%o2
-		fmuld	$ahi,$bb,$ahib
-	sllx	%o1,16,%o1
-		faddd	$alod,$nlod,$nlod
-		fmuld	$nhi,$nb,$nhib
-	sllx	%o2,32,%o2
-		fmuld	$ahi,$bc,$ahic
-	sllx	%o3,48,%o7
-	or	%o1,%o0,%o0
-		faddd	$ahia,$nhia,$nhia
-		fmuld	$nhi,$nc,$nhic
-	or	%o2,%o0,%o0
-		fmuld	$ahi,$bd,$ahid
-	or	%o7,%o0,%o0		! 64-bit result
-		faddd	$ahib,$nhib,$nhib
-		fmuld	$nhi,$nd,$nhid
-	addcc	%g1,%o0,%o0
-		faddd	$dota,$nloa,$nloa
-	srlx	%o3,16,%g1		! 34-bit carry
-		faddd	$dotb,$nlob,$nlob
-	bcs,a	%xcc,.+8
-	add	%g1,1,%g1
-
-	stx	%o0,[$tp]		! tp[j-1]=
-
-	faddd	$ahic,$nhic,$dota	! $nhic
-	faddd	$ahid,$nhid,$dotb	! $nhid
-
-	faddd	$nloc,$nhia,$nloc
-	faddd	$nlod,$nhib,$nlod
-
-	fdtox	$nloa,$nloa
-	fdtox	$nlob,$nlob
-	fdtox	$nloc,$nloc
-	fdtox	$nlod,$nlod
-
-	std	$nloa,[%sp+$bias+$frame+0]
-	std	$nlob,[%sp+$bias+$frame+8]
-	std	$nloc,[%sp+$bias+$frame+16]
-	std	$nlod,[%sp+$bias+$frame+24]
-
-	addcc	$j,8,$j
-	bnz,pt	%icc,.L1st
-	add	$tp,8,$tp
-
-.L1stskip:
-	fdtox	$dota,$dota
-	fdtox	$dotb,$dotb
-
-	ldx	[%sp+$bias+$frame+0],%o0
-	ldx	[%sp+$bias+$frame+8],%o1
-	ldx	[%sp+$bias+$frame+16],%o2
-	ldx	[%sp+$bias+$frame+24],%o3
-
-	srlx	%o0,16,%o7
-	std	$dota,[%sp+$bias+$frame+32]
-	add	%o7,%o1,%o1
-	std	$dotb,[%sp+$bias+$frame+40]
-	srlx	%o1,16,%o7
-	add	%o7,%o2,%o2
-	srlx	%o2,16,%o7
-	add	%o7,%o3,%o3		! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
-	and	%o0,$mask,%o0
-	and	%o1,$mask,%o1
-	and	%o2,$mask,%o2
-	sllx	%o1,16,%o1
-	sllx	%o2,32,%o2
-	sllx	%o3,48,%o7
-	or	%o1,%o0,%o0
-	or	%o2,%o0,%o0
-	or	%o7,%o0,%o0		! 64-bit result
-	ldx	[%sp+$bias+$frame+32],%o4
-	addcc	%g1,%o0,%o0
-	ldx	[%sp+$bias+$frame+40],%o5
-	srlx	%o3,16,%g1		! 34-bit carry
-	bcs,a	%xcc,.+8
-	add	%g1,1,%g1
-
-	stx	%o0,[$tp]		! tp[j-1]=
-	add	$tp,8,$tp
-
-	srlx	%o4,16,%o7
-	add	%o7,%o5,%o5
-	and	%o4,$mask,%o4
-	sllx	%o5,16,%o7
-	or	%o7,%o4,%o4
-	addcc	%g1,%o4,%o4
-	srlx	%o5,48,%g1
-	bcs,a	%xcc,.+8
-	add	%g1,1,%g1
-
-	mov	%g1,$carry
-	stx	%o4,[$tp]		! tp[num-1]=
-
-	ba	.Louter
-	add	$i,8,$i
-.align	32
-.Louter:
-	sub	%g0,$num,$j		! j=-num
-	add	%sp,$bias+$frame+$locals,$tp
-
-	add	$ap,$j,%o3
-	add	$bp,$i,%o4
-
-	ld	[%o3+4],%g1		! bp[i]
-	ld	[%o3+0],%o0
-	ld	[%o4+4],%g5		! ap[0]
-	sllx	%g1,32,%g1
-	ld	[%o4+0],%o1
-	sllx	%g5,32,%g5
-	or	%g1,%o0,%o0
-	or	%g5,%o1,%o1
-
-	ldx	[$tp],%o2		! tp[0]
-	mulx	%o1,%o0,%o0
-	addcc	%o2,%o0,%o0
-	mulx	$n0,%o0,%o0		! (ap[0]*bp[i]+t[0])*n0
-	stx	%o0,[%sp+$bias+$frame+0]
-
-	! transfer b[i] to FPU as 4x16-bit values
-	ldda	[%o4+2]%asi,$ba
-	ldda	[%o4+0]%asi,$bb
-	ldda	[%o4+6]%asi,$bc
-	ldda	[%o4+4]%asi,$bd
-
-	! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
-	ldda	[%sp+$bias+$frame+6]%asi,$na
-	fxtod	$ba,$ba
-	ldda	[%sp+$bias+$frame+4]%asi,$nb
-	fxtod	$bb,$bb
-	ldda	[%sp+$bias+$frame+2]%asi,$nc
-	fxtod	$bc,$bc
-	ldda	[%sp+$bias+$frame+0]%asi,$nd
-	fxtod	$bd,$bd
-	ldd	[$ap_l+$j],$alo		! load a[j] in double format
-	fxtod	$na,$na
-	ldd	[$ap_h+$j],$ahi
-	fxtod	$nb,$nb
-	ldd	[$np_l+$j],$nlo		! load n[j] in double format
-	fxtod	$nc,$nc
-	ldd	[$np_h+$j],$nhi
-	fxtod	$nd,$nd
-
-		fmuld	$alo,$ba,$aloa
-		fmuld	$nlo,$na,$nloa
-		fmuld	$alo,$bb,$alob
-		fmuld	$nlo,$nb,$nlob
-		fmuld	$alo,$bc,$aloc
-	faddd	$aloa,$nloa,$nloa
-		fmuld	$nlo,$nc,$nloc
-		fmuld	$alo,$bd,$alod
-	faddd	$alob,$nlob,$nlob
-		fmuld	$nlo,$nd,$nlod
-		fmuld	$ahi,$ba,$ahia
-	faddd	$aloc,$nloc,$nloc
-		fmuld	$nhi,$na,$nhia
-		fmuld	$ahi,$bb,$ahib
-	faddd	$alod,$nlod,$nlod
-		fmuld	$nhi,$nb,$nhib
-		fmuld	$ahi,$bc,$ahic
-	faddd	$ahia,$nhia,$nhia
-		fmuld	$nhi,$nc,$nhic
-		fmuld	$ahi,$bd,$ahid
-	faddd	$ahib,$nhib,$nhib
-		fmuld	$nhi,$nd,$nhid
-
-	faddd	$ahic,$nhic,$dota	! $nhic
-	faddd	$ahid,$nhid,$dotb	! $nhid
-
-	faddd	$nloc,$nhia,$nloc
-	faddd	$nlod,$nhib,$nlod
-
-	fdtox	$nloa,$nloa
-	fdtox	$nlob,$nlob
-	fdtox	$nloc,$nloc
-	fdtox	$nlod,$nlod
-
-	std	$nloa,[%sp+$bias+$frame+0]
-	std	$nlob,[%sp+$bias+$frame+8]
-	std	$nloc,[%sp+$bias+$frame+16]
-	add	$j,8,$j
-	std	$nlod,[%sp+$bias+$frame+24]
-
-	ldd	[$ap_l+$j],$alo		! load a[j] in double format
-	ldd	[$ap_h+$j],$ahi
-	ldd	[$np_l+$j],$nlo		! load n[j] in double format
-	ldd	[$np_h+$j],$nhi
-
-		fmuld	$alo,$ba,$aloa
-		fmuld	$nlo,$na,$nloa
-		fmuld	$alo,$bb,$alob
-		fmuld	$nlo,$nb,$nlob
-		fmuld	$alo,$bc,$aloc
-	ldx	[%sp+$bias+$frame+0],%o0
-		faddd	$aloa,$nloa,$nloa
-		fmuld	$nlo,$nc,$nloc
-	ldx	[%sp+$bias+$frame+8],%o1
-		fmuld	$alo,$bd,$alod
-	ldx	[%sp+$bias+$frame+16],%o2
-		faddd	$alob,$nlob,$nlob
-		fmuld	$nlo,$nd,$nlod
-	ldx	[%sp+$bias+$frame+24],%o3
-		fmuld	$ahi,$ba,$ahia
-
-	srlx	%o0,16,%o7
-		faddd	$aloc,$nloc,$nloc
-		fmuld	$nhi,$na,$nhia
-	add	%o7,%o1,%o1
-		fmuld	$ahi,$bb,$ahib
-	srlx	%o1,16,%o7
-		faddd	$alod,$nlod,$nlod
-		fmuld	$nhi,$nb,$nhib
-	add	%o7,%o2,%o2
-		fmuld	$ahi,$bc,$ahic
-	srlx	%o2,16,%o7
-		faddd	$ahia,$nhia,$nhia
-		fmuld	$nhi,$nc,$nhic
-	add	%o7,%o3,%o3		! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
-	! why?
-	and	%o0,$mask,%o0
-		fmuld	$ahi,$bd,$ahid
-	and	%o1,$mask,%o1
-	and	%o2,$mask,%o2
-		faddd	$ahib,$nhib,$nhib
-		fmuld	$nhi,$nd,$nhid
-	sllx	%o1,16,%o1
-		faddd	$dota,$nloa,$nloa
-	sllx	%o2,32,%o2
-		faddd	$dotb,$nlob,$nlob
-	sllx	%o3,48,%o7
-	or	%o1,%o0,%o0
-		faddd	$ahic,$nhic,$dota	! $nhic
-	or	%o2,%o0,%o0
-		faddd	$ahid,$nhid,$dotb	! $nhid
-	or	%o7,%o0,%o0		! 64-bit result
-	ldx	[$tp],%o7
-		faddd	$nloc,$nhia,$nloc
-	addcc	%o7,%o0,%o0
-	! end-of-why?
-		faddd	$nlod,$nhib,$nlod
-	srlx	%o3,16,%g1		! 34-bit carry
-		fdtox	$nloa,$nloa
-	bcs,a	%xcc,.+8
-	add	%g1,1,%g1
-
-	fdtox	$nlob,$nlob
-	fdtox	$nloc,$nloc
-	fdtox	$nlod,$nlod
-
-	std	$nloa,[%sp+$bias+$frame+0]
-	std	$nlob,[%sp+$bias+$frame+8]
-	addcc	$j,8,$j
-	std	$nloc,[%sp+$bias+$frame+16]
-	bz,pn	%icc,.Linnerskip
-	std	$nlod,[%sp+$bias+$frame+24]
-
-	ba	.Linner
-	nop
-.align	32
-.Linner:
-	ldd	[$ap_l+$j],$alo		! load a[j] in double format
-	ldd	[$ap_h+$j],$ahi
-	ldd	[$np_l+$j],$nlo		! load n[j] in double format
-	ldd	[$np_h+$j],$nhi
-
-		fmuld	$alo,$ba,$aloa
-		fmuld	$nlo,$na,$nloa
-		fmuld	$alo,$bb,$alob
-		fmuld	$nlo,$nb,$nlob
-		fmuld	$alo,$bc,$aloc
-	ldx	[%sp+$bias+$frame+0],%o0
-		faddd	$aloa,$nloa,$nloa
-		fmuld	$nlo,$nc,$nloc
-	ldx	[%sp+$bias+$frame+8],%o1
-		fmuld	$alo,$bd,$alod
-	ldx	[%sp+$bias+$frame+16],%o2
-		faddd	$alob,$nlob,$nlob
-		fmuld	$nlo,$nd,$nlod
-	ldx	[%sp+$bias+$frame+24],%o3
-		fmuld	$ahi,$ba,$ahia
-
-	srlx	%o0,16,%o7
-		faddd	$aloc,$nloc,$nloc
-		fmuld	$nhi,$na,$nhia
-	add	%o7,%o1,%o1
-		fmuld	$ahi,$bb,$ahib
-	srlx	%o1,16,%o7
-		faddd	$alod,$nlod,$nlod
-		fmuld	$nhi,$nb,$nhib
-	add	%o7,%o2,%o2
-		fmuld	$ahi,$bc,$ahic
-	srlx	%o2,16,%o7
-		faddd	$ahia,$nhia,$nhia
-		fmuld	$nhi,$nc,$nhic
-	add	%o7,%o3,%o3		! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
-	and	%o0,$mask,%o0
-		fmuld	$ahi,$bd,$ahid
-	and	%o1,$mask,%o1
-	and	%o2,$mask,%o2
-		faddd	$ahib,$nhib,$nhib
-		fmuld	$nhi,$nd,$nhid
-	sllx	%o1,16,%o1
-		faddd	$dota,$nloa,$nloa
-	sllx	%o2,32,%o2
-		faddd	$dotb,$nlob,$nlob
-	sllx	%o3,48,%o7
-	or	%o1,%o0,%o0
-		faddd	$ahic,$nhic,$dota	! $nhic
-	or	%o2,%o0,%o0
-		faddd	$ahid,$nhid,$dotb	! $nhid
-	or	%o7,%o0,%o0		! 64-bit result
-		faddd	$nloc,$nhia,$nloc
-	addcc	%g1,%o0,%o0
-	ldx	[$tp+8],%o7		! tp[j]
-		faddd	$nlod,$nhib,$nlod
-	srlx	%o3,16,%g1		! 34-bit carry
-		fdtox	$nloa,$nloa
-	bcs,a	%xcc,.+8
-	add	%g1,1,%g1
-		fdtox	$nlob,$nlob
-	addcc	%o7,%o0,%o0
-		fdtox	$nloc,$nloc
-	bcs,a	%xcc,.+8
-	add	%g1,1,%g1
-
-	stx	%o0,[$tp]		! tp[j-1]
-		fdtox	$nlod,$nlod
-
-	std	$nloa,[%sp+$bias+$frame+0]
-	std	$nlob,[%sp+$bias+$frame+8]
-	std	$nloc,[%sp+$bias+$frame+16]
-	addcc	$j,8,$j
-	std	$nlod,[%sp+$bias+$frame+24]
-	bnz,pt	%icc,.Linner
-	add	$tp,8,$tp
-
-.Linnerskip:
-	fdtox	$dota,$dota
-	fdtox	$dotb,$dotb
-
-	ldx	[%sp+$bias+$frame+0],%o0
-	ldx	[%sp+$bias+$frame+8],%o1
-	ldx	[%sp+$bias+$frame+16],%o2
-	ldx	[%sp+$bias+$frame+24],%o3
-
-	srlx	%o0,16,%o7
-	std	$dota,[%sp+$bias+$frame+32]
-	add	%o7,%o1,%o1
-	std	$dotb,[%sp+$bias+$frame+40]
-	srlx	%o1,16,%o7
-	add	%o7,%o2,%o2
-	srlx	%o2,16,%o7
-	add	%o7,%o3,%o3		! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
-	and	%o0,$mask,%o0
-	and	%o1,$mask,%o1
-	and	%o2,$mask,%o2
-	sllx	%o1,16,%o1
-	sllx	%o2,32,%o2
-	sllx	%o3,48,%o7
-	or	%o1,%o0,%o0
-	or	%o2,%o0,%o0
-	ldx	[%sp+$bias+$frame+32],%o4
-	or	%o7,%o0,%o0		! 64-bit result
-	ldx	[%sp+$bias+$frame+40],%o5
-	addcc	%g1,%o0,%o0
-	ldx	[$tp+8],%o7		! tp[j]
-	srlx	%o3,16,%g1		! 34-bit carry
-	bcs,a	%xcc,.+8
-	add	%g1,1,%g1
-
-	addcc	%o7,%o0,%o0
-	bcs,a	%xcc,.+8
-	add	%g1,1,%g1
-
-	stx	%o0,[$tp]		! tp[j-1]
-	add	$tp,8,$tp
-
-	srlx	%o4,16,%o7
-	add	%o7,%o5,%o5
-	and	%o4,$mask,%o4
-	sllx	%o5,16,%o7
-	or	%o7,%o4,%o4
-	addcc	%g1,%o4,%o4
-	srlx	%o5,48,%g1
-	bcs,a	%xcc,.+8
-	add	%g1,1,%g1
-
-	addcc	$carry,%o4,%o4
-	stx	%o4,[$tp]		! tp[num-1]
-	mov	%g1,$carry
-	bcs,a	%xcc,.+8
-	add	$carry,1,$carry
-
-	addcc	$i,8,$i
-	bnz	%icc,.Louter
-	nop
-
-	add	$tp,8,$tp		! adjust tp to point at the end
-	orn	%g0,%g0,%g4
-	sub	%g0,$num,%o7		! n=-num
-	ba	.Lsub
-	subcc	%g0,%g0,%g0		! clear %icc.c
-
-.align	32
-.Lsub:
-	ldx	[$tp+%o7],%o0
-	add	$np,%o7,%g1
-	ld	[%g1+0],%o2
-	ld	[%g1+4],%o3
-	srlx	%o0,32,%o1
-	subccc	%o0,%o2,%o2
-	add	$rp,%o7,%g1
-	subccc	%o1,%o3,%o3
-	st	%o2,[%g1+0]
-	add	%o7,8,%o7
-	brnz,pt	%o7,.Lsub
-	st	%o3,[%g1+4]
-	subc	$carry,0,%g4
-	sub	%g0,$num,%o7		! n=-num
-	ba	.Lcopy
-	nop
-
-.align	32
-.Lcopy:
-	ldx	[$tp+%o7],%o0
-	add	$rp,%o7,%g1
-	ld	[%g1+0],%o2
-	ld	[%g1+4],%o3
-	stx	%g0,[$tp+%o7]
-	and	%o0,%g4,%o0
-	srlx	%o0,32,%o1
-	andn	%o2,%g4,%o2
-	andn	%o3,%g4,%o3
-	or	%o2,%o0,%o0
-	or	%o3,%o1,%o1
-	st	%o0,[%g1+0]
-	add	%o7,8,%o7
-	brnz,pt	%o7,.Lcopy
-	st	%o1,[%g1+4]
-	sub	%g0,$num,%o7		! n=-num
-
-.Lzap:
-	stx	%g0,[$ap_l+%o7]
-	stx	%g0,[$ap_h+%o7]
-	stx	%g0,[$np_l+%o7]
-	stx	%g0,[$np_h+%o7]
-	add	%o7,8,%o7
-	brnz,pt	%o7,.Lzap
-	nop
-
-	ldx	[%sp+$bias+$frame+48],%o7
-	wr	%g0,%o7,%asi		! restore %asi
-
-	mov	1,%i0
-.Lret:
-	ret
-	restore
-.type   $fname,#function
-.size	$fname,(.-$fname)
-.asciz	"Montgomery Multiplication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
-.align	32
-___
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-
-# The substitution below makes it possible to compile without demanding
-# VIS extensions on the command line, e.g. -xarch=v9 vs. -xarch=v9a. I
-# dare to do this because VIS capability is detected at run-time now
-# and this routine is not called on CPUs not capable of executing it.
-# Do note that fzeros is not the only VIS dependency! Another
-# dependency is implicit and is just _a_ numerical value loaded into
-# the %asi register, which the assembler can't recognize as VIS-
-# specific...
-$code =~ s/fzeros\s+%f([0-9]+)/
-	   sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)
-	  /gem;
-
-print $code;
-# flush
-close STDOUT;

+ 0 - 242
drivers/builtin_openssl2/crypto/bn/asm/via-mont.pl

@@ -1,242 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# Wrapper around 'rep montmul', the VIA-specific instruction accessing
-# the PadLock Montgomery Multiplier. The wrapper is designed as a
-# drop-in replacement for OpenSSL bn_mul_mont [first implemented in
-# 0.9.9].
-#
-# Below are interleaved outputs from 'openssl speed rsa dsa' for 4
-# different software configurations on a 1.5GHz VIA Esther processor.
-# Lines marked "software integer" denote the performance of the hand-
-# coded integer-only assembler found in OpenSSL 0.9.7. "Software SSE2"
-# refers to the hand-coded SSE2 Montgomery multiplication procedure
-# found in OpenSSL 0.9.9. "Hardware VIA SDK" refers to the padlock_pmm
-# routine from PadLock SDK 2.0.1, available for download from VIA,
-# which naturally utilizes the magic 'repz montmul' instruction. And
-# finally, "hardware this" refers to *this* implementation, which also
-# uses 'repz montmul'.
-#
-#                   sign    verify    sign/s verify/s
-# rsa  512 bits 0.001720s 0.000140s    581.4   7149.7	software integer
-# rsa  512 bits 0.000690s 0.000086s   1450.3  11606.0	software SSE2
-# rsa  512 bits 0.006136s 0.000201s    163.0   4974.5	hardware VIA SDK
-# rsa  512 bits 0.000712s 0.000050s   1404.9  19858.5	hardware this
-#
-# rsa 1024 bits 0.008518s 0.000413s    117.4   2420.8	software integer
-# rsa 1024 bits 0.004275s 0.000277s    233.9   3609.7	software SSE2
-# rsa 1024 bits 0.012136s 0.000260s     82.4   3844.5	hardware VIA SDK
-# rsa 1024 bits 0.002522s 0.000116s    396.5   8650.9	hardware this
-#
-# rsa 2048 bits 0.050101s 0.001371s     20.0    729.6	software integer
-# rsa 2048 bits 0.030273s 0.001008s     33.0    991.9	software SSE2
-# rsa 2048 bits 0.030833s 0.000976s     32.4   1025.1	hardware VIA SDK
-# rsa 2048 bits 0.011879s 0.000342s     84.2   2921.7	hardware this
-#
-# rsa 4096 bits 0.327097s 0.004859s      3.1    205.8	software integer
-# rsa 4096 bits 0.229318s 0.003859s      4.4    259.2	software SSE2
-# rsa 4096 bits 0.233953s 0.003274s      4.3    305.4	hardware VIA SDK
-# rsa 4096 bits 0.070493s 0.001166s     14.2    857.6	hardware this
-#
-# dsa  512 bits 0.001342s 0.001651s    745.2    605.7	software integer
-# dsa  512 bits 0.000844s 0.000987s   1185.3   1013.1	software SSE2
-# dsa  512 bits 0.001902s 0.002247s    525.6    444.9	hardware VIA SDK
-# dsa  512 bits 0.000458s 0.000524s   2182.2   1909.1	hardware this
-#
-# dsa 1024 bits 0.003964s 0.004926s    252.3    203.0	software integer
-# dsa 1024 bits 0.002686s 0.003166s    372.3    315.8	software SSE2
-# dsa 1024 bits 0.002397s 0.002823s    417.1    354.3	hardware VIA SDK
-# dsa 1024 bits 0.000978s 0.001170s   1022.2    855.0	hardware this
-#
-# dsa 2048 bits 0.013280s 0.016518s     75.3     60.5	software integer
-# dsa 2048 bits 0.009911s 0.011522s    100.9     86.8	software SSE2
-# dsa 2048 bits 0.009542s 0.011763s    104.8     85.0	hardware VIA SDK
-# dsa 2048 bits 0.002884s 0.003352s    346.8    298.3	hardware this
-#
-# To give you another reference point, here is the output for a 2.4GHz
-# P4 running the hand-coded SSE2 bn_mul_mont found in 0.9.9, i.e.
-# "software SSE2" in the above terms.
-#
-# rsa  512 bits 0.000407s 0.000047s   2454.2  21137.0
-# rsa 1024 bits 0.002426s 0.000141s    412.1   7100.0
-# rsa 2048 bits 0.015046s 0.000491s     66.5   2034.9
-# rsa 4096 bits 0.109770s 0.002379s      9.1    420.3
-# dsa  512 bits 0.000438s 0.000525s   2281.1   1904.1
-# dsa 1024 bits 0.001346s 0.001595s    742.7    627.0
-# dsa 2048 bits 0.004745s 0.005582s    210.7    179.1
-#
-# Conclusions:
-# - the VIA SDK leaves a *lot* of room for improvement (which this
-#   implementation successfully fills:-);
-# - 'rep montmul' gives up to >3x performance improvement depending on
-#   key length;
-# - in terms of absolute performance it delivers approximately as much
-#   as modern out-of-order 32-bit cores [again, for longer keys].
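
The entry guard of the wrapper below enforces the multiplier's operand-length limits before anything else. In C terms it amounts to something like this sketch (num counted in 32-bit words, as in the wrapper; the VIA specification states the same limits in bits):

    /* Mirrors the checks at the top of bn_mul_mont_padlock below:
     * 'rep montmul' wants a multiple of 4 words, at least 8 and at
     * most 1024. Returning 0 signals "unsupported input value", so
     * OpenSSL falls back to its generic bn_mul_mont. */
    static int padlock_num_ok(int num)
    {
        return (num % 4) == 0 && num >= 8 && num <= 1024;
    }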
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],"via-mont.pl");
-
-# int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num);
-$func="bn_mul_mont_padlock";
-
-$pad=16*1;	# amount of reserved bytes on top of every vector
-
-# stack layout
-$mZeroPrime=&DWP(0,"esp");		# these are specified by VIA
-$A=&DWP(4,"esp");
-$B=&DWP(8,"esp");
-$T=&DWP(12,"esp");
-$M=&DWP(16,"esp");
-$scratch=&DWP(20,"esp");
-$rp=&DWP(24,"esp");			# these are mine
-$sp=&DWP(28,"esp");
-# &DWP(32,"esp")			# 32 byte scratch area
-# &DWP(64+(4*$num+$pad)*0,"esp")	# padded tp[num]
-# &DWP(64+(4*$num+$pad)*1,"esp")	# padded copy of ap[num]
-# &DWP(64+(4*$num+$pad)*2,"esp")	# padded copy of bp[num]
-# &DWP(64+(4*$num+$pad)*3,"esp")	# padded copy of np[num]
-# Note that the SDK suggests unconditionally allocating 2K per vector.
-# This has quite an impact on performance. It naturally depends on key
-# length, but to give an example, 1024-bit private RSA key operations
-# suffer a >30% penalty. I allocate only as much as is actually
-# required...
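
The size actually carved out below is a fixed 64-byte header (the 32-byte parameter block plus the 32-byte scratch area) followed by the four padded vectors. As a rough C equivalent of the ecx/ebp arithmetic (hypothetical helper name):

    #include <stddef.h>

    /* Frame size computed below: ecx = num*4 + $pad bytes per
     * vector, ebp = 64 bytes of header + 4 such vectors (tp and
     * the padded copies of ap, bp, np). Cache-line alignment of
     * the stack pointer is handled separately. */
    static size_t padlock_frame_bytes(size_t num_words, size_t pad)
    {
        size_t vec = num_words * 4 + pad;   /* one padded vector    */
        return 64 + 4 * vec;                /* header + tp/ap/bp/np */
    }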
-
-&function_begin($func);
-	&xor	("eax","eax");
-	&mov	("ecx",&wparam(5));	# num
-	# meet VIA's limitations for num [note that the specification
-	# expresses them in bits, while we work with amount of 32-bit words]
-	&test	("ecx",3);
-	&jnz	(&label("leave"));	# num % 4 != 0
-	&cmp	("ecx",8);
-	&jb	(&label("leave"));	# num < 8
-	&cmp	("ecx",1024);
-	&ja	(&label("leave"));	# num > 1024
-
-	&pushf	();
-	&cld	();
-
-	&mov	("edi",&wparam(0));	# rp
-	&mov	("eax",&wparam(1));	# ap
-	&mov	("ebx",&wparam(2));	# bp
-	&mov	("edx",&wparam(3));	# np
-	&mov	("esi",&wparam(4));	# n0
-	&mov	("esi",&DWP(0,"esi"));	# *n0
-
-	&lea	("ecx",&DWP($pad,"","ecx",4));	# ecx becomes vector size in bytes
-	&lea	("ebp",&DWP(64,"","ecx",4));	# allocate 4 vectors + 64 bytes
-	&neg	("ebp");
-	&add	("ebp","esp");
-	&and	("ebp",-64);		# align to cache-line
-	&xchg	("ebp","esp");		# alloca
-
-	&mov	($rp,"edi");		# save rp
-	&mov	($sp,"ebp");		# save esp
-
-	&mov	($mZeroPrime,"esi");
-	&lea	("esi",&DWP(64,"esp"));	# tp
-	&mov	($T,"esi");
-	&lea	("edi",&DWP(32,"esp"));	# scratch area
-	&mov	($scratch,"edi");
-	&mov	("esi","eax");
-
-	&lea	("ebp",&DWP(-$pad,"ecx"));
-	&shr	("ebp",2);		# restore original num value in ebp
-
-	&xor	("eax","eax");
-
-	&mov	("ecx","ebp");
-	&lea	("ecx",&DWP((32+$pad)/4,"ecx"));# padded tp + scratch
-	&data_byte(0xf3,0xab);		# rep stosl, bzero
-
-	&mov	("ecx","ebp");
-	&lea	("edi",&DWP(64+$pad,"esp","ecx",4));# pointer to ap copy
-	&mov	($A,"edi");
-	&data_byte(0xf3,0xa5);		# rep movsl, memcpy
-	&mov	("ecx",$pad/4);
-	&data_byte(0xf3,0xab);		# rep stosl, bzero pad
-	# edi points at the end of padded ap copy...
-
-	&mov	("ecx","ebp");
-	&mov	("esi","ebx");
-	&mov	($B,"edi");
-	&data_byte(0xf3,0xa5);		# rep movsl, memcpy
-	&mov	("ecx",$pad/4);
-	&data_byte(0xf3,0xab);		# rep stosl, bzero pad
-	# edi points at the end of padded bp copy...
-
-	&mov	("ecx","ebp");
-	&mov	("esi","edx");
-	&mov	($M,"edi");
-	&data_byte(0xf3,0xa5);		# rep movsl, memcpy
-	&mov	("ecx",$pad/4);
-	&data_byte(0xf3,0xab);		# rep stosl, bzero pad
-	# edi points at the end of padded np copy...
-
-	# let magic happen...
-	&mov	("ecx","ebp");
-	&mov	("esi","esp");
-	&shl	("ecx",5);		# convert word counter to bit counter
-	&align	(4);
-	&data_byte(0xf3,0x0f,0xa6,0xc0);# rep montmul
-
-	&mov	("ecx","ebp");
-	&lea	("esi",&DWP(64,"esp"));		# tp
-	# edi still points at the end of padded np copy...
-	&neg	("ebp");
-	&lea	("ebp",&DWP(-$pad,"edi","ebp",4));	# so just "rewind"
-	&mov	("edi",$rp);			# restore rp
-	&xor	("edx","edx");			# i=0 and clear CF
-
-&set_label("sub",8);
-	&mov	("eax",&DWP(0,"esi","edx",4));
-	&sbb	("eax",&DWP(0,"ebp","edx",4));
-	&mov	(&DWP(0,"edi","edx",4),"eax");	# rp[i]=tp[i]-np[i]
-	&lea	("edx",&DWP(1,"edx"));		# i++
-	&loop	(&label("sub"));		# doesn't affect CF!
-
-	&mov	("eax",&DWP(0,"esi","edx",4));	# upmost overflow bit
-	&sbb	("eax",0);
-	&and	("esi","eax");
-	&not	("eax");
-	&mov	("ebp","edi");
-	&and	("ebp","eax");
-	&or	("esi","ebp");			# tp=carry?tp:rp
-
-	&mov	("ecx","edx");			# num
-	&xor	("edx","edx");			# i=0
-
-&set_label("copy",8);
-	&mov	("eax",&DWP(0,"esi","edx",4));
-	&mov	(&DWP(64,"esp","edx",4),"ecx");	# zap tp
-	&mov	(&DWP(0,"edi","edx",4),"eax");
-	&lea	("edx",&DWP(1,"edx"));		# i++
-	&loop	(&label("copy"));
-
-	&mov	("ebp",$sp);
-	&xor	("eax","eax");
-
-	&mov	("ecx",64/4);
-	&mov	("edi","esp");		# zap frame including scratch area
-	&data_byte(0xf3,0xab);		# rep stosl, bzero
-
-	# zap copies of ap, bp and np
-	&lea	("edi",&DWP(64+$pad,"esp","edx",4));# pointer to ap
-	&lea	("ecx",&DWP(3*$pad/4,"edx","edx",2));
-	&data_byte(0xf3,0xab);		# rep stosl, bzero
-
-	&mov	("esp","ebp");
-	&inc	("eax");		# signal "done"
-	&popf	();
-&set_label("leave");
-&function_end($func);
-
-&asciz("Padlock Montgomery Multiplication, CRYPTOGAMS by <appro\@openssl.org>");
-
-&asm_finish();

+ 0 - 313
drivers/builtin_openssl2/crypto/bn/asm/x86-gf2m.pl

@@ -1,313 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# May 2011
-#
-# The module implements the bn_GF2m_mul_2x2 polynomial multiplication
-# used in bn_gf2m.c. It's kind of a low-hanging mechanical port from C
-# for the time being... Except that it has three code paths: pure
-# integer code suitable for any x86 CPU, MMX code suitable for PIII
-# and later, and PCLMULQDQ code suitable for Westmere and later. The
-# improvement varies from one benchmark and µ-arch to another. Below
-# are interval values for 163- and 571-bit ECDH benchmarks relative to
-# compiler-generated code:
-#
-# PIII		16%-30%
-# P4		12%-12%
-# Opteron	18%-40%
-# Core2		19%-44%
-# Atom		38%-64%
-# Westmere	53%-121%(PCLMULQDQ)/20%-32%(MMX)
-# Sandy Bridge	72%-127%(PCLMULQDQ)/27%-23%(MMX)
-#
-# Note that the above improvement coefficients are not coefficients
-# for bn_GF2m_mul_2x2 itself. For example, a 120% ECDH improvement is
-# the result of bn_GF2m_mul_2x2 being >4x faster. As it gets faster,
-# the benchmark is more and more dominated by other subroutines, most
-# notably by BN_GF2m_mod[_mul]_arr...
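
All three paths reduce the 2x2 carry-less product to three 1x1 products combined Karatsuba-style; over GF(2) both addition and subtraction are XOR, which is why no carries or negations appear. A compact C model of the scheme follows (the naive 1x1 kernel stands in for the table-driven _mul_1x1_ialu/_mul_1x1_mmx and, unlike them, branches on its inputs):

    #include <stdint.h>

    /* Naive carry-less 32x32->64 multiply; illustrative stand-in
     * for the _mul_1x1_* kernels below. */
    static void clmul_1x1(uint32_t a, uint32_t b,
                          uint32_t *hi, uint32_t *lo)
    {
        uint64_t r = 0, aa = a;
        for (int i = 0; i < 32; i++)
            if (b & (1u << i))
                r ^= aa << i;
        *hi = (uint32_t)(r >> 32);
        *lo = (uint32_t)r;
    }

    /* Karatsuba over GF(2): the middle term is
     * (a0^a1)*(b0^b1) ^ a1*b1 ^ a0*b0, exactly the three
     * _mul_1x1 calls made by bn_GF2m_mul_2x2. r[] is
     * little-endian, r[0] least significant. */
    static void gf2m_mul_2x2(uint32_t r[4], uint32_t a1, uint32_t a0,
                             uint32_t b1, uint32_t b0)
    {
        uint32_t h_hi, h_lo, l_hi, l_lo, m_hi, m_lo;
        clmul_1x1(a1, b1, &h_hi, &h_lo);            /* a1*b1           */
        clmul_1x1(a0, b0, &l_hi, &l_lo);            /* a0*b0           */
        clmul_1x1(a0 ^ a1, b0 ^ b1, &m_hi, &m_lo);  /* (a0+a1)*(b0+b1) */
        m_hi ^= h_hi ^ l_hi;                        /* middle 64 bits  */
        m_lo ^= h_lo ^ l_lo;
        r[0] = l_lo;
        r[1] = l_hi ^ m_lo;
        r[2] = h_lo ^ m_hi;
        r[3] = h_hi;
    }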
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],$0,$x86only = $ARGV[$#ARGV] eq "386");
-
-$sse2=0;
-for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
-
-&external_label("OPENSSL_ia32cap_P") if ($sse2);
-
-$a="eax";
-$b="ebx";
-($a1,$a2,$a4)=("ecx","edx","ebp");
-
-$R="mm0";
-@T=("mm1","mm2");
-($A,$B,$B30,$B31)=("mm2","mm3","mm4","mm5");
-@i=("esi","edi");
-
-					if (!$x86only) {
-&function_begin_B("_mul_1x1_mmx");
-	&sub	("esp",32+4);
-	 &mov	($a1,$a);
-	 &lea	($a2,&DWP(0,$a,$a));
-	 &and	($a1,0x3fffffff);
-	 &lea	($a4,&DWP(0,$a2,$a2));
-	 &mov	(&DWP(0*4,"esp"),0);
-	 &and	($a2,0x7fffffff);
-	&movd	($A,$a);
-	&movd	($B,$b);
-	 &mov	(&DWP(1*4,"esp"),$a1);	# a1
-	 &xor	($a1,$a2);		# a1^a2
-	&pxor	($B31,$B31);
-	&pxor	($B30,$B30);
-	 &mov	(&DWP(2*4,"esp"),$a2);	# a2
-	 &xor	($a2,$a4);		# a2^a4
-	 &mov	(&DWP(3*4,"esp"),$a1);	# a1^a2
-	&pcmpgtd($B31,$A);		# broadcast 31st bit
-	&paddd	($A,$A);		# $A<<=1
-	 &xor	($a1,$a2);		# a1^a4=a1^a2^a2^a4
-	 &mov	(&DWP(4*4,"esp"),$a4);	# a4
-	 &xor	($a4,$a2);		# a2=a4^a2^a4
-	&pand	($B31,$B);
-	&pcmpgtd($B30,$A);		# broadcast 30th bit
-	 &mov	(&DWP(5*4,"esp"),$a1);	# a1^a4
-	 &xor	($a4,$a1);		# a1^a2^a4
-	&psllq	($B31,31);
-	&pand	($B30,$B);
-	 &mov	(&DWP(6*4,"esp"),$a2);	# a2^a4
-	&mov	(@i[0],0x7);
-	 &mov	(&DWP(7*4,"esp"),$a4);	# a1^a2^a4
-	 &mov	($a4,@i[0]);
-	&and	(@i[0],$b);
-	&shr	($b,3);
-	&mov	(@i[1],$a4);
-	&psllq	($B30,30);
-	&and	(@i[1],$b);
-	&shr	($b,3);
-	&movd	($R,&DWP(0,"esp",@i[0],4));
-	&mov	(@i[0],$a4);
-	&and	(@i[0],$b);
-	&shr	($b,3);
-	for($n=1;$n<9;$n++) {
-		&movd	(@T[1],&DWP(0,"esp",@i[1],4));
-		&mov	(@i[1],$a4);
-		&psllq	(@T[1],3*$n);
-		&and	(@i[1],$b);
-		&shr	($b,3);
-		&pxor	($R,@T[1]);
-
-		push(@i,shift(@i)); push(@T,shift(@T));
-	}
-	&movd	(@T[1],&DWP(0,"esp",@i[1],4));
-	&pxor	($R,$B30);
-	&psllq	(@T[1],3*$n++);
-	&pxor	($R,@T[1]);
-
-	&movd	(@T[0],&DWP(0,"esp",@i[0],4));
-	&pxor	($R,$B31);
-	&psllq	(@T[0],3*$n);
-	&add	("esp",32+4);
-	&pxor	($R,@T[0]);
-	&ret	();
-&function_end_B("_mul_1x1_mmx");
-					}
-
-($lo,$hi)=("eax","edx");
-@T=("ecx","ebp");
-
-&function_begin_B("_mul_1x1_ialu");
-	&sub	("esp",32+4);
-	 &mov	($a1,$a);
-	 &lea	($a2,&DWP(0,$a,$a));
-	 &lea	($a4,&DWP(0,"",$a,4));
-	 &and	($a1,0x3fffffff);
-	&lea	(@i[1],&DWP(0,$lo,$lo));
-	&sar	($lo,31);		# broadcast 31st bit
-	 &mov	(&DWP(0*4,"esp"),0);
-	 &and	($a2,0x7fffffff);
-	 &mov	(&DWP(1*4,"esp"),$a1);	# a1
-	 &xor	($a1,$a2);		# a1^a2
-	 &mov	(&DWP(2*4,"esp"),$a2);	# a2
-	 &xor	($a2,$a4);		# a2^a4
-	 &mov	(&DWP(3*4,"esp"),$a1);	# a1^a2
-	 &xor	($a1,$a2);		# a1^a4=a1^a2^a2^a4
-	 &mov	(&DWP(4*4,"esp"),$a4);	# a4
-	 &xor	($a4,$a2);		# a2=a4^a2^a4
-	 &mov	(&DWP(5*4,"esp"),$a1);	# a1^a4
-	 &xor	($a4,$a1);		# a1^a2^a4
-	 &sar	(@i[1],31);		# broadcast 30th bit
-	&and	($lo,$b);
-	 &mov	(&DWP(6*4,"esp"),$a2);	# a2^a4
-	&and	(@i[1],$b);
-	 &mov	(&DWP(7*4,"esp"),$a4);	# a1^a2^a4
-	&mov	($hi,$lo);
-	&shl	($lo,31);
-	&mov	(@T[0],@i[1]);
-	&shr	($hi,1);
-
-	 &mov	(@i[0],0x7);
-	&shl	(@i[1],30);
-	 &and	(@i[0],$b);
-	&shr	(@T[0],2);
-	&xor	($lo,@i[1]);
-
-	&shr	($b,3);
-	&mov	(@i[1],0x7);		# 5-byte instruction!?
-	&and	(@i[1],$b);
-	&shr	($b,3);
-	 &xor	($hi,@T[0]);
-	&xor	($lo,&DWP(0,"esp",@i[0],4));
-	&mov	(@i[0],0x7);
-	&and	(@i[0],$b);
-	&shr	($b,3);
-	for($n=1;$n<9;$n++) {
-		&mov	(@T[1],&DWP(0,"esp",@i[1],4));
-		&mov	(@i[1],0x7);
-		&mov	(@T[0],@T[1]);
-		&shl	(@T[1],3*$n);
-		&and	(@i[1],$b);
-		&shr	(@T[0],32-3*$n);
-		&xor	($lo,@T[1]);
-		&shr	($b,3);
-		&xor	($hi,@T[0]);
-
-		push(@i,shift(@i)); push(@T,shift(@T));
-	}
-	&mov	(@T[1],&DWP(0,"esp",@i[1],4));
-	&mov	(@T[0],@T[1]);
-	&shl	(@T[1],3*$n);
-	&mov	(@i[1],&DWP(0,"esp",@i[0],4));
-	&shr	(@T[0],32-3*$n);	$n++;
-	&mov	(@i[0],@i[1]);
-	&xor	($lo,@T[1]);
-	&shl	(@i[1],3*$n);
-	&xor	($hi,@T[0]);
-	&shr	(@i[0],32-3*$n);
-	&xor	($lo,@i[1]);
-	&xor	($hi,@i[0]);
-
-	&add	("esp",32+4);
-	&ret	();
-&function_end_B("_mul_1x1_ialu");
-
-# void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0);
-&function_begin_B("bn_GF2m_mul_2x2");
-if (!$x86only) {
-	&picmeup("edx","OPENSSL_ia32cap_P");
-	&mov	("eax",&DWP(0,"edx"));
-	&mov	("edx",&DWP(4,"edx"));
-	&test	("eax",1<<23);		# check MMX bit
-	&jz	(&label("ialu"));
-if ($sse2) {
-	&test	("eax",1<<24);		# check FXSR bit
-	&jz	(&label("mmx"));
-	&test	("edx",1<<1);		# check PCLMULQDQ bit
-	&jz	(&label("mmx"));
-
-	&movups		("xmm0",&QWP(8,"esp"));
-	&shufps		("xmm0","xmm0",0b10110001);
-	&pclmulqdq	("xmm0","xmm0",1);
-	&mov		("eax",&DWP(4,"esp"));
-	&movups		(&QWP(0,"eax"),"xmm0");
-	&ret	();
-
-&set_label("mmx",16);
-}
-	&push	("ebp");
-	&push	("ebx");
-	&push	("esi");
-	&push	("edi");
-	&mov	($a,&wparam(1));
-	&mov	($b,&wparam(3));
-	&call	("_mul_1x1_mmx");	# a1·b1
-	&movq	("mm7",$R);
-
-	&mov	($a,&wparam(2));
-	&mov	($b,&wparam(4));
-	&call	("_mul_1x1_mmx");	# a0·b0
-	&movq	("mm6",$R);
-
-	&mov	($a,&wparam(1));
-	&mov	($b,&wparam(3));
-	&xor	($a,&wparam(2));
-	&xor	($b,&wparam(4));
-	&call	("_mul_1x1_mmx");	# (a0+a1)·(b0+b1)
-	&pxor	($R,"mm7");
-	&mov	($a,&wparam(0));
-	&pxor	($R,"mm6");		# (a0+a1)·(b0+b1)-a1·b1-a0·b0
-
-	&movq	($A,$R);
-	&psllq	($R,32);
-	&pop	("edi");
-	&psrlq	($A,32);
-	&pop	("esi");
-	&pxor	($R,"mm6");
-	&pop	("ebx");
-	&pxor	($A,"mm7");
-	&movq	(&QWP(0,$a),$R);
-	&pop	("ebp");
-	&movq	(&QWP(8,$a),$A);
-	&emms	();
-	&ret	();
-&set_label("ialu",16);
-}
-	&push	("ebp");
-	&push	("ebx");
-	&push	("esi");
-	&push	("edi");
-	&stack_push(4+1);
-
-	&mov	($a,&wparam(1));
-	&mov	($b,&wparam(3));
-	&call	("_mul_1x1_ialu");	# a1·b1
-	&mov	(&DWP(8,"esp"),$lo);
-	&mov	(&DWP(12,"esp"),$hi);
-
-	&mov	($a,&wparam(2));
-	&mov	($b,&wparam(4));
-	&call	("_mul_1x1_ialu");	# a0·b0
-	&mov	(&DWP(0,"esp"),$lo);
-	&mov	(&DWP(4,"esp"),$hi);
-
-	&mov	($a,&wparam(1));
-	&mov	($b,&wparam(3));
-	&xor	($a,&wparam(2));
-	&xor	($b,&wparam(4));
-	&call	("_mul_1x1_ialu");	# (a0+a1)·(b0+b1)
-
-	&mov	("ebp",&wparam(0));
-		 @r=("ebx","ecx","edi","esi");
-	&mov	(@r[0],&DWP(0,"esp"));
-	&mov	(@r[1],&DWP(4,"esp"));
-	&mov	(@r[2],&DWP(8,"esp"));
-	&mov	(@r[3],&DWP(12,"esp"));
-
-	&xor	($lo,$hi);
-	&xor	($hi,@r[1]);
-	&xor	($lo,@r[0]);
-	&mov	(&DWP(0,"ebp"),@r[0]);
-	&xor	($hi,@r[2]);
-	&mov	(&DWP(12,"ebp"),@r[3]);
-	&xor	($lo,@r[3]);
-	&stack_pop(4+1);
-	&xor	($hi,@r[3]);
-	&pop	("edi");
-	&xor	($lo,$hi);
-	&pop	("esi");
-	&mov	(&DWP(8,"ebp"),$hi);
-	&pop	("ebx");
-	&mov	(&DWP(4,"ebp"),$lo);
-	&pop	("ebp");
-	&ret	();
-&function_end_B("bn_GF2m_mul_2x2");
-
-&asciz	("GF(2^m) Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");
-
-&asm_finish();

+ 0 - 593
drivers/builtin_openssl2/crypto/bn/asm/x86-mont.pl

@@ -1,593 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# October 2005
-#
-# This is a "teaser" code, as it can be improved in several ways...
-# First of all non-SSE2 path should be implemented (yes, for now it
-# performs Montgomery multiplication/convolution only on SSE2-capable
-# CPUs such as P4, others fall down to original code). Then inner loop
-# can be unrolled and modulo-scheduled to improve ILP and possibly
-# moved to 128-bit XMM register bank (though it would require input
-# rearrangement and/or increase bus bandwidth utilization). Dedicated
-# squaring procedure should give further performance improvement...
-# Yet, for being draft, the code improves rsa512 *sign* benchmark by
-# 110%(!), rsa1024 one - by 70% and rsa4096 - by 20%:-)
-
-# December 2006
-#
-# Modulo-scheduling the SSE2 loops results in a further 15-20%
-# improvement. The integer-only code [being equipped with a dedicated
-# squaring procedure] gives ~40% on the rsa512 sign benchmark...
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],$0);
-
-$sse2=0;
-for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
-
-&external_label("OPENSSL_ia32cap_P") if ($sse2);
-
-&function_begin("bn_mul_mont");
-
-$i="edx";
-$j="ecx";
-$ap="esi";	$tp="esi";		# overlapping variables!!!
-$rp="edi";	$bp="edi";		# overlapping variables!!!
-$np="ebp";
-$num="ebx";
-
-$_num=&DWP(4*0,"esp");			# stack top layout
-$_rp=&DWP(4*1,"esp");
-$_ap=&DWP(4*2,"esp");
-$_bp=&DWP(4*3,"esp");
-$_np=&DWP(4*4,"esp");
-$_n0=&DWP(4*5,"esp");	$_n0q=&QWP(4*5,"esp");
-$_sp=&DWP(4*6,"esp");
-$_bpend=&DWP(4*7,"esp");
-$frame=32;				# size of above frame rounded up to 16n
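
For orientation, the eight-dword bookkeeping block declared above can be pictured as the following hypothetical C struct (the real code addresses it through the &DWP offsets; pointer fields are 32-bit because this is ia32 code):

    #include <stdint.h>

    /* Hypothetical mirror of the 32-byte stack-top block above
     * ($frame == sizeof(struct mont_frame)); the tp[] vector lives
     * right after it on the stack. */
    struct mont_frame {
        uint32_t num;     /* 4*0: loop bound                        */
        uint32_t rp;      /* 4*1: BN_ULONG *rp                      */
        uint32_t ap;      /* 4*2: const BN_ULONG *ap                */
        uint32_t bp;      /* 4*3: const BN_ULONG *bp                */
        uint32_t np;      /* 4*4: const BN_ULONG *np                */
        uint32_t n0;      /* 4*5: n0[0]; $_n0q reads a qword here,
                             but pmuludq only uses its low dword    */
        uint32_t sp;      /* 4*6: saved stack pointer               */
        uint32_t bpend;   /* 4*7: &bp[num] loop sentinel            */
    };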
-
-	&xor	("eax","eax");
-	&mov	("edi",&wparam(5));	# int num
-	&cmp	("edi",4);
-	&jl	(&label("just_leave"));
-
-	&lea	("esi",&wparam(0));	# put aside pointer to argument block
-	&lea	("edx",&wparam(1));	# load ap
-	&mov	("ebp","esp");		# saved stack pointer!
-	&add	("edi",2);		# extra two words on top of tp
-	&neg	("edi");
-	&lea	("esp",&DWP(-$frame,"esp","edi",4));	# alloca($frame+4*(num+2))
-	&neg	("edi");
-
-	# minimize cache contention by arranging a 2K window between the
-	# stack pointer and the ap argument [np is also a position-
-	# sensitive vector, but it's assumed to be near ap, as it's
-	# allocated at ~the same time].
-	&mov	("eax","esp");
-	&sub	("eax","edx");
-	&and	("eax",2047);
-	&sub	("esp","eax");		# this aligns sp and ap modulo 2048
-
-	&xor	("edx","esp");
-	&and	("edx",2048);
-	&xor	("edx",2048);
-	&sub	("esp","edx");		# this splits them apart modulo 4096
-
-	&and	("esp",-64);		# align to cache line
-
-	################################# load argument block...
-	&mov	("eax",&DWP(0*4,"esi"));# BN_ULONG *rp
-	&mov	("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap
-	&mov	("ecx",&DWP(2*4,"esi"));# const BN_ULONG *bp
-	&mov	("edx",&DWP(3*4,"esi"));# const BN_ULONG *np
-	&mov	("esi",&DWP(4*4,"esi"));# const BN_ULONG *n0
-	#&mov	("edi",&DWP(5*4,"esi"));# int num
-
-	&mov	("esi",&DWP(0,"esi"));	# pull n0[0]
-	&mov	($_rp,"eax");		# ... save a copy of argument block
-	&mov	($_ap,"ebx");
-	&mov	($_bp,"ecx");
-	&mov	($_np,"edx");
-	&mov	($_n0,"esi");
-	&lea	($num,&DWP(-3,"edi"));	# num=num-1 to assist modulo-scheduling
-	#&mov	($_num,$num);		# redundant as $num is not reused
-	&mov	($_sp,"ebp");		# saved stack pointer!
-
-if($sse2) {
-$acc0="mm0";	# mmx register bank layout
-$acc1="mm1";
-$car0="mm2";
-$car1="mm3";
-$mul0="mm4";
-$mul1="mm5";
-$temp="mm6";
-$mask="mm7";
-
-	&picmeup("eax","OPENSSL_ia32cap_P");
-	&bt	(&DWP(0,"eax"),26);
-	&jnc	(&label("non_sse2"));
-
-	&mov	("eax",-1);
-	&movd	($mask,"eax");		# mask 32 lower bits
-
-	&mov	($ap,$_ap);		# load input pointers
-	&mov	($bp,$_bp);
-	&mov	($np,$_np);
-
-	&xor	($i,$i);		# i=0
-	&xor	($j,$j);		# j=0
-
-	&movd	($mul0,&DWP(0,$bp));		# bp[0]
-	&movd	($mul1,&DWP(0,$ap));		# ap[0]
-	&movd	($car1,&DWP(0,$np));		# np[0]
-
-	&pmuludq($mul1,$mul0);			# ap[0]*bp[0]
-	&movq	($car0,$mul1);
-	&movq	($acc0,$mul1);			# I wish movd worked for
-	&pand	($acc0,$mask);			# inter-register transfers
-
-	&pmuludq($mul1,$_n0q);			# *=n0
-
-	&pmuludq($car1,$mul1);			# "t[0]"*np[0]*n0
-	&paddq	($car1,$acc0);
-
-	&movd	($acc1,&DWP(4,$np));		# np[1]
-	&movd	($acc0,&DWP(4,$ap));		# ap[1]
-
-	&psrlq	($car0,32);
-	&psrlq	($car1,32);
-
-	&inc	($j);				# j++
-&set_label("1st",16);
-	&pmuludq($acc0,$mul0);			# ap[j]*bp[0]
-	&pmuludq($acc1,$mul1);			# np[j]*m1
-	&paddq	($car0,$acc0);			# +=c0
-	&paddq	($car1,$acc1);			# +=c1
-
-	&movq	($acc0,$car0);
-	&pand	($acc0,$mask);
-	&movd	($acc1,&DWP(4,$np,$j,4));	# np[j+1]
-	&paddq	($car1,$acc0);			# +=ap[j]*bp[0];
-	&movd	($acc0,&DWP(4,$ap,$j,4));	# ap[j+1]
-	&psrlq	($car0,32);
-	&movd	(&DWP($frame-4,"esp",$j,4),$car1);	# tp[j-1]=
-	&psrlq	($car1,32);
-
-	&lea	($j,&DWP(1,$j));
-	&cmp	($j,$num);
-	&jl	(&label("1st"));
-
-	&pmuludq($acc0,$mul0);			# ap[num-1]*bp[0]
-	&pmuludq($acc1,$mul1);			# np[num-1]*m1
-	&paddq	($car0,$acc0);			# +=c0
-	&paddq	($car1,$acc1);			# +=c1
-
-	&movq	($acc0,$car0);
-	&pand	($acc0,$mask);
-	&paddq	($car1,$acc0);			# +=ap[num-1]*bp[0];
-	&movd	(&DWP($frame-4,"esp",$j,4),$car1);	# tp[num-2]=
-
-	&psrlq	($car0,32);
-	&psrlq	($car1,32);
-
-	&paddq	($car1,$car0);
-	&movq	(&QWP($frame,"esp",$num,4),$car1);	# tp[num].tp[num-1]
-
-	&inc	($i);				# i++
-&set_label("outer");
-	&xor	($j,$j);			# j=0
-
-	&movd	($mul0,&DWP(0,$bp,$i,4));	# bp[i]
-	&movd	($mul1,&DWP(0,$ap));		# ap[0]
-	&movd	($temp,&DWP($frame,"esp"));	# tp[0]
-	&movd	($car1,&DWP(0,$np));		# np[0]
-	&pmuludq($mul1,$mul0);			# ap[0]*bp[i]
-
-	&paddq	($mul1,$temp);			# +=tp[0]
-	&movq	($acc0,$mul1);
-	&movq	($car0,$mul1);
-	&pand	($acc0,$mask);
-
-	&pmuludq($mul1,$_n0q);			# *=n0
-
-	&pmuludq($car1,$mul1);
-	&paddq	($car1,$acc0);
-
-	&movd	($temp,&DWP($frame+4,"esp"));	# tp[1]
-	&movd	($acc1,&DWP(4,$np));		# np[1]
-	&movd	($acc0,&DWP(4,$ap));		# ap[1]
-
-	&psrlq	($car0,32);
-	&psrlq	($car1,32);
-	&paddq	($car0,$temp);			# +=tp[1]
-
-	&inc	($j);				# j++
-	&dec	($num);
-&set_label("inner");
-	&pmuludq($acc0,$mul0);			# ap[j]*bp[i]
-	&pmuludq($acc1,$mul1);			# np[j]*m1
-	&paddq	($car0,$acc0);			# +=c0
-	&paddq	($car1,$acc1);			# +=c1
-
-	&movq	($acc0,$car0);
-	&movd	($temp,&DWP($frame+4,"esp",$j,4));# tp[j+1]
-	&pand	($acc0,$mask);
-	&movd	($acc1,&DWP(4,$np,$j,4));	# np[j+1]
-	&paddq	($car1,$acc0);			# +=ap[j]*bp[i]+tp[j]
-	&movd	($acc0,&DWP(4,$ap,$j,4));	# ap[j+1]
-	&psrlq	($car0,32);
-	&movd	(&DWP($frame-4,"esp",$j,4),$car1);# tp[j-1]=
-	&psrlq	($car1,32);
-	&paddq	($car0,$temp);			# +=tp[j+1]
-
-	&dec	($num);
-	&lea	($j,&DWP(1,$j));		# j++
-	&jnz	(&label("inner"));
-
-	&mov	($num,$j);
-	&pmuludq($acc0,$mul0);			# ap[num-1]*bp[i]
-	&pmuludq($acc1,$mul1);			# np[num-1]*m1
-	&paddq	($car0,$acc0);			# +=c0
-	&paddq	($car1,$acc1);			# +=c1
-
-	&movq	($acc0,$car0);
-	&pand	($acc0,$mask);
-	&paddq	($car1,$acc0);			# +=ap[num-1]*bp[i]+tp[num-1]
-	&movd	(&DWP($frame-4,"esp",$j,4),$car1);	# tp[num-2]=
-	&psrlq	($car0,32);
-	&psrlq	($car1,32);
-
-	&movd	($temp,&DWP($frame+4,"esp",$num,4));	# += tp[num]
-	&paddq	($car1,$car0);
-	&paddq	($car1,$temp);
-	&movq	(&QWP($frame,"esp",$num,4),$car1);	# tp[num].tp[num-1]
-
-	&lea	($i,&DWP(1,$i));		# i++
-	&cmp	($i,$num);
-	&jle	(&label("outer"));
-
-	&emms	();				# done with mmx bank
-	&jmp	(&label("common_tail"));
-
-&set_label("non_sse2",16);
-}
-
-if (0) {
-	&mov	("esp",$_sp);
-	&xor	("eax","eax");	# signal "not fast enough [yet]"
-	&jmp	(&label("just_leave"));
-	# While the code below provides competitive performance for
-	# all key lengths on modern Intel cores, it's still more
-	# than 10% slower for 4096-bit keys elsewhere:-( "Competitive"
-	# means compared to the original integer-only assembler.
-	# 512-bit RSA sign is better by ~40%, but that's about all
-	# one can say about all CPUs...
-} else {
-$inp="esi";	# integer path uses these registers differently
-$word="edi";
-$carry="ebp";
-
-	&mov	($inp,$_ap);
-	&lea	($carry,&DWP(1,$num));
-	&mov	($word,$_bp);
-	&xor	($j,$j);				# j=0
-	&mov	("edx",$inp);
-	&and	($carry,1);				# see if num is even
-	&sub	("edx",$word);				# see if ap==bp
-	&lea	("eax",&DWP(4,$word,$num,4));		# &bp[num]
-	&or	($carry,"edx");
-	&mov	($word,&DWP(0,$word));			# bp[0]
-	&jz	(&label("bn_sqr_mont"));
-	&mov	($_bpend,"eax");
-	&mov	("eax",&DWP(0,$inp));
-	&xor	("edx","edx");
-
-&set_label("mull",16);
-	&mov	($carry,"edx");
-	&mul	($word);				# ap[j]*bp[0]
-	&add	($carry,"eax");
-	&lea	($j,&DWP(1,$j));
-	&adc	("edx",0);
-	&mov	("eax",&DWP(0,$inp,$j,4));		# ap[j+1]
-	&cmp	($j,$num);
-	&mov	(&DWP($frame-4,"esp",$j,4),$carry);	# tp[j]=
-	&jl	(&label("mull"));
-
-	&mov	($carry,"edx");
-	&mul	($word);				# ap[num-1]*bp[0]
-	 &mov	($word,$_n0);
-	&add	("eax",$carry);
-	 &mov	($inp,$_np);
-	&adc	("edx",0);
-	 &imul	($word,&DWP($frame,"esp"));		# n0*tp[0]
-
-	&mov	(&DWP($frame,"esp",$num,4),"eax");	# tp[num-1]=
-	&xor	($j,$j);
-	&mov	(&DWP($frame+4,"esp",$num,4),"edx");	# tp[num]=
-	&mov	(&DWP($frame+8,"esp",$num,4),$j);	# tp[num+1]=
-
-	&mov	("eax",&DWP(0,$inp));			# np[0]
-	&mul	($word);				# np[0]*m
-	&add	("eax",&DWP($frame,"esp"));		# +=tp[0]
-	&mov	("eax",&DWP(4,$inp));			# np[1]
-	&adc	("edx",0);
-	&inc	($j);
-
-	&jmp	(&label("2ndmadd"));
-
-&set_label("1stmadd",16);
-	&mov	($carry,"edx");
-	&mul	($word);				# ap[j]*bp[i]
-	&add	($carry,&DWP($frame,"esp",$j,4));	# +=tp[j]
-	&lea	($j,&DWP(1,$j));
-	&adc	("edx",0);
-	&add	($carry,"eax");
-	&mov	("eax",&DWP(0,$inp,$j,4));		# ap[j+1]
-	&adc	("edx",0);
-	&cmp	($j,$num);
-	&mov	(&DWP($frame-4,"esp",$j,4),$carry);	# tp[j]=
-	&jl	(&label("1stmadd"));
-
-	&mov	($carry,"edx");
-	&mul	($word);				# ap[num-1]*bp[i]
-	&add	("eax",&DWP($frame,"esp",$num,4));	# +=tp[num-1]
-	 &mov	($word,$_n0);
-	&adc	("edx",0);
-	 &mov	($inp,$_np);
-	&add	($carry,"eax");
-	&adc	("edx",0);
-	 &imul	($word,&DWP($frame,"esp"));		# n0*tp[0]
-
-	&xor	($j,$j);
-	&add	("edx",&DWP($frame+4,"esp",$num,4));	# carry+=tp[num]
-	&mov	(&DWP($frame,"esp",$num,4),$carry);	# tp[num-1]=
-	&adc	($j,0);
-	 &mov	("eax",&DWP(0,$inp));			# np[0]
-	&mov	(&DWP($frame+4,"esp",$num,4),"edx");	# tp[num]=
-	&mov	(&DWP($frame+8,"esp",$num,4),$j);	# tp[num+1]=
-
-	&mul	($word);				# np[0]*m
-	&add	("eax",&DWP($frame,"esp"));		# +=tp[0]
-	&mov	("eax",&DWP(4,$inp));			# np[1]
-	&adc	("edx",0);
-	&mov	($j,1);
-
-&set_label("2ndmadd",16);
-	&mov	($carry,"edx");
-	&mul	($word);				# np[j]*m
-	&add	($carry,&DWP($frame,"esp",$j,4));	# +=tp[j]
-	&lea	($j,&DWP(1,$j));
-	&adc	("edx",0);
-	&add	($carry,"eax");
-	&mov	("eax",&DWP(0,$inp,$j,4));		# np[j+1]
-	&adc	("edx",0);
-	&cmp	($j,$num);
-	&mov	(&DWP($frame-8,"esp",$j,4),$carry);	# tp[j-1]=
-	&jl	(&label("2ndmadd"));
-
-	&mov	($carry,"edx");
-	&mul	($word);				# np[j]*m
-	&add	($carry,&DWP($frame,"esp",$num,4));	# +=tp[num-1]
-	&adc	("edx",0);
-	&add	($carry,"eax");
-	&adc	("edx",0);
-	&mov	(&DWP($frame-4,"esp",$num,4),$carry);	# tp[num-2]=
-
-	&xor	("eax","eax");
-	 &mov	($j,$_bp);				# &bp[i]
-	&add	("edx",&DWP($frame+4,"esp",$num,4));	# carry+=tp[num]
-	&adc	("eax",&DWP($frame+8,"esp",$num,4));	# +=tp[num+1]
-	 &lea	($j,&DWP(4,$j));
-	&mov	(&DWP($frame,"esp",$num,4),"edx");	# tp[num-1]=
-	 &cmp	($j,$_bpend);
-	&mov	(&DWP($frame+4,"esp",$num,4),"eax");	# tp[num]=
-	&je	(&label("common_tail"));
-
-	&mov	($word,&DWP(0,$j));			# bp[i+1]
-	&mov	($inp,$_ap);
-	&mov	($_bp,$j);				# &bp[++i]
-	&xor	($j,$j);
-	&xor	("edx","edx");
-	&mov	("eax",&DWP(0,$inp));
-	&jmp	(&label("1stmadd"));
-
-&set_label("bn_sqr_mont",16);
-$sbit=$num;
-	&mov	($_num,$num);
-	&mov	($_bp,$j);				# i=0
-
-	&mov	("eax",$word);				# ap[0]
-	&mul	($word);				# ap[0]*ap[0]
-	&mov	(&DWP($frame,"esp"),"eax");		# tp[0]=
-	&mov	($sbit,"edx");
-	&shr	("edx",1);
-	&and	($sbit,1);
-	&inc	($j);
-&set_label("sqr",16);
-	&mov	("eax",&DWP(0,$inp,$j,4));		# ap[j]
-	&mov	($carry,"edx");
-	&mul	($word);				# ap[j]*ap[0]
-	&add	("eax",$carry);
-	&lea	($j,&DWP(1,$j));
-	&adc	("edx",0);
-	&lea	($carry,&DWP(0,$sbit,"eax",2));
-	&shr	("eax",31);
-	&cmp	($j,$_num);
-	&mov	($sbit,"eax");
-	&mov	(&DWP($frame-4,"esp",$j,4),$carry);	# tp[j]=
-	&jl	(&label("sqr"));
-
-	&mov	("eax",&DWP(0,$inp,$j,4));		# ap[num-1]
-	&mov	($carry,"edx");
-	&mul	($word);				# ap[num-1]*ap[0]
-	&add	("eax",$carry);
-	 &mov	($word,$_n0);
-	&adc	("edx",0);
-	 &mov	($inp,$_np);
-	&lea	($carry,&DWP(0,$sbit,"eax",2));
-	 &imul	($word,&DWP($frame,"esp"));		# n0*tp[0]
-	&shr	("eax",31);
-	&mov	(&DWP($frame,"esp",$j,4),$carry);	# tp[num-1]=
-
-	&lea	($carry,&DWP(0,"eax","edx",2));
-	 &mov	("eax",&DWP(0,$inp));			# np[0]
-	&shr	("edx",31);
-	&mov	(&DWP($frame+4,"esp",$j,4),$carry);	# tp[num]=
-	&mov	(&DWP($frame+8,"esp",$j,4),"edx");	# tp[num+1]=
-
-	&mul	($word);				# np[0]*m
-	&add	("eax",&DWP($frame,"esp"));		# +=tp[0]
-	&mov	($num,$j);
-	&adc	("edx",0);
-	&mov	("eax",&DWP(4,$inp));			# np[1]
-	&mov	($j,1);
-
-&set_label("3rdmadd",16);
-	&mov	($carry,"edx");
-	&mul	($word);				# np[j]*m
-	&add	($carry,&DWP($frame,"esp",$j,4));	# +=tp[j]
-	&adc	("edx",0);
-	&add	($carry,"eax");
-	&mov	("eax",&DWP(4,$inp,$j,4));		# np[j+1]
-	&adc	("edx",0);
-	&mov	(&DWP($frame-4,"esp",$j,4),$carry);	# tp[j-1]=
-
-	&mov	($carry,"edx");
-	&mul	($word);				# np[j+1]*m
-	&add	($carry,&DWP($frame+4,"esp",$j,4));	# +=tp[j+1]
-	&lea	($j,&DWP(2,$j));
-	&adc	("edx",0);
-	&add	($carry,"eax");
-	&mov	("eax",&DWP(0,$inp,$j,4));		# np[j+2]
-	&adc	("edx",0);
-	&cmp	($j,$num);
-	&mov	(&DWP($frame-8,"esp",$j,4),$carry);	# tp[j]=
-	&jl	(&label("3rdmadd"));
-
-	&mov	($carry,"edx");
-	&mul	($word);				# np[j]*m
-	&add	($carry,&DWP($frame,"esp",$num,4));	# +=tp[num-1]
-	&adc	("edx",0);
-	&add	($carry,"eax");
-	&adc	("edx",0);
-	&mov	(&DWP($frame-4,"esp",$num,4),$carry);	# tp[num-2]=
-
-	&mov	($j,$_bp);				# i
-	&xor	("eax","eax");
-	&mov	($inp,$_ap);
-	&add	("edx",&DWP($frame+4,"esp",$num,4));	# carry+=tp[num]
-	&adc	("eax",&DWP($frame+8,"esp",$num,4));	# +=tp[num+1]
-	&mov	(&DWP($frame,"esp",$num,4),"edx");	# tp[num-1]=
-	&cmp	($j,$num);
-	&mov	(&DWP($frame+4,"esp",$num,4),"eax");	# tp[num]=
-	&je	(&label("common_tail"));
-
-	&mov	($word,&DWP(4,$inp,$j,4));		# ap[i]
-	&lea	($j,&DWP(1,$j));
-	&mov	("eax",$word);
-	&mov	($_bp,$j);				# ++i
-	&mul	($word);				# ap[i]*ap[i]
-	&add	("eax",&DWP($frame,"esp",$j,4));	# +=tp[i]
-	&adc	("edx",0);
-	&mov	(&DWP($frame,"esp",$j,4),"eax");	# tp[i]=
-	&xor	($carry,$carry);
-	&cmp	($j,$num);
-	&lea	($j,&DWP(1,$j));
-	&je	(&label("sqrlast"));
-
-	&mov	($sbit,"edx");				# zaps $num
-	&shr	("edx",1);
-	&and	($sbit,1);
-&set_label("sqradd",16);
-	&mov	("eax",&DWP(0,$inp,$j,4));		# ap[j]
-	&mov	($carry,"edx");
-	&mul	($word);				# ap[j]*ap[i]
-	&add	("eax",$carry);
-	&lea	($carry,&DWP(0,"eax","eax"));
-	&adc	("edx",0);
-	&shr	("eax",31);
-	&add	($carry,&DWP($frame,"esp",$j,4));	# +=tp[j]
-	&lea	($j,&DWP(1,$j));
-	&adc	("eax",0);
-	&add	($carry,$sbit);
-	&adc	("eax",0);
-	&cmp	($j,$_num);
-	&mov	(&DWP($frame-4,"esp",$j,4),$carry);	# tp[j]=
-	&mov	($sbit,"eax");
-	&jle	(&label("sqradd"));
-
-	&mov	($carry,"edx");
-	&add	("edx","edx");
-	&shr	($carry,31);
-	&add	("edx",$sbit);
-	&adc	($carry,0);
-&set_label("sqrlast");
-	&mov	($word,$_n0);
-	&mov	($inp,$_np);
-	&imul	($word,&DWP($frame,"esp"));		# n0*tp[0]
-
-	&add	("edx",&DWP($frame,"esp",$j,4));	# +=tp[num]
-	&mov	("eax",&DWP(0,$inp));			# np[0]
-	&adc	($carry,0);
-	&mov	(&DWP($frame,"esp",$j,4),"edx");	# tp[num]=
-	&mov	(&DWP($frame+4,"esp",$j,4),$carry);	# tp[num+1]=
-
-	&mul	($word);				# np[0]*m
-	&add	("eax",&DWP($frame,"esp"));		# +=tp[0]
-	&lea	($num,&DWP(-1,$j));
-	&adc	("edx",0);
-	&mov	($j,1);
-	&mov	("eax",&DWP(4,$inp));			# np[1]
-
-	&jmp	(&label("3rdmadd"));
-}
-
-&set_label("common_tail",16);
-	&mov	($np,$_np);			# load modulus pointer
-	&mov	($rp,$_rp);			# load result pointer
-	&lea	($tp,&DWP($frame,"esp"));	# [$ap and $bp are zapped]
-
-	&mov	("eax",&DWP(0,$tp));		# tp[0]
-	&mov	($j,$num);			# j=num-1
-	&xor	($i,$i);			# i=0 and clear CF!
-
-&set_label("sub",16);
-	&sbb	("eax",&DWP(0,$np,$i,4));
-	&mov	(&DWP(0,$rp,$i,4),"eax");	# rp[i]=tp[i]-np[i]
-	&dec	($j);				# doesn't affect CF!
-	&mov	("eax",&DWP(4,$tp,$i,4));	# tp[i+1]
-	&lea	($i,&DWP(1,$i));		# i++
-	&jge	(&label("sub"));
-
-	&sbb	("eax",0);			# handle upmost overflow bit
-	&and	($tp,"eax");
-	&not	("eax");
-	&mov	($np,$rp);
-	&and	($np,"eax");
-	&or	($tp,$np);			# tp=carry?tp:rp
-
-&set_label("copy",16);				# copy or in-place refresh
-	&mov	("eax",&DWP(0,$tp,$num,4));
-	&mov	(&DWP(0,$rp,$num,4),"eax");	# rp[i]=tp[i]
-	&mov	(&DWP($frame,"esp",$num,4),$j);	# zap temporary vector
-	&dec	($num);
-	&jge	(&label("copy"));
-
-	&mov	("esp",$_sp);		# pull saved stack pointer
-	&mov	("eax",1);
-&set_label("just_leave");
-&function_end("bn_mul_mont");
-
-&asciz("Montgomery Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");
-
-&asm_finish();

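Note on the common_tail block above: after the interleaved multiply/reduce loops, the final Montgomery step subtracts the modulus from the temporary vector and then picks between the unreduced tp and the difference rp without branching, by turning the borrow into an all-ones/all-zero mask (the sbb/and/not/or sequence). A minimal C sketch of that branch-free select, assuming 32-bit words and a borrow already computed; mont_select is an illustrative name, not OpenSSL API:

/* Sketch of the branch-free select done in common_tail: after the
 * tp - np subtraction, `borrow` is 1 (keep tp) or 0 (keep the
 * difference already stored in rp). */
typedef unsigned int BN_ULONG;           /* 32-bit words, as in this file */

static void mont_select(BN_ULONG *rp, const BN_ULONG *tp,
                        BN_ULONG borrow, int num)
{
    BN_ULONG mask = 0 - borrow;          /* all-ones iff borrow set */
    for (int i = 0; i < num; i++)        /* rp[i] = borrow ? tp[i] : rp[i] */
        rp[i] = (tp[i] & mask) | (rp[i] & ~mask);
}

Doing the select with a mask instead of a branch keeps the memory access pattern independent of the secret-dependent borrow, which is why the assembly takes this shape.
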
+ 0 - 28
drivers/builtin_openssl2/crypto/bn/asm/x86.pl

@@ -1,28 +0,0 @@
-#!/usr/local/bin/perl
-
-push(@INC,"perlasm","../../perlasm");
-require "x86asm.pl";
-
-require("x86/mul_add.pl");
-require("x86/mul.pl");
-require("x86/sqr.pl");
-require("x86/div.pl");
-require("x86/add.pl");
-require("x86/sub.pl");
-require("x86/comba.pl");
-
-&asm_init($ARGV[0],$0);
-
-&bn_mul_add_words("bn_mul_add_words");
-&bn_mul_words("bn_mul_words");
-&bn_sqr_words("bn_sqr_words");
-&bn_div_words("bn_div_words");
-&bn_add_words("bn_add_words");
-&bn_sub_words("bn_sub_words");
-&bn_mul_comba("bn_mul_comba8",8);
-&bn_mul_comba("bn_mul_comba4",4);
-&bn_sqr_comba("bn_sqr_comba8",8);
-&bn_sqr_comba("bn_sqr_comba4",4);
-
-&asm_finish();
-

+ 0 - 76
drivers/builtin_openssl2/crypto/bn/asm/x86/add.pl

@@ -1,76 +0,0 @@
-#!/usr/local/bin/perl
-# x86 assembler
-
-sub bn_add_words
-	{
-	local($name)=@_;
-
-	&function_begin($name,"");
-
-	&comment("");
-	$a="esi";
-	$b="edi";
-	$c="eax";
-	$r="ebx";
-	$tmp1="ecx";
-	$tmp2="edx";
-	$num="ebp";
-
-	&mov($r,&wparam(0));	# get r
-	 &mov($a,&wparam(1));	# get a
-	&mov($b,&wparam(2));	# get b
-	 &mov($num,&wparam(3));	# get num
-	&xor($c,$c);		# clear carry
-	 &and($num,0xfffffff8);	# num / 8
-
-	&jz(&label("aw_finish"));
-
-	&set_label("aw_loop",0);
-	for ($i=0; $i<8; $i++)
-		{
-		&comment("Round $i");
-
-		&mov($tmp1,&DWP($i*4,$a,"",0)); 	# *a
-		 &mov($tmp2,&DWP($i*4,$b,"",0)); 	# *b
-		&add($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &add($tmp1,$tmp2);
-		&adc($c,0);
-		 &mov(&DWP($i*4,$r,"",0),$tmp1); 	# *r
-		}
-
-	&comment("");
-	&add($a,32);
-	 &add($b,32);
-	&add($r,32);
-	 &sub($num,8);
-	&jnz(&label("aw_loop"));
-
-	&set_label("aw_finish",0);
-	&mov($num,&wparam(3));	# get num
-	&and($num,7);
-	 &jz(&label("aw_end"));
-
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
-		 &mov($tmp2,&DWP($i*4,$b,"",0));# *b
-		&add($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &add($tmp1,$tmp2);
-		&adc($c,0);
-		 &dec($num) if ($i != 6);
-		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
-		 &jz(&label("aw_end")) if ($i != 6);
-		}
-	&set_label("aw_end",0);
-
-#	&mov("eax",$c);		# $c is "eax"
-
-	&function_end($name);
-	}
-
-1;

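For reference, the 8-way unrolled add-with-carry loop that the removed add.pl generated computes the following word-level recurrence. This portable C sketch is not the OpenSSL source, only its contract (r[] = a[] + b[], returning the final carry); the _ref suffix marks it as illustrative:

typedef unsigned int BN_ULONG;

BN_ULONG bn_add_words_ref(BN_ULONG *r, const BN_ULONG *a,
                          const BN_ULONG *b, int n)
{
    BN_ULONG c = 0;                 /* carry, always 0 or 1 */
    for (int i = 0; i < n; i++) {
        BN_ULONG t = a[i] + c;
        c = (t < c);                /* carry out of a[i]+c */
        t += b[i];
        c += (t < b[i]);            /* carry out of the second add */
        r[i] = t;
    }
    return c;
}
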
+ 0 - 277
drivers/builtin_openssl2/crypto/bn/asm/x86/comba.pl

@@ -1,277 +0,0 @@
-#!/usr/local/bin/perl
-# x86 assembler
-
-sub mul_add_c
-	{
-	local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
-
-	# pos == -1 if eax and edx are pre-loaded, 0 to load from next
-	# words, and 1 if load return value
-
-	&comment("mul a[$ai]*b[$bi]");
-
-	# "eax" and "edx" will always be pre-loaded.
-	# &mov("eax",&DWP($ai*4,$a,"",0)) ;
-	# &mov("edx",&DWP($bi*4,$b,"",0));
-
-	&mul("edx");
-	&add($c0,"eax");
-	 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# laod next a
-	 &mov("eax",&wparam(0)) if $pos > 0;			# load r[]
-	 ###
-	&adc($c1,"edx");
-	 &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0;	# laod next b
-	 &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1;	# laod next b
-	 ###
-	&adc($c2,0);
-	 # is pos > 1, it means it is the last loop 
-	 &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0;		# save r[];
-	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;		# laod next a
-	}
-
-sub sqr_add_c
-	{
-	local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
-
-	# pos == -1 if eax and edx are pre-loaded, 0 to load from next
-	# words, and 1 if load return value
-
-	&comment("sqr a[$ai]*a[$bi]");
-
-	# "eax" and "edx" will always be pre-loaded.
-	# &mov("eax",&DWP($ai*4,$a,"",0)) ;
-	# &mov("edx",&DWP($bi*4,$b,"",0));
-
-	if ($ai == $bi)
-		{ &mul("eax");}
-	else
-		{ &mul("edx");}
-	&add($c0,"eax");
-	 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
-	 ###
-	&adc($c1,"edx");
-	 &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb);
-	 ###
-	&adc($c2,0);
-	 # if pos > 1, it means it is the last loop 
-	 &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0;		# save r[];
-	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;		# load next b
-	}
-
-sub sqr_add_c2
-	{
-	local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
-
-	# pos == -1 if eax and edx are pre-loaded, 0 to load from next
-	# words, and 1 if load return value
-
-	&comment("sqr a[$ai]*a[$bi]");
-
-	# "eax" and "edx" will always be pre-loaded.
-	# &mov("eax",&DWP($ai*4,$a,"",0)) ;
-	# &mov("edx",&DWP($bi*4,$a,"",0));
-
-	if ($ai == $bi)
-		{ &mul("eax");}
-	else
-		{ &mul("edx");}
-	&add("eax","eax");
-	 ###
-	&adc("edx","edx");
-	 ###
-	&adc($c2,0);
-	 &add($c0,"eax");
-	&adc($c1,"edx");
-	 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
-	 &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next b
-	&adc($c2,0);
-	&mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0;		# save r[];
-	 &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb);
-	 ###
-	}
-
-sub bn_mul_comba
-	{
-	local($name,$num)=@_;
-	local($a,$b,$c0,$c1,$c2);
-	local($i,$as,$ae,$bs,$be,$ai,$bi);
-	local($tot,$end);
-
-	&function_begin_B($name,"");
-
-	$c0="ebx";
-	$c1="ecx";
-	$c2="ebp";
-	$a="esi";
-	$b="edi";
-	
-	$as=0;
-	$ae=0;
-	$bs=0;
-	$be=0;
-	$tot=$num+$num-1;
-
-	&push("esi");
-	 &mov($a,&wparam(1));
-	&push("edi");
-	 &mov($b,&wparam(2));
-	&push("ebp");
-	 &push("ebx");
-
-	&xor($c0,$c0);
-	 &mov("eax",&DWP(0,$a,"",0));	# load the first word 
-	&xor($c1,$c1);
-	 &mov("edx",&DWP(0,$b,"",0));	# load the first second 
-
-	for ($i=0; $i<$tot; $i++)
-		{
-		$ai=$as;
-		$bi=$bs;
-		$end=$be+1;
-
-		&comment("################## Calculate word $i"); 
-
-		for ($j=$bs; $j<$end; $j++)
-			{
-			&xor($c2,$c2) if ($j == $bs);
-			if (($j+1) == $end)
-				{
-				$v=1;
-				$v=2 if (($i+1) == $tot);
-				}
-			else
-				{ $v=0; }
-			if (($j+1) != $end)
-				{
-				$na=($ai-1);
-				$nb=($bi+1);
-				}
-			else
-				{
-				$na=$as+($i < ($num-1));
-				$nb=$bs+($i >= ($num-1));
-				}
-#printf STDERR "[$ai,$bi] -> [$na,$nb]\n";
-			&mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb);
-			if ($v)
-				{
-				&comment("saved r[$i]");
-				# &mov("eax",&wparam(0));
-				# &mov(&DWP($i*4,"eax","",0),$c0);
-				($c0,$c1,$c2)=($c1,$c2,$c0);
-				}
-			$ai--;
-			$bi++;
-			}
-		$as++ if ($i < ($num-1));
-		$ae++ if ($i >= ($num-1));
-
-		$bs++ if ($i >= ($num-1));
-		$be++ if ($i < ($num-1));
-		}
-	&comment("save r[$i]");
-	# &mov("eax",&wparam(0));
-	&mov(&DWP($i*4,"eax","",0),$c0);
-
-	&pop("ebx");
-	&pop("ebp");
-	&pop("edi");
-	&pop("esi");
-	&ret();
-	&function_end_B($name);
-	}
-
-sub bn_sqr_comba
-	{
-	local($name,$num)=@_;
-	local($r,$a,$c0,$c1,$c2)=@_;
-	local($i,$as,$ae,$bs,$be,$ai,$bi);
-	local($b,$tot,$end,$half);
-
-	&function_begin_B($name,"");
-
-	$c0="ebx";
-	$c1="ecx";
-	$c2="ebp";
-	$a="esi";
-	$r="edi";
-
-	&push("esi");
-	 &push("edi");
-	&push("ebp");
-	 &push("ebx");
-	&mov($r,&wparam(0));
-	 &mov($a,&wparam(1));
-	&xor($c0,$c0);
-	 &xor($c1,$c1);
-	&mov("eax",&DWP(0,$a,"",0)); # load the first word
-
-	$as=0;
-	$ae=0;
-	$bs=0;
-	$be=0;
-	$tot=$num+$num-1;
-
-	for ($i=0; $i<$tot; $i++)
-		{
-		$ai=$as;
-		$bi=$bs;
-		$end=$be+1;
-
-		&comment("############### Calculate word $i");
-		for ($j=$bs; $j<$end; $j++)
-			{
-			&xor($c2,$c2) if ($j == $bs);
-			if (($ai-1) < ($bi+1))
-				{
-				$v=1;
-				$v=2 if ($i+1) == $tot;
-				}
-			else
-				{ $v=0; }
-			if (!$v)
-				{
-				$na=$ai-1;
-				$nb=$bi+1;
-				}
-			else
-				{
-				$na=$as+($i < ($num-1));
-				$nb=$bs+($i >= ($num-1));
-				}
-			if ($ai == $bi)
-				{
-				&sqr_add_c($r,$a,$ai,$bi,
-					$c0,$c1,$c2,$v,$i,$na,$nb);
-				}
-			else
-				{
-				&sqr_add_c2($r,$a,$ai,$bi,
-					$c0,$c1,$c2,$v,$i,$na,$nb);
-				}
-			if ($v)
-				{
-				&comment("saved r[$i]");
-				#&mov(&DWP($i*4,$r,"",0),$c0);
-				($c0,$c1,$c2)=($c1,$c2,$c0);
-				last;
-				}
-			$ai--;
-			$bi++;
-			}
-		$as++ if ($i < ($num-1));
-		$ae++ if ($i >= ($num-1));
-
-		$bs++ if ($i >= ($num-1));
-		$be++ if ($i < ($num-1));
-		}
-	&mov(&DWP($i*4,$r,"",0),$c0);
-	&pop("ebx");
-	&pop("ebp");
-	&pop("edi");
-	&pop("esi");
-	&ret();
-	&function_end_B($name);
-	}
-
-1;

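The mul_add_c/sqr_add_c helpers above keep a three-word column accumulator (c0, c1, c2): each partial product is added into c0, the carries ripple into c1 and c2, and once a column is complete the registers rotate, as the ($c0,$c1,$c2)=($c1,$c2,$c0) line shows. The core step, sketched in portable C with a 64-bit temporary standing in for the edx:eax pair (a reference form, not the generated code):

typedef unsigned int BN_ULONG;
typedef unsigned long long BN_ULLONG;

/* (c0,c1,c2) += a*b, carries rippling upward through the column. */
#define mul_add_c_ref(a, b, c0, c1, c2) do {                  \
        BN_ULLONG t = (BN_ULLONG)(a) * (b);                   \
        BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 32);  \
        (c0) += lo;  hi += ((c0) < lo);   /* carry into c1 */ \
        (c1) += hi;  (c2) += ((c1) < hi); /* carry into c2 */ \
    } while (0)

After all products for column i are accumulated, c0 is stored to r[i] and the rotation makes the old c1 the new column base, exactly as the "saved r[$i]" comments mark.
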
+ 0 - 15
drivers/builtin_openssl2/crypto/bn/asm/x86/div.pl

@@ -1,15 +0,0 @@
-#!/usr/local/bin/perl
-# x86 assembler
-
-sub bn_div_words
-	{
-	local($name)=@_;
-
-	&function_begin($name,"");
-	&mov("edx",&wparam(0));	#
-	&mov("eax",&wparam(1));	#
-	&mov("ebx",&wparam(2));	#
-	&div("ebx");
-	&function_end($name);
-	}
-1;

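bn_div_words above is a thin wrapper around the x86 div instruction, which divides the two-word value h:l by d. With a 64-bit intermediate the same operation is, as a sketch (like the hardware instruction, it assumes h < d so the quotient fits in one word):

typedef unsigned int BN_ULONG;

/* Quotient of the two-word value (h:l) by d, as `div ebx` computes. */
BN_ULONG bn_div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
{
    unsigned long long n = ((unsigned long long)h << 32) | l;
    return (BN_ULONG)(n / d);
}
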
+ 0 - 77
drivers/builtin_openssl2/crypto/bn/asm/x86/mul.pl

@@ -1,77 +0,0 @@
-#!/usr/local/bin/perl
-# x86 assembler
-
-sub bn_mul_words
-	{
-	local($name)=@_;
-
-	&function_begin($name,"");
-
-	&comment("");
-	$Low="eax";
-	$High="edx";
-	$a="ebx";
-	$w="ecx";
-	$r="edi";
-	$c="esi";
-	$num="ebp";
-
-	&xor($c,$c);		# clear carry
-	&mov($r,&wparam(0));	#
-	&mov($a,&wparam(1));	#
-	&mov($num,&wparam(2));	#
-	&mov($w,&wparam(3));	#
-
-	&and($num,0xfffffff8);	# num / 8
-	&jz(&label("mw_finish"));
-
-	&set_label("mw_loop",0);
-	for ($i=0; $i<32; $i+=4)
-		{
-		&comment("Round $i");
-
-		 &mov("eax",&DWP($i,$a,"",0)); 	# *a
-		&mul($w);			# *a * w
-		&add("eax",$c);			# L(t)+=c
-		 # XXX
-
-		&adc("edx",0);			# H(t)+=carry
-		 &mov(&DWP($i,$r,"",0),"eax");	# *r= L(t);
-
-		&mov($c,"edx");			# c=  H(t);
-		}
-
-	&comment("");
-	&add($a,32);
-	&add($r,32);
-	&sub($num,8);
-	&jz(&label("mw_finish"));
-	&jmp(&label("mw_loop"));
-
-	&set_label("mw_finish",0);
-	&mov($num,&wparam(2));	# get num
-	&and($num,7);
-	&jnz(&label("mw_finish2"));
-	&jmp(&label("mw_end"));
-
-	&set_label("mw_finish2",1);
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		 &mov("eax",&DWP($i*4,$a,"",0));# *a
-		&mul($w);			# *a * w
-		&add("eax",$c);			# L(t)+=c
-		 # XXX
-		&adc("edx",0);			# H(t)+=carry
-		 &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t);
-		&mov($c,"edx");			# c=  H(t);
-		 &dec($num) if ($i != 7-1);
-		&jz(&label("mw_end")) if ($i != 7-1);
-		}
-	&set_label("mw_end",0);
-	&mov("eax",$c);
-
-	&function_end($name);
-	}
-
-1;

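What the removed mul.pl generates is a multiply-by-one-word loop: each product's low half L(t) is stored and the high half H(t) becomes the carry into the next word, exactly as the inline comments say. A portable sketch of that contract (illustrative, not the OpenSSL C source):

typedef unsigned int BN_ULONG;

/* r[] = a[] * w, returning the final carry word. */
BN_ULONG bn_mul_words_ref(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG w)
{
    BN_ULONG c = 0;
    for (int i = 0; i < n; i++) {
        unsigned long long t = (unsigned long long)a[i] * w + c;
        r[i] = (BN_ULONG)t;          /* L(t) */
        c = (BN_ULONG)(t >> 32);     /* c = H(t) */
    }
    return c;
}
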
+ 0 - 87
drivers/builtin_openssl2/crypto/bn/asm/x86/mul_add.pl

@@ -1,87 +0,0 @@
-#!/usr/local/bin/perl
-# x86 assembler
-
-sub bn_mul_add_words
-	{
-	local($name)=@_;
-
-	&function_begin($name,"");
-
-	&comment("");
-	$Low="eax";
-	$High="edx";
-	$a="ebx";
-	$w="ebp";
-	$r="edi";
-	$c="esi";
-
-	&xor($c,$c);		# clear carry
-	&mov($r,&wparam(0));	#
-
-	&mov("ecx",&wparam(2));	#
-	&mov($a,&wparam(1));	#
-
-	&and("ecx",0xfffffff8);	# num / 8
-	&mov($w,&wparam(3));	#
-
-	&push("ecx");		# Up the stack for a tmp variable
-
-	&jz(&label("maw_finish"));
-
-	&set_label("maw_loop",0);
-
-	&mov(&swtmp(0),"ecx");	#
-
-	for ($i=0; $i<32; $i+=4)
-		{
-		&comment("Round $i");
-
-		 &mov("eax",&DWP($i,$a,"",0)); 	# *a
-		&mul($w);			# *a * w
-		&add("eax",$c);		# L(t)+= *r
-		 &mov($c,&DWP($i,$r,"",0));	# L(t)+= *r
-		&adc("edx",0);			# H(t)+=carry
-		 &add("eax",$c);		# L(t)+=c
-		&adc("edx",0);			# H(t)+=carry
-		 &mov(&DWP($i,$r,"",0),"eax");	# *r= L(t);
-		&mov($c,"edx");			# c=  H(t);
-		}
-
-	&comment("");
-	&mov("ecx",&swtmp(0));	#
-	&add($a,32);
-	&add($r,32);
-	&sub("ecx",8);
-	&jnz(&label("maw_loop"));
-
-	&set_label("maw_finish",0);
-	&mov("ecx",&wparam(2));	# get num
-	&and("ecx",7);
-	&jnz(&label("maw_finish2"));	# helps branch prediction
-	&jmp(&label("maw_end"));
-
-	&set_label("maw_finish2",1);
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		 &mov("eax",&DWP($i*4,$a,"",0));# *a
-		&mul($w);			# *a * w
-		&add("eax",$c);			# L(t)+=c
-		 &mov($c,&DWP($i*4,$r,"",0));	# L(t)+= *r
-		&adc("edx",0);			# H(t)+=carry
-		 &add("eax",$c);
-		&adc("edx",0);			# H(t)+=carry
-		 &dec("ecx") if ($i != 7-1);
-		&mov(&DWP($i*4,$r,"",0),"eax");	# *r= L(t);
-		 &mov($c,"edx");			# c=  H(t);
-		&jz(&label("maw_end")) if ($i != 7-1);
-		}
-	&set_label("maw_end",0);
-	&mov("eax",$c);
-
-	&pop("ecx");	# clear variable from
-
-	&function_end($name);
-	}
-
-1;

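bn_mul_add_words is the same loop with one extra addend: the existing r[i] is folded in before the store, which is why the assembly above needs two adc instructions per word. Sketch (the 64-bit temporary cannot overflow, since a*w + r + c <= 2^64 - 1 for 32-bit inputs):

typedef unsigned int BN_ULONG;

/* r[] += a[] * w, returning the final carry word. */
BN_ULONG bn_mul_add_words_ref(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG w)
{
    BN_ULONG c = 0;
    for (int i = 0; i < n; i++) {
        unsigned long long t = (unsigned long long)a[i] * w + r[i] + c;
        r[i] = (BN_ULONG)t;
        c = (BN_ULONG)(t >> 32);
    }
    return c;
}
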
+ 0 - 60
drivers/builtin_openssl2/crypto/bn/asm/x86/sqr.pl

@@ -1,60 +0,0 @@
-#!/usr/local/bin/perl
-# x86 assembler
-
-sub bn_sqr_words
-	{
-	local($name)=@_;
-
-	&function_begin($name,"");
-
-	&comment("");
-	$r="esi";
-	$a="edi";
-	$num="ebx";
-
-	&mov($r,&wparam(0));	#
-	&mov($a,&wparam(1));	#
-	&mov($num,&wparam(2));	#
-
-	&and($num,0xfffffff8);	# num / 8
-	&jz(&label("sw_finish"));
-
-	&set_label("sw_loop",0);
-	for ($i=0; $i<32; $i+=4)
-		{
-		&comment("Round $i");
-		&mov("eax",&DWP($i,$a,"",0)); 	# *a
-		 # XXX
-		&mul("eax");			# *a * *a
-		&mov(&DWP($i*2,$r,"",0),"eax");	#
-		 &mov(&DWP($i*2+4,$r,"",0),"edx");#
-		}
-
-	&comment("");
-	&add($a,32);
-	&add($r,64);
-	&sub($num,8);
-	&jnz(&label("sw_loop"));
-
-	&set_label("sw_finish",0);
-	&mov($num,&wparam(2));	# get num
-	&and($num,7);
-	&jz(&label("sw_end"));
-
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		&mov("eax",&DWP($i*4,$a,"",0));	# *a
-		 # XXX
-		&mul("eax");			# *a * *a
-		&mov(&DWP($i*8,$r,"",0),"eax");	#
-		 &dec($num) if ($i != 7-1);
-		&mov(&DWP($i*8+4,$r,"",0),"edx");
-		 &jz(&label("sw_end")) if ($i != 7-1);
-		}
-	&set_label("sw_end",0);
-
-	&function_end($name);
-	}
-
-1;

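bn_sqr_words carries nothing between iterations: each input word is squared independently into two output words, which is why the loop above is a plain mul/store pattern with no adc. Sketch:

typedef unsigned int BN_ULONG;

/* r[2i], r[2i+1] = low/high halves of a[i]^2; no inter-word carries. */
void bn_sqr_words_ref(BN_ULONG *r, const BN_ULONG *a, int n)
{
    for (int i = 0; i < n; i++) {
        unsigned long long t = (unsigned long long)a[i] * a[i];
        r[2 * i]     = (BN_ULONG)t;
        r[2 * i + 1] = (BN_ULONG)(t >> 32);
    }
}
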
+ 0 - 76
drivers/builtin_openssl2/crypto/bn/asm/x86/sub.pl

@@ -1,76 +0,0 @@
-#!/usr/local/bin/perl
-# x86 assembler
-
-sub bn_sub_words
-	{
-	local($name)=@_;
-
-	&function_begin($name,"");
-
-	&comment("");
-	$a="esi";
-	$b="edi";
-	$c="eax";
-	$r="ebx";
-	$tmp1="ecx";
-	$tmp2="edx";
-	$num="ebp";
-
-	&mov($r,&wparam(0));	# get r
-	 &mov($a,&wparam(1));	# get a
-	&mov($b,&wparam(2));	# get b
-	 &mov($num,&wparam(3));	# get num
-	&xor($c,$c);		# clear carry
-	 &and($num,0xfffffff8);	# num / 8
-
-	&jz(&label("aw_finish"));
-
-	&set_label("aw_loop",0);
-	for ($i=0; $i<8; $i++)
-		{
-		&comment("Round $i");
-
-		&mov($tmp1,&DWP($i*4,$a,"",0)); 	# *a
-		 &mov($tmp2,&DWP($i*4,$b,"",0)); 	# *b
-		&sub($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &sub($tmp1,$tmp2);
-		&adc($c,0);
-		 &mov(&DWP($i*4,$r,"",0),$tmp1); 	# *r
-		}
-
-	&comment("");
-	&add($a,32);
-	 &add($b,32);
-	&add($r,32);
-	 &sub($num,8);
-	&jnz(&label("aw_loop"));
-
-	&set_label("aw_finish",0);
-	&mov($num,&wparam(3));	# get num
-	&and($num,7);
-	 &jz(&label("aw_end"));
-
-	for ($i=0; $i<7; $i++)
-		{
-		&comment("Tail Round $i");
-		&mov($tmp1,&DWP($i*4,$a,"",0));	# *a
-		 &mov($tmp2,&DWP($i*4,$b,"",0));# *b
-		&sub($tmp1,$c);
-		 &mov($c,0);
-		&adc($c,$c);
-		 &sub($tmp1,$tmp2);
-		&adc($c,0);
-		 &dec($num) if ($i != 6);
-		&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
-		 &jz(&label("aw_end")) if ($i != 6);
-		}
-	&set_label("aw_end",0);
-
-#	&mov("eax",$c);		# $c is "eax"
-
-	&function_end($name);
-	}
-
-1;

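bn_sub_words mirrors the add loop with borrows (sbb instead of adc). For completeness, the matching reference sketch:

typedef unsigned int BN_ULONG;

/* r[] = a[] - b[], returning the final borrow (0 or 1). */
BN_ULONG bn_sub_words_ref(BN_ULONG *r, const BN_ULONG *a,
                          const BN_ULONG *b, int n)
{
    BN_ULONG c = 0;                  /* borrow, always 0 or 1 */
    for (int i = 0; i < n; i++) {
        BN_ULONG t = a[i] - c;
        c = (t > a[i]);              /* borrow out of a[i]-c */
        c += (t < b[i]);             /* borrow out of t-b[i] */
        r[i] = t - b[i];
    }
    return c;
}
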
+ 56 - 54
drivers/builtin_openssl2/crypto/bn/asm/x86_64-gcc.c

@@ -55,7 +55,7 @@
  *    machine.
  */
 
-# ifdef _WIN64
+# if defined(_WIN64) || !defined(__LP64__)
 #  define BN_ULONG unsigned long long
 # else
 #  define BN_ULONG unsigned long
@@ -63,7 +63,6 @@
 
 # undef mul
 # undef mul_add
-# undef sqr
 
 /*-
  * "m"(a), "+m"(r)      is the way to favor DirectPath µ-code;
@@ -99,8 +98,8 @@
                 : "cc");                \
         (r)=carry, carry=high;          \
         } while (0)
-
-# define sqr(r0,r1,a)                    \
+# undef sqr
+# define sqr(r0,r1,a)                   \
         asm ("mulq %2"                  \
                 : "=a"(r0),"=d"(r1)     \
                 : "a"(a)                \
@@ -204,20 +203,22 @@ BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
 BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                       int n)
 {
-    BN_ULONG ret = 0, i = 0;
+    BN_ULONG ret;
+    size_t i = 0;
 
     if (n <= 0)
         return 0;
 
-    asm volatile ("       subq    %2,%2           \n"
+    asm volatile ("       subq    %0,%0           \n" /* clear carry */
+                  "       jmp     1f              \n"
                   ".p2align 4                     \n"
                   "1:     movq    (%4,%2,8),%0    \n"
                   "       adcq    (%5,%2,8),%0    \n"
                   "       movq    %0,(%3,%2,8)    \n"
-                  "       leaq    1(%2),%2        \n"
+                  "       lea     1(%2),%2        \n"
                   "       loop    1b              \n"
-                  "       sbbq    %0,%0           \n":"=&a" (ret), "+c"(n),
-                  "=&r"(i)
+                  "       sbbq    %0,%0           \n":"=&r" (ret), "+c"(n),
+                  "+r"(i)
                   :"r"(rp), "r"(ap), "r"(bp)
                   :"cc", "memory");
 
@@ -228,20 +229,22 @@ BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
 BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                       int n)
 {
-    BN_ULONG ret = 0, i = 0;
+    BN_ULONG ret;
+    size_t i = 0;
 
     if (n <= 0)
         return 0;
 
-    asm volatile ("       subq    %2,%2           \n"
+    asm volatile ("       subq    %0,%0           \n" /* clear borrow */
+                  "       jmp     1f              \n"
                   ".p2align 4                     \n"
                   "1:     movq    (%4,%2,8),%0    \n"
                   "       sbbq    (%5,%2,8),%0    \n"
                   "       movq    %0,(%3,%2,8)    \n"
-                  "       leaq    1(%2),%2        \n"
+                  "       lea     1(%2),%2        \n"
                   "       loop    1b              \n"
-                  "       sbbq    %0,%0           \n":"=&a" (ret), "+c"(n),
-                  "=&r"(i)
+                  "       sbbq    %0,%0           \n":"=&r" (ret), "+c"(n),
+                  "+r"(i)
                   :"r"(rp), "r"(ap), "r"(bp)
                   :"cc", "memory");
 
@@ -313,55 +316,58 @@ BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
  */
 # if 0
 /* original macros are kept for reference purposes */
-#  define mul_add_c(a,b,c0,c1,c2) {       \
-        BN_ULONG ta=(a),tb=(b);         \
-        t1 = ta * tb;                   \
-        t2 = BN_UMULT_HIGH(ta,tb);      \
-        c0 += t1; t2 += (c0<t1)?1:0;    \
-        c1 += t2; c2 += (c1<t2)?1:0;    \
-        }
-
-#  define mul_add_c2(a,b,c0,c1,c2) {      \
-        BN_ULONG ta=(a),tb=(b),t0;      \
-        t1 = BN_UMULT_HIGH(ta,tb);      \
-        t0 = ta * tb;                   \
-        c0 += t0; t2 = t1+((c0<t0)?1:0);\
-        c1 += t2; c2 += (c1<t2)?1:0;    \
-        c0 += t0; t1 += (c0<t0)?1:0;    \
-        c1 += t1; c2 += (c1<t1)?1:0;    \
-        }
+#  define mul_add_c(a,b,c0,c1,c2)       do {    \
+        BN_ULONG ta = (a), tb = (b);            \
+        BN_ULONG lo, hi;                        \
+        BN_UMULT_LOHI(lo,hi,ta,tb);             \
+        c0 += lo; hi += (c0<lo)?1:0;            \
+        c1 += hi; c2 += (c1<hi)?1:0;            \
+        } while(0)
+
+#  define mul_add_c2(a,b,c0,c1,c2)      do {    \
+        BN_ULONG ta = (a), tb = (b);            \
+        BN_ULONG lo, hi, tt;                    \
+        BN_UMULT_LOHI(lo,hi,ta,tb);             \
+        c0 += lo; tt = hi+((c0<lo)?1:0);        \
+        c1 += tt; c2 += (c1<tt)?1:0;            \
+        c0 += lo; hi += (c0<lo)?1:0;            \
+        c1 += hi; c2 += (c1<hi)?1:0;            \
+        } while(0)
+
+#  define sqr_add_c(a,i,c0,c1,c2)       do {    \
+        BN_ULONG ta = (a)[i];                   \
+        BN_ULONG lo, hi;                        \
+        BN_UMULT_LOHI(lo,hi,ta,ta);             \
+        c0 += lo; hi += (c0<lo)?1:0;            \
+        c1 += hi; c2 += (c1<hi)?1:0;            \
+        } while(0)
 # else
-#  define mul_add_c(a,b,c0,c1,c2) do {    \
+#  define mul_add_c(a,b,c0,c1,c2) do {  \
+        BN_ULONG t1,t2;                 \
         asm ("mulq %3"                  \
                 : "=a"(t1),"=d"(t2)     \
                 : "a"(a),"m"(b)         \
                 : "cc");                \
-        asm ("addq %2,%0; adcq %3,%1"   \
-                : "+r"(c0),"+d"(t2)     \
-                : "a"(t1),"g"(0)        \
-                : "cc");                \
-        asm ("addq %2,%0; adcq %3,%1"   \
-                : "+r"(c1),"+r"(c2)     \
-                : "d"(t2),"g"(0)        \
-                : "cc");                \
+        asm ("addq %3,%0; adcq %4,%1; adcq %5,%2"       \
+                : "+r"(c0),"+r"(c1),"+r"(c2)            \
+                : "r"(t1),"r"(t2),"g"(0)                \
+                : "cc");                                \
         } while (0)
 
-#  define sqr_add_c(a,i,c0,c1,c2) do {    \
+#  define sqr_add_c(a,i,c0,c1,c2) do {  \
+        BN_ULONG t1,t2;                 \
         asm ("mulq %2"                  \
                 : "=a"(t1),"=d"(t2)     \
                 : "a"(a[i])             \
                 : "cc");                \
-        asm ("addq %2,%0; adcq %3,%1"   \
-                : "+r"(c0),"+d"(t2)     \
-                : "a"(t1),"g"(0)        \
-                : "cc");                \
-        asm ("addq %2,%0; adcq %3,%1"   \
-                : "+r"(c1),"+r"(c2)     \
-                : "d"(t2),"g"(0)        \
-                : "cc");                \
+        asm ("addq %3,%0; adcq %4,%1; adcq %5,%2"       \
+                : "+r"(c0),"+r"(c1),"+r"(c2)            \
+                : "r"(t1),"r"(t2),"g"(0)                \
+                : "cc");                                \
         } while (0)
 
-#  define mul_add_c2(a,b,c0,c1,c2) do {   \
+#  define mul_add_c2(a,b,c0,c1,c2) do { \
+        BN_ULONG t1,t2;                 \
         asm ("mulq %3"                  \
                 : "=a"(t1),"=d"(t2)     \
                 : "a"(a),"m"(b)         \
@@ -382,7 +388,6 @@ BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
 
 void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 {
-    BN_ULONG t1, t2;
     BN_ULONG c1, c2, c3;
 
     c1 = 0;
@@ -486,7 +491,6 @@ void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 
 void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 {
-    BN_ULONG t1, t2;
     BN_ULONG c1, c2, c3;
 
     c1 = 0;
@@ -526,7 +530,6 @@ void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 
 void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
 {
-    BN_ULONG t1, t2;
     BN_ULONG c1, c2, c3;
 
     c1 = 0;
@@ -602,7 +605,6 @@ void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
 
 void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
 {
-    BN_ULONG t1, t2;
     BN_ULONG c1, c2, c3;
 
     c1 = 0;

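On the bn_add_words/bn_sub_words hunks above: the rewritten inline asm clears the carry in the result register itself (subq %0,%0), jumps over the alignment padding into the loop, widens the index to size_t, and stops pinning the accumulator to %rax ("=&r" instead of "=&a"). The external contract is unchanged: callers still receive the final carry or borrow as the return value. An illustrative test only (not part of the patch), using the signature declared in this file:

#include <stdio.h>

typedef unsigned long BN_ULONG;   /* LP64, per the #if at the top of the hunk */
BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap,
                      const BN_ULONG *bp, int n);

int main(void)
{
    /* Top words overflow, so the returned carry must be 1. */
    BN_ULONG a[2] = { 0, ~0UL }, b[2] = { 0, 1 }, r[2];
    BN_ULONG carry = bn_add_words(r, a, b, 2);
    printf("carry=%lu r1=%lu\n", (unsigned long)carry, (unsigned long)r[1]);
    return 0;                     /* prints carry=1 r1=0 */
}
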
+ 0 - 390
drivers/builtin_openssl2/crypto/bn/asm/x86_64-gf2m.pl

@@ -1,390 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# May 2011
-#
-# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
-# in bn_gf2m.c. It's a kind of low-hanging mechanical port from C for
-# the time being... Except that it has two code paths: code suitable
-# for any x86_64 CPU and PCLMULQDQ one suitable for Westmere and
-# later. Improvement varies from one benchmark and µ-arch to another.
-# The vanilla code path is at most 20% faster than compiler-generated code
-# [not very impressive], while the PCLMULQDQ one is a whole 85%-160% better on
-# 163- and 571-bit ECDH benchmarks on Intel CPUs. Keep in mind that
-# these coefficients are not ones for bn_GF2m_mul_2x2 itself, as not
-# all CPU time is burnt in it...
-
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-($lo,$hi)=("%rax","%rdx");	$a=$lo;
-($i0,$i1)=("%rsi","%rdi");
-($t0,$t1)=("%rbx","%rcx");
-($b,$mask)=("%rbp","%r8");
-($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(9..15));
-($R,$Tx)=("%xmm0","%xmm1");
-
-$code.=<<___;
-.text
-
-.type	_mul_1x1,\@abi-omnipotent
-.align	16
-_mul_1x1:
-	sub	\$128+8,%rsp
-	mov	\$-1,$a1
-	lea	($a,$a),$i0
-	shr	\$3,$a1
-	lea	(,$a,4),$i1
-	and	$a,$a1			# a1=a&0x1fffffffffffffff
-	lea	(,$a,8),$a8
-	sar	\$63,$a			# broadcast 63rd bit
-	lea	($a1,$a1),$a2
-	sar	\$63,$i0		# broadcast 62nd bit
-	lea	(,$a1,4),$a4
-	and	$b,$a
-	sar	\$63,$i1		# broadcast 61st bit
-	mov	$a,$hi			# $a is $lo
-	shl	\$63,$lo
-	and	$b,$i0
-	shr	\$1,$hi
-	mov	$i0,$t1
-	shl	\$62,$i0
-	and	$b,$i1
-	shr	\$2,$t1
-	xor	$i0,$lo
-	mov	$i1,$t0
-	shl	\$61,$i1
-	xor	$t1,$hi
-	shr	\$3,$t0
-	xor	$i1,$lo
-	xor	$t0,$hi
-
-	mov	$a1,$a12
-	movq	\$0,0(%rsp)		# tab[0]=0
-	xor	$a2,$a12		# a1^a2
-	mov	$a1,8(%rsp)		# tab[1]=a1
-	 mov	$a4,$a48
-	mov	$a2,16(%rsp)		# tab[2]=a2
-	 xor	$a8,$a48		# a4^a8
-	mov	$a12,24(%rsp)		# tab[3]=a1^a2
-
-	xor	$a4,$a1
-	mov	$a4,32(%rsp)		# tab[4]=a4
-	xor	$a4,$a2
-	mov	$a1,40(%rsp)		# tab[5]=a1^a4
-	xor	$a4,$a12
-	mov	$a2,48(%rsp)		# tab[6]=a2^a4
-	 xor	$a48,$a1		# a1^a4^a4^a8=a1^a8
-	mov	$a12,56(%rsp)		# tab[7]=a1^a2^a4
-	 xor	$a48,$a2		# a2^a4^a4^a8=a2^a8
-
-	mov	$a8,64(%rsp)		# tab[8]=a8
-	xor	$a48,$a12		# a1^a2^a4^a4^a8=a1^a2^a8
-	mov	$a1,72(%rsp)		# tab[9]=a1^a8
-	 xor	$a4,$a1			# a1^a8^a4
-	mov	$a2,80(%rsp)		# tab[10]=a2^a8
-	 xor	$a4,$a2			# a2^a8^a4
-	mov	$a12,88(%rsp)		# tab[11]=a1^a2^a8
-
-	xor	$a4,$a12		# a1^a2^a8^a4
-	mov	$a48,96(%rsp)		# tab[12]=a4^a8
-	 mov	$mask,$i0
-	mov	$a1,104(%rsp)		# tab[13]=a1^a4^a8
-	 and	$b,$i0
-	mov	$a2,112(%rsp)		# tab[14]=a2^a4^a8
-	 shr	\$4,$b
-	mov	$a12,120(%rsp)		# tab[15]=a1^a2^a4^a8
-	 mov	$mask,$i1
-	 and	$b,$i1
-	 shr	\$4,$b
-
-	movq	(%rsp,$i0,8),$R		# half of calculations is done in SSE2
-	mov	$mask,$i0
-	and	$b,$i0
-	shr	\$4,$b
-___
-    for ($n=1;$n<8;$n++) {
-	$code.=<<___;
-	mov	(%rsp,$i1,8),$t1
-	mov	$mask,$i1
-	mov	$t1,$t0
-	shl	\$`8*$n-4`,$t1
-	and	$b,$i1
-	 movq	(%rsp,$i0,8),$Tx
-	shr	\$`64-(8*$n-4)`,$t0
-	xor	$t1,$lo
-	 pslldq	\$$n,$Tx
-	 mov	$mask,$i0
-	shr	\$4,$b
-	xor	$t0,$hi
-	 and	$b,$i0
-	 shr	\$4,$b
-	 pxor	$Tx,$R
-___
-    }
-$code.=<<___;
-	mov	(%rsp,$i1,8),$t1
-	mov	$t1,$t0
-	shl	\$`8*$n-4`,$t1
-	movq	$R,$i0
-	shr	\$`64-(8*$n-4)`,$t0
-	xor	$t1,$lo
-	psrldq	\$8,$R
-	xor	$t0,$hi
-	movq	$R,$i1
-	xor	$i0,$lo
-	xor	$i1,$hi
-
-	add	\$128+8,%rsp
-	ret
-.Lend_mul_1x1:
-.size	_mul_1x1,.-_mul_1x1
-___
-
-($rp,$a1,$a0,$b1,$b0) = $win64?	("%rcx","%rdx","%r8", "%r9","%r10") :	# Win64 order
-				("%rdi","%rsi","%rdx","%rcx","%r8");	# Unix order
-
-$code.=<<___;
-.extern	OPENSSL_ia32cap_P
-.globl	bn_GF2m_mul_2x2
-.type	bn_GF2m_mul_2x2,\@abi-omnipotent
-.align	16
-bn_GF2m_mul_2x2:
-	mov	OPENSSL_ia32cap_P(%rip),%rax
-	bt	\$33,%rax
-	jnc	.Lvanilla_mul_2x2
-
-	movq		$a1,%xmm0
-	movq		$b1,%xmm1
-	movq		$a0,%xmm2
-___
-$code.=<<___ if ($win64);
-	movq		40(%rsp),%xmm3
-___
-$code.=<<___ if (!$win64);
-	movq		$b0,%xmm3
-___
-$code.=<<___;
-	movdqa		%xmm0,%xmm4
-	movdqa		%xmm1,%xmm5
-	pclmulqdq	\$0,%xmm1,%xmm0	# a1·b1
-	pxor		%xmm2,%xmm4
-	pxor		%xmm3,%xmm5
-	pclmulqdq	\$0,%xmm3,%xmm2	# a0·b0
-	pclmulqdq	\$0,%xmm5,%xmm4	# (a0+a1)·(b0+b1)
-	xorps		%xmm0,%xmm4
-	xorps		%xmm2,%xmm4	# (a0+a1)·(b0+b1)-a0·b0-a1·b1
-	movdqa		%xmm4,%xmm5
-	pslldq		\$8,%xmm4
-	psrldq		\$8,%xmm5
-	pxor		%xmm4,%xmm2
-	pxor		%xmm5,%xmm0
-	movdqu		%xmm2,0($rp)
-	movdqu		%xmm0,16($rp)
-	ret
-
-.align	16
-.Lvanilla_mul_2x2:
-	lea	-8*17(%rsp),%rsp
-___
-$code.=<<___ if ($win64);
-	mov	`8*17+40`(%rsp),$b0
-	mov	%rdi,8*15(%rsp)
-	mov	%rsi,8*16(%rsp)
-___
-$code.=<<___;
-	mov	%r14,8*10(%rsp)
-	mov	%r13,8*11(%rsp)
-	mov	%r12,8*12(%rsp)
-	mov	%rbp,8*13(%rsp)
-	mov	%rbx,8*14(%rsp)
-.Lbody_mul_2x2:
-	mov	$rp,32(%rsp)		# save the arguments
-	mov	$a1,40(%rsp)
-	mov	$a0,48(%rsp)
-	mov	$b1,56(%rsp)
-	mov	$b0,64(%rsp)
-
-	mov	\$0xf,$mask
-	mov	$a1,$a
-	mov	$b1,$b
-	call	_mul_1x1		# a1·b1
-	mov	$lo,16(%rsp)
-	mov	$hi,24(%rsp)
-
-	mov	48(%rsp),$a
-	mov	64(%rsp),$b
-	call	_mul_1x1		# a0·b0
-	mov	$lo,0(%rsp)
-	mov	$hi,8(%rsp)
-
-	mov	40(%rsp),$a
-	mov	56(%rsp),$b
-	xor	48(%rsp),$a
-	xor	64(%rsp),$b
-	call	_mul_1x1		# (a0+a1)·(b0+b1)
-___
-	@r=("%rbx","%rcx","%rdi","%rsi");
-$code.=<<___;
-	mov	0(%rsp),@r[0]
-	mov	8(%rsp),@r[1]
-	mov	16(%rsp),@r[2]
-	mov	24(%rsp),@r[3]
-	mov	32(%rsp),%rbp
-
-	xor	$hi,$lo
-	xor	@r[1],$hi
-	xor	@r[0],$lo
-	mov	@r[0],0(%rbp)
-	xor	@r[2],$hi
-	mov	@r[3],24(%rbp)
-	xor	@r[3],$lo
-	xor	@r[3],$hi
-	xor	$hi,$lo
-	mov	$hi,16(%rbp)
-	mov	$lo,8(%rbp)
-
-	mov	8*10(%rsp),%r14
-	mov	8*11(%rsp),%r13
-	mov	8*12(%rsp),%r12
-	mov	8*13(%rsp),%rbp
-	mov	8*14(%rsp),%rbx
-___
-$code.=<<___ if ($win64);
-	mov	8*15(%rsp),%rdi
-	mov	8*16(%rsp),%rsi
-___
-$code.=<<___;
-	lea	8*17(%rsp),%rsp
-	ret
-.Lend_mul_2x2:
-.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
-.asciz	"GF(2^m) Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
-.align	16
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#               CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern __imp_RtlVirtualUnwind
-
-.type	se_handler,\@abi-omnipotent
-.align	16
-se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	152($context),%rax	# pull context->Rsp
-	mov	248($context),%rbx	# pull context->Rip
-
-	lea	.Lbody_mul_2x2(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<"prologue" label
-	jb	.Lin_prologue
-
-	mov	8*10(%rax),%r14		# mimic epilogue
-	mov	8*11(%rax),%r13
-	mov	8*12(%rax),%r12
-	mov	8*13(%rax),%rbp
-	mov	8*14(%rax),%rbx
-	mov	8*15(%rax),%rdi
-	mov	8*16(%rax),%rsi
-
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-
-.Lin_prologue:
-	lea	8*17(%rax),%rax
-	mov	%rax,152($context)	# restore context->Rsp
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$154,%ecx		# sizeof(CONTEXT)
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	add	\$64,%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	se_handler,.-se_handler
-
-.section	.pdata
-.align	4
-	.rva	_mul_1x1
-	.rva	.Lend_mul_1x1
-	.rva	.LSEH_info_1x1
-
-	.rva	.Lvanilla_mul_2x2
-	.rva	.Lend_mul_2x2
-	.rva	.LSEH_info_2x2
-.section	.xdata
-.align	8
-.LSEH_info_1x1:
-	.byte	0x01,0x07,0x02,0x00
-	.byte	0x07,0x01,0x11,0x00	# sub rsp,128+8
-.LSEH_info_2x2:
-	.byte	9,0,0,0
-	.rva	se_handler
-___
-}
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-print $code;
-close STDOUT;

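The removed x86_64-gf2m.pl header describes two paths for bn_GF2m_mul_2x2: a table-driven _mul_1x1 for generic x86_64 and a PCLMULQDQ path for Westmere and later. Both compute a 64x64 to 128-bit carry-less (GF(2)[x]) product; the schoolbook form of that primitive, as a non-constant-time C illustration (clmul64 is an illustrative name, with no table windowing):

#include <stdint.h>

/* Carry-less multiply: xor of (a << i) over the set bits of b. */
static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
    uint64_t h = 0, l = 0;
    for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
            l ^= a << i;
            h ^= (i != 0) ? (a >> (64 - i)) : 0;  /* spill into high word */
        }
    }
    *hi = h; *lo = l;
}

bn_GF2m_mul_2x2 then combines three such 1x1 products Karatsuba-style, per the (a0+a1)·(b0+b1)-a0·b0-a1·b1 comment in the PCLMULQDQ path above.
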
+ 0 - 1681
drivers/builtin_openssl2/crypto/bn/asm/x86_64-mont.pl

@@ -1,1681 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# October 2005.
-#
-# Montgomery multiplication routine for x86_64. While it gives modest
-# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
-# than twice, >2x, as fast. Most common rsa1024 sign is improved by
-# respectful 50%. It remains to be seen if loop unrolling and
-# dedicated squaring routine can provide further improvement...
-
-# July 2011.
-#
-# Add dedicated squaring procedure. Performance improvement varies
-# from platform to platform, but in average it's ~5%/15%/25%/33%
-# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
-
-# August 2011.
-#
-# Unroll and modulo-schedule inner loops in such manner that they
-# are "fallen through" for input lengths of 8, which is critical for
-# 1024-bit RSA *sign*. Average performance improvement in comparison
-# to *initial* version of this module from 2005 is ~0%/30%/40%/45%
-# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
-
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-# int bn_mul_mont(
-$rp="%rdi";	# BN_ULONG *rp,
-$ap="%rsi";	# const BN_ULONG *ap,
-$bp="%rdx";	# const BN_ULONG *bp,
-$np="%rcx";	# const BN_ULONG *np,
-$n0="%r8";	# const BN_ULONG *n0,
-$num="%r9";	# int num);
-$lo0="%r10";
-$hi0="%r11";
-$hi1="%r13";
-$i="%r14";
-$j="%r15";
-$m0="%rbx";
-$m1="%rbp";
-
-$code=<<___;
-.text
-
-.globl	bn_mul_mont
-.type	bn_mul_mont,\@function,6
-.align	16
-bn_mul_mont:
-	test	\$3,${num}d
-	jnz	.Lmul_enter
-	cmp	\$8,${num}d
-	jb	.Lmul_enter
-	cmp	$ap,$bp
-	jne	.Lmul4x_enter
-	jmp	.Lsqr4x_enter
-
-.align	16
-.Lmul_enter:
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-
-	mov	${num}d,${num}d
-	lea	2($num),%r10
-	mov	%rsp,%r11
-	neg	%r10
-	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+2))
-	and	\$-1024,%rsp		# minimize TLB usage
-
-	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
-.Lmul_body:
-	mov	$bp,%r12		# reassign $bp
-___
-		$bp="%r12";
-$code.=<<___;
-	mov	($n0),$n0		# pull n0[0] value
-	mov	($bp),$m0		# m0=bp[0]
-	mov	($ap),%rax
-
-	xor	$i,$i			# i=0
-	xor	$j,$j			# j=0
-
-	mov	$n0,$m1
-	mulq	$m0			# ap[0]*bp[0]
-	mov	%rax,$lo0
-	mov	($np),%rax
-
-	imulq	$lo0,$m1		# "tp[0]"*n0
-	mov	%rdx,$hi0
-
-	mulq	$m1			# np[0]*m1
-	add	%rax,$lo0		# discarded
-	mov	8($ap),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$hi1
-
-	lea	1($j),$j		# j++
-	jmp	.L1st_enter
-
-.align	16
-.L1st:
-	add	%rax,$hi1
-	mov	($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
-	mov	$lo0,$hi0
-	adc	\$0,%rdx
-	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$hi1
-
-.L1st_enter:
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$hi0
-	mov	($np,$j,8),%rax
-	adc	\$0,%rdx
-	lea	1($j),$j		# j++
-	mov	%rdx,$lo0
-
-	mulq	$m1			# np[j]*m1
-	cmp	$num,$j
-	jne	.L1st
-
-	add	%rax,$hi1
-	mov	($ap),%rax		# ap[0]
-	adc	\$0,%rdx
-	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$hi1
-	mov	$lo0,$hi0
-
-	xor	%rdx,%rdx
-	add	$hi0,$hi1
-	adc	\$0,%rdx
-	mov	$hi1,-8(%rsp,$num,8)
-	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
-
-	lea	1($i),$i		# i++
-	jmp	.Louter
-.align	16
-.Louter:
-	mov	($bp,$i,8),$m0		# m0=bp[i]
-	xor	$j,$j			# j=0
-	mov	$n0,$m1
-	mov	(%rsp),$lo0
-	mulq	$m0			# ap[0]*bp[i]
-	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
-	mov	($np),%rax
-	adc	\$0,%rdx
-
-	imulq	$lo0,$m1		# tp[0]*n0
-	mov	%rdx,$hi0
-
-	mulq	$m1			# np[0]*m1
-	add	%rax,$lo0		# discarded
-	mov	8($ap),%rax
-	adc	\$0,%rdx
-	mov	8(%rsp),$lo0		# tp[1]
-	mov	%rdx,$hi1
-
-	lea	1($j),$j		# j++
-	jmp	.Linner_enter
-
-.align	16
-.Linner:
-	add	%rax,$hi1
-	mov	($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
-	mov	(%rsp,$j,8),$lo0
-	adc	\$0,%rdx
-	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$hi1
-
-.Linner_enter:
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$hi0
-	mov	($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
-	mov	%rdx,$hi0
-	adc	\$0,$hi0
-	lea	1($j),$j		# j++
-
-	mulq	$m1			# np[j]*m1
-	cmp	$num,$j
-	jne	.Linner
-
-	add	%rax,$hi1
-	mov	($ap),%rax		# ap[0]
-	adc	\$0,%rdx
-	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
-	mov	(%rsp,$j,8),$lo0
-	adc	\$0,%rdx
-	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$hi1
-
-	xor	%rdx,%rdx
-	add	$hi0,$hi1
-	adc	\$0,%rdx
-	add	$lo0,$hi1		# pull upmost overflow bit
-	adc	\$0,%rdx
-	mov	$hi1,-8(%rsp,$num,8)
-	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
-
-	lea	1($i),$i		# i++
-	cmp	$num,$i
-	jl	.Louter
-
-	xor	$i,$i			# i=0 and clear CF!
-	mov	(%rsp),%rax		# tp[0]
-	lea	(%rsp),$ap		# borrow ap for tp
-	mov	$num,$j			# j=num
-	jmp	.Lsub
-.align	16
-.Lsub:	sbb	($np,$i,8),%rax
-	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
-	mov	8($ap,$i,8),%rax	# tp[i+1]
-	lea	1($i),$i		# i++
-	dec	$j			# doesn't affect CF!
-	jnz	.Lsub
-
-	sbb	\$0,%rax		# handle upmost overflow bit
-	xor	$i,$i
-	and	%rax,$ap
-	not	%rax
-	mov	$rp,$np
-	and	%rax,$np
-	mov	$num,$j			# j=num
-	or	$np,$ap			# ap=borrow?tp:rp
-.align	16
-.Lcopy:					# copy or in-place refresh
-	mov	($ap,$i,8),%rax
-	mov	$i,(%rsp,$i,8)		# zap temporary vector
-	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
-	lea	1($i),$i
-	sub	\$1,$j
-	jnz	.Lcopy
-
-	mov	8(%rsp,$num,8),%rsi	# restore %rsp
-	mov	\$1,%rax
-	mov	(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Lmul_epilogue:
-	ret
-.size	bn_mul_mont,.-bn_mul_mont
-___
-{{{
-my @A=("%r10","%r11");
-my @N=("%r13","%rdi");
-$code.=<<___;
-.type	bn_mul4x_mont,\@function,6
-.align	16
-bn_mul4x_mont:
-.Lmul4x_enter:
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-
-	mov	${num}d,${num}d
-	lea	4($num),%r10
-	mov	%rsp,%r11
-	neg	%r10
-	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+4))
-	and	\$-1024,%rsp		# minimize TLB usage
-
-	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
-.Lmul4x_body:
-	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
-	mov	%rdx,%r12		# reassign $bp
-___
-		$bp="%r12";
-$code.=<<___;
-	mov	($n0),$n0		# pull n0[0] value
-	mov	($bp),$m0		# m0=bp[0]
-	mov	($ap),%rax
-
-	xor	$i,$i			# i=0
-	xor	$j,$j			# j=0
-
-	mov	$n0,$m1
-	mulq	$m0			# ap[0]*bp[0]
-	mov	%rax,$A[0]
-	mov	($np),%rax
-
-	imulq	$A[0],$m1		# "tp[0]"*n0
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[0]*m1
-	add	%rax,$A[0]		# discarded
-	mov	8($ap),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$N[1]
-
-	mulq	$m0
-	add	%rax,$A[1]
-	mov	8($np),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1
-	add	%rax,$N[1]
-	mov	16($ap),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]
-	lea	4($j),$j		# j++
-	adc	\$0,%rdx
-	mov	$N[1],(%rsp)
-	mov	%rdx,$N[0]
-	jmp	.L1st4x
-.align	16
-.L1st4x:
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[0]
-	mov	-16($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	-8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[1]
-	mov	-8($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[0]
-	mov	($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[1]
-	mov	8($np,$j,8),%rax
-	adc	\$0,%rdx
-	lea	4($j),$j		# j++
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	-16($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-	cmp	$num,$j
-	jl	.L1st4x
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[0]
-	mov	-16($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	-8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[1]
-	mov	-8($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	($ap),%rax		# ap[0]
-	adc	\$0,%rdx
-	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-
-	xor	$N[1],$N[1]
-	add	$A[0],$N[0]
-	adc	\$0,$N[1]
-	mov	$N[0],-8(%rsp,$j,8)
-	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit
-
-	lea	1($i),$i		# i++
-.align	4
-.Louter4x:
-	mov	($bp,$i,8),$m0		# m0=bp[i]
-	xor	$j,$j			# j=0
-	mov	(%rsp),$A[0]
-	mov	$n0,$m1
-	mulq	$m0			# ap[0]*bp[i]
-	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
-	mov	($np),%rax
-	adc	\$0,%rdx
-
-	imulq	$A[0],$m1		# tp[0]*n0
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[0]*m1
-	add	%rax,$A[0]		# "$N[0]", discarded
-	mov	8($ap),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[1]
-	mov	8($np),%rax
-	adc	\$0,%rdx
-	add	8(%rsp),$A[1]		# +tp[1]
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	16($ap),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
-	lea	4($j),$j		# j+=2
-	adc	\$0,%rdx
-	mov	$N[1],(%rsp)		# tp[j-1]
-	mov	%rdx,$N[0]
-	jmp	.Linner4x
-.align	16
-.Linner4x:
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[0]
-	mov	-16($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	-8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]
-	adc	\$0,%rdx
-	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[1]
-	mov	-8($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	-8(%rsp,$j,8),$A[1]
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]
-	adc	\$0,%rdx
-	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[0]
-	mov	($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]
-	adc	\$0,%rdx
-	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[1]
-	mov	8($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	8(%rsp,$j,8),$A[1]
-	adc	\$0,%rdx
-	lea	4($j),$j		# j++
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	-16($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]
-	adc	\$0,%rdx
-	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-	cmp	$num,$j
-	jl	.Linner4x
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[0]
-	mov	-16($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	-8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]
-	adc	\$0,%rdx
-	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[1]
-	mov	-8($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	-8(%rsp,$j,8),$A[1]
-	adc	\$0,%rdx
-	lea	1($i),$i		# i++
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	($ap),%rax		# ap[0]
-	adc	\$0,%rdx
-	add	$A[1],$N[1]
-	adc	\$0,%rdx
-	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-
-	xor	$N[1],$N[1]
-	add	$A[0],$N[0]
-	adc	\$0,$N[1]
-	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
-	adc	\$0,$N[1]
-	mov	$N[0],-8(%rsp,$j,8)
-	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit
-
-	cmp	$num,$i
-	jl	.Louter4x
-___
-{
-my @ri=("%rax","%rdx",$m0,$m1);
-$code.=<<___;
-	mov	16(%rsp,$num,8),$rp	# restore $rp
-	mov	0(%rsp),@ri[0]		# tp[0]
-	pxor	%xmm0,%xmm0
-	mov	8(%rsp),@ri[1]		# tp[1]
-	shr	\$2,$num		# num/=4
-	lea	(%rsp),$ap		# borrow ap for tp
-	xor	$i,$i			# i=0 and clear CF!
-
-	sub	0($np),@ri[0]
-	mov	16($ap),@ri[2]		# tp[2]
-	mov	24($ap),@ri[3]		# tp[3]
-	sbb	8($np),@ri[1]
-	lea	-1($num),$j		# j=num/4-1
-	jmp	.Lsub4x
-.align	16
-.Lsub4x:
-	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	sbb	16($np,$i,8),@ri[2]
-	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
-	mov	40($ap,$i,8),@ri[1]
-	sbb	24($np,$i,8),@ri[3]
-	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	sbb	32($np,$i,8),@ri[0]
-	mov	48($ap,$i,8),@ri[2]
-	mov	56($ap,$i,8),@ri[3]
-	sbb	40($np,$i,8),@ri[1]
-	lea	4($i),$i		# i++
-	dec	$j			# doesn't affect CF!
-	jnz	.Lsub4x
-
-	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	mov	32($ap,$i,8),@ri[0]	# load overflow bit
-	sbb	16($np,$i,8),@ri[2]
-	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	sbb	24($np,$i,8),@ri[3]
-	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
-
-	sbb	\$0,@ri[0]		# handle upmost overflow bit
-	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	xor	$i,$i			# i=0
-	and	@ri[0],$ap
-	not	@ri[0]
-	mov	$rp,$np
-	and	@ri[0],$np
-	lea	-1($num),$j
-	or	$np,$ap			# ap=borrow?tp:rp
-
-	movdqu	($ap),%xmm1
-	movdqa	%xmm0,(%rsp)
-	movdqu	%xmm1,($rp)
-	jmp	.Lcopy4x
-.align	16
-.Lcopy4x:					# copy or in-place refresh
-	movdqu	16($ap,$i),%xmm2
-	movdqu	32($ap,$i),%xmm1
-	movdqa	%xmm0,16(%rsp,$i)
-	movdqu	%xmm2,16($rp,$i)
-	movdqa	%xmm0,32(%rsp,$i)
-	movdqu	%xmm1,32($rp,$i)
-	lea	32($i),$i
-	dec	$j
-	jnz	.Lcopy4x
-
-	shl	\$2,$num
-	movdqu	16($ap,$i),%xmm2
-	movdqa	%xmm0,16(%rsp,$i)
-	movdqu	%xmm2,16($rp,$i)
-___
-}
-$code.=<<___;
-	mov	8(%rsp,$num,8),%rsi	# restore %rsp
-	mov	\$1,%rax
-	mov	(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Lmul4x_epilogue:
-	ret
-.size	bn_mul4x_mont,.-bn_mul4x_mont
-___
-}}}
-{{{
-######################################################################
-# void bn_sqr4x_mont(
-my $rptr="%rdi";	# const BN_ULONG *rptr,
-my $aptr="%rsi";	# const BN_ULONG *aptr,
-my $bptr="%rdx";	# not used
-my $nptr="%rcx";	# const BN_ULONG *nptr,
-my $n0  ="%r8";		# const BN_ULONG *n0);
-my $num ="%r9";		# int num, has to be divisible by 4 and
-			# not less than 8
-
-my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
-my @A0=("%r10","%r11");
-my @A1=("%r12","%r13");
-my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
-
-$code.=<<___;
-.type	bn_sqr4x_mont,\@function,6
-.align	16
-bn_sqr4x_mont:
-.Lsqr4x_enter:
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-
-	shl	\$3,${num}d		# convert $num to bytes
-	xor	%r10,%r10
-	mov	%rsp,%r11		# put aside %rsp
-	sub	$num,%r10		# -$num
-	mov	($n0),$n0		# *n0
-	lea	-72(%rsp,%r10,2),%rsp	# alloca(frame+2*$num)
-	and	\$-1024,%rsp		# minimize TLB usage
-	##############################################################
-	# Stack layout
-	#
-	# +0	saved $num, used in reduction section
-	# +8	&t[2*$num], used in reduction section
-	# +32	saved $rptr
-	# +40	saved $nptr
-	# +48	saved *n0
-	# +56	saved %rsp
-	# +64	t[2*$num]
-	#
-	mov	$rptr,32(%rsp)		# save $rptr
-	mov	$nptr,40(%rsp)
-	mov	$n0,  48(%rsp)
-	mov	%r11, 56(%rsp)		# save original %rsp
-.Lsqr4x_body:
-	##############################################################
-	# Squaring part:
-	#
-	# a) multiply-n-add everything but a[i]*a[i];
-	# b) shift result of a) by 1 to the left and accumulate
-	#    a[i]*a[i] products;
-	#
-	lea	32(%r10),$i		# $i=-($num-32)
-	lea	($aptr,$num),$aptr	# end of a[] buffer, ($aptr,$i)=&ap[2]
-
-	mov	$num,$j			# $j=$num
-
-					# comments apply to $num==8 case
-	mov	-32($aptr,$i),$a0	# a[0]
-	lea	64(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
-	mov	-24($aptr,$i),%rax	# a[1]
-	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
-	mov	-16($aptr,$i),$ai	# a[2]
-	mov	%rax,$a1
-
-	mul	$a0			# a[1]*a[0]
-	mov	%rax,$A0[0]		# a[1]*a[0]
-	 mov	$ai,%rax		# a[2]
-	mov	%rdx,$A0[1]
-	mov	$A0[0],-24($tptr,$i)	# t[1]
-
-	xor	$A0[0],$A0[0]
-	mul	$a0			# a[2]*a[0]
-	add	%rax,$A0[1]
-	 mov	$ai,%rax
-	adc	%rdx,$A0[0]
-	mov	$A0[1],-16($tptr,$i)	# t[2]
-
-	lea	-16($i),$j		# j=-16
-
-
-	 mov	8($aptr,$j),$ai		# a[3]
-	mul	$a1			# a[2]*a[1]
-	mov	%rax,$A1[0]		# a[2]*a[1]+t[3]
-	 mov	$ai,%rax
-	mov	%rdx,$A1[1]
-
-	xor	$A0[1],$A0[1]
-	add	$A1[0],$A0[0]
-	 lea	16($j),$j
-	adc	\$0,$A0[1]
-	mul	$a0			# a[3]*a[0]
-	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
-	 mov	$ai,%rax
-	adc	%rdx,$A0[1]
-	mov	$A0[0],-8($tptr,$j)	# t[3]
-	jmp	.Lsqr4x_1st
-
-.align	16
-.Lsqr4x_1st:
-	 mov	($aptr,$j),$ai		# a[4]
-	xor	$A1[0],$A1[0]
-	mul	$a1			# a[3]*a[1]
-	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
-	 mov	$ai,%rax
-	adc	%rdx,$A1[0]
-
-	xor	$A0[0],$A0[0]
-	add	$A1[1],$A0[1]
-	adc	\$0,$A0[0]
-	mul	$a0			# a[4]*a[0]
-	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
-	 mov	$ai,%rax		# a[3]
-	adc	%rdx,$A0[0]
-	mov	$A0[1],($tptr,$j)	# t[4]
-
-
-	 mov	8($aptr,$j),$ai		# a[5]
-	xor	$A1[1],$A1[1]
-	mul	$a1			# a[4]*a[3]
-	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
-	 mov	$ai,%rax
-	adc	%rdx,$A1[1]
-
-	xor	$A0[1],$A0[1]
-	add	$A1[0],$A0[0]
-	adc	\$0,$A0[1]
-	mul	$a0			# a[5]*a[2]
-	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
-	 mov	$ai,%rax
-	adc	%rdx,$A0[1]
-	mov	$A0[0],8($tptr,$j)	# t[5]
-
-	 mov	16($aptr,$j),$ai	# a[6]
-	xor	$A1[0],$A1[0]
-	mul	$a1			# a[5]*a[3]
-	add	%rax,$A1[1]		# a[5]*a[3]+t[6]
-	 mov	$ai,%rax
-	adc	%rdx,$A1[0]
-
-	xor	$A0[0],$A0[0]
-	add	$A1[1],$A0[1]
-	adc	\$0,$A0[0]
-	mul	$a0			# a[6]*a[2]
-	add	%rax,$A0[1]		# a[6]*a[2]+a[5]*a[3]+t[6]
-	 mov	$ai,%rax		# a[3]
-	adc	%rdx,$A0[0]
-	mov	$A0[1],16($tptr,$j)	# t[6]
-
-
-	 mov	24($aptr,$j),$ai	# a[7]
-	xor	$A1[1],$A1[1]
-	mul	$a1			# a[6]*a[5]
-	add	%rax,$A1[0]		# a[6]*a[5]+t[7]
-	 mov	$ai,%rax
-	adc	%rdx,$A1[1]
-
-	xor	$A0[1],$A0[1]
-	add	$A1[0],$A0[0]
-	 lea	32($j),$j
-	adc	\$0,$A0[1]
-	mul	$a0			# a[7]*a[4]
-	add	%rax,$A0[0]		# a[7]*a[4]+a[6]*a[5]+t[6]
-	 mov	$ai,%rax
-	adc	%rdx,$A0[1]
-	mov	$A0[0],-8($tptr,$j)	# t[7]
-
-	cmp	\$0,$j
-	jne	.Lsqr4x_1st
-
-	xor	$A1[0],$A1[0]
-	add	$A0[1],$A1[1]
-	adc	\$0,$A1[0]
-	mul	$a1			# a[7]*a[5]
-	add	%rax,$A1[1]
-	adc	%rdx,$A1[0]
-
-	mov	$A1[1],($tptr)		# t[8]
-	lea	16($i),$i
-	mov	$A1[0],8($tptr)		# t[9]
-	jmp	.Lsqr4x_outer
-
-.align	16
-.Lsqr4x_outer:				# comments apply to $num==6 case
-	mov	-32($aptr,$i),$a0	# a[0]
-	lea	64(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
-	mov	-24($aptr,$i),%rax	# a[1]
-	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
-	mov	-16($aptr,$i),$ai	# a[2]
-	mov	%rax,$a1
-
-	mov	-24($tptr,$i),$A0[0]	# t[1]
-	xor	$A0[1],$A0[1]
-	mul	$a0			# a[1]*a[0]
-	add	%rax,$A0[0]		# a[1]*a[0]+t[1]
-	 mov	$ai,%rax		# a[2]
-	adc	%rdx,$A0[1]
-	mov	$A0[0],-24($tptr,$i)	# t[1]
-
-	xor	$A0[0],$A0[0]
-	add	-16($tptr,$i),$A0[1]	# a[2]*a[0]+t[2]
-	adc	\$0,$A0[0]
-	mul	$a0			# a[2]*a[0]
-	add	%rax,$A0[1]
-	 mov	$ai,%rax
-	adc	%rdx,$A0[0]
-	mov	$A0[1],-16($tptr,$i)	# t[2]
-
-	lea	-16($i),$j		# j=-16
-	xor	$A1[0],$A1[0]
-
-
-	 mov	8($aptr,$j),$ai		# a[3]
-	xor	$A1[1],$A1[1]
-	add	8($tptr,$j),$A1[0]
-	adc	\$0,$A1[1]
-	mul	$a1			# a[2]*a[1]
-	add	%rax,$A1[0]		# a[2]*a[1]+t[3]
-	 mov	$ai,%rax
-	adc	%rdx,$A1[1]
-
-	xor	$A0[1],$A0[1]
-	add	$A1[0],$A0[0]
-	adc	\$0,$A0[1]
-	mul	$a0			# a[3]*a[0]
-	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
-	 mov	$ai,%rax
-	adc	%rdx,$A0[1]
-	mov	$A0[0],8($tptr,$j)	# t[3]
-
-	lea	16($j),$j
-	jmp	.Lsqr4x_inner
-
-.align	16
-.Lsqr4x_inner:
-	 mov	($aptr,$j),$ai		# a[4]
-	xor	$A1[0],$A1[0]
-	add	($tptr,$j),$A1[1]
-	adc	\$0,$A1[0]
-	mul	$a1			# a[3]*a[1]
-	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
-	 mov	$ai,%rax
-	adc	%rdx,$A1[0]
-
-	xor	$A0[0],$A0[0]
-	add	$A1[1],$A0[1]
-	adc	\$0,$A0[0]
-	mul	$a0			# a[4]*a[0]
-	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
-	 mov	$ai,%rax		# a[3]
-	adc	%rdx,$A0[0]
-	mov	$A0[1],($tptr,$j)	# t[4]
-
-	 mov	8($aptr,$j),$ai		# a[5]
-	xor	$A1[1],$A1[1]
-	add	8($tptr,$j),$A1[0]
-	adc	\$0,$A1[1]
-	mul	$a1			# a[4]*a[3]
-	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
-	 mov	$ai,%rax
-	adc	%rdx,$A1[1]
-
-	xor	$A0[1],$A0[1]
-	add	$A1[0],$A0[0]
-	lea	16($j),$j		# j++
-	adc	\$0,$A0[1]
-	mul	$a0			# a[5]*a[2]
-	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
-	 mov	$ai,%rax
-	adc	%rdx,$A0[1]
-	mov	$A0[0],-8($tptr,$j)	# t[5], "preloaded t[1]" below
-
-	cmp	\$0,$j
-	jne	.Lsqr4x_inner
-
-	xor	$A1[0],$A1[0]
-	add	$A0[1],$A1[1]
-	adc	\$0,$A1[0]
-	mul	$a1			# a[5]*a[3]
-	add	%rax,$A1[1]
-	adc	%rdx,$A1[0]
-
-	mov	$A1[1],($tptr)		# t[6], "preloaded t[2]" below
-	mov	$A1[0],8($tptr)		# t[7], "preloaded t[3]" below
-
-	add	\$16,$i
-	jnz	.Lsqr4x_outer
-
-					# comments apply to $num==4 case
-	mov	-32($aptr),$a0		# a[0]
-	lea	64(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
-	mov	-24($aptr),%rax		# a[1]
-	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
-	mov	-16($aptr),$ai		# a[2]
-	mov	%rax,$a1
-
-	xor	$A0[1],$A0[1]
-	mul	$a0			# a[1]*a[0]
-	add	%rax,$A0[0]		# a[1]*a[0]+t[1], preloaded t[1]
-	 mov	$ai,%rax		# a[2]
-	adc	%rdx,$A0[1]
-	mov	$A0[0],-24($tptr)	# t[1]
-
-	xor	$A0[0],$A0[0]
-	add	$A1[1],$A0[1]		# a[2]*a[0]+t[2], preloaded t[2]
-	adc	\$0,$A0[0]
-	mul	$a0			# a[2]*a[0]
-	add	%rax,$A0[1]
-	 mov	$ai,%rax
-	adc	%rdx,$A0[0]
-	mov	$A0[1],-16($tptr)	# t[2]
-
-	 mov	-8($aptr),$ai		# a[3]
-	mul	$a1			# a[2]*a[1]
-	add	%rax,$A1[0]		# a[2]*a[1]+t[3], preloaded t[3]
-	 mov	$ai,%rax
-	adc	\$0,%rdx
-
-	xor	$A0[1],$A0[1]
-	add	$A1[0],$A0[0]
-	 mov	%rdx,$A1[1]
-	adc	\$0,$A0[1]
-	mul	$a0			# a[3]*a[0]
-	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
-	 mov	$ai,%rax
-	adc	%rdx,$A0[1]
-	mov	$A0[0],-8($tptr)	# t[3]
-
-	xor	$A1[0],$A1[0]
-	add	$A0[1],$A1[1]
-	adc	\$0,$A1[0]
-	mul	$a1			# a[3]*a[1]
-	add	%rax,$A1[1]
-	 mov	-16($aptr),%rax		# a[2]
-	adc	%rdx,$A1[0]
-
-	mov	$A1[1],($tptr)		# t[4]
-	mov	$A1[0],8($tptr)		# t[5]
-
-	mul	$ai			# a[2]*a[3]
-___
-{
-my ($shift,$carry)=($a0,$a1);
-my @S=(@A1,$ai,$n0);
-$code.=<<___;
-	 add	\$16,$i
-	 xor	$shift,$shift
-	 sub	$num,$i			# $i=16-$num
-	 xor	$carry,$carry
-
-	add	$A1[0],%rax		# t[5]
-	adc	\$0,%rdx
-	mov	%rax,8($tptr)		# t[5]
-	mov	%rdx,16($tptr)		# t[6]
-	mov	$carry,24($tptr)	# t[7]
-
-	 mov	-16($aptr,$i),%rax	# a[0]
-	lea	64(%rsp,$num,2),$tptr
-	 xor	$A0[0],$A0[0]		# t[0]
-	 mov	-24($tptr,$i,2),$A0[1]	# t[1]
-
-	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
-	shr	\$63,$A0[0]
-	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
-	shr	\$63,$A0[1]
-	or	$A0[0],$S[1]		# | t[2*i]>>63
-	 mov	-16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
-	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
-	mul	%rax			# a[i]*a[i]
-	neg	$carry			# mov $carry,cf
-	 mov	-8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
-	adc	%rax,$S[0]
-	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
-	mov	$S[0],-32($tptr,$i,2)
-	adc	%rdx,$S[1]
-
-	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
-	 mov	$S[1],-24($tptr,$i,2)
-	 sbb	$carry,$carry		# mov cf,$carry
-	shr	\$63,$A0[0]
-	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
-	shr	\$63,$A0[1]
-	or	$A0[0],$S[3]		# | t[2*i]>>63
-	 mov	0($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
-	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
-	mul	%rax			# a[i]*a[i]
-	neg	$carry			# mov $carry,cf
-	 mov	8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
-	adc	%rax,$S[2]
-	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
-	mov	$S[2],-16($tptr,$i,2)
-	adc	%rdx,$S[3]
-	lea	16($i),$i
-	mov	$S[3],-40($tptr,$i,2)
-	sbb	$carry,$carry		# mov cf,$carry
-	jmp	.Lsqr4x_shift_n_add
-
-.align	16
-.Lsqr4x_shift_n_add:
-	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
-	shr	\$63,$A0[0]
-	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
-	shr	\$63,$A0[1]
-	or	$A0[0],$S[1]		# | t[2*i]>>63
-	 mov	-16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
-	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
-	mul	%rax			# a[i]*a[i]
-	neg	$carry			# mov $carry,cf
-	 mov	-8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
-	adc	%rax,$S[0]
-	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
-	mov	$S[0],-32($tptr,$i,2)
-	adc	%rdx,$S[1]
-
-	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
-	 mov	$S[1],-24($tptr,$i,2)
-	 sbb	$carry,$carry		# mov cf,$carry
-	shr	\$63,$A0[0]
-	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
-	shr	\$63,$A0[1]
-	or	$A0[0],$S[3]		# | t[2*i]>>63
-	 mov	0($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
-	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
-	mul	%rax			# a[i]*a[i]
-	neg	$carry			# mov $carry,cf
-	 mov	8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
-	adc	%rax,$S[2]
-	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
-	mov	$S[2],-16($tptr,$i,2)
-	adc	%rdx,$S[3]
-
-	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
-	 mov	$S[3],-8($tptr,$i,2)
-	 sbb	$carry,$carry		# mov cf,$carry
-	shr	\$63,$A0[0]
-	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
-	shr	\$63,$A0[1]
-	or	$A0[0],$S[1]		# | t[2*i]>>63
-	 mov	16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
-	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
-	mul	%rax			# a[i]*a[i]
-	neg	$carry			# mov $carry,cf
-	 mov	24($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
-	adc	%rax,$S[0]
-	 mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
-	mov	$S[0],0($tptr,$i,2)
-	adc	%rdx,$S[1]
-
-	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
-	 mov	$S[1],8($tptr,$i,2)
-	 sbb	$carry,$carry		# mov cf,$carry
-	shr	\$63,$A0[0]
-	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
-	shr	\$63,$A0[1]
-	or	$A0[0],$S[3]		# | t[2*i]>>63
-	 mov	32($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
-	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
-	mul	%rax			# a[i]*a[i]
-	neg	$carry			# mov $carry,cf
-	 mov	40($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
-	adc	%rax,$S[2]
-	 mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
-	mov	$S[2],16($tptr,$i,2)
-	adc	%rdx,$S[3]
-	mov	$S[3],24($tptr,$i,2)
-	sbb	$carry,$carry		# mov cf,$carry
-	add	\$32,$i
-	jnz	.Lsqr4x_shift_n_add
-
-	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
-	shr	\$63,$A0[0]
-	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
-	shr	\$63,$A0[1]
-	or	$A0[0],$S[1]		# | t[2*i]>>63
-	 mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
-	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
-	mul	%rax			# a[i]*a[i]
-	neg	$carry			# mov $carry,cf
-	 mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
-	adc	%rax,$S[0]
-	 mov	-8($aptr),%rax		# a[i+1]	# prefetch
-	mov	$S[0],-32($tptr)
-	adc	%rdx,$S[1]
-
-	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1|shift
-	 mov	$S[1],-24($tptr)
-	 sbb	$carry,$carry		# mov cf,$carry
-	shr	\$63,$A0[0]
-	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
-	shr	\$63,$A0[1]
-	or	$A0[0],$S[3]		# | t[2*i]>>63
-	mul	%rax			# a[i]*a[i]
-	neg	$carry			# mov $carry,cf
-	adc	%rax,$S[2]
-	adc	%rdx,$S[3]
-	mov	$S[2],-16($tptr)
-	mov	$S[3],-8($tptr)
-___
-}
-##############################################################
-# Montgomery reduction part, "word-by-word" algorithm.
-#
-{
-my ($topbit,$nptr)=("%rbp",$aptr);
-my ($m0,$m1)=($a0,$a1);
-my @Ni=("%rbx","%r9");
-$code.=<<___;
-	mov	40(%rsp),$nptr		# restore $nptr
-	mov	48(%rsp),$n0		# restore *n0
-	xor	$j,$j
-	mov	$num,0(%rsp)		# save $num
-	sub	$num,$j			# $j=-$num
-	 mov	64(%rsp),$A0[0]		# t[0]		# modsched #
-	 mov	$n0,$m0			#		# modsched #
-	lea	64(%rsp,$num,2),%rax	# end of t[] buffer
-	lea	64(%rsp,$num),$tptr	# end of t[] window
-	mov	%rax,8(%rsp)		# save end of t[] buffer
-	lea	($nptr,$num),$nptr	# end of n[] buffer
-	xor	$topbit,$topbit		# $topbit=0
-
-	mov	0($nptr,$j),%rax	# n[0]		# modsched #
-	mov	8($nptr,$j),$Ni[1]	# n[1]		# modsched #
-	 imulq	$A0[0],$m0		# m0=t[0]*n0	# modsched #
-	 mov	%rax,$Ni[0]		#		# modsched #
-	jmp	.Lsqr4x_mont_outer
-
-.align	16
-.Lsqr4x_mont_outer:
-	xor	$A0[1],$A0[1]
-	mul	$m0			# n[0]*m0
-	add	%rax,$A0[0]		# n[0]*m0+t[0]
-	 mov	$Ni[1],%rax
-	adc	%rdx,$A0[1]
-	mov	$n0,$m1
-
-	xor	$A0[0],$A0[0]
-	add	8($tptr,$j),$A0[1]
-	adc	\$0,$A0[0]
-	mul	$m0			# n[1]*m0
-	add	%rax,$A0[1]		# n[1]*m0+t[1]
-	 mov	$Ni[0],%rax
-	adc	%rdx,$A0[0]
-
-	imulq	$A0[1],$m1
-
-	mov	16($nptr,$j),$Ni[0]	# n[2]
-	xor	$A1[1],$A1[1]
-	add	$A0[1],$A1[0]
-	adc	\$0,$A1[1]
-	mul	$m1			# n[0]*m1
-	add	%rax,$A1[0]		# n[0]*m1+"t[1]"
-	 mov	$Ni[0],%rax
-	adc	%rdx,$A1[1]
-	mov	$A1[0],8($tptr,$j)	# "t[1]"
-
-	xor	$A0[1],$A0[1]
-	add	16($tptr,$j),$A0[0]
-	adc	\$0,$A0[1]
-	mul	$m0			# n[2]*m0
-	add	%rax,$A0[0]		# n[2]*m0+t[2]
-	 mov	$Ni[1],%rax
-	adc	%rdx,$A0[1]
-
-	mov	24($nptr,$j),$Ni[1]	# n[3]
-	xor	$A1[0],$A1[0]
-	add	$A0[0],$A1[1]
-	adc	\$0,$A1[0]
-	mul	$m1			# n[1]*m1
-	add	%rax,$A1[1]		# n[1]*m1+"t[2]"
-	 mov	$Ni[1],%rax
-	adc	%rdx,$A1[0]
-	mov	$A1[1],16($tptr,$j)	# "t[2]"
-
-	xor	$A0[0],$A0[0]
-	add	24($tptr,$j),$A0[1]
-	lea	32($j),$j
-	adc	\$0,$A0[0]
-	mul	$m0			# n[3]*m0
-	add	%rax,$A0[1]		# n[3]*m0+t[3]
-	 mov	$Ni[0],%rax
-	adc	%rdx,$A0[0]
-	jmp	.Lsqr4x_mont_inner
-
-.align	16
-.Lsqr4x_mont_inner:
-	mov	($nptr,$j),$Ni[0]	# n[4]
-	xor	$A1[1],$A1[1]
-	add	$A0[1],$A1[0]
-	adc	\$0,$A1[1]
-	mul	$m1			# n[2]*m1
-	add	%rax,$A1[0]		# n[2]*m1+"t[3]"
-	 mov	$Ni[0],%rax
-	adc	%rdx,$A1[1]
-	mov	$A1[0],-8($tptr,$j)	# "t[3]"
-
-	xor	$A0[1],$A0[1]
-	add	($tptr,$j),$A0[0]
-	adc	\$0,$A0[1]
-	mul	$m0			# n[4]*m0
-	add	%rax,$A0[0]		# n[4]*m0+t[4]
-	 mov	$Ni[1],%rax
-	adc	%rdx,$A0[1]
-
-	mov	8($nptr,$j),$Ni[1]	# n[5]
-	xor	$A1[0],$A1[0]
-	add	$A0[0],$A1[1]
-	adc	\$0,$A1[0]
-	mul	$m1			# n[3]*m1
-	add	%rax,$A1[1]		# n[3]*m1+"t[4]"
-	 mov	$Ni[1],%rax
-	adc	%rdx,$A1[0]
-	mov	$A1[1],($tptr,$j)	# "t[4]"
-
-	xor	$A0[0],$A0[0]
-	add	8($tptr,$j),$A0[1]
-	adc	\$0,$A0[0]
-	mul	$m0			# n[5]*m0
-	add	%rax,$A0[1]		# n[5]*m0+t[5]
-	 mov	$Ni[0],%rax
-	adc	%rdx,$A0[0]
-
-
-	mov	16($nptr,$j),$Ni[0]	# n[6]
-	xor	$A1[1],$A1[1]
-	add	$A0[1],$A1[0]
-	adc	\$0,$A1[1]
-	mul	$m1			# n[4]*m1
-	add	%rax,$A1[0]		# n[4]*m1+"t[5]"
-	 mov	$Ni[0],%rax
-	adc	%rdx,$A1[1]
-	mov	$A1[0],8($tptr,$j)	# "t[5]"
-
-	xor	$A0[1],$A0[1]
-	add	16($tptr,$j),$A0[0]
-	adc	\$0,$A0[1]
-	mul	$m0			# n[6]*m0
-	add	%rax,$A0[0]		# n[6]*m0+t[6]
-	 mov	$Ni[1],%rax
-	adc	%rdx,$A0[1]
-
-	mov	24($nptr,$j),$Ni[1]	# n[7]
-	xor	$A1[0],$A1[0]
-	add	$A0[0],$A1[1]
-	adc	\$0,$A1[0]
-	mul	$m1			# n[5]*m1
-	add	%rax,$A1[1]		# n[5]*m1+"t[6]"
-	 mov	$Ni[1],%rax
-	adc	%rdx,$A1[0]
-	mov	$A1[1],16($tptr,$j)	# "t[6]"
-
-	xor	$A0[0],$A0[0]
-	add	24($tptr,$j),$A0[1]
-	lea	32($j),$j
-	adc	\$0,$A0[0]
-	mul	$m0			# n[7]*m0
-	add	%rax,$A0[1]		# n[7]*m0+t[7]
-	 mov	$Ni[0],%rax
-	adc	%rdx,$A0[0]
-	cmp	\$0,$j
-	jne	.Lsqr4x_mont_inner
-
-	 sub	0(%rsp),$j		# $j=-$num	# modsched #
-	 mov	$n0,$m0			#		# modsched #
-
-	xor	$A1[1],$A1[1]
-	add	$A0[1],$A1[0]
-	adc	\$0,$A1[1]
-	mul	$m1			# n[6]*m1
-	add	%rax,$A1[0]		# n[6]*m1+"t[7]"
-	mov	$Ni[1],%rax
-	adc	%rdx,$A1[1]
-	mov	$A1[0],-8($tptr)	# "t[7]"
-
-	xor	$A0[1],$A0[1]
-	add	($tptr),$A0[0]		# +t[8]
-	adc	\$0,$A0[1]
-	 mov	0($nptr,$j),$Ni[0]	# n[0]		# modsched #
-	add	$topbit,$A0[0]
-	adc	\$0,$A0[1]
-
-	 imulq	16($tptr,$j),$m0	# m0=t[0]*n0	# modsched #
-	xor	$A1[0],$A1[0]
-	 mov	8($nptr,$j),$Ni[1]	# n[1]		# modsched #
-	add	$A0[0],$A1[1]
-	 mov	16($tptr,$j),$A0[0]	# t[0]		# modsched #
-	adc	\$0,$A1[0]
-	mul	$m1			# n[7]*m1
-	add	%rax,$A1[1]		# n[7]*m1+"t[8]"
-	 mov	$Ni[0],%rax		#		# modsched #
-	adc	%rdx,$A1[0]
-	mov	$A1[1],($tptr)		# "t[8]"
-
-	xor	$topbit,$topbit
-	add	8($tptr),$A1[0]		# +t[9]
-	adc	$topbit,$topbit
-	add	$A0[1],$A1[0]
-	lea	16($tptr),$tptr		# "t[$num]>>128"
-	adc	\$0,$topbit
-	mov	$A1[0],-8($tptr)	# "t[9]"
-	cmp	8(%rsp),$tptr		# are we done?
-	jb	.Lsqr4x_mont_outer
-
-	mov	0(%rsp),$num		# restore $num
-	mov	$topbit,($tptr)		# save $topbit
-___
-}
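
For orientation, here is a minimal C sketch of the word-by-word Montgomery
reduction that the block above unrolls; the helper and its interface are
hypothetical (not part of this patch) and assume 64-bit limbs with GCC's
unsigned __int128 for the double-wide products:

    #include <stdint.h>

    /* Reduce t[0..2n-1] modulo the odd modulus np[0..n-1]. n0 is
     * -np[0]^-1 mod 2^64, the per-word inverse the assembly keeps in
     * $n0/$m0. The reduced value lands in t[n..2n-1]; the returned
     * carry plays the role of $topbit. */
    static uint64_t mont_reduce_sketch(uint64_t *t, const uint64_t *np,
                                       int n, uint64_t n0)
    {
        uint64_t topbit = 0;
        int i, j;

        for (i = 0; i < n; i++) {
            uint64_t m = t[i] * n0; /* so t[i] + m*np[0] == 0 mod 2^64 */
            uint64_t carry = 0;
            for (j = 0; j < n; j++) {
                unsigned __int128 acc = (unsigned __int128)m * np[j]
                                      + t[i + j] + carry;
                t[i + j] = (uint64_t)acc;       /* t[i] becomes zero */
                carry = (uint64_t)(acc >> 64);
            }
            for (j = i + n; carry != 0 && j < 2 * n; j++) {
                unsigned __int128 acc = (unsigned __int128)t[j] + carry;
                t[j] = (uint64_t)acc;           /* ripple the carry up */
                carry = (uint64_t)(acc >> 64);
            }
            topbit += carry;                    /* spill past t[2n-1] */
        }
        return topbit; /* caller subtracts np once if the result >= np */
    }

The assembly interleaves two such multiply chains (m0 and m1) per pass and
keeps the working words in registers, but the arithmetic is the same.
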
-##############################################################
-# Post-condition, 4x unrolled copy from bn_mul_mont
-#
-{
-my ($tptr,$nptr)=("%rbx",$aptr);
-my @ri=("%rax","%rdx","%r10","%r11");
-$code.=<<___;
-	mov	64(%rsp,$num),@ri[0]	# tp[0]
-	lea	64(%rsp,$num),$tptr	# upper half of t[2*$num] holds result
-	mov	40(%rsp),$nptr		# restore $nptr
-	shr	\$5,$num		# num/4
-	mov	8($tptr),@ri[1]		# t[1]
-	xor	$i,$i			# i=0 and clear CF!
-
-	mov	32(%rsp),$rptr		# restore $rptr
-	sub	0($nptr),@ri[0]
-	mov	16($tptr),@ri[2]	# t[2]
-	mov	24($tptr),@ri[3]	# t[3]
-	sbb	8($nptr),@ri[1]
-	lea	-1($num),$j		# j=num/4-1
-	jmp	.Lsqr4x_sub
-.align	16
-.Lsqr4x_sub:
-	mov	@ri[0],0($rptr,$i,8)	# rp[i]=tp[i]-np[i]
-	mov	@ri[1],8($rptr,$i,8)	# rp[i]=tp[i]-np[i]
-	sbb	16($nptr,$i,8),@ri[2]
-	mov	32($tptr,$i,8),@ri[0]	# tp[i+1]
-	mov	40($tptr,$i,8),@ri[1]
-	sbb	24($nptr,$i,8),@ri[3]
-	mov	@ri[2],16($rptr,$i,8)	# rp[i]=tp[i]-np[i]
-	mov	@ri[3],24($rptr,$i,8)	# rp[i]=tp[i]-np[i]
-	sbb	32($nptr,$i,8),@ri[0]
-	mov	48($tptr,$i,8),@ri[2]
-	mov	56($tptr,$i,8),@ri[3]
-	sbb	40($nptr,$i,8),@ri[1]
-	lea	4($i),$i		# i++
-	dec	$j			# doesn't affect CF!
-	jnz	.Lsqr4x_sub
-
-	mov	@ri[0],0($rptr,$i,8)	# rp[i]=tp[i]-np[i]
-	mov	32($tptr,$i,8),@ri[0]	# load overflow bit
-	sbb	16($nptr,$i,8),@ri[2]
-	mov	@ri[1],8($rptr,$i,8)	# rp[i]=tp[i]-np[i]
-	sbb	24($nptr,$i,8),@ri[3]
-	mov	@ri[2],16($rptr,$i,8)	# rp[i]=tp[i]-np[i]
-
-	sbb	\$0,@ri[0]		# handle upmost overflow bit
-	mov	@ri[3],24($rptr,$i,8)	# rp[i]=tp[i]-np[i]
-	xor	$i,$i			# i=0
-	and	@ri[0],$tptr
-	not	@ri[0]
-	mov	$rptr,$nptr
-	and	@ri[0],$nptr
-	lea	-1($num),$j
-	or	$nptr,$tptr		# tp=borrow?tp:rp
-
-	pxor	%xmm0,%xmm0
-	lea	64(%rsp,$num,8),$nptr
-	movdqu	($tptr),%xmm1
-	lea	($nptr,$num,8),$nptr
-	movdqa	%xmm0,64(%rsp)		# zap lower half of temporary vector
-	movdqa	%xmm0,($nptr)		# zap upper half of temporary vector
-	movdqu	%xmm1,($rptr)
-	jmp	.Lsqr4x_copy
-.align	16
-.Lsqr4x_copy:				# copy or in-place refresh
-	movdqu	16($tptr,$i),%xmm2
-	movdqu	32($tptr,$i),%xmm1
-	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
-	movdqa	%xmm0,96(%rsp,$i)	# zap lower half of temporary vector
-	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
-	movdqa	%xmm0,32($nptr,$i)	# zap upper half of temporary vector
-	movdqu	%xmm2,16($rptr,$i)
-	movdqu	%xmm1,32($rptr,$i)
-	lea	32($i),$i
-	dec	$j
-	jnz	.Lsqr4x_copy
-
-	movdqu	16($tptr,$i),%xmm2
-	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
-	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
-	movdqu	%xmm2,16($rptr,$i)
-___
-}
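
The sub/copy pair above is the standard branch-free ending: rp[]
unconditionally receives tp[] - np[], and the final borrow then selects,
through the and/not/or masking in .Lsqr4x_sub, whether rp[] is overwritten
from tp[] (borrow taken) or merely refreshed in place (no borrow). A rough C
equivalent, with hypothetical names and the borrow already normalized to
0 or 1:

    #include <stdint.h>
    #include <string.h>

    static void mont_final_select(uint64_t *rp, uint64_t *tp,
                                  size_t n, uint64_t borrow)
    {
        /* all-ones if the subtraction underflowed, else all-zeros */
        uintptr_t mask = 0 - (uintptr_t)borrow;
        const uint64_t *src = (const uint64_t *)
            (((uintptr_t)tp & mask) | ((uintptr_t)rp & ~mask));
        size_t i;

        for (i = 0; i < n; i++)
            rp[i] = src[i];                 /* copy or in-place refresh */
        memset(tp, 0, 2 * n * sizeof(*tp)); /* zap the temporary vector */
    }

Selecting the source pointer with masks rather than branching keeps the final
step free of a data-dependent branch on the subtraction outcome, which is
also why the movdqa stores above zap both halves of the temporary.
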
-$code.=<<___;
-	mov	56(%rsp),%rsi		# restore %rsp
-	mov	\$1,%rax
-	mov	0(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Lsqr4x_epilogue:
-	ret
-.size	bn_sqr4x_mont,.-bn_sqr4x_mont
-___
-}}}
-$code.=<<___;
-.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
-.align	16
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern	__imp_RtlVirtualUnwind
-.type	mul_handler,\@abi-omnipotent
-.align	16
-mul_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# end of prologue label
-	cmp	%r10,%rbx		# context->Rip<end of prologue label
-	jb	.Lcommon_seh_tail
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lcommon_seh_tail
-
-	mov	192($context),%r10	# pull $num
-	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
-	lea	48(%rax),%rax
-
-	mov	-8(%rax),%rbx
-	mov	-16(%rax),%rbp
-	mov	-24(%rax),%r12
-	mov	-32(%rax),%r13
-	mov	-40(%rax),%r14
-	mov	-48(%rax),%r15
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-	jmp	.Lcommon_seh_tail
-.size	mul_handler,.-mul_handler
-
-.type	sqr_handler,\@abi-omnipotent
-.align	16
-sqr_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	lea	.Lsqr4x_body(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<.Lsqr_body
-	jb	.Lcommon_seh_tail
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	lea	.Lsqr4x_epilogue(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip>=.Lsqr_epilogue
-	jae	.Lcommon_seh_tail
-
-	mov	56(%rax),%rax		# pull saved stack pointer
-	lea	48(%rax),%rax
-
-	mov	-8(%rax),%rbx
-	mov	-16(%rax),%rbp
-	mov	-24(%rax),%r12
-	mov	-32(%rax),%r13
-	mov	-40(%rax),%r14
-	mov	-48(%rax),%r15
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lcommon_seh_tail:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$154,%ecx		# sizeof(CONTEXT)
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	add	\$64,%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	sqr_handler,.-sqr_handler
-
-.section	.pdata
-.align	4
-	.rva	.LSEH_begin_bn_mul_mont
-	.rva	.LSEH_end_bn_mul_mont
-	.rva	.LSEH_info_bn_mul_mont
-
-	.rva	.LSEH_begin_bn_mul4x_mont
-	.rva	.LSEH_end_bn_mul4x_mont
-	.rva	.LSEH_info_bn_mul4x_mont
-
-	.rva	.LSEH_begin_bn_sqr4x_mont
-	.rva	.LSEH_end_bn_sqr4x_mont
-	.rva	.LSEH_info_bn_sqr4x_mont
-
-.section	.xdata
-.align	8
-.LSEH_info_bn_mul_mont:
-	.byte	9,0,0,0
-	.rva	mul_handler
-	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
-.LSEH_info_bn_mul4x_mont:
-	.byte	9,0,0,0
-	.rva	mul_handler
-	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
-.LSEH_info_bn_sqr4x_mont:
-	.byte	9,0,0,0
-	.rva	sqr_handler
-___
-}
-
-print $code;
-close STDOUT;

+ 0 - 1186
drivers/builtin_openssl2/crypto/bn/asm/x86_64-mont5.pl

@@ -1,1186 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <[email protected]> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# August 2011.
-#
-# Companion to x86_64-mont.pl that optimizes cache-timing attack
-# countermeasures. The subroutines are produced by replacing bp[i]
-# references in their x86_64-mont.pl counterparts with cache-neutral
-# references to a powers table computed in BN_mod_exp_mont_consttime.
-# In addition, a subroutine that scatters elements of the powers table
-# is implemented, so that scattering/gathering can be tuned without
-# bn_exp.c modifications.
-
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-# int bn_mul_mont_gather5(
-$rp="%rdi";	# BN_ULONG *rp,
-$ap="%rsi";	# const BN_ULONG *ap,
-$bp="%rdx";	# const BN_ULONG *bp,
-$np="%rcx";	# const BN_ULONG *np,
-$n0="%r8";	# const BN_ULONG *n0,
-$num="%r9";	# int num,
-		# int idx);	# 0 to 2^5-1, "index" in $bp holding
-				# pre-computed powers of a', interlaced
-				# in such manner that b[0] is $bp[idx],
-				# b[1] is [2^5+idx], etc.
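
Concretely, word j of the power selected by idx lives at slot 32*j + idx, so
reading one power touches every 32-entry stripe at the same offsets no matter
which index was requested. A hypothetical C model of the layout (matching
what bn_scatter5, later in this file, writes):

    #include <stdint.h>

    /* Interlace the num-word value b[] as power number idx (0..31) of a
     * window-5 table: word j goes to tbl[32*j + idx], i.e. base idx*8
     * plus a 32*8-byte stride, as in bn_scatter5. */
    static void scatter5_model(uint64_t *tbl, const uint64_t *b,
                               int num, int idx)
    {
        int j;

        for (j = 0; j < num; j++)
            tbl[32 * j + idx] = b[j];
    }
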
-$lo0="%r10";
-$hi0="%r11";
-$hi1="%r13";
-$i="%r14";
-$j="%r15";
-$m0="%rbx";
-$m1="%rbp";
-
-$code=<<___;
-.text
-
-.globl	bn_mul_mont_gather5
-.type	bn_mul_mont_gather5,\@function,6
-.align	64
-bn_mul_mont_gather5:
-	test	\$3,${num}d
-	jnz	.Lmul_enter
-	cmp	\$8,${num}d
-	jb	.Lmul_enter
-	jmp	.Lmul4x_enter
-
-.align	16
-.Lmul_enter:
-	mov	${num}d,${num}d
-	movd	`($win64?56:8)`(%rsp),%xmm5	# load 7th argument
-	lea	.Linc(%rip),%r10
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-
-.Lmul_alloca:
-	mov	%rsp,%rax
-	lea	2($num),%r11
-	neg	%r11
-	lea	-264(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+2)+256+8)
-	and	\$-1024,%rsp		# minimize TLB usage
-
-	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
-.Lmul_body:
-	lea	128($bp),%r12		# reassign $bp (+size optimization)
-___
-		$bp="%r12";
-		$STRIDE=2**5*8;		# 5 is "window size"
-		$N=$STRIDE/4;		# should match cache line size
-$code.=<<___;
-	movdqa	0(%r10),%xmm0		# 00000001000000010000000000000000
-	movdqa	16(%r10),%xmm1		# 00000002000000020000000200000002
-	lea	24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
-	and	\$-16,%r10
-
-	pshufd	\$0,%xmm5,%xmm5		# broadcast index
-	movdqa	%xmm1,%xmm4
-	movdqa	%xmm1,%xmm2
-___
-########################################################################
-# calculate mask by comparing 0..31 to index and save result to stack
-#
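
A scalar model of what the SSE2 sequence below computes: a 32-lane mask that
is all-ones exactly at lane idx, ANDed against each stripe of the table and
ORed down to a single value, so the memory addresses touched never depend on
the secret index. Hypothetical C, one 64-bit word at a time:

    #include <stdint.h>

    /* Constant-time gather of word j of power idx from the interlaced
     * table written by the scatter above; all 32 candidates are read and
     * pcmpeqd plays the role of the (k == idx) comparison. */
    static uint64_t gather5_model(const uint64_t *tbl, int j, int idx)
    {
        uint64_t r = 0;
        int k;

        for (k = 0; k < 32; k++) {
            uint64_t mask = 0 - (uint64_t)(k == idx);
            r |= tbl[32 * j + k] & mask;
        }
        return r;
    }
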
-$code.=<<___;
-	paddd	%xmm0,%xmm1
-	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
-	.byte	0x67
-	movdqa	%xmm4,%xmm3
-___
-for($k=0;$k<$STRIDE/16-4;$k+=4) {
-$code.=<<___;
-	paddd	%xmm1,%xmm2
-	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
-	movdqa	%xmm0,`16*($k+0)+112`(%r10)
-	movdqa	%xmm4,%xmm0
-
-	paddd	%xmm2,%xmm3
-	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
-	movdqa	%xmm1,`16*($k+1)+112`(%r10)
-	movdqa	%xmm4,%xmm1
-
-	paddd	%xmm3,%xmm0
-	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
-	movdqa	%xmm2,`16*($k+2)+112`(%r10)
-	movdqa	%xmm4,%xmm2
-
-	paddd	%xmm0,%xmm1
-	pcmpeqd	%xmm5,%xmm0
-	movdqa	%xmm3,`16*($k+3)+112`(%r10)
-	movdqa	%xmm4,%xmm3
-___
-}
-$code.=<<___;				# last iteration can be optimized
-	paddd	%xmm1,%xmm2
-	pcmpeqd	%xmm5,%xmm1
-	movdqa	%xmm0,`16*($k+0)+112`(%r10)
-
-	paddd	%xmm2,%xmm3
-	.byte	0x67
-	pcmpeqd	%xmm5,%xmm2
-	movdqa	%xmm1,`16*($k+1)+112`(%r10)
-
-	pcmpeqd	%xmm5,%xmm3
-	movdqa	%xmm2,`16*($k+2)+112`(%r10)
-	pand	`16*($k+0)-128`($bp),%xmm0	# while it's still in register
-
-	pand	`16*($k+1)-128`($bp),%xmm1
-	pand	`16*($k+2)-128`($bp),%xmm2
-	movdqa	%xmm3,`16*($k+3)+112`(%r10)
-	pand	`16*($k+3)-128`($bp),%xmm3
-	por	%xmm2,%xmm0
-	por	%xmm3,%xmm1
-___
-for($k=0;$k<$STRIDE/16-4;$k+=4) {
-$code.=<<___;
-	movdqa	`16*($k+0)-128`($bp),%xmm4
-	movdqa	`16*($k+1)-128`($bp),%xmm5
-	movdqa	`16*($k+2)-128`($bp),%xmm2
-	pand	`16*($k+0)+112`(%r10),%xmm4
-	movdqa	`16*($k+3)-128`($bp),%xmm3
-	pand	`16*($k+1)+112`(%r10),%xmm5
-	por	%xmm4,%xmm0
-	pand	`16*($k+2)+112`(%r10),%xmm2
-	por	%xmm5,%xmm1
-	pand	`16*($k+3)+112`(%r10),%xmm3
-	por	%xmm2,%xmm0
-	por	%xmm3,%xmm1
-___
-}
-$code.=<<___;
-	por	%xmm1,%xmm0
-	pshufd	\$0x4e,%xmm0,%xmm1
-	por	%xmm1,%xmm0
-	lea	$STRIDE($bp),$bp
-	movq	%xmm0,$m0		# m0=bp[0]
-
-	mov	($n0),$n0		# pull n0[0] value
-	mov	($ap),%rax
-
-	xor	$i,$i			# i=0
-	xor	$j,$j			# j=0
-
-	mov	$n0,$m1
-	mulq	$m0			# ap[0]*bp[0]
-	mov	%rax,$lo0
-	mov	($np),%rax
-
-	imulq	$lo0,$m1		# "tp[0]"*n0
-	mov	%rdx,$hi0
-
-	mulq	$m1			# np[0]*m1
-	add	%rax,$lo0		# discarded
-	mov	8($ap),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$hi1
-
-	lea	1($j),$j		# j++
-	jmp	.L1st_enter
-
-.align	16
-.L1st:
-	add	%rax,$hi1
-	mov	($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
-	mov	$lo0,$hi0
-	adc	\$0,%rdx
-	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$hi1
-
-.L1st_enter:
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$hi0
-	mov	($np,$j,8),%rax
-	adc	\$0,%rdx
-	lea	1($j),$j		# j++
-	mov	%rdx,$lo0
-
-	mulq	$m1			# np[j]*m1
-	cmp	$num,$j
-	jne	.L1st
-
-	add	%rax,$hi1
-	mov	($ap),%rax		# ap[0]
-	adc	\$0,%rdx
-	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$hi1
-	mov	$lo0,$hi0
-
-	xor	%rdx,%rdx
-	add	$hi0,$hi1
-	adc	\$0,%rdx
-	mov	$hi1,-8(%rsp,$num,8)
-	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
-
-	lea	1($i),$i		# i++
-	jmp	.Louter
-.align	16
-.Louter:
-	lea	24+128(%rsp,$num,8),%rdx	# where 256-byte mask is (+size optimization)
-	and	\$-16,%rdx
-	pxor	%xmm4,%xmm4
-	pxor	%xmm5,%xmm5
-___
-for($k=0;$k<$STRIDE/16;$k+=4) {
-$code.=<<___;
-	movdqa	`16*($k+0)-128`($bp),%xmm0
-	movdqa	`16*($k+1)-128`($bp),%xmm1
-	movdqa	`16*($k+2)-128`($bp),%xmm2
-	movdqa	`16*($k+3)-128`($bp),%xmm3
-	pand	`16*($k+0)-128`(%rdx),%xmm0
-	pand	`16*($k+1)-128`(%rdx),%xmm1
-	por	%xmm0,%xmm4
-	pand	`16*($k+2)-128`(%rdx),%xmm2
-	por	%xmm1,%xmm5
-	pand	`16*($k+3)-128`(%rdx),%xmm3
-	por	%xmm2,%xmm4
-	por	%xmm3,%xmm5
-___
-}
-$code.=<<___;
-	por	%xmm5,%xmm4
-	pshufd	\$0x4e,%xmm4,%xmm0
-	por	%xmm4,%xmm0
-	lea	$STRIDE($bp),$bp
-	movq	%xmm0,$m0		# m0=bp[i]
-
-	xor	$j,$j			# j=0
-	mov	$n0,$m1
-	mov	(%rsp),$lo0
-
-	mulq	$m0			# ap[0]*bp[i]
-	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
-	mov	($np),%rax
-	adc	\$0,%rdx
-
-	imulq	$lo0,$m1		# tp[0]*n0
-	mov	%rdx,$hi0
-
-	mulq	$m1			# np[0]*m1
-	add	%rax,$lo0		# discarded
-	mov	8($ap),%rax
-	adc	\$0,%rdx
-	mov	8(%rsp),$lo0		# tp[1]
-	mov	%rdx,$hi1
-
-	lea	1($j),$j		# j++
-	jmp	.Linner_enter
-
-.align	16
-.Linner:
-	add	%rax,$hi1
-	mov	($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
-	mov	(%rsp,$j,8),$lo0
-	adc	\$0,%rdx
-	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$hi1
-
-.Linner_enter:
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$hi0
-	mov	($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
-	mov	%rdx,$hi0
-	adc	\$0,$hi0
-	lea	1($j),$j		# j++
-
-	mulq	$m1			# np[j]*m1
-	cmp	$num,$j
-	jne	.Linner
-
-	add	%rax,$hi1
-	mov	($ap),%rax		# ap[0]
-	adc	\$0,%rdx
-	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
-	mov	(%rsp,$j,8),$lo0
-	adc	\$0,%rdx
-	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$hi1
-
-	xor	%rdx,%rdx
-	add	$hi0,$hi1
-	adc	\$0,%rdx
-	add	$lo0,$hi1		# pull upmost overflow bit
-	adc	\$0,%rdx
-	mov	$hi1,-8(%rsp,$num,8)
-	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
-
-	lea	1($i),$i		# i++
-	cmp	$num,$i
-	jl	.Louter
-
-	xor	$i,$i			# i=0 and clear CF!
-	mov	(%rsp),%rax		# tp[0]
-	lea	(%rsp),$ap		# borrow ap for tp
-	mov	$num,$j			# j=num
-	jmp	.Lsub
-.align	16
-.Lsub:	sbb	($np,$i,8),%rax
-	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
-	mov	8($ap,$i,8),%rax	# tp[i+1]
-	lea	1($i),$i		# i++
-	dec	$j			# doesn't affect CF!
-	jnz	.Lsub
-
-	sbb	\$0,%rax		# handle upmost overflow bit
-	xor	$i,$i
-	and	%rax,$ap
-	not	%rax
-	mov	$rp,$np
-	and	%rax,$np
-	mov	$num,$j			# j=num
-	or	$np,$ap			# ap=borrow?tp:rp
-.align	16
-.Lcopy:					# copy or in-place refresh
-	mov	($ap,$i,8),%rax
-	mov	$i,(%rsp,$i,8)		# zap temporary vector
-	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
-	lea	1($i),$i
-	sub	\$1,$j
-	jnz	.Lcopy
-
-	mov	8(%rsp,$num,8),%rsi	# restore %rsp
-	mov	\$1,%rax
-
-	mov	(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Lmul_epilogue:
-	ret
-.size	bn_mul_mont_gather5,.-bn_mul_mont_gather5
-___
-{{{
-my @A=("%r10","%r11");
-my @N=("%r13","%rdi");
-$code.=<<___;
-.type	bn_mul4x_mont_gather5,\@function,6
-.align	16
-bn_mul4x_mont_gather5:
-.Lmul4x_enter:
-	mov	${num}d,${num}d
-	movd	`($win64?56:8)`(%rsp),%xmm5	# load 7th argument
-	lea	.Linc(%rip),%r10
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-
-.Lmul4x_alloca:
-	mov	%rsp,%rax
-	lea	4($num),%r11
-	neg	%r11
-	lea	-256(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+4)+256)
-	and	\$-1024,%rsp		# minimize TLB usage
-
-	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
-.Lmul4x_body:
-	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
-	lea	128(%rdx),%r12		# reassign $bp (+size optimization)
-___
-		$bp="%r12";
-		$STRIDE=2**5*8;		# 5 is "window size"
-		$N=$STRIDE/4;		# should match cache line size
-$code.=<<___;
-	movdqa	0(%r10),%xmm0		# 00000001000000010000000000000000
-	movdqa	16(%r10),%xmm1		# 00000002000000020000000200000002
-	lea	32-112(%rsp,$num,8),%r10# place the mask after tp[num+4] (+ICache optimization)
-
-	pshufd	\$0,%xmm5,%xmm5		# broadcast index
-	movdqa	%xmm1,%xmm4
-	.byte	0x67,0x67
-	movdqa	%xmm1,%xmm2
-___
-########################################################################
-# calculate mask by comparing 0..31 to index and save result to stack
-#
-$code.=<<___;
-	paddd	%xmm0,%xmm1
-	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
-	.byte	0x67
-	movdqa	%xmm4,%xmm3
-___
-for($k=0;$k<$STRIDE/16-4;$k+=4) {
-$code.=<<___;
-	paddd	%xmm1,%xmm2
-	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
-	movdqa	%xmm0,`16*($k+0)+112`(%r10)
-	movdqa	%xmm4,%xmm0
-
-	paddd	%xmm2,%xmm3
-	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
-	movdqa	%xmm1,`16*($k+1)+112`(%r10)
-	movdqa	%xmm4,%xmm1
-
-	paddd	%xmm3,%xmm0
-	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
-	movdqa	%xmm2,`16*($k+2)+112`(%r10)
-	movdqa	%xmm4,%xmm2
-
-	paddd	%xmm0,%xmm1
-	pcmpeqd	%xmm5,%xmm0
-	movdqa	%xmm3,`16*($k+3)+112`(%r10)
-	movdqa	%xmm4,%xmm3
-___
-}
-$code.=<<___;				# last iteration can be optimized
-	paddd	%xmm1,%xmm2
-	pcmpeqd	%xmm5,%xmm1
-	movdqa	%xmm0,`16*($k+0)+112`(%r10)
-
-	paddd	%xmm2,%xmm3
-	.byte	0x67
-	pcmpeqd	%xmm5,%xmm2
-	movdqa	%xmm1,`16*($k+1)+112`(%r10)
-
-	pcmpeqd	%xmm5,%xmm3
-	movdqa	%xmm2,`16*($k+2)+112`(%r10)
-	pand	`16*($k+0)-128`($bp),%xmm0	# while it's still in register
-
-	pand	`16*($k+1)-128`($bp),%xmm1
-	pand	`16*($k+2)-128`($bp),%xmm2
-	movdqa	%xmm3,`16*($k+3)+112`(%r10)
-	pand	`16*($k+3)-128`($bp),%xmm3
-	por	%xmm2,%xmm0
-	por	%xmm3,%xmm1
-___
-for($k=0;$k<$STRIDE/16-4;$k+=4) {
-$code.=<<___;
-	movdqa	`16*($k+0)-128`($bp),%xmm4
-	movdqa	`16*($k+1)-128`($bp),%xmm5
-	movdqa	`16*($k+2)-128`($bp),%xmm2
-	pand	`16*($k+0)+112`(%r10),%xmm4
-	movdqa	`16*($k+3)-128`($bp),%xmm3
-	pand	`16*($k+1)+112`(%r10),%xmm5
-	por	%xmm4,%xmm0
-	pand	`16*($k+2)+112`(%r10),%xmm2
-	por	%xmm5,%xmm1
-	pand	`16*($k+3)+112`(%r10),%xmm3
-	por	%xmm2,%xmm0
-	por	%xmm3,%xmm1
-___
-}
-$code.=<<___;
-	por	%xmm1,%xmm0
-	pshufd	\$0x4e,%xmm0,%xmm1
-	por	%xmm1,%xmm0
-	lea	$STRIDE($bp),$bp
-	movq	%xmm0,$m0		# m0=bp[0]
-
-	mov	($n0),$n0		# pull n0[0] value
-	mov	($ap),%rax
-
-	xor	$i,$i			# i=0
-	xor	$j,$j			# j=0
-
-	mov	$n0,$m1
-	mulq	$m0			# ap[0]*bp[0]
-	mov	%rax,$A[0]
-	mov	($np),%rax
-
-	imulq	$A[0],$m1		# "tp[0]"*n0
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[0]*m1
-	add	%rax,$A[0]		# discarded
-	mov	8($ap),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$N[1]
-
-	mulq	$m0
-	add	%rax,$A[1]
-	mov	8($np),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1
-	add	%rax,$N[1]
-	mov	16($ap),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]
-	lea	4($j),$j		# j++
-	adc	\$0,%rdx
-	mov	$N[1],(%rsp)
-	mov	%rdx,$N[0]
-	jmp	.L1st4x
-.align	16
-.L1st4x:
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[0]
-	mov	-16($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	-8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[1]
-	mov	-8($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[0]
-	mov	($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[1]
-	mov	8($np,$j,8),%rax
-	adc	\$0,%rdx
-	lea	4($j),$j		# j++
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	-16($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-	cmp	$num,$j
-	jl	.L1st4x
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[0]
-	mov	-16($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	-8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[0]
-	add	%rax,$A[1]
-	mov	-8($np,$j,8),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	($ap),%rax		# ap[0]
-	adc	\$0,%rdx
-	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
-	adc	\$0,%rdx
-	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-
-	xor	$N[1],$N[1]
-	add	$A[0],$N[0]
-	adc	\$0,$N[1]
-	mov	$N[0],-8(%rsp,$j,8)
-	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit
-
-	lea	1($i),$i		# i++
-.align	4
-.Louter4x:
-	lea	32+128(%rsp,$num,8),%rdx	# where 256-byte mask is (+size optimization)
-	pxor	%xmm4,%xmm4
-	pxor	%xmm5,%xmm5
-___
-for($k=0;$k<$STRIDE/16;$k+=4) {
-$code.=<<___;
-	movdqa	`16*($k+0)-128`($bp),%xmm0
-	movdqa	`16*($k+1)-128`($bp),%xmm1
-	movdqa	`16*($k+2)-128`($bp),%xmm2
-	movdqa	`16*($k+3)-128`($bp),%xmm3
-	pand	`16*($k+0)-128`(%rdx),%xmm0
-	pand	`16*($k+1)-128`(%rdx),%xmm1
-	por	%xmm0,%xmm4
-	pand	`16*($k+2)-128`(%rdx),%xmm2
-	por	%xmm1,%xmm5
-	pand	`16*($k+3)-128`(%rdx),%xmm3
-	por	%xmm2,%xmm4
-	por	%xmm3,%xmm5
-___
-}
-$code.=<<___;
-	por	%xmm5,%xmm4
-	pshufd	\$0x4e,%xmm4,%xmm0
-	por	%xmm4,%xmm0
-	lea	$STRIDE($bp),$bp
-	movq	%xmm0,$m0		# m0=bp[i]
-
-	xor	$j,$j			# j=0
-
-	mov	(%rsp),$A[0]
-	mov	$n0,$m1
-	mulq	$m0			# ap[0]*bp[i]
-	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
-	mov	($np),%rax
-	adc	\$0,%rdx
-
-	imulq	$A[0],$m1		# tp[0]*n0
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[0]*m1
-	add	%rax,$A[0]		# "$N[0]", discarded
-	mov	8($ap),%rax
-	adc	\$0,%rdx
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[1]
-	mov	8($np),%rax
-	adc	\$0,%rdx
-	add	8(%rsp),$A[1]		# +tp[1]
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	16($ap),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
-	lea	4($j),$j		# j+=2
-	adc	\$0,%rdx
-	mov	%rdx,$N[0]
-	jmp	.Linner4x
-.align	16
-.Linner4x:
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[0]
-	mov	-16($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	-8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]
-	adc	\$0,%rdx
-	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[1]
-	mov	-8($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	-8(%rsp,$j,8),$A[1]
-	adc	\$0,%rdx
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]
-	adc	\$0,%rdx
-	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[0]
-	mov	($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]
-	adc	\$0,%rdx
-	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[1]
-	mov	8($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	8(%rsp,$j,8),$A[1]
-	adc	\$0,%rdx
-	lea	4($j),$j		# j++
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	-16($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[1],$N[1]
-	adc	\$0,%rdx
-	mov	$N[0],-40(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-	cmp	$num,$j
-	jl	.Linner4x
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[0]
-	mov	-16($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
-	adc	\$0,%rdx
-	mov	%rdx,$A[1]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[0]
-	mov	-8($ap,$j,8),%rax
-	adc	\$0,%rdx
-	add	$A[0],$N[0]
-	adc	\$0,%rdx
-	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[1]
-
-	mulq	$m0			# ap[j]*bp[i]
-	add	%rax,$A[1]
-	mov	-8($np,$j,8),%rax
-	adc	\$0,%rdx
-	add	-8(%rsp,$j,8),$A[1]
-	adc	\$0,%rdx
-	lea	1($i),$i		# i++
-	mov	%rdx,$A[0]
-
-	mulq	$m1			# np[j]*m1
-	add	%rax,$N[1]
-	mov	($ap),%rax		# ap[0]
-	adc	\$0,%rdx
-	add	$A[1],$N[1]
-	adc	\$0,%rdx
-	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
-	mov	%rdx,$N[0]
-
-	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
-
-	xor	$N[1],$N[1]
-	add	$A[0],$N[0]
-	adc	\$0,$N[1]
-	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
-	adc	\$0,$N[1]
-	mov	$N[0],-8(%rsp,$j,8)
-	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit
-
-	cmp	$num,$i
-	jl	.Louter4x
-___
-{
-my @ri=("%rax","%rdx",$m0,$m1);
-$code.=<<___;
-	mov	16(%rsp,$num,8),$rp	# restore $rp
-	mov	0(%rsp),@ri[0]		# tp[0]
-	pxor	%xmm0,%xmm0
-	mov	8(%rsp),@ri[1]		# tp[1]
-	shr	\$2,$num		# num/=4
-	lea	(%rsp),$ap		# borrow ap for tp
-	xor	$i,$i			# i=0 and clear CF!
-
-	sub	0($np),@ri[0]
-	mov	16($ap),@ri[2]		# tp[2]
-	mov	24($ap),@ri[3]		# tp[3]
-	sbb	8($np),@ri[1]
-	lea	-1($num),$j		# j=num/4-1
-	jmp	.Lsub4x
-.align	16
-.Lsub4x:
-	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	sbb	16($np,$i,8),@ri[2]
-	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
-	mov	40($ap,$i,8),@ri[1]
-	sbb	24($np,$i,8),@ri[3]
-	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	sbb	32($np,$i,8),@ri[0]
-	mov	48($ap,$i,8),@ri[2]
-	mov	56($ap,$i,8),@ri[3]
-	sbb	40($np,$i,8),@ri[1]
-	lea	4($i),$i		# i++
-	dec	$j			# doesn't affect CF!
-	jnz	.Lsub4x
-
-	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	mov	32($ap,$i,8),@ri[0]	# load overflow bit
-	sbb	16($np,$i,8),@ri[2]
-	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	sbb	24($np,$i,8),@ri[3]
-	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
-
-	sbb	\$0,@ri[0]		# handle upmost overflow bit
-	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
-	xor	$i,$i			# i=0
-	and	@ri[0],$ap
-	not	@ri[0]
-	mov	$rp,$np
-	and	@ri[0],$np
-	lea	-1($num),$j
-	or	$np,$ap			# ap=borrow?tp:rp
-
-	movdqu	($ap),%xmm1
-	movdqa	%xmm0,(%rsp)
-	movdqu	%xmm1,($rp)
-	jmp	.Lcopy4x
-.align	16
-.Lcopy4x:					# copy or in-place refresh
-	movdqu	16($ap,$i),%xmm2
-	movdqu	32($ap,$i),%xmm1
-	movdqa	%xmm0,16(%rsp,$i)
-	movdqu	%xmm2,16($rp,$i)
-	movdqa	%xmm0,32(%rsp,$i)
-	movdqu	%xmm1,32($rp,$i)
-	lea	32($i),$i
-	dec	$j
-	jnz	.Lcopy4x
-
-	shl	\$2,$num
-	movdqu	16($ap,$i),%xmm2
-	movdqa	%xmm0,16(%rsp,$i)
-	movdqu	%xmm2,16($rp,$i)
-___
-}
-$code.=<<___;
-	mov	8(%rsp,$num,8),%rsi	# restore %rsp
-	mov	\$1,%rax
-
-	mov	(%rsi),%r15
-	mov	8(%rsi),%r14
-	mov	16(%rsi),%r13
-	mov	24(%rsi),%r12
-	mov	32(%rsi),%rbp
-	mov	40(%rsi),%rbx
-	lea	48(%rsi),%rsp
-.Lmul4x_epilogue:
-	ret
-.size	bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
-___
-}}}
-
-{
-my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%rdx","%r8", "%r9d") : # Win64 order
-				("%rdi","%rsi","%rdx","%ecx"); # Unix order
-my $out=$inp;
-my $STRIDE=2**5*8;
-my $N=$STRIDE/4;
-
-$code.=<<___;
-.globl	bn_scatter5
-.type	bn_scatter5,\@abi-omnipotent
-.align	16
-bn_scatter5:
-	cmp	\$0, $num
-	jz	.Lscatter_epilogue
-	lea	($tbl,$idx,8),$tbl
-.Lscatter:
-	mov	($inp),%rax
-	lea	8($inp),$inp
-	mov	%rax,($tbl)
-	lea	32*8($tbl),$tbl
-	sub	\$1,$num
-	jnz	.Lscatter
-.Lscatter_epilogue:
-	ret
-.size	bn_scatter5,.-bn_scatter5
-
-.globl	bn_gather5
-.type	bn_gather5,\@abi-omnipotent
-.align	16
-bn_gather5:
-.LSEH_begin_bn_gather5:			# Win64 thing, but harmless in other cases
-	# I can't trust assembler to use specific encoding:-(
-	.byte	0x4c,0x8d,0x14,0x24			# lea    (%rsp),%r10
-	.byte	0x48,0x81,0xec,0x08,0x01,0x00,0x00	# sub	$0x108,%rsp
-	lea	.Linc(%rip),%rax
-	and	\$-16,%rsp		# shouldn't be formally required
-
-	movd	$idx,%xmm5
-	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
-	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
-	lea	128($tbl),%r11		# size optimization
-	lea	128(%rsp),%rax		# size optimization
-
-	pshufd	\$0,%xmm5,%xmm5		# broadcast $idx
-	movdqa	%xmm1,%xmm4
-	movdqa	%xmm1,%xmm2
-___
-########################################################################
-# calculate mask by comparing 0..31 to $idx and save result to stack
-#
-for($i=0;$i<$STRIDE/16;$i+=4) {
-$code.=<<___;
-	paddd	%xmm0,%xmm1
-	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
-___
-$code.=<<___	if ($i);
-	movdqa	%xmm3,`16*($i-1)-128`(%rax)
-___
-$code.=<<___;
-	movdqa	%xmm4,%xmm3
-
-	paddd	%xmm1,%xmm2
-	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
-	movdqa	%xmm0,`16*($i+0)-128`(%rax)
-	movdqa	%xmm4,%xmm0
-
-	paddd	%xmm2,%xmm3
-	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
-	movdqa	%xmm1,`16*($i+1)-128`(%rax)
-	movdqa	%xmm4,%xmm1
-
-	paddd	%xmm3,%xmm0
-	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
-	movdqa	%xmm2,`16*($i+2)-128`(%rax)
-	movdqa	%xmm4,%xmm2
-___
-}
-$code.=<<___;
-	movdqa	%xmm3,`16*($i-1)-128`(%rax)
-	jmp	.Lgather
-
-.align	32
-.Lgather:
-	pxor	%xmm4,%xmm4
-	pxor	%xmm5,%xmm5
-___
-for($i=0;$i<$STRIDE/16;$i+=4) {
-$code.=<<___;
-	movdqa	`16*($i+0)-128`(%r11),%xmm0
-	movdqa	`16*($i+1)-128`(%r11),%xmm1
-	movdqa	`16*($i+2)-128`(%r11),%xmm2
-	pand	`16*($i+0)-128`(%rax),%xmm0
-	movdqa	`16*($i+3)-128`(%r11),%xmm3
-	pand	`16*($i+1)-128`(%rax),%xmm1
-	por	%xmm0,%xmm4
-	pand	`16*($i+2)-128`(%rax),%xmm2
-	por	%xmm1,%xmm5
-	pand	`16*($i+3)-128`(%rax),%xmm3
-	por	%xmm2,%xmm4
-	por	%xmm3,%xmm5
-___
-}
-$code.=<<___;
-	por	%xmm5,%xmm4
-	lea	$STRIDE(%r11),%r11
-	pshufd	\$0x4e,%xmm4,%xmm0
-	por	%xmm4,%xmm0
-	movq	%xmm0,($out)		# m0=bp[0]
-	lea	8($out),$out
-	sub	\$1,$num
-	jnz	.Lgather
-
-	lea	(%r10),%rsp
-	ret
-.LSEH_end_bn_gather5:
-.size	bn_gather5,.-bn_gather5
-___
-}
-$code.=<<___;
-.align	64
-.Linc:
-	.long	0,0, 1,1
-	.long	2,2, 2,2
-.asciz	"Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern	__imp_RtlVirtualUnwind
-.type	mul_handler,\@abi-omnipotent
-.align	16
-mul_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	sub	\$64,%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# end of prologue label
-	cmp	%r10,%rbx		# context->Rip<end of prologue label
-	jb	.Lcommon_seh_tail
-
-	lea	48(%rax),%rax
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# end of alloca label
-	cmp	%r10,%rbx		# context->Rip<end of alloca label
-	jb	.Lcommon_seh_tail
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	8(%r11),%r10d		# HandlerData[2]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lcommon_seh_tail
-
-	mov	192($context),%r10	# pull $num
-	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
-
-	lea	48(%rax),%rax
-
-	mov	-8(%rax),%rbx
-	mov	-16(%rax),%rbp
-	mov	-24(%rax),%r12
-	mov	-32(%rax),%r13
-	mov	-40(%rax),%r14
-	mov	-48(%rax),%r15
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lcommon_seh_tail:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$154,%ecx		# sizeof(CONTEXT)
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	add	\$64,%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	mul_handler,.-mul_handler
-
-.section	.pdata
-.align	4
-	.rva	.LSEH_begin_bn_mul_mont_gather5
-	.rva	.LSEH_end_bn_mul_mont_gather5
-	.rva	.LSEH_info_bn_mul_mont_gather5
-
-	.rva	.LSEH_begin_bn_mul4x_mont_gather5
-	.rva	.LSEH_end_bn_mul4x_mont_gather5
-	.rva	.LSEH_info_bn_mul4x_mont_gather5
-
-	.rva	.LSEH_begin_bn_gather5
-	.rva	.LSEH_end_bn_gather5
-	.rva	.LSEH_info_bn_gather5
-
-.section	.xdata
-.align	8
-.LSEH_info_bn_mul_mont_gather5:
-	.byte	9,0,0,0
-	.rva	mul_handler
-	.rva	.Lmul_alloca,.Lmul_body,.Lmul_epilogue		# HandlerData[]
-.align	8
-.LSEH_info_bn_mul4x_mont_gather5:
-	.byte	9,0,0,0
-	.rva	mul_handler
-	.rva	.Lmul4x_alloca,.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
-.align	8
-.LSEH_info_bn_gather5:
-	.byte	0x01,0x0b,0x03,0x0a
-	.byte	0x0b,0x01,0x21,0x00	# sub	rsp,0x108
-	.byte	0x04,0xa3,0x00,0x00	# lea	r10,(rsp), set_frame r10
-.align	8
-___
-}
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-
-print $code;
-close STDOUT;

+ 121 - 122
drivers/builtin_openssl2/crypto/bn/bn_asm.c

@@ -489,121 +489,144 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
  * c=(c2,c1,c0)
  */
 
+# ifdef BN_LLONG
 /*
- * Keep in mind that carrying into high part of multiplication result
- * can not overflow, because it cannot be all-ones.
+ * Keep in mind that additions to multiplication result can not
+ * overflow, because its high half cannot be all-ones.
  */
-# ifdef BN_LLONG
-#  define mul_add_c(a,b,c0,c1,c2) \
-        t=(BN_ULLONG)a*b; \
-        t1=(BN_ULONG)Lw(t); \
-        t2=(BN_ULONG)Hw(t); \
-        c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
-        c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
-
-#  define mul_add_c2(a,b,c0,c1,c2) \
-        t=(BN_ULLONG)a*b; \
-        tt=(t+t)&BN_MASK; \
-        if (tt < t) c2++; \
-        t1=(BN_ULONG)Lw(tt); \
-        t2=(BN_ULONG)Hw(tt); \
-        c0=(c0+t1)&BN_MASK2;  \
-        if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
-        c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
-
-#  define sqr_add_c(a,i,c0,c1,c2) \
-        t=(BN_ULLONG)a[i]*a[i]; \
-        t1=(BN_ULONG)Lw(t); \
-        t2=(BN_ULONG)Hw(t); \
-        c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
-        c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+#  define mul_add_c(a,b,c0,c1,c2)       do {    \
+        BN_ULONG hi;                            \
+        BN_ULLONG t = (BN_ULLONG)(a)*(b);       \
+        t += c0;                /* no carry */  \
+        c0 = (BN_ULONG)Lw(t);                   \
+        hi = (BN_ULONG)Hw(t);                   \
+        c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+        } while(0)
+
+#  define mul_add_c2(a,b,c0,c1,c2)      do {    \
+        BN_ULONG hi;                            \
+        BN_ULLONG t = (BN_ULLONG)(a)*(b);       \
+        BN_ULLONG tt = t+c0;    /* no carry */  \
+        c0 = (BN_ULONG)Lw(tt);                  \
+        hi = (BN_ULONG)Hw(tt);                  \
+        c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+        t += c0;                /* no carry */  \
+        c0 = (BN_ULONG)Lw(t);                   \
+        hi = (BN_ULONG)Hw(t);                   \
+        c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+        } while(0)
+
+#  define sqr_add_c(a,i,c0,c1,c2)       do {    \
+        BN_ULONG hi;                            \
+        BN_ULLONG t = (BN_ULLONG)a[i]*a[i];     \
+        t += c0;                /* no carry */  \
+        c0 = (BN_ULONG)Lw(t);                   \
+        hi = (BN_ULONG)Hw(t);                   \
+        c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+        } while(0)
 
 #  define sqr_add_c2(a,i,j,c0,c1,c2) \
         mul_add_c2((a)[i],(a)[j],c0,c1,c2)
 
 # elif defined(BN_UMULT_LOHI)
-
-#  define mul_add_c(a,b,c0,c1,c2) {       \
-        BN_ULONG ta=(a),tb=(b);         \
-        BN_UMULT_LOHI(t1,t2,ta,tb);     \
-        c0 += t1; t2 += (c0<t1)?1:0;    \
-        c1 += t2; c2 += (c1<t2)?1:0;    \
-        }
-
-#  define mul_add_c2(a,b,c0,c1,c2) {      \
-        BN_ULONG ta=(a),tb=(b),t0;      \
-        BN_UMULT_LOHI(t0,t1,ta,tb);     \
-        c0 += t0; t2 = t1+((c0<t0)?1:0);\
-        c1 += t2; c2 += (c1<t2)?1:0;    \
-        c0 += t0; t1 += (c0<t0)?1:0;    \
-        c1 += t1; c2 += (c1<t1)?1:0;    \
-        }
-
-#  define sqr_add_c(a,i,c0,c1,c2) {       \
-        BN_ULONG ta=(a)[i];             \
-        BN_UMULT_LOHI(t1,t2,ta,ta);     \
-        c0 += t1; t2 += (c0<t1)?1:0;    \
-        c1 += t2; c2 += (c1<t2)?1:0;    \
-        }
+/*
+ * Keep in mind that additions to hi can not overflow, because
+ * the high word of a multiplication result cannot be all-ones.
+ */
+#  define mul_add_c(a,b,c0,c1,c2)       do {    \
+        BN_ULONG ta = (a), tb = (b);            \
+        BN_ULONG lo, hi;                        \
+        BN_UMULT_LOHI(lo,hi,ta,tb);             \
+        c0 += lo; hi += (c0<lo)?1:0;            \
+        c1 += hi; c2 += (c1<hi)?1:0;            \
+        } while(0)
+
+#  define mul_add_c2(a,b,c0,c1,c2)      do {    \
+        BN_ULONG ta = (a), tb = (b);            \
+        BN_ULONG lo, hi, tt;                    \
+        BN_UMULT_LOHI(lo,hi,ta,tb);             \
+        c0 += lo; tt = hi+((c0<lo)?1:0);        \
+        c1 += tt; c2 += (c1<tt)?1:0;            \
+        c0 += lo; hi += (c0<lo)?1:0;            \
+        c1 += hi; c2 += (c1<hi)?1:0;            \
+        } while(0)
+
+#  define sqr_add_c(a,i,c0,c1,c2)       do {    \
+        BN_ULONG ta = (a)[i];                   \
+        BN_ULONG lo, hi;                        \
+        BN_UMULT_LOHI(lo,hi,ta,ta);             \
+        c0 += lo; hi += (c0<lo)?1:0;            \
+        c1 += hi; c2 += (c1<hi)?1:0;            \
+        } while(0)
 
 #  define sqr_add_c2(a,i,j,c0,c1,c2)    \
         mul_add_c2((a)[i],(a)[j],c0,c1,c2)
 
 # elif defined(BN_UMULT_HIGH)
-
-#  define mul_add_c(a,b,c0,c1,c2) {       \
-        BN_ULONG ta=(a),tb=(b);         \
-        t1 = ta * tb;                   \
-        t2 = BN_UMULT_HIGH(ta,tb);      \
-        c0 += t1; t2 += (c0<t1)?1:0;    \
-        c1 += t2; c2 += (c1<t2)?1:0;    \
-        }
-
-#  define mul_add_c2(a,b,c0,c1,c2) {      \
-        BN_ULONG ta=(a),tb=(b),t0;      \
-        t1 = BN_UMULT_HIGH(ta,tb);      \
-        t0 = ta * tb;                   \
-        c0 += t0; t2 = t1+((c0<t0)?1:0);\
-        c1 += t2; c2 += (c1<t2)?1:0;    \
-        c0 += t0; t1 += (c0<t0)?1:0;    \
-        c1 += t1; c2 += (c1<t1)?1:0;    \
-        }
-
-#  define sqr_add_c(a,i,c0,c1,c2) {       \
-        BN_ULONG ta=(a)[i];             \
-        t1 = ta * ta;                   \
-        t2 = BN_UMULT_HIGH(ta,ta);      \
-        c0 += t1; t2 += (c0<t1)?1:0;    \
-        c1 += t2; c2 += (c1<t2)?1:0;    \
-        }
+/*
+ * Keep in mind that additions to hi can not overflow, because
+ * the high word of a multiplication result cannot be all-ones.
+ */
+#  define mul_add_c(a,b,c0,c1,c2)       do {    \
+        BN_ULONG ta = (a), tb = (b);            \
+        BN_ULONG lo = ta * tb;                  \
+        BN_ULONG hi = BN_UMULT_HIGH(ta,tb);     \
+        c0 += lo; hi += (c0<lo)?1:0;            \
+        c1 += hi; c2 += (c1<hi)?1:0;            \
+        } while(0)
+
+#  define mul_add_c2(a,b,c0,c1,c2)      do {    \
+        BN_ULONG ta = (a), tb = (b), tt;        \
+        BN_ULONG lo = ta * tb;                  \
+        BN_ULONG hi = BN_UMULT_HIGH(ta,tb);     \
+        c0 += lo; tt = hi + ((c0<lo)?1:0);      \
+        c1 += tt; c2 += (c1<tt)?1:0;            \
+        c0 += lo; hi += (c0<lo)?1:0;            \
+        c1 += hi; c2 += (c1<hi)?1:0;            \
+        } while(0)
+
+#  define sqr_add_c(a,i,c0,c1,c2)       do {    \
+        BN_ULONG ta = (a)[i];                   \
+        BN_ULONG lo = ta * ta;                  \
+        BN_ULONG hi = BN_UMULT_HIGH(ta,ta);     \
+        c0 += lo; hi += (c0<lo)?1:0;            \
+        c1 += hi; c2 += (c1<hi)?1:0;            \
+        } while(0)
 
 #  define sqr_add_c2(a,i,j,c0,c1,c2)      \
         mul_add_c2((a)[i],(a)[j],c0,c1,c2)
 
 # else                          /* !BN_LLONG */
-#  define mul_add_c(a,b,c0,c1,c2) \
-        t1=LBITS(a); t2=HBITS(a); \
-        bl=LBITS(b); bh=HBITS(b); \
-        mul64(t1,t2,bl,bh); \
-        c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
-        c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
-
-#  define mul_add_c2(a,b,c0,c1,c2) \
-        t1=LBITS(a); t2=HBITS(a); \
-        bl=LBITS(b); bh=HBITS(b); \
-        mul64(t1,t2,bl,bh); \
-        if (t2 & BN_TBIT) c2++; \
-        t2=(t2+t2)&BN_MASK2; \
-        if (t1 & BN_TBIT) t2++; \
-        t1=(t1+t1)&BN_MASK2; \
-        c0=(c0+t1)&BN_MASK2;  \
-        if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
-        c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
-
-#  define sqr_add_c(a,i,c0,c1,c2) \
-        sqr64(t1,t2,(a)[i]); \
-        c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
-        c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+/*
+ * Keep in mind that additions to hi can not overflow, because
+ * the high word of a multiplication result cannot be all-ones.
+ */
+#  define mul_add_c(a,b,c0,c1,c2)       do {    \
+        BN_ULONG lo = LBITS(a), hi = HBITS(a);  \
+        BN_ULONG bl = LBITS(b), bh = HBITS(b);  \
+        mul64(lo,hi,bl,bh);                     \
+        c0 = (c0+lo)&BN_MASK2; if (c0<lo) hi++; \
+        c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+        } while(0)
+
+#  define mul_add_c2(a,b,c0,c1,c2)      do {    \
+        BN_ULONG tt;                            \
+        BN_ULONG lo = LBITS(a), hi = HBITS(a);  \
+        BN_ULONG bl = LBITS(b), bh = HBITS(b);  \
+        mul64(lo,hi,bl,bh);                     \
+        tt = hi;                                \
+        c0 = (c0+lo)&BN_MASK2; if (c0<lo) tt++; \
+        c1 = (c1+tt)&BN_MASK2; if (c1<tt) c2++; \
+        c0 = (c0+lo)&BN_MASK2; if (c0<lo) hi++; \
+        c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+        } while(0)
+
+#  define sqr_add_c(a,i,c0,c1,c2)       do {    \
+        BN_ULONG lo, hi;                        \
+        sqr64(lo,hi,(a)[i]);                    \
+        c0 = (c0+lo)&BN_MASK2; if (c0<lo) hi++; \
+        c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+        } while(0)
 
 #  define sqr_add_c2(a,i,j,c0,c1,c2) \
         mul_add_c2((a)[i],(a)[j],c0,c1,c2)
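
The "cannot be all-ones" invariant that each branch above relies on is quick
to verify: for w-bit words the largest possible product is

    (2^w - 1)^2 = 2^{2w} - 2^{w+1} + 1 = (2^w - 2) \cdot 2^w + 1,

so the high word of a single product is at most 2^w - 2, and adding one carry
bit yields at most 2^w - 1, which still fits in a word. That is why the
hi++ and tt++ style increments in these macros need no further carry
handling.
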
@@ -611,12 +634,6 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
 
 void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 {
-# ifdef BN_LLONG
-    BN_ULLONG t;
-# else
-    BN_ULONG bl, bh;
-# endif
-    BN_ULONG t1, t2;
     BN_ULONG c1, c2, c3;
 
     c1 = 0;
@@ -720,12 +737,6 @@ void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 
 void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 {
-# ifdef BN_LLONG
-    BN_ULLONG t;
-# else
-    BN_ULONG bl, bh;
-# endif
-    BN_ULONG t1, t2;
     BN_ULONG c1, c2, c3;
 
     c1 = 0;
@@ -765,12 +776,6 @@ void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 
 void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
 {
-# ifdef BN_LLONG
-    BN_ULLONG t, tt;
-# else
-    BN_ULONG bl, bh;
-# endif
-    BN_ULONG t1, t2;
     BN_ULONG c1, c2, c3;
 
     c1 = 0;
@@ -846,12 +851,6 @@ void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
 
 void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
 {
-# ifdef BN_LLONG
-    BN_ULLONG t, tt;
-# else
-    BN_ULONG bl, bh;
-# endif
-    BN_ULONG t1, t2;
     BN_ULONG c1, c2, c3;
 
     c1 = 0;

+ 296 - 22
drivers/builtin_openssl2/crypto/bn/bn_exp.c

@@ -123,6 +123,17 @@
 # ifndef alloca
 #  define alloca(s) __builtin_alloca((s))
 # endif
+#elif defined(__sun)
+# include <alloca.h>
+#endif
+
+#include "rsaz_exp.h"
+
+#undef SPARC_T4_MONT
+#if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
+# include "sparc_arch.h"
+extern unsigned int OPENSSL_sparcv9cap_P[];
+# define SPARC_T4_MONT
 #endif
 
 /* maximum precomputation table size for *variable* sliding windows */
@@ -476,6 +487,23 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
     wstart = bits - 1;          /* The top bit of the window */
     wend = 0;                   /* The bottom bit of the window */
 
+#if 1                           /* by Shay Gueron's suggestion */
+    j = m->top;                 /* borrow j */
+    if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
+        if (bn_wexpand(r, j) == NULL)
+            goto err;
+        /* 2^(top*BN_BITS2) - m */
+        r->d[0] = (0 - m->d[0]) & BN_MASK2;
+        for (i = 1; i < j; i++)
+            r->d[i] = (~m->d[i]) & BN_MASK2;
+        r->top = j;
+        /*
+         * Upper words will be zero if the corresponding words of 'm' were
+         * 0xfff[...], so decrement r->top accordingly.
+         */
+        bn_correct_top(r);
+    } else
+#endif
     if (!BN_to_montgomery(r, BN_value_one(), mont, ctx))
         goto err;
     for (;;) {
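
A note on the hunk above (credited in the comment to Shay Gueron): it seeds r with the Montgomery representation of 1, i.e. R mod m with R = 2^(top*BN_BITS2), without a full BN_to_montgomery call. When the top bit of m is set, m <= R < 2m, so R mod m = R - m; and because Montgomery arithmetic requires m odd, the word-wise two's complement (negate the low word, complement the rest) computes that difference exactly. A small sketch of the same computation, assuming 32-bit words:

    #include <stdint.h>
    #include <stdio.h>

    /* Compute r = 2^(32*n) - m word-wise. Valid when m is odd,
     * so the +1 of the two's complement never carries past
     * word 0. With m's top bit set this equals R mod m, the
     * Montgomery image of 1. */
    static void mont_one(uint32_t *r, const uint32_t *m, int n)
    {
        int i;
        r[0] = (uint32_t)(0 - m[0]);   /* -m[0] mod 2^32 */
        for (i = 1; i < n; i++)
            r[i] = ~m[i];              /* borrow already absorbed */
    }

    int main(void)
    {
        /* m = 0x80000000_00000001 (odd, top bit set), n = 2 words */
        uint32_t m[2] = { 0x00000001u, 0x80000000u }, r[2];
        mont_one(r, m, 2);
        printf("%08x%08x\n", r[1], r[0]);  /* 7fffffffffffffff */
        return 0;
    }
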
@@ -527,6 +555,17 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
         if (wstart < 0)
             break;
     }
+#if defined(SPARC_T4_MONT)
+    if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
+        j = mont->N.top;        /* borrow j */
+        val[0]->d[0] = 1;       /* borrow val[0] */
+        for (i = 1; i < j; i++)
+            val[0]->d[i] = 0;
+        val[0]->top = j;
+        if (!BN_mod_mul_montgomery(rr, r, val[0], mont, ctx))
+            goto err;
+    } else
+#endif
     if (!BN_from_montgomery(rr, r, mont, ctx))
         goto err;
     ret = 1;
@@ -538,6 +577,27 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
     return (ret);
 }
 
+#if defined(SPARC_T4_MONT)
+static BN_ULONG bn_get_bits(const BIGNUM *a, int bitpos)
+{
+    BN_ULONG ret = 0;
+    int wordpos;
+
+    wordpos = bitpos / BN_BITS2;
+    bitpos %= BN_BITS2;
+    if (wordpos >= 0 && wordpos < a->top) {
+        ret = a->d[wordpos] & BN_MASK2;
+        if (bitpos) {
+            ret >>= bitpos;
+            if (++wordpos < a->top)
+                ret |= a->d[wordpos] << (BN_BITS2 - bitpos);
+        }
+    }
+
+    return ret & BN_MASK2;
+}
+#endif
+
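A note on bn_get_bits above: it returns the word-sized bit field of a starting at bitpos, stitching the field together from two adjacent words when it straddles a word boundary (the if (bitpos) guard also keeps the shift count strictly below the word width). A self-contained sketch of the same stitch on a flat 32-bit word array, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Return up to 32 bits of the little-endian word array a
     * (ntop words) starting at bit position bitpos. Bits that
     * fall beyond the array read as zero. */
    static uint32_t get_bits(const uint32_t *a, int ntop, int bitpos)
    {
        uint32_t ret = 0;
        int wordpos = bitpos / 32;

        bitpos %= 32;
        if (wordpos >= 0 && wordpos < ntop) {
            ret = a[wordpos];
            if (bitpos) {
                ret >>= bitpos;               /* low part */
                if (++wordpos < ntop)         /* high part, next word */
                    ret |= a[wordpos] << (32 - bitpos);
            }
        }
        return ret;
    }

    int main(void)
    {
        uint32_t a[2] = { 0xdeadbeefu, 0x01234567u };
        /* bits 16..47 straddle the word boundary */
        printf("%08x\n", get_bits(a, 2, 16));  /* 4567dead */
        return 0;
    }
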
 /*
  * BN_mod_exp_mont_consttime() stores the precomputed powers in a specific
  * layout so that accessing any of these table values shows the same access
@@ -644,6 +704,9 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
     int powerbufLen = 0;
     unsigned char *powerbuf = NULL;
     BIGNUM tmp, am;
+#if defined(SPARC_T4_MONT)
+    unsigned int t4 = 0;
+#endif
 
     bn_check_top(a);
     bn_check_top(p);
@@ -683,21 +746,62 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
             goto err;
     }
 
+#ifdef RSAZ_ENABLED
+    /*
+     * If the size of the operands allow it, perform the optimized
+     * RSAZ exponentiation. For further information see
+     * crypto/bn/rsaz_exp.c and accompanying assembly modules.
+     */
+    if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024)
+        && rsaz_avx2_eligible()) {
+        if (NULL == bn_wexpand(rr, 16))
+            goto err;
+        RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d,
+                               mont->n0[0]);
+        rr->top = 16;
+        rr->neg = 0;
+        bn_correct_top(rr);
+        ret = 1;
+        goto err;
+    } else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) {
+        if (NULL == bn_wexpand(rr, 8))
+            goto err;
+        RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d);
+        rr->top = 8;
+        rr->neg = 0;
+        bn_correct_top(rr);
+        ret = 1;
+        goto err;
+    }
+#endif
+
     /* Get the window size to use with size of p. */
     window = BN_window_bits_for_ctime_exponent_size(bits);
+#if defined(SPARC_T4_MONT)
+    if (window >= 5 && (top & 15) == 0 && top <= 64 &&
+        (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) ==
+        (CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0]))
+        window = 5;
+    else
+#endif
 #if defined(OPENSSL_BN_ASM_MONT5)
-    if (window == 6 && bits <= 1024)
-        window = 5;             /* ~5% improvement of 2048-bit RSA sign */
+    if (window >= 5) {
+        window = 5;             /* ~5% improvement for RSA2048 sign, and even
+                                 * for RSA4096 */
+        /* reserve space for mont->N.d[] copy */
+        powerbufLen += top * sizeof(mont->N.d[0]);
+    }
 #endif
+    (void)0;
 
     /*
      * Allocate a buffer large enough to hold all of the pre-computed powers
      * of am, am itself and tmp.
      */
     numPowers = 1 << window;
-    powerbufLen = sizeof(m->d[0]) * (top * numPowers +
-                                     ((2 * top) >
-                                      numPowers ? (2 * top) : numPowers));
+    powerbufLen += sizeof(m->d[0]) * (top * numPowers +
+                                      ((2 * top) >
+                                       numPowers ? (2 * top) : numPowers));
 #ifdef alloca
     if (powerbufLen < 3072)
         powerbufFree =
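
A worked example of the sizing above, assuming 64-bit BN_ULONG: for a 1024-bit modulus top = 16, the MONT5 clamp forces window = 5, so numPowers = 32; the power table needs top*numPowers words, the scratch term adds max(2*top, numPowers), and the MONT5 copy of mont->N.d adds top more. A throwaway snippet doing the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative sizing, assuming 64-bit BN_ULONG (8 bytes). */
        int top = 16, window = 5;            /* 1024-bit modulus */
        int numPowers = 1 << window;         /* 32 table entries */
        int words = top * numPowers          /* power table      */
                  + (2 * top > numPowers ? 2 * top : numPowers)
                  + top;                     /* MONT5 copy of N  */
        printf("%d words, %d bytes\n", words, words * 8);
        /* prints: 560 words, 4480 bytes -> above the 3072-byte
         * cutoff, so this size takes the heap path, not alloca */
        return 0;
    }
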
@@ -727,15 +831,17 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
     tmp.flags = am.flags = BN_FLG_STATIC_DATA;
 
     /* prepare a^0 in Montgomery domain */
-#if 1
+#if 1                           /* by Shay Gueron's suggestion */
+    if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
+        /* 2^(top*BN_BITS2) - m */
+        tmp.d[0] = (0 - m->d[0]) & BN_MASK2;
+        for (i = 1; i < top; i++)
+            tmp.d[i] = (~m->d[i]) & BN_MASK2;
+        tmp.top = top;
+    } else
+#endif
     if (!BN_to_montgomery(&tmp, BN_value_one(), mont, ctx))
         goto err;
-#else
-    tmp.d[0] = (0 - m->d[0]) & BN_MASK2; /* 2^(top*BN_BITS2) - m */
-    for (i = 1; i < top; i++)
-        tmp.d[i] = (~m->d[i]) & BN_MASK2;
-    tmp.top = top;
-#endif
 
     /* prepare a^1 in Montgomery domain */
     if (a->neg || BN_ucmp(a, m) >= 0) {
@@ -746,6 +852,138 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
     } else if (!BN_to_montgomery(&am, a, mont, ctx))
         goto err;
 
+#if defined(SPARC_T4_MONT)
+    if (t4) {
+        typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np,
+                                       const BN_ULONG *n0, const void *table,
+                                       int power, int bits);
+        int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np,
+                              const BN_ULONG *n0, const void *table,
+                              int power, int bits);
+        int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np,
+                               const BN_ULONG *n0, const void *table,
+                               int power, int bits);
+        int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np,
+                               const BN_ULONG *n0, const void *table,
+                               int power, int bits);
+        int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np,
+                               const BN_ULONG *n0, const void *table,
+                               int power, int bits);
+        static const bn_pwr5_mont_f pwr5_funcs[4] = {
+            bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16,
+            bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32
+        };
+        bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1];
+
+        typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap,
+                                      const void *bp, const BN_ULONG *np,
+                                      const BN_ULONG *n0);
+        int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp,
+                             const BN_ULONG *np, const BN_ULONG *n0);
+        int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap,
+                              const void *bp, const BN_ULONG *np,
+                              const BN_ULONG *n0);
+        int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap,
+                              const void *bp, const BN_ULONG *np,
+                              const BN_ULONG *n0);
+        int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap,
+                              const void *bp, const BN_ULONG *np,
+                              const BN_ULONG *n0);
+        static const bn_mul_mont_f mul_funcs[4] = {
+            bn_mul_mont_t4_8, bn_mul_mont_t4_16,
+            bn_mul_mont_t4_24, bn_mul_mont_t4_32
+        };
+        bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1];
+
+        void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap,
+                              const void *bp, const BN_ULONG *np,
+                              const BN_ULONG *n0, int num);
+        void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap,
+                            const void *bp, const BN_ULONG *np,
+                            const BN_ULONG *n0, int num);
+        void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap,
+                                    const void *table, const BN_ULONG *np,
+                                    const BN_ULONG *n0, int num, int power);
+        void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num,
+                                   void *table, size_t power);
+        void bn_gather5_t4(BN_ULONG *out, size_t num,
+                           void *table, size_t power);
+        void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num);
+
+        BN_ULONG *np = mont->N.d, *n0 = mont->n0;
+        int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less
+                                                * than 32 */
+
+        /*
+         * BN_to_montgomery can contaminate words above .top [in
+         * BN_DEBUG[_DEBUG] build]...
+         */
+        for (i = am.top; i < top; i++)
+            am.d[i] = 0;
+        for (i = tmp.top; i < top; i++)
+            tmp.d[i] = 0;
+
+        bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0);
+        bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1);
+        if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) &&
+            !(*mul_worker) (tmp.d, am.d, am.d, np, n0))
+            bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top);
+        bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2);
+
+        for (i = 3; i < 32; i++) {
+            /* Calculate a^i = a^(i-1) * a */
+            if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) &&
+                !(*mul_worker) (tmp.d, tmp.d, am.d, np, n0))
+                bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top);
+            bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i);
+        }
+
+        /* switch to 64-bit domain */
+        np = alloca(top * sizeof(BN_ULONG));
+        top /= 2;
+        bn_flip_t4(np, mont->N.d, top);
+
+        bits--;
+        for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--)
+            wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
+        bn_gather5_t4(tmp.d, top, powerbuf, wvalue);
+
+        /*
+         * Scan the exponent one window at a time starting from the most
+         * significant bits.
+         */
+        while (bits >= 0) {
+            if (bits < stride)
+                stride = bits + 1;
+            bits -= stride;
+            wvalue = bn_get_bits(p, bits + 1);
+
+            if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
+                continue;
+            /* retry once and fall back */
+            if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
+                continue;
+
+            bits += stride - 5;
+            wvalue >>= stride - 5;
+            wvalue &= 31;
+            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
+            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
+            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
+            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
+            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
+            bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top,
+                                   wvalue);
+        }
+
+        bn_flip_t4(tmp.d, tmp.d, top);
+        top *= 2;
+        /* back to 32-bit domain */
+        tmp.top = top;
+        bn_correct_top(&tmp);
+        OPENSSL_cleanse(np, top * sizeof(BN_ULONG));
+    } else
+#endif
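A note on the T4 block that ends above: the bn_pwr5_mont_t4_* and bn_mul_mont_t4_* helpers return an int and may decline (return 0), so every call site tries the accelerated path twice before falling back to the generic bn_mul_mont_vis3 route; the stride of 5 * (6 - (top/16 - 1)) likewise keeps each hardware call to a multiple-of-5 bit count below 32. A toy sketch of just the try-twice-then-fall-back dispatch, with illustrative stub functions standing in for the assembly helpers:

    #include <stdio.h>

    /* fast_op stands in for a hardware path that can decline;
     * here it pretends to decline twice before succeeding. */
    static int fast_op(int *r, int a)
    {
        static int declines = 2;
        if (declines > 0) { declines--; return 0; }
        *r = a * 2;
        return 1;
    }

    static void slow_op(int *r, int a) { *r = a * 2; } /* generic path */

    int main(void)
    {
        int r;
        if (!fast_op(&r, 21) && !fast_op(&r, 21))
            slow_op(&r, 21);
        printf("%d\n", r);   /* 42 */
        return 0;
    }
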
 #if defined(OPENSSL_BN_ASM_MONT5)
     if (window == 5 && top > 1) {
         /*
@@ -764,8 +1002,15 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
         void bn_scatter5(const BN_ULONG *inp, size_t num,
                          void *table, size_t power);
         void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power);
+        void bn_power5(BN_ULONG *rp, const BN_ULONG *ap,
+                       const void *table, const BN_ULONG *np,
+                       const BN_ULONG *n0, int num, int power);
+        int bn_get_bits5(const BN_ULONG *ap, int off);
+        int bn_from_montgomery(BN_ULONG *rp, const BN_ULONG *ap,
+                               const BN_ULONG *not_used, const BN_ULONG *np,
+                               const BN_ULONG *n0, int num);
 
-        BN_ULONG *np = mont->N.d, *n0 = mont->n0;
+        BN_ULONG *n0 = mont->n0, *np;
 
         /*
          * BN_to_montgomery can contaminate words above .top [in
@@ -776,6 +1021,12 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
         for (i = tmp.top; i < top; i++)
             tmp.d[i] = 0;
 
+        /*
+         * copy mont->N.d[] to improve cache locality
+         */
+        for (np = am.d + top, i = 0; i < top; i++)
+            np[i] = mont->N.d[i];
+
         bn_scatter5(tmp.d, top, powerbuf, 0);
         bn_scatter5(am.d, am.top, powerbuf, 1);
         bn_mul_mont(tmp.d, am.d, am.d, np, n0, top);
@@ -822,20 +1073,34 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
          * Scan the exponent one window at a time starting from the most
          * significant bits.
          */
-        while (bits >= 0) {
-            for (wvalue = 0, i = 0; i < 5; i++, bits--)
-                wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
+        if (top & 7)
+            while (bits >= 0) {
+                for (wvalue = 0, i = 0; i < 5; i++, bits--)
+                    wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
 
-            bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
-            bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
-            bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
-            bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
-            bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
-            bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue);
+                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
+                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
+                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
+                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
+                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
+                bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top,
+                                    wvalue);
+        } else {
+            while (bits >= 0) {
+                wvalue = bn_get_bits5(p->d, bits - 4);
+                bits -= 5;
+                bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue);
+            }
         }
 
+        ret = bn_from_montgomery(tmp.d, tmp.d, NULL, np, n0, top);
         tmp.top = top;
         bn_correct_top(&tmp);
+        if (ret) {
+            if (!BN_copy(rr, &tmp))
+                ret = 0;
+            goto err;           /* non-zero ret means it's not error */
+        }
     } else
 #endif
     {
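
A note on the MONT5 loop above: it consumes the exponent five bits at a time, most significant window first. Five Montgomery squarings shift the partial result left by one window, then a single multiply by the gathered table entry folds the window in; bn_power5 fuses those six operations into one call. A hedged sketch of the same schedule using plain modular arithmetic on small integers (no Montgomery form, no constant-time gather, just the window walk):

    #include <stdint.h>
    #include <stdio.h>

    /* Left-to-right fixed 5-bit-window exponentiation mod m.
     * nbits must be a multiple of 5 and cover e's bit length. */
    static uint64_t pow5win(uint64_t a, uint64_t e, int nbits, uint64_t m)
    {
        uint64_t table[32], r;
        int i, bits;

        table[0] = 1 % m;                        /* a^0 .. a^31 mod m */
        for (i = 1; i < 32; i++)
            table[i] = table[i - 1] * a % m;

        bits = nbits - 5;
        r = table[(e >> bits) & 31];             /* top window */
        while ((bits -= 5) >= 0) {
            for (i = 0; i < 5; i++)
                r = r * r % m;                   /* five squarings */
            r = r * table[(e >> bits) & 31] % m; /* one gather-multiply */
        }
        return r;
    }

    int main(void)
    {
        /* 7^1000003 mod 1000033, exponent padded to 20 bits */
        printf("%llu\n", (unsigned long long)pow5win(7, 1000003, 20, 1000033));
        return 0;
    }
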
@@ -901,6 +1166,15 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
     }
 
     /* Convert the final result from montgomery to standard format */
+#if defined(SPARC_T4_MONT)
+    if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
+        am.d[0] = 1;            /* borrow am */
+        for (i = 1; i < top; i++)
+            am.d[i] = 0;
+        if (!BN_mod_mul_montgomery(rr, &tmp, &am, mont, ctx))
+            goto err;
+    } else
+#endif
     if (!BN_from_montgomery(rr, &tmp, mont, ctx))
         goto err;
     ret = 1;

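A note on the SPARC tail of the previous hunk: it converts out of Montgomery form by Montgomery-multiplying with the plain integer 1, since montmul(x, 1) = x * 1 * R^-1 = x * R^-1 mod m, which is exactly what BN_from_montgomery computes; this keeps the final step on the same T4 montmul hardware path. A toy single-word REDC illustrating the identity (all constants here are illustrative, with R = 2^8):

    #include <stdio.h>

    /* Toy REDC: montmul(x, y) = x*y*R^-1 mod m with R = 2^8,
     * m odd, x,y < m, n0 = -m^-1 mod 256. */
    static unsigned montmul(unsigned x, unsigned y, unsigned m, unsigned n0)
    {
        unsigned t = x * y;
        unsigned q = (t * n0) & 0xffu;  /* make t + q*m divisible by R */
        t = (t + q * m) >> 8;
        return t >= m ? t - m : t;
    }

    int main(void)
    {
        unsigned m = 239, n0 = 0xf1;    /* -239^-1 mod 256 */
        unsigned x = 100;
        unsigned xm = (x << 8) % m;     /* to Montgomery form: x*R mod m */
        /* multiplying by plain 1 converts back: montmul(xm, 1) == x */
        printf("%u\n", montmul(xm, 1, m, n0));  /* 100 */
        return 0;
    }
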
+ 1 - 2
drivers/builtin_openssl2/crypto/bn/bn_gf2m.c

@@ -450,8 +450,7 @@ int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[])
             d0 = p[k] % BN_BITS2;
             d1 = BN_BITS2 - d0;
             z[n] ^= (zz << d0);
-            tmp_ulong = zz >> d1;
-            if (d0 && tmp_ulong)
+            if (d0 && (tmp_ulong = zz >> d1))
                 z[n + 1] ^= tmp_ulong;
         }
 

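A note on the bn_gf2m.c change above: it is more than style. When d0 == 0, d1 == BN_BITS2 and the old code evaluated zz >> d1 unconditionally, a shift by the full word width, which is undefined behaviour in C; the rewrite short-circuits on d0 so the shift is never executed in that case. A minimal illustration of the hazard, with the shift guarded the same way:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t zz = 0xdeadbeefu, out = 0;
        int d0 = 0;                 /* degree lands on a word boundary */
        int d1 = 32 - d0;           /* == 32: full-width shift         */
        uint32_t tmp;

        /* Undefined: tmp = zz >> d1;  (shift count == type width)     */
        /* Defined: short-circuit skips the shift when d0 == 0         */
        if (d0 && (tmp = zz >> d1))
            out ^= tmp;

        printf("%08x\n", out);      /* 00000000 */
        return 0;
    }
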
+ 27 - 0
drivers/builtin_openssl2/crypto/bn/bn_lcl.h

@@ -204,6 +204,24 @@ extern "C" {
 # define BN_MUL_LOW_RECURSIVE_SIZE_NORMAL        (32)/* 32 */
 # define BN_MONT_CTX_SET_SIZE_WORD               (64)/* 32 */
 
+/*
+ * 2011-02-22 SMS. In various places, a size_t variable or a type cast to
+ * size_t was used to perform integer-only operations on pointers.  This
+ * failed on VMS with 64-bit pointers (CC /POINTER_SIZE = 64) because size_t
+ * is still only 32 bits.  What's needed in these cases is an integer type
+ * with the same size as a pointer, which size_t is not certain to be. The
+ * only fix here is VMS-specific.
+ */
+# if defined(OPENSSL_SYS_VMS)
+#  if __INITIAL_POINTER_SIZE == 64
+#   define PTR_SIZE_INT long long
+#  else                         /* __INITIAL_POINTER_SIZE == 64 */
+#   define PTR_SIZE_INT int
+#  endif                        /* __INITIAL_POINTER_SIZE == 64 [else] */
+# elif !defined(PTR_SIZE_INT)   /* defined(OPENSSL_SYS_VMS) */
+#  define PTR_SIZE_INT size_t
+# endif                         /* defined(OPENSSL_SYS_VMS) [else] */
+
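A note on PTR_SIZE_INT above: it exists because size_t is not guaranteed to be pointer-sized (the comment cites VMS with 64-bit pointers and a 32-bit size_t). On C99 and later, uintptr_t from <stdint.h> is the portable spelling of the same idea. A brief sketch of the pointer-alignment arithmetic this kind of macro is typically used for, written with uintptr_t:

    #include <stdint.h>
    #include <stdio.h>

    /* Round a pointer up to a 64-byte boundary using an integer
     * type defined (where it exists) to round-trip a pointer. */
    static void *align64(void *p)
    {
        uintptr_t u = (uintptr_t)p;
        return (void *)((u + 63) & ~(uintptr_t)63);
    }

    int main(void)
    {
        char buf[128];
        void *a = align64(buf);
        printf("%p -> %p\n", (void *)buf, a);
        return 0;
    }
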
 # if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
 /*
  * BN_UMULT_HIGH section.
@@ -295,6 +313,15 @@ unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
              : "r"(a), "r"(b));
 #    endif
 #   endif
+#  elif defined(__aarch64__) && defined(SIXTY_FOUR_BIT_LONG)
+#   if defined(__GNUC__) && __GNUC__>=2
+#    define BN_UMULT_HIGH(a,b)   ({      \
+        register BN_ULONG ret;          \
+        asm ("umulh     %0,%1,%2"       \
+             : "=r"(ret)                \
+             : "r"(a), "r"(b));         \
+        ret;                    })
+#   endif
 #  endif                        /* cpu */
 # endif                         /* OPENSSL_NO_ASM */
 

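A note on the AArch64 arm added above: it wires BN_UMULT_HIGH to the umulh instruction, which returns the high 64 bits of a 64x64-bit multiply. Where inline assembly is unavailable, GCC and Clang can express the same operation through the nonstandard __int128 type; a sketch of that fallback (assumes a compiler with __int128):

    #include <stdint.h>
    #include <stdio.h>

    /* High half of a 64x64 -> 128-bit unsigned multiply.
     * Compilers lower this to a single umulh/mulh on 64-bit
     * targets that have one. */
    static uint64_t umult_high(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    int main(void)
    {
        /* (2^64-1)^2 has high half 2^64 - 2 */
        printf("%016llx\n",
               (unsigned long long)umult_high(~0ULL, ~0ULL));
        /* prints: fffffffffffffffe */
        return 0;
    }
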
+ 0 - 119
drivers/builtin_openssl2/crypto/bn/bn_prime.pl

@@ -1,119 +0,0 @@
-#!/usr/local/bin/perl
-# bn_prime.pl
-
-$num=2048;
-$num=$ARGV[0] if ($#ARGV >= 0);
-
-push(@primes,2);
-$p=1;
-loop: while ($#primes < $num-1)
-	{
-	$p+=2;
-	$s=int(sqrt($p));
-
-	for ($i=0; defined($primes[$i]) && $primes[$i]<=$s; $i++)
-		{
-		next loop if (($p%$primes[$i]) == 0);
-		}
-	push(@primes,$p);
-	}
-
-# print <<"EOF";
-# /* Auto generated by bn_prime.pl */
-# /* Copyright (C) 1995-1997 Eric Young (eay\@mincom.oz.au).
-#  * All rights reserved.
-#  * Copyright remains Eric Young's, and as such any Copyright notices in
-#  * the code are not to be removed.
-#  * See the COPYRIGHT file in the SSLeay distribution for more details.
-#  */
-# 
-# EOF
-
-print <<\EOF;
-/* Auto generated by bn_prime.pl */
-/* Copyright (C) 1995-1998 Eric Young ([email protected])
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young ([email protected]).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson ([email protected]).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young ([email protected])"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson ([email protected])"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-EOF
-
-for ($i=0; $i <= $#primes; $i++)
-	{
-	if ($primes[$i] > 256)
-		{
-		$eight=$i;
-		last;
-		}
-	}
-
-printf "#ifndef EIGHT_BIT\n";
-printf "#define NUMPRIMES %d\n",$num;
-printf "typedef unsigned short prime_t;\n";
-printf "#else\n";
-printf "#define NUMPRIMES %d\n",$eight;
-printf "typedef unsigned char prime_t;\n";
-printf "#endif\n";
-print "static const prime_t primes[NUMPRIMES]=\n\t{\n\t";
-$init=0;
-for ($i=0; $i <= $#primes; $i++)
-	{
-	printf "\n#ifndef EIGHT_BIT\n\t" if ($primes[$i] > 256) && !($init++);
-	printf("\n\t") if (($i%8) == 0) && ($i != 0);
-	printf("%4d,",$primes[$i]);
-	}
-print "\n#endif\n\t};\n";
-
-

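A note on the deleted bn_prime.pl above: it generated the static primes[] table consumed by the library by trial division, starting at 3 and dividing each odd candidate by the primes found so far up to its square root. The same generator fits in a few lines of C; this is a sketch of the algorithm, not a replacement for the script:

    #include <stdio.h>

    #define NUM 2048                    /* table size, as in the script */

    int main(void)
    {
        static unsigned primes[NUM];
        int count = 0, i;
        unsigned p;

        primes[count++] = 2;
        for (p = 3; count < NUM; p += 2) {
            int composite = 0;
            /* trial-divide by known primes up to sqrt(p) */
            for (i = 0; (unsigned long)primes[i] * primes[i] <= p; i++)
                if (p % primes[i] == 0) { composite = 1; break; }
            if (!composite)
                primes[count++] = p;
        }
        printf("last prime: %u\n", primes[NUM - 1]);  /* 17863 */
        return 0;
    }
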
+ 0 - 2137
drivers/builtin_openssl2/crypto/bn/bntest.c

@@ -1,2137 +0,0 @@
-/* crypto/bn/bntest.c */
-/* Copyright (C) 1995-1998 Eric Young ([email protected])
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young ([email protected]).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson ([email protected]).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young ([email protected])"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson ([email protected])"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- *
- * Portions of the attached software ("Contribution") are developed by
- * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
- *
- * The Contribution is licensed pursuant to the Eric Young open source
- * license provided above.
- *
- * The binary polynomial arithmetic software is originally written by
- * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems Laboratories.
- *
- */
-
-/*
- * Until the key-gen callbacks are modified to use newer prototypes, we allow
- * deprecated functions for openssl-internal code
- */
-#ifdef OPENSSL_NO_DEPRECATED
-# undef OPENSSL_NO_DEPRECATED
-#endif
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "e_os.h"
-
-#include <openssl/bio.h>
-#include <openssl/bn.h>
-#include <openssl/rand.h>
-#include <openssl/x509.h>
-#include <openssl/err.h>
-
-const int num0 = 100;           /* number of tests */
-const int num1 = 50;            /* additional tests for some functions */
-const int num2 = 5;             /* number of tests for slow functions */
-
-int test_add(BIO *bp);
-int test_sub(BIO *bp);
-int test_lshift1(BIO *bp);
-int test_lshift(BIO *bp, BN_CTX *ctx, BIGNUM *a_);
-int test_rshift1(BIO *bp);
-int test_rshift(BIO *bp, BN_CTX *ctx);
-int test_div(BIO *bp, BN_CTX *ctx);
-int test_div_word(BIO *bp);
-int test_div_recp(BIO *bp, BN_CTX *ctx);
-int test_mul(BIO *bp);
-int test_sqr(BIO *bp, BN_CTX *ctx);
-int test_mont(BIO *bp, BN_CTX *ctx);
-int test_mod(BIO *bp, BN_CTX *ctx);
-int test_mod_mul(BIO *bp, BN_CTX *ctx);
-int test_mod_exp(BIO *bp, BN_CTX *ctx);
-int test_mod_exp_mont_consttime(BIO *bp, BN_CTX *ctx);
-int test_mod_exp_mont5(BIO *bp, BN_CTX *ctx);
-int test_exp(BIO *bp, BN_CTX *ctx);
-int test_gf2m_add(BIO *bp);
-int test_gf2m_mod(BIO *bp);
-int test_gf2m_mod_mul(BIO *bp, BN_CTX *ctx);
-int test_gf2m_mod_sqr(BIO *bp, BN_CTX *ctx);
-int test_gf2m_mod_inv(BIO *bp, BN_CTX *ctx);
-int test_gf2m_mod_div(BIO *bp, BN_CTX *ctx);
-int test_gf2m_mod_exp(BIO *bp, BN_CTX *ctx);
-int test_gf2m_mod_sqrt(BIO *bp, BN_CTX *ctx);
-int test_gf2m_mod_solve_quad(BIO *bp, BN_CTX *ctx);
-int test_kron(BIO *bp, BN_CTX *ctx);
-int test_sqrt(BIO *bp, BN_CTX *ctx);
-int rand_neg(void);
-static int results = 0;
-
-static unsigned char lst[] =
-    "\xC6\x4F\x43\x04\x2A\xEA\xCA\x6E\x58\x36\x80\x5B\xE8\xC9"
-    "\x9B\x04\x5D\x48\x36\xC2\xFD\x16\xC9\x64\xF0";
-
-static const char rnd_seed[] =
-    "string to make the random number generator think it has entropy";
-
-static void message(BIO *out, char *m)
-{
-    fprintf(stderr, "test %s\n", m);
-    BIO_puts(out, "print \"test ");
-    BIO_puts(out, m);
-    BIO_puts(out, "\\n\"\n");
-}
-
-int main(int argc, char *argv[])
-{
-    BN_CTX *ctx;
-    BIO *out;
-    char *outfile = NULL;
-
-    results = 0;
-
-    RAND_seed(rnd_seed, sizeof rnd_seed); /* or BN_generate_prime may fail */
-
-    argc--;
-    argv++;
-    while (argc >= 1) {
-        if (strcmp(*argv, "-results") == 0)
-            results = 1;
-        else if (strcmp(*argv, "-out") == 0) {
-            if (--argc < 1)
-                break;
-            outfile = *(++argv);
-        }
-        argc--;
-        argv++;
-    }
-
-    ctx = BN_CTX_new();
-    if (ctx == NULL)
-        EXIT(1);
-
-    out = BIO_new(BIO_s_file());
-    if (out == NULL)
-        EXIT(1);
-    if (outfile == NULL) {
-        BIO_set_fp(out, stdout, BIO_NOCLOSE);
-    } else {
-        if (!BIO_write_filename(out, outfile)) {
-            perror(outfile);
-            EXIT(1);
-        }
-    }
-
-    if (!results)
-        BIO_puts(out, "obase=16\nibase=16\n");
-
-    message(out, "BN_add");
-    if (!test_add(out))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_sub");
-    if (!test_sub(out))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_lshift1");
-    if (!test_lshift1(out))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_lshift (fixed)");
-    if (!test_lshift(out, ctx, BN_bin2bn(lst, sizeof(lst) - 1, NULL)))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_lshift");
-    if (!test_lshift(out, ctx, NULL))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_rshift1");
-    if (!test_rshift1(out))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_rshift");
-    if (!test_rshift(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_sqr");
-    if (!test_sqr(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_mul");
-    if (!test_mul(out))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_div");
-    if (!test_div(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_div_word");
-    if (!test_div_word(out))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_div_recp");
-    if (!test_div_recp(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_mod");
-    if (!test_mod(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_mod_mul");
-    if (!test_mod_mul(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_mont");
-    if (!test_mont(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_mod_exp");
-    if (!test_mod_exp(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_mod_exp_mont_consttime");
-    if (!test_mod_exp_mont_consttime(out, ctx))
-        goto err;
-    if (!test_mod_exp_mont5(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_exp");
-    if (!test_exp(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_kronecker");
-    if (!test_kron(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_mod_sqrt");
-    if (!test_sqrt(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-#ifndef OPENSSL_NO_EC2M
-    message(out, "BN_GF2m_add");
-    if (!test_gf2m_add(out))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_GF2m_mod");
-    if (!test_gf2m_mod(out))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_GF2m_mod_mul");
-    if (!test_gf2m_mod_mul(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_GF2m_mod_sqr");
-    if (!test_gf2m_mod_sqr(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_GF2m_mod_inv");
-    if (!test_gf2m_mod_inv(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_GF2m_mod_div");
-    if (!test_gf2m_mod_div(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_GF2m_mod_exp");
-    if (!test_gf2m_mod_exp(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_GF2m_mod_sqrt");
-    if (!test_gf2m_mod_sqrt(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-
-    message(out, "BN_GF2m_mod_solve_quad");
-    if (!test_gf2m_mod_solve_quad(out, ctx))
-        goto err;
-    (void)BIO_flush(out);
-#endif
-    BN_CTX_free(ctx);
-    BIO_free(out);
-
-    EXIT(0);
- err:
-    BIO_puts(out, "1\n");       /* make sure the Perl script fed by bc
-                                 * notices the failure, see test_bn in
-                                 * test/Makefile.ssl */
-    (void)BIO_flush(out);
-    ERR_load_crypto_strings();
-    ERR_print_errors_fp(stderr);
-    EXIT(1);
-    return (1);
-}
-
-int test_add(BIO *bp)
-{
-    BIGNUM a, b, c;
-    int i;
-
-    BN_init(&a);
-    BN_init(&b);
-    BN_init(&c);
-
-    BN_bntest_rand(&a, 512, 0, 0);
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(&b, 450 + i, 0, 0);
-        a.neg = rand_neg();
-        b.neg = rand_neg();
-        BN_add(&c, &a, &b);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " + ");
-                BN_print(bp, &b);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, &c);
-            BIO_puts(bp, "\n");
-        }
-        a.neg = !a.neg;
-        b.neg = !b.neg;
-        BN_add(&c, &c, &b);
-        BN_add(&c, &c, &a);
-        if (!BN_is_zero(&c)) {
-            fprintf(stderr, "Add test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(&a);
-    BN_free(&b);
-    BN_free(&c);
-    return (1);
-}
-
-int test_sub(BIO *bp)
-{
-    BIGNUM a, b, c;
-    int i;
-
-    BN_init(&a);
-    BN_init(&b);
-    BN_init(&c);
-
-    for (i = 0; i < num0 + num1; i++) {
-        if (i < num1) {
-            BN_bntest_rand(&a, 512, 0, 0);
-            BN_copy(&b, &a);
-            if (BN_set_bit(&a, i) == 0)
-                return (0);
-            BN_add_word(&b, i);
-        } else {
-            BN_bntest_rand(&b, 400 + i - num1, 0, 0);
-            a.neg = rand_neg();
-            b.neg = rand_neg();
-        }
-        BN_sub(&c, &a, &b);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " - ");
-                BN_print(bp, &b);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, &c);
-            BIO_puts(bp, "\n");
-        }
-        BN_add(&c, &c, &b);
-        BN_sub(&c, &c, &a);
-        if (!BN_is_zero(&c)) {
-            fprintf(stderr, "Subtract test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(&a);
-    BN_free(&b);
-    BN_free(&c);
-    return (1);
-}
-
-int test_div(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM a, b, c, d, e;
-    int i;
-
-    BN_init(&a);
-    BN_init(&b);
-    BN_init(&c);
-    BN_init(&d);
-    BN_init(&e);
-
-    BN_one(&a);
-    BN_zero(&b);
-
-    if (BN_div(&d, &c, &a, &b, ctx)) {
-        fprintf(stderr, "Division by zero succeeded!\n");
-        return 0;
-    }
-
-    for (i = 0; i < num0 + num1; i++) {
-        if (i < num1) {
-            BN_bntest_rand(&a, 400, 0, 0);
-            BN_copy(&b, &a);
-            BN_lshift(&a, &a, i);
-            BN_add_word(&a, i);
-        } else
-            BN_bntest_rand(&b, 50 + 3 * (i - num1), 0, 0);
-        a.neg = rand_neg();
-        b.neg = rand_neg();
-        BN_div(&d, &c, &a, &b, ctx);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " / ");
-                BN_print(bp, &b);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, &d);
-            BIO_puts(bp, "\n");
-
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " % ");
-                BN_print(bp, &b);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, &c);
-            BIO_puts(bp, "\n");
-        }
-        BN_mul(&e, &d, &b, ctx);
-        BN_add(&d, &e, &c);
-        BN_sub(&d, &d, &a);
-        if (!BN_is_zero(&d)) {
-            fprintf(stderr, "Division test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(&a);
-    BN_free(&b);
-    BN_free(&c);
-    BN_free(&d);
-    BN_free(&e);
-    return (1);
-}
-
-static void print_word(BIO *bp, BN_ULONG w)
-{
-#ifdef SIXTY_FOUR_BIT
-    if (sizeof(w) > sizeof(unsigned long)) {
-        unsigned long h = (unsigned long)(w >> 32), l = (unsigned long)(w);
-
-        if (h)
-            BIO_printf(bp, "%lX%08lX", h, l);
-        else
-            BIO_printf(bp, "%lX", l);
-        return;
-    }
-#endif
-    BIO_printf(bp, BN_HEX_FMT1, w);
-}
-
-int test_div_word(BIO *bp)
-{
-    BIGNUM a, b;
-    BN_ULONG r, s;
-    int i;
-
-    BN_init(&a);
-    BN_init(&b);
-
-    for (i = 0; i < num0; i++) {
-        do {
-            BN_bntest_rand(&a, 512, -1, 0);
-            BN_bntest_rand(&b, BN_BITS2, -1, 0);
-        } while (BN_is_zero(&b));
-
-        s = b.d[0];
-        BN_copy(&b, &a);
-        r = BN_div_word(&b, s);
-
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " / ");
-                print_word(bp, s);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, &b);
-            BIO_puts(bp, "\n");
-
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " % ");
-                print_word(bp, s);
-                BIO_puts(bp, " - ");
-            }
-            print_word(bp, r);
-            BIO_puts(bp, "\n");
-        }
-        BN_mul_word(&b, s);
-        BN_add_word(&b, r);
-        BN_sub(&b, &a, &b);
-        if (!BN_is_zero(&b)) {
-            fprintf(stderr, "Division (word) test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(&a);
-    BN_free(&b);
-    return (1);
-}
-
-int test_div_recp(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM a, b, c, d, e;
-    BN_RECP_CTX recp;
-    int i;
-
-    BN_RECP_CTX_init(&recp);
-    BN_init(&a);
-    BN_init(&b);
-    BN_init(&c);
-    BN_init(&d);
-    BN_init(&e);
-
-    for (i = 0; i < num0 + num1; i++) {
-        if (i < num1) {
-            BN_bntest_rand(&a, 400, 0, 0);
-            BN_copy(&b, &a);
-            BN_lshift(&a, &a, i);
-            BN_add_word(&a, i);
-        } else
-            BN_bntest_rand(&b, 50 + 3 * (i - num1), 0, 0);
-        a.neg = rand_neg();
-        b.neg = rand_neg();
-        BN_RECP_CTX_set(&recp, &b, ctx);
-        BN_div_recp(&d, &c, &a, &recp, ctx);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " / ");
-                BN_print(bp, &b);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, &d);
-            BIO_puts(bp, "\n");
-
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " % ");
-                BN_print(bp, &b);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, &c);
-            BIO_puts(bp, "\n");
-        }
-        BN_mul(&e, &d, &b, ctx);
-        BN_add(&d, &e, &c);
-        BN_sub(&d, &d, &a);
-        if (!BN_is_zero(&d)) {
-            fprintf(stderr, "Reciprocal division test failed!\n");
-            fprintf(stderr, "a=");
-            BN_print_fp(stderr, &a);
-            fprintf(stderr, "\nb=");
-            BN_print_fp(stderr, &b);
-            fprintf(stderr, "\n");
-            return 0;
-        }
-    }
-    BN_free(&a);
-    BN_free(&b);
-    BN_free(&c);
-    BN_free(&d);
-    BN_free(&e);
-    BN_RECP_CTX_free(&recp);
-    return (1);
-}
-
-int test_mul(BIO *bp)
-{
-    BIGNUM a, b, c, d, e;
-    int i;
-    BN_CTX *ctx;
-
-    ctx = BN_CTX_new();
-    if (ctx == NULL)
-        EXIT(1);
-
-    BN_init(&a);
-    BN_init(&b);
-    BN_init(&c);
-    BN_init(&d);
-    BN_init(&e);
-
-    for (i = 0; i < num0 + num1; i++) {
-        if (i <= num1) {
-            BN_bntest_rand(&a, 100, 0, 0);
-            BN_bntest_rand(&b, 100, 0, 0);
-        } else
-            BN_bntest_rand(&b, i - num1, 0, 0);
-        a.neg = rand_neg();
-        b.neg = rand_neg();
-        BN_mul(&c, &a, &b, ctx);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " * ");
-                BN_print(bp, &b);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, &c);
-            BIO_puts(bp, "\n");
-        }
-        BN_div(&d, &e, &c, &a, ctx);
-        BN_sub(&d, &d, &b);
-        if (!BN_is_zero(&d) || !BN_is_zero(&e)) {
-            fprintf(stderr, "Multiplication test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(&a);
-    BN_free(&b);
-    BN_free(&c);
-    BN_free(&d);
-    BN_free(&e);
-    BN_CTX_free(ctx);
-    return (1);
-}
-
-int test_sqr(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *c, *d, *e;
-    int i, ret = 0;
-
-    a = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-    if (a == NULL || c == NULL || d == NULL || e == NULL) {
-        goto err;
-    }
-
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(a, 40 + i * 10, 0, 0);
-        a->neg = rand_neg();
-        BN_sqr(c, a, ctx);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, a);
-                BIO_puts(bp, " * ");
-                BN_print(bp, a);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, c);
-            BIO_puts(bp, "\n");
-        }
-        BN_div(d, e, c, a, ctx);
-        BN_sub(d, d, a);
-        if (!BN_is_zero(d) || !BN_is_zero(e)) {
-            fprintf(stderr, "Square test failed!\n");
-            goto err;
-        }
-    }
-
-    /* Regression test for a BN_sqr overflow bug. */
-    BN_hex2bn(&a,
-              "80000000000000008000000000000001"
-              "FFFFFFFFFFFFFFFE0000000000000000");
-    BN_sqr(c, a, ctx);
-    if (bp != NULL) {
-        if (!results) {
-            BN_print(bp, a);
-            BIO_puts(bp, " * ");
-            BN_print(bp, a);
-            BIO_puts(bp, " - ");
-        }
-        BN_print(bp, c);
-        BIO_puts(bp, "\n");
-    }
-    BN_mul(d, a, a, ctx);
-    if (BN_cmp(c, d)) {
-        fprintf(stderr, "Square test failed: BN_sqr and BN_mul produce "
-                "different results!\n");
-        goto err;
-    }
-
-    /* Regression test for a BN_sqr overflow bug. */
-    BN_hex2bn(&a,
-              "80000000000000000000000080000001"
-              "FFFFFFFE000000000000000000000000");
-    BN_sqr(c, a, ctx);
-    if (bp != NULL) {
-        if (!results) {
-            BN_print(bp, a);
-            BIO_puts(bp, " * ");
-            BN_print(bp, a);
-            BIO_puts(bp, " - ");
-        }
-        BN_print(bp, c);
-        BIO_puts(bp, "\n");
-    }
-    BN_mul(d, a, a, ctx);
-    if (BN_cmp(c, d)) {
-        fprintf(stderr, "Square test failed: BN_sqr and BN_mul produce "
-                "different results!\n");
-        goto err;
-    }
-    ret = 1;
- err:
-    if (a != NULL)
-        BN_free(a);
-    if (c != NULL)
-        BN_free(c);
-    if (d != NULL)
-        BN_free(d);
-    if (e != NULL)
-        BN_free(e);
-    return ret;
-}
-
-int test_mont(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM a, b, c, d, A, B;
-    BIGNUM n;
-    int i;
-    BN_MONT_CTX *mont;
-
-    BN_init(&a);
-    BN_init(&b);
-    BN_init(&c);
-    BN_init(&d);
-    BN_init(&A);
-    BN_init(&B);
-    BN_init(&n);
-
-    mont = BN_MONT_CTX_new();
-    if (mont == NULL)
-        return 0;
-
-    BN_zero(&n);
-    if (BN_MONT_CTX_set(mont, &n, ctx)) {
-        fprintf(stderr, "BN_MONT_CTX_set succeeded for zero modulus!\n");
-        return 0;
-    }
-
-    BN_set_word(&n, 16);
-    if (BN_MONT_CTX_set(mont, &n, ctx)) {
-        fprintf(stderr, "BN_MONT_CTX_set succeeded for even modulus!\n");
-        return 0;
-    }
-
-    BN_bntest_rand(&a, 100, 0, 0);
-    BN_bntest_rand(&b, 100, 0, 0);
-    for (i = 0; i < num2; i++) {
-        int bits = (200 * (i + 1)) / num2;
-
-        if (bits == 0)
-            continue;
-        BN_bntest_rand(&n, bits, 0, 1);
-        BN_MONT_CTX_set(mont, &n, ctx);
-
-        BN_nnmod(&a, &a, &n, ctx);
-        BN_nnmod(&b, &b, &n, ctx);
-
-        BN_to_montgomery(&A, &a, mont, ctx);
-        BN_to_montgomery(&B, &b, mont, ctx);
-
-        BN_mod_mul_montgomery(&c, &A, &B, mont, ctx);
-        BN_from_montgomery(&A, &c, mont, ctx);
-        if (bp != NULL) {
-            if (!results) {
-#ifdef undef
-                fprintf(stderr, "%d * %d %% %d\n",
-                        BN_num_bits(&a),
-                        BN_num_bits(&b), BN_num_bits(mont->N));
-#endif
-                BN_print(bp, &a);
-                BIO_puts(bp, " * ");
-                BN_print(bp, &b);
-                BIO_puts(bp, " % ");
-                BN_print(bp, &(mont->N));
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, &A);
-            BIO_puts(bp, "\n");
-        }
-        BN_mod_mul(&d, &a, &b, &n, ctx);
-        BN_sub(&d, &d, &A);
-        if (!BN_is_zero(&d)) {
-            fprintf(stderr, "Montgomery multiplication test failed!\n");
-            return 0;
-        }
-    }
-    BN_MONT_CTX_free(mont);
-    BN_free(&a);
-    BN_free(&b);
-    BN_free(&c);
-    BN_free(&d);
-    BN_free(&A);
-    BN_free(&B);
-    BN_free(&n);
-    return (1);
-}
-
-int test_mod(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b, *c, *d, *e;
-    int i;
-
-    a = BN_new();
-    b = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-
-    BN_bntest_rand(a, 1024, 0, 0);
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(b, 450 + i * 10, 0, 0);
-        a->neg = rand_neg();
-        b->neg = rand_neg();
-        BN_mod(c, a, b, ctx);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, a);
-                BIO_puts(bp, " % ");
-                BN_print(bp, b);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, c);
-            BIO_puts(bp, "\n");
-        }
-        BN_div(d, e, a, b, ctx);
-        BN_sub(e, e, c);
-        if (!BN_is_zero(e)) {
-            fprintf(stderr, "Modulo test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(a);
-    BN_free(b);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    return (1);
-}
-
-int test_mod_mul(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b, *c, *d, *e;
-    int i, j;
-
-    a = BN_new();
-    b = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-
-    BN_one(a);
-    BN_one(b);
-    BN_zero(c);
-    if (BN_mod_mul(e, a, b, c, ctx)) {
-        fprintf(stderr, "BN_mod_mul with zero modulus succeeded!\n");
-        return 0;
-    }
-
-    for (j = 0; j < 3; j++) {
-        BN_bntest_rand(c, 1024, 0, 0);
-        for (i = 0; i < num0; i++) {
-            BN_bntest_rand(a, 475 + i * 10, 0, 0);
-            BN_bntest_rand(b, 425 + i * 11, 0, 0);
-            a->neg = rand_neg();
-            b->neg = rand_neg();
-            if (!BN_mod_mul(e, a, b, c, ctx)) {
-                unsigned long l;
-
-                while ((l = ERR_get_error()))
-                    fprintf(stderr, "ERROR:%s\n", ERR_error_string(l, NULL));
-                EXIT(1);
-            }
-            if (bp != NULL) {
-                if (!results) {
-                    BN_print(bp, a);
-                    BIO_puts(bp, " * ");
-                    BN_print(bp, b);
-                    BIO_puts(bp, " % ");
-                    BN_print(bp, c);
-                    if ((a->neg ^ b->neg) && !BN_is_zero(e)) {
-                        /*
-                         * If (a*b) % c is negative, c must be added in order
-                         * to obtain the normalized remainder (new with
-                         * OpenSSL 0.9.7, previous versions of BN_mod_mul
-                         * could generate negative results)
-                         */
-                        BIO_puts(bp, " + ");
-                        BN_print(bp, c);
-                    }
-                    BIO_puts(bp, " - ");
-                }
-                BN_print(bp, e);
-                BIO_puts(bp, "\n");
-            }
-            BN_mul(d, a, b, ctx);
-            BN_sub(d, d, e);
-            BN_div(a, b, d, c, ctx);
-            if (!BN_is_zero(b)) {
-                fprintf(stderr, "Modulo multiply test failed!\n");
-                ERR_print_errors_fp(stderr);
-                return 0;
-            }
-        }
-    }
-    BN_free(a);
-    BN_free(b);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    return (1);
-}
-
-int test_mod_exp(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b, *c, *d, *e;
-    int i;
-
-    a = BN_new();
-    b = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-
-    BN_one(a);
-    BN_one(b);
-    BN_zero(c);
-    if (BN_mod_exp(d, a, b, c, ctx)) {
-        fprintf(stderr, "BN_mod_exp with zero modulus succeeded!\n");
-        return 0;
-    }
-
-    BN_bntest_rand(c, 30, 0, 1); /* must be odd for montgomery */
-    for (i = 0; i < num2; i++) {
-        BN_bntest_rand(a, 20 + i * 5, 0, 0);
-        BN_bntest_rand(b, 2 + i, 0, 0);
-
-        if (!BN_mod_exp(d, a, b, c, ctx))
-            return (0);
-
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, a);
-                BIO_puts(bp, " ^ ");
-                BN_print(bp, b);
-                BIO_puts(bp, " % ");
-                BN_print(bp, c);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, d);
-            BIO_puts(bp, "\n");
-        }
-        BN_exp(e, a, b, ctx);
-        BN_sub(e, e, d);
-        BN_div(a, b, e, c, ctx);
-        if (!BN_is_zero(b)) {
-            fprintf(stderr, "Modulo exponentiation test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(a);
-    BN_free(b);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    return (1);
-}
-
-int test_mod_exp_mont_consttime(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b, *c, *d, *e;
-    int i;
-
-    a = BN_new();
-    b = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-
-    BN_one(a);
-    BN_one(b);
-    BN_zero(c);
-    if (BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) {
-        fprintf(stderr, "BN_mod_exp_mont_consttime with zero modulus "
-                "succeeded\n");
-        return 0;
-    }
-
-    BN_set_word(c, 16);
-    if (BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) {
-        fprintf(stderr, "BN_mod_exp_mont_consttime with even modulus "
-                "succeeded\n");
-        return 0;
-    }
-
-    BN_bntest_rand(c, 30, 0, 1); /* must be odd for montgomery */
-    for (i = 0; i < num2; i++) {
-        BN_bntest_rand(a, 20 + i * 5, 0, 0);
-        BN_bntest_rand(b, 2 + i, 0, 0);
-
-        if (!BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL))
-            return (00);
-
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, a);
-                BIO_puts(bp, " ^ ");
-                BN_print(bp, b);
-                BIO_puts(bp, " % ");
-                BN_print(bp, c);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, d);
-            BIO_puts(bp, "\n");
-        }
-        BN_exp(e, a, b, ctx);
-        BN_sub(e, e, d);
-        BN_div(a, b, e, c, ctx);
-        if (!BN_is_zero(b)) {
-            fprintf(stderr, "Modulo exponentiation test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(a);
-    BN_free(b);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    return (1);
-}
-
-/*
- * Test constant-time modular exponentiation with 1024-bit inputs, which on
- * x86_64 cause a different code branch to be taken.
- */
-int test_mod_exp_mont5(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *p, *m, *d, *e;
-
-    BN_MONT_CTX *mont;
-
-    a = BN_new();
-    p = BN_new();
-    m = BN_new();
-    d = BN_new();
-    e = BN_new();
-
-    mont = BN_MONT_CTX_new();
-
-    BN_bntest_rand(m, 1024, 0, 1); /* must be odd for montgomery */
-    /* Zero exponent */
-    BN_bntest_rand(a, 1024, 0, 0);
-    BN_zero(p);
-    if (!BN_mod_exp_mont_consttime(d, a, p, m, ctx, NULL))
-        return 0;
-    if (!BN_is_one(d)) {
-        fprintf(stderr, "Modular exponentiation test failed!\n");
-        return 0;
-    }
-    /* Zero input */
-    BN_bntest_rand(p, 1024, 0, 0);
-    BN_zero(a);
-    if (!BN_mod_exp_mont_consttime(d, a, p, m, ctx, NULL))
-        return 0;
-    if (!BN_is_zero(d)) {
-        fprintf(stderr, "Modular exponentiation test failed!\n");
-        return 0;
-    }
-    /*
-     * Craft an input whose Montgomery representation is 1, i.e., shorter
-     * than the modulus m, in order to test the const time precomputation
-     * scattering/gathering.
-     */
-    BN_one(a);
-    BN_MONT_CTX_set(mont, m, ctx);
-    if (!BN_from_montgomery(e, a, mont, ctx))
-        return 0;
-    if (!BN_mod_exp_mont_consttime(d, e, p, m, ctx, NULL))
-        return 0;
-    if (!BN_mod_exp_simple(a, e, p, m, ctx))
-        return 0;
-    if (BN_cmp(a, d) != 0) {
-        fprintf(stderr, "Modular exponentiation test failed!\n");
-        return 0;
-    }
-    /* Finally, some regular test vectors. */
-    BN_bntest_rand(e, 1024, 0, 0);
-    if (!BN_mod_exp_mont_consttime(d, e, p, m, ctx, NULL))
-        return 0;
-    if (!BN_mod_exp_simple(a, e, p, m, ctx))
-        return 0;
-    if (BN_cmp(a, d) != 0) {
-        fprintf(stderr, "Modular exponentiation test failed!\n");
-        return 0;
-    }
-    BN_free(a);
-    BN_free(p);
-    BN_free(m);
-    BN_free(d);
-    BN_free(e);
-    return (1);
-}
-
-int test_exp(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b, *d, *e, *one;
-    int i;
-
-    a = BN_new();
-    b = BN_new();
-    d = BN_new();
-    e = BN_new();
-    one = BN_new();
-    BN_one(one);
-
-    for (i = 0; i < num2; i++) {
-        BN_bntest_rand(a, 20 + i * 5, 0, 0);
-        BN_bntest_rand(b, 2 + i, 0, 0);
-
-        if (BN_exp(d, a, b, ctx) <= 0)
-            return (0);
-
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, a);
-                BIO_puts(bp, " ^ ");
-                BN_print(bp, b);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, d);
-            BIO_puts(bp, "\n");
-        }
-        BN_one(e);
-        for (; !BN_is_zero(b); BN_sub(b, b, one))
-            BN_mul(e, e, a, ctx);
-        BN_sub(e, e, d);
-        if (!BN_is_zero(e)) {
-            fprintf(stderr, "Exponentiation test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(a);
-    BN_free(b);
-    BN_free(d);
-    BN_free(e);
-    BN_free(one);
-    return (1);
-}
-
-#ifndef OPENSSL_NO_EC2M
-int test_gf2m_add(BIO *bp)
-{
-    BIGNUM a, b, c;
-    int i, ret = 0;
-
-    BN_init(&a);
-    BN_init(&b);
-    BN_init(&c);
-
-    for (i = 0; i < num0; i++) {
-        BN_rand(&a, 512, 0, 0);
-        BN_copy(&b, BN_value_one());
-        a.neg = rand_neg();
-        b.neg = rand_neg();
-        BN_GF2m_add(&c, &a, &b);
-# if 0                          /* make test uses ouput in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, &a);
-                BIO_puts(bp, " ^ ");
-                BN_print(bp, &b);
-                BIO_puts(bp, " = ");
-            }
-            BN_print(bp, &c);
-            BIO_puts(bp, "\n");
-        }
-# endif
-        /* Test that two added values have the correct parity. */
-        if ((BN_is_odd(&a) && BN_is_odd(&c))
-            || (!BN_is_odd(&a) && !BN_is_odd(&c))) {
-            fprintf(stderr, "GF(2^m) addition test (a) failed!\n");
-            goto err;
-        }
-        BN_GF2m_add(&c, &c, &c);
-        /* Test that c + c = 0. */
-        if (!BN_is_zero(&c)) {
-            fprintf(stderr, "GF(2^m) addition test (b) failed!\n");
-            goto err;
-        }
-    }
-    ret = 1;
- err:
-    BN_free(&a);
-    BN_free(&b);
-    BN_free(&c);
-    return ret;
-}
-
-int test_gf2m_mod(BIO *bp)
-{
-    BIGNUM *a, *b[2], *c, *d, *e;
-    int i, j, ret = 0;
-    int p0[] = { 163, 7, 6, 3, 0, -1 };
-    int p1[] = { 193, 15, 0, -1 };
-
-    a = BN_new();
-    b[0] = BN_new();
-    b[1] = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-
-    BN_GF2m_arr2poly(p0, b[0]);
-    BN_GF2m_arr2poly(p1, b[1]);
-
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(a, 1024, 0, 0);
-        for (j = 0; j < 2; j++) {
-            BN_GF2m_mod(c, a, b[j]);
-# if 0                          /* make test uses output in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-            if (bp != NULL) {
-                if (!results) {
-                    BN_print(bp, a);
-                    BIO_puts(bp, " % ");
-                    BN_print(bp, b[j]);
-                    BIO_puts(bp, " - ");
-                    BN_print(bp, c);
-                    BIO_puts(bp, "\n");
-                }
-            }
-# endif
-            BN_GF2m_add(d, a, c);
-            BN_GF2m_mod(e, d, b[j]);
-            /* Test that a + (a mod p) mod p == 0. */
-            if (!BN_is_zero(e)) {
-                fprintf(stderr, "GF(2^m) modulo test failed!\n");
-                goto err;
-            }
-        }
-    }
-    ret = 1;
- err:
-    BN_free(a);
-    BN_free(b[0]);
-    BN_free(b[1]);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    return ret;
-}
-
-int test_gf2m_mod_mul(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b[2], *c, *d, *e, *f, *g, *h;
-    int i, j, ret = 0;
-    int p0[] = { 163, 7, 6, 3, 0, -1 };
-    int p1[] = { 193, 15, 0, -1 };
-
-    a = BN_new();
-    b[0] = BN_new();
-    b[1] = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-    f = BN_new();
-    g = BN_new();
-    h = BN_new();
-
-    BN_GF2m_arr2poly(p0, b[0]);
-    BN_GF2m_arr2poly(p1, b[1]);
-
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(a, 1024, 0, 0);
-        BN_bntest_rand(c, 1024, 0, 0);
-        BN_bntest_rand(d, 1024, 0, 0);
-        for (j = 0; j < 2; j++) {
-            BN_GF2m_mod_mul(e, a, c, b[j], ctx);
-# if 0                          /* make test uses output in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-            if (bp != NULL) {
-                if (!results) {
-                    BN_print(bp, a);
-                    BIO_puts(bp, " * ");
-                    BN_print(bp, c);
-                    BIO_puts(bp, " % ");
-                    BN_print(bp, b[j]);
-                    BIO_puts(bp, " - ");
-                    BN_print(bp, e);
-                    BIO_puts(bp, "\n");
-                }
-            }
-# endif
-            BN_GF2m_add(f, a, d);
-            BN_GF2m_mod_mul(g, f, c, b[j], ctx);
-            BN_GF2m_mod_mul(h, d, c, b[j], ctx);
-            BN_GF2m_add(f, e, g);
-            BN_GF2m_add(f, f, h);
-            /* Test that (a+d)*c = a*c + d*c. */
-            if (!BN_is_zero(f)) {
-                fprintf(stderr,
-                        "GF(2^m) modular multiplication test failed!\n");
-                goto err;
-            }
-        }
-    }
-    ret = 1;
- err:
-    BN_free(a);
-    BN_free(b[0]);
-    BN_free(b[1]);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    BN_free(f);
-    BN_free(g);
-    BN_free(h);
-    return ret;
-}
-
-int test_gf2m_mod_sqr(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b[2], *c, *d;
-    int i, j, ret = 0;
-    int p0[] = { 163, 7, 6, 3, 0, -1 };
-    int p1[] = { 193, 15, 0, -1 };
-
-    a = BN_new();
-    b[0] = BN_new();
-    b[1] = BN_new();
-    c = BN_new();
-    d = BN_new();
-
-    BN_GF2m_arr2poly(p0, b[0]);
-    BN_GF2m_arr2poly(p1, b[1]);
-
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(a, 1024, 0, 0);
-        for (j = 0; j < 2; j++) {
-            BN_GF2m_mod_sqr(c, a, b[j], ctx);
-            BN_copy(d, a);
-            BN_GF2m_mod_mul(d, a, d, b[j], ctx);
-# if 0                          /* make test uses output in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-            if (bp != NULL) {
-                if (!results) {
-                    BN_print(bp, a);
-                    BIO_puts(bp, " ^ 2 % ");
-                    BN_print(bp, b[j]);
-                    BIO_puts(bp, " = ");
-                    BN_print(bp, c);
-                    BIO_puts(bp, "; a * a = ");
-                    BN_print(bp, d);
-                    BIO_puts(bp, "\n");
-                }
-            }
-# endif
-            BN_GF2m_add(d, c, d);
-            /* Test that a*a = a^2. */
-            if (!BN_is_zero(d)) {
-                fprintf(stderr, "GF(2^m) modular squaring test failed!\n");
-                goto err;
-            }
-        }
-    }
-    ret = 1;
- err:
-    BN_free(a);
-    BN_free(b[0]);
-    BN_free(b[1]);
-    BN_free(c);
-    BN_free(d);
-    return ret;
-}
-
-int test_gf2m_mod_inv(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b[2], *c, *d;
-    int i, j, ret = 0;
-    int p0[] = { 163, 7, 6, 3, 0, -1 };
-    int p1[] = { 193, 15, 0, -1 };
-
-    a = BN_new();
-    b[0] = BN_new();
-    b[1] = BN_new();
-    c = BN_new();
-    d = BN_new();
-
-    BN_GF2m_arr2poly(p0, b[0]);
-    BN_GF2m_arr2poly(p1, b[1]);
-
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(a, 512, 0, 0);
-        for (j = 0; j < 2; j++) {
-            BN_GF2m_mod_inv(c, a, b[j], ctx);
-            BN_GF2m_mod_mul(d, a, c, b[j], ctx);
-# if 0                          /* make test uses output in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-            if (bp != NULL) {
-                if (!results) {
-                    BN_print(bp, a);
-                    BIO_puts(bp, " * ");
-                    BN_print(bp, c);
-                    BIO_puts(bp, " - 1 % ");
-                    BN_print(bp, b[j]);
-                    BIO_puts(bp, "\n");
-                }
-            }
-# endif
-            /* Test that ((1/a)*a) = 1. */
-            if (!BN_is_one(d)) {
-                fprintf(stderr, "GF(2^m) modular inversion test failed!\n");
-                goto err;
-            }
-        }
-    }
-    ret = 1;
- err:
-    BN_free(a);
-    BN_free(b[0]);
-    BN_free(b[1]);
-    BN_free(c);
-    BN_free(d);
-    return ret;
-}
-
-int test_gf2m_mod_div(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b[2], *c, *d, *e, *f;
-    int i, j, ret = 0;
-    int p0[] = { 163, 7, 6, 3, 0, -1 };
-    int p1[] = { 193, 15, 0, -1 };
-
-    a = BN_new();
-    b[0] = BN_new();
-    b[1] = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-    f = BN_new();
-
-    BN_GF2m_arr2poly(p0, b[0]);
-    BN_GF2m_arr2poly(p1, b[1]);
-
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(a, 512, 0, 0);
-        BN_bntest_rand(c, 512, 0, 0);
-        for (j = 0; j < 2; j++) {
-            BN_GF2m_mod_div(d, a, c, b[j], ctx);
-            BN_GF2m_mod_mul(e, d, c, b[j], ctx);
-            BN_GF2m_mod_div(f, a, e, b[j], ctx);
-# if 0                          /* make test uses output in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-            if (bp != NULL) {
-                if (!results) {
-                    BN_print(bp, a);
-                    BIO_puts(bp, " = ");
-                    BN_print(bp, c);
-                    BIO_puts(bp, " * ");
-                    BN_print(bp, d);
-                    BIO_puts(bp, " % ");
-                    BN_print(bp, b[j]);
-                    BIO_puts(bp, "\n");
-                }
-            }
-# endif
-            /* Test that ((a/c)*c)/a = 1. */
-            if (!BN_is_one(f)) {
-                fprintf(stderr, "GF(2^m) modular division test failed!\n");
-                goto err;
-            }
-        }
-    }
-    ret = 1;
- err:
-    BN_free(a);
-    BN_free(b[0]);
-    BN_free(b[1]);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    BN_free(f);
-    return ret;
-}
-
-int test_gf2m_mod_exp(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b[2], *c, *d, *e, *f;
-    int i, j, ret = 0;
-    int p0[] = { 163, 7, 6, 3, 0, -1 };
-    int p1[] = { 193, 15, 0, -1 };
-
-    a = BN_new();
-    b[0] = BN_new();
-    b[1] = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-    f = BN_new();
-
-    BN_GF2m_arr2poly(p0, b[0]);
-    BN_GF2m_arr2poly(p1, b[1]);
-
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(a, 512, 0, 0);
-        BN_bntest_rand(c, 512, 0, 0);
-        BN_bntest_rand(d, 512, 0, 0);
-        for (j = 0; j < 2; j++) {
-            BN_GF2m_mod_exp(e, a, c, b[j], ctx);
-            BN_GF2m_mod_exp(f, a, d, b[j], ctx);
-            BN_GF2m_mod_mul(e, e, f, b[j], ctx);
-            BN_add(f, c, d);
-            BN_GF2m_mod_exp(f, a, f, b[j], ctx);
-# if 0                          /* make test uses output in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-            if (bp != NULL) {
-                if (!results) {
-                    BN_print(bp, a);
-                    BIO_puts(bp, " ^ (");
-                    BN_print(bp, c);
-                    BIO_puts(bp, " + ");
-                    BN_print(bp, d);
-                    BIO_puts(bp, ") = ");
-                    BN_print(bp, e);
-                    BIO_puts(bp, "; - ");
-                    BN_print(bp, f);
-                    BIO_puts(bp, " % ");
-                    BN_print(bp, b[j]);
-                    BIO_puts(bp, "\n");
-                }
-            }
-# endif
-            BN_GF2m_add(f, e, f);
-            /* Test that a^(c+d)=a^c*a^d. */
-            if (!BN_is_zero(f)) {
-                fprintf(stderr,
-                        "GF(2^m) modular exponentiation test failed!\n");
-                goto err;
-            }
-        }
-    }
-    ret = 1;
- err:
-    BN_free(a);
-    BN_free(b[0]);
-    BN_free(b[1]);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    BN_free(f);
-    return ret;
-}
-
-int test_gf2m_mod_sqrt(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b[2], *c, *d, *e, *f;
-    int i, j, ret = 0;
-    int p0[] = { 163, 7, 6, 3, 0, -1 };
-    int p1[] = { 193, 15, 0, -1 };
-
-    a = BN_new();
-    b[0] = BN_new();
-    b[1] = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-    f = BN_new();
-
-    BN_GF2m_arr2poly(p0, b[0]);
-    BN_GF2m_arr2poly(p1, b[1]);
-
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(a, 512, 0, 0);
-        for (j = 0; j < 2; j++) {
-            BN_GF2m_mod(c, a, b[j]);
-            BN_GF2m_mod_sqrt(d, a, b[j], ctx);
-            BN_GF2m_mod_sqr(e, d, b[j], ctx);
-# if 0                          /* make test uses output in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-            if (bp != NULL) {
-                if (!results) {
-                    BN_print(bp, d);
-                    BIO_puts(bp, " ^ 2 - ");
-                    BN_print(bp, a);
-                    BIO_puts(bp, "\n");
-                }
-            }
-# endif
-            BN_GF2m_add(f, c, e);
-            /* Test that d^2 = a, where d = sqrt(a). */
-            if (!BN_is_zero(f)) {
-                fprintf(stderr, "GF(2^m) modular square root test failed!\n");
-                goto err;
-            }
-        }
-    }
-    ret = 1;
- err:
-    BN_free(a);
-    BN_free(b[0]);
-    BN_free(b[1]);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    BN_free(f);
-    return ret;
-}
-
-int test_gf2m_mod_solve_quad(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b[2], *c, *d, *e;
-    int i, j, s = 0, t, ret = 0;
-    int p0[] = { 163, 7, 6, 3, 0, -1 };
-    int p1[] = { 193, 15, 0, -1 };
-
-    a = BN_new();
-    b[0] = BN_new();
-    b[1] = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-
-    BN_GF2m_arr2poly(p0, b[0]);
-    BN_GF2m_arr2poly(p1, b[1]);
-
-    for (i = 0; i < num0; i++) {
-        BN_bntest_rand(a, 512, 0, 0);
-        for (j = 0; j < 2; j++) {
-            t = BN_GF2m_mod_solve_quad(c, a, b[j], ctx);
-            if (t) {
-                s++;
-                BN_GF2m_mod_sqr(d, c, b[j], ctx);
-                BN_GF2m_add(d, c, d);
-                BN_GF2m_mod(e, a, b[j]);
-# if 0                          /* make test uses output in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-                if (bp != NULL) {
-                    if (!results) {
-                        BN_print(bp, c);
-                        BIO_puts(bp, " is root of z^2 + z = ");
-                        BN_print(bp, a);
-                        BIO_puts(bp, " % ");
-                        BN_print(bp, b[j]);
-                        BIO_puts(bp, "\n");
-                    }
-                }
-# endif
-                BN_GF2m_add(e, e, d);
-                /*
-                 * Test that solution of quadratic c satisfies c^2 + c = a.
-                 */
-                if (!BN_is_zero(e)) {
-                    fprintf(stderr,
-                            "GF(2^m) modular solve quadratic test failed!\n");
-                    goto err;
-                }
-
-            } else {
-# if 0                          /* make test uses output in bc but bc can't
-                                 * handle GF(2^m) arithmetic */
-                if (bp != NULL) {
-                    if (!results) {
-                        BIO_puts(bp, "There are no roots of z^2 + z = ");
-                        BN_print(bp, a);
-                        BIO_puts(bp, " % ");
-                        BN_print(bp, b[j]);
-                        BIO_puts(bp, "\n");
-                    }
-                }
-# endif
-            }
-        }
-    }
-    if (s == 0) {
-        fprintf(stderr,
-                "All %i tests of GF(2^m) modular solve quadratic resulted in no roots;\n",
-                num0);
-        fprintf(stderr,
-                "this is very unlikely and probably indicates an error.\n");
-        goto err;
-    }
-    ret = 1;
- err:
-    BN_free(a);
-    BN_free(b[0]);
-    BN_free(b[1]);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    return ret;
-}
-#endif
-static int genprime_cb(int p, int n, BN_GENCB *arg)
-{
-    char c = '*';
-
-    if (p == 0)
-        c = '.';
-    if (p == 1)
-        c = '+';
-    if (p == 2)
-        c = '*';
-    if (p == 3)
-        c = '\n';
-    putc(c, stderr);
-    fflush(stderr);
-    return 1;
-}
-
-int test_kron(BIO *bp, BN_CTX *ctx)
-{
-    BN_GENCB cb;
-    BIGNUM *a, *b, *r, *t;
-    int i;
-    int legendre, kronecker;
-    int ret = 0;
-
-    a = BN_new();
-    b = BN_new();
-    r = BN_new();
-    t = BN_new();
-    if (a == NULL || b == NULL || r == NULL || t == NULL)
-        goto err;
-
-    BN_GENCB_set(&cb, genprime_cb, NULL);
-
-    /*
-     * We test BN_kronecker(a, b, ctx) just for b odd (Jacobi symbol). In
-     * this case we know that if b is prime, then BN_kronecker(a, b, ctx) is
-     * congruent to $a^{(b-1)/2}$, modulo $b$ (Legendre symbol). So we
-     * generate a random prime b and compare these values for a number of
-     * random a's.  (That is, we run the Solovay-Strassen primality test to
-     * confirm that b is prime, except that we don't want to test whether b
-     * is prime but whether BN_kronecker works.)
-     */
-
-    if (!BN_generate_prime_ex(b, 512, 0, NULL, NULL, &cb))
-        goto err;
-    b->neg = rand_neg();
-    putc('\n', stderr);
-
-    for (i = 0; i < num0; i++) {
-        if (!BN_bntest_rand(a, 512, 0, 0))
-            goto err;
-        a->neg = rand_neg();
-
-        /* t := (|b|-1)/2  (note that b is odd) */
-        if (!BN_copy(t, b))
-            goto err;
-        t->neg = 0;
-        if (!BN_sub_word(t, 1))
-            goto err;
-        if (!BN_rshift1(t, t))
-            goto err;
-        /* r := a^t mod b */
-        b->neg = 0;
-
-        if (!BN_mod_exp_recp(r, a, t, b, ctx))
-            goto err;
-        b->neg = 1;
-
-        if (BN_is_word(r, 1))
-            legendre = 1;
-        else if (BN_is_zero(r))
-            legendre = 0;
-        else {
-            if (!BN_add_word(r, 1))
-                goto err;
-            if (0 != BN_ucmp(r, b)) {
-                fprintf(stderr, "Legendre symbol computation failed\n");
-                goto err;
-            }
-            legendre = -1;
-        }
-
-        kronecker = BN_kronecker(a, b, ctx);
-        if (kronecker < -1)
-            goto err;
-        /* we actually need BN_kronecker(a, |b|) */
-        if (a->neg && b->neg)
-            kronecker = -kronecker;
-
-        if (legendre != kronecker) {
-            fprintf(stderr, "legendre != kronecker; a = ");
-            BN_print_fp(stderr, a);
-            fprintf(stderr, ", b = ");
-            BN_print_fp(stderr, b);
-            fprintf(stderr, "\n");
-            goto err;
-        }
-
-        putc('.', stderr);
-        fflush(stderr);
-    }
-
-    putc('\n', stderr);
-    fflush(stderr);
-    ret = 1;
- err:
-    if (a != NULL)
-        BN_free(a);
-    if (b != NULL)
-        BN_free(b);
-    if (r != NULL)
-        BN_free(r);
-    if (t != NULL)
-        BN_free(t);
-    return ret;
-}
-
-int test_sqrt(BIO *bp, BN_CTX *ctx)
-{
-    BN_GENCB cb;
-    BIGNUM *a, *p, *r;
-    int i, j;
-    int ret = 0;
-
-    a = BN_new();
-    p = BN_new();
-    r = BN_new();
-    if (a == NULL || p == NULL || r == NULL)
-        goto err;
-
-    BN_GENCB_set(&cb, genprime_cb, NULL);
-
-    for (i = 0; i < 16; i++) {
-        if (i < 8) {
-            unsigned primes[8] = { 2, 3, 5, 7, 11, 13, 17, 19 };
-
-            if (!BN_set_word(p, primes[i]))
-                goto err;
-        } else {
-            if (!BN_set_word(a, 32))
-                goto err;
-            if (!BN_set_word(r, 2 * i + 1))
-                goto err;
-
-            if (!BN_generate_prime_ex(p, 256, 0, a, r, &cb))
-                goto err;
-            putc('\n', stderr);
-        }
-        p->neg = rand_neg();
-
-        for (j = 0; j < num2; j++) {
-            /*
-             * construct 'a' such that it is a square modulo p, but in
-             * general not a proper square and not reduced modulo p
-             */
-            if (!BN_bntest_rand(r, 256, 0, 3))
-                goto err;
-            if (!BN_nnmod(r, r, p, ctx))
-                goto err;
-            if (!BN_mod_sqr(r, r, p, ctx))
-                goto err;
-            if (!BN_bntest_rand(a, 256, 0, 3))
-                goto err;
-            if (!BN_nnmod(a, a, p, ctx))
-                goto err;
-            if (!BN_mod_sqr(a, a, p, ctx))
-                goto err;
-            if (!BN_mul(a, a, r, ctx))
-                goto err;
-            if (rand_neg())
-                if (!BN_sub(a, a, p))
-                    goto err;
-
-            if (!BN_mod_sqrt(r, a, p, ctx))
-                goto err;
-            if (!BN_mod_sqr(r, r, p, ctx))
-                goto err;
-
-            if (!BN_nnmod(a, a, p, ctx))
-                goto err;
-
-            if (BN_cmp(a, r) != 0) {
-                fprintf(stderr, "BN_mod_sqrt failed: a = ");
-                BN_print_fp(stderr, a);
-                fprintf(stderr, ", r = ");
-                BN_print_fp(stderr, r);
-                fprintf(stderr, ", p = ");
-                BN_print_fp(stderr, p);
-                fprintf(stderr, "\n");
-                goto err;
-            }
-
-            putc('.', stderr);
-            fflush(stderr);
-        }
-
-        putc('\n', stderr);
-        fflush(stderr);
-    }
-    ret = 1;
- err:
-    if (a != NULL)
-        BN_free(a);
-    if (p != NULL)
-        BN_free(p);
-    if (r != NULL)
-        BN_free(r);
-    return ret;
-}
-
-int test_lshift(BIO *bp, BN_CTX *ctx, BIGNUM *a_)
-{
-    BIGNUM *a, *b, *c, *d;
-    int i;
-
-    b = BN_new();
-    c = BN_new();
-    d = BN_new();
-    BN_one(c);
-
-    if (a_)
-        a = a_;
-    else {
-        a = BN_new();
-        BN_bntest_rand(a, 200, 0, 0);
-        a->neg = rand_neg();
-    }
-    for (i = 0; i < num0; i++) {
-        BN_lshift(b, a, i + 1);
-        BN_add(c, c, c);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, a);
-                BIO_puts(bp, " * ");
-                BN_print(bp, c);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, b);
-            BIO_puts(bp, "\n");
-        }
-        BN_mul(d, a, c, ctx);
-        BN_sub(d, d, b);
-        if (!BN_is_zero(d)) {
-            fprintf(stderr, "Left shift test failed!\n");
-            fprintf(stderr, "a=");
-            BN_print_fp(stderr, a);
-            fprintf(stderr, "\nb=");
-            BN_print_fp(stderr, b);
-            fprintf(stderr, "\nc=");
-            BN_print_fp(stderr, c);
-            fprintf(stderr, "\nd=");
-            BN_print_fp(stderr, d);
-            fprintf(stderr, "\n");
-            return 0;
-        }
-    }
-    BN_free(a);
-    BN_free(b);
-    BN_free(c);
-    BN_free(d);
-    return (1);
-}
-
-int test_lshift1(BIO *bp)
-{
-    BIGNUM *a, *b, *c;
-    int i;
-
-    a = BN_new();
-    b = BN_new();
-    c = BN_new();
-
-    BN_bntest_rand(a, 200, 0, 0);
-    a->neg = rand_neg();
-    for (i = 0; i < num0; i++) {
-        BN_lshift1(b, a);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, a);
-                BIO_puts(bp, " * 2");
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, b);
-            BIO_puts(bp, "\n");
-        }
-        BN_add(c, a, a);
-        BN_sub(a, b, c);
-        if (!BN_is_zero(a)) {
-            fprintf(stderr, "Left shift one test failed!\n");
-            return 0;
-        }
-
-        BN_copy(a, b);
-    }
-    BN_free(a);
-    BN_free(b);
-    BN_free(c);
-    return (1);
-}
-
-int test_rshift(BIO *bp, BN_CTX *ctx)
-{
-    BIGNUM *a, *b, *c, *d, *e;
-    int i;
-
-    a = BN_new();
-    b = BN_new();
-    c = BN_new();
-    d = BN_new();
-    e = BN_new();
-    BN_one(c);
-
-    BN_bntest_rand(a, 200, 0, 0);
-    a->neg = rand_neg();
-    for (i = 0; i < num0; i++) {
-        BN_rshift(b, a, i + 1);
-        BN_add(c, c, c);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, a);
-                BIO_puts(bp, " / ");
-                BN_print(bp, c);
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, b);
-            BIO_puts(bp, "\n");
-        }
-        BN_div(d, e, a, c, ctx);
-        BN_sub(d, d, b);
-        if (!BN_is_zero(d)) {
-            fprintf(stderr, "Right shift test failed!\n");
-            return 0;
-        }
-    }
-    BN_free(a);
-    BN_free(b);
-    BN_free(c);
-    BN_free(d);
-    BN_free(e);
-    return (1);
-}
-
-int test_rshift1(BIO *bp)
-{
-    BIGNUM *a, *b, *c;
-    int i;
-
-    a = BN_new();
-    b = BN_new();
-    c = BN_new();
-
-    BN_bntest_rand(a, 200, 0, 0);
-    a->neg = rand_neg();
-    for (i = 0; i < num0; i++) {
-        BN_rshift1(b, a);
-        if (bp != NULL) {
-            if (!results) {
-                BN_print(bp, a);
-                BIO_puts(bp, " / 2");
-                BIO_puts(bp, " - ");
-            }
-            BN_print(bp, b);
-            BIO_puts(bp, "\n");
-        }
-        BN_sub(c, a, b);
-        BN_sub(c, c, b);
-        if (!BN_is_zero(c) && !BN_abs_is_word(c, 1)) {
-            fprintf(stderr, "Right shift one test failed!\n");
-            return 0;
-        }
-        BN_copy(a, b);
-    }
-    BN_free(a);
-    BN_free(b);
-    BN_free(c);
-    return (1);
-}
-
-int rand_neg(void)
-{
-    static unsigned int neg = 0;
-    static int sign[8] = { 0, 0, 0, 1, 1, 0, 1, 1 };
-
-    return (sign[(neg++) % 8]);
-}

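For context on the removed test_kron() above: for an odd prime b, the Kronecker (Jacobi) symbol returned by BN_kronecker() must agree with the Legendre-style value a^((b-1)/2) mod b. Below is a minimal standalone sketch of that identity using only public 1.0.2 BN calls; the helper name and the assumption that b is an odd positive prime are ours, not part of this diff.

#include <openssl/bn.h>

/* Returns 1 if BN_kronecker(a, b) matches a^((b-1)/2) mod b; assumes b is
 * an odd positive prime. Sketch only; error paths are collapsed. */
static int legendre_matches_kronecker(const BIGNUM *a, const BIGNUM *b,
                                      BN_CTX *ctx)
{
    int match = 0;
    BIGNUM *t = BN_new(), *r = BN_new();

    if (t == NULL || r == NULL)
        goto done;
    /* t = (b - 1) / 2 */
    if (!BN_copy(t, b) || !BN_sub_word(t, 1) || !BN_rshift1(t, t))
        goto done;
    /* r = a^t mod b; for prime b this is 0, 1, or b - 1 */
    if (!BN_mod_exp(r, a, t, b, ctx))
        goto done;
    if (BN_is_one(r))
        match = (BN_kronecker(a, b, ctx) == 1);
    else if (BN_is_zero(r))
        match = (BN_kronecker(a, b, ctx) == 0);
    else
        match = (BN_kronecker(a, b, ctx) == -1);
 done:
    BN_free(t);
    BN_free(r);
    return match;
}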
+ 0 - 42
drivers/builtin_openssl2/crypto/bn/divtest.c

@@ -1,42 +0,0 @@
-#include <openssl/bn.h>
-#include <openssl/rand.h>
-
-static int Rand(int n)
-{
-    unsigned char x[2];
-    RAND_pseudo_bytes(x, 2);
-    return (x[0] + 2 * x[1]);
-}
-
-static void bug(char *m, BIGNUM *a, BIGNUM *b)
-{
-    printf("%s!\na=", m);
-    BN_print_fp(stdout, a);
-    printf("\nb=");
-    BN_print_fp(stdout, b);
-    printf("\n");
-    fflush(stdout);
-}
-
-int main(void)
-{
-    BIGNUM *a = BN_new(), *b = BN_new(), *c = BN_new(), *d = BN_new(),
-        *C = BN_new(), *D = BN_new();
-    BN_RECP_CTX *recp = BN_RECP_CTX_new();
-    BN_CTX *ctx = BN_CTX_new();
-
-    for (;;) {
-        BN_pseudo_rand(a, Rand(), 0, 0);
-        BN_pseudo_rand(b, Rand(), 0, 0);
-        if (BN_is_zero(b))
-            continue;
-
-        BN_RECP_CTX_set(recp, b, ctx);
-        if (BN_div(C, D, a, b, ctx) != 1)
-            bug("BN_div failed", a, b);
-        if (BN_div_recp(c, d, a, recp, ctx) != 1)
-            bug("BN_div_recp failed", a, b);
-        else if (BN_cmp(c, C) != 0 || BN_cmp(d, D) != 0)
-            bug("mismatch", a, b);
-    }
-}

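For reference, the invariant the deleted divtest.c looped over (with the duplicated comparison fixed above): BN_div() and BN_div_recp() must produce the same quotient and remainder for any a and b != 0. A hedged sketch using the real 1.0.2 reciprocal-division API; the helper name is ours and allocation checks are elided.

#include <openssl/bn.h>

static int div_recp_agrees(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
{
    int ok;
    BIGNUM *q = BN_new(), *r = BN_new(), *Q = BN_new(), *R = BN_new();
    BN_RECP_CTX *recp = BN_RECP_CTX_new();

    BN_RECP_CTX_set(recp, b, ctx);    /* precompute the reciprocal of b */
    BN_div(Q, R, a, b, ctx);          /* plain long division */
    BN_div_recp(q, r, a, recp, ctx);  /* division via the reciprocal */
    ok = (BN_cmp(q, Q) == 0 && BN_cmp(r, R) == 0);

    BN_RECP_CTX_free(recp);
    BN_free(q); BN_free(r); BN_free(Q); BN_free(R);
    return ok;
}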
+ 0 - 313
drivers/builtin_openssl2/crypto/bn/exptest.c

@@ -1,313 +0,0 @@
-/* crypto/bn/exptest.c */
-/* Copyright (C) 1995-1998 Eric Young ([email protected])
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young ([email protected]).
- * The implementation was written so as to conform with Netscape's SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are adhered to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson ([email protected]).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young ([email protected])"
- *    The word 'cryptographic' can be left out if the routines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson ([email protected])"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publicly available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "../e_os.h"
-
-#include <openssl/bio.h>
-#include <openssl/bn.h>
-#include <openssl/rand.h>
-#include <openssl/err.h>
-
-#define NUM_BITS        (BN_BITS*2)
-
-static const char rnd_seed[] =
-    "string to make the random number generator think it has entropy";
-
-/*
- * Test that r == 0 in test_exp_mod_zero(). Returns one on success,
- * returns zero and prints debug output otherwise.
- */
-static int a_is_zero_mod_one(const char *method, const BIGNUM *r,
-                             const BIGNUM *a) {
-    if (!BN_is_zero(r)) {
-        fprintf(stderr, "%s failed:\n", method);
-        fprintf(stderr, "a ** 0 mod 1 = r (should be 0)\n");
-        fprintf(stderr, "a = ");
-        BN_print_fp(stderr, a);
-        fprintf(stderr, "\nr = ");
-        BN_print_fp(stderr, r);
-        fprintf(stderr, "\n");
-        return 0;
-    }
-    return 1;
-}
-
-/*
- * test_exp_mod_zero tests that x**0 mod 1 == 0. It returns zero on success.
- */
-static int test_exp_mod_zero()
-{
-    BIGNUM a, p, m;
-    BIGNUM r;
-    BN_ULONG one_word = 1;
-    BN_CTX *ctx = BN_CTX_new();
-    int ret = 1, failed = 0;
-
-    BN_init(&m);
-    BN_one(&m);
-
-    BN_init(&a);
-    BN_one(&a);
-
-    BN_init(&p);
-    BN_zero(&p);
-
-    BN_init(&r);
-
-    if (!BN_rand(&a, 1024, 0, 0))
-        goto err;
-
-    if (!BN_mod_exp(&r, &a, &p, &m, ctx))
-        goto err;
-
-    if (!a_is_zero_mod_one("BN_mod_exp", &r, &a))
-        failed = 1;
-
-    if (!BN_mod_exp_recp(&r, &a, &p, &m, ctx))
-        goto err;
-
-    if (!a_is_zero_mod_one("BN_mod_exp_recp", &r, &a))
-        failed = 1;
-
-    if (!BN_mod_exp_simple(&r, &a, &p, &m, ctx))
-        goto err;
-
-    if (!a_is_zero_mod_one("BN_mod_exp_simple", &r, &a))
-        failed = 1;
-
-    if (!BN_mod_exp_mont(&r, &a, &p, &m, ctx, NULL))
-        goto err;
-
-    if (!a_is_zero_mod_one("BN_mod_exp_mont", &r, &a))
-        failed = 1;
-
-    if (!BN_mod_exp_mont_consttime(&r, &a, &p, &m, ctx, NULL)) {
-        goto err;
-    }
-
-    if (!a_is_zero_mod_one("BN_mod_exp_mont_consttime", &r, &a))
-        failed = 1;
-
-    /*
-     * A different codepath exists for single word multiplication
-     * in non-constant-time only.
-     */
-    if (!BN_mod_exp_mont_word(&r, one_word, &p, &m, ctx, NULL))
-        goto err;
-
-    if (!BN_is_zero(&r)) {
-        fprintf(stderr, "BN_mod_exp_mont_word failed:\n");
-        fprintf(stderr, "1 ** 0 mod 1 = r (should be 0)\n");
-        fprintf(stderr, "r = ");
-        BN_print_fp(stderr, &r);
-        fprintf(stderr, "\n");
-        return 0;
-    }
-
-    ret = failed;
-
- err:
-    BN_free(&r);
-    BN_free(&a);
-    BN_free(&p);
-    BN_free(&m);
-    BN_CTX_free(ctx);
-
-    return ret;
-}
-
-int main(int argc, char *argv[])
-{
-    BN_CTX *ctx;
-    BIO *out = NULL;
-    int i, ret;
-    unsigned char c;
-    BIGNUM *r_mont, *r_mont_const, *r_recp, *r_simple, *a, *b, *m;
-
-    RAND_seed(rnd_seed, sizeof rnd_seed); /* or BN_rand may fail, and we
-                                           * don't even check its return
-                                           * value (which we should) */
-
-    ERR_load_BN_strings();
-
-    ctx = BN_CTX_new();
-    if (ctx == NULL)
-        EXIT(1);
-    r_mont = BN_new();
-    r_mont_const = BN_new();
-    r_recp = BN_new();
-    r_simple = BN_new();
-    a = BN_new();
-    b = BN_new();
-    m = BN_new();
-    if (r_mont == NULL || r_mont_const == NULL || r_recp == NULL || r_simple == NULL || a == NULL || b == NULL || m == NULL)
-        goto err;
-
-    out = BIO_new(BIO_s_file());
-
-    if (out == NULL)
-        EXIT(1);
-    BIO_set_fp(out, stdout, BIO_NOCLOSE);
-
-    for (i = 0; i < 200; i++) {
-        RAND_bytes(&c, 1);
-        c = (c % BN_BITS) - BN_BITS2;
-        BN_rand(a, NUM_BITS + c, 0, 0);
-
-        RAND_bytes(&c, 1);
-        c = (c % BN_BITS) - BN_BITS2;
-        BN_rand(b, NUM_BITS + c, 0, 0);
-
-        RAND_bytes(&c, 1);
-        c = (c % BN_BITS) - BN_BITS2;
-        BN_rand(m, NUM_BITS + c, 0, 1);
-
-        BN_mod(a, a, m, ctx);
-        BN_mod(b, b, m, ctx);
-
-        ret = BN_mod_exp_mont(r_mont, a, b, m, ctx, NULL);
-        if (ret <= 0) {
-            printf("BN_mod_exp_mont() problems\n");
-            ERR_print_errors(out);
-            EXIT(1);
-        }
-
-        ret = BN_mod_exp_recp(r_recp, a, b, m, ctx);
-        if (ret <= 0) {
-            printf("BN_mod_exp_recp() problems\n");
-            ERR_print_errors(out);
-            EXIT(1);
-        }
-
-        ret = BN_mod_exp_simple(r_simple, a, b, m, ctx);
-        if (ret <= 0) {
-            printf("BN_mod_exp_simple() problems\n");
-            ERR_print_errors(out);
-            EXIT(1);
-        }
-
-        ret = BN_mod_exp_mont_consttime(r_mont_const, a, b, m, ctx, NULL);
-        if (ret <= 0) {
-            printf("BN_mod_exp_mont_consttime() problems\n");
-            ERR_print_errors(out);
-            EXIT(1);
-        }
-
-        if (BN_cmp(r_simple, r_mont) == 0
-            && BN_cmp(r_simple, r_recp) == 0
-            && BN_cmp(r_simple, r_mont_const) == 0) {
-            printf(".");
-            fflush(stdout);
-        } else {
-            if (BN_cmp(r_simple, r_mont) != 0)
-                printf("\nsimple and mont results differ\n");
-            if (BN_cmp(r_simple, r_mont_const) != 0)
-                printf("\nsimple and mont const time results differ\n");
-            if (BN_cmp(r_simple, r_recp) != 0)
-                printf("\nsimple and recp results differ\n");
-
-            printf("a (%3d) = ", BN_num_bits(a));
-            BN_print(out, a);
-            printf("\nb (%3d) = ", BN_num_bits(b));
-            BN_print(out, b);
-            printf("\nm (%3d) = ", BN_num_bits(m));
-            BN_print(out, m);
-            printf("\nsimple   =");
-            BN_print(out, r_simple);
-            printf("\nrecp     =");
-            BN_print(out, r_recp);
-            printf("\nmont     =");
-            BN_print(out, r_mont);
-            printf("\nmont_ct  =");
-            BN_print(out, r_mont_const);
-            printf("\n");
-            EXIT(1);
-        }
-    }
-    BN_free(r_mont);
-    BN_free(r_mont_const);
-    BN_free(r_recp);
-    BN_free(r_simple);
-    BN_free(a);
-    BN_free(b);
-    BN_free(m);
-    BN_CTX_free(ctx);
-    ERR_remove_thread_state(NULL);
-    CRYPTO_mem_leaks(out);
-    BIO_free(out);
-    printf("\n");
-
-    if (test_exp_mod_zero() != 0)
-        goto err;
-
-    printf("done\n");
-
-    EXIT(0);
- err:
-    ERR_load_crypto_strings();
-    ERR_print_errors(out);
-#ifdef OPENSSL_SYS_NETWARE
-    printf("ERROR\n");
-#endif
-    EXIT(1);
-    return (1);
-}

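The exptest.c self-test is dropped from the bundled copy along with the other bn tests, but the one subtle case it pinned down is worth recording: for any a, a**0 mod 1 must be 0 (the only residue class mod 1), not 1. A minimal reproduction using only the public BN API; the function name is ours and allocation checks are elided.

#include <assert.h>
#include <openssl/bn.h>

static void check_exp_mod_one(void)
{
    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *r = BN_new(), *a = BN_new(), *p = BN_new(), *m = BN_new();

    BN_rand(a, 1024, 0, 0);  /* arbitrary base */
    BN_zero(p);              /* exponent 0 */
    BN_one(m);               /* modulus 1 */
    /* every integer is congruent to 0 mod 1 */
    assert(BN_mod_exp(r, a, p, m, ctx) && BN_is_zero(r));

    BN_free(r); BN_free(a); BN_free(p); BN_free(m);
    BN_CTX_free(ctx);
}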
+ 346 - 0
drivers/builtin_openssl2/crypto/bn/rsaz_exp.c

@@ -0,0 +1,346 @@
+/*****************************************************************************
+*                                                                            *
+*  Copyright (c) 2012, Intel Corporation                                     *
+*                                                                            *
+*  All rights reserved.                                                      *
+*                                                                            *
+*  Redistribution and use in source and binary forms, with or without        *
+*  modification, are permitted provided that the following conditions are    *
+*  met:                                                                      *
+*                                                                            *
+*  *  Redistributions of source code must retain the above copyright         *
+*     notice, this list of conditions and the following disclaimer.          *
+*                                                                            *
+*  *  Redistributions in binary form must reproduce the above copyright      *
+*     notice, this list of conditions and the following disclaimer in the    *
+*     documentation and/or other materials provided with the                 *
+*     distribution.                                                          *
+*                                                                            *
+*  *  Neither the name of the Intel Corporation nor the names of its         *
+*     contributors may be used to endorse or promote products derived from   *
+*     this software without specific prior written permission.               *
+*                                                                            *
+*                                                                            *
+*  THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY          *
+*  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE         *
+*  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR        *
+*  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR            *
+*  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,     *
+*  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,       *
+*  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR        *
+*  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF    *
+*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING      *
+*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS        *
+*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              *
+*                                                                            *
+******************************************************************************
+* Developers and authors:                                                    *
+* Shay Gueron (1, 2), and Vlad Krasnov (1)                                   *
+* (1) Intel Corporation, Israel Development Center, Haifa, Israel            *
+* (2) University of Haifa, Israel                                            *
+*****************************************************************************/
+
+#include "rsaz_exp.h"
+
+#ifdef RSAZ_ENABLED
+
+/*
+ * See crypto/bn/asm/rsaz-avx2.pl for further details.
+ */
+void rsaz_1024_norm2red_avx2(void *red, const void *norm);
+void rsaz_1024_mul_avx2(void *ret, const void *a, const void *b,
+                        const void *n, BN_ULONG k);
+void rsaz_1024_sqr_avx2(void *ret, const void *a, const void *n, BN_ULONG k,
+                        int cnt);
+void rsaz_1024_scatter5_avx2(void *tbl, const void *val, int i);
+void rsaz_1024_gather5_avx2(void *val, const void *tbl, int i);
+void rsaz_1024_red2norm_avx2(void *norm, const void *red);
+
+#if defined(__GNUC__)
+# define ALIGN64        __attribute__((aligned(64)))
+#elif defined(_MSC_VER)
+# define ALIGN64        __declspec(align(64))
+#elif defined(__SUNPRO_C)
+# define ALIGN64
+# pragma align 64(one,two80)
+#else
+/* not fatal, might hurt performance a little */
+# define ALIGN64
+#endif
+
+ALIGN64 static const BN_ULONG one[40] = {
+    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+ALIGN64 static const BN_ULONG two80[40] = {
+    0, 0, 1 << 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+void RSAZ_1024_mod_exp_avx2(BN_ULONG result_norm[16],
+                            const BN_ULONG base_norm[16],
+                            const BN_ULONG exponent[16],
+                            const BN_ULONG m_norm[16], const BN_ULONG RR[16],
+                            BN_ULONG k0)
+{
+    unsigned char storage[320 * 3 + 32 * 9 * 16 + 64]; /* 5.5KB */
+    unsigned char *p_str = storage + (64 - ((size_t)storage % 64));
+    unsigned char *a_inv, *m, *result;
+    unsigned char *table_s = p_str + 320 * 3;
+    unsigned char *R2 = table_s; /* borrow */
+    int index;
+    int wvalue;
+
+    if ((((size_t)p_str & 4095) + 320) >> 12) {
+        result = p_str;
+        a_inv = p_str + 320;
+        m = p_str + 320 * 2;    /* should not cross page */
+    } else {
+        m = p_str;              /* should not cross page */
+        result = p_str + 320;
+        a_inv = p_str + 320 * 2;
+    }
+
+    rsaz_1024_norm2red_avx2(m, m_norm);
+    rsaz_1024_norm2red_avx2(a_inv, base_norm);
+    rsaz_1024_norm2red_avx2(R2, RR);
+
+    rsaz_1024_mul_avx2(R2, R2, R2, m, k0);
+    rsaz_1024_mul_avx2(R2, R2, two80, m, k0);
+
+    /* table[0] = 1 */
+    rsaz_1024_mul_avx2(result, R2, one, m, k0);
+    /* table[1] = a_inv^1 */
+    rsaz_1024_mul_avx2(a_inv, a_inv, R2, m, k0);
+
+    rsaz_1024_scatter5_avx2(table_s, result, 0);
+    rsaz_1024_scatter5_avx2(table_s, a_inv, 1);
+
+    /* table[2] = a_inv^2 */
+    rsaz_1024_sqr_avx2(result, a_inv, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 2);
+#if 0
+    /* this is almost 2x smaller and less than 1% slower */
+    for (index = 3; index < 32; index++) {
+        rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+        rsaz_1024_scatter5_avx2(table_s, result, index);
+    }
+#else
+    /* table[4] = a_inv^4 */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 4);
+    /* table[8] = a_inv^8 */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 8);
+    /* table[16] = a_inv^16 */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 16);
+    /* table[17] = a_inv^17 */
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 17);
+
+    /* table[3] */
+    rsaz_1024_gather5_avx2(result, table_s, 2);
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 3);
+    /* table[6] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 6);
+    /* table[12] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 12);
+    /* table[24] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 24);
+    /* table[25] */
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 25);
+
+    /* table[5] */
+    rsaz_1024_gather5_avx2(result, table_s, 4);
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 5);
+    /* table[10] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 10);
+    /* table[20] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 20);
+    /* table[21] */
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 21);
+
+    /* table[7] */
+    rsaz_1024_gather5_avx2(result, table_s, 6);
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 7);
+    /* table[14] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 14);
+    /* table[28] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 28);
+    /* table[29] */
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 29);
+
+    /* table[9] */
+    rsaz_1024_gather5_avx2(result, table_s, 8);
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 9);
+    /* table[18] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 18);
+    /* table[19] */
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 19);
+
+    /* table[11] */
+    rsaz_1024_gather5_avx2(result, table_s, 10);
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 11);
+    /* table[22] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 22);
+    /* table[23] */
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 23);
+
+    /* table[13] */
+    rsaz_1024_gather5_avx2(result, table_s, 12);
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 13);
+    /* table[26] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 26);
+    /* table[27] */
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 27);
+
+    /* table[15] */
+    rsaz_1024_gather5_avx2(result, table_s, 14);
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 15);
+    /* table[30] */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
+    rsaz_1024_scatter5_avx2(table_s, result, 30);
+    /* table[31] */
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    rsaz_1024_scatter5_avx2(table_s, result, 31);
+#endif
+
+    /* load first window */
+    p_str = (unsigned char *)exponent;
+    wvalue = p_str[127] >> 3;
+    rsaz_1024_gather5_avx2(result, table_s, wvalue);
+
+    index = 1014;
+
+    while (index > -1) {        /* loop for the remaining 203 5-bit windows */
+
+        rsaz_1024_sqr_avx2(result, result, m, k0, 5);
+
+        wvalue = *((unsigned short *)&p_str[index / 8]);
+        wvalue = (wvalue >> (index % 8)) & 31;
+        index -= 5;
+
+        rsaz_1024_gather5_avx2(a_inv, table_s, wvalue); /* borrow a_inv */
+        rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+    }
+
+    /* square four times */
+    rsaz_1024_sqr_avx2(result, result, m, k0, 4);
+
+    wvalue = p_str[0] & 15;
+
+    rsaz_1024_gather5_avx2(a_inv, table_s, wvalue); /* borrow a_inv */
+    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
+
+    /* from Montgomery */
+    rsaz_1024_mul_avx2(result, result, one, m, k0);
+
+    rsaz_1024_red2norm_avx2(result_norm, result);
+
+    OPENSSL_cleanse(storage, sizeof(storage));
+}
+
+/*
+ * See crypto/bn/rsaz-x86_64.pl for further details.
+ */
+void rsaz_512_mul(void *ret, const void *a, const void *b, const void *n,
+                  BN_ULONG k);
+void rsaz_512_mul_scatter4(void *ret, const void *a, const void *n,
+                           BN_ULONG k, const void *tbl, unsigned int power);
+void rsaz_512_mul_gather4(void *ret, const void *a, const void *tbl,
+                          const void *n, BN_ULONG k, unsigned int power);
+void rsaz_512_mul_by_one(void *ret, const void *a, const void *n, BN_ULONG k);
+void rsaz_512_sqr(void *ret, const void *a, const void *n, BN_ULONG k,
+                  int cnt);
+void rsaz_512_scatter4(void *tbl, const BN_ULONG *val, int power);
+void rsaz_512_gather4(BN_ULONG *val, const void *tbl, int power);
+
+void RSAZ_512_mod_exp(BN_ULONG result[8],
+                      const BN_ULONG base[8], const BN_ULONG exponent[8],
+                      const BN_ULONG m[8], BN_ULONG k0, const BN_ULONG RR[8])
+{
+    unsigned char storage[16 * 8 * 8 + 64 * 2 + 64]; /* 1.2KB */
+    unsigned char *table = storage + (64 - ((size_t)storage % 64));
+    BN_ULONG *a_inv = (BN_ULONG *)(table + 16 * 8 * 8);
+    BN_ULONG *temp = (BN_ULONG *)(table + 16 * 8 * 8 + 8 * 8);
+    unsigned char *p_str = (unsigned char *)exponent;
+    int index;
+    unsigned int wvalue;
+
+    /* table[0] = 1_inv */
+    temp[0] = 0 - m[0];
+    temp[1] = ~m[1];
+    temp[2] = ~m[2];
+    temp[3] = ~m[3];
+    temp[4] = ~m[4];
+    temp[5] = ~m[5];
+    temp[6] = ~m[6];
+    temp[7] = ~m[7];
+    rsaz_512_scatter4(table, temp, 0);
+
+    /* table[1] = a_inv^1 */
+    rsaz_512_mul(a_inv, base, RR, m, k0);
+    rsaz_512_scatter4(table, a_inv, 1);
+
+    /* table[2] = a_inv^2 */
+    rsaz_512_sqr(temp, a_inv, m, k0, 1);
+    rsaz_512_scatter4(table, temp, 2);
+
+    for (index = 3; index < 16; index++)
+        rsaz_512_mul_scatter4(temp, a_inv, m, k0, table, index);
+
+    /* load first window */
+    wvalue = p_str[63];
+
+    rsaz_512_gather4(temp, table, wvalue >> 4);
+    rsaz_512_sqr(temp, temp, m, k0, 4);
+    rsaz_512_mul_gather4(temp, temp, table, m, k0, wvalue & 0xf);
+
+    for (index = 62; index >= 0; index--) {
+        wvalue = p_str[index];
+
+        rsaz_512_sqr(temp, temp, m, k0, 4);
+        rsaz_512_mul_gather4(temp, temp, table, m, k0, wvalue >> 4);
+
+        rsaz_512_sqr(temp, temp, m, k0, 4);
+        rsaz_512_mul_gather4(temp, temp, table, m, k0, wvalue & 0x0f);
+    }
+
+    /* from Montgomery */
+    rsaz_512_mul_by_one(result, temp, m, k0);
+
+    OPENSSL_cleanse(storage, sizeof(storage));
+}
+
+#else
+
+# if defined(PEDANTIC) || defined(__DECC) || defined(__clang__)
+static void *dummy = &dummy;
+# endif
+
+#endif

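A toy scalar model of the 5-bit fixed-window ladder that RSAZ_1024_mod_exp_avx2 implements above: precompute base^0..base^31 once, then alternate five squarings with one table multiply per window. Everything below (64-bit arithmetic, helper names, the window split) is illustrative only; the real code walks a 1024-bit Montgomery-domain value with cache-timing-safe scatter/gather.

#include <stdint.h>

/* Requires a compiler with unsigned __int128 (gcc/clang). */
static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t m)
{
    return (uint64_t)((unsigned __int128)a * b % m);
}

static uint64_t modexp_w5(uint64_t base, uint64_t exp, uint64_t m)
{
    uint64_t table[32], result;
    int i, j;

    table[0] = 1 % m;
    for (i = 1; i < 32; i++)
        table[i] = mulmod(table[i - 1], base, m);

    /* 64 = 4 + 12*5: a 4-bit top window, then twelve 5-bit windows */
    result = table[(exp >> 60) & 15];
    for (i = 55; i >= 0; i -= 5) {
        for (j = 0; j < 5; j++)
            result = mulmod(result, result, m); /* square five times */
        result = mulmod(result, table[(exp >> i) & 31], m);
    }
    return result;
}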
+ 68 - 0
drivers/builtin_openssl2/crypto/bn/rsaz_exp.h

@@ -0,0 +1,68 @@
+/*****************************************************************************
+*                                                                            *
+*  Copyright (c) 2012, Intel Corporation                                     *
+*                                                                            *
+*  All rights reserved.                                                      *
+*                                                                            *
+*  Redistribution and use in source and binary forms, with or without        *
+*  modification, are permitted provided that the following conditions are    *
+*  met:                                                                      *
+*                                                                            *
+*  *  Redistributions of source code must retain the above copyright         *
+*     notice, this list of conditions and the following disclaimer.          *
+*                                                                            *
+*  *  Redistributions in binary form must reproduce the above copyright      *
+*     notice, this list of conditions and the following disclaimer in the    *
+*     documentation and/or other materials provided with the                 *
+*     distribution.                                                          *
+*                                                                            *
+*  *  Neither the name of the Intel Corporation nor the names of its         *
+*     contributors may be used to endorse or promote products derived from   *
+*     this software without specific prior written permission.               *
+*                                                                            *
+*                                                                            *
+*  THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY          *
+*  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE         *
+*  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR        *
+*  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR            *
+*  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,     *
+*  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,       *
+*  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR        *
+*  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF    *
+*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING      *
+*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS        *
+*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              *
+*                                                                            *
+******************************************************************************
+* Developers and authors:                                                    *
+* Shay Gueron (1, 2), and Vlad Krasnov (1)                                   *
+* (1) Intel Corporation, Israel Development Center, Haifa, Israel            *
+* (2) University of Haifa, Israel                                            *
+*****************************************************************************/
+
+#ifndef RSAZ_EXP_H
+# define RSAZ_EXP_H
+
+# undef RSAZ_ENABLED
+# if defined(OPENSSL_BN_ASM_MONT) && \
+        (defined(__x86_64) || defined(__x86_64__) || \
+         defined(_M_AMD64) || defined(_M_X64))
+#  define RSAZ_ENABLED
+
+#  include <openssl/bn.h>
+
+void RSAZ_1024_mod_exp_avx2(BN_ULONG result[16],
+                            const BN_ULONG base_norm[16],
+                            const BN_ULONG exponent[16],
+                            const BN_ULONG m_norm[16], const BN_ULONG RR[16],
+                            BN_ULONG k0);
+int rsaz_avx2_eligible();
+
+void RSAZ_512_mod_exp(BN_ULONG result[8],
+                      const BN_ULONG base_norm[8], const BN_ULONG exponent[8],
+                      const BN_ULONG m_norm[8], BN_ULONG k0,
+                      const BN_ULONG RR[8]);
+
+# endif
+
+#endif

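The double gate above (compile-time RSAZ_ENABLED plus the run-time rsaz_avx2_eligible() probe) can be checked with a trivial standalone program; a sketch, assuming the header is on the include path.

#include <stdio.h>
#include "rsaz_exp.h"

int main(void)
{
#ifdef RSAZ_ENABLED
    /* compiled in on x86_64 with OPENSSL_BN_ASM_MONT; AVX2 must still be
     * confirmed on the CPU before RSAZ_1024_mod_exp_avx2 may be called */
    printf("RSAZ built; AVX2 path %s\n",
           rsaz_avx2_eligible() ? "eligible" : "unavailable");
#else
    printf("RSAZ not built for this target\n");
#endif
    return 0;
}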
+ 11 - 0
drivers/builtin_openssl2/crypto/buffer/buf_str.c

@@ -61,6 +61,15 @@
 #include <limits.h>
 #include <openssl/buffer.h>
 
+size_t BUF_strnlen(const char *str, size_t maxlen)
+{
+    const char *p;
+
+    for (p = str; maxlen-- != 0 && *p != '\0'; ++p) ;
+
+    return p - str;
+}
+
 char *BUF_strdup(const char *str)
 {
     if (str == NULL)
@@ -75,6 +84,8 @@ char *BUF_strndup(const char *str, size_t siz)
     if (str == NULL)
         return NULL;
 
+    siz = BUF_strnlen(str, siz);
+
     if (siz >= INT_MAX)
         return NULL;
 

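With the BUF_strnlen() clamp added above, BUF_strndup() reduces the size bound to the actual string length before the INT_MAX check and any allocation, so an oversized bound on a short string no longer over-allocates. A brief usage sketch; names other than the BUF_ and OPENSSL_ calls are ours.

#include <openssl/buffer.h>
#include <openssl/crypto.h>

static char *bounded_copy(const char *untrusted)
{
    /* copies at most 16 bytes, and never past the first NUL */
    char *copy = BUF_strndup(untrusted, 16);

    if (copy == NULL)
        return NULL;
    /* ... use copy; caller releases it with OPENSSL_free(copy) ... */
    return copy;
}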
+ 0 - 1138
drivers/builtin_openssl2/crypto/camellia/asm/cmll-x86.pl

@@ -1,1138 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Copyright (c) 2008 Andy Polyakov <[email protected]>
-#
-# This module may be used under the terms of either the GNU General
-# Public License version 2 or later, the GNU Lesser General Public
-# License version 2.1 or later, the Mozilla Public License version
-# 1.1 or the BSD License. The exact terms of either license are
-# distributed along with this module. For further details see
-# http://www.openssl.org/~appro/camellia/.
-# ====================================================================
-
-# Performance in cycles per processed byte (less is better) in
-# 'openssl speed ...' benchmark:
-#
-#			AMD K8	Core2	PIII	P4
-# -evp camellia-128-ecb	21.5	22.8	27.0	28.9
-# + over gcc 3.4.6	+90/11% +70/10%	+53/4%	+160/64%
-# + over icc 8.0	+48/19% +21/15%	+21/17%	+55/37%
-#
-# camellia-128-cbc	17.3	21.1	23.9	25.9
-#
-# 128-bit key setup	196	280	256	240	cycles/key
-# + over gcc 3.4.6	+30/0%	+17/11%	+11/0%	+63/40%
-# + over icc 8.0	+18/3%	+10/0%	+10/3%	+21/10%
-#
-# Pairs of numbers in "+" rows represent performance improvement over
-# compiler generated position-independent code, PIC, and non-PIC
-# respectively. PIC results are of greater relevance, as this module
-# is position-independent, i.e. suitable for a shared library or PIE.
-# Position independence "costs" one register, which is why compilers
-# are so close with non-PIC results, they have an extra register to
-# spare. CBC results are better than ECB ones thanks to "zero-copy"
-# private _x86_* interface, and are ~30-40% better than with compiler
-# generated cmll_cbc.o, and reach ~80-90% of x86_64 performance on
-# same CPU (where applicable).
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-$OPENSSL=1;
-
-&asm_init($ARGV[0],"cmll-586.pl",$ARGV[$#ARGV] eq "386");
-
-@T=("eax","ebx","ecx","edx");
-$idx="esi";
-$key="edi";
-$Tbl="ebp";
-
-# stack frame layout in _x86_Camellia_* routines, frame is allocated
-# by caller
-$__ra=&DWP(0,"esp");	# return address
-$__s0=&DWP(4,"esp");	# s0 backing store
-$__s1=&DWP(8,"esp");	# s1 backing store
-$__s2=&DWP(12,"esp");	# s2 backing store
-$__s3=&DWP(16,"esp");	# s3 backing store
-$__end=&DWP(20,"esp");	# pointer to end/start of key schedule
-
-# stack frame layout in Camellia_[en|crypt] routines, which differs from
-# above by 4 and overlaps by pointer to end/start of key schedule
-$_end=&DWP(16,"esp");
-$_esp=&DWP(20,"esp");
-
-# const unsigned int Camellia_SBOX[4][256];
-# Well, sort of... Camellia_SBOX[0][] is interleaved with [1][],
-# and [2][] - with [3][]. This is done to optimize code size.
-$SBOX1_1110=0;		# Camellia_SBOX[0]
-$SBOX4_4404=4;		# Camellia_SBOX[1]
-$SBOX2_0222=2048;	# Camellia_SBOX[2]
-$SBOX3_3033=2052;	# Camellia_SBOX[3]
-&static_label("Camellia_SIGMA");
-&static_label("Camellia_SBOX");
-
-sub Camellia_Feistel {
-my $i=@_[0];
-my $seed=defined(@_[1])?@_[1]:0;
-my $scale=$seed<0?-8:8;
-my $frame=defined(@_[2])?@_[2]:0;
-my $j=($i&1)*2;
-my $t0=@T[($j)%4],$t1=@T[($j+1)%4],$t2=@T[($j+2)%4],$t3=@T[($j+3)%4];
-
-	&xor	($t0,$idx);				# t0^=key[0]
-	&xor	($t1,&DWP($seed+$i*$scale+4,$key));	# t1^=key[1]
-	&movz	($idx,&HB($t0));			# (t0>>8)&0xff
-	&mov	($t3,&DWP($SBOX3_3033,$Tbl,$idx,8));	# t3=SBOX3_3033[0]
-	&movz	($idx,&LB($t0));			# (t0>>0)&0xff
-	&xor	($t3,&DWP($SBOX4_4404,$Tbl,$idx,8));	# t3^=SBOX4_4404[0]
-	&shr	($t0,16);
-	&movz	($idx,&LB($t1));			# (t1>>0)&0xff
-	&mov	($t2,&DWP($SBOX1_1110,$Tbl,$idx,8));	# t2=SBOX1_1110[1]
-	&movz	($idx,&HB($t0));			# (t0>>24)&0xff
-	&xor	($t3,&DWP($SBOX1_1110,$Tbl,$idx,8));	# t3^=SBOX1_1110[0]
-	&movz	($idx,&HB($t1));			# (t1>>8)&0xff
-	&xor	($t2,&DWP($SBOX4_4404,$Tbl,$idx,8));	# t2^=SBOX4_4404[1]
-	&shr	($t1,16);
-	&movz	($t0,&LB($t0));				# (t0>>16)&0xff
-	&xor	($t3,&DWP($SBOX2_0222,$Tbl,$t0,8));	# t3^=SBOX2_0222[0]
-	&movz	($idx,&HB($t1));			# (t1>>24)&0xff
-	&mov	($t0,&DWP($frame+4*(($j+3)%4),"esp"));	# prefetch "s3"
-	&xor	($t2,$t3);				# t2^=t3
-	&rotr	($t3,8);				# t3=RightRotate(t3,8)
-	&xor	($t2,&DWP($SBOX2_0222,$Tbl,$idx,8));	# t2^=SBOX2_0222[1]
-	&movz	($idx,&LB($t1));			# (t1>>16)&0xff
-	&mov	($t1,&DWP($frame+4*(($j+2)%4),"esp"));	# prefetch "s2"
-	&xor	($t3,$t0);				# t3^=s3
-	&xor	($t2,&DWP($SBOX3_3033,$Tbl,$idx,8));	# t2^=SBOX3_3033[1]
-	&mov	($idx,&DWP($seed+($i+1)*$scale,$key));	# prefetch key[i+1]
-	&xor	($t3,$t2);				# t3^=t2
-	&mov	(&DWP($frame+4*(($j+3)%4),"esp"),$t3);	# s3=t3
-	&xor	($t2,$t1);				# t2^=s2
-	&mov	(&DWP($frame+4*(($j+2)%4),"esp"),$t2);	# s2=t2
-}
-
-# void Camellia_EncryptBlock_Rounds(
-#		int grandRounds,
-#		const Byte plaintext[],
-#		const KEY_TABLE_TYPE keyTable,
-#		Byte ciphertext[])
-&function_begin("Camellia_EncryptBlock_Rounds");
-	&mov	("eax",&wparam(0));	# load grandRounds
-	&mov	($idx,&wparam(1));	# load plaintext pointer
-	&mov	($key,&wparam(2));	# load key schedule pointer
-
-	&mov	("ebx","esp");
-	&sub	("esp",7*4);		# place for s[0-3],keyEnd,esp and ra
-	&and	("esp",-64);
-
-	# place stack frame just "above mod 1024" the key schedule
-	# this ensures that cache associativity of 2 suffices
-	&lea	("ecx",&DWP(-64-63,$key));
-	&sub	("ecx","esp");
-	&neg	("ecx");
-	&and	("ecx",0x3C0);	# modulo 1024, but aligned to cache-line
-	&sub	("esp","ecx");
-	&add	("esp",4);	# 4 is reserved for callee's return address
-
-	&shl	("eax",6);
-	&lea	("eax",&DWP(0,$key,"eax"));
-	&mov	($_esp,"ebx");	# save %esp
-	&mov	($_end,"eax");	# save keyEnd
-
-	&call	(&label("pic_point"));
-	&set_label("pic_point");
-	&blindpop($Tbl);
-	&lea	($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
-
-	&mov	(@T[0],&DWP(0,$idx));	# load plaintext
-	&mov	(@T[1],&DWP(4,$idx));
-	&mov	(@T[2],&DWP(8,$idx));
-	&bswap	(@T[0]);
-	&mov	(@T[3],&DWP(12,$idx));
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-
-	&call	("_x86_Camellia_encrypt");
-
-	&mov	("esp",$_esp);
-	&bswap	(@T[0]);
-	&mov	($idx,&wparam(3));	# load ciphertext pointer
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-	&mov	(&DWP(0,$idx),@T[0]);	# write ciphertext
-	&mov	(&DWP(4,$idx),@T[1]);
-	&mov	(&DWP(8,$idx),@T[2]);
-	&mov	(&DWP(12,$idx),@T[3]);
-&function_end("Camellia_EncryptBlock_Rounds");
-# V1.x API
-&function_begin_B("Camellia_EncryptBlock");
-	&mov	("eax",128);
-	&sub	("eax",&wparam(0));	# load keyBitLength
-	&mov	("eax",3);
-	&adc	("eax",0);		# keyBitLength==128?3:4
-	&mov	(&wparam(0),"eax");
-	&jmp	(&label("Camellia_EncryptBlock_Rounds"));
-&function_end_B("Camellia_EncryptBlock");
-
-if ($OPENSSL) {
-# void Camellia_encrypt(
-#		const unsigned char *in,
-#		unsigned char *out,
-#		const CAMELLIA_KEY *key)
-&function_begin("Camellia_encrypt");
-	&mov	($idx,&wparam(0));	# load plaintext pointer
-	&mov	($key,&wparam(2));	# load key schedule pointer
-
-	&mov	("ebx","esp");
-	&sub	("esp",7*4);		# place for s[0-3],keyEnd,esp and ra
-	&and	("esp",-64);
-	&mov	("eax",&DWP(272,$key));	# load grandRounds counter
-
-	# place stack frame just "above mod 1024" the key schedule
-	# this ensures that cache associativity of 2 suffices
-	&lea	("ecx",&DWP(-64-63,$key));
-	&sub	("ecx","esp");
-	&neg	("ecx");
-	&and	("ecx",0x3C0);	# modulo 1024, but aligned to cache-line
-	&sub	("esp","ecx");
-	&add	("esp",4);	# 4 is reserved for callee's return address
-
-	&shl	("eax",6);
-	&lea	("eax",&DWP(0,$key,"eax"));
-	&mov	($_esp,"ebx");	# save %esp
-	&mov	($_end,"eax");	# save keyEnd
-
-	&call	(&label("pic_point"));
-	&set_label("pic_point");
-	&blindpop($Tbl);
-	&lea	($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
-
-	&mov	(@T[0],&DWP(0,$idx));	# load plaintext
-	&mov	(@T[1],&DWP(4,$idx));
-	&mov	(@T[2],&DWP(8,$idx));
-	&bswap	(@T[0]);
-	&mov	(@T[3],&DWP(12,$idx));
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-
-	&call	("_x86_Camellia_encrypt");
-
-	&mov	("esp",$_esp);
-	&bswap	(@T[0]);
-	&mov	($idx,&wparam(1));	# load ciphertext pointer
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-	&mov	(&DWP(0,$idx),@T[0]);	# write ciphertext
-	&mov	(&DWP(4,$idx),@T[1]);
-	&mov	(&DWP(8,$idx),@T[2]);
-	&mov	(&DWP(12,$idx),@T[3]);
-&function_end("Camellia_encrypt");
-}
-
-&function_begin_B("_x86_Camellia_encrypt");
-	&xor	(@T[0],&DWP(0,$key));	# ^=key[0-3]
-	&xor	(@T[1],&DWP(4,$key));
-	&xor	(@T[2],&DWP(8,$key));
-	&xor	(@T[3],&DWP(12,$key));
-	&mov	($idx,&DWP(16,$key));	# prefetch key[4]
-
-	&mov	($__s0,@T[0]);		# save s[0-3]
-	&mov	($__s1,@T[1]);
-	&mov	($__s2,@T[2]);
-	&mov	($__s3,@T[3]);
-
-&set_label("loop",16);
-	for ($i=0;$i<6;$i++) { Camellia_Feistel($i,16,4); }
-
-	&add	($key,16*4);
-	&cmp	($key,$__end);
-	&je	(&label("done"));
-
-	# @T[0-1] are preloaded, $idx is preloaded with key[0]
-	&and	($idx,@T[0]);
-	 &mov	 (@T[3],$__s3);
-	&rotl	($idx,1);
-	 &mov	 (@T[2],@T[3]);
-	&xor	(@T[1],$idx);
-	 &or	 (@T[2],&DWP(12,$key));
-	&mov	($__s1,@T[1]);		# s1^=LeftRotate(s0&key[0],1);
-	 &xor	 (@T[2],$__s2);
-
-	&mov	($idx,&DWP(4,$key));
-	 &mov	 ($__s2,@T[2]);		# s2^=s3|key[3];
-	&or	($idx,@T[1]);
-	 &and	 (@T[2],&DWP(8,$key));
-	&xor	(@T[0],$idx);
-	 &rotl	 (@T[2],1);
-	&mov	($__s0,@T[0]);		# s0^=s1|key[1];
-	 &xor	 (@T[3],@T[2]);
-	&mov	($idx,&DWP(16,$key));		# prefetch key[4]
-	 &mov	 ($__s3,@T[3]);		# s3^=LeftRotate(s2&key[2],1);
-	&jmp	(&label("loop"));
-
-&set_label("done",8);
-	&mov	(@T[2],@T[0]);		# SwapHalf
-	&mov	(@T[3],@T[1]);
-	&mov	(@T[0],$__s2);
-	&mov	(@T[1],$__s3);
-	&xor	(@T[0],$idx);		# $idx is preloaded with key[0]
-	&xor	(@T[1],&DWP(4,$key));
-	&xor	(@T[2],&DWP(8,$key));
-	&xor	(@T[3],&DWP(12,$key));
-	&ret	();
-&function_end_B("_x86_Camellia_encrypt");
-
-# void Camellia_DecryptBlock_Rounds(
-#		int grandRounds,
-#		const Byte ciphertext[],
-#		const KEY_TABLE_TYPE keyTable,
-#		Byte plaintext[])
-&function_begin("Camellia_DecryptBlock_Rounds");
-	&mov	("eax",&wparam(0));	# load grandRounds
-	&mov	($idx,&wparam(1));	# load ciphertext pointer
-	&mov	($key,&wparam(2));	# load key schedule pointer
-
-	&mov	("ebx","esp");
-	&sub	("esp",7*4);		# place for s[0-3],keyEnd,esp and ra
-	&and	("esp",-64);
-
-	# place stack frame just "above mod 1024" the key schedule
-	# this ensures that cache associativity of 2 suffices
-	&lea	("ecx",&DWP(-64-63,$key));
-	&sub	("ecx","esp");
-	&neg	("ecx");
-	&and	("ecx",0x3C0);	# modulo 1024, but aligned to cache-line
-	&sub	("esp","ecx");
-	&add	("esp",4);	# 4 is reserved for callee's return address
-
-	&shl	("eax",6);
-	&mov	(&DWP(4*4,"esp"),$key);	# save keyStart
-	&lea	($key,&DWP(0,$key,"eax"));
-	&mov	(&DWP(5*4,"esp"),"ebx");# save %esp
-
-	&call	(&label("pic_point"));
-	&set_label("pic_point");
-	&blindpop($Tbl);
-	&lea	($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
-
-	&mov	(@T[0],&DWP(0,$idx));	# load ciphertext
-	&mov	(@T[1],&DWP(4,$idx));
-	&mov	(@T[2],&DWP(8,$idx));
-	&bswap	(@T[0]);
-	&mov	(@T[3],&DWP(12,$idx));
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-
-	&call	("_x86_Camellia_decrypt");
-
-	&mov	("esp",&DWP(5*4,"esp"));
-	&bswap	(@T[0]);
-	&mov	($idx,&wparam(3));	# load plaintext pointer
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-	&mov	(&DWP(0,$idx),@T[0]);	# write plaintext
-	&mov	(&DWP(4,$idx),@T[1]);
-	&mov	(&DWP(8,$idx),@T[2]);
-	&mov	(&DWP(12,$idx),@T[3]);
-&function_end("Camellia_DecryptBlock_Rounds");
-# V1.x API
-&function_begin_B("Camellia_DecryptBlock");
-	&mov	("eax",128);
-	&sub	("eax",&wparam(0));	# load keyBitLength
-	&mov	("eax",3);
-	&adc	("eax",0);		# keyBitLength==128?3:4
-	&mov	(&wparam(0),"eax");
-	&jmp	(&label("Camellia_DecryptBlock_Rounds"));
-&function_end_B("Camellia_DecryptBlock");
-
-if ($OPENSSL) {
-# void Camellia_decrypt(
-#		const unsigned char *in,
-#		unsigned char *out,
-#		const CAMELLIA_KEY *key)
-&function_begin("Camellia_decrypt");
-	&mov	($idx,&wparam(0));	# load ciphertext pointer
-	&mov	($key,&wparam(2));	# load key schedule pointer
-
-	&mov	("ebx","esp");
-	&sub	("esp",7*4);		# place for s[0-3],keyEnd,esp and ra
-	&and	("esp",-64);
-	&mov	("eax",&DWP(272,$key));	# load grandRounds counter
-
-	# place stack frame just "above mod 1024" the key schedule
-	# this ensures that cache associativity of 2 suffices
-	&lea	("ecx",&DWP(-64-63,$key));
-	&sub	("ecx","esp");
-	&neg	("ecx");
-	&and	("ecx",0x3C0);	# modulo 1024, but aligned to cache-line
-	&sub	("esp","ecx");
-	&add	("esp",4);	# 4 is reserved for callee's return address
-
-	&shl	("eax",6);
-	&mov	(&DWP(4*4,"esp"),$key);	# save keyStart
-	&lea	($key,&DWP(0,$key,"eax"));
-	&mov	(&DWP(5*4,"esp"),"ebx");# save %esp
-
-	&call	(&label("pic_point"));
-	&set_label("pic_point");
-	&blindpop($Tbl);
-	&lea	($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
-
-	&mov	(@T[0],&DWP(0,$idx));	# load ciphertext
-	&mov	(@T[1],&DWP(4,$idx));
-	&mov	(@T[2],&DWP(8,$idx));
-	&bswap	(@T[0]);
-	&mov	(@T[3],&DWP(12,$idx));
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-
-	&call	("_x86_Camellia_decrypt");
-
-	&mov	("esp",&DWP(5*4,"esp"));
-	&bswap	(@T[0]);
-	&mov	($idx,&wparam(1));	# load plaintext pointer
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-	&mov	(&DWP(0,$idx),@T[0]);	# write plaintext
-	&mov	(&DWP(4,$idx),@T[1]);
-	&mov	(&DWP(8,$idx),@T[2]);
-	&mov	(&DWP(12,$idx),@T[3]);
-&function_end("Camellia_decrypt");
-}
-
-&function_begin_B("_x86_Camellia_decrypt");
-	&xor	(@T[0],&DWP(0,$key));	# ^=key[0-3]
-	&xor	(@T[1],&DWP(4,$key));
-	&xor	(@T[2],&DWP(8,$key));
-	&xor	(@T[3],&DWP(12,$key));
-	&mov	($idx,&DWP(-8,$key));	# prefetch key[-2]
-
-	&mov	($__s0,@T[0]);		# save s[0-3]
-	&mov	($__s1,@T[1]);
-	&mov	($__s2,@T[2]);
-	&mov	($__s3,@T[3]);
-
-&set_label("loop",16);
-	for ($i=0;$i<6;$i++) { Camellia_Feistel($i,-8,4); }
-
-	&sub	($key,16*4);
-	&cmp	($key,$__end);
-	&je	(&label("done"));
-
-	# @T[0-1] are preloaded, $idx is preloaded with key[2]
-	&and	($idx,@T[0]);
-	 &mov	 (@T[3],$__s3);
-	&rotl	($idx,1);
-	 &mov	 (@T[2],@T[3]);
-	&xor	(@T[1],$idx);
-	 &or	 (@T[2],&DWP(4,$key));
-	&mov	($__s1,@T[1]);		# s1^=LeftRotate(s0&key[0],1);
-	 &xor	 (@T[2],$__s2);
-
-	&mov	($idx,&DWP(12,$key));
-	 &mov	 ($__s2,@T[2]);		# s2^=s3|key[3];
-	&or	($idx,@T[1]);
-	 &and	 (@T[2],&DWP(0,$key));
-	&xor	(@T[0],$idx);
-	 &rotl	 (@T[2],1);
-	&mov	($__s0,@T[0]);		# s0^=s1|key[1];
-	 &xor	 (@T[3],@T[2]);
-	&mov	($idx,&DWP(-8,$key));	# prefetch key[-2]
-	 &mov	 ($__s3,@T[3]);		# s3^=LeftRotate(s2&key[2],1);
-	&jmp	(&label("loop"));
-
-&set_label("done",8);
-	&mov	(@T[2],@T[0]);		# SwapHalf
-	&mov	(@T[3],@T[1]);
-	&mov	(@T[0],$__s2);
-	&mov	(@T[1],$__s3);
-	&xor	(@T[2],$idx);		# $idx is preloaded with key[2]
-	&xor	(@T[3],&DWP(12,$key));
-	&xor	(@T[0],&DWP(0,$key));
-	&xor	(@T[1],&DWP(4,$key));
-	&ret	();
-&function_end_B("_x86_Camellia_decrypt");
-
-# shld is very slow on Intel P4 family. Even on AMD it limits
-# instruction decode rate [because it's VectorPath] and consequently
-# performance. PIII, PM and Core[2] seem to be the only ones which
-# execute this code ~7% faster...
-sub __rotl128 {
-  my ($i0,$i1,$i2,$i3,$rot,$rnd,@T)=@_;
-
-    $rnd *= 2;
-    if ($rot) {
-	&mov	($idx,$i0);
-	&shld	($i0,$i1,$rot);
-	&shld	($i1,$i2,$rot);
-	&shld	($i2,$i3,$rot);
-	&shld	($i3,$idx,$rot);
-    }
-    &mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i0 eq @T[0]);
-    &mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i1 eq @T[0]);
-    &mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i2 eq @T[0]);
-    &mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i3 eq @T[0]);
-}
-
-# ... Implementing 128-bit rotate without shld gives >3x performance
-# improvement on P4, only ~7% degradation on other Intel CPUs and
-# not worse performance on AMD. This is therefore preferred.
-sub _rotl128 {
-  my ($i0,$i1,$i2,$i3,$rot,$rnd,@T)=@_;
-
-    $rnd *= 2;
-    if ($rot) {
-	&mov	($Tbl,$i0);
-	&shl	($i0,$rot);
-	&mov	($idx,$i1);
-	&shr	($idx,32-$rot);
-	&shl	($i1,$rot);
-	&or	($i0,$idx);
-	&mov	($idx,$i2);
-	&shl	($i2,$rot);
-	&mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i0 eq @T[0]);
-	&shr	($idx,32-$rot);
-	&or	($i1,$idx);
-	&shr	($Tbl,32-$rot);
-	&mov	($idx,$i3);
-	&shr	($idx,32-$rot);
-	&mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i1 eq @T[0]);
-	&shl	($i3,$rot);
-	&or	($i2,$idx);
-	&or	($i3,$Tbl);
-	&mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i2 eq @T[0]);
-	&mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i3 eq @T[0]);
-    } else {
-	&mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i0 eq @T[0]);
-	&mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i1 eq @T[0]);
-	&mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i2 eq @T[0]);
-	&mov	(&DWP(-128+4*$rnd++,$key),shift(@T))	if ($i3 eq @T[0]);
-    }
-}
-
-sub _saveround {
-my ($rnd,$key,@T)=@_;
-my $bias=int(@T[0])?shift(@T):0;
-
-	&mov	(&DWP($bias+$rnd*8+0,$key),@T[0]);
-	&mov	(&DWP($bias+$rnd*8+4,$key),@T[1])	if ($#T>=1);
-	&mov	(&DWP($bias+$rnd*8+8,$key),@T[2])	if ($#T>=2);
-	&mov	(&DWP($bias+$rnd*8+12,$key),@T[3])	if ($#T>=3);
-}
-
-sub _loadround {
-my ($rnd,$key,@T)=@_;
-my $bias=int(@T[0])?shift(@T):0;
-
-	&mov	(@T[0],&DWP($bias+$rnd*8+0,$key));
-	&mov	(@T[1],&DWP($bias+$rnd*8+4,$key))	if ($#T>=1);
-	&mov	(@T[2],&DWP($bias+$rnd*8+8,$key))	if ($#T>=2);
-	&mov	(@T[3],&DWP($bias+$rnd*8+12,$key))	if ($#T>=3);
-}
-
-# void Camellia_Ekeygen(
-#		const int keyBitLength,
-#		const Byte *rawKey,
-#		KEY_TABLE_TYPE keyTable)
-&function_begin("Camellia_Ekeygen");
-{ my $step=0;
-
-	&stack_push(4);				# place for s[0-3]
-
-	&mov	($Tbl,&wparam(0));		# load arguments
-	&mov	($idx,&wparam(1));
-	&mov	($key,&wparam(2));
-
-	&mov	(@T[0],&DWP(0,$idx));		# load 0-127 bits
-	&mov	(@T[1],&DWP(4,$idx));
-	&mov	(@T[2],&DWP(8,$idx));
-	&mov	(@T[3],&DWP(12,$idx));
-
-	&bswap	(@T[0]);
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-
-	&_saveround	(0,$key,@T);		# KL<<<0
-
-	&cmp	($Tbl,128);
-	&je	(&label("1st128"));
-
-	&mov	(@T[0],&DWP(16,$idx));		# load 128-191 bits
-	&mov	(@T[1],&DWP(20,$idx));
-	&cmp	($Tbl,192);
-	&je	(&label("1st192"));
-	&mov	(@T[2],&DWP(24,$idx));		# load 192-255 bits
-	&mov	(@T[3],&DWP(28,$idx));
-	&jmp	(&label("1st256"));
-&set_label("1st192",4);
-	&mov	(@T[2],@T[0]);
-	&mov	(@T[3],@T[1]);
-	&not	(@T[2]);
-	&not	(@T[3]);
-&set_label("1st256",4);
-	&bswap	(@T[0]);
-	&bswap	(@T[1]);
-	&bswap	(@T[2]);
-	&bswap	(@T[3]);
-
-	&_saveround	(4,$key,@T);		# temporary storage for KR!
-
-	&xor	(@T[0],&DWP(0*8+0,$key));	# KR^KL
-	&xor	(@T[1],&DWP(0*8+4,$key));
-	&xor	(@T[2],&DWP(1*8+0,$key));
-	&xor	(@T[3],&DWP(1*8+4,$key));
-
-&set_label("1st128",4);
-	&call	(&label("pic_point"));
-	&set_label("pic_point");
-	&blindpop($Tbl);
-	&lea	($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
-	&lea	($key,&DWP(&label("Camellia_SIGMA")."-".&label("Camellia_SBOX"),$Tbl));
-
-	&mov	($idx,&DWP($step*8,$key));	# prefetch SIGMA[0]
-	&mov	(&swtmp(0),@T[0]);		# save s[0-3]
-	&mov	(&swtmp(1),@T[1]);
-	&mov	(&swtmp(2),@T[2]);
-	&mov	(&swtmp(3),@T[3]);
-	&Camellia_Feistel($step++);
-	&Camellia_Feistel($step++);
-	&mov	(@T[2],&swtmp(2));
-	&mov	(@T[3],&swtmp(3));
-
-	&mov	($idx,&wparam(2));
-	&xor	(@T[0],&DWP(0*8+0,$idx));	# ^KL
-	&xor	(@T[1],&DWP(0*8+4,$idx));
-	&xor	(@T[2],&DWP(1*8+0,$idx));
-	&xor	(@T[3],&DWP(1*8+4,$idx));
-
-	&mov	($idx,&DWP($step*8,$key));	# prefetch SIGMA[4]
-	&mov	(&swtmp(0),@T[0]);		# save s[0-3]
-	&mov	(&swtmp(1),@T[1]);
-	&mov	(&swtmp(2),@T[2]);
-	&mov	(&swtmp(3),@T[3]);
-	&Camellia_Feistel($step++);
-	&Camellia_Feistel($step++);
-	&mov	(@T[2],&swtmp(2));
-	&mov	(@T[3],&swtmp(3));
-
-	&mov	($idx,&wparam(0));
-	&cmp	($idx,128);
-	&jne	(&label("2nd256"));
-
-	&mov	($key,&wparam(2));
-	&lea	($key,&DWP(128,$key));		# size optimization
-
-	####### process KA
-	&_saveround	(2,$key,-128,@T);	# KA<<<0
-	&_rotl128	(@T,15,6,@T);		# KA<<<15
-	&_rotl128	(@T,15,8,@T);		# KA<<<(15+15=30)
-	&_rotl128	(@T,15,12,@T[0],@T[1]);	# KA<<<(30+15=45)
-	&_rotl128	(@T,15,14,@T);		# KA<<<(45+15=60)
-	push		(@T,shift(@T));		# rotl128(@T,32);
-	&_rotl128	(@T,2,20,@T);		# KA<<<(60+32+2=94)
-	&_rotl128	(@T,17,24,@T);		# KA<<<(94+17=111)
-
-	####### process KL
-	&_loadround	(0,$key,-128,@T);	# load KL
-	&_rotl128	(@T,15,4,@T);		# KL<<<15
-	&_rotl128	(@T,30,10,@T);		# KL<<<(15+30=45)
-	&_rotl128	(@T,15,13,@T[2],@T[3]);	# KL<<<(45+15=60)
-	&_rotl128	(@T,17,16,@T);		# KL<<<(60+17=77)
-	&_rotl128	(@T,17,18,@T);		# KL<<<(77+17=94)
-	&_rotl128	(@T,17,22,@T);		# KL<<<(94+17=111)
-
-	while (@T[0] ne "eax")			# restore order
-	{   unshift	(@T,pop(@T));   }
-
-	&mov	("eax",3);			# 3 grandRounds
-	&jmp	(&label("done"));
-
-&set_label("2nd256",16);
-	&mov	($idx,&wparam(2));
-	&_saveround	(6,$idx,@T);		# temporary storage for KA!
-
-	&xor	(@T[0],&DWP(4*8+0,$idx));	# KA^KR
-	&xor	(@T[1],&DWP(4*8+4,$idx));
-	&xor	(@T[2],&DWP(5*8+0,$idx));
-	&xor	(@T[3],&DWP(5*8+4,$idx));
-
-	&mov	($idx,&DWP($step*8,$key));	# prefetch SIGMA[8]
-	&mov	(&swtmp(0),@T[0]);		# save s[0-3]
-	&mov	(&swtmp(1),@T[1]);
-	&mov	(&swtmp(2),@T[2]);
-	&mov	(&swtmp(3),@T[3]);
-	&Camellia_Feistel($step++);
-	&Camellia_Feistel($step++);
-	&mov	(@T[2],&swtmp(2));
-	&mov	(@T[3],&swtmp(3));
-
-	&mov	($key,&wparam(2));
-	&lea	($key,&DWP(128,$key));		# size optimization
-
-	####### process KB
-	&_saveround	(2,$key,-128,@T);	# KB<<<0
-	&_rotl128	(@T,30,10,@T);		# KB<<<30
-	&_rotl128	(@T,30,20,@T);		# KB<<<(30+30=60)
-	push		(@T,shift(@T));		# rotl128(@T,32);
-	&_rotl128	(@T,19,32,@T);		# KB<<<(60+32+19=111)
-
-	####### process KR
-	&_loadround	(4,$key,-128,@T);	# load KR
-	&_rotl128	(@T,15,4,@T);		# KR<<<15
-	&_rotl128	(@T,15,8,@T);		# KR<<<(15+15=30)
-	&_rotl128	(@T,30,18,@T);		# KR<<<(30+30=60)
-	push		(@T,shift(@T));		# rotl128(@T,32);
-	&_rotl128	(@T,2,26,@T);		# KR<<<(60+32+2=94)
-
-	####### process KA
-	&_loadround	(6,$key,-128,@T);	# load KA
-	&_rotl128	(@T,15,6,@T);		# KA<<<15
-	&_rotl128	(@T,30,14,@T);		# KA<<<(15+30=45)
-	push		(@T,shift(@T));		# rotl128(@T,32);
-	&_rotl128	(@T,0,24,@T);		# KA<<<(45+32+0=77)
-	&_rotl128	(@T,17,28,@T);		# KA<<<(77+17=94)
-
-	####### process KL
-	&_loadround	(0,$key,-128,@T);	# load KL
-	push		(@T,shift(@T));		# rotl128(@T,32);
-	&_rotl128	(@T,13,12,@T);		# KL<<<(32+13=45)
-	&_rotl128	(@T,15,16,@T);		# KL<<<(45+15=60)
-	&_rotl128	(@T,17,22,@T);		# KL<<<(60+17=77)
-	push		(@T,shift(@T));		# rotl128(@T,32);
-	&_rotl128	(@T,2,30,@T);		# KL<<<(77+32+2=111)
-
-	while (@T[0] ne "eax")			# restore order
-	{   unshift	(@T,pop(@T));   }
-
-	&mov	("eax",4);			# 4 grandRounds
-&set_label("done");
-	&lea	("edx",&DWP(272-128,$key));	# end of key schedule
-	&stack_pop(4);
-}
-&function_end("Camellia_Ekeygen");
-
-if ($OPENSSL) {
-# int private_Camellia_set_key (
-#		const unsigned char *userKey,
-#		int bits,
-#		CAMELLIA_KEY *key)
-&function_begin_B("private_Camellia_set_key");
-	&push	("ebx");
-	&mov	("ecx",&wparam(0));	# pull arguments
-	&mov	("ebx",&wparam(1));
-	&mov	("edx",&wparam(2));
-
-	&mov	("eax",-1);
-	&test	("ecx","ecx");
-	&jz	(&label("done"));	# userKey==NULL?
-	&test	("edx","edx");
-	&jz	(&label("done"));	# key==NULL?
-
-	&mov	("eax",-2);
-	&cmp	("ebx",256);
-	&je	(&label("arg_ok"));	# bits==256?
-	&cmp	("ebx",192);
-	&je	(&label("arg_ok"));	# bits==192?
-	&cmp	("ebx",128);
-	&jne	(&label("done"));	# bits!=128?
-&set_label("arg_ok",4);
-
-	&push	("edx");		# push arguments
-	&push	("ecx");
-	&push	("ebx");
-	&call	("Camellia_Ekeygen");
-	&stack_pop(3);
-
-	# eax holds grandRounds and edx points at where to put it
-	&mov	(&DWP(0,"edx"),"eax");
-	&xor	("eax","eax");
-&set_label("done",4);
-	&pop	("ebx");
-	&ret	();
-&function_end_B("private_Camellia_set_key");
-}
-
-@SBOX=(
-112,130, 44,236,179, 39,192,229,228,133, 87, 53,234, 12,174, 65,
- 35,239,107,147, 69, 25,165, 33,237, 14, 79, 78, 29,101,146,189,
-134,184,175,143,124,235, 31,206, 62, 48,220, 95, 94,197, 11, 26,
-166,225, 57,202,213, 71, 93, 61,217,  1, 90,214, 81, 86,108, 77,
-139, 13,154,102,251,204,176, 45,116, 18, 43, 32,240,177,132,153,
-223, 76,203,194, 52,126,118,  5,109,183,169, 49,209, 23,  4,215,
- 20, 88, 58, 97,222, 27, 17, 28, 50, 15,156, 22, 83, 24,242, 34,
-254, 68,207,178,195,181,122,145, 36,  8,232,168, 96,252,105, 80,
-170,208,160,125,161,137, 98,151, 84, 91, 30,149,224,255,100,210,
- 16,196,  0, 72,163,247,117,219,138,  3,230,218,  9, 63,221,148,
-135, 92,131,  2,205, 74,144, 51,115,103,246,243,157,127,191,226,
- 82,155,216, 38,200, 55,198, 59,129,150,111, 75, 19,190, 99, 46,
-233,121,167,140,159,110,188,142, 41,245,249,182, 47,253,180, 89,
-120,152,  6,106,231, 70,113,186,212, 37,171, 66,136,162,141,250,
-114,  7,185, 85,248,238,172, 10, 54, 73, 42,104, 60, 56,241,164,
- 64, 40,211,123,187,201, 67,193, 21,227,173,244,119,199,128,158);
-
-sub S1110 { my $i=shift; $i=@SBOX[$i]; return $i<<24|$i<<16|$i<<8; }
-sub S4404 { my $i=shift; $i=($i<<1|$i>>7)&0xff; $i=@SBOX[$i]; return $i<<24|$i<<16|$i; }	
-sub S0222 { my $i=shift; $i=@SBOX[$i]; $i=($i<<1|$i>>7)&0xff; return $i<<16|$i<<8|$i; }	
-sub S3033 { my $i=shift; $i=@SBOX[$i]; $i=($i>>1|$i<<7)&0xff; return $i<<24|$i<<8|$i; }	
-
-&set_label("Camellia_SIGMA",64);
-&data_word(
-    0xa09e667f, 0x3bcc908b, 0xb67ae858, 0x4caa73b2,
-    0xc6ef372f, 0xe94f82be, 0x54ff53a5, 0xf1d36f1c,
-    0x10e527fa, 0xde682d1d, 0xb05688c2, 0xb3e6c1fd,
-    0,          0,          0,          0);
-&set_label("Camellia_SBOX",64);
-# tables are interleaved, remember?
-for ($i=0;$i<256;$i++) { &data_word(&S1110($i),&S4404($i)); }
-for ($i=0;$i<256;$i++) { &data_word(&S0222($i),&S3033($i)); }
-
-# void Camellia_cbc_encrypt (const unsigned char *inp, unsigned char *out,
-#			size_t length, const CAMELLIA_KEY *key,
-#			unsigned char *ivp,const int enc);
-{
-# stack frame layout
-#             -4(%esp)		# return address	 0(%esp)
-#              0(%esp)		# s0			 4(%esp)
-#              4(%esp)		# s1			 8(%esp)
-#              8(%esp)		# s2			12(%esp)
-#             12(%esp)		# s3			16(%esp)
-#             16(%esp)		# end of key schedule	20(%esp)
-#             20(%esp)		# %esp backup
-my $_inp=&DWP(24,"esp");	#copy of wparam(0)
-my $_out=&DWP(28,"esp");	#copy of wparam(1)
-my $_len=&DWP(32,"esp");	#copy of wparam(2)
-my $_key=&DWP(36,"esp");	#copy of wparam(3)
-my $_ivp=&DWP(40,"esp");	#copy of wparam(4)
-my $ivec=&DWP(44,"esp");	#ivec[16]
-my $_tmp=&DWP(44,"esp");	#volatile variable [yes, aliases with ivec]
-my ($s0,$s1,$s2,$s3) = @T;
-
-&function_begin("Camellia_cbc_encrypt");
-	&mov	($s2 eq "ecx"? $s2 : "",&wparam(2));	# load len
-	&cmp	($s2,0);
-	&je	(&label("enc_out"));
-
-	&pushf	();
-	&cld	();
-
-	&mov	($s0,&wparam(0));	# load inp
-	&mov	($s1,&wparam(1));	# load out
-	#&mov	($s2,&wparam(2));	# load len
-	&mov	($s3,&wparam(3));	# load key
-	&mov	($Tbl,&wparam(4));	# load ivp
-
-	# allocate aligned stack frame...
-	&lea	($idx,&DWP(-64,"esp"));
-	&and	($idx,-64);
-
-	# place stack frame just "above mod 1024" the key schedule
-	# this ensures that cache associativity of 2 suffices
-	&lea	($key,&DWP(-64-63,$s3));
-	&sub	($key,$idx);
-	&neg	($key);
-	&and	($key,0x3C0);	# modulo 1024, but aligned to cache-line
-	&sub	($idx,$key);
-
-	&mov	($key,&wparam(5));	# load enc
-
-	&exch	("esp",$idx);
-	&add	("esp",4);		# reserve for return address!
-	&mov	($_esp,$idx);		# save %esp
-
-	&mov	($_inp,$s0);		# save copy of inp
-	&mov	($_out,$s1);		# save copy of out
-	&mov	($_len,$s2);		# save copy of len
-	&mov	($_key,$s3);		# save copy of key
-	&mov	($_ivp,$Tbl);		# save copy of ivp
-
-	&call   (&label("pic_point"));	# make it PIC!
-	&set_label("pic_point");
-	&blindpop($Tbl);
-	&lea    ($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
-
-	&mov	($idx,32);
-	&set_label("prefetch_sbox",4);
-		&mov	($s0,&DWP(0,$Tbl));
-		&mov	($s1,&DWP(32,$Tbl));
-		&mov	($s2,&DWP(64,$Tbl));
-		&mov	($s3,&DWP(96,$Tbl));
-		&lea	($Tbl,&DWP(128,$Tbl));
-		&dec	($idx);
-	&jnz	(&label("prefetch_sbox"));
-	&mov	($s0,$_key);
-	&sub	($Tbl,4096);
-	&mov	($idx,$_inp);
-	&mov	($s3,&DWP(272,$s0));		# load grandRounds
-
-	&cmp	($key,0);
-	&je	(&label("DECRYPT"));
-
-	&mov	($s2,$_len);
-	&mov	($key,$_ivp);
-	&shl	($s3,6);
-	&lea	($s3,&DWP(0,$s0,$s3));
-	&mov	($_end,$s3);
-
-	&test	($s2,0xFFFFFFF0);
-	&jz	(&label("enc_tail"));		# short input...
-
-	&mov	($s0,&DWP(0,$key));		# load iv
-	&mov	($s1,&DWP(4,$key));
-
-	&set_label("enc_loop",4);
-		&mov	($s2,&DWP(8,$key));
-		&mov	($s3,&DWP(12,$key));
-
-		&xor	($s0,&DWP(0,$idx));	# xor input data
-		&xor	($s1,&DWP(4,$idx));
-		&xor	($s2,&DWP(8,$idx));
-		&bswap	($s0);
-		&xor	($s3,&DWP(12,$idx));
-		&bswap	($s1);
-		&mov	($key,$_key);		# load key
-		&bswap	($s2);
-		&bswap	($s3);
-
-		&call	("_x86_Camellia_encrypt");
-
-		&mov	($idx,$_inp);		# load inp
-		&mov	($key,$_out);		# load out
-
-		&bswap	($s0);
-		&bswap	($s1);
-		&bswap	($s2);
-		&mov	(&DWP(0,$key),$s0);	# save output data
-		&bswap	($s3);
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&mov	($s2,$_len);		# load len
-
-		&lea	($idx,&DWP(16,$idx));
-		&mov	($_inp,$idx);		# save inp
-
-		&lea	($s3,&DWP(16,$key));
-		&mov	($_out,$s3);		# save out
-
-		&sub	($s2,16);
-		&test	($s2,0xFFFFFFF0);
-		&mov	($_len,$s2);		# save len
-	&jnz	(&label("enc_loop"));
-	&test	($s2,15);
-	&jnz	(&label("enc_tail"));
-	&mov	($idx,$_ivp);		# load ivp
-	&mov	($s2,&DWP(8,$key));	# restore last dwords
-	&mov	($s3,&DWP(12,$key));
-	&mov	(&DWP(0,$idx),$s0);	# save ivec
-	&mov	(&DWP(4,$idx),$s1);
-	&mov	(&DWP(8,$idx),$s2);
-	&mov	(&DWP(12,$idx),$s3);
-
-	&mov	("esp",$_esp);
-	&popf	();
-    &set_label("enc_out");
-	&function_end_A();
-	&pushf	();			# kludge, never executed
-
-    &set_label("enc_tail",4);
-	&mov	($s0,$key eq "edi" ? $key : "");
-	&mov	($key,$_out);			# load out
-	&push	($s0);				# push ivp
-	&mov	($s1,16);
-	&sub	($s1,$s2);
-	&cmp	($key,$idx);			# compare with inp
-	&je	(&label("enc_in_place"));
-	&align	(4);
-	&data_word(0xA4F3F689);	# rep movsb	# copy input
-	&jmp	(&label("enc_skip_in_place"));
-    &set_label("enc_in_place");
-	&lea	($key,&DWP(0,$key,$s2));
-    &set_label("enc_skip_in_place");
-	&mov	($s2,$s1);
-	&xor	($s0,$s0);
-	&align	(4);
-	&data_word(0xAAF3F689);	# rep stosb	# zero tail
-	&pop	($key);				# pop ivp
-
-	&mov	($idx,$_out);			# output as input
-	&mov	($s0,&DWP(0,$key));
-	&mov	($s1,&DWP(4,$key));
-	&mov	($_len,16);			# len=16
-	&jmp	(&label("enc_loop"));		# one more spin...
-
-#----------------------------- DECRYPT -----------------------------#
-&set_label("DECRYPT",16);
-	&shl	($s3,6);
-	&lea	($s3,&DWP(0,$s0,$s3));
-	&mov	($_end,$s0);
-	&mov	($_key,$s3);
-
-	&cmp	($idx,$_out);
-	&je	(&label("dec_in_place"));	# in-place processing...
-
-	&mov	($key,$_ivp);			# load ivp
-	&mov	($_tmp,$key);
-
-	&set_label("dec_loop",4);
-		&mov	($s0,&DWP(0,$idx));	# read input
-		&mov	($s1,&DWP(4,$idx));
-		&mov	($s2,&DWP(8,$idx));
-		&bswap	($s0);
-		&mov	($s3,&DWP(12,$idx));
-		&bswap	($s1);
-		&mov	($key,$_key);		# load key
-		&bswap	($s2);
-		&bswap	($s3);
-
-		&call	("_x86_Camellia_decrypt");
-
-		&mov	($key,$_tmp);		# load ivp
-		&mov	($idx,$_len);		# load len
-
-		&bswap	($s0);
-		&bswap	($s1);
-		&bswap	($s2);
-		&xor	($s0,&DWP(0,$key));	# xor iv
-		&bswap	($s3);
-		&xor	($s1,&DWP(4,$key));
-		&xor	($s2,&DWP(8,$key));
-		&xor	($s3,&DWP(12,$key));
-
-		&sub	($idx,16);
-		&jc	(&label("dec_partial"));
-		&mov	($_len,$idx);		# save len
-		&mov	($idx,$_inp);		# load inp
-		&mov	($key,$_out);		# load out
-
-		&mov	(&DWP(0,$key),$s0);	# write output
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&mov	($_tmp,$idx);		# save ivp
-		&lea	($idx,&DWP(16,$idx));
-		&mov	($_inp,$idx);		# save inp
-
-		&lea	($key,&DWP(16,$key));
-		&mov	($_out,$key);		# save out
-
-	&jnz	(&label("dec_loop"));
-	&mov	($key,$_tmp);		# load temp ivp
-    &set_label("dec_end");
-	&mov	($idx,$_ivp);		# load user ivp
-	&mov	($s0,&DWP(0,$key));	# load iv
-	&mov	($s1,&DWP(4,$key));
-	&mov	($s2,&DWP(8,$key));
-	&mov	($s3,&DWP(12,$key));
-	&mov	(&DWP(0,$idx),$s0);	# copy back to user
-	&mov	(&DWP(4,$idx),$s1);
-	&mov	(&DWP(8,$idx),$s2);
-	&mov	(&DWP(12,$idx),$s3);
-	&jmp	(&label("dec_out"));
-
-    &set_label("dec_partial",4);
-	&lea	($key,$ivec);
-	&mov	(&DWP(0,$key),$s0);	# dump output to stack
-	&mov	(&DWP(4,$key),$s1);
-	&mov	(&DWP(8,$key),$s2);
-	&mov	(&DWP(12,$key),$s3);
-	&lea	($s2 eq "ecx" ? $s2 : "",&DWP(16,$idx));
-	&mov	($idx eq "esi" ? $idx : "",$key);
-	&mov	($key eq "edi" ? $key : "",$_out);	# load out
-	&data_word(0xA4F3F689);	# rep movsb		# copy output
-	&mov	($key,$_inp);				# use inp as temp ivp
-	&jmp	(&label("dec_end"));
-
-    &set_label("dec_in_place",4);
-	&set_label("dec_in_place_loop");
-		&lea	($key,$ivec);
-		&mov	($s0,&DWP(0,$idx));	# read input
-		&mov	($s1,&DWP(4,$idx));
-		&mov	($s2,&DWP(8,$idx));
-		&mov	($s3,&DWP(12,$idx));
-
-		&mov	(&DWP(0,$key),$s0);	# copy to temp
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&bswap	($s0);
-		&mov	(&DWP(12,$key),$s3);
-		&bswap	($s1);
-		&mov	($key,$_key);		# load key
-		&bswap	($s2);
-		&bswap	($s3);
-
-		&call	("_x86_Camellia_decrypt");
-
-		&mov	($key,$_ivp);		# load ivp
-		&mov	($idx,$_out);		# load out
-
-		&bswap	($s0);
-		&bswap	($s1);
-		&bswap	($s2);
-		&xor	($s0,&DWP(0,$key));	# xor iv
-		&bswap	($s3);
-		&xor	($s1,&DWP(4,$key));
-		&xor	($s2,&DWP(8,$key));
-		&xor	($s3,&DWP(12,$key));
-
-		&mov	(&DWP(0,$idx),$s0);	# write output
-		&mov	(&DWP(4,$idx),$s1);
-		&mov	(&DWP(8,$idx),$s2);
-		&mov	(&DWP(12,$idx),$s3);
-
-		&lea	($idx,&DWP(16,$idx));
-		&mov	($_out,$idx);		# save out
-
-		&lea	($idx,$ivec);
-		&mov	($s0,&DWP(0,$idx));	# read temp
-		&mov	($s1,&DWP(4,$idx));
-		&mov	($s2,&DWP(8,$idx));
-		&mov	($s3,&DWP(12,$idx));
-
-		&mov	(&DWP(0,$key),$s0);	# copy iv
-		&mov	(&DWP(4,$key),$s1);
-		&mov	(&DWP(8,$key),$s2);
-		&mov	(&DWP(12,$key),$s3);
-
-		&mov	($idx,$_inp);		# load inp
-
-		&lea	($idx,&DWP(16,$idx));
-		&mov	($_inp,$idx);		# save inp
-
-		&mov	($s2,$_len);		# load len
-		&sub	($s2,16);
-		&jc	(&label("dec_in_place_partial"));
-		&mov	($_len,$s2);		# save len
-	&jnz	(&label("dec_in_place_loop"));
-	&jmp	(&label("dec_out"));
-
-    &set_label("dec_in_place_partial",4);
-	# one can argue if this is actually required...
-	&mov	($key eq "edi" ? $key : "",$_out);
-	&lea	($idx eq "esi" ? $idx : "",$ivec);
-	&lea	($key,&DWP(0,$key,$s2));
-	&lea	($idx,&DWP(16,$idx,$s2));
-	&neg	($s2 eq "ecx" ? $s2 : "");
-	&data_word(0xA4F3F689);	# rep movsb	# restore tail
-
-    &set_label("dec_out",4);
-    &mov	("esp",$_esp);
-    &popf	();
-&function_end("Camellia_cbc_encrypt");
-}
-
-&asciz("Camellia for x86 by <appro\@openssl.org>");
-
-&asm_finish();

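The S1110/S4404/S0222/S3033 helpers near the end of the file above generate the interleaved lookup tables: all four Camellia S-boxes are the same 8-bit permutation with a one-bit rotate applied to either the index or the output, replicated into different byte lanes of a 32-bit word. A standalone sketch of that packing follows, reusing the first 16 @SBOX entries from the file; the printed range is illustrative:

#!/usr/bin/env perl
use strict; use warnings;

# First 16 of the 256 @SBOX entries from the file above; enough to
# exercise the packing below.
my @SBOX = (112,130, 44,236,179, 39,192,229,228,133, 87, 53,234, 12,174, 65);

sub rotl8 { my ($x, $n) = @_; (($x << $n) | ($x >> (8 - $n))) & 0xff }

# Camellia defines s2(x)=s1(x)<<<1, s3(x)=s1(x)>>>1, s4(x)=s1(x<<<1);
# each helper replicates the byte into the 32-bit lanes its name encodes.
sub S1110 { my $s = $SBOX[$_[0]];           ($s << 24) | ($s << 16) | ($s << 8) }
sub S4404 { my $s = $SBOX[rotl8($_[0], 1)]; ($s << 24) | ($s << 16) | $s }
sub S0222 { my $s = rotl8($SBOX[$_[0]], 1); ($s << 16) | ($s << 8)  | $s }
sub S3033 { my $s = rotl8($SBOX[$_[0]], 7); ($s << 24) | ($s << 8)  | $s }

# The emitted table interleaves [0][] with [1][] and [2][] with [3][]
# (pairs of data_words), so one scaled 8-byte index serves two boxes.
for my $i (0 .. 3) {
    printf "%3d: S1110=%08x S4404=%08x  S0222=%08x S3033=%08x\n",
           $i, S1110($i), S4404($i), S0222($i), S3033($i);
}
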
+ 0 - 1081
drivers/builtin_openssl2/crypto/camellia/asm/cmll-x86_64.pl

@@ -1,1081 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Copyright (c) 2008 Andy Polyakov <[email protected]>
-#
-# This module may be used under the terms of either the GNU General
-# Public License version 2 or later, the GNU Lesser General Public
-# License version 2.1 or later, the Mozilla Public License version
-# 1.1 or the BSD License. The exact terms of either license are
-# distributed along with this module. For further details see
-# http://www.openssl.org/~appro/camellia/.
-# ====================================================================
-
-# Performance in cycles per processed byte (less is better) in
-# 'openssl speed ...' benchmark:
-#
-#			AMD64	Core2	EM64T
-# -evp camellia-128-ecb	16.7	21.0	22.7
-# + over gcc 3.4.6	+25%	+5%	0%
-#
-# camellia-128-cbc	15.7	20.4	21.1
-#
-# 128-bit key setup	128	216	205	cycles/key
-# + over gcc 3.4.6	+54%	+39%	+15%
-#
-# Numbers in "+" rows represent performance improvement over compiler
-# generated code. Key setup timings are impressive on AMD and Core2
-# thanks to 64-bit operations being covertly deployed. Improvement on
-# EM64T, pre-Core2 Intel x86_64 CPU, is not as impressive, because it
-# apparently emulates some of the 64-bit operations in [32-bit] microcode.
-
-$flavour = shift;
-$output  = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-sub hi() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1h/;    $r; }
-sub lo() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/;
-                        $r =~ s/%[er]([sd]i)/%\1l/;
-                        $r =~ s/%(r[0-9]+)[d]?/%\1b/;   $r; }
-
-$t0="%eax";$t1="%ebx";$t2="%ecx";$t3="%edx";
-@S=("%r8d","%r9d","%r10d","%r11d");
-$i0="%esi";
-$i1="%edi";
-$Tbl="%rbp";	# size optimization
-$inp="%r12";
-$out="%r13";
-$key="%r14";
-$keyend="%r15";
-$arg0d=$win64?"%ecx":"%edi";
-
-# const unsigned int Camellia_SBOX[4][256];
-# Well, sort of... Camellia_SBOX[0][] is interleaved with [1][],
-# and [2][] - with [3][]. This is done to minimize code size.
-$SBOX1_1110=0;		# Camellia_SBOX[0]
-$SBOX4_4404=4;		# Camellia_SBOX[1]
-$SBOX2_0222=2048;	# Camellia_SBOX[2]
-$SBOX3_3033=2052;	# Camellia_SBOX[3]
-
-sub Camellia_Feistel {
-my $i=@_[0];
-my $seed=defined(@_[1])?@_[1]:0;
-my $scale=$seed<0?-8:8;
-my $j=($i&1)*2;
-my $s0=@S[($j)%4],$s1=@S[($j+1)%4],$s2=@S[($j+2)%4],$s3=@S[($j+3)%4];
-
-$code.=<<___;
-	xor	$s0,$t0				# t0^=key[0]
-	xor	$s1,$t1				# t1^=key[1]
-	movz	`&hi("$t0")`,$i0		# (t0>>8)&0xff
-	movz	`&lo("$t1")`,$i1		# (t1>>0)&0xff
-	mov	$SBOX3_3033($Tbl,$i0,8),$t3	# t3=SBOX3_3033[0]
-	mov	$SBOX1_1110($Tbl,$i1,8),$t2	# t2=SBOX1_1110[1]
-	movz	`&lo("$t0")`,$i0		# (t0>>0)&0xff
-	shr	\$16,$t0
-	movz	`&hi("$t1")`,$i1		# (t1>>8)&0xff
-	xor	$SBOX4_4404($Tbl,$i0,8),$t3	# t3^=SBOX4_4404[0]
-	shr	\$16,$t1
-	xor	$SBOX4_4404($Tbl,$i1,8),$t2	# t2^=SBOX4_4404[1]
-	movz	`&hi("$t0")`,$i0		# (t0>>24)&0xff
-	movz	`&lo("$t1")`,$i1		# (t1>>16)&0xff
-	xor	$SBOX1_1110($Tbl,$i0,8),$t3	# t3^=SBOX1_1110[0]
-	xor	$SBOX3_3033($Tbl,$i1,8),$t2	# t2^=SBOX3_3033[1]
-	movz	`&lo("$t0")`,$i0		# (t0>>16)&0xff
-	movz	`&hi("$t1")`,$i1		# (t1>>24)&0xff
-	xor	$SBOX2_0222($Tbl,$i0,8),$t3	# t3^=SBOX2_0222[0]
-	xor	$SBOX2_0222($Tbl,$i1,8),$t2	# t2^=SBOX2_0222[1]
-	mov	`$seed+($i+1)*$scale`($key),$t1	# prefetch key[i+1]
-	mov	`$seed+($i+1)*$scale+4`($key),$t0
-	xor	$t3,$t2				# t2^=t3
-	ror	\$8,$t3				# t3=RightRotate(t3,8)
-	xor	$t2,$s2
-	xor	$t2,$s3
-	xor	$t3,$s3
-___
-}
-
-# void Camellia_EncryptBlock_Rounds(
-#		int grandRounds,
-#		const Byte plaintext[],
-#		const KEY_TABLE_TYPE keyTable,
-#		Byte ciphertext[])
-$code=<<___;
-.text
-
-# V1.x API
-.globl	Camellia_EncryptBlock
-.type	Camellia_EncryptBlock,\@abi-omnipotent
-.align	16
-Camellia_EncryptBlock:
-	movl	\$128,%eax
-	subl	$arg0d,%eax
-	movl	\$3,$arg0d
-	adcl	\$0,$arg0d	# keyBitLength==128?3:4
-	jmp	.Lenc_rounds
-.size	Camellia_EncryptBlock,.-Camellia_EncryptBlock
-# V2
-.globl	Camellia_EncryptBlock_Rounds
-.type	Camellia_EncryptBlock_Rounds,\@function,4
-.align	16
-.Lenc_rounds:
-Camellia_EncryptBlock_Rounds:
-	push	%rbx
-	push	%rbp
-	push	%r13
-	push	%r14
-	push	%r15
-.Lenc_prologue:
-
-	#mov	%rsi,$inp		# put away arguments
-	mov	%rcx,$out
-	mov	%rdx,$key
-
-	shl	\$6,%edi		# process grandRounds
-	lea	.LCamellia_SBOX(%rip),$Tbl
-	lea	($key,%rdi),$keyend
-
-	mov	0(%rsi),@S[0]		# load plaintext
-	mov	4(%rsi),@S[1]
-	mov	8(%rsi),@S[2]
-	bswap	@S[0]
-	mov	12(%rsi),@S[3]
-	bswap	@S[1]
-	bswap	@S[2]
-	bswap	@S[3]
-
-	call	_x86_64_Camellia_encrypt
-
-	bswap	@S[0]
-	bswap	@S[1]
-	bswap	@S[2]
-	mov	@S[0],0($out)
-	bswap	@S[3]
-	mov	@S[1],4($out)
-	mov	@S[2],8($out)
-	mov	@S[3],12($out)
-
-	mov	0(%rsp),%r15
-	mov	8(%rsp),%r14
-	mov	16(%rsp),%r13
-	mov	24(%rsp),%rbp
-	mov	32(%rsp),%rbx
-	lea	40(%rsp),%rsp
-.Lenc_epilogue:
-	ret
-.size	Camellia_EncryptBlock_Rounds,.-Camellia_EncryptBlock_Rounds
-
-.type	_x86_64_Camellia_encrypt,\@abi-omnipotent
-.align	16
-_x86_64_Camellia_encrypt:
-	xor	0($key),@S[1]
-	xor	4($key),@S[0]		# ^=key[0-3]
-	xor	8($key),@S[3]
-	xor	12($key),@S[2]
-.align	16
-.Leloop:
-	mov	16($key),$t1		# prefetch key[4-5]
-	mov	20($key),$t0
-
-___
-	for ($i=0;$i<6;$i++) { Camellia_Feistel($i,16); }
-$code.=<<___;
-	lea	16*4($key),$key
-	cmp	$keyend,$key
-	mov	8($key),$t3		# prefetch key[2-3]
-	mov	12($key),$t2
-	je	.Ledone
-
-	and	@S[0],$t0
-	or	@S[3],$t3
-	rol	\$1,$t0
-	xor	$t3,@S[2]		# s2^=s3|key[3];
-	xor	$t0,@S[1]		# s1^=LeftRotate(s0&key[0],1);
-	and	@S[2],$t2
-	or	@S[1],$t1
-	rol	\$1,$t2
-	xor	$t1,@S[0]		# s0^=s1|key[1];
-	xor	$t2,@S[3]		# s3^=LeftRotate(s2&key[2],1);
-	jmp	.Leloop
-
-.align	16
-.Ledone:
-	xor	@S[2],$t0		# SwapHalf
-	xor	@S[3],$t1
-	xor	@S[0],$t2
-	xor	@S[1],$t3
-
-	mov	$t0,@S[0]
-	mov	$t1,@S[1]
-	mov	$t2,@S[2]
-	mov	$t3,@S[3]
-
-	.byte	0xf3,0xc3		# rep ret
-.size	_x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt
-
-# V1.x API
-.globl	Camellia_DecryptBlock
-.type	Camellia_DecryptBlock,\@abi-omnipotent
-.align	16
-Camellia_DecryptBlock:
-	movl	\$128,%eax
-	subl	$arg0d,%eax
-	movl	\$3,$arg0d
-	adcl	\$0,$arg0d	# keyBitLength==128?3:4
-	jmp	.Ldec_rounds
-.size	Camellia_DecryptBlock,.-Camellia_DecryptBlock
-# V2
-.globl	Camellia_DecryptBlock_Rounds
-.type	Camellia_DecryptBlock_Rounds,\@function,4
-.align	16
-.Ldec_rounds:
-Camellia_DecryptBlock_Rounds:
-	push	%rbx
-	push	%rbp
-	push	%r13
-	push	%r14
-	push	%r15
-.Ldec_prologue:
-
-	#mov	%rsi,$inp		# put away arguments
-	mov	%rcx,$out
-	mov	%rdx,$keyend
-
-	shl	\$6,%edi		# process grandRounds
-	lea	.LCamellia_SBOX(%rip),$Tbl
-	lea	($keyend,%rdi),$key
-
-	mov	0(%rsi),@S[0]		# load plaintext
-	mov	4(%rsi),@S[1]
-	mov	8(%rsi),@S[2]
-	bswap	@S[0]
-	mov	12(%rsi),@S[3]
-	bswap	@S[1]
-	bswap	@S[2]
-	bswap	@S[3]
-
-	call	_x86_64_Camellia_decrypt
-
-	bswap	@S[0]
-	bswap	@S[1]
-	bswap	@S[2]
-	mov	@S[0],0($out)
-	bswap	@S[3]
-	mov	@S[1],4($out)
-	mov	@S[2],8($out)
-	mov	@S[3],12($out)
-
-	mov	0(%rsp),%r15
-	mov	8(%rsp),%r14
-	mov	16(%rsp),%r13
-	mov	24(%rsp),%rbp
-	mov	32(%rsp),%rbx
-	lea	40(%rsp),%rsp
-.Ldec_epilogue:
-	ret
-.size	Camellia_DecryptBlock_Rounds,.-Camellia_DecryptBlock_Rounds
-
-.type	_x86_64_Camellia_decrypt,\@abi-omnipotent
-.align	16
-_x86_64_Camellia_decrypt:
-	xor	0($key),@S[1]
-	xor	4($key),@S[0]		# ^=key[0-3]
-	xor	8($key),@S[3]
-	xor	12($key),@S[2]
-.align	16
-.Ldloop:
-	mov	-8($key),$t1		# prefetch key[4-5]
-	mov	-4($key),$t0
-
-___
-	for ($i=0;$i<6;$i++) { Camellia_Feistel($i,-8); }
-$code.=<<___;
-	lea	-16*4($key),$key
-	cmp	$keyend,$key
-	mov	0($key),$t3		# prefetch key[2-3]
-	mov	4($key),$t2
-	je	.Lddone
-
-	and	@S[0],$t0
-	or	@S[3],$t3
-	rol	\$1,$t0
-	xor	$t3,@S[2]		# s2^=s3|key[3];
-	xor	$t0,@S[1]		# s1^=LeftRotate(s0&key[0],1);
-	and	@S[2],$t2
-	or	@S[1],$t1
-	rol	\$1,$t2
-	xor	$t1,@S[0]		# s0^=s1|key[1];
-	xor	$t2,@S[3]		# s3^=LeftRotate(s2&key[2],1);
-
-	jmp	.Ldloop
-
-.align	16
-.Lddone:
-	xor	@S[2],$t2
-	xor	@S[3],$t3
-	xor	@S[0],$t0
-	xor	@S[1],$t1
-
-	mov	$t2,@S[0]		# SwapHalf
-	mov	$t3,@S[1]
-	mov	$t0,@S[2]
-	mov	$t1,@S[3]
-
-	.byte	0xf3,0xc3		# rep ret
-.size	_x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt
-___
-
-sub _saveround {
-my ($rnd,$key,@T)=@_;
-my $bias=int(@T[0])?shift(@T):0;
-
-    if ($#T==3) {
-	$code.=<<___;
-	mov	@T[1],`$bias+$rnd*8+0`($key)
-	mov	@T[0],`$bias+$rnd*8+4`($key)
-	mov	@T[3],`$bias+$rnd*8+8`($key)
-	mov	@T[2],`$bias+$rnd*8+12`($key)
-___
-    } else {
-	$code.="	mov	@T[0],`$bias+$rnd*8+0`($key)\n";
-	$code.="	mov	@T[1],`$bias+$rnd*8+8`($key)\n"	if ($#T>=1);
-    }
-}
-
-sub _loadround {
-my ($rnd,$key,@T)=@_;
-my $bias=int(@T[0])?shift(@T):0;
-
-$code.="	mov	`$bias+$rnd*8+0`($key),@T[0]\n";
-$code.="	mov	`$bias+$rnd*8+8`($key),@T[1]\n"	if ($#T>=1);
-}
-
-# shld is very slow on Intel EM64T family. Even on AMD it limits
-# instruction decode rate [because it's VectorPath] and consequently
-# performance...
-sub __rotl128 {
-my ($i0,$i1,$rot)=@_;
-
-    if ($rot) {
-	$code.=<<___;
-	mov	$i0,%r11
-	shld	\$$rot,$i1,$i0
-	shld	\$$rot,%r11,$i1
-___
-    }
-}
-
-# ... Implementing 128-bit rotate without shld gives 80% better
-# performance on EM64T, +15% on AMD64 and only ~7% degradation on
-# Core2. This is therefore preferred.
-sub _rotl128 {
-my ($i0,$i1,$rot)=@_;
-
-    if ($rot) {
-	$code.=<<___;
-	mov	$i0,%r11
-	shl	\$$rot,$i0
-	mov	$i1,%r9
-	shr	\$`64-$rot`,%r9
-	shr	\$`64-$rot`,%r11
-	or	%r9,$i0
-	shl	\$$rot,$i1
-	or	%r11,$i1
-___
-    }
-}
-
-{ my $step=0;
-
-$code.=<<___;
-.globl	Camellia_Ekeygen
-.type	Camellia_Ekeygen,\@function,3
-.align	16
-Camellia_Ekeygen:
-	push	%rbx
-	push	%rbp
-	push	%r13
-	push	%r14
-	push	%r15
-.Lkey_prologue:
-
-	mov	%rdi,$keyend		# put away arguments, keyBitLength
-	mov	%rdx,$out		# keyTable
-
-	mov	0(%rsi),@S[0]		# load 0-127 bits
-	mov	4(%rsi),@S[1]
-	mov	8(%rsi),@S[2]
-	mov	12(%rsi),@S[3]
-
-	bswap	@S[0]
-	bswap	@S[1]
-	bswap	@S[2]
-	bswap	@S[3]
-___
-	&_saveround	(0,$out,@S);	# KL<<<0
-$code.=<<___;
-	cmp	\$128,$keyend		# check keyBitLength
-	je	.L1st128
-
-	mov	16(%rsi),@S[0]		# load 128-191 bits
-	mov	20(%rsi),@S[1]
-	cmp	\$192,$keyend
-	je	.L1st192
-	mov	24(%rsi),@S[2]		# load 192-255 bits
-	mov	28(%rsi),@S[3]
-	jmp	.L1st256
-.L1st192:
-	mov	@S[0],@S[2]
-	mov	@S[1],@S[3]
-	not	@S[2]
-	not	@S[3]
-.L1st256:
-	bswap	@S[0]
-	bswap	@S[1]
-	bswap	@S[2]
-	bswap	@S[3]
-___
-	&_saveround	(4,$out,@S);	# temp storage for KR!
-$code.=<<___;
-	xor	0($out),@S[1]		# KR^KL
-	xor	4($out),@S[0]
-	xor	8($out),@S[3]
-	xor	12($out),@S[2]
-
-.L1st128:
-	lea	.LCamellia_SIGMA(%rip),$key
-	lea	.LCamellia_SBOX(%rip),$Tbl
-
-	mov	0($key),$t1
-	mov	4($key),$t0
-___
-	&Camellia_Feistel($step++);
-	&Camellia_Feistel($step++);
-$code.=<<___;
-	xor	0($out),@S[1]		# ^KL
-	xor	4($out),@S[0]
-	xor	8($out),@S[3]
-	xor	12($out),@S[2]
-___
-	&Camellia_Feistel($step++);
-	&Camellia_Feistel($step++);
-$code.=<<___;
-	cmp	\$128,$keyend
-	jne	.L2nd256
-
-	lea	128($out),$out		# size optimization
-	shl	\$32,%r8		# @S[0]||
-	shl	\$32,%r10		# @S[2]||
-	or	%r9,%r8			# ||@S[1]
-	or	%r11,%r10		# ||@S[3]
-___
-	&_loadround	(0,$out,-128,"%rax","%rbx");	# KL
-	&_saveround	(2,$out,-128,"%r8","%r10");	# KA<<<0
-	&_rotl128	("%rax","%rbx",15);
-	&_saveround	(4,$out,-128,"%rax","%rbx");	# KL<<<15
-	&_rotl128	("%r8","%r10",15);
-	&_saveround	(6,$out,-128,"%r8","%r10");	# KA<<<15
-	&_rotl128	("%r8","%r10",15);		# 15+15=30
-	&_saveround	(8,$out,-128,"%r8","%r10");	# KA<<<30
-	&_rotl128	("%rax","%rbx",30);		# 15+30=45
-	&_saveround	(10,$out,-128,"%rax","%rbx");	# KL<<<45
-	&_rotl128	("%r8","%r10",15);		# 30+15=45
-	&_saveround	(12,$out,-128,"%r8");		# KA<<<45
-	&_rotl128	("%rax","%rbx",15);		# 45+15=60
-	&_saveround	(13,$out,-128,"%rbx");		# KL<<<60
-	&_rotl128	("%r8","%r10",15);		# 45+15=60
-	&_saveround	(14,$out,-128,"%r8","%r10");	# KA<<<60
-	&_rotl128	("%rax","%rbx",17);		# 60+17=77
-	&_saveround	(16,$out,-128,"%rax","%rbx");	# KL<<<77
-	&_rotl128	("%rax","%rbx",17);		# 77+17=94
-	&_saveround	(18,$out,-128,"%rax","%rbx");	# KL<<<94
-	&_rotl128	("%r8","%r10",34);		# 60+34=94
-	&_saveround	(20,$out,-128,"%r8","%r10");	# KA<<<94
-	&_rotl128	("%rax","%rbx",17);		# 94+17=111
-	&_saveround	(22,$out,-128,"%rax","%rbx");	# KL<<<111
-	&_rotl128	("%r8","%r10",17);		# 94+17=111
-	&_saveround	(24,$out,-128,"%r8","%r10");	# KA<<<111
-$code.=<<___;
-	mov	\$3,%eax
-	jmp	.Ldone
-.align	16
-.L2nd256:
-___
-	&_saveround	(6,$out,@S);	# temp storage for KA!
-$code.=<<___;
-	xor	`4*8+0`($out),@S[1]	# KA^KR
-	xor	`4*8+4`($out),@S[0]
-	xor	`5*8+0`($out),@S[3]
-	xor	`5*8+4`($out),@S[2]
-___
-	&Camellia_Feistel($step++);
-	&Camellia_Feistel($step++);
-
-	&_loadround	(0,$out,"%rax","%rbx");	# KL
-	&_loadround	(4,$out,"%rcx","%rdx");	# KR
-	&_loadround	(6,$out,"%r14","%r15");	# KA
-$code.=<<___;
-	lea	128($out),$out		# size optimization
-	shl	\$32,%r8		# @S[0]||
-	shl	\$32,%r10		# @S[2]||
-	or	%r9,%r8			# ||@S[1]
-	or	%r11,%r10		# ||@S[3]
-___
-	&_saveround	(2,$out,-128,"%r8","%r10");	# KB<<<0
-	&_rotl128	("%rcx","%rdx",15);
-	&_saveround	(4,$out,-128,"%rcx","%rdx");	# KR<<<15
-	&_rotl128	("%r14","%r15",15);
-	&_saveround	(6,$out,-128,"%r14","%r15");	# KA<<<15
-	&_rotl128	("%rcx","%rdx",15);		# 15+15=30
-	&_saveround	(8,$out,-128,"%rcx","%rdx");	# KR<<<30
-	&_rotl128	("%r8","%r10",30);
-	&_saveround	(10,$out,-128,"%r8","%r10");	# KB<<<30
-	&_rotl128	("%rax","%rbx",45);
-	&_saveround	(12,$out,-128,"%rax","%rbx");	# KL<<<45
-	&_rotl128	("%r14","%r15",30);		# 15+30=45
-	&_saveround	(14,$out,-128,"%r14","%r15");	# KA<<<45
-	&_rotl128	("%rax","%rbx",15);		# 45+15=60
-	&_saveround	(16,$out,-128,"%rax","%rbx");	# KL<<<60
-	&_rotl128	("%rcx","%rdx",30);		# 30+30=60
-	&_saveround	(18,$out,-128,"%rcx","%rdx");	# KR<<<60
-	&_rotl128	("%r8","%r10",30);		# 30+30=60
-	&_saveround	(20,$out,-128,"%r8","%r10");	# KB<<<60
-	&_rotl128	("%rax","%rbx",17);		# 60+17=77
-	&_saveround	(22,$out,-128,"%rax","%rbx");	# KL<<<77
-	&_rotl128	("%r14","%r15",32);		# 45+32=77
-	&_saveround	(24,$out,-128,"%r14","%r15");	# KA<<<77
-	&_rotl128	("%rcx","%rdx",34);		# 60+34=94
-	&_saveround	(26,$out,-128,"%rcx","%rdx");	# KR<<<94
-	&_rotl128	("%r14","%r15",17);		# 77+17=94
-	&_saveround	(28,$out,-128,"%r14","%r15");	# KA<<<77
-	&_rotl128	("%rax","%rbx",34);		# 77+34=111
-	&_saveround	(30,$out,-128,"%rax","%rbx");	# KL<<<111
-	&_rotl128	("%r8","%r10",51);		# 60+51=111
-	&_saveround	(32,$out,-128,"%r8","%r10");	# KB<<<111
-$code.=<<___;
-	mov	\$4,%eax
-.Ldone:
-	mov	0(%rsp),%r15
-	mov	8(%rsp),%r14
-	mov	16(%rsp),%r13
-	mov	24(%rsp),%rbp
-	mov	32(%rsp),%rbx
-	lea	40(%rsp),%rsp
-.Lkey_epilogue:
-	ret
-.size	Camellia_Ekeygen,.-Camellia_Ekeygen
-___
-}
-
-@SBOX=(
-112,130, 44,236,179, 39,192,229,228,133, 87, 53,234, 12,174, 65,
- 35,239,107,147, 69, 25,165, 33,237, 14, 79, 78, 29,101,146,189,
-134,184,175,143,124,235, 31,206, 62, 48,220, 95, 94,197, 11, 26,
-166,225, 57,202,213, 71, 93, 61,217,  1, 90,214, 81, 86,108, 77,
-139, 13,154,102,251,204,176, 45,116, 18, 43, 32,240,177,132,153,
-223, 76,203,194, 52,126,118,  5,109,183,169, 49,209, 23,  4,215,
- 20, 88, 58, 97,222, 27, 17, 28, 50, 15,156, 22, 83, 24,242, 34,
-254, 68,207,178,195,181,122,145, 36,  8,232,168, 96,252,105, 80,
-170,208,160,125,161,137, 98,151, 84, 91, 30,149,224,255,100,210,
- 16,196,  0, 72,163,247,117,219,138,  3,230,218,  9, 63,221,148,
-135, 92,131,  2,205, 74,144, 51,115,103,246,243,157,127,191,226,
- 82,155,216, 38,200, 55,198, 59,129,150,111, 75, 19,190, 99, 46,
-233,121,167,140,159,110,188,142, 41,245,249,182, 47,253,180, 89,
-120,152,  6,106,231, 70,113,186,212, 37,171, 66,136,162,141,250,
-114,  7,185, 85,248,238,172, 10, 54, 73, 42,104, 60, 56,241,164,
- 64, 40,211,123,187,201, 67,193, 21,227,173,244,119,199,128,158);
-
-sub S1110 { my $i=shift; $i=@SBOX[$i]; $i=$i<<24|$i<<16|$i<<8; sprintf("0x%08x",$i); }
-sub S4404 { my $i=shift; $i=($i<<1|$i>>7)&0xff; $i=@SBOX[$i]; $i=$i<<24|$i<<16|$i; sprintf("0x%08x",$i); }
-sub S0222 { my $i=shift; $i=@SBOX[$i]; $i=($i<<1|$i>>7)&0xff; $i=$i<<16|$i<<8|$i; sprintf("0x%08x",$i); }
-sub S3033 { my $i=shift; $i=@SBOX[$i]; $i=($i>>1|$i<<7)&0xff; $i=$i<<24|$i<<8|$i; sprintf("0x%08x",$i); }
-
-$code.=<<___;
-.align	64
-.LCamellia_SIGMA:
-.long	0x3bcc908b, 0xa09e667f, 0x4caa73b2, 0xb67ae858
-.long	0xe94f82be, 0xc6ef372f, 0xf1d36f1c, 0x54ff53a5
-.long	0xde682d1d, 0x10e527fa, 0xb3e6c1fd, 0xb05688c2
-.long	0,          0,          0,          0
-.LCamellia_SBOX:
-___
-# tables are interleaved, remember?
-sub data_word { $code.=".long\t".join(',',@_)."\n"; }
-for ($i=0;$i<256;$i++) { &data_word(&S1110($i),&S4404($i)); }
-for ($i=0;$i<256;$i++) { &data_word(&S0222($i),&S3033($i)); }
-
-# void Camellia_cbc_encrypt (const unsigned char *inp, unsigned char *out,
-#			size_t length, const CAMELLIA_KEY *key,
-#			unsigned char *ivp,const int enc);
-{
-$_key="0(%rsp)";
-$_end="8(%rsp)";	# inp+len&~15
-$_res="16(%rsp)";	# len&15
-$ivec="24(%rsp)";
-$_ivp="40(%rsp)";
-$_rsp="48(%rsp)";
-
-$code.=<<___;
-.globl	Camellia_cbc_encrypt
-.type	Camellia_cbc_encrypt,\@function,6
-.align	16
-Camellia_cbc_encrypt:
-	cmp	\$0,%rdx
-	je	.Lcbc_abort
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-.Lcbc_prologue:
-
-	mov	%rsp,%rbp
-	sub	\$64,%rsp
-	and	\$-64,%rsp
-
-	# place stack frame just "above mod 1024" the key schedule,
-	# this ensures that cache associativity suffices
-	lea	-64-63(%rcx),%r10
-	sub	%rsp,%r10
-	neg	%r10
-	and	\$0x3C0,%r10
-	sub	%r10,%rsp
-	#add	\$8,%rsp		# 8 is reserved for callee's ra
-
-	mov	%rdi,$inp		# inp argument
-	mov	%rsi,$out		# out argument
-	mov	%r8,%rbx		# ivp argument
-	mov	%rcx,$key		# key argument
-	mov	272(%rcx),${keyend}d	# grandRounds
-
-	mov	%r8,$_ivp
-	mov	%rbp,$_rsp
-
-.Lcbc_body:
-	lea	.LCamellia_SBOX(%rip),$Tbl
-
-	mov	\$32,%ecx
-.align	4
-.Lcbc_prefetch_sbox:
-	mov	0($Tbl),%rax
-	mov	32($Tbl),%rsi
-	mov	64($Tbl),%rdi
-	mov	96($Tbl),%r11
-	lea	128($Tbl),$Tbl
-	loop	.Lcbc_prefetch_sbox
-	sub	\$4096,$Tbl
-	shl	\$6,$keyend
-	mov	%rdx,%rcx		# len argument
-	lea	($key,$keyend),$keyend
-
-	cmp	\$0,%r9d		# enc argument
-	je	.LCBC_DECRYPT
-
-	and	\$-16,%rdx
-	and	\$15,%rcx		# length residue
-	lea	($inp,%rdx),%rdx
-	mov	$key,$_key
-	mov	%rdx,$_end
-	mov	%rcx,$_res
-
-	cmp	$inp,%rdx
-	mov	0(%rbx),@S[0]		# load IV
-	mov	4(%rbx),@S[1]
-	mov	8(%rbx),@S[2]
-	mov	12(%rbx),@S[3]
-	je	.Lcbc_enc_tail
-	jmp	.Lcbc_eloop
-
-.align	16
-.Lcbc_eloop:
-	xor	0($inp),@S[0]
-	xor	4($inp),@S[1]
-	xor	8($inp),@S[2]
-	bswap	@S[0]
-	xor	12($inp),@S[3]
-	bswap	@S[1]
-	bswap	@S[2]
-	bswap	@S[3]
-
-	call	_x86_64_Camellia_encrypt
-
-	mov	$_key,$key		# "rewind" the key
-	bswap	@S[0]
-	mov	$_end,%rdx
-	bswap	@S[1]
-	mov	$_res,%rcx
-	bswap	@S[2]
-	mov	@S[0],0($out)
-	bswap	@S[3]
-	mov	@S[1],4($out)
-	mov	@S[2],8($out)
-	lea	16($inp),$inp
-	mov	@S[3],12($out)
-	cmp	%rdx,$inp
-	lea	16($out),$out
-	jne	.Lcbc_eloop
-
-	cmp	\$0,%rcx
-	jne	.Lcbc_enc_tail
-
-	mov	$_ivp,$out
-	mov	@S[0],0($out)		# write out IV residue
-	mov	@S[1],4($out)
-	mov	@S[2],8($out)
-	mov	@S[3],12($out)
-	jmp	.Lcbc_done
-
-.align	16
-.Lcbc_enc_tail:
-	xor	%rax,%rax
-	mov	%rax,0+$ivec
-	mov	%rax,8+$ivec
-	mov	%rax,$_res
-
-.Lcbc_enc_pushf:
-	pushfq
-	cld
-	mov	$inp,%rsi
-	lea	8+$ivec,%rdi
-	.long	0x9066A4F3		# rep movsb
-	popfq
-.Lcbc_enc_popf:
-
-	lea	$ivec,$inp
-	lea	16+$ivec,%rax
-	mov	%rax,$_end
-	jmp	.Lcbc_eloop		# one more time
-
-.align	16
-.LCBC_DECRYPT:
-	xchg	$key,$keyend
-	add	\$15,%rdx
-	and	\$15,%rcx		# length residue
-	and	\$-16,%rdx
-	mov	$key,$_key
-	lea	($inp,%rdx),%rdx
-	mov	%rdx,$_end
-	mov	%rcx,$_res
-
-	mov	(%rbx),%rax		# load IV
-	mov	8(%rbx),%rbx
-	jmp	.Lcbc_dloop
-.align	16
-.Lcbc_dloop:
-	mov	0($inp),@S[0]
-	mov	4($inp),@S[1]
-	mov	8($inp),@S[2]
-	bswap	@S[0]
-	mov	12($inp),@S[3]
-	bswap	@S[1]
-	mov	%rax,0+$ivec		# save IV to temporary storage
-	bswap	@S[2]
-	mov	%rbx,8+$ivec
-	bswap	@S[3]
-
-	call	_x86_64_Camellia_decrypt
-
-	mov	$_key,$key		# "rewind" the key
-	mov	$_end,%rdx
-	mov	$_res,%rcx
-
-	bswap	@S[0]
-	mov	($inp),%rax		# load IV for next iteration
-	bswap	@S[1]
-	mov	8($inp),%rbx
-	bswap	@S[2]
-	xor	0+$ivec,@S[0]
-	bswap	@S[3]
-	xor	4+$ivec,@S[1]
-	xor	8+$ivec,@S[2]
-	lea	16($inp),$inp
-	xor	12+$ivec,@S[3]
-	cmp	%rdx,$inp
-	je	.Lcbc_ddone
-
-	mov	@S[0],0($out)
-	mov	@S[1],4($out)
-	mov	@S[2],8($out)
-	mov	@S[3],12($out)
-
-	lea	16($out),$out
-	jmp	.Lcbc_dloop
-
-.align	16
-.Lcbc_ddone:
-	mov	$_ivp,%rdx
-	cmp	\$0,%rcx
-	jne	.Lcbc_dec_tail
-
-	mov	@S[0],0($out)
-	mov	@S[1],4($out)
-	mov	@S[2],8($out)
-	mov	@S[3],12($out)
-
-	mov	%rax,(%rdx)		# write out IV residue
-	mov	%rbx,8(%rdx)
-	jmp	.Lcbc_done
-.align	16
-.Lcbc_dec_tail:
-	mov	@S[0],0+$ivec
-	mov	@S[1],4+$ivec
-	mov	@S[2],8+$ivec
-	mov	@S[3],12+$ivec
-
-.Lcbc_dec_pushf:
-	pushfq
-	cld
-	lea	8+$ivec,%rsi
-	lea	($out),%rdi
-	.long	0x9066A4F3		# rep movsb
-	popfq
-.Lcbc_dec_popf:
-
-	mov	%rax,(%rdx)		# write out IV residue
-	mov	%rbx,8(%rdx)
-	jmp	.Lcbc_done
-
-.align	16
-.Lcbc_done:
-	mov	$_rsp,%rcx
-	mov	0(%rcx),%r15
-	mov	8(%rcx),%r14
-	mov	16(%rcx),%r13
-	mov	24(%rcx),%r12
-	mov	32(%rcx),%rbp
-	mov	40(%rcx),%rbx
-	lea	48(%rcx),%rsp
-.Lcbc_abort:
-	ret
-.size	Camellia_cbc_encrypt,.-Camellia_cbc_encrypt
-
-.asciz	"Camellia for x86_64 by <appro\@openssl.org>"
-___
-}
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern	__imp_RtlVirtualUnwind
-.type	common_se_handler,\@abi-omnipotent
-.align	16
-common_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	lea	-64(%rsp),%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	mov	8($disp),%rsi		# disp->ImageBase
-	mov	56($disp),%r11		# disp->HandlerData
-
-	mov	0(%r11),%r10d		# HandlerData[0]
-	lea	(%rsi,%r10),%r10	# prologue label
-	cmp	%r10,%rbx		# context->Rip<prologue label
-	jb	.Lin_prologue
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	mov	4(%r11),%r10d		# HandlerData[1]
-	lea	(%rsi,%r10),%r10	# epilogue label
-	cmp	%r10,%rbx		# context->Rip>=epilogue label
-	jae	.Lin_prologue
-
-	lea	40(%rax),%rax
-	mov	-8(%rax),%rbx
-	mov	-16(%rax),%rbp
-	mov	-24(%rax),%r13
-	mov	-32(%rax),%r14
-	mov	-40(%rax),%r15
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lin_prologue:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-	jmp	.Lcommon_seh_exit
-.size	common_se_handler,.-common_se_handler
-
-.type	cbc_se_handler,\@abi-omnipotent
-.align	16
-cbc_se_handler:
-	push	%rsi
-	push	%rdi
-	push	%rbx
-	push	%rbp
-	push	%r12
-	push	%r13
-	push	%r14
-	push	%r15
-	pushfq
-	lea	-64(%rsp),%rsp
-
-	mov	120($context),%rax	# pull context->Rax
-	mov	248($context),%rbx	# pull context->Rip
-
-	lea	.Lcbc_prologue(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<.Lcbc_prologue
-	jb	.Lin_cbc_prologue
-
-	lea	.Lcbc_body(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<.Lcbc_body
-	jb	.Lin_cbc_frame_setup
-
-	mov	152($context),%rax	# pull context->Rsp
-
-	lea	.Lcbc_abort(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip>=.Lcbc_abort
-	jae	.Lin_cbc_prologue
-
-	# handle pushf/popf in Camellia_cbc_encrypt
-	lea	.Lcbc_enc_pushf(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<=.Lcbc_enc_pushf
-	jbe	.Lin_cbc_no_flag
-	lea	8(%rax),%rax
-	lea	.Lcbc_enc_popf(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<.Lcbc_enc_popf
-	jb	.Lin_cbc_no_flag
-	lea	-8(%rax),%rax
-	lea	.Lcbc_dec_pushf(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<=.Lcbc_dec_pushf
-	jbe	.Lin_cbc_no_flag
-	lea	8(%rax),%rax
-	lea	.Lcbc_dec_popf(%rip),%r10
-	cmp	%r10,%rbx		# context->Rip<.Lcbc_dec_popf
-	jb	.Lin_cbc_no_flag
-	lea	-8(%rax),%rax
-
-.Lin_cbc_no_flag:
-	mov	48(%rax),%rax		# $_rsp
-	lea	48(%rax),%rax
-
-.Lin_cbc_frame_setup:
-	mov	-8(%rax),%rbx
-	mov	-16(%rax),%rbp
-	mov	-24(%rax),%r12
-	mov	-32(%rax),%r13
-	mov	-40(%rax),%r14
-	mov	-48(%rax),%r15
-	mov	%rbx,144($context)	# restore context->Rbx
-	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore context->R12
-	mov	%r13,224($context)	# restore context->R13
-	mov	%r14,232($context)	# restore context->R14
-	mov	%r15,240($context)	# restore context->R15
-
-.Lin_cbc_prologue:
-	mov	8(%rax),%rdi
-	mov	16(%rax),%rsi
-	mov	%rax,152($context)	# restore context->Rsp
-	mov	%rsi,168($context)	# restore context->Rsi
-	mov	%rdi,176($context)	# restore context->Rdi
-
-.align	4
-.Lcommon_seh_exit:
-
-	mov	40($disp),%rdi		# disp->ContextRecord
-	mov	$context,%rsi		# context
-	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
-	.long	0xa548f3fc		# cld; rep movsq
-
-	mov	$disp,%rsi
-	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
-	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
-	mov	0(%rsi),%r8		# arg3, disp->ControlPc
-	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
-	mov	40(%rsi),%r10		# disp->ContextRecord
-	lea	56(%rsi),%r11		# &disp->HandlerData
-	lea	24(%rsi),%r12		# &disp->EstablisherFrame
-	mov	%r10,32(%rsp)		# arg5
-	mov	%r11,40(%rsp)		# arg6
-	mov	%r12,48(%rsp)		# arg7
-	mov	%rcx,56(%rsp)		# arg8, (NULL)
-	call	*__imp_RtlVirtualUnwind(%rip)
-
-	mov	\$1,%eax		# ExceptionContinueSearch
-	lea	64(%rsp),%rsp
-	popfq
-	pop	%r15
-	pop	%r14
-	pop	%r13
-	pop	%r12
-	pop	%rbp
-	pop	%rbx
-	pop	%rdi
-	pop	%rsi
-	ret
-.size	cbc_se_handler,.-cbc_se_handler
-
-.section	.pdata
-.align	4
-	.rva	.LSEH_begin_Camellia_EncryptBlock_Rounds
-	.rva	.LSEH_end_Camellia_EncryptBlock_Rounds
-	.rva	.LSEH_info_Camellia_EncryptBlock_Rounds
-
-	.rva	.LSEH_begin_Camellia_DecryptBlock_Rounds
-	.rva	.LSEH_end_Camellia_DecryptBlock_Rounds
-	.rva	.LSEH_info_Camellia_DecryptBlock_Rounds
-
-	.rva	.LSEH_begin_Camellia_Ekeygen
-	.rva	.LSEH_end_Camellia_Ekeygen
-	.rva	.LSEH_info_Camellia_Ekeygen
-
-	.rva	.LSEH_begin_Camellia_cbc_encrypt
-	.rva	.LSEH_end_Camellia_cbc_encrypt
-	.rva	.LSEH_info_Camellia_cbc_encrypt
-
-.section	.xdata
-.align	8
-.LSEH_info_Camellia_EncryptBlock_Rounds:
-	.byte	9,0,0,0
-	.rva	common_se_handler
-	.rva	.Lenc_prologue,.Lenc_epilogue	# HandlerData[]
-.LSEH_info_Camellia_DecryptBlock_Rounds:
-	.byte	9,0,0,0
-	.rva	common_se_handler
-	.rva	.Ldec_prologue,.Ldec_epilogue	# HandlerData[]
-.LSEH_info_Camellia_Ekeygen:
-	.byte	9,0,0,0
-	.rva	common_se_handler
-	.rva	.Lkey_prologue,.Lkey_epilogue	# HandlerData[]
-.LSEH_info_Camellia_cbc_encrypt:
-	.byte	9,0,0,0
-	.rva	cbc_se_handler
-___
-}
-
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
-close STDOUT;
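The deleted handlers above implement the Win64 structured-exception-handling contract for hand-written assembly: the .pdata section maps each function's address range to an .xdata UNWIND_INFO record, whose language-specific handler (common_se_handler or cbc_se_handler) patches the saved non-volatile registers back into the CONTEXT and then lets RtlVirtualUnwind continue the stack walk, returning ExceptionContinueSearch. A rough C rendering of that pattern, as a sketch only: the function name se_handler_sketch, the single fixed frame layout, and the sp[-1] offset are illustrative rather than the file's actual per-region logic, and x64 Windows headers are assumed.

#include <windows.h>

/* Illustrative language-specific handler in the style of the deleted
 * common_se_handler/cbc_se_handler (x64 Windows only). */
static EXCEPTION_DISPOSITION
se_handler_sketch(EXCEPTION_RECORD *rec, ULONG64 frame,
                  CONTEXT *ctx, DISPATCHER_CONTEXT *disp)
{
    (void)rec; (void)frame;

    /* Restore non-volatile registers from the interrupted frame so the
     * unwinder sees consistent state; mirrors the "restore context->Rbx
     * ... context->R15" blocks. Real code selects the offsets per code
     * region, as the assembly does with its .Lcbc_* label comparisons. */
    ULONG64 *sp = (ULONG64 *)ctx->Rsp;   /* assumed frame layout */
    ctx->Rbx = sp[-1];
    /* ... rbp, r12-r15, rsi, rdi likewise ... */

    /* Continue the unwind from the adjusted context, which is what the
     * .Lcommon_seh_exit block does before returning 1. */
    PVOID handler_data = disp->HandlerData;
    ULONG64 establisher = 0;
    RtlVirtualUnwind(UNW_FLAG_NHANDLER, disp->ImageBase, disp->ControlPc,
                     disp->FunctionEntry, ctx, &handler_data,
                     &establisher, NULL);

    return ExceptionContinueSearch;
}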

+ 0 - 177
drivers/builtin_openssl2/crypto/cast/asm/cast-586.pl

@@ -1,177 +0,0 @@
-#!/usr/local/bin/perl
-
-# define for pentium pro friendly version
-$ppro=1;
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-require "cbc.pl";
-
-&asm_init($ARGV[0],"cast-586.pl",$ARGV[$#ARGV] eq "386");
-
-$CAST_ROUNDS=16;
-$L="edi";
-$R="esi";
-$K="ebp";
-$tmp1="ecx";
-$tmp2="ebx";
-$tmp3="eax";
-$tmp4="edx";
-$S1="CAST_S_table0";
-$S2="CAST_S_table1";
-$S3="CAST_S_table2";
-$S4="CAST_S_table3";
-
-@F1=("add","xor","sub");
-@F2=("xor","sub","add");
-@F3=("sub","add","xor");
-
-&CAST_encrypt("CAST_encrypt",1);
-&CAST_encrypt("CAST_decrypt",0);
-&cbc("CAST_cbc_encrypt","CAST_encrypt","CAST_decrypt",1,4,5,3,-1,-1);
-
-&asm_finish();
-
-sub CAST_encrypt {
-    local($name,$enc)=@_;
-
-    local($win_ex)=<<"EOF";
-EXTERN	_CAST_S_table0:DWORD
-EXTERN	_CAST_S_table1:DWORD
-EXTERN	_CAST_S_table2:DWORD
-EXTERN	_CAST_S_table3:DWORD
-EOF
-    &main::external_label(
-			  "CAST_S_table0",
-			  "CAST_S_table1",
-			  "CAST_S_table2",
-			  "CAST_S_table3",
-			  );
-
-    &function_begin_B($name,$win_ex);
-
-    &comment("");
-
-    &push("ebp");
-    &push("ebx");
-    &mov($tmp2,&wparam(0));
-    &mov($K,&wparam(1));
-    &push("esi");
-    &push("edi");
-
-    &comment("Load the 2 words");
-    &mov($L,&DWP(0,$tmp2,"",0));
-    &mov($R,&DWP(4,$tmp2,"",0));
-
-    &comment('Get short key flag');
-    &mov($tmp3,&DWP(128,$K,"",0));
-    if($enc) {
-	&push($tmp3);
-    } else {
-	&or($tmp3,$tmp3);
-	&jnz(&label('cast_dec_skip'));
-    }
-
-    &xor($tmp3,	$tmp3);
-
-    # encrypting part
-
-    if ($enc) {
-	&E_CAST( 0,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 1,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 2,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 3,$S,$R,$L,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 4,$S,$L,$R,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 5,$S,$R,$L,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 6,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 7,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 8,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 9,$S,$R,$L,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST(10,$S,$L,$R,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST(11,$S,$R,$L,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&comment('test short key flag');
-	&pop($tmp4);
-	&or($tmp4,$tmp4);
-	&jnz(&label('cast_enc_done'));
-	&E_CAST(12,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST(13,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST(14,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST(15,$S,$R,$L,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-    } else {
-	&E_CAST(15,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST(14,$S,$R,$L,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST(13,$S,$L,$R,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST(12,$S,$R,$L,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&set_label('cast_dec_skip');
-	&E_CAST(11,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST(10,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 9,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 8,$S,$R,$L,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 7,$S,$L,$R,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 6,$S,$R,$L,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 5,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 4,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 3,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 2,$S,$R,$L,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 1,$S,$L,$R,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4);
-	&E_CAST( 0,$S,$R,$L,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4);
-    }
-
-    &set_label('cast_enc_done') if $enc;
-# Why the nop? - Ben 17/1/99
-    &nop();
-    &mov($tmp3,&wparam(0));
-    &mov(&DWP(4,$tmp3,"",0),$L);
-    &mov(&DWP(0,$tmp3,"",0),$R);
-    &function_end($name);
-}
-
-sub E_CAST {
-    local($i,$S,$L,$R,$K,$OP1,$OP2,$OP3,$tmp1,$tmp2,$tmp3,$tmp4)=@_;
-    # Ri needs to have 16 pre added.
-
-    &comment("round $i");
-    &mov(	$tmp4,		&DWP($i*8,$K,"",1));
-
-    &mov(	$tmp1,		&DWP($i*8+4,$K,"",1));
-    &$OP1(	$tmp4,		$R);
-
-    &rotl(	$tmp4,		&LB($tmp1));
-
-    if ($ppro) {
-	&mov(	$tmp2,		$tmp4);		# B
-	&xor(	$tmp1,		$tmp1);
-	
-	&movb(	&LB($tmp1),	&HB($tmp4));	# A
-	&and(	$tmp2,		0xff);
-
-	&shr(	$tmp4,		16); 		#
-	&xor(	$tmp3,		$tmp3);
-    } else {
-	&mov(	$tmp2,		$tmp4);		# B
-	&movb(	&LB($tmp1),	&HB($tmp4));	# A	# BAD BAD BAD
-	
-	&shr(	$tmp4,		16); 		#
-	&and(	$tmp2,		0xff);
-    }
-
-    &movb(	&LB($tmp3),	&HB($tmp4));	# C	# BAD BAD BAD
-    &and(	$tmp4,		0xff);		# D
-
-    &mov(	$tmp1,		&DWP($S1,"",$tmp1,4));
-    &mov(	$tmp2,		&DWP($S2,"",$tmp2,4));
-
-    &$OP2(	$tmp1,		$tmp2);
-    &mov(	$tmp2,		&DWP($S3,"",$tmp3,4));
-
-    &$OP3(	$tmp1,		$tmp2);
-    &mov(	$tmp2,		&DWP($S4,"",$tmp4,4));
-
-    &$OP1(	$tmp1,		$tmp2);
-    # XXX
-
-    &xor(	$L,		$tmp1);
-    # XXX
-}
-
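For context: the @F1/@F2/@F3 operator triples above are CAST-128's three round types from RFC 2144 section 2.2, which cycle add/xor/sub in different orders around four S-box lookups. Below is a sketch of the type-1 round in C; it follows the RFC's big-endian byte indexing (the generator's own byte addressing reflects OpenSSL's internal word layout instead), and the zeroed S1..S4 placeholders merely stand in for the real CAST_S_table0..3.

#include <stdint.h>

/* Placeholder S-boxes (all zero) so the sketch compiles on its own;
 * the cipher uses the 256-entry tables from RFC 2144. */
static const uint32_t S1[256], S2[256], S3[256], S4[256];

static uint32_t rotl32(uint32_t x, unsigned n)
{
    return (x << (n & 31)) | (x >> ((32 - n) & 31));
}

/* RFC 2144 type-1 round: I = ((Km + D) <<< Kr);
 * f = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
 * i.e. the add/xor/sub sequence encoded by @F1 above; @F2
 * (xor/sub/add) and @F3 (sub/add/xor) rotate the same operators. */
static uint32_t cast_f1(uint32_t d, uint32_t km, unsigned kr)
{
    uint32_t i = rotl32(km + d, kr);
    return ((S1[i >> 24] ^ S2[(i >> 16) & 0xff])
            - S3[(i >> 8) & 0xff]) + S4[i & 0xff];
}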

+ 2 - 0
drivers/builtin_openssl2/crypto/cast/cast_lcl.h

@@ -152,6 +152,8 @@
 
 #if defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)
 # define ROTL(a,n)     (_lrotl(a,n))
+#elif defined(PEDANTIC)
+# define ROTL(a,n)     ((((a)<<(n))&0xffffffffL)|((a)>>((32-(n))&31)))
 #else
 # define ROTL(a,n)     ((((a)<<(n))&0xffffffffL)|((a)>>(32-(n))))
 #endif
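The new PEDANTIC branch matters because the generic macro right-shifts by 32-n, which is undefined behaviour in C when n == 0 (a shift count equal to the operand width), and CAST's rotate amounts come from key material, so zero can occur. Masking the count with & 31 keeps the shift defined. A minimal sketch of the same idea (names and test values are illustrative; this variant masks both counts, while the header's version masks only the right shift, which suffices for 0 <= n <= 31):

#include <assert.h>
#include <stdint.h>

/* Rotate left with masked shift counts: n == 0 now yields a defined
 * shift by 0 instead of an undefined shift by 32. */
static uint32_t rotl_safe(uint32_t a, unsigned n)
{
    return (a << (n & 31)) | (a >> ((32 - n) & 31));
}

int main(void)
{
    assert(rotl_safe(0x80000001u, 0) == 0x80000001u); /* previously UB */
    assert(rotl_safe(0x80000001u, 1) == 0x00000003u);
    return 0;
}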

+ 0 - 241
drivers/builtin_openssl2/crypto/cast/casttest.c

@@ -1,241 +0,0 @@
-/* crypto/cast/casttest.c */
-/* Copyright (C) 1995-1998 Eric Young ([email protected])
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young ([email protected]).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson ([email protected]).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young ([email protected])"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson ([email protected])"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <openssl/opensslconf.h> /* To see if OPENSSL_NO_CAST is defined */
-
-#include "../e_os.h"
-
-#ifdef OPENSSL_NO_CAST
-int main(int argc, char *argv[])
-{
-    printf("No CAST support\n");
-    return (0);
-}
-#else
-# include <openssl/cast.h>
-
-# define FULL_TEST
-
-static unsigned char k[16] = {
-    0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78,
-    0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A
-};
-
-static unsigned char in[8] =
-    { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF };
-
-static int k_len[3] = { 16, 10, 5 };
-
-static unsigned char c[3][8] = {
-    {0x23, 0x8B, 0x4F, 0xE5, 0x84, 0x7E, 0x44, 0xB2},
-    {0xEB, 0x6A, 0x71, 0x1A, 0x2C, 0x02, 0x27, 0x1B},
-    {0x7A, 0xC8, 0x16, 0xD1, 0x6E, 0x9B, 0x30, 0x2E},
-};
-
-static unsigned char out[80];
-
-static unsigned char in_a[16] = {
-    0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78,
-    0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A
-};
-
-static unsigned char in_b[16] = {
-    0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78,
-    0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A
-};
-
-static unsigned char c_a[16] = {
-    0xEE, 0xA9, 0xD0, 0xA2, 0x49, 0xFD, 0x3B, 0xA6,
-    0xB3, 0x43, 0x6F, 0xB8, 0x9D, 0x6D, 0xCA, 0x92
-};
-
-static unsigned char c_b[16] = {
-    0xB2, 0xC9, 0x5E, 0xB0, 0x0C, 0x31, 0xAD, 0x71,
-    0x80, 0xAC, 0x05, 0xB8, 0xE8, 0x3D, 0x69, 0x6E
-};
-
-# if 0
-char *text = "Hello to all people out there";
-
-static unsigned char cfb_key[16] = {
-    0xe1, 0xf0, 0xc3, 0xd2, 0xa5, 0xb4, 0x87, 0x96,
-    0x69, 0x78, 0x4b, 0x5a, 0x2d, 0x3c, 0x0f, 0x1e,
-};
-static unsigned char cfb_iv[80] =
-    { 0x34, 0x12, 0x78, 0x56, 0xab, 0x90, 0xef, 0xcd };
-static unsigned char cfb_buf1[40], cfb_buf2[40], cfb_tmp[8];
-#  define CFB_TEST_SIZE 24
-static unsigned char plain[CFB_TEST_SIZE] = {
-    0x4e, 0x6f, 0x77, 0x20, 0x69, 0x73,
-    0x20, 0x74, 0x68, 0x65, 0x20, 0x74,
-    0x69, 0x6d, 0x65, 0x20, 0x66, 0x6f,
-    0x72, 0x20, 0x61, 0x6c, 0x6c, 0x20
-};
-
-static unsigned char cfb_cipher64[CFB_TEST_SIZE] = {
-    0x59, 0xD8, 0xE2, 0x65, 0x00, 0x58, 0x6C, 0x3F,
-    0x2C, 0x17, 0x25, 0xD0, 0x1A, 0x38, 0xB7, 0x2A,
-    0x39, 0x61, 0x37, 0xDC, 0x79, 0xFB, 0x9F, 0x45
-/*- 0xF9,0x78,0x32,0xB5,0x42,0x1A,0x6B,0x38,
-    0x9A,0x44,0xD6,0x04,0x19,0x43,0xC4,0xD9,
-    0x3D,0x1E,0xAE,0x47,0xFC,0xCF,0x29,0x0B,*/
-};
-# endif
-
-int main(int argc, char *argv[])
-{
-# ifdef FULL_TEST
-    long l;
-    CAST_KEY key_b;
-# endif
-    int i, z, err = 0;
-    CAST_KEY key;
-
-    for (z = 0; z < 3; z++) {
-        CAST_set_key(&key, k_len[z], k);
-
-        CAST_ecb_encrypt(in, out, &key, CAST_ENCRYPT);
-        if (memcmp(out, &(c[z][0]), 8) != 0) {
-            printf("ecb cast error encrypting for keysize %d\n",
-                   k_len[z] * 8);
-            printf("got     :");
-            for (i = 0; i < 8; i++)
-                printf("%02X ", out[i]);
-            printf("\n");
-            printf("expected:");
-            for (i = 0; i < 8; i++)
-                printf("%02X ", c[z][i]);
-            err = 20;
-            printf("\n");
-        }
-
-        CAST_ecb_encrypt(out, out, &key, CAST_DECRYPT);
-        if (memcmp(out, in, 8) != 0) {
-            printf("ecb cast error decrypting for keysize %d\n",
-                   k_len[z] * 8);
-            printf("got     :");
-            for (i = 0; i < 8; i++)
-                printf("%02X ", out[i]);
-            printf("\n");
-            printf("expected:");
-            for (i = 0; i < 8; i++)
-                printf("%02X ", in[i]);
-            printf("\n");
-            err = 3;
-        }
-    }
-    if (err == 0)
-        printf("ecb cast5 ok\n");
-
-# ifdef FULL_TEST
-    {
-        unsigned char out_a[16], out_b[16];
-        static char *hex = "0123456789ABCDEF";
-
-        printf("This test will take some time....");
-        fflush(stdout);
-        memcpy(out_a, in_a, sizeof(in_a));
-        memcpy(out_b, in_b, sizeof(in_b));
-        i = 1;
-
-        for (l = 0; l < 1000000L; l++) {
-            CAST_set_key(&key_b, 16, out_b);
-            CAST_ecb_encrypt(&(out_a[0]), &(out_a[0]), &key_b, CAST_ENCRYPT);
-            CAST_ecb_encrypt(&(out_a[8]), &(out_a[8]), &key_b, CAST_ENCRYPT);
-            CAST_set_key(&key, 16, out_a);
-            CAST_ecb_encrypt(&(out_b[0]), &(out_b[0]), &key, CAST_ENCRYPT);
-            CAST_ecb_encrypt(&(out_b[8]), &(out_b[8]), &key, CAST_ENCRYPT);
-            if ((l & 0xffff) == 0xffff) {
-                printf("%c", hex[i & 0x0f]);
-                fflush(stdout);
-                i++;
-            }
-        }
-
-        if ((memcmp(out_a, c_a, sizeof(c_a)) != 0) ||
-            (memcmp(out_b, c_b, sizeof(c_b)) != 0)) {
-            printf("\n");
-            printf("Error\n");
-
-            printf("A out =");
-            for (i = 0; i < 16; i++)
-                printf("%02X ", out_a[i]);
-            printf("\nactual=");
-            for (i = 0; i < 16; i++)
-                printf("%02X ", c_a[i]);
-            printf("\n");
-
-            printf("B out =");
-            for (i = 0; i < 16; i++)
-                printf("%02X ", out_b[i]);
-            printf("\nactual=");
-            for (i = 0; i < 16; i++)
-                printf("%02X ", c_b[i]);
-            printf("\n");
-        } else
-            printf(" ok\n");
-    }
-# endif
-
-    EXIT(err);
-    return (err);
-}
-#endif
