package math_big

/*
	Copyright 2021 Jeroen van Rijn <[email protected]>.
	Made available under Odin's BSD-3 license.

	An arbitrary precision mathematics implementation in Odin.
	For the theoretical underpinnings, see Knuth's The Art of Computer Programming, Volume 2, section 4.3.
	The code started out as an idiomatic source port of libTomMath, which is in the public domain, with thanks.

	============================= Private procedures =============================

	Private procedures used by the above low-level routines follow.

	Don't call these yourself unless you really know what you're doing.
	They include implementations that are optimal for certain ranges of input only.

	These aren't exported for the same reasons.
*/

import "core:intrinsics"
import "core:mem"

/*
	Multiplies |a| * |b| and only computes up to `digits` digits of the result.
	HAC pp. 595, Algorithm 14.12. Modified so you can control how many digits of output are created.
*/
_private_int_mul :: proc(dest, a, b: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	/*
		Can we use the fast multiplier?
	*/
	if digits < _WARRAY && min(a.used, b.used) < _MAX_COMBA {
		return #force_inline _private_int_mul_comba(dest, a, b, digits);
	}

	/*
		Set up temporary output `Int`, which we'll swap for `dest` when done.
	*/
	t := &Int{};
	internal_grow(t, max(digits, _DEFAULT_DIGIT_COUNT)) or_return;
	t.used = digits;

	/*
		Compute the digits of the product directly.
	*/
	pa := a.used;
	for ix := 0; ix < pa; ix += 1 {
		/*
			Limit ourselves to `digits` DIGITs of output.
		*/
		pb    := min(b.used, digits - ix);
		carry := _WORD(0);
		iy    := 0;

		/*
			Compute the column of the output and propagate the carry.
		*/
		#no_bounds_check for iy = 0; iy < pb; iy += 1 {
			/*
				Compute the column as a _WORD.
			*/
			column := _WORD(t.digit[ix + iy]) + _WORD(a.digit[ix]) * _WORD(b.digit[iy]) + carry;
			/*
				The new column is the lower part of the result.
			*/
			t.digit[ix + iy] = DIGIT(column & _WORD(_MASK));
			/*
				Get the carry word from the result.
			*/
			carry = column >> _DIGIT_BITS;
		}
		/*
			Set carry if it is placed below `digits`.
		*/
		if ix + iy < digits {
			t.digit[ix + pb] = DIGIT(carry);
		}
	}

	internal_swap(dest, t);
	internal_destroy(t);
	return internal_clamp(dest);
}
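/*
	Usage sketch (illustrative; a direct call like this is hypothetical, since callers
	normally go through `internal_mul`, which dispatches between this routine, comba,
	Karatsuba and Toom-Cook based on operand size):

		t := &Int{};
		defer internal_destroy(t);
		_private_int_mul(t, a, b, digits) or_return; // Only the low `digits` DIGITs of |a| * |b|.
*/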
/*
	Multiplication using the Toom-Cook 3-way algorithm.

	Much more complicated than Karatsuba but has a lower asymptotic running time of O(N**1.464).
	This algorithm is only particularly useful on VERY large inputs.
	(We're talking 1000s of digits here...).

	This file contains code from J. Arndt's book "Matters Computational"
	and the accompanying FXT-library with permission of the author.

	Setup from:
		Chung, Jaewook, and M. Anwar Hasan. "Asymmetric squaring formulae."
		18th IEEE Symposium on Computer Arithmetic (ARITH'07). IEEE, 2007.

	The interpolation from above needed one temporary variable more than the interpolation here:
		Bodrato, Marco, and Alberto Zanoni. "What about Toom-Cook matrices optimality."
		Centro Vito Volterra, Università di Roma Tor Vergata (2006).
*/
_private_int_mul_toom :: proc(dest, a, b: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	S1, S2, T1, a0, a1, a2, b0, b1, b2 := &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(S1, S2, T1, a0, a1, a2, b0, b1, b2);

	/*
		Init temps.
	*/
	internal_init_multi(S1, S2, T1) or_return;

	/*
		B
	*/
	B := min(a.used, b.used) / 3;

	/*
		a = a2 * x^2 + a1 * x + a0;
	*/
	internal_grow(a0, B) or_return;
	internal_grow(a1, B) or_return;
	internal_grow(a2, a.used - 2 * B) or_return;

	a0.used, a1.used = B, B;
	a2.used = a.used - 2 * B;

	internal_copy_digits(a0, a, a0.used) or_return;
	internal_copy_digits(a1, a, a1.used, B) or_return;
	internal_copy_digits(a2, a, a2.used, 2 * B) or_return;

	internal_clamp(a0);
	internal_clamp(a1);
	internal_clamp(a2);

	/*
		b = b2 * x^2 + b1 * x + b0;
	*/
	internal_grow(b0, B) or_return;
	internal_grow(b1, B) or_return;
	internal_grow(b2, b.used - 2 * B) or_return;

	b0.used, b1.used = B, B;
	b2.used = b.used - 2 * B;

	internal_copy_digits(b0, b, b0.used) or_return;
	internal_copy_digits(b1, b, b1.used, B) or_return;
	internal_copy_digits(b2, b, b2.used, 2 * B) or_return;

	internal_clamp(b0);
	internal_clamp(b1);
	internal_clamp(b2);

	/*
		\\ S1 = (a2 + a1 + a0) * (b2 + b1 + b0);
	*/
	internal_add(T1, a2, a1)   or_return; /* T1   = a2 + a1;     */
	internal_add(S2, T1, a0)   or_return; /* S2   = T1 + a0;     */
	internal_add(dest, b2, b1) or_return; /* dest = b2 + b1;     */
	internal_add(S1, dest, b0) or_return; /* S1   = dest + b0;   */
	internal_mul(S1, S1, S2)   or_return; /* S1   = S1 * S2;     */

	/*
		\\ S2 = (4*a2 + 2*a1 + a0) * (4*b2 + 2*b1 + b0);
	*/
	internal_add(T1, T1, a2)      or_return; /* T1   = T1 + a2;   */
	internal_int_shl1(T1, T1)     or_return; /* T1   = T1 << 1;   */
	internal_add(T1, T1, a0)      or_return; /* T1   = T1 + a0;   */
	internal_add(dest, dest, b2)  or_return; /* dest = dest + b2; */
	internal_int_shl1(dest, dest) or_return; /* dest = dest << 1; */
	internal_add(dest, dest, b0)  or_return; /* dest = dest + b0; */
	internal_mul(S2, T1, dest)    or_return; /* S2   = T1 * dest; */

	/*
		\\ S3 = (a2 - a1 + a0) * (b2 - b1 + b0);
	*/
	internal_sub(a1, a2, a1) or_return; /* a1 = a2 - a1; */
	internal_add(a1, a1, a0) or_return; /* a1 = a1 + a0; */
	internal_sub(b1, b2, b1) or_return; /* b1 = b2 - b1; */
	internal_add(b1, b1, b0) or_return; /* b1 = b1 + b0; */
	internal_mul(a1, a1, b1) or_return; /* a1 = a1 * b1; */
	internal_mul(b1, a2, b2) or_return; /* b1 = a2 * b2; */

	/*
		\\ S2 = (S2 - S3) / 3;
	*/
	internal_sub(S2, S2, a1)   or_return; /* S2 = S2 - a1;                             */
	_private_int_div_3(S2, S2) or_return; /* S2 = S2 / 3; \\ this is an exact division */
	internal_sub(a1, S1, a1)   or_return; /* a1 = S1 - a1; */
	internal_int_shr1(a1, a1)  or_return; /* a1 = a1 >> 1; */
	internal_mul(a0, a0, b0)   or_return; /* a0 = a0 * b0; */
	internal_sub(S1, S1, a0)   or_return; /* S1 = S1 - a0; */
	internal_sub(S2, S2, S1)   or_return; /* S2 = S2 - S1; */
	internal_int_shr1(S2, S2)  or_return; /* S2 = S2 >> 1; */
	internal_sub(S1, S1, a1)   or_return; /* S1 = S1 - a1; */
	internal_sub(S1, S1, b1)   or_return; /* S1 = S1 - b1; */
	internal_int_shl1(T1, b1)  or_return; /* T1 = b1 << 1; */
	internal_sub(S2, S2, T1)   or_return; /* S2 = S2 - T1; */
	internal_sub(a1, a1, S2)   or_return; /* a1 = a1 - S2; */

	/*
		P = b1*x^4 + S2*x^3 + S1*x^2 + a1*x + a0;
	*/
	internal_shl_digit(b1, 4 * B) or_return;
	internal_shl_digit(S2, 3 * B) or_return;
	internal_add(b1, b1, S2)      or_return;
	internal_shl_digit(S1, 2 * B) or_return;
	internal_add(b1, b1, S1)      or_return;
	internal_shl_digit(a1, 1 * B) or_return;
	internal_add(b1, b1, a1)      or_return;
	internal_add(dest, b1, a0)    or_return;
	/*
		a * b - P
	*/
	return nil;
}
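/*
	Recap of the five evaluation points used above (illustrative; this restates the
	code, it is not additional API): with f(x) = a2*x^2 + a1*x + a0 and
	g(x) = b2*x^2 + b1*x + b0,

		S1 = f(1)  * g(1)  = (a2 + a1 + a0)     * (b2 + b1 + b0)
		S2 = f(2)  * g(2)  = (4*a2 + 2*a1 + a0) * (4*b2 + 2*b1 + b0)
		a1 = f(-1) * g(-1) = (a2 - a1 + a0)     * (b2 - b1 + b0)     // "S3"
		a0 = f(0)  * g(0)  = a0 * b0
		b1 = a2 * b2                                                 // leading coefficient

	The subtractions, halvings and the exact division by 3 interpolate the five
	coefficients of f(x) * g(x), which the final digit shifts by multiples of B recombine.
*/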
/*
	product = |a| * |b| using Karatsuba multiplication with three half-size multiplications.

	Let `B` represent the radix [e.g. 2**_DIGIT_BITS] and let `n` represent
	half of the number of digits in min(a, b).

	`a` = `a1` * `B`**`n` + `a0`
	`b` = `b1` * `B`**`n` + `b0`

	Then, a * b => a1b1 * B**2n + ((a1 + a0)(b1 + b0) - (a0b0 + a1b1)) * B**n + a0b0

	Note that a1b1 and a0b0 are used twice and only need to be computed once.
	So in total three half-size (half # of digit) multiplications are performed:
	a0b0, a1b1 and (a1 + a0)(b1 + b0).

	Note that a multiplication of half the digits requires 1/4th the number of
	single precision multiplications, so in total after one call 25% of the
	single precision multiplications are saved.

	Note also that the call to `internal_mul` can end up back in this function
	if a0, a1, b0, or b1 are above the threshold.

	This is known as divide-and-conquer and leads to the famous O(N**lg(3)) or O(N**1.584)
	work which is asymptotically lower than the standard O(N**2) that the
	baseline/comba methods use. Generally though, the overhead of this method doesn't pay off
	until a certain size is reached, of around 80 used DIGITs.
*/
_private_int_mul_karatsuba :: proc(dest, a, b: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	x0, x1, y0, y1, t1, x0y0, x1y1 := &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(x0, x1, y0, y1, t1, x0y0, x1y1);

	/*
		min # of digits, divided by two.
	*/
	B := min(a.used, b.used) >> 1;

	/*
		Init all the temps.
	*/
	internal_grow(x0, B) or_return;
	internal_grow(x1, a.used - B) or_return;
	internal_grow(y0, B) or_return;
	internal_grow(y1, b.used - B) or_return;
	internal_grow(t1, B * 2) or_return;
	internal_grow(x0y0, B * 2) or_return;
	internal_grow(x1y1, B * 2) or_return;

	/*
		Now shift the digits.
	*/
	x0.used, y0.used = B, B;
	x1.used = a.used - B;
	y1.used = b.used - B;

	/*
		We copy the digits directly instead of using higher level functions
		since we also need to shift the digits.
	*/
	internal_copy_digits(x0, a, x0.used);
	internal_copy_digits(y0, b, y0.used);
	internal_copy_digits(x1, a, x1.used, B);
	internal_copy_digits(y1, b, y1.used, B);

	/*
		Only need to clamp the lower words since by definition the
		upper words x1/y1 must have a known number of digits.
	*/
	clamp(x0);
	clamp(y0);

	/*
		Now calc the products x0y0 and x1y1,
		after this x0 is no longer required, free temp [x0 == t2]!
	*/
	internal_mul(x0y0, x0, y0) or_return; /* x0y0 = x0 * y0             */
	internal_mul(x1y1, x1, y1) or_return; /* x1y1 = x1 * y1             */
	internal_add(t1, x1, x0)   or_return; /* now calc x1 + x0 and       */
	internal_add(x0, y1, y0)   or_return; /* t2 = y1 + y0               */
	internal_mul(t1, t1, x0)   or_return; /* t1 = (x1 + x0) * (y1 + y0) */

	/*
		Add x0y0.
	*/
	internal_add(x0, x0y0, x1y1) or_return; /* t2 = x0y0 + x1y1                     */
	internal_sub(t1, t1, x0)     or_return; /* t1 = (x1+x0)*(y1+y0) - (x1y1 + x0y0) */

	/*
		Shift by B.
	*/
	internal_shl_digit(t1, B)       or_return; /* t1 = ((x1+x0)*(y1+y0) - (x1y1 + x0y0)) << B */
	internal_shl_digit(x1y1, B * 2) or_return; /* x1y1 = x1y1 << 2*B                          */

	internal_add(t1, x0y0, t1)   or_return; /* t1 = x0y0 + t1          */
	internal_add(dest, t1, x1y1) or_return; /* dest = x0y0 + t1 + x1y1 */

	return nil;
}
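/*
	Worked example in base 10 (illustrative): a = 1234, b = 5678, with B = 10**2
	playing the role of the radix shift.

		x1 = 12, x0 = 34, y1 = 56, y0 = 78
		x1y1 = 672, x0y0 = 2652
		(x1 + x0) * (y1 + y0) = 46 * 134 = 6164
		middle = 6164 - (672 + 2652) = 2840
		a * b  = 672 * 10**4 + 2840 * 10**2 + 2652 = 7_006_652

	The code above does the same in base 2**_DIGIT_BITS, shifting by B digits
	via `internal_shl_digit` instead of multiplying by powers of ten.
*/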
/*
	Fast (comba) multiplier.

	This is the fast column-array [comba] multiplier. It is
	designed to compute the columns of the product first,
	then handle the carries afterwards. This has the effect
	of making the nested loops that compute the columns very
	simple and schedulable on super-scalar processors.

	This has been modified to produce a variable number of
	digits of output so if say only a half-product is required
	you don't have to compute the upper half (a feature
	required for fast Barrett reduction).

	Based on Algorithm 14.12 on pp. 595 of HAC.
*/
_private_int_mul_comba :: proc(dest, a, b: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	/*
		Set up array.
	*/
	W: [_WARRAY]DIGIT = ---;

	/*
		Grow the destination as required.
	*/
	internal_grow(dest, digits) or_return;

	/*
		Number of output digits to produce.
	*/
	pa := min(digits, a.used + b.used);

	/*
		Clear the carry.
	*/
	_W := _WORD(0);

	ix: int;
	for ix = 0; ix < pa; ix += 1 {
		tx, ty, iy, iz: int;

		/*
			Get offsets into the two bignums.
		*/
		ty = min(b.used - 1, ix);
		tx = ix - ty;

		/*
			This is the number of times the loop will iterate, essentially:
				while (tx++ < a->used && ty-- >= 0) { ... }
		*/
		iy = min(a.used - tx, ty + 1);

		/*
			Execute loop.
		*/
		#no_bounds_check for iz = 0; iz < iy; iz += 1 {
			_W += _WORD(a.digit[tx + iz]) * _WORD(b.digit[ty - iz]);
		}

		/*
			Store term.
		*/
		W[ix] = DIGIT(_W) & _MASK;

		/*
			Make next carry.
		*/
		_W = _W >> _WORD(_DIGIT_BITS);
	}

	/*
		Setup dest.
	*/
	old_used := dest.used;
	dest.used = pa;

	/*
		Now extract the previous digit [below the carry].
	*/
	copy_slice(dest.digit[0:], W[:pa]);

	/*
		Clear unused digits [that existed in the old copy of dest].
	*/
	internal_zero_unused(dest, old_used);

	/*
		Adjust dest.used based on leading zeroes.
	*/
	return internal_clamp(dest);
}
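/*
	Worked example (illustrative): a = [a0, a1], b = [b0, b1], least significant DIGIT first.
	The columns produced above are:

		ix = 0: _W += a0*b0
		ix = 1: _W += a1*b0 + a0*b1
		ix = 2: _W += a1*b1

	After each column, W[ix] keeps the low _DIGIT_BITS and the rest carries into the
	next column, so carries are resolved once per column instead of once per product.
*/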
/*
	Multiplies |a| * |b| and does not compute the lower `digits` digits.
	[Meant to get the higher part of the product.]
*/
_private_int_mul_high :: proc(dest, a, b: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	/*
		Can we use the fast multiplier?
	*/
	if a.used + b.used + 1 < _WARRAY && min(a.used, b.used) < _MAX_COMBA {
		return _private_int_mul_high_comba(dest, a, b, digits);
	}

	internal_grow(dest, a.used + b.used + 1) or_return;
	dest.used = a.used + b.used + 1;

	pa := a.used;
	pb := b.used;

	for ix := 0; ix < pa; ix += 1 {
		carry := DIGIT(0);

		for iy := digits - ix; iy < pb; iy += 1 {
			/*
				Calculate the double precision result.
			*/
			r := _WORD(dest.digit[ix + iy]) + _WORD(a.digit[ix]) * _WORD(b.digit[iy]) + _WORD(carry);

			/*
				Get the lower part.
			*/
			dest.digit[ix + iy] = DIGIT(r & _WORD(_MASK));

			/*
				Carry the carry.
			*/
			carry = DIGIT(r >> _WORD(_DIGIT_BITS));
		}
		dest.digit[ix + pb] = carry;
	}
	return internal_clamp(dest);
}
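/*
	Illustrative: with a.used = b.used = 2 and `digits` = 2, the inner loop starts at
	iy = digits - ix, so the partial products whose columns land below `digits`
	(a0*b0, a0*b1, a1*b0) are skipped and only a1*b1 is computed.
	Barrett reduction only needs the high part, which is why this roughly halves the work.
*/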
/*
	This is a modified version of `_private_int_mul_comba` that only produces output digits *above* `digits`.
	See the comments for `_private_int_mul_comba` to see how it works.

	This is used in the Barrett reduction since for one of the multiplications
	only the higher digits were needed. This essentially halves the work.

	Based on Algorithm 14.12 on pp. 595 of HAC.
*/
_private_int_mul_high_comba :: proc(dest, a, b: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	W: [_WARRAY]DIGIT = ---;
	_W: _WORD = 0;

	/*
		Number of output digits to produce. Grow the destination as required.
	*/
	pa := a.used + b.used;
	internal_grow(dest, pa) or_return;

	ix: int;
	for ix = digits; ix < pa; ix += 1 {
		/*
			Get offsets into the two bignums.
		*/
		ty := min(b.used - 1, ix);
		tx := ix - ty;

		/*
			This is the number of times the loop will iterate, essentially:
				while (tx++ < a->used && ty-- >= 0) { ... }
		*/
		iy := min(a.used - tx, ty + 1);

		/*
			Execute loop.
		*/
		for iz := 0; iz < iy; iz += 1 {
			_W += _WORD(a.digit[tx + iz]) * _WORD(b.digit[ty - iz]);
		}

		/*
			Store term.
		*/
		W[ix] = DIGIT(_W) & DIGIT(_MASK);

		/*
			Make next carry.
		*/
		_W = _W >> _WORD(_DIGIT_BITS);
	}

	/*
		Setup dest.
	*/
	old_used := dest.used;
	dest.used = pa;

	for ix = digits; ix < pa; ix += 1 {
		/*
			Now extract the previous digit [below the carry].
		*/
		dest.digit[ix] = W[ix];
	}

	/*
		Zero remainder.
	*/
	internal_zero_unused(dest, old_used);

	/*
		Adjust dest.used based on leading zeroes.
	*/
	return internal_clamp(dest);
}
/*
	Balance multiplication: multiplies a large `a` by a much smaller `b` by cutting
	`a` into chunks of `b`'s size, so each partial multiplication stays balanced.
*/
_private_int_mul_balance :: proc(dest, a, b: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	a, b := a, b;

	a0, tmp, r := &Int{}, &Int{}, &Int{};
	defer internal_destroy(a0, tmp, r);

	b_size   := min(a.used, b.used);
	n_blocks := max(a.used, b.used) / b_size;

	internal_grow(a0, b_size + 2) or_return;
	internal_init_multi(tmp, r) or_return;

	/*
		Make sure that `a` is the larger one.
	*/
	if a.used < b.used {
		a, b = b, a;
	}
	assert(a.used >= b.used);

	i, j := 0, 0;
	for ; i < n_blocks; i += 1 {
		/*
			Cut a slice off of `a`.
		*/
		a0.used = b_size;
		internal_copy_digits(a0, a, a0.used, j);
		j += a0.used;
		internal_clamp(a0);

		/*
			Multiply with `b`.
		*/
		internal_mul(tmp, a0, b) or_return;

		/*
			Shift `tmp` to the correct position.
		*/
		internal_shl_digit(tmp, b_size * i) or_return;

		/*
			Add to output. No carry needed.
		*/
		internal_add(r, r, tmp) or_return;
	}

	/*
		The left-overs; there are always left-overs.
	*/
	if j < a.used {
		a0.used = a.used - j;
		internal_copy_digits(a0, a, a0.used, j);
		j += a0.used;
		internal_clamp(a0);

		internal_mul(tmp, a0, b) or_return;
		internal_shl_digit(tmp, b_size * i) or_return;
		internal_add(r, r, tmp) or_return;
	}

	internal_swap(dest, r);
	return;
}
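/*
	Sketch of the chunking (illustrative): with a.used = 7 and b.used = 2, we get
	b_size = 2 and n_blocks = 3, so the loop computes

		r = (a[0..=1] * b)
		  + (a[2..=3] * b) << 2 digits
		  + (a[4..=5] * b) << 4 digits
		  + (a[6]     * b) << 6 digits   // the left-over block

	keeping every `internal_mul` call roughly balanced, which is where the comba,
	Karatsuba and Toom-Cook paths perform best.
*/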
/*
	Low level squaring, b = a*a, HAC pp. 596-597, Algorithm 14.16.
	Assumes `dest` and `src` not to be `nil`, and `src` to have been initialized.
*/
_private_int_sqr :: proc(dest, src: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	pa := src.used;

	t := &Int{}; ix, iy: int;

	/*
		Grow `t` to maximum needed size, or `_DEFAULT_DIGIT_COUNT`, whichever is bigger.
	*/
	internal_grow(t, max((2 * pa) + 1, _DEFAULT_DIGIT_COUNT)) or_return;
	t.used = (2 * pa) + 1;

	#no_bounds_check for ix = 0; ix < pa; ix += 1 {
		carry := DIGIT(0);
		/*
			First calculate the digit at 2*ix; calculate double precision result.
		*/
		r := _WORD(t.digit[ix+ix]) + (_WORD(src.digit[ix]) * _WORD(src.digit[ix]));

		/*
			Store lower part in result.
		*/
		t.digit[ix+ix] = DIGIT(r & _WORD(_MASK));

		/*
			Get the carry.
		*/
		carry = DIGIT(r >> _DIGIT_BITS);

		#no_bounds_check for iy = ix + 1; iy < pa; iy += 1 {
			/*
				First calculate the product.
			*/
			r = _WORD(src.digit[ix]) * _WORD(src.digit[iy]);

			/*
				Now calculate the double precision result. Note we use
				addition instead of *2 since it's easier to optimize.
			*/
			r = _WORD(t.digit[ix+iy]) + r + r + _WORD(carry);

			/*
				Store lower part.
			*/
			t.digit[ix+iy] = DIGIT(r & _WORD(_MASK));

			/*
				Get carry.
			*/
			carry = DIGIT(r >> _DIGIT_BITS);
		}
		/*
			Propagate upwards.
		*/
		#no_bounds_check for carry != 0 {
			r = _WORD(t.digit[ix+iy]) + _WORD(carry);
			t.digit[ix+iy] = DIGIT(r & _WORD(_MASK));
			carry = DIGIT(r >> _WORD(_DIGIT_BITS));
			iy += 1;
		}
	}

	err = internal_clamp(t);
	internal_swap(dest, t);
	internal_destroy(t);
	return err;
}
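/*
	Illustrative: squaring src = [d0, d1] computes

		t[0] = d0*d0
		t[1] = 2*d0*d1   // the `r + r` doubling above
		t[2] = d1*d1

	i.e. each cross product appears twice and each square once,
	matching (x + y)^2 = x^2 + 2xy + y^2.
*/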
/*
	The gist of squaring...
	You do it like multiplication, except the offset of the tmpx [the one that starts closer to zero]
	can't equal the offset of tmpy. So basically you set up iy like before, then you min it with
	(ty - tx) so that it never happens. You double all those you add in the inner loop.
	After that loop you do the squares and add them in.

	Assumes `dest` and `src` not to be `nil` and `src` to have been initialized.
*/
_private_int_sqr_comba :: proc(dest, src: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	W: [_WARRAY]DIGIT = ---;

	/*
		Grow the destination as required.
	*/
	pa := uint(src.used) + uint(src.used);
	internal_grow(dest, int(pa)) or_return;

	/*
		Number of output digits to produce.
	*/
	W1 := _WORD(0);
	_W : _WORD = ---;

	ix := uint(0);
	#no_bounds_check for ; ix < pa; ix += 1 {
		/*
			Clear counter.
		*/
		_W = {};

		/*
			Get offsets into the two bignums.
		*/
		ty := min(uint(src.used) - 1, ix);
		tx := ix - ty;

		/*
			This is the number of times the loop will iterate,
			essentially while (tx++ < a->used && ty-- >= 0) { ... }
		*/
		iy := min(uint(src.used) - tx, ty + 1);

		/*
			Now for squaring, tx can never equal ty.
			We halve the distance since they approach at a rate of 2x,
			and we have to round because odd cases need to be executed.
		*/
		iy = min(iy, ((ty - tx) + 1) >> 1);

		/*
			Execute loop.
		*/
		#no_bounds_check for iz := uint(0); iz < iy; iz += 1 {
			_W += _WORD(src.digit[tx + iz]) * _WORD(src.digit[ty - iz]);
		}

		/*
			Double the inner product and add carry.
		*/
		_W = _W + _W + W1;

		/*
			Even columns have the square term in them.
		*/
		if ix & 1 == 0 {
			_W += _WORD(src.digit[ix >> 1]) * _WORD(src.digit[ix >> 1]);
		}

		/*
			Store it.
		*/
		W[ix] = DIGIT(_W & _WORD(_MASK));

		/*
			Make next carry.
		*/
		W1 = _W >> _DIGIT_BITS;
	}

	/*
		Setup dest.
	*/
	old_used := dest.used;
	dest.used = src.used + src.used;

	#no_bounds_check for ix = 0; ix < pa; ix += 1 {
		dest.digit[ix] = W[ix] & _MASK;
	}

	/*
		Clear unused digits [that existed in the old copy of dest].
	*/
	internal_zero_unused(dest, old_used);
	return internal_clamp(dest);
}
/*
	Karatsuba squaring, computes `dest` = `src` * `src` using three half-size squarings.

	See comments of `_private_int_mul_karatsuba` for details.
	It is essentially the same algorithm but merely tuned to perform recursive squarings.
*/
_private_int_sqr_karatsuba :: proc(dest, src: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	x0, x1, t1, t2, x0x0, x1x1 := &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(x0, x1, t1, t2, x0x0, x1x1);

	/*
		Min # of digits, divided by two.
	*/
	B := src.used >> 1;

	/*
		Init temps.
	*/
	internal_grow(x0, B) or_return;
	internal_grow(x1, src.used - B) or_return;
	internal_grow(t1, src.used * 2) or_return;
	internal_grow(t2, src.used * 2) or_return;
	internal_grow(x0x0, B * 2) or_return;
	internal_grow(x1x1, (src.used - B) * 2) or_return;

	/*
		Now shift the digits.
	*/
	x0.used = B;
	x1.used = src.used - B;

	#force_inline internal_copy_digits(x0, src, x0.used);
	#force_inline mem.copy_non_overlapping(&x1.digit[0], &src.digit[B], size_of(DIGIT) * x1.used);
	#force_inline internal_clamp(x0);

	/*
		Now calc the products x0*x0 and x1*x1.
	*/
	internal_sqr(x0x0, x0) or_return;
	internal_sqr(x1x1, x1) or_return;

	/*
		Now calc (x1+x0)^2
	*/
	internal_add(t1, x0, x1) or_return;
	internal_sqr(t1, t1) or_return;

	/*
		Subtract x0x0 + x1x1 to isolate the middle term.
	*/
	internal_add(t2, x0x0, x1x1) or_return;
	internal_sub(t1, t1, t2) or_return;

	/*
		Shift by B.
	*/
	internal_shl_digit(t1, B) or_return;
	internal_shl_digit(x1x1, B * 2) or_return;

	internal_add(t1, t1, x0x0) or_return;
	internal_add(dest, t1, x1x1) or_return;

	return #force_inline internal_clamp(dest);
}
/*
	Squaring using the Toom-Cook 3-way algorithm.

	Setup and interpolation from algorithm SQR_3 in:
		Chung, Jaewook, and M. Anwar Hasan. "Asymmetric squaring formulae."
		18th IEEE Symposium on Computer Arithmetic (ARITH'07). IEEE, 2007.
*/
_private_int_sqr_toom :: proc(dest, src: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	S0, a0, a1, a2 := &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(S0, a0, a1, a2);

	/*
		Init temps.
	*/
	internal_zero(S0) or_return;

	/*
		B
	*/
	B := src.used / 3;

	/*
		a = a2 * x^2 + a1 * x + a0;
	*/
	internal_grow(a0, B) or_return;
	internal_grow(a1, B) or_return;
	internal_grow(a2, src.used - (2 * B)) or_return;

	a0.used = B;
	a1.used = B;
	a2.used = src.used - 2 * B;

	#force_inline mem.copy_non_overlapping(&a0.digit[0], &src.digit[0],     size_of(DIGIT) * a0.used);
	#force_inline mem.copy_non_overlapping(&a1.digit[0], &src.digit[B],     size_of(DIGIT) * a1.used);
	#force_inline mem.copy_non_overlapping(&a2.digit[0], &src.digit[2 * B], size_of(DIGIT) * a2.used);

	internal_clamp(a0);
	internal_clamp(a1);
	internal_clamp(a2);

	/** S0 = a0^2; */
	internal_sqr(S0, a0) or_return;

	/** \\S1 = (a2 + a1 + a0)^2 */
	/** \\S2 = (a2 - a1 + a0)^2 */
	/** \\S1 = a0 + a2; */
	/** a0 = a0 + a2; */
	internal_add(a0, a0, a2) or_return;

	/** \\S2 = S1 - a1; */
	/** b = a0 - a1; */
	internal_sub(dest, a0, a1) or_return;

	/** \\S1 = S1 + a1; */
	/** a0 = a0 + a1; */
	internal_add(a0, a0, a1) or_return;

	/** \\S1 = S1^2; */
	/** a0 = a0^2; */
	internal_sqr(a0, a0) or_return;

	/** \\S2 = S2^2; */
	/** b = b^2; */
	internal_sqr(dest, dest) or_return;

	/** \\ S3 = 2 * a1 * a2 */
	/** \\S3 = a1 * a2; */
	/** a1 = a1 * a2; */
	internal_mul(a1, a1, a2) or_return;

	/** \\S3 = S3 << 1; */
	/** a1 = a1 << 1; */
	internal_shl(a1, a1, 1) or_return;

	/** \\S4 = a2^2; */
	/** a2 = a2^2; */
	internal_sqr(a2, a2) or_return;

	/** \\ tmp = (S1 + S2)/2 */
	/** \\tmp = S1 + S2; */
	/** b = a0 + b; */
	internal_add(dest, a0, dest) or_return;

	/** \\tmp = tmp >> 1; */
	/** b = b >> 1; */
	internal_shr(dest, dest, 1) or_return;

	/** \\ S1 = S1 - tmp - S3 */
	/** \\S1 = S1 - tmp; */
	/** a0 = a0 - b; */
	internal_sub(a0, a0, dest) or_return;

	/** \\S1 = S1 - S3; */
	/** a0 = a0 - a1; */
	internal_sub(a0, a0, a1) or_return;

	/** \\S2 = tmp - S4 - S0 */
	/** \\S2 = tmp - S4; */
	/** b = b - a2; */
	internal_sub(dest, dest, a2) or_return;

	/** \\S2 = S2 - S0; */
	/** b = b - S0; */
	internal_sub(dest, dest, S0) or_return;

	/** \\P = S4*x^4 + S3*x^3 + S2*x^2 + S1*x + S0; */
	/** P = a2*x^4 + a1*x^3 + b*x^2 + a0*x + S0; */
	internal_shl_digit(a2,   4 * B) or_return;
	internal_shl_digit(a1,   3 * B) or_return;
	internal_shl_digit(dest, 2 * B) or_return;
	internal_shl_digit(a0,   1 * B) or_return;

	internal_add(a2, a2, a1) or_return;
	internal_add(dest, dest, a2) or_return;
	internal_add(dest, dest, a0) or_return;
	internal_add(dest, dest, S0) or_return;

	/** a^2 - P */
	return #force_inline internal_clamp(dest);
}
/*
	Divide by three (based on routine from MPI and the GMP manual).
*/
_private_int_div_3 :: proc(quotient, numerator: ^Int, allocator := context.allocator) -> (remainder: DIGIT, err: Error) {
	context.allocator = allocator;

	/*
		b = 2^_DIGIT_BITS / 3
	*/
	b := _WORD(1) << _WORD(_DIGIT_BITS) / _WORD(3);

	q := &Int{};
	internal_grow(q, numerator.used) or_return;
	q.used = numerator.used;
	q.sign = numerator.sign;

	w, t: _WORD;
	#no_bounds_check for ix := numerator.used - 1; ix >= 0; ix -= 1 {
		w = (w << _WORD(_DIGIT_BITS)) | _WORD(numerator.digit[ix]);
		if w >= 3 {
			/*
				Multiply w by [1/3].
			*/
			t = (w * b) >> _WORD(_DIGIT_BITS);

			/*
				Now subtract 3 * [w/3] from w, to get the remainder.
			*/
			w -= t+t+t;

			/*
				Fixup the remainder as required since the optimization is not exact.
			*/
			for w >= 3 {
				t += 1;
				w -= 3;
			}
		} else {
			t = 0;
		}
		q.digit[ix] = DIGIT(t);
	}
	remainder = DIGIT(w);

	/*
		[optional] store the quotient.
	*/
	if quotient != nil {
		err = clamp(q);
		internal_swap(q, quotient);
	}
	internal_destroy(q);
	return remainder, nil;
}
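/*
	Worked example (illustrative), assuming _DIGIT_BITS == 28:

		b = (2**28) / 3 = 89_478_485        // the precomputed [1/3]

	For a digit-sized w = 100:

		t = (100 * 89_478_485) >> 28 = 33   // the estimated quotient
		w - 3*t = 100 - 99 = 1              // the remainder; no fixup needed here

	Because `b` is a floored reciprocal, `t` can undershoot slightly,
	which is what the `for w >= 3` fixup loop corrects.
*/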
/*
	Signed Integer Division.

	c*b + d == a [i.e. a/b, c = quotient, d = remainder], HAC pp. 598, Algorithm 14.20.

	Note that the description in HAC is horribly incomplete.
	For example, it doesn't consider the case where digits are removed from 'x' in
	the inner loop.

	It also doesn't consider the case that y has fewer than three digits, etc.
	The overall algorithm is as described in 14.20 from HAC but fixed to treat these cases.
*/
_private_int_div_school :: proc(quotient, remainder, numerator, denominator: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	error_if_immutable(quotient, remainder) or_return;

	q, x, y, t1, t2 := &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(q, x, y, t1, t2);

	internal_grow(q, numerator.used + 2) or_return;
	q.used = numerator.used + 2;

	internal_init_multi(t1, t2) or_return;
	internal_copy(x, numerator) or_return;
	internal_copy(y, denominator) or_return;

	/*
		Fix the sign.
	*/
	neg := numerator.sign != denominator.sign;
	x.sign = .Zero_or_Positive;
	y.sign = .Zero_or_Positive;

	/*
		Normalize both x and y, ensure that y >= b/2, [b == 2**_DIGIT_BITS]
	*/
	norm := internal_count_bits(y) % _DIGIT_BITS;

	if norm < _DIGIT_BITS - 1 {
		norm = (_DIGIT_BITS - 1) - norm;
		internal_shl(x, x, norm) or_return;
		internal_shl(y, y, norm) or_return;
	} else {
		norm = 0;
	}

	/*
		Note: HAC does 0 based, so if used == 5 then it's 0,1,2,3,4, i.e. use 4.
	*/
	n := x.used - 1;
	t := y.used - 1;

	/*
		while (x >= y*b**{n-t}) do { q[n-t] += 1; x -= y*b**{n-t} }
		y = y*b**{n-t}
	*/
	internal_shl_digit(y, n - t) or_return;

	c := internal_cmp(x, y);
	for c != -1 {
		q.digit[n - t] += 1;
		internal_sub(x, x, y) or_return;
		c = internal_cmp(x, y);
	}

	/*
		Reset y by shifting it back down.
	*/
	internal_shr_digit(y, n - t);

	/*
		Step 3. for i from n down to (t + 1).
	*/
	#no_bounds_check for i := n; i >= (t + 1); i -= 1 {
		if (i > x.used) { continue; }

		/*
			Step 3.1: if xi == yt then set q{i-t-1} to b-1,
			otherwise set q{i-t-1} to (xi*b + x{i-1})/yt
		*/
		if x.digit[i] == y.digit[t] {
			q.digit[(i - t) - 1] = 1 << (_DIGIT_BITS - 1);
		} else {
			tmp := _WORD(x.digit[i]) << _DIGIT_BITS;
			tmp |= _WORD(x.digit[i - 1]);
			tmp /= _WORD(y.digit[t]);
			if tmp > _WORD(_MASK) {
				tmp = _WORD(_MASK);
			}
			q.digit[(i - t) - 1] = DIGIT(tmp & _WORD(_MASK));
		}

		/*
			While (q{i-t-1} * (yt * b + y{t-1})) > (xi * b**2 + x{i-1} * b + x{i-2}),
			do q{i-t-1} -= 1;
		*/
		iter := 0;

		q.digit[(i - t) - 1] = (q.digit[(i - t) - 1] + 1) & _MASK;
		#no_bounds_check for {
			q.digit[(i - t) - 1] = (q.digit[(i - t) - 1] - 1) & _MASK;

			/*
				Find left hand.
			*/
			internal_zero(t1);
			t1.digit[0] = ((t - 1) < 0) ? 0 : y.digit[t - 1];
			t1.digit[1] = y.digit[t];
			t1.used = 2;
			internal_mul(t1, t1, q.digit[(i - t) - 1]) or_return;

			/*
				Find right hand.
			*/
			t2.digit[0] = ((i - 2) < 0) ? 0 : x.digit[i - 2];
			t2.digit[1] = x.digit[i - 1]; /* i >= 1 always holds */
			t2.digit[2] = x.digit[i];
			t2.used = 3;

			if t1_t2 := internal_cmp_mag(t1, t2); t1_t2 != 1 {
				break;
			}
			iter += 1; if iter > 100 {
				return .Max_Iterations_Reached;
			}
		}

		/*
			Step 3.3: x = x - q{i-t-1} * y * b**{i-t-1}
		*/
		int_mul_digit(t1, y, q.digit[(i - t) - 1]) or_return;
		internal_shl_digit(t1, (i - t) - 1) or_return;
		internal_sub(x, x, t1) or_return;

		/*
			If x < 0 then { x = x + y*b**{i-t-1}; q{i-t-1} -= 1; }
		*/
		if x.sign == .Negative {
			internal_copy(t1, y) or_return;
			internal_shl_digit(t1, (i - t) - 1) or_return;
			internal_add(x, x, t1) or_return;
			q.digit[(i - t) - 1] = (q.digit[(i - t) - 1] - 1) & _MASK;
		}
	}

	/*
		Now q is the quotient and x is the remainder, [which we have to normalize].
		Get sign before writing to the results.
	*/
	z, _ := is_zero(x);
	x.sign = .Zero_or_Positive if z else numerator.sign;

	if quotient != nil {
		internal_clamp(q);
		internal_swap(q, quotient);
		quotient.sign = .Negative if neg else .Zero_or_Positive;
	}
	if remainder != nil {
		internal_shr(x, x, norm) or_return;
		internal_swap(x, remainder);
	}
	return nil;
}
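/*
	Usage sketch (illustrative; a direct call like this is hypothetical, since callers
	normally reach this through the package's higher-level division routines):

		q, r := &Int{}, &Int{};
		defer internal_destroy(q, r);
		_private_int_div_school(q, r, numerator, denominator) or_return;
		// Now q*denominator + r == numerator, with r carrying numerator's sign.
*/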
/*
	Direct implementation of algorithms 1.8 "RecursiveDivRem" and 1.9 "UnbalancedDivision" from:
		Brent, Richard P., and Paul Zimmermann. "Modern Computer Arithmetic",
		Vol. 18. Cambridge University Press, 2010.
		Available online at https://arxiv.org/pdf/1004.4710

	pages 19ff. in the above online document.
*/
_private_div_recursion :: proc(quotient, remainder, a, b: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	A1, A2, B1, B0, Q1, Q0, R1, R0, t := &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(A1, A2, B1, B0, Q1, Q0, R1, R0, t);

	m := a.used - b.used;
	k := m / 2;

	if m < MUL_KARATSUBA_CUTOFF {
		return _private_int_div_school(quotient, remainder, a, b);
	}

	internal_init_multi(A1, A2, B1, B0, Q1, Q0, R1, R0, t) or_return;

	/*
		`B1` = `b` / `beta`^`k`, `B0` = `b` % `beta`^`k`
	*/
	internal_shrmod(B1, B0, b, k * _DIGIT_BITS) or_return;

	/*
		(Q1, R1) = RecursiveDivRem(A / beta^(2k), B1)
	*/
	internal_shrmod(A1, t, a, 2 * k * _DIGIT_BITS) or_return;
	_private_div_recursion(Q1, R1, A1, B1) or_return;

	/*
		A1 = (R1 * beta^(2k)) + (A % beta^(2k)) - (Q1 * B0 * beta^k)
	*/
	internal_shl_digit(R1, 2 * k) or_return;
	internal_add(A1, R1, t) or_return;
	internal_mul(t, Q1, B0) or_return;
	internal_shl_digit(t, k) or_return;
	internal_sub(A1, A1, t) or_return;

	/*
		While A1 < 0 do Q1 = Q1 - 1, A1 = A1 + (beta^k * B)
	*/
	if internal_cmp(A1, 0) == -1 {
		internal_shl(t, b, k * _DIGIT_BITS) or_return;

		for {
			internal_decr(Q1) or_return;
			internal_add(A1, A1, t) or_return;
			if internal_cmp(A1, 0) != -1 {
				break;
			}
		}
	}

	/*
		(Q0, R0) = RecursiveDivRem(A1 / beta^(k), B1)
	*/
	internal_shrmod(A1, t, A1, k * _DIGIT_BITS) or_return;
	_private_div_recursion(Q0, R0, A1, B1) or_return;

	/*
		A2 = (R0 * beta^k) + (A1 % beta^k) - (Q0 * B0)
	*/
	internal_shl_digit(R0, k) or_return;
	internal_add(A2, R0, t) or_return;
	internal_mul(t, Q0, B0) or_return;
	internal_sub(A2, A2, t) or_return;

	/*
		While A2 < 0 do Q0 = Q0 - 1, A2 = A2 + B.
	*/
	for internal_cmp(A2, 0) == -1 {
		internal_decr(Q0) or_return;
		internal_add(A2, A2, b) or_return;
	}

	/*
		Return q = (Q1 * beta^k) + Q0, r = A2.
	*/
	internal_shl_digit(Q1, k) or_return;
	internal_add(quotient, Q1, Q0) or_return;

	return internal_copy(remainder, A2);
}
_private_int_div_recursive :: proc(quotient, remainder, a, b: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	A, B, Q, Q1, R, A_div, A_mod := &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(A, B, Q, Q1, R, A_div, A_mod);

	internal_init_multi(A, B, Q, Q1, R, A_div, A_mod) or_return;

	/*
		Most significant bit of a limb.
		Assumes `_DIGIT_BITS < 8 * size_of(DIGIT)`, so that `_DIGIT_MAX + 1` doesn't overflow a DIGIT.
	*/
	msb := (_DIGIT_MAX + DIGIT(1)) >> 1;
	sigma := 0;
	msb_b := b.digit[b.used - 1];

	for msb_b < msb {
		sigma += 1;
		msb_b <<= 1;
	}

	/*
		Use that sigma to normalize B.
	*/
	internal_shl(B, b, sigma) or_return;
	internal_shl(A, a, sigma) or_return;

	/*
		Fix the sign.
	*/
	neg := a.sign != b.sign;
	A.sign = .Zero_or_Positive; B.sign = .Zero_or_Positive;

	/*
		If the magnitude of "A" is not more than twice that of "B" we can work
		on them directly, otherwise we need to work at "A" in chunks.
	*/
	n := B.used;
	m := A.used - B.used;

	/*
		Q = 0. We already ensured that when we called `internal_init_multi`.
	*/
	for m > n {
		/*
			(q, r) = RecursiveDivRem(A / (beta^(m-n)), B)
		*/
		j := (m - n) * _DIGIT_BITS;
		internal_shrmod(A_div, A_mod, A, j) or_return;
		_private_div_recursion(Q1, R, A_div, B) or_return;

		/*
			Q = (Q * beta^n) + q
		*/
		internal_shl(Q, Q, n * _DIGIT_BITS) or_return;
		internal_add(Q, Q, Q1) or_return;

		/*
			A = (r * beta^(m-n)) + (A % beta^(m-n))
		*/
		internal_shl(R, R, (m - n) * _DIGIT_BITS) or_return;
		internal_add(A, R, A_mod) or_return;

		/*
			m = m - n
		*/
		m -= n;
	}

	/*
		(q, r) = RecursiveDivRem(A, B)
	*/
	_private_div_recursion(Q1, R, A, B) or_return;

	/*
		Q = (Q * beta^m) + q, R = r
	*/
	internal_shl(Q, Q, m * _DIGIT_BITS) or_return;
	internal_add(Q, Q, Q1) or_return;

	/*
		Get sign before writing to dest.
	*/
	R.sign = .Zero_or_Positive if internal_is_zero(Q) else a.sign;

	if quotient != nil {
		swap(quotient, Q);
		quotient.sign = .Negative if neg else .Zero_or_Positive;
	}
	if remainder != nil {
		/*
			De-normalize the remainder.
		*/
		internal_shrmod(R, nil, R, sigma) or_return;
		swap(remainder, R);
	}
	return nil;
}
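/*
	Illustrative: with A.used = 10 and B.used = 3 (so n = 3, m = 7), the chunk loop
	peels m down as 7 -> 4 -> 1, each round dividing the top digits of what's left
	of `A` by `B` and appending n more quotient digits to `Q`, until the remaining
	m <= n digits are handled by the final `_private_div_recursion` call.
*/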
/*
	Slower bit-bang division... also smaller.
*/
@(deprecated="Use `_private_int_div_school`, it's 3.5x faster.")
_private_int_div_small :: proc(quotient, remainder, numerator, denominator: ^Int) -> (err: Error) {
	ta, tb, tq, q := &Int{}, &Int{}, &Int{}, &Int{};
	c: int;
	defer internal_destroy(ta, tb, tq, q);

	for {
		internal_one(tq) or_return;

		num_bits, _ := count_bits(numerator);
		den_bits, _ := count_bits(denominator);
		n := num_bits - den_bits;

		abs(ta, numerator) or_return;
		abs(tb, denominator) or_return;
		shl(tb, tb, n) or_return;
		shl(tq, tq, n) or_return;

		for n >= 0 {
			if c, _ = cmp_mag(ta, tb); c == 0 || c == 1 {
				// ta -= tb
				sub(ta, ta, tb) or_return;
				// q  += tq
				add(q, q, tq) or_return;
			}
			shr1(tb, tb) or_return;
			shr1(tq, tq) or_return;
			n -= 1;
		}

		/*
			Now q == quotient and ta == remainder.
		*/
		neg := numerator.sign != denominator.sign;
		if quotient != nil {
			swap(quotient, q);
			z, _ := is_zero(quotient);
			quotient.sign = .Negative if neg && !z else .Zero_or_Positive;
		}
		if remainder != nil {
			swap(remainder, ta);
			z, _ := is_zero(numerator);
			remainder.sign = .Zero_or_Positive if z else numerator.sign;
		}
		break;
	}
	return err;
}
/*
	Binary split factorial algorithm due to: http://www.luschny.de/math/factorial/binarysplitfact.html
*/
_private_int_factorial_binary_split :: proc(res: ^Int, n: int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	inner, outer, start, stop, temp := &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(inner, outer, start, stop, temp);

	internal_one(inner, false) or_return;
	internal_one(outer, false) or_return;

	bits_used := int(_DIGIT_TYPE_BITS - intrinsics.count_leading_zeros(n));

	for i := bits_used; i >= 0; i -= 1 {
		start := (n >> (uint(i) + 1)) + 1 | 1;
		stop  := (n >> uint(i)) + 1 | 1;
		_private_int_recursive_product(temp, start, stop, 0) or_return;
		internal_mul(inner, inner, temp) or_return;
		internal_mul(outer, outer, inner) or_return;
	}
	shift := n - intrinsics.count_ones(n);
	return internal_shl(res, outer, int(shift));
}
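/*
	Worked example (illustrative), n = 10:
	The loop multiplies products of odd ranges; the final shift restores the factors
	of two, since 10! = odd_part << (10 - count_ones(10)) = odd_part << 8.
	Indeed 10! = 3_628_800 = 14_175 << 8, where 14_175 = 3^4 * 5^2 * 7 is odd.
*/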
/*
	Recursive product used by the binary split factorial algorithm.
*/
_private_int_recursive_product :: proc(res: ^Int, start, stop: int, level := int(0), allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	t1, t2 := &Int{}, &Int{};
	defer internal_destroy(t1, t2);

	if level > FACTORIAL_BINARY_SPLIT_MAX_RECURSIONS {
		return .Max_Iterations_Reached;
	}

	num_factors := (stop - start) >> 1;
	if num_factors == 2 {
		internal_set(t1, start, false) or_return;
		when true {
			internal_grow(t2, t1.used + 1, false) or_return;
			internal_add(t2, t1, 2) or_return;
		} else {
			internal_add(t2, t1, 2) or_return;
		}
		return internal_mul(res, t1, t2);
	}

	if num_factors > 1 {
		mid := (start + num_factors) | 1;
		_private_int_recursive_product(t1, start, mid, level + 1) or_return;
		_private_int_recursive_product(t2, mid, stop, level + 1) or_return;
		return internal_mul(res, t1, t2);
	}

	if num_factors == 1 {
		return #force_inline internal_set(res, start, true);
	}

	return #force_inline internal_one(res, true);
}
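/*
	Illustrative: `_private_int_recursive_product(res, 3, 11)` computes 3 * 5 * 7 * 9 = 945.
	num_factors = (11 - 3) >> 1 = 4, so it splits at mid = (3 + 4) | 1 = 7 and
	recurses into (3 * 5) * (7 * 9) = 15 * 63.
*/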
/*
	Internal function computing both GCD using the binary method,
	and, if target isn't `nil`, also LCM.

	Expects `a` and `b` to have been initialized
	and one or both of `res_gcd` or `res_lcm` not to be `nil`.

	If both `a` and `b` are zero, the result is zero.
	If either `a` or `b` is zero, the result is the other one.

	The `gcd` and `lcm` wrappers have already done this test,
	but `gcd_lcm` wouldn't have, so we still need to perform it.

	If neither result is wanted, we have nothing to do.
*/
_private_int_gcd_lcm :: proc(res_gcd, res_lcm, a, b: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	if res_gcd == nil && res_lcm == nil {
		return nil;
	}

	/*
		We need a temporary because `res_gcd` is allowed to be `nil`.
	*/
	if a.used == 0 && b.used == 0 {
		/*
			GCD(0, 0) and LCM(0, 0) are both 0.
		*/
		if res_gcd != nil {
			internal_zero(res_gcd) or_return;
		}
		if res_lcm != nil {
			internal_zero(res_lcm) or_return;
		}
		return nil;
	} else if a.used == 0 {
		/*
			We can early out with GCD = B and LCM = 0.
		*/
		if res_gcd != nil {
			internal_abs(res_gcd, b) or_return;
		}
		if res_lcm != nil {
			internal_zero(res_lcm) or_return;
		}
		return nil;
	} else if b.used == 0 {
		/*
			We can early out with GCD = A and LCM = 0.
		*/
		if res_gcd != nil {
			internal_abs(res_gcd, a) or_return;
		}
		if res_lcm != nil {
			internal_zero(res_lcm) or_return;
		}
		return nil;
	}

	temp_gcd_res := &Int{};
	defer internal_destroy(temp_gcd_res);

	/*
		If neither `a` nor `b` was zero, we need to compute `gcd`.
		Get copies of `a` and `b` we can modify.
	*/
	u, v := &Int{}, &Int{};
	defer internal_destroy(u, v);

	internal_copy(u, a) or_return;
	internal_copy(v, b) or_return;

	/*
		Must be positive for the remainder of the algorithm.
	*/
	u.sign = .Zero_or_Positive; v.sign = .Zero_or_Positive;

	/*
		B1. Find the common power of two for `u` and `v`.
	*/
	u_lsb, _ := internal_count_lsb(u);
	v_lsb, _ := internal_count_lsb(v);
	k := min(u_lsb, v_lsb);

	if k > 0 {
		/*
			Divide the power of two out.
		*/
		internal_shr(u, u, k) or_return;
		internal_shr(v, v, k) or_return;
	}

	/*
		Divide any remaining factors of two out.
	*/
	if u_lsb != k {
		internal_shr(u, u, u_lsb - k) or_return;
	}
	if v_lsb != k {
		internal_shr(v, v, v_lsb - k) or_return;
	}

	for v.used != 0 {
		/*
			Make sure `v` is the largest.
		*/
		if internal_cmp_mag(u, v) == 1 {
			/*
				Swap `u` and `v` to make sure `v` is >= `u`.
			*/
			internal_swap(u, v);
		}

		/*
			Subtract smallest from largest.
		*/
		internal_sub(v, v, u) or_return;

		/*
			Divide out all factors of two.
		*/
		b, _ := internal_count_lsb(v);
		internal_shr(v, v, b) or_return;
	}

	/*
		Multiply by 2**k which we divided out at the beginning.
	*/
	internal_shl(temp_gcd_res, u, k) or_return;
	temp_gcd_res.sign = .Zero_or_Positive;

	/*
		We've computed `gcd`, either the long way, or because one of the inputs was zero.
		If we don't want `lcm`, we're done.
	*/
	if res_lcm == nil {
		internal_swap(temp_gcd_res, res_gcd);
		return nil;
	}

	/*
		Computes least common multiple as `|a*b| / gcd(a,b)`.
		Divide the smallest by the GCD.
	*/
	if internal_cmp_mag(a, b) == -1 {
		/*
			Store quotient in `res_lcm` such that `res_lcm * b` is the LCM.
		*/
		internal_div(res_lcm, a, temp_gcd_res) or_return;
		err = internal_mul(res_lcm, res_lcm, b);
	} else {
		/*
			Store quotient in `res_lcm` such that `res_lcm * a` is the LCM.
		*/
		internal_div(res_lcm, b, temp_gcd_res) or_return;
		err = internal_mul(res_lcm, res_lcm, a);
	}

	if res_gcd != nil {
		internal_swap(temp_gcd_res, res_gcd);
	}

	/*
		Fix the sign to positive and return.
	*/
	res_lcm.sign = .Zero_or_Positive;
	return err;
}
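/*
	Worked example (illustrative): a = 12, b = 18.

		u = 12 = 2^2 * 3, v = 18 = 2 * 3^2, so k = min(2, 1) = 1 twos in common.
		After all factors of two are divided out: u = 3, v = 9.
		Loop: v = 9 - 3 = 6, shift twos out -> 3; then v = 3 - 3 = 0.
		gcd = u << k = 3 << 1 = 6, and lcm = (12 / 6) * 18 = 36.
*/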
/*
	Internal implementation of log.
	Assumes `a` not to be `nil` and to have been initialized.
*/
_private_int_log :: proc(a: ^Int, base: DIGIT, allocator := context.allocator) -> (res: int, err: Error) {
	bracket_low, bracket_high, bracket_mid, t, bi_base := &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(bracket_low, bracket_high, bracket_mid, t, bi_base);

	ic := #force_inline internal_cmp(a, base);
	if ic == -1 || ic == 0 {
		return 1 if ic == 0 else 0, nil;
	}

	defer if err != nil {
		res = -1;
	}

	internal_set(bi_base, base, true, allocator) or_return;
	internal_clear(bracket_mid, false, allocator) or_return;
	internal_clear(t, false, allocator) or_return;
	internal_one(bracket_low, false, allocator) or_return;
	internal_set(bracket_high, base, false, allocator) or_return;

	low := 0; high := 1;
	/*
		A kind of Giant-step/baby-step algorithm.
		Idea shamelessly stolen from https://programmingpraxis.com/2010/05/07/integer-logarithms/2/
		The speedup is only asymptotic, so benchmarks are needed to test whether the Giant-step
		should be skipped for small `n`.
	*/
	for {
		/*
			Iterate until `a` is bracketed between `low` and `high`.
		*/
		if #force_inline internal_cmp(bracket_high, a) != -1 {
			break;
		}
		low = high;
		#force_inline internal_copy(bracket_low, bracket_high) or_return;
		high <<= 1;
		#force_inline internal_sqr(bracket_high, bracket_high) or_return;
	}

	for (high - low) > 1 {
		mid := (high + low) >> 1;
		#force_inline internal_pow(t, bi_base, mid - low) or_return;
		#force_inline internal_mul(bracket_mid, bracket_low, t) or_return;

		mc := #force_inline internal_cmp(a, bracket_mid);
		switch mc {
		case -1:
			high = mid;
			internal_swap(bracket_mid, bracket_high);
		case 0:
			return mid, nil;
		case 1:
			low = mid;
			internal_swap(bracket_mid, bracket_low);
		}
	}

	fc := #force_inline internal_cmp(bracket_high, a);
	res = high if fc == 0 else low;
	return;
}
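/*
	Illustrative trace (not part of the library): `_private_int_log` with a = 100, base = 3.
	Bracketing squares the upper bound: (bracket_low, bracket_high) goes
	(1, 3) -> (3, 9) -> (9, 81) -> (81, 6561), with (low, high) going
	(0, 1) -> (1, 2) -> (2, 4) -> (4, 8), stopping once 6561 >= 100.
	The bisection then narrows (low, high): (4, 8) -> (4, 6) -> (4, 5), and since
	3**5 = 243 > 100, the result is `low` = 4, i.e. floor(log base 3 of 100) = 4.
*/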
/*
	Computes xR**-1 == x (mod N) via Montgomery Reduction.
	This is an optimized implementation of `internal_montgomery_reduce`
	which uses the comba method to quickly calculate the columns of the reduction.
	Based on Algorithm 14.32 on pp. 601 of HAC.
*/
_private_montgomery_reduce_comba :: proc(x, n: ^Int, rho: DIGIT, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	W: [_WARRAY]_WORD = ---;
	if x.used > _WARRAY { return .Invalid_Argument; }
	/*
		Get old used count.
	*/
	old_used := x.used;

	/*
		Grow `x` as required.
	*/
	internal_grow(x, n.used + 1) or_return;

	/*
		First we have to get the digits of the input into an array of double precision words W[...].
		Copy the digits of `x` into W[0 .. `x.used` - 1].
	*/
	ix: int;
	for ix = 0; ix < x.used; ix += 1 {
		W[ix] = _WORD(x.digit[ix]);
	}
	/*
		Zero the high words of W[`x.used` .. `n.used` * 2].
	*/
	zero_upper := (n.used * 2) + 1;
	if ix < zero_upper {
		for ix = x.used; ix < zero_upper; ix += 1 {
			W[ix] = {};
		}
	}
	/*
		Now we proceed to zero successive digits from the least significant upwards.
	*/
	for ix = 0; ix < n.used; ix += 1 {
		/*
			`mu = ai * m' mod b`
			We avoid a double precision multiplication (which isn't required)
			by casting the value down to a DIGIT. Note this requires
			that W[ix-1] have the carry cleared (see after the inner loop).
		*/
		mu := ((W[ix] & _WORD(_MASK)) * _WORD(rho)) & _WORD(_MASK);

		/*
			`a = a + mu * m * b**i`
			This is computed in place and on the fly. The multiplication
			by b**i is handled by offsetting which columns the results
			are added to.

			Note the comba method normally doesn't handle carries in the
			inner loop. In this case we fix the carry from the previous
			column since the Montgomery reduction requires digits of the
			result (so far) [see above] to work.

			This is handled by fixing up one carry after the inner loop.
			The carry fixups are done in order, so after these loops the
			first `n.used` words of W[] have the carries fixed.
		*/
		for iy := 0; iy < n.used; iy += 1 {
			W[ix + iy] += mu * _WORD(n.digit[iy]);
		}

		/*
			Now fix the carry for the next digit, W[ix+1].
		*/
		W[ix + 1] += (W[ix] >> _DIGIT_BITS);
	}
	/*
		Now we have to propagate the carries and shift the words downward
		[all those least significant digits we zeroed].
	*/
	for ; ix < n.used * 2; ix += 1 {
		W[ix + 1] += (W[ix] >> _DIGIT_BITS);
	}
	/*
		Copy out, A = A/b**n.
		The result is A/b**n, but instead of converting from an array of
		`_WORD` to `DIGIT` and then shifting right by `n.used` digits,
		we just copy them in the right order.
	*/
	for ix = 0; ix < (n.used + 1); ix += 1 {
		x.digit[ix] = DIGIT(W[n.used + ix] & _WORD(_MASK));
	}
	/*
		Set the max used.
	*/
	x.used = n.used + 1;

	/*
		Zero `old_used` digits; if the input `x` was larger than `n.used` + 1 we'll have to clear the digits.
	*/
	internal_zero_unused(x, old_used);
	internal_clamp(x);

	/*
		If A >= m then A = A - m.
	*/
	if internal_cmp_mag(x, n) != -1 {
		return internal_sub(x, x, n);
	}
	return nil;
}
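/*
	Worked example (illustrative, with base-10 digits for readability; the real radix is
	2**_DIGIT_BITS): take n = 17, R = 10**2 = 100, rho = -1/17 mod 10 = 7, and reduce x = 30,
	i.e. W = [0, 3]. Column 0 gives mu = (0 * 7) % 10 = 0 and adds nothing; column 1 gives
	mu = (3 * 7) % 10 = 1 and adds 1 * 17 * 10 = 170, for a running total of 200. Dropping
	the two zeroed low digits (the divide by R) leaves 2, and indeed
	30 * R**-1 == 2 (mod 17); no final subtraction is needed since 2 < 17.
*/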
/*
	HAC 14.61, pp. 608.
*/
_private_inverse_modulo :: proc(dest, a, b: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	x, y, u, v, A, B, C, D := &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(x, y, u, v, A, B, C, D);

	/*
		`b` cannot be negative.
	*/
	if b.sign == .Negative || internal_is_zero(b) {
		return .Invalid_Argument;
	}

	/*
		Init temps.
	*/
	internal_init_multi(x, y, u, v, A, B, C, D) or_return;

	/*
		`x` = `a` % `b`, `y` = `b`
	*/
	internal_mod(x, a, b) or_return;
	internal_copy(y, b) or_return;

	/*
		2. [modified] If `x` and `y` are both even, return an error!
	*/
	if internal_is_even(x) && internal_is_even(y) {
		return .Invalid_Argument;
	}

	/*
		3. u=x, v=y, A=1, B=0, C=0, D=1
	*/
	internal_copy(u, x) or_return;
	internal_copy(v, y) or_return;
	internal_one(A) or_return;
	internal_one(D) or_return;

	for {
		/*
			4. While `u` is even do:
		*/
		for internal_is_even(u) {
			/*
				4.1 `u` = `u` / 2
			*/
			internal_int_shr1(u, u) or_return;

			/*
				4.2 If `A` or `B` is odd then:
			*/
			if internal_is_odd(A) || internal_is_odd(B) {
				/*
					`A` = (`A`+`y`) / 2, `B` = (`B`-`x`) / 2
				*/
				internal_add(A, A, y) or_return;
				internal_sub(B, B, x) or_return;
			}
			/*
				`A` = `A` / 2, `B` = `B` / 2
			*/
			internal_int_shr1(A, A) or_return;
			internal_int_shr1(B, B) or_return;
		}

		/*
			5. While `v` is even do:
		*/
		for internal_is_even(v) {
			/*
				5.1 `v` = `v` / 2
			*/
			internal_int_shr1(v, v) or_return;

			/*
				5.2 If `C` or `D` is odd then:
			*/
			if internal_is_odd(C) || internal_is_odd(D) {
				/*
					`C` = (`C`+`y`) / 2, `D` = (`D`-`x`) / 2
				*/
				internal_add(C, C, y) or_return;
				internal_sub(D, D, x) or_return;
			}
			/*
				`C` = `C` / 2, `D` = `D` / 2
			*/
			internal_int_shr1(C, C) or_return;
			internal_int_shr1(D, D) or_return;
		}

		/*
			6. If `u` >= `v` then:
		*/
		if internal_cmp(u, v) != -1 {
			/*
				`u` = `u` - `v`, `A` = `A` - `C`, `B` = `B` - `D`
			*/
			internal_sub(u, u, v) or_return;
			internal_sub(A, A, C) or_return;
			internal_sub(B, B, D) or_return;
		} else {
			/*
				`v` = `v` - `u`, `C` = `C` - `A`, `D` = `D` - `B`
			*/
			internal_sub(v, v, u) or_return;
			internal_sub(C, C, A) or_return;
			internal_sub(D, D, B) or_return;
		}

		/*
			If `u` is not zero, goto step 4.
		*/
		if internal_is_zero(u) {
			break;
		}
	}

	/*
		Now `a` = `C`, `b` = `D`, `gcd` == `g`*`v`.
	*/

	/*
		If `v` != `1` then there is no inverse.
	*/
	if internal_cmp(v, 1) != 0 {
		return .Invalid_Argument;
	}
	/*
		If it's too low, add `b` until `C` is non-negative.
	*/
	for internal_cmp(C, 0) == -1 {
		internal_add(C, C, b) or_return;
	}

	/*
		Too big: reduce `C` below `b`.
	*/
	for internal_cmp_mag(C, b) != -1 {
		internal_sub(C, C, b) or_return;
	}
	/*
		`C` is now the inverse.
	*/
	swap(dest, C);
	return;
}
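/*
	Illustrative check (not part of the library): the inverse of 3 mod 7 is 5,
	since 3 * 5 = 15 == 1 (mod 7). A minimal usage sketch, assuming the public
	`inverse_modulo` wrapper this package is presumed to expose:

		res, x, m := &Int{}, &Int{}, &Int{};
		defer destroy(res, x, m);
		set(x, 3);
		set(m, 7);
		inverse_modulo(res, x, m);    // res = 5 if no error occurred.
*/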
/*
	Computes the modular inverse via the binary extended Euclidean algorithm, that is `dest` = 1 / `a` mod `b`.
	Based on the slow invmod above, except this one is optimized for the case where `b` is odd,
	as per HAC Note 14.64 on pp. 610.
*/
_private_inverse_modulo_odd :: proc(dest, a, b: ^Int, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator;

	x, y, u, v, B, D := &Int{}, &Int{}, &Int{}, &Int{}, &Int{}, &Int{};
	defer internal_destroy(x, y, u, v, B, D);

	sign: Sign;

	/*
		2. [modified] `b` must be odd.
	*/
	if internal_is_even(b) { return .Invalid_Argument; }

	/*
		Init all our temps.
	*/
	internal_init_multi(x, y, u, v, B, D) or_return;

	/*
		`x` == modulus, `y` == value to invert.
	*/
	internal_copy(x, b) or_return;

	/*
		We need `y` = `|a|`.
	*/
	internal_mod(y, a, b) or_return;

	/*
		If one of `x`, `y` is zero, return an error!
	*/
	if internal_is_zero(x) || internal_is_zero(y) { return .Invalid_Argument; }

	/*
		3. `u` = `x`, `v` = `y`, `A` = 1, `B` = 0, `C` = 0, `D` = 1
	*/
	internal_copy(u, x) or_return;
	internal_copy(v, y) or_return;
	internal_one(D) or_return;

	for {
		/*
			4. While `u` is even do:
		*/
		for internal_is_even(u) {
			/*
				4.1 `u` = `u` / 2
			*/
			internal_int_shr1(u, u) or_return;

			/*
				4.2 If `B` is odd then:
			*/
			if internal_is_odd(B) {
				/*
					`B` = (`B` - `x`) / 2
				*/
				internal_sub(B, B, x) or_return;
			}

			/*
				`B` = `B` / 2
			*/
			internal_int_shr1(B, B) or_return;
		}

		/*
			5. While `v` is even do:
		*/
		for internal_is_even(v) {
			/*
				5.1 `v` = `v` / 2
			*/
			internal_int_shr1(v, v) or_return;

			/*
				5.2 If `D` is odd then:
			*/
			if internal_is_odd(D) {
				/*
					`D` = (`D` - `x`) / 2
				*/
				internal_sub(D, D, x) or_return;
			}

			/*
				`D` = `D` / 2
			*/
			internal_int_shr1(D, D) or_return;
		}

		/*
			6. If `u` >= `v` then:
		*/
		if internal_cmp(u, v) != -1 {
			/*
				`u` = `u` - `v`, `B` = `B` - `D`
			*/
			internal_sub(u, u, v) or_return;
			internal_sub(B, B, D) or_return;
		} else {
			/*
				`v` = `v` - `u`, `D` = `D` - `B`
			*/
			internal_sub(v, v, u) or_return;
			internal_sub(D, D, B) or_return;
		}

		/*
			If `u` is not zero, goto step 4.
		*/
		if internal_is_zero(u) { break; }
	}
	/*
		Now `D` is the candidate inverse and `v` is the gcd.
	*/
	/*
		If `v` != 1 then there is no inverse.
	*/
	if internal_cmp(v, 1) != 0 {
		return .Invalid_Argument;
	}

	/*
		`D` is now the inverse; bring it into the range [0, `b`).
	*/
	sign = a.sign;
	for internal_int_is_negative(D) {
		internal_add(D, D, b) or_return;
	}

	/*
		Too big.
	*/
	for internal_cmp_mag(D, b) != -1 {
		internal_sub(D, D, b) or_return;
	}

	swap(dest, D);
	dest.sign = sign;
	return nil;
}
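/*
	Illustrative check (not part of the library): with odd modulus b = 9 and a = 7,
	this routine yields 4, since 7 * 4 = 28 == 1 (mod 9). The odd-modulus variant skips
	the `A`/`C` bookkeeping of HAC 14.61 entirely, which is why the internal invmod
	dispatcher is presumed to route here whenever `b` is odd.
*/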
/*
	Returns the log of an `Int` for a `base` that is a power of two.
	Assumes `a` not to be `nil` and to have been initialized.
	Also assumes `base` is a power of two.
*/
_private_log_power_of_two :: proc(a: ^Int, base: DIGIT) -> (log: int, err: Error) {
	base := base;
	y: int;
	for y = 0; base & 1 == 0; {
		y += 1;
		base >>= 1;
	}
	log = internal_count_bits(a);
	return (log - 1) / y, err;
}
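/*
	Worked example (illustrative): for `a` = 4096 and `base` = 8 = 2**3, the loop finds y = 3,
	`internal_count_bits` returns 13 (4096 = 2**12), and (13 - 1) / 3 = 4. Indeed 8**4 = 4096.
*/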
/*
	Copies DIGITs from `src` to `dest`.
	Assumes `src` and `dest` not to be `nil` and to have been initialized.
*/
_private_copy_digits :: proc(dest, src: ^Int, digits: int, offset := int(0)) -> (err: Error) {
	digits := digits;
	/*
		If dest == src, do nothing.
	*/
	if dest == src {
		return nil;
	}

	digits = min(digits, len(src.digit), len(dest.digit));
	mem.copy_non_overlapping(&dest.digit[0], &src.digit[offset], size_of(DIGIT) * digits);
	return nil;
}
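/*
	Usage sketch (illustrative): `_private_copy_digits(dest, src, 2, 1)` copies the two
	DIGITs `src.digit[1]` and `src.digit[2]` into `dest.digit[0]` and `dest.digit[1]`,
	i.e. `digits` DIGITs starting at `offset` within `src`.
*/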
/*
	======================== End of private procedures =======================

	=============================== Private tables ===============================

	Tables used by `internal_*` and `_*`.
*/
_private_int_rem_128 := [?]DIGIT{
	0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
	0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
	1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
	1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
	0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
	1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
	1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
	1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
};
#assert(128 * size_of(DIGIT) == size_of(_private_int_rem_128));
_private_int_rem_105 := [?]DIGIT{
	0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1,
	0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1,
	0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1,
	1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
	0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,
	1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1,
	1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,
};
#assert(105 * size_of(DIGIT) == size_of(_private_int_rem_105));
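/*
	These residue tables are presumed to back the perfect-square pre-filter: an entry of 1
	means no square has that residue, so a candidate can be rejected without computing a
	square root. E.g. squares mod 128 only land on residues such as 0, 1, 4, 9, 16, 25, ...
	(note the 0 entries above at those indices), and 105 = 3 * 5 * 7 catches most of what
	mod 128 misses. A sketch of the filter:

		if _private_int_rem_128[n % 128] == 1 { return false; }    // Cannot be a square.
		if _private_int_rem_105[n % 105] == 1 { return false; }    // Cannot be a square.
		// Otherwise fall through to the exact square-root check.
*/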
_private_prime_table := [?]DIGIT{
	0x0002, 0x0003, 0x0005, 0x0007, 0x000B, 0x000D, 0x0011, 0x0013,
	0x0017, 0x001D, 0x001F, 0x0025, 0x0029, 0x002B, 0x002F, 0x0035,
	0x003B, 0x003D, 0x0043, 0x0047, 0x0049, 0x004F, 0x0053, 0x0059,
	0x0061, 0x0065, 0x0067, 0x006B, 0x006D, 0x0071, 0x007F, 0x0083,
	0x0089, 0x008B, 0x0095, 0x0097, 0x009D, 0x00A3, 0x00A7, 0x00AD,
	0x00B3, 0x00B5, 0x00BF, 0x00C1, 0x00C5, 0x00C7, 0x00D3, 0x00DF,
	0x00E3, 0x00E5, 0x00E9, 0x00EF, 0x00F1, 0x00FB, 0x0101, 0x0107,
	0x010D, 0x010F, 0x0115, 0x0119, 0x011B, 0x0125, 0x0133, 0x0137,
	0x0139, 0x013D, 0x014B, 0x0151, 0x015B, 0x015D, 0x0161, 0x0167,
	0x016F, 0x0175, 0x017B, 0x017F, 0x0185, 0x018D, 0x0191, 0x0199,
	0x01A3, 0x01A5, 0x01AF, 0x01B1, 0x01B7, 0x01BB, 0x01C1, 0x01C9,
	0x01CD, 0x01CF, 0x01D3, 0x01DF, 0x01E7, 0x01EB, 0x01F3, 0x01F7,
	0x01FD, 0x0209, 0x020B, 0x021D, 0x0223, 0x022D, 0x0233, 0x0239,
	0x023B, 0x0241, 0x024B, 0x0251, 0x0257, 0x0259, 0x025F, 0x0265,
	0x0269, 0x026B, 0x0277, 0x0281, 0x0283, 0x0287, 0x028D, 0x0293,
	0x0295, 0x02A1, 0x02A5, 0x02AB, 0x02B3, 0x02BD, 0x02C5, 0x02CF,
	0x02D7, 0x02DD, 0x02E3, 0x02E7, 0x02EF, 0x02F5, 0x02F9, 0x0301,
	0x0305, 0x0313, 0x031D, 0x0329, 0x032B, 0x0335, 0x0337, 0x033B,
	0x033D, 0x0347, 0x0355, 0x0359, 0x035B, 0x035F, 0x036D, 0x0371,
	0x0373, 0x0377, 0x038B, 0x038F, 0x0397, 0x03A1, 0x03A9, 0x03AD,
	0x03B3, 0x03B9, 0x03C7, 0x03CB, 0x03D1, 0x03D7, 0x03DF, 0x03E5,
	0x03F1, 0x03F5, 0x03FB, 0x03FD, 0x0407, 0x0409, 0x040F, 0x0419,
	0x041B, 0x0425, 0x0427, 0x042D, 0x043F, 0x0443, 0x0445, 0x0449,
	0x044F, 0x0455, 0x045D, 0x0463, 0x0469, 0x047F, 0x0481, 0x048B,
	0x0493, 0x049D, 0x04A3, 0x04A9, 0x04B1, 0x04BD, 0x04C1, 0x04C7,
	0x04CD, 0x04CF, 0x04D5, 0x04E1, 0x04EB, 0x04FD, 0x04FF, 0x0503,
	0x0509, 0x050B, 0x0511, 0x0515, 0x0517, 0x051B, 0x0527, 0x0529,
	0x052F, 0x0551, 0x0557, 0x055D, 0x0565, 0x0577, 0x0581, 0x058F,
	0x0593, 0x0595, 0x0599, 0x059F, 0x05A7, 0x05AB, 0x05AD, 0x05B3,
	0x05BF, 0x05C9, 0x05CB, 0x05CF, 0x05D1, 0x05D5, 0x05DB, 0x05E7,
	0x05F3, 0x05FB, 0x0607, 0x060D, 0x0611, 0x0617, 0x061F, 0x0623,
	0x062B, 0x062F, 0x063D, 0x0641, 0x0647, 0x0649, 0x064D, 0x0653,
};
#assert(256 * size_of(DIGIT) == size_of(_private_prime_table));
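/*
	The table above holds the first 256 primes, 2 through 0x0653 = 1619, and is presumed to
	drive trial division in the primality helpers: candidates with a small factor are
	rejected cheaply before any expensive Miller-Rabin rounds. A sketch, assuming a
	mod-by-DIGIT helper along the lines of a hypothetical `internal_mod_digit`:

		for p in _private_prime_table {
			rem := internal_mod_digit(candidate, p) or_return;    // Hypothetical helper name.
			if rem == 0 {
				return false;    // Divisible by a small prime, so composite (unless equal to p).
			}
		}
*/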
when MATH_BIG_FORCE_64_BIT || (!MATH_BIG_FORCE_32_BIT && size_of(rawptr) == 8) {
	_factorial_table := [35]_WORD{
		/* f(00): */ 1,
		/* f(01): */ 1,
		/* f(02): */ 2,
		/* f(03): */ 6,
		/* f(04): */ 24,
		/* f(05): */ 120,
		/* f(06): */ 720,
		/* f(07): */ 5_040,
		/* f(08): */ 40_320,
		/* f(09): */ 362_880,
		/* f(10): */ 3_628_800,
		/* f(11): */ 39_916_800,
		/* f(12): */ 479_001_600,
		/* f(13): */ 6_227_020_800,
		/* f(14): */ 87_178_291_200,
		/* f(15): */ 1_307_674_368_000,
		/* f(16): */ 20_922_789_888_000,
		/* f(17): */ 355_687_428_096_000,
		/* f(18): */ 6_402_373_705_728_000,
		/* f(19): */ 121_645_100_408_832_000,
		/* f(20): */ 2_432_902_008_176_640_000,
		/* f(21): */ 51_090_942_171_709_440_000,
		/* f(22): */ 1_124_000_727_777_607_680_000,
		/* f(23): */ 25_852_016_738_884_976_640_000,
		/* f(24): */ 620_448_401_733_239_439_360_000,
		/* f(25): */ 15_511_210_043_330_985_984_000_000,
		/* f(26): */ 403_291_461_126_605_635_584_000_000,
		/* f(27): */ 10_888_869_450_418_352_160_768_000_000,
		/* f(28): */ 304_888_344_611_713_860_501_504_000_000,
		/* f(29): */ 8_841_761_993_739_701_954_543_616_000_000,
		/* f(30): */ 265_252_859_812_191_058_636_308_480_000_000,
		/* f(31): */ 8_222_838_654_177_922_817_725_562_880_000_000,
		/* f(32): */ 263_130_836_933_693_530_167_218_012_160_000_000,
		/* f(33): */ 8_683_317_618_811_886_495_518_194_401_280_000_000,
		/* f(34): */ 295_232_799_039_604_140_847_618_609_643_520_000_000,
	};
} else {
	_factorial_table := [21]_WORD{
		/* f(00): */ 1,
		/* f(01): */ 1,
		/* f(02): */ 2,
		/* f(03): */ 6,
		/* f(04): */ 24,
		/* f(05): */ 120,
		/* f(06): */ 720,
		/* f(07): */ 5_040,
		/* f(08): */ 40_320,
		/* f(09): */ 362_880,
		/* f(10): */ 3_628_800,
		/* f(11): */ 39_916_800,
		/* f(12): */ 479_001_600,
		/* f(13): */ 6_227_020_800,
		/* f(14): */ 87_178_291_200,
		/* f(15): */ 1_307_674_368_000,
		/* f(16): */ 20_922_789_888_000,
		/* f(17): */ 355_687_428_096_000,
		/* f(18): */ 6_402_373_705_728_000,
		/* f(19): */ 121_645_100_408_832_000,
		/* f(20): */ 2_432_902_008_176_640_000,
	};
};
/*
	========================= End of private tables ========================
*/