/*
 * Copyright (c) 2008-2010 Stefan Krah. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
  27. #include "mpdecimal.h"
  28. #include <stdio.h>
  29. #include <stdlib.h>
  30. #include <string.h>
  31. #include <limits.h>
  32. #include <math.h>
  33. #include "basearith.h"
  34. #include "bits.h"
  35. #include "convolute.h"
  36. #include "crt.h"
  37. #include "errno.h"
  38. #include "memory.h"
  39. #include "typearith.h"
  40. #include "umodarith.h"
  41. #include "mptest.h"
  42. #include "mptypes.h"
  43. #ifdef PPRO
  44. #if defined(_MSC_VER)
  45. #include <float.h>
  46. #pragma fenv_access(on)
  47. #elif !defined(__OpenBSD__) && !defined(__NetBSD__)
  48. /* C99 */
  49. #include <fenv.h>
  50. #pragma STDC FENV_ACCESS ON
  51. #endif
  52. #endif
  53. #if defined(__x86_64__) && defined(__GLIBC__) && !defined(__INTEL_COMPILER)
  54. #define USE_80BIT_LONG_DOUBLE
  55. #endif
  56. #if defined(_MSC_VER)
  57. #define ALWAYS_INLINE __forceinline
  58. #elif defined(LEGACY_COMPILER)
  59. #define ALWAYS_INLINE
  60. #undef inline
  61. #define inline
  62. #else
  63. #ifdef TEST_COVERAGE
  64. #define ALWAYS_INLINE
  65. #else
  66. #define ALWAYS_INLINE inline __attribute__ ((always_inline))
  67. #endif
  68. #endif
  69. #define MPD_NEWTONDIV_CUTOFF 1024L
  70. #define MPD_NEW_STATIC(name, flags, exp, digits, len) \
  71. mpd_uint_t name##_data[MPD_MINALLOC_MAX]; \
  72. mpd_t name = {flags|MPD_STATIC|MPD_STATIC_DATA, exp, digits, \
  73. len, MPD_MINALLOC_MAX, name##_data}
  74. #define MPD_NEW_CONST(name, flags, exp, digits, len, alloc, initval) \
  75. mpd_uint_t name##_data[alloc] = {initval}; \
  76. mpd_t name = {flags|MPD_STATIC|MPD_CONST_DATA, exp, digits, \
  77. len, alloc, name##_data}
  78. #define MPD_NEW_SHARED(name, a) \
  79. mpd_t name = {(a->flags&~MPD_DATAFLAGS)|MPD_STATIC|MPD_SHARED_DATA, \
  80. a->exp, a->digits, a->len, a->alloc, a->data}
/* Shared coefficient words and constant decimals 1, -1 and 0, used
 * internally as operands. The mpd_t structs are const; their data
 * words are tagged MPD_CONST_DATA and must never be written to. */
static mpd_uint_t data_one[1] = {1};
static mpd_uint_t data_zero[1] = {0};
static const mpd_t one = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_one};
static const mpd_t minus_one = {MPD_NEG|MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1,
                                data_one};
static const mpd_t zero = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_zero};
  87. static inline void _mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx,
  88. uint32_t *status);
  89. static void _settriple(mpd_t *result, uint8_t sign, mpd_uint_t a,
  90. mpd_ssize_t exp);
  91. static inline mpd_ssize_t _mpd_real_size(mpd_uint_t *data, mpd_ssize_t size);
  92. static void _mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
  93. const mpd_context_t *ctx, uint32_t *status);
  94. static inline void _mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
  95. const mpd_context_t *ctx, uint32_t *status);
  96. static void _mpd_qbarrett_divmod(mpd_t *q, mpd_t *r, const mpd_t *a,
  97. const mpd_t *b, uint32_t *status);
  98. static inline void _mpd_qpow_uint(mpd_t *result, mpd_t *base, mpd_uint_t exp,
  99. uint8_t resultsign, const mpd_context_t *ctx, uint32_t *status);
  100. mpd_uint_t mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n);
  101. /******************************************************************************/
  102. /* Performance critical inline functions */
  103. /******************************************************************************/
  104. #ifdef CONFIG_64
/* Digits in a word, primarily useful for the most significant word.
 * Branch tree over mpd_pow10[]: returns n with
 * mpd_pow10[n-1] <= word < mpd_pow10[n] (64-bit build: 1 <= n <= 19). */
ALWAYS_INLINE int
mpd_word_digits(mpd_uint_t word)
{
    if (word < mpd_pow10[9]) {
        if (word < mpd_pow10[4]) {
            if (word < mpd_pow10[2]) {
                return (word < mpd_pow10[1]) ? 1 : 2;
            }
            return (word < mpd_pow10[3]) ? 3 : 4;
        }
        if (word < mpd_pow10[6]) {
            return (word < mpd_pow10[5]) ? 5 : 6;
        }
        if (word < mpd_pow10[8]) {
            return (word < mpd_pow10[7]) ? 7 : 8;
        }
        return 9;
    }
    if (word < mpd_pow10[14]) {
        if (word < mpd_pow10[11]) {
            return (word < mpd_pow10[10]) ? 10 : 11;
        }
        if (word < mpd_pow10[13]) {
            return (word < mpd_pow10[12]) ? 12 : 13;
        }
        return 14;
    }
    if (word < mpd_pow10[17]) {
        if (word < mpd_pow10[16]) {
            return (word < mpd_pow10[15]) ? 15 : 16;
        }
        return 17;
    }
    return (word < mpd_pow10[18]) ? 18 : 19;
}
  141. #else
/* 32-bit build: same contract as above, 1 <= n <= 10. */
ALWAYS_INLINE int
mpd_word_digits(mpd_uint_t word)
{
    if (word < mpd_pow10[4]) {
        if (word < mpd_pow10[2]) {
            return (word < mpd_pow10[1]) ? 1 : 2;
        }
        return (word < mpd_pow10[3]) ? 3 : 4;
    }
    if (word < mpd_pow10[6]) {
        return (word < mpd_pow10[5]) ? 5 : 6;
    }
    if (word < mpd_pow10[8]) {
        return (word < mpd_pow10[7]) ? 7 : 8;
    }
    return (word < mpd_pow10[9]) ? 9 : 10;
}
  159. #endif
  160. /* Adjusted exponent */
  161. ALWAYS_INLINE mpd_ssize_t
  162. mpd_adjexp(const mpd_t *dec)
  163. {
  164. return (dec->exp + dec->digits) - 1;
  165. }
  166. /* Etiny */
  167. ALWAYS_INLINE mpd_ssize_t
  168. mpd_etiny(const mpd_context_t *ctx)
  169. {
  170. return ctx->emin - (ctx->prec - 1);
  171. }
  172. /* Etop: used for folding down in IEEE clamping */
  173. ALWAYS_INLINE mpd_ssize_t
  174. mpd_etop(const mpd_context_t *ctx)
  175. {
  176. return ctx->emax - (ctx->prec - 1);
  177. }
/* Most significant word of the coefficient. Requires len >= 1. */
ALWAYS_INLINE mpd_uint_t
mpd_msword(const mpd_t *dec)
{
    assert(dec->len > 0);
    return dec->data[dec->len-1];
}
  185. /* Most significant digit of a word */
  186. inline mpd_uint_t
  187. mpd_msd(mpd_uint_t word)
  188. {
  189. int n;
  190. n = mpd_word_digits(word);
  191. return word / mpd_pow10[n-1];
  192. }
/* Least significant (trailing decimal) digit of a word. */
ALWAYS_INLINE mpd_uint_t
mpd_lsd(mpd_uint_t word)
{
    return word % 10;
}
  199. /* Coefficient size needed to store 'digits' */
  200. ALWAYS_INLINE mpd_ssize_t
  201. mpd_digits_to_size(mpd_ssize_t digits)
  202. {
  203. mpd_ssize_t q, r;
  204. _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
  205. return (r == 0) ? q : q+1;
  206. }
  207. /* Number of digits in the exponent. Not defined for MPD_SSIZE_MIN. */
  208. inline int
  209. mpd_exp_digits(mpd_ssize_t exp)
  210. {
  211. exp = (exp < 0) ? -exp : exp;
  212. return mpd_word_digits(exp);
  213. }
/* Canonical: every mpd_t representation is canonical, so always 1. */
ALWAYS_INLINE int
mpd_iscanonical(const mpd_t *dec UNUSED)
{
    return 1;
}
/* Finite: nonzero iff dec is neither an infinity nor a NaN. */
ALWAYS_INLINE int
mpd_isfinite(const mpd_t *dec)
{
    return !(dec->flags & MPD_SPECIAL);
}
/* Infinite: nonzero iff the infinity flag is set. */
ALWAYS_INLINE int
mpd_isinfinite(const mpd_t *dec)
{
    return dec->flags & MPD_INF;
}
/* NaN: nonzero iff dec is a quiet or a signaling NaN. */
ALWAYS_INLINE int
mpd_isnan(const mpd_t *dec)
{
    return dec->flags & (MPD_NAN|MPD_SNAN);
}
/* Negative: nonzero iff the sign bit is set (includes -0 and -NaN). */
ALWAYS_INLINE int
mpd_isnegative(const mpd_t *dec)
{
    return dec->flags & MPD_NEG;
}
/* Positive: nonzero iff the sign bit is clear. */
ALWAYS_INLINE int
mpd_ispositive(const mpd_t *dec)
{
    return !(dec->flags & MPD_NEG);
}
  250. /* qNaN */
  251. ALWAYS_INLINE int
  252. mpd_isqnan(const mpd_t *dec)
  253. {
  254. return dec->flags & MPD_NAN;
  255. }
  256. /* Signed */
  257. ALWAYS_INLINE int
  258. mpd_issigned(const mpd_t *dec)
  259. {
  260. return dec->flags & MPD_NEG;
  261. }
  262. /* sNaN */
  263. ALWAYS_INLINE int
  264. mpd_issnan(const mpd_t *dec)
  265. {
  266. return dec->flags & MPD_SNAN;
  267. }
  268. /* Special */
  269. ALWAYS_INLINE int
  270. mpd_isspecial(const mpd_t *dec)
  271. {
  272. return dec->flags & MPD_SPECIAL;
  273. }
  274. /* Zero */
  275. ALWAYS_INLINE int
  276. mpd_iszero(const mpd_t *dec)
  277. {
  278. return !mpd_isspecial(dec) && mpd_msword(dec) == 0;
  279. }
  280. /* Test for zero when specials have been ruled out already */
  281. ALWAYS_INLINE int
  282. mpd_iszerocoeff(const mpd_t *dec)
  283. {
  284. return mpd_msword(dec) == 0;
  285. }
  286. /* Normal */
  287. inline int
  288. mpd_isnormal(const mpd_t *dec, const mpd_context_t *ctx)
  289. {
  290. if (mpd_isspecial(dec)) return 0;
  291. if (mpd_iszerocoeff(dec)) return 0;
  292. return mpd_adjexp(dec) >= ctx->emin;
  293. }
  294. /* Subnormal */
  295. inline int
  296. mpd_issubnormal(const mpd_t *dec, const mpd_context_t *ctx)
  297. {
  298. if (mpd_isspecial(dec)) return 0;
  299. if (mpd_iszerocoeff(dec)) return 0;
  300. return mpd_adjexp(dec) < ctx->emin;
  301. }
  302. /* Odd word */
  303. ALWAYS_INLINE int
  304. mpd_isoddword(mpd_uint_t word)
  305. {
  306. return word & 1;
  307. }
  308. /* Odd coefficient */
  309. ALWAYS_INLINE int
  310. mpd_isoddcoeff(const mpd_t *dec)
  311. {
  312. return mpd_isoddword(dec->data[0]);
  313. }
  314. /* 0 if dec is positive, 1 if dec is negative */
  315. ALWAYS_INLINE uint8_t
  316. mpd_sign(const mpd_t *dec)
  317. {
  318. return dec->flags & MPD_NEG;
  319. }
  320. /* 1 if dec is positive, -1 if dec is negative */
  321. ALWAYS_INLINE int
  322. mpd_arith_sign(const mpd_t *dec)
  323. {
  324. return 1 - 2 * mpd_isnegative(dec);
  325. }
/* Radix of the number system: always 10. */
ALWAYS_INLINE long
mpd_radix(void)
{
    return 10;
}
/* Dynamic decimal: the mpd_t struct itself was heap-allocated. */
ALWAYS_INLINE int
mpd_isdynamic(mpd_t *dec)
{
    return !(dec->flags & MPD_STATIC);
}
  338. /* Static decimal */
  339. ALWAYS_INLINE int
  340. mpd_isstatic(mpd_t *dec)
  341. {
  342. return dec->flags & MPD_STATIC;
  343. }
  344. /* Data of decimal is dynamic */
  345. ALWAYS_INLINE int
  346. mpd_isdynamic_data(mpd_t *dec)
  347. {
  348. return !(dec->flags & MPD_DATAFLAGS);
  349. }
  350. /* Data of decimal is static */
  351. ALWAYS_INLINE int
  352. mpd_isstatic_data(mpd_t *dec)
  353. {
  354. return dec->flags & MPD_STATIC_DATA;
  355. }
  356. /* Data of decimal is shared */
  357. ALWAYS_INLINE int
  358. mpd_isshared_data(mpd_t *dec)
  359. {
  360. return dec->flags & MPD_SHARED_DATA;
  361. }
  362. /* Data of decimal is const */
  363. ALWAYS_INLINE int
  364. mpd_isconst_data(mpd_t *dec)
  365. {
  366. return dec->flags & MPD_CONST_DATA;
  367. }
  368. /******************************************************************************/
  369. /* Inline memory handling */
  370. /******************************************************************************/
  371. /* Fill destination with zeros */
  372. ALWAYS_INLINE void
  373. mpd_uint_zero(mpd_uint_t *dest, mpd_size_t len)
  374. {
  375. mpd_size_t i;
  376. for (i = 0; i < len; i++) {
  377. dest[i] = 0;
  378. }
  379. }
/* Free a decimal: releases the coefficient only if it is dynamically
 * owned, then the struct itself only if it was heap-allocated. Safe on
 * static decimals with static/shared/const data (no-op for those parts). */
ALWAYS_INLINE void
mpd_del(mpd_t *dec)
{
    if (mpd_isdynamic_data(dec)) {
        mpd_free(dec->data);
    }
    if (mpd_isdynamic(dec)) {
        mpd_free(dec);
    }
}
  391. /*
  392. * Update the memory size for the coefficient. Existing data up to size is
  393. * left untouched.
  394. *
 * Error handling: When realloc fails, result->data will still be a valid pointer
  396. * to the old memory area of size result->len. If the requested size is less than
  397. * result->len, we can continue normally, so we treat the failure as a soft error.
  398. * If the requested size is greater than the old area, MPD_Malloc_error is
  399. * set and the result will be a NaN.
  400. */
/* Resize the coefficient to 'size' words; existing data up to 'size' is
 * preserved. Static data is promoted to dynamic only when it must grow.
 * Dynamic data is reallocated unless 'size' already matches or is below
 * MPD_MINALLOC. Returns 1 on success, 0 on allocation failure (status
 * gets MPD_Malloc_error). */
ALWAYS_INLINE int
mpd_qresize(mpd_t *result, mpd_ssize_t size, uint32_t *status)
{
    assert(!mpd_isconst_data(result)); /* illegal operation for a const */
    assert(!mpd_isshared_data(result)); /* illegal operation for a shared */

    if (mpd_isstatic_data(result)) {
        if (size > result->alloc) {
            return mpd_switch_to_dyn(result, size, status);
        }
    }
    else if (size != result->alloc && size >= MPD_MINALLOC) {
        return mpd_realloc_dyn(result, size, status);
    }

    return 1;
}
/* Same as mpd_qresize, but the complete coefficient (including the old
 * memory area!) is initialized to zero. Returns 1 on success, 0 on
 * allocation failure. */
ALWAYS_INLINE int
mpd_qresize_zero(mpd_t *result, mpd_ssize_t size, uint32_t *status)
{
    assert(!mpd_isconst_data(result)); /* illegal operation for a const */
    assert(!mpd_isshared_data(result)); /* illegal operation for a shared */

    if (mpd_isstatic_data(result)) {
        if (size > result->alloc) {
            return mpd_switch_to_dyn_zero(result, size, status);
        }
    }
    else if (size != result->alloc && size >= MPD_MINALLOC) {
        if (!mpd_realloc_dyn(result, size, status)) {
            return 0;
        }
    }

    /* zero the first 'size' words unconditionally */
    mpd_uint_zero(result->data, size);
    return 1;
}
  436. /*
  437. * Reduce memory size for the coefficient to MPD_MINALLOC. In theory,
  438. * realloc may fail even when reducing the memory size. But in that case
  439. * the old memory area is always big enough, so checking for MPD_Malloc_error
  440. * is not imperative.
  441. */
  442. ALWAYS_INLINE void
  443. mpd_minalloc(mpd_t *result)
  444. {
  445. assert(!mpd_isconst_data(result)); /* illegal operation for a const */
  446. assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
  447. if (!mpd_isstatic_data(result) && result->alloc > MPD_MINALLOC) {
  448. uint8_t err = 0;
  449. result->data = mpd_realloc(result->data, MPD_MINALLOC,
  450. sizeof *result->data, &err);
  451. if (!err) {
  452. result->alloc = MPD_MINALLOC;
  453. }
  454. }
  455. }
/* Context-raising wrapper around mpd_qresize: on failure, adds the
 * status to the context and possibly raises a trap. Returns 1/0. */
int
mpd_resize(mpd_t *result, mpd_ssize_t size, mpd_context_t *ctx)
{
    uint32_t status = 0;
    if (!mpd_qresize(result, size, &status)) {
        mpd_addstatus_raise(ctx, status);
        return 0;
    }
    return 1;
}
/* Context-raising wrapper around mpd_qresize_zero. Returns 1/0. */
int
mpd_resize_zero(mpd_t *result, mpd_ssize_t size, mpd_context_t *ctx)
{
    uint32_t status = 0;
    if (!mpd_qresize_zero(result, size, &status)) {
        mpd_addstatus_raise(ctx, status);
        return 0;
    }
    return 1;
}
  476. /******************************************************************************/
  477. /* Set attributes of a decimal */
  478. /******************************************************************************/
  479. /* Set digits. result->len is assumed to be correct. */
  480. inline void
  481. mpd_setdigits(mpd_t *result)
  482. {
  483. mpd_ssize_t wdigits = mpd_word_digits(mpd_msword(result));
  484. result->digits = wdigits + (result->len-1) * MPD_RDIGITS;
  485. }
  486. /* Set sign */
  487. ALWAYS_INLINE void
  488. mpd_set_sign(mpd_t *result, uint8_t sign)
  489. {
  490. result->flags &= ~MPD_NEG;
  491. result->flags |= sign;
  492. }
  493. /* Copy sign from another decimal */
  494. ALWAYS_INLINE void
  495. mpd_signcpy(mpd_t *result, mpd_t *a)
  496. {
  497. uint8_t sign = a->flags&MPD_NEG;
  498. result->flags &= ~MPD_NEG;
  499. result->flags |= sign;
  500. }
  501. /* Set infinity */
  502. ALWAYS_INLINE void
  503. mpd_set_infinity(mpd_t *result)
  504. {
  505. result->flags &= ~MPD_SPECIAL;
  506. result->flags |= MPD_INF;
  507. }
  508. /* Set qNaN */
  509. ALWAYS_INLINE void
  510. mpd_set_qnan(mpd_t *result)
  511. {
  512. result->flags &= ~MPD_SPECIAL;
  513. result->flags |= MPD_NAN;
  514. }
  515. /* Set sNaN */
  516. ALWAYS_INLINE void
  517. mpd_set_snan(mpd_t *result)
  518. {
  519. result->flags &= ~MPD_SPECIAL;
  520. result->flags |= MPD_SNAN;
  521. }
  522. /* Set to negative */
  523. ALWAYS_INLINE void
  524. mpd_set_negative(mpd_t *result)
  525. {
  526. result->flags |= MPD_NEG;
  527. }
  528. /* Set to positive */
  529. ALWAYS_INLINE void
  530. mpd_set_positive(mpd_t *result)
  531. {
  532. result->flags &= ~MPD_NEG;
  533. }
  534. /* Set to dynamic */
  535. ALWAYS_INLINE void
  536. mpd_set_dynamic(mpd_t *result)
  537. {
  538. result->flags &= ~MPD_STATIC;
  539. }
  540. /* Set to static */
  541. ALWAYS_INLINE void
  542. mpd_set_static(mpd_t *result)
  543. {
  544. result->flags |= MPD_STATIC;
  545. }
  546. /* Set data to dynamic */
  547. ALWAYS_INLINE void
  548. mpd_set_dynamic_data(mpd_t *result)
  549. {
  550. result->flags &= ~MPD_DATAFLAGS;
  551. }
  552. /* Set data to static */
  553. ALWAYS_INLINE void
  554. mpd_set_static_data(mpd_t *result)
  555. {
  556. result->flags &= ~MPD_DATAFLAGS;
  557. result->flags |= MPD_STATIC_DATA;
  558. }
  559. /* Set data to shared */
  560. ALWAYS_INLINE void
  561. mpd_set_shared_data(mpd_t *result)
  562. {
  563. result->flags &= ~MPD_DATAFLAGS;
  564. result->flags |= MPD_SHARED_DATA;
  565. }
  566. /* Set data to const */
  567. ALWAYS_INLINE void
  568. mpd_set_const_data(mpd_t *result)
  569. {
  570. result->flags &= ~MPD_DATAFLAGS;
  571. result->flags |= MPD_CONST_DATA;
  572. }
  573. /* Clear flags, preserving memory attributes. */
  574. ALWAYS_INLINE void
  575. mpd_clear_flags(mpd_t *result)
  576. {
  577. result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
  578. }
/* Set sign/special flags, preserving memory attributes. */
ALWAYS_INLINE void
mpd_set_flags(mpd_t *result, uint8_t flags)
{
    result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
    result->flags |= flags;
}
  586. /* Copy flags, preserving memory attributes of result. */
  587. ALWAYS_INLINE void
  588. mpd_copy_flags(mpd_t *result, const mpd_t *a)
  589. {
  590. uint8_t aflags = a->flags;
  591. result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
  592. result->flags |= (aflags & ~(MPD_STATIC|MPD_DATAFLAGS));
  593. }
  594. /* Make a work context */
  595. static inline void
  596. mpd_workcontext(mpd_context_t *workctx, const mpd_context_t *ctx)
  597. {
  598. workctx->prec = ctx->prec;
  599. workctx->emax = ctx->emax;
  600. workctx->emin = ctx->emin;
  601. workctx->round = ctx->round;
  602. workctx->traps = 0;
  603. workctx->status= 0;
  604. workctx->newtrap= 0;
  605. workctx->clamp = ctx->clamp;
  606. workctx->allcr = ctx->allcr;
  607. }
  608. /******************************************************************************/
  609. /* Getting and setting parts of decimals */
  610. /******************************************************************************/
  611. /* Flip the sign of a decimal */
  612. static inline void
  613. _mpd_negate(mpd_t *dec)
  614. {
  615. dec->flags ^= MPD_NEG;
  616. }
  617. /* Set coefficient to zero */
  618. void
  619. mpd_zerocoeff(mpd_t *result)
  620. {
  621. mpd_minalloc(result);
  622. result->digits = 1;
  623. result->len = 1;
  624. result->data[0] = 0;
  625. }
  626. /* Set the coefficient to all nines. */
  627. void
  628. mpd_qmaxcoeff(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
  629. {
  630. mpd_ssize_t len, r;
  631. _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
  632. len = (r == 0) ? len : len+1;
  633. if (!mpd_qresize(result, len, status)) {
  634. return;
  635. }
  636. result->len = len;
  637. result->digits = ctx->prec;
  638. --len;
  639. if (r > 0) {
  640. result->data[len--] = mpd_pow10[r]-1;
  641. }
  642. for (; len >= 0; --len) {
  643. result->data[len] = MPD_RADIX-1;
  644. }
  645. }
  646. /*
  647. * Cut off the most significant digits so that the rest fits in ctx->prec.
  648. * Cannot fail.
  649. */
  650. static void
  651. _mpd_cap(mpd_t *result, const mpd_context_t *ctx)
  652. {
  653. uint32_t dummy;
  654. mpd_ssize_t len, r;
  655. if (result->len > 0 && result->digits > ctx->prec) {
  656. _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
  657. len = (r == 0) ? len : len+1;
  658. if (r != 0) {
  659. result->data[len-1] %= mpd_pow10[r];
  660. }
  661. len = _mpd_real_size(result->data, len);
  662. /* resize to fewer words cannot fail */
  663. mpd_qresize(result, len, &dummy);
  664. result->len = len;
  665. mpd_setdigits(result);
  666. }
  667. if (mpd_iszero(result)) {
  668. _settriple(result, mpd_sign(result), 0, result->exp);
  669. }
  670. }
  671. /*
  672. * Cut off the most significant digits of a NaN payload so that the rest
  673. * fits in ctx->prec - ctx->clamp. Cannot fail.
  674. */
  675. static void
  676. _mpd_fix_nan(mpd_t *result, const mpd_context_t *ctx)
  677. {
  678. uint32_t dummy;
  679. mpd_ssize_t prec;
  680. mpd_ssize_t len, r;
  681. prec = ctx->prec - ctx->clamp;
  682. if (result->len > 0 && result->digits > prec) {
  683. if (prec == 0) {
  684. mpd_minalloc(result);
  685. result->len = result->digits = 0;
  686. }
  687. else {
  688. _mpd_idiv_word(&len, &r, prec, MPD_RDIGITS);
  689. len = (r == 0) ? len : len+1;
  690. if (r != 0) {
  691. result->data[len-1] %= mpd_pow10[r];
  692. }
  693. len = _mpd_real_size(result->data, len);
  694. /* resize to fewer words cannot fail */
  695. mpd_qresize(result, len, &dummy);
  696. result->len = len;
  697. mpd_setdigits(result);
  698. if (mpd_iszerocoeff(result)) {
  699. /* NaN0 is not a valid representation */
  700. result->len = result->digits = 0;
  701. }
  702. }
  703. }
  704. }
  705. /*
  706. * Get n most significant digits from a decimal, where 0 < n <= MPD_UINT_DIGITS.
  707. * Assumes MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for 32 and 64 bit
  708. * machines.
  709. *
  710. * The result of the operation will be in lo. If the operation is impossible,
  711. * hi will be nonzero. This is used to indicate an error.
  712. */
  713. static inline void
  714. _mpd_get_msdigits(mpd_uint_t *hi, mpd_uint_t *lo, const mpd_t *dec,
  715. unsigned int n)
  716. {
  717. mpd_uint_t r, tmp;
  718. assert(0 < n && n <= MPD_RDIGITS+1);
  719. _mpd_div_word(&tmp, &r, dec->digits, MPD_RDIGITS);
  720. r = (r == 0) ? MPD_RDIGITS : r; /* digits in the most significant word */
  721. *hi = 0;
  722. *lo = dec->data[dec->len-1];
  723. if (n <= r) {
  724. *lo /= mpd_pow10[r-n];
  725. }
  726. else if (dec->len > 1) {
  727. /* at this point 1 <= r < n <= MPD_RDIGITS+1 */
  728. _mpd_mul_words(hi, lo, *lo, mpd_pow10[n-r]);
  729. tmp = dec->data[dec->len-2] / mpd_pow10[MPD_RDIGITS-(n-r)];
  730. *lo = *lo + tmp;
  731. if (*lo < tmp) (*hi)++;
  732. }
  733. }
  734. /******************************************************************************/
  735. /* Gathering information about a decimal */
  736. /******************************************************************************/
  737. /* The real size of the coefficient without leading zero words. */
  738. static inline mpd_ssize_t
  739. _mpd_real_size(mpd_uint_t *data, mpd_ssize_t size)
  740. {
  741. while (size > 1 && data[size-1] == 0) {
  742. size--;
  743. }
  744. return size;
  745. }
  746. /* Return number of trailing zeros. No errors are possible. */
  747. mpd_ssize_t
  748. mpd_trail_zeros(const mpd_t *dec)
  749. {
  750. mpd_uint_t word;
  751. mpd_ssize_t i, tz = 0;
  752. for (i=0; i < dec->len; ++i) {
  753. if (dec->data[i] != 0) {
  754. word = dec->data[i];
  755. tz = i * MPD_RDIGITS;
  756. while (word % 10 == 0) {
  757. word /= 10;
  758. tz++;
  759. }
  760. break;
  761. }
  762. }
  763. return tz;
  764. }
  765. /* Integer: Undefined for specials */
  766. static int
  767. _mpd_isint(const mpd_t *dec)
  768. {
  769. mpd_ssize_t tz;
  770. if (mpd_iszerocoeff(dec)) {
  771. return 1;
  772. }
  773. tz = mpd_trail_zeros(dec);
  774. return (dec->exp + tz >= 0);
  775. }
  776. /* Integer */
  777. int
  778. mpd_isinteger(const mpd_t *dec)
  779. {
  780. if (mpd_isspecial(dec)) {
  781. return 0;
  782. }
  783. return _mpd_isint(dec);
  784. }
  785. /* Word is a power of 10 */
  786. static int
  787. mpd_word_ispow10(mpd_uint_t word)
  788. {
  789. int n;
  790. n = mpd_word_digits(word);
  791. if (word == mpd_pow10[n-1]) {
  792. return 1;
  793. }
  794. return 0;
  795. }
  796. /* Coefficient is a power of 10 */
  797. static int
  798. mpd_coeff_ispow10(const mpd_t *dec)
  799. {
  800. if (mpd_word_ispow10(mpd_msword(dec))) {
  801. if (_mpd_isallzero(dec->data, dec->len-1)) {
  802. return 1;
  803. }
  804. }
  805. return 0;
  806. }
  807. /* All digits of a word are nines */
  808. static int
  809. mpd_word_isallnine(mpd_uint_t word)
  810. {
  811. int n;
  812. n = mpd_word_digits(word);
  813. if (word == mpd_pow10[n]-1) {
  814. return 1;
  815. }
  816. return 0;
  817. }
  818. /* All digits of the coefficient are nines */
  819. static int
  820. mpd_coeff_isallnine(const mpd_t *dec)
  821. {
  822. if (mpd_word_isallnine(mpd_msword(dec))) {
  823. if (_mpd_isallnine(dec->data, dec->len-1)) {
  824. return 1;
  825. }
  826. }
  827. return 0;
  828. }
  829. /* Odd decimal: Undefined for non-integers! */
  830. int
  831. mpd_isodd(const mpd_t *dec)
  832. {
  833. mpd_uint_t q, r;
  834. assert(mpd_isinteger(dec));
  835. if (mpd_iszerocoeff(dec)) return 0;
  836. if (dec->exp < 0) {
  837. _mpd_div_word(&q, &r, -dec->exp, MPD_RDIGITS);
  838. q = dec->data[q] / mpd_pow10[r];
  839. return mpd_isoddword(q);
  840. }
  841. return dec->exp == 0 && mpd_isoddword(dec->data[0]);
  842. }
  843. /* Even: Undefined for non-integers! */
  844. int
  845. mpd_iseven(const mpd_t *dec)
  846. {
  847. return !mpd_isodd(dec);
  848. }
  849. /******************************************************************************/
  850. /* Getting and setting decimals */
  851. /******************************************************************************/
/* Internal function: Set a static decimal from a triple, no error checking.
 * 'a' may need up to two words (MPD_UINT_DIGITS == MPD_RDIGITS+1), so
 * result->data must hold at least two words. */
static void
_ssettriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
{
    mpd_set_flags(result, sign);
    result->exp = exp;
    _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
    result->len = (result->data[1] == 0) ? 1 : 2;
    mpd_setdigits(result);
}
  862. /* Internal function: Set a decimal from a triple, no error checking. */
  863. static void
  864. _settriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
  865. {
  866. mpd_minalloc(result);
  867. mpd_set_flags(result, sign);
  868. result->exp = exp;
  869. _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
  870. result->len = (result->data[1] == 0) ? 1 : 2;
  871. mpd_setdigits(result);
  872. }
  873. /* Set a special number from a triple */
  874. void
  875. mpd_setspecial(mpd_t *result, uint8_t sign, uint8_t type)
  876. {
  877. mpd_minalloc(result);
  878. result->flags &= ~(MPD_NEG|MPD_SPECIAL);
  879. result->flags |= (sign|type);
  880. result->exp = result->digits = result->len = 0;
  881. }
  882. /* Set result of NaN with an error status */
  883. void
  884. mpd_seterror(mpd_t *result, uint32_t flags, uint32_t *status)
  885. {
  886. mpd_minalloc(result);
  887. mpd_set_qnan(result);
  888. mpd_set_positive(result);
  889. result->exp = result->digits = result->len = 0;
  890. *status |= flags;
  891. }
  892. /* quietly set a static decimal from an mpd_ssize_t */
  893. void
  894. mpd_qsset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
  895. uint32_t *status)
  896. {
  897. mpd_uint_t u;
  898. uint8_t sign = MPD_POS;
  899. if (a < 0) {
  900. if (a == MPD_SSIZE_MIN) {
  901. u = (mpd_uint_t)MPD_SSIZE_MAX +
  902. (-(MPD_SSIZE_MIN+MPD_SSIZE_MAX));
  903. }
  904. else {
  905. u = -a;
  906. }
  907. sign = MPD_NEG;
  908. }
  909. else {
  910. u = a;
  911. }
  912. _ssettriple(result, sign, u, 0);
  913. mpd_qfinalize(result, ctx, status);
  914. }
  915. /* quietly set a static decimal from an mpd_uint_t */
  916. void
  917. mpd_qsset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
  918. uint32_t *status)
  919. {
  920. _ssettriple(result, MPD_POS, a, 0);
  921. mpd_qfinalize(result, ctx, status);
  922. }
  923. /* quietly set a static decimal from an int32_t */
  924. void
  925. mpd_qsset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
  926. uint32_t *status)
  927. {
  928. mpd_qsset_ssize(result, a, ctx, status);
  929. }
/* quietly set a static decimal from a uint32_t (widens to mpd_uint_t) */
void
mpd_qsset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    mpd_qsset_uint(result, a, ctx, status);
}
  937. #ifdef CONFIG_64
/* quietly set a static decimal from an int64_t (64-bit build only:
 * int64_t fits mpd_ssize_t) */
void
mpd_qsset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    mpd_qsset_ssize(result, a, ctx, status);
}
/* quietly set a static decimal from a uint64_t (64-bit build only:
 * uint64_t fits mpd_uint_t) */
void
mpd_qsset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    mpd_qsset_uint(result, a, ctx, status);
}
  952. #endif
/* quietly set a decimal from an mpd_ssize_t; unlike the sset variant,
 * shrinks the allocation first */
void
mpd_qset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
               uint32_t *status)
{
    mpd_minalloc(result);
    mpd_qsset_ssize(result, a, ctx, status);
}
  961. /* quietly set a decimal from an mpd_uint_t */
  962. void
  963. mpd_qset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
  964. uint32_t *status)
  965. {
  966. _settriple(result, MPD_POS, a, 0);
  967. mpd_qfinalize(result, ctx, status);
  968. }
  969. /* quietly set a decimal from an int32_t */
  970. void
  971. mpd_qset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
  972. uint32_t *status)
  973. {
  974. mpd_qset_ssize(result, a, ctx, status);
  975. }
/* quietly set a decimal from a uint32_t */
void
mpd_qset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
             uint32_t *status)
{
    mpd_qset_uint(result, a, ctx, status);
}
  983. #if defined(CONFIG_32) && !defined(LEGACY_COMPILER)
/* set a decimal from a uint64_t (32-bit build: the value may need up to
 * three coefficient words) */
static void
_c32setu64(mpd_t *result, uint64_t u, uint8_t sign, uint32_t *status)
{
    mpd_uint_t w[3];
    uint64_t q;
    int i, len;

    /* decompose u into base-MPD_RADIX words, least significant first */
    len = 0;
    do {
        q = u / MPD_RADIX;
        w[len] = (mpd_uint_t)(u - q * MPD_RADIX);
        u = q; len++;
    } while (u != 0);

    if (!mpd_qresize(result, len, status)) {
        return;
    }
    for (i = 0; i < len; i++) {
        result->data[i] = w[i];
    }

    mpd_set_sign(result, sign);
    result->exp = 0;
    result->len = len;
    mpd_setdigits(result);
}
/* 32-bit build: quietly set a decimal from a uint64_t and round to ctx. */
static void
_c32_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    _c32setu64(result, a, MPD_POS, status);
    mpd_qfinalize(result, ctx, status);
}
/* 32-bit build: quietly set a decimal from an int64_t and round to ctx. */
static void
_c32_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
              uint32_t *status)
{
    uint64_t u;
    uint8_t sign = MPD_POS;

    if (a < 0) {
        if (a == INT64_MIN) {
            /* -INT64_MIN would overflow; build its magnitude in
             * unsigned arithmetic instead */
            u = (uint64_t)INT64_MAX + (-(INT64_MIN+INT64_MAX));
        }
        else {
            u = -a;
        }
        sign = MPD_NEG;
    }
    else {
        u = a;
    }
    _c32setu64(result, u, sign, status);
    mpd_qfinalize(result, ctx, status);
}
  1037. #endif /* CONFIG_32 && !LEGACY_COMPILER */
  1038. #ifndef LEGACY_COMPILER
/* quietly set a decimal from an int64_t; dispatches to the native
 * ssize path on 64-bit builds and to the multiword path on 32-bit */
void
mpd_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
             uint32_t *status)
{
#ifdef CONFIG_64
    mpd_qset_ssize(result, a, ctx, status);
#else
    _c32_qset_i64(result, a, ctx, status);
#endif
}
/* quietly set a decimal from a uint64_t; dispatches like mpd_qset_i64 */
void
mpd_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
             uint32_t *status)
{
#ifdef CONFIG_64
    mpd_qset_uint(result, a, ctx, status);
#else
    _c32_qset_u64(result, a, ctx, status);
#endif
}
  1061. #endif /* !LEGACY_COMPILER */
/*
 * Quietly get an mpd_uint_t from a decimal. Assumes
 * MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for
 * 32 and 64 bit machines.
 *
 * If 'use_sign' is nonzero, negative operands are rejected; otherwise
 * the absolute value is returned.
 *
 * If the operation is impossible, MPD_Invalid_operation is set and
 * MPD_UINT_MAX is returned.
 */
static mpd_uint_t
_mpd_qget_uint(int use_sign, const mpd_t *a, uint32_t *status)
{
    mpd_t tmp;
    mpd_uint_t tmp_data[2];
    mpd_uint_t lo, hi;

    if (mpd_isspecial(a)) {
        *status |= MPD_Invalid_operation;
        return MPD_UINT_MAX;
    }
    if (mpd_iszero(a)) {
        return 0;
    }
    if (use_sign && mpd_isnegative(a)) {
        *status |= MPD_Invalid_operation;
        return MPD_UINT_MAX;
    }
    /* coeff * 10**exp has more digits than any mpd_uint_t can hold */
    if (a->digits+a->exp > MPD_RDIGITS+1) {
        *status |= MPD_Invalid_operation;
        return MPD_UINT_MAX;
    }

    if (a->exp < 0) {
        /* a fractional part must be exactly zero */
        if (!_mpd_isint(a)) {
            *status |= MPD_Invalid_operation;
            return MPD_UINT_MAX;
        }
        /* At this point a->digits+a->exp <= MPD_RDIGITS+1,
         * so the shift fits. Strip the trailing zeros into the
         * stack-backed 'tmp' and continue with the integer part. */
        tmp.data = tmp_data;
        tmp.flags = MPD_STATIC|MPD_CONST_DATA;
        mpd_qsshiftr(&tmp, a, -a->exp);
        tmp.exp = 0;
        a = &tmp;
    }

    _mpd_get_msdigits(&hi, &lo, a, MPD_RDIGITS+1);
    if (hi) {
        /* more than MPD_RDIGITS significant digits: cannot fit */
        *status |= MPD_Invalid_operation;
        return MPD_UINT_MAX;
    }

    if (a->exp > 0) {
        /* scale by the remaining positive exponent; hi != 0 is overflow */
        _mpd_mul_words(&hi, &lo, lo, mpd_pow10[a->exp]);
        if (hi) {
            *status |= MPD_Invalid_operation;
            return MPD_UINT_MAX;
        }
    }

    return lo;
}
/*
 * Get an mpd_uint_t from a decimal, honoring the sign.
 *
 * Sets Invalid_operation for:
 *   - specials
 *   - negative numbers (except negative zero)
 *   - non-integers
 *   - overflow
 */
mpd_uint_t
mpd_qget_uint(const mpd_t *a, uint32_t *status)
{
    return _mpd_qget_uint(1, a, status);
}
/* Same as above, but gets the absolute value, i.e. the sign is ignored. */
mpd_uint_t
mpd_qabs_uint(const mpd_t *a, uint32_t *status)
{
    return _mpd_qget_uint(0, a, status);
}
/* Quietly get an mpd_ssize_t from a decimal. On failure, Invalid_operation
 * is set and MPD_SSIZE_MAX is returned. */
mpd_ssize_t
mpd_qget_ssize(const mpd_t *a, uint32_t *status)
{
    mpd_uint_t u;
    int isneg;

    /* absolute value; rejects specials, non-integers and overflow */
    u = mpd_qabs_uint(a, status);
    if (*status&MPD_Invalid_operation) {
        return MPD_SSIZE_MAX;
    }

    isneg = mpd_isnegative(a);
    if (u <= MPD_SSIZE_MAX) {
        return isneg ? -((mpd_ssize_t)u) : (mpd_ssize_t)u;
    }
    /* |MPD_SSIZE_MIN| == MPD_SSIZE_MAX+1 is representable only when
     * the operand is negative; compare via u-1 to avoid overflow. */
    else if (isneg && u-1 == MPD_SSIZE_MAX) {
        return MPD_SSIZE_MIN;
    }

    *status |= MPD_Invalid_operation;
    return MPD_SSIZE_MAX;
}
#ifdef CONFIG_64
/* quietly get a uint64_t from a decimal; mpd_uint_t covers the full
 * uint64_t range on CONFIG_64 builds */
uint64_t
mpd_qget_u64(const mpd_t *a, uint32_t *status)
{
    return mpd_qget_uint(a, status);
}

/* quietly get an int64_t from a decimal; mpd_ssize_t covers the full
 * int64_t range on CONFIG_64 builds */
int64_t
mpd_qget_i64(const mpd_t *a, uint32_t *status)
{
    return mpd_qget_ssize(a, status);
}
#else
/* quietly get a uint32_t from a decimal */
uint32_t
mpd_qget_u32(const mpd_t *a, uint32_t *status)
{
    return mpd_qget_uint(a, status);
}

/* quietly get an int32_t from a decimal */
int32_t
mpd_qget_i32(const mpd_t *a, uint32_t *status)
{
    return mpd_qget_ssize(a, status);
}
#endif
  1182. /******************************************************************************/
  1183. /* Filtering input of functions, finalizing output of functions */
  1184. /******************************************************************************/
  1185. /*
  1186. * Check if the operand is NaN, copy to result and return 1 if this is
  1187. * the case. Copying can fail since NaNs are allowed to have a payload that
  1188. * does not fit in MPD_MINALLOC.
  1189. */
  1190. int
  1191. mpd_qcheck_nan(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  1192. uint32_t *status)
  1193. {
  1194. if (mpd_isnan(a)) {
  1195. *status |= mpd_issnan(a) ? MPD_Invalid_operation : 0;
  1196. mpd_qcopy(result, a, status);
  1197. mpd_set_qnan(result);
  1198. _mpd_fix_nan(result, ctx);
  1199. return 1;
  1200. }
  1201. return 0;
  1202. }
  1203. /*
  1204. * Check if either operand is NaN, copy to result and return 1 if this
  1205. * is the case. Copying can fail since NaNs are allowed to have a payload
  1206. * that does not fit in MPD_MINALLOC.
  1207. */
  1208. int
  1209. mpd_qcheck_nans(mpd_t *result, const mpd_t *a, const mpd_t *b,
  1210. const mpd_context_t *ctx, uint32_t *status)
  1211. {
  1212. if ((a->flags|b->flags)&(MPD_NAN|MPD_SNAN)) {
  1213. const mpd_t *choice = b;
  1214. if (mpd_issnan(a)) {
  1215. choice = a;
  1216. *status |= MPD_Invalid_operation;
  1217. }
  1218. else if (mpd_issnan(b)) {
  1219. *status |= MPD_Invalid_operation;
  1220. }
  1221. else if (mpd_isqnan(a)) {
  1222. choice = a;
  1223. }
  1224. mpd_qcopy(result, choice, status);
  1225. mpd_set_qnan(result);
  1226. _mpd_fix_nan(result, ctx);
  1227. return 1;
  1228. }
  1229. return 0;
  1230. }
  1231. /*
  1232. * Check if one of the operands is NaN, copy to result and return 1 if this
  1233. * is the case. Copying can fail since NaNs are allowed to have a payload
  1234. * that does not fit in MPD_MINALLOC.
  1235. */
  1236. static int
  1237. mpd_qcheck_3nans(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
  1238. const mpd_context_t *ctx, uint32_t *status)
  1239. {
  1240. if ((a->flags|b->flags|c->flags)&(MPD_NAN|MPD_SNAN)) {
  1241. const mpd_t *choice = c;
  1242. if (mpd_issnan(a)) {
  1243. choice = a;
  1244. *status |= MPD_Invalid_operation;
  1245. }
  1246. else if (mpd_issnan(b)) {
  1247. choice = b;
  1248. *status |= MPD_Invalid_operation;
  1249. }
  1250. else if (mpd_issnan(c)) {
  1251. *status |= MPD_Invalid_operation;
  1252. }
  1253. else if (mpd_isqnan(a)) {
  1254. choice = a;
  1255. }
  1256. else if (mpd_isqnan(b)) {
  1257. choice = b;
  1258. }
  1259. mpd_qcopy(result, choice, status);
  1260. mpd_set_qnan(result);
  1261. _mpd_fix_nan(result, ctx);
  1262. return 1;
  1263. }
  1264. return 0;
  1265. }
/* Check if rounding digit 'rnd' leads to an increment. 'rnd' is the
 * indicator produced by the shift-right helpers: 0 exact, 1-4 below
 * half, 5 exactly half, 6-9 above half. */
static inline int
_mpd_rnd_incr(const mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx)
{
    int ld;

    switch (ctx->round) {
    case MPD_ROUND_DOWN: case MPD_ROUND_TRUNC:
        return 0;
    case MPD_ROUND_HALF_UP:
        return (rnd >= 5);
    case MPD_ROUND_HALF_EVEN:
        /* ties go to an even last digit */
        return (rnd > 5) || ((rnd == 5) && mpd_isoddcoeff(dec));
    case MPD_ROUND_CEILING:
        /* towards +infinity: bump any inexact non-negative result */
        return !(rnd == 0 || mpd_isnegative(dec));
    case MPD_ROUND_FLOOR:
        /* towards -infinity: bump any inexact non-positive result */
        return !(rnd == 0 || mpd_ispositive(dec));
    case MPD_ROUND_HALF_DOWN:
        return (rnd > 5);
    case MPD_ROUND_UP:
        return !(rnd == 0);
    case MPD_ROUND_05UP:
        /* round away from zero only if the last kept digit is 0 or 5 */
        ld = (int)mpd_lsd(dec->data[0]);
        return (!(rnd == 0) && (ld == 0 || ld == 5));
    default:
        /* Without a valid context, further results will be undefined. */
        return 0; /* GCOV_NOT_REACHED */
    }
}
/*
 * Apply rounding to a decimal that has been right-shifted into a full
 * precision decimal. If an increment leads to an overflow of the precision,
 * adjust the coefficient and the exponent and check the new exponent for
 * overflow.
 */
static inline void
_mpd_apply_round(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
                 uint32_t *status)
{
    if (_mpd_rnd_incr(dec, rnd, ctx)) {
        /* We have a number with exactly ctx->prec digits. The increment
         * can only lead to an overflow if the decimal is all nines. In
         * that case, the result is a power of ten with prec+1 digits.
         *
         * If the precision is a multiple of MPD_RDIGITS, this situation is
         * detected by _mpd_baseincr returning a carry.
         * If the precision is not a multiple of MPD_RDIGITS, we have to
         * check if the result has one digit too many.
         */
        mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
        if (carry) {
            /* all-nines case: keep one leading digit, bump the exponent */
            dec->data[dec->len-1] = mpd_pow10[MPD_RDIGITS-1];
            dec->exp += 1;
            _mpd_check_exp(dec, ctx, status);
            return;
        }
        mpd_setdigits(dec);
        if (dec->digits > ctx->prec) {
            /* one digit too many: the dropped digit is zero, so the
             * shift is exact */
            mpd_qshiftr_inplace(dec, 1);
            dec->exp += 1;
            dec->digits = ctx->prec;
            _mpd_check_exp(dec, ctx, status);
        }
    }
}
/*
 * Apply rounding to a decimal. Allow overflow of the precision: if the
 * increment carries out of the top word, the coefficient simply grows
 * by one word (which can fail with MPD_Malloc_error).
 */
static inline void
_mpd_apply_round_excess(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
                        uint32_t *status)
{
    if (_mpd_rnd_incr(dec, rnd, ctx)) {
        mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
        if (carry) {
            if (!mpd_qresize(dec, dec->len+1, status)) {
                return;
            }
            dec->data[dec->len] = 1;
            dec->len += 1;
        }
        mpd_setdigits(dec);
    }
}
/*
 * Apply rounding to a decimal that has been right-shifted into a decimal
 * with full precision or less. Return failure (0) if an increment would
 * overflow the precision; 'dec' is then set to NaN with
 * MPD_Invalid_operation.
 */
static inline int
_mpd_apply_round_fit(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
                     uint32_t *status)
{
    if (_mpd_rnd_incr(dec, rnd, ctx)) {
        mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
        if (carry) {
            if (!mpd_qresize(dec, dec->len+1, status)) {
                return 0;
            }
            dec->data[dec->len] = 1;
            dec->len += 1;
        }
        mpd_setdigits(dec);
        if (dec->digits > ctx->prec) {
            mpd_seterror(dec, MPD_Invalid_operation, status);
            return 0;
        }
    }
    return 1;
}
/* Check a normal number for overflow, underflow, clamping. If the operand
   is modified, it will be zero, special or (sub)normal with a coefficient
   that fits into the current context precision. */
static inline void
_mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
{
    mpd_ssize_t adjexp, etiny, shift;
    int rnd;

    adjexp = mpd_adjexp(dec);
    /* Case 1: overflow (adjusted exponent above emax) */
    if (adjexp > ctx->emax) {

        if (mpd_iszerocoeff(dec)) {
            /* a zero cannot overflow; clamp its exponent into range */
            dec->exp = ctx->emax;
            if (ctx->clamp) {
                dec->exp -= (ctx->prec-1);
            }
            mpd_zerocoeff(dec);
            *status |= MPD_Clamped;
            return;
        }

        /* The overflow result depends on the rounding direction:
         * either infinity or the largest finite number. */
        switch (ctx->round) {
        case MPD_ROUND_HALF_UP: case MPD_ROUND_HALF_EVEN:
        case MPD_ROUND_HALF_DOWN: case MPD_ROUND_UP:
        case MPD_ROUND_TRUNC:
            mpd_setspecial(dec, mpd_sign(dec), MPD_INF);
            break;
        case MPD_ROUND_DOWN: case MPD_ROUND_05UP:
            mpd_qmaxcoeff(dec, ctx, status);
            dec->exp = ctx->emax - ctx->prec + 1;
            break;
        case MPD_ROUND_CEILING:
            if (mpd_isnegative(dec)) {
                mpd_qmaxcoeff(dec, ctx, status);
                dec->exp = ctx->emax - ctx->prec + 1;
            }
            else {
                mpd_setspecial(dec, MPD_POS, MPD_INF);
            }
            break;
        case MPD_ROUND_FLOOR:
            if (mpd_ispositive(dec)) {
                mpd_qmaxcoeff(dec, ctx, status);
                dec->exp = ctx->emax - ctx->prec + 1;
            }
            else {
                mpd_setspecial(dec, MPD_NEG, MPD_INF);
            }
            break;
        default: /* debug */
            abort(); /* GCOV_NOT_REACHED */
        }

        *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
    } /* fold down */
    /* Case 2: clamp mode and exponent above etop: fold down by
     * padding the coefficient with zeros. */
    else if (ctx->clamp && dec->exp > mpd_etop(ctx)) {
        /* At this point adjexp=exp+digits-1 <= emax and exp > etop=emax-prec+1:
         *   (1) shift = exp -emax+prec-1 > 0
         *   (2) digits+shift = exp+digits-1 - emax + prec <= prec */
        shift = dec->exp - mpd_etop(ctx);
        if (!mpd_qshiftl(dec, dec, shift, status)) {
            return;
        }
        dec->exp -= shift;
        *status |= MPD_Clamped;
        if (!mpd_iszerocoeff(dec) && adjexp < ctx->emin) {
            /* Underflow is impossible, since exp < etiny=emin-prec+1
             * and exp > etop=emax-prec+1 would imply emax < emin. */
            *status |= MPD_Subnormal;
        }
    }
    /* Case 3: subnormal (adjusted exponent below emin) */
    else if (adjexp < ctx->emin) {
        etiny = mpd_etiny(ctx);

        if (mpd_iszerocoeff(dec)) {
            if (dec->exp < etiny) {
                dec->exp = etiny;
                mpd_zerocoeff(dec);
                *status |= MPD_Clamped;
            }
            return;
        }

        *status |= MPD_Subnormal;
        if (dec->exp < etiny) {
            /* At this point adjexp=exp+digits-1 < emin and exp < etiny=emin-prec+1:
             *   (1) shift = emin-prec+1 - exp > 0
             *   (2) digits-shift = exp+digits-1 - emin + prec < prec */
            shift = etiny - dec->exp;
            rnd = (int)mpd_qshiftr_inplace(dec, shift);
            dec->exp = etiny;
            /* We always have a spare digit in case of an increment. */
            _mpd_apply_round_excess(dec, rnd, ctx, status);
            *status |= MPD_Rounded;
            if (rnd) {
                *status |= (MPD_Inexact|MPD_Underflow);
                if (mpd_iszerocoeff(dec)) {
                    mpd_zerocoeff(dec);
                    *status |= MPD_Clamped;
                }
            }
        }
        /* Case exp >= etiny=emin-prec+1:
         *   (1) adjexp=exp+digits-1 < emin
         *   (2) digits < emin-exp+1 <= prec */
    }
}
/* Transcendental functions do not always set Underflow reliably,
 * since they only use as much precision as is necessary for correct
 * rounding. If a result like 1.0000000000e-101 is finalized, there
 * is no rounding digit that would trigger Underflow. But we can
 * assume Inexact, so a short check suffices. */
static inline void
mpd_check_underflow(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
{
    /* nonzero, adjusted exponent below emin and exponent below etiny */
    if (mpd_adjexp(dec) < ctx->emin && !mpd_iszero(dec) &&
        dec->exp < mpd_etiny(ctx)) {
        *status |= MPD_Underflow;
    }
}
/* Check if a normal number must be rounded after the exponent has been checked. */
static inline void
_mpd_check_round(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
{
    mpd_uint_t rnd;
    mpd_ssize_t shift;

    /* must handle specials: _mpd_check_exp() can produce infinities or NaNs */
    if (mpd_isspecial(dec)) {
        return;
    }

    if (dec->digits > ctx->prec) {
        /* shift off the excess digits, keep the rounding indicator */
        shift = dec->digits - ctx->prec;
        rnd = mpd_qshiftr_inplace(dec, shift);
        dec->exp += shift;
        _mpd_apply_round(dec, rnd, ctx, status);
        *status |= MPD_Rounded;
        if (rnd) {
            *status |= MPD_Inexact;
        }
    }
}
/* Finalize all operations: fix NaN payloads, then check the exponent
 * range and round the coefficient to the context precision. */
void
mpd_qfinalize(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
{
    if (mpd_isspecial(result)) {
        if (mpd_isnan(result)) {
            _mpd_fix_nan(result, ctx);
        }
        return;
    }

    _mpd_check_exp(result, ctx, status);
    _mpd_check_round(result, ctx, status);
}
  1524. /******************************************************************************/
  1525. /* Copying */
  1526. /******************************************************************************/
/* Internal function: Copy a decimal, share data with src: USE WITH CARE!
 * 'dest' must not be resized or freed while 'src' is alive. */
static inline void
_mpd_copy_shared(mpd_t *dest, const mpd_t *src)
{
    dest->flags = src->flags;
    dest->exp = src->exp;
    dest->digits = src->digits;
    dest->len = src->len;
    dest->alloc = src->alloc;
    dest->data = src->data;   /* aliases src->data, not a deep copy */

    mpd_set_shared_data(dest);
}
  1539. /*
  1540. * Copy a decimal. In case of an error, status is set to MPD_Malloc_error.
  1541. */
  1542. int
  1543. mpd_qcopy(mpd_t *result, const mpd_t *a, uint32_t *status)
  1544. {
  1545. if (result == a) return 1;
  1546. if (!mpd_qresize(result, a->len, status)) {
  1547. return 0;
  1548. }
  1549. mpd_copy_flags(result, a);
  1550. result->exp = a->exp;
  1551. result->digits = a->digits;
  1552. result->len = a->len;
  1553. memcpy(result->data, a->data, a->len * (sizeof *result->data));
  1554. return 1;
  1555. }
/*
 * Copy to a decimal with a static buffer. The caller has to make sure that
 * the buffer is big enough. Cannot fail.
 */
static void
mpd_qcopy_static(mpd_t *result, const mpd_t *a)
{
    if (result == a) return;

    memcpy(result->data, a->data, a->len * (sizeof *result->data));

    mpd_copy_flags(result, a);
    result->exp = a->exp;
    result->digits = a->digits;
    result->len = a->len;
}
/*
 * Return a newly allocated copy of the operand. In case of an allocation
 * failure the return value is NULL; the caller owns (and must free) the
 * returned decimal.
 */
mpd_t *
mpd_qncopy(const mpd_t *a)
{
    mpd_t *result;

    if ((result = mpd_qnew_size(a->len)) == NULL) {
        return NULL;
    }
    memcpy(result->data, a->data, a->len * (sizeof *result->data));
    mpd_copy_flags(result, a);
    result->exp = a->exp;
    result->digits = a->digits;
    result->len = a->len;

    return result;
}
  1588. /*
  1589. * Copy a decimal and set the sign to positive. In case of an error, the
  1590. * status is set to MPD_Malloc_error.
  1591. */
  1592. int
  1593. mpd_qcopy_abs(mpd_t *result, const mpd_t *a, uint32_t *status)
  1594. {
  1595. if (!mpd_qcopy(result, a, status)) {
  1596. return 0;
  1597. }
  1598. mpd_set_positive(result);
  1599. return 1;
  1600. }
  1601. /*
  1602. * Copy a decimal and negate the sign. In case of an error, the
  1603. * status is set to MPD_Malloc_error.
  1604. */
  1605. int
  1606. mpd_qcopy_negate(mpd_t *result, const mpd_t *a, uint32_t *status)
  1607. {
  1608. if (!mpd_qcopy(result, a, status)) {
  1609. return 0;
  1610. }
  1611. _mpd_negate(result);
  1612. return 1;
  1613. }
  1614. /*
  1615. * Copy a decimal, setting the sign of the first operand to the sign of the
  1616. * second operand. In case of an error, the status is set to MPD_Malloc_error.
  1617. */
  1618. int
  1619. mpd_qcopy_sign(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
  1620. {
  1621. uint8_t sign_b = mpd_sign(b); /* result may equal b! */
  1622. if (!mpd_qcopy(result, a, status)) {
  1623. return 0;
  1624. }
  1625. mpd_set_sign(result, sign_b);
  1626. return 1;
  1627. }
  1628. /******************************************************************************/
  1629. /* Comparisons */
  1630. /******************************************************************************/
  1631. /*
  1632. * For all functions that compare two operands and return an int the usual
  1633. * convention applies to the return value:
  1634. *
  1635. * -1 if op1 < op2
  1636. * 0 if op1 == op2
  1637. * 1 if op1 > op2
  1638. *
  1639. * INT_MAX for error
  1640. */
/* Convenience macro. If a and b are not equal, return from the calling
 * function with the correct comparison value. NOTE: both arguments are
 * evaluated more than once; pass side-effect free expressions only. */
#define CMP_EQUAL_OR_RETURN(a, b)  \
        if (a != b) {              \
            if (a < b) {           \
                return -1;         \
            }                      \
            return 1;              \
        }
/*
 * Compare the data of big and small. This function does the equivalent
 * of first shifting small to the left and then comparing the data of
 * big and small, except that no allocation for the left shift is needed.
 *
 * 'n' and 'm' are the word lengths of big and small; 'shift' is the
 * left shift (in digits) that would align small with big.
 */
static int
_mpd_basecmp(mpd_uint_t *big, mpd_uint_t *small, mpd_size_t n, mpd_size_t m,
             mpd_size_t shift)
{
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
    /* spurious uninitialized warnings */
    mpd_uint_t l=l, lprev=lprev, h=h;
#else
    mpd_uint_t l, lprev, h;
#endif
    mpd_uint_t q, r;
    mpd_uint_t ph, x;

    assert(m > 0 && n >= m && shift > 0);

    /* split the shift into q whole words and r leftover digits */
    _mpd_div_word(&q, &r, (mpd_uint_t)shift, MPD_RDIGITS);

    if (r != 0) {
        /* shifted words of small straddle word boundaries of big:
         * recombine (high part of previous word, low part of current)
         * on the fly and compare word by word from the top */
        ph = mpd_pow10[r];

        --m; --n;
        _mpd_divmod_pow10(&h, &lprev, small[m--], MPD_RDIGITS-r);
        if (h != 0) {
            CMP_EQUAL_OR_RETURN(big[n], h)
            --n;
        }
        for (; m != MPD_SIZE_MAX; m--,n--) {
            _mpd_divmod_pow10(&h, &l, small[m], MPD_RDIGITS-r);
            x = ph * lprev + h;
            CMP_EQUAL_OR_RETURN(big[n], x)
            lprev = l;
        }
        x = ph * lprev;
        CMP_EQUAL_OR_RETURN(big[q], x)
    }
    else {
        /* word-aligned shift: compare directly with an offset of q words */
        while (--m != MPD_SIZE_MAX) {
            CMP_EQUAL_OR_RETURN(big[m+q], small[m])
        }
    }

    /* all overlapping words equal: big is larger iff its low q words
     * are not all zero */
    return !_mpd_isallzero(big, q);
}
/* Compare two decimals with the same adjusted exponent. */
static int
_mpd_cmp_same_adjexp(const mpd_t *a, const mpd_t *b)
{
    mpd_ssize_t shift, i;

    if (a->exp != b->exp) {
        /* Cannot wrap: a->exp + a->digits = b->exp + b->digits, so
         * a->exp - b->exp = b->digits - a->digits. */
        shift = a->exp - b->exp;
        if (shift > 0) {
            /* a has fewer digits: align it against b */
            return -1 * _mpd_basecmp(b->data, a->data, b->len, a->len, shift);
        }
        else {
            return _mpd_basecmp(a->data, b->data, a->len, b->len, -shift);
        }
    }

    /*
     * At this point adjexp(a) == adjexp(b) and a->exp == b->exp,
     * so a->digits == b->digits, therefore a->len == b->len.
     */
    for (i = a->len-1; i >= 0; --i) {
        CMP_EQUAL_OR_RETURN(a->data[i], b->data[i])
    }

    return 0;
}
/* Compare two numerical values (specials other than infinities must be
 * filtered out by the caller). Returns -1, 0 or 1. */
static int
_mpd_cmp(const mpd_t *a, const mpd_t *b)
{
    mpd_ssize_t adjexp_a, adjexp_b;

    /* equal pointers */
    if (a == b) {
        return 0;
    }

    /* infinities */
    if (mpd_isinfinite(a)) {
        if (mpd_isinfinite(b)) {
            return mpd_isnegative(b) - mpd_isnegative(a);
        }
        return mpd_arith_sign(a);
    }
    if (mpd_isinfinite(b)) {
        return -mpd_arith_sign(b);
    }

    /* zeros (compare equal regardless of sign or exponent) */
    if (mpd_iszerocoeff(a)) {
        if (mpd_iszerocoeff(b)) {
            return 0;
        }
        return -mpd_arith_sign(b);
    }
    if (mpd_iszerocoeff(b)) {
        return mpd_arith_sign(a);
    }

    /* different signs */
    if (mpd_sign(a) != mpd_sign(b)) {
        return mpd_sign(b) - mpd_sign(a);
    }

    /* different adjusted exponents */
    adjexp_a = mpd_adjexp(a);
    adjexp_b = mpd_adjexp(b);
    if (adjexp_a != adjexp_b) {
        if (adjexp_a < adjexp_b) {
            return -1 * mpd_arith_sign(a);
        }
        return mpd_arith_sign(a);
    }

    /* same adjusted exponents */
    return _mpd_cmp_same_adjexp(a, b) * mpd_arith_sign(a);
}
/* Compare the absolutes of two numerical values (signs are ignored;
 * specials other than infinities must be filtered out by the caller). */
static int
_mpd_cmp_abs(const mpd_t *a, const mpd_t *b)
{
    mpd_ssize_t adjexp_a, adjexp_b;

    /* equal pointers */
    if (a == b) {
        return 0;
    }

    /* infinities */
    if (mpd_isinfinite(a)) {
        if (mpd_isinfinite(b)) {
            return 0;
        }
        return 1;
    }
    if (mpd_isinfinite(b)) {
        return -1;
    }

    /* zeros */
    if (mpd_iszerocoeff(a)) {
        if (mpd_iszerocoeff(b)) {
            return 0;
        }
        return -1;
    }
    if (mpd_iszerocoeff(b)) {
        return 1;
    }

    /* different adjusted exponents */
    adjexp_a = mpd_adjexp(a);
    adjexp_b = mpd_adjexp(b);
    if (adjexp_a != adjexp_b) {
        if (adjexp_a < adjexp_b) {
            return -1;
        }
        return 1;
    }

    /* same adjusted exponents */
    return _mpd_cmp_same_adjexp(a, b);
}
  1804. /* Compare two values and return an integer result. */
  1805. int
  1806. mpd_qcmp(const mpd_t *a, const mpd_t *b, uint32_t *status)
  1807. {
  1808. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  1809. if (mpd_isnan(a) || mpd_isnan(b)) {
  1810. *status |= MPD_Invalid_operation;
  1811. return INT_MAX;
  1812. }
  1813. }
  1814. return _mpd_cmp(a, b);
  1815. }
  1816. /*
 * Compare a and b, convert the usual integer result to a decimal and
  1818. * store it in 'result'. For convenience, the integer result of the comparison
  1819. * is returned. Comparisons involving NaNs return NaN/INT_MAX.
  1820. */
int
mpd_qcompare(mpd_t *result, const mpd_t *a, const mpd_t *b,
             const mpd_context_t *ctx, uint32_t *status)
{
    int c;

    /* any NaN operand propagates to 'result' and yields INT_MAX */
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            return INT_MAX;
        }
    }

    c = _mpd_cmp(a, b);
    /* encode the comparison as a decimal: -1, 0 or 1 */
    _settriple(result, (c < 0), (c != 0), 0);
    return c;
}
/* Same as mpd_compare(), but signal for all NaNs, i.e. also for quiet NaNs. */
int
mpd_qcompare_signal(mpd_t *result, const mpd_t *a, const mpd_t *b,
                    const mpd_context_t *ctx, uint32_t *status)
{
    int c;

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            /* raise even for quiet NaNs (mpd_qcheck_nans only signals
             * for sNaNs) */
            *status |= MPD_Invalid_operation;
            return INT_MAX;
        }
    }

    c = _mpd_cmp(a, b);
    _settriple(result, (c < 0), (c != 0), 0);
    return c;
}
/* Compare the operands using a total order: unlike _mpd_cmp(), NaNs are
 * ordered (above all finite values and infinities, qNaN above sNaN, then
 * by payload) and equal values with different exponents are distinguished. */
int
mpd_cmp_total(const mpd_t *a, const mpd_t *b)
{
    mpd_t aa, bb;
    int nan_a, nan_b;
    int c;

    if (mpd_sign(a) != mpd_sign(b)) {
        return mpd_sign(b) - mpd_sign(a);
    }

    if (mpd_isnan(a)) {
        /* NaN ranks above any non-NaN of the same sign */
        c = 1;
        if (mpd_isnan(b)) {
            /* qNaN ranks above sNaN */
            nan_a = (mpd_isqnan(a)) ? 1 : 0;
            nan_b = (mpd_isqnan(b)) ? 1 : 0;
            if (nan_b == nan_a) {
                if (a->len > 0 && b->len > 0) {
                    /* shared (shallow) views with exp forced to zero */
                    _mpd_copy_shared(&aa, a);
                    _mpd_copy_shared(&bb, b);
                    aa.exp = bb.exp = 0;
                    /* compare payload */
                    c = _mpd_cmp_abs(&aa, &bb);
                }
                else {
                    /* a NaN with a payload ranks above one without */
                    c = (a->len > 0) - (b->len > 0);
                }
            }
            else {
                c = nan_a - nan_b;
            }
        }
    }
    else if (mpd_isnan(b)) {
        c = -1;
    }
    else {
        c = _mpd_cmp_abs(a, b);
        if (c == 0 && a->exp != b->exp) {
            /* numerically equal: break the tie by exponent */
            c = (a->exp < b->exp) ? -1 : 1;
        }
    }

    /* the magnitude ordering is flipped for negative operands */
    return c * mpd_arith_sign(a);
}
  1894. /*
  1895. * Compare a and b according to a total order, convert the usual integer result
  1896. * to a decimal and store it in 'result'. For convenience, the integer result
  1897. * of the comparison is returned.
  1898. */
  1899. int
  1900. mpd_compare_total(mpd_t *result, const mpd_t *a, const mpd_t *b)
  1901. {
  1902. int c;
  1903. c = mpd_cmp_total(a, b);
  1904. _settriple(result, (c < 0), (c != 0), 0);
  1905. return c;
  1906. }
/* Compare the magnitude of the operands using a total order. */
int
mpd_cmp_total_mag(const mpd_t *a, const mpd_t *b)
{
    mpd_t aa, bb;

    /* shallow, sign-stripped views; the originals are not modified */
    _mpd_copy_shared(&aa, a);
    _mpd_copy_shared(&bb, b);

    mpd_set_positive(&aa);
    mpd_set_positive(&bb);

    return mpd_cmp_total(&aa, &bb);
}
  1918. /*
 * Compare the magnitude of a and b according to a total order, convert
 * the usual integer result to a decimal and store it in 'result'.
  1921. * For convenience, the integer result of the comparison is returned.
  1922. */
  1923. int
  1924. mpd_compare_total_mag(mpd_t *result, const mpd_t *a, const mpd_t *b)
  1925. {
  1926. int c;
  1927. c = mpd_cmp_total_mag(a, b);
  1928. _settriple(result, (c < 0), (c != 0), 0);
  1929. return c;
  1930. }
  1931. /* Determine an ordering for operands that are numerically equal. */
  1932. static inline int
  1933. _mpd_cmp_numequal(const mpd_t *a, const mpd_t *b)
  1934. {
  1935. int sign_a, sign_b;
  1936. int c;
  1937. sign_a = mpd_sign(a);
  1938. sign_b = mpd_sign(b);
  1939. if (sign_a != sign_b) {
  1940. c = sign_b - sign_a;
  1941. }
  1942. else {
  1943. c = (a->exp < b->exp) ? -1 : 1;
  1944. c *= mpd_arith_sign(a);
  1945. }
  1946. return c;
  1947. }
  1948. /******************************************************************************/
  1949. /* Shifting the coefficient */
  1950. /******************************************************************************/
/*
 * Shift the coefficient of the operand to the left, no check for specials.
 * Both operands may be the same pointer. If the result length has to be
 * increased, mpd_qresize() might fail with MPD_Malloc_error.
 * Returns 1 on success, 0 on failure.
 */
int
mpd_qshiftl(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
{
    mpd_ssize_t size;

    assert(n >= 0);

    /* shifting a zero coefficient (or by zero) is a plain copy */
    if (mpd_iszerocoeff(a) || n == 0) {
        return mpd_qcopy(result, a, status);
    }

    size = mpd_digits_to_size(a->digits+n);
    if (!mpd_qresize(result, size, status)) {
        return 0; /* result is NaN */
    }

    _mpd_baseshiftl(result->data, a->data, size, a->len, n);

    mpd_copy_flags(result, a);
    result->len = size;
    result->exp = a->exp;
    result->digits = a->digits+n;

    return 1;
}
/* Determine the rounding indicator if all digits of the coefficient are shifted
 * out of the picture. The indicator is the most significant dropped digit,
 * with 0 and 5 bumped by one if any other dropped digit is nonzero, so the
 * caller can distinguish exact / below-half / half / above-half. */
static mpd_uint_t
_mpd_get_rnd(const mpd_uint_t *data, mpd_ssize_t len, int use_msd)
{
    mpd_uint_t rnd = 0, rest = 0, word;

    word = data[len-1];
    /* special treatment for the most significant digit if shift == digits */
    if (use_msd) {
        _mpd_divmod_pow10(&rnd, &rest, word, mpd_word_digits(word)-1);
        if (len > 1 && rest == 0) {
            rest = !_mpd_isallzero(data, len-1);
        }
    }
    else {
        rest = !_mpd_isallzero(data, len);
    }

    return (rnd == 0 || rnd == 5) ? rnd + !!rest : rnd;
}
/*
 * Same as mpd_qshiftr(), but 'result' is a static array. It is the
 * caller's responsibility to make sure that the array is big enough.
 * The function cannot fail. Returns the rounding indicator.
 */
mpd_uint_t
mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n)
{
    mpd_uint_t rnd;
    mpd_ssize_t size;

    assert(n >= 0);

    if (mpd_iszerocoeff(a) || n == 0) {
        mpd_qcopy_static(result, a);
        return 0;
    }

    if (n >= a->digits) {
        /* every digit is shifted out; the coefficient collapses to 0 */
        rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
        mpd_zerocoeff(result);
        result->digits = 1;
        size = 1;
    }
    else {
        result->digits = a->digits-n;
        size = mpd_digits_to_size(result->digits);
        rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
    }

    mpd_copy_flags(result, a);
    result->exp = a->exp;
    result->len = size;

    return rnd;
}
/*
 * Inplace shift of the coefficient to the right, no check for specials.
 * Returns the rounding indicator for mpd_rnd_incr().
 * The function cannot fail (shrinking an allocation cannot fail).
 */
mpd_uint_t
mpd_qshiftr_inplace(mpd_t *result, mpd_ssize_t n)
{
    uint32_t dummy;
    mpd_uint_t rnd;
    mpd_ssize_t size;

    assert(n >= 0);

    if (mpd_iszerocoeff(result) || n == 0) {
        return 0;
    }

    if (n >= result->digits) {
        /* every digit is shifted out; the coefficient collapses to 0 */
        rnd = _mpd_get_rnd(result->data, result->len, (n==result->digits));
        mpd_zerocoeff(result);
        result->digits = 1;
        size = 1;
    }
    else {
        rnd = _mpd_baseshiftr(result->data, result->data, result->len, n);
        result->digits -= n;
        size = mpd_digits_to_size(result->digits);
        /* reducing the size cannot fail */
        mpd_qresize(result, size, &dummy);
    }

    result->len = size;

    return rnd;
}
  2056. /*
  2057. * Shift the coefficient of the operand to the right, no check for specials.
  2058. * Both operands may be the same pointer. Returns the rounding indicator to
  2059. * be used by mpd_rnd_incr(). If the result length has to be increased,
  2060. * mpd_qcopy() or mpd_qresize() might fail with MPD_Malloc_error. In those
  2061. * cases, MPD_UINT_MAX is returned.
  2062. */
  2063. mpd_uint_t
  2064. mpd_qshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
  2065. {
  2066. mpd_uint_t rnd;
  2067. mpd_ssize_t size;
  2068. assert(n >= 0);
  2069. if (mpd_iszerocoeff(a) || n == 0) {
  2070. if (!mpd_qcopy(result, a, status)) {
  2071. return MPD_UINT_MAX;
  2072. }
  2073. return 0;
  2074. }
  2075. if (n >= a->digits) {
  2076. rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
  2077. mpd_zerocoeff(result);
  2078. result->digits = 1;
  2079. size = 1;
  2080. }
  2081. else {
  2082. result->digits = a->digits-n;
  2083. size = mpd_digits_to_size(result->digits);
  2084. if (result == a) {
  2085. rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
  2086. /* reducing the size cannot fail */
  2087. mpd_qresize(result, size, status);
  2088. }
  2089. else {
  2090. if (!mpd_qresize(result, size, status)) {
  2091. return MPD_UINT_MAX;
  2092. }
  2093. rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
  2094. }
  2095. }
  2096. mpd_copy_flags(result, a);
  2097. result->exp = a->exp;
  2098. result->len = size;
  2099. return rnd;
  2100. }
  2101. /******************************************************************************/
  2102. /* Miscellaneous operations */
  2103. /******************************************************************************/
  2104. /* Logical And */
  2105. void
  2106. mpd_qand(mpd_t *result, const mpd_t *a, const mpd_t *b,
  2107. const mpd_context_t *ctx, uint32_t *status)
  2108. {
  2109. const mpd_t *big = a, *small = b;
  2110. mpd_uint_t x, y, z, xbit, ybit;
  2111. int k, mswdigits;
  2112. mpd_ssize_t i;
  2113. if (mpd_isspecial(a) || mpd_isspecial(b) ||
  2114. mpd_isnegative(a) || mpd_isnegative(b) ||
  2115. a->exp != 0 || b->exp != 0) {
  2116. mpd_seterror(result, MPD_Invalid_operation, status);
  2117. return;
  2118. }
  2119. if (b->digits > a->digits) {
  2120. big = b;
  2121. small = a;
  2122. }
  2123. if (!mpd_qresize(result, big->len, status)) {
  2124. return;
  2125. }
  2126. /* full words */
  2127. for (i = 0; i < small->len-1; i++) {
  2128. x = small->data[i];
  2129. y = big->data[i];
  2130. z = 0;
  2131. for (k = 0; k < MPD_RDIGITS; k++) {
  2132. xbit = x % 10;
  2133. x /= 10;
  2134. ybit = y % 10;
  2135. y /= 10;
  2136. if (xbit > 1 || ybit > 1) {
  2137. goto invalid_operation;
  2138. }
  2139. z += (xbit&ybit) ? mpd_pow10[k] : 0;
  2140. }
  2141. result->data[i] = z;
  2142. }
  2143. /* most significant word of small */
  2144. x = small->data[i];
  2145. y = big->data[i];
  2146. z = 0;
  2147. mswdigits = mpd_word_digits(x);
  2148. for (k = 0; k < mswdigits; k++) {
  2149. xbit = x % 10;
  2150. x /= 10;
  2151. ybit = y % 10;
  2152. y /= 10;
  2153. if (xbit > 1 || ybit > 1) {
  2154. goto invalid_operation;
  2155. }
  2156. z += (xbit&ybit) ? mpd_pow10[k] : 0;
  2157. }
  2158. result->data[i++] = z;
  2159. /* scan the rest of y for digit > 1 */
  2160. for (; k < MPD_RDIGITS; k++) {
  2161. ybit = y % 10;
  2162. y /= 10;
  2163. if (ybit > 1) {
  2164. goto invalid_operation;
  2165. }
  2166. }
  2167. /* scan the rest of big for digit > 1 */
  2168. for (; i < big->len; i++) {
  2169. y = big->data[i];
  2170. for (k = 0; k < MPD_RDIGITS; k++) {
  2171. ybit = y % 10;
  2172. y /= 10;
  2173. if (ybit > 1) {
  2174. goto invalid_operation;
  2175. }
  2176. }
  2177. }
  2178. mpd_clear_flags(result);
  2179. result->exp = 0;
  2180. result->len = _mpd_real_size(result->data, small->len);
  2181. mpd_qresize(result, result->len, status);
  2182. mpd_setdigits(result);
  2183. _mpd_cap(result, ctx);
  2184. return;
  2185. invalid_operation:
  2186. mpd_seterror(result, MPD_Invalid_operation, status);
  2187. }
  2188. /* Class of an operand. Returns a pointer to the constant name. */
  2189. const char *
  2190. mpd_class(const mpd_t *a, const mpd_context_t *ctx)
  2191. {
  2192. if (mpd_isnan(a)) {
  2193. if (mpd_isqnan(a))
  2194. return "NaN";
  2195. else
  2196. return "sNaN";
  2197. }
  2198. else if (mpd_ispositive(a)) {
  2199. if (mpd_isinfinite(a))
  2200. return "+Infinity";
  2201. else if (mpd_iszero(a))
  2202. return "+Zero";
  2203. else if (mpd_isnormal(a, ctx))
  2204. return "+Normal";
  2205. else
  2206. return "+Subnormal";
  2207. }
  2208. else {
  2209. if (mpd_isinfinite(a))
  2210. return "-Infinity";
  2211. else if (mpd_iszero(a))
  2212. return "-Zero";
  2213. else if (mpd_isnormal(a, ctx))
  2214. return "-Normal";
  2215. else
  2216. return "-Subnormal";
  2217. }
  2218. }
/* Logical Not: digit-wise inversion of the operand, padded to the context
 * precision. (The previous header comment said "Logical Xor", which belongs
 * to mpd_qxor() below.) */
void
mpd_qinvert(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
            uint32_t *status)
{
    mpd_uint_t x, z, xbit;
    mpd_ssize_t i, digits, len;
    mpd_ssize_t q, r;
    int k;

    /* the operand must be a finite, non-negative integer with exponent 0 */
    if (mpd_isspecial(a) || mpd_isnegative(a) || a->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    /* the inversion is performed over the full context precision, so short
     * operands are padded with (inverted) zeros */
    digits = (a->digits < ctx->prec) ? ctx->prec : a->digits;
    _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
    len = (r == 0) ? q : q+1;

    if (!mpd_qresize(result, len, status)) {
        return;
    }

    for (i = 0; i < len; i++) {
        x = (i < a->len) ? a->data[i] : 0;
        z = 0;
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            /* operands of the logical operations may only contain 0s and 1s */
            if (xbit > 1) {
                goto invalid_operation;
            }
            z += !xbit ? mpd_pow10[k] : 0;
        }
        result->data[i] = z;
    }

    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
  2261. /* Exponent of the magnitude of the most significant digit of the operand. */
  2262. void
  2263. mpd_qlogb(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  2264. uint32_t *status)
  2265. {
  2266. if (mpd_isspecial(a)) {
  2267. if (mpd_qcheck_nan(result, a, ctx, status)) {
  2268. return;
  2269. }
  2270. mpd_setspecial(result, MPD_POS, MPD_INF);
  2271. }
  2272. else if (mpd_iszerocoeff(a)) {
  2273. mpd_setspecial(result, MPD_NEG, MPD_INF);
  2274. *status |= MPD_Division_by_zero;
  2275. }
  2276. else {
  2277. mpd_qset_ssize(result, mpd_adjexp(a), ctx, status);
  2278. }
  2279. }
/* Logical Or */
void
mpd_qor(mpd_t *result, const mpd_t *a, const mpd_t *b,
        const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big = a, *small = b;
    mpd_uint_t x, y, z, xbit, ybit;
    int k, mswdigits;
    mpd_ssize_t i;

    /* operands must be finite, non-negative integers with exponent 0 */
    if (mpd_isspecial(a) || mpd_isspecial(b) ||
        mpd_isnegative(a) || mpd_isnegative(b) ||
        a->exp != 0 || b->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (b->digits > a->digits) {
        big = b;
        small = a;
    }
    if (!mpd_qresize(result, big->len, status)) {
        return;
    }

    /* full words */
    for (i = 0; i < small->len-1; i++) {
        x = small->data[i];
        y = big->data[i];
        z = 0;
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            ybit = y % 10;
            y /= 10;
            /* operands of the logical operations may only contain 0s and 1s */
            if (xbit > 1 || ybit > 1) {
                goto invalid_operation;
            }
            z += (xbit|ybit) ? mpd_pow10[k] : 0;
        }
        result->data[i] = z;
    }
    /* most significant word of small (may be a partial word) */
    x = small->data[i];
    y = big->data[i];
    z = 0;
    mswdigits = mpd_word_digits(x);
    for (k = 0; k < mswdigits; k++) {
        xbit = x % 10;
        x /= 10;
        ybit = y % 10;
        y /= 10;
        if (xbit > 1 || ybit > 1) {
            goto invalid_operation;
        }
        z += (xbit|ybit) ? mpd_pow10[k] : 0;
    }
    /* Scan and copy the rest of y. ORing with the implicit zeros of small
     * leaves big's digits unchanged, so valid digits are accumulated into z. */
    for (; k < MPD_RDIGITS; k++) {
        ybit = y % 10;
        y /= 10;
        if (ybit > 1) {
            goto invalid_operation;
        }
        z += ybit*mpd_pow10[k];
    }
    result->data[i++] = z;

    /* scan and validate the remaining words of big, then copy them through */
    for (; i < big->len; i++) {
        y = big->data[i];
        for (k = 0; k < MPD_RDIGITS; k++) {
            ybit = y % 10;
            y /= 10;
            if (ybit > 1) {
                goto invalid_operation;
            }
        }
        result->data[i] = big->data[i];
    }

    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, big->len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
  2366. /*
  2367. * Rotate the coefficient of a by b->data digits. b must be an integer with
  2368. * exponent 0.
  2369. */
  2370. void
  2371. mpd_qrotate(mpd_t *result, const mpd_t *a, const mpd_t *b,
  2372. const mpd_context_t *ctx, uint32_t *status)
  2373. {
  2374. uint32_t workstatus = 0;
  2375. MPD_NEW_STATIC(tmp,0,0,0,0);
  2376. MPD_NEW_STATIC(big,0,0,0,0);
  2377. MPD_NEW_STATIC(small,0,0,0,0);
  2378. mpd_ssize_t n, lshift, rshift;
  2379. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  2380. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  2381. return;
  2382. }
  2383. }
  2384. if (b->exp != 0 || mpd_isinfinite(b)) {
  2385. mpd_seterror(result, MPD_Invalid_operation, status);
  2386. return;
  2387. }
  2388. n = mpd_qget_ssize(b, &workstatus);
  2389. if (workstatus&MPD_Invalid_operation) {
  2390. mpd_seterror(result, MPD_Invalid_operation, status);
  2391. return;
  2392. }
  2393. if (n > ctx->prec || n < -ctx->prec) {
  2394. mpd_seterror(result, MPD_Invalid_operation, status);
  2395. return;
  2396. }
  2397. if (mpd_isinfinite(a)) {
  2398. mpd_qcopy(result, a, status);
  2399. return;
  2400. }
  2401. if (n >= 0) {
  2402. lshift = n;
  2403. rshift = ctx->prec-n;
  2404. }
  2405. else {
  2406. lshift = ctx->prec+n;
  2407. rshift = -n;
  2408. }
  2409. if (a->digits > ctx->prec) {
  2410. if (!mpd_qcopy(&tmp, a, status)) {
  2411. mpd_seterror(result, MPD_Malloc_error, status);
  2412. goto finish;
  2413. }
  2414. _mpd_cap(&tmp, ctx);
  2415. a = &tmp;
  2416. }
  2417. if (!mpd_qshiftl(&big, a, lshift, status)) {
  2418. mpd_seterror(result, MPD_Malloc_error, status);
  2419. goto finish;
  2420. }
  2421. _mpd_cap(&big, ctx);
  2422. if (mpd_qshiftr(&small, a, rshift, status) == MPD_UINT_MAX) {
  2423. mpd_seterror(result, MPD_Malloc_error, status);
  2424. goto finish;
  2425. }
  2426. _mpd_qadd(result, &big, &small, ctx, status);
  2427. finish:
  2428. mpd_del(&tmp);
  2429. mpd_del(&big);
  2430. mpd_del(&small);
  2431. }
  2432. /*
  2433. * b must be an integer with exponent 0 and in the range +-2*(emax + prec).
  2434. * XXX: In my opinion +-(2*emax + prec) would be more sensible.
  2435. * The result is a with the value of b added to its exponent.
  2436. */
  2437. void
  2438. mpd_qscaleb(mpd_t *result, const mpd_t *a, const mpd_t *b,
  2439. const mpd_context_t *ctx, uint32_t *status)
  2440. {
  2441. uint32_t workstatus = 0;
  2442. mpd_uint_t n, maxjump;
  2443. #ifndef LEGACY_COMPILER
  2444. int64_t exp;
  2445. #else
  2446. mpd_uint_t x;
  2447. int x_sign, n_sign;
  2448. mpd_ssize_t exp;
  2449. #endif
  2450. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  2451. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  2452. return;
  2453. }
  2454. }
  2455. if (b->exp != 0 || mpd_isinfinite(b)) {
  2456. mpd_seterror(result, MPD_Invalid_operation, status);
  2457. return;
  2458. }
  2459. n = mpd_qabs_uint(b, &workstatus);
  2460. /* the spec demands this */
  2461. maxjump = 2 * (mpd_uint_t)(ctx->emax + ctx->prec);
  2462. if (n > maxjump || workstatus&MPD_Invalid_operation) {
  2463. mpd_seterror(result, MPD_Invalid_operation, status);
  2464. return;
  2465. }
  2466. if (mpd_isinfinite(a)) {
  2467. mpd_qcopy(result, a, status);
  2468. return;
  2469. }
  2470. #ifndef LEGACY_COMPILER
  2471. exp = a->exp + (int64_t)n * mpd_arith_sign(b);
  2472. exp = (exp > MPD_EXP_INF) ? MPD_EXP_INF : exp;
  2473. exp = (exp < MPD_EXP_CLAMP) ? MPD_EXP_CLAMP : exp;
  2474. #else
  2475. x = (a->exp < 0) ? -a->exp : a->exp;
  2476. x_sign = (a->exp < 0) ? 1 : 0;
  2477. n_sign = mpd_isnegative(b) ? 1 : 0;
  2478. if (x_sign == n_sign) {
  2479. x = x + n;
  2480. if (x < n) x = MPD_UINT_MAX;
  2481. }
  2482. else {
  2483. x_sign = (x >= n) ? x_sign : n_sign;
  2484. x = (x >= n) ? x - n : n - x;
  2485. }
  2486. if (!x_sign && x > MPD_EXP_INF) x = MPD_EXP_INF;
  2487. if (x_sign && x > -MPD_EXP_CLAMP) x = -MPD_EXP_CLAMP;
  2488. exp = x_sign ? -((mpd_ssize_t)x) : (mpd_ssize_t)x;
  2489. #endif
  2490. mpd_qcopy(result, a, status);
  2491. result->exp = (mpd_ssize_t)exp;
  2492. mpd_qfinalize(result, ctx, status);
  2493. }
  2494. /*
  2495. * Shift the coefficient by n digits, positive n is a left shift. In the case
  2496. * of a left shift, the result is decapitated to fit the context precision. If
  2497. * you don't want that, use mpd_shiftl().
  2498. */
  2499. void
  2500. mpd_qshiftn(mpd_t *result, const mpd_t *a, mpd_ssize_t n, const mpd_context_t *ctx,
  2501. uint32_t *status)
  2502. {
  2503. if (mpd_isspecial(a)) {
  2504. if (mpd_qcheck_nan(result, a, ctx, status)) {
  2505. return;
  2506. }
  2507. mpd_qcopy(result, a, status);
  2508. return;
  2509. }
  2510. if (n >= 0 && n <= ctx->prec) {
  2511. mpd_qshiftl(result, a, n, status);
  2512. _mpd_cap(result, ctx);
  2513. }
  2514. else if (n < 0 && n >= -ctx->prec) {
  2515. if (!mpd_qcopy(result, a, status)) {
  2516. return;
  2517. }
  2518. _mpd_cap(result, ctx);
  2519. mpd_qshiftr_inplace(result, -n);
  2520. }
  2521. else {
  2522. mpd_seterror(result, MPD_Invalid_operation, status);
  2523. }
  2524. }
  2525. /*
  2526. * Same as mpd_shiftn(), but the shift is specified by the decimal b, which
  2527. * must be an integer with a zero exponent. Infinities remain infinities.
  2528. */
  2529. void
  2530. mpd_qshift(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
  2531. uint32_t *status)
  2532. {
  2533. uint32_t workstatus = 0;
  2534. mpd_ssize_t n;
  2535. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  2536. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  2537. return;
  2538. }
  2539. }
  2540. if (b->exp != 0 || mpd_isinfinite(b)) {
  2541. mpd_seterror(result, MPD_Invalid_operation, status);
  2542. return;
  2543. }
  2544. n = mpd_qget_ssize(b, &workstatus);
  2545. if (workstatus&MPD_Invalid_operation) {
  2546. mpd_seterror(result, MPD_Invalid_operation, status);
  2547. return;
  2548. }
  2549. if (n > ctx->prec || n < -ctx->prec) {
  2550. mpd_seterror(result, MPD_Invalid_operation, status);
  2551. return;
  2552. }
  2553. if (mpd_isinfinite(a)) {
  2554. mpd_qcopy(result, a, status);
  2555. return;
  2556. }
  2557. if (n >= 0) {
  2558. mpd_qshiftl(result, a, n, status);
  2559. _mpd_cap(result, ctx);
  2560. }
  2561. else {
  2562. if (!mpd_qcopy(result, a, status)) {
  2563. return;
  2564. }
  2565. _mpd_cap(result, ctx);
  2566. mpd_qshiftr_inplace(result, -n);
  2567. }
  2568. }
/* Logical Xor */
void
mpd_qxor(mpd_t *result, const mpd_t *a, const mpd_t *b,
         const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big = a, *small = b;
    mpd_uint_t x, y, z, xbit, ybit;
    int k, mswdigits;
    mpd_ssize_t i;

    /* operands must be finite, non-negative integers with exponent 0 */
    if (mpd_isspecial(a) || mpd_isspecial(b) ||
        mpd_isnegative(a) || mpd_isnegative(b) ||
        a->exp != 0 || b->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (b->digits > a->digits) {
        big = b;
        small = a;
    }
    if (!mpd_qresize(result, big->len, status)) {
        return;
    }

    /* full words */
    for (i = 0; i < small->len-1; i++) {
        x = small->data[i];
        y = big->data[i];
        z = 0;
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            ybit = y % 10;
            y /= 10;
            /* operands of the logical operations may only contain 0s and 1s */
            if (xbit > 1 || ybit > 1) {
                goto invalid_operation;
            }
            z += (xbit^ybit) ? mpd_pow10[k] : 0;
        }
        result->data[i] = z;
    }
    /* most significant word of small (may be a partial word) */
    x = small->data[i];
    y = big->data[i];
    z = 0;
    mswdigits = mpd_word_digits(x);
    for (k = 0; k < mswdigits; k++) {
        xbit = x % 10;
        x /= 10;
        ybit = y % 10;
        y /= 10;
        if (xbit > 1 || ybit > 1) {
            goto invalid_operation;
        }
        z += (xbit^ybit) ? mpd_pow10[k] : 0;
    }
    /* Scan and copy the rest of y. XORing with the implicit zeros of small
     * leaves big's digits unchanged, so valid digits are accumulated into z. */
    for (; k < MPD_RDIGITS; k++) {
        ybit = y % 10;
        y /= 10;
        if (ybit > 1) {
            goto invalid_operation;
        }
        z += ybit*mpd_pow10[k];
    }
    result->data[i++] = z;

    /* scan and validate the remaining words of big, then copy them through */
    for (; i < big->len; i++) {
        y = big->data[i];
        for (k = 0; k < MPD_RDIGITS; k++) {
            ybit = y % 10;
            y /= 10;
            if (ybit > 1) {
                goto invalid_operation;
            }
        }
        result->data[i] = big->data[i];
    }

    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, big->len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
  2655. /******************************************************************************/
  2656. /* Arithmetic operations */
  2657. /******************************************************************************/
  2658. /*
  2659. * The absolute value of a. If a is negative, the result is the same
  2660. * as the result of the minus operation. Otherwise, the result is the
  2661. * result of the plus operation.
  2662. */
  2663. void
  2664. mpd_qabs(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  2665. uint32_t *status)
  2666. {
  2667. if (mpd_isspecial(a)) {
  2668. if (mpd_qcheck_nan(result, a, ctx, status)) {
  2669. return;
  2670. }
  2671. }
  2672. if (mpd_isnegative(a)) {
  2673. mpd_qminus(result, a, ctx, status);
  2674. }
  2675. else {
  2676. mpd_qplus(result, a, ctx, status);
  2677. }
  2678. mpd_qfinalize(result, ctx, status);
  2679. }
  2680. static inline void
  2681. _mpd_ptrswap(mpd_t **a, mpd_t **b)
  2682. {
  2683. mpd_t *t = *a;
  2684. *a = *b;
  2685. *b = t;
  2686. }
  2687. /* Add or subtract infinities. */
  2688. static void
  2689. _mpd_qaddsub_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
  2690. uint32_t *status)
  2691. {
  2692. if (mpd_isinfinite(a)) {
  2693. if (mpd_sign(a) != sign_b && mpd_isinfinite(b)) {
  2694. mpd_seterror(result, MPD_Invalid_operation, status);
  2695. }
  2696. else {
  2697. mpd_setspecial(result, mpd_sign(a), MPD_INF);
  2698. }
  2699. return;
  2700. }
  2701. assert(mpd_isinfinite(b));
  2702. mpd_setspecial(result, sign_b, MPD_INF);
  2703. }
/* Add or subtract non-special numbers. */
static void
_mpd_qaddsub(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_t *big, *small;
    MPD_NEW_STATIC(big_aligned,0,0,0,0);
    MPD_NEW_CONST(tiny,0,0,0,1,1,1);
    mpd_uint_t carry;
    mpd_ssize_t newsize, shift;
    mpd_ssize_t exp, i;
    int swap = 0;  /* parity of big/small pointer swaps, used for the sign */

    /* compare exponents */
    big = (mpd_t *)a; small = (mpd_t *)b;
    if (big->exp != small->exp) {
        if (small->exp > big->exp) {
            _mpd_ptrswap(&big, &small);
            swap++;
        }
        if (!mpd_iszerocoeff(big)) {
            /* Test for adjexp(small) + big->digits < adjexp(big), if big-digits > prec
             * Test for adjexp(small) + prec + 1 < adjexp(big), if big-digits <= prec
             * If true, the magnitudes of the numbers are so far apart that one can as
             * well add or subtract 1*10**big->exp. */
            exp = big->exp - 1;
            exp += (big->digits > ctx->prec) ? 0 : big->digits-ctx->prec-1;
            if (mpd_adjexp(small) < exp) {
                /* replace small by a 0-or-1 stand-in with exponent 'exp' */
                mpd_copy_flags(&tiny, small);
                tiny.exp = exp;
                tiny.digits = 1;
                tiny.len = 1;
                tiny.data[0] = mpd_iszerocoeff(small) ? 0 : 1;
                small = &tiny;
            }
            /* this cannot wrap: the difference is positive and <= maxprec+1 */
            shift = big->exp - small->exp;
            if (!mpd_qshiftl(&big_aligned, big, shift, status)) {
                mpd_seterror(result, MPD_Malloc_error, status);
                goto finish;
            }
            big = &big_aligned;
        }
    }
    result->exp = small->exp;

    /* compare length of coefficients */
    if (big->len < small->len) {
        _mpd_ptrswap(&big, &small);
        swap++;
    }

    newsize = big->len;
    if (!mpd_qresize(result, newsize, status)) {
        goto finish;
    }

    if (mpd_sign(a) == sign_b) {
        /* equal (effective) signs: plain addition of magnitudes */
        carry = _mpd_baseadd(result->data, big->data, small->data,
                             big->len, small->len);
        if (carry) {
            newsize = big->len + 1;
            if (!mpd_qresize(result, newsize, status)) {
                goto finish;
            }
            result->data[newsize-1] = carry;
        }
        result->len = newsize;
        mpd_set_flags(result, sign_b);
    }
    else {
        /* differing signs: subtract the smaller magnitude from the larger;
         * for equal lengths the magnitudes must be compared word by word */
        if (big->len == small->len) {
            for (i=big->len-1; i >= 0; --i) {
                if (big->data[i] != small->data[i]) {
                    if (big->data[i] < small->data[i]) {
                        _mpd_ptrswap(&big, &small);
                        swap++;
                    }
                    break;
                }
            }
        }

        _mpd_basesub(result->data, big->data, small->data,
                     big->len, small->len);
        newsize = _mpd_real_size(result->data, big->len);
        /* resize to smaller cannot fail */
        (void)mpd_qresize(result, newsize, status);
        result->len = newsize;
        /* an odd number of swaps means the result takes b's effective sign */
        sign_b = (swap & 1) ? sign_b : mpd_sign(a);
        mpd_set_flags(result, sign_b);

        if (mpd_iszerocoeff(result)) {
            /* a zero difference is positive, except when rounding
             * towards -Infinity */
            mpd_set_positive(result);
            if (ctx->round == MPD_ROUND_FLOOR) {
                mpd_set_negative(result);
            }
        }
    }

    mpd_setdigits(result);

finish:
    mpd_del(&big_aligned);
}
/* Add a and b. No specials, no finalizing. */
static void
_mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    /* addition keeps b's own sign */
    _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
}
/* Subtract b from a. No specials, no finalizing. */
static void
_mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    /* subtraction is addition with b's sign flipped */
    _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
}
  2815. /* Add a and b. */
  2816. void
  2817. mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
  2818. const mpd_context_t *ctx, uint32_t *status)
  2819. {
  2820. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  2821. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  2822. return;
  2823. }
  2824. _mpd_qaddsub_inf(result, a, b, mpd_sign(b), status);
  2825. return;
  2826. }
  2827. _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
  2828. mpd_qfinalize(result, ctx, status);
  2829. }
  2830. /* Subtract b from a. */
  2831. void
  2832. mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
  2833. const mpd_context_t *ctx, uint32_t *status)
  2834. {
  2835. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  2836. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  2837. return;
  2838. }
  2839. _mpd_qaddsub_inf(result, a, b, !mpd_sign(b), status);
  2840. return;
  2841. }
  2842. _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
  2843. mpd_qfinalize(result, ctx, status);
  2844. }
  2845. /* Add decimal and mpd_ssize_t. */
  2846. void
  2847. mpd_qadd_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
  2848. const mpd_context_t *ctx, uint32_t *status)
  2849. {
  2850. mpd_context_t maxcontext;
  2851. MPD_NEW_STATIC(bb,0,0,0,0);
  2852. mpd_maxcontext(&maxcontext);
  2853. mpd_qsset_ssize(&bb, b, &maxcontext, status);
  2854. mpd_qadd(result, a, &bb, ctx, status);
  2855. mpd_del(&bb);
  2856. }
  2857. /* Add decimal and mpd_uint_t. */
  2858. void
  2859. mpd_qadd_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
  2860. const mpd_context_t *ctx, uint32_t *status)
  2861. {
  2862. mpd_context_t maxcontext;
  2863. MPD_NEW_STATIC(bb,0,0,0,0);
  2864. mpd_maxcontext(&maxcontext);
  2865. mpd_qsset_uint(&bb, b, &maxcontext, status);
  2866. mpd_qadd(result, a, &bb, ctx, status);
  2867. mpd_del(&bb);
  2868. }
  2869. /* Subtract mpd_ssize_t from decimal. */
  2870. void
  2871. mpd_qsub_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
  2872. const mpd_context_t *ctx, uint32_t *status)
  2873. {
  2874. mpd_context_t maxcontext;
  2875. MPD_NEW_STATIC(bb,0,0,0,0);
  2876. mpd_maxcontext(&maxcontext);
  2877. mpd_qsset_ssize(&bb, b, &maxcontext, status);
  2878. mpd_qsub(result, a, &bb, ctx, status);
  2879. mpd_del(&bb);
  2880. }
  2881. /* Subtract mpd_uint_t from decimal. */
  2882. void
  2883. mpd_qsub_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
  2884. const mpd_context_t *ctx, uint32_t *status)
  2885. {
  2886. mpd_context_t maxcontext;
  2887. MPD_NEW_STATIC(bb,0,0,0,0);
  2888. mpd_maxcontext(&maxcontext);
  2889. mpd_qsset_uint(&bb, b, &maxcontext, status);
  2890. mpd_qsub(result, a, &bb, ctx, status);
  2891. mpd_del(&bb);
  2892. }
/* Add decimal and int32_t. */
void
mpd_qadd_i32(mpd_t *result, const mpd_t *a, int32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    /* int32_t always fits into mpd_ssize_t */
    mpd_qadd_ssize(result, a, b, ctx, status);
}
/* Add decimal and uint32_t. */
void
mpd_qadd_u32(mpd_t *result, const mpd_t *a, uint32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    /* uint32_t always fits into mpd_uint_t */
    mpd_qadd_uint(result, a, b, ctx, status);
}
#ifdef CONFIG_64
/* Add decimal and int64_t. */
void
mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    /* in the 64-bit configuration, mpd_ssize_t is 64 bits wide */
    mpd_qadd_ssize(result, a, b, ctx, status);
}

/* Add decimal and uint64_t. */
void
mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    /* in the 64-bit configuration, mpd_uint_t is 64 bits wide */
    mpd_qadd_uint(result, a, b, ctx, status);
}
#endif
/* Subtract int32_t from decimal. */
void
mpd_qsub_i32(mpd_t *result, const mpd_t *a, int32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    /* int32_t always fits into mpd_ssize_t */
    mpd_qsub_ssize(result, a, b, ctx, status);
}
/* Subtract uint32_t from decimal. */
void
mpd_qsub_u32(mpd_t *result, const mpd_t *a, uint32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    /* uint32_t always fits into mpd_uint_t */
    mpd_qsub_uint(result, a, b, ctx, status);
}
#ifdef CONFIG_64
/* Subtract int64_t from decimal. */
void
mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    /* in the 64-bit configuration, mpd_ssize_t is 64 bits wide */
    mpd_qsub_ssize(result, a, b, ctx, status);
}

/* Subtract uint64_t from decimal. */
void
mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    /* in the 64-bit configuration, mpd_uint_t is 64 bits wide */
    mpd_qsub_uint(result, a, b, ctx, status);
}
#endif
  2953. /* Divide infinities. */
  2954. static void
  2955. _mpd_qdiv_inf(mpd_t *result, const mpd_t *a, const mpd_t *b,
  2956. const mpd_context_t *ctx, uint32_t *status)
  2957. {
  2958. if (mpd_isinfinite(a)) {
  2959. if (mpd_isinfinite(b)) {
  2960. mpd_seterror(result, MPD_Invalid_operation, status);
  2961. return;
  2962. }
  2963. mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
  2964. return;
  2965. }
  2966. assert(mpd_isinfinite(b));
  2967. _settriple(result, mpd_sign(a)^mpd_sign(b), 0, mpd_etiny(ctx));
  2968. *status |= MPD_Clamped;
  2969. }
enum {NO_IDEAL_EXP, SET_IDEAL_EXP};

/* Divide a by b. */
static void
_mpd_qdiv(int action, mpd_t *q, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(aligned,0,0,0,0);
    mpd_uint_t ld;
    mpd_ssize_t shift, exp, tz;
    mpd_ssize_t newsize;
    mpd_ssize_t ideal_exp;
    mpd_uint_t rem;
    uint8_t sign_a = mpd_sign(a);
    uint8_t sign_b = mpd_sign(b);

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
            return;
        }
        _mpd_qdiv_inf(q, a, b, ctx, status);
        return;
    }
    if (mpd_iszerocoeff(b)) {
        if (mpd_iszerocoeff(a)) {
            /* 0 / 0 */
            mpd_seterror(q, MPD_Division_undefined, status);
        }
        else {
            /* x / 0 */
            mpd_setspecial(q, sign_a^sign_b, MPD_INF);
            *status |= MPD_Division_by_zero;
        }
        return;
    }
    if (mpd_iszerocoeff(a)) {
        /* 0 / x: an exact zero with the ideal exponent */
        exp = a->exp - b->exp;
        _settriple(q, sign_a^sign_b, 0, exp);
        mpd_qfinalize(q, ctx, status);
        return;
    }

    /* Align the operands so that the integer quotient has prec+1 digits
     * (one guard digit); 'exp' is the exponent of that raw quotient. */
    shift = (b->digits - a->digits) + ctx->prec + 1;
    ideal_exp = a->exp - b->exp;
    exp = ideal_exp - shift;
    if (shift > 0) {
        if (!mpd_qshiftl(&aligned, a, shift, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        a = &aligned;
    }
    else if (shift < 0) {
        shift = -shift;
        if (!mpd_qshiftl(&aligned, b, shift, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        b = &aligned;
    }

    newsize = a->len - b->len + 1;
    /* resize q unless it aliases an operand that is already large enough */
    if ((q != b && q != a) || (q == b && newsize > b->len)) {
        if (!mpd_qresize(q, newsize, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
    }

    /* pick the division algorithm based on operand size */
    if (b->len == 1) {
        rem = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
    }
    else if (a->len < 2*MPD_NEWTONDIV_CUTOFF &&
             b->len < MPD_NEWTONDIV_CUTOFF) {
        int ret = _mpd_basedivmod(q->data, NULL, a->data, b->data,
                                  a->len, b->len);
        if (ret < 0) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        rem = ret;
    }
    else {
        MPD_NEW_STATIC(r,0,0,0,0);
        _mpd_qbarrett_divmod(q, &r, a, b, status);
        if (mpd_isspecial(q) || mpd_isspecial(&r)) {
            mpd_del(&r);
            goto finish;
        }
        /* only whether the remainder is nonzero matters below */
        rem = !mpd_iszerocoeff(&r);
        mpd_del(&r);
        newsize = q->len;
    }

    newsize = _mpd_real_size(q->data, newsize);
    /* resize to smaller cannot fail */
    mpd_qresize(q, newsize, status);
    q->len = newsize;
    mpd_setdigits(q);

    shift = ideal_exp - exp;
    if (rem) {
        /* Inexact result: a trailing 0 or 5 is nudged to 1 or 6 so that the
         * guard digit rounds correctly in mpd_qfinalize(). */
        ld = mpd_lsd(q->data[0]);
        if (ld == 0 || ld == 5) {
            q->data[0] += 1;
        }
    }
    else if (action == SET_IDEAL_EXP && shift > 0) {
        /* Exact result: strip trailing zeros, moving the exponent towards
         * the ideal exponent. */
        tz = mpd_trail_zeros(q);
        shift = (tz > shift) ? shift : tz;
        mpd_qshiftr_inplace(q, shift);
        exp += shift;
    }

    mpd_set_flags(q, sign_a^sign_b);
    q->exp = exp;

finish:
    mpd_del(&aligned);
    mpd_qfinalize(q, ctx, status);
}
/* Divide a by b.
 *
 * Public division entry point: delegates to _mpd_qdiv() with
 * SET_IDEAL_EXP so an exact quotient is reduced towards the ideal
 * exponent a->exp - b->exp. Errors are reported via *status. */
void
mpd_qdiv(mpd_t *q, const mpd_t *a, const mpd_t *b,
         const mpd_context_t *ctx, uint32_t *status)
{
    _mpd_qdiv(SET_IDEAL_EXP, q, a, b, ctx, status);
}
/* Internal function: integer division with remainder.
 *
 * Computes q = integer part of a/b and r = a - q*b. Specials and a zero
 * divisor have already been handled by the callers. If the quotient
 * cannot be represented in ctx->prec digits, MPD_Division_impossible is
 * raised and both q and r are set to NaN. On any failure both results
 * are NaN (see nanresult). */
static void
_mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
             const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(aligned,0,0,0,0);
    mpd_ssize_t qsize, rsize;
    mpd_ssize_t ideal_exp, expdiff, shift;
    uint8_t sign_a = mpd_sign(a);
    uint8_t sign_ab = mpd_sign(a)^mpd_sign(b);

    /* The remainder takes the ideal exponent min(a->exp, b->exp). */
    ideal_exp = (a->exp > b->exp) ? b->exp : a->exp;
    if (mpd_iszerocoeff(a)) {
        /* 0 / b: quotient is zero, remainder is a at the ideal exponent. */
        if (!mpd_qcopy(r, a, status)) {
            goto nanresult; /* GCOV_NOT_REACHED */
        }
        r->exp = ideal_exp;
        _settriple(q, sign_ab, 0, 0);
        return;
    }

    expdiff = mpd_adjexp(a) - mpd_adjexp(b);
    if (expdiff < 0) {
        /* abs(a) < abs(b): quotient is zero, remainder is a, padded with
         * zeros if necessary to reach the ideal exponent. */
        if (a->exp > b->exp) {
            /* positive and less than b->digits - a->digits */
            shift = a->exp - b->exp;
            if (!mpd_qshiftl(r, a, shift, status)) {
                goto nanresult;
            }
            r->exp = ideal_exp;
        }
        else {
            if (!mpd_qcopy(r, a, status)) {
                goto nanresult;
            }
        }
        _settriple(q, sign_ab, 0, 0);
        return;
    }
    if (expdiff > ctx->prec) {
        /* The integer quotient would exceed the context precision. */
        *status |= MPD_Division_impossible;
        goto nanresult;
    }

    /*
     * At this point we have:
     *   (1) 0 <= a->exp + a->digits - b->exp - b->digits <= prec
     *   (2) a->exp - b->exp >= b->digits - a->digits
     *   (3) a->exp - b->exp <= prec + b->digits - a->digits
     */
    if (a->exp != b->exp) {
        /* Align both operands to the smaller exponent. */
        shift = a->exp - b->exp;
        if (shift > 0) {
            /* by (3), after the shift a->digits <= prec + b->digits */
            if (!mpd_qshiftl(&aligned, a, shift, status)) {
                goto nanresult;
            }
            a = &aligned;
        }
        else {
            shift = -shift;
            /* by (2), after the shift b->digits <= a->digits */
            if (!mpd_qshiftl(&aligned, b, shift, status)) {
                goto nanresult;
            }
            b = &aligned;
        }
    }

    /* Resize q and r, except when they alias an operand whose buffer is
     * already large enough. */
    qsize = a->len - b->len + 1;
    if (!(q == a && qsize < a->len) && !(q == b && qsize < b->len)) {
        if (!mpd_qresize(q, qsize, status)) {
            goto nanresult;
        }
    }
    rsize = b->len;
    if (!(r == a && rsize < a->len)) {
        if (!mpd_qresize(r, rsize, status)) {
            goto nanresult;
        }
    }

    /* Select the division algorithm based on operand lengths. */
    if (b->len == 1) {
        if (a->len == 1) {
            /* single word / single word */
            _mpd_div_word(&q->data[0], &r->data[0], a->data[0], b->data[0]);
        }
        else {
            /* multi word / single word */
            r->data[0] = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
        }
    }
    else if (a->len < 2*MPD_NEWTONDIV_CUTOFF &&
             b->len < MPD_NEWTONDIV_CUTOFF) {
        /* schoolbook division for mid-sized operands */
        int ret;
        ret = _mpd_basedivmod(q->data, r->data, a->data, b->data,
                              a->len, b->len);
        if (ret == -1) {
            *status |= MPD_Malloc_error;
            goto nanresult;
        }
    }
    else {
        /* Barrett division for large operands; q and r are fully set. */
        _mpd_qbarrett_divmod(q, r, a, b, status);
        if (mpd_isspecial(q) || mpd_isspecial(r)) {
            goto nanresult;
        }
        if (mpd_isinfinite(q) || q->digits > ctx->prec) {
            *status |= MPD_Division_impossible;
            goto nanresult;
        }
        qsize = q->len;
        rsize = r->len;
    }

    /* Strip leading zero words from the quotient; it carries sign_a^sign_b
     * and exponent 0. */
    qsize = _mpd_real_size(q->data, qsize);
    /* resize to smaller cannot fail */
    mpd_qresize(q, qsize, status);
    q->len = qsize;
    mpd_setdigits(q);
    mpd_set_flags(q, sign_ab);
    q->exp = 0;
    if (q->digits > ctx->prec) {
        *status |= MPD_Division_impossible;
        goto nanresult;
    }

    /* Strip leading zero words from the remainder; it keeps the sign of a
     * and the ideal exponent. */
    rsize = _mpd_real_size(r->data, rsize);
    /* resize to smaller cannot fail */
    mpd_qresize(r, rsize, status);
    r->len = rsize;
    mpd_setdigits(r);
    mpd_set_flags(r, sign_a);
    r->exp = ideal_exp;

out:
    mpd_del(&aligned);
    return;

nanresult:
    mpd_setspecial(q, MPD_POS, MPD_NAN);
    mpd_setspecial(r, MPD_POS, MPD_NAN);
    goto out;
}
/* Integer division with remainder.
 *
 * Public entry point: handles NaNs, infinities and zero divisors, then
 * delegates to _mpd_qdivmod() and rounds both results to the context. */
void
mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
            const mpd_context_t *ctx, uint32_t *status)
{
    uint8_t sign = mpd_sign(a)^mpd_sign(b);

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
            /* NaN operand: propagate the NaN to the remainder as well. */
            mpd_qcopy(r, q, status);
            return;
        }
        if (mpd_isinfinite(a)) {
            if (mpd_isinfinite(b)) {
                /* inf divmod inf: quotient is NaN */
                mpd_setspecial(q, MPD_POS, MPD_NAN);
            }
            else {
                mpd_setspecial(q, sign, MPD_INF);
            }
            /* remainder of an infinite dividend is undefined */
            mpd_setspecial(r, MPD_POS, MPD_NAN);
            *status |= MPD_Invalid_operation;
            return;
        }
        if (mpd_isinfinite(b)) {
            /* finite divmod inf: quotient is zero, remainder is a, rounded. */
            if (!mpd_qcopy(r, a, status)) {
                mpd_seterror(q, MPD_Malloc_error, status);
                return;
            }
            mpd_qfinalize(r, ctx, status);
            _settriple(q, sign, 0, 0);
            return;
        }
        /* debug */
        abort(); /* GCOV_NOT_REACHED */
    }
    if (mpd_iszerocoeff(b)) {
        if (mpd_iszerocoeff(a)) {
            /* 0 divmod 0 */
            mpd_setspecial(q, MPD_POS, MPD_NAN);
            mpd_setspecial(r, MPD_POS, MPD_NAN);
            *status |= MPD_Division_undefined;
        }
        else {
            /* x divmod 0, x != 0 */
            mpd_setspecial(q, sign, MPD_INF);
            mpd_setspecial(r, MPD_POS, MPD_NAN);
            *status |= (MPD_Division_by_zero|MPD_Invalid_operation);
        }
        return;
    }

    _mpd_qdivmod(q, r, a, b, ctx, status);
    mpd_qfinalize(q, ctx, status);
    mpd_qfinalize(r, ctx, status);
}
  3271. void
  3272. mpd_qdivint(mpd_t *q, const mpd_t *a, const mpd_t *b,
  3273. const mpd_context_t *ctx, uint32_t *status)
  3274. {
  3275. MPD_NEW_STATIC(r,0,0,0,0);
  3276. uint8_t sign = mpd_sign(a)^mpd_sign(b);
  3277. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  3278. if (mpd_qcheck_nans(q, a, b, ctx, status)) {
  3279. return;
  3280. }
  3281. if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
  3282. mpd_seterror(q, MPD_Invalid_operation, status);
  3283. return;
  3284. }
  3285. if (mpd_isinfinite(a)) {
  3286. mpd_setspecial(q, sign, MPD_INF);
  3287. return;
  3288. }
  3289. if (mpd_isinfinite(b)) {
  3290. _settriple(q, sign, 0, 0);
  3291. return;
  3292. }
  3293. /* debug */
  3294. abort(); /* GCOV_NOT_REACHED */
  3295. }
  3296. if (mpd_iszerocoeff(b)) {
  3297. if (mpd_iszerocoeff(a)) {
  3298. mpd_seterror(q, MPD_Division_undefined, status);
  3299. }
  3300. else {
  3301. mpd_setspecial(q, sign, MPD_INF);
  3302. *status |= MPD_Division_by_zero;
  3303. }
  3304. return;
  3305. }
  3306. _mpd_qdivmod(q, &r, a, b, ctx, status);
  3307. mpd_del(&r);
  3308. mpd_qfinalize(q, ctx, status);
  3309. }
  3310. /* Divide decimal by mpd_ssize_t. */
  3311. void
  3312. mpd_qdiv_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
  3313. const mpd_context_t *ctx, uint32_t *status)
  3314. {
  3315. mpd_context_t maxcontext;
  3316. MPD_NEW_STATIC(bb,0,0,0,0);
  3317. mpd_maxcontext(&maxcontext);
  3318. mpd_qsset_ssize(&bb, b, &maxcontext, status);
  3319. mpd_qdiv(result, a, &bb, ctx, status);
  3320. mpd_del(&bb);
  3321. }
  3322. /* Divide decimal by mpd_uint_t. */
  3323. void
  3324. mpd_qdiv_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
  3325. const mpd_context_t *ctx, uint32_t *status)
  3326. {
  3327. mpd_context_t maxcontext;
  3328. MPD_NEW_STATIC(bb,0,0,0,0);
  3329. mpd_maxcontext(&maxcontext);
  3330. mpd_qsset_uint(&bb, b, &maxcontext, status);
  3331. mpd_qdiv(result, a, &bb, ctx, status);
  3332. mpd_del(&bb);
  3333. }
/* Divide decimal by int32_t.
 * Thin wrapper: b is widened to mpd_ssize_t and mpd_qdiv_ssize() does
 * the work. */
void
mpd_qdiv_i32(mpd_t *result, const mpd_t *a, int32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qdiv_ssize(result, a, b, ctx, status);
}
/* Divide decimal by uint32_t.
 * Thin wrapper: b is widened to mpd_uint_t and mpd_qdiv_uint() does
 * the work. */
void
mpd_qdiv_u32(mpd_t *result, const mpd_t *a, uint32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qdiv_uint(result, a, b, ctx, status);
}
  3348. #ifdef CONFIG_64
/* Divide decimal by int64_t.
 * Only available under CONFIG_64, where mpd_ssize_t can hold an int64_t. */
void
mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qdiv_ssize(result, a, b, ctx, status);
}
/* Divide decimal by uint64_t.
 * Only available under CONFIG_64, where mpd_uint_t can hold a uint64_t. */
void
mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qdiv_uint(result, a, b, ctx, status);
}
  3363. #endif
  3364. #if defined(_MSC_VER)
  3365. /* conversion from 'double' to 'mpd_ssize_t', possible loss of data */
  3366. #pragma warning(disable:4244)
  3367. #endif
  3368. /*
  3369. * Get the number of iterations for the Horner scheme in _mpd_qexp().
  3370. */
  3371. static inline mpd_ssize_t
  3372. _mpd_get_exp_iterations(const mpd_t *a, mpd_ssize_t prec)
  3373. {
  3374. mpd_uint_t dummy;
  3375. mpd_uint_t msdigits;
  3376. double f;
  3377. /* 9 is MPD_RDIGITS for 32 bit platforms */
  3378. _mpd_get_msdigits(&dummy, &msdigits, a, 9);
  3379. f = ((double)msdigits + 1) / mpd_pow10[mpd_word_digits(msdigits)];
  3380. #ifdef CONFIG_64
  3381. #ifdef USE_80BIT_LONG_DOUBLE
  3382. return ceill((1.435*(long double)prec - 1.182)
  3383. / log10l((long double)prec/f));
  3384. #else
  3385. /* prec > floor((1ULL<<53) / 1.435) */
  3386. if (prec > 6276793905742851LL) {
  3387. return MPD_SSIZE_MAX;
  3388. }
  3389. return ceil((1.435*(double)prec - 1.182) / log10((double)prec/f));
  3390. #endif
  3391. #else /* CONFIG_32 */
  3392. return ceil((1.435*(double)prec - 1.182) / log10((double)prec/f));
  3393. #if defined(_MSC_VER)
  3394. #pragma warning(default:4244)
  3395. #endif
  3396. #endif
  3397. }
  3398. /*
  3399. * Internal function, specials have been dealt with.
  3400. *
  3401. * The algorithm is from Hull&Abrham, Variable Precision Exponential Function,
  3402. * ACM Transactions on Mathematical Software, Vol. 12, No. 2, June 1986.
  3403. *
  3404. * Main differences:
  3405. *
  3406. * - The number of iterations for the Horner scheme is calculated using the
  3407. * C log10() function.
  3408. *
  3409. * - The analysis for early abortion has been adapted for the mpd_t
  3410. * ranges.
  3411. */
  3412. static void
  3413. _mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  3414. uint32_t *status)
  3415. {
  3416. mpd_context_t workctx;
  3417. MPD_NEW_STATIC(tmp,0,0,0,0);
  3418. MPD_NEW_STATIC(sum,0,0,0,0);
  3419. MPD_NEW_CONST(word,0,0,0,1,1,1);
  3420. mpd_ssize_t j, n, t;
  3421. assert(!mpd_isspecial(a));
  3422. /*
  3423. * We are calculating e^x = e^(r*10^t) = (e^r)^(10^t), where r < 1 and t >= 0.
  3424. *
  3425. * If t > 0, we have:
  3426. *
  3427. * (1) 0.1 <= r < 1, so e^r >= e^0.1. Overflow in the final power operation
  3428. * will occur when (e^0.1)^(10^t) > 10^(emax+1). If we consider MAX_EMAX,
  3429. * this will happen for t > 10 (32 bit) or (t > 19) (64 bit).
  3430. *
  3431. * (2) -1 < r <= -0.1, so e^r > e^-1. Underflow in the final power operation
  3432. * will occur when (e^-1)^(10^t) < 10^(etiny-1). If we consider MIN_ETINY,
  3433. * this will also happen for t > 10 (32 bit) or (t > 19) (64 bit).
  3434. */
  3435. #if defined(CONFIG_64)
  3436. #define MPD_EXP_MAX_T 19
  3437. #elif defined(CONFIG_32)
  3438. #define MPD_EXP_MAX_T 10
  3439. #endif
  3440. t = a->digits + a->exp;
  3441. t = (t > 0) ? t : 0;
  3442. if (t > MPD_EXP_MAX_T) {
  3443. if (mpd_ispositive(a)) {
  3444. mpd_setspecial(result, MPD_POS, MPD_INF);
  3445. *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
  3446. }
  3447. else {
  3448. _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
  3449. *status |= (MPD_Inexact|MPD_Rounded|MPD_Subnormal|
  3450. MPD_Underflow|MPD_Clamped);
  3451. }
  3452. return;
  3453. }
  3454. mpd_maxcontext(&workctx);
  3455. workctx.prec = ctx->prec + t + 2;
  3456. workctx.prec = (workctx.prec < 9) ? 9 : workctx.prec;
  3457. workctx.round = MPD_ROUND_HALF_EVEN;
  3458. if ((n = _mpd_get_exp_iterations(a, workctx.prec)) == MPD_SSIZE_MAX) {
  3459. mpd_seterror(result, MPD_Invalid_operation, status); /* GCOV_UNLIKELY */
  3460. goto finish; /* GCOV_UNLIKELY */
  3461. }
  3462. if (!mpd_qcopy(result, a, status)) {
  3463. goto finish;
  3464. }
  3465. result->exp -= t;
  3466. _settriple(&sum, MPD_POS, 1, 0);
  3467. for (j = n-1; j >= 1; j--) {
  3468. word.data[0] = j;
  3469. mpd_setdigits(&word);
  3470. mpd_qdiv(&tmp, result, &word, &workctx, &workctx.status);
  3471. mpd_qmul(&sum, &sum, &tmp, &workctx, &workctx.status);
  3472. mpd_qadd(&sum, &sum, &one, &workctx, &workctx.status);
  3473. }
  3474. #ifdef CONFIG_64
  3475. _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
  3476. #else
  3477. if (t <= MPD_MAX_POW10) {
  3478. _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
  3479. }
  3480. else {
  3481. t -= MPD_MAX_POW10;
  3482. _mpd_qpow_uint(&tmp, &sum, mpd_pow10[MPD_MAX_POW10], MPD_POS,
  3483. &workctx, status);
  3484. _mpd_qpow_uint(result, &tmp, mpd_pow10[t], MPD_POS, &workctx, status);
  3485. }
  3486. #endif
  3487. finish:
  3488. mpd_del(&tmp);
  3489. mpd_del(&sum);
  3490. *status |= (workctx.status&MPD_Errors);
  3491. *status |= (MPD_Inexact|MPD_Rounded);
  3492. }
/* exp(a)
 *
 * Public entry point: handles specials and zero, then either computes a
 * single-shot result or, if ctx->allcr is set, retries with increasing
 * precision until the value is correctly rounded. */
void
mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
         uint32_t *status)
{
    mpd_context_t workctx;

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            /* exp(-inf) == 0 */
            _settriple(result, MPD_POS, 0, 0);
        }
        else {
            /* exp(+inf) == +inf */
            mpd_setspecial(result, MPD_POS, MPD_INF);
        }
        return;
    }
    if (mpd_iszerocoeff(a)) {
        /* exp(0) == 1, exactly */
        _settriple(result, MPD_POS, 1, 0);
        return;
    }

    workctx = *ctx;
    workctx.round = MPD_ROUND_HALF_EVEN;

    if (ctx->allcr) {
        /* Correct rounding: recompute with growing precision until
         * result+ulp and result-ulp round to the same ctx->prec value. */
        MPD_NEW_STATIC(t1, 0,0,0,0);
        MPD_NEW_STATIC(t2, 0,0,0,0);
        MPD_NEW_STATIC(ulp, 0,0,0,0);
        MPD_NEW_STATIC(aa, 0,0,0,0);
        mpd_ssize_t prec;

        if (result == a) {
            /* result aliases the operand: work on a copy of a */
            if (!mpd_qcopy(&aa, a, status)) {
                mpd_seterror(result, MPD_Malloc_error, status);
                return;
            }
            a = &aa;
        }

        workctx.clamp = 0;
        prec = ctx->prec + 3;
        while (1) {
            workctx.prec = prec;
            _mpd_qexp(result, a, &workctx, status);
            /* one unit in the last place of the extended-precision result */
            _ssettriple(&ulp, MPD_POS, 1,
                        result->exp + result->digits-workctx.prec-1);

            workctx.prec = ctx->prec;
            mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
            mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
            if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
                mpd_qcmp(&t1, &t2, status) == 0) {
                workctx.clamp = ctx->clamp;
                mpd_check_underflow(result, &workctx, status);
                mpd_qfinalize(result, &workctx, status);
                break;
            }
            prec += MPD_RDIGITS;
        }
        mpd_del(&t1);
        mpd_del(&t2);
        mpd_del(&ulp);
        mpd_del(&aa);
    }
    else {
        _mpd_qexp(result, a, &workctx, status);
        mpd_check_underflow(result, &workctx, status);
        mpd_qfinalize(result, &workctx, status);
    }
}
  3560. /* Fused multiply-add: (a * b) + c, with a single final rounding. */
  3561. void
  3562. mpd_qfma(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
  3563. const mpd_context_t *ctx, uint32_t *status)
  3564. {
  3565. uint32_t workstatus = 0;
  3566. mpd_t *cc = (mpd_t *)c;
  3567. if (result == c) {
  3568. if ((cc = mpd_qncopy(c)) == NULL) {
  3569. mpd_seterror(result, MPD_Malloc_error, status);
  3570. return;
  3571. }
  3572. }
  3573. _mpd_qmul(result, a, b, ctx, &workstatus);
  3574. if (!(workstatus&MPD_Invalid_operation)) {
  3575. mpd_qadd(result, result, cc, ctx, &workstatus);
  3576. }
  3577. if (cc != c) mpd_del(cc);
  3578. *status |= workstatus;
  3579. }
  3580. static inline int
  3581. ln_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2], mpd_ssize_t maxprec,
  3582. mpd_ssize_t initprec)
  3583. {
  3584. mpd_ssize_t k;
  3585. int i;
  3586. assert(maxprec >= 2 && initprec >= 2);
  3587. if (maxprec <= initprec) return -1;
  3588. i = 0; k = maxprec;
  3589. do {
  3590. k = (k+2) / 2;
  3591. klist[i++] = k;
  3592. } while (k > initprec);
  3593. return i-1;
  3594. }
/* Two word initial approximations for ln(10) */
#ifdef CONFIG_64
  #if MPD_RDIGITS != 19
    #error "mpdecimal.c: MPD_RDIGITS must be 19."
  #endif
/* Working coefficient of the cached ln(10), least significant word first:
 * 2.302585092994045684 179914546843642076 */
static mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {
    179914546843642076, 2302585092994045684
};
/* Pristine copy used by mpd_reset_ln10() to restore the cache. */
static mpd_uint_t mpd_ln10_init[2] = {
    179914546843642076, 2302585092994045684
};
#else
  #if MPD_RDIGITS != 9
    #error "mpdecimal.c: MPD_RDIGITS must be 9."
  #endif
/* 32-bit variants: 2.30258509 299404568 */
static mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {299404568, 230258509};
static mpd_uint_t mpd_ln10_init[2] = {299404568, 230258509};
#endif

/* mpd_ln10 is cached in order to speed up computations */
/* Initializer fields: flags, exp, digits, len, alloc, data. */
mpd_t mpd_ln10 = {MPD_STATIC|MPD_STATIC_DATA, -(2*MPD_RDIGITS-1),
                  2*MPD_RDIGITS, 2, MPD_MINALLOC_MAX, mpd_ln10_data};
  3616. static void
  3617. mpd_reset_ln10(void)
  3618. {
  3619. if (mpd_isdynamic_data(&mpd_ln10)) {
  3620. mpd_free(mpd_ln10.data);
  3621. }
  3622. mpd_ln10.data = mpd_ln10_data;
  3623. mpd_ln10_data[0] = mpd_ln10_init[0];
  3624. mpd_ln10_data[1] = mpd_ln10_init[1];
  3625. mpd_ln10.flags = MPD_STATIC|MPD_STATIC_DATA;
  3626. mpd_ln10.exp = -(2*MPD_RDIGITS-1);
  3627. mpd_ln10.digits = 2*MPD_RDIGITS;
  3628. mpd_ln10.len = 2;
  3629. mpd_ln10.alloc = MPD_MINALLOC_MAX;
  3630. }
  3631. /*
  3632. * Initializes or updates mpd_ln10. If mpd_ln10 is cached and has exactly the
  3633. * requested precision, the function returns. If the cached precision is greater
  3634. * than the requested precision, mpd_ln10 is shifted to the requested precision.
  3635. *
  3636. * The function can fail with MPD_Malloc_error.
  3637. */
  3638. void
  3639. mpd_update_ln10(mpd_ssize_t maxprec, uint32_t *status)
  3640. {
  3641. mpd_context_t varcontext, maxcontext;
  3642. MPD_NEW_STATIC(tmp, 0,0,0,0);
  3643. MPD_NEW_CONST(static10, 0,0,2,1,1,10);
  3644. mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
  3645. int i;
  3646. if (mpd_isspecial(&mpd_ln10)) {
  3647. mpd_reset_ln10();
  3648. }
  3649. if (mpd_ln10.digits > maxprec) {
  3650. /* shift to smaller cannot fail */
  3651. mpd_qshiftr_inplace(&mpd_ln10, mpd_ln10.digits-maxprec);
  3652. mpd_ln10.exp = -(mpd_ln10.digits-1);
  3653. return;
  3654. }
  3655. else if (mpd_ln10.digits == maxprec) {
  3656. return;
  3657. }
  3658. mpd_maxcontext(&maxcontext);
  3659. mpd_maxcontext(&varcontext);
  3660. varcontext.round = MPD_ROUND_TRUNC;
  3661. i = ln_schedule_prec(klist, maxprec+2, mpd_ln10.digits);
  3662. for (; i >= 0; i--) {
  3663. varcontext.prec = 2*klist[i]+3;
  3664. mpd_ln10.flags ^= MPD_NEG;
  3665. _mpd_qexp(&tmp, &mpd_ln10, &varcontext, status);
  3666. mpd_ln10.flags ^= MPD_NEG;
  3667. mpd_qmul(&tmp, &static10, &tmp, &varcontext, status);
  3668. mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
  3669. mpd_qadd(&mpd_ln10, &mpd_ln10, &tmp, &maxcontext, status);
  3670. if (mpd_isspecial(&mpd_ln10)) {
  3671. break;
  3672. }
  3673. }
  3674. mpd_del(&tmp);
  3675. varcontext.prec = maxprec;
  3676. varcontext.round = MPD_ROUND_HALF_EVEN;
  3677. mpd_qfinalize(&mpd_ln10, &varcontext, status);
  3678. }
/* Initial approximations for the ln() iteration: three-digit fixed-point
 * values of the natural logarithm, indexed by the first three digits of
 * the operand minus 100 (see the table lookup in _mpd_qln()). */
static const uint16_t lnapprox[900] = {
    /* index 0 - 400: log((i+100)/100) * 1000 */
    0, 10, 20, 30, 39, 49, 58, 68, 77, 86, 95, 104, 113, 122, 131, 140, 148, 157,
    166, 174, 182, 191, 199, 207, 215, 223, 231, 239, 247, 255, 262, 270, 278,
    285, 293, 300, 308, 315, 322, 329, 336, 344, 351, 358, 365, 372, 378, 385,
    392, 399, 406, 412, 419, 425, 432, 438, 445, 451, 457, 464, 470, 476, 482,
    489, 495, 501, 507, 513, 519, 525, 531, 536, 542, 548, 554, 560, 565, 571,
    577, 582, 588, 593, 599, 604, 610, 615, 621, 626, 631, 637, 642, 647, 652,
    658, 663, 668, 673, 678, 683, 688, 693, 698, 703, 708, 713, 718, 723, 728,
    732, 737, 742, 747, 751, 756, 761, 766, 770, 775, 779, 784, 788, 793, 798,
    802, 806, 811, 815, 820, 824, 829, 833, 837, 842, 846, 850, 854, 859, 863,
    867, 871, 876, 880, 884, 888, 892, 896, 900, 904, 908, 912, 916, 920, 924,
    928, 932, 936, 940, 944, 948, 952, 956, 959, 963, 967, 971, 975, 978, 982,
    986, 990, 993, 997, 1001, 1004, 1008, 1012, 1015, 1019, 1022, 1026, 1030,
    1033, 1037, 1040, 1044, 1047, 1051, 1054, 1058, 1061, 1065, 1068, 1072, 1075,
    1078, 1082, 1085, 1089, 1092, 1095, 1099, 1102, 1105, 1109, 1112, 1115, 1118,
    1122, 1125, 1128, 1131, 1135, 1138, 1141, 1144, 1147, 1151, 1154, 1157, 1160,
    1163, 1166, 1169, 1172, 1176, 1179, 1182, 1185, 1188, 1191, 1194, 1197, 1200,
    1203, 1206, 1209, 1212, 1215, 1218, 1221, 1224, 1227, 1230, 1233, 1235, 1238,
    1241, 1244, 1247, 1250, 1253, 1256, 1258, 1261, 1264, 1267, 1270, 1273, 1275,
    1278, 1281, 1284, 1286, 1289, 1292, 1295, 1297, 1300, 1303, 1306, 1308, 1311,
    1314, 1316, 1319, 1322, 1324, 1327, 1330, 1332, 1335, 1338, 1340, 1343, 1345,
    1348, 1351, 1353, 1356, 1358, 1361, 1364, 1366, 1369, 1371, 1374, 1376, 1379,
    1381, 1384, 1386, 1389, 1391, 1394, 1396, 1399, 1401, 1404, 1406, 1409, 1411,
    1413, 1416, 1418, 1421, 1423, 1426, 1428, 1430, 1433, 1435, 1437, 1440, 1442,
    1445, 1447, 1449, 1452, 1454, 1456, 1459, 1461, 1463, 1466, 1468, 1470, 1472,
    1475, 1477, 1479, 1482, 1484, 1486, 1488, 1491, 1493, 1495, 1497, 1500, 1502,
    1504, 1506, 1509, 1511, 1513, 1515, 1517, 1520, 1522, 1524, 1526, 1528, 1530,
    1533, 1535, 1537, 1539, 1541, 1543, 1545, 1548, 1550, 1552, 1554, 1556, 1558,
    1560, 1562, 1564, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585,
    1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609,
    /* index 401 - 899: -log((i+100)/1000) * 1000 */
    691, 689, 687, 685, 683, 681, 679, 677, 675, 673, 671, 669, 668, 666, 664,
    662, 660, 658, 656, 654, 652, 650, 648, 646, 644, 642, 641, 639, 637, 635,
    633, 631, 629, 627, 626, 624, 622, 620, 618, 616, 614, 612, 611, 609, 607,
    605, 603, 602, 600, 598, 596, 594, 592, 591, 589, 587, 585, 583, 582, 580,
    578, 576, 574, 573, 571, 569, 567, 566, 564, 562, 560, 559, 557, 555, 553,
    552, 550, 548, 546, 545, 543, 541, 540, 538, 536, 534, 533, 531, 529, 528,
    526, 524, 523, 521, 519, 518, 516, 514, 512, 511, 509, 508, 506, 504, 502,
    501, 499, 498, 496, 494, 493, 491, 489, 488, 486, 484, 483, 481, 480, 478,
    476, 475, 473, 472, 470, 468, 467, 465, 464, 462, 460, 459, 457, 456, 454,
    453, 451, 449, 448, 446, 445, 443, 442, 440, 438, 437, 435, 434, 432, 431,
    429, 428, 426, 425, 423, 422, 420, 419, 417, 416, 414, 412, 411, 410, 408,
    406, 405, 404, 402, 400, 399, 398, 396, 394, 393, 392, 390, 389, 387, 386,
    384, 383, 381, 380, 378, 377, 375, 374, 372, 371, 370, 368, 367, 365, 364,
    362, 361, 360, 358, 357, 355, 354, 352, 351, 350, 348, 347, 345, 344, 342,
    341, 340, 338, 337, 336, 334, 333, 331, 330, 328, 327, 326, 324, 323, 322,
    320, 319, 318, 316, 315, 313, 312, 311, 309, 308, 306, 305, 304, 302, 301,
    300, 298, 297, 296, 294, 293, 292, 290, 289, 288, 286, 285, 284, 282, 281,
    280, 278, 277, 276, 274, 273, 272, 270, 269, 268, 267, 265, 264, 263, 261,
    260, 259, 258, 256, 255, 254, 252, 251, 250, 248, 247, 246, 245, 243, 242,
    241, 240, 238, 237, 236, 234, 233, 232, 231, 229, 228, 227, 226, 224, 223,
    222, 221, 219, 218, 217, 216, 214, 213, 212, 211, 210, 208, 207, 206, 205,
    203, 202, 201, 200, 198, 197, 196, 195, 194, 192, 191, 190, 189, 188, 186,
    185, 184, 183, 182, 180, 179, 178, 177, 176, 174, 173, 172, 171, 170, 168,
    167, 166, 165, 164, 162, 161, 160, 159, 158, 157, 156, 154, 153, 152, 151,
    150, 148, 147, 146, 145, 144, 143, 142, 140, 139, 138, 137, 136, 135, 134,
    132, 131, 130, 129, 128, 127, 126, 124, 123, 122, 121, 120, 119, 118, 116,
    115, 114, 113, 112, 111, 110, 109, 108, 106, 105, 104, 103, 102, 101, 100,
    99, 98, 97, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 84, 83, 82, 81, 80, 79,
    78, 77, 76, 75, 74, 73, 72, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59,
    58, 57, 56, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39,
    38, 37, 36, 35, 34, 33, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19,
    18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
};
/* Internal ln() function that does not check for specials, zero or one.
 *
 * ln(a) = ln(v * 10^t) = ln(v) + t*ln(10), where 0.5 < v <= 5. The value
 * of ln(v) is obtained by a table-seeded Newton iteration; t*ln(10) uses
 * the global ln(10) cache. result may alias a. */
static void
_mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
         uint32_t *status)
{
    mpd_context_t varcontext, maxcontext;
    mpd_t *z = (mpd_t *) result;
    MPD_NEW_STATIC(v,0,0,0,0);
    MPD_NEW_STATIC(vtmp,0,0,0,0);
    MPD_NEW_STATIC(tmp,0,0,0,0);
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
    mpd_ssize_t maxprec, shift, t;
    mpd_ssize_t a_digits, a_exp;
    mpd_uint_t dummy, x;
    int i;

    assert(!mpd_isspecial(a) && !mpd_iszerocoeff(a));

    /*
     * We are calculating ln(a) = ln(v * 10^t) = ln(v) + t*ln(10),
     * where 0.5 < v <= 5.
     */
    if (!mpd_qcopy(&v, a, status)) {
        mpd_seterror(result, MPD_Malloc_error, status);
        goto finish;
    }

    /* Initial approximation: we have at least one non-zero digit */
    _mpd_get_msdigits(&dummy, &x, &v, 3);
    /* normalize x to three digits, then map to the lnapprox index range */
    if (x < 10) x *= 10;
    if (x < 100) x *= 10;
    x -= 100;

    /* a may equal z */
    a_digits = a->digits;
    a_exp = a->exp;

    /* Seed z with the table approximation of ln(v), scaled by 10^-3. */
    mpd_minalloc(z);
    mpd_clear_flags(z);
    z->data[0] = lnapprox[x];
    z->len = 1;
    z->exp = -3;
    mpd_setdigits(z);

    if (x <= 400) {
        /* table segment log((i+100)/100): ln(v) >= 0 */
        v.exp = -(a_digits - 1);
        t = a_exp + a_digits - 1;
    }
    else {
        /* table segment -log((i+100)/1000): the stored value is the
         * magnitude, so mark z negative */
        v.exp = -a_digits;
        t = a_exp + a_digits;
        mpd_set_negative(z);
    }

    mpd_maxcontext(&maxcontext);
    mpd_maxcontext(&varcontext);
    varcontext.round = MPD_ROUND_TRUNC;

    maxprec = ctx->prec + 2;
    if (x <= 10 || x >= 805) {
        /* v is close to 1: Estimate the magnitude of the logarithm.
         * If v = 1 or ln(v) will underflow, skip the loop. Otherwise,
         * adjust the precision upwards in order to obtain a sufficient
         * number of significant digits.
         *
         *   1) x/(1+x) < ln(1+x) < x, for x > -1, x != 0
         *
         *   2) (v-1)/v < ln(v) < v-1
         */
        mpd_t *lower = &tmp;
        mpd_t *upper = &vtmp;
        int cmp = _mpd_cmp(&v, &one);

        /* upper = v-1 rounded up, lower = (v-1)/v rounded down */
        varcontext.round = MPD_ROUND_CEILING;
        varcontext.prec = maxprec;
        mpd_qsub(upper, &v, &one, &varcontext, &varcontext.status);
        varcontext.round = MPD_ROUND_FLOOR;
        mpd_qdiv(lower, upper, &v, &varcontext, &varcontext.status);
        varcontext.round = MPD_ROUND_TRUNC;

        if (cmp < 0) {
            /* v < 1: ln(v) is negative, so the bounds trade places */
            _mpd_ptrswap(&upper, &lower);
        }
        if (mpd_adjexp(upper) < mpd_etiny(ctx)) {
            /* ln(v) underflows: use a tiny placeholder with the right sign */
            _settriple(z, (cmp<0), 1, mpd_etiny(ctx)-1);
            goto postloop;
        }
        if (mpd_adjexp(lower) < 0) {
            /* compensate for the leading zeros of ln(v) */
            maxprec = maxprec - mpd_adjexp(lower);
        }
    }

    /* Newton iteration for z = ln(v): z' = z + v*exp(-z) - 1 */
    i = ln_schedule_prec(klist, maxprec, 2);
    for (; i >= 0; i--) {
        varcontext.prec = 2*klist[i]+3;
        /* temporarily negate z to compute exp(-z) */
        z->flags ^= MPD_NEG;
        _mpd_qexp(&tmp, z, &varcontext, status);
        z->flags ^= MPD_NEG;

        if (v.digits > varcontext.prec) {
            /* truncate v to the working precision before multiplying */
            shift = v.digits - varcontext.prec;
            mpd_qshiftr(&vtmp, &v, shift, status);
            vtmp.exp += shift;
            mpd_qmul(&tmp, &vtmp, &tmp, &varcontext, status);
        }
        else {
            mpd_qmul(&tmp, &v, &tmp, &varcontext, status);
        }
        mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
        mpd_qadd(z, z, &tmp, &maxcontext, status);
        if (mpd_isspecial(z)) {
            break;
        }
    }

postloop:
    /* result = z + t*ln(10) */
    mpd_update_ln10(maxprec+2, status);
    mpd_qmul_ssize(&tmp, &mpd_ln10, t, &maxcontext, status);
    varcontext.prec = maxprec+2;
    mpd_qadd(result, &tmp, z, &varcontext, status);

finish:
    mpd_del(&v);
    mpd_del(&vtmp);
    mpd_del(&tmp);
}
/* ln(a)
 *
 * Public entry point: handles specials, zero, negative operands and
 * a == 1, checks for guaranteed overflow, then either computes a
 * single-shot result or, if ctx->allcr is set, retries with increasing
 * precision until the value is correctly rounded. */
void
mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
        uint32_t *status)
{
    mpd_context_t workctx;
    mpd_ssize_t adjexp, t;

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            /* ln(-inf) is invalid */
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        /* ln(+inf) == +inf */
        mpd_setspecial(result, MPD_POS, MPD_INF);
        return;
    }
    if (mpd_iszerocoeff(a)) {
        /* ln(0) == -inf */
        mpd_setspecial(result, MPD_NEG, MPD_INF);
        return;
    }
    if (mpd_isnegative(a)) {
        /* ln of a negative number is invalid */
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (_mpd_cmp(a, &one) == 0) {
        /* ln(1) == 0, exactly */
        _settriple(result, MPD_POS, 0, 0);
        return;
    }

    /* Check if the result will overflow.
     *
     * 1) adjexp(a) + 1 > log10(a) >= adjexp(a)
     *
     * 2) |log10(a)| >= adjexp(a), if adjexp(a) >= 0
     *    |log10(a)| > -adjexp(a)-1, if adjexp(a) < 0
     *
     * 3) |log(a)| > 2*|log10(a)|
     */
    adjexp = mpd_adjexp(a);
    t = (adjexp < 0) ? -adjexp-1 : adjexp;
    t *= 2;
    if (mpd_exp_digits(t)-1 > ctx->emax) {
        /* |ln(a)| is certain to overflow: set the result directly, with
         * the sign determined by whether a is below or above 1 */
        *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
        mpd_setspecial(result, (adjexp<0), MPD_INF);
        return;
    }

    workctx = *ctx;
    workctx.round = MPD_ROUND_HALF_EVEN;

    if (ctx->allcr) {
        /* Correct rounding: recompute with growing precision until
         * result+ulp and result-ulp round to the same ctx->prec value. */
        MPD_NEW_STATIC(t1, 0,0,0,0);
        MPD_NEW_STATIC(t2, 0,0,0,0);
        MPD_NEW_STATIC(ulp, 0,0,0,0);
        MPD_NEW_STATIC(aa, 0,0,0,0);
        mpd_ssize_t prec;

        if (result == a) {
            /* result aliases the operand: work on a copy of a */
            if (!mpd_qcopy(&aa, a, status)) {
                mpd_seterror(result, MPD_Malloc_error, status);
                return;
            }
            a = &aa;
        }

        workctx.clamp = 0;
        prec = ctx->prec + 3;
        while (1) {
            workctx.prec = prec;
            _mpd_qln(result, a, &workctx, status);
            /* one unit in the last place of the extended-precision result */
            _ssettriple(&ulp, MPD_POS, 1,
                        result->exp + result->digits-workctx.prec-1);

            workctx.prec = ctx->prec;
            mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
            mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
            if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
                mpd_qcmp(&t1, &t2, status) == 0) {
                workctx.clamp = ctx->clamp;
                mpd_check_underflow(result, &workctx, status);
                mpd_qfinalize(result, &workctx, status);
                break;
            }
            prec += MPD_RDIGITS;
        }
        mpd_del(&t1);
        mpd_del(&t2);
        mpd_del(&ulp);
        mpd_del(&aa);
    }
    else {
        _mpd_qln(result, a, &workctx, status);
        mpd_check_underflow(result, &workctx, status);
        mpd_qfinalize(result, &workctx, status);
    }
}
  3949. /* Internal log10() function that does not check for specials, zero, ... */
  3950. static void
  3951. _mpd_qlog10(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  3952. uint32_t *status)
  3953. {
  3954. mpd_context_t workctx;
  3955. mpd_maxcontext(&workctx);
  3956. workctx.prec = ctx->prec + 3;
  3957. _mpd_qln(result, a, &workctx, status);
  3958. mpd_update_ln10(workctx.prec, status);
  3959. workctx = *ctx;
  3960. workctx.round = MPD_ROUND_HALF_EVEN;
  3961. _mpd_qdiv(NO_IDEAL_EXP, result, result, &mpd_ln10, &workctx, status);
  3962. }
/* log10(a) */
void
mpd_qlog10(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
           uint32_t *status)
{
    mpd_context_t workctx;
    mpd_ssize_t adjexp, t;

    workctx = *ctx;
    workctx.round = MPD_ROUND_HALF_EVEN;

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            /* log10(-Inf) is undefined */
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        /* log10(+Inf) == +Inf */
        mpd_setspecial(result, MPD_POS, MPD_INF);
        return;
    }
    if (mpd_iszerocoeff(a)) {
        /* log10(0) == -Inf */
        mpd_setspecial(result, MPD_NEG, MPD_INF);
        return;
    }
    if (mpd_isnegative(a)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (mpd_coeff_ispow10(a)) {
        /* a is an exact power of ten: log10(a) == adjexp(a), exactly */
        uint8_t sign = 0;
        adjexp = mpd_adjexp(a);
        if (adjexp < 0) {
            sign = 1;
            adjexp = -adjexp;
        }
        _settriple(result, sign, adjexp, 0);
        mpd_qfinalize(result, &workctx, status);
        return;
    }

    /* Check if the result will overflow.
     *
     * 1) adjexp(a) + 1 > log10(a) >= adjexp(a)
     *
     * 2) |log10(a)| >= adjexp(a), if adjexp(a) >= 0
     *    |log10(a)| > -adjexp(a)-1, if adjexp(a) < 0
     */
    adjexp = mpd_adjexp(a);
    t = (adjexp < 0) ? -adjexp-1 : adjexp;
    if (mpd_exp_digits(t)-1 > ctx->emax) {
        *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
        mpd_setspecial(result, (adjexp<0), MPD_INF);
        return;
    }

    if (ctx->allcr) {
        /* Correct rounding: same Ziv-style iteration as in mpd_qln(). */
        MPD_NEW_STATIC(t1, 0,0,0,0);
        MPD_NEW_STATIC(t2, 0,0,0,0);
        MPD_NEW_STATIC(ulp, 0,0,0,0);
        MPD_NEW_STATIC(aa, 0,0,0,0);
        mpd_ssize_t prec;

        if (result == a) {
            /* protect a from being overwritten when result and a alias */
            if (!mpd_qcopy(&aa, a, status)) {
                mpd_seterror(result, MPD_Malloc_error, status);
                return;
            }
            a = &aa;
        }

        workctx.clamp = 0;
        prec = ctx->prec + 3;
        while (1) {
            workctx.prec = prec;
            _mpd_qlog10(result, a, &workctx, status);
            /* one unit in the last place of the extended-precision result */
            _ssettriple(&ulp, MPD_POS, 1,
                        result->exp + result->digits-workctx.prec-1);

            /* stable within one ulp at the original precision? */
            workctx.prec = ctx->prec;
            mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
            mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
            if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
                mpd_qcmp(&t1, &t2, status) == 0) {
                workctx.clamp = ctx->clamp;
                mpd_check_underflow(result, &workctx, status);
                mpd_qfinalize(result, &workctx, status);
                break;
            }
            prec += MPD_RDIGITS;
        }
        mpd_del(&t1);
        mpd_del(&t2);
        mpd_del(&ulp);
        mpd_del(&aa);
    }
    else {
        /* _mpd_qlog10's final division already rounds to ctx->prec */
        _mpd_qlog10(result, a, &workctx, status);
        mpd_check_underflow(result, &workctx, status);
    }
}
  4058. /*
  4059. * Maximum of the two operands. Attention: If one operand is a quiet NaN and the
  4060. * other is numeric, the numeric operand is returned. This may not be what one
  4061. * expects.
  4062. */
  4063. void
  4064. mpd_qmax(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4065. const mpd_context_t *ctx, uint32_t *status)
  4066. {
  4067. int c;
  4068. if (mpd_isqnan(a) && !mpd_isnan(b)) {
  4069. mpd_qcopy(result, b, status);
  4070. }
  4071. else if (mpd_isqnan(b) && !mpd_isnan(a)) {
  4072. mpd_qcopy(result, a, status);
  4073. }
  4074. else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  4075. return;
  4076. }
  4077. else {
  4078. c = _mpd_cmp(a, b);
  4079. if (c == 0) {
  4080. c = _mpd_cmp_numequal(a, b);
  4081. }
  4082. if (c < 0) {
  4083. mpd_qcopy(result, b, status);
  4084. }
  4085. else {
  4086. mpd_qcopy(result, a, status);
  4087. }
  4088. }
  4089. mpd_qfinalize(result, ctx, status);
  4090. }
  4091. /*
  4092. * Maximum magnitude: Same as mpd_max(), but compares the operands with their
  4093. * sign ignored.
  4094. */
  4095. void
  4096. mpd_qmax_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4097. const mpd_context_t *ctx, uint32_t *status)
  4098. {
  4099. int c;
  4100. if (mpd_isqnan(a) && !mpd_isnan(b)) {
  4101. mpd_qcopy(result, b, status);
  4102. }
  4103. else if (mpd_isqnan(b) && !mpd_isnan(a)) {
  4104. mpd_qcopy(result, a, status);
  4105. }
  4106. else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  4107. return;
  4108. }
  4109. else {
  4110. c = _mpd_cmp_abs(a, b);
  4111. if (c == 0) {
  4112. c = _mpd_cmp_numequal(a, b);
  4113. }
  4114. if (c < 0) {
  4115. mpd_qcopy(result, b, status);
  4116. }
  4117. else {
  4118. mpd_qcopy(result, a, status);
  4119. }
  4120. }
  4121. mpd_qfinalize(result, ctx, status);
  4122. }
  4123. /*
  4124. * Minimum of the two operands. Attention: If one operand is a quiet NaN and the
  4125. * other is numeric, the numeric operand is returned. This may not be what one
  4126. * expects.
  4127. */
  4128. void
  4129. mpd_qmin(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4130. const mpd_context_t *ctx, uint32_t *status)
  4131. {
  4132. int c;
  4133. if (mpd_isqnan(a) && !mpd_isnan(b)) {
  4134. mpd_qcopy(result, b, status);
  4135. }
  4136. else if (mpd_isqnan(b) && !mpd_isnan(a)) {
  4137. mpd_qcopy(result, a, status);
  4138. }
  4139. else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  4140. return;
  4141. }
  4142. else {
  4143. c = _mpd_cmp(a, b);
  4144. if (c == 0) {
  4145. c = _mpd_cmp_numequal(a, b);
  4146. }
  4147. if (c < 0) {
  4148. mpd_qcopy(result, a, status);
  4149. }
  4150. else {
  4151. mpd_qcopy(result, b, status);
  4152. }
  4153. }
  4154. mpd_qfinalize(result, ctx, status);
  4155. }
  4156. /*
  4157. * Minimum magnitude: Same as mpd_min(), but compares the operands with their
  4158. * sign ignored.
  4159. */
  4160. void
  4161. mpd_qmin_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4162. const mpd_context_t *ctx, uint32_t *status)
  4163. {
  4164. int c;
  4165. if (mpd_isqnan(a) && !mpd_isnan(b)) {
  4166. mpd_qcopy(result, b, status);
  4167. }
  4168. else if (mpd_isqnan(b) && !mpd_isnan(a)) {
  4169. mpd_qcopy(result, a, status);
  4170. }
  4171. else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  4172. return;
  4173. }
  4174. else {
  4175. c = _mpd_cmp_abs(a, b);
  4176. if (c == 0) {
  4177. c = _mpd_cmp_numequal(a, b);
  4178. }
  4179. if (c < 0) {
  4180. mpd_qcopy(result, a, status);
  4181. }
  4182. else {
  4183. mpd_qcopy(result, b, status);
  4184. }
  4185. }
  4186. mpd_qfinalize(result, ctx, status);
  4187. }
  4188. /* Minimum space needed for the result array in _karatsuba_rec(). */
  4189. static inline mpd_size_t
  4190. _kmul_resultsize(mpd_size_t la, mpd_size_t lb)
  4191. {
  4192. mpd_size_t n, m;
  4193. n = add_size_t(la, lb);
  4194. n = add_size_t(n, 1);
  4195. m = (la+1)/2 + 1;
  4196. m = mul_size_t(m, 3);
  4197. return (m > n) ? m : n;
  4198. }
  4199. /* Work space needed in _karatsuba_rec(). lim >= 4 */
  4200. static inline mpd_size_t
  4201. _kmul_worksize(mpd_size_t n, mpd_size_t lim)
  4202. {
  4203. mpd_size_t m;
  4204. if (n <= lim) {
  4205. return 0;
  4206. }
  4207. m = (n+1)/2 + 1;
  4208. return add_size_t(mul_size_t(m, 2), _kmul_worksize(m, lim));
  4209. }
  4210. #define MPD_KARATSUBA_BASECASE 16 /* must be >= 4 */
/*
 * Add the product of a and b to c.
 * c must be _kmul_resultsize(la, lb) in size.
 * w is used as a work array and must be _kmul_worksize(a, lim) in size.
 * Roman E. Maeder, Storage Allocation for the Karatsuba Integer Multiplication
 * Algorithm. In "Design and implementation of symbolic computation systems",
 * Springer, 1993, ISBN 354057235X, 9783540572350.
 */
static void
_karatsuba_rec(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
               mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
{
    mpd_size_t m, lt;

    assert (la >= lb && lb > 0);

    /* basecase: schoolbook multiplication for small operands */
    if (la <= MPD_KARATSUBA_BASECASE) {
        _mpd_basemul(c, a, b, la, lb);
        return;
    }

    m = (la+1)/2;  // ceil(la/2)

    /* lb <= m < la */
    if (lb <= m) {
        /* Degenerate split: b fits into the low half of a.  Split only
           a (a = ah*B**m + al): a*b = (ah*b)*B**m + al*b */

        /* lb can now be larger than la-m */
        if (lb > la-m) {
            lt = lb + lb + 1;       // space needed for result array
            mpd_uint_zero(w, lt);   // clear result array
            _karatsuba_rec(w, b, a+m, w+lt, lb, la-m);  // b*ah
        }
        else {
            lt = (la-m) + (la-m) + 1;  // space needed for result array
            mpd_uint_zero(w, lt);      // clear result array
            _karatsuba_rec(w, a+m, b, w+lt, la-m, lb);  // ah*b
        }
        _mpd_baseaddto(c+m, w, (la-m)+lb);  // add ah*b*B**m

        lt = m + m + 1;        // space needed for the result array
        mpd_uint_zero(w, lt);  // clear result array
        _karatsuba_rec(w, a, b, w+lt, m, lb);  // al*b
        _mpd_baseaddto(c, w, m+lb);  // add al*b
        return;
    }

    /* la >= lb > m: full Karatsuba step with a = ah*B**m + al,
       b = bh*B**m + bl:
       a*b = ah*bh*B**(2m) + ((ah+al)*(bh+bl) - ah*bh - al*bl)*B**m + al*bl */

    /* w[0..m] = al + ah (the extra word holds a possible carry) */
    memcpy(w, a, m * sizeof *w);
    w[m] = 0;
    _mpd_baseaddto(w, a+m, la-m);

    /* w[m+1..2m+1] = bl + bh */
    memcpy(w+(m+1), b, m * sizeof *w);
    w[m+1+m] = 0;
    _mpd_baseaddto(w+(m+1), b+m, lb-m);

    /* c += (al+ah)*(bl+bh)*B**m */
    _karatsuba_rec(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1);

    /* w = ah*bh: add at position 2m, subtract at position m */
    lt = (la-m) + (la-m) + 1;
    mpd_uint_zero(w, lt);
    _karatsuba_rec(w, a+m, b+m, w+lt, la-m, lb-m);
    _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
    _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));

    /* w = al*bl: add at position 0, subtract at position m */
    lt = m + m + 1;
    mpd_uint_zero(w, lt);
    _karatsuba_rec(w, a, b, w+lt, m, m);
    _mpd_baseaddto(c, w, m+m);
    _mpd_basesubfrom(c+m, w, m+m);
    return;
}
  4270. /*
  4271. * Multiply u and v, using Karatsuba multiplication. Returns a pointer
  4272. * to the result or NULL in case of failure (malloc error).
  4273. * Conditions: ulen >= vlen, ulen >= 4
  4274. */
  4275. mpd_uint_t *
  4276. _mpd_kmul(const mpd_uint_t *u, const mpd_uint_t *v,
  4277. mpd_size_t ulen, mpd_size_t vlen,
  4278. mpd_size_t *rsize)
  4279. {
  4280. mpd_uint_t *result = NULL, *w = NULL;
  4281. mpd_size_t m;
  4282. assert(ulen >= 4);
  4283. assert(ulen >= vlen);
  4284. *rsize = _kmul_resultsize(ulen, vlen);
  4285. if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
  4286. return NULL;
  4287. }
  4288. m = _kmul_worksize(ulen, MPD_KARATSUBA_BASECASE);
  4289. if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
  4290. mpd_free(result);
  4291. return NULL;
  4292. }
  4293. _karatsuba_rec(result, u, v, w, ulen, vlen);
  4294. if (w) mpd_free(w);
  4295. return result;
  4296. }
  4297. /* Determine the minimum length for the number theoretic transform. */
  4298. static inline mpd_size_t
  4299. _mpd_get_transform_len(mpd_size_t rsize)
  4300. {
  4301. mpd_size_t log2rsize;
  4302. mpd_size_t x, step;
  4303. assert(rsize >= 4);
  4304. log2rsize = mpd_bsr(rsize);
  4305. if (rsize <= 1024) {
  4306. x = ONE_UM<<log2rsize;
  4307. return (rsize == x) ? x : x<<1;
  4308. }
  4309. else if (rsize <= MPD_MAXTRANSFORM_2N) {
  4310. x = ONE_UM<<log2rsize;
  4311. if (rsize == x) return x;
  4312. step = x>>1;
  4313. x += step;
  4314. return (rsize <= x) ? x : x + step;
  4315. }
  4316. else if (rsize <= MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2) {
  4317. return MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2;
  4318. }
  4319. else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
  4320. return 3*MPD_MAXTRANSFORM_2N;
  4321. }
  4322. else {
  4323. return MPD_SIZE_MAX;
  4324. }
  4325. }
#ifdef PPRO
#ifndef _MSC_VER
/* Read the x87 FPU control word. */
static inline unsigned short
_mpd_get_control87(void)
{
    unsigned short cw;

    __asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
    return cw;
}

/* Load a new x87 FPU control word. */
static inline void
_mpd_set_control87(unsigned short cw)
{
    __asm__ __volatile__ ("fldcw %0" : : "m" (cw));
}
#endif

/* Configure the x87 FPU for the PPRO transform and return the previous
   control word so it can be restored by mpd_restore_fenv().  On MSVC the
   mode is explicitly _RC_CHOP|_PC_64; the gcc path ORs in 0x780, which
   presumably selects the equivalent precision/rounding bits —
   NOTE(review): confirm against the x87 control word bit layout. */
unsigned int
mpd_set_fenv(void)
{
    unsigned int cw;
#ifdef _MSC_VER
    cw = _control87(0, 0);
    _control87((_RC_CHOP|_PC_64), (_MCW_RC|_MCW_PC));
#else
    cw = _mpd_get_control87();
    _mpd_set_control87(cw|0x780);
#endif
    return cw;
}

/* Restore a control word previously saved by mpd_set_fenv(). */
void
mpd_restore_fenv(unsigned int cw)
{
#ifdef _MSC_VER
    _control87(cw, (_MCW_RC|_MCW_PC));
#else
    _mpd_set_control87((unsigned short)cw);
#endif
}
#endif /* PPRO */
/*
 * Multiply u and v, using the fast number theoretic transform. Returns
 * a pointer to the result or NULL in case of failure (malloc error).
 */
mpd_uint_t *
_mpd_fntmul(const mpd_uint_t *u, const mpd_uint_t *v,
            mpd_size_t ulen, mpd_size_t vlen,
            mpd_size_t *rsize)
{
    /* The convolution is computed modulo the three primes P1, P2, P3;
       crt3() recombines the residues via the Chinese Remainder Theorem. */
    mpd_uint_t *c1 = NULL, *c2 = NULL, *c3 = NULL, *vtmp = NULL;
    mpd_size_t n;

#ifdef PPRO
    /* the PPRO code path needs a specific x87 FPU configuration */
    unsigned int cw;
    cw = mpd_set_fenv();
#endif

    *rsize = add_size_t(ulen, vlen);
    if ((n = _mpd_get_transform_len(*rsize)) == MPD_SIZE_MAX) {
        goto malloc_error;
    }

    /* three zero-padded copies of u, one per prime (mpd_calloc zeroes
       the tail beyond ulen) */
    if ((c1 = mpd_calloc(sizeof *c1, n)) == NULL) {
        goto malloc_error;
    }
    if ((c2 = mpd_calloc(sizeof *c2, n)) == NULL) {
        goto malloc_error;
    }
    if ((c3 = mpd_calloc(sizeof *c3, n)) == NULL) {
        goto malloc_error;
    }

    memcpy(c1, u, ulen * (sizeof *c1));
    memcpy(c2, u, ulen * (sizeof *c2));
    memcpy(c3, u, ulen * (sizeof *c3));

    if (u == v) {
        /* squaring: the cheaper autoconvolution needs no second array */
        if (!fnt_autoconvolute(c1, n, P1) ||
            !fnt_autoconvolute(c2, n, P2) ||
            !fnt_autoconvolute(c3, n, P3)) {
            goto malloc_error;
        }
    }
    else {
        /* fnt_convolute modifies its second argument, so v is copied into
           vtmp before every call; the first call uses the calloc-zeroed
           tail, later calls must re-zero it explicitly */
        if ((vtmp = mpd_calloc(sizeof *vtmp, n)) == NULL) {
            goto malloc_error;
        }
        memcpy(vtmp, v, vlen * (sizeof *vtmp));
        if (!fnt_convolute(c1, vtmp, n, P1)) {
            mpd_free(vtmp);
            goto malloc_error;
        }

        memcpy(vtmp, v, vlen * (sizeof *vtmp));
        mpd_uint_zero(vtmp+vlen, n-vlen);
        if (!fnt_convolute(c2, vtmp, n, P2)) {
            mpd_free(vtmp);
            goto malloc_error;
        }

        memcpy(vtmp, v, vlen * (sizeof *vtmp));
        mpd_uint_zero(vtmp+vlen, n-vlen);
        if (!fnt_convolute(c3, vtmp, n, P3)) {
            mpd_free(vtmp);
            goto malloc_error;
        }
        mpd_free(vtmp);
    }

    /* combine the three residues; the final result is stored in c1 */
    crt3(c1, c2, c3, *rsize);

out:
#ifdef PPRO
    mpd_restore_fenv(cw);
#endif
    if (c2) mpd_free(c2);
    if (c3) mpd_free(c3);
    return c1;

malloc_error:
    /* c1 == NULL signals failure to the caller; cleanup happens at out */
    if (c1) mpd_free(c1);
    c1 = NULL;
    goto out;
}
/*
 * Karatsuba multiplication with FNT/basemul as the base case.
 */
static int
_karatsuba_rec_fnt(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
                   mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
{
    mpd_size_t m, lt;

    assert (la >= lb && lb > 0);

    /* basecase: schoolbook for very unbalanced operands, FNT otherwise */
    if (la <= 3*(MPD_MAXTRANSFORM_2N/2)) {
        if (lb <= 192) {
            _mpd_basemul(c, b, a, lb, la);
        }
        else {
            mpd_uint_t *result;
            mpd_size_t dummy;

            if ((result = _mpd_fntmul(a, b, la, lb, &dummy)) == NULL) {
                return 0;  /* malloc error inside the transform */
            }
            memcpy(c, result, (la+lb) * (sizeof *result));
            mpd_free(result);
        }
        return 1;
    }

    m = (la+1)/2;  // ceil(la/2)

    /* lb <= m < la */
    if (lb <= m) {
        /* Degenerate split: b fits into the low half of a.  Split only
           a (a = ah*B**m + al): a*b = (ah*b)*B**m + al*b */

        /* lb can now be larger than la-m */
        if (lb > la-m) {
            lt = lb + lb + 1;       // space needed for result array
            mpd_uint_zero(w, lt);   // clear result array
            if (!_karatsuba_rec_fnt(w, b, a+m, w+lt, lb, la-m)) {  // b*ah
                return 0; /* GCOV_UNLIKELY */
            }
        }
        else {
            lt = (la-m) + (la-m) + 1;  // space needed for result array
            mpd_uint_zero(w, lt);      // clear result array
            if (!_karatsuba_rec_fnt(w, a+m, b, w+lt, la-m, lb)) {  // ah*b
                return 0; /* GCOV_UNLIKELY */
            }
        }
        _mpd_baseaddto(c+m, w, (la-m)+lb);  // add ah*b*B**m

        lt = m + m + 1;        // space needed for the result array
        mpd_uint_zero(w, lt);  // clear result array
        if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, lb)) {  // al*b
            return 0; /* GCOV_UNLIKELY */
        }
        _mpd_baseaddto(c, w, m+lb);  // add al*b
        return 1;
    }

    /* la >= lb > m: full Karatsuba step with a = ah*B**m + al,
       b = bh*B**m + bl:
       a*b = ah*bh*B**(2m) + ((ah+al)*(bh+bl) - ah*bh - al*bl)*B**m + al*bl */

    /* w[0..m] = al + ah (the extra word holds a possible carry) */
    memcpy(w, a, m * sizeof *w);
    w[m] = 0;
    _mpd_baseaddto(w, a+m, la-m);

    /* w[m+1..2m+1] = bl + bh */
    memcpy(w+(m+1), b, m * sizeof *w);
    w[m+1+m] = 0;
    _mpd_baseaddto(w+(m+1), b+m, lb-m);

    /* c += (al+ah)*(bl+bh)*B**m */
    if (!_karatsuba_rec_fnt(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1)) {
        return 0; /* GCOV_UNLIKELY */
    }

    /* w = ah*bh: add at position 2m, subtract at position m */
    lt = (la-m) + (la-m) + 1;
    mpd_uint_zero(w, lt);
    if (!_karatsuba_rec_fnt(w, a+m, b+m, w+lt, la-m, lb-m)) {
        return 0; /* GCOV_UNLIKELY */
    }
    _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
    _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));

    /* w = al*bl: add at position 0, subtract at position m */
    lt = m + m + 1;
    mpd_uint_zero(w, lt);
    if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, m)) {
        return 0; /* GCOV_UNLIKELY */
    }
    _mpd_baseaddto(c, w, m+m);
    _mpd_basesubfrom(c+m, w, m+m);
    return 1;
}
  4515. /*
  4516. * Multiply u and v, using Karatsuba multiplication with the FNT as the
  4517. * base case. Returns a pointer to the result or NULL in case of failure
  4518. * (malloc error). Conditions: ulen >= vlen, ulen >= 4.
  4519. */
  4520. mpd_uint_t *
  4521. _mpd_kmul_fnt(const mpd_uint_t *u, const mpd_uint_t *v,
  4522. mpd_size_t ulen, mpd_size_t vlen,
  4523. mpd_size_t *rsize)
  4524. {
  4525. mpd_uint_t *result = NULL, *w = NULL;
  4526. mpd_size_t m;
  4527. assert(ulen >= 4);
  4528. assert(ulen >= vlen);
  4529. *rsize = _kmul_resultsize(ulen, vlen);
  4530. if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
  4531. return NULL;
  4532. }
  4533. m = _kmul_worksize(ulen, 3*(MPD_MAXTRANSFORM_2N/2));
  4534. if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
  4535. mpd_free(result); /* GCOV_UNLIKELY */
  4536. return NULL; /* GCOV_UNLIKELY */
  4537. }
  4538. if (!_karatsuba_rec_fnt(result, u, v, w, ulen, vlen)) {
  4539. mpd_free(result);
  4540. result = NULL;
  4541. }
  4542. if (w) mpd_free(w);
  4543. return result;
  4544. }
  4545. /* Deal with the special cases of multiplying infinities. */
  4546. static void
  4547. _mpd_qmul_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
  4548. {
  4549. if (mpd_isinfinite(a)) {
  4550. if (mpd_iszero(b)) {
  4551. mpd_seterror(result, MPD_Invalid_operation, status);
  4552. }
  4553. else {
  4554. mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
  4555. }
  4556. return;
  4557. }
  4558. assert(mpd_isinfinite(b));
  4559. if (mpd_iszero(a)) {
  4560. mpd_seterror(result, MPD_Invalid_operation, status);
  4561. }
  4562. else {
  4563. mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
  4564. }
  4565. }
/*
 * Internal function: Multiply a and b. _mpd_qmul deals with specials but
 * does NOT finalize the result. This is for use in mpd_fma().
 */
static inline void
_mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    /* big/small alias a and b; const is cast away only so the pointers
       can be swapped — the operands themselves are never written */
    mpd_t *big = (mpd_t *)a, *small = (mpd_t *)b;
    mpd_uint_t *rdata = NULL;
    mpd_uint_t rbuf[MPD_MINALLOC_MAX];  /* stack buffer for small results */
    mpd_size_t rsize, i;

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            return;
        }
        _mpd_qmul_inf(result, a, b, status);
        return;
    }

    /* ensure that big is the operand with more words */
    if (small->len > big->len) {
        _mpd_ptrswap(&big, &small);
    }

    rsize = big->len + small->len;

    if (big->len == 1) {
        /* single word times single word */
        _mpd_singlemul(result->data, big->data[0], small->data[0]);
        goto finish;
    }
    if (rsize <= (mpd_size_t)MPD_MINALLOC_MAX) {
        /* result fits into the stack buffer */
        if (big->len == 2) {
            _mpd_mul_2_le2(rbuf, big->data, small->data, small->len);
        }
        else {
            mpd_uint_zero(rbuf, rsize);
            if (small->len == 1) {
                _mpd_shortmul(rbuf, big->data, big->len, small->data[0]);
            }
            else {
                _mpd_basemul(rbuf, small->data, big->data, small->len, big->len);
            }
        }
        if (!mpd_qresize(result, rsize, status)) {
            return;
        }
        for(i = 0; i < rsize; i++) {
            result->data[i] = rbuf[i];
        }
        goto finish;
    }

    /* heap-allocated result: select the algorithm by result size */
    if (small->len == 1) {
        /* word times multi-word */
        if ((rdata = mpd_calloc(rsize, sizeof *rdata)) == NULL) {
            mpd_seterror(result, MPD_Malloc_error, status);
            return;
        }
        _mpd_shortmul(rdata, big->data, big->len, small->data[0]);
    }
    else if (rsize <= 1024) {
        /* Karatsuba with schoolbook base case */
        rdata = _mpd_kmul(big->data, small->data, big->len, small->len, &rsize);
        if (rdata == NULL) {
            mpd_seterror(result, MPD_Malloc_error, status);
            return;
        }
    }
    else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
        /* number theoretic transform */
        rdata = _mpd_fntmul(big->data, small->data, big->len, small->len, &rsize);
        if (rdata == NULL) {
            mpd_seterror(result, MPD_Malloc_error, status);
            return;
        }
    }
    else {
        /* Karatsuba with FNT base case, for very large operands */
        rdata = _mpd_kmul_fnt(big->data, small->data, big->len, small->len, &rsize);
        if (rdata == NULL) {
            mpd_seterror(result, MPD_Malloc_error, status); /* GCOV_UNLIKELY */
            return; /* GCOV_UNLIKELY */
        }
    }

    /* hand the freshly allocated array over to result */
    if (mpd_isdynamic_data(result)) {
        mpd_free(result->data);
    }
    result->data = rdata;
    result->alloc = rsize;
    mpd_set_dynamic_data(result);

finish:
    mpd_set_flags(result, mpd_sign(a)^mpd_sign(b));
    result->exp = big->exp + small->exp;
    result->len = _mpd_real_size(result->data, rsize);
    /* resize to smaller cannot fail */
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
}
  4656. /* Multiply a and b. */
  4657. void
  4658. mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4659. const mpd_context_t *ctx, uint32_t *status)
  4660. {
  4661. _mpd_qmul(result, a, b, ctx, status);
  4662. mpd_qfinalize(result, ctx, status);
  4663. }
  4664. /* Multiply decimal and mpd_ssize_t. */
  4665. void
  4666. mpd_qmul_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
  4667. const mpd_context_t *ctx, uint32_t *status)
  4668. {
  4669. mpd_context_t maxcontext;
  4670. MPD_NEW_STATIC(bb,0,0,0,0);
  4671. mpd_maxcontext(&maxcontext);
  4672. mpd_qsset_ssize(&bb, b, &maxcontext, status);
  4673. mpd_qmul(result, a, &bb, ctx, status);
  4674. mpd_del(&bb);
  4675. }
  4676. /* Multiply decimal and mpd_uint_t. */
  4677. void
  4678. mpd_qmul_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
  4679. const mpd_context_t *ctx, uint32_t *status)
  4680. {
  4681. mpd_context_t maxcontext;
  4682. MPD_NEW_STATIC(bb,0,0,0,0);
  4683. mpd_maxcontext(&maxcontext);
  4684. mpd_qsset_uint(&bb, b, &maxcontext, status);
  4685. mpd_qmul(result, a, &bb, ctx, status);
  4686. mpd_del(&bb);
  4687. }
  4688. void
  4689. mpd_qmul_i32(mpd_t *result, const mpd_t *a, int32_t b,
  4690. const mpd_context_t *ctx, uint32_t *status)
  4691. {
  4692. mpd_qmul_ssize(result, a, b, ctx, status);
  4693. }
  4694. void
  4695. mpd_qmul_u32(mpd_t *result, const mpd_t *a, uint32_t b,
  4696. const mpd_context_t *ctx, uint32_t *status)
  4697. {
  4698. mpd_qmul_uint(result, a, b, ctx, status);
  4699. }
#ifdef CONFIG_64
/* Multiply decimal and int64_t (64-bit configuration only, where
   mpd_ssize_t can hold any int64_t). */
void
mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qmul_ssize(result, a, b, ctx, status);
}

/* Multiply decimal and uint64_t (64-bit configuration only, where
   mpd_uint_t can hold any uint64_t). */
void
mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qmul_uint(result, a, b, ctx, status);
}
#endif
  4714. /* Like the minus operator. */
  4715. void
  4716. mpd_qminus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  4717. uint32_t *status)
  4718. {
  4719. if (mpd_isspecial(a)) {
  4720. if (mpd_qcheck_nan(result, a, ctx, status)) {
  4721. return;
  4722. }
  4723. }
  4724. if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
  4725. mpd_qcopy_abs(result, a, status);
  4726. }
  4727. else {
  4728. mpd_qcopy_negate(result, a, status);
  4729. }
  4730. mpd_qfinalize(result, ctx, status);
  4731. }
  4732. /* Like the plus operator. */
  4733. void
  4734. mpd_qplus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  4735. uint32_t *status)
  4736. {
  4737. if (mpd_isspecial(a)) {
  4738. if (mpd_qcheck_nan(result, a, ctx, status)) {
  4739. return;
  4740. }
  4741. }
  4742. if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
  4743. mpd_qcopy_abs(result, a, status);
  4744. }
  4745. else {
  4746. mpd_qcopy(result, a, status);
  4747. }
  4748. mpd_qfinalize(result, ctx, status);
  4749. }
/* The largest representable number that is smaller than the operand. */
void
mpd_qnext_minus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
                uint32_t *status)
{
    mpd_context_t workctx; /* function context */
    /* tiny = 1 * 10**(etiny-1): below the smallest subnormal step */
    MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isinfinite(a)) {
            if (mpd_isnegative(a)) {
                /* nothing is smaller than -Inf */
                mpd_qcopy(result, a, status);
                return;
            }
            else {
                /* next below +Inf is the largest finite number:
                   (10**prec - 1) * 10**(emax - prec + 1) */
                mpd_clear_flags(result);
                mpd_qmaxcoeff(result, ctx, status);
                if (mpd_isnan(result)) {
                    return;
                }
                result->exp = ctx->emax - ctx->prec + 1;
                return;
            }
        }
        /* debug */
        abort(); /* GCOV_NOT_REACHED */
    }

    mpd_workcontext(&workctx, ctx);
    workctx.round = MPD_ROUND_FLOOR;

    if (!mpd_qcopy(result, a, status)) {
        return;
    }
    /* Rounding a toward -Inf may already yield the answer; if it was
       inexact (or failed), that rounded value is the result. */
    mpd_qfinalize(result, &workctx, &workctx.status);
    if (workctx.status&(MPD_Inexact|MPD_Errors)) {
        *status |= (workctx.status&MPD_Errors);
        return;
    }

    /* a is exactly representable: step down by subtracting tiny and
       rounding toward -Inf */
    workctx.status = 0;
    mpd_qsub(result, a, &tiny, &workctx, &workctx.status);
    *status |= (workctx.status&MPD_Errors);
}
/* The smallest representable number that is larger than the operand. */
void
mpd_qnext_plus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
               uint32_t *status)
{
    mpd_context_t workctx;
    /* tiny = 1 * 10**(etiny-1): below the smallest subnormal step */
    MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isinfinite(a)) {
            if (mpd_ispositive(a)) {
                /* nothing is larger than +Inf */
                mpd_qcopy(result, a, status);
            }
            else {
                /* next above -Inf is the most negative finite number:
                   -(10**prec - 1) * 10**etop */
                mpd_clear_flags(result);
                mpd_qmaxcoeff(result, ctx, status);
                if (mpd_isnan(result)) {
                    return;
                }
                mpd_set_flags(result, MPD_NEG);
                result->exp = mpd_etop(ctx);
            }
            return;
        }
    }

    mpd_workcontext(&workctx, ctx);
    workctx.round = MPD_ROUND_CEILING;

    if (!mpd_qcopy(result, a, status)) {
        return;
    }
    /* Rounding a toward +Inf may already yield the answer; if it was
       inexact (or failed), that rounded value is the result. */
    mpd_qfinalize(result, &workctx, &workctx.status);
    if (workctx.status & (MPD_Inexact|MPD_Errors)) {
        *status |= (workctx.status&MPD_Errors);
        return;
    }

    /* a is exactly representable: step up by adding tiny and rounding
       toward +Inf */
    workctx.status = 0;
    mpd_qadd(result, a, &tiny, &workctx, &workctx.status);
    *status |= (workctx.status&MPD_Errors);
}
  4834. /*
  4835. * The number closest to the first operand that is in the direction towards
  4836. * the second operand.
  4837. */
  4838. void
  4839. mpd_qnext_toward(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4840. const mpd_context_t *ctx, uint32_t *status)
  4841. {
  4842. int c;
  4843. if (mpd_isnan(a) || mpd_isnan(b)) {
  4844. if (mpd_qcheck_nans(result, a, b, ctx, status))
  4845. return;
  4846. }
  4847. c = _mpd_cmp(a, b);
  4848. if (c == 0) {
  4849. mpd_qcopy_sign(result, a, b, status);
  4850. return;
  4851. }
  4852. if (c < 0) {
  4853. mpd_qnext_plus(result, a, ctx, status);
  4854. }
  4855. else {
  4856. mpd_qnext_minus(result, a, ctx, status);
  4857. }
  4858. if (mpd_isinfinite(result)) {
  4859. *status |= (MPD_Overflow|MPD_Rounded|MPD_Inexact);
  4860. }
  4861. else if (mpd_adjexp(result) < ctx->emin) {
  4862. *status |= (MPD_Underflow|MPD_Subnormal|MPD_Rounded|MPD_Inexact);
  4863. if (mpd_iszero(result)) {
  4864. *status |= MPD_Clamped;
  4865. }
  4866. }
  4867. }
/*
 * Internal function: Integer power with mpd_uint_t exponent, base is modified!
 * Function can fail with MPD_Malloc_error.
 */
static inline void
_mpd_qpow_uint(mpd_t *result, mpd_t *base, mpd_uint_t exp, uint8_t resultsign,
               const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_uint_t n;

    if (exp == 0) {
        _settriple(result, resultsign, 1, 0); /* GCOV_NOT_REACHED */
        return; /* GCOV_NOT_REACHED */
    }

    if (!mpd_qcopy(result, base, status)) {
        return;
    }

    /* Left-to-right binary exponentiation: n starts at the highest set
       bit of exp and moves down one bit per iteration. */
    n = mpd_bits[mpd_bsr(exp)];
    while (n >>= 1) {
        mpd_qmul(result, result, result, ctx, &workstatus);
        if (exp & n) {
            mpd_qmul(result, result, base, ctx, &workstatus);
        }
        if (workstatus & (MPD_Overflow|MPD_Clamped)) {
            /* the magnitude is already saturated; stop early */
            break;
        }
    }

    *status |= workstatus;
    mpd_set_sign(result, resultsign);
}
/*
 * Internal function: Integer power with mpd_t exponent, tbase and texp
 * are modified!! Function can fail with MPD_Malloc_error.
 */
static inline void
_mpd_qpow_mpd(mpd_t *result, mpd_t *tbase, mpd_t *texp, uint8_t resultsign,
              const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_context_t maxctx;
    MPD_NEW_CONST(two,0,0,1,1,1,2);  /* constant 2, used to halve texp */

    mpd_maxcontext(&maxctx);

    /* resize to smaller cannot fail */
    mpd_qcopy(result, &one, status);

    /* Right-to-left binary exponentiation; loop invariant:
       result * tbase**texp == base**(original exponent) */
    while (!mpd_iszero(texp)) {
        if (mpd_isodd(texp)) {
            mpd_qmul(result, result, tbase, ctx, &workstatus);
            *status |= workstatus;
            if (workstatus & (MPD_Overflow|MPD_Clamped)) {
                /* the magnitude is already saturated; stop early */
                break;
            }
        }
        mpd_qmul(tbase, tbase, tbase, ctx, &workstatus);
        mpd_qdivint(texp, texp, &two, &maxctx, &workstatus);
        if (mpd_isnan(tbase) || mpd_isnan(texp)) {
            /* a square or halving step failed (e.g. malloc error) */
            mpd_seterror(result, workstatus&MPD_Errors, status);
            return;
        }
    }
    mpd_set_sign(result, resultsign);
}
/*
 * The power function for integer exponents: base**exp with exp an integer.
 * A negative exponent is handled by inverting the base first (1/base),
 * then raising to |exp|. The working precision is increased so that the
 * final mpd_qfinalize() rounds a sufficiently precise intermediate result.
 */
static void
_mpd_qpow_int(mpd_t *result, const mpd_t *base, const mpd_t *exp,
              uint8_t resultsign,
              const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t workctx;
    MPD_NEW_STATIC(tbase,0,0,0,0);
    MPD_NEW_STATIC(texp,0,0,0,0);
    mpd_ssize_t n;

    mpd_workcontext(&workctx, ctx);
    /* Extra digits to absorb error accumulated over up to exp
     * multiplications. */
    workctx.prec += (exp->digits + exp->exp + 2);
    workctx.round = MPD_ROUND_HALF_EVEN;
    workctx.clamp = 0;
    if (mpd_isnegative(exp)) {
        /* tbase = 1/base; the loop then computes (1/base)**|exp|. */
        mpd_qdiv(&tbase, &one, base, &workctx, status);
        if (*status&MPD_Errors) {
            mpd_setspecial(result, MPD_POS, MPD_NAN);
            goto finish;
        }
    }
    else {
        if (!mpd_qcopy(&tbase, base, status)) {
            mpd_setspecial(result, MPD_POS, MPD_NAN);
            goto finish;
        }
    }

    /* Prefer the fast word-sized exponent path; fall back to the mpd_t
     * exponent path if |exp| does not fit in an mpd_ssize_t. */
    n = mpd_qabs_uint(exp, &workctx.status);
    if (workctx.status&MPD_Invalid_operation) {
        if (!mpd_qcopy(&texp, exp, status)) {
            mpd_setspecial(result, MPD_POS, MPD_NAN); /* GCOV_UNLIKELY */
            goto finish; /* GCOV_UNLIKELY */
        }
        _mpd_qpow_mpd(result, &tbase, &texp, resultsign, &workctx, status);
    }
    else {
        _mpd_qpow_uint(result, &tbase, n, resultsign, &workctx, status);
    }

    if (mpd_isinfinite(result)) {
        /* for ROUND_DOWN, ROUND_FLOOR, etc. */
        _settriple(result, resultsign, 1, MPD_EXP_INF);
    }

finish:
    mpd_del(&tbase);
    mpd_del(&texp);
    mpd_qfinalize(result, ctx, status);
}
/*
 * If base compares equal to one, set result to the exact power-of-one
 * value 1.000...0 with ctx->prec digits (exponent -(prec-1)) and the
 * given sign, flagging Inexact/Rounded. Otherwise result is untouched.
 * Returns the value of the comparison of base against 1.
 *
 * This is an internal function that does not check for NaNs.
 */
static int
_qcheck_pow_one_inf(mpd_t *result, const mpd_t *base, uint8_t resultsign,
                    const mpd_context_t *ctx, uint32_t *status)
{
    mpd_ssize_t shift;
    int cmp;

    if ((cmp = _mpd_cmp(base, &one)) == 0) {
        /* Pad 1 with prec-1 trailing zeros: 1.000...0 */
        shift = ctx->prec-1;
        mpd_qshiftl(result, &one, shift, status);
        result->exp = -shift;
        mpd_set_flags(result, resultsign);
        *status |= (MPD_Inexact|MPD_Rounded);
    }

    return cmp;
}
/*
 * If base equals one, calculate the correct power of one result.
 * Otherwise, result is undefined. Return the value of the comparison
 * against 1.
 *
 * The number of significant digits of the result depends on the case:
 * an exact integer power of e.g. 1.000 carries the scaled exponent of
 * the base, while an inexact power is padded to context precision.
 *
 * This is an internal function that does not check for specials.
 */
static int
_qcheck_pow_one(mpd_t *result, const mpd_t *base, const mpd_t *exp,
                uint8_t resultsign,
                const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_ssize_t shift;
    int cmp;

    if ((cmp = _mpd_cmp_abs(base, &one)) == 0) {
        if (_mpd_isint(exp)) {
            if (mpd_isnegative(exp)) {
                /* Negative integer exponent of |1|: exactly 1. */
                _settriple(result, resultsign, 1, 0);
                return 0;
            }
            /* 1.000**3 = 1.000000000 */
            /* shift = exp * (-base->exp), the number of fractional
             * zeros the exact result would have. */
            mpd_qmul_ssize(result, exp, -base->exp, ctx, &workstatus);
            if (workstatus&MPD_Errors) {
                *status |= (workstatus&MPD_Errors);
                return 0;
            }
            /* digits-1 after exponentiation */
            shift = mpd_qget_ssize(result, &workstatus);
            /* shift is MPD_SSIZE_MAX if result is too large */
            if (shift > ctx->prec-1) {
                /* Cap at context precision; excess zeros are rounded. */
                shift = ctx->prec-1;
                *status |= MPD_Rounded;
            }
        }
        else if (mpd_ispositive(base)) {
            /* Non-integer exponent of 1: inexact, pad to precision. */
            shift = ctx->prec-1;
            *status |= (MPD_Inexact|MPD_Rounded);
        }
        else {
            return -2; /* GCOV_NOT_REACHED */
        }
        if (!mpd_qshiftl(result, &one, shift, status)) {
            return 0;
        }
        result->exp = -shift;
        mpd_set_flags(result, resultsign);
    }

    return cmp;
}
  5046. /*
  5047. * Detect certain over/underflow of x**y.
  5048. * ACL2 proof: pow_bounds.lisp.
  5049. *
  5050. * Symbols:
  5051. *
  5052. * e: EXP_INF or EXP_CLAMP
  5053. * x: base
  5054. * y: exponent
  5055. *
  5056. * omega(e) = log10(abs(e))
  5057. * zeta(x) = log10(abs(log10(x)))
  5058. * theta(y) = log10(abs(y))
  5059. *
  5060. * Upper and lower bounds:
  5061. *
  5062. * ub_omega(e) = ceil(log10(abs(e)))
  5063. * lb_theta(y) = floor(log10(abs(y)))
  5064. *
  5065. * | floor(log10(floor(abs(log10(x))))) if x < 1/10 or x >= 10
  5066. * lb_zeta(x) = | floor(log10(abs(x-1)/10)) if 1/10 <= x < 1
  5067. * | floor(log10(abs((x-1)/100))) if 1 < x < 10
  5068. *
  5069. * ub_omega(e) and lb_theta(y) are obviously upper and lower bounds
  5070. * for omega(e) and theta(y).
  5071. *
  5072. * lb_zeta is a lower bound for zeta(x):
  5073. *
  5074. * x < 1/10 or x >= 10:
  5075. *
  5076. * abs(log10(x)) >= 1, so the outer log10 is well defined. Since log10
  5077. * is strictly increasing, the end result is a lower bound.
  5078. *
  5079. * 1/10 <= x < 1:
  5080. *
  5081. * We use: log10(x) <= (x-1)/log(10)
  5082. * abs(log10(x)) >= abs(x-1)/log(10)
  5083. * abs(log10(x)) >= abs(x-1)/10
  5084. *
  5085. * 1 < x < 10:
  5086. *
  5087. * We use: (x-1)/(x*log(10)) < log10(x)
  5088. * abs((x-1)/100) < abs(log10(x))
  5089. *
  5090. * XXX: abs((x-1)/10) would work, need ACL2 proof.
  5091. *
  5092. *
  5093. * Let (0 < x < 1 and y < 0) or (x > 1 and y > 0). (H1)
  5094. * Let ub_omega(exp_inf) < lb_zeta(x) + lb_theta(y) (H2)
  5095. *
  5096. * Then:
  5097. * log10(abs(exp_inf)) < log10(abs(log10(x))) + log10(abs(y)). (1)
  5098. * exp_inf < log10(x) * y (2)
  5099. * 10**exp_inf < x**y (3)
  5100. *
  5101. * Let (0 < x < 1 and y > 0) or (x > 1 and y < 0). (H3)
  5102. * Let ub_omega(exp_clamp) < lb_zeta(x) + lb_theta(y) (H4)
  5103. *
  5104. * Then:
  5105. * log10(abs(exp_clamp)) < log10(abs(log10(x))) + log10(abs(y)). (4)
  5106. * log10(x) * y < exp_clamp (5)
  5107. * x**y < 10**exp_clamp (6)
  5108. *
  5109. */
/*
 * Compute lb_zeta(x), a lower bound for log10(abs(log10(x))), using the
 * three-way case analysis proved in the comment above. x is expected to
 * be positive (callers pass |x|). Returns MPD_SSIZE_MAX if the internal
 * subtraction failed (treated as a malloc error by the caller).
 */
static mpd_ssize_t
_lower_bound_zeta(const mpd_t *x, uint32_t *status)
{
    mpd_context_t maxctx;
    MPD_NEW_STATIC(scratch,0,0,0,0);
    mpd_ssize_t t, u;

    /* t = adjusted exponent = floor(log10(x)) */
    t = mpd_adjexp(x);
    if (t > 0) {
        /* x >= 10 -> floor(log10(floor(abs(log10(x))))) */
        return mpd_exp_digits(t) - 1;
    }
    else if (t < -1) {
        /* x < 1/10 -> floor(log10(floor(abs(log10(x))))) */
        return mpd_exp_digits(t+1) - 1;
    }
    else {
        /* x is close to 1: bound via |x-1|. */
        mpd_maxcontext(&maxctx);
        mpd_qsub(&scratch, x, &one, &maxctx, status);
        if (mpd_isspecial(&scratch)) {
            mpd_del(&scratch);
            return MPD_SSIZE_MAX;
        }
        u = mpd_adjexp(&scratch);
        mpd_del(&scratch);

        /* t == -1, 1/10 <= x < 1 -> floor(log10(abs(x-1)/10))
         * t == 0, 1 < x < 10 -> floor(log10(abs(x-1)/100)) */
        return (t == 0) ? u-2 : u-1;
    }
}
/*
 * Detect cases of certain overflow/underflow in the power function.
 * Assumptions: x != 1, y != 0. The proof above is for positive x.
 * If x is negative and y is an odd integer, x**y == -(abs(x)**y),
 * so the analysis does not change.
 *
 * Returns 1 if an over/underflow result was produced (result is set
 * and finalized), 0 if the caller must compute the power normally.
 */
static int
_qcheck_pow_bounds(mpd_t *result, const mpd_t *x, const mpd_t *y,
                   uint8_t resultsign,
                   const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_SHARED(abs_x, x);
    mpd_ssize_t ub_omega, lb_zeta, lb_theta;
    uint8_t sign;

    mpd_set_positive(&abs_x);

    /* lb_theta(y) = floor(log10(|y|)), lb_zeta per the proof above. */
    lb_theta = mpd_adjexp(y);
    lb_zeta = _lower_bound_zeta(&abs_x, status);
    if (lb_zeta == MPD_SSIZE_MAX) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return 1;
    }

    /* sign == 0 iff |x|**y grows (overflow direction). */
    sign = (mpd_adjexp(&abs_x) < 0) ^ mpd_sign(y);
    if (sign == 0) {
        /* (0 < |x| < 1 and y < 0) or (|x| > 1 and y > 0) */
        ub_omega = mpd_exp_digits(ctx->emax);
        if (ub_omega < lb_zeta + lb_theta) {
            /* Certain overflow: hand a value above emax to finalize. */
            _settriple(result, resultsign, 1, MPD_EXP_INF);
            mpd_qfinalize(result, ctx, status);
            return 1;
        }
    }
    else {
        /* (0 < |x| < 1 and y > 0) or (|x| > 1 and y < 0). */
        ub_omega = mpd_exp_digits(mpd_etiny(ctx));
        if (ub_omega < lb_zeta + lb_theta) {
            /* Certain underflow: hand a value below etiny to finalize. */
            _settriple(result, resultsign, 1, mpd_etiny(ctx)-1);
            mpd_qfinalize(result, ctx, status);
            return 1;
        }
    }

    return 0;
}
  5181. /*
  5182. * TODO: Implement algorithm for computing exact powers from decimal.py.
  5183. * In order to prevent infinite loops, this has to be called before
  5184. * using Ziv's strategy for correct rounding.
  5185. */
  5186. /*
  5187. static int
  5188. _mpd_qpow_exact(mpd_t *result, const mpd_t *base, const mpd_t *exp,
  5189. const mpd_context_t *ctx, uint32_t *status)
  5190. {
  5191. return 0;
  5192. }
  5193. */
/* The power function for real exponents: base**exp = exp(exp * ln(base)).
 * The result is always flagged Inexact/Rounded. */
static void
_mpd_qpow_real(mpd_t *result, const mpd_t *base, const mpd_t *exp,
               const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t workctx;
    MPD_NEW_STATIC(texp,0,0,0,0);

    /* Private copy of exp: result may alias exp and is written first. */
    if (!mpd_qcopy(&texp, exp, status)) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }

    mpd_maxcontext(&workctx);
    /* Work with enough guard digits for the ln/mul/exp chain. */
    workctx.prec = (base->digits > ctx->prec) ? base->digits : ctx->prec;
    workctx.prec += (4 + MPD_EXPDIGITS);
    workctx.round = MPD_ROUND_HALF_EVEN;
    workctx.allcr = ctx->allcr;

    mpd_qln(result, base, &workctx, &workctx.status);
    mpd_qmul(result, result, &texp, &workctx, &workctx.status);
    mpd_qexp(result, result, &workctx, status);
    mpd_del(&texp);

    *status |= (workctx.status&MPD_Errors);
    *status |= (MPD_Inexact|MPD_Rounded);
}
/* The power function: base**exp. Handles NaNs and all special-case
 * combinations up front, then dispatches to the integer-exponent or
 * real-exponent implementation. */
void
mpd_qpow(mpd_t *result, const mpd_t *base, const mpd_t *exp,
         const mpd_context_t *ctx, uint32_t *status)
{
    uint8_t resultsign = 0;
    int intexp = 0;
    int cmp;

    if (mpd_isspecial(base) || mpd_isspecial(exp)) {
        if (mpd_qcheck_nans(result, base, exp, ctx, status)) {
            return;
        }
    }
    if (mpd_isinteger(exp)) {
        intexp = 1;
        /* Negative base to an odd integer power yields a negative result. */
        resultsign = mpd_isnegative(base) && mpd_isodd(exp);
    }

    if (mpd_iszero(base)) {
        if (mpd_iszero(exp)) {
            /* 0**0 is undefined. */
            mpd_seterror(result, MPD_Invalid_operation, status);
        }
        else if (mpd_isnegative(exp)) {
            /* 0**negative -> infinity. */
            mpd_setspecial(result, resultsign, MPD_INF);
        }
        else {
            /* 0**positive -> 0. */
            _settriple(result, resultsign, 0, 0);
        }
        return;
    }
    if (mpd_isnegative(base)) {
        /* Negative base requires a finite integer exponent. */
        if (!intexp || mpd_isinfinite(exp)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
    }
    if (mpd_isinfinite(exp)) {
        /* power of one */
        cmp = _qcheck_pow_one_inf(result, base, resultsign, ctx, status);
        if (cmp == 0) {
            return;
        }
        else {
            /* cmp < 0: |base| moves toward 0 under this exponent sign;
             * cmp > 0: it diverges to infinity. */
            cmp *= mpd_arith_sign(exp);
            if (cmp < 0) {
                _settriple(result, resultsign, 0, 0);
            }
            else {
                mpd_setspecial(result, resultsign, MPD_INF);
            }
        }
        return;
    }
    if (mpd_isinfinite(base)) {
        /* Inf**0 -> 1, Inf**negative -> 0, Inf**positive -> Inf. */
        if (mpd_iszero(exp)) {
            _settriple(result, resultsign, 1, 0);
        }
        else if (mpd_isnegative(exp)) {
            _settriple(result, resultsign, 0, 0);
        }
        else {
            mpd_setspecial(result, resultsign, MPD_INF);
        }
        return;
    }
    if (mpd_iszero(exp)) {
        /* x**0 -> 1 for finite nonzero x. */
        _settriple(result, resultsign, 1, 0);
        return;
    }
    if (_qcheck_pow_one(result, base, exp, resultsign, ctx, status) == 0) {
        return;
    }
    /* Certain overflow/underflow is detected without computing the power. */
    if (_qcheck_pow_bounds(result, base, exp, resultsign, ctx, status)) {
        return;
    }

    if (intexp) {
        _mpd_qpow_int(result, base, exp, resultsign, ctx, status);
    }
    else {
        _mpd_qpow_real(result, base, exp, ctx, status);
        /* An inexact result that rounds to exactly 1 is presented as
         * 1.000...0 with full context precision. */
        if (!mpd_isspecial(result) && _mpd_cmp(result, &one) == 0) {
            mpd_ssize_t shift = ctx->prec-1;
            mpd_qshiftl(result, &one, shift, status);
            result->exp = -shift;
        }
        if (mpd_isinfinite(result)) {
            /* for ROUND_DOWN, ROUND_FLOOR, etc. */
            _settriple(result, MPD_POS, 1, MPD_EXP_INF);
        }
        mpd_qfinalize(result, ctx, status);
    }
}
/*
 * Internal function: Integer powmod with mpd_uint_t exponent, base is modified!
 * result = (base**exp) % mod via right-to-left binary exponentiation,
 * reducing modulo 'mod' after every multiplication.
 * Function can fail with MPD_Malloc_error.
 */
static inline void
_mpd_qpowmod_uint(mpd_t *result, mpd_t *base, mpd_uint_t exp,
                  mpd_t *mod, uint32_t *status)
{
    mpd_context_t maxcontext;

    mpd_maxcontext(&maxcontext);

    /* resize to smaller cannot fail */
    mpd_qcopy(result, &one, status);

    while (exp > 0) {
        if (exp & 1) {
            mpd_qmul(result, result, base, &maxcontext, status);
            mpd_qrem(result, result, mod, &maxcontext, status);
        }
        mpd_qmul(base, base, base, &maxcontext, status);
        mpd_qrem(base, base, mod, &maxcontext, status);
        exp >>= 1;
    }
}
/* The powmod function: (base**exp) % mod.
 *
 * All three operands must be finite integers, mod must be nonzero and
 * exp must be nonnegative; otherwise Invalid_operation. The decimal
 * exponents of base and exp are handled separately: base's power of 10
 * is folded into the residue up front, and exp's power of 10 is applied
 * by repeated powmod-by-10 steps. */
void
mpd_qpowmod(mpd_t *result, const mpd_t *base, const mpd_t *exp,
            const mpd_t *mod,
            const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(tbase,0,0,0,0);
    MPD_NEW_STATIC(texp,0,0,0,0);
    MPD_NEW_STATIC(tmod,0,0,0,0);
    MPD_NEW_STATIC(tmp,0,0,0,0);
    MPD_NEW_CONST(two,0,0,1,1,1,2);
    mpd_ssize_t tbase_exp, texp_exp;
    mpd_ssize_t i;
    mpd_t t;
    mpd_uint_t r;
    uint8_t sign;

    if (mpd_isspecial(base) || mpd_isspecial(exp) || mpd_isspecial(mod)) {
        if (mpd_qcheck_3nans(result, base, exp, mod, ctx, status)) {
            return;
        }
        /* Infinities are invalid for powmod. */
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (!_mpd_isint(base) || !_mpd_isint(exp) || !_mpd_isint(mod)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (mpd_iszerocoeff(mod)) {
        /* Modulo zero. */
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    /* The result is negative iff base is negative and exp is odd. */
    sign = (mpd_isnegative(base)) && (mpd_isodd(exp));
    if (mpd_iszerocoeff(exp)) {
        if (mpd_iszerocoeff(base)) {
            /* 0**0 is undefined. */
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        /* base**0 == 1; reduce modulo |mod| (zero if |mod| == 1). */
        r = (_mpd_cmp_abs(mod, &one)==0) ? 0 : 1;
        _settriple(result, sign, r, 0);
        return;
    }
    if (mpd_isnegative(exp)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (mpd_iszerocoeff(base)) {
        _settriple(result, sign, 0, 0);
        return;
    }
    /* mod must fit in the context precision. */
    if (mod->digits+mod->exp > ctx->prec) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (!mpd_qcopy(&tmod, mod, status)) {
        goto mpd_errors;
    }
    /* Work with |mod|; the result sign is applied at the end. */
    mpd_set_positive(&tmod);

    mpd_maxcontext(&maxcontext);

    /* Convert the operands to plain integers (exponent 0). */
    mpd_qround_to_int(&tbase, base, &maxcontext, status);
    mpd_qround_to_int(&texp, exp, &maxcontext, status);
    mpd_qround_to_int(&tmod, &tmod, &maxcontext, status);

    tbase_exp = tbase.exp;
    tbase.exp = 0;
    texp_exp = texp.exp;
    texp.exp = 0;

    /* base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo */
    mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
    _settriple(result, MPD_POS, 1, tbase_exp);
    mpd_qrem(result, result, &tmod, &maxcontext, status);
    mpd_qmul(&tbase, &tbase, result, &maxcontext, status);
    mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
    if (mpd_isspecial(&tbase) ||
        mpd_isspecial(&texp) ||
        mpd_isspecial(&tmod)) {
        goto mpd_errors;
    }

    /* Fold in exp's power of 10: tbase = tbase**(10**texp_exp) % tmod. */
    for (i = 0; i < texp_exp; i++) {
        _mpd_qpowmod_uint(&tmp, &tbase, 10, &tmod, status);
        /* Swap tmp and tbase (struct copy, no allocation). */
        t = tmp;
        tmp = tbase;
        tbase = t;
    }
    if (mpd_isspecial(&tbase)) {
        goto mpd_errors; /* GCOV_UNLIKELY */
    }

    /* resize to smaller cannot fail */
    mpd_qcopy(result, &one, status);
    /* Right-to-left binary exponentiation with the mpd_t exponent. */
    while (mpd_isfinite(&texp) && !mpd_iszero(&texp)) {
        if (mpd_isodd(&texp)) {
            mpd_qmul(result, result, &tbase, &maxcontext, status);
            mpd_qrem(result, result, &tmod, &maxcontext, status);
        }
        mpd_qmul(&tbase, &tbase, &tbase, &maxcontext, status);
        mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
        mpd_qdivint(&texp, &texp, &two, &maxcontext, status);
    }
    if (mpd_isspecial(&texp) || mpd_isspecial(&tbase) ||
        mpd_isspecial(&tmod) || mpd_isspecial(result)) {
        /* MPD_Malloc_error */
        goto mpd_errors;
    }
    else {
        mpd_set_sign(result, sign);
    }

out:
    mpd_del(&tbase);
    mpd_del(&texp);
    mpd_del(&tmod);
    mpd_del(&tmp);
    mpd_qfinalize(result, ctx, status);
    return;

mpd_errors:
    mpd_setspecial(result, MPD_POS, MPD_NAN);
    goto out;
}
/* Quantize: round 'a' so that its exponent equals b's exponent, using the
 * context rounding mode. Invalid_operation if the coefficient would need
 * more than ctx->prec digits or the result's adjusted exponent would leave
 * [etiny, emax]. */
void
mpd_qquantize(mpd_t *result, const mpd_t *a, const mpd_t *b,
              const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    /* Save b's exponent: result may alias b. */
    mpd_ssize_t b_exp = b->exp;
    mpd_ssize_t expdiff, shift;
    mpd_uint_t rnd;

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            return;
        }
        /* Two infinities quantize to the first operand. */
        if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
            mpd_qcopy(result, a, status);
            return;
        }
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    /* The target exponent must be representable in this context. */
    if (b->exp > ctx->emax || b->exp < mpd_etiny(ctx)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (mpd_iszero(a)) {
        _settriple(result, mpd_sign(a), 0, b->exp);
        mpd_qfinalize(result, ctx, status);
        return;
    }


    expdiff = a->exp - b->exp;
    /* More digits than the precision allows: cannot quantize exactly. */
    if (a->digits + expdiff > ctx->prec) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (expdiff >= 0) {
        /* Pad with zeros: exact. */
        shift = expdiff;
        if (!mpd_qshiftl(result, a, shift, status)) {
            return;
        }
        result->exp = b_exp;
    }
    else {
        /* At this point expdiff < 0 and a->digits+expdiff <= prec,
         * so the shift before an increment will fit in prec. */
        shift = -expdiff;
        rnd = mpd_qshiftr(result, a, shift, status);
        if (rnd == MPD_UINT_MAX) {
            return;
        }
        result->exp = b_exp;
        if (!_mpd_apply_round_fit(result, rnd, ctx, status)) {
            return;
        }
        workstatus |= MPD_Rounded;
        if (rnd) {
            workstatus |= MPD_Inexact;
        }
    }

    if (mpd_adjexp(result) > ctx->emax ||
        mpd_adjexp(result) < mpd_etiny(ctx)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    *status |= workstatus;
    mpd_qfinalize(result, ctx, status);
}
/* Reduce to simplest form: round 'a' to the context, then strip trailing
 * zeros from the coefficient, raising the exponent accordingly (capped so
 * that the exponent stays <= emax, or etop when clamping is enabled). */
void
mpd_qreduce(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
            uint32_t *status)
{
    mpd_ssize_t shift, maxexp, maxshift;
    uint8_t sign_a = mpd_sign(a);

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        mpd_qcopy(result, a, status);
        return;
    }

    if (!mpd_qcopy(result, a, status)) {
        return;
    }
    mpd_qfinalize(result, ctx, status);
    if (mpd_isspecial(result)) {
        return;
    }
    if (mpd_iszero(result)) {
        /* A zero reduces to 0E+0 (sign preserved). */
        _settriple(result, sign_a, 0, 0);
        return;
    }

    shift = mpd_trail_zeros(result);
    maxexp = (ctx->clamp) ? mpd_etop(ctx) : ctx->emax;
    /* After the finalizing above result->exp <= maxexp. */
    maxshift = maxexp - result->exp;
    shift = (shift > maxshift) ? maxshift : shift;

    mpd_qshiftr_inplace(result, shift);
    result->exp += shift;
}
/* Remainder: r = a rem b, i.e. the remainder of the integer division
 * computed by _mpd_qdivmod (the quotient is discarded). Inf rem x and
 * rem-by-zero are invalid; x rem Inf returns x. */
void
mpd_qrem(mpd_t *r, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
         uint32_t *status)
{
    MPD_NEW_STATIC(q,0,0,0,0);

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(r, a, b, ctx, status)) {
            return;
        }
        if (mpd_isinfinite(a)) {
            mpd_seterror(r, MPD_Invalid_operation, status);
            return;
        }
        if (mpd_isinfinite(b)) {
            /* finite rem Inf == the finite operand. */
            mpd_qcopy(r, a, status);
            mpd_qfinalize(r, ctx, status);
            return;
        }
        /* debug */
        abort(); /* GCOV_NOT_REACHED */
    }
    if (mpd_iszerocoeff(b)) {
        if (mpd_iszerocoeff(a)) {
            /* 0 rem 0 */
            mpd_seterror(r, MPD_Division_undefined, status);
        }
        else {
            mpd_seterror(r, MPD_Invalid_operation, status);
        }
        return;
    }

    _mpd_qdivmod(&q, r, a, b, ctx, status);
    mpd_del(&q);
    mpd_qfinalize(r, ctx, status);
}
/* Remainder-near: r = a - b*n, where n is the integer nearest the exact
 * value of a/b (ties go to the even quotient). Computed as the truncating
 * remainder, then adjusted by one multiple of b when the remainder is
 * closer to (or equidistant from) the opposite multiple. */
void
mpd_qrem_near(mpd_t *r, const mpd_t *a, const mpd_t *b,
              const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t workctx;
    MPD_NEW_STATIC(btmp,0,0,0,0);
    MPD_NEW_STATIC(q,0,0,0,0);
    mpd_ssize_t expdiff, floordigits;
    int cmp, isodd, allnine;

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(r, a, b, ctx, status)) {
            return;
        }
        if (mpd_isinfinite(a)) {
            mpd_seterror(r, MPD_Invalid_operation, status);
            return;
        }
        if (mpd_isinfinite(b)) {
            /* finite rem-near Inf == the finite operand. */
            mpd_qcopy(r, a, status);
            mpd_qfinalize(r, ctx, status);
            return;
        }
        /* debug */
        abort(); /* GCOV_NOT_REACHED */
    }
    if (mpd_iszerocoeff(b)) {
        if (mpd_iszerocoeff(a)) {
            /* 0 rem-near 0 */
            mpd_seterror(r, MPD_Division_undefined, status);
        }
        else {
            mpd_seterror(r, MPD_Invalid_operation, status);
        }
        return;
    }

    /* r may alias b: keep a private copy of b. */
    if (r == b) {
        if (!mpd_qcopy(&btmp, b, status)) {
            mpd_seterror(r, MPD_Malloc_error, status);
            return;
        }
        b = &btmp;
    }

    /* Truncating divmod at a precision large enough for the full quotient. */
    workctx = *ctx;
    workctx.prec = a->digits;
    workctx.prec = (workctx.prec > ctx->prec) ? workctx.prec : ctx->prec;

    _mpd_qdivmod(&q, r, a, b, &workctx, status);
    if (mpd_isnan(&q) || mpd_isnan(r) || q.digits > ctx->prec) {
        mpd_seterror(r, MPD_Division_impossible, status);
        goto finish;
    }
    if (mpd_iszerocoeff(r)) {
        goto finish;
    }

    /* Deal with cases like rmnx078:
     * remaindernear 999999999.5 1 -> NaN Division_impossible */
    expdiff = mpd_adjexp(b) - mpd_adjexp(r);
    if (-1 <= expdiff && expdiff <= 1) {

        /* Record properties of floor(q) before q is reused below. */
        mpd_qtrunc(&q, &q, &workctx, &workctx.status);
        allnine = mpd_coeff_isallnine(&q);
        floordigits = q.digits;
        isodd = mpd_isodd(&q);

        mpd_maxcontext(&workctx);
        /* q := the candidate adjusted remainder r -+ b (moves r toward
         * the other multiple of b). */
        if (mpd_sign(a) == mpd_sign(b)) {
            /* sign of r == sign of b */
            _mpd_qsub(&q, r, b, &workctx, &workctx.status);
            if (workctx.status&MPD_Errors) {
                mpd_seterror(r, workctx.status&MPD_Errors, status);
                goto finish;
            }
        }
        else {
            /* sign of r != sign of b */
            _mpd_qadd(&q, r, b, &workctx, &workctx.status);
            if (workctx.status&MPD_Errors) {
                mpd_seterror(r, workctx.status&MPD_Errors, status);
                goto finish;
            }
        }

        /* Use the adjusted remainder if it is smaller in magnitude, or
         * equal and the truncated quotient was odd (round half to even). */
        cmp = mpd_cmp_total_mag(&q, r);
        if (cmp < 0 || (cmp == 0 && isodd)) {
            /* Incrementing the quotient would need prec+1 digits. */
            if (allnine && floordigits == ctx->prec) {
                mpd_seterror(r, MPD_Division_impossible, status);
                goto finish;
            }
            mpd_qcopy(r, &q, status);
            *status &= ~MPD_Rounded;
        }
    }

finish:
    mpd_del(&btmp);
    mpd_del(&q);
    mpd_qfinalize(r, ctx, status);
}
/*
 * Rescale a number so that it has exponent 'exp'. Does not regard
 * context precision, emax, emin, but uses the rounding mode.
 * Special numbers are quietly copied. Sets Rounded (and Inexact when
 * digits were lost) on a right shift, and Subnormal when the result is
 * subnormal in the given context.
 */
void
mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_ssize_t expdiff, shift;
    mpd_uint_t rnd;

    if (mpd_isspecial(a)) {
        mpd_qcopy(result, a, status);
        return;
    }

    /* The target exponent must be in the representable range. */
    if (exp > MPD_MAX_EMAX || exp < MPD_MIN_ETINY) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (mpd_iszero(a)) {
        _settriple(result, mpd_sign(a), 0, exp);
        return;
    }

    expdiff = a->exp - exp;
    if (expdiff >= 0) {
        /* Pad with zeros: exact. */
        shift = expdiff;
        if (!mpd_qshiftl(result, a, shift, status)) {
            return;
        }
        result->exp = exp;
    }
    else {
        /* Shift digits out and round the retained part. */
        shift = -expdiff;
        rnd = mpd_qshiftr(result, a, shift, status);
        if (rnd == MPD_UINT_MAX) {
            return;
        }
        result->exp = exp;
        _mpd_apply_round_excess(result, rnd, ctx, status);
        *status |= MPD_Rounded;
        if (rnd) {
            *status |= MPD_Inexact;
        }
    }

    if (mpd_issubnormal(result, ctx)) {
        *status |= MPD_Subnormal;
    }
}
/* Round to an integer according to 'action' and ctx->round.
 *
 * TO_INT_EXACT:  context rounding mode, sets Rounded/Inexact.
 * TO_INT_SILENT: context rounding mode, no status flags.
 * TO_INT_TRUNC:  truncate toward zero (dropped digits are discarded).
 * TO_INT_FLOOR:  round toward -Infinity.
 * TO_INT_CEIL:   round toward +Infinity.
 */
enum {TO_INT_EXACT, TO_INT_SILENT, TO_INT_TRUNC, TO_INT_FLOOR, TO_INT_CEIL};
static void
_mpd_qround_to_integral(int action, mpd_t *result, const mpd_t *a,
                        const mpd_context_t *ctx, uint32_t *status)
{
    mpd_uint_t rnd;

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        mpd_qcopy(result, a, status);
        return;
    }
    if (a->exp >= 0) {
        /* Already an integer. */
        mpd_qcopy(result, a, status);
        return;
    }
    if (mpd_iszerocoeff(a)) {
        _settriple(result, mpd_sign(a), 0, 0);
        return;
    }

    /* Drop the fractional digits; 'rnd' summarizes what was dropped. */
    rnd = mpd_qshiftr(result, a, -a->exp, status);
    if (rnd == MPD_UINT_MAX) {
        return;
    }
    result->exp = 0;

    if (action == TO_INT_EXACT || action == TO_INT_SILENT) {
        _mpd_apply_round_excess(result, rnd, ctx, status);
        if (action == TO_INT_EXACT) {
            *status |= MPD_Rounded;
            if (rnd) {
                *status |= MPD_Inexact;
            }
        }
    }
    else if (action == TO_INT_FLOOR) {
        /* Truncation moved a negative value toward zero: step back down. */
        if (rnd && mpd_isnegative(result)) {
            _mpd_qsub(result, result, &one, ctx, status);
        }
    }
    else if (action == TO_INT_CEIL) {
        /* Truncation moved a positive value toward zero: step back up. */
        if (rnd && mpd_ispositive(result)) {
            _mpd_qadd(result, result, &one, ctx, status);
        }
    }
}
  5762. void
  5763. mpd_qround_to_intx(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  5764. uint32_t *status)
  5765. {
  5766. (void)_mpd_qround_to_integral(TO_INT_EXACT, result, a, ctx, status);
  5767. }
  5768. void
  5769. mpd_qround_to_int(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  5770. uint32_t *status)
  5771. {
  5772. (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a, ctx, status);
  5773. }
  5774. void
  5775. mpd_qtrunc(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  5776. uint32_t *status)
  5777. {
  5778. (void)_mpd_qround_to_integral(TO_INT_TRUNC, result, a, ctx, status);
  5779. }
  5780. void
  5781. mpd_qfloor(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  5782. uint32_t *status)
  5783. {
  5784. (void)_mpd_qround_to_integral(TO_INT_FLOOR, result, a, ctx, status);
  5785. }
  5786. void
  5787. mpd_qceil(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  5788. uint32_t *status)
  5789. {
  5790. (void)_mpd_qround_to_integral(TO_INT_CEIL, result, a, ctx, status);
  5791. }
  5792. int
  5793. mpd_same_quantum(const mpd_t *a, const mpd_t *b)
  5794. {
  5795. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  5796. return ((mpd_isnan(a) && mpd_isnan(b)) ||
  5797. (mpd_isinfinite(a) && mpd_isinfinite(b)));
  5798. }
  5799. return a->exp == b->exp;
  5800. }
  5801. /* Schedule the increase in precision for the Newton iteration. */
  5802. static inline int
  5803. recpr_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
  5804. mpd_ssize_t maxprec, mpd_ssize_t initprec)
  5805. {
  5806. mpd_ssize_t k;
  5807. int i;
  5808. assert(maxprec > 0 && initprec > 0);
  5809. if (maxprec <= initprec) return -1;
  5810. i = 0; k = maxprec;
  5811. do {
  5812. k = (k+1) / 2;
  5813. klist[i++] = k;
  5814. } while (k > initprec);
  5815. return i-1;
  5816. }
/*
 * Initial approximation for the reciprocal. Result has MPD_RDIGITS-2
 * significant digits, obtained from a single machine-word division of
 * 10**(2*MPD_RDIGITS-2) by the leading digits of v.
 */
static void
_mpd_qreciprocal_approx(mpd_t *z, const mpd_t *v, uint32_t *status)
{
    mpd_uint_t p10data[2] = {0, mpd_pow10[MPD_RDIGITS-2]}; /* 10**(2*MPD_RDIGITS-2) */
    mpd_uint_t dummy, word;
    int n;

    /* word = the most significant MPD_RDIGITS digits of v, left-aligned
     * (padded with trailing zeros up to MPD_RDIGITS digits). */
    _mpd_get_msdigits(&dummy, &word, v, MPD_RDIGITS);
    n = mpd_word_digits(word);
    word *= mpd_pow10[MPD_RDIGITS-n];

    mpd_qresize(z, 2, status);
    /* z->data = 10**(2*MPD_RDIGITS-2) / word */
    (void)_mpd_shortdiv(z->data, p10data, 2, word);

    mpd_clear_flags(z);
    /* Exponent chosen so that z approximates 1/v. */
    z->exp = -(v->exp + v->digits) - (MPD_RDIGITS-2);
    z->len = (z->data[1] == 0) ? 1 : 2;
    mpd_setdigits(z);
}
/* Reciprocal, calculated with Newton's Method: iterate z' = z*(2 - v*z),
 * roughly doubling the number of correct digits per step. The precision
 * schedule is computed by recpr_schedule_prec(). */
static void
_mpd_qreciprocal(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
                 uint32_t *status)
{
    mpd_context_t varcontext, maxcontext;
    mpd_t *z = result;         /* current approximation */
    mpd_t *v;                  /* a, normalized to a number between 0.1 and 1 */
    MPD_NEW_SHARED(vtmp, a);   /* by default v will share data with a */
    MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
    MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
    MPD_NEW_CONST(two,0,0,1,1,1,2); /* const 2 */
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
    mpd_ssize_t adj, maxprec, initprec;
    uint8_t sign = mpd_sign(a);
    int i;

    /* If result aliases a, work on a deep copy of a instead. */
    v = &vtmp;
    if (result == a) {
        if ((v = mpd_qncopy(a)) == NULL) { /* GCOV_NOT_REACHED */
            mpd_seterror(result, MPD_Malloc_error, status); /* GCOV_NOT_REACHED */
            goto finish; /* GCOV_NOT_REACHED */
        }
    }

    /* Scale v into [0.1, 1); 'adj' records the removed power of 10. */
    mpd_clear_flags(v);
    adj = v->digits + v->exp;
    v->exp = -v->digits;

    /* initial approximation */
    _mpd_qreciprocal_approx(z, v, status);

    mpd_maxcontext(&varcontext);
    mpd_maxcontext(&maxcontext);
    varcontext.round = MPD_ROUND_TRUNC;
    maxcontext.round = MPD_ROUND_TRUNC;

    maxprec = (v->digits > ctx->prec) ? v->digits : ctx->prec;
    maxprec += 2;
    initprec = MPD_RDIGITS-3;

    i = recpr_schedule_prec(klist, maxprec, initprec);
    for (; i >= 0; i--) {
        /* z' = 2*z - v*z*z == z*(2 - v*z) */
        mpd_qmul(&s, z, z, &maxcontext, status);
        varcontext.prec = 2*klist[i] + 5;
        if (v->digits > varcontext.prec) {
            /* Truncate v to the working precision before multiplying. */
            mpd_qshiftr(&t, v, v->digits-varcontext.prec, status);
            t.exp = -varcontext.prec;
            mpd_qmul(&t, &t, &s, &varcontext, status);
        }
        else {
            mpd_qmul(&t, v, &s, &varcontext, status);
        }
        mpd_qmul(&s, z, &two, &maxcontext, status);
        mpd_qsub(z, &s, &t, &maxcontext, status);
    }

    if (!mpd_isspecial(z)) {
        /* Undo the initial scaling and restore the sign. */
        z->exp -= adj;
        mpd_set_flags(z, sign);
    }

finish:
    mpd_del(&s);
    mpd_del(&t);
    if (v != &vtmp) mpd_del(v);
    mpd_qfinalize(z, ctx, status);
}
  5897. /*
  5898. * Integer division with remainder of the coefficients: coeff(a) / coeff(b).
  5899. * This function is for large numbers where it is faster to divide by
  5900. * multiplying the dividend by the reciprocal of the divisor.
  5901. * The inexact result is fixed by a small loop, which should not take
  5902. * more than 2 iterations.
  5903. */
  5904. static void
  5905. _mpd_qbarrett_divmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
  5906. uint32_t *status)
  5907. {
  5908. mpd_context_t workctx;
  5909. mpd_t *qq = q, *rr = r;
  5910. mpd_t aa, bb;
  5911. int k;
  5912. mpd_maxcontext(&workctx);
  5913. _mpd_copy_shared(&aa, a);
  5914. _mpd_copy_shared(&bb, b);
  5915. mpd_set_positive(&aa);
  5916. mpd_set_positive(&bb);
  5917. aa.exp = 0;
  5918. bb.exp = 0;
  5919. if (q == a || q == b) {
  5920. if ((qq = mpd_qnew()) == NULL) {
  5921. *status |= MPD_Malloc_error;
  5922. goto nanresult;
  5923. }
  5924. }
  5925. if (r == a || r == b) {
  5926. if ((rr = mpd_qnew()) == NULL) {
  5927. *status |= MPD_Malloc_error;
  5928. goto nanresult;
  5929. }
  5930. }
  5931. /* maximum length of q + 3 digits */
  5932. workctx.prec = aa.digits - bb.digits + 1 + 3;
  5933. /* we get the reciprocal with precision maxlen(q) + 3 */
  5934. _mpd_qreciprocal(rr, &bb, &workctx, &workctx.status);
  5935. mpd_qmul(qq, &aa, rr, &workctx, &workctx.status);
  5936. mpd_qtrunc(qq, qq, &workctx, &workctx.status);
  5937. workctx.prec = aa.digits + 3;
  5938. /* get the remainder */
  5939. mpd_qmul(rr, &bb, qq, &workctx, &workctx.status);
  5940. mpd_qsub(rr, &aa, rr, &workctx, &workctx.status);
  5941. /* Fix the result. Algorithm from: Karl Hasselstrom, Fast Division of Large Integers */
  5942. for (k = 0;; k++) {
  5943. if (mpd_isspecial(rr)) {
  5944. *status |= (workctx.status&MPD_Errors);
  5945. goto nanresult;
  5946. }
  5947. if (k > 2) {
  5948. mpd_err_warn("_mpd_barrett_divmod: k > 2 in correcting loop"); /* GCOV_NOT_REACHED */
  5949. abort(); /* GCOV_NOT_REACHED */
  5950. }
  5951. else if (_mpd_cmp(&zero, rr) == 1) {
  5952. mpd_qadd(rr, rr, &bb, &workctx, &workctx.status);
  5953. mpd_qadd(qq, qq, &minus_one, &workctx, &workctx.status);
  5954. }
  5955. else if (_mpd_cmp(rr, &bb) == -1) {
  5956. break;
  5957. }
  5958. else {
  5959. mpd_qsub(rr, rr, &bb, &workctx, &workctx.status);
  5960. mpd_qadd(qq, qq, &one, &workctx, &workctx.status);
  5961. }
  5962. }
  5963. if (qq != q) {
  5964. if (!mpd_qcopy(q, qq, status)) {
  5965. goto nanresult; /* GCOV_UNLIKELY */
  5966. }
  5967. mpd_del(qq);
  5968. }
  5969. if (rr != r) {
  5970. if (!mpd_qcopy(r, rr, status)) {
  5971. goto nanresult; /* GCOV_UNLIKELY */
  5972. }
  5973. mpd_del(rr);
  5974. }
  5975. *status |= (workctx.status&MPD_Errors);
  5976. return;
  5977. nanresult:
  5978. if (qq && qq != q) mpd_del(qq);
  5979. if (rr && rr != r) mpd_del(rr);
  5980. mpd_setspecial(q, MPD_POS, MPD_NAN);
  5981. mpd_setspecial(r, MPD_POS, MPD_NAN);
  5982. }
  5983. static inline int
  5984. invroot_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
  5985. mpd_ssize_t maxprec, mpd_ssize_t initprec)
  5986. {
  5987. mpd_ssize_t k;
  5988. int i;
  5989. assert(maxprec >= 3 && initprec >= 3);
  5990. if (maxprec <= initprec) return -1;
  5991. i = 0; k = maxprec;
  5992. do {
  5993. k = (k+3) / 2;
  5994. klist[i++] = k;
  5995. } while (k > initprec);
  5996. return i-1;
  5997. }
  5998. /*
  5999. * Initial approximation for the inverse square root.
  6000. *
  6001. * Input:
  6002. * v := 7 or 8 decimal digits with an implicit exponent of 10**-6,
  6003. * representing a number 1 <= x < 100.
  6004. *
  6005. * Output:
  6006. * An approximation to 1/sqrt(v)
  6007. */
  6008. static inline void
  6009. _invroot_init_approx(mpd_t *z, mpd_uint_t v)
  6010. {
  6011. mpd_uint_t lo = 1000;
  6012. mpd_uint_t hi = 10000;
  6013. mpd_uint_t a, sq;
  6014. assert(v >= lo*lo && v < (hi+1)*(hi+1));
  6015. for(;;) {
  6016. a = (lo + hi) / 2;
  6017. sq = a * a;
  6018. if (v >= sq) {
  6019. if (v < sq + 2*a + 1) {
  6020. break;
  6021. }
  6022. lo = a + 1;
  6023. }
  6024. else {
  6025. hi = a - 1;
  6026. }
  6027. }
  6028. /* At this point a/1000 is an approximation to sqrt(v). */
  6029. mpd_minalloc(z);
  6030. mpd_clear_flags(z);
  6031. z->data[0] = 1000000000UL / a;
  6032. z->len = 1;
  6033. z->exp = -6;
  6034. mpd_setdigits(z);
  6035. }
/*
 * Inverse square root of a finite, positive operand, computed with a
 * variable-precision Newton iteration: z := z*(3 - v*z*z)/2.
 * The caller (mpd_qinvroot) has already handled special values.
 */
static void
_mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
              uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_context_t varcontext, maxcontext;
    mpd_t *z = result;         /* current approximation */
    mpd_t *v;                  /* a, normalized to a number between 1 and 100 */
    MPD_NEW_SHARED(vtmp, a);   /* by default v will share data with a */
    MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
    MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
    MPD_NEW_CONST(one_half,0,-1,1,1,1,5); /* const 0.5 */
    MPD_NEW_CONST(three,0,0,1,1,1,3);     /* const 3 */
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2]; /* precision schedule */
    mpd_ssize_t ideal_exp, shift;
    mpd_ssize_t adj, tz;
    mpd_ssize_t maxprec, fracdigits;
    mpd_uint_t x, dummy;
    int i, n;

    /* Ideal exponent: a->exp floored to an even value, halved, negated. */
    ideal_exp = -(a->exp - (a->exp & 1)) / 2;

    v = &vtmp;
    /* If the result aliases the operand, work on a private copy of a. */
    if (result == a) {
        if ((v = mpd_qncopy(a)) == NULL) {
            mpd_seterror(result, MPD_Malloc_error, status);
            return;
        }
    }

    /* normalize a to 1 <= v < 100 */
    if ((v->digits+v->exp) & 1) {
        /* odd adjusted exponent: leave one integer digit */
        fracdigits = v->digits - 1;
        v->exp = -fracdigits;
        /* x := the 7 most significant digits of v, zero-padded */
        n = (v->digits > 7) ? 7 : (int)v->digits;
        _mpd_get_msdigits(&dummy, &x, v, n);
        if (n < 7) {
            x *= mpd_pow10[7-n];
        }
    }
    else {
        /* even adjusted exponent: leave two integer digits */
        fracdigits = v->digits - 2;
        v->exp = -fracdigits;
        /* x := the 8 most significant digits of v, zero-padded */
        n = (v->digits > 8) ? 8 : (int)v->digits;
        _mpd_get_msdigits(&dummy, &x, v, n);
        if (n < 8) {
            x *= mpd_pow10[8-n];
        }
    }
    /* Exponent shift applied by the normalization (undone below). */
    adj = (a->exp-v->exp) / 2;

    /* initial approximation */
    _invroot_init_approx(z, x);

    mpd_maxcontext(&maxcontext);
    mpd_maxcontext(&varcontext);
    varcontext.round = MPD_ROUND_TRUNC;
    maxprec = ctx->prec + 2;

    /* Newton iteration z := z*(3 - v*z*z)/2, working precision growing
     * each step according to klist. */
    i = invroot_schedule_prec(klist, maxprec, 3);
    for (; i >= 0; i--) {
        varcontext.prec = 2*klist[i]+2;
        mpd_qmul(&s, z, z, &maxcontext, &workstatus);       /* s = z*z */
        if (v->digits > varcontext.prec) {
            /* Only the leading digits of v matter at this precision. */
            shift = v->digits - varcontext.prec;
            mpd_qshiftr(&t, v, shift, &workstatus);
            t.exp += shift;
            mpd_qmul(&t, &t, &s, &varcontext, &workstatus); /* t = v*z*z */
        }
        else {
            mpd_qmul(&t, v, &s, &varcontext, &workstatus);  /* t = v*z*z */
        }
        mpd_qsub(&t, &three, &t, &maxcontext, &workstatus); /* t = 3 - v*z*z */
        mpd_qmul(z, z, &t, &varcontext, &workstatus);       /* z = z*t */
        mpd_qmul(z, z, &one_half, &maxcontext, &workstatus);/* z = z/2 */
    }
    z->exp -= adj;

    /* Strip trailing zeros up to the ideal exponent. */
    tz = mpd_trail_zeros(result);
    shift = ideal_exp - result->exp;
    shift = (tz > shift) ? shift : tz;
    if (shift > 0) {
        mpd_qshiftr_inplace(result, shift);
        result->exp += shift;
    }

    mpd_del(&s);
    mpd_del(&t);
    if (v != &vtmp) mpd_del(v);
    *status |= (workstatus&MPD_Errors);
    /* Final rounding is always half-even at the context precision. */
    varcontext = *ctx;
    varcontext.round = MPD_ROUND_HALF_EVEN;
    mpd_qfinalize(result, &varcontext, status);
}
/*
 * Public entry point for the inverse square root: handles NaNs,
 * infinities, zeros and negative operands, then delegates the finite
 * positive case to _mpd_qinvroot.
 */
void
mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
             uint32_t *status)
{
    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        /* invroot(-inf) is invalid */
        if (mpd_isnegative(a)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        /* positive infinity */
        _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
        *status |= MPD_Clamped;
        return;
    }
    /* invroot(+-0) is a signed infinity with Division_by_zero.
     * NOTE: this check must precede the negative check so that -0
     * takes this branch. */
    if (mpd_iszero(a)) {
        mpd_setspecial(result, mpd_sign(a), MPD_INF);
        *status |= MPD_Division_by_zero;
        return;
    }
    if (mpd_isnegative(a)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    _mpd_qinvroot(result, a, ctx, status);
}
/*
 * Ensure correct rounding. Algorithm after Hull & Abrham, "Properly Rounded
 * Variable Precision Square Root", ACM Transactions on Mathematical Software,
 * Vol. 11, No. 3.
 *
 * 'result' is an approximation to sqrt(a).  If (result - ulp/2)**2 > a,
 * the approximation is one ulp too large at the target precision; if
 * (result + ulp/2)**2 < a, it is one ulp too small.  'tmp' is scratch
 * space.  On an error from a sub-operation, result is set to NaN.
 */
static void
_mpd_fix_sqrt(mpd_t *result, const mpd_t *a, mpd_t *tmp,
              const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxctx;
    MPD_NEW_CONST(u,0,0,1,1,1,5); /* becomes 1/2 ulp once u.exp is set */

    mpd_maxcontext(&maxctx);
    /* u := 5 * 10**(exp of 1/2 ulp of result at ctx->prec) */
    u.exp = u.digits - ctx->prec + result->exp - 1;

    /* tmp := (result - u)**2 */
    _mpd_qsub(tmp, result, &u, &maxctx, status);
    if (*status&MPD_Errors) goto nanresult;
    _mpd_qmul(tmp, tmp, tmp, &maxctx, status);
    if (*status&MPD_Errors) goto nanresult;

    if (_mpd_cmp(tmp, a) == 1) {
        /* (result - 1/2 ulp)**2 > a: subtract one full ulp (u := 10**(exp+1)). */
        u.exp += 1;
        u.data[0] = 1;
        _mpd_qsub(result, result, &u, &maxctx, status);
    }
    else {
        /* tmp := (result + u)**2 */
        _mpd_qadd(tmp, result, &u, &maxctx, status);
        if (*status&MPD_Errors) goto nanresult;
        _mpd_qmul(tmp, tmp, tmp, &maxctx, status);
        if (*status&MPD_Errors) goto nanresult;
        if (_mpd_cmp(tmp, a) == -1) {
            /* (result + 1/2 ulp)**2 < a: add one full ulp. */
            u.exp += 1;
            u.data[0] = 1;
            _mpd_qadd(result, result, &u, &maxctx, status);
        }
    }

    return;

nanresult:
    mpd_setspecial(result, MPD_POS, MPD_NAN);
}
/*
 * Square root: result := sqrt(a), correctly rounded.  Computed as
 * 1/(1/sqrt(a)) because the inverse square root iteration is faster
 * for large operands; the last digit is then corrected with
 * _mpd_fix_sqrt (Hull & Abrham).
 */
void
mpd_qsqrt(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
          uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_context_t varcontext;
    mpd_t *z = result;         /* current approximation */
    MPD_NEW_STATIC(v,0,0,0,0); /* a, normalized to a number between 1 and 10 */
    MPD_NEW_STATIC(vtmp,0,0,0,0);
    MPD_NEW_STATIC(tmp,0,0,0,0);
    mpd_ssize_t ideal_exp, shift;
    mpd_ssize_t target_prec, fracdigits;
    mpd_ssize_t a_exp, a_digits;
    mpd_ssize_t adj, tz;
    mpd_uint_t dummy, t;
    int exact = 0;

    varcontext = *ctx;
    varcontext.round = MPD_ROUND_HALF_EVEN;
    /* Ideal exponent: a->exp floored to an even value, then halved. */
    ideal_exp = (a->exp - (a->exp & 1)) / 2;

    /* Special values: NaNs propagate, sqrt of a negative special is
     * invalid, sqrt(+inf) is +inf. */
    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        mpd_setspecial(result, MPD_POS, MPD_INF);
        return;
    }
    /* sqrt(+-0) is +-0 with the ideal exponent. */
    if (mpd_iszero(a)) {
        _settriple(result, mpd_sign(a), 0, ideal_exp);
        mpd_qfinalize(result, ctx, status);
        return;
    }
    if (mpd_isnegative(a)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (!mpd_qcopy(&v, a, status)) {
        mpd_seterror(result, MPD_Malloc_error, status);
        goto finish;
    }

    a_exp = a->exp;
    a_digits = a->digits;

    /* normalize a to 1 <= v < 100 */
    if ((v.digits+v.exp) & 1) {
        /* odd adjusted exponent: leave one integer digit */
        fracdigits = v.digits - 1;
        v.exp = -fracdigits;
        /* t := the most significant digits of v, padded to 3 digits */
        _mpd_get_msdigits(&dummy, &t, &v, 3);
        t = t < 100 ? t*10 : t;
        t = t < 100 ? t*10 : t;
    }
    else {
        /* even adjusted exponent: leave two integer digits */
        fracdigits = v.digits - 2;
        v.exp = -fracdigits;
        /* t := the most significant digits of v, padded to 4 digits */
        _mpd_get_msdigits(&dummy, &t, &v, 4);
        t = t < 1000 ? t*10 : t;
        t = t < 1000 ? t*10 : t;
        t = t < 1000 ? t*10 : t;
    }
    /* Exponent shift applied by the normalization (undone below). */
    adj = (a_exp-v.exp) / 2;

    /* use excess digits */
    target_prec = (a_digits > ctx->prec) ? a_digits : ctx->prec;
    target_prec += 2;
    varcontext.prec = target_prec + 3;

    /* invroot is much faster for large numbers */
    _mpd_qinvroot(&tmp, &v, &varcontext, &workstatus);

    /* z := 1 / (1/sqrt(v)) */
    varcontext.prec = target_prec;
    _mpd_qdiv(NO_IDEAL_EXP, z, &one, &tmp, &varcontext, &workstatus);

    /* If the zero-stripped result squared cannot exceed v's digit count,
     * the square root may be exact: verify by squaring. */
    tz = mpd_trail_zeros(result);
    if ((result->digits-tz)*2-1 <= v.digits) {
        _mpd_qmul(&tmp, result, result, &varcontext, &workstatus);
        if (workstatus&MPD_Errors) {
            mpd_seterror(result, workstatus&MPD_Errors, status);
            goto finish;
        }
        exact = (_mpd_cmp(&tmp, &v) == 0);
    }
    *status |= (workstatus&MPD_Errors);

    if (!exact && !mpd_isspecial(result) && !mpd_iszero(result)) {
        /* Inexact: correct the last digit, then flag Rounded/Inexact. */
        _mpd_fix_sqrt(result, &v, &tmp, &varcontext, status);
        if (mpd_isspecial(result)) goto finish;
        *status |= (MPD_Rounded|MPD_Inexact);
    }

    result->exp += adj;
    if (exact) {
        /* Exact result: strip trailing zeros up to the ideal exponent. */
        shift = ideal_exp - result->exp;
        shift = (tz > shift) ? shift : tz;
        if (shift > 0) {
            mpd_qshiftr_inplace(result, shift);
            result->exp += shift;
        }
    }

finish:
    mpd_del(&v);
    mpd_del(&vtmp);
    mpd_del(&tmp);
    varcontext.prec = ctx->prec;
    mpd_qfinalize(result, &varcontext, status);
}
  6288. /******************************************************************************/
  6289. /* Base conversions */
  6290. /******************************************************************************/
/*
 * Returns the space needed to represent an integer mpd_t in base 'base'.
 * The result is undefined for non-integers.
 *
 * Max space needed:
 *
 *   base^n >= 10^(digits+exp)
 *   n >= log10(10^(digits+exp))/log10(base) = (digits+exp) / log10(base)
 */
size_t
mpd_sizeinbase(mpd_t *a, uint32_t base)
{
    size_t x;

    assert(mpd_isinteger(a));

    if (mpd_iszero(a)) {
        return 1;
    }

    /* Number of decimal digits of the integer value of a. */
    x = a->digits+a->exp;

#ifdef CONFIG_64
  #ifdef USE_80BIT_LONG_DOUBLE
    return (long double)x / log10(base) + 3;
  #else
    /* Guard the double conversion against precision loss:
     * x > floor(((1ULL<<53)-3) * log10(2)) */
    if (x > 2711437152599294ULL) {
        return SIZE_MAX;
    }
    return (double)x / log10(base) + 3;
  #endif
#else /* CONFIG_32 */
    {
        /* Clamp to SIZE_MAX on overflow of the estimate. */
        double y = x / log10(base) + 3;
        return (y > SIZE_MAX) ? SIZE_MAX : (size_t)y;
    }
#endif
}
/*
 * Returns the space needed (in mpd_uint_t words) to import a base 'base'
 * integer of length 'srclen', or MPD_SSIZE_MAX if the estimate exceeds
 * MPD_MAXIMPORT.
 */
static inline mpd_ssize_t
_mpd_importsize(size_t srclen, uint32_t base)
{
#if SIZE_MAX == UINT64_MAX
  #ifdef USE_80BIT_LONG_DOUBLE
    long double x = (long double)srclen * (log10(base)/MPD_RDIGITS) + 3;
  #else
    double x;
    /* Guard the double conversion against precision loss. */
    if (srclen > (1ULL<<53)) {
        return MPD_SSIZE_MAX;
    }
    x = (double)srclen * (log10(base)/MPD_RDIGITS) + 3;
  #endif
#else
    double x = srclen * (log10(base)/MPD_RDIGITS) + 3;
#endif
    return (x > MPD_MAXIMPORT) ? MPD_SSIZE_MAX : (mpd_ssize_t)x;
}
  6347. static inline size_t
  6348. _to_base_u16(uint16_t *w, size_t wlen, mpd_uint_t wbase,
  6349. mpd_uint_t *u, mpd_ssize_t ulen)
  6350. {
  6351. size_t n = 0;
  6352. assert(wlen > 0 && ulen > 0);
  6353. do {
  6354. w[n++] = (uint16_t)_mpd_shortdiv(u, u, ulen, wbase);
  6355. /* ulen will be at least 1. u[ulen-1] can only be zero if ulen == 1 */
  6356. ulen = _mpd_real_size(u, ulen);
  6357. } while (u[ulen-1] != 0 && n < wlen);
  6358. /* proper termination condition */
  6359. assert(u[ulen-1] == 0);
  6360. return n;
  6361. }
/*
 * Convert the base 'ubase' array u (ulen words, least significant in
 * u[0]) into w (at most wlen mpd_uint_t words) by Horner evaluation
 * from the most significant source word down.
 */
static inline void
_from_base_u16(mpd_uint_t *w, mpd_ssize_t wlen,
               const mpd_uint_t *u, size_t ulen, uint32_t ubase)
{
    mpd_ssize_t m = 1;  /* current length of w in words */
    mpd_uint_t carry;

    assert(wlen > 0 && ulen > 0);

    w[0] = u[--ulen];
    /* ulen is a size_t: decrementing 0 wraps to SIZE_MAX, which serves
     * as the loop-termination sentinel. */
    while (--ulen != SIZE_MAX && m < wlen) {
        _mpd_shortmul(w, w, m, ubase);       /* w := w * ubase */
        m = _mpd_real_size(w, m+1);
        carry = _mpd_shortadd(w, m, u[ulen]); /* w := w + u[ulen] */
        if (carry) w[m++] = carry;
    }

    /* proper termination condition */
    assert(ulen == SIZE_MAX);
}
  6379. /* target base wbase <= source base ubase */
  6380. static inline size_t
  6381. _baseconv_to_smaller(uint32_t *w, size_t wlen, mpd_uint_t wbase,
  6382. mpd_uint_t *u, mpd_ssize_t ulen, mpd_uint_t ubase)
  6383. {
  6384. size_t n = 0;
  6385. assert(wlen > 0 && ulen > 0);
  6386. do {
  6387. w[n++] = (uint32_t)_mpd_shortdiv_b(u, u, ulen, wbase, ubase);
  6388. /* ulen will be at least 1. u[ulen-1] can only be zero if ulen == 1 */
  6389. ulen = _mpd_real_size(u, ulen);
  6390. } while (u[ulen-1] != 0 && n < wlen);
  6391. /* proper termination condition */
  6392. assert(u[ulen-1] == 0);
  6393. return n;
  6394. }
/* target base wbase >= source base ubase */
static inline void
_baseconv_to_larger(mpd_uint_t *w, mpd_ssize_t wlen, mpd_uint_t wbase,
                    const mpd_uint_t *u, size_t ulen, mpd_uint_t ubase)
{
    mpd_ssize_t m = 1;  /* current length of w in words */
    mpd_uint_t carry;

    assert(wlen > 0 && ulen > 0);

    /* Horner evaluation from the most significant source word down:
     * w := (...(u[n-1]*ubase + u[n-2])*ubase + ...) + u[0]. */
    w[0] = u[--ulen];
    /* ulen is a size_t: decrementing 0 wraps to SIZE_MAX, which serves
     * as the loop-termination sentinel. */
    while (--ulen != SIZE_MAX && m < wlen) {
        _mpd_shortmul_b(w, w, m, ubase, wbase);        /* w := w * ubase */
        m = _mpd_real_size(w, m+1);
        carry = _mpd_shortadd_b(w, m, u[ulen], wbase); /* w := w + u[ulen] */
        if (carry) w[m++] = carry;
    }

    /* proper termination condition */
    assert(ulen == SIZE_MAX);
}
/*
 * Converts an integer mpd_t to a multiprecision integer with
 * base <= UINT16_MAX+1. The least significant word of the result
 * is rdata[0].
 *
 * Returns the number of words written to rdata, or SIZE_MAX on error
 * (non-integer/special operand or allocation failure).
 */
size_t
mpd_qexport_u16(uint16_t *rdata, size_t rlen, uint32_t rbase,
                const mpd_t *src, uint32_t *status)
{
    mpd_t *tsrc; /* src with the exponent folded into the coefficient */
    size_t n;

    assert(rbase <= (1U<<16));
    assert(rlen <= SIZE_MAX/(sizeof *rdata));

    if (mpd_isspecial(src) || !_mpd_isint(src)) {
        *status |= MPD_Invalid_operation;
        return SIZE_MAX;
    }

    memset(rdata, 0, rlen * (sizeof *rdata));

    if (mpd_iszero(src)) {
        return 1;
    }

    if ((tsrc = mpd_qnew()) == NULL) {
        *status |= MPD_Malloc_error;
        return SIZE_MAX;
    }

    /* Fold the exponent into the coefficient so tsrc is a plain integer. */
    if (src->exp >= 0) {
        if (!mpd_qshiftl(tsrc, src, src->exp, status)) {
            mpd_del(tsrc);
            return SIZE_MAX;
        }
    }
    else {
        /* src is an integer, so shifting out -exp digits is exact. */
        if (mpd_qshiftr(tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
            mpd_del(tsrc);
            return SIZE_MAX;
        }
    }

    n = _to_base_u16(rdata, rlen, rbase, tsrc->data, tsrc->len);

    mpd_del(tsrc);
    return n;
}
/*
 * Converts an integer mpd_t to a multiprecision integer with
 * base <= UINT32_MAX. The least significant word of the result
 * is rdata[0].
 *
 * Returns the number of words written to rdata, or SIZE_MAX on error
 * (non-integer/special operand, oversized rlen, or allocation failure).
 */
size_t
mpd_qexport_u32(uint32_t *rdata, size_t rlen, uint32_t rbase,
                const mpd_t *src, uint32_t *status)
{
    mpd_t *tsrc; /* src with the exponent folded into the coefficient */
    size_t n;

    if (mpd_isspecial(src) || !_mpd_isint(src)) {
        *status |= MPD_Invalid_operation;
        return SIZE_MAX;
    }
#if MPD_SIZE_MAX < SIZE_MAX
    /* rlen is later cast to mpd_ssize_t: reject values that don't fit. */
    if (rlen > MPD_SSIZE_MAX) {
        *status |= MPD_Invalid_operation;
        return SIZE_MAX;
    }
#endif

    assert(rlen <= SIZE_MAX/(sizeof *rdata));
    memset(rdata, 0, rlen * (sizeof *rdata));

    if (mpd_iszero(src)) {
        return 1;
    }

    if ((tsrc = mpd_qnew()) == NULL) {
        *status |= MPD_Malloc_error;
        return SIZE_MAX;
    }

    /* Fold the exponent into the coefficient so tsrc is a plain integer. */
    if (src->exp >= 0) {
        if (!mpd_qshiftl(tsrc, src, src->exp, status)) {
            mpd_del(tsrc);
            return SIZE_MAX;
        }
    }
    else {
        /* src is an integer, so shifting out -exp digits is exact. */
        if (mpd_qshiftr(tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
            mpd_del(tsrc);
            return SIZE_MAX;
        }
    }

#ifdef CONFIG_64
    n = _baseconv_to_smaller(rdata, rlen, rbase,
                             tsrc->data, tsrc->len, MPD_RADIX);
#else
    /* 32-bit build: pick the converter by the relative base sizes. */
    if (rbase <= MPD_RADIX) {
        n = _baseconv_to_smaller(rdata, rlen, rbase,
                                 tsrc->data, tsrc->len, MPD_RADIX);
    }
    else {
        _baseconv_to_larger(rdata, (mpd_ssize_t)rlen, rbase,
                            tsrc->data, tsrc->len, MPD_RADIX);
        n = _mpd_real_size(rdata, (mpd_ssize_t)rlen);
    }
#endif

    mpd_del(tsrc);
    return n;
}
/*
 * Converts a multiprecision integer with base <= UINT16_MAX+1 to an mpd_t.
 * The least significant word of the source is srcdata[0].
 */
void
mpd_qimport_u16(mpd_t *result,
                const uint16_t *srcdata, size_t srclen,
                uint8_t srcsign, uint32_t srcbase,
                const mpd_context_t *ctx, uint32_t *status)
{
    mpd_uint_t *usrc; /* uint16_t src copied to an mpd_uint_t array */
    mpd_ssize_t rlen; /* length of the result */
    size_t n = 0;

    assert(srclen > 0);
    assert(srcbase <= (1U<<16));

    /* Estimate the number of result words; reject oversized imports. */
    if ((rlen = _mpd_importsize(srclen, srcbase)) == MPD_SSIZE_MAX) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    /* Guard the multiplication inside the allocation below. */
    if (srclen > MPD_SIZE_MAX/(sizeof *usrc)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if ((usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc)) == NULL) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }
    /* Widen the source words to mpd_uint_t. */
    for (n = 0; n < srclen; n++) {
        usrc[n] = srcdata[n];
    }

    /* result->data is initialized to zero */
    if (!mpd_qresize_zero(result, rlen, status)) {
        goto finish;
    }

    _from_base_u16(result->data, rlen, usrc, srclen, srcbase);

    /* Set sign, exponent and digit count, then trim and round. */
    mpd_set_flags(result, srcsign);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, rlen);
    mpd_setdigits(result);

    mpd_qresize(result, result->len, status);
    mpd_qfinalize(result, ctx, status);

finish:
    mpd_free(usrc);
}
/*
 * Converts a multiprecision integer with base <= UINT32_MAX to an mpd_t.
 * The least significant word of the source is srcdata[0].
 */
void
mpd_qimport_u32(mpd_t *result,
                const uint32_t *srcdata, size_t srclen,
                uint8_t srcsign, uint32_t srcbase,
                const mpd_context_t *ctx, uint32_t *status)
{
    mpd_uint_t *usrc; /* uint32_t src copied to an mpd_uint_t array */
    mpd_ssize_t rlen; /* length of the result */
    size_t n = 0;

    assert(srclen > 0);

    /* Estimate the number of result words; reject oversized imports. */
    if ((rlen = _mpd_importsize(srclen, srcbase)) == MPD_SSIZE_MAX) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    /* Guard the multiplication inside the allocation below. */
    if (srclen > MPD_SIZE_MAX/(sizeof *usrc)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if ((usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc)) == NULL) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }
    /* Widen the source words to mpd_uint_t. */
    for (n = 0; n < srclen; n++) {
        usrc[n] = srcdata[n];
    }

    /* result->data is initialized to zero */
    if (!mpd_qresize_zero(result, rlen, status)) {
        goto finish;
    }

#ifdef CONFIG_64
    _baseconv_to_larger(result->data, rlen, MPD_RADIX,
                        usrc, srclen, srcbase);
#else
    /* 32-bit build: pick the converter by the relative base sizes. */
    if (srcbase <= MPD_RADIX) {
        _baseconv_to_larger(result->data, rlen, MPD_RADIX,
                            usrc, srclen, srcbase);
    }
    else {
        _baseconv_to_smaller(result->data, rlen, MPD_RADIX,
                             usrc, (mpd_ssize_t)srclen, srcbase);
    }
#endif

    /* Set sign, exponent and digit count, then trim and round. */
    mpd_set_flags(result, srcsign);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, rlen);
    mpd_setdigits(result);

    mpd_qresize(result, result->len, status);
    mpd_qfinalize(result, ctx, status);

finish:
    mpd_free(usrc);
}
/*********************************************************************/
/*               Testcases for Newton Division                       */
/*********************************************************************/

/*
 * Test driver for Newton division: q := a / b, computed via
 * _mpd_qbarrett_divmod, with the usual mpd division exponent handling.
 * 'action' selects whether trailing zeros are removed up to the ideal
 * exponent (SET_IDEAL_EXP).
 */
static void
_mpd_qtest_newtondiv(int action, mpd_t *q, const mpd_t *a, const mpd_t *b,
                     const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(aligned,0,0,0,0); /* operand shifted for alignment */
    mpd_uint_t ld;                   /* last digit of the quotient */
    mpd_ssize_t shift, exp, tz;
    mpd_ssize_t newsize;
    mpd_ssize_t ideal_exp;
    mpd_uint_t rem;                  /* nonzero iff the division is inexact */
    uint8_t sign_a = mpd_sign(a);
    uint8_t sign_b = mpd_sign(b);

    /* Special values: NaNs propagate; infinities are delegated. */
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
            return;
        }
        _mpd_qdiv_inf(q, a, b, ctx, status);
        return;
    }
    /* Zero divisor: 0/0 is undefined, x/0 is a signed infinity. */
    if (mpd_iszerocoeff(b)) {
        if (mpd_iszerocoeff(a)) {
            mpd_seterror(q, MPD_Division_undefined, status);
        }
        else {
            mpd_setspecial(q, sign_a^sign_b, MPD_INF);
            *status |= MPD_Division_by_zero;
        }
        return;
    }
    /* Zero dividend: signed zero with the ideal exponent. */
    if (mpd_iszerocoeff(a)) {
        exp = a->exp - b->exp;
        _settriple(q, sign_a^sign_b, 0, exp);
        mpd_qfinalize(q, ctx, status);
        return;
    }

    /* Scale the dividend (or the divisor) so that the integer quotient
     * carries ctx->prec+1 digits. */
    shift = (b->digits - a->digits) + ctx->prec + 1;
    ideal_exp = a->exp - b->exp;
    exp = ideal_exp - shift;
    if (shift > 0) {
        if (!mpd_qshiftl(&aligned, a, shift, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        a = &aligned;
    }
    else if (shift < 0) {
        shift = -shift;
        if (!mpd_qshiftl(&aligned, b, shift, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        b = &aligned;
    }

    /* Pre-size the quotient unless it aliases a larger operand. */
    newsize = a->len - b->len + 1;
    if ((q != b && q != a) || (q == b && newsize > b->len)) {
        if (!mpd_qresize(q, newsize, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
    }

    {
        MPD_NEW_STATIC(r,0,0,0,0);
        _mpd_qbarrett_divmod(q, &r, a, b, status);
        if (mpd_isspecial(q) || mpd_isspecial(&r)) {
            mpd_del(&r);
            goto finish;
        }
        /* Only the zero/nonzero status of the remainder is needed. */
        rem = !mpd_iszerocoeff(&r);
        mpd_del(&r);
        newsize = q->len;
    }

    newsize = _mpd_real_size(q->data, newsize);
    /* resize to smaller cannot fail */
    mpd_qresize(q, newsize, status);
    q->len = newsize;
    mpd_setdigits(q);

    shift = ideal_exp - exp;
    if (rem) {
        /* Inexact: bump a last digit of 0 or 5 so the final rounding
         * cannot be fooled by a false halfway case. */
        ld = mpd_lsd(q->data[0]);
        if (ld == 0 || ld == 5) {
            q->data[0] += 1;
        }
    }
    else if (action == SET_IDEAL_EXP && shift > 0) {
        /* Exact: remove trailing zeros up to the ideal exponent. */
        tz = mpd_trail_zeros(q);
        shift = (tz > shift) ? shift : tz;
        mpd_qshiftr_inplace(q, shift);
        exp += shift;
    }

    mpd_set_flags(q, sign_a^sign_b);
    q->exp = exp;

finish:
    mpd_del(&aligned);
    mpd_qfinalize(q, ctx, status);
}
/* Newton division testcase entry point: divide with the ideal-exponent
 * adjustment enabled. */
static void
mpd_qtest_newtondiv(mpd_t *q, const mpd_t *a, const mpd_t *b,
                    const mpd_context_t *ctx, uint32_t *status)
{
    _mpd_qtest_newtondiv(SET_IDEAL_EXP, q, a, b, ctx, status);
}
/*
 * Test driver for Barrett division with remainder (divide-integer /
 * remainder semantics): q := integer quotient of a/b, r := remainder.
 * Both results are set to NaN with MPD_Division_impossible when the
 * quotient does not fit into ctx->prec digits, and on any error.
 */
static void
_mpd_qtest_barrett_divmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
                          const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(aligned,0,0,0,0); /* operand shifted for exponent alignment */
    mpd_ssize_t qsize, rsize;
    mpd_ssize_t ideal_exp, expdiff, shift;
    uint8_t sign_a = mpd_sign(a);
    uint8_t sign_ab = mpd_sign(a)^mpd_sign(b);

    /* The remainder carries the smaller of the two operand exponents. */
    ideal_exp = (a->exp > b->exp) ?  b->exp : a->exp;
    if (mpd_iszerocoeff(a)) {
        /* 0 divmod b: q = signed 0, r = 0 with the ideal exponent. */
        if (!mpd_qcopy(r, a, status)) {
            goto nanresult; /* GCOV_NOT_REACHED */
        }
        r->exp = ideal_exp;
        _settriple(q, sign_ab, 0, 0);
        return;
    }

    expdiff = mpd_adjexp(a) - mpd_adjexp(b);
    if (expdiff < 0) {
        /* |a| < |b|: quotient is 0, remainder is a (exponent-aligned). */
        if (a->exp > b->exp) {
            /* positive and less than b->digits - a->digits */
            shift = a->exp - b->exp;
            if (!mpd_qshiftl(r, a, shift, status)) {
                goto nanresult;
            }
            r->exp = ideal_exp;
        }
        else {
            if (!mpd_qcopy(r, a, status)) {
                goto nanresult;
            }
        }
        _settriple(q, sign_ab, 0, 0);
        return;
    }
    if (expdiff > ctx->prec) {
        /* The integer quotient would have more than prec digits. */
        *status |= MPD_Division_impossible;
        goto nanresult;
    }

    /*
     * At this point we have:
     *   (1) 0 <= a->exp + a->digits - b->exp - b->digits <= prec
     *   (2) a->exp - b->exp >= b->digits - a->digits
     *   (3) a->exp - b->exp <= prec + b->digits - a->digits
     */
    if (a->exp != b->exp) {
        shift = a->exp - b->exp;
        if (shift > 0) {
            /* by (3), after the shift a->digits <= prec + b->digits */
            if (!mpd_qshiftl(&aligned, a, shift, status)) {
                goto nanresult;
            }
            a = &aligned;
        }
        else  {
            shift = -shift;
            /* by (2), after the shift b->digits <= a->digits */
            if (!mpd_qshiftl(&aligned, b, shift, status)) {
                goto nanresult;
            }
            b = &aligned;
        }
    }

    /* Pre-size the outputs unless they alias a larger operand. */
    qsize = a->len - b->len + 1;
    if (!(q == a && qsize < a->len) && !(q == b && qsize < b->len)) {
        if (!mpd_qresize(q, qsize, status)) {
            goto nanresult;
        }
    }

    rsize = b->len;
    if (!(r == a && rsize < a->len)) {
        if (!mpd_qresize(r, rsize, status)) {
            goto nanresult;
        }
    }

    _mpd_qbarrett_divmod(q, r, a, b, status);
    if (mpd_isspecial(q) || mpd_isspecial(r)) {
        goto nanresult;
    }
    if (mpd_isinfinite(q) || q->digits > ctx->prec) {
        *status |= MPD_Division_impossible;
        goto nanresult;
    }

    /* Trim both outputs and set signs and exponents. */
    qsize = q->len;
    rsize = r->len;

    qsize = _mpd_real_size(q->data, qsize);
    /* resize to smaller cannot fail */
    mpd_qresize(q, qsize, status);
    q->len = qsize;
    mpd_setdigits(q);
    mpd_set_flags(q, sign_ab);
    q->exp = 0;
    if (q->digits > ctx->prec) {
        *status |= MPD_Division_impossible; /* GCOV_NOT_REACHED */
        goto nanresult; /* GCOV_NOT_REACHED */
    }

    rsize = _mpd_real_size(r->data, rsize);
    /* resize to smaller cannot fail */
    mpd_qresize(r, rsize, status);
    r->len = rsize;
    mpd_setdigits(r);
    /* The remainder takes the sign of the dividend. */
    mpd_set_flags(r, sign_a);
    r->exp = ideal_exp;

out:
    mpd_del(&aligned);
    return;

nanresult:
    mpd_setspecial(q, MPD_POS, MPD_NAN);
    mpd_setspecial(r, MPD_POS, MPD_NAN);
    goto out;
}
/* Newton division testcase: divide-integer (q := trunc(a/b)). */
static void
mpd_qtest_newtondivint(mpd_t *q, const mpd_t *a, const mpd_t *b,
                       const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(r,0,0,0,0); /* remainder, discarded */
    uint8_t sign = mpd_sign(a)^mpd_sign(b);

    /* Special values. */
    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
            return;
        }
        if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
            /* inf / inf is invalid */
            mpd_seterror(q, MPD_Invalid_operation, status);
            return;
        }
        if (mpd_isinfinite(a)) {
            /* inf / finite is a signed infinity */
            mpd_setspecial(q, sign, MPD_INF);
            return;
        }
        if (mpd_isinfinite(b)) {
            /* finite / inf is a signed zero */
            _settriple(q, sign, 0, 0);
            return;
        }
        /* debug */
        abort(); /* GCOV_NOT_REACHED */
    }
    /* Zero divisor: 0/0 is undefined, x/0 is a signed infinity. */
    if (mpd_iszerocoeff(b)) {
        if (mpd_iszerocoeff(a)) {
            mpd_seterror(q, MPD_Division_undefined, status);
        }
        else {
            mpd_setspecial(q, sign, MPD_INF);
            *status |= MPD_Division_by_zero;
        }
        return;
    }

    _mpd_qtest_barrett_divmod(q, &r, a, b, ctx, status);
    mpd_del(&r);
    mpd_qfinalize(q, ctx, status);
}
  6867. static void
  6868. mpd_qtest_newtonrem(mpd_t *r, const mpd_t *a, const mpd_t *b,
  6869. const mpd_context_t *ctx, uint32_t *status)
  6870. {
  6871. MPD_NEW_STATIC(q,0,0,0,0);
  6872. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  6873. if (mpd_qcheck_nans(r, a, b, ctx, status)) {
  6874. return;
  6875. }
  6876. if (mpd_isinfinite(a)) {
  6877. mpd_seterror(r, MPD_Invalid_operation, status);
  6878. return;
  6879. }
  6880. if (mpd_isinfinite(b)) {
  6881. mpd_qcopy(r, a, status);
  6882. mpd_qfinalize(r, ctx, status);
  6883. return;
  6884. }
  6885. /* debug */
  6886. abort(); /* GCOV_NOT_REACHED */
  6887. }
  6888. if (mpd_iszerocoeff(b)) {
  6889. if (mpd_iszerocoeff(a)) {
  6890. mpd_seterror(r, MPD_Division_undefined, status);
  6891. }
  6892. else {
  6893. mpd_seterror(r, MPD_Invalid_operation, status);
  6894. }
  6895. return;
  6896. }
  6897. _mpd_qtest_barrett_divmod(&q, r, a, b, ctx, status);
  6898. mpd_del(&q);
  6899. mpd_qfinalize(r, ctx, status);
  6900. }
  6901. static void
  6902. mpd_qtest_newtondivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
  6903. const mpd_context_t *ctx, uint32_t *status)
  6904. {
  6905. uint8_t sign = mpd_sign(a)^mpd_sign(b);
  6906. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  6907. if (mpd_qcheck_nans(q, a, b, ctx, status)) {
  6908. mpd_qcopy(r, q, status);
  6909. return;
  6910. }
  6911. if (mpd_isinfinite(a)) {
  6912. if (mpd_isinfinite(b)) {
  6913. mpd_setspecial(q, MPD_POS, MPD_NAN);
  6914. }
  6915. else {
  6916. mpd_setspecial(q, sign, MPD_INF);
  6917. }
  6918. mpd_setspecial(r, MPD_POS, MPD_NAN);
  6919. *status |= MPD_Invalid_operation;
  6920. return;
  6921. }
  6922. if (mpd_isinfinite(b)) {
  6923. if (!mpd_qcopy(r, a, status)) {
  6924. mpd_seterror(q, MPD_Malloc_error, status);
  6925. return;
  6926. }
  6927. mpd_qfinalize(r, ctx, status);
  6928. _settriple(q, sign, 0, 0);
  6929. return;
  6930. }
  6931. /* debug */
  6932. abort(); /* GCOV_NOT_REACHED */
  6933. }
  6934. if (mpd_iszerocoeff(b)) {
  6935. if (mpd_iszerocoeff(a)) {
  6936. mpd_setspecial(q, MPD_POS, MPD_NAN);
  6937. mpd_setspecial(r, MPD_POS, MPD_NAN);
  6938. *status |= MPD_Division_undefined;
  6939. }
  6940. else {
  6941. mpd_setspecial(q, sign, MPD_INF);
  6942. mpd_setspecial(r, MPD_POS, MPD_NAN);
  6943. *status |= (MPD_Division_by_zero|MPD_Invalid_operation);
  6944. }
  6945. return;
  6946. }
  6947. _mpd_qtest_barrett_divmod(q, r, a, b, ctx, status);
  6948. mpd_qfinalize(q, ctx, status);
  6949. mpd_qfinalize(r, ctx, status);
  6950. }
  6951. void
  6952. mpd_test_newtondiv(mpd_t *q, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx)
  6953. {
  6954. uint32_t status = 0;
  6955. mpd_qtest_newtondiv(q, a, b, ctx, &status);
  6956. mpd_addstatus_raise(ctx, status);
  6957. }
  6958. void
  6959. mpd_test_newtondivint(mpd_t *q, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx)
  6960. {
  6961. uint32_t status = 0;
  6962. mpd_qtest_newtondivint(q, a, b, ctx, &status);
  6963. mpd_addstatus_raise(ctx, status);
  6964. }
  6965. void
  6966. mpd_test_newtonrem(mpd_t *r, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx)
  6967. {
  6968. uint32_t status = 0;
  6969. mpd_qtest_newtonrem(r, a, b, ctx, &status);
  6970. mpd_addstatus_raise(ctx, status);
  6971. }
  6972. void
  6973. mpd_test_newtondivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx)
  6974. {
  6975. uint32_t status = 0;
  6976. mpd_qtest_newtondivmod(q, r, a, b, ctx, &status);
  6977. mpd_addstatus_raise(ctx, status);
  6978. }