xxhash.h
  1. /*
  2. * xxHash - Extremely Fast Hash algorithm
  3. * Header File
  4. * Copyright (C) 2012-2023 Yann Collet
  5. *
  6. * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
  7. *
  8. * Redistribution and use in source and binary forms, with or without
  9. * modification, are permitted provided that the following conditions are
  10. * met:
  11. *
  12. * * Redistributions of source code must retain the above copyright
  13. * notice, this list of conditions and the following disclaimer.
  14. * * Redistributions in binary form must reproduce the above
  15. * copyright notice, this list of conditions and the following disclaimer
  16. * in the documentation and/or other materials provided with the
  17. * distribution.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  20. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  21. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  22. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  23. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  25. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  26. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  27. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  28. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  29. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30. *
  31. * You can contact the author at:
  32. * - xxHash homepage: https://www.xxhash.com
  33. * - xxHash source repository: https://github.com/Cyan4973/xxHash
  34. */
  35. /*!
  36. * @mainpage xxHash
  37. *
  38. * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
  39. * limits.
  40. *
  41. * It is proposed in four flavors, in three families:
  42. * 1. @ref XXH32_family
  43. * - Classic 32-bit hash function. Simple, compact, and runs on almost all
  44. * 32-bit and 64-bit systems.
  45. * 2. @ref XXH64_family
  46. * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
  47. * 64-bit systems (but _not_ 32-bit systems).
  48. * 3. @ref XXH3_family
  49. * - Modern 64-bit and 128-bit hash function family which features improved
  50. * strength and performance across the board, especially on smaller data.
  51. * It benefits greatly from SIMD and 64-bit without requiring it.
  52. *
  53. * Benchmarks
  54. * ---
  55. * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
  56. * The open source benchmark program is compiled with clang v10.0 using -O3 flag.
  57. *
  58. * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity |
  59. * | -------------------- | ------- | ----: | ---------------: | ------------------: |
  60. * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 |
  61. * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 |
  62. * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 |
  63. * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 |
  64. * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 |
  65. * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 |
  66. * | RAM sequential read | | N/A | 28.0 GB/s | N/A |
  67. * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 |
  68. * | City64 | | 64 | 22.0 GB/s | 76.6 |
  69. * | T1ha2 | | 64 | 22.0 GB/s | 99.0 |
  70. * | City128 | | 128 | 21.7 GB/s | 57.7 |
  71. * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 |
  72. * | XXH64() | | 64 | 19.4 GB/s | 71.0 |
  73. * | SpookyHash | | 64 | 19.3 GB/s | 53.2 |
  74. * | Mum | | 64 | 18.0 GB/s | 67.0 |
  75. * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 |
  76. * | XXH32() | | 32 | 9.7 GB/s | 71.9 |
  77. * | City32 | | 32 | 9.1 GB/s | 66.0 |
  78. * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 |
  79. * | Murmur3 | | 32 | 3.9 GB/s | 56.1 |
  80. * | SipHash* | | 64 | 3.0 GB/s | 43.2 |
  81. * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 |
  82. * | HighwayHash | | 64 | 1.4 GB/s | 6.0 |
  83. * | FNV64 | | 64 | 1.2 GB/s | 62.7 |
  84. * | Blake2* | | 256 | 1.1 GB/s | 5.1 |
  85. * | SHA1* | | 160 | 0.8 GB/s | 5.6 |
  86. * | MD5* | | 128 | 0.6 GB/s | 7.8 |
  87. * @note
  88. * - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
  89. * even though it is mandatory on x64.
  90. * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
  91. * by modern standards.
  92. * - Small data velocity is a rough average of an algorithm's efficiency for small
  93. * data. For more accurate information, see the wiki.
  94. * - More benchmarks and strength tests are found on the wiki:
  95. * https://github.com/Cyan4973/xxHash/wiki
  96. *
  97. * Usage
  98. * ------
  99. * All xxHash variants use a similar API. Changing the algorithm is a trivial
  100. * substitution.
  101. *
  102. * @pre
  103. * For functions which take an input and length parameter, the following
  104. * requirements are assumed:
  105. * - The range from [`input`, `input + length`) is valid, readable memory.
 * - As an exception, if `length` is `0`, then `input` may be `NULL`.
  107. * - For C++, the objects must have the *TriviallyCopyable* property, as the
  108. * functions access bytes directly as if it was an array of `unsigned char`.
  109. *
  110. * @anchor single_shot_example
  111. * **Single Shot**
  112. *
  113. * These functions are stateless functions which hash a contiguous block of memory,
  114. * immediately returning the result. They are the easiest and usually the fastest
  115. * option.
  116. *
  117. * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
  118. *
  119. * @code{.c}
  120. * #include <string.h>
  121. * #include "xxhash.h"
  122. *
  123. * // Example for a function which hashes a null terminated string with XXH32().
  124. * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
  125. * {
  126. * // NULL pointers are only valid if the length is zero
  127. * size_t length = (string == NULL) ? 0 : strlen(string);
  128. * return XXH32(string, length, seed);
  129. * }
  130. * @endcode
  131. *
  132. *
  133. * @anchor streaming_example
  134. * **Streaming**
  135. *
  136. * These groups of functions allow incremental hashing of unknown size, even
  137. * more than what would fit in a size_t.
  138. *
  139. * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
  140. *
  141. * @code{.c}
  142. * #include <stdio.h>
  143. * #include <assert.h>
  144. * #include "xxhash.h"
  145. * // Example for a function which hashes a FILE incrementally with XXH3_64bits().
  146. * XXH64_hash_t hashFile(FILE* f)
  147. * {
  148. * // Allocate a state struct. Do not just use malloc() or new.
  149. * XXH3_state_t* state = XXH3_createState();
  150. * assert(state != NULL && "Out of memory!");
  151. * // Reset the state to start a new hashing session.
  152. * XXH3_64bits_reset(state);
  153. * char buffer[4096];
  154. * size_t count;
  155. * // Read the file in chunks
  156. * while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
  157. * // Run update() as many times as necessary to process the data
  158. * XXH3_64bits_update(state, buffer, count);
  159. * }
  160. * // Retrieve the finalized hash. This will not change the state.
  161. * XXH64_hash_t result = XXH3_64bits_digest(state);
  162. * // Free the state. Do not use free().
  163. * XXH3_freeState(state);
  164. * return result;
  165. * }
  166. * @endcode
  167. *
  168. * Streaming functions generate the xxHash value from an incremental input.
  169. * This method is slower than single-call functions, due to state management.
  170. * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
  171. *
  172. * An XXH state must first be allocated using `XXH*_createState()`.
  173. *
  174. * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
  175. *
  176. * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
  177. *
  178. * The function returns an error code, with 0 meaning OK, and any other value
  179. * meaning there is an error.
  180. *
  181. * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
  182. * This function returns the nn-bits hash as an int or long long.
  183. *
  184. * It's still possible to continue inserting input into the hash state after a
  185. * digest, and generate new hash values later on by invoking `XXH*_digest()`.
  186. *
  187. * When done, release the state using `XXH*_freeState()`.
  188. *
  189. *
  190. * @anchor canonical_representation_example
  191. * **Canonical Representation**
  192. *
  193. * The default return values from XXH functions are unsigned 32, 64 and 128 bit
  194. * integers.
 * This is the simplest and fastest format for further post-processing.
  196. *
  197. * However, this leaves open the question of what is the order on the byte level,
  198. * since little and big endian conventions will store the same number differently.
  199. *
  200. * The canonical representation settles this issue by mandating big-endian
  201. * convention, the same convention as human-readable numbers (large digits first).
  202. *
  203. * When writing hash values to storage, sending them over a network, or printing
  204. * them, it's highly recommended to use the canonical representation to ensure
  205. * portability across a wider range of systems, present and future.
  206. *
  207. * The following functions allow transformation of hash values to and from
  208. * canonical format.
  209. *
  210. * XXH32_canonicalFromHash(), XXH32_hashFromCanonical(),
  211. * XXH64_canonicalFromHash(), XXH64_hashFromCanonical(),
  212. * XXH128_canonicalFromHash(), XXH128_hashFromCanonical(),
  213. *
  214. * @code{.c}
  215. * #include <stdio.h>
  216. * #include "xxhash.h"
  217. *
  218. * // Example for a function which prints XXH32_hash_t in human readable format
  219. * void printXxh32(XXH32_hash_t hash)
  220. * {
  221. * XXH32_canonical_t cano;
  222. * XXH32_canonicalFromHash(&cano, hash);
  223. * size_t i;
  224. * for(i = 0; i < sizeof(cano.digest); ++i) {
  225. * printf("%02x", cano.digest[i]);
  226. * }
  227. * printf("\n");
  228. * }
  229. *
  230. * // Example for a function which converts XXH32_canonical_t to XXH32_hash_t
  231. * XXH32_hash_t convertCanonicalToXxh32(XXH32_canonical_t cano)
  232. * {
  233. * XXH32_hash_t hash = XXH32_hashFromCanonical(&cano);
  234. * return hash;
  235. * }
  236. * @endcode
  237. *
  238. *
  239. * @file xxhash.h
  240. * xxHash prototypes and implementation
  241. */
  242. #if defined (__cplusplus)
  243. extern "C" {
  244. #endif
  245. /* ****************************
  246. * INLINE mode
  247. ******************************/
  248. /*!
  249. * @defgroup public Public API
  250. * Contains details on the public xxHash functions.
  251. * @{
  252. */
  253. #ifdef XXH_DOXYGEN
  254. /*!
  255. * @brief Gives access to internal state declaration, required for static allocation.
  256. *
  257. * Incompatible with dynamic linking, due to risks of ABI changes.
  258. *
  259. * Usage:
  260. * @code{.c}
  261. * #define XXH_STATIC_LINKING_ONLY
  262. * #include "xxhash.h"
  263. * @endcode
  264. */
  265. # define XXH_STATIC_LINKING_ONLY
  266. /* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
  267. /*!
  268. * @brief Gives access to internal definitions.
  269. *
  270. * Usage:
  271. * @code{.c}
  272. * #define XXH_STATIC_LINKING_ONLY
  273. * #define XXH_IMPLEMENTATION
  274. * #include "xxhash.h"
  275. * @endcode
  276. */
  277. # define XXH_IMPLEMENTATION
  278. /* Do not undef XXH_IMPLEMENTATION for Doxygen */
  279. /*!
  280. * @brief Exposes the implementation and marks all functions as `inline`.
  281. *
  282. * Use these build macros to inline xxhash into the target unit.
  283. * Inlining improves performance on small inputs, especially when the length is
  284. * expressed as a compile-time constant:
  285. *
  286. * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
  287. *
  288. * It also keeps xxHash symbols private to the unit, so they are not exported.
  289. *
  290. * Usage:
  291. * @code{.c}
  292. * #define XXH_INLINE_ALL
  293. * #include "xxhash.h"
  294. * @endcode
  295. * Do not compile and link xxhash.o as a separate object, as it is not useful.
  296. */
  297. # define XXH_INLINE_ALL
  298. # undef XXH_INLINE_ALL
  299. /*!
  300. * @brief Exposes the implementation without marking functions as inline.
  301. */
  302. # define XXH_PRIVATE_API
  303. # undef XXH_PRIVATE_API
  304. /*!
  305. * @brief Emulate a namespace by transparently prefixing all symbols.
  306. *
  307. * If you want to include _and expose_ xxHash functions from within your own
  308. * library, but also want to avoid symbol collisions with other libraries which
  309. * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
  310. * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE
  311. * (therefore, avoid empty or numeric values).
  312. *
  313. * Note that no change is required within the calling program as long as it
  314. * includes `xxhash.h`: Regular symbol names will be automatically translated
  315. * by this header.
  316. */
  317. # define XXH_NAMESPACE /* YOUR NAME HERE */
  318. # undef XXH_NAMESPACE
  319. #endif
  320. #if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
  321. && !defined(XXH_INLINE_ALL_31684351384)
  322. /* this section should be traversed only once */
  323. # define XXH_INLINE_ALL_31684351384
  324. /* give access to the advanced API, required to compile implementations */
  325. # undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
  326. # define XXH_STATIC_LINKING_ONLY
  327. /* make all functions private */
  328. # undef XXH_PUBLIC_API
  329. # if defined(__GNUC__)
  330. # define XXH_PUBLIC_API static __inline __attribute__((__unused__))
  331. # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
  332. # define XXH_PUBLIC_API static inline
  333. # elif defined(_MSC_VER)
  334. # define XXH_PUBLIC_API static __inline
  335. # else
  336. /* note: this version may generate warnings for unused static functions */
  337. # define XXH_PUBLIC_API static
  338. # endif
  339. /*
  340. * This part deals with the special case where a unit wants to inline xxHash,
  341. * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
  342. * such as part of some previously included *.h header file.
  343. * Without further action, the new include would just be ignored,
  344. * and functions would effectively _not_ be inlined (silent failure).
  345. * The following macros solve this situation by prefixing all inlined names,
  346. * avoiding naming collision with previous inclusions.
  347. */
  348. /* Before that, we unconditionally #undef all symbols,
  349. * in case they were already defined with XXH_NAMESPACE.
  350. * They will then be redefined for XXH_INLINE_ALL
  351. */
  352. # undef XXH_versionNumber
  353. /* XXH32 */
  354. # undef XXH32
  355. # undef XXH32_createState
  356. # undef XXH32_freeState
  357. # undef XXH32_reset
  358. # undef XXH32_update
  359. # undef XXH32_digest
  360. # undef XXH32_copyState
  361. # undef XXH32_canonicalFromHash
  362. # undef XXH32_hashFromCanonical
  363. /* XXH64 */
  364. # undef XXH64
  365. # undef XXH64_createState
  366. # undef XXH64_freeState
  367. # undef XXH64_reset
  368. # undef XXH64_update
  369. # undef XXH64_digest
  370. # undef XXH64_copyState
  371. # undef XXH64_canonicalFromHash
  372. # undef XXH64_hashFromCanonical
  373. /* XXH3_64bits */
  374. # undef XXH3_64bits
  375. # undef XXH3_64bits_withSecret
  376. # undef XXH3_64bits_withSeed
  377. # undef XXH3_64bits_withSecretandSeed
  378. # undef XXH3_createState
  379. # undef XXH3_freeState
  380. # undef XXH3_copyState
  381. # undef XXH3_64bits_reset
  382. # undef XXH3_64bits_reset_withSeed
  383. # undef XXH3_64bits_reset_withSecret
  384. # undef XXH3_64bits_update
  385. # undef XXH3_64bits_digest
  386. # undef XXH3_generateSecret
  387. /* XXH3_128bits */
  388. # undef XXH128
  389. # undef XXH3_128bits
  390. # undef XXH3_128bits_withSeed
  391. # undef XXH3_128bits_withSecret
  392. # undef XXH3_128bits_reset
  393. # undef XXH3_128bits_reset_withSeed
  394. # undef XXH3_128bits_reset_withSecret
  395. # undef XXH3_128bits_reset_withSecretandSeed
  396. # undef XXH3_128bits_update
  397. # undef XXH3_128bits_digest
  398. # undef XXH128_isEqual
  399. # undef XXH128_cmp
  400. # undef XXH128_canonicalFromHash
  401. # undef XXH128_hashFromCanonical
  402. /* Finally, free the namespace itself */
  403. # undef XXH_NAMESPACE
  404. /* employ the namespace for XXH_INLINE_ALL */
  405. # define XXH_NAMESPACE XXH_INLINE_
  406. /*
  407. * Some identifiers (enums, type names) are not symbols,
  408. * but they must nonetheless be renamed to avoid redeclaration.
  409. * Alternative solution: do not redeclare them.
  410. * However, this requires some #ifdefs, and has a more dispersed impact.
  411. * Meanwhile, renaming can be achieved in a single place.
  412. */
  413. # define XXH_IPREF(Id) XXH_NAMESPACE ## Id
  414. # define XXH_OK XXH_IPREF(XXH_OK)
  415. # define XXH_ERROR XXH_IPREF(XXH_ERROR)
  416. # define XXH_errorcode XXH_IPREF(XXH_errorcode)
  417. # define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
  418. # define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
  419. # define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
  420. # define XXH32_state_s XXH_IPREF(XXH32_state_s)
  421. # define XXH32_state_t XXH_IPREF(XXH32_state_t)
  422. # define XXH64_state_s XXH_IPREF(XXH64_state_s)
  423. # define XXH64_state_t XXH_IPREF(XXH64_state_t)
  424. # define XXH3_state_s XXH_IPREF(XXH3_state_s)
  425. # define XXH3_state_t XXH_IPREF(XXH3_state_t)
  426. # define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
  427. /* Ensure the header is parsed again, even if it was previously included */
  428. # undef XXHASH_H_5627135585666179
  429. # undef XXHASH_H_STATIC_13879238742
  430. #endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
  431. /* ****************************************************************
  432. * Stable API
  433. *****************************************************************/
  434. #ifndef XXHASH_H_5627135585666179
  435. #define XXHASH_H_5627135585666179 1
  436. /*! @brief Marks a global symbol. */
  437. #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
  438. # if defined(_WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
  439. # ifdef XXH_EXPORT
  440. # define XXH_PUBLIC_API __declspec(dllexport)
  441. # elif XXH_IMPORT
  442. # define XXH_PUBLIC_API __declspec(dllimport)
  443. # endif
  444. # else
  445. # define XXH_PUBLIC_API /* do nothing */
  446. # endif
  447. #endif
  448. #ifdef XXH_NAMESPACE
  449. # define XXH_CAT(A,B) A##B
  450. # define XXH_NAME2(A,B) XXH_CAT(A,B)
  451. # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
  452. /* XXH32 */
  453. # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
  454. # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
  455. # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
  456. # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
  457. # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
  458. # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
  459. # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
  460. # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
  461. # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
  462. /* XXH64 */
  463. # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
  464. # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
  465. # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
  466. # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
  467. # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
  468. # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
  469. # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
  470. # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
  471. # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
  472. /* XXH3_64bits */
  473. # define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
  474. # define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
  475. # define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
  476. # define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
  477. # define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
  478. # define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
  479. # define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
  480. # define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
  481. # define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
  482. # define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
  483. # define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
  484. # define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
  485. # define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
  486. # define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
  487. # define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
  488. /* XXH3_128bits */
  489. # define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
  490. # define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
  491. # define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
  492. # define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
  493. # define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
  494. # define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
  495. # define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
  496. # define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
  497. # define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
  498. # define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
  499. # define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
  500. # define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
  501. # define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
  502. # define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
  503. # define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
  504. #endif
  505. /* *************************************
  506. * Compiler specifics
  507. ***************************************/
  508. /* specific declaration modes for Windows */
  509. #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
  510. # if defined(_WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
  511. # ifdef XXH_EXPORT
  512. # define XXH_PUBLIC_API __declspec(dllexport)
  513. # elif XXH_IMPORT
  514. # define XXH_PUBLIC_API __declspec(dllimport)
  515. # endif
  516. # else
  517. # define XXH_PUBLIC_API /* do nothing */
  518. # endif
  519. #endif
  520. #if defined (__GNUC__)
  521. # define XXH_CONSTF __attribute__((__const__))
  522. # define XXH_PUREF __attribute__((__pure__))
  523. # define XXH_MALLOCF __attribute__((__malloc__))
  524. #else
  525. # define XXH_CONSTF /* disable */
  526. # define XXH_PUREF
  527. # define XXH_MALLOCF
  528. #endif
  529. /* *************************************
  530. * Version
  531. ***************************************/
  532. #define XXH_VERSION_MAJOR 0
  533. #define XXH_VERSION_MINOR 8
  534. #define XXH_VERSION_RELEASE 3
  535. /*! @brief Version number, encoded as two digits each */
  536. #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
  537. /*!
  538. * @brief Obtains the xxHash version.
  539. *
  540. * This is mostly useful when xxHash is compiled as a shared library,
  541. * since the returned value comes from the library, as opposed to header file.
  542. *
  543. * @return @ref XXH_VERSION_NUMBER of the invoked library.
  544. */
  545. XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
  546. /* ****************************
  547. * Common basic types
  548. ******************************/
  549. #include <stddef.h> /* size_t */
  550. /*!
  551. * @brief Exit code for the streaming API.
  552. */
  553. typedef enum {
  554. XXH_OK = 0, /*!< OK */
  555. XXH_ERROR /*!< Error */
  556. } XXH_errorcode;
  557. /*-**********************************************************************
  558. * 32-bit hash
  559. ************************************************************************/
  560. #if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
  561. /*!
  562. * @brief An unsigned 32-bit integer.
  563. *
  564. * Not necessarily defined to `uint32_t` but functionally equivalent.
  565. */
  566. typedef uint32_t XXH32_hash_t;
  567. #elif !defined (__VMS) \
  568. && (defined (__cplusplus) \
  569. || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
  570. # ifdef _AIX
  571. # include <inttypes.h>
  572. # else
  573. # include <stdint.h>
  574. # endif
  575. typedef uint32_t XXH32_hash_t;
  576. #else
  577. # include <limits.h>
  578. # if UINT_MAX == 0xFFFFFFFFUL
  579. typedef unsigned int XXH32_hash_t;
  580. # elif ULONG_MAX == 0xFFFFFFFFUL
  581. typedef unsigned long XXH32_hash_t;
  582. # else
  583. # error "unsupported platform: need a 32-bit type"
  584. # endif
  585. #endif
  586. /*!
  587. * @}
  588. *
  589. * @defgroup XXH32_family XXH32 family
  590. * @ingroup public
  591. * Contains functions used in the classic 32-bit xxHash algorithm.
  592. *
  593. * @note
  594. * XXH32 is useful for older platforms, with no or poor 64-bit performance.
  595. * Note that the @ref XXH3_family provides competitive speed for both 32-bit
  596. * and 64-bit systems, and offers true 64/128 bit hash results.
  597. *
  598. * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
  599. * @see @ref XXH32_impl for implementation details
  600. * @{
  601. */
  602. /*!
  603. * @brief Calculates the 32-bit hash of @p input using xxHash32.
  604. *
  605. * @param input The block of data to be hashed, at least @p length bytes in size.
  606. * @param length The length of @p input, in bytes.
  607. * @param seed The 32-bit seed to alter the hash's output predictably.
  608. *
  609. * @pre
  610. * The memory between @p input and @p input + @p length must be valid,
  611. * readable, contiguous memory. However, if @p length is `0`, @p input may be
  612. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  613. *
  614. * @return The calculated 32-bit xxHash32 value.
  615. *
  616. * @see @ref single_shot_example "Single Shot Example" for an example.
  617. */
  618. XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
  619. #ifndef XXH_NO_STREAM
  620. /*!
  621. * @typedef struct XXH32_state_s XXH32_state_t
  622. * @brief The opaque state struct for the XXH32 streaming API.
  623. *
  624. * @see XXH32_state_s for details.
  625. * @see @ref streaming_example "Streaming Example"
  626. */
  627. typedef struct XXH32_state_s XXH32_state_t;
  628. /*!
  629. * @brief Allocates an @ref XXH32_state_t.
  630. *
  631. * @return An allocated pointer of @ref XXH32_state_t on success.
  632. * @return `NULL` on failure.
  633. *
  634. * @note Must be freed with XXH32_freeState().
  635. *
  636. * @see @ref streaming_example "Streaming Example"
  637. */
  638. XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
  639. /*!
  640. * @brief Frees an @ref XXH32_state_t.
  641. *
  642. * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
  643. *
  644. * @return @ref XXH_OK.
  645. *
  646. * @note @p statePtr must be allocated with XXH32_createState().
  647. *
  648. * @see @ref streaming_example "Streaming Example"
  649. *
  650. */
  651. XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
  652. /*!
  653. * @brief Copies one @ref XXH32_state_t to another.
  654. *
  655. * @param dst_state The state to copy to.
  656. * @param src_state The state to copy from.
  657. * @pre
  658. * @p dst_state and @p src_state must not be `NULL` and must not overlap.
  659. */
  660. XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
  661. /*!
  662. * @brief Resets an @ref XXH32_state_t to begin a new hash.
  663. *
  664. * @param statePtr The state struct to reset.
  665. * @param seed The 32-bit seed to alter the hash result predictably.
  666. *
  667. * @pre
  668. * @p statePtr must not be `NULL`.
  669. *
  670. * @return @ref XXH_OK on success.
  671. * @return @ref XXH_ERROR on failure.
  672. *
  673. * @note This function resets and seeds a state. Call it before @ref XXH32_update().
  674. *
  675. * @see @ref streaming_example "Streaming Example"
  676. */
  677. XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
  678. /*!
  679. * @brief Consumes a block of @p input to an @ref XXH32_state_t.
  680. *
  681. * @param statePtr The state struct to update.
  682. * @param input The block of data to be hashed, at least @p length bytes in size.
  683. * @param length The length of @p input, in bytes.
  684. *
  685. * @pre
  686. * @p statePtr must not be `NULL`.
  687. * @pre
  688. * The memory between @p input and @p input + @p length must be valid,
  689. * readable, contiguous memory. However, if @p length is `0`, @p input may be
  690. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  691. *
  692. * @return @ref XXH_OK on success.
  693. * @return @ref XXH_ERROR on failure.
  694. *
  695. * @note Call this to incrementally consume blocks of data.
  696. *
  697. * @see @ref streaming_example "Streaming Example"
  698. */
  699. XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
  700. /*!
  701. * @brief Returns the calculated hash value from an @ref XXH32_state_t.
  702. *
  703. * @param statePtr The state struct to calculate the hash from.
  704. *
  705. * @pre
  706. * @p statePtr must not be `NULL`.
  707. *
  708. * @return The calculated 32-bit xxHash32 value from that state.
  709. *
  710. * @note
  711. * Calling XXH32_digest() will not affect @p statePtr, so you can update,
  712. * digest, and update again.
  713. *
  714. * @see @ref streaming_example "Streaming Example"
  715. */
  716. XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
  717. #endif /* !XXH_NO_STREAM */
  718. /******* Canonical representation *******/
  719. /*!
  720. * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
  721. */
  722. typedef struct {
  723. unsigned char digest[4]; /*!< Hash bytes, big endian */
  724. } XXH32_canonical_t;
  725. /*!
  726. * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
  727. *
  728. * @param dst The @ref XXH32_canonical_t pointer to be stored to.
  729. * @param hash The @ref XXH32_hash_t to be converted.
  730. *
  731. * @pre
  732. * @p dst must not be `NULL`.
  733. *
  734. * @see @ref canonical_representation_example "Canonical Representation Example"
  735. */
  736. XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
  737. /*!
  738. * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
  739. *
  740. * @param src The @ref XXH32_canonical_t to convert.
  741. *
  742. * @pre
  743. * @p src must not be `NULL`.
  744. *
  745. * @return The converted hash.
  746. *
  747. * @see @ref canonical_representation_example "Canonical Representation Example"
  748. */
  749. XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
  750. /*! @cond Doxygen ignores this part */
  751. #ifdef __has_attribute
  752. # define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
  753. #else
  754. # define XXH_HAS_ATTRIBUTE(x) 0
  755. #endif
  756. /*! @endcond */
  757. /*! @cond Doxygen ignores this part */
  758. /*
  759. * C23 __STDC_VERSION__ number hasn't been specified yet. For now
  760. * leave as `201711L` (C17 + 1).
 * TODO: Update to the correct value when it has been specified.
  762. */
  763. #define XXH_C23_VN 201711L
  764. /*! @endcond */
  765. /*! @cond Doxygen ignores this part */
  766. /* C-language Attributes are added in C23. */
  767. #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
  768. # define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
  769. #else
  770. # define XXH_HAS_C_ATTRIBUTE(x) 0
  771. #endif
  772. /*! @endcond */
  773. /*! @cond Doxygen ignores this part */
  774. #if defined(__cplusplus) && defined(__has_cpp_attribute)
  775. # define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
  776. #else
  777. # define XXH_HAS_CPP_ATTRIBUTE(x) 0
  778. #endif
  779. /*! @endcond */
  780. /*! @cond Doxygen ignores this part */
  781. /*
  782. * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
  783. * introduced in CPP17 and C23.
  784. * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
  785. * C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough
  786. */
  787. #if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
  788. # define XXH_FALLTHROUGH [[fallthrough]]
  789. #elif XXH_HAS_ATTRIBUTE(__fallthrough__)
  790. # define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
  791. #else
  792. # define XXH_FALLTHROUGH /* fallthrough */
  793. #endif
  794. /*! @endcond */
  795. /*! @cond Doxygen ignores this part */
  796. /*
  797. * Define XXH_NOESCAPE for annotated pointers in public API.
  798. * https://clang.llvm.org/docs/AttributeReference.html#noescape
  799. * As of writing this, only supported by clang.
  800. */
  801. #if XXH_HAS_ATTRIBUTE(noescape)
  802. # define XXH_NOESCAPE __attribute__((__noescape__))
  803. #else
  804. # define XXH_NOESCAPE
  805. #endif
  806. /*! @endcond */
  807. /*!
  808. * @}
  809. * @ingroup public
  810. * @{
  811. */
  812. #ifndef XXH_NO_LONG_LONG
  813. /*-**********************************************************************
  814. * 64-bit hash
  815. ************************************************************************/
  816. #if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
  817. /*!
  818. * @brief An unsigned 64-bit integer.
  819. *
  820. * Not necessarily defined to `uint64_t` but functionally equivalent.
  821. */
  822. typedef uint64_t XXH64_hash_t;
  823. #elif !defined (__VMS) \
  824. && (defined (__cplusplus) \
  825. || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
  826. # ifdef _AIX
  827. # include <inttypes.h>
  828. # else
  829. # include <stdint.h>
  830. # endif
  831. typedef uint64_t XXH64_hash_t;
  832. #else
  833. # include <limits.h>
  834. # if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
  835. /* LP64 ABI says uint64_t is unsigned long */
  836. typedef unsigned long XXH64_hash_t;
  837. # else
  838. /* the following type must have a width of 64-bit */
  839. typedef unsigned long long XXH64_hash_t;
  840. # endif
  841. #endif
  842. /*!
  843. * @}
  844. *
  845. * @defgroup XXH64_family XXH64 family
  846. * @ingroup public
  847. * @{
  848. * Contains functions used in the classic 64-bit xxHash algorithm.
  849. *
  850. * @note
  851. * XXH3 provides competitive speed for both 32-bit and 64-bit systems,
  852. * and offers true 64/128 bit hash results.
  853. * It provides better speed for systems with vector processing capabilities.
  854. */
  855. /*!
  856. * @brief Calculates the 64-bit hash of @p input using xxHash64.
  857. *
  858. * @param input The block of data to be hashed, at least @p length bytes in size.
  859. * @param length The length of @p input, in bytes.
  860. * @param seed The 64-bit seed to alter the hash's output predictably.
  861. *
  862. * @pre
  863. * The memory between @p input and @p input + @p length must be valid,
  864. * readable, contiguous memory. However, if @p length is `0`, @p input may be
  865. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  866. *
  867. * @return The calculated 64-bit xxHash64 value.
  868. *
  869. * @see @ref single_shot_example "Single Shot Example" for an example.
  870. */
  871. XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
  872. /******* Streaming *******/
  873. #ifndef XXH_NO_STREAM
  874. /*!
  875. * @brief The opaque state struct for the XXH64 streaming API.
  876. *
  877. * @see XXH64_state_s for details.
  878. * @see @ref streaming_example "Streaming Example"
  879. */
  880. typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
  881. /*!
  882. * @brief Allocates an @ref XXH64_state_t.
  883. *
  884. * @return An allocated pointer of @ref XXH64_state_t on success.
  885. * @return `NULL` on failure.
  886. *
  887. * @note Must be freed with XXH64_freeState().
  888. *
  889. * @see @ref streaming_example "Streaming Example"
  890. */
  891. XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);
  892. /*!
  893. * @brief Frees an @ref XXH64_state_t.
  894. *
  895. * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState().
  896. *
  897. * @return @ref XXH_OK.
  898. *
  899. * @note @p statePtr must be allocated with XXH64_createState().
  900. *
  901. * @see @ref streaming_example "Streaming Example"
  902. */
  903. XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
  904. /*!
  905. * @brief Copies one @ref XXH64_state_t to another.
  906. *
  907. * @param dst_state The state to copy to.
  908. * @param src_state The state to copy from.
  909. * @pre
  910. * @p dst_state and @p src_state must not be `NULL` and must not overlap.
  911. */
  912. XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);
  913. /*!
  914. * @brief Resets an @ref XXH64_state_t to begin a new hash.
  915. *
  916. * @param statePtr The state struct to reset.
  917. * @param seed The 64-bit seed to alter the hash result predictably.
  918. *
  919. * @pre
  920. * @p statePtr must not be `NULL`.
  921. *
  922. * @return @ref XXH_OK on success.
  923. * @return @ref XXH_ERROR on failure.
  924. *
  925. * @note This function resets and seeds a state. Call it before @ref XXH64_update().
  926. *
  927. * @see @ref streaming_example "Streaming Example"
  928. */
  929. XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed);
  930. /*!
  931. * @brief Consumes a block of @p input to an @ref XXH64_state_t.
  932. *
  933. * @param statePtr The state struct to update.
  934. * @param input The block of data to be hashed, at least @p length bytes in size.
  935. * @param length The length of @p input, in bytes.
  936. *
  937. * @pre
  938. * @p statePtr must not be `NULL`.
  939. * @pre
  940. * The memory between @p input and @p input + @p length must be valid,
  941. * readable, contiguous memory. However, if @p length is `0`, @p input may be
  942. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  943. *
  944. * @return @ref XXH_OK on success.
  945. * @return @ref XXH_ERROR on failure.
  946. *
  947. * @note Call this to incrementally consume blocks of data.
  948. *
  949. * @see @ref streaming_example "Streaming Example"
  950. */
  951. XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
  952. /*!
  953. * @brief Returns the calculated hash value from an @ref XXH64_state_t.
  954. *
  955. * @param statePtr The state struct to calculate the hash from.
  956. *
  957. * @pre
  958. * @p statePtr must not be `NULL`.
  959. *
  960. * @return The calculated 64-bit xxHash64 value from that state.
  961. *
  962. * @note
  963. * Calling XXH64_digest() will not affect @p statePtr, so you can update,
  964. * digest, and update again.
  965. *
  966. * @see @ref streaming_example "Streaming Example"
  967. */
  968. XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr);
  969. #endif /* !XXH_NO_STREAM */
  970. /******* Canonical representation *******/
  971. /*!
  972. * @brief Canonical (big endian) representation of @ref XXH64_hash_t.
  973. */
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; /* 8-byte big endian array, filled by XXH64_canonicalFromHash() */
  975. /*!
  976. * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t.
  977. *
  978. * @param dst The @ref XXH64_canonical_t pointer to be stored to.
  979. * @param hash The @ref XXH64_hash_t to be converted.
  980. *
  981. * @pre
  982. * @p dst must not be `NULL`.
  983. *
  984. * @see @ref canonical_representation_example "Canonical Representation Example"
  985. */
  986. XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);
  987. /*!
  988. * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t.
  989. *
  990. * @param src The @ref XXH64_canonical_t to convert.
  991. *
  992. * @pre
  993. * @p src must not be `NULL`.
  994. *
  995. * @return The converted hash.
  996. *
  997. * @see @ref canonical_representation_example "Canonical Representation Example"
  998. */
  999. XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);
  1000. #ifndef XXH_NO_XXH3
  1001. /*!
  1002. * @}
  1003. * ************************************************************************
  1004. * @defgroup XXH3_family XXH3 family
  1005. * @ingroup public
  1006. * @{
  1007. *
  1008. * XXH3 is a more recent hash algorithm featuring:
  1009. * - Improved speed for both small and large inputs
  1010. * - True 64-bit and 128-bit outputs
  1011. * - SIMD acceleration
  1012. * - Improved 32-bit viability
  1013. *
  1014. * Speed analysis methodology is explained here:
  1015. *
  1016. * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
  1017. *
  1018. * Compared to XXH64, expect XXH3 to run approximately
  1019. * ~2x faster on large inputs and >3x faster on small ones,
  1020. * exact differences vary depending on platform.
  1021. *
  1022. * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
  1023. * but does not require it.
  1024. * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
  1025. * at competitive speeds, even without vector support. Further details are
  1026. * explained in the implementation.
  1027. *
  1028. * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD
  1029. * implementations for many common platforms:
  1030. * - AVX512
  1031. * - AVX2
  1032. * - SSE2
  1033. * - ARM NEON
  1034. * - WebAssembly SIMD128
  1035. * - POWER8 VSX
  1036. * - s390x ZVector
  1037. * This can be controlled via the @ref XXH_VECTOR macro, but it automatically
  1038. * selects the best version according to predefined macros. For the x86 family, an
  1039. * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c.
  1040. *
  1041. * XXH3 implementation is portable:
  1042. * it has a generic C90 formulation that can be compiled on any platform,
  1043. * all implementations generate exactly the same hash value on all platforms.
  1044. * Starting from v0.8.0, it's also labelled "stable", meaning that
  1045. * any future version will also generate the same hash value.
  1046. *
  1047. * XXH3 offers 2 variants, _64bits and _128bits.
  1048. *
  1049. * When only 64 bits are needed, prefer invoking the _64bits variant, as it
  1050. * reduces the amount of mixing, resulting in faster speed on small inputs.
  1051. * It's also generally simpler to manipulate a scalar return type than a struct.
  1052. *
  1053. * The API supports one-shot hashing, streaming mode, and custom secrets.
  1054. */
  1055. /*-**********************************************************************
  1056. * XXH3 64-bit variant
  1057. ************************************************************************/
  1058. /*!
  1059. * @brief Calculates 64-bit unseeded variant of XXH3 hash of @p input.
  1060. *
  1061. * @param input The block of data to be hashed, at least @p length bytes in size.
  1062. * @param length The length of @p input, in bytes.
  1063. *
  1064. * @pre
  1065. * The memory between @p input and @p input + @p length must be valid,
  1066. * readable, contiguous memory. However, if @p length is `0`, @p input may be
  1067. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  1068. *
  1069. * @return The calculated 64-bit XXH3 hash value.
  1070. *
  1071. * @note
  1072. * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`, however
  1073. * it may have slightly better performance due to constant propagation of the
  1074. * defaults.
  1075. *
  1076. * @see
  1077. * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
  1078. * @see @ref single_shot_example "Single Shot Example" for an example.
  1079. */
  1080. XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
  1081. /*!
  1082. * @brief Calculates 64-bit seeded variant of XXH3 hash of @p input.
  1083. *
  1084. * @param input The block of data to be hashed, at least @p length bytes in size.
  1085. * @param length The length of @p input, in bytes.
  1086. * @param seed The 64-bit seed to alter the hash result predictably.
  1087. *
  1088. * @pre
  1089. * The memory between @p input and @p input + @p length must be valid,
  1090. * readable, contiguous memory. However, if @p length is `0`, @p input may be
  1091. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  1092. *
  1093. * @return The calculated 64-bit XXH3 hash value.
  1094. *
  1095. * @note
  1096. * seed == 0 produces the same results as @ref XXH3_64bits().
  1097. *
  1098. * This variant generates a custom secret on the fly based on default secret
  1099. * altered using the @p seed value.
  1100. *
  1101. * While this operation is decently fast, note that it's not completely free.
  1102. *
  1103. * @see @ref single_shot_example "Single Shot Example" for an example.
  1104. */
  1105. XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
  1106. /*!
  1107. * The bare minimum size for a custom secret.
  1108. *
  1109. * @see
  1110. * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
  1111. * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
  1112. */
  1113. #define XXH3_SECRET_SIZE_MIN 136
  1114. /*!
  1115. * @brief Calculates 64-bit variant of XXH3 with a custom "secret".
  1116. *
  1117. * @param data The block of data to be hashed, at least @p len bytes in size.
  1118. * @param len The length of @p data, in bytes.
  1119. * @param secret The secret data.
  1120. * @param secretSize The length of @p secret, in bytes.
  1121. *
  1122. * @return The calculated 64-bit XXH3 hash value.
  1123. *
  1124. * @pre
  1125. * The memory between @p data and @p data + @p len must be valid,
* readable, contiguous memory. However, if @p len is `0`, @p data may be
  1127. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  1128. *
  1129. * It's possible to provide any blob of bytes as a "secret" to generate the hash.
  1130. * This makes it more difficult for an external actor to prepare an intentional collision.
  1131. * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
  1132. * However, the quality of the secret impacts the dispersion of the hash algorithm.
  1133. * Therefore, the secret _must_ look like a bunch of random bytes.
  1134. * Avoid "trivial" or structured data such as repeated sequences or a text document.
  1135. * Whenever in doubt about the "randomness" of the blob of bytes,
  1136. * consider employing @ref XXH3_generateSecret() instead (see below).
  1137. * It will generate a proper high entropy secret derived from the blob of bytes.
  1138. * Another advantage of using XXH3_generateSecret() is that
  1139. * it guarantees that all bits within the initial blob of bytes
  1140. * will impact every bit of the output.
  1141. * This is not necessarily the case when using the blob of bytes directly
  1142. * because, when hashing _small_ inputs, only a portion of the secret is employed.
  1143. *
  1144. * @see @ref single_shot_example "Single Shot Example" for an example.
  1145. */
  1146. XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
  1147. /******* Streaming *******/
  1148. #ifndef XXH_NO_STREAM
  1149. /*
  1150. * Streaming requires state maintenance.
  1151. * This operation costs memory and CPU.
  1152. * As a consequence, streaming is slower than one-shot hashing.
  1153. * For better performance, prefer one-shot functions whenever applicable.
  1154. */
  1155. /*!
  1156. * @brief The opaque state struct for the XXH3 streaming API.
  1157. *
  1158. * @see XXH3_state_s for details.
  1159. * @see @ref streaming_example "Streaming Example"
  1160. */
  1161. typedef struct XXH3_state_s XXH3_state_t;
  1162. XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
  1163. XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
  1164. /*!
  1165. * @brief Copies one @ref XXH3_state_t to another.
  1166. *
  1167. * @param dst_state The state to copy to.
  1168. * @param src_state The state to copy from.
  1169. * @pre
  1170. * @p dst_state and @p src_state must not be `NULL` and must not overlap.
  1171. */
  1172. XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
  1173. /*!
  1174. * @brief Resets an @ref XXH3_state_t to begin a new hash.
  1175. *
  1176. * @param statePtr The state struct to reset.
  1177. *
  1178. * @pre
  1179. * @p statePtr must not be `NULL`.
  1180. *
  1181. * @return @ref XXH_OK on success.
  1182. * @return @ref XXH_ERROR on failure.
  1183. *
  1184. * @note
* - This function resets `statePtr` and generates a secret with default parameters.
  1186. * - Call this function before @ref XXH3_64bits_update().
  1187. * - Digest will be equivalent to `XXH3_64bits()`.
  1188. *
  1189. * @see @ref streaming_example "Streaming Example"
  1190. *
  1191. */
  1192. XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
  1193. /*!
  1194. * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
  1195. *
  1196. * @param statePtr The state struct to reset.
  1197. * @param seed The 64-bit seed to alter the hash result predictably.
  1198. *
  1199. * @pre
  1200. * @p statePtr must not be `NULL`.
  1201. *
  1202. * @return @ref XXH_OK on success.
  1203. * @return @ref XXH_ERROR on failure.
  1204. *
  1205. * @note
* - This function resets `statePtr` and generates a secret from `seed`.
  1207. * - Call this function before @ref XXH3_64bits_update().
  1208. * - Digest will be equivalent to `XXH3_64bits_withSeed()`.
  1209. *
  1210. * @see @ref streaming_example "Streaming Example"
  1211. *
  1212. */
  1213. XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
  1214. /*!
  1215. * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
  1216. *
  1217. * @param statePtr The state struct to reset.
  1218. * @param secret The secret data.
  1219. * @param secretSize The length of @p secret, in bytes.
  1220. *
  1221. * @pre
  1222. * @p statePtr must not be `NULL`.
  1223. *
  1224. * @return @ref XXH_OK on success.
  1225. * @return @ref XXH_ERROR on failure.
  1226. *
  1227. * @note
  1228. * `secret` is referenced, it _must outlive_ the hash streaming session.
  1229. *
  1230. * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
  1231. * and the quality of produced hash values depends on secret's entropy
  1232. * (secret's content should look like a bunch of random bytes).
  1233. * When in doubt about the randomness of a candidate `secret`,
  1234. * consider employing `XXH3_generateSecret()` instead (see below).
  1235. *
  1236. * @see @ref streaming_example "Streaming Example"
  1237. */
  1238. XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
  1239. /*!
  1240. * @brief Consumes a block of @p input to an @ref XXH3_state_t.
  1241. *
  1242. * @param statePtr The state struct to update.
  1243. * @param input The block of data to be hashed, at least @p length bytes in size.
  1244. * @param length The length of @p input, in bytes.
  1245. *
  1246. * @pre
  1247. * @p statePtr must not be `NULL`.
  1248. * @pre
  1249. * The memory between @p input and @p input + @p length must be valid,
  1250. * readable, contiguous memory. However, if @p length is `0`, @p input may be
  1251. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  1252. *
  1253. * @return @ref XXH_OK on success.
  1254. * @return @ref XXH_ERROR on failure.
  1255. *
  1256. * @note Call this to incrementally consume blocks of data.
  1257. *
  1258. * @see @ref streaming_example "Streaming Example"
  1259. */
  1260. XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
  1261. /*!
  1262. * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
  1263. *
  1264. * @param statePtr The state struct to calculate the hash from.
  1265. *
  1266. * @pre
  1267. * @p statePtr must not be `NULL`.
  1268. *
  1269. * @return The calculated XXH3 64-bit hash value from that state.
  1270. *
  1271. * @note
  1272. * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
  1273. * digest, and update again.
  1274. *
  1275. * @see @ref streaming_example "Streaming Example"
  1276. */
  1277. XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
  1278. #endif /* !XXH_NO_STREAM */
  1279. /* note : canonical representation of XXH3 is the same as XXH64
  1280. * since they both produce XXH64_hash_t values */
  1281. /*-**********************************************************************
  1282. * XXH3 128-bit variant
  1283. ************************************************************************/
  1284. /*!
  1285. * @brief The return value from 128-bit hashes.
  1286. *
  1287. * Stored in little endian order, although the fields themselves are in native
  1288. * endianness.
  1289. */
typedef struct {
XXH64_hash_t low64; /*!< Lower 64 bits: `value & 0xFFFFFFFFFFFFFFFF` */
XXH64_hash_t high64; /*!< Upper 64 bits: `value >> 64` */
} XXH128_hash_t;
  1294. /*!
  1295. * @brief Calculates 128-bit unseeded variant of XXH3 of @p data.
  1296. *
* @param data The block of data to be hashed, at least @p len bytes in size.
  1298. * @param len The length of @p data, in bytes.
  1299. *
  1300. * @return The calculated 128-bit variant of XXH3 value.
  1301. *
  1302. * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
  1303. * for shorter inputs.
  1304. *
  1305. * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`, however
  1306. * it may have slightly better performance due to constant propagation of the
  1307. * defaults.
  1308. *
  1309. * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
  1310. * @see @ref single_shot_example "Single Shot Example" for an example.
  1311. */
  1312. XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
  1313. /*! @brief Calculates 128-bit seeded variant of XXH3 hash of @p data.
  1314. *
* @param data The block of data to be hashed, at least @p len bytes in size.
  1316. * @param len The length of @p data, in bytes.
  1317. * @param seed The 64-bit seed to alter the hash result predictably.
  1318. *
  1319. * @return The calculated 128-bit variant of XXH3 value.
  1320. *
  1321. * @note
* seed == 0 produces the same results as @ref XXH3_128bits().
  1323. *
  1324. * This variant generates a custom secret on the fly based on default secret
  1325. * altered using the @p seed value.
  1326. *
  1327. * While this operation is decently fast, note that it's not completely free.
  1328. *
  1329. * @see XXH3_128bits(), XXH3_128bits_withSecret(): other seeding variants
  1330. * @see @ref single_shot_example "Single Shot Example" for an example.
  1331. */
  1332. XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
  1333. /*!
  1334. * @brief Calculates 128-bit variant of XXH3 with a custom "secret".
  1335. *
  1336. * @param data The block of data to be hashed, at least @p len bytes in size.
  1337. * @param len The length of @p data, in bytes.
  1338. * @param secret The secret data.
  1339. * @param secretSize The length of @p secret, in bytes.
  1340. *
  1341. * @return The calculated 128-bit variant of XXH3 value.
  1342. *
  1343. * It's possible to provide any blob of bytes as a "secret" to generate the hash.
  1344. * This makes it more difficult for an external actor to prepare an intentional collision.
  1345. * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
  1346. * However, the quality of the secret impacts the dispersion of the hash algorithm.
  1347. * Therefore, the secret _must_ look like a bunch of random bytes.
  1348. * Avoid "trivial" or structured data such as repeated sequences or a text document.
  1349. * Whenever in doubt about the "randomness" of the blob of bytes,
  1350. * consider employing @ref XXH3_generateSecret() instead (see below).
  1351. * It will generate a proper high entropy secret derived from the blob of bytes.
  1352. * Another advantage of using XXH3_generateSecret() is that
  1353. * it guarantees that all bits within the initial blob of bytes
  1354. * will impact every bit of the output.
  1355. * This is not necessarily the case when using the blob of bytes directly
  1356. * because, when hashing _small_ inputs, only a portion of the secret is employed.
  1357. *
  1358. * @see @ref single_shot_example "Single Shot Example" for an example.
  1359. */
  1360. XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
  1361. /******* Streaming *******/
  1362. #ifndef XXH_NO_STREAM
  1363. /*
  1364. * Streaming requires state maintenance.
  1365. * This operation costs memory and CPU.
  1366. * As a consequence, streaming is slower than one-shot hashing.
  1367. * For better performance, prefer one-shot functions whenever applicable.
  1368. *
  1369. * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
  1370. * Use already declared XXH3_createState() and XXH3_freeState().
  1371. *
  1372. * All reset and streaming functions have same meaning as their 64-bit counterpart.
  1373. */
  1374. /*!
  1375. * @brief Resets an @ref XXH3_state_t to begin a new hash.
  1376. *
  1377. * @param statePtr The state struct to reset.
  1378. *
  1379. * @pre
  1380. * @p statePtr must not be `NULL`.
  1381. *
  1382. * @return @ref XXH_OK on success.
  1383. * @return @ref XXH_ERROR on failure.
  1384. *
  1385. * @note
* - This function resets `statePtr` and generates a secret with default parameters.
  1387. * - Call it before @ref XXH3_128bits_update().
  1388. * - Digest will be equivalent to `XXH3_128bits()`.
  1389. *
  1390. * @see @ref streaming_example "Streaming Example"
  1391. */
  1392. XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
  1393. /*!
  1394. * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
  1395. *
  1396. * @param statePtr The state struct to reset.
  1397. * @param seed The 64-bit seed to alter the hash result predictably.
  1398. *
  1399. * @pre
  1400. * @p statePtr must not be `NULL`.
  1401. *
  1402. * @return @ref XXH_OK on success.
  1403. * @return @ref XXH_ERROR on failure.
  1404. *
  1405. * @note
* - This function resets `statePtr` and generates a secret from `seed`.
  1407. * - Call it before @ref XXH3_128bits_update().
  1408. * - Digest will be equivalent to `XXH3_128bits_withSeed()`.
  1409. *
  1410. * @see @ref streaming_example "Streaming Example"
  1411. */
  1412. XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
  1413. /*!
  1414. * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
  1415. *
  1416. * @param statePtr The state struct to reset.
  1417. * @param secret The secret data.
  1418. * @param secretSize The length of @p secret, in bytes.
  1419. *
  1420. * @pre
  1421. * @p statePtr must not be `NULL`.
  1422. *
  1423. * @return @ref XXH_OK on success.
  1424. * @return @ref XXH_ERROR on failure.
  1425. *
  1426. * `secret` is referenced, it _must outlive_ the hash streaming session.
  1427. * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
  1428. * and the quality of produced hash values depends on secret's entropy
  1429. * (secret's content should look like a bunch of random bytes).
  1430. * When in doubt about the randomness of a candidate `secret`,
  1431. * consider employing `XXH3_generateSecret()` instead (see below).
  1432. *
  1433. * @see @ref streaming_example "Streaming Example"
  1434. */
  1435. XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
  1436. /*!
  1437. * @brief Consumes a block of @p input to an @ref XXH3_state_t.
  1438. *
  1439. * Call this to incrementally consume blocks of data.
  1440. *
  1441. * @param statePtr The state struct to update.
  1442. * @param input The block of data to be hashed, at least @p length bytes in size.
  1443. * @param length The length of @p input, in bytes.
  1444. *
  1445. * @pre
  1446. * @p statePtr must not be `NULL`.
  1447. *
  1448. * @return @ref XXH_OK on success.
  1449. * @return @ref XXH_ERROR on failure.
  1450. *
  1451. * @note
  1452. * The memory between @p input and @p input + @p length must be valid,
  1453. * readable, contiguous memory. However, if @p length is `0`, @p input may be
  1454. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  1455. *
  1456. */
  1457. XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
  1458. /*!
  1459. * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
  1460. *
  1461. * @param statePtr The state struct to calculate the hash from.
  1462. *
  1463. * @pre
  1464. * @p statePtr must not be `NULL`.
  1465. *
  1466. * @return The calculated XXH3 128-bit hash value from that state.
  1467. *
  1468. * @note
  1469. * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
  1470. * digest, and update again.
  1471. *
  1472. */
  1473. XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
  1474. #endif /* !XXH_NO_STREAM */
/* Following helper functions make it possible to compare XXH128_hash_t values.
  1476. * Since XXH128_hash_t is a structure, this capability is not offered by the language.
  1477. * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
  1478. /*!
  1479. * @brief Check equality of two XXH128_hash_t values
  1480. *
  1481. * @param h1 The 128-bit hash value.
  1482. * @param h2 Another 128-bit hash value.
  1483. *
  1484. * @return `1` if `h1` and `h2` are equal.
  1485. * @return `0` if they are not.
  1486. */
  1487. XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
  1488. /*!
  1489. * @brief Compares two @ref XXH128_hash_t
  1490. *
  1491. * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
  1492. *
  1493. * @param h128_1 Left-hand side value
  1494. * @param h128_2 Right-hand side value
  1495. *
  1496. * @return >0 if @p h128_1 > @p h128_2
  1497. * @return =0 if @p h128_1 == @p h128_2
  1498. * @return <0 if @p h128_1 < @p h128_2
  1499. */
  1500. XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
  1501. /******* Canonical representation *******/
/*!
 * @brief Canonical (big endian) representation of @ref XXH128_hash_t.
 */
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t; /* 16-byte big endian array, filled by XXH128_canonicalFromHash() */
  1503. /*!
  1504. * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
  1505. *
  1506. * @param dst The @ref XXH128_canonical_t pointer to be stored to.
  1507. * @param hash The @ref XXH128_hash_t to be converted.
  1508. *
  1509. * @pre
  1510. * @p dst must not be `NULL`.
  1511. * @see @ref canonical_representation_example "Canonical Representation Example"
  1512. */
  1513. XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
  1514. /*!
  1515. * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
  1516. *
  1517. * @param src The @ref XXH128_canonical_t to convert.
  1518. *
  1519. * @pre
  1520. * @p src must not be `NULL`.
  1521. *
  1522. * @return The converted hash.
  1523. * @see @ref canonical_representation_example "Canonical Representation Example"
  1524. */
  1525. XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
  1526. #endif /* !XXH_NO_XXH3 */
  1527. #endif /* XXH_NO_LONG_LONG */
  1528. /*!
  1529. * @}
  1530. */
  1531. #endif /* XXHASH_H_5627135585666179 */
  1532. #if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
  1533. #define XXHASH_H_STATIC_13879238742
  1534. /* ****************************************************************************
  1535. * This section contains declarations which are not guaranteed to remain stable.
  1536. * They may change in future versions, becoming incompatible with a different
  1537. * version of the library.
  1538. * These declarations should only be used with static linking.
  1539. * Never use them in association with dynamic linking!
  1540. ***************************************************************************** */
  1541. /*
  1542. * These definitions are only present to allow static allocation
  1543. * of XXH states, on stack or in a struct, for example.
  1544. * Never **ever** access their members directly.
  1545. */
  1546. /*!
  1547. * @internal
  1548. * @brief Structure for XXH32 streaming API.
  1549. *
  1550. * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
  1551. * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
  1552. * an opaque type. This allows fields to safely be changed.
  1553. *
  1554. * Typedef'd to @ref XXH32_state_t.
  1555. * Do not access the members of this struct directly.
  1556. * @see XXH64_state_s, XXH3_state_s
  1557. */
struct XXH32_state_s {
    XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
    XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
    XXH32_hash_t v[4];         /*!< Accumulator lanes */
    XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
    XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32, in bytes */
    XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
};   /* typedef'd to XXH32_state_t */
  1566. #ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
  1567. /*!
  1568. * @internal
  1569. * @brief Structure for XXH64 streaming API.
  1570. *
  1571. * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
  1572. * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
  1573. * an opaque type. This allows fields to safely be changed.
  1574. *
  1575. * Typedef'd to @ref XXH64_state_t.
  1576. * Do not access the members of this struct directly.
  1577. * @see XXH32_state_s, XXH3_state_s
  1578. */
struct XXH64_state_s {
    XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
    XXH64_hash_t v[4];         /*!< Accumulator lanes */
    XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
    XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64, in bytes */
    XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyways */
    XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
};   /* typedef'd to XXH64_state_t */
  1587. #ifndef XXH_NO_XXH3
  1588. #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
  1589. # define XXH_ALIGN(n) _Alignas(n)
  1590. #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
  1591. /* In C++ alignas() is a keyword */
  1592. # define XXH_ALIGN(n) alignas(n)
  1593. #elif defined(__GNUC__)
  1594. # define XXH_ALIGN(n) __attribute__ ((aligned(n)))
  1595. #elif defined(_MSC_VER)
  1596. # define XXH_ALIGN(n) __declspec(align(n))
  1597. #else
  1598. # define XXH_ALIGN(n) /* disabled */
  1599. #endif
  1600. /* Old GCC versions only accept the attribute after the type in structures. */
  1601. #if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
  1602. && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
  1603. && defined(__GNUC__)
  1604. # define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
  1605. #else
  1606. # define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
  1607. #endif
  1608. /*!
  1609. * @brief The size of the internal XXH3 buffer.
  1610. *
  1611. * This is the optimal update size for incremental hashing.
  1612. *
  1613. * @see XXH3_64b_update(), XXH3_128b_update().
  1614. */
  1615. #define XXH3_INTERNALBUFFER_SIZE 256
  1616. /*!
  1617. * @internal
  1618. * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
  1619. *
  1620. * This is the size used in @ref XXH3_kSecret and the seeded functions.
  1621. *
  1622. * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
  1623. */
  1624. #define XXH3_SECRET_DEFAULT_SIZE 192
  1625. /*!
  1626. * @internal
  1627. * @brief Structure for XXH3 streaming API.
  1628. *
  1629. * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
  1630. * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
  1631. * Otherwise it is an opaque type.
  1632. * Never use this definition in combination with dynamic library.
  1633. * This allows fields to safely be changed in the future.
  1634. *
  1635. * @note ** This structure has a strict alignment requirement of 64 bytes!! **
  1636. * Do not allocate this with `malloc()` or `new`,
  1637. * it will not be sufficiently aligned.
  1638. * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
  1639. *
  1640. * Typedef'd to @ref XXH3_state_t.
 * Never access the members of this struct directly.
  1642. *
  1643. * @see XXH3_INITSTATE() for stack initialization.
  1644. * @see XXH3_createState(), XXH3_freeState().
  1645. * @see XXH32_state_s, XXH64_state_s
  1646. */
struct XXH3_state_s {
    XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
    /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
    XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
    /*!< Used to store a custom secret generated from a seed. */
    XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
    /*!< The internal buffer. @see XXH32_state_s::mem32 */
    XXH32_hash_t bufferedSize;
    /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
    XXH32_hash_t useSeed;
    /*!< Reserved field. Needed for padding on 64-bit. */
    size_t nbStripesSoFar;
    /*!< Number of stripes processed. */
    XXH64_hash_t totalLen;
    /*!< Total length hashed. 64-bit even on 32-bit targets. */
    size_t nbStripesPerBlock;
    /*!< Number of stripes per block. */
    size_t secretLimit;
    /*!< Size of @ref customSecret or @ref extSecret */
    XXH64_hash_t seed;
    /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
    XXH64_hash_t reserved64;
    /*!< Reserved field. */
    const unsigned char* extSecret;
    /*!< Reference to an external secret for the _withSecret variants, NULL
     *   for other variants. */
    /* note: there may be some padding at the end due to alignment on 64 bytes */
};  /* typedef'd to XXH3_state_t */
  1675. #undef XXH_ALIGN_MEMBER
  1676. /*!
  1677. * @brief Initializes a stack-allocated `XXH3_state_s`.
  1678. *
  1679. * When the @ref XXH3_state_t structure is merely emplaced on stack,
  1680. * it should be initialized with XXH3_INITSTATE() or a memset()
  1681. * in case its first reset uses XXH3_NNbits_reset_withSeed().
  1682. * This init can be omitted if the first reset uses default or _withSecret mode.
  1683. * This operation isn't necessary when the state is created with XXH3_createState().
  1684. * Note that this doesn't prepare the state for a streaming operation,
  1685. * it's still necessary to use XXH3_NNbits_reset*() afterwards.
  1686. */
  1687. #define XXH3_INITSTATE(XXH3_state_ptr) \
  1688. do { \
  1689. XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
  1690. tmp_xxh3_state_ptr->seed = 0; \
  1691. tmp_xxh3_state_ptr->extSecret = NULL; \
  1692. } while(0)
  1693. /*!
  1694. * @brief Calculates the 128-bit hash of @p data using XXH3.
  1695. *
  1696. * @param data The block of data to be hashed, at least @p len bytes in size.
  1697. * @param len The length of @p data, in bytes.
  1698. * @param seed The 64-bit seed to alter the hash's output predictably.
  1699. *
  1700. * @pre
  1701. * The memory between @p data and @p data + @p len must be valid,
  1702. * readable, contiguous memory. However, if @p len is `0`, @p data may be
  1703. * `NULL`. In C++, this also must be *TriviallyCopyable*.
  1704. *
  1705. * @return The calculated 128-bit XXH3 value.
  1706. *
  1707. * @see @ref single_shot_example "Single Shot Example" for an example.
  1708. */
  1709. XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
  1710. /* === Experimental API === */
  1711. /* Symbols defined below must be considered tied to a specific library version. */
  1712. /*!
  1713. * @brief Derive a high-entropy secret from any user-defined content, named customSeed.
  1714. *
  1715. * @param secretBuffer A writable buffer for derived high-entropy secret data.
  1716. * @param secretSize Size of secretBuffer, in bytes. Must be >= XXH3_SECRET_SIZE_MIN.
  1717. * @param customSeed A user-defined content.
  1718. * @param customSeedSize Size of customSeed, in bytes.
  1719. *
  1720. * @return @ref XXH_OK on success.
  1721. * @return @ref XXH_ERROR on failure.
  1722. *
  1723. * The generated secret can be used in combination with `*_withSecret()` functions.
  1724. * The `_withSecret()` variants are useful to provide a higher level of protection
  1725. * than 64-bit seed, as it becomes much more difficult for an external actor to
  1726. * guess how to impact the calculation logic.
  1727. *
  1728. * The function accepts as input a custom seed of any length and any content,
  1729. * and derives from it a high-entropy secret of length @p secretSize into an
  1730. * already allocated buffer @p secretBuffer.
  1731. *
  1732. * The generated secret can then be used with any `*_withSecret()` variant.
  1733. * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
  1734. * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
  1735. * are part of this list. They all accept a `secret` parameter
  1736. * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
  1737. * _and_ feature very high entropy (consist of random-looking bytes).
  1738. * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
  1739. * be employed to ensure proper quality.
  1740. *
  1741. * @p customSeed can be anything. It can have any size, even small ones,
  1742. * and its content can be anything, even "poor entropy" sources such as a bunch
  1743. * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
  1744. *
  1745. * @pre
  1746. * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
  1747. * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
  1748. *
  1749. * Example code:
  1750. * @code{.c}
  1751. * #include <stdio.h>
  1752. * #include <stdlib.h>
  1753. * #include <string.h>
  1754. * #define XXH_STATIC_LINKING_ONLY // expose unstable API
  1755. * #include "xxhash.h"
  1756. * // Hashes argv[2] using the entropy from argv[1].
  1757. * int main(int argc, char* argv[])
  1758. * {
  1759. * char secret[XXH3_SECRET_SIZE_MIN];
 * if (argc != 3) { return 1; }
  1761. * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
  1762. * XXH64_hash_t h = XXH3_64bits_withSecret(
  1763. * argv[2], strlen(argv[2]),
  1764. * secret, sizeof(secret)
  1765. * );
  1766. * printf("%016llx\n", (unsigned long long) h);
  1767. * }
  1768. * @endcode
  1769. */
  1770. XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
  1771. /*!
  1772. * @brief Generate the same secret as the _withSeed() variants.
  1773. *
  1774. * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
  1775. * @param seed The 64-bit seed to alter the hash result predictably.
  1776. *
  1777. * The generated secret can be used in combination with
 * `*_withSecret()` and `_withSecretandSeed()` variants.
  1779. *
  1780. * Example C++ `std::string` hash class:
  1781. * @code{.cpp}
  1782. * #include <string>
  1783. * #define XXH_STATIC_LINKING_ONLY // expose unstable API
  1784. * #include "xxhash.h"
  1785. * // Slow, seeds each time
  1786. * class HashSlow {
  1787. * XXH64_hash_t seed;
  1788. * public:
  1789. * HashSlow(XXH64_hash_t s) : seed{s} {}
  1790. * size_t operator()(const std::string& x) const {
  1791. * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
  1792. * }
  1793. * };
  1794. * // Fast, caches the seeded secret for future uses.
  1795. * class HashFast {
  1796. * unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
  1797. * public:
  1798. * HashFast(XXH64_hash_t s) {
 * XXH3_generateSecret_fromSeed(secret, s);
  1800. * }
  1801. * size_t operator()(const std::string& x) const {
  1802. * return size_t{
  1803. * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
  1804. * };
  1805. * }
  1806. * };
  1807. * @endcode
  1808. */
  1809. XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
  1810. /*!
  1811. * @brief Maximum size of "short" key in bytes.
  1812. */
  1813. #define XXH3_MIDSIZE_MAX 240
  1814. /*!
  1815. * @brief Calculates 64/128-bit seeded variant of XXH3 hash of @p data.
  1816. *
  1817. * @param data The block of data to be hashed, at least @p len bytes in size.
  1818. * @param len The length of @p data, in bytes.
  1819. * @param secret The secret data.
  1820. * @param secretSize The length of @p secret, in bytes.
  1821. * @param seed The 64-bit seed to alter the hash result predictably.
  1822. *
  1823. * These variants generate hash values using either:
  1824. * - @p seed for "short" keys (< @ref XXH3_MIDSIZE_MAX = 240 bytes)
  1825. * - @p secret for "large" keys (>= @ref XXH3_MIDSIZE_MAX).
  1826. *
  1827. * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
  1828. * `_withSeed()` has to generate the secret on the fly for "large" keys.
  1829. * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
  1830. * `_withSecret()` has to generate the masks on the fly for "small" keys,
  1831. * which requires more instructions than _withSeed() variants.
  1832. * Therefore, _withSecretandSeed variant combines the best of both worlds.
  1833. *
  1834. * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
  1835. * this variant produces *exactly* the same results as `_withSeed()` variant,
  1836. * hence offering only a pure speed benefit on "large" input,
  1837. * by skipping the need to regenerate the secret for every large input.
  1838. *
  1839. * Another usage scenario is to hash the secret to a 64-bit hash value,
  1840. * for example with XXH3_64bits(), which then becomes the seed,
  1841. * and then employ both the seed and the secret in _withSecretandSeed().
  1842. * On top of speed, an added benefit is that each bit in the secret
  1843. * has a 50% chance to swap each bit in the output, via its impact to the seed.
  1844. *
  1845. * This is not guaranteed when using the secret directly in "small data" scenarios,
  1846. * because only portions of the secret are employed for small data.
  1847. */
  1848. XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
  1849. XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
  1850. XXH_NOESCAPE const void* secret, size_t secretSize,
  1851. XXH64_hash_t seed);
  1852. /*!
  1853. * @brief Calculates 128-bit seeded variant of XXH3 hash of @p data.
  1854. *
 * @param input The memory segment to be hashed, at least @p length bytes in size.
  1856. * @param length The length of @p data, in bytes.
  1857. * @param secret The secret used to alter hash result predictably.
  1858. * @param secretSize The length of @p secret, in bytes (must be >= XXH3_SECRET_SIZE_MIN)
  1859. * @param seed64 The 64-bit seed to alter the hash result predictably.
  1860. *
  1861. * @return @ref XXH_OK on success.
  1862. * @return @ref XXH_ERROR on failure.
  1863. *
  1864. * @see XXH3_64bits_withSecretandSeed(): contract is the same.
  1865. */
  1866. XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
  1867. XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
  1868. XXH_NOESCAPE const void* secret, size_t secretSize,
  1869. XXH64_hash_t seed64);
  1870. #ifndef XXH_NO_STREAM
  1871. /*!
  1872. * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
  1873. *
  1874. * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
  1875. * @param secret The secret data.
  1876. * @param secretSize The length of @p secret, in bytes.
  1877. * @param seed64 The 64-bit seed to alter the hash result predictably.
  1878. *
  1879. * @return @ref XXH_OK on success.
  1880. * @return @ref XXH_ERROR on failure.
  1881. *
  1882. * @see XXH3_64bits_withSecretandSeed(). Contract is identical.
  1883. */
  1884. XXH_PUBLIC_API XXH_errorcode
  1885. XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
  1886. XXH_NOESCAPE const void* secret, size_t secretSize,
  1887. XXH64_hash_t seed64);
  1888. /*!
  1889. * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
  1890. *
  1891. * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
  1892. * @param secret The secret data.
  1893. * @param secretSize The length of @p secret, in bytes.
  1894. * @param seed64 The 64-bit seed to alter the hash result predictably.
  1895. *
  1896. * @return @ref XXH_OK on success.
  1897. * @return @ref XXH_ERROR on failure.
  1898. *
  1899. * @see XXH3_64bits_withSecretandSeed(). Contract is identical.
  1900. *
  1901. * Note: there was a bug in an earlier version of this function (<= v0.8.2)
  1902. * that would make it generate an incorrect hash value
  1903. * when @p seed == 0 and @p length < XXH3_MIDSIZE_MAX
  1904. * and @p secret is different from XXH3_generateSecret_fromSeed().
  1905. * As stated in the contract, the correct hash result must be
  1906. * the same as XXH3_128bits_withSeed() when @p length <= XXH3_MIDSIZE_MAX.
  1907. * Results generated by this older version are wrong, hence not comparable.
  1908. */
  1909. XXH_PUBLIC_API XXH_errorcode
  1910. XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
  1911. XXH_NOESCAPE const void* secret, size_t secretSize,
  1912. XXH64_hash_t seed64);
  1913. #endif /* !XXH_NO_STREAM */
  1914. #endif /* !XXH_NO_XXH3 */
  1915. #endif /* XXH_NO_LONG_LONG */
  1916. #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
  1917. # define XXH_IMPLEMENTATION
  1918. #endif
  1919. #endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
  1920. /* ======================================================================== */
  1921. /* ======================================================================== */
  1922. /* ======================================================================== */
  1923. /*-**********************************************************************
  1924. * xxHash implementation
  1925. *-**********************************************************************
  1926. * xxHash's implementation used to be hosted inside xxhash.c.
  1927. *
  1928. * However, inlining requires implementation to be visible to the compiler,
  1929. * hence be included alongside the header.
  1930. * Previously, implementation was hosted inside xxhash.c,
  1931. * which was then #included when inlining was activated.
  1932. * This construction created issues with a few build and install systems,
  1933. * as it required xxhash.c to be stored in /include directory.
  1934. *
  1935. * xxHash implementation is now directly integrated within xxhash.h.
  1936. * As a consequence, xxhash.c is no longer needed in /include.
  1937. *
  1938. * xxhash.c is still available and is still useful.
  1939. * In a "normal" setup, when xxhash is not inlined,
  1940. * xxhash.h only exposes the prototypes and public symbols,
  1941. * while xxhash.c can be built into an object file xxhash.o
  1942. * which can then be linked into the final binary.
  1943. ************************************************************************/
  1944. #if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
  1945. || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
  1946. # define XXH_IMPLEM_13a8737387
  1947. /* *************************************
  1948. * Tuning parameters
  1949. ***************************************/
  1950. /*!
  1951. * @defgroup tuning Tuning parameters
  1952. * @{
  1953. *
  1954. * Various macros to control xxHash's behavior.
  1955. */
  1956. #ifdef XXH_DOXYGEN
  1957. /*!
  1958. * @brief Define this to disable 64-bit code.
  1959. *
  1960. * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
  1961. */
  1962. # define XXH_NO_LONG_LONG
  1963. # undef XXH_NO_LONG_LONG /* don't actually */
  1964. /*!
  1965. * @brief Controls how unaligned memory is accessed.
  1966. *
  1967. * By default, access to unaligned memory is controlled by `memcpy()`, which is
  1968. * safe and portable.
  1969. *
  1970. * Unfortunately, on some target/compiler combinations, the generated assembly
  1971. * is sub-optimal.
  1972. *
  1973. * The below switch allow selection of a different access method
  1974. * in the search for improved performance.
  1975. *
  1976. * @par Possible options:
  1977. *
  1978. * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
  1979. * @par
  1980. * Use `memcpy()`. Safe and portable. Note that most modern compilers will
  1981. * eliminate the function call and treat it as an unaligned access.
  1982. *
  1983. * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
  1984. * @par
  1985. * Depends on compiler extensions and is therefore not portable.
  1986. * This method is safe _if_ your compiler supports it,
  1987. * and *generally* as fast or faster than `memcpy`.
  1988. *
  1989. * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
  1990. * @par
  1991. * Casts directly and dereferences. This method doesn't depend on the
  1992. * compiler, but it violates the C standard as it directly dereferences an
  1993. * unaligned pointer. It can generate buggy code on targets which do not
  1994. * support unaligned memory accesses, but in some circumstances, it's the
  1995. * only known way to get the most performance.
  1996. *
  1997. * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
  1998. * @par
  1999. * Also portable. This can generate the best code on old compilers which don't
  2000. * inline small `memcpy()` calls, and it might also be faster on big-endian
  2001. * systems which lack a native byteswap instruction. However, some compilers
  2002. * will emit literal byteshifts even if the target supports unaligned access.
  2003. *
  2004. *
  2005. * @warning
  2006. * Methods 1 and 2 rely on implementation-defined behavior. Use these with
  2007. * care, as what works on one compiler/platform/optimization level may cause
  2008. * another to read garbage data or even crash.
  2009. *
  2010. * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
  2011. *
  2012. * Prefer these methods in priority order (0 > 3 > 1 > 2)
  2013. */
  2014. # define XXH_FORCE_MEMORY_ACCESS 0
  2015. /*!
  2016. * @def XXH_SIZE_OPT
  2017. * @brief Controls how much xxHash optimizes for size.
  2018. *
  2019. * xxHash, when compiled, tends to result in a rather large binary size. This
  2020. * is mostly due to heavy usage to forced inlining and constant folding of the
  2021. * @ref XXH3_family to increase performance.
  2022. *
  2023. * However, some developers prefer size over speed. This option can
  2024. * significantly reduce the size of the generated code. When using the `-Os`
  2025. * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
  2026. * otherwise it is defined to 0.
  2027. *
  2028. * Most of these size optimizations can be controlled manually.
  2029. *
  2030. * This is a number from 0-2.
  2031. * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
  2032. * comes first.
  2033. * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
  2034. * conservative and disables hacks that increase code size. It implies the
  2035. * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
  2036. * and @ref XXH3_NEON_LANES == 8 if they are not already defined.
  2037. * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
  2038. * Performance may cry. For example, the single shot functions just use the
  2039. * streaming API.
  2040. */
  2041. # define XXH_SIZE_OPT 0
  2042. /*!
  2043. * @def XXH_FORCE_ALIGN_CHECK
  2044. * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
  2045. * and XXH64() only).
  2046. *
  2047. * This is an important performance trick for architectures without decent
  2048. * unaligned memory access performance.
  2049. *
  2050. * It checks for input alignment, and when conditions are met, uses a "fast
  2051. * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
  2052. * faster_ read speed.
  2053. *
  2054. * The check costs one initial branch per hash, which is generally negligible,
  2055. * but not zero.
  2056. *
  2057. * Moreover, it's not useful to generate an additional code path if memory
  2058. * access uses the same instruction for both aligned and unaligned
  2059. * addresses (e.g. x86 and aarch64).
  2060. *
  2061. * In these cases, the alignment check can be removed by setting this macro to 0.
  2062. * Then the code will always use unaligned memory access.
  2063. * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips
  2064. * which are platforms known to offer good unaligned memory accesses performance.
  2065. *
  2066. * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
  2067. *
  2068. * This option does not affect XXH3 (only XXH32 and XXH64).
  2069. */
  2070. # define XXH_FORCE_ALIGN_CHECK 0
  2071. /*!
  2072. * @def XXH_NO_INLINE_HINTS
  2073. * @brief When non-zero, sets all functions to `static`.
  2074. *
  2075. * By default, xxHash tries to force the compiler to inline almost all internal
  2076. * functions.
  2077. *
  2078. * This can usually improve performance due to reduced jumping and improved
  2079. * constant folding, but significantly increases the size of the binary which
  2080. * might not be favorable.
  2081. *
  2082. * Additionally, sometimes the forced inlining can be detrimental to performance,
  2083. * depending on the architecture.
  2084. *
  2085. * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
  2086. * compiler full control on whether to inline or not.
  2087. *
  2088. * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
  2089. * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
  2090. */
  2091. # define XXH_NO_INLINE_HINTS 0
  2092. /*!
  2093. * @def XXH3_INLINE_SECRET
  2094. * @brief Determines whether to inline the XXH3 withSecret code.
  2095. *
  2096. * When the secret size is known, the compiler can improve the performance
  2097. * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
  2098. *
  2099. * However, if the secret size is not known, it doesn't have any benefit. This
  2100. * happens when xxHash is compiled into a global symbol. Therefore, if
  2101. * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
  2102. *
  2103. * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers
  2104. * that are *sometimes* force inline on -Og, and it is impossible to automatically
  2105. * detect this optimization level.
  2106. */
  2107. # define XXH3_INLINE_SECRET 0
  2108. /*!
  2109. * @def XXH32_ENDJMP
  2110. * @brief Whether to use a jump for `XXH32_finalize`.
  2111. *
  2112. * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
  2113. * This is generally preferable for performance,
  2114. * but depending on exact architecture, a jmp may be preferable.
  2115. *
  2116. * This setting is only possibly making a difference for very small inputs.
  2117. */
  2118. # define XXH32_ENDJMP 0
  2119. /*!
  2120. * @internal
  2121. * @brief Redefines old internal names.
  2122. *
  2123. * For compatibility with code that uses xxHash's internals before the names
  2124. * were changed to improve namespacing. There is no other reason to use this.
  2125. */
  2126. # define XXH_OLD_NAMES
  2127. # undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
  2128. /*!
  2129. * @def XXH_NO_STREAM
  2130. * @brief Disables the streaming API.
  2131. *
  2132. * When xxHash is not inlined and the streaming functions are not used, disabling
  2133. * the streaming functions can improve code size significantly, especially with
  2134. * the @ref XXH3_family which tends to make constant folded copies of itself.
  2135. */
  2136. # define XXH_NO_STREAM
  2137. # undef XXH_NO_STREAM /* don't actually */
  2138. #endif /* XXH_DOXYGEN */
  2139. /*!
  2140. * @}
  2141. */
  2142. #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
  2143. /* prefer __packed__ structures (method 1) for GCC
  2144. * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy
  2145. * which for some reason does unaligned loads. */
  2146. # if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
  2147. # define XXH_FORCE_MEMORY_ACCESS 1
  2148. # endif
  2149. #endif
  2150. #ifndef XXH_SIZE_OPT
  2151. /* default to 1 for -Os or -Oz */
  2152. # if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
  2153. # define XXH_SIZE_OPT 1
  2154. # else
  2155. # define XXH_SIZE_OPT 0
  2156. # endif
  2157. #endif
  2158. #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
  2159. /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
  2160. # if XXH_SIZE_OPT >= 1 || \
  2161. defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
  2162. || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */
  2163. # define XXH_FORCE_ALIGN_CHECK 0
  2164. # else
  2165. # define XXH_FORCE_ALIGN_CHECK 1
  2166. # endif
  2167. #endif
  2168. #ifndef XXH_NO_INLINE_HINTS
  2169. # if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
  2170. # define XXH_NO_INLINE_HINTS 1
  2171. # else
  2172. # define XXH_NO_INLINE_HINTS 0
  2173. # endif
  2174. #endif
  2175. #ifndef XXH3_INLINE_SECRET
  2176. # if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
  2177. || !defined(XXH_INLINE_ALL)
  2178. # define XXH3_INLINE_SECRET 0
  2179. # else
  2180. # define XXH3_INLINE_SECRET 1
  2181. # endif
  2182. #endif
  2183. #ifndef XXH32_ENDJMP
  2184. /* generally preferable for performance */
  2185. # define XXH32_ENDJMP 0
  2186. #endif
  2187. /*!
  2188. * @defgroup impl Implementation
  2189. * @{
  2190. */
  2191. /* *************************************
  2192. * Includes & Memory related functions
  2193. ***************************************/
  2194. #if defined(XXH_NO_STREAM)
  2195. /* nothing */
  2196. #elif defined(XXH_NO_STDLIB)
  2197. /* When requesting to disable any mention of stdlib,
 * the library loses the ability to invoke malloc() / free().
  2199. * In practice, it means that functions like `XXH*_createState()`
  2200. * will always fail, and return NULL.
  2201. * This flag is useful in situations where
  2202. * xxhash.h is integrated into some kernel, embedded or limited environment
  2203. * without access to dynamic allocation.
  2204. */
/*! @internal Allocation is disabled under XXH_NO_STDLIB: always fails, returns NULL. */
static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
/*! @internal Nothing to release: the stub XXH_malloc() above never allocates. */
static void XXH_free(void* p) { (void)p; }
  2207. #else
  2208. /*
  2209. * Modify the local functions below should you wish to use
  2210. * different memory routines for malloc() and free()
  2211. */
  2212. #include <stdlib.h>
/*!
 * @internal
 * @brief Modify this function to use a different routine than malloc().
 *
 * Returns `NULL` on allocation failure, like standard malloc().
 */
static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
/*!
 * @internal
 * @brief Modify this function to use a different routine than free().
 *
 * Like standard free(), accepts `NULL` as a no-op.
 */
static void XXH_free(void* p) { free(p); }
  2223. #endif /* XXH_NO_STDLIB */
  2224. #include <string.h>
  2225. /*!
  2226. * @internal
  2227. * @brief Modify this function to use a different routine than memcpy().
  2228. */
  2229. static void* XXH_memcpy(void* dest, const void* src, size_t size)
  2230. {
  2231. return memcpy(dest,src,size);
  2232. }
  2233. #include <limits.h> /* ULLONG_MAX */
  2234. /* *************************************
  2235. * Compiler Specific Options
  2236. ***************************************/
  2237. #ifdef _MSC_VER /* Visual Studio warning fix */
  2238. # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
  2239. #endif
  2240. #if XXH_NO_INLINE_HINTS /* disable inlining hints */
  2241. # if defined(__GNUC__) || defined(__clang__)
  2242. # define XXH_FORCE_INLINE static __attribute__((__unused__))
  2243. # else
  2244. # define XXH_FORCE_INLINE static
  2245. # endif
  2246. # define XXH_NO_INLINE static
  2247. /* enable inlining hints */
  2248. #elif defined(__GNUC__) || defined(__clang__)
  2249. # define XXH_FORCE_INLINE static __inline__ __attribute__((__always_inline__, __unused__))
  2250. # define XXH_NO_INLINE static __attribute__((__noinline__))
  2251. #elif defined(_MSC_VER) /* Visual Studio */
  2252. # define XXH_FORCE_INLINE static __forceinline
  2253. # define XXH_NO_INLINE static __declspec(noinline)
  2254. #elif defined (__cplusplus) \
  2255. || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
  2256. # define XXH_FORCE_INLINE static inline
  2257. # define XXH_NO_INLINE static
  2258. #else
  2259. # define XXH_FORCE_INLINE static
  2260. # define XXH_NO_INLINE static
  2261. #endif
  2262. #if XXH3_INLINE_SECRET
  2263. # define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
  2264. #else
  2265. # define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
  2266. #endif
  2267. /* *************************************
  2268. * Debug
  2269. ***************************************/
  2270. /*!
  2271. * @ingroup tuning
  2272. * @def XXH_DEBUGLEVEL
  2273. * @brief Sets the debugging level.
  2274. *
  2275. * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
  2276. * compiler's command line options. The value must be a number.
  2277. */
  2278. #ifndef XXH_DEBUGLEVEL
  2279. # ifdef DEBUGLEVEL /* backwards compat */
  2280. # define XXH_DEBUGLEVEL DEBUGLEVEL
  2281. # else
  2282. # define XXH_DEBUGLEVEL 0
  2283. # endif
  2284. #endif
  2285. #if (XXH_DEBUGLEVEL>=1)
  2286. # include <assert.h> /* note: can still be disabled with NDEBUG */
  2287. # define XXH_ASSERT(c) assert(c)
  2288. #else
  2289. # if defined(__INTEL_COMPILER)
  2290. # define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c))
  2291. # else
  2292. # define XXH_ASSERT(c) XXH_ASSUME(c)
  2293. # endif
  2294. #endif
  2295. /* note: use after variable declarations */
  2296. #ifndef XXH_STATIC_ASSERT
  2297. # if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
  2298. # define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
  2299. # elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
  2300. # define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
  2301. # else
  2302. # define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
  2303. # endif
  2304. # define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
  2305. #endif
  2306. /*!
  2307. * @internal
  2308. * @def XXH_COMPILER_GUARD(var)
  2309. * @brief Used to prevent unwanted optimizations for @p var.
  2310. *
  2311. * It uses an empty GCC inline assembly statement with a register constraint
  2312. * which forces @p var into a general purpose register (eg eax, ebx, ecx
  2313. * on x86) and marks it as modified.
  2314. *
  2315. * This is used in a few places to avoid unwanted autovectorization (e.g.
  2316. * XXH32_round()). All vectorization we want is explicit via intrinsics,
  2317. * and _usually_ isn't wanted elsewhere.
  2318. *
  2319. * We also use it to prevent unwanted constant folding for AArch64 in
  2320. * XXH3_initCustomSecret_scalar().
  2321. */
  2322. #if defined(__GNUC__) || defined(__clang__)
  2323. # define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
  2324. #else
  2325. # define XXH_COMPILER_GUARD(var) ((void)0)
  2326. #endif
  2327. /* Specifically for NEON vectors which use the "w" constraint, on
  2328. * Clang. */
  2329. #if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
  2330. # define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
  2331. #else
  2332. # define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
  2333. #endif
  2334. /* *************************************
  2335. * Basic Types
  2336. ***************************************/
  2337. #if !defined (__VMS) \
  2338. && (defined (__cplusplus) \
  2339. || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
  2340. # ifdef _AIX
  2341. # include <inttypes.h>
  2342. # else
  2343. # include <stdint.h>
  2344. # endif
  2345. typedef uint8_t xxh_u8;
  2346. #else
  2347. typedef unsigned char xxh_u8;
  2348. #endif
  2349. typedef XXH32_hash_t xxh_u32;
  2350. #ifdef XXH_OLD_NAMES
  2351. # warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
  2352. # define BYTE xxh_u8
  2353. # define U8 xxh_u8
  2354. # define U32 xxh_u32
  2355. #endif
  2356. /* *** Memory access *** */
  2357. /*!
  2358. * @internal
  2359. * @fn xxh_u32 XXH_read32(const void* ptr)
  2360. * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
  2361. *
  2362. * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
  2363. *
  2364. * @param ptr The pointer to read from.
  2365. * @return The 32-bit native endian integer from the bytes at @p ptr.
  2366. */
  2367. /*!
  2368. * @internal
  2369. * @fn xxh_u32 XXH_readLE32(const void* ptr)
  2370. * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
  2371. *
  2372. * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
  2373. *
  2374. * @param ptr The pointer to read from.
  2375. * @return The 32-bit little endian integer from the bytes at @p ptr.
  2376. */
  2377. /*!
  2378. * @internal
  2379. * @fn xxh_u32 XXH_readBE32(const void* ptr)
  2380. * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
  2381. *
  2382. * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
  2383. *
  2384. * @param ptr The pointer to read from.
  2385. * @return The 32-bit big endian integer from the bytes at @p ptr.
  2386. */
  2387. /*!
  2388. * @internal
  2389. * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
  2390. * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
  2391. *
  2392. * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
  2393. * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
  2394. * always @ref XXH_alignment::XXH_unaligned.
  2395. *
  2396. * @param ptr The pointer to read from.
  2397. * @param align Whether @p ptr is aligned.
  2398. * @pre
  2399. * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
  2400. * aligned.
  2401. * @return The 32-bit little endian integer from the bytes at @p ptr.
  2402. */
  2403. #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
  2404. /*
  2405. * Manual byteshift. Best for old compilers which don't inline memcpy.
  2406. * We actually directly use XXH_readLE32 and XXH_readBE32.
  2407. */
  2408. #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
  2409. /*
  2410. * Force direct memory access. Only works on CPU which support unaligned memory
  2411. * access in hardware.
  2412. */
/* Direct cast-and-dereference: fastest where the hardware tolerates
 * unaligned loads, but technically undefined behavior per the C standard
 * (alignment/strict aliasing) — hence gated behind XXH_FORCE_MEMORY_ACCESS==2. */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
  2414. #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
  2415. /*
  2416. * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
  2417. * documentation claimed that it only increased the alignment, but actually it
  2418. * can decrease it on gcc, clang, and icc:
  2419. * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
  2420. * https://gcc.godbolt.org/z/xYez1j67Y.
  2421. */
  2422. #ifdef XXH_OLD_NAMES
  2423. typedef union { xxh_u32 u32; } __attribute__((__packed__)) unalign;
  2424. #endif
static xxh_u32 XXH_read32(const void* ptr)
{
    /* A typedef carrying __aligned__(1) tells GCC/Clang the pointee may be
     * unaligned, so the compiler emits a safe unaligned load instead of
     * assuming natural 4-byte alignment. */
    typedef __attribute__((__aligned__(1))) xxh_u32 xxh_unalign32;
    return *((const xxh_unalign32*)ptr);
}
  2430. #else
  2431. /*
  2432. * Portable and safe solution. Generally efficient.
  2433. * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
  2434. */
  2435. static xxh_u32 XXH_read32(const void* memPtr)
  2436. {
  2437. xxh_u32 val;
  2438. XXH_memcpy(&val, memPtr, sizeof(val));
  2439. return val;
  2440. }
  2441. #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
  2442. /* *** Endianness *** */
  2443. /*!
  2444. * @ingroup tuning
  2445. * @def XXH_CPU_LITTLE_ENDIAN
  2446. * @brief Whether the target is little endian.
  2447. *
  2448. * Defined to 1 if the target is little endian, or 0 if it is big endian.
  2449. * It can be defined externally, for example on the compiler command line.
  2450. *
  2451. * If it is not defined,
  2452. * a runtime check (which is usually constant folded) is used instead.
  2453. *
  2454. * @note
  2455. * This is not necessarily defined to an integer constant.
  2456. *
  2457. * @see XXH_isLittleEndian() for the runtime check.
  2458. */
  2459. #ifndef XXH_CPU_LITTLE_ENDIAN
  2460. /*
  2461. * Try to detect endianness automatically, to avoid the nonstandard behavior
  2462. * in `XXH_isLittleEndian()`
  2463. */
  2464. # if defined(_WIN32) /* Windows is always little endian */ \
  2465. || defined(__LITTLE_ENDIAN__) \
  2466. || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
  2467. # define XXH_CPU_LITTLE_ENDIAN 1
  2468. # elif defined(__BIG_ENDIAN__) \
  2469. || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
  2470. # define XXH_CPU_LITTLE_ENDIAN 0
  2471. # else
  2472. /*!
  2473. * @internal
  2474. * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
  2475. *
  2476. * Most compilers will constant fold this.
  2477. */
  2478. static int XXH_isLittleEndian(void)
  2479. {
  2480. /*
  2481. * Portable and well-defined behavior.
  2482. * Don't use static: it is detrimental to performance.
  2483. */
  2484. const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
  2485. return one.c[0];
  2486. }
  2487. # define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
  2488. # endif
  2489. #endif
  2490. /* ****************************************
  2491. * Compiler-specific Functions and Macros
  2492. ******************************************/
  2493. #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
  2494. #ifdef __has_builtin
  2495. # define XXH_HAS_BUILTIN(x) __has_builtin(x)
  2496. #else
  2497. # define XXH_HAS_BUILTIN(x) 0
  2498. #endif
  2499. /*
  2500. * C23 and future versions have standard "unreachable()".
  2501. * Once it has been implemented reliably we can add it as an
  2502. * additional case:
  2503. *
  2504. * ```
  2505. * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
  2506. * # include <stddef.h>
  2507. * # ifdef unreachable
  2508. * # define XXH_UNREACHABLE() unreachable()
  2509. * # endif
  2510. * #endif
  2511. * ```
  2512. *
  2513. * Note C++23 also has std::unreachable() which can be detected
  2514. * as follows:
  2515. * ```
  2516. * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
  2517. * # include <utility>
  2518. * # define XXH_UNREACHABLE() std::unreachable()
  2519. * #endif
  2520. * ```
  2521. * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
  2522. * We don't use that as including `<utility>` in `extern "C"` blocks
  2523. * doesn't work on GCC12
  2524. */
  2525. #if XXH_HAS_BUILTIN(__builtin_unreachable)
  2526. # define XXH_UNREACHABLE() __builtin_unreachable()
  2527. #elif defined(_MSC_VER)
  2528. # define XXH_UNREACHABLE() __assume(0)
  2529. #else
  2530. # define XXH_UNREACHABLE()
  2531. #endif
  2532. #if XXH_HAS_BUILTIN(__builtin_assume)
  2533. # define XXH_ASSUME(c) __builtin_assume(c)
  2534. #else
  2535. # define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
  2536. #endif
  2537. /*!
  2538. * @internal
  2539. * @def XXH_rotl32(x,r)
  2540. * @brief 32-bit rotate left.
  2541. *
  2542. * @param x The 32-bit integer to be rotated.
  2543. * @param r The number of bits to rotate.
  2544. * @pre
  2545. * @p r > 0 && @p r < 32
  2546. * @note
  2547. * @p x and @p r may be evaluated multiple times.
  2548. * @return The rotated result.
  2549. */
  2550. #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
  2551. && XXH_HAS_BUILTIN(__builtin_rotateleft64)
  2552. # define XXH_rotl32 __builtin_rotateleft32
  2553. # define XXH_rotl64 __builtin_rotateleft64
  2554. /* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
  2555. #elif defined(_MSC_VER)
  2556. # define XXH_rotl32(x,r) _rotl(x,r)
  2557. # define XXH_rotl64(x,r) _rotl64(x,r)
  2558. #else
  2559. # define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
  2560. # define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
  2561. #endif
  2562. /*!
  2563. * @internal
  2564. * @fn xxh_u32 XXH_swap32(xxh_u32 x)
  2565. * @brief A 32-bit byteswap.
  2566. *
  2567. * @param x The 32-bit integer to byteswap.
  2568. * @return @p x, byteswapped.
  2569. */
  2570. #if defined(_MSC_VER) /* Visual Studio */
  2571. # define XXH_swap32 _byteswap_ulong
  2572. #elif XXH_GCC_VERSION >= 403
  2573. # define XXH_swap32 __builtin_bswap32
  2574. #else
  2575. static xxh_u32 XXH_swap32 (xxh_u32 x)
  2576. {
  2577. return ((x << 24) & 0xff000000 ) |
  2578. ((x << 8) & 0x00ff0000 ) |
  2579. ((x >> 8) & 0x0000ff00 ) |
  2580. ((x >> 24) & 0x000000ff );
  2581. }
  2582. #endif
  2583. /* ***************************
  2584. * Memory reads
  2585. *****************************/
  2586. /*!
  2587. * @internal
  2588. * @brief Enum to indicate whether a pointer is aligned.
  2589. */
typedef enum {
    XXH_aligned,  /*!< Aligned: safe to dereference the word directly */
    XXH_unaligned /*!< Possibly unaligned: must go through an unaligned load */
} XXH_alignment;
  2594. /*
  2595. * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
  2596. *
  2597. * This is ideal for older compilers which don't inline memcpy.
  2598. */
  2599. #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
  2600. XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
  2601. {
  2602. const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
  2603. return bytePtr[0]
  2604. | ((xxh_u32)bytePtr[1] << 8)
  2605. | ((xxh_u32)bytePtr[2] << 16)
  2606. | ((xxh_u32)bytePtr[3] << 24);
  2607. }
  2608. XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
  2609. {
  2610. const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
  2611. return bytePtr[3]
  2612. | ((xxh_u32)bytePtr[2] << 8)
  2613. | ((xxh_u32)bytePtr[1] << 16)
  2614. | ((xxh_u32)bytePtr[0] << 24);
  2615. }
  2616. #else
  2617. XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
  2618. {
  2619. return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
  2620. }
  2621. static xxh_u32 XXH_readBE32(const void* ptr)
  2622. {
  2623. return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
  2624. }
  2625. #endif
  2626. XXH_FORCE_INLINE xxh_u32
  2627. XXH_readLE32_align(const void* ptr, XXH_alignment align)
  2628. {
  2629. if (align==XXH_unaligned) {
  2630. return XXH_readLE32(ptr);
  2631. } else {
  2632. return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
  2633. }
  2634. }
  2635. /* *************************************
  2636. * Misc
  2637. ***************************************/
  2638. /*! @ingroup public */
  2639. XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
  2640. /* *******************************************************************
  2641. * 32-bit hash functions
  2642. *********************************************************************/
  2643. /*!
  2644. * @}
  2645. * @defgroup XXH32_impl XXH32 implementation
  2646. * @ingroup impl
  2647. *
  2648. * Details on the XXH32 implementation.
  2649. * @{
  2650. */
  2651. /* #define instead of static const, to be used as initializers */
  2652. #define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
  2653. #define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
  2654. #define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
  2655. #define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
  2656. #define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
  2657. #ifdef XXH_OLD_NAMES
  2658. # define PRIME32_1 XXH_PRIME32_1
  2659. # define PRIME32_2 XXH_PRIME32_2
  2660. # define PRIME32_3 XXH_PRIME32_3
  2661. # define PRIME32_4 XXH_PRIME32_4
  2662. # define PRIME32_5 XXH_PRIME32_5
  2663. #endif
  2664. /*!
  2665. * @internal
  2666. * @brief Normal stripe processing routine.
  2667. *
  2668. * This shuffles the bits so that any bit from @p input impacts several bits in
  2669. * @p acc.
  2670. *
  2671. * @param acc The accumulator lane.
  2672. * @param input The stripe of input to mix.
  2673. * @return The mixed accumulator lane.
  2674. */
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
    /* One mixing step: multiply-add, rotate, multiply. */
    acc += input * XXH_PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /*
     * UGLY HACK:
     * A compiler fence is used to prevent GCC and Clang from
     * autovectorizing the XXH32 loop (pragmas and attributes don't work for
     * some reason) without globally disabling SSE4.1.
     *
     * The reason we want to avoid vectorization is because despite working on
     * 4 integers at a time, there are multiple factors slowing XXH32 down on
     * SSE4:
     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency
     *   on newer chips!) making it slightly slower to multiply four integers
     *   at once compared to four integers independently. Even when pmulld was
     *   fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
     *   just to multiply unless doing a long operation.
     *
     * - Four instructions are required to rotate,
     *      movqda tmp,  v    // not required with VEX encoding
     *      pslld  tmp, 13    // tmp <<= 13
     *      psrld  v,   19    // x >>= 19
     *      por    v,  tmp    // x |= tmp
     *   compared to one for scalar:
     *      roll   v, 13      // reliably fast across the board
     *      shldl  v, v, 13   // Sandy Bridge and later prefer this for some reason
     *
     * - Instruction level parallelism is actually more beneficial here
     *   because the SIMD actually serializes this operation: While v1 is
     *   rotating, v2 can load data, while v3 can multiply. SSE forces them to
     *   operate together.
     *
     * This is also enabled on AArch64, as Clang is *very aggressive* in
     * vectorizing the loop. NEON is only faster on the A53, and with the
     * newer cores, it is less than half the speed.
     *
     * Additionally, this is used on WASM SIMD128 because it JITs to the same
     * SIMD instructions and has the same issue.
     */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}
  2721. /*!
  2722. * @internal
  2723. * @brief Mixes all bits to finalize the hash.
  2724. *
  2725. * The final mix ensures that all input bits have a chance to impact any bit in
  2726. * the output digest, resulting in an unbiased distribution.
  2727. *
  2728. * @param hash The hash to avalanche.
  2729. * @return The avalanched hash.
  2730. */
static xxh_u32 XXH32_avalanche(xxh_u32 hash)
{
    /* Alternating xorshift / prime-multiply rounds spread every input bit
     * across all 32 output bits. */
    hash ^= hash >> 15;
    hash *= XXH_PRIME32_2;
    hash ^= hash >> 13;
    hash *= XXH_PRIME32_3;
    hash ^= hash >> 16;
    return hash;
}
  2740. #define XXH_get32bits(p) XXH_readLE32_align(p, align)
  2741. /*!
  2742. * @internal
  2743. * @brief Processes the last 0-15 bytes of @p ptr.
  2744. *
  2745. * There may be up to 15 bytes remaining to consume from the input.
  2746. * This final stage will digest them to ensure that all input bytes are present
  2747. * in the final mix.
  2748. *
  2749. * @param hash The hash to finalize.
  2750. * @param ptr The pointer to the remaining input.
  2751. * @param len The remaining length, modulo 16.
  2752. * @param align Whether @p ptr is aligned.
  2753. * @return The finalized hash.
  2754. * @see XXH64_finalize().
  2755. */
static XXH_PUREF xxh_u32
XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
    /* Mix one tail byte into the hash and advance ptr. */
#define XXH_PROCESS1 do {                             \
    hash += (*ptr++) * XXH_PRIME32_5;                 \
    hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1;      \
} while (0)

    /* Mix one 32-bit tail word into the hash and advance ptr by 4. */
#define XXH_PROCESS4 do {                             \
    hash += XXH_get32bits(ptr) * XXH_PRIME32_3;       \
    ptr += 4;                                         \
    hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4;      \
} while (0)

    if (ptr==NULL) XXH_ASSERT(len == 0);

    /* Compact rerolled version; generally faster */
    if (!XXH32_ENDJMP) {
        len &= 15;
        while (len >= 4) {
            XXH_PROCESS4;
            len -= 4;
        }
        while (len > 0) {
            XXH_PROCESS1;
            --len;
        }
        return XXH32_avalanche(hash);
    } else {
        /* Jump-table variant: each entry point consumes exactly (len & 15)
         * bytes by falling through the cases below it. */
        switch(len&15) /* or switch(bEnd - p) */ {
            case 12:     XXH_PROCESS4;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 8:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 4:      XXH_PROCESS4;
                         return XXH32_avalanche(hash);

            case 13:     XXH_PROCESS4;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 9:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 5:      XXH_PROCESS4;
                         XXH_PROCESS1;
                         return XXH32_avalanche(hash);

            case 14:     XXH_PROCESS4;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 10:     XXH_PROCESS4;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 6:      XXH_PROCESS4;
                         XXH_PROCESS1;
                         XXH_PROCESS1;
                         return XXH32_avalanche(hash);

            case 15:     XXH_PROCESS4;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 11:     XXH_PROCESS4;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 7:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 3:      XXH_PROCESS1;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 2:      XXH_PROCESS1;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 1:      XXH_PROCESS1;
                         XXH_FALLTHROUGH;  /* fallthrough */
            case 0:      return XXH32_avalanche(hash);
        }
        XXH_ASSERT(0);
        return hash;   /* reaching this point is deemed impossible */
    }
}
  2822. #ifdef XXH_OLD_NAMES
  2823. # define PROCESS1 XXH_PROCESS1
  2824. # define PROCESS4 XXH_PROCESS4
  2825. #else
  2826. # undef XXH_PROCESS1
  2827. # undef XXH_PROCESS4
  2828. #endif
  2829. /*!
  2830. * @internal
  2831. * @brief The implementation for @ref XXH32().
  2832. *
  2833. * @param input , len , seed Directly passed from @ref XXH32().
  2834. * @param align Whether @p input is aligned.
  2835. * @return The calculated hash.
  2836. */
XXH_FORCE_INLINE XXH_PUREF xxh_u32
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
    xxh_u32 h32;

    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=16) {
        /* Main loop: four independent accumulators, each consuming one
         * 32-bit lane per 16-byte stripe. */
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 15;
        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
        xxh_u32 v2 = seed + XXH_PRIME32_2;
        xxh_u32 v3 = seed + 0;
        xxh_u32 v4 = seed - XXH_PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
            v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
            v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
            v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
        } while (input < limit);

        /* Converge the four accumulators into a single word. */
        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        /* Short input (< 16 bytes): skip the accumulator stage entirely. */
        h32 = seed + XXH_PRIME32_5;
    }

    h32 += (xxh_u32)len;

    /* `input` now points at the <16-byte tail; finalize consumes it. */
    return XXH32_finalize(h32, input, len&15, align);
}
  2863. /*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
{
#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, (const xxh_u8*)input, len);
    return XXH32_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    /* Generic path: works for any alignment. */
    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
  2880. /******* Hash streaming *******/
  2881. #ifndef XXH_NO_STREAM
  2882. /*! @ingroup XXH32_family */
  2883. XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
  2884. {
  2885. return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
  2886. }
  2887. /*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    /* No NULL guard needed: XXH_free forwards to free(), which accepts NULL.
     * Always succeeds. */
    XXH_free(statePtr);
    return XXH_OK;
}
  2893. /*! @ingroup XXH32_family */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    /* XXH32_state_t is plain data: a shallow byte copy duplicates it fully. */
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}
  2898. /*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
{
    XXH_ASSERT(statePtr != NULL);
    /* Wipe the whole state (buffer, counters), then seed the four
     * accumulators exactly as XXH32_endian_align() does. */
    memset(statePtr, 0, sizeof(*statePtr));
    statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
    statePtr->v[1] = seed + XXH_PRIME32_2;
    statePtr->v[2] = seed + 0;
    statePtr->v[3] = seed - XXH_PRIME32_1;
    return XXH_OK;
}
  2909. /*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len_32 += (XXH32_hash_t)len;
        /* Remember whether >=16 bytes were ever seen: decides which
         * convergence path XXH32_digest() takes. */
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));

        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
            state->memsize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* some data left from previous update */
            /* Top up the internal buffer to a full 16-byte stripe and
             * consume it. */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
            {   const xxh_u32* p32 = state->mem32;
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }

        if (p <= bEnd-16) {
            /* Bulk loop: consume full 16-byte stripes straight from input. */
            const xxh_u8* const limit = bEnd - 16;

            do {
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
            } while (p<=limit);
        }

        if (p < bEnd) {
            /* Stash the <16-byte remainder for the next update/digest. */
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}
  2953. /*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
{
    xxh_u32 h32;

    if (state->large_len) {
        /* At least 16 bytes were consumed: converge the four accumulators. */
        h32 = XXH_rotl32(state->v[0], 1)
            + XXH_rotl32(state->v[1], 7)
            + XXH_rotl32(state->v[2], 12)
            + XXH_rotl32(state->v[3], 18);
    } else {
        /* Short input: v[2] still holds the raw seed (set in XXH32_reset). */
        h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
    }

    h32 += state->total_len_32;

    /* mem32 holds the buffered tail and is naturally 4-byte aligned,
     * so the aligned finalize path is safe. */
    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}
  2968. #endif /* !XXH_NO_STREAM */
  2969. /******* Canonical representation *******/
  2970. /*! @ingroup XXH32_family */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    /* Canonical representation is big-endian: byteswap on little-endian hosts. */
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}
  2977. /*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    /* Canonical form is big-endian by definition; read it back accordingly. */
    return XXH_readBE32(src);
}
  2982. #ifndef XXH_NO_LONG_LONG
  2983. /* *******************************************************************
  2984. * 64-bit hash functions
  2985. *********************************************************************/
  2986. /*!
  2987. * @}
  2988. * @ingroup impl
  2989. * @{
  2990. */
  2991. /******* Memory access *******/
  2992. typedef XXH64_hash_t xxh_u64;
  2993. #ifdef XXH_OLD_NAMES
  2994. # define U64 xxh_u64
  2995. #endif
  2996. #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
  2997. /*
  2998. * Manual byteshift. Best for old compilers which don't inline memcpy.
  2999. * We actually directly use XXH_readLE64 and XXH_readBE64.
  3000. */
  3001. #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
  3002. /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
static xxh_u64 XXH_read64(const void* memPtr)
{
    /* Direct dereference: technically UB on misaligned pointers, but fast
     * where hardware supports unaligned access (XXH_FORCE_MEMORY_ACCESS==2). */
    return *(const xxh_u64*) memPtr;
}
  3007. #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
  3008. /*
  3009. * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
  3010. * documentation claimed that it only increased the alignment, but actually it
  3011. * can decrease it on gcc, clang, and icc:
  3012. * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
  3013. * https://gcc.godbolt.org/z/xYez1j67Y.
  3014. */
  3015. #ifdef XXH_OLD_NAMES
  3016. typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((__packed__)) unalign64;
  3017. #endif
static xxh_u64 XXH_read64(const void* ptr)
{
    /* A typedef carrying __aligned__(1) tells GCC/Clang the pointee may be
     * unaligned, so the compiler emits a safe unaligned load. */
    typedef __attribute__((__aligned__(1))) xxh_u64 xxh_unalign64;
    return *((const xxh_unalign64*)ptr);
}
  3023. #else
  3024. /*
  3025. * Portable and safe solution. Generally efficient.
  3026. * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
  3027. */
  3028. static xxh_u64 XXH_read64(const void* memPtr)
  3029. {
  3030. xxh_u64 val;
  3031. XXH_memcpy(&val, memPtr, sizeof(val));
  3032. return val;
  3033. }
  3034. #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
  3035. #if defined(_MSC_VER) /* Visual Studio */
  3036. # define XXH_swap64 _byteswap_uint64
  3037. #elif XXH_GCC_VERSION >= 403
  3038. # define XXH_swap64 __builtin_bswap64
  3039. #else
  3040. static xxh_u64 XXH_swap64(xxh_u64 x)
  3041. {
  3042. return ((x << 56) & 0xff00000000000000ULL) |
  3043. ((x << 40) & 0x00ff000000000000ULL) |
  3044. ((x << 24) & 0x0000ff0000000000ULL) |
  3045. ((x << 8) & 0x000000ff00000000ULL) |
  3046. ((x >> 8) & 0x00000000ff000000ULL) |
  3047. ((x >> 24) & 0x0000000000ff0000ULL) |
  3048. ((x >> 40) & 0x000000000000ff00ULL) |
  3049. ((x >> 56) & 0x00000000000000ffULL);
  3050. }
  3051. #endif
  3052. /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
  3053. #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
  3054. XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
  3055. {
  3056. const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
  3057. return bytePtr[0]
  3058. | ((xxh_u64)bytePtr[1] << 8)
  3059. | ((xxh_u64)bytePtr[2] << 16)
  3060. | ((xxh_u64)bytePtr[3] << 24)
  3061. | ((xxh_u64)bytePtr[4] << 32)
  3062. | ((xxh_u64)bytePtr[5] << 40)
  3063. | ((xxh_u64)bytePtr[6] << 48)
  3064. | ((xxh_u64)bytePtr[7] << 56);
  3065. }
  3066. XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
  3067. {
  3068. const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
  3069. return bytePtr[7]
  3070. | ((xxh_u64)bytePtr[6] << 8)
  3071. | ((xxh_u64)bytePtr[5] << 16)
  3072. | ((xxh_u64)bytePtr[4] << 24)
  3073. | ((xxh_u64)bytePtr[3] << 32)
  3074. | ((xxh_u64)bytePtr[2] << 40)
  3075. | ((xxh_u64)bytePtr[1] << 48)
  3076. | ((xxh_u64)bytePtr[0] << 56);
  3077. }
  3078. #else
/* Reads a 64-bit little-endian value, byte-swapping on big-endian hosts. */
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}
/* Reads a 64-bit big-endian value, byte-swapping on little-endian hosts. */
static xxh_u64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
  3087. #endif
/*
 * Little-endian read with a compile-time alignment hint.
 * When @p align is XXH_aligned the caller guarantees 8-byte alignment,
 * so a direct dereference is used instead of the unaligned-safe path.
 */
XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return XXH_readLE64(ptr);
    else
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}
  3096. /******* xxh64 *******/
  3097. /*!
  3098. * @}
  3099. * @defgroup XXH64_impl XXH64 implementation
  3100. * @ingroup impl
  3101. *
  3102. * Details on the XXH64 implementation.
  3103. * @{
  3104. */
  3105. /* #define rather that static const, to be used as initializers */
  3106. #define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
  3107. #define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
  3108. #define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
  3109. #define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
  3110. #define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
  3111. #ifdef XXH_OLD_NAMES
  3112. # define PRIME64_1 XXH_PRIME64_1
  3113. # define PRIME64_2 XXH_PRIME64_2
  3114. # define PRIME64_3 XXH_PRIME64_3
  3115. # define PRIME64_4 XXH_PRIME64_4
  3116. # define PRIME64_5 XXH_PRIME64_5
  3117. #endif
  3118. /*! @copydoc XXH32_round */
/*
 * Core mixing round: folds one 64-bit input lane into an accumulator.
 * acc = rotl64(acc + input * PRIME64_2, 31) * PRIME64_1
 */
static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
    acc += input * XXH_PRIME64_2;
    acc = XXH_rotl64(acc, 31);
    acc *= XXH_PRIME64_1;
#if (defined(__AVX512F__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /*
     * DISABLE AUTOVECTORIZATION:
     * A compiler fence is used to prevent GCC and Clang from
     * autovectorizing the XXH64 loop (pragmas and attributes don't work for some
     * reason) without globally disabling AVX512.
     *
     * Autovectorization of XXH64 tends to be detrimental,
     * though the exact outcome may change depending on exact cpu and compiler version.
     * For information, it has been reported as detrimental for Skylake-X,
     * but possibly beneficial for Zen4.
     *
     * The default is to disable auto-vectorization,
     * but you can select to enable it instead using `XXH_ENABLE_AUTOVECTORIZE` build variable.
     */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}
  3143. static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
  3144. {
  3145. val = XXH64_round(0, val);
  3146. acc ^= val;
  3147. acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
  3148. return acc;
  3149. }
  3150. /*! @copydoc XXH32_avalanche */
  3151. static xxh_u64 XXH64_avalanche(xxh_u64 hash)
  3152. {
  3153. hash ^= hash >> 33;
  3154. hash *= XXH_PRIME64_2;
  3155. hash ^= hash >> 29;
  3156. hash *= XXH_PRIME64_3;
  3157. hash ^= hash >> 32;
  3158. return hash;
  3159. }
  3160. #define XXH_get64bits(p) XXH_readLE64_align(p, align)
  3161. /*!
  3162. * @internal
  3163. * @brief Processes the last 0-31 bytes of @p ptr.
  3164. *
  3165. * There may be up to 31 bytes remaining to consume from the input.
  3166. * This final stage will digest them to ensure that all input bytes are present
  3167. * in the final mix.
  3168. *
  3169. * @param hash The hash to finalize.
  3170. * @param ptr The pointer to the remaining input.
  3171. * @param len The remaining length, modulo 32.
  3172. * @param align Whether @p ptr is aligned.
  3173. * @return The finalized hash
  3174. * @see XXH32_finalize().
  3175. */
static XXH_PUREF xxh_u64
XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
    if (ptr==NULL) XXH_ASSERT(len == 0);
    /* Only the tail (mod 32) matters: full stripes were already consumed. */
    len &= 31;
    /* Consume remaining 8-byte words. */
    while (len >= 8) {
        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
        ptr += 8;
        hash ^= k1;
        hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
        len -= 8;
    }
    /* At most one 4-byte word can remain. */
    if (len >= 4) {
        hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
        ptr += 4;
        hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
        len -= 4;
    }
    /* Finally, 0-3 single bytes. */
    while (len > 0) {
        hash ^= (*ptr++) * XXH_PRIME64_5;
        hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
        --len;
    }
    return XXH64_avalanche(hash);
}
  3201. #ifdef XXH_OLD_NAMES
  3202. # define PROCESS1_64 XXH_PROCESS1_64
  3203. # define PROCESS4_64 XXH_PROCESS4_64
  3204. # define PROCESS8_64 XXH_PROCESS8_64
  3205. #else
  3206. # undef XXH_PROCESS1_64
  3207. # undef XXH_PROCESS4_64
  3208. # undef XXH_PROCESS8_64
  3209. #endif
  3210. /*!
  3211. * @internal
  3212. * @brief The implementation for @ref XXH64().
  3213. *
  3214. * @param input , len , seed Directly passed from @ref XXH64().
  3215. * @param align Whether @p input is aligned.
  3216. * @return The calculated hash.
  3217. */
/*
 * Core single-shot XXH64, templated on input alignment (via `align`).
 * Inputs of >= 32 bytes are consumed in 32-byte stripes by four interleaved
 * accumulators, then merged; shorter inputs start directly from a seeded
 * constant. The (possibly non-empty) tail is handled by XXH64_finalize.
 */
XXH_FORCE_INLINE XXH_PUREF xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
    xxh_u64 h64;
    if (input==NULL) XXH_ASSERT(len == 0);
    if (len>=32) {
        const xxh_u8* const bEnd = input + len;
        /* Stop when fewer than 32 bytes remain after the next stripe. */
        const xxh_u8* const limit = bEnd - 31;
        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
        xxh_u64 v2 = seed + XXH_PRIME64_2;
        xxh_u64 v3 = seed + 0;
        xxh_u64 v4 = seed - XXH_PRIME64_1;
        do {
            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
        } while (input<limit);
        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = seed + XXH_PRIME64_5;
    }
    /* Total length participates in the hash before finalization. */
    h64 += (xxh_u64) len;
    return XXH64_finalize(h64, input, len, align);
}
  3247. /*! @ingroup XXH64_family */
/* Public one-shot XXH64 entry point. */
XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, (const xxh_u8*)input, len);
    return XXH64_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
        }
    }
    /* General case: use the unaligned-safe read path. */
    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
  3264. /******* Hash Streaming *******/
  3265. #ifndef XXH_NO_STREAM
  3266. /*! @ingroup XXH64_family*/
  3267. XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
  3268. {
  3269. return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
  3270. }
  3271. /*! @ingroup XXH64_family */
/* Releases a state allocated by XXH64_createState(). Always returns XXH_OK. */
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}
  3277. /*! @ingroup XXH64_family */
/* Copies a streaming state wholesale (plain struct copy, no pointers inside). */
XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}
  3282. /*! @ingroup XXH64_family */
/* Re-initializes a streaming state with @p seed. */
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
{
    XXH_ASSERT(statePtr != NULL);
    memset(statePtr, 0, sizeof(*statePtr));
    /* Same accumulator seeding as the one-shot path (XXH64_endian_align). */
    statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
    statePtr->v[1] = seed + XXH_PRIME64_2;
    statePtr->v[2] = seed + 0;
    statePtr->v[3] = seed - XXH_PRIME64_1;
    return XXH_OK;
}
  3293. /*! @ingroup XXH64_family */
/*
 * Feeds @p len bytes of @p input into the streaming state.
 * Partial stripes are buffered in state->mem64 (32 bytes) and consumed
 * once complete; full 32-byte stripes are consumed directly from input.
 */
XXH_PUBLIC_API XXH_errorcode
XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }
    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;
        state->total_len += len;
        if (state->memsize + len < 32) { /* fill in tmp buffer */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
            state->memsize += (xxh_u32)len;
            return XXH_OK;
        }
        if (state->memsize) { /* tmp buffer is full */
            /* Top the buffer up to 32 bytes and consume it as one stripe.
             * Note: mem64 is an xxh_u64 array, so +1 advances 8 bytes. */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
            state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
            state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
            state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
            p += 32 - state->memsize;
            state->memsize = 0;
        }
        if (p+32 <= bEnd) {
            /* Bulk loop: consume every complete 32-byte stripe in place. */
            const xxh_u8* const limit = bEnd - 32;
            do {
                state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
                state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
                state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
                state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
            } while (p<=limit);
        }
        if (p < bEnd) {
            /* Stash the sub-stripe tail for the next update/digest. */
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }
    return XXH_OK;
}
  3334. /*! @ingroup XXH64_family */
/* Produces the hash of everything fed so far; the state is left untouched. */
XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
{
    xxh_u64 h64;
    if (state->total_len >= 32) {
        /* At least one full stripe was consumed: merge the 4 accumulators. */
        h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
        h64 = XXH64_mergeRound(h64, state->v[0]);
        h64 = XXH64_mergeRound(h64, state->v[1]);
        h64 = XXH64_mergeRound(h64, state->v[2]);
        h64 = XXH64_mergeRound(h64, state->v[3]);
    } else {
        /* v[2] == seed + 0, as set by XXH64_reset. */
        h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
    }
    h64 += (xxh_u64) state->total_len;
    /* mem64 holds the buffered tail and is naturally aligned. */
    return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
}
  3350. #endif /* !XXH_NO_STREAM */
  3351. /******* Canonical representation *******/
  3352. /*! @ingroup XXH64_family */
/* Stores @p hash in canonical (big-endian) byte order into @p dst. */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    /* Canonical form is big-endian: swap on little-endian hosts. */
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}
  3359. /*! @ingroup XXH64_family */
/* Decodes a canonical (big-endian) representation back into a native hash. */
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
  3364. #ifndef XXH_NO_XXH3
  3365. /* *********************************************************************
  3366. * XXH3
  3367. * New generation hash designed for speed on small keys and vectorization
  3368. ************************************************************************ */
  3369. /*!
  3370. * @}
  3371. * @defgroup XXH3_impl XXH3 implementation
  3372. * @ingroup impl
  3373. * @{
  3374. */
  3375. /* === Compiler specifics === */
  3376. #if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
  3377. # define XXH_RESTRICT /* disable */
  3378. #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
  3379. # define XXH_RESTRICT restrict
  3380. #elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
  3381. || (defined (__clang__)) \
  3382. || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
  3383. || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
  3384. /*
  3385. * There are a LOT more compilers that recognize __restrict but this
  3386. * covers the major ones.
  3387. */
  3388. # define XXH_RESTRICT __restrict
  3389. #else
  3390. # define XXH_RESTRICT /* disable */
  3391. #endif
  3392. #if (defined(__GNUC__) && (__GNUC__ >= 3)) \
  3393. || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
  3394. || defined(__clang__)
  3395. # define XXH_likely(x) __builtin_expect(x, 1)
  3396. # define XXH_unlikely(x) __builtin_expect(x, 0)
  3397. #else
  3398. # define XXH_likely(x) (x)
  3399. # define XXH_unlikely(x) (x)
  3400. #endif
  3401. #ifndef XXH_HAS_INCLUDE
  3402. # ifdef __has_include
  3403. /*
  3404. * Not defined as XXH_HAS_INCLUDE(x) (function-like) because
  3405. * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion)
  3406. */
  3407. # define XXH_HAS_INCLUDE __has_include
  3408. # else
  3409. # define XXH_HAS_INCLUDE(x) 0
  3410. # endif
  3411. #endif
  3412. #if defined(__GNUC__) || defined(__clang__)
  3413. # if defined(__ARM_FEATURE_SVE)
  3414. # include <arm_sve.h>
  3415. # endif
  3416. # if defined(__ARM_NEON__) || defined(__ARM_NEON) \
  3417. || (defined(_M_ARM) && _M_ARM >= 7) \
  3418. || defined(_M_ARM64) || defined(_M_ARM64EC) \
  3419. || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
  3420. # define inline __inline__ /* circumvent a clang bug */
  3421. # include <arm_neon.h>
  3422. # undef inline
  3423. # elif defined(__AVX2__)
  3424. # include <immintrin.h>
  3425. # elif defined(__SSE2__)
  3426. # include <emmintrin.h>
  3427. # endif
  3428. #endif
  3429. #if defined(_MSC_VER)
  3430. # include <intrin.h>
  3431. #endif
  3432. /*
  3433. * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
  3434. * remaining a true 64-bit/128-bit hash function.
  3435. *
  3436. * This is done by prioritizing a subset of 64-bit operations that can be
  3437. * emulated without too many steps on the average 32-bit machine.
  3438. *
  3439. * For example, these two lines seem similar, and run equally fast on 64-bit:
  3440. *
  3441. * xxh_u64 x;
  3442. * x ^= (x >> 47); // good
  3443. * x ^= (x >> 13); // bad
  3444. *
  3445. * However, to a 32-bit machine, there is a major difference.
  3446. *
  3447. * x ^= (x >> 47) looks like this:
  3448. *
  3449. * x.lo ^= (x.hi >> (47 - 32));
  3450. *
  3451. * while x ^= (x >> 13) looks like this:
  3452. *
  3453. * // note: funnel shifts are not usually cheap.
  3454. * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
  3455. * x.hi ^= (x.hi >> 13);
  3456. *
  3457. * The first one is significantly faster than the second, simply because the
  3458. * shift is larger than 32. This means:
  3459. * - All the bits we need are in the upper 32 bits, so we can ignore the lower
  3460. * 32 bits in the shift.
  3461. * - The shift result will always fit in the lower 32 bits, and therefore,
  3462. * we can ignore the upper 32 bits in the xor.
  3463. *
  3464. * Thanks to this optimization, XXH3 only requires these features to be efficient:
  3465. *
  3466. * - Usable unaligned access
  3467. * - A 32-bit or 64-bit ALU
  3468. * - If 32-bit, a decent ADC instruction
  3469. * - A 32 or 64-bit multiply with a 64-bit result
  3470. * - For the 128-bit variant, a decent byteswap helps short inputs.
  3471. *
  3472. * The first two are already required by XXH32, and almost all 32-bit and 64-bit
  3473. * platforms which can run XXH32 can run XXH3 efficiently.
  3474. *
  3475. * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
  3476. * notable exception.
  3477. *
  3478. * First of all, Thumb-1 lacks support for the UMULL instruction which
  3479. * performs the important long multiply. This means numerous __aeabi_lmul
  3480. * calls.
  3481. *
  3482. * Second of all, the 8 functional registers are just not enough.
  3483. * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
  3484. * Lo registers, and this shuffling results in thousands more MOVs than A32.
  3485. *
  3486. * A32 and T32 don't have this limitation. They can access all 14 registers,
  3487. * do a 32->64 multiply with UMULL, and the flexible operand allowing free
  3488. * shifts is helpful, too.
  3489. *
  3490. * Therefore, we do a quick sanity check.
  3491. *
  3492. * If compiling Thumb-1 for a target which supports ARM instructions, we will
  3493. * emit a warning, as it is not a "sane" platform to compile for.
  3494. *
  3495. * Usually, if this happens, it is because of an accident and you probably need
  3496. * to specify -march, as you likely meant to compile for a newer architecture.
  3497. *
  3498. * Credit: large sections of the vectorial and asm source code paths
  3499. * have been contributed by @easyaspi314
  3500. */
  3501. #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
  3502. # warning "XXH3 is highly inefficient without ARM or Thumb-2."
  3503. #endif
  3504. /* ==========================================
  3505. * Vectorization detection
  3506. * ========================================== */
  3507. #ifdef XXH_DOXYGEN
  3508. /*!
  3509. * @ingroup tuning
  3510. * @brief Overrides the vectorization implementation chosen for XXH3.
  3511. *
  3512. * Can be defined to 0 to disable SIMD or any of the values mentioned in
  3513. * @ref XXH_VECTOR_TYPE.
  3514. *
  3515. * If this is not defined, it uses predefined macros to determine the best
  3516. * implementation.
  3517. */
  3518. # define XXH_VECTOR XXH_SCALAR
  3519. /*!
  3520. * @ingroup tuning
  3521. * @brief Possible values for @ref XXH_VECTOR.
  3522. *
  3523. * Note that these are actually implemented as macros.
  3524. *
  3525. * If this is not defined, it is detected automatically.
  3526. * internal macro XXH_X86DISPATCH overrides this.
  3527. */
  3528. enum XXH_VECTOR_TYPE /* fake enum */ {
  3529. XXH_SCALAR = 0, /*!< Portable scalar version */
  3530. XXH_SSE2 = 1, /*!<
  3531. * SSE2 for Pentium 4, Opteron, all x86_64.
  3532. *
  3533. * @note SSE2 is also guaranteed on Windows 10, macOS, and
  3534. * Android x86.
  3535. */
  3536. XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
  3537. XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
  3538. XXH_NEON = 4, /*!<
  3539. * NEON for most ARMv7-A, all AArch64, and WASM SIMD128
  3540. * via the SIMDeverywhere polyfill provided with the
  3541. * Emscripten SDK.
  3542. */
  3543. XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
  3544. XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */
  3545. };
  3546. /*!
  3547. * @ingroup tuning
  3548. * @brief Selects the minimum alignment for XXH3's accumulators.
  3549. *
  3550. * When using SIMD, this should match the alignment required for said vector
  3551. * type, so, for example, 32 for AVX2.
  3552. *
  3553. * Default: Auto detected.
  3554. */
  3555. # define XXH_ACC_ALIGN 8
  3556. #endif
  3557. /* Actual definition */
  3558. #ifndef XXH_DOXYGEN
  3559. # define XXH_SCALAR 0
  3560. # define XXH_SSE2 1
  3561. # define XXH_AVX2 2
  3562. # define XXH_AVX512 3
  3563. # define XXH_NEON 4
  3564. # define XXH_VSX 5
  3565. # define XXH_SVE 6
  3566. #endif
  3567. #ifndef XXH_VECTOR /* can be defined on command line */
  3568. # if defined(__ARM_FEATURE_SVE)
  3569. # define XXH_VECTOR XXH_SVE
  3570. # elif ( \
  3571. defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
  3572. || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
  3573. || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
  3574. ) && ( \
  3575. defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
  3576. || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
  3577. )
  3578. # define XXH_VECTOR XXH_NEON
  3579. # elif defined(__AVX512F__)
  3580. # define XXH_VECTOR XXH_AVX512
  3581. # elif defined(__AVX2__)
  3582. # define XXH_VECTOR XXH_AVX2
  3583. # elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
  3584. # define XXH_VECTOR XXH_SSE2
  3585. # elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
  3586. || (defined(__s390x__) && defined(__VEC__)) \
  3587. && defined(__GNUC__) /* TODO: IBM XL */
  3588. # define XXH_VECTOR XXH_VSX
  3589. # else
  3590. # define XXH_VECTOR XXH_SCALAR
  3591. # endif
  3592. #endif
  3593. /* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
  3594. #if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
  3595. # ifdef _MSC_VER
  3596. # pragma warning(once : 4606)
  3597. # else
  3598. # warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
  3599. # endif
  3600. # undef XXH_VECTOR
  3601. # define XXH_VECTOR XXH_SCALAR
  3602. #endif
  3603. /*
  3604. * Controls the alignment of the accumulator,
  3605. * for compatibility with aligned vector loads, which are usually faster.
  3606. */
  3607. #ifndef XXH_ACC_ALIGN
  3608. # if defined(XXH_X86DISPATCH)
  3609. # define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
  3610. # elif XXH_VECTOR == XXH_SCALAR /* scalar */
  3611. # define XXH_ACC_ALIGN 8
  3612. # elif XXH_VECTOR == XXH_SSE2 /* sse2 */
  3613. # define XXH_ACC_ALIGN 16
  3614. # elif XXH_VECTOR == XXH_AVX2 /* avx2 */
  3615. # define XXH_ACC_ALIGN 32
  3616. # elif XXH_VECTOR == XXH_NEON /* neon */
  3617. # define XXH_ACC_ALIGN 16
  3618. # elif XXH_VECTOR == XXH_VSX /* vsx */
  3619. # define XXH_ACC_ALIGN 16
  3620. # elif XXH_VECTOR == XXH_AVX512 /* avx512 */
  3621. # define XXH_ACC_ALIGN 64
  3622. # elif XXH_VECTOR == XXH_SVE /* sve */
  3623. # define XXH_ACC_ALIGN 64
  3624. # endif
  3625. #endif
  3626. #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
  3627. || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
  3628. # define XXH_SEC_ALIGN XXH_ACC_ALIGN
  3629. #elif XXH_VECTOR == XXH_SVE
  3630. # define XXH_SEC_ALIGN XXH_ACC_ALIGN
  3631. #else
  3632. # define XXH_SEC_ALIGN 8
  3633. #endif
  3634. #if defined(__GNUC__) || defined(__clang__)
  3635. # define XXH_ALIASING __attribute__((__may_alias__))
  3636. #else
  3637. # define XXH_ALIASING /* nothing */
  3638. #endif
  3639. /*
  3640. * UGLY HACK:
  3641. * GCC usually generates the best code with -O3 for xxHash.
  3642. *
  3643. * However, when targeting AVX2, it is overzealous in its unrolling resulting
  3644. * in code roughly 3/4 the speed of Clang.
  3645. *
  3646. * There are other issues, such as GCC splitting _mm256_loadu_si256 into
  3647. * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
  3648. * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
  3649. *
  3650. * That is why when compiling the AVX2 version, it is recommended to use either
  3651. * -O2 -mavx2 -march=haswell
  3652. * or
  3653. * -O2 -mavx2 -mno-avx256-split-unaligned-load
  3654. * for decent performance, or to use Clang instead.
  3655. *
  3656. * Fortunately, we can control the first one with a pragma that forces GCC into
  3657. * -O2, but the other one we can't control without "failed to inline always
  3658. * inline function due to target mismatch" warnings.
  3659. */
  3660. #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  3661. && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  3662. && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
  3663. # pragma GCC push_options
  3664. # pragma GCC optimize("-O2")
  3665. #endif
  3666. #if XXH_VECTOR == XXH_NEON
  3667. /*
  3668. * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3
  3669. * optimizes out the entire hashLong loop because of the aliasing violation.
  3670. *
  3671. * However, GCC is also inefficient at load-store optimization with vld1q/vst1q,
  3672. * so the only option is to mark it as aliasing.
  3673. */
  3674. typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;
  3675. /*!
  3676. * @internal
  3677. * @brief `vld1q_u64` but faster and alignment-safe.
  3678. *
  3679. * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only
  3680. * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86).
  3681. *
  3682. * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it
  3683. * prohibits load-store optimizations. Therefore, a direct dereference is used.
  3684. *
  3685. * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe
  3686. * unaligned load.
  3687. */
  3688. #if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
{
    /* Direct dereference through a may_alias-qualified type: on AArch64 GCC
     * this allows load/store optimizations that the vld1q intrinsic blocks. */
    return *(xxh_aliasing_uint64x2_t const *)ptr;
}
  3693. #else
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
{
    /* Load as bytes (alignment-safe on ARMv7-a) then reinterpret as u64x2. */
    return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
}
  3698. #endif
  3699. /*!
  3700. * @internal
  3701. * @brief `vmlal_u32` on low and high halves of a vector.
  3702. *
  3703. * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with
  3704. * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32`
  3705. * with `vmlal_u32`.
  3706. */
  3707. #if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11
/* Widening multiply-accumulate on the LOW halves of lhs/rhs (GCC < 11 path). */
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    /* Inline assembly is the only way */
    __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs));
    return acc;
}
/* Widening multiply-accumulate on the HIGH halves of lhs/rhs. */
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    /* This intrinsic works as expected */
    return vmlal_high_u32(acc, lhs, rhs);
}
  3721. #else
  3722. /* Portable intrinsic versions */
/* Portable version: extract the low halves explicitly, then vmlal_u32. */
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
}
  3728. /*! @copydoc XXH_vmlal_low_u32
  3729. * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
/*! @copydoc XXH_vmlal_low_u32
 * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
}
  3735. #endif
  3736. /*!
  3737. * @ingroup tuning
  3738. * @brief Controls the NEON to scalar ratio for XXH3
  3739. *
  3740. * This can be set to 2, 4, 6, or 8.
  3741. *
  3742. * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used.
  3743. *
  3744. * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those
  3745. * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU
  3746. * bandwidth.
  3747. *
  3748. * This is even more noticeable on the more advanced cores like the Cortex-A76 which
  3749. * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
  3750. *
  3751. * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
  3752. * and 2 scalar lanes, which is chosen by default.
  3753. *
  3754. * This does not apply to Apple processors or 32-bit processors, which run better with
  3755. * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
  3756. *
  3757. * This change benefits CPUs with large micro-op buffers without negatively affecting
  3758. * most other CPUs:
  3759. *
  3760. * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. |
  3761. * |:----------------------|:--------------------|----------:|-----------:|------:|
  3762. * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% |
  3763. * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% |
  3764. * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% |
  3765. * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% |
  3766. *
  3767. * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
  3768. *
  3769. * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes meaning
  3770. * it effectively becomes worse 4.
  3771. *
  3772. * @see XXH3_accumulate_512_neon()
  3773. */
  3774. # ifndef XXH3_NEON_LANES
  3775. # if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
  3776. && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
  3777. # define XXH3_NEON_LANES 6
  3778. # else
  3779. # define XXH3_NEON_LANES XXH_ACC_NB
  3780. # endif
  3781. # endif
  3782. #endif /* XXH_VECTOR == XXH_NEON */
  3783. /*
  3784. * VSX and Z Vector helpers.
  3785. *
  3786. * This is very messy, and any pull requests to clean this up are welcome.
  3787. *
  3788. * There are a lot of problems with supporting VSX and s390x, due to
  3789. * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
  3790. */
  3791. #if XXH_VECTOR == XXH_VSX
  3792. /* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
  3793. * and `pixel`. This is a problem for obvious reasons.
  3794. *
  3795. * These keywords are unnecessary; the spec literally says they are
  3796. * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
  3797. * after including the header.
  3798. *
  3799. * We use pragma push_macro/pop_macro to keep the namespace clean. */
  3800. # pragma push_macro("bool")
  3801. # pragma push_macro("vector")
  3802. # pragma push_macro("pixel")
  3803. /* silence potential macro redefined warnings */
  3804. # undef bool
  3805. # undef vector
  3806. # undef pixel
  3807. # if defined(__s390x__)
  3808. # include <s390intrin.h>
  3809. # else
  3810. # include <altivec.h>
  3811. # endif
  3812. /* Restore the original macro values, if applicable. */
  3813. # pragma pop_macro("pixel")
  3814. # pragma pop_macro("vector")
  3815. # pragma pop_macro("bool")
  3816. typedef __vector unsigned long long xxh_u64x2;
  3817. typedef __vector unsigned char xxh_u8x16;
  3818. typedef __vector unsigned xxh_u32x4;
  3819. /*
  3820. * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
  3821. */
  3822. typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
  3823. # ifndef XXH_VSX_BE
  3824. # if defined(__BIG_ENDIAN__) \
  3825. || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
  3826. # define XXH_VSX_BE 1
  3827. # elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
  3828. # warning "-maltivec=be is not recommended. Please use native endianness."
  3829. # define XXH_VSX_BE 1
  3830. # else
  3831. # define XXH_VSX_BE 0
  3832. # endif
  3833. # endif /* !defined(XXH_VSX_BE) */
  3834. # if XXH_VSX_BE
  3835. # if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
  3836. # define XXH_vec_revb vec_revb
  3837. # else
/*!
 * A polyfill for POWER9's vec_revb().
 *
 * Byte-swaps each 64-bit lane of @p val via a vec_perm() shuffle, which is
 * available on pre-POWER9 targets where vec_revb() itself is not.
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
{
    /* Permute indices that reverse the byte order within each 64-bit half. */
    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
    return vec_perm(val, val, vByteSwap);
}
  3847. # endif
  3848. # endif /* XXH_VSX_BE */
/*!
 * Performs an unaligned vector load and byte swaps it on big endian.
 *
 * @param ptr Pointer to 16 readable bytes; may be unaligned.
 * @return The 16 bytes as two 64-bit lanes in little-endian lane layout.
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
{
    xxh_u64x2 ret;
    /* memcpy is the portable way to express an unaligned load. */
    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
# if XXH_VSX_BE
    /* Big endian: byte swap so the mixers see the same layout as LE targets. */
    ret = XXH_vec_revb(ret);
# endif
    return ret;
}
  3861. /*
  3862. * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
  3863. *
 * These intrinsics weren't added until GCC 8, despite existing for a while,
 * and they are endian dependent. Also, their meanings swap depending on the version.
 */
  3867. # if defined(__s390x__)
  3868. /* s390x is always big endian, no issue on this platform */
  3869. # define XXH_vec_mulo vec_mulo
  3870. # define XXH_vec_mule vec_mule
  3871. # elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
/* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
/* The IBM XL compiler (which defines __clang__) only implements the vec_* operations. */
  3874. # define XXH_vec_mulo __builtin_altivec_vmulouw
  3875. # define XXH_vec_mule __builtin_altivec_vmuleuw
  3876. # else
/* gcc needs inline assembly */
/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
/*
 * NOTE(review): "odd"/"even" below follow the vmulouw/vmuleuw instruction
 * semantics directly, sidestepping GCC's version-dependent vec_mulo/vec_mule
 * mapping mentioned above.
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    /* 32x32->64 multiply of the odd-numbered lanes of a and b. */
    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    /* 32x32->64 multiply of the even-numbered lanes of a and b. */
    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
  3891. # endif /* XXH_vec_mulo, XXH_vec_mule */
  3892. #endif /* XXH_VECTOR == XXH_VSX */
#if XXH_VECTOR == XXH_SVE
/*
 * One SVE accumulation round over the lanes selected by `mask`:
 *
 *   mixed = input ^ secret
 *   acc  += svtbl(input, kSwap) + lo32(mixed) * hi32(mixed)
 *
 * `mask` (svbool_t), `xinput`, `xsecret`, and `kSwap` (presumably a
 * pair-swapping index vector for svtbl -- defined at the expansion site)
 * must all be in scope where this macro is used.
 */
#define ACCRND(acc, offset) \
do { \
    svuint64_t input_vec = svld1_u64(mask, xinput + offset);          \
    svuint64_t secret_vec = svld1_u64(mask, xsecret + offset);        \
    svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec);      \
    svuint64_t swapped = svtbl_u64(input_vec, kSwap);                 \
    svuint64_t mixed_lo = svextw_u64_x(mask, mixed);                  \
    svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32);             \
    svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped);  \
    acc = svadd_u64_x(mask, acc, mul);                                \
} while (0)
#endif /* XXH_VECTOR == XXH_SVE */
  3906. /* prefetch
  3907. * can be disabled, by declaring XXH_NO_PREFETCH build macro */
  3908. #if defined(XXH_NO_PREFETCH)
  3909. # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
  3910. #else
  3911. # if XXH_SIZE_OPT >= 1
  3912. # define XXH_PREFETCH(ptr) (void)(ptr)
  3913. # elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
  3914. # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
  3915. # define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
  3916. # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
  3917. # define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
  3918. # else
  3919. # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
  3920. # endif
  3921. #endif /* XXH_NO_PREFETCH */
  3922. /* ==========================================
  3923. * XXH3 default settings
  3924. * ========================================== */
  3925. #define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
  3926. #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
  3927. # error "default keyset is not large enough"
  3928. #endif
/*!
 * Pseudorandom secret taken directly from FARSH.
 *
 * This is the default secret used when no custom secret/seed is provided.
 * The exact byte values are part of the XXH3 specification: changing any
 * byte changes every default-variant hash. 64-byte alignment allows
 * aligned SIMD loads in the wide code paths.
 */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};
  3944. static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */
  3945. static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */
  3946. #ifdef XXH_OLD_NAMES
  3947. # define kSecret XXH3_kSecret
  3948. #endif
#ifdef XXH_DOXYGEN
/*!
 * @brief Calculates a 32-bit to 64-bit long multiply.
 *
 * Implemented as a macro.
 *
 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
 * need to (but it shouldn't need to anyways, it is about 7 instructions to do
 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
 * use that instead of the normal method.
 *
 * If you are compiling for platforms like Thumb-1 and don't have a better option,
 * you may also want to write your own long multiply routine here.
 *
 * @param x, y Numbers to be multiplied
 * @return 64-bit product of the low 32 bits of @p x and @p y.
 */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
/* MSVC targeting 32-bit x86: force the single-MULL intrinsic. */
#    define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/*
 * Downcast + upcast is usually better than masking on older compilers like
 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
 *
 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
 */
#    define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
/*!
 * @brief Calculates a 64->128-bit long multiply.
 *
 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
 * version.
 *
 * @param lhs , rhs The 64-bit integers to be multiplied
 * @return The 128-bit result represented in an @ref XXH128_hash_t.
 */
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
    /*
     * GCC/Clang __uint128_t method.
     *
     * On most 64-bit targets, GCC and Clang define a __uint128_t type.
     * This is usually the best way as it usually uses a native long 64-bit
     * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
     *
     * Usually.
     *
     * Even on 32-bit platforms, Clang (and Emscripten) define this type
     * despite not having native 128-bit arithmetic. This results in a laggy
     * compiler builtin call which calculates a full 128-bit multiply.
     * In that case it is best to use the portable one.
     * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
     */
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
    && defined(__SIZEOF_INT128__) \
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)

    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
    XXH128_hash_t r128;
    r128.low64  = (xxh_u64)(product);
    r128.high64 = (xxh_u64)(product >> 64);
    return r128;

    /*
     * MSVC for x64's _umul128 method.
     *
     * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
     *
     * This compiles to single operand MUL on x64.
     */
#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)

/* NOTE(review): the pragma is emitted only when _MSC_VER is NOT defined;
 * confirm this is intentional for non-MSVC compilers defining _M_X64. */
#ifndef _MSC_VER
#   pragma intrinsic(_umul128)
#endif
    xxh_u64 product_high;
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
    XXH128_hash_t r128;
    r128.low64  = product_low;
    r128.high64 = product_high;
    return r128;

    /*
     * MSVC for ARM64's __umulh method.
     *
     * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
     */
#elif defined(_M_ARM64) || defined(_M_ARM64EC)

#ifndef _MSC_VER
#   pragma intrinsic(__umulh)
#endif
    XXH128_hash_t r128;
    r128.low64  = lhs * rhs;
    r128.high64 = __umulh(lhs, rhs);
    return r128;

#else
    /*
     * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
     *
     * This is a fast and simple grade school multiply, which is shown below
     * with base 10 arithmetic instead of base 0x100000000.
     *
     *           9 3 // D2 lhs = 93
     *         x 7 5 // D2 rhs = 75
     *     ----------
     *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
     *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
     *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
     *       + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
     *     ---------
     *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
     *       + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
     *     ---------
     *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
     *
     * The reasons for adding the products like this are:
     *  1. It avoids manual carry tracking. Just like how
     *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
     *     This avoids a lot of complexity.
     *
     *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
     *     instruction available in ARM's Digital Signal Processing extension
     *     in 32-bit ARMv6 and later, which is shown below:
     *
     *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
     *         {
     *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
     *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
     *             *RdHi = (xxh_u32)(product >> 32);
     *         }
     *
     *     This instruction was designed for efficient long multiplication, and
     *     allows this to be calculated in only 4 instructions at speeds
     *     comparable to some 64-bit ALUs.
     *
     *  3. It isn't terrible on other platforms. Usually this will be a couple
     *     of 32-bit ADD/ADCs.
     */

    /* First calculate all of the cross products. */
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);

    /* Now add the products together. These will never overflow. */
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

    XXH128_hash_t r128;
    r128.low64  = lower;
    r128.high64 = upper;
    return r128;
#endif
}
  4106. /*!
  4107. * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
  4108. *
  4109. * The reason for the separate function is to prevent passing too many structs
  4110. * around by value. This will hopefully inline the multiply, but we don't force it.
  4111. *
  4112. * @param lhs , rhs The 64-bit integers to multiply
  4113. * @return The low 64 bits of the product XOR'd by the high 64 bits.
  4114. * @see XXH_mult64to128()
  4115. */
  4116. static xxh_u64
  4117. XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
  4118. {
  4119. XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
  4120. return product.low64 ^ product.high64;
  4121. }
  4122. /*! Seems to produce slightly better code on GCC for some reason. */
  4123. XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
  4124. {
  4125. XXH_ASSERT(0 <= shift && shift < 64);
  4126. return v64 ^ (v64 >> shift);
  4127. }
  4128. /*
  4129. * This is a fast avalanche stage,
  4130. * suitable when input bits are already partially mixed
  4131. */
  4132. static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
  4133. {
  4134. h64 = XXH_xorshift64(h64, 37);
  4135. h64 *= PRIME_MX1;
  4136. h64 = XXH_xorshift64(h64, 32);
  4137. return h64;
  4138. }
  4139. /*
  4140. * This is a stronger avalanche,
  4141. * inspired by Pelle Evensen's rrmxmx
  4142. * preferable when input has not been previously mixed
  4143. */
  4144. static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
  4145. {
  4146. /* this mix is inspired by Pelle Evensen's rrmxmx */
  4147. h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
  4148. h64 *= PRIME_MX2;
  4149. h64 ^= (h64 >> 35) + len ;
  4150. h64 *= PRIME_MX2;
  4151. return XXH_xorshift64(h64, 28);
  4152. }
  4153. /* ==========================================
  4154. * Short keys
  4155. * ==========================================
  4156. * One of the shortcomings of XXH32 and XXH64 was that their performance was
  4157. * sub-optimal on short lengths. It used an iterative algorithm which strongly
  4158. * favored lengths that were a multiple of 4 or 8.
  4159. *
  4160. * Instead of iterating over individual inputs, we use a set of single shot
  4161. * functions which piece together a range of lengths and operate in constant time.
  4162. *
  4163. * Additionally, the number of multiplies has been significantly reduced. This
  4164. * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
  4165. *
  4166. * Depending on the platform, this may or may not be faster than XXH32, but it
  4167. * is almost guaranteed to be faster than XXH64.
  4168. */
  4169. /*
  4170. * At very short lengths, there isn't enough input to fully hide secrets, or use
  4171. * the entire secret.
  4172. *
  4173. * There is also only a limited amount of mixing we can do before significantly
  4174. * impacting performance.
  4175. *
  4176. * Therefore, we use different sections of the secret and always mix two secret
  4177. * samples with an XOR. This should have no effect on performance on the
  4178. * seedless or withSeed variants because everything _should_ be constant folded
  4179. * by modern compilers.
  4180. *
  4181. * The XOR mixing hides individual parts of the secret and increases entropy.
  4182. *
  4183. * This adds an extra layer of strength for custom secrets.
  4184. */
  4185. XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
  4186. XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
  4187. {
  4188. XXH_ASSERT(input != NULL);
  4189. XXH_ASSERT(1 <= len && len <= 3);
  4190. XXH_ASSERT(secret != NULL);
  4191. /*
  4192. * len = 1: combined = { input[0], 0x01, input[0], input[0] }
  4193. * len = 2: combined = { input[1], 0x02, input[0], input[1] }
  4194. * len = 3: combined = { input[2], 0x03, input[0], input[1] }
  4195. */
  4196. { xxh_u8 const c1 = input[0];
  4197. xxh_u8 const c2 = input[len >> 1];
  4198. xxh_u8 const c3 = input[len - 1];
  4199. xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
  4200. | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
  4201. xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
  4202. xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
  4203. return XXH64_avalanche(keyed);
  4204. }
  4205. }
  4206. XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
  4207. XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
  4208. {
  4209. XXH_ASSERT(input != NULL);
  4210. XXH_ASSERT(secret != NULL);
  4211. XXH_ASSERT(4 <= len && len <= 8);
  4212. seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
  4213. { xxh_u32 const input1 = XXH_readLE32(input);
  4214. xxh_u32 const input2 = XXH_readLE32(input + len - 4);
  4215. xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
  4216. xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
  4217. xxh_u64 const keyed = input64 ^ bitflip;
  4218. return XXH3_rrmxmx(keyed, len);
  4219. }
  4220. }
  4221. XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
  4222. XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
  4223. {
  4224. XXH_ASSERT(input != NULL);
  4225. XXH_ASSERT(secret != NULL);
  4226. XXH_ASSERT(9 <= len && len <= 16);
  4227. { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
  4228. xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
  4229. xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
  4230. xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
  4231. xxh_u64 const acc = len
  4232. + XXH_swap64(input_lo) + input_hi
  4233. + XXH3_mul128_fold64(input_lo, input_hi);
  4234. return XXH3_avalanche(acc);
  4235. }
  4236. }
  4237. XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
  4238. XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
  4239. {
  4240. XXH_ASSERT(len <= 16);
  4241. { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
  4242. if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
  4243. if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
  4244. return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
  4245. }
  4246. }
  4247. /*
  4248. * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
  4249. * multiplication by zero, affecting hashes of lengths 17 to 240.
  4250. *
  4251. * However, they are very unlikely.
  4252. *
  4253. * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
  4254. * unseeded non-cryptographic hashes, it does not attempt to defend itself
  4255. * against specially crafted inputs, only random inputs.
  4256. *
  4257. * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
  4258. * cancelling out the secret is taken an arbitrary number of times (addressed
  4259. * in XXH3_accumulate_512), this collision is very unlikely with random inputs
  4260. * and/or proper seeding:
  4261. *
  4262. * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
  4263. * function that is only called up to 16 times per hash with up to 240 bytes of
  4264. * input.
  4265. *
  4266. * This is not too bad for a non-cryptographic hash function, especially with
  4267. * only 64 bit outputs.
  4268. *
  4269. * The 128-bit variant (which trades some speed for strength) is NOT affected
  4270. * by this, although it is always a good idea to use a proper seed if you care
  4271. * about strength.
  4272. */
/*!
 * @brief Mixes 16 bytes of input with 16 bytes of secret via a folded 128-bit multiply.
 *
 * Computes mul128_fold64(input[0..7] ^ (secret[0..7] + seed64),
 *                        input[8..15] ^ (secret[8..15] - seed64)).
 *
 * @param input  Pointer to 16 readable bytes of input.
 * @param secret Pointer to 16 readable bytes of secret.
 * @param seed64 Seed; added to the first secret word, subtracted from the second.
 * @return The folded 64-bit mix.
 */
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
    /*
     * UGLY HACK:
     * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
     * slower code.
     *
     * By forcing seed64 into a register, we disrupt the cost model and
     * cause it to scalarize. See `XXH32_round()`
     *
     * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
     * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
     * GCC 9.2, despite both emitting scalar code.
     *
     * GCC generates much better scalar code than Clang for the rest of XXH3,
     * which is why finding a more optimal codepath is an interest.
     */
    XXH_COMPILER_GUARD(seed64);
#endif
    {   xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64 const input_hi = XXH_readLE64(input+8);
        return XXH3_mul128_fold64(
            input_lo ^ (XXH_readLE64(secret)   + seed64),
            input_hi ^ (XXH_readLE64(secret+8) - seed64)
        );
    }
}
/* For mid range keys, XXH3 uses a Mum-hash variant. */
/*!
 * @brief Hashes inputs of 17 to 128 bytes.
 *
 * Mixes the input in 16-byte chunks taken symmetrically from both ends
 * (chunks may overlap), then applies a final avalanche.
 *
 * @param input      Input buffer, 16 < len <= 128.
 * @param len        Input length.
 * @param secret     Secret buffer of at least XXH3_SECRET_SIZE_MIN bytes.
 * @param secretSize Size of @p secret; asserted only, not otherwise used here.
 * @param seed       64-bit seed.
 * @return The resulting 64-bit hash.
 */
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                     XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   xxh_u64 acc = len * XXH_PRIME64_1;
#if XXH_SIZE_OPT >= 1
        /* Smaller and cleaner, but slightly slower. */
        unsigned int i = (unsigned int)(len - 1) / 32;
        do {
            acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
            acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
        } while (i-- != 0);
#else
        /* Unrolled: the widest-reaching pairs are mixed first. */
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc += XXH3_mix16B(input+48, secret+96, seed);
                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
                }
                acc += XXH3_mix16B(input+32, secret+64, seed);
                acc += XXH3_mix16B(input+len-48, secret+80, seed);
            }
            acc += XXH3_mix16B(input+16, secret+32, seed);
            acc += XXH3_mix16B(input+len-32, secret+48, seed);
        }
        acc += XXH3_mix16B(input+0, secret+0, seed);
        acc += XXH3_mix16B(input+len-16, secret+16, seed);
#endif
        return XXH3_avalanche(acc);
    }
}
/*!
 * @brief Hashes inputs of 129 to XXH3_MIDSIZE_MAX (240) bytes.
 *
 * The first 128 bytes are mixed in 16-byte chunks against the start of the
 * secret; the remaining chunks reuse the secret from XXH3_MIDSIZE_STARTOFFSET,
 * and the final 16 bytes use a fixed offset near the end of the secret.
 */
XXH_NO_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

#define XXH3_MIDSIZE_STARTOFFSET 3
#define XXH3_MIDSIZE_LASTOFFSET 17

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        xxh_u64 acc_end;
        unsigned int const nbRounds = (unsigned int)len / 16;
        unsigned int i;
        XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
        /* First 128 bytes: one mix per 16-byte chunk, in lockstep with the secret. */
        for (i=0; i<8; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
        }
        /* last bytes */
        acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
        XXH_ASSERT(nbRounds >= 8);
        acc = XXH3_avalanche(acc);
#if defined(__clang__) /* Clang */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
    && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
        /*
         * UGLY HACK:
         * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
         * In everywhere else, it uses scalar code.
         *
         * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
         * would still be slower than UMAAL (see XXH_mult64to128).
         *
         * Unfortunately, Clang doesn't handle the long multiplies properly and
         * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
         * scalarized into an ugly mess of VMOV.32 instructions.
         *
         * This mess is difficult to avoid without turning autovectorization
         * off completely, but they are usually relatively minor and/or not
         * worth it to fix.
         *
         * This loop is the easiest to fix, as unlike XXH32, this pragma
         * _actually works_ because it is a loop vectorization instead of an
         * SLP vectorization.
         */
        #pragma clang loop vectorize(disable)
#endif
        for (i=8 ; i < nbRounds; i++) {
            /*
             * Prevents clang from unrolling the acc loop and interleaving with this one.
             */
            XXH_COMPILER_GUARD(acc);
            acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
        }
        return XXH3_avalanche(acc + acc_end);
    }
}
  4395. /* ======= Long Keys ======= */
  4396. #define XXH_STRIPE_LEN 64
  4397. #define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
  4398. #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
  4399. #ifdef XXH_OLD_NAMES
  4400. # define STRIPE_LEN XXH_STRIPE_LEN
  4401. # define ACC_NB XXH_ACC_NB
  4402. #endif
  4403. #ifndef XXH_PREFETCH_DIST
  4404. # ifdef __clang__
  4405. # define XXH_PREFETCH_DIST 320
  4406. # else
  4407. # if (XXH_VECTOR == XXH_AVX512)
  4408. # define XXH_PREFETCH_DIST 512
  4409. # else
  4410. # define XXH_PREFETCH_DIST 384
  4411. # endif
  4412. # endif /* __clang__ */
  4413. #endif /* XXH_PREFETCH_DIST */
/*
 * These macros are to generate an XXH3_accumulate() function.
 * The two arguments select the name suffix and target attribute.
 *
 * The name of this symbol is XXH3_accumulate_<name>() and it calls
 * XXH3_accumulate_512_<name>().
 *
 * Each iteration consumes one 64-byte stripe of input and advances the
 * secret by XXH_SECRET_CONSUME_RATE bytes; input is prefetched
 * XXH_PREFETCH_DIST bytes ahead.
 *
 * It may be useful to hand implement this function if the compiler fails to
 * optimize the inline function.
 */
#define XXH3_ACCUMULATE_TEMPLATE(name)                      \
void                                                        \
XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc,           \
                       const xxh_u8* XXH_RESTRICT input,    \
                       const xxh_u8* XXH_RESTRICT secret,   \
                       size_t nbStripes)                    \
{                                                           \
    size_t n;                                               \
    for (n = 0; n < nbStripes; n++ ) {                      \
        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  \
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);               \
        XXH3_accumulate_512_##name(                         \
                 acc,                                       \
                 in,                                        \
                 secret + n*XXH_SECRET_CONSUME_RATE);       \
    }                                                       \
}
  4441. XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
  4442. {
  4443. if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
  4444. XXH_memcpy(dst, &v64, sizeof(v64));
  4445. }
  4446. /* Several intrinsic functions below are supposed to accept __int64 as argument,
  4447. * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
  4448. * However, several environments do not define __int64 type,
  4449. * requiring a workaround.
  4450. */
  4451. #if !defined (__VMS) \
  4452. && (defined (__cplusplus) \
  4453. || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
  4454. typedef int64_t xxh_i64;
  4455. #else
  4456. /* the following type must have a width of 64-bit */
  4457. typedef long long xxh_i64;
  4458. #endif
  4459. /*
  4460. * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
  4461. *
  4462. * It is a hardened version of UMAC, based off of FARSH's implementation.
  4463. *
  4464. * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
  4465. * implementations, and it is ridiculously fast.
  4466. *
  4467. * We harden it by mixing the original input to the accumulators as well as the product.
  4468. *
  4469. * This means that in the (relatively likely) case of a multiply by zero, the
  4470. * original input is preserved.
  4471. *
  4472. * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
  4473. * cross-pollination, as otherwise the upper and lower halves would be
  4474. * essentially independent.
  4475. *
  4476. * This doesn't matter on 64-bit hashes since they all get merged together in
  4477. * the end, so we skip the extra step.
  4478. *
  4479. * Both XXH3_64bits and XXH3_128bits use this subroutine.
  4480. */
  4481. #if (XXH_VECTOR == XXH_AVX512) \
  4482. || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
  4483. #ifndef XXH_TARGET_AVX512
  4484. # define XXH_TARGET_AVX512 /* disable attribute target */
  4485. #endif
/*!
 * @brief Processes one 64-byte stripe with AVX-512, all 8 lanes in one register.
 *
 * Per 64-bit lane: acc += shuffled(input) + lo32(input^secret) * hi32(input^secret).
 * See the generic XXH3_accumulate_512 commentary above for the rationale.
 *
 * @param acc    64-byte aligned accumulator (8 x u64).
 * @param input  64 readable bytes of input (may be unaligned).
 * @param secret 64 readable bytes of secret (may be unaligned).
 */
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    __m512i* const xacc = (__m512i *) acc;
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));

    {
        /* data_vec = input[0]; */
        __m512i const data_vec    = _mm512_loadu_si512   (input);
        /* key_vec = secret[0]; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        /* data_key = data_vec ^ key_vec; */
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
        /* data_key_lo = data_key >> 32; */
        __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32);
        /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
        __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
        /* xacc[0] += swap(data_vec); -- 32-bit pair swap within each 128-bit lane */
        __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
        __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
        /* xacc[0] += product; */
        *xacc = _mm512_add_epi64(product, sum);
    }
}
  4512. XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
  4513. /*
  4514. * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
  4515. *
  4516. * Multiplication isn't perfect, as explained by Google in HighwayHash:
  4517. *
  4518. * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
  4519. * // varying degrees. In descending order of goodness, bytes
  4520. * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
  4521. * // As expected, the upper and lower bytes are much worse.
  4522. *
  4523. * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
  4524. *
  4525. * Since our algorithm uses a pseudorandom secret to add some variance into the
  4526. * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
  4527. *
  4528. * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
  4529. * extraction.
  4530. *
  4531. * Both XXH3_64bits and XXH3_128bits use this subroutine.
  4532. */
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    /*
     * Scrambles the 64-byte accumulator block:
     *   acc = (acc ^ (acc >> 47) ^ secret) * XXH_PRIME32_1
     * `acc` must be 64-byte aligned; `secret` may be unaligned.
     */
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
    {   __m512i* const xacc = (__m512i*) acc;
        const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);

        /* xacc[0] ^= (xacc[0] >> 47) */
        __m512i const acc_vec     = *xacc;
        __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
        /* xacc[0] ^= secret; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        /* AVX-512 ternary logic fuses both XORs into one instruction
         * (truth table 0x96 == three-way XOR). */
        __m512i const data_key    = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);

        /* xacc[0] *= XXH_PRIME32_1; -- a 64x32-bit multiply emulated with
         * two 32x32->64 multiplies, since there is no epi64 multiply here. */
        __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32);
        __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
        __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
        *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
    }
}
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /*
     * Derives a seeded custom secret from the default secret by adding
     * alternating (+seed64, -seed64) to consecutive 64-bit lanes,
     * 64 bytes per iteration. `customSecret` must be 64-byte aligned.
     */
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
    XXH_ASSERT(((size_t)customSecret & 63) == 0);
    (void)(&XXH_writeLE64); /* reference it to avoid unused warnings on this path */
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
        __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
        /* negate the seed in odd 64-bit lanes only (mask 0xAA = 0b10101010) */
        __m512i const seed     = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);

        const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
              __m512i* const dest = (      __m512i*) customSecret;
        int i;
        XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dest & 63) == 0);
        for (i=0; i < nbRounds; ++i) {
            dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
    }   }
}
  4572. #endif
  4573. #if (XXH_VECTOR == XXH_AVX2) \
  4574. || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
  4575. #ifndef XXH_TARGET_AVX2
  4576. # define XXH_TARGET_AVX2 /* disable attribute target */
  4577. #endif
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /*
     * Accumulates one 64-byte stripe using two YMM iterations.
     * Same algorithm as the AVX-512 path, at half width per step.
     * `acc` must be 32-byte aligned; `input`/`secret` may be unaligned.
     */
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   __m256i* const xacc = (__m256i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const __m256i* const xinput  = (const __m256i *) input;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const __m256i* const xsecret = (const __m256i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* data_vec = xinput[i]; */
            __m256i const data_vec    = _mm256_loadu_si256    (xinput+i);
            /* key_vec = xsecret[i]; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            /* data_key = data_vec ^ key_vec; */
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m256i const data_key_lo = _mm256_srli_epi64    (data_key, 32);
            /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); -- keeps input entropy alive when
             * the multiply result is zero. */
            __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
            __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm256_add_epi64(product, sum);
    }   }
}
  4610. XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    /*
     * Scrambles the accumulators, two YMM lanes per iteration:
     *   acc = (acc ^ (acc >> 47) ^ secret) * XXH_PRIME32_1
     * `acc` must be 32-byte aligned; `secret` may be unaligned.
     */
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   __m256i* const xacc = (__m256i*) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const __m256i* const xsecret = (const __m256i *) secret;
        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m256i const acc_vec     = xacc[i];
            __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
            __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
            /* xacc[i] ^= xsecret; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1; -- emulated 64x32 multiply from two
             * 32x32->64 multiplies. */
            __m256i const data_key_hi = _mm256_srli_epi64    (data_key, 32);
            __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
            __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
        }
    }
}
XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /*
     * Derives a seeded custom secret: adds alternating (+seed64, -seed64)
     * 64-bit lanes to the default secret, 32 bytes at a time.
     */
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
    (void)(&XXH_writeLE64); /* reference it to avoid unused warnings on this path */
    XXH_PREFETCH(customSecret);
    {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);

        const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
              __m256i*       dest = (      __m256i*) customSecret;

#       if defined(__GNUC__) || defined(__clang__)
        /*
         * On GCC & Clang, marking 'dest' as modified causes the compiler to:
         * - keep the secret in SIMD registers inside the loop instead of
         *   extracting it;
         * - use fewer scratch registers, avoiding spills to the stack.
         */
        XXH_COMPILER_GUARD(dest);
#       endif
        XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dest & 31) == 0);

        /* GCC -O2 need unroll loop manually */
        dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
        dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
        dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
        dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
        dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
        dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
    }
}
  4666. #endif
  4667. /* x86dispatch always generates SSE2 */
  4668. #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
  4669. #ifndef XXH_TARGET_SSE2
  4670. # define XXH_TARGET_SSE2 /* disable attribute target */
  4671. #endif
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* SSE2 is just a half-scale version of the AVX2 version:
     * four XMM iterations per 64-byte stripe.
     * `acc` must be 16-byte aligned; `input`/`secret` may be unaligned. */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   __m128i* const xacc = (__m128i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const __m128i* const xinput  = (const __m128i *) input;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const __m128i* const xsecret = (const __m128i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* data_vec = xinput[i]; */
            __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
            /* key_vec = xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            /* data_key = data_vec ^ key_vec; */
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; -- done with a shuffle since SSE2
             * has no 64-bit shift advantage here; _mm_mul_epu32 only reads the
             * low 32 bits of each 64-bit lane anyway. */
            __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); -- preserves input on multiply-by-zero */
            __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
            __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm_add_epi64(product, sum);
    }   }
}
  4705. XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    /*
     * Scrambles the accumulators, one XMM lane pair per iteration:
     *   acc = (acc ^ (acc >> 47) ^ secret) * XXH_PRIME32_1
     * `acc` must be 16-byte aligned; `secret` may be unaligned.
     */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   __m128i* const xacc = (__m128i*) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const __m128i* const xsecret = (const __m128i *) secret;
        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m128i const acc_vec     = xacc[i];
            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
            /* xacc[i] ^= xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1; -- emulated 64x32 multiply; the high
             * half is moved into position with a shuffle (see accumulate). */
            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
        }
    }
}
XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /*
     * Derives a seeded custom secret: adds alternating (+seed64, -seed64)
     * 64-bit lanes to the default secret, 16 bytes at a time.
     */
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
    (void)(&XXH_writeLE64); /* reference it to avoid unused warnings on this path */
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);

#       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
        /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
#       else
        __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
#       endif
        int i;

        const void* const src16 = XXH3_kSecret;
        __m128i* dst16 = (__m128i*) customSecret;
#       if defined(__GNUC__) || defined(__clang__)
        /*
         * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
         * - keep the secret in SSE registers inside the loop instead of
         *   extracting it;
         * - use fewer scratch registers, avoiding spills to the stack.
         */
        XXH_COMPILER_GUARD(dst16);
#       endif
        XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dst16 & 15) == 0);

        for (i=0; i < nbRounds; ++i) {
            dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
    }   }
}
  4761. #endif
  4762. #if (XXH_VECTOR == XXH_NEON)
  4763. /* forward declarations for the scalar routines */
  4764. XXH_FORCE_INLINE void
  4765. XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
  4766. void const* XXH_RESTRICT secret, size_t lane);
  4767. XXH_FORCE_INLINE void
  4768. XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
  4769. void const* XXH_RESTRICT secret, size_t lane);
  4770. /*!
  4771. * @internal
  4772. * @brief The bulk processing loop for NEON and WASM SIMD128.
  4773. *
  4774. * The NEON code path is actually partially scalar when running on AArch64. This
  4775. * is to optimize the pipelining and can have up to 15% speedup depending on the
  4776. * CPU, and it also mitigates some GCC codegen issues.
  4777. *
  4778. * @see XXH3_NEON_LANES for configuring this and details about this optimization.
  4779. *
  4780. * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
  4781. * integers instead of the other platforms which mask full 64-bit vectors,
  4782. * so the setup is more complicated than just shifting right.
  4783. *
  4784. * Additionally, there is an optimization for 4 lanes at once noted below.
  4785. *
  4786. * Since, as stated, the most optimal amount of lanes for Cortexes is 6,
  4787. * there needs to be *three* versions of the accumulate operation used
  4788. * for the remaining 2 lanes.
  4789. *
  4790. * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap
  4791. * nearly perfectly.
  4792. */
XXH_FORCE_INLINE void
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /*
     * Accumulates one 64-byte stripe. The first XXH3_NEON_LANES lanes are
     * handled with NEON (4 at a time where possible, then 2 at a time);
     * any remaining lanes fall back to XXH3_scalarRound, which pipelines
     * better on AArch64 (see the block comment above this function).
     */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
    {   /* GCC for darwin arm64 does not like aliasing here */
        xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc;
        /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
        uint8_t const* xinput = (const uint8_t *) input;
        uint8_t const* xsecret  = (const uint8_t *) secret;

        size_t i;
#ifdef __wasm_simd128__
        /*
         * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret
         * is constant propagated, which results in it converting it to this
         * inside the loop:
         *
         *    a = v128.load(XXH3_kSecret +  0 + $secret_offset, offset = 0)
         *    b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0)
         *    ...
         *
         * This requires a full 32-bit address immediate (and therefore a 6 byte
         * instruction) as well as an add for each offset.
         *
         * Putting an asm guard prevents it from folding (at the cost of losing
         * the alignment hint), and uses the free offset in `v128.load` instead
         * of adding secret_offset each time which overall reduces code size by
         * about a kilobyte and improves performance.
         */
        XXH_COMPILER_GUARD(xsecret);
#endif
        /* Scalar lanes use the normal scalarRound routine */
        for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
            XXH3_scalarRound(acc, input, secret, i);
        }
        i = 0;
        /* 4 NEON lanes at a time. */
        for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
            /* data_vec = xinput[i]; */
            uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput  + (i * 16));
            uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput  + ((i+1) * 16));
            /* key_vec  = xsecret[i];  */
            uint64x2_t key_vec_1  = XXH_vld1q_u64(xsecret + (i * 16));
            uint64x2_t key_vec_2  = XXH_vld1q_u64(xsecret + ((i+1) * 16));
            /* data_swap = swap(data_vec) */
            uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
            uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
            /* data_key = data_vec ^ key_vec; */
            uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
            uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);

            /*
             * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a
             * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to
             * get one vector with the low 32 bits of each lane, and one vector
             * with the high 32 bits of each lane.
             *
             * The intrinsic returns a double vector because the original ARMv7-a
             * instruction modified both arguments in place. AArch64 and SIMD128 emit
             * two instructions from this intrinsic.
             *
             *  [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ]
             *  [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ]
             */
            uint32x4x2_t unzipped = vuzpq_u32(
                vreinterpretq_u32_u64(data_key_1),
                vreinterpretq_u32_u64(data_key_2)
            );
            /* data_key_lo = data_key & 0xFFFFFFFF */
            uint32x4_t data_key_lo = unzipped.val[0];
            /* data_key_hi = data_key >> 32 */
            uint32x4_t data_key_hi = unzipped.val[1];
            /*
             * Then, we can split the vectors horizontally and multiply which, as for most
             * widening intrinsics, have a variant that works on both high half vectors
             * for free on AArch64. A similar instruction is available on SIMD128.
             *
             * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi
             */
            uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi);
            uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi);
            /*
             * Clang reorders
             *    a += b * c;     // umlal   swap.2d, dkl.2s, dkh.2s
             *    c += a;         // add     acc.2d, acc.2d, swap.2d
             * to
             *    c += a;         // add     acc.2d, acc.2d, swap.2d
             *    c += b * c;     // umlal   acc.2d, dkl.2s, dkh.2s
             *
             * While it would make sense in theory since the addition is faster,
             * for reasons likely related to umlal being limited to certain NEON
             * pipelines, this is worse. A compiler guard fixes this.
             */
            XXH_COMPILER_GUARD_CLANG_NEON(sum_1);
            XXH_COMPILER_GUARD_CLANG_NEON(sum_2);
            /* xacc[i] = acc_vec + sum; */
            xacc[i]   = vaddq_u64(xacc[i], sum_1);
            xacc[i+1] = vaddq_u64(xacc[i+1], sum_2);
        }
        /* Operate on the remaining NEON lanes 2 at a time. */
        for (; i < XXH3_NEON_LANES / 2; i++) {
            /* data_vec = xinput[i]; */
            uint64x2_t data_vec = XXH_vld1q_u64(xinput  + (i * 16));
            /* key_vec  = xsecret[i];  */
            uint64x2_t key_vec  = XXH_vld1q_u64(xsecret + (i * 16));
            /* acc_vec_2 = swap(data_vec) */
            uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1);
            /* data_key = data_vec ^ key_vec; */
            uint64x2_t data_key = veorq_u64(data_vec, key_vec);
            /* For two lanes, just use VMOVN and VSHRN. */
            /* data_key_lo = data_key & 0xFFFFFFFF; */
            uint32x2_t data_key_lo = vmovn_u64(data_key);
            /* data_key_hi = data_key >> 32; */
            uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32);
            /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */
            uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi);
            /* Same Clang workaround as before */
            XXH_COMPILER_GUARD_CLANG_NEON(sum);
            /* xacc[i] = acc_vec + sum; */
            xacc[i] = vaddq_u64 (xacc[i], sum);
        }
    }
}
  4917. XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
XXH_FORCE_INLINE void
XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    /*
     * Scrambles the accumulators:
     *   acc = (acc ^ (acc >> 47) ^ secret) * XXH_PRIME32_1
     * NEON handles XXH3_NEON_LANES lanes; the remainder uses the scalar
     * routine, mirroring the split used by the accumulate step.
     */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc;
        uint8_t const* xsecret = (uint8_t const*) secret;

        size_t i;
        /* WASM uses operator overloads and doesn't need these. */
#ifndef __wasm_simd128__
        /* { prime32_1, prime32_1 } */
        uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1);
        /* { 0, prime32_1, 0, prime32_1 } */
        uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32));
#endif

        /* AArch64 uses both scalar and neon at the same time */
        for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
            XXH3_scalarScrambleRound(acc, secret, i);
        }
        for (i=0; i < XXH3_NEON_LANES / 2; i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            uint64x2_t acc_vec  = xacc[i];
            uint64x2_t shifted  = vshrq_n_u64(acc_vec, 47);
            uint64x2_t data_vec = veorq_u64(acc_vec, shifted);

            /* xacc[i] ^= xsecret[i]; */
            uint64x2_t key_vec  = XXH_vld1q_u64(xsecret + (i * 16));
            uint64x2_t data_key = veorq_u64(data_vec, key_vec);
            /* xacc[i] *= XXH_PRIME32_1 */
#ifdef __wasm_simd128__
    /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */
            xacc[i] = data_key * XXH_PRIME32_1;
#else
            /*
             * Expanded version with portable NEON intrinsics
             *
             *    lo(x) * lo(y) + (hi(x) * lo(y) << 32)
             *
             * prod_hi = hi(data_key) * lo(prime) << 32
             *
             * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector
             * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits
             * and avoid the shift.
             */
            uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi);
            /* Extract low bits for vmlal_u32  */
            uint32x2_t data_key_lo = vmovn_u64(data_key);
            /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */
            xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
#endif
        }
    }
}
  4969. #endif
  4970. #if (XXH_VECTOR == XXH_VSX)
XXH_FORCE_INLINE void
XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /*
     * POWER VSX / s390x vector accumulate: one 64-byte stripe,
     * four 16-byte vectors per call.
     */
    /* presumed aligned */
    xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
    xxh_u8 const* const xinput   = (xxh_u8 const*) input;   /* no alignment restriction */
    xxh_u8 const* const xsecret  = (xxh_u8 const*) secret;    /* no alignment restriction */
    xxh_u64x2 const v32 = { 32, 32 };
    size_t i;
    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
        /* data_vec = xinput[i]; */
        xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i);
        /* key_vec = xsecret[i]; */
        xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + 16*i);
        xxh_u64x2 const data_key = data_vec ^ key_vec;
        /* shuffled = (data_key << 32) | (data_key >> 32); -- rotate each 64-bit
         * lane by 32 so odd-multiply sees the other half. */
        xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
        /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
        xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
        /* acc_vec = xacc[i]; */
        xxh_u64x2 acc_vec        = xacc[i];
        acc_vec += product;

        /* swap high and low halves of the input and add them in, preserving
         * entropy when the multiply result is zero */
#ifdef __s390x__
        acc_vec += vec_permi(data_vec, data_vec, 2);
#else
        acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
#endif
        xacc[i] = acc_vec;
    }
}
  5004. XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
XXH_FORCE_INLINE void
XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    /*
     * Scrambles the accumulators:
     *   acc = (acc ^ (acc >> 47) ^ secret) * XXH_PRIME32_1
     * The 64x32 multiply is built from vec_mule/vec_mulo (even/odd 32-bit
     * element multiplies); which of the two holds the "low" product depends
    * on element numbering for the platform endianness -- NOTE(review): the
     * prod_even/prod_odd naming vs. the prod_lo/prod_hi comments reflects
     * this, verify against upstream if modifying.
     */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {         xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
        const xxh_u8* const xsecret = (const xxh_u8*) secret;
        /* constants */
        xxh_u64x2 const v32  = { 32, 32 };
        xxh_u64x2 const v47 = { 47, 47 };
        xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
        size_t i;
        for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            xxh_u64x2 const acc_vec  = xacc[i];
            xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);

            /* xacc[i] ^= xsecret[i]; */
            xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + 16*i);
            xxh_u64x2 const data_key = data_vec ^ key_vec;

            /* xacc[i] *= XXH_PRIME32_1 */
            /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF);  */
            xxh_u64x2 const prod_even  = XXH_vec_mule((xxh_u32x4)data_key, prime);
            /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32);  */
            xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
            xacc[i] = prod_odd + (prod_even << v32);
    }   }
}
  5031. #endif
  5032. #if (XXH_VECTOR == XXH_SVE)
XXH_FORCE_INLINE void
XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
                   const void* XXH_RESTRICT input,
                   const void* XXH_RESTRICT secret)
{
    /*
     * SVE accumulate for one 64-byte stripe. The per-lane round itself is
     * performed by the ACCRND() macro, defined earlier in this file (outside
     * this view); it presumably consumes xinput, xsecret and kSwap -- the
     * locals below exist for it. The code picks a layout based on the
     * hardware vector length (svcntd = number of 64-bit elements):
     *  >= 8 elements: all 8 accumulators in one vector;
     *  == 2 (SVE128): four 2-element vectors;
     *  otherwise (SVE256): two 4-element vectors.
     */
    uint64_t *xacc = (uint64_t *)acc;
    const uint64_t *xinput = (const uint64_t *)(const void *)input;
    const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
    svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
    uint64_t element_count = svcntd();
    if (element_count >= 8) {
        svbool_t mask = svptrue_pat_b64(SV_VL8);
        svuint64_t vacc = svld1_u64(mask, xacc);
        ACCRND(vacc, 0);
        svst1_u64(mask, xacc, vacc);
    } else if (element_count == 2) {   /* sve128 */
        svbool_t mask = svptrue_pat_b64(SV_VL2);
        svuint64_t acc0 = svld1_u64(mask, xacc + 0);
        svuint64_t acc1 = svld1_u64(mask, xacc + 2);
        svuint64_t acc2 = svld1_u64(mask, xacc + 4);
        svuint64_t acc3 = svld1_u64(mask, xacc + 6);
        ACCRND(acc0, 0);
        ACCRND(acc1, 2);
        ACCRND(acc2, 4);
        ACCRND(acc3, 6);
        svst1_u64(mask, xacc + 0, acc0);
        svst1_u64(mask, xacc + 2, acc1);
        svst1_u64(mask, xacc + 4, acc2);
        svst1_u64(mask, xacc + 6, acc3);
    } else {
        svbool_t mask = svptrue_pat_b64(SV_VL4);
        svuint64_t acc0 = svld1_u64(mask, xacc + 0);
        svuint64_t acc1 = svld1_u64(mask, xacc + 4);
        ACCRND(acc0, 0);
        ACCRND(acc1, 4);
        svst1_u64(mask, xacc + 0, acc0);
        svst1_u64(mask, xacc + 4, acc1);
    }
}
XXH_FORCE_INLINE void
XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
               const xxh_u8* XXH_RESTRICT input,
               const xxh_u8* XXH_RESTRICT secret,
               size_t nbStripes)
{
    /*
     * Bulk SVE loop: processes nbStripes consecutive 64-byte stripes,
     * keeping the accumulators resident in vector registers for the whole
     * run and only storing them once at the end. Same vector-length
     * dispatch as XXH3_accumulate_512_sve; each iteration prefetches the
     * input two stripes ahead (xinput + 128 u64s = 1 KiB) for streaming.
     * ACCRND() is defined earlier in this file, outside this view.
     */
    if (nbStripes != 0) {
        uint64_t *xacc = (uint64_t *)acc;
        const uint64_t *xinput = (const uint64_t *)(const void *)input;
        const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
        svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
        uint64_t element_count = svcntd();
        if (element_count >= 8) {
            svbool_t mask = svptrue_pat_b64(SV_VL8);
            svuint64_t vacc = svld1_u64(mask, xacc + 0);
            do {
                /* svprfd(svbool_t, void *, enum svfprop); */
                svprfd(mask, xinput + 128, SV_PLDL1STRM);
                ACCRND(vacc, 0);
                xinput += 8;
                xsecret += 1;
                nbStripes--;
            } while (nbStripes != 0);

            svst1_u64(mask, xacc + 0, vacc);
        } else if (element_count == 2) { /* sve128 */
            svbool_t mask = svptrue_pat_b64(SV_VL2);
            svuint64_t acc0 = svld1_u64(mask, xacc + 0);
            svuint64_t acc1 = svld1_u64(mask, xacc + 2);
            svuint64_t acc2 = svld1_u64(mask, xacc + 4);
            svuint64_t acc3 = svld1_u64(mask, xacc + 6);
            do {
                svprfd(mask, xinput + 128, SV_PLDL1STRM);
                ACCRND(acc0, 0);
                ACCRND(acc1, 2);
                ACCRND(acc2, 4);
                ACCRND(acc3, 6);
                xinput += 8;
                xsecret += 1;
                nbStripes--;
            } while (nbStripes != 0);

            svst1_u64(mask, xacc + 0, acc0);
            svst1_u64(mask, xacc + 2, acc1);
            svst1_u64(mask, xacc + 4, acc2);
            svst1_u64(mask, xacc + 6, acc3);
        } else {
            svbool_t mask = svptrue_pat_b64(SV_VL4);
            svuint64_t acc0 = svld1_u64(mask, xacc + 0);
            svuint64_t acc1 = svld1_u64(mask, xacc + 4);
            do {
                svprfd(mask, xinput + 128, SV_PLDL1STRM);
                ACCRND(acc0, 0);
                ACCRND(acc1, 4);
                xinput += 8;
                xsecret += 1;
                nbStripes--;
            } while (nbStripes != 0);

            svst1_u64(mask, xacc + 0, acc0);
            svst1_u64(mask, xacc + 4, acc1);
        }
    }
}
  5133. #endif
  5134. /* scalar variants - universal */
  5135. #if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__))
  5136. /*
  5137. * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they
  5138. * emit an excess mask and a full 64-bit multiply-add (MADD X-form).
  5139. *
  5140. * While this might not seem like much, as AArch64 is a 64-bit architecture, only
  5141. * big Cortex designs have a full 64-bit multiplier.
  5142. *
  5143. * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit
  5144. * multiplies expand to 2-3 multiplies in microcode. This has a major penalty
  5145. * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline.
  5146. *
  5147. * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does
  5148. * not have this penalty and does the mask automatically.
  5149. */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
{
    /*
     * Returns (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF) + acc, using UMADDL
     * so the mask and the full 64-bit multiply are avoided entirely
     * (see the rationale in the block comment above).
     */
    xxh_u64 ret;
    /* note: %x = 64-bit register, %w = 32-bit register */
    __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc));
    return ret;
}
  5158. #else
  5159. XXH_FORCE_INLINE xxh_u64
  5160. XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
  5161. {
  5162. return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc;
  5163. }
  5164. #endif
  5165. /*!
  5166. * @internal
  5167. * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
  5168. *
  5169. * This is extracted to its own function because the NEON path uses a combination
  5170. * of NEON and scalar.
  5171. */
  5172. XXH_FORCE_INLINE void
  5173. XXH3_scalarRound(void* XXH_RESTRICT acc,
  5174. void const* XXH_RESTRICT input,
  5175. void const* XXH_RESTRICT secret,
  5176. size_t lane)
  5177. {
  5178. xxh_u64* xacc = (xxh_u64*) acc;
  5179. xxh_u8 const* xinput = (xxh_u8 const*) input;
  5180. xxh_u8 const* xsecret = (xxh_u8 const*) secret;
  5181. XXH_ASSERT(lane < XXH_ACC_NB);
  5182. XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
  5183. {
  5184. xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
  5185. xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
  5186. xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
  5187. xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]);
  5188. }
  5189. }
  5190. /*!
  5191. * @internal
  5192. * @brief Processes a 64 byte block of data using the scalar path.
  5193. */
  5194. XXH_FORCE_INLINE void
  5195. XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
  5196. const void* XXH_RESTRICT input,
  5197. const void* XXH_RESTRICT secret)
  5198. {
  5199. size_t i;
  5200. /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
  5201. #if defined(__GNUC__) && !defined(__clang__) \
  5202. && (defined(__arm__) || defined(__thumb2__)) \
  5203. && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
  5204. && XXH_SIZE_OPT <= 0
  5205. # pragma GCC unroll 8
  5206. #endif
  5207. for (i=0; i < XXH_ACC_NB; i++) {
  5208. XXH3_scalarRound(acc, input, secret, i);
  5209. }
  5210. }
/* Instantiates the multi-stripe accumulate loop for the scalar path —
 * presumably expands to XXH3_accumulate_scalar(); template defined earlier in the file. */
XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
  5212. /*!
  5213. * @internal
  5214. * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
  5215. *
  5216. * This is extracted to its own function because the NEON path uses a combination
  5217. * of NEON and scalar.
  5218. */
  5219. XXH_FORCE_INLINE void
  5220. XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
  5221. void const* XXH_RESTRICT secret,
  5222. size_t lane)
  5223. {
  5224. xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
  5225. const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
  5226. XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
  5227. XXH_ASSERT(lane < XXH_ACC_NB);
  5228. {
  5229. xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
  5230. xxh_u64 acc64 = xacc[lane];
  5231. acc64 = XXH_xorshift64(acc64, 47);
  5232. acc64 ^= key64;
  5233. acc64 *= XXH_PRIME32_1;
  5234. xacc[lane] = acc64;
  5235. }
  5236. }
  5237. /*!
  5238. * @internal
  5239. * @brief Scrambles the accumulators after a large chunk has been read
  5240. */
  5241. XXH_FORCE_INLINE void
  5242. XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
  5243. {
  5244. size_t i;
  5245. for (i=0; i < XXH_ACC_NB; i++) {
  5246. XXH3_scalarScrambleRound(acc, secret, i);
  5247. }
  5248. }
/*
 * Derives a seed-customized secret from the default secret:
 * for every 16-byte chunk, writes (lo + seed64, hi - seed64).
 */
XXH_FORCE_INLINE void
XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /*
     * We need a separate pointer for the hack below,
     * which requires a non-const pointer.
     * Any decent compiler will optimize this out otherwise.
     */
    const xxh_u8* kSecretPtr = XXH3_kSecret;
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);

#if defined(__GNUC__) && defined(__aarch64__)
    /*
     * UGLY HACK:
     * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
     * placed sequentially, in order, at the top of the unrolled loop.
     *
     * While MOVK is great for generating constants (2 cycles for a 64-bit
     * constant compared to 4 cycles for LDR), it fights for bandwidth with
     * the arithmetic instructions.
     *
     *   I   L   S
     * MOVK
     * MOVK
     * MOVK
     * MOVK
     * ADD
     * SUB      STR
     *          STR
     * By forcing loads from memory (as the asm line causes the compiler to assume
     * that XXH3_kSecretPtr has been changed), the pipelines are used more
     * efficiently:
     *   I   L   S
     *      LDR
     * ADD  LDR
     * SUB      STR
     *          STR
     *
     * See XXH3_NEON_LANES for details on the pipeline.
     *
     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
     * without hack: 2654.4 MB/s
     * with hack:    3202.9 MB/s
     */
    XXH_COMPILER_GUARD(kSecretPtr);
#endif
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
        int i;
        for (i=0; i < nbRounds; i++) {
            /*
             * The asm hack causes the compiler to assume that kSecretPtr aliases with
             * customSecret, and on aarch64, this prevented LDP from merging two
             * loads together for free. Putting the loads together before the stores
             * properly generates LDP.
             */
            xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
            xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
            XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
            XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
    }   }
}
/* Function-pointer types used to select an accumulate / scramble / secret-init
 * implementation (passed through the hashLong / streaming entry points). */
typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t);
typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
/* Select the concrete kernel implementations based on the vector ISA chosen
 * at compile time (XXH_VECTOR). NEON/VSX/SVE keep the scalar secret-init;
 * SVE also keeps the scalar scramble. */
#if (XXH_VECTOR == XXH_AVX512)
#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
#define XXH3_accumulate     XXH3_accumulate_avx512
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx512
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512

#elif (XXH_VECTOR == XXH_AVX2)
#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
#define XXH3_accumulate     XXH3_accumulate_avx2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx2
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2

#elif (XXH_VECTOR == XXH_SSE2)
#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
#define XXH3_accumulate     XXH3_accumulate_sse2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_sse2
#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2

#elif (XXH_VECTOR == XXH_NEON)
#define XXH3_accumulate_512 XXH3_accumulate_512_neon
#define XXH3_accumulate     XXH3_accumulate_neon
#define XXH3_scrambleAcc    XXH3_scrambleAcc_neon
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#elif (XXH_VECTOR == XXH_VSX)
#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
#define XXH3_accumulate     XXH3_accumulate_vsx
#define XXH3_scrambleAcc    XXH3_scrambleAcc_vsx
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#elif (XXH_VECTOR == XXH_SVE)
#define XXH3_accumulate_512 XXH3_accumulate_512_sve
#define XXH3_accumulate     XXH3_accumulate_sve
#define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#else /* scalar */
#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
#define XXH3_accumulate     XXH3_accumulate_scalar
#define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#endif

#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
#  undef XXH3_initCustomSecret
#  define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#endif
/*
 * Main long-input loop: consumes @p input in full blocks (accumulate then
 * scramble), then the trailing partial block, then one final (possibly
 * overlapping) stripe taken from the very end of the input.
 * Requires len > XXH_STRIPE_LEN and secretSize >= XXH3_SECRET_SIZE_MIN.
 */
XXH_FORCE_INLINE void
XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
                            const xxh_u8* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate f_acc,
                            XXH3_f_scrambleAcc f_scramble)
{
    /* Each stripe advances the secret by XXH_SECRET_CONSUME_RATE bytes;
     * a block is as many stripes as the secret allows before scrambling. */
    size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
    size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
    size_t const nb_blocks = (len - 1) / block_len;   /* len-1: last stripe handled separately */
    size_t n;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    for (n = 0; n < nb_blocks; n++) {
        f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
        f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
    }
    /* last partial block */
    XXH_ASSERT(len > XXH_STRIPE_LEN);
    {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
        XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
        f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);
        /* last stripe: always read from the final XXH_STRIPE_LEN input bytes,
         * which may overlap the bytes already consumed above */
        {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
            XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
    }   }
}
  5379. XXH_FORCE_INLINE xxh_u64
  5380. XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
  5381. {
  5382. return XXH3_mul128_fold64(
  5383. acc[0] ^ XXH_readLE64(secret),
  5384. acc[1] ^ XXH_readLE64(secret+8) );
  5385. }
/*
 * Merges the 8 accumulator lanes into one 64-bit value:
 * 4 rounds of XXH3_mix2Accs() (2 lanes + 16 secret bytes each) summed
 * into @p start, then avalanched.
 */
static XXH64_hash_t
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
{
    xxh_u64 result64 = start;
    size_t i = 0;
    for (i = 0; i < 4; i++) {
        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
         * XXH3_64bits, len == 256, Snapdragon 835:
         * without hack: 2063.7 MB/s
         * with hack:    2560.7 MB/s
         */
        XXH_COMPILER_GUARD(result64);
#endif
    }
    return XXH3_avalanche(result64);
}
/* Initial state of the 8 accumulator lanes: a mix of 32- and 64-bit primes. */
#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
/*
 * Core 64-bit long-input hash: runs the accumulate/scramble loop over the
 * whole input, then merges the accumulators into the final hash.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
                           const void* XXH_RESTRICT secret, size_t secretSize,
                           XXH3_f_accumulate f_acc,
                           XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    /* do not align on 8, so that the secret is different from the accumulator */
#define XXH_SECRET_MERGEACCS_START 11
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
}
/*
 * It's important for performance to transmit secret's size (when it's static)
 * so that the compiler can properly optimize the vectorized loop.
 * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set.
 * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
 * breaks -Og, this is XXH_NO_INLINE.
 */
XXH3_WITH_SECRET_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;  /* signature imposed by XXH3_hashLong64_f; seed unused with a custom secret */
    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
}
/*
 * It's preferable for performance that XXH3_hashLong is not inlined,
 * as it results in a smaller function for small data, easier on the instruction cache.
 * Note that inside this no_inline function, we do inline the internal loop,
 * and provide a statically defined secret size to allow optimization of vector loop.
 */
XXH_NO_INLINE XXH_PUREF XXH64_hash_t
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
                          XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;  /* default path: seed 0, built-in secret */
    return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
}
/*
 * XXH3_hashLong_64b_withSeed():
 * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
 * and then use this key for long mode hashing.
 *
 * This operation is decently fast but nonetheless costs a little bit of time.
 * Try to avoid it whenever possible (typically when seed==0).
 *
 * It's important for performance that XXH3_hashLong is not inlined. Not sure
 * why (uop cache maybe?), but the difference is large and easily measurable.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
                                    XXH64_hash_t seed,
                                    XXH3_f_accumulate f_acc,
                                    XXH3_f_scrambleAcc f_scramble,
                                    XXH3_f_initCustomSecret f_initSec)
{
#if XXH_SIZE_OPT <= 0
    /* Fast path: seed 0 uses the default secret directly, skipping f_initSec. */
    if (seed == 0)
        return XXH3_hashLong_64b_internal(input, len,
                                          XXH3_kSecret, sizeof(XXH3_kSecret),
                                          f_acc, f_scramble);
#endif
    {   /* Derive a seed-customized secret on the stack, then hash with it. */
        XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed);
        return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
                                          f_acc, f_scramble);
    }
}
/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;  /* seeded path derives its own secret */
    return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
                XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
/* Long-input (> XXH3_MIDSIZE_MAX) 64-bit hash entry point, selected by caller. */
typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
/*
 * Common 64-bit front end: dispatches on input length to the short-input
 * specializations, or to @p f_hashLong for inputs above XXH3_MIDSIZE_MAX.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken if `secretLen` condition is not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     * Also, note that function signature doesn't offer room to return an error.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}
/* === Public entry point === */

/*! @ingroup XXH3_family */
/* One-shot 64-bit hash with default secret and seed 0. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
{
    return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}
/*! @ingroup XXH3_family */
/* One-shot 64-bit hash with a caller-provided secret (>= XXH3_SECRET_SIZE_MIN bytes). */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}
/*! @ingroup XXH3_family */
/* One-shot 64-bit hash with a seed; long inputs derive a custom secret from it. */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
{
    return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
}
/* One-shot 64-bit hash combining a secret and a seed:
 * short/mid inputs use the seed (with the default secret),
 * long inputs use the provided secret. */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (length <= XXH3_MIDSIZE_MAX)
        /* f_hashLong == NULL is safe: the long path is unreachable here */
        return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
}
  5543. /* === XXH3 streaming === */
  5544. #ifndef XXH_NO_STREAM
  5545. /*
  5546. * Malloc's a pointer that is always aligned to @align.
  5547. *
  5548. * This must be freed with `XXH_alignedFree()`.
  5549. *
  5550. * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
  5551. * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2
  5552. * or on 32-bit, the 16 byte aligned loads in SSE2 and NEON.
  5553. *
  5554. * This underalignment previously caused a rather obvious crash which went
  5555. * completely unnoticed due to XXH3_createState() not actually being tested.
  5556. * Credit to RedSpah for noticing this bug.
  5557. *
  5558. * The alignment is done manually: Functions like posix_memalign or _mm_malloc
  5559. * are avoided: To maintain portability, we would have to write a fallback
  5560. * like this anyways, and besides, testing for the existence of library
  5561. * functions without relying on external build tools is impossible.
  5562. *
  5563. * The method is simple: Overallocate, manually align, and store the offset
  5564. * to the original behind the returned pointer.
  5565. *
  5566. * Align must be a power of 2 and 8 <= align <= 128.
  5567. */
  5568. static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
  5569. {
  5570. XXH_ASSERT(align <= 128 && align >= 8); /* range check */
  5571. XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
  5572. XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
  5573. { /* Overallocate to make room for manual realignment and an offset byte */
  5574. xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
  5575. if (base != NULL) {
  5576. /*
  5577. * Get the offset needed to align this pointer.
  5578. *
  5579. * Even if the returned pointer is aligned, there will always be
  5580. * at least one byte to store the offset to the original pointer.
  5581. */
  5582. size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
  5583. /* Add the offset for the now-aligned pointer */
  5584. xxh_u8* ptr = base + offset;
  5585. XXH_ASSERT((size_t)ptr % align == 0);
  5586. /* Store the offset immediately before the returned pointer. */
  5587. ptr[-1] = (xxh_u8)offset;
  5588. return ptr;
  5589. }
  5590. return NULL;
  5591. }
  5592. }
  5593. /*
  5594. * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
  5595. * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
  5596. */
  5597. static void XXH_alignedFree(void* p)
  5598. {
  5599. if (p != NULL) {
  5600. xxh_u8* ptr = (xxh_u8*)p;
  5601. /* Get the offset byte we added in XXH_malloc. */
  5602. xxh_u8 offset = ptr[-1];
  5603. /* Free the original malloc'd pointer */
  5604. xxh_u8* base = ptr - offset;
  5605. XXH_free(base);
  5606. }
  5607. }
/*! @ingroup XXH3_family */
/*!
 * @brief Allocate an @ref XXH3_state_t.
 *
 * @return An allocated pointer of @ref XXH3_state_t on success.
 * @return `NULL` on failure.
 *
 * @note Must be freed with XXH3_freeState().
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
    /* 64-byte alignment covers the strictest SIMD load requirement (AVX-512). */
    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (state==NULL) return NULL;
    XXH3_INITSTATE(state);
    return state;
}
/*! @ingroup XXH3_family */
/*!
 * @brief Frees an @ref XXH3_state_t.
 *
 * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
 *
 * @return @ref XXH_OK.
 *
 * @note Must be allocated with XXH3_createState().
 *       Passing NULL is a no-op (XXH_alignedFree handles it).
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
    XXH_alignedFree(statePtr);
    return XXH_OK;
}
/*! @ingroup XXH3_family */
/* Plain shallow copy is sufficient: the state holds no owned pointers
 * (extSecret, if any, is caller-owned). */
XXH_PUBLIC_API void
XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
{
    XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
}
/*
 * (Re)initializes @p statePtr for a new streaming hash.
 * Precondition: secretSize >= XXH3_SECRET_SIZE_MIN (asserted, not checked).
 * @p secret may be NULL to select the state's customSecret at update time.
 */
static void
XXH3_reset_internal(XXH3_state_t* statePtr,
                    XXH64_hash_t seed,
                    const void* secret, size_t secretSize)
{
    /* Zero the contiguous range of members [bufferedSize, nbStripesPerBlock)
     * in one memset — relies on the declaration order in XXH3_state_t. */
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
    XXH_ASSERT(statePtr != NULL);
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
    memset((char*)statePtr + initStart, 0, initLength);
    /* Same lane constants as XXH3_INIT_ACC */
    statePtr->acc[0] = XXH_PRIME32_3;
    statePtr->acc[1] = XXH_PRIME64_1;
    statePtr->acc[2] = XXH_PRIME64_2;
    statePtr->acc[3] = XXH_PRIME64_3;
    statePtr->acc[4] = XXH_PRIME64_4;
    statePtr->acc[5] = XXH_PRIME32_2;
    statePtr->acc[6] = XXH_PRIME64_5;
    statePtr->acc[7] = XXH_PRIME32_1;
    statePtr->seed = seed;
    statePtr->useSeed = (seed != 0);
    statePtr->extSecret = (const unsigned char*)secret;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}
/*! @ingroup XXH3_family */
/* Resets for streaming with the default secret and seed 0. */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}
  5683. /*! @ingroup XXH3_family */
  5684. XXH_PUBLIC_API XXH_errorcode
  5685. XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
  5686. {
  5687. if (statePtr == NULL) return XXH_ERROR;
  5688. XXH3_reset_internal(statePtr, 0, secret, secretSize);
  5689. if (secret == NULL) return XXH_ERROR;
  5690. if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
  5691. return XXH_OK;
  5692. }
/*! @ingroup XXH3_family */
/* Resets for streaming with a seed. Regenerates customSecret only when the
 * seed changed or an external secret was previously in use (avoids the
 * secret-derivation cost on repeated resets with the same seed). */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
        XXH3_initCustomSecret(statePtr->customSecret, seed);
    /* secret == NULL: updates will fall back to statePtr->customSecret */
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}
/*! @ingroup XXH3_family */
/* Resets for streaming with both a secret (long inputs) and a seed
 * (short/mid inputs). Validates arguments before mutating the state. */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
    statePtr->useSeed = 1; /* always, even if seed64==0 */
    return XXH_OK;
}
/*!
 * @internal
 * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
 *
 * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
 *
 * @param acc Pointer to the 8 accumulator lanes
 * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block*
 * @param nbStripesPerBlock Number of stripes in a block
 * @param input Input pointer
 * @param nbStripes Number of stripes to process
 * @param secret Secret pointer
 * @param secretLimit Offset of the last block in @p secret
 * @param f_acc Pointer to an XXH3_accumulate implementation
 * @param f_scramble Pointer to an XXH3_scrambleAcc implementation
 * @return Pointer past the end of @p input after processing
 */
XXH_FORCE_INLINE const xxh_u8 *
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate f_acc,
                    XXH3_f_scrambleAcc f_scramble)
{
    /* Resume the secret where the previous call left off within the block. */
    const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
    /* Process full blocks */
    if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
        /* Process the initial partial block... */
        size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
        do {
            /* Accumulate and scramble */
            f_acc(acc, input, initialSecret, nbStripesThisIter);
            f_scramble(acc, secret + secretLimit);
            input += nbStripesThisIter * XXH_STRIPE_LEN;
            nbStripes -= nbStripesThisIter;
            /* Then continue the loop with the full block size */
            nbStripesThisIter = nbStripesPerBlock;
            initialSecret = secret;
        } while (nbStripes >= nbStripesPerBlock);
        *nbStripesSoFarPtr = 0;
    }
    /* Process a partial block */
    if (nbStripes > 0) {
        f_acc(acc, input, initialSecret, nbStripes);
        input += nbStripes * XXH_STRIPE_LEN;
        *nbStripesSoFarPtr += nbStripes;
    }
    /* Return end pointer */
    return input;
}
/* Whether XXH3_update() should copy accumulators to the stack while working
 * (helps gcc/MSVC optimization; see comment inside XXH3_update). */
#ifndef XXH3_STREAM_USE_STACK
#  if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
#    define XXH3_STREAM_USE_STACK 1
#  endif
#endif
/*
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
 *
 * Buffering strategy: small inputs accumulate in state->buffer; once the
 * buffer would overflow, it is completed and consumed as whole stripes, any
 * large remainder is consumed directly from @p input, and the tail is
 * re-buffered. The buffer is never left empty after the first update, so
 * digest always has data for the final stripe.
 */
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
            const xxh_u8* XXH_RESTRICT input, size_t len,
            XXH3_f_accumulate f_acc,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }
    XXH_ASSERT(state != NULL);
    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* For some reason, gcc and MSVC seem to suffer greatly
         * when operating accumulators directly into state.
         * Operating into stack space seems to enable proper optimization.
         * clang, on the other hand, doesn't seem to need this trick */
        XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8];
        XXH_memcpy(acc, state->acc, sizeof(acc));
#else
        xxh_u64* XXH_RESTRICT const acc = state->acc;
#endif
        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);

        /* small input : just fill in tmp buffer */
        if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) {
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        /* total input is now > XXH3_INTERNALBUFFER_SIZE */
#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */

        /*
         * Internal buffer is partially filled (always, except at beginning)
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(acc,
                                &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);
        if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
            /* bEnd-1: keep at least 1 byte so the tail is never empty */
            size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
            input = XXH3_consumeStripes(acc,
                                        &state->nbStripesSoFar, state->nbStripesPerBlock,
                                        input, nbStripes,
                                        secret, state->secretLimit,
                                        f_acc, f_scramble);
            /* Keep the last full stripe at the end of the buffer for digest. */
            XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
        }
        /* Some remaining input (always) : buffer it */
        XXH_ASSERT(input < bEnd);
        XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
        XXH_ASSERT(state->bufferedSize == 0);
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* save stack accumulators into state */
        XXH_memcpy(state->acc, acc, sizeof(acc));
#endif
    }
    return XXH_OK;
}
/*! @ingroup XXH3_family */
/* Streaming update entry point (64-bit variant). */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate, XXH3_scrambleAcc);
}
/*
 * Finishes a long streaming hash into @p acc: consumes any whole buffered
 * stripes, then accumulates the final XXH_STRIPE_LEN bytes of input
 * (reconstructed from the buffer if fewer than XXH_STRIPE_LEN are buffered).
 * The state itself is not modified.
 */
XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    xxh_u8 lastStripe[XXH_STRIPE_LEN];
    const xxh_u8* lastStripePtr;

    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    XXH_memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        /* Consume remaining stripes then point to remaining data in buffer */
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        size_t nbStripesSoFar = state->nbStripesSoFar;  /* local copy: don't mutate state */
        XXH3_consumeStripes(acc,
                            &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate, XXH3_scrambleAcc);
        lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN;
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        /* Copy to temp buffer: tail of the previous buffer contents
         * (saved by XXH3_update) + current buffered bytes */
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
        XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        lastStripePtr = lastStripe;
    }
    /* Last stripe */
    XXH3_accumulate_512(acc,
                        lastStripePtr,
                        secret + state->secretLimit - XXH_SECRET_LASTACC_START);
}
/*! @ingroup XXH3_family */
/*
 * Produces the current 64-bit hash of all ingested input.
 * Long inputs digest the streamed accumulators; short inputs (everything
 * still fits in the internal buffer) defer to the one-shot functions so
 * the result matches the non-streaming API exactly.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->useSeed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}
  5905. #endif /* !XXH_NO_STREAM */
  5906. /* ==========================================
 * XXH3 128 bits (a.k.a. XXH128)
  5908. * ==========================================
  5909. * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
  5910. * even without counting the significantly larger output size.
  5911. *
  5912. * For example, extra steps are taken to avoid the seed-dependent collisions
  5913. * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
  5914. *
  5915. * This strength naturally comes at the cost of some speed, especially on short
  5916. * lengths. Note that longer hashes are about as fast as the 64-bit version
  5917. * due to it using only a slight modification of the 64-bit loop.
  5918. *
  5919. * XXH128 is also more oriented towards 64-bit machines. It is still extremely
  5920. * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
  5921. */
/*
 * 128-bit hash for inputs of 1 to 3 bytes.
 * A doubled version of the 1to3_64b routine, using different secret offsets
 * and a rotated/byte-swapped high word so low64 and high64 differ.
 */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8 const c1 = input[0];
        xxh_u8 const c2 = input[len >> 1];  /* middle byte (or first, for len 1) */
        xxh_u8 const c3 = input[len - 1];   /* last byte */
        /* Pack the 3 sampled bytes and the length into one 32-bit word. */
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
        /* High word derives from the same packed value, permuted differently. */
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        /* Seed is added into one bitflip and subtracted from the other. */
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}
/*
 * 128-bit hash for inputs of 4 to 8 bytes.
 * Reads two (possibly overlapping) 32-bit halves and mixes them through a
 * 64x64->128 multiply followed by cross-feedback between the two halves.
 */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    /* Fold a byte-swapped copy of the low seed half into the high half. */
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);  /* overlaps input_lo when len < 8 */
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;
        /* Shift len to the left to ensure it is even, this avoids even multiplies. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
        /* Cross-feed the two halves, then strengthen low64 with xorshift+multiply. */
        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);
        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= PRIME_MX2;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}
/*
 * 128-bit hash for inputs of 9 to 16 bytes.
 * Reads two (possibly overlapping) 64-bit halves, mixes them via a
 * 128-bit multiply, then performs a 128x64 multiply for the final result.
 */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    /* Seed is subtracted from one bitflip and added to the other. */
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);  /* overlaps input_lo when len < 16 */
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
         * the high 64 bits of m128.
         *
         * The best approach to this operation is different on 32-bit and 64-bit.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
            /*
             * 32-bit optimized version, which is more readable.
             *
             * On 32-bit, it removes an ADC and delays a dependency between the two
             * halves of m128.high64, but it generates an extra mask on 64-bit.
             */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else {
            /*
             * 64-bit optimized (albeit more confusing) version.
             *
             * Uses some properties of addition and multiplication to remove the mask:
             *
             * Let:
             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
             *    c = XXH_PRIME32_2
             *
             *    a + (b * c)
             * Inverse Property: x + y - x == y
             *    a + (b * (1 + c - 1))
             * Distributive Property: x * (y + z) == (x * y) + (x * z)
             *    a + (b * 1) + (b * (c - 1))
             * Identity Property: x * 1 == x
             *    a + b + (b * (c - 1))
             *
             * Substitute a, b, and c:
             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             *
             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             */
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64 ^= XXH_swap64(m128.high64);
        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;
            h128.low64   = XXH3_avalanche(h128.low64);
            h128.high64  = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}
  6042. /*
  6043. * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
  6044. */
  6045. XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
  6046. XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
  6047. {
  6048. XXH_ASSERT(len <= 16);
  6049. { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
  6050. if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
  6051. if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
  6052. { XXH128_hash_t h128;
  6053. xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
  6054. xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
  6055. h128.low64 = XXH64_avalanche(seed ^ bitflipl);
  6056. h128.high64 = XXH64_avalanche( seed ^ bitfliph);
  6057. return h128;
  6058. } }
  6059. }
  6060. /*
  6061. * A bit slower than XXH3_mix16B, but handles multiply by zero better.
  6062. */
  6063. XXH_FORCE_INLINE XXH128_hash_t
  6064. XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
  6065. const xxh_u8* secret, XXH64_hash_t seed)
  6066. {
  6067. acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
  6068. acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
  6069. acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
  6070. acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
  6071. return acc;
  6072. }
/*
 * 128-bit hash for inputs of 17 to 128 bytes.
 * Mixes up to four 32-byte spans taken symmetrically from both ends of the
 * input, then combines the accumulator halves into the final hash.
 */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);
    {   XXH128_hash_t acc;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
#if XXH_SIZE_OPT >= 1
        {
            /* Smaller, but slightly slower. */
            unsigned int i = (unsigned int)(len - 1) / 32;
            do {
                /* Mix a leading and a trailing 16-byte block, working inwards. */
                acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
            } while (i-- != 0);
        }
#else
        /* Unrolled equivalent of the loop above: larger inputs mix more pairs.
         * The order (innermost pairs first) must match the loop's descending i. */
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
#endif
        /* Fold the two accumulator halves into the final 128-bit hash. */
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}
/*
 * 128-bit hash for inputs of 129 to 240 bytes (XXH3_MIDSIZE_MAX).
 * Processes the input in 32-byte chunks: the first 128 bytes with one secret
 * region, the remainder with a second region, and a final 32-byte chunk taken
 * from the very end of the input with a negated seed.
 */
XXH_NO_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
    {   XXH128_hash_t acc;
        unsigned i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        /*
         * We set `i` to offset + 32. We do this so that unchanged
         * `len` can be used as upper bound. This reaches a sweet spot
         * where both x86 and aarch64 get simple agen and good codegen
         * for the loop.
         */
        for (i = 32; i < 160; i += 32) {
            acc = XXH128_mix32B(acc,
                                input  + i - 32,
                                input  + i - 16,
                                secret + i - 32,
                                seed);
        }
        /* Intermediate avalanche after the first 128 bytes. */
        acc.low64 = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        /*
         * NB: `i <= len` will duplicate the last 32-bytes if
         * len % 32 was zero. This is an unfortunate necessity to keep
         * the hash result stable.
         */
        for (i=160; i <= len; i += 32) {
            acc = XXH128_mix32B(acc,
                                input + i - 32,
                                input + i - 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
                                seed);
        }
        /* last bytes: the final 32 bytes of input, mixed with a negated seed */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            (XXH64_hash_t)0 - seed);
        /* Fold the two accumulator halves into the final 128-bit hash. */
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}
/*
 * Core long-input (> XXH3_MIDSIZE_MAX) 128-bit hash.
 * Runs the shared accumulation loop, then merges the accumulators twice:
 * once from the start of the secret (low64) and once from its end (high64),
 * with different length-derived start values.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate f_acc,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        /* high64 uses a secret region anchored at the end, and the bitwise
         * complement of the length product, so the two merges differ. */
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}
/*
 * Long-input 128-bit hash with the built-in default secret (seed/secret args ignored).
 * It's important for performance that XXH3_hashLong() is not inlined.
 */
XXH_NO_INLINE XXH_PUREF XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;  /* signature fixed by XXH3_hashLong128_f */
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate, XXH3_scrambleAcc);
}
/*
 * Long-input 128-bit hash with a caller-provided secret (seed ignored).
 *
 * It's important for performance to pass @p secretLen (when it's static)
 * to the compiler, so that it can properly optimize the vectorized loop.
 *
 * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
 * breaks -Og, this is XXH_NO_INLINE.
 */
XXH3_WITH_SECRET_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;  /* signature fixed by XXH3_hashLong128_f */
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate, XXH3_scrambleAcc);
}
  6219. XXH_FORCE_INLINE XXH128_hash_t
  6220. XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
  6221. XXH64_hash_t seed64,
  6222. XXH3_f_accumulate f_acc,
  6223. XXH3_f_scrambleAcc f_scramble,
  6224. XXH3_f_initCustomSecret f_initSec)
  6225. {
  6226. if (seed64 == 0)
  6227. return XXH3_hashLong_128b_internal(input, len,
  6228. XXH3_kSecret, sizeof(XXH3_kSecret),
  6229. f_acc, f_scramble);
  6230. { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
  6231. f_initSec(secret, seed64);
  6232. return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
  6233. f_acc, f_scramble);
  6234. }
  6235. }
/*
 * Long-input 128-bit hash with a seed (secret args ignored).
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;  /* signature fixed by XXH3_hashLong128_f */
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
/* Function pointer type for the long-input 128-bit hash strategies
 * (default / withSecret / withSeed), selected by XXH3_128bits_internal. */
typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);
  6249. XXH_FORCE_INLINE XXH128_hash_t
  6250. XXH3_128bits_internal(const void* input, size_t len,
  6251. XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
  6252. XXH3_hashLong128_f f_hl128)
  6253. {
  6254. XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
  6255. /*
  6256. * If an action is to be taken if `secret` conditions are not respected,
  6257. * it should be done here.
  6258. * For now, it's a contract pre-condition.
  6259. * Adding a check and a branch here would cost performance at every hash.
  6260. */
  6261. if (len <= 16)
  6262. return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
  6263. if (len <= 128)
  6264. return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
  6265. if (len <= XXH3_MIDSIZE_MAX)
  6266. return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
  6267. return f_hl128(input, len, seed64, secret, secretLen);
  6268. }
/* === Public XXH128 API === */
/*! @ingroup XXH3_family */
/* Unseeded 128-bit hash, using the built-in default secret. */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}
/*! @ingroup XXH3_family */
/* 128-bit hash with a caller-provided secret (must be >= XXH3_SECRET_SIZE_MIN bytes). */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}
/*! @ingroup XXH3_family */
/* 128-bit hash seeded with a 64-bit value; secret is derived internally for long inputs. */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}
/*! @ingroup XXH3_family */
/* 128-bit hash combining a seed and a custom secret.
 * Note: for short inputs (len <= XXH3_MIDSIZE_MAX) the custom secret is not
 * used; the seed is applied over the default secret instead (the NULL long-hash
 * strategy is unreachable on this path since len <= XXH3_MIDSIZE_MAX). */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}
/*! @ingroup XXH3_family */
/* Convenience alias: XXH128() is the seeded 128-bit hash. */
XXH_PUBLIC_API XXH128_hash_t
XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
  6307. /* === XXH3 128-bit streaming === */
  6308. #ifndef XXH_NO_STREAM
  6309. /*
  6310. * All initialization and update functions are identical to 64-bit streaming variant.
  6311. * The only difference is the finalization routine.
  6312. */
/*! @ingroup XXH3_family */
/* 128-bit streaming reset: shares state layout with the 64-bit variant,
 * so it simply delegates. */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
{
    return XXH3_64bits_reset(statePtr);
}
/*! @ingroup XXH3_family */
/* 128-bit streaming reset with custom secret: delegates to the 64-bit variant. */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}
/*! @ingroup XXH3_family */
/* 128-bit streaming reset with seed: delegates to the 64-bit variant. */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSeed(statePtr, seed);
}
/*! @ingroup XXH3_family */
/* 128-bit streaming reset with secret and seed: delegates to the 64-bit variant. */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}
/*! @ingroup XXH3_family */
/* 128-bit streaming update: ingestion is identical to the 64-bit variant;
 * only the digest differs. */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_64bits_update(state, input, len);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
{
    /* External secret takes precedence over the seed-derived one, if provided. */
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        /* Long-input path: finalize accumulators on a local copy, then merge
         * twice (low64 from the start of the secret, high64 from its end),
         * mirroring XXH3_hashLong_128b_internal. */
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* len <= XXH3_MIDSIZE_MAX : short code.
     * The whole input still resides in state->buffer, so the one-shot
     * variants can be reused directly. */
    if (state->useSeed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}
  6368. #endif /* !XXH_NO_STREAM */
  6369. /* 128-bit utility functions */
  6370. #include <string.h> /* memcmp, memcpy */
  6371. /* return : 1 is equal, 0 if different */
  6372. /*! @ingroup XXH3_family */
  6373. XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
  6374. {
  6375. /* note : XXH128_hash_t is compact, it has no padding byte */
  6376. return !(memcmp(&h1, &h2, sizeof(h1)));
  6377. }
  6378. /* This prototype is compatible with stdlib's qsort().
  6379. * @return : >0 if *h128_1 > *h128_2
  6380. * <0 if *h128_1 < *h128_2
  6381. * =0 if *h128_1 == *h128_2 */
  6382. /*! @ingroup XXH3_family */
  6383. XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
  6384. {
  6385. XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
  6386. XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
  6387. int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
  6388. /* note : bets that, in most cases, hash values are different */
  6389. if (hcmp) return hcmp;
  6390. return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
  6391. }
  6392. /*====== Canonical representation ======*/
/*! @ingroup XXH3_family */
/* Serializes a 128-bit hash into its canonical (big-endian, high64-first)
 * byte representation. */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    /* Canonical form is big-endian: swap on little-endian hosts. */
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}
/*! @ingroup XXH3_family */
/* Deserializes a canonical (big-endian, high64-first) representation back
 * into a native XXH128_hash_t. Inverse of XXH128_canonicalFromHash(). */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
  6414. /* ==========================================
  6415. * Secret generators
  6416. * ==========================================
  6417. */
/* Minimum of two values; arguments may be evaluated twice (macro). */
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))

/* XORs a 128-bit hash into 16 bytes at `dst` (little-endian lanes),
 * used to scramble secret buffer segments in place. */
XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}
/*! @ingroup XXH3_family */
/*
 * Derives a `secretSize`-byte secret from an arbitrary `customSeed`.
 * The buffer is first tiled with copies of the seed material, then each
 * 16-byte segment is scrambled with an XXH128-derived value.
 * Returns XXH_ERROR on invalid arguments (in non-debug builds), XXH_OK otherwise.
 * NOTE(review): the final XXH3_combine16 writes at `secretSize - 16`, so this
 * relies on secretSize >= 16, guaranteed by the XXH3_SECRET_SIZE_MIN check
 * as long as XXH3_SECRET_SIZE_MIN >= 16 — confirm against its definition.
 */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
{
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(secretBuffer != NULL);
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
#else
    /* production mode, assert() are disabled */
    if (secretBuffer == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
#endif
    /* An empty custom seed falls back to the built-in default secret. */
    if (customSeedSize == 0) {
        customSeed = XXH3_kSecret;
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
    }
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(customSeed != NULL);
#else
    if (customSeed == NULL) return XXH_ERROR;
#endif
    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
    {   size_t pos = 0;
        while (pos < secretSize) {
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
            pos += toCopy;
    }   }
    /* Scramble each 16-byte segment with an XXH128 value keyed by the
     * segment index, so no segment is a plain copy of the seed. */
    {   size_t const nbSeg16 = secretSize / 16;
        size_t n;
        XXH128_canonical_t scrambler;
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
        for (n=0; n<nbSeg16; n++) {
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
            XXH3_combine16((char*)secretBuffer + n*16, h128);
        }
        /* last segment: covers the tail when secretSize is not a multiple of 16 */
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
    }
    return XXH_OK;
}
/*! @ingroup XXH3_family */
/* Fills `secretBuffer` (XXH_SECRET_DEFAULT_SIZE bytes) with the same
 * seed-derived secret used internally by the *_withSeed variants. */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
{
    /* Generate into an aligned local first: XXH3_initCustomSecret may require
     * XXH_SEC_ALIGN alignment, which the caller's buffer need not have. */
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret(secret, seed);
    XXH_ASSERT(secretBuffer != NULL);
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}
  6474. /* Pop our optimization override from above */
  6475. #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  6476. && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  6477. && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
  6478. # pragma GCC pop_options
  6479. #endif
  6480. #endif /* XXH_NO_LONG_LONG */
  6481. #endif /* XXH_NO_XXH3 */
  6482. /*!
  6483. * @}
  6484. */
  6485. #endif /* XXH_IMPLEMENTATION */
  6486. #if defined (__cplusplus)
  6487. } /* extern "C" */
  6488. #endif