tcp_main.c 125 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256425742584259426042614262
  1. /*
  2. * $Id$
  3. *
  4. * Copyright (C) 2001-2003 FhG Fokus
  5. *
  6. * This file is part of ser, a free SIP server.
  7. *
  8. * ser is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version
  12. *
  13. * For a license to use the ser software under conditions
  14. * other than those described here, or to purchase support for this
  15. * software, please contact iptel.org by e-mail at the following addresses:
  16. * [email protected]
  17. *
  18. * ser is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License
  24. * along with this program; if not, write to the Free Software
  25. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  26. */
  27. /*
  28. * History:
  29. * --------
  30. * 2002-11-29 created by andrei
  31. * 2002-12-11 added tcp_send (andrei)
  32. * 2003-01-20 locking fixes, hashtables (andrei)
  33. * 2003-02-20 s/lock_t/gen_lock_t/ to avoid a conflict on solaris (andrei)
  34. * 2003-02-25 Nagle is disabled if -DDISABLE_NAGLE (andrei)
  35. * 2003-03-29 SO_REUSEADDR before calling bind to allow
  36. * server restart, Nagle set on the (hopefully)
  37. * correct socket (jiri)
  38. * 2003-03-31 always try to find the corresponding tcp listen socket for
  39. * a temp. socket and store it in *->bind_address: added
  40. * find_tcp_si, modified tcpconn_connect (andrei)
  41. * 2003-04-14 set sockopts to TOS low delay (andrei)
  42. * 2003-06-30 moved tcp new connect checking & handling to
  43. * handle_new_connect (andrei)
  44. * 2003-07-09 tls_close called before closing the tcp connection (andrei)
  45. * 2003-10-24 converted to the new socket_info lists (andrei)
  46. * 2003-10-27 tcp port aliases support added (andrei)
  47. * 2003-11-04 always lock before manipulating refcnt; sendchild
  48. * does not inc refcnt by itself anymore (andrei)
  49. * 2003-11-07 different unix sockets are used for fd passing
  50. * to/from readers/writers (andrei)
  51. * 2003-11-17 handle_new_connect & tcp_connect will close the
  52. * new socket if tcpconn_new return 0 (e.g. out of mem) (andrei)
  53. * 2003-11-28 tcp_blocking_write & tcp_blocking_connect added (andrei)
  54. * 2004-11-08 dropped find_tcp_si and replaced with find_si (andrei)
  55. * 2005-06-07 new tcp optimized code, supports epoll (LT), sigio + real time
  56. * signals, poll & select (andrei)
  57. * 2005-06-26 *bsd kqueue support (andrei)
  58. * 2005-07-04 solaris /dev/poll support (andrei)
  59. * 2005-07-08 tcp_max_connections, tcp_connection_lifetime, don't accept
  60. * more connections if tcp_max_connections is exceeded (andrei)
  61. * 2005-10-21 cleanup all the open connections on exit
  62. * decrement the no. of open connections on timeout too (andrei)
 * 2006-01-30 queue send_fd request and execute them at the end of the
  63. * poll loop (#ifdef) (andrei)
  64. * process all children requests, before attempting to send
  65. * them new stuff (fixes some deadlocks) (andrei)
  66. * 2006-02-03 timers are run only once per s (andrei)
  67. * tcp children fds can be non-blocking; send fds are queued on
  68. * EAGAIN; lots of bug fixes (andrei)
  69. * 2006-02-06 better tcp_max_connections checks, tcp_connections_no moved to
  70. * shm (andrei)
  71. * 2006-04-12 tcp_send() changed to use struct dest_info (andrei)
  72. * 2006-11-02 switched to atomic ops for refcnt, locking improvements
  73. * (andrei)
  74. * 2006-11-04 switched to raw ticks (to fix conversion errors which could
  75. * result in inf. lifetime) (andrei)
  76. * 2007-07-25 tcpconn_connect can now bind the socket on a specified
  77. * source addr/port (andrei)
  78. * 2007-07-26 tcp_send() and tcpconn_get() can now use a specified source
  79. * addr./port (andrei)
  80. * 2007-08-23 getsockname() for INADDR_ANY(SI_IS_ANY) sockets (andrei)
  81. * 2007-08-27 split init_sock_opt into a lightweight init_sock_opt_accept()
  82. * used when accepting connections and init_sock_opt used for
  83. * connect/ new sockets (andrei)
  84. * 2007-11-22 always add the connection & clear the corresponding flags before
  85. * io_watch_add-ing its fd - it's safer this way (andrei)
  86. * 2007-11-26 improved tcp timers: switched to local_timer (andrei)
  87. * 2007-11-27 added send fd cache and reader fd reuse (andrei)
  88. * 2007-11-28 added support for TCP_DEFER_ACCEPT, KEEPALIVE, KEEPINTVL,
  89. * KEEPCNT, QUICKACK, SYNCNT, LINGER2 (andrei)
  90. * 2007-12-04 support for queueing write requests (andrei)
  91. * 2007-12-12 destroy connection asap on wbuf. timeout (andrei)
  92. * 2007-12-13 changed the refcnt and destroy scheme, now refcnt is 1 if
  93. * linked into the hash tables (was 0) (andrei)
  94. * 2007-12-21 support for pending connects (connections are added to the
  95. * hash immediately and writes on them are buffered) (andrei)
  96. * 2008-02-05 handle POLLRDHUP (if supported), POLLERR and
  97. * POLLHUP (andrei)
  98. * on write error check if there's still data in the socket
  99. * read buffer and process it first (andrei)
  100. * 2009-02-26 direct blacklist support (andrei)
  101. * 2009-03-20 s/wq_timeout/send_timeout ; send_timeout is now in ticks
  102. * (andrei)
  103. * 2009-04-09 tcp ev and tcp stats macros added (andrei)
  104. * 2009-09-15 support for force connection reuse and close after send
  105. * send flags (andrei)
  106. */
  107. #ifdef USE_TCP
  108. #ifndef SHM_MEM
  109. #error "shared memory support needed (add -DSHM_MEM to Makefile.defs)"
  110. #endif
  111. #define HANDLE_IO_INLINE
  112. #include "io_wait.h" /* include first to make sure the needed features are
  113. turned on (e.g. _GNU_SOURCE for POLLRDHUP) */
  114. #include <sys/time.h>
  115. #include <sys/types.h>
  116. #include <sys/select.h>
  117. #include <sys/socket.h>
  118. #ifdef HAVE_FILIO_H
  119. #include <sys/filio.h> /* needed on solaris 2.x for FIONREAD */
  120. #elif defined __OS_solaris
  121. #define BSD_COMP /* needed on older solaris for FIONREAD */
  122. #endif /* HAVE_FILIO_H / __OS_solaris */
  123. #include <sys/ioctl.h> /* ioctl() used on write error */
  124. #include <netinet/in.h>
  125. #include <netinet/in_systm.h>
  126. #include <netinet/ip.h>
  127. #include <netinet/tcp.h>
  128. #include <sys/uio.h> /* writev*/
  129. #include <netdb.h>
  130. #include <stdlib.h> /*exit() */
  131. #include <unistd.h>
  132. #include <errno.h>
  133. #include <string.h>
  134. #ifdef HAVE_SELECT
  135. #include <sys/select.h>
  136. #endif
  137. #include <sys/poll.h>
  138. #include "ip_addr.h"
  139. #include "pass_fd.h"
  140. #include "tcp_conn.h"
  141. #include "globals.h"
  142. #include "pt.h"
  143. #include "locking.h"
  144. #include "mem/mem.h"
  145. #include "mem/shm_mem.h"
  146. #include "timer.h"
  147. #include "sr_module.h"
  148. #include "tcp_server.h"
  149. #include "tcp_init.h"
  150. #include "tcp_stats.h"
  151. #include "tcp_ev.h"
  152. #include "tsend.h"
  153. #include "timer_ticks.h"
  154. #include "local_timer.h"
  155. #ifdef CORE_TLS
  156. #include "tls/tls_server.h"
  157. #define tls_loaded() 1
  158. #else
  159. #include "tls_hooks_init.h"
  160. #include "tls_hooks.h"
  161. #endif /* CORE_TLS*/
  162. #ifdef USE_DST_BLACKLIST
  163. #include "dst_blacklist.h"
  164. #endif /* USE_DST_BLACKLIST */
  165. #include "tcp_info.h"
  166. #include "tcp_options.h"
  167. #include "ut.h"
  168. #include "cfg/cfg_struct.h"
  169. #define local_malloc pkg_malloc
  170. #define local_free pkg_free
  171. #include <fcntl.h> /* must be included after io_wait.h if SIGIO_RT is used */
  172. #ifdef NO_MSG_DONTWAIT
  173. #ifndef MSG_DONTWAIT
  174. /* should work inside tcp_main */
  175. #define MSG_DONTWAIT 0
  176. #endif
  177. #endif /*NO_MSG_DONTWAIT */
  178. #define TCP_PASS_NEW_CONNECTION_ON_DATA /* don't pass a new connection
  179. immediately to a child, wait for
  180. some data on it first */
  181. #define TCP_LISTEN_BACKLOG 1024
  182. #define SEND_FD_QUEUE /* queue send fd requests on EAGAIN, instead of sending
  183. them immediately */
  184. #define TCP_CHILD_NON_BLOCKING
  185. #ifdef SEND_FD_QUEUE
  186. #ifndef TCP_CHILD_NON_BLOCKING
  187. #define TCP_CHILD_NON_BLOCKING
  188. #endif
  189. #define MAX_SEND_FD_QUEUE_SIZE tcp_main_max_fd_no
  190. #define SEND_FD_QUEUE_SIZE 128 /* initial size */
  191. #define MAX_SEND_FD_RETRIES 96 /* FIXME: not used for now */
  192. #define SEND_FD_QUEUE_TIMEOUT MS_TO_TICKS(2000) /* 2 s */
  193. #endif
  194. /* minimum interval local_timer_run() is allowed to run, in ticks */
  195. #define TCPCONN_TIMEOUT_MIN_RUN 1 /* once per tick */
  196. #define TCPCONN_WAIT_TIMEOUT 1 /* 1 tick */
  197. #ifdef TCP_ASYNC
  198. static unsigned int* tcp_total_wq=0;
  199. #endif
  200. enum fd_types { F_NONE, F_SOCKINFO /* a tcp_listen fd */,
  201. F_TCPCONN, F_TCPCHILD, F_PROC };
  202. #ifdef TCP_FD_CACHE
  203. #define TCP_FD_CACHE_SIZE 8
  204. struct fd_cache_entry{
  205. struct tcp_connection* con;
  206. int id;
  207. int fd;
  208. };
  209. static struct fd_cache_entry fd_cache[TCP_FD_CACHE_SIZE];
  210. #endif /* TCP_FD_CACHE */
  211. static int is_tcp_main=0;
  212. enum poll_types tcp_poll_method=0; /* by default choose the best method */
  213. int tcp_main_max_fd_no=0;
  214. int tcp_max_connections=DEFAULT_TCP_MAX_CONNECTIONS;
  215. static union sockaddr_union tcp_source_ipv4_addr; /* saved bind/srv v4 addr. */
  216. static union sockaddr_union* tcp_source_ipv4=0;
  217. #ifdef USE_IPV6
  218. static union sockaddr_union tcp_source_ipv6_addr; /* saved bind/src v6 addr. */
  219. static union sockaddr_union* tcp_source_ipv6=0;
  220. #endif
  221. static int* tcp_connections_no=0; /* current open connections */
  222. /* connection hash table (after ip&port) , includes also aliases */
  223. struct tcp_conn_alias** tcpconn_aliases_hash=0;
  224. /* connection hash table (after connection id) */
  225. struct tcp_connection** tcpconn_id_hash=0;
  226. gen_lock_t* tcpconn_lock=0;
  227. struct tcp_child* tcp_children;
  228. static int* connection_id=0; /* unique for each connection, used for
  229. quickly finding the corresponding connection
  230. for a reply */
  231. int unix_tcp_sock;
  232. static int tcp_proto_no=-1; /* tcp protocol number as returned by
  233. getprotobyname */
  234. static io_wait_h io_h;
  235. static struct local_timer tcp_main_ltimer;
  236. static ticks_t tcp_main_prev_ticks;
  237. static ticks_t tcpconn_main_timeout(ticks_t , struct timer_ln* , void* );
  238. inline static int _tcpconn_add_alias_unsafe(struct tcp_connection* c, int port,
  239. struct ip_addr* l_ip, int l_port,
  240. int flags);
  241. /* sets source address used when opening new sockets and no source is specified
  242. * (by default the address is choosen by the kernel)
  243. * Should be used only on init.
  244. * returns -1 on error */
  245. int tcp_set_src_addr(struct ip_addr* ip)
  246. {
  247. switch (ip->af){
  248. case AF_INET:
  249. ip_addr2su(&tcp_source_ipv4_addr, ip, 0);
  250. tcp_source_ipv4=&tcp_source_ipv4_addr;
  251. break;
  252. #ifdef USE_IPV6
  253. case AF_INET6:
  254. ip_addr2su(&tcp_source_ipv6_addr, ip, 0);
  255. tcp_source_ipv6=&tcp_source_ipv6_addr;
  256. break;
  257. #endif
  258. default:
  259. return -1;
  260. }
  261. return 0;
  262. }
  263. static inline int init_sock_keepalive(int s)
  264. {
  265. int optval;
  266. #ifdef HAVE_SO_KEEPALIVE
  267. if (cfg_get(tcp, tcp_cfg, keepalive)){
  268. optval=1;
  269. if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &optval,
  270. sizeof(optval))<0){
  271. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to enable"
  272. " SO_KEEPALIVE: %s\n", strerror(errno));
  273. return -1;
  274. }
  275. }
  276. #endif
  277. #ifdef HAVE_TCP_KEEPINTVL
  278. if ((optval=cfg_get(tcp, tcp_cfg, keepintvl))){
  279. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &optval,
  280. sizeof(optval))<0){
  281. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  282. " keepalive probes interval: %s\n", strerror(errno));
  283. }
  284. }
  285. #endif
  286. #ifdef HAVE_TCP_KEEPIDLE
  287. if ((optval=cfg_get(tcp, tcp_cfg, keepidle))){
  288. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &optval,
  289. sizeof(optval))<0){
  290. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  291. " keepalive idle interval: %s\n", strerror(errno));
  292. }
  293. }
  294. #endif
  295. #ifdef HAVE_TCP_KEEPCNT
  296. if ((optval=cfg_get(tcp, tcp_cfg, keepcnt))){
  297. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT, &optval,
  298. sizeof(optval))<0){
  299. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  300. " maximum keepalive count: %s\n", strerror(errno));
  301. }
  302. }
  303. #endif
  304. return 0;
  305. }
  306. /* set all socket/fd options for new sockets (e.g. before connect):
  307. * disable nagle, tos lowdelay, reuseaddr, non-blocking
  308. *
  309. * return -1 on error */
  310. static int init_sock_opt(int s)
  311. {
  312. int flags;
  313. int optval;
  314. #ifdef DISABLE_NAGLE
  315. flags=1;
  316. if ( (tcp_proto_no!=-1) && (setsockopt(s, tcp_proto_no , TCP_NODELAY,
  317. &flags, sizeof(flags))<0) ){
  318. LOG(L_WARN, "WARNING: init_sock_opt: could not disable Nagle: %s\n",
  319. strerror(errno));
  320. }
  321. #endif
  322. /* tos*/
  323. optval = tos;
  324. if (setsockopt(s, IPPROTO_IP, IP_TOS, (void*)&optval,sizeof(optval)) ==-1){
  325. LOG(L_WARN, "WARNING: init_sock_opt: setsockopt tos: %s\n",
  326. strerror(errno));
  327. /* continue since this is not critical */
  328. }
  329. #if !defined(TCP_DONT_REUSEADDR)
  330. optval=1;
  331. if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR,
  332. (void*)&optval, sizeof(optval))==-1){
  333. LOG(L_ERR, "ERROR: setsockopt SO_REUSEADDR %s\n",
  334. strerror(errno));
  335. /* continue, not critical */
  336. }
  337. #endif /* !TCP_DONT_REUSEADDR */
  338. #ifdef HAVE_TCP_SYNCNT
  339. if ((optval=cfg_get(tcp, tcp_cfg, syncnt))){
  340. if (setsockopt(s, IPPROTO_TCP, TCP_SYNCNT, &optval,
  341. sizeof(optval))<0){
  342. LOG(L_WARN, "WARNING: init_sock_opt: failed to set"
  343. " maximum SYN retr. count: %s\n", strerror(errno));
  344. }
  345. }
  346. #endif
  347. #ifdef HAVE_TCP_LINGER2
  348. if ((optval=cfg_get(tcp, tcp_cfg, linger2))){
  349. if (setsockopt(s, IPPROTO_TCP, TCP_LINGER2, &optval,
  350. sizeof(optval))<0){
  351. LOG(L_WARN, "WARNING: init_sock_opt: failed to set"
  352. " maximum LINGER2 timeout: %s\n", strerror(errno));
  353. }
  354. }
  355. #endif
  356. #ifdef HAVE_TCP_QUICKACK
  357. if (cfg_get(tcp, tcp_cfg, delayed_ack)){
  358. optval=0; /* reset quick ack => delayed ack */
  359. if (setsockopt(s, IPPROTO_TCP, TCP_QUICKACK, &optval,
  360. sizeof(optval))<0){
  361. LOG(L_WARN, "WARNING: init_sock_opt: failed to reset"
  362. " TCP_QUICKACK: %s\n", strerror(errno));
  363. }
  364. }
  365. #endif /* HAVE_TCP_QUICKACK */
  366. init_sock_keepalive(s);
  367. /* non-blocking */
  368. flags=fcntl(s, F_GETFL);
  369. if (flags==-1){
  370. LOG(L_ERR, "ERROR: init_sock_opt: fnctl failed: (%d) %s\n",
  371. errno, strerror(errno));
  372. goto error;
  373. }
  374. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  375. LOG(L_ERR, "ERROR: init_sock_opt: fcntl: set non-blocking failed:"
  376. " (%d) %s\n", errno, strerror(errno));
  377. goto error;
  378. }
  379. return 0;
  380. error:
  381. return -1;
  382. }
  383. /* set all socket/fd options for "accepted" sockets
  384. * only nonblocking is set since the rest is inherited from the
  385. * "parent" (listening) socket
  386. * Note: setting O_NONBLOCK is required on linux but it's not needed on
  387. * BSD and possibly solaris (where the flag is inherited from the
  388. * parent socket). However since there is no standard document
  389. * requiring a specific behaviour in this case it's safer to always set
  390. * it (at least for now) --andrei
  391. * TODO: check on which OSes O_NONBLOCK is inherited and make this
  392. * function a nop.
  393. *
  394. * return -1 on error */
  395. static int init_sock_opt_accept(int s)
  396. {
  397. int flags;
  398. /* non-blocking */
  399. flags=fcntl(s, F_GETFL);
  400. if (flags==-1){
  401. LOG(L_ERR, "ERROR: init_sock_opt_accept: fnctl failed: (%d) %s\n",
  402. errno, strerror(errno));
  403. goto error;
  404. }
  405. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  406. LOG(L_ERR, "ERROR: init_sock_opt_accept: "
  407. "fcntl: set non-blocking failed: (%d) %s\n",
  408. errno, strerror(errno));
  409. goto error;
  410. }
  411. return 0;
  412. error:
  413. return -1;
  414. }
/* blocking connect on a non-blocking fd; it will timeout after
 * tcp_connect_timeout
 * if BLOCKING_USE_SELECT and HAVE_SELECT are defined it will internally
 * use select() instead of poll (bad if fd > FD_SET_SIZE, poll is preferred)
 *
 * Params: fd         - non-blocking socket, connect will be retried on it
 *         type       - protocol (used only for blacklist/event reporting)
 *         send_flags - passed through to the blacklist on failure
 *         servaddr   - destination address
 *         addrlen    - size of *servaddr
 * Returns 0 on success (connection established), -1 on error/timeout.
 */
static int tcp_blocking_connect(int fd, int type, snd_flags_t* send_flags,
								const struct sockaddr *servaddr,
								socklen_t addrlen)
{
	int n;
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
	fd_set sel_set;
	fd_set orig_set;
	struct timeval timeout;
#else
	struct pollfd pf;
#endif
	int elapsed;
	int to;       /* total allowed time, in seconds */
	int ticks;    /* start time, in timer ticks */
	int err;
	unsigned int err_len;
	int poll_err;

	poll_err=0;
	to=cfg_get(tcp, tcp_cfg, connect_timeout_s);
	ticks=get_ticks();
again:
	n=connect(fd, servaddr, addrlen);
	if (n==-1){
		if (errno==EINTR){
			/* interrupted by a signal: retry unless the timeout expired */
			elapsed=(get_ticks()-ticks)*TIMER_TICK;
			if (elapsed<to) goto again;
			else goto error_timeout;
		}
		/* EINPROGRESS/EALREADY are expected on a non-blocking fd:
		 * fall through to the poll/select wait loop below */
		if (errno!=EINPROGRESS && errno!=EALREADY){
			goto error_errno;
		}
	}else goto end; /* immediate success */
	/* poll/select loop: wait for the fd to become writable and then check
	 * SO_ERROR to find out whether the connect succeeded */
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
	FD_ZERO(&orig_set);
	FD_SET(fd, &orig_set);
#else
	pf.fd=fd;
	pf.events=POLLOUT;
#endif
	while(1){
		elapsed=(get_ticks()-ticks)*TIMER_TICK;
		if (elapsed>=to)
			goto error_timeout;
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
		/* select() mutates the set and the timeout => refresh each pass */
		sel_set=orig_set;
		timeout.tv_sec=to-elapsed;
		timeout.tv_usec=0;
		n=select(fd+1, 0, &sel_set, 0, &timeout);
#else
		n=poll(&pf, 1, (to-elapsed)*1000);
#endif
		if (n<0){
			if (errno==EINTR) continue;
			LOG(L_ERR, "ERROR: tcp_blocking_connect %s: poll/select failed:"
					" (%d) %s\n",
					su2a((union sockaddr_union*)servaddr, addrlen),
					errno, strerror(errno));
			goto error;
		}else if (n==0) /* timeout */ continue;
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
		if (FD_ISSET(fd, &sel_set))
#else
		if (pf.revents&(POLLERR|POLLHUP|POLLNVAL)){
			/* error on the socket; remember it and let the SO_ERROR check
			 * below produce the exact errno */
			LOG(L_ERR, "ERROR: tcp_blocking_connect %s: poll error: "
					"flags %x\n",
					su2a((union sockaddr_union*)servaddr, addrlen),
					pf.revents);
			poll_err=1;
		}
#endif
		{
			err_len=sizeof(err);
			getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &err_len);
			if ((err==0) && (poll_err==0)) goto end;
			if (err!=EINPROGRESS && err!=EALREADY){
				LOG(L_ERR, "ERROR: tcp_blocking_connect %s: SO_ERROR (%d) "
						"%s\n",
						su2a((union sockaddr_union*)servaddr, addrlen),
						err, strerror(err));
				errno=err;
				goto error_errno;
			}
		}
	}
error_errno:
	/* report/blacklist according to the specific connect failure */
	switch(errno){
		case ENETUNREACH:
		case EHOSTUNREACH:
#ifdef USE_DST_BLACKLIST
			dst_blacklist_su(BLST_ERR_CONNECT, type,
								(union sockaddr_union*)servaddr, send_flags,
								0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_CONNECT_UNREACHABLE(errno, 0, 0,
								(union sockaddr_union*)servaddr, type);
			break;
		case ETIMEDOUT:
#ifdef USE_DST_BLACKLIST
			dst_blacklist_su(BLST_ERR_CONNECT, type,
								(union sockaddr_union*)servaddr, send_flags,
								0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_CONNECT_TIMEOUT(errno, 0, 0,
								(union sockaddr_union*)servaddr, type);
			break;
		case ECONNREFUSED:
		case ECONNRESET:
#ifdef USE_DST_BLACKLIST
			dst_blacklist_su(BLST_ERR_CONNECT, type,
								(union sockaddr_union*)servaddr, send_flags,
								0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_CONNECT_RST(errno, 0, 0,
								(union sockaddr_union*)servaddr, type);
			break;
		case EAGAIN: /* not posix, but supported on linux and bsd */
			TCP_EV_CONNECT_NO_MORE_PORTS(errno, 0, 0,
								(union sockaddr_union*)servaddr, type);
			break;
		default:
			TCP_EV_CONNECT_ERR(errno, 0, 0,
								(union sockaddr_union*)servaddr, type);
	}
	LOG(L_ERR, "ERROR: tcp_blocking_connect %s: (%d) %s\n",
			su2a((union sockaddr_union*)servaddr, addrlen),
			errno, strerror(errno));
	goto error;
error_timeout:
	/* timeout */
#ifdef USE_DST_BLACKLIST
	dst_blacklist_su(BLST_ERR_CONNECT, type,
						(union sockaddr_union*)servaddr, send_flags, 0);
#endif /* USE_DST_BLACKLIST */
	TCP_EV_CONNECT_TIMEOUT(0, 0, 0, (union sockaddr_union*)servaddr, type);
	LOG(L_ERR, "ERROR: tcp_blocking_connect %s: timeout %d s elapsed "
				"from %d s\n", su2a((union sockaddr_union*)servaddr, addrlen),
				elapsed, cfg_get(tcp, tcp_cfg, connect_timeout_s));
error:
	TCP_STATS_CONNECT_FAILED();
	return -1;
end:
	return 0;
}
/* forward declaration; non-blocking write helper, defined further below */
inline static int _tcpconn_write_nb(int fd, struct tcp_connection* c,
									char* buf, int len);
#ifdef TCP_ASYNC
/* true if the connection write queue has no pending data
 * (unsafe version: caller must hold the connection write lock) */
#define _wbufq_empty(con) ((con)->wbuf_q.first==0)
/* true if the connection write queue has pending data
 * (unsafe version: caller must hold the connection write lock) */
#define _wbufq_non_empty(con) ((con)->wbuf_q.first!=0)
/* unsafe version, call while holding the connection write lock
 *
 * Appends size bytes from data to the connection's write buffer queue.
 * Fails (and reports/blacklists) if either the per-connection or the global
 * queued-bytes limit would be exceeded, or if the queue's write timeout has
 * already expired.
 * Returns 0 on success, -1 on error (limits hit, timeout, or shm OOM). */
inline static int _wbufq_add(struct tcp_connection* c, char* data,
							unsigned int size)
{
	struct tcp_wbuffer_queue* q;
	struct tcp_wbuffer* wb;
	unsigned int last_free;  /* free space in the current last block */
	unsigned int wb_size;    /* size of a newly allocated block */
	unsigned int crt_size;   /* bytes copied in the current iteration */
	ticks_t t;

	q=&c->wbuf_q;
	t=get_ticks_raw();
	/* reject if: per-connection limit, global limit, or the queue is
	 * non-empty and its last-write timeout has expired */
	if (unlikely( ((q->queued+size)>cfg_get(tcp, tcp_cfg, tcpconn_wq_max)) ||
			((*tcp_total_wq+size)>cfg_get(tcp, tcp_cfg, tcp_wq_max)) ||
			(q->first &&
			TICKS_LT(q->wr_timeout, t)) )){
		LOG(L_ERR, "ERROR: wbufq_add(%d bytes): write queue full or timeout "
					" (%d, total %d, last write %d s ago)\n",
					size, q->queued, *tcp_total_wq,
					TICKS_TO_S(t-q->wr_timeout-
						cfg_get(tcp, tcp_cfg, send_timeout)));
		if (q->first && TICKS_LT(q->wr_timeout, t)){
			/* timeout case: report as connect- or send-timeout depending
			 * on whether the connection ever finished its connect */
			if (unlikely(c->state==S_CONN_CONNECT)){
#ifdef USE_DST_BLACKLIST
				dst_blacklist_su( BLST_ERR_CONNECT, c->rcv.proto,
									&c->rcv.src_su, &c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
				TCP_EV_CONNECT_TIMEOUT(0, TCP_LADDR(c), TCP_LPORT(c),
											TCP_PSU(c), TCP_PROTO(c));
				TCP_STATS_CONNECT_FAILED();
			}else{
#ifdef USE_DST_BLACKLIST
				dst_blacklist_su( BLST_ERR_SEND, c->rcv.proto,
									&c->rcv.src_su, &c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
				TCP_EV_SEND_TIMEOUT(0, &c->rcv);
				TCP_STATS_SEND_TIMEOUT();
			}
		}else{
			/* if it's not a timeout => queue full */
			TCP_EV_SENDQ_FULL(0, &c->rcv);
			TCP_STATS_SENDQ_FULL();
		}
		goto error;
	}
	if (unlikely(q->last==0)){
		/* empty queue: allocate the first block and start the write timer */
		wb_size=MAX_unsigned(cfg_get(tcp, tcp_cfg, wq_blk_size), size);
		wb=shm_malloc(sizeof(*wb)+wb_size-1);
		if (unlikely(wb==0))
			goto error;
		wb->b_size=wb_size;
		wb->next=0;
		q->last=wb;
		q->first=wb;
		q->last_used=0;
		q->offset=0;
		/* use the (longer) connect timeout while still connecting */
		q->wr_timeout=get_ticks_raw()+
			((c->state==S_CONN_CONNECT)?
				S_TO_TICKS(cfg_get(tcp, tcp_cfg, connect_timeout_s)):
				cfg_get(tcp, tcp_cfg, send_timeout));
	}else{
		wb=q->last;
	}
	/* copy the data, growing the queue block by block as needed */
	while(size){
		last_free=wb->b_size-q->last_used;
		if (last_free==0){
			wb_size=MAX_unsigned(cfg_get(tcp, tcp_cfg, wq_blk_size), size);
			wb=shm_malloc(sizeof(*wb)+wb_size-1);
			if (unlikely(wb==0))
				goto error;
			wb->b_size=wb_size;
			wb->next=0;
			q->last->next=wb;
			q->last=wb;
			q->last_used=0;
			last_free=wb->b_size;
		}
		crt_size=MIN_unsigned(last_free, size);
		memcpy(wb->buf+q->last_used, data, crt_size);
		q->last_used+=crt_size;
		size-=crt_size;
		data+=crt_size;
		q->queued+=crt_size;
		atomic_add_int((int*)tcp_total_wq, crt_size);
	}
	return 0;
error:
	return -1;
}
  658. /* unsafe version, call while holding the connection write lock
  659. * inserts data at the beginning, it ignores the max queue size checks and
  660. * the timeout (use sparingly)
  661. * Note: it should never be called on a write buffer after wbufq_run() */
  662. inline static int _wbufq_insert(struct tcp_connection* c, char* data,
  663. unsigned int size)
  664. {
  665. struct tcp_wbuffer_queue* q;
  666. struct tcp_wbuffer* wb;
  667. q=&c->wbuf_q;
  668. if (likely(q->first==0)) /* if empty, use wbufq_add */
  669. return _wbufq_add(c, data, size);
  670. if (unlikely((*tcp_total_wq+size)>cfg_get(tcp, tcp_cfg, tcp_wq_max))){
  671. LOG(L_ERR, "ERROR: wbufq_insert(%d bytes): write queue full"
  672. " (%d, total %d, last write %d s ago)\n",
  673. size, q->queued, *tcp_total_wq,
  674. TICKS_TO_S(get_ticks_raw()-q->wr_timeout-
  675. cfg_get(tcp, tcp_cfg, send_timeout)));
  676. goto error;
  677. }
  678. if (unlikely(q->offset)){
  679. LOG(L_CRIT, "BUG: wbufq_insert: non-null offset %d (bad call, should"
  680. "never be called after the wbufq_run())\n", q->offset);
  681. goto error;
  682. }
  683. if ((q->first==q->last) && ((q->last->b_size-q->last_used)>=size)){
  684. /* one block with enough space in it for size bytes */
  685. memmove(q->first->buf+size, q->first->buf, size);
  686. memcpy(q->first->buf, data, size);
  687. q->last_used+=size;
  688. }else{
  689. /* create a size bytes block directly */
  690. wb=shm_malloc(sizeof(*wb)+size-1);
  691. if (unlikely(wb==0))
  692. goto error;
  693. wb->b_size=size;
  694. /* insert it */
  695. wb->next=q->first;
  696. q->first=wb;
  697. memcpy(wb->buf, data, size);
  698. }
  699. q->queued+=size;
  700. atomic_add_int((int*)tcp_total_wq, size);
  701. return 0;
  702. error:
  703. return -1;
  704. }
  705. /* unsafe version, call while holding the connection write lock */
  706. inline static void _wbufq_destroy( struct tcp_wbuffer_queue* q)
  707. {
  708. struct tcp_wbuffer* wb;
  709. struct tcp_wbuffer* next_wb;
  710. int unqueued;
  711. unqueued=0;
  712. if (likely(q->first)){
  713. wb=q->first;
  714. do{
  715. next_wb=wb->next;
  716. unqueued+=(wb==q->last)?q->last_used:wb->b_size;
  717. if (wb==q->first)
  718. unqueued-=q->offset;
  719. shm_free(wb);
  720. wb=next_wb;
  721. }while(wb);
  722. }
  723. memset(q, 0, sizeof(*q));
  724. atomic_add_int((int*)tcp_total_wq, -unqueued);
  725. }
/* tries to empty the queue (safe version, c->write_lock must not be hold)
 * returns -1 on error, bytes written on success (>=0)
 * if the whole queue is emptied => sets *empty*/
inline static int wbufq_run(int fd, struct tcp_connection* c, int* empty)
{
	struct tcp_wbuffer_queue* q;
	struct tcp_wbuffer* wb;
	int n;           /* bytes written by the current _tcpconn_write_nb */
	int ret;         /* total bytes written (or -1 on write error) */
	int block_size;  /* remaining bytes of the current block */
	char* buf;

	*empty=0;
	ret=0;
	lock_get(&c->write_lock);
	q=&c->wbuf_q;
	while(q->first){
		/* the last block is only partially filled (last_used), and the
		 * first block may be partially sent already (offset) */
		block_size=((q->first==q->last)?q->last_used:q->first->b_size)-
					q->offset;
		buf=q->first->buf+q->offset;
		n=_tcpconn_write_nb(fd, c, buf, block_size);
		if (likely(n>0)){
			ret+=n;
			if (likely(n==block_size)){
				/* whole block sent: unlink & free it, continue */
				wb=q->first;
				q->first=q->first->next;
				shm_free(wb);
				q->offset=0;
				q->queued-=block_size;
				atomic_add_int((int*)tcp_total_wq, -block_size);
			}else{
				/* partial write: remember where we stopped and bail out */
				q->offset+=n;
				q->queued-=n;
				atomic_add_int((int*)tcp_total_wq, -n);
				break;
			}
		}else{
			if (n<0){
				/* EINTR is handled inside _tcpconn_write_nb */
				if (!(errno==EAGAIN || errno==EWOULDBLOCK)){
					/* real error: report/blacklist; distinguish failed
					 * connects from failed sends */
					if (unlikely(c->state==S_CONN_CONNECT)){
						switch(errno){
							case ENETUNREACH:
							case EHOSTUNREACH: /* not posix for send() */
#ifdef USE_DST_BLACKLIST
								dst_blacklist_su(BLST_ERR_CONNECT,
													c->rcv.proto,
													&c->rcv.src_su,
													&c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
								TCP_EV_CONNECT_UNREACHABLE(errno, TCP_LADDR(c),
													TCP_LPORT(c), TCP_PSU(c),
													TCP_PROTO(c));
								break;
							case ECONNREFUSED:
							case ECONNRESET:
#ifdef USE_DST_BLACKLIST
								dst_blacklist_su(BLST_ERR_CONNECT,
													c->rcv.proto,
													&c->rcv.src_su,
													&c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
								TCP_EV_CONNECT_RST(0, TCP_LADDR(c),
													TCP_LPORT(c), TCP_PSU(c),
													TCP_PROTO(c));
								break;
							default:
								TCP_EV_CONNECT_ERR(errno, TCP_LADDR(c),
													TCP_LPORT(c), TCP_PSU(c),
													TCP_PROTO(c));
						}
						TCP_STATS_CONNECT_FAILED();
					}else{
						switch(errno){
							case ECONNREFUSED:
							case ECONNRESET:
								TCP_STATS_CON_RESET();
								/* no break */
							case ENETUNREACH:
							case EHOSTUNREACH: /* not posix for send() */
#ifdef USE_DST_BLACKLIST
								dst_blacklist_su(BLST_ERR_SEND,
													c->rcv.proto,
													&c->rcv.src_su,
													&c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
								break;
						}
					}
					ret=-1;
					LOG(L_ERR, "ERROR: wbuf_runq: %s [%d]\n",
							strerror(errno), errno);
				}
			}
			break;
		}
	}
	if (likely(q->first==0)){
		q->last=0;
		q->last_used=0;
		q->offset=0;
		*empty=1;
	}
	lock_release(&c->write_lock);
	/* NOTE(review): q->wr_timeout and c->state are updated here _after_
	 * releasing the write lock — presumably safe because only tcp_main
	 * runs this, but worth confirming */
	if (likely(ret>0)){
		q->wr_timeout=get_ticks_raw()+cfg_get(tcp, tcp_cfg, send_timeout);
		if (unlikely(c->state==S_CONN_CONNECT || c->state==S_CONN_ACCEPT)){
			TCP_STATS_ESTABLISHED(c->state);
			c->state=S_CONN_OK;
		}
	}
	return ret;
}
  838. #endif /* TCP_ASYNC */
#if 0
/* DEAD CODE: compiled out (#if 0); kept for reference only.
 * blocking write even on non-blocking sockets
 * if TCP_TIMEOUT will return with error */
static int tcp_blocking_write(struct tcp_connection* c, int fd, char* buf,
								unsigned int len)
{
	int n;
	fd_set sel_set;
	struct timeval timeout;
	int ticks;
	int initial_len;

	initial_len=len;
again:
	n=send(fd, buf, len,
#ifdef HAVE_MSG_NOSIGNAL
			MSG_NOSIGNAL
#else
			0
#endif
		);
	if (n<0){
		if (errno==EINTR) goto again;
		else if (errno!=EAGAIN && errno!=EWOULDBLOCK){
			LOG(L_ERR, "tcp_blocking_write: failed to send: (%d) %s\n",
					errno, strerror(errno));
			TCP_EV_SEND_TIMEOUT(errno, &c->rcv);
			TCP_STATS_SEND_TIMEOUT();
			goto error;
		}
	}else if (n<len){
		/* partial write */
		buf+=n;
		len-=n;
	}else{
		/* success: full write */
		goto end;
	}
	/* wait for the socket to become writable again, then retry */
	while(1){
		FD_ZERO(&sel_set);
		FD_SET(fd, &sel_set);
		timeout.tv_sec=tcp_send_timeout;
		timeout.tv_usec=0;
		ticks=get_ticks();
		n=select(fd+1, 0, &sel_set, 0, &timeout);
		if (n<0){
			if (errno==EINTR) continue; /* signal, ignore */
			LOG(L_ERR, "ERROR: tcp_blocking_write: select failed: "
					" (%d) %s\n", errno, strerror(errno));
			goto error;
		}else if (n==0){
			/* timeout */
			if (get_ticks()-ticks>=tcp_send_timeout){
				LOG(L_ERR, "ERROR: tcp_blocking_write: send timeout (%d)\n",
						tcp_send_timeout);
				goto error;
			}
			continue;
		}
		if (FD_ISSET(fd, &sel_set)){
			/* we can write again */
			goto again;
		}
	}
error:
	return -1;
end:
	return initial_len;
}
#endif
/* Allocate and initialize a new tcp_connection structure (in shm), with the
 * read buffer appended to the same allocation.
 * Params: sock       - the connected/accepted socket
 *         su         - peer address
 *         local_addr - local address (may be 0 => fall back to ba)
 *         ba         - bind/listening socket info (may be 0)
 *         type       - PROTO_TCP or PROTO_TLS
 *         state      - initial connection state
 * Returns the new connection (refcnt 0, fd not set) or 0 on error. */
struct tcp_connection* tcpconn_new(int sock, union sockaddr_union* su,
									union sockaddr_union* local_addr,
									struct socket_info* ba, int type,
									int state)
{
	struct tcp_connection *c;
	int rd_b_size;

	rd_b_size=cfg_get(tcp, tcp_cfg, rd_buf_size);
	/* one allocation: struct + read buffer right after it */
	c=shm_malloc(sizeof(struct tcp_connection) + rd_b_size);
	if (c==0){
		LOG(L_ERR, "ERROR: tcpconn_new: mem. allocation failure\n");
		goto error;
	}
	memset(c, 0, sizeof(struct tcp_connection)); /* zero init (skip rd buf)*/
	c->s=sock;
	c->fd=-1; /* not initialized */
	if (lock_init(&c->write_lock)==0){
		LOG(L_ERR, "ERROR: tcpconn_new: init lock failed\n");
		goto error;
	}
	c->rcv.src_su=*su;
	atomic_set(&c->refcnt, 0);
	local_timer_init(&c->timer, tcpconn_main_timeout, c, 0);
	su2ip_addr(&c->rcv.src_ip, su);
	c->rcv.src_port=su_getport(su);
	c->rcv.bind_address=ba;
	if (likely(local_addr)){
		su2ip_addr(&c->rcv.dst_ip, local_addr);
		c->rcv.dst_port=su_getport(local_addr);
	}else if (ba){
		/* no explicit local address: use the bind socket's address */
		c->rcv.dst_ip=ba->address;
		c->rcv.dst_port=ba->port_no;
	}
	print_ip("tcpconn_new: new tcp connection: ", &c->rcv.src_ip, "\n");
	DBG( "tcpconn_new: on port %d, type %d\n", c->rcv.src_port, type);
	init_tcp_req(&c->req, (char*)c+sizeof(struct tcp_connection), rd_b_size);
	c->id=(*connection_id)++;
	c->rcv.proto_reserved1=0; /* this will be filled before receive_message*/
	c->rcv.proto_reserved2=0;
	c->state=state;
	c->extra_data=0;
#ifdef USE_TLS
	if (type==PROTO_TLS){
		/* NOTE(review): if tls_tcpconn_init() fails, the error path frees c
		 * without lock_destroy() on write_lock — confirm lock_destroy is a
		 * nop for the lock implementation in use */
		if (tls_tcpconn_init(c, sock)==-1) goto error;
	}else
#endif /* USE_TLS*/
	{
		c->type=PROTO_TCP;
		c->rcv.proto=PROTO_TCP;
		c->timeout=get_ticks_raw()+cfg_get(tcp, tcp_cfg, con_lifetime);
	}
	return c;
error:
	if (c) shm_free(c);
	return 0;
}
/* do the actual connect, set sock. options a.s.o
 * returns socket on success, -1 on error
 * sets also *res_local_addr, res_si and state (S_CONN_CONNECT for an
 * unfinished connect and S_CONN_OK for a finished one)*/
inline static int tcp_do_connect( union sockaddr_union* server,
									union sockaddr_union* from,
									int type,
									snd_flags_t* send_flags,
									union sockaddr_union* res_local_addr,
									struct socket_info** res_si,
									enum tcp_conn_states *state
									)
{
	int s;
	union sockaddr_union my_name;
	socklen_t my_name_len;
	struct ip_addr ip;
#ifdef TCP_ASYNC
	int n;
#endif /* TCP_ASYNC */

	s=socket(AF2PF(server->s.sa_family), SOCK_STREAM, 0);
	if (unlikely(s==-1)){
		LOG(L_ERR, "ERROR: tcp_do_connect %s: socket: (%d) %s\n",
				su2a(server, sizeof(*server)), errno, strerror(errno));
		goto error;
	}
	/* sets non-blocking mode and the usual socket options */
	if (init_sock_opt(s)<0){
		LOG(L_ERR, "ERROR: tcp_do_connect %s: init_sock_opt failed\n",
					su2a(server, sizeof(*server)));
		goto error;
	}
	/* a failed source-address bind is only a warning: the connect will
	 * simply use a kernel-chosen source address */
	if (unlikely(from && bind(s, &from->s, sockaddru_len(*from)) != 0)){
		LOG(L_WARN, "WARNING: tcp_do_connect: binding to source address"
					" %s failed: %s [%d]\n", su2a(from, sizeof(*from)),
					strerror(errno), errno);
	}
	*state=S_CONN_OK;
#ifdef TCP_ASYNC
	if (likely(cfg_get(tcp, tcp_cfg, async))){
again:
		/* async mode: start the connect and return immediately;
		 * EINPROGRESS => connect will be finished later (S_CONN_CONNECT) */
		n=connect(s, &server->s, sockaddru_len(*server));
		if (likely(n==-1)){ /*non-blocking => most probable EINPROGRESS*/
			if (likely(errno==EINPROGRESS))
				*state=S_CONN_CONNECT;
			else if (errno==EINTR) goto again;
			else if (errno!=EALREADY){
				/* hard connect failure: report & blacklist by cause */
				switch(errno){
					case ENETUNREACH:
					case EHOSTUNREACH:
#ifdef USE_DST_BLACKLIST
						dst_blacklist_su(BLST_ERR_CONNECT, type, server,
											send_flags, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_UNREACHABLE(errno, 0, 0, server, type);
						break;
					case ETIMEDOUT:
#ifdef USE_DST_BLACKLIST
						dst_blacklist_su(BLST_ERR_CONNECT, type, server,
											send_flags, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_TIMEOUT(errno, 0, 0, server, type);
						break;
					case ECONNREFUSED:
					case ECONNRESET:
#ifdef USE_DST_BLACKLIST
						dst_blacklist_su(BLST_ERR_CONNECT, type, server,
											send_flags, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_RST(errno, 0, 0, server, type);
						break;
					case EAGAIN:/* not posix, but supported on linux and bsd */
						TCP_EV_CONNECT_NO_MORE_PORTS(errno, 0, 0, server,type);
						break;
					default:
						TCP_EV_CONNECT_ERR(errno, 0, 0, server, type);
				}
				TCP_STATS_CONNECT_FAILED();
				LOG(L_ERR, "ERROR: tcp_do_connect: connect %s: (%d) %s\n",
						su2a(server, sizeof(*server)),
						errno, strerror(errno));
				goto error;
			}
		}
	}else{
#endif /* TCP_ASYNC */
		/* sync mode: wait (with timeout) for the connect to finish */
		if (tcp_blocking_connect(s, type, send_flags, &server->s,
									sockaddru_len(*server))<0){
			LOG(L_ERR, "ERROR: tcp_do_connect: tcp_blocking_connect %s"
						" failed\n", su2a(server, sizeof(*server)));
			goto error;
		}
#ifdef TCP_ASYNC
	}
#endif /* TCP_ASYNC */
	if (from){
		su2ip_addr(&ip, from);
		if (!ip_addr_any(&ip))
			/* we already know the source ip, skip the sys. call */
			goto find_socket;
	}
	my_name_len=sizeof(my_name);
	if (unlikely(getsockname(s, &my_name.s, &my_name_len)!=0)){
		LOG(L_ERR, "ERROR: tcp_do_connect: getsockname failed: %s(%d)\n",
				strerror(errno), errno);
		*res_si=0;
		goto error;
	}
	from=&my_name; /* update from with the real "from" address */
	su2ip_addr(&ip, &my_name);
find_socket:
	/* map the local ip back to the listening socket it belongs to */
#ifdef USE_TLS
	if (unlikely(type==PROTO_TLS))
		*res_si=find_si(&ip, 0, PROTO_TLS);
	else
#endif
		*res_si=find_si(&ip, 0, PROTO_TCP);
	if (unlikely(*res_si==0)){
		LOG(L_WARN, "WARNING: tcp_do_connect %s: could not find corresponding"
				" listening socket for %s, using default...\n",
					su2a(server, sizeof(*server)), ip_addr2a(&ip));
		if (server->s.sa_family==AF_INET) *res_si=sendipv4_tcp;
#ifdef USE_IPV6
		else *res_si=sendipv6_tcp;
#endif
	}
	*res_local_addr=*from;
	return s;
error:
	if (s!=-1) close(s);
	return -1;
}
  1095. struct tcp_connection* tcpconn_connect( union sockaddr_union* server,
  1096. union sockaddr_union* from,
  1097. int type, snd_flags_t* send_flags)
  1098. {
  1099. int s;
  1100. struct socket_info* si;
  1101. union sockaddr_union my_name;
  1102. struct tcp_connection* con;
  1103. enum tcp_conn_states state;
  1104. s=-1;
  1105. if (*tcp_connections_no >= cfg_get(tcp, tcp_cfg, max_connections)){
  1106. LOG(L_ERR, "ERROR: tcpconn_connect: maximum number of connections"
  1107. " exceeded (%d/%d)\n",
  1108. *tcp_connections_no,
  1109. cfg_get(tcp, tcp_cfg, max_connections));
  1110. goto error;
  1111. }
  1112. s=tcp_do_connect(server, from, type, send_flags, &my_name, &si, &state);
  1113. if (s==-1){
  1114. LOG(L_ERR, "ERROR: tcp_do_connect %s: failed (%d) %s\n",
  1115. su2a(server, sizeof(*server)), errno, strerror(errno));
  1116. goto error;
  1117. }
  1118. con=tcpconn_new(s, server, &my_name, si, type, state);
  1119. if (con==0){
  1120. LOG(L_ERR, "ERROR: tcp_connect %s: tcpconn_new failed, closing the "
  1121. " socket\n", su2a(server, sizeof(*server)));
  1122. goto error;
  1123. }
  1124. tcpconn_set_send_flags(con, *send_flags);
  1125. return con;
  1126. /*FIXME: set sock idx! */
  1127. error:
  1128. if (s!=-1) close(s); /* close the opened socket */
  1129. return 0;
  1130. }
#ifdef TCP_CONNECT_WAIT
/* Perform the real connect for a connection created in "connect wait" mode
 * and refresh its local address, bind socket and aliases.
 * Params: c    - existing connection (rcv.src_su is the destination)
 *         from - requested source address or 0 (=> kernel chooses)
 * Returns the connected socket on success, -1 on error.
 * Note: on the from!=0 path it rebuilds the aliases under TCPCONN_LOCK. */
int tcpconn_finish_connect( struct tcp_connection* c,
											union sockaddr_union* from)
{
	int s;
	int r;
	union sockaddr_union local_addr;
	struct socket_info* si;
	enum tcp_conn_states state;
	struct tcp_conn_alias* a;
	int new_conn_alias_flags;

	s=tcp_do_connect(&c->rcv.src_su, from, c->type, &c->send_flags,
						&local_addr, &si, &state);
	if (unlikely(s==-1)){
		LOG(L_ERR, "ERROR: tcpconn_finish_connect %s: tcp_do_connect for %p"
					" failed\n", su2a(&c->rcv.src_su, sizeof(c->rcv.src_su)),
					c);
		return -1;
	}
	/* now the real local address is known: update the connection */
	c->rcv.bind_address=si;
	su2ip_addr(&c->rcv.dst_ip, &local_addr);
	c->rcv.dst_port=su_getport(&local_addr);
	/* update aliases if needed */
	if (likely(from==0)){
		new_conn_alias_flags=cfg_get(tcp, tcp_cfg, new_conn_alias_flags);
		/* add aliases */
		TCPCONN_LOCK;
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip, 0,
													new_conn_alias_flags);
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
									c->rcv.dst_port, new_conn_alias_flags);
		TCPCONN_UNLOCK;
	}else if (su_cmp(from, &local_addr)!=1){
		/* the requested source address differs from the one actually used:
		 * the previously registered aliases are stale */
		new_conn_alias_flags=cfg_get(tcp, tcp_cfg, new_conn_alias_flags);
		TCPCONN_LOCK;
			/* remove all the aliases except the first one and re-add them
			 * (there shouldn't be more then the 3 default aliases at this
			 * stage) */
			for (r=1; r<c->aliases; r++){
				a=&c->con_aliases[r];
				tcpconn_listrm(tcpconn_aliases_hash[a->hash], a, next, prev);
			}
			c->aliases=1;
			/* add the local_ip:0 and local_ip:local_port aliases */
			_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
												0, new_conn_alias_flags);
			_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
									c->rcv.dst_port, new_conn_alias_flags);
		TCPCONN_UNLOCK;
	}
	return s;
}
#endif /* TCP_CONNECT_WAIT */
  1184. /* adds a tcp connection to the tcpconn hashes
  1185. * Note: it's called _only_ from the tcp_main process */
  1186. inline static struct tcp_connection* tcpconn_add(struct tcp_connection *c)
  1187. {
  1188. struct ip_addr zero_ip;
  1189. int new_conn_alias_flags;
  1190. if (likely(c)){
  1191. ip_addr_mk_any(c->rcv.src_ip.af, &zero_ip);
  1192. c->id_hash=tcp_id_hash(c->id);
  1193. c->aliases=0;
  1194. new_conn_alias_flags=cfg_get(tcp, tcp_cfg, new_conn_alias_flags);
  1195. TCPCONN_LOCK;
  1196. c->flags|=F_CONN_HASHED;
  1197. /* add it at the begining of the list*/
  1198. tcpconn_listadd(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
  1199. /* set the aliases */
  1200. /* first alias is for (peer_ip, peer_port, 0 ,0) -- for finding
  1201. * any connection to peer_ip, peer_port
  1202. * the second alias is for (peer_ip, peer_port, local_addr, 0) -- for
  1203. * finding any conenction to peer_ip, peer_port from local_addr
  1204. * the third alias is for (peer_ip, peer_port, local_addr, local_port)
  1205. * -- for finding if a fully specified connection exists */
  1206. _tcpconn_add_alias_unsafe(c, c->rcv.src_port, &zero_ip, 0,
  1207. new_conn_alias_flags);
  1208. if (likely(c->rcv.dst_ip.af && ! ip_addr_any(&c->rcv.dst_ip))){
  1209. _tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip, 0,
  1210. new_conn_alias_flags);
  1211. _tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
  1212. c->rcv.dst_port, new_conn_alias_flags);
  1213. }
  1214. /* ignore add_alias errors, there are some valid cases when one
  1215. * of the add_alias would fail (e.g. first add_alias for 2 connections
  1216. * with the same destination but different src. ip*/
  1217. TCPCONN_UNLOCK;
  1218. DBG("tcpconn_add: hashes: %d:%d:%d, %d\n",
  1219. c->con_aliases[0].hash,
  1220. c->con_aliases[1].hash,
  1221. c->con_aliases[2].hash,
  1222. c->id_hash);
  1223. return c;
  1224. }else{
  1225. LOG(L_CRIT, "tcpconn_add: BUG: null connection pointer\n");
  1226. return 0;
  1227. }
  1228. }
  1229. static inline void _tcpconn_detach(struct tcp_connection *c)
  1230. {
  1231. int r;
  1232. tcpconn_listrm(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
  1233. /* remove all the aliases */
  1234. for (r=0; r<c->aliases; r++)
  1235. tcpconn_listrm(tcpconn_aliases_hash[c->con_aliases[r].hash],
  1236. &c->con_aliases[r], next, prev);
  1237. }
/* Release all resources owned by a (detached) connection: pending write
 * queue, write lock, TLS state and finally the shm block itself.
 * Unsafe: the connection must no longer be reachable from the hashes. */
static inline void _tcpconn_free(struct tcp_connection* c)
{
#ifdef TCP_ASYNC
	/* drop any still-queued outgoing data (updates *tcp_total_wq) */
	if (unlikely(_wbufq_non_empty(c)))
		_wbufq_destroy(&c->wbuf_q);
#endif
	lock_destroy(&c->write_lock);
#ifdef USE_TLS
	if (unlikely(c->type==PROTO_TLS)) tls_tcpconn_clean(c);
#endif
	shm_free(c);
}
/* unsafe tcpconn_rm version (nolocks)
 * detaches the connection from the hashes and frees it;
 * the caller must hold TCPCONN_LOCK (or otherwise guarantee exclusivity) */
void _tcpconn_rm(struct tcp_connection* c)
{
	_tcpconn_detach(c);
	_tcpconn_free(c);
}
/* Locked removal: detach the connection from the hashes under TCPCONN_LOCK
 * and free it.
 * NOTE(review): this duplicates _tcpconn_detach()/_tcpconn_free() inline
 * but is not identical to them: it never calls _wbufq_destroy() (possible
 * leak of queued write buffers under TCP_ASYNC?) and it gates the TLS
 * cleanup on c->extra_data instead of just the type — confirm whether
 * these differences are intentional. */
void tcpconn_rm(struct tcp_connection* c)
{
	int r;
	TCPCONN_LOCK;
	tcpconn_listrm(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
	/* remove all the aliases */
	for (r=0; r<c->aliases; r++)
		tcpconn_listrm(tcpconn_aliases_hash[c->con_aliases[r].hash],
						&c->con_aliases[r], next, prev);
	TCPCONN_UNLOCK;
	lock_destroy(&c->write_lock);
#ifdef USE_TLS
	if ((c->type==PROTO_TLS)&&(c->extra_data)) tls_tcpconn_clean(c);
#endif
	shm_free(c);
}
/* finds a connection, if id=0 uses the ip addr, port, local_ip and local port
 * (host byte order) and tries to find the connection that matches all of
 * them. Wild cards can be used for local_ip and local_port (a 0 filled
 * ip address and/or a 0 local port).
 * Returns the matching (non-BAD) connection or 0.
 * WARNING: unprotected (locks) use tcpconn_get unless you really
 * know what you are doing */
struct tcp_connection* _tcpconn_find(int id, struct ip_addr* ip, int port,
									struct ip_addr* l_ip, int l_port)
{
	struct tcp_connection *c;
	struct tcp_conn_alias* a;
	unsigned hash;
	int is_local_ip_any;

#ifdef EXTRA_DEBUG
	DBG("tcpconn_find: %d  port %d\n",id, port);
	if (ip) print_ip("tcpconn_find: ip ", ip, "\n");
#endif
	if (likely(id)){
		/* lookup by connection id */
		hash=tcp_id_hash(id);
		for (c=tcpconn_id_hash[hash]; c; c=c->id_next){
#ifdef EXTRA_DEBUG
			DBG("c=%p, c->id=%d, port=%d\n",c, c->id, c->rcv.src_port);
			print_ip("ip=", &c->rcv.src_ip, "\n");
#endif
			if ((id==c->id)&&(c->state!=S_CONN_BAD)) return c;
		}
	}else if (likely(ip)){
		/* lookup by address via the alias hash; l_ip/l_port==any act as
		 * wildcards */
		hash=tcp_addr_hash(ip, port, l_ip, l_port);
		is_local_ip_any=ip_addr_any(l_ip);
		for (a=tcpconn_aliases_hash[hash]; a; a=a->next){
#ifdef EXTRA_DEBUG
			DBG("a=%p, c=%p, c->id=%d, alias port= %d port=%d\n", a, a->parent,
					a->parent->id, a->port, a->parent->rcv.src_port);
			print_ip("ip=",&a->parent->rcv.src_ip,"\n");
#endif
			if ( (a->parent->state!=S_CONN_BAD) && (port==a->port) &&
					((l_port==0) || (l_port==a->parent->rcv.dst_port)) &&
					(ip_addr_cmp(ip, &a->parent->rcv.src_ip)) &&
					(is_local_ip_any ||
						ip_addr_cmp(l_ip, &a->parent->rcv.dst_ip))
				)
				return a->parent;
		}
	}
	return 0;
}
  1318. /* _tcpconn_find with locks and timeout
  1319. * local_addr contains the desired local ip:port. If null any local address
  1320. * will be used. IN*ADDR_ANY or 0 port are wild cards.
  1321. */
  1322. struct tcp_connection* tcpconn_get(int id, struct ip_addr* ip, int port,
  1323. union sockaddr_union* local_addr,
  1324. ticks_t timeout)
  1325. {
  1326. struct tcp_connection* c;
  1327. struct ip_addr local_ip;
  1328. int local_port;
  1329. local_port=0;
  1330. if (likely(ip)){
  1331. if (unlikely(local_addr)){
  1332. su2ip_addr(&local_ip, local_addr);
  1333. local_port=su_getport(local_addr);
  1334. }else{
  1335. ip_addr_mk_any(ip->af, &local_ip);
  1336. local_port=0;
  1337. }
  1338. }
  1339. TCPCONN_LOCK;
  1340. c=_tcpconn_find(id, ip, port, &local_ip, local_port);
  1341. if (likely(c)){
  1342. atomic_inc(&c->refcnt);
  1343. /* update the timeout only if the connection is not handled
  1344. * by a tcp reader (the tcp reader process uses c->timeout for
  1345. * its own internal timeout and c->timeout will be overwritten
  1346. * anyway on return to tcp_main) */
  1347. if (likely(c->reader_pid==0))
  1348. c->timeout=get_ticks_raw()+timeout;
  1349. }
  1350. TCPCONN_UNLOCK;
  1351. return c;
  1352. }
  1353. /* add c->dst:port, local_addr as an alias for the "id" connection,
  1354. * flags: TCP_ALIAS_FORCE_ADD - add an alias even if a previous one exists
  1355. * TCP_ALIAS_REPLACE - if a prev. alias exists, replace it with the
  1356. * new one
  1357. * returns 0 on success, <0 on failure ( -1 - null c, -2 too many aliases,
  1358. * -3 alias already present and pointing to another connection)
  1359. * WARNING: must be called with TCPCONN_LOCK held */
inline static int _tcpconn_add_alias_unsafe(struct tcp_connection* c, int port,
										struct ip_addr* l_ip, int l_port,
										int flags)
{
	unsigned hash;
	struct tcp_conn_alias* a;
	struct tcp_conn_alias* nxt;
	struct tcp_connection* p;
	int is_local_ip_any;
	int i;
	int r;
	a=0;
	is_local_ip_any=ip_addr_any(l_ip);
	if (likely(c)){
		hash=tcp_addr_hash(&c->rcv.src_ip, port, l_ip, l_port);
		/* search the aliases for an already existing one */
		for (a=tcpconn_aliases_hash[hash], nxt=0; a; a=nxt){
			nxt=a->next;
			/* an alias matches if: its connection is still usable, the
			 * remote ip:port are equal and the local side matches
			 * (or is wildcarded via 0 port / any-ip) */
			if ( (a->parent->state!=S_CONN_BAD) && (port==a->port) &&
					( (l_port==0) || (l_port==a->parent->rcv.dst_port)) &&
					(ip_addr_cmp(&c->rcv.src_ip, &a->parent->rcv.src_ip)) &&
					( is_local_ip_any ||
						ip_addr_cmp(&a->parent->rcv.dst_ip, l_ip))
					){
				/* found */
				if (unlikely(a->parent!=c)){
					if (flags & TCP_ALIAS_FORCE_ADD)
						/* still have to walk the whole list to check if
						 * the alias was not already added */
						continue;
					else if (flags & TCP_ALIAS_REPLACE){
						/* remove the alias =>
						 * remove the current alias and all the following
						 * ones from the corresponding connection, shift the
						 * connection aliases array and re-add the other
						 * aliases (!= current one) */
						p=a->parent;
						/* find a's index inside p->con_aliases[] */
						for (i=0; (i<p->aliases) && (&(p->con_aliases[i])!=a);
								i++);
						if (unlikely(i==p->aliases)){
							LOG(L_CRIT, "BUG: _tcpconn_add_alias_unsafe: "
									" alias %p not found in con %p (id %d)\n",
									a, p, p->id);
							goto error_not_found;
						}
						/* unhash a and every alias after it: the memmove
						 * below would otherwise invalidate their embedded
						 * next/prev list links */
						for (r=i; r<p->aliases; r++){
							tcpconn_listrm(
								tcpconn_aliases_hash[p->con_aliases[r].hash],
								&p->con_aliases[r], next, prev);
						}
						if (likely((i+1)<p->aliases)){
							memmove(&p->con_aliases[i], &p->con_aliases[i+1],
										(p->aliases-i-1)*
										sizeof(p->con_aliases[0]));
						}
						p->aliases--;
						/* re-add the remaining aliases */
						for (r=i; r<p->aliases; r++){
							tcpconn_listadd(
								tcpconn_aliases_hash[p->con_aliases[r].hash],
								&p->con_aliases[r], next, prev);
						}
					}else
						goto error_sec; /* alias owned by another conn. */
				}else goto ok; /* alias already set on c: nothing to do */
			}
		}
		if (unlikely(c->aliases>=TCP_CON_MAX_ALIASES)) goto error_aliases;
		/* append the new alias to c's array and hash it */
		c->con_aliases[c->aliases].parent=c;
		c->con_aliases[c->aliases].port=port;
		c->con_aliases[c->aliases].hash=hash;
		tcpconn_listadd(tcpconn_aliases_hash[hash],
							&c->con_aliases[c->aliases], next, prev);
		c->aliases++;
	}else goto error_not_found;
ok:
#ifdef EXTRA_DEBUG
	if (a) DBG("_tcpconn_add_alias_unsafe: alias already present\n");
	else DBG("_tcpconn_add_alias_unsafe: alias port %d for hash %d, id %d\n",
			port, hash, c->id);
#endif
	return 0;
error_aliases:
	/* too many aliases */
	return -2;
error_not_found:
	/* null connection */
	return -1;
error_sec:
	/* alias already present and pointing to a different connection
	 * (hijack attempt?) */
	return -3;
}
  1453. /* add port as an alias for the "id" connection,
  1454. * returns 0 on success,-1 on failure */
  1455. int tcpconn_add_alias(int id, int port, int proto)
  1456. {
  1457. struct tcp_connection* c;
  1458. int ret;
  1459. struct ip_addr zero_ip;
  1460. int r;
  1461. int alias_flags;
  1462. /* fix the port */
  1463. port=port?port:((proto==PROTO_TLS)?SIPS_PORT:SIP_PORT);
  1464. TCPCONN_LOCK;
  1465. /* check if alias already exists */
  1466. c=_tcpconn_find(id, 0, 0, 0, 0);
  1467. if (likely(c)){
  1468. ip_addr_mk_any(c->rcv.src_ip.af, &zero_ip);
  1469. alias_flags=cfg_get(tcp, tcp_cfg, alias_flags);
  1470. /* alias src_ip:port, 0, 0 */
  1471. ret=_tcpconn_add_alias_unsafe(c, port, &zero_ip, 0,
  1472. alias_flags);
  1473. if (ret<0 && ret!=-3) goto error;
  1474. /* alias src_ip:port, local_ip, 0 */
  1475. ret=_tcpconn_add_alias_unsafe(c, port, &c->rcv.dst_ip, 0,
  1476. alias_flags);
  1477. if (ret<0 && ret!=-3) goto error;
  1478. /* alias src_ip:port, local_ip, local_port */
  1479. ret=_tcpconn_add_alias_unsafe(c, port, &c->rcv.dst_ip, c->rcv.dst_port,
  1480. alias_flags);
  1481. if (unlikely(ret<0)) goto error;
  1482. }else goto error_not_found;
  1483. TCPCONN_UNLOCK;
  1484. return 0;
  1485. error_not_found:
  1486. TCPCONN_UNLOCK;
  1487. LOG(L_ERR, "ERROR: tcpconn_add_alias: no connection found for id %d\n",id);
  1488. return -1;
  1489. error:
  1490. TCPCONN_UNLOCK;
  1491. switch(ret){
  1492. case -2:
  1493. LOG(L_ERR, "ERROR: tcpconn_add_alias: too many aliases (%d)"
  1494. " for connection %p (id %d) %s:%d <- %d\n",
  1495. c->aliases, c, c->id, ip_addr2a(&c->rcv.src_ip),
  1496. c->rcv.src_port, port);
  1497. for (r=0; r<c->aliases; r++){
  1498. LOG(L_ERR, "ERROR: tcpconn_add_alias: alias %d: for %p (%d)"
  1499. " %s:%d <-%d hash %x\n", r, c, c->id,
  1500. ip_addr2a(&c->rcv.src_ip), c->rcv.src_port,
  1501. c->con_aliases[r].port, c->con_aliases[r].hash);
  1502. }
  1503. break;
  1504. case -3:
  1505. LOG(L_ERR, "ERROR: tcpconn_add_alias: possible port"
  1506. " hijack attempt\n");
  1507. LOG(L_ERR, "ERROR: tcpconn_add_alias: alias for %d port %d already"
  1508. " present and points to another connection \n",
  1509. c->id, port);
  1510. break;
  1511. default:
  1512. LOG(L_ERR, "ERROR: tcpconn_add_alias: unkown error %d\n", ret);
  1513. }
  1514. return -1;
  1515. }
  1516. #ifdef TCP_FD_CACHE
  1517. static void tcp_fd_cache_init()
  1518. {
  1519. int r;
  1520. for (r=0; r<TCP_FD_CACHE_SIZE; r++)
  1521. fd_cache[r].fd=-1;
  1522. }
  1523. inline static struct fd_cache_entry* tcp_fd_cache_get(struct tcp_connection *c)
  1524. {
  1525. int h;
  1526. h=c->id%TCP_FD_CACHE_SIZE;
  1527. if ((fd_cache[h].fd>0) && (fd_cache[h].id==c->id) && (fd_cache[h].con==c))
  1528. return &fd_cache[h];
  1529. return 0;
  1530. }
  1531. inline static void tcp_fd_cache_rm(struct fd_cache_entry* e)
  1532. {
  1533. e->fd=-1;
  1534. }
  1535. inline static void tcp_fd_cache_add(struct tcp_connection *c, int fd)
  1536. {
  1537. int h;
  1538. h=c->id%TCP_FD_CACHE_SIZE;
  1539. if (likely(fd_cache[h].fd>0))
  1540. close(fd_cache[h].fd);
  1541. fd_cache[h].fd=fd;
  1542. fd_cache[h].id=c->id;
  1543. fd_cache[h].con=c;
  1544. }
  1545. #endif /* TCP_FD_CACHE */
  1546. inline static int tcpconn_chld_put(struct tcp_connection* tcpconn);
  1547. /* finds a tcpconn & sends on it
  1548. * uses the dst members to, proto (TCP|TLS) and id and tries to send
  1549. * from the "from" address (if non null and id==0)
  1550. * returns: number of bytes written (>=0) on success
  1551. * <0 on error */
int tcp_send(struct dest_info* dst, union sockaddr_union* from,
					char* buf, unsigned len)
{
	struct tcp_connection *c;
	struct tcp_connection *tmp;
	struct ip_addr ip;
	int port;
	int fd;
	long response[2]; /* [connection ptr, command] message for tcp_main */
	int n;
	int do_close_fd;
	ticks_t con_lifetime;
#ifdef TCP_ASYNC
	int enable_write_watch;
#endif /* TCP_ASYNC */
#ifdef TCP_FD_CACHE
	struct fd_cache_entry* fd_cache_e;
	int use_fd_cache;
	use_fd_cache=cfg_get(tcp, tcp_cfg, fd_cache);
	fd_cache_e=0;
#endif /* TCP_FD_CACHE */
	do_close_fd=1; /* close the fd on exit */
	port=su_getport(&dst->to);
	con_lifetime=cfg_get(tcp, tcp_cfg, con_lifetime);
	/* --- step 1: look for an existing connection (by id and/or addr) --- */
	if (likely(port)){
		su2ip_addr(&ip, &dst->to);
		c=tcpconn_get(dst->id, &ip, port, from, con_lifetime);
	}else if (likely(dst->id)){
		c=tcpconn_get(dst->id, 0, 0, 0, con_lifetime);
	}else{
		LOG(L_CRIT, "BUG: tcp_send called with null id & to\n");
		return -1;
	}
	if (likely(dst->id)){
		if (unlikely(c==0)) {
			if (likely(port)){
				/* try again w/o id */
				c=tcpconn_get(0, &ip, port, from, con_lifetime);
			}else{
				LOG(L_ERR, "ERROR: tcp_send: id %d not found, dropping\n",
						dst->id);
				return -1;
			}
		}
	}
	/* no_id: */
	/* --- step 2: no usable connection => open a new one --- */
	if (unlikely((c==0) || tcpconn_close_after_send(c))){
		if (unlikely(c)){
			/* can't use c if it's marked as close-after-send =>
			   release it and try opening new one */
			tcpconn_chld_put(c); /* release c (dec refcnt & free on 0) */
			c=0;
		}
		/* check if connect() is disabled */
		if (unlikely((dst->send_flags.f & SND_F_FORCE_CON_REUSE) ||
						cfg_get(tcp, tcp_cfg, no_connect)))
			return -1;
		DBG("tcp_send: no open tcp connection found, opening new one\n");
		/* create tcp connection */
		if (likely(from==0)){
			/* check to see if we have to use a specific source addr. */
			switch (dst->to.s.sa_family) {
				case AF_INET:
						from = tcp_source_ipv4;
						break;
#ifdef USE_IPV6
				case AF_INET6:
						from = tcp_source_ipv6;
						break;
#endif
				default:
					/* error, bad af, ignore ... */
						break;
			}
		}
#if defined(TCP_CONNECT_WAIT) && defined(TCP_ASYNC)
		/* asynchronous connect: create the connection, hash it as pending
		 * and do the connect + first write from this process, notifying
		 * tcp_main only afterwards */
		if (likely(cfg_get(tcp, tcp_cfg, tcp_connect_wait) &&
					cfg_get(tcp, tcp_cfg, async) )){
			if (unlikely(*tcp_connections_no >=
							cfg_get(tcp, tcp_cfg, max_connections))){
				LOG(L_ERR, "ERROR: tcp_send %s: maximum number of"
							" connections exceeded (%d/%d)\n",
							su2a(&dst->to, sizeof(dst->to)),
							*tcp_connections_no,
							cfg_get(tcp, tcp_cfg, max_connections));
				return -1;
			}
			c=tcpconn_new(-1, &dst->to, from, 0, dst->proto,
							S_CONN_CONNECT);
			if (unlikely(c==0)){
				LOG(L_ERR, "ERROR: tcp_send %s: could not create new"
							" connection\n",
							su2a(&dst->to, sizeof(dst->to)));
				return -1;
			}
			c->flags|=F_CONN_PENDING|F_CONN_FD_CLOSED;
			tcpconn_set_send_flags(c, dst->send_flags);
			atomic_set(&c->refcnt, 2); /* ref from here and from main hash
										  table */
			/* add it to id hash and aliases */
			if (unlikely(tcpconn_add(c)==0)){
				LOG(L_ERR, "ERROR: tcp_send %s: could not add "
								"connection %p\n",
								su2a(&dst->to, sizeof(dst->to)),
								c);
				_tcpconn_free(c);
				n=-1;
				goto end_no_conn;
			}
			/* do connect and if src ip or port changed, update the
			 * aliases */
			if (unlikely((fd=tcpconn_finish_connect(c, from))<0)){
				/* tcpconn_finish_connect will automatically blacklist
				   on error => no need to do it here */
				LOG(L_ERR, "ERROR: tcp_send %s: tcpconn_finish_connect(%p)"
							" failed\n", su2a(&dst->to, sizeof(dst->to)),
							c);
				goto conn_wait_error;
			}
			/* ? TODO: it might be faster just to queue the write directly
			 * and send to main CONN_NEW_PENDING_WRITE */
			/* delay sending the fd to main after the send */
			/* NOTE: no lock here, because the connection is marked as
			 * pending and nobody else will try to write on it. However
			 * this might produce out-of-order writes. If this is not
			 * desired either lock before the write or use
			 * _wbufq_insert(...) */
			n=_tcpconn_write_nb(fd, c, buf, len);
			if (unlikely(n<(int)len)){
				/* partial write or would-block => queue the rest and let
				 * tcp_main watch the fd for writability */
				if ((n>=0) || errno==EAGAIN || errno==EWOULDBLOCK){
					DBG("tcp_send: pending write on new connection %p "
						" (%d/%d bytes written)\n", c, n, len);
					if (n<0) n=0;
					else{
						TCP_STATS_ESTABLISHED(S_CONN_CONNECT);
						c->state=S_CONN_OK; /* partial write => connect()
												ended */
					}
					/* add to the write queue */
					lock_get(&c->write_lock);
						if (unlikely(_wbufq_insert(c, buf+n, len-n)<0)){
							lock_release(&c->write_lock);
							n=-1;
							LOG(L_ERR, "ERROR: tcp_send %s: EAGAIN and"
									" write queue full or failed for %p\n",
									su2a(&dst->to, sizeof(dst->to)),
									c);
							goto conn_wait_error;
						}
					lock_release(&c->write_lock);
					/* send to tcp_main */
					response[0]=(long)c;
					response[1]=CONN_NEW_PENDING_WRITE;
					if (unlikely(send_fd(unix_tcp_sock, response,
											sizeof(response), fd) <= 0)){
						LOG(L_ERR, "BUG: tcp_send %s: "
									"CONN_NEW_PENDING_WRITE for %p"
									" failed:" " %s (%d)\n",
									su2a(&dst->to, sizeof(dst->to)),
									c, strerror(errno), errno);
						goto conn_wait_error;
					}
					n=len;
					goto end;
				}
				/* if first write failed it's most likely a
				   connect error */
				switch(errno){
					case ENETUNREACH:
					case EHOSTUNREACH: /* not posix for send() */
#ifdef USE_DST_BLACKLIST
						dst_blacklist_add( BLST_ERR_CONNECT, dst, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_UNREACHABLE(errno, TCP_LADDR(c),
									TCP_LPORT(c), TCP_PSU(c), TCP_PROTO(c));
						break;
					case ECONNREFUSED:
					case ECONNRESET:
#ifdef USE_DST_BLACKLIST
						dst_blacklist_add( BLST_ERR_CONNECT, dst, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_RST(errno, TCP_LADDR(c),
									TCP_LPORT(c), TCP_PSU(c), TCP_PROTO(c));
						break;
					default:
						TCP_EV_CONNECT_ERR(errno, TCP_LADDR(c),
									TCP_LPORT(c), TCP_PSU(c), TCP_PROTO(c));
				}
				/* error: destroy it directly */
				TCP_STATS_CONNECT_FAILED();
				LOG(L_ERR, "ERROR: tcp_send %s: connect & send "
								" for %p failed:" " %s (%d)\n",
								su2a(&dst->to, sizeof(dst->to)),
								c, strerror(errno), errno);
				goto conn_wait_error;
			}
			LOG(L_INFO, "tcp_send: quick connect for %p\n", c);
			TCP_STATS_ESTABLISHED(S_CONN_CONNECT);
			if (unlikely(dst->send_flags.f & SND_F_CON_CLOSE)){
				/* if close-after-send requested, don't bother
				   sending the fd back to tcp_main, try closing it
				   immediately (no other tcp_send should use it,
				   because it is marked as close-after-send before
				   being added to the hash */
				goto conn_wait_close;
			}
			c->state=S_CONN_OK;
			/* send to tcp_main */
			response[0]=(long)c;
			response[1]=CONN_NEW_COMPLETE;
			if (unlikely(send_fd(unix_tcp_sock, response,
									sizeof(response), fd) <= 0)){
				LOG(L_ERR, "BUG: tcp_send %s: CONN_NEW_COMPLETE for %p"
							" failed:" " %s (%d)\n",
							su2a(&dst->to, sizeof(dst->to)),
							c, strerror(errno), errno);
				goto conn_wait_error;
			}
			goto end;
		}
#endif /* TCP_CONNECT_WAIT && TCP_ASYNC */
		/* synchronous connect: blocks until connected, then the fd and the
		 * connection are handed over to tcp_main via CONN_NEW */
		if (unlikely((c=tcpconn_connect(&dst->to, from, dst->proto,
										&dst->send_flags))==0)){
			LOG(L_ERR, "ERROR: tcp_send %s: connect failed\n",
							su2a(&dst->to, sizeof(dst->to)));
			return -1;
		}
		tcpconn_set_send_flags(c, dst->send_flags);
		if (likely(c->state==S_CONN_OK))
			TCP_STATS_ESTABLISHED(S_CONN_CONNECT);
		atomic_set(&c->refcnt, 2); /* ref. from here and it will also
									  be added in the tcp_main hash */
		fd=c->s;
		c->flags|=F_CONN_FD_CLOSED; /* not yet opened in main */
		/* ? TODO: it might be faster just to queue the write and
		 * send to main a CONN_NEW_PENDING_WRITE */
		/* send the new tcpconn to "tcp main" */
		response[0]=(long)c;
		response[1]=CONN_NEW;
		n=send_fd(unix_tcp_sock, response, sizeof(response), c->s);
		if (unlikely(n<=0)){
			LOG(L_ERR, "BUG: tcp_send %s: failed send_fd: %s (%d)\n",
					su2a(&dst->to, sizeof(dst->to)),
					strerror(errno), errno);
			/* we can safely delete it, it's not referenced by anybody */
			_tcpconn_free(c);
			n=-1;
			goto end_no_conn;
		}
		goto send_it;
	}
/* get_fd: */
/* --- step 3: existing connection => obtain a usable fd --- */
#ifdef TCP_ASYNC
	/* if data is already queued, we don't need the fd any more */
#ifdef TCP_CONNECT_WAIT
	if (unlikely(cfg_get(tcp, tcp_cfg, async) &&
					(_wbufq_non_empty(c) || (c->flags&F_CONN_PENDING)) ))
#else /* ! TCP_CONNECT_WAIT */
	if (unlikely(cfg_get(tcp, tcp_cfg, async) && (_wbufq_non_empty(c)) ))
#endif /* TCP_CONNECT_WAIT */
	{
		lock_get(&c->write_lock);
		/* re-check under the lock: the queue might have been flushed
		 * between the unlocked test above and acquiring the lock */
#ifdef TCP_CONNECT_WAIT
			if (likely(_wbufq_non_empty(c) || (c->flags&F_CONN_PENDING)))
#else /* ! TCP_CONNECT_WAIT */
			if (likely(_wbufq_non_empty(c)))
#endif /* TCP_CONNECT_WAIT */
			{
				do_close_fd=0;
				if (unlikely(_wbufq_add(c, buf, len)<0)){
					lock_release(&c->write_lock);
					n=-1;
					goto error;
				}
				n=len;
				lock_release(&c->write_lock);
				goto release_c;
			}
		lock_release(&c->write_lock);
	}
#endif /* TCP_ASYNC */
	/* check if this is not the same reader process holding
	 * c and if so send directly on c->fd */
	if (c->reader_pid==my_pid()){
		DBG("tcp_send: send from reader (%d (%d)), reusing fd\n",
				my_pid(), process_no);
		fd=c->fd;
		do_close_fd=0; /* don't close the fd on exit, it's in use */
#ifdef TCP_FD_CACHE
		use_fd_cache=0; /* don't cache: problems would arise due to the
						   close() on cache eviction (if the fd is still
						   used). If it has to be cached then dup() _must_
						   be used */
	}else if (likely(use_fd_cache &&
						((fd_cache_e=tcp_fd_cache_get(c))!=0))){
		fd=fd_cache_e->fd;
		do_close_fd=0;
		DBG("tcp_send: found fd in cache ( %d, %p, %d)\n",
				fd, c, fd_cache_e->id);
#endif /* TCP_FD_CACHE */
	}else{
		DBG("tcp_send: tcp connection found (%p), acquiring fd\n", c);
		/* get the fd: ask tcp_main to pass the connection's fd over the
		 * unix socket (CONN_GET_FD / receive_fd round-trip) */
		response[0]=(long)c;
		response[1]=CONN_GET_FD;
		n=send_all(unix_tcp_sock, response, sizeof(response));
		if (unlikely(n<=0)){
			LOG(L_ERR, "BUG: tcp_send: failed to get fd(write):%s (%d)\n",
					strerror(errno), errno);
			n=-1;
			goto release_c;
		}
		DBG("tcp_send, c= %p, n=%d\n", c, n);
		n=receive_fd(unix_tcp_sock, &tmp, sizeof(tmp), &fd, MSG_WAITALL);
		if (unlikely(n<=0)){
			LOG(L_ERR, "BUG: tcp_send: failed to get fd(receive_fd):"
						" %s (%d)\n", strerror(errno), errno);
			n=-1;
			do_close_fd=0;
			goto release_c;
		}
		/* sanity check: tcp_main must answer with the same connection */
		if (unlikely(c!=tmp)){
			LOG(L_CRIT, "BUG: tcp_send: get_fd: got different connection:"
					" %p (id= %d, refcnt=%d state=%d) != "
					" %p (n=%d)\n",
					c, c->id, atomic_get(&c->refcnt), c->state,
					tmp, n
				);
			n=-1; /* fail */
			goto end;
		}
		DBG("tcp_send: after receive_fd: c= %p n=%d fd=%d\n",c, n, fd);
	}
/* --- step 4: the actual write (async queue or blocking send) --- */
send_it:
	DBG("tcp_send: sending...\n");
	lock_get(&c->write_lock);
	/* update connection send flags with the current ones */
	tcpconn_set_send_flags(c, dst->send_flags);
#ifdef TCP_ASYNC
	if (likely(cfg_get(tcp, tcp_cfg, async))){
		if (_wbufq_non_empty(c)
#ifdef TCP_CONNECT_WAIT
			|| (c->flags&F_CONN_PENDING)
#endif /* TCP_CONNECT_WAIT */
			){
			/* data already queued => append to preserve ordering */
			if (unlikely(_wbufq_add(c, buf, len)<0)){
				lock_release(&c->write_lock);
				n=-1;
				goto error;
			}
			lock_release(&c->write_lock);
			n=len;
			goto end;
		}
		n=_tcpconn_write_nb(fd, c, buf, len);
	}else{
#endif /* TCP_ASYNC */
#ifdef USE_TLS
		if (c->type==PROTO_TLS)
			n=tls_blocking_write(c, fd, buf, len);
		else
#endif
			/* n=tcp_blocking_write(c, fd, buf, len); */
			n=tsend_stream(fd, buf, len,
							TICKS_TO_S(cfg_get(tcp, tcp_cfg, send_timeout)) *
							1000);
#ifdef TCP_ASYNC
	}
#else /* ! TCP_ASYNC */
	lock_release(&c->write_lock);
#endif /* TCP_ASYNC */
	DBG("tcp_send: after real write: c= %p n=%d fd=%d\n",c, n, fd);
	DBG("tcp_send: buf=\n%.*s\n", (int)len, buf);
	/* --- step 5: short write / error handling --- */
	if (unlikely(n<(int)len)){
#ifdef TCP_ASYNC
		if (cfg_get(tcp, tcp_cfg, async) &&
				((n>=0) || errno==EAGAIN || errno==EWOULDBLOCK)){
			/* queue the unsent part; tell tcp_main to watch for write
			 * readiness only if the queue was empty before (otherwise
			 * tcp_main is already watching) */
			enable_write_watch=_wbufq_empty(c);
			if (n<0) n=0;
			else if (unlikely(c->state==S_CONN_CONNECT ||
						c->state==S_CONN_ACCEPT)){
				TCP_STATS_ESTABLISHED(c->state);
				c->state=S_CONN_OK; /* something was written */
			}
			if (unlikely(_wbufq_add(c, buf+n, len-n)<0)){
				lock_release(&c->write_lock);
				n=-1;
				goto error;
			}
			lock_release(&c->write_lock);
			n=len;
			if (likely(enable_write_watch)){
				response[0]=(long)c;
				response[1]=CONN_QUEUED_WRITE;
				if (send_all(unix_tcp_sock, response, sizeof(response)) <= 0){
					LOG(L_ERR, "BUG: tcp_send: error return failed "
								"(write):%s (%d)\n", strerror(errno), errno);
					n=-1;
					goto error;
				}
			}
			goto end;
		}else{
			lock_release(&c->write_lock);
		}
#endif /* TCP_ASYNC */
		/* blacklist / stats depending on whether the connection was still
		 * connecting (connect failure) or established (send failure) */
		if (unlikely(c->state==S_CONN_CONNECT)){
			switch(errno){
				case ENETUNREACH:
				case EHOSTUNREACH: /* not posix for send() */
#ifdef USE_DST_BLACKLIST
					dst_blacklist_su(BLST_ERR_CONNECT, c->rcv.proto,
										&c->rcv.src_su, &c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
					TCP_EV_CONNECT_UNREACHABLE(errno, TCP_LADDR(c),
									TCP_LPORT(c), TCP_PSU(c), TCP_PROTO(c));
					break;
				case ECONNREFUSED:
				case ECONNRESET:
#ifdef USE_DST_BLACKLIST
					dst_blacklist_su(BLST_ERR_CONNECT, c->rcv.proto,
										&c->rcv.src_su, &c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
					TCP_EV_CONNECT_RST(errno, TCP_LADDR(c), TCP_LPORT(c),
										TCP_PSU(c), TCP_PROTO(c));
					break;
				default:
					TCP_EV_CONNECT_ERR(errno, TCP_LADDR(c), TCP_LPORT(c),
										TCP_PSU(c), TCP_PROTO(c));
			}
			TCP_STATS_CONNECT_FAILED();
		}else{
			switch(errno){
				case ECONNREFUSED:
				case ECONNRESET:
					TCP_STATS_CON_RESET();
					/* no break */
				case ENETUNREACH:
				/*case EHOSTUNREACH: -- not posix */
#ifdef USE_DST_BLACKLIST
					dst_blacklist_su(BLST_ERR_SEND, c->rcv.proto,
										&c->rcv.src_su, &c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
					break;
			}
		}
		LOG(L_ERR, "ERROR: tcp_send: failed to send on %p (%s:%d->%s): %s (%d)"
					"\n", c, ip_addr2a(&c->rcv.dst_ip), c->rcv.dst_port,
					su2a(&c->rcv.src_su, sizeof(c->rcv.src_su)),
					strerror(errno), errno);
#ifdef TCP_ASYNC
error:
#endif /* TCP_ASYNC */
		/* error on the connection , mark it as bad and set 0 timeout */
		c->state=S_CONN_BAD;
		c->timeout=get_ticks_raw();
		/* tell "main" it should drop this (optional it will t/o anyway?)*/
		response[0]=(long)c;
		response[1]=CONN_ERROR;
		if (send_all(unix_tcp_sock, response, sizeof(response))<=0){
			LOG(L_CRIT, "BUG: tcp_send: error return failed (write):%s (%d)\n",
					strerror(errno), errno);
			tcpconn_chld_put(c); /* deref. it manually */
			n=-1;
		}
		/* CONN_ERROR will auto-dec refcnt => we must not call tcpconn_put
		 * if it succeeds */
#ifdef TCP_FD_CACHE
		if (unlikely(fd_cache_e)){
			LOG(L_ERR, "ERROR: tcp_send %s: error on cached fd, removing from"
					" the cache (%d, %p, %d)\n",
					su2a(&c->rcv.src_su, sizeof(c->rcv.src_su)),
					fd, fd_cache_e->con, fd_cache_e->id);
			tcp_fd_cache_rm(fd_cache_e);
			close(fd);
		}else
#endif /* TCP_FD_CACHE */
			if (do_close_fd) close(fd);
		return n; /* error return, no tcpconn_put */
	}
/* --- step 6: success: update state, honour close-after-send, cleanup --- */
#ifdef TCP_ASYNC
	lock_release(&c->write_lock);
#endif /* TCP_ASYNC */
	/* in non-async mode here we're either in S_CONN_OK or S_CONN_ACCEPT*/
	if (unlikely(c->state==S_CONN_CONNECT || c->state==S_CONN_ACCEPT)){
		TCP_STATS_ESTABLISHED(c->state);
		c->state=S_CONN_OK;
	}
	if (unlikely(dst->send_flags.f & SND_F_CON_CLOSE)){
		/* close after write => send EOF request to tcp_main */
		c->state=S_CONN_BAD;
		c->timeout=get_ticks_raw();
		/* tell "main" it should drop this*/
		response[0]=(long)c;
		response[1]=CONN_EOF;
		if (send_all(unix_tcp_sock, response, sizeof(response))<=0){
			LOG(L_CRIT, "BUG: tcp_send: error return failed (write):%s (%d)\n",
					strerror(errno), errno);
			tcpconn_chld_put(c); /* deref. it manually */
			n=-1;
		}
		/* CONN_EOF will auto-dec refcnt => we must not call tcpconn_put
		 * if it succeeds */
#ifdef TCP_FD_CACHE
		if (unlikely(fd_cache_e)){
			tcp_fd_cache_rm(fd_cache_e);
			fd_cache_e=0;
			close(fd);
		}else
#endif /* TCP_FD_CACHE */
			if (do_close_fd) close(fd);
		goto end_no_conn;
	}
end:
#ifdef TCP_FD_CACHE
	if (unlikely((fd_cache_e==0) && use_fd_cache)){
		tcp_fd_cache_add(c, fd);
	}else
#endif /* TCP_FD_CACHE */
		if (do_close_fd) close(fd);
release_c:
	tcpconn_chld_put(c); /* release c (dec refcnt & free on 0) */
end_no_conn:
	return n;
#ifdef TCP_CONNECT_WAIT
conn_wait_error:
	n=-1;
conn_wait_close:
	/* connect or send failed or immediate close-after-send was requested on
	 * newly created connection which was not yet sent to tcp_main (but was
	 * already hashed) => don't send to main, unhash and destroy directly
	 * (if refcnt>2 it will be destroyed when the last sender releases the
	 * connection (tcpconn_chld_put(c))) or when tcp_main receives a
	 * CONN_ERROR it*/
	c->state=S_CONN_BAD;
	/* we are here only if we opened a new fd (and not reused a cached or
	   a reader one) => if the connect was successful close the fd */
	if (fd>=0) close(fd);
	TCPCONN_LOCK;
		if (c->flags & F_CONN_HASHED){
			/* if some other parallel tcp_send did send CONN_ERROR to
			 * tcp_main, the connection might be already detached */
			_tcpconn_detach(c);
			c->flags&=~F_CONN_HASHED;
			TCPCONN_UNLOCK;
			tcpconn_put(c);
		}else
			TCPCONN_UNLOCK;
	/* dec refcnt -> mark it for destruction */
	tcpconn_chld_put(c);
	return n;
#endif /* TCP_CONNET_WAIT */
}
/* creates, configures, binds and starts listening on the TCP socket
 * described by sock_info (fills in sock_info->socket).
 * returns 0 on success, -1 on error (the socket is closed on error). */
int tcp_init(struct socket_info* sock_info)
{
	union sockaddr_union* addr;
	int optval;
#ifdef HAVE_TCP_ACCEPT_FILTER
	struct accept_filter_arg afa;
#endif /* HAVE_TCP_ACCEPT_FILTER */
#ifdef DISABLE_NAGLE
	int flag;
	struct protoent* pe;
	if (tcp_proto_no==-1){ /* if not already set */
		pe=getprotobyname("tcp");
		if (pe==0){
			LOG(L_ERR, "ERROR: tcp_init: could not get TCP protocol number\n");
			tcp_proto_no=-1;
		}else{
			tcp_proto_no=pe->p_proto;
		}
	}
#endif
	addr=&sock_info->su;
	/* sock_info->proto=PROTO_TCP; */
	if (init_su(addr, &sock_info->address, sock_info->port_no)<0){
		LOG(L_ERR, "ERROR: tcp_init: could no init sockaddr_union\n");
		goto error;
	}
	DBG("tcp_init: added %s\n", su2a(addr, sizeof(*addr)));
	sock_info->socket=socket(AF2PF(addr->s.sa_family), SOCK_STREAM, 0);
	if (sock_info->socket==-1){
		LOG(L_ERR, "ERROR: tcp_init: socket: %s\n", strerror(errno));
		goto error;
	}
#ifdef DISABLE_NAGLE
	/* TCP_NODELAY: send small packets immediately (disable Nagle);
	 * non-fatal if it fails */
	flag=1;
	if ( (tcp_proto_no!=-1) &&
		 (setsockopt(sock_info->socket, tcp_proto_no , TCP_NODELAY,
					&flag, sizeof(flag))<0) ){
		LOG(L_ERR, "ERROR: tcp_init: could not disable Nagle: %s\n",
				strerror(errno));
	}
#endif
#if !defined(TCP_DONT_REUSEADDR)
	/* Stevens, "Network Programming", Section 7.5, "Generic Socket
	 * Options": "...server started,..a child continues..on existing
	 * connection..listening server is restarted...call to bind fails
	 * ... ALL TCP servers should specify the SO_REUSEADDR option
	 * to allow the server to be restarted in this situation
	 *
	 * Indeed, without this option, the server can't restart.
	 *   -jiri
	 */
	optval=1;
	if (setsockopt(sock_info->socket, SOL_SOCKET, SO_REUSEADDR,
				(void*)&optval, sizeof(optval))==-1) {
		LOG(L_ERR, "ERROR: tcp_init: setsockopt %s\n",
			strerror(errno));
		goto error;
	}
#endif
	/* tos */
	optval = tos;
	if (setsockopt(sock_info->socket, IPPROTO_IP, IP_TOS, (void*)&optval,
				sizeof(optval)) ==-1){
		LOG(L_WARN, "WARNING: tcp_init: setsockopt tos: %s\n", strerror(errno));
		/* continue since this is not critical */
	}
#ifdef HAVE_TCP_DEFER_ACCEPT
	/* linux only */
	if ((optval=cfg_get(tcp, tcp_cfg, defer_accept))){
		if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_DEFER_ACCEPT,
					(void*)&optval, sizeof(optval)) ==-1){
			LOG(L_WARN, "WARNING: tcp_init: setsockopt TCP_DEFER_ACCEPT %s\n",
						strerror(errno));
			/* continue since this is not critical */
		}
	}
#endif /* HAVE_TCP_DEFFER_ACCEPT */
#ifdef HAVE_TCP_SYNCNT
	/* limit the number of SYN retransmits for outgoing connects */
	if ((optval=cfg_get(tcp, tcp_cfg, syncnt))){
		if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_SYNCNT, &optval,
						sizeof(optval))<0){
			LOG(L_WARN, "WARNING: tcp_init: failed to set"
						" maximum SYN retr. count: %s\n", strerror(errno));
		}
	}
#endif
#ifdef HAVE_TCP_LINGER2
	/* bound the FIN_WAIT2 lifetime */
	if ((optval=cfg_get(tcp, tcp_cfg, linger2))){
		if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_LINGER2, &optval,
						sizeof(optval))<0){
			LOG(L_WARN, "WARNING: tcp_init: failed to set"
						" maximum LINGER2 timeout: %s\n", strerror(errno));
		}
	}
#endif
	init_sock_keepalive(sock_info->socket);
	if (bind(sock_info->socket, &addr->s, sockaddru_len(*addr))==-1){
		LOG(L_ERR, "ERROR: tcp_init: bind(%x, %p, %d) on %s:%d : %s\n",
				sock_info->socket, &addr->s,
				(unsigned)sockaddru_len(*addr),
				sock_info->address_str.s,
				sock_info->port_no,
				strerror(errno));
		goto error;
	}
	if (listen(sock_info->socket, TCP_LISTEN_BACKLOG)==-1){
		LOG(L_ERR, "ERROR: tcp_init: listen(%x, %p, %d) on %s: %s\n",
				sock_info->socket, &addr->s,
				(unsigned)sockaddru_len(*addr),
				sock_info->address_str.s,
				strerror(errno));
		goto error;
	}
#ifdef HAVE_TCP_ACCEPT_FILTER
	/* freebsd */
	/* accept filter: wake up accept() only when data has arrived */
	if (cfg_get(tcp, tcp_cfg, defer_accept)){
		memset(&afa, 0, sizeof(afa));
		strcpy(afa.af_name, "dataready");
		if (setsockopt(sock_info->socket, SOL_SOCKET, SO_ACCEPTFILTER,
					(void*)&afa, sizeof(afa)) ==-1){
			LOG(L_WARN, "WARNING: tcp_init: setsockopt SO_ACCEPTFILTER %s\n",
						strerror(errno));
			/* continue since this is not critical */
		}
	}
#endif /* HAVE_TCP_ACCEPT_FILTER */
	return 0;
error:
	if (sock_info->socket!=-1){
		close(sock_info->socket);
		sock_info->socket=-1;
	}
	return -1;
}
  2239. /* close tcp_main's fd from a tcpconn
  2240. * WARNING: call only in tcp_main context */
  2241. inline static void tcpconn_close_main_fd(struct tcp_connection* tcpconn)
  2242. {
  2243. int fd;
  2244. fd=tcpconn->s;
  2245. #ifdef USE_TLS
  2246. /*FIXME: lock ->writelock ? */
  2247. if (tcpconn->type==PROTO_TLS)
  2248. tls_close(tcpconn, fd);
  2249. #endif
  2250. #ifdef TCP_FD_CACHE
  2251. if (likely(cfg_get(tcp, tcp_cfg, fd_cache))) shutdown(fd, SHUT_RDWR);
  2252. #endif /* TCP_FD_CACHE */
  2253. close_again:
  2254. if (unlikely(close(fd)<0)){
  2255. if (errno==EINTR)
  2256. goto close_again;
  2257. LOG(L_ERR, "ERROR: tcpconn_put_destroy; close() failed: %s (%d)\n",
  2258. strerror(errno), errno);
  2259. }
  2260. }
/* dec refcnt & frees the connection if refcnt==0
 * returns 1 if the connection is freed, 0 otherwise
 *
 * WARNING: use only from child processes */
inline static int tcpconn_chld_put(struct tcp_connection* tcpconn)
{
	if (unlikely(atomic_dec_and_test(&tcpconn->refcnt))){
		DBG("tcpconn_chld_put: destroying connection %p (%d, %d) "
				"flags %04x\n", tcpconn, tcpconn->id,
				tcpconn->s, tcpconn->flags);
		/* sanity checks: by the time the refcnt reaches 0 in a child, the
		 * fd must already be closed and the connection must no longer be
		 * in the hash, in tcp_main's timer or watched for io */
		membar_read_atomic_op(); /* make sure we see the current flags */
		if (unlikely(!(tcpconn->flags & F_CONN_FD_CLOSED) ||
					(tcpconn->flags &
						(F_CONN_HASHED|F_CONN_MAIN_TIMER|
						 F_CONN_READ_W|F_CONN_WRITE_W)) )){
			LOG(L_CRIT, "BUG: tcpconn_chld_put: %p bad flags = %0x\n",
					tcpconn, tcpconn->flags);
			abort();
		}
		_tcpconn_free(tcpconn); /* destroys also the wbuf_q if still present*/
		return 1;
	}
	return 0;
}
/* simple destroy function (the connection should be already removed
 * from the hashes and the fds should not be watched anymore for IO)
 */
inline static void tcpconn_destroy(struct tcp_connection* tcpconn)
{
	DBG("tcpconn_destroy: destroying connection %p (%d, %d) "
			"flags %04x\n", tcpconn, tcpconn->id,
			tcpconn->s, tcpconn->flags);
	if (unlikely(tcpconn->flags & F_CONN_HASHED)){
		/* should not happen per the contract above; log and recover by
		 * detaching the connection ourselves */
		LOG(L_CRIT, "BUG: tcpconn_destroy: called with hashed"
				" connection (%p)\n", tcpconn);
		/* try to continue */
		if (likely(tcpconn->flags & F_CONN_MAIN_TIMER))
			local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
		TCPCONN_LOCK;
		_tcpconn_detach(tcpconn);
		TCPCONN_UNLOCK;
	}
	if (likely(!(tcpconn->flags & F_CONN_FD_CLOSED))){
		/* close the fd and account for one less open tcp connection */
		tcpconn_close_main_fd(tcpconn);
		(*tcp_connections_no)--;
	}
	_tcpconn_free(tcpconn); /* destroys also the wbuf_q if still present*/
}
  2310. /* tries to destroy the connection: dec. refcnt and if 0 destroys the
  2311. * connection, else it will mark it as BAD and close the main fds
  2312. *
  2313. * returns 1 if the connection was destroyed, 0 otherwise
  2314. *
  2315. * WARNING: - the connection _has_ to be removed from the hash and timer
  2316. * first (use tcpconn_try_unhash() for this )
  2317. * - the fd should not be watched anymore (io_watch_del()...)
  2318. * - must be called _only_ from the tcp_main process context
  2319. * (or else the fd will remain open)
  2320. */
  2321. inline static int tcpconn_put_destroy(struct tcp_connection* tcpconn)
  2322. {
  2323. if (unlikely((tcpconn->flags &
  2324. (F_CONN_WRITE_W|F_CONN_HASHED|F_CONN_MAIN_TIMER|F_CONN_READ_W)) )){
  2325. /* sanity check */
  2326. if (unlikely(tcpconn->flags & F_CONN_HASHED)){
  2327. LOG(L_CRIT, "BUG: tcpconn_destroy: called with hashed and/or"
  2328. "on timer connection (%p), flags = %0x\n",
  2329. tcpconn, tcpconn->flags);
  2330. /* try to continue */
  2331. if (likely(tcpconn->flags & F_CONN_MAIN_TIMER))
  2332. local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
  2333. TCPCONN_LOCK;
  2334. _tcpconn_detach(tcpconn);
  2335. TCPCONN_UNLOCK;
  2336. }else{
  2337. LOG(L_CRIT, "BUG: tcpconn_put_destroy: %p flags = %0x\n",
  2338. tcpconn, tcpconn->flags);
  2339. }
  2340. }
  2341. tcpconn->state=S_CONN_BAD;
  2342. /* in case it's still in a reader timer */
  2343. tcpconn->timeout=get_ticks_raw();
  2344. /* fast close: close fds now */
  2345. if (likely(!(tcpconn->flags & F_CONN_FD_CLOSED))){
  2346. tcpconn_close_main_fd(tcpconn);
  2347. tcpconn->flags|=F_CONN_FD_CLOSED;
  2348. (*tcp_connections_no)--;
  2349. }
  2350. /* all the flags / ops on the tcpconn must be done prior to decrementing
  2351. * the refcnt. and at least a membar_write_atomic_op() mem. barrier or
  2352. * a mb_atomic_* op must * be used to make sure all the changed flags are
  2353. * written into memory prior to the new refcnt value */
  2354. if (unlikely(mb_atomic_dec_and_test(&tcpconn->refcnt))){
  2355. _tcpconn_free(tcpconn);
  2356. return 1;
  2357. }
  2358. return 0;
  2359. }
/* try to remove a connection from the hashes and timer.
 * returns 1 if the connection was removed, 0 if not (connection not in
 * hash)
 *
 * WARNING: call it only in the tcp_main process context or else the
 * timer removal won't work.
 */
inline static int tcpconn_try_unhash(struct tcp_connection* tcpconn)
{
	if (likely(tcpconn->flags & F_CONN_HASHED)){
		tcpconn->state=S_CONN_BAD;
		if (likely(tcpconn->flags & F_CONN_MAIN_TIMER)){
			local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
			tcpconn->flags&=~F_CONN_MAIN_TIMER;
		}else
			/* in case it's still in a reader timer */
			tcpconn->timeout=get_ticks_raw();
		TCPCONN_LOCK;
		/* re-check F_CONN_HASHED under the lock: another process might
		 * have unhashed the connection in the meantime */
		if (tcpconn->flags & F_CONN_HASHED){
			tcpconn->flags&=~F_CONN_HASHED;
			_tcpconn_detach(tcpconn);
			TCPCONN_UNLOCK;
		}else{
			/* tcp_send was faster and did unhash it itself */
			TCPCONN_UNLOCK;
			return 0;
		}
#ifdef TCP_ASYNC
		/* empty possible write buffers (optional) */
		if (unlikely(_wbufq_non_empty(tcpconn))){
			lock_get(&tcpconn->write_lock);
			/* check again, while holding the lock */
			if (likely(_wbufq_non_empty(tcpconn)))
				_wbufq_destroy(&tcpconn->wbuf_q);
			lock_release(&tcpconn->write_lock);
		}
#endif /* TCP_ASYNC */
		return 1;
	}
	return 0;
}
#ifdef SEND_FD_QUEUE
/* one delayed send_fd() request: a connection whose fd could not yet be
 * passed to a tcp reader (the unix socket would have blocked) */
struct send_fd_info{
	struct tcp_connection* tcp_conn; /* connection whose fd must be sent */
	ticks_t expire;                  /* give up retrying after this tick */
	int unix_sock;                   /* destination reader's unix socket */
	unsigned int retries; /* debugging */
};

/* simple growable array queue of pending send_fd requests */
struct tcp_send_fd_q{
	struct send_fd_info* data; /* buffer */
	struct send_fd_info* crt; /* pointer inside the buffer */
	struct send_fd_info* end; /* points after the last valid position */
};

/* global queue of fds waiting to be passed from tcp_main to tcp readers */
static struct tcp_send_fd_q send2child_q;
  2414. static int send_fd_queue_init(struct tcp_send_fd_q *q, unsigned int size)
  2415. {
  2416. q->data=pkg_malloc(size*sizeof(struct send_fd_info));
  2417. if (q->data==0){
  2418. LOG(L_ERR, "ERROR: send_fd_queue_init: out of memory\n");
  2419. return -1;
  2420. }
  2421. q->crt=&q->data[0];
  2422. q->end=&q->data[size];
  2423. return 0;
  2424. }
  2425. static void send_fd_queue_destroy(struct tcp_send_fd_q *q)
  2426. {
  2427. if (q->data){
  2428. pkg_free(q->data);
  2429. q->data=0;
  2430. q->crt=q->end=0;
  2431. }
  2432. }
  2433. static int init_send_fd_queues()
  2434. {
  2435. if (send_fd_queue_init(&send2child_q, SEND_FD_QUEUE_SIZE)!=0)
  2436. goto error;
  2437. return 0;
  2438. error:
  2439. LOG(L_ERR, "ERROR: init_send_fd_queues: init failed\n");
  2440. return -1;
  2441. }
/* free the global fd retry queue (counterpart of init_send_fd_queues()) */
static void destroy_send_fd_queues()
{
	send_fd_queue_destroy(&send2child_q);
}
/* append a (connection, destination unix socket) pair to the retry queue.
 * A full queue is grown (doubled, capped at MAX_SEND_FD_QUEUE_SIZE).
 * returns 0 on success, -1 on error (queue at maximum size or OOM) */
inline static int send_fd_queue_add( struct tcp_send_fd_q* q,
									int unix_sock,
									struct tcp_connection *t)
{
	struct send_fd_info* tmp;
	unsigned long new_size;

	if (q->crt>=q->end){
		/* queue full => compute the next capacity */
		new_size=q->end-&q->data[0];
		if (new_size< MAX_SEND_FD_QUEUE_SIZE/2){
			new_size*=2;
		}else new_size=MAX_SEND_FD_QUEUE_SIZE;
		if (unlikely(q->crt>=&q->data[new_size])){
			/* already at (or beyond) the maximum size => give up */
			LOG(L_ERR, "ERROR: send_fd_queue_add: queue full: %ld/%ld\n",
					(long)(q->crt-&q->data[0]-1), new_size);
			goto error;
		}
		LOG(L_CRIT, "INFO: send_fd_queue: queue full: %ld, extending to %ld\n",
				(long)(q->end-&q->data[0]), new_size);
		tmp=pkg_realloc(q->data, new_size*sizeof(struct send_fd_info));
		if (unlikely(tmp==0)){
			LOG(L_ERR, "ERROR: send_fd_queue_add: out of memory\n");
			goto error;
		}
		/* rebase crt onto the new buffer (realloc may have moved it) */
		q->crt=(q->crt-&q->data[0])+tmp;
		q->data=tmp;
		q->end=&q->data[new_size];
	}
	q->crt->tcp_conn=t;
	q->crt->unix_sock=unix_sock;
	q->crt->expire=get_ticks_raw()+SEND_FD_QUEUE_TIMEOUT;
	q->crt->retries=0;
	q->crt++;
	return 0;
error:
	return -1;
}
/* try to (re)send every queued fd to its destination tcp reader.
 * Entries that would still block and have not expired are compacted
 * in place and kept for a later run; anything else (hard error or
 * expired entry) gets its connection unhashed and destroyed. */
inline static void send_fd_queue_run(struct tcp_send_fd_q* q)
{
	struct send_fd_info* p;
	struct send_fd_info* t;

	/* p scans the queue; t marks the next slot for entries to retry
	 * (in-place compaction) */
	for (p=t=&q->data[0]; p<q->crt; p++){
		if (unlikely(send_fd(p->unix_sock, &(p->tcp_conn),
					sizeof(struct tcp_connection*), p->tcp_conn->s)<=0)){
			if ( ((errno==EAGAIN)||(errno==EWOULDBLOCK)) &&
					((s_ticks_t)(p->expire-get_ticks_raw())>0)){
				/* leave in queue for a future try */
				*t=*p;
				t->retries++;
				t++;
			}else{
				/* hard failure or entry expired => drop the connection */
				LOG(L_ERR, "ERROR: run_send_fd_queue: send_fd failed"
						" on socket %d , queue entry %ld, retries %d,"
						" connection %p, tcp socket %d, errno=%d (%s) \n",
						p->unix_sock, (long)(p-&q->data[0]), p->retries,
						p->tcp_conn, p->tcp_conn->s, errno,
						strerror(errno));
#ifdef TCP_ASYNC
				if (p->tcp_conn->flags & F_CONN_WRITE_W){
					io_watch_del(&io_h, p->tcp_conn->s, -1, IO_FD_CLOSING);
					p->tcp_conn->flags &=~F_CONN_WRITE_W;
				}
#endif
				p->tcp_conn->flags &= ~F_CONN_READER;
				if (likely(tcpconn_try_unhash(p->tcp_conn)))
					tcpconn_put(p->tcp_conn);
				tcpconn_put_destroy(p->tcp_conn); /* dec refcnt & destroy */
			}
		}
	}
	q->crt=t; /* new end of the (compacted) queue */
}
  2517. #else
  2518. #define send_fd_queue_run(q)
  2519. #endif
/* non blocking write() on a tcpconnection, unsafe version (should be called
 * while holding c->write_lock). The fd should be non-blocking.
 * returns number of bytes written on success, -1 on error (and sets errno)
 */
inline static int _tcpconn_write_nb(int fd, struct tcp_connection* c,
									char* buf, int len)
{
	int n;

again:
#ifdef USE_TLS
	if (unlikely(c->type==PROTO_TLS))
		/* FIXME: tls_nonblocking_write !! */
		n=tls_blocking_write(c, fd, buf, len);
	else
#endif /* USE_TLS */
		/* MSG_NOSIGNAL (where available) avoids SIGPIPE if the peer
		 * already closed the connection */
		n=send(fd, buf, len,
#ifdef HAVE_MSG_NOSIGNAL
				MSG_NOSIGNAL
#else
				0
#endif /* HAVE_MSG_NOSIGNAL */
			);
	if (unlikely(n<0)){
		/* retry only on signal interruption; every other error
		 * (including EAGAIN) is reported to the caller */
		if (errno==EINTR) goto again;
	}
	return n;
}
/* handles io from a tcp child process
 * params: tcp_c - pointer in the tcp_children array, to the entry for
 *                 which an io event was detected
 *         fd_i  - fd index in the fd_array (usefull for optimizing
 *                 io_watch_deletes)
 * returns: handle_* return convention: -1 on error, 0 on EAGAIN (no more
 *          io events queued), >0 on success. success/error refer only to
 *          the reads from the fd.
 */
inline static int handle_tcp_child(struct tcp_child* tcp_c, int fd_i)
{
	struct tcp_connection* tcpconn;
	long response[2]; /* [0]=connection pointer, [1]=command */
	int cmd;
	int bytes;
	int n;
	ticks_t t;
	ticks_t crt_timeout;
	ticks_t con_lifetime;

	if (unlikely(tcp_c->unix_sock<=0)){
		/* (we can't have a fd==0, 0 is never closed )*/
		LOG(L_CRIT, "BUG: handle_tcp_child: fd %d for %d "
				"(pid %ld, ser no %d)\n", tcp_c->unix_sock,
				(int)(tcp_c-&tcp_children[0]), (long)tcp_c->pid,
				tcp_c->proc_no);
		goto error;
	}
	/* read until sizeof(response)
	 * (this is a SOCK_STREAM so read is not atomic) */
	bytes=recv_all(tcp_c->unix_sock, response, sizeof(response), MSG_DONTWAIT);
	if (unlikely(bytes<(int)sizeof(response))){
		if (bytes==0){
			/* EOF -> bad, child has died */
			DBG("DBG: handle_tcp_child: dead tcp child %d (pid %ld, no %d)"
					" (shutting down?)\n", (int)(tcp_c-&tcp_children[0]),
					(long)tcp_c->pid, tcp_c->proc_no );
			/* don't listen on it any more */
			io_watch_del(&io_h, tcp_c->unix_sock, fd_i, 0);
			goto error; /* eof. so no more io here, it's ok to return error */
		}else if (bytes<0){
			/* EAGAIN is ok if we try to empty the buffer
			 * e.g.: SIGIO_RT overflow mode or EPOLL ET */
			if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
				LOG(L_CRIT, "ERROR: handle_tcp_child: read from tcp child %ld "
						" (pid %ld, no %d) %s [%d]\n",
						(long)(tcp_c-&tcp_children[0]), (long)tcp_c->pid,
						tcp_c->proc_no, strerror(errno), errno );
			}else{
				bytes=0;
			}
			/* try to ignore ? */
			goto end;
		}else{
			/* should never happen */
			LOG(L_CRIT, "BUG: handle_tcp_child: too few bytes received (%d)\n",
					bytes );
			bytes=0; /* something was read so there is no error; otoh if
					  receive_fd returned less then requested => the receive
					  buffer is empty => no more io queued on this fd */
			goto end;
		}
	}
	DBG("handle_tcp_child: reader response= %lx, %ld from %d \n",
			response[0], response[1], (int)(tcp_c-&tcp_children[0]));
	cmd=response[1];
	tcpconn=(struct tcp_connection*)response[0];
	if (unlikely(tcpconn==0)){
		/* should never happen */
		LOG(L_CRIT, "BUG: handle_tcp_child: null tcpconn pointer received"
				" from tcp child %d (pid %ld): %lx, %lx\n",
				(int)(tcp_c-&tcp_children[0]), (long)tcp_c->pid,
				response[0], response[1]) ;
		goto end;
	}
	switch(cmd){
		case CONN_RELEASE:
			/* reader finished with the connection => take it back:
			 * re-arm the lifetime timer and watch it for io again */
			tcp_c->busy--;
			if (unlikely(tcpconn_put(tcpconn))){
				tcpconn_destroy(tcpconn);
				break;
			}
			if (unlikely(tcpconn->state==S_CONN_BAD)){
#ifdef TCP_ASYNC
				if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
					io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
					tcpconn->flags &= ~F_CONN_WRITE_W;
				}
#endif /* TCP_ASYNC */
				if (tcpconn_try_unhash(tcpconn))
					tcpconn_put_destroy(tcpconn);
				break;
			}
			/* update the timeout*/
			t=get_ticks_raw();
			con_lifetime=cfg_get(tcp, tcp_cfg, con_lifetime);
			tcpconn->timeout=t+con_lifetime;
			crt_timeout=con_lifetime;
#ifdef TCP_ASYNC
			if (unlikely(cfg_get(tcp, tcp_cfg, async) &&
							_wbufq_non_empty(tcpconn) )){
				if (unlikely(TICKS_GE(t, tcpconn->wbuf_q.wr_timeout))){
					DBG("handle_tcp_child: wr. timeout on CONN_RELEASE for %p "
							"refcnt= %d\n", tcpconn,
							atomic_get(&tcpconn->refcnt));
					/* timeout */
					if (unlikely(tcpconn->state==S_CONN_CONNECT)){
#ifdef USE_DST_BLACKLIST
						dst_blacklist_su( BLST_ERR_CONNECT,
											tcpconn->rcv.proto,
											&tcpconn->rcv.src_su,
											&tcpconn->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_TIMEOUT(0, TCP_LADDR(tcpconn),
										TCP_LPORT(tcpconn), TCP_PSU(tcpconn),
										TCP_PROTO(tcpconn));
						TCP_STATS_CONNECT_FAILED();
					}else{
#ifdef USE_DST_BLACKLIST
						dst_blacklist_su( BLST_ERR_SEND,
											tcpconn->rcv.proto,
											&tcpconn->rcv.src_su,
											&tcpconn->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_SEND_TIMEOUT(0, &tcpconn->rcv);
						TCP_STATS_SEND_TIMEOUT();
					}
					if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
						io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
						tcpconn->flags&=~F_CONN_WRITE_W;
					}
					if (tcpconn_try_unhash(tcpconn))
						tcpconn_put_destroy(tcpconn);
					break;
				}else{
					/* write still pending => wake up at the earlier of the
					 * write timeout and the connection lifetime */
					crt_timeout=MIN_unsigned(con_lifetime,
											tcpconn->wbuf_q.wr_timeout-t);
				}
			}
#endif /* TCP_ASYNC */
			/* re-activate the timer */
			tcpconn->timer.f=tcpconn_main_timeout;
			local_timer_reinit(&tcpconn->timer);
			local_timer_add(&tcp_main_ltimer, &tcpconn->timer, crt_timeout, t);
			/* must be after the de-ref*/
			tcpconn->flags|=(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD);
			tcpconn->flags&=~(F_CONN_READER|F_CONN_OOB_DATA);
#ifdef TCP_ASYNC
			if (unlikely(tcpconn->flags & F_CONN_WRITE_W))
				n=io_watch_chg(&io_h, tcpconn->s, POLLIN| POLLOUT, -1);
			else
#endif /* TCP_ASYNC */
				n=io_watch_add(&io_h, tcpconn->s, POLLIN, F_TCPCONN, tcpconn);
			if (unlikely(n<0)){
				LOG(L_CRIT, "ERROR: tcp_main: handle_tcp_child: failed to add"
						" new socket to the fd list\n");
				tcpconn->flags&=~F_CONN_READ_W;
#ifdef TCP_ASYNC
				if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
					io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
					tcpconn->flags&=~F_CONN_WRITE_W;
				}
#endif /* TCP_ASYNC */
				if (tcpconn_try_unhash(tcpconn))
					tcpconn_put_destroy(tcpconn);
				break;
			}
			DBG("handle_tcp_child: CONN_RELEASE %p refcnt= %d\n",
					tcpconn, atomic_get(&tcpconn->refcnt));
			break;
		case CONN_ERROR:
		case CONN_DESTROY:
		case CONN_EOF:
			/* WARNING: this will auto-dec. refcnt! */
			tcp_c->busy--;
			/* main doesn't listen on it => we don't have to delete it
			 if (tcpconn->s!=-1)
				io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
			*/
#ifdef TCP_ASYNC
			if ((tcpconn->flags & F_CONN_WRITE_W) && (tcpconn->s!=-1)){
				io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
				tcpconn->flags&=~F_CONN_WRITE_W;
			}
#endif /* TCP_ASYNC */
			if (tcpconn_try_unhash(tcpconn))
				tcpconn_put(tcpconn);
			tcpconn_put_destroy(tcpconn); /* deref & delete if refcnt==0 */
			break;
		default:
			LOG(L_CRIT, "BUG: handle_tcp_child: unknown cmd %d"
					" from tcp reader %d\n",
					cmd, (int)(tcp_c-&tcp_children[0]));
	}
end:
	return bytes;
error:
	return -1;
}
/* handles io from a "generic" ser process (get fd or new_fd from a tcp_send)
 *
 * params: p     - pointer in the ser processes array (pt[]), to the entry for
 *                 which an io event was detected
 *         fd_i  - fd index in the fd_array (usefull for optimizing
 *                 io_watch_deletes)
 * returns: handle_* return convention:
 *          -1 on error reading from the fd,
 *           0 on EAGAIN or when no more io events are queued
 *             (receive buffer empty),
 *          >0 on successfull reads from the fd (the receive buffer might
 *             be non-empty).
 */
inline static int handle_ser_child(struct process_table* p, int fd_i)
{
	struct tcp_connection* tcpconn;
	long response[2]; /* [0]=connection pointer, [1]=command */
	int cmd;
	int bytes;
	int ret;
	int fd;  /* fd possibly transferred over the unix socket */
	int flags;
	ticks_t t;
	ticks_t con_lifetime;
#ifdef TCP_ASYNC
	ticks_t nxt_timeout;
#endif /* TCP_ASYNC */

	ret=-1;
	if (unlikely(p->unix_sock<=0)){
		/* (we can't have a fd==0, 0 is never closed )*/
		LOG(L_CRIT, "BUG: handle_ser_child: fd %d for %d "
				"(pid %d)\n", p->unix_sock, (int)(p-&pt[0]), p->pid);
		goto error;
	}
	/* get all bytes and the fd (if transmitted)
	 * (this is a SOCK_STREAM so read is not atomic) */
	bytes=receive_fd(p->unix_sock, response, sizeof(response), &fd,
						MSG_DONTWAIT);
	if (unlikely(bytes<(int)sizeof(response))){
		/* too few bytes read */
		if (bytes==0){
			/* EOF -> bad, child has died */
			DBG("DBG: handle_ser_child: dead child %d, pid %d"
					" (shutting down?)\n", (int)(p-&pt[0]), p->pid);
			/* don't listen on it any more */
			io_watch_del(&io_h, p->unix_sock, fd_i, 0);
			goto error; /* child dead => no further io events from it */
		}else if (bytes<0){
			/* EAGAIN is ok if we try to empty the buffer
			 * e.g: SIGIO_RT overflow mode or EPOLL ET */
			if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
				LOG(L_CRIT, "ERROR: handle_ser_child: read from child %d "
						"(pid %d): %s [%d]\n", (int)(p-&pt[0]), p->pid,
						strerror(errno), errno);
				ret=-1;
			}else{
				ret=0;
			}
			/* try to ignore ? */
			goto end;
		}else{
			/* should never happen */
			LOG(L_CRIT, "BUG: handle_ser_child: too few bytes received (%d)\n",
					bytes );
			ret=0; /* something was read so there is no error; otoh if
					  receive_fd returned less then requested => the receive
					  buffer is empty => no more io queued on this fd */
			goto end;
		}
	}
	ret=1; /* something was received, there might be more queued */
	DBG("handle_ser_child: read response= %lx, %ld, fd %d from %d (%d)\n",
			response[0], response[1], fd, (int)(p-&pt[0]), p->pid);
	cmd=response[1];
	tcpconn=(struct tcp_connection*)response[0];
	if (unlikely(tcpconn==0)){
		LOG(L_CRIT, "BUG: handle_ser_child: null tcpconn pointer received"
				" from child %d (pid %d): %lx, %lx\n",
				(int)(p-&pt[0]), p->pid, response[0], response[1]) ;
		goto end;
	}
	switch(cmd){
		case CONN_ERROR:
			LOG(L_ERR, "handle_ser_child: ERROR: received CON_ERROR for %p"
					" (id %d), refcnt %d\n",
					tcpconn, tcpconn->id, atomic_get(&tcpconn->refcnt));
			/* fall through: error handling is identical to a forced EOF */
		case CONN_EOF: /* forced EOF after full send, due to send flags */
#ifdef TCP_CONNECT_WAIT
			/* if the connection is pending => it might be on the way of
			 * reaching tcp_main (e.g. CONN_NEW_COMPLETE or
			 * CONN_NEW_PENDING_WRITE) => it cannot be destroyed here */
			if ( !(tcpconn->flags & F_CONN_PENDING) &&
					tcpconn_try_unhash(tcpconn) )
				tcpconn_put(tcpconn);
#else /* ! TCP_CONNECT_WAIT */
			if ( tcpconn_try_unhash(tcpconn) )
				tcpconn_put(tcpconn);
#endif /* TCP_CONNECT_WAIT */
			if ( ((tcpconn->flags & (F_CONN_WRITE_W|F_CONN_READ_W)) ) &&
					(tcpconn->s!=-1)){
				io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
				tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W);
			}
			tcpconn_put_destroy(tcpconn); /* dec refcnt & destroy on 0 */
			break;
		case CONN_GET_FD:
			/* send the requested FD */
			/* WARNING: take care of setting refcnt properly to
			 * avoid race conditions */
			if (unlikely(send_fd(p->unix_sock, &tcpconn, sizeof(tcpconn),
							tcpconn->s)<=0)){
				LOG(L_ERR, "ERROR: handle_ser_child: send_fd failed\n");
			}
			break;
		case CONN_NEW:
			/* update the fd in the requested tcpconn*/
			/* WARNING: take care of setting refcnt properly to
			 * avoid race conditions */
			if (unlikely(fd==-1)){
				LOG(L_CRIT, "BUG: handle_ser_child: CONN_NEW:"
							" no fd received\n");
				tcpconn->flags|=F_CONN_FD_CLOSED;
				tcpconn_put_destroy(tcpconn);
				break;
			}
			(*tcp_connections_no)++;
			tcpconn->s=fd;
			/* add tcpconn to the list*/
			tcpconn_add(tcpconn);
			/* update the timeout*/
			t=get_ticks_raw();
			con_lifetime=cfg_get(tcp, tcp_cfg, con_lifetime);
			tcpconn->timeout=t+con_lifetime;
			/* activate the timer (already properly init. in tcpconn_new())
			 * no need for reinit */
			local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
								con_lifetime, t);
			tcpconn->flags|=(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD)
#ifdef TCP_ASYNC
					/* not used for now, the connection is sent to tcp_main
					 * before knowing whether we can write on it or we should
					 * wait */
							| (((int)!(tcpconn->flags & F_CONN_WANTS_WR)-1)&
								F_CONN_WRITE_W)
#endif /* TCP_ASYNC */
				;
			tcpconn->flags&=~F_CONN_FD_CLOSED;
			flags=POLLIN
#ifdef TCP_ASYNC
					/* not used for now, the connection is sent to tcp_main
					 * before knowing if we can write on it or we should
					 * wait */
					| (((int)!(tcpconn->flags & F_CONN_WANTS_WR)-1) & POLLOUT)
#endif /* TCP_ASYNC */
					;
			if (unlikely(
					io_watch_add(&io_h, tcpconn->s, flags,
												F_TCPCONN, tcpconn)<0)){
				LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed to add"
						" new socket to the fd list\n");
				tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W);
				tcpconn_try_unhash(tcpconn); /* unhash & dec refcnt */
				tcpconn_put_destroy(tcpconn);
			}
			break;
#ifdef TCP_ASYNC
		case CONN_QUEUED_WRITE:
			/* received only if the wr. queue is empty and a write finishes
			 * with EAGAIN (common after connect())
			 * it should only enable write watching on the fd. The connection
			 * should be already in the hash. The refcnt is not changed.
			 */
			if (unlikely((tcpconn->state==S_CONN_BAD) ||
							!(tcpconn->flags & F_CONN_HASHED) ))
				break;
			if (!(tcpconn->flags & F_CONN_WANTS_WR)){
				tcpconn->flags|=F_CONN_WANTS_WR;
				t=get_ticks_raw();
				if (likely((tcpconn->flags & F_CONN_MAIN_TIMER) &&
					(TICKS_LT(tcpconn->wbuf_q.wr_timeout, tcpconn->timeout)) &&
						TICKS_LT(t, tcpconn->wbuf_q.wr_timeout) )){
					/* _wbufq_nonempty() is guaranteed here */
					/* update the timer */
					local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
					local_timer_reinit(&tcpconn->timer);
					local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
										tcpconn->wbuf_q.wr_timeout-t, t);
					DBG("tcp_main: handle_ser_child: CONN_QUEUED_WRITE; %p "
							"timeout adjusted to %d s\n", tcpconn,
							TICKS_TO_S(tcpconn->wbuf_q.wr_timeout-t));
				}
				if (!(tcpconn->flags & F_CONN_WRITE_W)){
					tcpconn->flags|=F_CONN_WRITE_W;
					if (!(tcpconn->flags & F_CONN_READ_W)){
						/* not watched at all yet => add a write-only watch */
						if (unlikely(io_watch_add(&io_h, tcpconn->s, POLLOUT,
												F_TCPCONN, tcpconn)<0)){
							LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child:"
										" failed to enable write watch on"
										" socket\n");
							if (tcpconn_try_unhash(tcpconn))
								tcpconn_put_destroy(tcpconn);
							break;
						}
					}else{
						/* already watched for read => add write interest */
						if (unlikely(io_watch_chg(&io_h, tcpconn->s,
													POLLIN|POLLOUT, -1)<0)){
							LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child:"
									" failed to change socket watch events\n");
							io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
							tcpconn->flags&=~F_CONN_READ_W;
							if (tcpconn_try_unhash(tcpconn))
								tcpconn_put_destroy(tcpconn);
							break;
						}
					}
				}
			}else{
				LOG(L_WARN, "tcp_main: handler_ser_child: connection %p"
							" already watched for write\n", tcpconn);
			}
			break;
#ifdef TCP_CONNECT_WAIT
		case CONN_NEW_COMPLETE:
		case CONN_NEW_PENDING_WRITE:
			/* received when a pending connect completes in the same
			 * tcp_send() that initiated it
			 * the connection is already in the hash with F_CONN_PENDING
			 * flag (added by tcp_send()) and refcnt at least 1 (for the
			 * hash)*/
			tcpconn->flags&=~(F_CONN_PENDING|F_CONN_FD_CLOSED);
			if (unlikely((tcpconn->state==S_CONN_BAD) || (fd==-1))){
				if (unlikely(fd==-1))
					LOG(L_CRIT, "BUG: handle_ser_child: CONN_NEW_COMPLETE:"
								" no fd received\n");
				else
					LOG(L_WARN, "WARNING: handle_ser_child: CONN_NEW_COMPLETE:"
							" received connection with error\n");
				tcpconn->flags|=F_CONN_FD_CLOSED;
				tcpconn->state=S_CONN_BAD;
				tcpconn_try_unhash(tcpconn);
				tcpconn_put_destroy(tcpconn);
				break;
			}
			(*tcp_connections_no)++;
			tcpconn->s=fd;
			/* update the timeout*/
			t=get_ticks_raw();
			con_lifetime=cfg_get(tcp, tcp_cfg, con_lifetime);
			tcpconn->timeout=t+con_lifetime;
			nxt_timeout=con_lifetime;
			if (unlikely(cmd==CONN_NEW_COMPLETE)){
				/* check if needs to be watched for write */
				lock_get(&tcpconn->write_lock);
					/* if queue non empty watch it for write */
					flags=(_wbufq_empty(tcpconn)-1)&POLLOUT;
				lock_release(&tcpconn->write_lock);
				if (flags){
					if (TICKS_LT(tcpconn->wbuf_q.wr_timeout, tcpconn->timeout)
							&& TICKS_LT(t, tcpconn->wbuf_q.wr_timeout))
						nxt_timeout=tcpconn->wbuf_q.wr_timeout-t;
					tcpconn->flags|=F_CONN_WRITE_W|F_CONN_WANTS_WR;
				}
				/* activate the timer (already properly init. in
				   tcpconn_new()) no need for reinit */
				local_timer_add(&tcp_main_ltimer, &tcpconn->timer, nxt_timeout,
									t);
				tcpconn->flags|=F_CONN_MAIN_TIMER|F_CONN_READ_W|
								F_CONN_WANTS_RD;
			}else{
				/* CONN_NEW_PENDING_WRITE */
				/* no need to check, we have something queued for write */
				flags=POLLOUT;
				if (TICKS_LT(tcpconn->wbuf_q.wr_timeout, tcpconn->timeout)
						&& TICKS_LT(t, tcpconn->wbuf_q.wr_timeout))
					nxt_timeout=tcpconn->wbuf_q.wr_timeout-t;
				/* activate the timer (already properly init. in
				   tcpconn_new()) no need for reinit */
				local_timer_add(&tcp_main_ltimer, &tcpconn->timer, nxt_timeout,
									t);
				tcpconn->flags|=F_CONN_MAIN_TIMER|F_CONN_READ_W|
								F_CONN_WANTS_RD |
								F_CONN_WRITE_W|F_CONN_WANTS_WR;
			}
			flags|=POLLIN;
			if (unlikely(
					io_watch_add(&io_h, tcpconn->s, flags,
												F_TCPCONN, tcpconn)<0)){
				LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed to add"
						" new socket to the fd list\n");
				tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W);
				tcpconn_try_unhash(tcpconn); /* unhash & dec refcnt */
				tcpconn_put_destroy(tcpconn);
			}
			break;
#endif /* TCP_CONNECT_WAIT */
#endif /* TCP_ASYNC */
		default:
			LOG(L_CRIT, "BUG: handle_ser_child: unknown cmd %d\n", cmd);
	}
end:
	return ret;
error:
	return -1;
}
/* sends a tcpconn + its fd to a chosen tcp reader child.
 * The child is picked round-robin, starting from where the previous call
 * left off; an idle child is preferred, otherwise the least busy one in
 * the scanned window is used.
 * returns 0 on success, -1 on error (the connection could not be sent
 * or queued). */
inline static int send2child(struct tcp_connection* tcpconn)
{
	int i;
	int min_busy;
	int idx;
	static int crt=0; /* current child (persists across calls => round-robin)*/
	int last;

	/* pick a child: scan at most tcp_children_no entries starting at crt */
	min_busy=tcp_children[0].busy;
	idx=0;
	last=crt+tcp_children_no;
	for (; crt<last; crt++){
		i=crt%tcp_children_no;
		if (!tcp_children[i].busy){
			/* idle child found => take it and stop scanning */
			idx=i;
			min_busy=0;
			break;
		}else if (min_busy>tcp_children[i].busy){
			/* remember the least busy child seen so far */
			min_busy=tcp_children[i].busy;
			idx=i;
		}
	}
	crt=idx+1; /* next time we start with crt%tcp_children_no */

	tcp_children[idx].busy++;
	tcp_children[idx].n_reqs++;
	if (unlikely(min_busy)){
		/* min_busy!=0 => no idle child was found, fell back to least busy */
		DBG("WARNING: send2child: no free tcp receiver, "
				" connection passed to the least busy one (%d)\n",
				min_busy);
	}
	DBG("send2child: to tcp child %d %d(%ld), %p\n", idx,
			tcp_children[idx].proc_no,
			(long)tcp_children[idx].pid, tcpconn);
	/* first make sure this child doesn't have pending request for
	 * tcp_main (to avoid a possible deadlock: e.g. child wants to
	 * send a release command, but the master fills its socket buffer
	 * with new connection commands => deadlock) */
	/* answer tcp_send requests first */
	while(handle_ser_child(&pt[tcp_children[idx].proc_no], -1)>0);
	/* process tcp readers requests */
	while(handle_tcp_child(&tcp_children[idx], -1)>0);

#ifdef SEND_FD_QUEUE
	/* if queue full, try to queue the io */
	if (unlikely(send_fd(tcp_children[idx].unix_sock, &tcpconn,
							sizeof(tcpconn), tcpconn->s)<=0)){
		if ((errno==EAGAIN)||(errno==EWOULDBLOCK)){
			/* the child's unix socket would block => queue the send and
			 * retry it later (send_fd_queue_run() in the main loop) */
			/* FIXME: remove after debugging */
			LOG(L_CRIT, "INFO: tcp child %d, socket %d: queue full,"
					" %d requests queued (total handled %d)\n",
					idx, tcp_children[idx].unix_sock, min_busy,
					tcp_children[idx].n_reqs-1);
			if (send_fd_queue_add(&send2child_q, tcp_children[idx].unix_sock,
						tcpconn)!=0){
				LOG(L_ERR, "ERROR: send2child: queue send op. failed\n");
				return -1;
			}
		}else{
			LOG(L_ERR, "ERROR: send2child: send_fd failed\n");
			return -1;
		}
	}
#else
	if (unlikely(send_fd(tcp_children[idx].unix_sock, &tcpconn,
						sizeof(tcpconn), tcpconn->s)<=0)){
		LOG(L_ERR, "ERROR: send2child: send_fd failed\n");
		return -1;
	}
#endif
	return 0;
}
/* handles a new connection, called internally by tcp_main_loop/handle_io.
 * params: si - pointer to one of the tcp socket_info structures on which
 *              an io event was detected (connection attempt)
 * returns: handle_* return convention: -1 on error, 0 on EAGAIN (no more
 *          io events queued), >0 on success. success/error refer only to
 *          the accept.
 */
static inline int handle_new_connect(struct socket_info* si)
{
	union sockaddr_union su;
	union sockaddr_union sock_name;
	unsigned sock_name_len;
	union sockaddr_union* dst_su;
	struct tcp_connection* tcpconn;
	socklen_t su_len;
	int new_sock;

	/* got a connection on r */
	su_len=sizeof(su);
	new_sock=accept(si->socket, &(su.s), &su_len);
	if (unlikely(new_sock==-1)){
		if ((errno==EAGAIN)||(errno==EWOULDBLOCK))
			return 0;
		LOG(L_ERR, "WARNING: handle_new_connect: error while accepting"
				" connection(%d): %s\n", errno, strerror(errno));
		return -1;
	}
	/* enforce the configured global connection limit */
	if (unlikely(*tcp_connections_no>=cfg_get(tcp, tcp_cfg, max_connections))){
		LOG(L_ERR, "ERROR: maximum number of connections exceeded: %d/%d\n",
					*tcp_connections_no,
					cfg_get(tcp, tcp_cfg, max_connections));
		close(new_sock);
		TCP_STATS_LOCAL_REJECT();
		return 1; /* success, because the accept was successful */
	}
	if (unlikely(init_sock_opt_accept(new_sock)<0)){
		LOG(L_ERR, "ERROR: handle_new_connect: init_sock_opt failed\n");
		close(new_sock);
		return 1; /* success, because the accept was successful */
	}
	(*tcp_connections_no)++;
	TCP_STATS_ESTABLISHED(S_CONN_ACCEPT);

	dst_su=&si->su;
	if (unlikely(si->flags & SI_IS_ANY)){
		/* INADDR_ANY => get local dst */
		sock_name_len=sizeof(sock_name);
		if (getsockname(new_sock, &sock_name.s, &sock_name_len)!=0){
			LOG(L_ERR, "ERROR: handle_new_connect:"
						" getsockname failed: %s(%d)\n",
						strerror(errno), errno);
			/* go on with the 0.0.0.0 dst from the sock_info */
		}else{
			dst_su=&sock_name;
		}
	}
	/* add socket to list */
	tcpconn=tcpconn_new(new_sock, &su, dst_su, si, si->proto, S_CONN_ACCEPT);
	if (likely(tcpconn)){
		tcpconn->flags|=F_CONN_PASSIVE;
#ifdef TCP_PASS_NEW_CONNECTION_ON_DATA
		/* keep the connection in tcp_main, watch it for POLLIN and only
		 * pass it to a child when data actually arrives */
		atomic_set(&tcpconn->refcnt, 1); /* safe, not yet available to the
											outside world */
		tcpconn_add(tcpconn);
		/* activate the timer */
		local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
								cfg_get(tcp, tcp_cfg, con_lifetime),
								get_ticks_raw());
		tcpconn->flags|=(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD);
		if (unlikely(io_watch_add(&io_h, tcpconn->s, POLLIN,
												F_TCPCONN, tcpconn)<0)){
			LOG(L_CRIT, "ERROR: tcp_main: handle_new_connect: failed to add"
						" new socket to the fd list\n");
			tcpconn->flags&=~F_CONN_READ_W;
			if (tcpconn_try_unhash(tcpconn))
				tcpconn_put_destroy(tcpconn);
		}
#else
		/* hand the new connection over to a tcp reader child right away */
		atomic_set(&tcpconn->refcnt, 2); /* safe, not yet available to the
											outside world */
		/* prepare it for passing to a child */
		tcpconn->flags|=F_CONN_READER;
		tcpconn_add(tcpconn);
		DBG("handle_new_connect: new connection from %s: %p %d flags: %04x\n",
			su2a(&su, sizeof(su)), tcpconn, tcpconn->s, tcpconn->flags);
		if(unlikely(send2child(tcpconn)<0)){
			/* could not pass it => undo: drop both refs and destroy */
			LOG(L_ERR,"ERROR: handle_new_connect: no children "
					"available\n");
			tcpconn->flags&=~F_CONN_READER;
			tcpconn_put(tcpconn);
			tcpconn_try_unhash(tcpconn);
			tcpconn_put_destroy(tcpconn);
		}
#endif
	}else{ /*tcpconn==0 */
		LOG(L_ERR, "ERROR: handle_new_connect: tcpconn_new failed, "
				"closing socket\n");
		close(new_sock);
		(*tcp_connections_no)--;
	}
	return 1; /* accept() was successful */
}
/* handles an io event on one of the watched tcp connections
 *
 * params: tcpconn - pointer to the tcp_connection for which we have an io ev.
 *         ev      - the io event bitmask (POLLIN/POLLOUT/POLLERR/...)
 *         fd_i    - index in the fd_array table (needed for delete)
 * returns: handle_* return convention, but on success it always returns 0
 *          (because it's one-shot, after a succesful execution the fd is
 *          removed from tcp_main's watch fd list and passed to a child =>
 *          tcp_main is not interested in further io events that might be
 *          queued for this fd)
 *
 * NOTE: the brace structure below intentionally crosses #ifdef boundaries
 * (the "}else" before #else pairs with the "{" after it in the
 * non-TCP_ASYNC build) -- edit with care.
 */
inline static int handle_tcpconn_ev(struct tcp_connection* tcpconn, short ev,
										int fd_i)
{
#ifdef TCP_ASYNC
	int empty_q;
	int bytes;
#endif /* TCP_ASYNC */
	/* is refcnt!=0 really necessary?
	 * No, in fact it's a bug: I can have the following situation: a send only
	 * tcp connection used by n processes simultaneously => refcnt = n. In
	 * the same time I can have a read event and this situation is perfectly
	 * valid. -- andrei
	 */
#if 0
	if ((tcpconn->refcnt!=0)){
		/* FIXME: might be valid for sigio_rt iff fd flags are not cleared
		 * (there is a short window in which it could generate a sig
		 *  that would be catched by tcp_main) */
		LOG(L_CRIT, "BUG: handle_tcpconn_ev: io event on referenced"
					" tcpconn (%p), refcnt=%d, fd=%d\n",
					tcpconn, tcpconn->refcnt, tcpconn->s);
		return -1;
	}
#endif
	/* pass it to child, so remove it from the io watch list  and the local
	 *  timer */
#ifdef TCP_ASYNC
	empty_q=0; /* warning fix */
	/* write event (or error/hangup) on a connection watched for writes:
	 * try to flush its write queue first */
	if (unlikely((ev & (POLLOUT|POLLERR|POLLHUP)) &&
					(tcpconn->flags & F_CONN_WRITE_W))){
		if (unlikely((ev & (POLLERR|POLLHUP)) ||
					(wbufq_run(tcpconn->s, tcpconn, &empty_q)<0) ||
					(empty_q && tcpconn_close_after_send(tcpconn))
			)){
			/* write error / hangup / close-after-send => tear down */
			if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)<0)){
				LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(1) failed:"
							" for %p, fd %d\n", tcpconn, tcpconn->s);
			}
			if ((tcpconn->flags & F_CONN_READ_W) && (ev & POLLIN)){
				/* connection is watched for read and there is a read event
				 * (unfortunately if we have POLLIN here we don't know if
				 * there's really any data in the read buffer or the POLLIN
				 * was generated by the error or EOF => to avoid loosing
				 * data it's safer to either directly check the read buffer
				 * or try a read)*/
				/* in most cases the read buffer will be empty, so in general
				 * is cheaper to check it here and then send the
				 * conn.  to a a child only if needed (another syscall + at
				 * least 2 * syscalls in the reader + ...) */
				if ((ioctl(tcpconn->s, FIONREAD, &bytes)>=0) && (bytes>0)){
					/* data pending: let a reader child drain it before the
					 * connection is destroyed */
					tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W|
										F_CONN_WANTS_RD|F_CONN_WANTS_WR);
					tcpconn->flags|=F_CONN_FORCE_EOF|F_CONN_WR_ERROR;
					goto send_to_child;
				}
				/* if bytes==0 or ioctl failed, destroy the connection now */
			}
			tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W|
								F_CONN_WANTS_RD|F_CONN_WANTS_WR);
			if (unlikely(ev & POLLERR)){
				/* blacklist the destination and account the failure,
				 * distinguishing connect-time from established errors */
				if (unlikely(tcpconn->state==S_CONN_CONNECT)){
#ifdef USE_DST_BLACKLIST
					dst_blacklist_su(BLST_ERR_CONNECT, tcpconn->rcv.proto,
										&tcpconn->rcv.src_su,
										&tcpconn->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
					TCP_EV_CONNECT_ERR(0, TCP_LADDR(tcpconn),
										TCP_LPORT(tcpconn), TCP_PSU(tcpconn),
										TCP_PROTO(tcpconn));
					TCP_STATS_CONNECT_FAILED();
				}else{
#ifdef USE_DST_BLACKLIST
					dst_blacklist_su(BLST_ERR_SEND, tcpconn->rcv.proto,
										&tcpconn->rcv.src_su,
										&tcpconn->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
					TCP_STATS_CON_RESET(); /* FIXME: it could != RST */
				}
			}
			if (unlikely(!tcpconn_try_unhash(tcpconn))){
				LOG(L_CRIT, "BUG: tcpconn_ev: unhashed connection %p\n",
							tcpconn);
			}
			tcpconn_put_destroy(tcpconn);
			goto error;
		}
		if (empty_q){
			/* write queue fully flushed => stop watching for writes */
			tcpconn->flags&=~F_CONN_WANTS_WR;
			if (!(tcpconn->flags & F_CONN_READ_W)){
				if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)==-1)){
					LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(2)"
								" failed:" " for %p, fd %d\n",
								tcpconn, tcpconn->s);
					goto error;
				}
			}else{
				if (unlikely(io_watch_chg(&io_h, tcpconn->s,
											POLLIN, fd_i)==-1)){
					LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_chg(1)"
								" failed:" " for %p, fd %d\n",
								tcpconn, tcpconn->s);
					goto error;
				}
			}
			tcpconn->flags&=~F_CONN_WRITE_W;
		}
		ev&=~POLLOUT; /* clear POLLOUT */
	}
	if (likely(ev && (tcpconn->flags & F_CONN_READ_W))){
		/* if still some other IO event (POLLIN|POLLHUP|POLLERR) and
		 * connection is still watched in tcp_main for reads, send it to a
		 * child and stop watching it for input (but continue watching for
		 *  writes if needed): */
		if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
			if (unlikely(io_watch_chg(&io_h, tcpconn->s, POLLOUT, fd_i)==-1)){
				LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_chg(2)"
							" failed:" " for %p, fd %d\n",
							tcpconn, tcpconn->s);
				goto error;
			}
		}else
#else
	{
#endif /* TCP_ASYNC */
			if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)==-1)){
				LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(3)"
							" failed:" " for %p, fd %d\n",
							tcpconn, tcpconn->s);
				goto error;
			}
#ifdef TCP_ASYNC
send_to_child:
#endif
		DBG("tcp: DBG: sendig to child, events %x\n", ev);
		/* record EOF / OOB hints for the reader via branchless flag masks */
#ifdef POLLRDHUP
		tcpconn->flags|=((int)!(ev & (POLLRDHUP|POLLHUP|POLLERR)) -1) &
							F_CONN_EOF_SEEN;
#else /* POLLRDHUP */
		tcpconn->flags|=((int)!(ev & (POLLHUP|POLLERR)) -1) & F_CONN_EOF_SEEN;
#endif /* POLLRDHUP */
		tcpconn->flags|= ((int)!(ev & POLLPRI) -1) & F_CONN_OOB_DATA;
		tcpconn->flags|=F_CONN_READER;
		local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
		tcpconn->flags&=~(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD);
		tcpconn_ref(tcpconn); /* refcnt ++ */
		if (unlikely(send2child(tcpconn)<0)){
			/* could not pass it to a child => undo and destroy */
			LOG(L_ERR,"ERROR: handle_tcpconn_ev: no children available\n");
			tcpconn->flags&=~F_CONN_READER;
#ifdef TCP_ASYNC
			if (tcpconn->flags & F_CONN_WRITE_W){
				if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)<0)){
					LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(4)"
								" failed:" " for %p, fd %d\n",
								tcpconn, tcpconn->s);
				}
				tcpconn->flags&=~F_CONN_WRITE_W;
			}
#endif /* TCP_ASYNC */
			tcpconn_put(tcpconn);
			tcpconn_try_unhash(tcpconn);
			tcpconn_put_destroy(tcpconn); /* because of the tcpconn_ref() */
		}
	}
	return 0; /* we are not interested in possibly queued io events,
				 the fd was either passed to a child, closed, or for writes,
				 everything possible was already written */
error:
	return -1;
}
  3398. /* generic handle io routine, it will call the appropiate
  3399. * handle_xxx() based on the fd_map type
  3400. *
  3401. * params: fm - pointer to a fd hash entry
  3402. * idx - index in the fd_array (or -1 if not known)
  3403. * return: -1 on error
  3404. * 0 on EAGAIN or when by some other way it is known that no more
  3405. * io events are queued on the fd (the receive buffer is empty).
  3406. * Usefull to detect when there are no more io events queued for
  3407. * sigio_rt, epoll_et, kqueue.
  3408. * >0 on successfull read from the fd (when there might be more io
  3409. * queued -- the receive buffer might still be non-empty)
  3410. */
  3411. inline static int handle_io(struct fd_map* fm, short ev, int idx)
  3412. {
  3413. int ret;
  3414. /* update the local config */
  3415. cfg_update();
  3416. switch(fm->type){
  3417. case F_SOCKINFO:
  3418. ret=handle_new_connect((struct socket_info*)fm->data);
  3419. break;
  3420. case F_TCPCONN:
  3421. ret=handle_tcpconn_ev((struct tcp_connection*)fm->data, ev, idx);
  3422. break;
  3423. case F_TCPCHILD:
  3424. ret=handle_tcp_child((struct tcp_child*)fm->data, idx);
  3425. break;
  3426. case F_PROC:
  3427. ret=handle_ser_child((struct process_table*)fm->data, idx);
  3428. break;
  3429. case F_NONE:
  3430. LOG(L_CRIT, "BUG: handle_io: empty fd map: %p {%d, %d, %p},"
  3431. " idx %d\n", fm, fm->fd, fm->type, fm->data, idx);
  3432. goto error;
  3433. default:
  3434. LOG(L_CRIT, "BUG: handle_io: uknown fd type %d\n", fm->type);
  3435. goto error;
  3436. }
  3437. return ret;
  3438. error:
  3439. return -1;
  3440. }
/* timer handler for a tcp connection handled by tcp_main
 * params: t    - current ticks
 *         tl   - the expired timer link (embedded in the connection)
 *         data - the tcp_connection the timer belongs to
 * returns: the number of ticks until the next expiry (timer re-armed), or
 *          0 when the connection timed out and was destroyed (timer not
 *          re-armed). */
static ticks_t tcpconn_main_timeout(ticks_t t, struct timer_ln* tl, void* data)
{
	struct tcp_connection *c;
	int fd;
	int tcp_async;

	c=(struct tcp_connection*)data;
	/* or (struct tcp...*)(tl-offset(c->timer)) */

#ifdef TCP_ASYNC
	DBG( "tcp_main: entering timer for %p (ticks=%d, timeout=%d (%d s), "
			"wr_timeout=%d (%d s)), write queue: %d bytes\n",
			c, t, c->timeout, TICKS_TO_S(c->timeout-t),
			c->wbuf_q.wr_timeout, TICKS_TO_S(c->wbuf_q.wr_timeout-t),
			c->wbuf_q.queued);

	tcp_async=cfg_get(tcp, tcp_cfg, async);
	/* not expired: the idle timeout is still in the future and either
	 * async mode is off, the write queue is empty or the write timeout
	 * is also still in the future => re-arm for the remaining time
	 * (note: non-short-circuiting | is used on purpose here) */
	if (likely(TICKS_LT(t, c->timeout) && ( !tcp_async | _wbufq_empty(c) |
					TICKS_LT(t, c->wbuf_q.wr_timeout)) )){
		if (unlikely(tcp_async && _wbufq_non_empty(c)))
			return (ticks_t)MIN_unsigned(c->timeout-t,
											c->wbuf_q.wr_timeout-t);
		else
			return (ticks_t)(c->timeout - t);
	}
	/* if time out due to write, add it to the blacklist */
	if (tcp_async && _wbufq_non_empty(c) && TICKS_GE(t, c->wbuf_q.wr_timeout)){
		if (unlikely(c->state==S_CONN_CONNECT)){
			/* still connecting => connect timeout */
#ifdef USE_DST_BLACKLIST
			dst_blacklist_su(BLST_ERR_CONNECT, c->rcv.proto, &c->rcv.src_su,
								&c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_CONNECT_TIMEOUT(0, TCP_LADDR(c), TCP_LPORT(c), TCP_PSU(c),
									TCP_PROTO(c));
			TCP_STATS_CONNECT_FAILED();
		}else{
			/* established => send timeout */
#ifdef USE_DST_BLACKLIST
			dst_blacklist_su(BLST_ERR_SEND, c->rcv.proto, &c->rcv.src_su,
								&c->send_flags, 0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_SEND_TIMEOUT(0, &c->rcv);
			TCP_STATS_SEND_TIMEOUT();
		}
	}else{
		/* idle timeout */
		TCP_EV_IDLE_CONN_CLOSED(0, &c->rcv);
		TCP_STATS_CON_TIMEOUT();
	}
#else /* ! TCP_ASYNC */
	if (TICKS_LT(t, c->timeout)){
		/* timeout extended, exit */
		return (ticks_t)(c->timeout - t);
	}
	/* idle timeout */
	TCP_EV_IDLE_CONN_CLOSED(0, &c->rcv);
	TCP_STATS_CON_TIMEOUT();
#endif /* TCP_ASYNC */
	DBG("tcp_main: timeout for %p\n", c);
	/* detach the expired connection from the hash and mark it bad */
	if (likely(c->flags & F_CONN_HASHED)){
		c->flags&=~(F_CONN_HASHED|F_CONN_MAIN_TIMER);
		c->state=S_CONN_BAD;
		TCPCONN_LOCK;
			_tcpconn_detach(c);
		TCPCONN_UNLOCK;
	}else{
		c->flags&=~F_CONN_MAIN_TIMER;
		LOG(L_CRIT, "BUG: tcp_main: timer: called with unhashed connection %p"
				"\n", c);
		tcpconn_ref(c); /* ugly hack to try to go on */
	}
	fd=c->s;
	/* stop watching the fd for io before destroying */
	if (likely(fd>0)){
		if (likely(c->flags & (F_CONN_READ_W|F_CONN_WRITE_W))){
			io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
			c->flags&=~(F_CONN_READ_W|F_CONN_WRITE_W);
		}
	}
	tcpconn_put_destroy(c);
	return 0; /* connection destroyed => don't re-arm the timer */
}
  3518. static inline void tcp_timer_run()
  3519. {
  3520. ticks_t ticks;
  3521. ticks=get_ticks_raw();
  3522. if (unlikely((ticks-tcp_main_prev_ticks)<TCPCONN_TIMEOUT_MIN_RUN)) return;
  3523. tcp_main_prev_ticks=ticks;
  3524. local_timer_run(&tcp_main_ltimer, ticks);
  3525. }
/* destroys every connection in the id hash (shutdown-time teardown).
 * keep in sync with tcpconn_destroy, the "delete" part should be
 * the same except for io_watch_del..
 * Note: this function is called only on shutdown by the main ser process via
 * cleanup(). However it's also safe to call it from the tcp_main process.
 * => with the ser shutdown exception, it cannot execute in parallel
 * with tcpconn_add() or tcpconn_destroy()*/
static inline void tcpconn_destroy_all()
{
	struct tcp_connection *c, *next;
	unsigned h;
	int fd;

	TCPCONN_LOCK;
	/* walk every chain of the id hash and tear down each connection */
	for(h=0; h<TCP_ID_HASH_SIZE; h++){
		c=tcpconn_id_hash[h];
		while(c){
			next=c->id_next;
			if (is_tcp_main){
				/* we cannot close or remove the fd if we are not in the
				 * tcp main proc.*/
				if ((c->flags & F_CONN_MAIN_TIMER)){
					local_timer_del(&tcp_main_ltimer, &c->timer);
					c->flags&=~F_CONN_MAIN_TIMER;
				} /* else still in some reader */
				fd=c->s;
				if (fd>0 && (c->flags & (F_CONN_READ_W|F_CONN_WRITE_W))){
					io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
					c->flags&=~(F_CONN_READ_W|F_CONN_WRITE_W);
				}
			}else{
				/* not tcp_main => fd is not ours to close */
				fd=-1;
			}
#ifdef USE_TLS
			if (fd>0 && c->type==PROTO_TLS)
				tls_close(c, fd);
#endif
			_tcpconn_rm(c);
			if (fd>0) {
#ifdef TCP_FD_CACHE
				/* NOTE(review): shutdown() before close() is presumably so
				 * the peer sees the close even if another process still
				 * holds a cached copy of this fd -- confirm */
				if (likely(cfg_get(tcp, tcp_cfg, fd_cache)))
					shutdown(fd, SHUT_RDWR);
#endif /* TCP_FD_CACHE */
				close(fd);
			}
			(*tcp_connections_no)--;
			c=next;
		}
	}
	TCPCONN_UNLOCK;
}
/* tcp main loop: sets up the tcp_main process state (io watch, send fd
 * queues, local timer, watched fds) and then loops forever dispatching io
 * events with the configured poll method.
 * Never returns on success; on error it cleans up and exits the process. */
void tcp_main_loop()
{
	struct socket_info* si;
	int r;

	is_tcp_main=1; /* mark this process as tcp main */

	tcp_main_max_fd_no=get_max_open_fds();
	/* init send fd queues (here because we want mem. alloc only in the tcp
	 * process */
#ifdef SEND_FD_QUEUE
	if (init_send_fd_queues()<0){
		LOG(L_CRIT, "ERROR: init_tcp: could not init send fd queues\n");
		goto error;
	}
#endif
	/* init io_wait (here because we want the memory allocated only in
	 * the tcp_main process) */
	if (init_io_wait(&io_h, tcp_main_max_fd_no, tcp_poll_method)<0)
		goto error;
	/* init: start watching all the fds*/

	/* init local timer */
	tcp_main_prev_ticks=get_ticks_raw();
	if (init_local_timer(&tcp_main_ltimer, get_ticks_raw())!=0){
		LOG(L_ERR, "ERROR: init_tcp: failed to init local timer\n");
		goto error;
	}
#ifdef TCP_FD_CACHE
	if (cfg_get(tcp, tcp_cfg, fd_cache)) tcp_fd_cache_init();
#endif /* TCP_FD_CACHE */

	/* add all the sockets we listen on for connections */
	for (si=tcp_listen; si; si=si->next){
		if ((si->proto==PROTO_TCP) &&(si->socket!=-1)){
			if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
				LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
							"listen socket to the fd list\n");
				goto error;
			}
		}else{
			LOG(L_CRIT, "BUG: tcp_main_loop: non tcp address in tcp_listen\n");
		}
	}
#ifdef USE_TLS
	if (!tls_disable && tls_loaded()){
		for (si=tls_listen; si; si=si->next){
			if ((si->proto==PROTO_TLS) && (si->socket!=-1)){
				if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
					LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
							"tls listen socket to the fd list\n");
					goto error;
				}
			}else{
				LOG(L_CRIT, "BUG: tcp_main_loop: non tls address"
						" in tls_listen\n");
			}
		}
	}
#endif
	/* add all the unix sockets used for communcation with other ser processes
	 *  (get fd, new connection a.s.o) */
	for (r=1; r<process_no; r++){
		if (pt[r].unix_sock>0) /* we can't have 0, we never close it!*/
			if (io_watch_add(&io_h, pt[r].unix_sock, POLLIN,F_PROC, &pt[r])<0){
					LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
							"process %d unix socket to the fd list\n", r);
					goto error;
			}
	}
	/* add all the unix sokets used for communication with the tcp childs */
	for (r=0; r<tcp_children_no; r++){
		if (tcp_children[r].unix_sock>0)/*we can't have 0, we never close it!*/
			if (io_watch_add(&io_h, tcp_children[r].unix_sock, POLLIN,
									F_TCPCHILD, &tcp_children[r]) <0){
				LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
						"tcp child %d unix socket to the fd list\n", r);
				goto error;
			}
	}

	/* initialize the cfg framework */
	if (cfg_child_init()) goto error;

	/* main loop: one endless loop per poll method; each iteration waits for
	 * io, retries any queued fd sends and runs the connection timers */
	switch(io_h.poll_method){
		case POLL_POLL:
			while(1){
				/* wait and process IO */
				io_wait_loop_poll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				/* remove old connections */
				tcp_timer_run();
			}
			break;
#ifdef HAVE_SELECT
		case POLL_SELECT:
			while(1){
				io_wait_loop_select(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_SIGIO_RT
		case POLL_SIGIO_RT:
			while(1){
				io_wait_loop_sigio_rt(&io_h, TCP_MAIN_SELECT_TIMEOUT);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_EPOLL
		case POLL_EPOLL_LT:
			while(1){
				io_wait_loop_epoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
		case POLL_EPOLL_ET:
			while(1){
				io_wait_loop_epoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 1);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_KQUEUE
		case POLL_KQUEUE:
			while(1){
				io_wait_loop_kqueue(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_DEVPOLL
		case POLL_DEVPOLL:
			while(1){
				io_wait_loop_devpoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
		default:
			LOG(L_CRIT, "BUG: tcp_main_loop: no support for poll method "
					" %s (%d)\n",
					poll_method_name(io_h.poll_method), io_h.poll_method);
			goto error;
	}
error:
#ifdef SEND_FD_QUEUE
	destroy_send_fd_queues();
#endif
	destroy_io_wait(&io_h);
	LOG(L_CRIT, "ERROR: tcp_main_loop: exiting...");
	exit(-1);
}
/* cleanup before exit: destroys all connections and frees every piece of
 * tcp layer state allocated by init_tcp() / tcp_init_children().
 * Safe to call on partially-initialized state (each free is guarded). */
void destroy_tcp()
{
	if (tcpconn_id_hash){
		if (tcpconn_lock)
			TCPCONN_UNLOCK; /* hack: force-unlock the tcp lock in case
							   some process was terminated while holding
							   it; this will allow an almost gracious
							   shutdown */
		tcpconn_destroy_all();
		shm_free(tcpconn_id_hash);
		tcpconn_id_hash=0;
	}
	DESTROY_TCP_STATS();
	if (tcp_connections_no){
		shm_free(tcp_connections_no);
		tcp_connections_no=0;
	}
#ifdef TCP_ASYNC
	if (tcp_total_wq){
		shm_free(tcp_total_wq);
		tcp_total_wq=0;
	}
#endif /* TCP_ASYNC */
	if (connection_id){
		shm_free(connection_id);
		connection_id=0;
	}
	if (tcpconn_aliases_hash){
		shm_free(tcpconn_aliases_hash);
		tcpconn_aliases_hash=0;
	}
	if (tcpconn_lock){
		lock_destroy(tcpconn_lock);
		lock_dealloc((void*)tcpconn_lock);
		tcpconn_lock=0;
	}
	if (tcp_children){
		pkg_free(tcp_children);
		tcp_children=0;
	}
	destroy_local_timer(&tcp_main_ltimer);
}
/* initializes the tcp layer: allocates the shared counters, the connection
 * and alias hash tables and the tcpconn lock, and selects the io poll
 * method (auto-detecting one if the configured method is missing/invalid).
 * returns 0 on success, -1 on error (any partially allocated state is
 * released via destroy_tcp()). */
int init_tcp()
{
	char* poll_err;

	tcp_options_check();
	if (tcp_cfg==0){
		BUG("tcp_cfg not initialized\n");
		goto error;
	}
	/* init lock */
	tcpconn_lock=lock_alloc();
	if (tcpconn_lock==0){
		LOG(L_CRIT, "ERROR: init_tcp: could not alloc lock\n");
		goto error;
	}
	if (lock_init(tcpconn_lock)==0){
		LOG(L_CRIT, "ERROR: init_tcp: could not init lock\n");
		lock_dealloc((void*)tcpconn_lock);
		tcpconn_lock=0;
		goto error;
	}
	/* init globals (shared memory, visible to all processes) */
	tcp_connections_no=shm_malloc(sizeof(int));
	if (tcp_connections_no==0){
		LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
		goto error;
	}
	*tcp_connections_no=0;
	if (INIT_TCP_STATS()!=0) goto error;
	connection_id=shm_malloc(sizeof(int));
	if (connection_id==0){
		LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
		goto error;
	}
	*connection_id=1;
#ifdef TCP_ASYNC
	tcp_total_wq=shm_malloc(sizeof(*tcp_total_wq));
	if (tcp_total_wq==0){
		LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
		goto error;
	}
#endif /* TCP_ASYNC */
	/* alloc hashtables*/
	tcpconn_aliases_hash=(struct tcp_conn_alias**)
			shm_malloc(TCP_ALIAS_HASH_SIZE* sizeof(struct tcp_conn_alias*));
	if (tcpconn_aliases_hash==0){
		LOG(L_CRIT, "ERROR: init_tcp: could not alloc address hashtable\n");
		goto error;
	}
	tcpconn_id_hash=(struct tcp_connection**)shm_malloc(TCP_ID_HASH_SIZE*
								sizeof(struct tcp_connection*));
	if (tcpconn_id_hash==0){
		LOG(L_CRIT, "ERROR: init_tcp: could not alloc id hashtable\n");
		goto error;
	}
	/* init hashtables*/
	memset((void*)tcpconn_aliases_hash, 0,
			TCP_ALIAS_HASH_SIZE * sizeof(struct tcp_conn_alias*));
	memset((void*)tcpconn_id_hash, 0,
			TCP_ID_HASH_SIZE * sizeof(struct tcp_connection*));

	/* fix config variables */
	poll_err=check_poll_method(tcp_poll_method);
	/* set an appropriate poll method */
	if (poll_err || (tcp_poll_method==0)){
		/* configured method unusable or unset => auto-detect one */
		tcp_poll_method=choose_poll_method();
		if (poll_err){
			LOG(L_ERR, "ERROR: init_tcp: %s, using %s instead\n",
					poll_err, poll_method_name(tcp_poll_method));
		}else{
			LOG(L_INFO, "init_tcp: using %s as the io watch method"
					" (auto detected)\n", poll_method_name(tcp_poll_method));
		}
	}else{
		LOG(L_INFO, "init_tcp: using %s io watch method (config)\n",
				poll_method_name(tcp_poll_method));
	}

	return 0;
error:
	/* clean-up */
	destroy_tcp();
	return -1;
}
  3855. #ifdef TCP_CHILD_NON_BLOCKING
  3856. /* returns -1 on error */
  3857. static int set_non_blocking(int s)
  3858. {
  3859. int flags;
  3860. /* non-blocking */
  3861. flags=fcntl(s, F_GETFL);
  3862. if (flags==-1){
  3863. LOG(L_ERR, "ERROR: set_non_blocking: fnctl failed: (%d) %s\n",
  3864. errno, strerror(errno));
  3865. goto error;
  3866. }
  3867. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  3868. LOG(L_ERR, "ERROR: set_non_blocking: fcntl: set non-blocking failed:"
  3869. " (%d) %s\n", errno, strerror(errno));
  3870. goto error;
  3871. }
  3872. return 0;
  3873. error:
  3874. return -1;
  3875. }
  3876. #endif
  3877. /* returns -1 on error, 0 on success */
  3878. int tcp_fix_child_sockets(int* fd)
  3879. {
  3880. #ifdef TCP_CHILD_NON_BLOCKING
  3881. if ((set_non_blocking(fd[0])<0) ||
  3882. (set_non_blocking(fd[1])<0)){
  3883. return -1;
  3884. }
  3885. #endif
  3886. return 0;
  3887. }
  3888. /* starts the tcp processes */
  3889. int tcp_init_children()
  3890. {
  3891. int r;
  3892. int reader_fd_1; /* for comm. with the tcp children read */
  3893. pid_t pid;
  3894. struct socket_info *si;
  3895. /* estimate max fd. no:
  3896. * 1 tcp send unix socket/all_proc,
  3897. * + 1 udp sock/udp proc + 1 tcp_child sock/tcp child*
  3898. * + no_listen_tcp */
  3899. for(r=0, si=tcp_listen; si; si=si->next, r++);
  3900. #ifdef USE_TLS
  3901. if (! tls_disable)
  3902. for (si=tls_listen; si; si=si->next, r++);
  3903. #endif
  3904. register_fds(r+tcp_max_connections+get_max_procs()-1 /* tcp main */);
  3905. #if 0
  3906. tcp_max_fd_no=get_max_procs()*2 +r-1 /* timer */ +3; /* stdin/out/err*/
  3907. /* max connections can be temporarily exceeded with estimated_process_count
  3908. * - tcp_main (tcpconn_connect called simultaneously in all all the
  3909. * processes) */
  3910. tcp_max_fd_no+=tcp_max_connections+get_max_procs()-1 /* tcp main */;
  3911. #endif
  3912. /* alloc the children array */
  3913. tcp_children=pkg_malloc(sizeof(struct tcp_child)*tcp_children_no);
  3914. if (tcp_children==0){
  3915. LOG(L_ERR, "ERROR: tcp_init_children: out of memory\n");
  3916. goto error;
  3917. }
  3918. /* create the tcp sock_info structures */
  3919. /* copy the sockets --moved to main_loop*/
  3920. /* fork children & create the socket pairs*/
  3921. for(r=0; r<tcp_children_no; r++){
  3922. child_rank++;
  3923. pid=fork_tcp_process(child_rank, "tcp receiver", r, &reader_fd_1);
  3924. if (pid<0){
  3925. LOG(L_ERR, "ERROR: tcp_main: fork failed: %s\n",
  3926. strerror(errno));
  3927. goto error;
  3928. }else if (pid>0){
  3929. /* parent */
  3930. }else{
  3931. /* child */
  3932. bind_address=0; /* force a SEGFAULT if someone uses a non-init.
  3933. bind address on tcp */
  3934. tcp_receive_loop(reader_fd_1);
  3935. }
  3936. }
  3937. return 0;
  3938. error:
  3939. return -1;
  3940. }
  3941. void tcp_get_info(struct tcp_gen_info *ti)
  3942. {
  3943. ti->tcp_readers=tcp_children_no;
  3944. ti->tcp_max_connections=tcp_max_connections;
  3945. ti->tcp_connections_no=*tcp_connections_no;
  3946. #ifdef TCP_ASYNC
  3947. ti->tcp_write_queued=*tcp_total_wq;
  3948. #else
  3949. ti->tcp_write_queued=0;
  3950. #endif /* TCP_ASYNC */
  3951. }
  3952. #endif