tcp_main.c 123 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213
  1. /*
  2. * $Id$
  3. *
  4. * Copyright (C) 2001-2003 FhG Fokus
  5. *
  6. * This file is part of ser, a free SIP server.
  7. *
  8. * ser is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version
  12. *
  13. * For a license to use the ser software under conditions
  14. * other than those described here, or to purchase support for this
  15. * software, please contact iptel.org by e-mail at the following addresses:
  16. * [email protected]
  17. *
  18. * ser is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License
  24. * along with this program; if not, write to the Free Software
  25. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  26. */
  27. /*
  28. * History:
  29. * --------
  30. * 2002-11-29 created by andrei
  31. * 2002-12-11 added tcp_send (andrei)
  32. * 2003-01-20 locking fixes, hashtables (andrei)
  33. * 2003-02-20 s/lock_t/gen_lock_t/ to avoid a conflict on solaris (andrei)
  34. * 2003-02-25 Nagle is disabled if -DDISABLE_NAGLE (andrei)
  35. * 2003-03-29 SO_REUSEADDR before calling bind to allow
  36. * server restart, Nagle set on the (hopefully)
  37. * correct socket (jiri)
  38. * 2003-03-31 always try to find the corresponding tcp listen socket for
  39. * a temp. socket and store in in *->bind_address: added
  40. * find_tcp_si, modified tcpconn_connect (andrei)
  41. * 2003-04-14 set sockopts to TOS low delay (andrei)
  42. * 2003-06-30 moved tcp new connect checking & handling to
  43. * handle_new_connect (andrei)
  44. * 2003-07-09 tls_close called before closing the tcp connection (andrei)
  45. * 2003-10-24 converted to the new socket_info lists (andrei)
  46. * 2003-10-27 tcp port aliases support added (andrei)
  47. * 2003-11-04 always lock before manipulating refcnt; sendchild
  48. * does not inc refcnt by itself anymore (andrei)
  49. * 2003-11-07 different unix sockets are used for fd passing
  50. * to/from readers/writers (andrei)
  51. * 2003-11-17 handle_new_connect & tcp_connect will close the
  52. * new socket if tcpconn_new return 0 (e.g. out of mem) (andrei)
  53. * 2003-11-28 tcp_blocking_write & tcp_blocking_connect added (andrei)
  54. * 2004-11-08 dropped find_tcp_si and replaced with find_si (andrei)
  55. * 2005-06-07 new tcp optimized code, supports epoll (LT), sigio + real time
  56. * signals, poll & select (andrei)
  57. * 2005-06-26 *bsd kqueue support (andrei)
  58. * 2005-07-04 solaris /dev/poll support (andrei)
  59. * 2005-07-08 tcp_max_connections, tcp_connection_lifetime, don't accept
  60. * more connections if tcp_max_connections is exceeded (andrei)
  61. * 2005-10-21 cleanup all the open connections on exit
  62. * decrement the no. of open connections on timeout too (andrei)
  * 2006-01-30 queue send_fd request and execute them at the end of the
  63. * poll loop (#ifdef) (andrei)
  64. * process all children requests, before attempting to send
  65. * them new stuff (fixes some deadlocks) (andrei)
  66. * 2006-02-03 timers are run only once per s (andrei)
  67. * tcp children fds can be non-blocking; send fds are queued on
  68. * EAGAIN; lots of bug fixes (andrei)
  69. * 2006-02-06 better tcp_max_connections checks, tcp_connections_no moved to
  70. * shm (andrei)
  71. * 2006-04-12 tcp_send() changed to use struct dest_info (andrei)
  72. * 2006-11-02 switched to atomic ops for refcnt, locking improvements
  73. * (andrei)
  74. * 2006-11-04 switched to raw ticks (to fix conversion errors which could
  75. * result in inf. lifetime) (andrei)
  76. * 2007-07-25 tcpconn_connect can now bind the socket on a specified
  77. * source addr/port (andrei)
  78. * 2007-07-26 tcp_send() and tcpconn_get() can now use a specified source
  79. * addr./port (andrei)
  80. * 2007-08-23 getsockname() for INADDR_ANY(SI_IS_ANY) sockets (andrei)
  81. * 2007-08-27 split init_sock_opt into a lightweight init_sock_opt_accept()
  82. * used when accepting connections and init_sock_opt used for
  83. * connect/ new sockets (andrei)
  84. * 2007-11-22 always add the connection & clear the corresponding flags before
  85. * io_watch_add-ing its fd - it's safer this way (andrei)
  86. * 2007-11-26 improved tcp timers: switched to local_timer (andrei)
  87. * 2007-11-27 added send fd cache and reader fd reuse (andrei)
  88. * 2007-11-28 added support for TCP_DEFER_ACCEPT, KEEPALIVE, KEEPINTVL,
  89. * KEEPCNT, QUICKACK, SYNCNT, LINGER2 (andrei)
  90. * 2007-12-04 support for queueing write requests (andrei)
  91. * 2007-12-12 destroy connection asap on wbuf. timeout (andrei)
  92. * 2007-12-13 changed the refcnt and destroy scheme, now refcnt is 1 if
  93. * linked into the hash tables (was 0) (andrei)
  94. * 2007-12-21 support for pending connects (connections are added to the
  95. * hash immediately and writes on them are buffered) (andrei)
  96. * 2008-02-05 handle POLLRDHUP (if supported), POLLERR and
  97. * POLLHUP (andrei)
  98. * on write error check if there's still data in the socket
  99. * read buffer and process it first (andrei)
  100. * 2009-02-26 direct blacklist support (andrei)
  101. * 2009-03-20 s/wq_timeout/send_timeout ; send_timeout is now in ticks
  102. * (andrei)
  103. * 2009-04-09 tcp ev and tcp stats macros added (andrei)
  104. */
  105. #ifdef USE_TCP
  106. #ifndef SHM_MEM
  107. #error "shared memory support needed (add -DSHM_MEM to Makefile.defs)"
  108. #endif
  109. #define HANDLE_IO_INLINE
  110. #include "io_wait.h" /* include first to make sure the needed features are
  111. turned on (e.g. _GNU_SOURCE for POLLRDHUP) */
  112. #include <sys/time.h>
  113. #include <sys/types.h>
  114. #include <sys/select.h>
  115. #include <sys/socket.h>
  116. #ifdef HAVE_FILIO_H
  117. #include <sys/filio.h> /* needed on solaris 2.x for FIONREAD */
  118. #elif defined __OS_solaris
  119. #define BSD_COMP /* needed on older solaris for FIONREAD */
  120. #endif /* HAVE_FILIO_H / __OS_solaris */
  121. #include <sys/ioctl.h> /* ioctl() used on write error */
  122. #include <netinet/in.h>
  123. #include <netinet/in_systm.h>
  124. #include <netinet/ip.h>
  125. #include <netinet/tcp.h>
  126. #include <sys/uio.h> /* writev*/
  127. #include <netdb.h>
  128. #include <stdlib.h> /*exit() */
  129. #include <unistd.h>
  130. #include <errno.h>
  131. #include <string.h>
  132. #ifdef HAVE_SELECT
  133. #include <sys/select.h>
  134. #endif
  135. #include <sys/poll.h>
  136. #include "ip_addr.h"
  137. #include "pass_fd.h"
  138. #include "tcp_conn.h"
  139. #include "globals.h"
  140. #include "pt.h"
  141. #include "locking.h"
  142. #include "mem/mem.h"
  143. #include "mem/shm_mem.h"
  144. #include "timer.h"
  145. #include "sr_module.h"
  146. #include "tcp_server.h"
  147. #include "tcp_init.h"
  148. #include "tcp_stats.h"
  149. #include "tcp_ev.h"
  150. #include "tsend.h"
  151. #include "timer_ticks.h"
  152. #include "local_timer.h"
  153. #ifdef CORE_TLS
  154. #include "tls/tls_server.h"
  155. #define tls_loaded() 1
  156. #else
  157. #include "tls_hooks_init.h"
  158. #include "tls_hooks.h"
  159. #endif /* CORE_TLS*/
  160. #ifdef USE_DST_BLACKLIST
  161. #include "dst_blacklist.h"
  162. #endif /* USE_DST_BLACKLIST */
  163. #include "tcp_info.h"
  164. #include "tcp_options.h"
  165. #include "ut.h"
  166. #include "cfg/cfg_struct.h"
  167. #define local_malloc pkg_malloc
  168. #define local_free pkg_free
  169. #include <fcntl.h> /* must be included after io_wait.h if SIGIO_RT is used */
  170. #ifdef NO_MSG_DONTWAIT
  171. #ifndef MSG_DONTWAIT
  172. /* should work inside tcp_main */
  173. #define MSG_DONTWAIT 0
  174. #endif
  175. #endif /*NO_MSG_DONTWAIT */
  176. #define TCP_PASS_NEW_CONNECTION_ON_DATA /* don't pass a new connection
  177. immediately to a child, wait for
  178. some data on it first */
  179. #define TCP_LISTEN_BACKLOG 1024
  180. #define SEND_FD_QUEUE /* queue send fd requests on EAGAIN, instead of sending
  181. them immediately */
  182. #define TCP_CHILD_NON_BLOCKING
  183. #ifdef SEND_FD_QUEUE
  184. #ifndef TCP_CHILD_NON_BLOCKING
  185. #define TCP_CHILD_NON_BLOCKING
  186. #endif
  187. #define MAX_SEND_FD_QUEUE_SIZE tcp_main_max_fd_no
  188. #define SEND_FD_QUEUE_SIZE 128 /* initial size */
  189. #define MAX_SEND_FD_RETRIES 96 /* FIXME: not used for now */
  190. #define SEND_FD_QUEUE_TIMEOUT MS_TO_TICKS(2000) /* 2 s */
  191. #endif
  192. /* minimum interval local_timer_run() is allowed to run, in ticks */
  193. #define TCPCONN_TIMEOUT_MIN_RUN 1 /* once per tick */
  194. #define TCPCONN_WAIT_TIMEOUT 1 /* 1 tick */
  195. #ifdef TCP_ASYNC
  196. static unsigned int* tcp_total_wq=0;
  197. #endif
  198. enum fd_types { F_NONE, F_SOCKINFO /* a tcp_listen fd */,
  199. F_TCPCONN, F_TCPCHILD, F_PROC };
  200. #ifdef TCP_FD_CACHE
  201. #define TCP_FD_CACHE_SIZE 8
  202. struct fd_cache_entry{
  203. struct tcp_connection* con;
  204. int id;
  205. int fd;
  206. };
  207. static struct fd_cache_entry fd_cache[TCP_FD_CACHE_SIZE];
  208. #endif /* TCP_FD_CACHE */
  209. static int is_tcp_main=0;
  210. enum poll_types tcp_poll_method=0; /* by default choose the best method */
  211. int tcp_main_max_fd_no=0;
  212. int tcp_max_connections=DEFAULT_TCP_MAX_CONNECTIONS;
  213. static union sockaddr_union tcp_source_ipv4_addr; /* saved bind/srv v4 addr. */
  214. static union sockaddr_union* tcp_source_ipv4=0;
  215. #ifdef USE_IPV6
  216. static union sockaddr_union tcp_source_ipv6_addr; /* saved bind/src v6 addr. */
  217. static union sockaddr_union* tcp_source_ipv6=0;
  218. #endif
  219. static int* tcp_connections_no=0; /* current open connections */
  220. /* connection hash table (after ip&port) , includes also aliases */
  221. struct tcp_conn_alias** tcpconn_aliases_hash=0;
  222. /* connection hash table (after connection id) */
  223. struct tcp_connection** tcpconn_id_hash=0;
  224. gen_lock_t* tcpconn_lock=0;
  225. struct tcp_child* tcp_children;
  226. static int* connection_id=0; /* unique for each connection, used for
  227. quickly finding the corresponding connection
  228. for a reply */
  229. int unix_tcp_sock;
  230. static int tcp_proto_no=-1; /* tcp protocol number as returned by
  231. getprotobyname */
  232. static io_wait_h io_h;
  233. static struct local_timer tcp_main_ltimer;
  234. static ticks_t tcp_main_prev_ticks;
  235. static ticks_t tcpconn_main_timeout(ticks_t , struct timer_ln* , void* );
  236. inline static int _tcpconn_add_alias_unsafe(struct tcp_connection* c, int port,
  237. struct ip_addr* l_ip, int l_port,
  238. int flags);
  239. /* sets source address used when opening new sockets and no source is specified
  240. * (by default the address is choosen by the kernel)
  241. * Should be used only on init.
  242. * returns -1 on error */
  243. int tcp_set_src_addr(struct ip_addr* ip)
  244. {
  245. switch (ip->af){
  246. case AF_INET:
  247. ip_addr2su(&tcp_source_ipv4_addr, ip, 0);
  248. tcp_source_ipv4=&tcp_source_ipv4_addr;
  249. break;
  250. #ifdef USE_IPV6
  251. case AF_INET6:
  252. ip_addr2su(&tcp_source_ipv6_addr, ip, 0);
  253. tcp_source_ipv6=&tcp_source_ipv6_addr;
  254. break;
  255. #endif
  256. default:
  257. return -1;
  258. }
  259. return 0;
  260. }
  261. static inline int init_sock_keepalive(int s)
  262. {
  263. int optval;
  264. #ifdef HAVE_SO_KEEPALIVE
  265. if (cfg_get(tcp, tcp_cfg, keepalive)){
  266. optval=1;
  267. if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &optval,
  268. sizeof(optval))<0){
  269. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to enable"
  270. " SO_KEEPALIVE: %s\n", strerror(errno));
  271. return -1;
  272. }
  273. }
  274. #endif
  275. #ifdef HAVE_TCP_KEEPINTVL
  276. if ((optval=cfg_get(tcp, tcp_cfg, keepintvl))){
  277. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &optval,
  278. sizeof(optval))<0){
  279. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  280. " keepalive probes interval: %s\n", strerror(errno));
  281. }
  282. }
  283. #endif
  284. #ifdef HAVE_TCP_KEEPIDLE
  285. if ((optval=cfg_get(tcp, tcp_cfg, keepidle))){
  286. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &optval,
  287. sizeof(optval))<0){
  288. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  289. " keepalive idle interval: %s\n", strerror(errno));
  290. }
  291. }
  292. #endif
  293. #ifdef HAVE_TCP_KEEPCNT
  294. if ((optval=cfg_get(tcp, tcp_cfg, keepcnt))){
  295. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT, &optval,
  296. sizeof(optval))<0){
  297. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  298. " maximum keepalive count: %s\n", strerror(errno));
  299. }
  300. }
  301. #endif
  302. return 0;
  303. }
  304. /* set all socket/fd options for new sockets (e.g. before connect):
  305. * disable nagle, tos lowdelay, reuseaddr, non-blocking
  306. *
  307. * return -1 on error */
  308. static int init_sock_opt(int s)
  309. {
  310. int flags;
  311. int optval;
  312. #ifdef DISABLE_NAGLE
  313. flags=1;
  314. if ( (tcp_proto_no!=-1) && (setsockopt(s, tcp_proto_no , TCP_NODELAY,
  315. &flags, sizeof(flags))<0) ){
  316. LOG(L_WARN, "WARNING: init_sock_opt: could not disable Nagle: %s\n",
  317. strerror(errno));
  318. }
  319. #endif
  320. /* tos*/
  321. optval = tos;
  322. if (setsockopt(s, IPPROTO_IP, IP_TOS, (void*)&optval,sizeof(optval)) ==-1){
  323. LOG(L_WARN, "WARNING: init_sock_opt: setsockopt tos: %s\n",
  324. strerror(errno));
  325. /* continue since this is not critical */
  326. }
  327. #if !defined(TCP_DONT_REUSEADDR)
  328. optval=1;
  329. if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR,
  330. (void*)&optval, sizeof(optval))==-1){
  331. LOG(L_ERR, "ERROR: setsockopt SO_REUSEADDR %s\n",
  332. strerror(errno));
  333. /* continue, not critical */
  334. }
  335. #endif /* !TCP_DONT_REUSEADDR */
  336. #ifdef HAVE_TCP_SYNCNT
  337. if ((optval=cfg_get(tcp, tcp_cfg, syncnt))){
  338. if (setsockopt(s, IPPROTO_TCP, TCP_SYNCNT, &optval,
  339. sizeof(optval))<0){
  340. LOG(L_WARN, "WARNING: init_sock_opt: failed to set"
  341. " maximum SYN retr. count: %s\n", strerror(errno));
  342. }
  343. }
  344. #endif
  345. #ifdef HAVE_TCP_LINGER2
  346. if ((optval=cfg_get(tcp, tcp_cfg, linger2))){
  347. if (setsockopt(s, IPPROTO_TCP, TCP_LINGER2, &optval,
  348. sizeof(optval))<0){
  349. LOG(L_WARN, "WARNING: init_sock_opt: failed to set"
  350. " maximum LINGER2 timeout: %s\n", strerror(errno));
  351. }
  352. }
  353. #endif
  354. #ifdef HAVE_TCP_QUICKACK
  355. if (cfg_get(tcp, tcp_cfg, delayed_ack)){
  356. optval=0; /* reset quick ack => delayed ack */
  357. if (setsockopt(s, IPPROTO_TCP, TCP_QUICKACK, &optval,
  358. sizeof(optval))<0){
  359. LOG(L_WARN, "WARNING: init_sock_opt: failed to reset"
  360. " TCP_QUICKACK: %s\n", strerror(errno));
  361. }
  362. }
  363. #endif /* HAVE_TCP_QUICKACK */
  364. init_sock_keepalive(s);
  365. /* non-blocking */
  366. flags=fcntl(s, F_GETFL);
  367. if (flags==-1){
  368. LOG(L_ERR, "ERROR: init_sock_opt: fnctl failed: (%d) %s\n",
  369. errno, strerror(errno));
  370. goto error;
  371. }
  372. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  373. LOG(L_ERR, "ERROR: init_sock_opt: fcntl: set non-blocking failed:"
  374. " (%d) %s\n", errno, strerror(errno));
  375. goto error;
  376. }
  377. return 0;
  378. error:
  379. return -1;
  380. }
  381. /* set all socket/fd options for "accepted" sockets
  382. * only nonblocking is set since the rest is inherited from the
  383. * "parent" (listening) socket
  384. * Note: setting O_NONBLOCK is required on linux but it's not needed on
  385. * BSD and possibly solaris (where the flag is inherited from the
  386. * parent socket). However since there is no standard document
  387. * requiring a specific behaviour in this case it's safer to always set
  388. * it (at least for now) --andrei
  389. * TODO: check on which OSes O_NONBLOCK is inherited and make this
  390. * function a nop.
  391. *
  392. * return -1 on error */
  393. static int init_sock_opt_accept(int s)
  394. {
  395. int flags;
  396. /* non-blocking */
  397. flags=fcntl(s, F_GETFL);
  398. if (flags==-1){
  399. LOG(L_ERR, "ERROR: init_sock_opt_accept: fnctl failed: (%d) %s\n",
  400. errno, strerror(errno));
  401. goto error;
  402. }
  403. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  404. LOG(L_ERR, "ERROR: init_sock_opt_accept: "
  405. "fcntl: set non-blocking failed: (%d) %s\n",
  406. errno, strerror(errno));
  407. goto error;
  408. }
  409. return 0;
  410. error:
  411. return -1;
  412. }
/* blocking connect on a non-blocking fd; it will timeout after
 * tcp_connect_timeout
 * if BLOCKING_USE_SELECT and HAVE_SELECT are defined it will internally
 * use select() instead of poll (bad if fd > FD_SET_SIZE, poll is preferred)
 * returns 0 on success, -1 on error (timeout, refused connection a.s.o.);
 * on failure it may blacklist the destination and generates the
 * corresponding TCP_EV_CONNECT_* event */
static int tcp_blocking_connect(int fd, int type,
								const struct sockaddr *servaddr,
								socklen_t addrlen)
{
	int n;
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
	fd_set sel_set;
	fd_set orig_set;
	struct timeval timeout;
#else
	struct pollfd pf;
#endif
	int elapsed;
	int to;
	int ticks;
	int err;
	unsigned int err_len;
	int poll_err;

	poll_err=0;
	/* overall budget (in s) for the whole connect attempt */
	to=cfg_get(tcp, tcp_cfg, connect_timeout_s);
	ticks=get_ticks();
again:
	n=connect(fd, servaddr, addrlen);
	if (n==-1){
		if (errno==EINTR){
			/* interrupted by a signal: retry while time remains */
			elapsed=(get_ticks()-ticks)*TIMER_TICK;
			if (elapsed<to) goto again;
			else goto error_timeout;
		}
		if (errno!=EINPROGRESS && errno!=EALREADY){
			goto error_errno;
		}
	}else goto end; /* connect completed immediately */
	/* poll/select loop: wait until the fd becomes writable (async connect
	 * finished) or until the configured timeout expires */
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
	FD_ZERO(&orig_set);
	FD_SET(fd, &orig_set);
#else
	pf.fd=fd;
	pf.events=POLLOUT;
#endif
	while(1){
		elapsed=(get_ticks()-ticks)*TIMER_TICK;
		if (elapsed>=to)
			goto error_timeout;
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
		sel_set=orig_set;
		timeout.tv_sec=to-elapsed;
		timeout.tv_usec=0;
		n=select(fd+1, 0, &sel_set, 0, &timeout);
#else
		n=poll(&pf, 1, (to-elapsed)*1000);
#endif
		if (n<0){
			if (errno==EINTR) continue;
			LOG(L_ERR, "ERROR: tcp_blocking_connect %s: poll/select failed:"
					" (%d) %s\n",
					su2a((union sockaddr_union*)servaddr, addrlen),
					errno, strerror(errno));
			goto error;
		}else if (n==0) /* timeout */ continue;
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
		if (FD_ISSET(fd, &sel_set))
#else
		if (pf.revents&(POLLERR|POLLHUP|POLLNVAL)){
			LOG(L_ERR, "ERROR: tcp_blocking_connect %s: poll error: "
					"flags %x\n",
					su2a((union sockaddr_union*)servaddr, addrlen),
					pf.revents);
			poll_err=1;
		}
#endif
		/* writability alone does not mean success: fetch the real connect
		 * result via SO_ERROR */
		{
			err_len=sizeof(err);
			getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &err_len);
			if ((err==0) && (poll_err==0)) goto end;
			if (err!=EINPROGRESS && err!=EALREADY){
				LOG(L_ERR, "ERROR: tcp_blocking_connect %s: SO_ERROR (%d) "
						"%s\n",
						su2a((union sockaddr_union*)servaddr, addrlen),
						err, strerror(err));
				errno=err;
				goto error_errno;
			}
		}
	}
error_errno:
	/* map the errno to a blacklist reason + TCP connect event */
	switch(errno){
		case ENETUNREACH:
		case EHOSTUNREACH:
#ifdef USE_DST_BLACKLIST
			if (cfg_get(core, core_cfg, use_dst_blacklist))
				dst_blacklist_su(BLST_ERR_CONNECT, type,
									(union sockaddr_union*)servaddr, 0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_CONNECT_UNREACHABLE(errno, 0, 0,
									(union sockaddr_union*)servaddr, type);
			break;
		case ETIMEDOUT:
#ifdef USE_DST_BLACKLIST
			if (cfg_get(core, core_cfg, use_dst_blacklist))
				dst_blacklist_su(BLST_ERR_CONNECT, type,
									(union sockaddr_union*)servaddr, 0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_CONNECT_TIMEOUT(errno, 0, 0,
									(union sockaddr_union*)servaddr, type);
			break;
		case ECONNREFUSED:
		case ECONNRESET:
#ifdef USE_DST_BLACKLIST
			if (cfg_get(core, core_cfg, use_dst_blacklist))
				dst_blacklist_su(BLST_ERR_CONNECT, type,
									(union sockaddr_union*)servaddr, 0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_CONNECT_RST(errno, 0, 0,
									(union sockaddr_union*)servaddr, type);
			break;
		case EAGAIN: /* not posix, but supported on linux and bsd */
			TCP_EV_CONNECT_NO_MORE_PORTS(errno, 0, 0,
									(union sockaddr_union*)servaddr, type);
			break;
		default:
			TCP_EV_CONNECT_ERR(errno, 0, 0,
									(union sockaddr_union*)servaddr, type);
	}
	LOG(L_ERR, "ERROR: tcp_blocking_connect %s: (%d) %s\n",
			su2a((union sockaddr_union*)servaddr, addrlen),
			errno, strerror(errno));
	goto error;
error_timeout:
	/* timeout */
#ifdef USE_DST_BLACKLIST
	if (cfg_get(core, core_cfg, use_dst_blacklist))
		dst_blacklist_su(BLST_ERR_CONNECT, type,
							(union sockaddr_union*)servaddr, 0);
#endif /* USE_DST_BLACKLIST */
	TCP_EV_CONNECT_TIMEOUT(0, 0, 0, (union sockaddr_union*)servaddr, type);
	LOG(L_ERR, "ERROR: tcp_blocking_connect %s: timeout %d s elapsed "
			"from %d s\n", su2a((union sockaddr_union*)servaddr, addrlen),
			elapsed, cfg_get(tcp, tcp_cfg, connect_timeout_s));
error:
	TCP_STATS_CONNECT_FAILED();
	return -1;
end:
	return 0;
}
  564. inline static int _tcpconn_write_nb(int fd, struct tcp_connection* c,
  565. char* buf, int len);
#ifdef TCP_ASYNC
/* true if the connection write queue is empty
 * (unsafe version: the caller must hold the connection write lock) */
#define _wbufq_empty(con) ((con)->wbuf_q.first==0)
/* true if the connection write queue holds queued data
 * (unsafe version: the caller must hold the connection write lock) */
#define _wbufq_non_empty(con) ((con)->wbuf_q.first!=0)
/* unsafe version, call while holding the connection write lock.
 * Appends size bytes from data to the connection's write queue (copied
 * into shm buffers). Fails if the per-connection or global queue limits
 * would be exceeded, or if the existing queued data has already passed
 * its write timeout (in which case the destination may be blacklisted
 * and timeout events/stats are generated).
 * returns 0 on success, -1 on error (full queue/timeout/out of shm) */
inline static int _wbufq_add(struct tcp_connection* c, char* data,
							unsigned int size)
{
	struct tcp_wbuffer_queue* q;
	struct tcp_wbuffer* wb;
	unsigned int last_free;
	unsigned int wb_size;
	unsigned int crt_size;
	ticks_t t;

	q=&c->wbuf_q;
	t=get_ticks_raw();
	/* reject if per-connection limit, global limit, or the queue's write
	 * timeout (only meaningful if something is already queued) is hit */
	if (unlikely( ((q->queued+size)>cfg_get(tcp, tcp_cfg, tcpconn_wq_max)) ||
			((*tcp_total_wq+size)>cfg_get(tcp, tcp_cfg, tcp_wq_max)) ||
			(q->first &&
			TICKS_LT(q->wr_timeout, t)) )){
		LOG(L_ERR, "ERROR: wbufq_add(%d bytes): write queue full or timeout "
					" (%d, total %d, last write %d s ago)\n",
					size, q->queued, *tcp_total_wq,
					TICKS_TO_S(t-q->wr_timeout-
						cfg_get(tcp, tcp_cfg, send_timeout)));
		if (q->first && TICKS_LT(q->wr_timeout, t)){
			/* timeout case: distinguish a connect still in progress from
			 * an established connection for blacklist/event purposes */
			if (unlikely(c->state==S_CONN_CONNECT)){
#ifdef USE_DST_BLACKLIST
				if (likely(cfg_get(core, core_cfg, use_dst_blacklist))){
					DBG("blacklisting, state=%d\n", c->state);
					dst_blacklist_su( BLST_ERR_CONNECT, c->rcv.proto,
										&c->rcv.src_su, 0);
				}
#endif /* USE_DST_BLACKLIST */
				TCP_EV_CONNECT_TIMEOUT(0, TCP_LADDR(c), TCP_LPORT(c),
										TCP_PSU(c), TCP_PROTO(c));
				TCP_STATS_CONNECT_FAILED();
			}else{
#ifdef USE_DST_BLACKLIST
				if (likely(cfg_get(core, core_cfg, use_dst_blacklist))){
					DBG("blacklisting, state=%d\n", c->state);
					dst_blacklist_su( BLST_ERR_SEND, c->rcv.proto,
										&c->rcv.src_su, 0);
				}
#endif /* USE_DST_BLACKLIST */
				TCP_EV_SEND_TIMEOUT(0, &c->rcv);
				TCP_STATS_SEND_TIMEOUT();
			}
		}else{
			/* if it's not a timeout => queue full */
			TCP_EV_SENDQ_FULL(0, &c->rcv);
			TCP_STATS_SENDQ_FULL();
		}
		goto error;
	}
	if (unlikely(q->last==0)){
		/* empty queue: allocate the first buffer block (at least
		 * wq_blk_size, or size if bigger) and start the write timeout */
		wb_size=MAX_unsigned(cfg_get(tcp, tcp_cfg, wq_blk_size), size);
		wb=shm_malloc(sizeof(*wb)+wb_size-1);
		if (unlikely(wb==0))
			goto error;
		wb->b_size=wb_size;
		wb->next=0;
		q->last=wb;
		q->first=wb;
		q->last_used=0;
		q->offset=0;
		/* longer deadline while the connect is still in progress */
		q->wr_timeout=get_ticks_raw()+
			((c->state==S_CONN_CONNECT)?
					S_TO_TICKS(cfg_get(tcp, tcp_cfg, connect_timeout_s)):
					cfg_get(tcp, tcp_cfg, send_timeout));
	}else{
		wb=q->last;
	}

	/* copy data into the tail block, growing the chain as needed */
	while(size){
		last_free=wb->b_size-q->last_used;
		if (last_free==0){
			wb_size=MAX_unsigned(cfg_get(tcp, tcp_cfg, wq_blk_size), size);
			wb=shm_malloc(sizeof(*wb)+wb_size-1);
			if (unlikely(wb==0))
				goto error;
			wb->b_size=wb_size;
			wb->next=0;
			q->last->next=wb;
			q->last=wb;
			q->last_used=0;
			last_free=wb->b_size;
		}
		crt_size=MIN_unsigned(last_free, size);
		memcpy(wb->buf+q->last_used, data, crt_size);
		q->last_used+=crt_size;
		size-=crt_size;
		data+=crt_size;
		q->queued+=crt_size;
		atomic_add_int((int*)tcp_total_wq, crt_size);
	}
	return 0;
error:
	return -1;
}
  666. /* unsafe version, call while holding the connection write lock
  667. * inserts data at the beginning, it ignores the max queue size checks and
  668. * the timeout (use sparingly)
  669. * Note: it should never be called on a write buffer after wbufq_run() */
  670. inline static int _wbufq_insert(struct tcp_connection* c, char* data,
  671. unsigned int size)
  672. {
  673. struct tcp_wbuffer_queue* q;
  674. struct tcp_wbuffer* wb;
  675. q=&c->wbuf_q;
  676. if (likely(q->first==0)) /* if empty, use wbufq_add */
  677. return _wbufq_add(c, data, size);
  678. if (unlikely((*tcp_total_wq+size)>cfg_get(tcp, tcp_cfg, tcp_wq_max))){
  679. LOG(L_ERR, "ERROR: wbufq_insert(%d bytes): write queue full"
  680. " (%d, total %d, last write %d s ago)\n",
  681. size, q->queued, *tcp_total_wq,
  682. TICKS_TO_S(get_ticks_raw()-q->wr_timeout-
  683. cfg_get(tcp, tcp_cfg, send_timeout)));
  684. goto error;
  685. }
  686. if (unlikely(q->offset)){
  687. LOG(L_CRIT, "BUG: wbufq_insert: non-null offset %d (bad call, should"
  688. "never be called after the wbufq_run())\n", q->offset);
  689. goto error;
  690. }
  691. if ((q->first==q->last) && ((q->last->b_size-q->last_used)>=size)){
  692. /* one block with enough space in it for size bytes */
  693. memmove(q->first->buf+size, q->first->buf, size);
  694. memcpy(q->first->buf, data, size);
  695. q->last_used+=size;
  696. }else{
  697. /* create a size bytes block directly */
  698. wb=shm_malloc(sizeof(*wb)+size-1);
  699. if (unlikely(wb==0))
  700. goto error;
  701. wb->b_size=size;
  702. /* insert it */
  703. wb->next=q->first;
  704. q->first=wb;
  705. memcpy(wb->buf, data, size);
  706. }
  707. q->queued+=size;
  708. atomic_add_int((int*)tcp_total_wq, size);
  709. return 0;
  710. error:
  711. return -1;
  712. }
  713. /* unsafe version, call while holding the connection write lock */
  714. inline static void _wbufq_destroy( struct tcp_wbuffer_queue* q)
  715. {
  716. struct tcp_wbuffer* wb;
  717. struct tcp_wbuffer* next_wb;
  718. int unqueued;
  719. unqueued=0;
  720. if (likely(q->first)){
  721. wb=q->first;
  722. do{
  723. next_wb=wb->next;
  724. unqueued+=(wb==q->last)?q->last_used:wb->b_size;
  725. if (wb==q->first)
  726. unqueued-=q->offset;
  727. shm_free(wb);
  728. wb=next_wb;
  729. }while(wb);
  730. }
  731. memset(q, 0, sizeof(*q));
  732. atomic_add_int((int*)tcp_total_wq, -unqueued);
  733. }
/* tries to empty the queue (safe version, c->write_lock must not be hold)
 * returns -1 on error, bytes written on success (>=0)
 * if the whole queue is emptied => sets *empty
 * on a fatal write error the destination may be blacklisted and
 * connect/send events and stats are generated */
inline static int wbufq_run(int fd, struct tcp_connection* c, int* empty)
{
	struct tcp_wbuffer_queue* q;
	struct tcp_wbuffer* wb;
	int n;
	int ret;
	int block_size;
	char* buf;

	*empty=0;
	ret=0;
	lock_get(&c->write_lock);
	q=&c->wbuf_q;
	while(q->first){
		/* size of the remaining unwritten part of the head block */
		block_size=((q->first==q->last)?q->last_used:q->first->b_size)-
						q->offset;
		buf=q->first->buf+q->offset;
		n=_tcpconn_write_nb(fd, c, buf, block_size);
		if (likely(n>0)){
			ret+=n;
			if (likely(n==block_size)){
				/* whole block written => unlink and free it */
				wb=q->first;
				q->first=q->first->next;
				shm_free(wb);
				q->offset=0;
				q->queued-=block_size;
				atomic_add_int((int*)tcp_total_wq, -block_size);
			}else{
				/* partial write => remember how far we got and stop */
				q->offset+=n;
				q->queued-=n;
				atomic_add_int((int*)tcp_total_wq, -n);
				break;
			}
		}else{
			if (n<0){
				/* EINTR is handled inside _tcpconn_write_nb */
				if (!(errno==EAGAIN || errno==EWOULDBLOCK)){
					/* fatal error: blacklist/record it depending on
					 * whether the connect had completed yet */
					if (unlikely(c->state==S_CONN_CONNECT)){
						switch(errno){
							case ENETUNREACH:
							case EHOSTUNREACH: /* not posix for send() */
#ifdef USE_DST_BLACKLIST
								if (cfg_get(core, core_cfg, use_dst_blacklist))
									dst_blacklist_su(BLST_ERR_CONNECT,
														c->rcv.proto,
														&c->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
								TCP_EV_CONNECT_UNREACHABLE(errno, TCP_LADDR(c),
													TCP_LPORT(c), TCP_PSU(c),
													TCP_PROTO(c));
								break;
							case ECONNREFUSED:
							case ECONNRESET:
#ifdef USE_DST_BLACKLIST
								if (cfg_get(core, core_cfg, use_dst_blacklist))
									dst_blacklist_su(BLST_ERR_CONNECT,
														c->rcv.proto,
														&c->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
								TCP_EV_CONNECT_RST(0, TCP_LADDR(c),
													TCP_LPORT(c), TCP_PSU(c),
													TCP_PROTO(c));
								break;
							default:
								TCP_EV_CONNECT_ERR(errno, TCP_LADDR(c),
													TCP_LPORT(c), TCP_PSU(c),
													TCP_PROTO(c));
						}
						TCP_STATS_CONNECT_FAILED();
					}else{
						switch(errno){
							case ECONNREFUSED:
							case ECONNRESET:
								TCP_STATS_CON_RESET();
								/* no break */
							case ENETUNREACH:
							case EHOSTUNREACH: /* not posix for send() */
#ifdef USE_DST_BLACKLIST
								if (cfg_get(core, core_cfg, use_dst_blacklist))
									dst_blacklist_su(BLST_ERR_SEND,
														c->rcv.proto,
														&c->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
								break;
						}
					}
					ret=-1;
					LOG(L_ERR, "ERROR: wbuf_runq: %s [%d]\n",
							strerror(errno), errno);
				}
			}
			break;
		}
	}
	if (likely(q->first==0)){
		/* whole queue written */
		q->last=0;
		q->last_used=0;
		q->offset=0;
		*empty=1;
	}
	lock_release(&c->write_lock);
	if (likely(ret>0)){
		/* some bytes went out: push the write deadline forward and mark
		 * a pending connect/accept as established */
		q->wr_timeout=get_ticks_raw()+cfg_get(tcp, tcp_cfg, send_timeout);
		if (unlikely(c->state==S_CONN_CONNECT || c->state==S_CONN_ACCEPT)){
			TCP_STATS_ESTABLISHED(c->state);
			c->state=S_CONN_OK;
		}
	}
	return ret;
}
  846. #endif /* TCP_ASYNC */
#if 0
/* NOTE: dead code -- compiled out with #if 0, kept for reference only */
/* blocking write even on non-blocking sockets
 * if TCP_TIMEOUT will return with error */
static int tcp_blocking_write(struct tcp_connection* c, int fd, char* buf,
								unsigned int len)
{
	int n;
	fd_set sel_set;
	struct timeval timeout;
	int ticks;
	int initial_len;

	initial_len=len;
again:
	n=send(fd, buf, len,
#ifdef HAVE_MSG_NOSIGNAL
			MSG_NOSIGNAL
#else
			0
#endif
		);
	if (n<0){
		if (errno==EINTR) goto again;
		else if (errno!=EAGAIN && errno!=EWOULDBLOCK){
			LOG(L_ERR, "tcp_blocking_write: failed to send: (%d) %s\n",
					errno, strerror(errno));
			TCP_EV_SEND_TIMEOUT(errno, &c->rcv);
			TCP_STATS_SEND_TIMEOUT();
			goto error;
		}
	}else if (n<len){
		/* partial write */
		buf+=n;
		len-=n;
	}else{
		/* success: full write */
		goto end;
	}
	/* wait (with select) until the socket is writable again, then retry */
	while(1){
		FD_ZERO(&sel_set);
		FD_SET(fd, &sel_set);
		timeout.tv_sec=tcp_send_timeout;
		timeout.tv_usec=0;
		ticks=get_ticks();
		n=select(fd+1, 0, &sel_set, 0, &timeout);
		if (n<0){
			if (errno==EINTR) continue; /* signal, ignore */
			LOG(L_ERR, "ERROR: tcp_blocking_write: select failed: "
					" (%d) %s\n", errno, strerror(errno));
			goto error;
		}else if (n==0){
			/* timeout */
			if (get_ticks()-ticks>=tcp_send_timeout){
				LOG(L_ERR, "ERROR: tcp_blocking_write: send timeout (%d)\n",
						tcp_send_timeout);
				goto error;
			}
			continue;
		}
		if (FD_ISSET(fd, &sel_set)){
			/* we can write again */
			goto again;
		}
	}
error:
	return -1;
end:
	return initial_len;
}
#endif
/* creates a new tcp_connection structure (in shm) for an already opened
 * socket; the read buffer is allocated in the same shm block, right after
 * the structure.
 * sock       - the connected socket fd
 * su         - peer address (copied into rcv.src_su / src_ip / src_port)
 * local_addr - optional local address; if 0, ba's address/port are used
 * ba         - bind address (listening socket info), may be 0
 * type       - protocol (PROTO_TCP or PROTO_TLS)
 * state      - initial connection state
 * returns the new connection (refcnt 0, fd not set) or 0 on error */
struct tcp_connection* tcpconn_new(int sock, union sockaddr_union* su,
									union sockaddr_union* local_addr,
									struct socket_info* ba, int type,
									int state)
{
	struct tcp_connection *c;
	int rd_b_size;

	rd_b_size=cfg_get(tcp, tcp_cfg, rd_buf_size);
	/* one allocation for both the struct and the trailing read buffer */
	c=shm_malloc(sizeof(struct tcp_connection) + rd_b_size);
	if (c==0){
		LOG(L_ERR, "ERROR: tcpconn_new: mem. allocation failure\n");
		goto error;
	}
	memset(c, 0, sizeof(struct tcp_connection)); /* zero init (skip rd buf)*/
	c->s=sock;
	c->fd=-1; /* not initialized */
	if (lock_init(&c->write_lock)==0){
		LOG(L_ERR, "ERROR: tcpconn_new: init lock failed\n");
		goto error;
	}
	/* NOTE(review): error paths after this point shm_free(c) without
	 * lock_destroy(&c->write_lock) -- confirm that is intended */

	c->rcv.src_su=*su;

	atomic_set(&c->refcnt, 0);
	local_timer_init(&c->timer, tcpconn_main_timeout, c, 0);
	su2ip_addr(&c->rcv.src_ip, su);
	c->rcv.src_port=su_getport(su);
	c->rcv.bind_address=ba;
	if (likely(local_addr)){
		su2ip_addr(&c->rcv.dst_ip, local_addr);
		c->rcv.dst_port=su_getport(local_addr);
	}else if (ba){
		/* no explicit local address: fall back to the bind address */
		c->rcv.dst_ip=ba->address;
		c->rcv.dst_port=ba->port_no;
	}
	print_ip("tcpconn_new: new tcp connection: ", &c->rcv.src_ip, "\n");
	DBG(     "tcpconn_new: on port %d, type %d\n", c->rcv.src_port, type);
	/* the read buffer lives right after the struct in the same shm block */
	init_tcp_req(&c->req, (char*)c+sizeof(struct tcp_connection), rd_b_size);
	c->id=(*connection_id)++;
	c->rcv.proto_reserved1=0; /* this will be filled before receive_message*/
	c->rcv.proto_reserved2=0;
	c->state=state;
	c->extra_data=0;
#ifdef USE_TLS
	if (type==PROTO_TLS){
		if (tls_tcpconn_init(c, sock)==-1) goto error;
	}else
#endif /* USE_TLS*/
	{
		c->type=PROTO_TCP;
		c->rcv.proto=PROTO_TCP;
		c->timeout=get_ticks_raw()+cfg_get(tcp, tcp_cfg, con_lifetime);
	}

	return c;

error:
	if (c) shm_free(c);
	return 0;
}
/* do the actual connect, set sock. options a.s.o
 * returns socket on success, -1 on error
 * sets also *res_local_addr, res_si and state (S_CONN_CONNECT for an
 * unfinished connect and S_CONN_OK for a finished one)
 * In async mode (TCP_ASYNC + tcp async cfg) the connect is started
 * non-blocking and may return with *state==S_CONN_CONNECT (still in
 * progress); otherwise tcp_blocking_connect() is used and the returned
 * socket is fully connected. */
inline static int tcp_do_connect(	union sockaddr_union* server,
									union sockaddr_union* from,
									int type,
									union sockaddr_union* res_local_addr,
									struct socket_info** res_si,
									enum tcp_conn_states *state
									)
{
	int s;
	union sockaddr_union my_name;
	socklen_t my_name_len;
	struct ip_addr ip;
#ifdef TCP_ASYNC
	int n;
#endif /* TCP_ASYNC */

	s=socket(AF2PF(server->s.sa_family), SOCK_STREAM, 0);
	if (unlikely(s==-1)){
		LOG(L_ERR, "ERROR: tcp_do_connect %s: socket: (%d) %s\n",
				su2a(server, sizeof(*server)), errno, strerror(errno));
		goto error;
	}
	/* set non-blocking + the configured tcp socket options */
	if (init_sock_opt(s)<0){
		LOG(L_ERR, "ERROR: tcp_do_connect %s: init_sock_opt failed\n",
				su2a(server, sizeof(*server)));
		goto error;
	}
	/* binding to the source address is best-effort: a failure is only
	 * logged and the connect proceeds with a kernel-chosen source */
	if (unlikely(from && bind(s, &from->s, sockaddru_len(*from)) != 0)){
		LOG(L_WARN, "WARNING: tcp_do_connect: binding to source address"
					" %s failed: %s [%d]\n", su2a(from, sizeof(*from)),
					strerror(errno), errno);
	}
	*state=S_CONN_OK;
#ifdef TCP_ASYNC
	if (likely(cfg_get(tcp, tcp_cfg, async))){
again:
		n=connect(s, &server->s, sockaddru_len(*server));
		if (likely(n==-1)){ /*non-blocking => most probable EINPROGRESS*/
			if (likely(errno==EINPROGRESS))
				*state=S_CONN_CONNECT; /* connect finishes asynchronously */
			else if (errno==EINTR) goto again;
			else if (errno!=EALREADY){
				/* hard failure: blacklist + event depending on errno */
				switch(errno){
					case ENETUNREACH:
					case EHOSTUNREACH:
#ifdef USE_DST_BLACKLIST
						if (cfg_get(core, core_cfg, use_dst_blacklist))
							dst_blacklist_su(BLST_ERR_CONNECT, type, server,0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_UNREACHABLE(errno, 0, 0, server, type);
						break;
					case ETIMEDOUT:
#ifdef USE_DST_BLACKLIST
						if (cfg_get(core, core_cfg, use_dst_blacklist))
							dst_blacklist_su(BLST_ERR_CONNECT, type, server,0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_TIMEOUT(errno, 0, 0, server, type);
						break;
					case ECONNREFUSED:
					case ECONNRESET:
#ifdef USE_DST_BLACKLIST
						if (cfg_get(core, core_cfg, use_dst_blacklist))
							dst_blacklist_su(BLST_ERR_CONNECT, type, server,0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_RST(errno, 0, 0, server, type);
						break;
					case EAGAIN:/* not posix, but supported on linux and bsd */
						TCP_EV_CONNECT_NO_MORE_PORTS(errno, 0, 0, server,type);
						break;
					default:
						TCP_EV_CONNECT_ERR(errno, 0, 0, server, type);
				}
				TCP_STATS_CONNECT_FAILED();
				LOG(L_ERR, "ERROR: tcp_do_connect: connect %s: (%d) %s\n",
						su2a(server, sizeof(*server)),
						errno, strerror(errno));
				goto error;
			}
		}
	}else{
#endif /* TCP_ASYNC */
		/* synchronous mode: wait for the connect (with timeout) */
		if (tcp_blocking_connect(s, type, &server->s,
								sockaddru_len(*server))<0){
			LOG(L_ERR, "ERROR: tcp_do_connect: tcp_blocking_connect %s"
						" failed\n", su2a(server, sizeof(*server)));
			goto error;
		}
#ifdef TCP_ASYNC
	}
#endif /* TCP_ASYNC */
	if (from){
		su2ip_addr(&ip, from);
		if (!ip_addr_any(&ip))
			/* we already know the source ip, skip the sys. call */
			goto find_socket;
	}
	/* find out which local address the kernel picked */
	my_name_len=sizeof(my_name);
	if (unlikely(getsockname(s, &my_name.s, &my_name_len)!=0)){
		LOG(L_ERR, "ERROR: tcp_do_connect: getsockname failed: %s(%d)\n",
				strerror(errno), errno);
		*res_si=0;
		goto error;
	}
	from=&my_name; /* update from with the real "from" address */
	su2ip_addr(&ip, &my_name);
find_socket:
	/* map the source ip to a listening socket_info (needed for via a.s.o);
	 * fall back to the default send socket if none matches */
#ifdef USE_TLS
	if (unlikely(type==PROTO_TLS))
		*res_si=find_si(&ip, 0, PROTO_TLS);
	else
#endif
		*res_si=find_si(&ip, 0, PROTO_TCP);

	if (unlikely(*res_si==0)){
		LOG(L_WARN, "WARNING: tcp_do_connect %s: could not find corresponding"
				" listening socket for %s, using default...\n",
					su2a(server, sizeof(*server)), ip_addr2a(&ip));
		if (server->s.sa_family==AF_INET) *res_si=sendipv4_tcp;
#ifdef USE_IPV6
		else *res_si=sendipv6_tcp;
#endif
	}
	*res_local_addr=*from;
	return s;
error:
	if (s!=-1) close(s);
	return -1;
}
  1102. struct tcp_connection* tcpconn_connect( union sockaddr_union* server,
  1103. union sockaddr_union* from,
  1104. int type)
  1105. {
  1106. int s;
  1107. struct socket_info* si;
  1108. union sockaddr_union my_name;
  1109. struct tcp_connection* con;
  1110. enum tcp_conn_states state;
  1111. s=-1;
  1112. if (*tcp_connections_no >= cfg_get(tcp, tcp_cfg, max_connections)){
  1113. LOG(L_ERR, "ERROR: tcpconn_connect: maximum number of connections"
  1114. " exceeded (%d/%d)\n",
  1115. *tcp_connections_no,
  1116. cfg_get(tcp, tcp_cfg, max_connections));
  1117. goto error;
  1118. }
  1119. s=tcp_do_connect(server, from, type, &my_name, &si, &state);
  1120. if (s==-1){
  1121. LOG(L_ERR, "ERROR: tcp_do_connect %s: failed (%d) %s\n",
  1122. su2a(server, sizeof(*server)), errno, strerror(errno));
  1123. goto error;
  1124. }
  1125. con=tcpconn_new(s, server, &my_name, si, type, state);
  1126. if (con==0){
  1127. LOG(L_ERR, "ERROR: tcp_connect %s: tcpconn_new failed, closing the "
  1128. " socket\n", su2a(server, sizeof(*server)));
  1129. goto error;
  1130. }
  1131. return con;
  1132. /*FIXME: set sock idx! */
  1133. error:
  1134. if (s!=-1) close(s); /* close the opened socket */
  1135. return 0;
  1136. }
#ifdef TCP_CONNECT_WAIT
/* performs the delayed connect for a connection created in "connect wait"
 * mode: does the real tcp_do_connect(), fills in the connection's real
 * local address/bind socket and fixes the connection aliases to match it.
 * returns the new socket fd on success, -1 on error */
int tcpconn_finish_connect( struct tcp_connection* c,
												union sockaddr_union* from)
{
	int s;
	int r;
	union sockaddr_union local_addr;
	struct socket_info* si;
	enum tcp_conn_states state;
	struct tcp_conn_alias* a;
	int new_conn_alias_flags;

	s=tcp_do_connect(&c->rcv.src_su, from, c->type, &local_addr, &si, &state);
	if (unlikely(s==-1)){
		LOG(L_ERR, "ERROR: tcpconn_finish_connect %s: tcp_do_connect for %p"
					" failed\n", su2a(&c->rcv.src_su, sizeof(c->rcv.src_su)),
					c);
		return -1;
	}
	/* record the actual local address the connect ended up using */
	c->rcv.bind_address=si;
	su2ip_addr(&c->rcv.dst_ip, &local_addr);
	c->rcv.dst_port=su_getport(&local_addr);
	/* update aliases if needed */
	if (likely(from==0)){
		/* no forced source: just add the local-address based aliases */
		new_conn_alias_flags=cfg_get(tcp, tcp_cfg, new_conn_alias_flags);
		/* add aliases */
		TCPCONN_LOCK;
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip, 0,
													new_conn_alias_flags);
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
									c->rcv.dst_port, new_conn_alias_flags);
		TCPCONN_UNLOCK;
	}else if (su_cmp(from, &local_addr)!=1){
		/* the requested source differs from the one actually used:
		 * rebuild the local-address aliases */
		new_conn_alias_flags=cfg_get(tcp, tcp_cfg, new_conn_alias_flags);
		TCPCONN_LOCK;
			/* remove all the aliases except the first one and re-add them
			 * (there shouldn't be more then the 3 default aliases at this
			 * stage) */
			for (r=1; r<c->aliases; r++){
				a=&c->con_aliases[r];
				tcpconn_listrm(tcpconn_aliases_hash[a->hash], a, next, prev);
			}
			c->aliases=1;
			/* add the local_ip:0 and local_ip:local_port aliases */
			_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
												0, new_conn_alias_flags);
			_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
									c->rcv.dst_port, new_conn_alias_flags);
		TCPCONN_UNLOCK;
	}
	return s;
}
#endif /* TCP_CONNECT_WAIT */
/* adds a tcp connection to the tcpconn hashes
 * Note: it's called _only_ from the tcp_main process
 * computes the id hash, marks the connection F_CONN_HASHED, links it into
 * tcpconn_id_hash and registers up to 3 lookup aliases for it.
 * returns c on success, 0 on bug (null connection) */
inline static struct tcp_connection*  tcpconn_add(struct tcp_connection *c)
{
	struct ip_addr zero_ip;
	int new_conn_alias_flags;

	if (likely(c)){
		ip_addr_mk_any(c->rcv.src_ip.af, &zero_ip);
		c->id_hash=tcp_id_hash(c->id);
		c->aliases=0;
		new_conn_alias_flags=cfg_get(tcp, tcp_cfg, new_conn_alias_flags);
		TCPCONN_LOCK;
		c->flags|=F_CONN_HASHED;
		/* add it at the begining of the list*/
		tcpconn_listadd(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
		/* set the aliases */
		/* first alias is for (peer_ip, peer_port, 0 ,0) -- for finding
		 *  any connection to peer_ip, peer_port
		 * the second alias is for (peer_ip, peer_port, local_addr, 0) -- for
		 *  finding any conenction to peer_ip, peer_port from local_addr
		 * the third alias is for (peer_ip, peer_port, local_addr, local_port)
		 *   -- for finding if a fully specified connection exists */
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &zero_ip, 0,
													new_conn_alias_flags);
		if (likely(c->rcv.dst_ip.af && ! ip_addr_any(&c->rcv.dst_ip))){
			_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip, 0,
													new_conn_alias_flags);
			_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
									c->rcv.dst_port, new_conn_alias_flags);
		}
		/* ignore add_alias errors, there are some valid cases when one
		 *  of the add_alias would fail (e.g. first add_alias for 2 connections
		 *  with the same destination but different src. ip*/
		TCPCONN_UNLOCK;
		DBG("tcpconn_add: hashes: %d:%d:%d, %d\n",
												c->con_aliases[0].hash,
												c->con_aliases[1].hash,
												c->con_aliases[2].hash,
												c->id_hash);
		return c;
	}else{
		LOG(L_CRIT, "tcpconn_add: BUG: null connection pointer\n");
		return 0;
	}
}
  1234. static inline void _tcpconn_detach(struct tcp_connection *c)
  1235. {
  1236. int r;
  1237. tcpconn_listrm(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
  1238. /* remove all the aliases */
  1239. for (r=0; r<c->aliases; r++)
  1240. tcpconn_listrm(tcpconn_aliases_hash[c->con_aliases[r].hash],
  1241. &c->con_aliases[r], next, prev);
  1242. }
/* frees a (detached) connection: destroys any remaining write-buffer queue,
 * the write lock, TLS state (if any) and finally the shm block itself */
static inline void _tcpconn_free(struct tcp_connection* c)
{
#ifdef TCP_ASYNC
	/* release any still-queued write data and its tcp_total_wq accounting */
	if (unlikely(_wbufq_non_empty(c)))
		_wbufq_destroy(&c->wbuf_q);
#endif
	lock_destroy(&c->write_lock);
#ifdef USE_TLS
	if (unlikely(c->type==PROTO_TLS)) tls_tcpconn_clean(c);
#endif
	shm_free(c);
}
/* unsafe tcpconn_rm version (nolocks): detaches the connection from the
 * hashes and frees it; the caller is responsible for the locking */
void _tcpconn_rm(struct tcp_connection* c)
{
	_tcpconn_detach(c);
	_tcpconn_free(c);
}
  1261. void tcpconn_rm(struct tcp_connection* c)
  1262. {
  1263. int r;
  1264. TCPCONN_LOCK;
  1265. tcpconn_listrm(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
  1266. /* remove all the aliases */
  1267. for (r=0; r<c->aliases; r++)
  1268. tcpconn_listrm(tcpconn_aliases_hash[c->con_aliases[r].hash],
  1269. &c->con_aliases[r], next, prev);
  1270. TCPCONN_UNLOCK;
  1271. lock_destroy(&c->write_lock);
  1272. #ifdef USE_TLS
  1273. if ((c->type==PROTO_TLS)&&(c->extra_data)) tls_tcpconn_clean(c);
  1274. #endif
  1275. shm_free(c);
  1276. }
/* finds a connection, if id=0 uses the ip addr, port, local_ip and local port
 *  (host byte order) and tries to find the connection that matches all of
 *   them. Wild cards can be used for local_ip and local_port (a 0 filled
 *   ip address and/or a 0 local port).
 * Connections in state S_CONN_BAD are never returned.
 * returns the matching connection or 0 if none found.
 * WARNING: unprotected (locks) use tcpconn_get unless you really
 * know what you are doing */
struct tcp_connection* _tcpconn_find(int id, struct ip_addr* ip, int port,
										struct ip_addr* l_ip, int l_port)
{

	struct tcp_connection *c;
	struct tcp_conn_alias* a;
	unsigned hash;
	int is_local_ip_any;

#ifdef EXTRA_DEBUG
	DBG("tcpconn_find: %d  port %d\n",id, port);
	if (ip) print_ip("tcpconn_find: ip ", ip, "\n");
#endif
	if (likely(id)){
		/* lookup by connection id in the id hash */
		hash=tcp_id_hash(id);
		for (c=tcpconn_id_hash[hash]; c; c=c->id_next){
#ifdef EXTRA_DEBUG
			DBG("c=%p, c->id=%d, port=%d\n",c, c->id, c->rcv.src_port);
			print_ip("ip=", &c->rcv.src_ip, "\n");
#endif
			if ((id==c->id)&&(c->state!=S_CONN_BAD)) return c;
		}
	}else if (likely(ip)){
		/* lookup by address in the alias hash; l_ip/l_port act as
		 * wildcards when any-ip / 0 */
		hash=tcp_addr_hash(ip, port, l_ip, l_port);
		is_local_ip_any=ip_addr_any(l_ip);
		for (a=tcpconn_aliases_hash[hash]; a; a=a->next){
#ifdef EXTRA_DEBUG
			DBG("a=%p, c=%p, c->id=%d, alias port= %d port=%d\n", a, a->parent,
					a->parent->id, a->port, a->parent->rcv.src_port);
			print_ip("ip=",&a->parent->rcv.src_ip,"\n");
#endif
			if ( (a->parent->state!=S_CONN_BAD) && (port==a->port) &&
					((l_port==0) || (l_port==a->parent->rcv.dst_port)) &&
					(ip_addr_cmp(ip, &a->parent->rcv.src_ip)) &&
					(is_local_ip_any ||
						ip_addr_cmp(l_ip, &a->parent->rcv.dst_ip))
				)
				return a->parent;
		}
	}
	return 0;
}
  1323. /* _tcpconn_find with locks and timeout
  1324. * local_addr contains the desired local ip:port. If null any local address
  1325. * will be used. IN*ADDR_ANY or 0 port are wild cards.
  1326. */
  1327. struct tcp_connection* tcpconn_get(int id, struct ip_addr* ip, int port,
  1328. union sockaddr_union* local_addr,
  1329. ticks_t timeout)
  1330. {
  1331. struct tcp_connection* c;
  1332. struct ip_addr local_ip;
  1333. int local_port;
  1334. local_port=0;
  1335. if (likely(ip)){
  1336. if (unlikely(local_addr)){
  1337. su2ip_addr(&local_ip, local_addr);
  1338. local_port=su_getport(local_addr);
  1339. }else{
  1340. ip_addr_mk_any(ip->af, &local_ip);
  1341. local_port=0;
  1342. }
  1343. }
  1344. TCPCONN_LOCK;
  1345. c=_tcpconn_find(id, ip, port, &local_ip, local_port);
  1346. if (likely(c)){
  1347. atomic_inc(&c->refcnt);
  1348. /* update the timeout only if the connection is not handled
  1349. * by a tcp reader (the tcp reader process uses c->timeout for
  1350. * its own internal timeout and c->timeout will be overwritten
  1351. * anyway on return to tcp_main) */
  1352. if (likely(c->reader_pid==0))
  1353. c->timeout=get_ticks_raw()+timeout;
  1354. }
  1355. TCPCONN_UNLOCK;
  1356. return c;
  1357. }
  1358. /* add c->dst:port, local_addr as an alias for the "id" connection,
  1359. * flags: TCP_ALIAS_FORCE_ADD - add an alias even if a previous one exists
  1360. * TCP_ALIAS_REPLACE - if a prev. alias exists, replace it with the
  1361. * new one
  1362. * returns 0 on success, <0 on failure ( -1 - null c, -2 too many aliases,
  1363. * -3 alias already present and pointing to another connection)
  1364. * WARNING: must be called with TCPCONN_LOCK held */
inline static int _tcpconn_add_alias_unsafe(struct tcp_connection* c, int port,
										struct ip_addr* l_ip, int l_port,
										int flags)
{
	unsigned hash;
	struct tcp_conn_alias* a;
	struct tcp_conn_alias* nxt;
	struct tcp_connection* p;
	int is_local_ip_any;
	int i;
	int r;

	a=0;
	is_local_ip_any=ip_addr_any(l_ip);
	if (likely(c)){
		hash=tcp_addr_hash(&c->rcv.src_ip, port, l_ip, l_port);
		/* search the aliases for an already existing one */
		for (a=tcpconn_aliases_hash[hash], nxt=0; a; a=nxt){
			/* save next now: a TCP_ALIAS_REPLACE below may unlink and
			 * re-link entries of this same hash bucket */
			nxt=a->next;
			if ( (a->parent->state!=S_CONN_BAD) && (port==a->port) &&
					( (l_port==0) || (l_port==a->parent->rcv.dst_port)) &&
					(ip_addr_cmp(&c->rcv.src_ip, &a->parent->rcv.src_ip)) &&
					( is_local_ip_any ||
						ip_addr_cmp(&a->parent->rcv.dst_ip, l_ip))
					){
				/* found */
				if (unlikely(a->parent!=c)){
					/* the matching alias belongs to another connection */
					if (flags & TCP_ALIAS_FORCE_ADD)
						/* still have to walk the whole list to check if
						 * the alias was not already added */
						continue;
					else if (flags & TCP_ALIAS_REPLACE){
						/* remove the alias =>
						 * remove the current alias and all the following
						 * ones from the corresponding connection, shift the
						 * connection aliases array and re-add the other
						 * aliases (!= current one) */
						p=a->parent;
						/* find a's index inside its parent's alias array */
						for (i=0; (i<p->aliases) && (&(p->con_aliases[i])!=a);
								i++);
						if (unlikely(i==p->aliases)){
							LOG(L_CRIT, "BUG: _tcpconn_add_alias_unsafe: "
									" alias %p not found in con %p (id %d)\n",
									a, p, p->id);
							goto error_not_found;
						}
						/* unhash entries i..end first: the memmove below
						 * relocates them, which would corrupt the hash
						 * list links if they stayed hashed */
						for (r=i; r<p->aliases; r++){
							tcpconn_listrm(
								tcpconn_aliases_hash[p->con_aliases[r].hash],
								&p->con_aliases[r], next, prev);
						}
						/* drop slot i by shifting the tail down */
						if (likely((i+1)<p->aliases)){
							memmove(&p->con_aliases[i], &p->con_aliases[i+1],
										(p->aliases-i-1)*
										sizeof(p->con_aliases[0]));
						}
						p->aliases--;
						/* re-add the remaining aliases */
						for (r=i; r<p->aliases; r++){
							tcpconn_listadd(
								tcpconn_aliases_hash[p->con_aliases[r].hash],
								&p->con_aliases[r], next, prev);
						}
					}else
						goto error_sec;
				}else goto ok;
			}
		}
		/* no alias owned by c found => append a new one */
		if (unlikely(c->aliases>=TCP_CON_MAX_ALIASES)) goto error_aliases;
		c->con_aliases[c->aliases].parent=c;
		c->con_aliases[c->aliases].port=port;
		c->con_aliases[c->aliases].hash=hash;
		tcpconn_listadd(tcpconn_aliases_hash[hash],
							&c->con_aliases[c->aliases], next, prev);
		c->aliases++;
	}else goto error_not_found;
ok:
#ifdef EXTRA_DEBUG
	/* a!=0 here means the loop above matched an alias already owned by c */
	if (a) DBG("_tcpconn_add_alias_unsafe: alias already present\n");
	else DBG("_tcpconn_add_alias_unsafe: alias port %d for hash %d, id %d\n",
			port, hash, c->id);
#endif
	return 0;
error_aliases:
	/* too many aliases */
	return -2;
error_not_found:
	/* null connection */
	return -1;
error_sec:
	/* alias already present and pointing to a different connection
	 * (hijack attempt?) */
	return -3;
}
  1458. /* add port as an alias for the "id" connection,
  1459. * returns 0 on success,-1 on failure */
  1460. int tcpconn_add_alias(int id, int port, int proto)
  1461. {
  1462. struct tcp_connection* c;
  1463. int ret;
  1464. struct ip_addr zero_ip;
  1465. int r;
  1466. int alias_flags;
  1467. /* fix the port */
  1468. port=port?port:((proto==PROTO_TLS)?SIPS_PORT:SIP_PORT);
  1469. TCPCONN_LOCK;
  1470. /* check if alias already exists */
  1471. c=_tcpconn_find(id, 0, 0, 0, 0);
  1472. if (likely(c)){
  1473. ip_addr_mk_any(c->rcv.src_ip.af, &zero_ip);
  1474. alias_flags=cfg_get(tcp, tcp_cfg, alias_flags);
  1475. /* alias src_ip:port, 0, 0 */
  1476. ret=_tcpconn_add_alias_unsafe(c, port, &zero_ip, 0,
  1477. alias_flags);
  1478. if (ret<0 && ret!=-3) goto error;
  1479. /* alias src_ip:port, local_ip, 0 */
  1480. ret=_tcpconn_add_alias_unsafe(c, port, &c->rcv.dst_ip, 0,
  1481. alias_flags);
  1482. if (ret<0 && ret!=-3) goto error;
  1483. /* alias src_ip:port, local_ip, local_port */
  1484. ret=_tcpconn_add_alias_unsafe(c, port, &c->rcv.dst_ip, c->rcv.dst_port,
  1485. alias_flags);
  1486. if (unlikely(ret<0)) goto error;
  1487. }else goto error_not_found;
  1488. TCPCONN_UNLOCK;
  1489. return 0;
  1490. error_not_found:
  1491. TCPCONN_UNLOCK;
  1492. LOG(L_ERR, "ERROR: tcpconn_add_alias: no connection found for id %d\n",id);
  1493. return -1;
  1494. error:
  1495. TCPCONN_UNLOCK;
  1496. switch(ret){
  1497. case -2:
  1498. LOG(L_ERR, "ERROR: tcpconn_add_alias: too many aliases (%d)"
  1499. " for connection %p (id %d) %s:%d <- %d\n",
  1500. c->aliases, c, c->id, ip_addr2a(&c->rcv.src_ip),
  1501. c->rcv.src_port, port);
  1502. for (r=0; r<c->aliases; r++){
  1503. LOG(L_ERR, "ERROR: tcpconn_add_alias: alias %d: for %p (%d)"
  1504. " %s:%d <-%d hash %x\n", r, c, c->id,
  1505. ip_addr2a(&c->rcv.src_ip), c->rcv.src_port,
  1506. c->con_aliases[r].port, c->con_aliases[r].hash);
  1507. }
  1508. break;
  1509. case -3:
  1510. LOG(L_ERR, "ERROR: tcpconn_add_alias: possible port"
  1511. " hijack attempt\n");
  1512. LOG(L_ERR, "ERROR: tcpconn_add_alias: alias for %d port %d already"
  1513. " present and points to another connection \n",
  1514. c->id, port);
  1515. break;
  1516. default:
  1517. LOG(L_ERR, "ERROR: tcpconn_add_alias: unkown error %d\n", ret);
  1518. }
  1519. return -1;
  1520. }
  1521. #ifdef TCP_FD_CACHE
  1522. static void tcp_fd_cache_init()
  1523. {
  1524. int r;
  1525. for (r=0; r<TCP_FD_CACHE_SIZE; r++)
  1526. fd_cache[r].fd=-1;
  1527. }
  1528. inline static struct fd_cache_entry* tcp_fd_cache_get(struct tcp_connection *c)
  1529. {
  1530. int h;
  1531. h=c->id%TCP_FD_CACHE_SIZE;
  1532. if ((fd_cache[h].fd>0) && (fd_cache[h].id==c->id) && (fd_cache[h].con==c))
  1533. return &fd_cache[h];
  1534. return 0;
  1535. }
  1536. inline static void tcp_fd_cache_rm(struct fd_cache_entry* e)
  1537. {
  1538. e->fd=-1;
  1539. }
  1540. inline static void tcp_fd_cache_add(struct tcp_connection *c, int fd)
  1541. {
  1542. int h;
  1543. h=c->id%TCP_FD_CACHE_SIZE;
  1544. if (likely(fd_cache[h].fd>0))
  1545. close(fd_cache[h].fd);
  1546. fd_cache[h].fd=fd;
  1547. fd_cache[h].id=c->id;
  1548. fd_cache[h].con=c;
  1549. }
  1550. #endif /* TCP_FD_CACHE */
  1551. inline static int tcpconn_chld_put(struct tcp_connection* tcpconn);
  1552. /* finds a tcpconn & sends on it
  1553. * uses the dst members to, proto (TCP|TLS) and id and tries to send
  1554. * from the "from" address (if non null and id==0)
  1555. * returns: number of bytes written (>=0) on success
  1556. * <0 on error */
int tcp_send(struct dest_info* dst, union sockaddr_union* from,
					char* buf, unsigned len)
{
	struct tcp_connection *c;
	struct tcp_connection *tmp;
	struct ip_addr ip;
	int port;
	int fd;
	long response[2];	/* [conn ptr, command] message sent to tcp_main */
	int n;
	int do_close_fd;
	ticks_t con_lifetime;
#ifdef TCP_ASYNC
	int enable_write_watch;
#endif /* TCP_ASYNC */
#ifdef TCP_FD_CACHE
	struct fd_cache_entry* fd_cache_e;
	int use_fd_cache;

	use_fd_cache=cfg_get(tcp, tcp_cfg, fd_cache);
	fd_cache_e=0;
#endif /* TCP_FD_CACHE */
	do_close_fd=1; /* close the fd on exit */
	port=su_getport(&dst->to);
	con_lifetime=cfg_get(tcp, tcp_cfg, con_lifetime);
	/* try to find an existing connection: by id+address, or by id only
	 * if no destination port is known */
	if (likely(port)){
		su2ip_addr(&ip, &dst->to);
		c=tcpconn_get(dst->id, &ip, port, from, con_lifetime);
	}else if (likely(dst->id)){
		c=tcpconn_get(dst->id, 0, 0, 0, con_lifetime);
	}else{
		LOG(L_CRIT, "BUG: tcp_send called with null id & to\n");
		return -1;
	}
	if (likely(dst->id)){
		if (unlikely(c==0)) {
			if (likely(port)){
				/* try again w/o id */
				c=tcpconn_get(0, &ip, port, from, con_lifetime);
				goto no_id;
			}else{
				LOG(L_ERR, "ERROR: tcp_send: id %d not found, dropping\n",
						dst->id);
				return -1;
			}
		}else goto get_fd;
	}
no_id:
	if (unlikely(c==0)){
		/* no matching connection => open a new one (unless disabled) */
		/* check if connect() is disabled */
		if (cfg_get(tcp, tcp_cfg, no_connect))
			return -1;
		DBG("tcp_send: no open tcp connection found, opening new one\n");
		/* create tcp connection */
		if (likely(from==0)){
			/* check to see if we have to use a specific source addr. */
			switch (dst->to.s.sa_family) {
				case AF_INET:
						from = tcp_source_ipv4;
					break;
#ifdef USE_IPV6
				case AF_INET6:
						from = tcp_source_ipv6;
					break;
#endif
				default:
					/* error, bad af, ignore ... */
					break;
			}
		}
#if defined(TCP_CONNECT_WAIT) && defined(TCP_ASYNC)
		/* async connect path: create the conn here, connect non-blocking,
		 * try a first write and hand the conn + fd over to tcp_main */
		if (likely(cfg_get(tcp, tcp_cfg, tcp_connect_wait) &&
					cfg_get(tcp, tcp_cfg, async) )){
			if (unlikely(*tcp_connections_no >=
							cfg_get(tcp, tcp_cfg, max_connections))){
				LOG(L_ERR, "ERROR: tcp_send %s: maximum number of"
							" connections exceeded (%d/%d)\n",
							su2a(&dst->to, sizeof(dst->to)),
							*tcp_connections_no,
							cfg_get(tcp, tcp_cfg, max_connections));
				return -1;
			}
			c=tcpconn_new(-1, &dst->to, from, 0, dst->proto,
							S_CONN_CONNECT);
			if (unlikely(c==0)){
				LOG(L_ERR, "ERROR: tcp_send %s: could not create new"
							" connection\n",
							su2a(&dst->to, sizeof(dst->to)));
				return -1;
			}
			c->flags|=F_CONN_PENDING|F_CONN_FD_CLOSED;
			atomic_set(&c->refcnt, 2); /* ref from here and from main hash
										  table */
			/* add it to id hash and aliases */
			if (unlikely(tcpconn_add(c)==0)){
				LOG(L_ERR, "ERROR: tcp_send %s: could not add "
								"connection %p\n",
								su2a(&dst->to, sizeof(dst->to)),
								c);
				_tcpconn_free(c);
				n=-1;
				goto end_no_conn;
			}
			/* do connect and if src ip or port changed, update the
			 * aliases */
			if (unlikely((fd=tcpconn_finish_connect(c, from))<0)){
				/* tcpconn_finish_connect will automatically blacklist
				   on error => no need to do it here */
				LOG(L_ERR, "ERROR: tcp_send %s: tcpconn_finish_connect(%p)"
							" failed\n", su2a(&dst->to, sizeof(dst->to)),
							c);
				goto conn_wait_error;
			}
			/* ? TODO: it might be faster just to queue the write directly
			 * and send to main CONN_NEW_PENDING_WRITE */
			/* delay sending the fd to main after the send */
			/* NOTE: no lock here, because the connection is marked as
			 * pending and nobody else will try to write on it. However
			 * this might produce out-of-order writes. If this is not
			 * desired either lock before the write or use
			 * _wbufq_insert(...) */
			n=_tcpconn_write_nb(fd, c, buf, len);
			if (unlikely(n<(int)len)){
				/* short or failed write on the fresh connection */
				if ((n>=0) || errno==EAGAIN || errno==EWOULDBLOCK){
					DBG("tcp_send: pending write on new connection %p "
						" (%d/%d bytes written)\n", c, n, len);
					if (n<0) n=0;
					else{
						TCP_STATS_ESTABLISHED(S_CONN_CONNECT);
						c->state=S_CONN_OK; /* partial write => connect()
												ended */
					}
					/* add to the write queue */
					lock_get(&c->write_lock);
						if (unlikely(_wbufq_insert(c, buf+n, len-n)<0)){
							lock_release(&c->write_lock);
							n=-1;
							LOG(L_ERR, "ERROR: tcp_send %s: EAGAIN and"
									" write queue full or failed for %p\n",
									su2a(&dst->to, sizeof(dst->to)),
									c);
							goto conn_wait_error;
						}
					lock_release(&c->write_lock);
					/* send to tcp_main */
					response[0]=(long)c;
					response[1]=CONN_NEW_PENDING_WRITE;
					if (unlikely(send_fd(unix_tcp_sock, response,
											sizeof(response), fd) <= 0)){
						LOG(L_ERR, "BUG: tcp_send %s: "
									"CONN_NEW_PENDING_WRITE for %p"
									" failed:" " %s (%d)\n",
									su2a(&dst->to, sizeof(dst->to)),
									c, strerror(errno), errno);
						goto conn_wait_error;
					}
					n=len;
					goto end;
				}
				/* if first write failed it's most likely a
				   connect error */
				switch(errno){
					case ENETUNREACH:
					case EHOSTUNREACH: /* not posix for send() */
#ifdef USE_DST_BLACKLIST
						if (cfg_get(core, core_cfg, use_dst_blacklist))
							dst_blacklist_add( BLST_ERR_CONNECT, dst, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_UNREACHABLE(errno, TCP_LADDR(c),
									TCP_LPORT(c), TCP_PSU(c), TCP_PROTO(c));
						break;
					case ECONNREFUSED:
					case ECONNRESET:
#ifdef USE_DST_BLACKLIST
						if (cfg_get(core, core_cfg, use_dst_blacklist))
							dst_blacklist_add( BLST_ERR_CONNECT, dst, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_RST(errno, TCP_LADDR(c),
									TCP_LPORT(c), TCP_PSU(c), TCP_PROTO(c));
						break;
					default:
						TCP_EV_CONNECT_ERR(errno, TCP_LADDR(c),
									TCP_LPORT(c), TCP_PSU(c), TCP_PROTO(c));
				}
				/* error: destroy it directly */
				TCP_STATS_CONNECT_FAILED();
				LOG(L_ERR, "ERROR: tcp_send %s: connect & send "
								" for %p failed:" " %s (%d)\n",
								su2a(&dst->to, sizeof(dst->to)),
								c, strerror(errno), errno);
				goto conn_wait_error;
			}
			/* whole message written in one go */
			LOG(L_INFO, "tcp_send: quick connect for %p\n", c);
			TCP_STATS_ESTABLISHED(S_CONN_CONNECT);
			c->state=S_CONN_OK;
			/* send to tcp_main */
			response[0]=(long)c;
			response[1]=CONN_NEW_COMPLETE;
			if (unlikely(send_fd(unix_tcp_sock, response,
									sizeof(response), fd) <= 0)){
				LOG(L_ERR, "BUG: tcp_send %s: CONN_NEW_COMPLETE for %p"
							" failed:" " %s (%d)\n",
							su2a(&dst->to, sizeof(dst->to)),
							c, strerror(errno), errno);
				goto conn_wait_error;
			}
			goto end;
		}
#endif /* TCP_CONNECT_WAIT && TCP_ASYNC */
		/* blocking connect path */
		if (unlikely((c=tcpconn_connect(&dst->to, from, dst->proto))==0)){
			LOG(L_ERR, "ERROR: tcp_send %s: connect failed\n",
							su2a(&dst->to, sizeof(dst->to)));
			return -1;
		}
		if (likely(c->state==S_CONN_OK))
			TCP_STATS_ESTABLISHED(S_CONN_CONNECT);
		atomic_set(&c->refcnt, 2); /* ref. from here and it will also
									  be added in the tcp_main hash */
		fd=c->s;
		c->flags|=F_CONN_FD_CLOSED; /* not yet opened in main */
		/* ? TODO: it might be faster just to queue the write and
		 * send to main a CONN_NEW_PENDING_WRITE */
		/* send the new tcpconn to "tcp main" */
		response[0]=(long)c;
		response[1]=CONN_NEW;
		n=send_fd(unix_tcp_sock, response, sizeof(response), c->s);
		if (unlikely(n<=0)){
			LOG(L_ERR, "BUG: tcp_send %s: failed send_fd: %s (%d)\n",
					su2a(&dst->to, sizeof(dst->to)),
					strerror(errno), errno);
			/* we can safely delete it, it's not referenced by anybody */
			_tcpconn_free(c);
			n=-1;
			goto end_no_conn;
		}
		goto send_it;
	}
get_fd:
#ifdef TCP_ASYNC
	/* if data is already queued, we don't need the fd any more */
	if (unlikely(cfg_get(tcp, tcp_cfg, async) &&
					(_wbufq_non_empty(c)
#ifdef TCP_CONNECT_WAIT
										|| (c->flags&F_CONN_PENDING)
#endif /* TCP_CONNECT_WAIT */
						) )){
		lock_get(&c->write_lock);
			/* re-check under the lock: the queue might have been flushed
			 * meanwhile */
			if (likely(_wbufq_non_empty(c)
#ifdef TCP_CONNECT_WAIT
						|| (c->flags&F_CONN_PENDING)
#endif /* TCP_CONNECT_WAIT */
						)){
				do_close_fd=0;
				if (unlikely(_wbufq_add(c, buf, len)<0)){
					lock_release(&c->write_lock);
					n=-1;
					goto error;
				}
				n=len;
				lock_release(&c->write_lock);
				goto release_c;
			}
		lock_release(&c->write_lock);
	}
#endif /* TCP_ASYNC */
	/* check if this is not the same reader process holding
	 * c and if so send directly on c->fd */
	if (c->reader_pid==my_pid()){
		DBG("tcp_send: send from reader (%d (%d)), reusing fd\n",
				my_pid(), process_no);
		fd=c->fd;
		do_close_fd=0; /* don't close the fd on exit, it's in use */
#ifdef TCP_FD_CACHE
		use_fd_cache=0; /* don't cache: problems would arise due to the
						   close() on cache eviction (if the fd is still
						   used). If it has to be cached then dup() _must_
						   be used */
	}else if (likely(use_fd_cache &&
							((fd_cache_e=tcp_fd_cache_get(c))!=0))){
		fd=fd_cache_e->fd;
		do_close_fd=0;
		DBG("tcp_send: found fd in cache ( %d, %p, %d)\n",
				fd, c, fd_cache_e->id);
#endif /* TCP_FD_CACHE */
	}else{
		/* ask tcp_main for a duplicate of the connection's fd */
		DBG("tcp_send: tcp connection found (%p), acquiring fd\n", c);
		/* get the fd */
		response[0]=(long)c;
		response[1]=CONN_GET_FD;
		n=send_all(unix_tcp_sock, response, sizeof(response));
		if (unlikely(n<=0)){
			LOG(L_ERR, "BUG: tcp_send: failed to get fd(write):%s (%d)\n",
					strerror(errno), errno);
			n=-1;
			goto release_c;
		}
		DBG("tcp_send, c= %p, n=%d\n", c, n);
		n=receive_fd(unix_tcp_sock, &tmp, sizeof(tmp), &fd, MSG_WAITALL);
		if (unlikely(n<=0)){
			LOG(L_ERR, "BUG: tcp_send: failed to get fd(receive_fd):"
						" %s (%d)\n", strerror(errno), errno);
			n=-1;
			do_close_fd=0;
			goto release_c;
		}
		if (unlikely(c!=tmp)){
			LOG(L_CRIT, "BUG: tcp_send: get_fd: got different connection:"
					" %p (id= %d, refcnt=%d state=%d) != "
					" %p (n=%d)\n",
					c, c->id, atomic_get(&c->refcnt), c->state,
					tmp, n
					);
			n=-1; /* fail */
			goto end;
		}
		DBG("tcp_send: after receive_fd: c= %p n=%d fd=%d\n",c, n, fd);
	}
send_it:
	DBG("tcp_send: sending...\n");
	lock_get(&c->write_lock);
#ifdef TCP_ASYNC
	if (likely(cfg_get(tcp, tcp_cfg, async))){
		/* if something is already queued (or the connect is still
		 * pending), append instead of writing directly to preserve
		 * ordering */
		if (_wbufq_non_empty(c)
#ifdef TCP_CONNECT_WAIT
			|| (c->flags&F_CONN_PENDING)
#endif /* TCP_CONNECT_WAIT */
			){
			if (unlikely(_wbufq_add(c, buf, len)<0)){
				lock_release(&c->write_lock);
				n=-1;
				goto error;
			}
			lock_release(&c->write_lock);
			n=len;
			goto end;
		}
		n=_tcpconn_write_nb(fd, c, buf, len);
	}else{
#endif /* TCP_ASYNC */
#ifdef USE_TLS
		if (c->type==PROTO_TLS)
			n=tls_blocking_write(c, fd, buf, len);
		else
#endif
			/* n=tcp_blocking_write(c, fd, buf, len); */
			n=tsend_stream(fd, buf, len,
							TICKS_TO_S(cfg_get(tcp, tcp_cfg, send_timeout)) *
							1000);
#ifdef TCP_ASYNC
	}
#else /* ! TCP_ASYNC */
	lock_release(&c->write_lock);
#endif /* TCP_ASYNC */
	DBG("tcp_send: after real write: c= %p n=%d fd=%d\n",c, n, fd);
	DBG("tcp_send: buf=\n%.*s\n", (int)len, buf);
	if (unlikely(n<(int)len)){
		/* short write or error */
#ifdef TCP_ASYNC
		if (cfg_get(tcp, tcp_cfg, async) &&
				((n>=0) || errno==EAGAIN || errno==EWOULDBLOCK)){
			/* would block => queue the remainder; note: write_lock is
			 * still held on this path until after _wbufq_add() */
			enable_write_watch=_wbufq_empty(c);
			if (n<0) n=0;
			else if (unlikely(c->state==S_CONN_CONNECT ||
						c->state==S_CONN_ACCEPT)){
				TCP_STATS_ESTABLISHED(c->state);
				c->state=S_CONN_OK; /* something was written */
			}
			if (unlikely(_wbufq_add(c, buf+n, len-n)<0)){
				lock_release(&c->write_lock);
				n=-1;
				goto error;
			}
			lock_release(&c->write_lock);
			n=len;
			if (likely(enable_write_watch)){
				/* the queue was empty before => tcp_main must start
				 * watching this fd for writeability */
				response[0]=(long)c;
				response[1]=CONN_QUEUED_WRITE;
				if (send_all(unix_tcp_sock, response, sizeof(response)) <= 0){
					LOG(L_ERR, "BUG: tcp_send: error return failed "
								"(write):%s (%d)\n", strerror(errno), errno);
					n=-1;
					goto error;
				}
			}
			goto end;
		}else{
			lock_release(&c->write_lock);
		}
#endif /* TCP_ASYNC */
		/* real error: blacklist / account depending on conn state */
		if (unlikely(c->state==S_CONN_CONNECT)){
			switch(errno){
				case ENETUNREACH:
				case EHOSTUNREACH: /* not posix for send() */
#ifdef USE_DST_BLACKLIST
					if (cfg_get(core, core_cfg, use_dst_blacklist))
						dst_blacklist_su(BLST_ERR_CONNECT, c->rcv.proto,
											&c->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
					TCP_EV_CONNECT_UNREACHABLE(errno, TCP_LADDR(c),
									TCP_LPORT(c), TCP_PSU(c), TCP_PROTO(c));
					break;
				case ECONNREFUSED:
				case ECONNRESET:
#ifdef USE_DST_BLACKLIST
					if (cfg_get(core, core_cfg, use_dst_blacklist))
						dst_blacklist_su(BLST_ERR_CONNECT, c->rcv.proto,
											&c->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
					TCP_EV_CONNECT_RST(errno, TCP_LADDR(c), TCP_LPORT(c),
										TCP_PSU(c), TCP_PROTO(c));
					break;
				default:
					TCP_EV_CONNECT_ERR(errno, TCP_LADDR(c), TCP_LPORT(c),
										TCP_PSU(c), TCP_PROTO(c));
			}
			TCP_STATS_CONNECT_FAILED();
		}else{
			switch(errno){
				case ECONNREFUSED:
				case ECONNRESET:
					TCP_STATS_CON_RESET();
					/* no break */
				case ENETUNREACH:
				/*case EHOSTUNREACH: -- not posix */
#ifdef USE_DST_BLACKLIST
					if (cfg_get(core, core_cfg, use_dst_blacklist))
						dst_blacklist_su(BLST_ERR_SEND, c->rcv.proto,
											&c->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
					break;
			}
		}
		LOG(L_ERR, "ERROR: tcp_send: failed to send on %p (%s:%d->%s): %s (%d)"
					"\n", c, ip_addr2a(&c->rcv.dst_ip), c->rcv.dst_port,
					su2a(&c->rcv.src_su, sizeof(c->rcv.src_su)),
					strerror(errno), errno);
#ifdef TCP_ASYNC
error:
#endif /* TCP_ASYNC */
		/* error on the connection , mark it as bad and set 0 timeout */
		c->state=S_CONN_BAD;
		c->timeout=get_ticks_raw();
		/* tell "main" it should drop this (optional it will t/o anyway?)*/
		response[0]=(long)c;
		response[1]=CONN_ERROR;
		if (send_all(unix_tcp_sock, response, sizeof(response))<=0){
			LOG(L_CRIT, "BUG: tcp_send: error return failed (write):%s (%d)\n",
					strerror(errno), errno);
			tcpconn_chld_put(c); /* deref. it manually */
			n=-1;
		}
		/* CONN_ERROR will auto-dec refcnt => we must not call tcpconn_put
		 * if it succeeds */
#ifdef TCP_FD_CACHE
		if (unlikely(fd_cache_e)){
			LOG(L_ERR, "ERROR: tcp_send %s: error on cached fd, removing from"
					" the cache (%d, %p, %d)\n",
					su2a(&c->rcv.src_su, sizeof(c->rcv.src_su)),
					fd, fd_cache_e->con, fd_cache_e->id);
			tcp_fd_cache_rm(fd_cache_e);
			close(fd);
		}else
#endif /* TCP_FD_CACHE */
			if (do_close_fd) close(fd);
		return n; /* error return, no tcpconn_put */
	}
#ifdef TCP_ASYNC
	lock_release(&c->write_lock);
#endif /* TCP_ASYNC */
	/* in non-async mode here we're either in S_CONN_OK or S_CONN_ACCEPT*/
	if (unlikely(c->state==S_CONN_CONNECT || c->state==S_CONN_ACCEPT)){
		TCP_STATS_ESTABLISHED(c->state);
		c->state=S_CONN_OK;
	}
end:
#ifdef TCP_FD_CACHE
	if (unlikely((fd_cache_e==0) && use_fd_cache)){
		tcp_fd_cache_add(c, fd);
	}else
#endif /* TCP_FD_CACHE */
		if (do_close_fd) close(fd);
release_c:
	tcpconn_chld_put(c); /* release c (dec refcnt & free on 0) */
end_no_conn:
	return n;
#ifdef TCP_CONNECT_WAIT
conn_wait_error:
	/* connect or send failed on newly created connection which was not
	 * yet sent to tcp_main (but was already hashed) => don't send to main,
	 * unhash and destroy directly (if refcnt>2 it will be destroyed when the
	 * last sender releases the connection (tcpconn_chld_put(c))) or when
	 * tcp_main receives a CONN_ERROR it*/
	c->state=S_CONN_BAD;
	TCPCONN_LOCK;
	if (c->flags & F_CONN_HASHED){
		/* if some other parallel tcp_send did send CONN_ERROR to
		 * tcp_main, the connection might be already detached */
		_tcpconn_detach(c);
		c->flags&=~F_CONN_HASHED;
		TCPCONN_UNLOCK;
		tcpconn_put(c);
	}else
		TCPCONN_UNLOCK;
	/* dec refcnt -> mark it for destruction */
	tcpconn_chld_put(c);
	return -1;
#endif /* TCP_CONNECT_WAIT */
}
  2063. int tcp_init(struct socket_info* sock_info)
  2064. {
  2065. union sockaddr_union* addr;
  2066. int optval;
  2067. #ifdef HAVE_TCP_ACCEPT_FILTER
  2068. struct accept_filter_arg afa;
  2069. #endif /* HAVE_TCP_ACCEPT_FILTER */
  2070. #ifdef DISABLE_NAGLE
  2071. int flag;
  2072. struct protoent* pe;
  2073. if (tcp_proto_no==-1){ /* if not already set */
  2074. pe=getprotobyname("tcp");
  2075. if (pe==0){
  2076. LOG(L_ERR, "ERROR: tcp_init: could not get TCP protocol number\n");
  2077. tcp_proto_no=-1;
  2078. }else{
  2079. tcp_proto_no=pe->p_proto;
  2080. }
  2081. }
  2082. #endif
  2083. addr=&sock_info->su;
  2084. /* sock_info->proto=PROTO_TCP; */
  2085. if (init_su(addr, &sock_info->address, sock_info->port_no)<0){
  2086. LOG(L_ERR, "ERROR: tcp_init: could no init sockaddr_union\n");
  2087. goto error;
  2088. }
  2089. DBG("tcp_init: added %s\n", su2a(addr, sizeof(*addr)));
  2090. sock_info->socket=socket(AF2PF(addr->s.sa_family), SOCK_STREAM, 0);
  2091. if (sock_info->socket==-1){
  2092. LOG(L_ERR, "ERROR: tcp_init: socket: %s\n", strerror(errno));
  2093. goto error;
  2094. }
  2095. #ifdef DISABLE_NAGLE
  2096. flag=1;
  2097. if ( (tcp_proto_no!=-1) &&
  2098. (setsockopt(sock_info->socket, tcp_proto_no , TCP_NODELAY,
  2099. &flag, sizeof(flag))<0) ){
  2100. LOG(L_ERR, "ERROR: tcp_init: could not disable Nagle: %s\n",
  2101. strerror(errno));
  2102. }
  2103. #endif
  2104. #if !defined(TCP_DONT_REUSEADDR)
  2105. /* Stevens, "Network Programming", Section 7.5, "Generic Socket
  2106. * Options": "...server started,..a child continues..on existing
  2107. * connection..listening server is restarted...call to bind fails
  2108. * ... ALL TCP servers should specify the SO_REUSEADDRE option
  2109. * to allow the server to be restarted in this situation
  2110. *
  2111. * Indeed, without this option, the server can't restart.
  2112. * -jiri
  2113. */
  2114. optval=1;
  2115. if (setsockopt(sock_info->socket, SOL_SOCKET, SO_REUSEADDR,
  2116. (void*)&optval, sizeof(optval))==-1) {
  2117. LOG(L_ERR, "ERROR: tcp_init: setsockopt %s\n",
  2118. strerror(errno));
  2119. goto error;
  2120. }
  2121. #endif
  2122. /* tos */
  2123. optval = tos;
  2124. if (setsockopt(sock_info->socket, IPPROTO_IP, IP_TOS, (void*)&optval,
  2125. sizeof(optval)) ==-1){
  2126. LOG(L_WARN, "WARNING: tcp_init: setsockopt tos: %s\n", strerror(errno));
  2127. /* continue since this is not critical */
  2128. }
  2129. #ifdef HAVE_TCP_DEFER_ACCEPT
  2130. /* linux only */
  2131. if ((optval=cfg_get(tcp, tcp_cfg, defer_accept))){
  2132. if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_DEFER_ACCEPT,
  2133. (void*)&optval, sizeof(optval)) ==-1){
  2134. LOG(L_WARN, "WARNING: tcp_init: setsockopt TCP_DEFER_ACCEPT %s\n",
  2135. strerror(errno));
  2136. /* continue since this is not critical */
  2137. }
  2138. }
  2139. #endif /* HAVE_TCP_DEFFER_ACCEPT */
  2140. #ifdef HAVE_TCP_SYNCNT
  2141. if ((optval=cfg_get(tcp, tcp_cfg, syncnt))){
  2142. if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_SYNCNT, &optval,
  2143. sizeof(optval))<0){
  2144. LOG(L_WARN, "WARNING: tcp_init: failed to set"
  2145. " maximum SYN retr. count: %s\n", strerror(errno));
  2146. }
  2147. }
  2148. #endif
  2149. #ifdef HAVE_TCP_LINGER2
  2150. if ((optval=cfg_get(tcp, tcp_cfg, linger2))){
  2151. if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_LINGER2, &optval,
  2152. sizeof(optval))<0){
  2153. LOG(L_WARN, "WARNING: tcp_init: failed to set"
  2154. " maximum LINGER2 timeout: %s\n", strerror(errno));
  2155. }
  2156. }
  2157. #endif
  2158. init_sock_keepalive(sock_info->socket);
  2159. if (bind(sock_info->socket, &addr->s, sockaddru_len(*addr))==-1){
  2160. LOG(L_ERR, "ERROR: tcp_init: bind(%x, %p, %d) on %s:%d : %s\n",
  2161. sock_info->socket, &addr->s,
  2162. (unsigned)sockaddru_len(*addr),
  2163. sock_info->address_str.s,
  2164. sock_info->port_no,
  2165. strerror(errno));
  2166. goto error;
  2167. }
  2168. if (listen(sock_info->socket, TCP_LISTEN_BACKLOG)==-1){
  2169. LOG(L_ERR, "ERROR: tcp_init: listen(%x, %p, %d) on %s: %s\n",
  2170. sock_info->socket, &addr->s,
  2171. (unsigned)sockaddru_len(*addr),
  2172. sock_info->address_str.s,
  2173. strerror(errno));
  2174. goto error;
  2175. }
  2176. #ifdef HAVE_TCP_ACCEPT_FILTER
  2177. /* freebsd */
  2178. if (cfg_get(tcp, tcp_cfg, defer_accept)){
  2179. memset(&afa, 0, sizeof(afa));
  2180. strcpy(afa.af_name, "dataready");
  2181. if (setsockopt(sock_info->socket, SOL_SOCKET, SO_ACCEPTFILTER,
  2182. (void*)&afa, sizeof(afa)) ==-1){
  2183. LOG(L_WARN, "WARNING: tcp_init: setsockopt SO_ACCEPTFILTER %s\n",
  2184. strerror(errno));
  2185. /* continue since this is not critical */
  2186. }
  2187. }
  2188. #endif /* HAVE_TCP_ACCEPT_FILTER */
  2189. return 0;
  2190. error:
  2191. if (sock_info->socket!=-1){
  2192. close(sock_info->socket);
  2193. sock_info->socket=-1;
  2194. }
  2195. return -1;
  2196. }
  2197. /* close tcp_main's fd from a tcpconn
  2198. * WARNING: call only in tcp_main context */
  2199. inline static void tcpconn_close_main_fd(struct tcp_connection* tcpconn)
  2200. {
  2201. int fd;
  2202. fd=tcpconn->s;
  2203. #ifdef USE_TLS
  2204. /*FIXME: lock ->writelock ? */
  2205. if (tcpconn->type==PROTO_TLS)
  2206. tls_close(tcpconn, fd);
  2207. #endif
  2208. #ifdef TCP_FD_CACHE
  2209. if (likely(cfg_get(tcp, tcp_cfg, fd_cache))) shutdown(fd, SHUT_RDWR);
  2210. #endif /* TCP_FD_CACHE */
  2211. close_again:
  2212. if (unlikely(close(fd)<0)){
  2213. if (errno==EINTR)
  2214. goto close_again;
  2215. LOG(L_ERR, "ERROR: tcpconn_put_destroy; close() failed: %s (%d)\n",
  2216. strerror(errno), errno);
  2217. }
  2218. }
/* dec refcnt & frees the connection if refcnt==0
 * returns 1 if the connection is freed, 0 otherwise
 *
 * WARNING: use only from child processes.
 * By the time the last reference is dropped here, tcp_main must already
 * have closed the fd (F_CONN_FD_CLOSED set) and removed the connection
 * from the hash, the main timer and the io watch sets -- the sanity check
 * below aborts if that contract is violated. */
inline static int tcpconn_chld_put(struct tcp_connection* tcpconn)
{
	if (unlikely(atomic_dec_and_test(&tcpconn->refcnt))){
		DBG("tcpconn_chld_put: destroying connection %p (%d, %d) "
				"flags %04x\n", tcpconn, tcpconn->id,
				tcpconn->s, tcpconn->flags);
		/* sanity checks */
		membar_read_atomic_op(); /* make sure we see the current flags */
		/* the fd must be closed and the connection must not be hashed,
		 * on the main timer or watched for read/write anymore */
		if (unlikely(!(tcpconn->flags & F_CONN_FD_CLOSED) ||
					(tcpconn->flags &
						(F_CONN_HASHED|F_CONN_MAIN_TIMER|
						 F_CONN_READ_W|F_CONN_WRITE_W)) )){
			LOG(L_CRIT, "BUG: tcpconn_chld_put: %p bad flags = %0x\n",
					tcpconn, tcpconn->flags);
			abort();
		}
		_tcpconn_free(tcpconn); /* destroys also the wbuf_q if still present*/
		return 1;
	}
	return 0;
}
/* simple destroy function (the connection should be already removed
 * from the hashes and the fds should not be watched anymore for IO)
 * Unconditionally frees the connection: closes the main fd if still open
 * (decrementing *tcp_connections_no) and releases the structure.
 */
inline static void tcpconn_destroy(struct tcp_connection* tcpconn)
{
	DBG("tcpconn_destroy: destroying connection %p (%d, %d) "
			"flags %04x\n", tcpconn, tcpconn->id,
			tcpconn->s, tcpconn->flags);
	if (unlikely(tcpconn->flags & F_CONN_HASHED)){
		/* caller broke the contract (connection still hashed); log and
		 * recover by detaching from timer and hash ourselves */
		LOG(L_CRIT, "BUG: tcpconn_destroy: called with hashed"
				" connection (%p)\n", tcpconn);
		/* try to continue */
		if (likely(tcpconn->flags & F_CONN_MAIN_TIMER))
			local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
		TCPCONN_LOCK;
			_tcpconn_detach(tcpconn);
		TCPCONN_UNLOCK;
	}
	if (likely(!(tcpconn->flags & F_CONN_FD_CLOSED))){
		/* the main fd is still open: close it and update the global
		 * connection counter */
		tcpconn_close_main_fd(tcpconn);
		(*tcp_connections_no)--;
	}
	_tcpconn_free(tcpconn); /* destroys also the wbuf_q if still present*/
}
  2268. /* tries to destroy the connection: dec. refcnt and if 0 destroys the
  2269. * connection, else it will mark it as BAD and close the main fds
  2270. *
  2271. * returns 1 if the connection was destroyed, 0 otherwise
  2272. *
  2273. * WARNING: - the connection _has_ to be removed from the hash and timer
  2274. * first (use tcpconn_try_unhash() for this )
  2275. * - the fd should not be watched anymore (io_watch_del()...)
  2276. * - must be called _only_ from the tcp_main process context
  2277. * (or else the fd will remain open)
  2278. */
  2279. inline static int tcpconn_put_destroy(struct tcp_connection* tcpconn)
  2280. {
  2281. if (unlikely((tcpconn->flags &
  2282. (F_CONN_WRITE_W|F_CONN_HASHED|F_CONN_MAIN_TIMER|F_CONN_READ_W)) )){
  2283. /* sanity check */
  2284. if (unlikely(tcpconn->flags & F_CONN_HASHED)){
  2285. LOG(L_CRIT, "BUG: tcpconn_destroy: called with hashed and/or"
  2286. "on timer connection (%p), flags = %0x\n",
  2287. tcpconn, tcpconn->flags);
  2288. /* try to continue */
  2289. if (likely(tcpconn->flags & F_CONN_MAIN_TIMER))
  2290. local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
  2291. TCPCONN_LOCK;
  2292. _tcpconn_detach(tcpconn);
  2293. TCPCONN_UNLOCK;
  2294. }else{
  2295. LOG(L_CRIT, "BUG: tcpconn_put_destroy: %p flags = %0x\n",
  2296. tcpconn, tcpconn->flags);
  2297. }
  2298. }
  2299. tcpconn->state=S_CONN_BAD;
  2300. /* in case it's still in a reader timer */
  2301. tcpconn->timeout=get_ticks_raw();
  2302. /* fast close: close fds now */
  2303. if (likely(!(tcpconn->flags & F_CONN_FD_CLOSED))){
  2304. tcpconn_close_main_fd(tcpconn);
  2305. tcpconn->flags|=F_CONN_FD_CLOSED;
  2306. (*tcp_connections_no)--;
  2307. }
  2308. /* all the flags / ops on the tcpconn must be done prior to decrementing
  2309. * the refcnt. and at least a membar_write_atomic_op() mem. barrier or
  2310. * a mb_atomic_* op must * be used to make sure all the changed flags are
  2311. * written into memory prior to the new refcnt value */
  2312. if (unlikely(mb_atomic_dec_and_test(&tcpconn->refcnt))){
  2313. _tcpconn_free(tcpconn);
  2314. return 1;
  2315. }
  2316. return 0;
  2317. }
/* try to remove a connection from the hashes and timer.
 * returns 1 if the connection was removed, 0 if not (connection not in
 * hash)
 *
 * WARNING: call it only in the tcp_main process context or else the
 *          timer removal won't work.
 */
inline static int tcpconn_try_unhash(struct tcp_connection* tcpconn)
{
	if (likely(tcpconn->flags & F_CONN_HASHED)){
		tcpconn->state=S_CONN_BAD;
		if (likely(tcpconn->flags & F_CONN_MAIN_TIMER)){
			local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
			tcpconn->flags&=~F_CONN_MAIN_TIMER;
		}else
			/* in case it's still in a reader timer */
			tcpconn->timeout=get_ticks_raw();
		TCPCONN_LOCK;
			/* re-check the flag under the lock: another process may have
			 * unhashed the connection between the test above and here */
			if (tcpconn->flags & F_CONN_HASHED){
				tcpconn->flags&=~F_CONN_HASHED;
				_tcpconn_detach(tcpconn);
				TCPCONN_UNLOCK;
			}else{
				/* tcp_send was faster and did unhash it itself */
				TCPCONN_UNLOCK;
				return 0;
			}
#ifdef TCP_ASYNC
		/* empty possible write buffers (optional) */
		if (unlikely(_wbufq_non_empty(tcpconn))){
			lock_get(&tcpconn->write_lock);
				/* check again, while holding the lock */
				if (likely(_wbufq_non_empty(tcpconn)))
					_wbufq_destroy(&tcpconn->wbuf_q);
			lock_release(&tcpconn->write_lock);
		}
#endif /* TCP_ASYNC */
		return 1;
	}
	return 0;
}
  2359. #ifdef SEND_FD_QUEUE
/* one queued, not-yet-delivered "pass this connection fd to a tcp child"
 * operation (queued when the child's unix socket would block) */
struct send_fd_info{
	struct tcp_connection* tcp_conn; /* connection whose fd must be sent */
	ticks_t expire;                  /* give up retrying after this tick */
	int unix_sock;                   /* destination child unix socket */
	unsigned int retries; /* debugging */
};

/* growable array-backed queue of pending fd-send operations */
struct tcp_send_fd_q{
	struct send_fd_info* data; /* buffer */
	struct send_fd_info* crt; /* pointer inside the buffer */
	struct send_fd_info* end; /* points after the last valid position */
};

/* queue of fd sends towards the tcp reader children */
static struct tcp_send_fd_q send2child_q;
  2372. static int send_fd_queue_init(struct tcp_send_fd_q *q, unsigned int size)
  2373. {
  2374. q->data=pkg_malloc(size*sizeof(struct send_fd_info));
  2375. if (q->data==0){
  2376. LOG(L_ERR, "ERROR: send_fd_queue_init: out of memory\n");
  2377. return -1;
  2378. }
  2379. q->crt=&q->data[0];
  2380. q->end=&q->data[size];
  2381. return 0;
  2382. }
  2383. static void send_fd_queue_destroy(struct tcp_send_fd_q *q)
  2384. {
  2385. if (q->data){
  2386. pkg_free(q->data);
  2387. q->data=0;
  2388. q->crt=q->end=0;
  2389. }
  2390. }
  2391. static int init_send_fd_queues()
  2392. {
  2393. if (send_fd_queue_init(&send2child_q, SEND_FD_QUEUE_SIZE)!=0)
  2394. goto error;
  2395. return 0;
  2396. error:
  2397. LOG(L_ERR, "ERROR: init_send_fd_queues: init failed\n");
  2398. return -1;
  2399. }
  2400. static void destroy_send_fd_queues()
  2401. {
  2402. send_fd_queue_destroy(&send2child_q);
  2403. }
/* Append a pending "send connection fd to child" entry to the queue,
 * growing the buffer (doubling, capped at MAX_SEND_FD_QUEUE_SIZE) when
 * it is full.  The entry expires SEND_FD_QUEUE_TIMEOUT ticks from now.
 * Returns 0 on success, -1 on error (queue at max capacity or OOM). */
inline static int send_fd_queue_add( struct tcp_send_fd_q* q,
									int unix_sock,
									struct tcp_connection *t)
{
	struct send_fd_info* tmp;
	unsigned long new_size;

	if (q->crt>=q->end){
		/* queue full => compute the grown capacity */
		new_size=q->end-&q->data[0];
		if (new_size< MAX_SEND_FD_QUEUE_SIZE/2){
			new_size*=2;
		}else new_size=MAX_SEND_FD_QUEUE_SIZE;
		/* already at (or beyond) the hard limit => refuse */
		if (unlikely(q->crt>=&q->data[new_size])){
			LOG(L_ERR, "ERROR: send_fd_queue_add: queue full: %ld/%ld\n",
					(long)(q->crt-&q->data[0]-1), new_size);
			goto error;
		}
		LOG(L_CRIT, "INFO: send_fd_queue: queue full: %ld, extending to %ld\n",
				(long)(q->end-&q->data[0]), new_size);
		tmp=pkg_realloc(q->data, new_size*sizeof(struct send_fd_info));
		if (unlikely(tmp==0)){
			LOG(L_ERR, "ERROR: send_fd_queue_add: out of memory\n");
			goto error;
		}
		/* rebase crt onto the (possibly moved) new buffer before
		 * overwriting q->data */
		q->crt=(q->crt-&q->data[0])+tmp;
		q->data=tmp;
		q->end=&q->data[new_size];
	}
	q->crt->tcp_conn=t;
	q->crt->unix_sock=unix_sock;
	q->crt->expire=get_ticks_raw()+SEND_FD_QUEUE_TIMEOUT;
	q->crt->retries=0;
	q->crt++;
	return 0;
error:
	return -1;
}
/* Retry all queued fd sends.  Entries that still get EAGAIN/EWOULDBLOCK
 * and have not expired are compacted in place (via the t cursor) and kept
 * for a later run; entries that fail for good have their connection
 * unhashed and destroyed. */
inline static void send_fd_queue_run(struct tcp_send_fd_q* q)
{
	struct send_fd_info* p;
	struct send_fd_info* t;

	/* p scans every entry, t is the write cursor for survivors */
	for (p=t=&q->data[0]; p<q->crt; p++){
		if (unlikely(send_fd(p->unix_sock, &(p->tcp_conn),
					sizeof(struct tcp_connection*), p->tcp_conn->s)<=0)){
			if ( ((errno==EAGAIN)||(errno==EWOULDBLOCK)) &&
							((s_ticks_t)(p->expire-get_ticks_raw())>0)){
				/* leave in queue for a future try */
				*t=*p;
				t->retries++;
				t++;
			}else{
				/* permanent failure or entry expired => give up on it */
				LOG(L_ERR, "ERROR: run_send_fd_queue: send_fd failed"
						   " on socket %d , queue entry %ld, retries %d,"
						   " connection %p, tcp socket %d, errno=%d (%s) \n",
						   p->unix_sock, (long)(p-&q->data[0]), p->retries,
						   p->tcp_conn, p->tcp_conn->s, errno,
						   strerror(errno));
#ifdef TCP_ASYNC
				if (p->tcp_conn->flags & F_CONN_WRITE_W){
					io_watch_del(&io_h, p->tcp_conn->s, -1, IO_FD_CLOSING);
					p->tcp_conn->flags &=~F_CONN_WRITE_W;
				}
#endif
				p->tcp_conn->flags &= ~F_CONN_READER;
				/* drop the hash reference (if still hashed), then the
				 * caller's reference (destroying on refcnt 0) */
				if (likely(tcpconn_try_unhash(p->tcp_conn)))
					tcpconn_put(p->tcp_conn);
				tcpconn_put_destroy(p->tcp_conn); /* dec refcnt & destroy */
			}
		}
	}
	q->crt=t; /* everything past t was either sent or destroyed */
}
  2475. #else
  2476. #define send_fd_queue_run(q)
  2477. #endif
/* non blocking write() on a tcpconnection, unsafe version (should be called
 * while holding c->write_lock). The fd should be non-blocking.
 * returns number of bytes written on success, -1 on error (and sets errno)
 * EINTR is handled internally by retrying; for TLS connections the
 * (currently still blocking) tls write path is used instead of send().
 */
inline static int _tcpconn_write_nb(int fd, struct tcp_connection* c,
									char* buf, int len)
{
	int n;

again:
#ifdef USE_TLS
	if (unlikely(c->type==PROTO_TLS))
		/* FIXME: tls_nonblocking_write !! */
		n=tls_blocking_write(c, fd, buf, len);
	else
#endif /* USE_TLS */
		/* MSG_NOSIGNAL (where available) suppresses SIGPIPE on a closed
		 * peer, turning it into an EPIPE error instead */
		n=send(fd, buf, len,
#ifdef HAVE_MSG_NOSIGNAL
					MSG_NOSIGNAL
#else
					0
#endif /* HAVE_MSG_NOSIGNAL */
			);
	if (unlikely(n<0)){
		if (errno==EINTR) goto again;
	}
	return n;
}
/* handles io from a tcp child process
 * params: tcp_c - pointer in the tcp_children array, to the entry for
 *                 which an io event was detected
 *         fd_i  - fd index in the fd_array (usefull for optimizing
 *                 io_watch_deletes)
 * returns: handle_* return convention: -1 on error, 0 on EAGAIN (no more
 *          io events queued), >0 on success. success/error refer only to
 *          the reads from the fd.
 *
 * The child sends a fixed-size message: response[0] is the connection
 * pointer, response[1] the command (CONN_RELEASE, CONN_ERROR, ...).
 */
inline static int handle_tcp_child(struct tcp_child* tcp_c, int fd_i)
{
	struct tcp_connection* tcpconn;
	long response[2];
	int cmd;
	int bytes;
	int n;
	ticks_t t;
	ticks_t crt_timeout;
	ticks_t con_lifetime;

	if (unlikely(tcp_c->unix_sock<=0)){
		/* (we can't have a fd==0, 0 is never closed )*/
		LOG(L_CRIT, "BUG: handle_tcp_child: fd %d for %d "
				"(pid %d, ser no %d)\n", tcp_c->unix_sock,
				(int)(tcp_c-&tcp_children[0]), tcp_c->pid, tcp_c->proc_no);
		goto error;
	}
	/* read until sizeof(response)
	 * (this is a SOCK_STREAM so read is not atomic) */
	bytes=recv_all(tcp_c->unix_sock, response, sizeof(response), MSG_DONTWAIT);
	if (unlikely(bytes<(int)sizeof(response))){
		if (bytes==0){
			/* EOF -> bad, child has died */
			DBG("DBG: handle_tcp_child: dead tcp child %d (pid %d, no %d)"
					" (shutting down?)\n", (int)(tcp_c-&tcp_children[0]),
					tcp_c->pid, tcp_c->proc_no );
			/* don't listen on it any more */
			io_watch_del(&io_h, tcp_c->unix_sock, fd_i, 0);
			goto error; /* eof. so no more io here, it's ok to return error */
		}else if (bytes<0){
			/* EAGAIN is ok if we try to empty the buffer
			 * e.g.: SIGIO_RT overflow mode or EPOLL ET */
			if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
				LOG(L_CRIT, "ERROR: handle_tcp_child: read from tcp child %ld "
						" (pid %d, no %d) %s [%d]\n",
						(long)(tcp_c-&tcp_children[0]), tcp_c->pid,
						tcp_c->proc_no, strerror(errno), errno );
			}else{
				bytes=0;
			}
			/* try to ignore ? */
			goto end;
		}else{
			/* should never happen */
			LOG(L_CRIT, "BUG: handle_tcp_child: too few bytes received (%d)\n",
					bytes );
			bytes=0; /* something was read so there is no error; otoh if
					  receive_fd returned less then requested => the receive
					  buffer is empty => no more io queued on this fd */
			goto end;
		}
	}
	DBG("handle_tcp_child: reader response= %lx, %ld from %d \n",
			response[0], response[1], (int)(tcp_c-&tcp_children[0]));
	cmd=response[1];
	tcpconn=(struct tcp_connection*)response[0];
	if (unlikely(tcpconn==0)){
		/* should never happen */
		LOG(L_CRIT, "BUG: handle_tcp_child: null tcpconn pointer received"
				" from tcp child %d (pid %d): %lx, %lx\n",
				(int)(tcp_c-&tcp_children[0]), tcp_c->pid,
				response[0], response[1]) ;
		goto end;
	}
	switch(cmd){
		case CONN_RELEASE:
			/* the reader is done with the connection: give it back to
			 * tcp_main (re-arm timer + read watch), unless it died or
			 * went bad in the meantime */
			tcp_c->busy--;
			if (unlikely(tcpconn_put(tcpconn))){
				tcpconn_destroy(tcpconn);
				break;
			}
			if (unlikely(tcpconn->state==S_CONN_BAD)){
#ifdef TCP_ASYNC
				if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
					io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
					tcpconn->flags &= ~F_CONN_WRITE_W;
				}
#endif /* TCP_ASYNC */
				if (tcpconn_try_unhash(tcpconn))
					tcpconn_put_destroy(tcpconn);
				break;
			}
			/* update the timeout*/
			t=get_ticks_raw();
			con_lifetime=cfg_get(tcp, tcp_cfg, con_lifetime);
			tcpconn->timeout=t+con_lifetime;
			crt_timeout=con_lifetime;
#ifdef TCP_ASYNC
			/* if there is still buffered data to write, the effective
			 * timer must also honour the write-queue timeout */
			if (unlikely(cfg_get(tcp, tcp_cfg, async) &&
							_wbufq_non_empty(tcpconn) )){
				if (unlikely(TICKS_GE(t, tcpconn->wbuf_q.wr_timeout))){
					DBG("handle_tcp_child: wr. timeout on CONN_RELEASE for %p "
							"refcnt= %d\n", tcpconn,
							atomic_get(&tcpconn->refcnt));
					/* timeout */
					if (unlikely(tcpconn->state==S_CONN_CONNECT)){
#ifdef USE_DST_BLACKLIST
						if (cfg_get(core, core_cfg, use_dst_blacklist))
							dst_blacklist_su( BLST_ERR_CONNECT,
													tcpconn->rcv.proto,
													&tcpconn->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_CONNECT_TIMEOUT(0, TCP_LADDR(tcpconn),
									TCP_LPORT(tcpconn), TCP_PSU(tcpconn),
									TCP_PROTO(tcpconn));
						TCP_STATS_CONNECT_FAILED();
					}else{
#ifdef USE_DST_BLACKLIST
						if (cfg_get(core, core_cfg, use_dst_blacklist))
							dst_blacklist_su( BLST_ERR_SEND,
													tcpconn->rcv.proto,
													&tcpconn->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
						TCP_EV_SEND_TIMEOUT(0, &tcpconn->rcv);
						TCP_STATS_SEND_TIMEOUT();
					}
					if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
						io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
						tcpconn->flags&=~F_CONN_WRITE_W;
					}
					if (tcpconn_try_unhash(tcpconn))
						tcpconn_put_destroy(tcpconn);
					break;
				}else{
					/* wake up earlier if the write queue expires before
					 * the connection lifetime */
					crt_timeout=MIN_unsigned(con_lifetime,
											tcpconn->wbuf_q.wr_timeout-t);
				}
			}
#endif /* TCP_ASYNC */
			/* re-activate the timer */
			tcpconn->timer.f=tcpconn_main_timeout;
			local_timer_reinit(&tcpconn->timer);
			local_timer_add(&tcp_main_ltimer, &tcpconn->timer, crt_timeout, t);
			/* must be after the de-ref*/
			tcpconn->flags|=(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD);
			tcpconn->flags&=~(F_CONN_READER|F_CONN_OOB_DATA);
#ifdef TCP_ASYNC
			if (unlikely(tcpconn->flags & F_CONN_WRITE_W))
				n=io_watch_chg(&io_h, tcpconn->s, POLLIN| POLLOUT, -1);
			else
#endif /* TCP_ASYNC */
				n=io_watch_add(&io_h, tcpconn->s, POLLIN, F_TCPCONN, tcpconn);
			if (unlikely(n<0)){
				LOG(L_CRIT, "ERROR: tcp_main: handle_tcp_child: failed to add"
						" new socket to the fd list\n");
				tcpconn->flags&=~F_CONN_READ_W;
#ifdef TCP_ASYNC
				if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
					io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
					tcpconn->flags&=~F_CONN_WRITE_W;
				}
#endif /* TCP_ASYNC */
				if (tcpconn_try_unhash(tcpconn))
					tcpconn_put_destroy(tcpconn);
				break;
			}
			DBG("handle_tcp_child: CONN_RELEASE  %p refcnt= %d\n",
					tcpconn, atomic_get(&tcpconn->refcnt));
			break;
		case CONN_ERROR:
		case CONN_DESTROY:
		case CONN_EOF:
			/* WARNING: this will auto-dec. refcnt! */
			tcp_c->busy--;
			/* main doesn't listen on it => we don't have to delete it
			 if (tcpconn->s!=-1)
				io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
			*/
#ifdef TCP_ASYNC
			if ((tcpconn->flags & F_CONN_WRITE_W) && (tcpconn->s!=-1)){
				io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
				tcpconn->flags&=~F_CONN_WRITE_W;
			}
#endif /* TCP_ASYNC */
			/* drop the hash reference, then the reader's reference */
			if (tcpconn_try_unhash(tcpconn))
				tcpconn_put(tcpconn);
			tcpconn_put_destroy(tcpconn); /* deref & delete if refcnt==0 */
			break;
		default:
			LOG(L_CRIT, "BUG: handle_tcp_child: unknown cmd %d"
					" from tcp reader %d\n",
					cmd, (int)(tcp_c-&tcp_children[0]));
	}
end:
	return bytes;
error:
	return -1;
}
/* handles io from a "generic" ser process (get fd or new_fd from a tcp_send)
 *
 * params: p     - pointer in the ser processes array (pt[]), to the entry for
 *                 which an io event was detected
 *         fd_i  - fd index in the fd_array (usefull for optimizing
 *                 io_watch_deletes)
 * returns: handle_* return convention:
 *          -1 on error reading from the fd,
 *           0 on EAGAIN  or when no  more io events are queued
 *             (receive buffer empty),
 *          >0 on successfull reads from the fd (the receive buffer might
 *             be non-empty).
 *
 * The message layout matches handle_tcp_child: response[0] is the
 * connection pointer, response[1] the command; an fd may be passed
 * alongside via receive_fd().
 */
inline static int handle_ser_child(struct process_table* p, int fd_i)
{
	struct tcp_connection* tcpconn;
	long response[2];
	int cmd;
	int bytes;
	int ret;
	int fd;
	int flags;
	ticks_t t;
	ticks_t con_lifetime;
#ifdef TCP_ASYNC
	ticks_t nxt_timeout;
#endif /* TCP_ASYNC */

	ret=-1;
	if (unlikely(p->unix_sock<=0)){
		/* (we can't have a fd==0, 0 is never closed )*/
		LOG(L_CRIT, "BUG: handle_ser_child: fd %d for %d "
				"(pid %d)\n", p->unix_sock, (int)(p-&pt[0]), p->pid);
		goto error;
	}
	/* get all bytes and the fd (if transmitted)
	 * (this is a SOCK_STREAM so read is not atomic) */
	bytes=receive_fd(p->unix_sock, response, sizeof(response), &fd,
						MSG_DONTWAIT);
	if (unlikely(bytes<(int)sizeof(response))){
		/* too few bytes read */
		if (bytes==0){
			/* EOF -> bad, child has died */
			DBG("DBG: handle_ser_child: dead child %d, pid %d"
					" (shutting down?)\n", (int)(p-&pt[0]), p->pid);
			/* don't listen on it any more */
			io_watch_del(&io_h, p->unix_sock, fd_i, 0);
			goto error; /* child dead => no further io events from it */
		}else if (bytes<0){
			/* EAGAIN is ok if we try to empty the buffer
			 * e.g: SIGIO_RT overflow mode or EPOLL ET */
			if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
				LOG(L_CRIT, "ERROR: handle_ser_child: read from child %d  "
						"(pid %d):  %s [%d]\n", (int)(p-&pt[0]), p->pid,
						strerror(errno), errno);
				ret=-1;
			}else{
				ret=0;
			}
			/* try to ignore ? */
			goto end;
		}else{
			/* should never happen */
			LOG(L_CRIT, "BUG: handle_ser_child: too few bytes received (%d)\n",
					bytes );
			ret=0; /* something was read so there is no error; otoh if
					  receive_fd returned less then requested => the receive
					  buffer is empty => no more io queued on this fd */
			goto end;
		}
	}
	ret=1; /* something was received, there might be more queued */
	DBG("handle_ser_child: read response= %lx, %ld, fd %d from %d (%d)\n",
					response[0], response[1], fd, (int)(p-&pt[0]), p->pid);
	cmd=response[1];
	tcpconn=(struct tcp_connection*)response[0];
	if (unlikely(tcpconn==0)){
		LOG(L_CRIT, "BUG: handle_ser_child: null tcpconn pointer received"
				 " from child %d (pid %d): %lx, %lx\n",
				 	(int)(p-&pt[0]), p->pid, response[0], response[1]) ;
		goto end;
	}
	switch(cmd){
		case CONN_ERROR:
			LOG(L_ERR, "handle_ser_child: ERROR: received CON_ERROR for %p"
					" (id %d), refcnt %d\n",
					tcpconn, tcpconn->id, atomic_get(&tcpconn->refcnt));
#ifdef TCP_CONNECT_WAIT
			/* if the connection is pending => it might be on the way of
			 * reaching tcp_main (e.g. CONN_NEW_COMPLETE or
			 * CONN_NEW_PENDING_WRITE) => it cannot be destroyed here */
			if ( !(tcpconn->flags & F_CONN_PENDING) &&
					tcpconn_try_unhash(tcpconn) )
				tcpconn_put(tcpconn);
#else /* ! TCP_CONNECT_WAIT */
			if ( tcpconn_try_unhash(tcpconn) )
				tcpconn_put(tcpconn);
#endif /* TCP_CONNECT_WAIT */
			/* stop watching the fd before destroying the connection */
			if ( ((tcpconn->flags & (F_CONN_WRITE_W|F_CONN_READ_W)) ) &&
					(tcpconn->s!=-1)){
				io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
				tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W);
			}
			tcpconn_put_destroy(tcpconn); /* dec refcnt & destroy on 0 */
			break;
		case CONN_GET_FD:
			/* send the requested FD  */
			/* WARNING: take care of setting refcnt properly to
			 * avoid race conditions */
			if (unlikely(send_fd(p->unix_sock, &tcpconn, sizeof(tcpconn),
							tcpconn->s)<=0)){
				LOG(L_ERR, "ERROR: handle_ser_child: send_fd failed\n");
			}
			break;
		case CONN_NEW:
			/* update the fd in the requested tcpconn*/
			/* WARNING: take care of setting refcnt properly to
			 * avoid race conditions */
			if (unlikely(fd==-1)){
				LOG(L_CRIT, "BUG: handle_ser_child: CONN_NEW:"
							" no fd received\n");
				tcpconn->flags|=F_CONN_FD_CLOSED;
				tcpconn_put_destroy(tcpconn);
				break;
			}
			(*tcp_connections_no)++;
			tcpconn->s=fd;
			/* add tcpconn to the list*/
			tcpconn_add(tcpconn);
			/* update the timeout*/
			t=get_ticks_raw();
			con_lifetime=cfg_get(tcp, tcp_cfg, con_lifetime);
			tcpconn->timeout=t+con_lifetime;
			/* activate the timer (already properly init. in tcpconn_new())
			 * no need for reinit */
			local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
								con_lifetime, t);
			/* branch-free conditional flag set: the F_CONN_WRITE_W bit is
			 * or-ed in only when F_CONN_WANTS_WR is set */
			tcpconn->flags|=(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD)
#ifdef TCP_ASYNC
							/* not used for now, the connection is sent to tcp_main
							 * before knowing whether we can write on it or we should
							 * wait */
							| (((int)!(tcpconn->flags & F_CONN_WANTS_WR)-1)&
								F_CONN_WRITE_W)
#endif /* TCP_ASYNC */
				;
			tcpconn->flags&=~F_CONN_FD_CLOSED;
			flags=POLLIN
#ifdef TCP_ASYNC
					/* not used for now, the connection is sent to tcp_main
					 * before knowing if we can write on it or we should
					 * wait */
					| (((int)!(tcpconn->flags & F_CONN_WANTS_WR)-1) & POLLOUT)
#endif /* TCP_ASYNC */
					;
			if (unlikely(
					io_watch_add(&io_h, tcpconn->s, flags,
												F_TCPCONN, tcpconn)<0)){
				LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed to add"
						" new socket to the fd list\n");
				tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W);
				tcpconn_try_unhash(tcpconn); /*  unhash & dec refcnt */
				tcpconn_put_destroy(tcpconn);
			}
			break;
#ifdef TCP_ASYNC
		case CONN_QUEUED_WRITE:
			/* received only if the wr. queue is empty and a write finishes
			 * with EAGAIN (common after connect())
			 * it should only enable write watching on the fd. The connection
			 * should be  already in the hash. The refcnt is not changed.
			 */
			if (unlikely((tcpconn->state==S_CONN_BAD) ||
							!(tcpconn->flags & F_CONN_HASHED) ))
				break;
			if (!(tcpconn->flags & F_CONN_WANTS_WR)){
				tcpconn->flags|=F_CONN_WANTS_WR;
				t=get_ticks_raw();
				/* shorten the main timer if the write-queue timeout would
				 * fire before the current connection timeout */
				if (likely((tcpconn->flags & F_CONN_MAIN_TIMER) &&
					(TICKS_LT(tcpconn->wbuf_q.wr_timeout, tcpconn->timeout)) &&
						TICKS_LT(t, tcpconn->wbuf_q.wr_timeout) )){
					/* _wbufq_nonempty() is guaranteed here */
					/* update the timer */
					local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
					local_timer_reinit(&tcpconn->timer);
					local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
										tcpconn->wbuf_q.wr_timeout-t, t);
					DBG("tcp_main: handle_ser_child: CONN_QUEUED_WRITE; %p "
							"timeout adjusted to %d s\n", tcpconn,
							TICKS_TO_S(tcpconn->wbuf_q.wr_timeout-t));
				}
				if (!(tcpconn->flags & F_CONN_WRITE_W)){
					tcpconn->flags|=F_CONN_WRITE_W;
					if (!(tcpconn->flags & F_CONN_READ_W)){
						/* not watched at all yet => add a write-only watch */
						if (unlikely(io_watch_add(&io_h, tcpconn->s, POLLOUT,
												F_TCPCONN, tcpconn)<0)){
							LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child:"
										" failed to enable write watch on"
										" socket\n");
							if (tcpconn_try_unhash(tcpconn))
								tcpconn_put_destroy(tcpconn);
							break;
						}
					}else{
						/* already watched for read => just add POLLOUT */
						if (unlikely(io_watch_chg(&io_h, tcpconn->s,
													POLLIN|POLLOUT, -1)<0)){
							LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child:"
									" failed to change socket watch events\n");
							io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
							tcpconn->flags&=~F_CONN_READ_W;
							if (tcpconn_try_unhash(tcpconn))
								tcpconn_put_destroy(tcpconn);
							break;
						}
					}
				}
			}else{
				LOG(L_WARN, "tcp_main: hanlder_ser_child: connection %p"
							" already watched for write\n", tcpconn);
			}
			break;
#ifdef TCP_CONNECT_WAIT
		case CONN_NEW_COMPLETE:
		case CONN_NEW_PENDING_WRITE:
			/* received when a pending connect completes in the same
			 * tcp_send() that initiated it
			 * the connection is already in the hash with F_CONN_PENDING
			 * flag (added by tcp_send()) and refcnt at least 1 (for the
			 * hash)*/
			tcpconn->flags&=~(F_CONN_PENDING|F_CONN_FD_CLOSED);
			if (unlikely((tcpconn->state==S_CONN_BAD) || (fd==-1))){
				if (unlikely(fd==-1))
					LOG(L_CRIT, "BUG: handle_ser_child: CONN_NEW_COMPLETE:"
								" no fd received\n");
				else
					LOG(L_WARN, "WARNING: handle_ser_child: CONN_NEW_COMPLETE:"
							" received connection with error\n");
				tcpconn->flags|=F_CONN_FD_CLOSED;
				tcpconn->state=S_CONN_BAD;
				tcpconn_try_unhash(tcpconn);
				tcpconn_put_destroy(tcpconn);
				break;
			}
			(*tcp_connections_no)++;
			tcpconn->s=fd;
			/* update the timeout*/
			t=get_ticks_raw();
			con_lifetime=cfg_get(tcp, tcp_cfg, con_lifetime);
			tcpconn->timeout=t+con_lifetime;
			nxt_timeout=con_lifetime;
			if (unlikely(cmd==CONN_NEW_COMPLETE)){
				/* check if needs to be watched for write */
				lock_get(&tcpconn->write_lock);
					/* if queue non empty watch it for write */
					flags=(_wbufq_empty(tcpconn)-1)&POLLOUT;
				lock_release(&tcpconn->write_lock);
				if (flags){
					if (TICKS_LT(tcpconn->wbuf_q.wr_timeout, tcpconn->timeout)
							&& TICKS_LT(t, tcpconn->wbuf_q.wr_timeout))
						nxt_timeout=tcpconn->wbuf_q.wr_timeout-t;
					tcpconn->flags|=F_CONN_WRITE_W|F_CONN_WANTS_WR;
				}
				/* activate the timer (already properly init. in
				   tcpconn_new()) no need for reinit */
				local_timer_add(&tcp_main_ltimer, &tcpconn->timer, nxt_timeout,
									t);
				tcpconn->flags|=F_CONN_MAIN_TIMER|F_CONN_READ_W|
								F_CONN_WANTS_RD;
			}else{
				/* CONN_NEW_PENDING_WRITE */
				/* no need to check, we have something queued for write */
				flags=POLLOUT;
				if (TICKS_LT(tcpconn->wbuf_q.wr_timeout, tcpconn->timeout)
						&& TICKS_LT(t, tcpconn->wbuf_q.wr_timeout))
					nxt_timeout=tcpconn->wbuf_q.wr_timeout-t;
				/* activate the timer (already properly init. in
				   tcpconn_new()) no need for reinit */
				local_timer_add(&tcp_main_ltimer, &tcpconn->timer, nxt_timeout,
									t);
				tcpconn->flags|=F_CONN_MAIN_TIMER|F_CONN_READ_W|
								F_CONN_WANTS_RD |
								F_CONN_WRITE_W|F_CONN_WANTS_WR;
			}
			flags|=POLLIN;
			if (unlikely(
					io_watch_add(&io_h, tcpconn->s, flags,
												F_TCPCONN, tcpconn)<0)){
				LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed to add"
						" new socket to the fd list\n");
				tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W);
				tcpconn_try_unhash(tcpconn); /*  unhash & dec refcnt */
				tcpconn_put_destroy(tcpconn);
			}
			break;
#endif /* TCP_CONNECT_WAIT */
#endif /* TCP_ASYNC */
		default:
			LOG(L_CRIT, "BUG: handle_ser_child: unknown cmd %d\n", cmd);
	}
end:
	return ret;
error:
	return -1;
}
/* sends a tcpconn + fd to a choosen child
 * Child selection: round-robin scan starting from the last used slot,
 * picking the first idle child or, failing that, the least busy one.
 * Returns 0 on success, -1 on error. */
inline static int send2child(struct tcp_connection* tcpconn)
{
	int i;
	int min_busy;
	int idx;
	static int crt=0; /* current child */
	int last;

	min_busy=tcp_children[0].busy;
	idx=0;
	last=crt+tcp_children_no;
	for (; crt<last; crt++){
		i=crt%tcp_children_no;
		if (!tcp_children[i].busy){
			/* idle child found => stop searching */
			idx=i;
			min_busy=0;
			break;
		}else if (min_busy>tcp_children[i].busy){
			min_busy=tcp_children[i].busy;
			idx=i;
		}
	}
	crt=idx+1; /* next time we start with crt%tcp_children_no */

	tcp_children[idx].busy++;
	tcp_children[idx].n_reqs++;
	if (unlikely(min_busy)){
		DBG("WARNING: send2child: no free tcp receiver, "
				" connection passed to the least busy one (%d)\n",
				min_busy);
	}
	DBG("send2child: to tcp child %d %d(%d), %p\n", idx,
					tcp_children[idx].proc_no,
					tcp_children[idx].pid, tcpconn);
	/* first make sure this child doesn't have pending request for
	 * tcp_main (to avoid a possible deadlock: e.g. child wants to
	 * send a release command, but the master fills its socket buffer
	 * with new connection commands => deadlock) */
	/* answer tcp_send requests first */
	while(handle_ser_child(&pt[tcp_children[idx].proc_no], -1)>0);
	/* process tcp readers requests */
	while(handle_tcp_child(&tcp_children[idx], -1)>0);
#ifdef SEND_FD_QUEUE
	/* if queue full, try to queue the io */
	if (unlikely(send_fd(tcp_children[idx].unix_sock, &tcpconn,
							sizeof(tcpconn), tcpconn->s)<=0)){
		if ((errno==EAGAIN)||(errno==EWOULDBLOCK)){
			/* child's socket buffer is full => queue the send for a
			 * later retry by send_fd_queue_run() */
			/* FIXME: remove after debugging */
			 LOG(L_CRIT, "INFO: tcp child %d, socket %d: queue full,"
					 	" %d requests queued (total handled %d)\n",
					idx, tcp_children[idx].unix_sock, min_busy,
					tcp_children[idx].n_reqs-1);
			if (send_fd_queue_add(&send2child_q, tcp_children[idx].unix_sock,
						tcpconn)!=0){
				LOG(L_ERR, "ERROR: send2child: queue send op. failed\n");
				return -1;
			}
		}else{
			LOG(L_ERR, "ERROR: send2child: send_fd failed\n");
			return -1;
		}
	}
#else
	if (unlikely(send_fd(tcp_children[idx].unix_sock, &tcpconn,
						sizeof(tcpconn), tcpconn->s)<=0)){
		LOG(L_ERR, "ERROR: send2child: send_fd failed\n");
		return -1;
	}
#endif
	return 0;
}
/* handles a new connection, called internally by tcp_main_loop/handle_io.
 * params: si - pointer to one of the tcp socket_info structures on which
 *              an io event was detected (connection attempt)
 * returns: handle_* return convention: -1 on error, 0 on EAGAIN (no more
 *          io events queued), >0 on success. success/error refer only to
 *          the accept.
 */
static inline int handle_new_connect(struct socket_info* si)
{
	union sockaddr_union su;
	union sockaddr_union sock_name;
	unsigned sock_name_len;
	union sockaddr_union* dst_su;
	struct tcp_connection* tcpconn;
	socklen_t su_len;
	int new_sock;
	
	/* got a connection on r */
	su_len=sizeof(su);
	new_sock=accept(si->socket, &(su.s), &su_len);
	if (unlikely(new_sock==-1)){
		if ((errno==EAGAIN)||(errno==EWOULDBLOCK))
			return 0; /* nothing (more) queued on the listen socket */
		LOG(L_ERR, "WARNING: handle_new_connect: error while accepting"
				" connection(%d): %s\n", errno, strerror(errno));
		return -1;
	}
	/* enforce the global connection limit; the socket was already accepted,
	 * so close it but still report accept success */
	if (unlikely(*tcp_connections_no>=cfg_get(tcp, tcp_cfg, max_connections))){
		LOG(L_ERR, "ERROR: maximum number of connections exceeded: %d/%d\n",
					*tcp_connections_no,
					cfg_get(tcp, tcp_cfg, max_connections));
		close(new_sock);
		TCP_STATS_LOCAL_REJECT();
		return 1; /* success, because the accept was successful */
	}
	if (unlikely(init_sock_opt_accept(new_sock)<0)){
		LOG(L_ERR, "ERROR: handle_new_connect: init_sock_opt failed\n");
		close(new_sock);
		return 1; /* success, because the accept was successful */
	}
	(*tcp_connections_no)++;
	TCP_STATS_ESTABLISHED(S_CONN_ACCEPT);
	
	/* local destination defaults to the listen socket address */
	dst_su=&si->su;
	if (unlikely(si->flags & SI_IS_ANY)){
		/* INADDR_ANY => get local dst via getsockname() */
		sock_name_len=sizeof(sock_name);
		if (getsockname(new_sock, &sock_name.s, &sock_name_len)!=0){
			LOG(L_ERR, "ERROR: handle_new_connect:"
						" getsockname failed: %s(%d)\n",
						strerror(errno), errno);
			/* go on with the 0.0.0.0 dst from the sock_info */
		}else{
			dst_su=&sock_name;
		}
	}
	/* add socket to list */
	tcpconn=tcpconn_new(new_sock, &su, dst_su, si, si->proto, S_CONN_ACCEPT);
	if (likely(tcpconn)){
#ifdef TCP_PASS_NEW_CONNECTION_ON_DATA
		/* keep the new connection in tcp_main; it is handed to a child
		 * only when the first data arrives (see handle_tcpconn_ev) */
		atomic_set(&tcpconn->refcnt, 1); /* safe, not yet available to the
											outside world */
		tcpconn_add(tcpconn);
		/* activate the timer */
		local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
								cfg_get(tcp, tcp_cfg, con_lifetime),
								get_ticks_raw());
		tcpconn->flags|=(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD);
		if (unlikely(io_watch_add(&io_h, tcpconn->s, POLLIN,
													F_TCPCONN, tcpconn)<0)){
			LOG(L_CRIT, "ERROR: tcp_main: handle_new_connect: failed to add"
						" new socket to the fd list\n");
			tcpconn->flags&=~F_CONN_READ_W;
			if (tcpconn_try_unhash(tcpconn))
				tcpconn_put_destroy(tcpconn);
		}
#else
		/* pass the connection to a tcp reader right away;
		 * refcnt 2: one reference for the hash, one for the reader child */
		atomic_set(&tcpconn->refcnt, 2); /* safe, not yet available to the
											outside world */
		/* prepare it for passing to a child */
		tcpconn->flags|=F_CONN_READER;
		tcpconn_add(tcpconn);
		DBG("handle_new_connect: new connection from %s: %p %d flags: %04x\n",
			su2a(&su, sizeof(su)), tcpconn, tcpconn->s, tcpconn->flags);
		if(unlikely(send2child(tcpconn)<0)){
			LOG(L_ERR,"ERROR: handle_new_connect: no children "
					"available\n");
			/* undo: drop the reader reference, unhash & destroy */
			tcpconn->flags&=~F_CONN_READER;
			tcpconn_put(tcpconn);
			tcpconn_try_unhash(tcpconn);
			tcpconn_put_destroy(tcpconn);
		}
#endif
	}else{ /*tcpconn==0 */
		LOG(L_ERR, "ERROR: handle_new_connect: tcpconn_new failed, "
				"closing socket\n");
		close(new_sock);
		(*tcp_connections_no)--;
	}
	return 1; /* accept() was successful */
}
  3174. /* handles an io event on one of the watched tcp connections
  3175. *
  3176. * params: tcpconn - pointer to the tcp_connection for which we have an io ev.
  3177. * fd_i - index in the fd_array table (needed for delete)
  3178. * returns: handle_* return convention, but on success it always returns 0
  3179. * (because it's one-shot, after a succesful execution the fd is
  3180. * removed from tcp_main's watch fd list and passed to a child =>
  3181. * tcp_main is not interested in further io events that might be
  3182. * queued for this fd)
  3183. */
  3184. inline static int handle_tcpconn_ev(struct tcp_connection* tcpconn, short ev,
  3185. int fd_i)
  3186. {
  3187. #ifdef TCP_ASYNC
  3188. int empty_q;
  3189. int bytes;
  3190. #endif /* TCP_ASYNC */
  3191. /* is refcnt!=0 really necessary?
  3192. * No, in fact it's a bug: I can have the following situation: a send only
  3193. * tcp connection used by n processes simultaneously => refcnt = n. In
  3194. * the same time I can have a read event and this situation is perfectly
  3195. * valid. -- andrei
  3196. */
  3197. #if 0
  3198. if ((tcpconn->refcnt!=0)){
  3199. /* FIXME: might be valid for sigio_rt iff fd flags are not cleared
  3200. * (there is a short window in which it could generate a sig
  3201. * that would be catched by tcp_main) */
  3202. LOG(L_CRIT, "BUG: handle_tcpconn_ev: io event on referenced"
  3203. " tcpconn (%p), refcnt=%d, fd=%d\n",
  3204. tcpconn, tcpconn->refcnt, tcpconn->s);
  3205. return -1;
  3206. }
  3207. #endif
  3208. /* pass it to child, so remove it from the io watch list and the local
  3209. * timer */
  3210. #ifdef TCP_ASYNC
  3211. empty_q=0; /* warning fix */
  3212. if (unlikely((ev & (POLLOUT|POLLERR|POLLHUP)) &&
  3213. (tcpconn->flags & F_CONN_WRITE_W))){
  3214. if (unlikely((ev & (POLLERR|POLLHUP)) ||
  3215. (wbufq_run(tcpconn->s, tcpconn, &empty_q)<0))){
  3216. if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)<0)){
  3217. LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(1) failed:"
  3218. " for %p, fd %d\n", tcpconn, tcpconn->s);
  3219. }
  3220. if ((tcpconn->flags & F_CONN_READ_W) && (ev & POLLIN)){
  3221. /* connection is watched for read and there is a read event
  3222. * (unfortunately if we have POLLIN here we don't know if
  3223. * there's really any data in the read buffer or the POLLIN
  3224. * was generated by the error or EOF => to avoid loosing
  3225. * data it's safer to either directly check the read buffer
  3226. * or try a read)*/
  3227. /* in most cases the read buffer will be empty, so in general
  3228. * is cheaper to check it here and then send the
  3229. * conn. to a a child only if needed (another syscall + at
  3230. * least 2 * syscalls in the reader + ...) */
  3231. if ((ioctl(tcpconn->s, FIONREAD, &bytes)>=0) && (bytes>0)){
  3232. tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W|
  3233. F_CONN_WANTS_RD|F_CONN_WANTS_WR);
  3234. tcpconn->flags|=F_CONN_FORCE_EOF|F_CONN_WR_ERROR;
  3235. goto send_to_child;
  3236. }
  3237. /* if bytes==0 or ioctl failed, destroy the connection now */
  3238. }
  3239. tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W|
  3240. F_CONN_WANTS_RD|F_CONN_WANTS_WR);
  3241. if (unlikely(!tcpconn_try_unhash(tcpconn))){
  3242. LOG(L_CRIT, "BUG: tcpconn_ev: unhashed connection %p\n",
  3243. tcpconn);
  3244. }
  3245. if (unlikely(ev & POLLERR)){
  3246. if (unlikely(tcpconn->state=S_CONN_CONNECT)){
  3247. #ifdef USE_DST_BLACKLIST
  3248. if (cfg_get(core, core_cfg, use_dst_blacklist))
  3249. dst_blacklist_su(BLST_ERR_CONNECT, tcpconn->rcv.proto,
  3250. &tcpconn->rcv.src_su, 0);
  3251. #endif /* USE_DST_BLACKLIST */
  3252. TCP_EV_CONNECT_ERR(0, TCP_LADDR(tcpconn),
  3253. TCP_LPORT(tcpconn), TCP_PSU(tcpconn),
  3254. TCP_PROTO(tcpconn));
  3255. TCP_STATS_CONNECT_FAILED();
  3256. }else{
  3257. #ifdef USE_DST_BLACKLIST
  3258. if (cfg_get(core, core_cfg, use_dst_blacklist))
  3259. dst_blacklist_su(BLST_ERR_SEND, tcpconn->rcv.proto,
  3260. &tcpconn->rcv.src_su, 0);
  3261. #endif /* USE_DST_BLACKLIST */
  3262. TCP_STATS_CON_RESET(); /* FIXME: it could != RST */
  3263. }
  3264. }
  3265. tcpconn_put_destroy(tcpconn);
  3266. goto error;
  3267. }
  3268. if (empty_q){
  3269. tcpconn->flags&=~F_CONN_WANTS_WR;
  3270. if (!(tcpconn->flags & F_CONN_READ_W)){
  3271. if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)==-1)){
  3272. LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(2)"
  3273. " failed:" " for %p, fd %d\n",
  3274. tcpconn, tcpconn->s);
  3275. goto error;
  3276. }
  3277. }else{
  3278. if (unlikely(io_watch_chg(&io_h, tcpconn->s,
  3279. POLLIN, fd_i)==-1)){
  3280. LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_chg(1)"
  3281. " failed:" " for %p, fd %d\n",
  3282. tcpconn, tcpconn->s);
  3283. goto error;
  3284. }
  3285. }
  3286. tcpconn->flags&=~F_CONN_WRITE_W;
  3287. }
  3288. ev&=~POLLOUT; /* clear POLLOUT */
  3289. }
  3290. if (likely(ev && (tcpconn->flags & F_CONN_READ_W))){
  3291. /* if still some other IO event (POLLIN|POLLHUP|POLLERR) and
  3292. * connection is still watched in tcp_main for reads, send it to a
  3293. * child and stop watching it for input (but continue watching for
  3294. * writes if needed): */
  3295. if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
  3296. if (unlikely(io_watch_chg(&io_h, tcpconn->s, POLLOUT, fd_i)==-1)){
  3297. LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_chg(2)"
  3298. " failed:" " for %p, fd %d\n",
  3299. tcpconn, tcpconn->s);
  3300. goto error;
  3301. }
  3302. }else
  3303. #else
  3304. {
  3305. #endif /* TCP_ASYNC */
  3306. if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)==-1)){
  3307. LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(3)"
  3308. " failed:" " for %p, fd %d\n",
  3309. tcpconn, tcpconn->s);
  3310. goto error;
  3311. }
  3312. #ifdef TCP_ASYNC
  3313. send_to_child:
  3314. #endif
  3315. DBG("tcp: DBG: sendig to child, events %x\n", ev);
  3316. #ifdef POLLRDHUP
  3317. tcpconn->flags|=((int)!(ev & (POLLRDHUP|POLLHUP|POLLERR)) -1) &
  3318. F_CONN_EOF_SEEN;
  3319. #else /* POLLRDHUP */
  3320. tcpconn->flags|=((int)!(ev & (POLLHUP|POLLERR)) -1) & F_CONN_EOF_SEEN;
  3321. #endif /* POLLRDHUP */
  3322. tcpconn->flags|= ((int)!(ev & POLLPRI) -1) & F_CONN_OOB_DATA;
  3323. tcpconn->flags|=F_CONN_READER;
  3324. local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
  3325. tcpconn->flags&=~(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD);
  3326. tcpconn_ref(tcpconn); /* refcnt ++ */
  3327. if (unlikely(send2child(tcpconn)<0)){
  3328. LOG(L_ERR,"ERROR: handle_tcpconn_ev: no children available\n");
  3329. tcpconn->flags&=~F_CONN_READER;
  3330. #ifdef TCP_ASYNC
  3331. if (tcpconn->flags & F_CONN_WRITE_W){
  3332. if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)<0)){
  3333. LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(4)"
  3334. " failed:" " for %p, fd %d\n",
  3335. tcpconn, tcpconn->s);
  3336. }
  3337. tcpconn->flags&=~F_CONN_WRITE_W;
  3338. }
  3339. #endif /* TCP_ASYNC */
  3340. tcpconn_put(tcpconn);
  3341. tcpconn_try_unhash(tcpconn);
  3342. tcpconn_put_destroy(tcpconn); /* because of the tcpconn_ref() */
  3343. }
  3344. }
  3345. return 0; /* we are not interested in possibly queued io events,
  3346. the fd was either passed to a child, closed, or for writes,
  3347. everything possible was already written */
  3348. error:
  3349. return -1;
  3350. }
  3351. /* generic handle io routine, it will call the appropiate
  3352. * handle_xxx() based on the fd_map type
  3353. *
  3354. * params: fm - pointer to a fd hash entry
  3355. * idx - index in the fd_array (or -1 if not known)
  3356. * return: -1 on error
  3357. * 0 on EAGAIN or when by some other way it is known that no more
  3358. * io events are queued on the fd (the receive buffer is empty).
  3359. * Usefull to detect when there are no more io events queued for
  3360. * sigio_rt, epoll_et, kqueue.
  3361. * >0 on successfull read from the fd (when there might be more io
  3362. * queued -- the receive buffer might still be non-empty)
  3363. */
  3364. inline static int handle_io(struct fd_map* fm, short ev, int idx)
  3365. {
  3366. int ret;
  3367. /* update the local config */
  3368. cfg_update();
  3369. switch(fm->type){
  3370. case F_SOCKINFO:
  3371. ret=handle_new_connect((struct socket_info*)fm->data);
  3372. break;
  3373. case F_TCPCONN:
  3374. ret=handle_tcpconn_ev((struct tcp_connection*)fm->data, ev, idx);
  3375. break;
  3376. case F_TCPCHILD:
  3377. ret=handle_tcp_child((struct tcp_child*)fm->data, idx);
  3378. break;
  3379. case F_PROC:
  3380. ret=handle_ser_child((struct process_table*)fm->data, idx);
  3381. break;
  3382. case F_NONE:
  3383. LOG(L_CRIT, "BUG: handle_io: empty fd map: %p {%d, %d, %p},"
  3384. " idx %d\n", fm, fm->fd, fm->type, fm->data, idx);
  3385. goto error;
  3386. default:
  3387. LOG(L_CRIT, "BUG: handle_io: uknown fd type %d\n", fm->type);
  3388. goto error;
  3389. }
  3390. return ret;
  3391. error:
  3392. return -1;
  3393. }
/* timer handler for tcpconnection handled by tcp_main.
 * params: t    - current ticks
 *         tl   - the expired timer link (embedded in the connection)
 *         data - the owning struct tcp_connection*
 * returns: a new relative timeout (in ticks) to re-arm the timer, or 0 to
 *          stop it (connection is then detached and destroyed) */
static ticks_t tcpconn_main_timeout(ticks_t t, struct timer_ln* tl, void* data)
{
	struct tcp_connection *c;
	int fd;
	int tcp_async;
	
	c=(struct tcp_connection*)data;
	/* or (struct tcp...*)(tl-offset(c->timer)) */
	
#ifdef TCP_ASYNC
	DBG( "tcp_main: entering timer for %p (ticks=%d, timeout=%d (%d s), "
			"wr_timeout=%d (%d s)), write queue: %d bytes\n",
			c, t, c->timeout, TICKS_TO_S(c->timeout-t),
			c->wbuf_q.wr_timeout, TICKS_TO_S(c->wbuf_q.wr_timeout-t),
			c->wbuf_q.queued);
	
	tcp_async=cfg_get(tcp, tcp_cfg, async);
	/* NOTE: deliberate bitwise '|' (not '||') below — presumably to avoid
	 * branches; assumes the operands are side-effect-free 0/1 values
	 * (TODO confirm for _wbufq_empty()) */
	if (likely(TICKS_LT(t, c->timeout) && ( !tcp_async | _wbufq_empty(c) |
					TICKS_LT(t, c->wbuf_q.wr_timeout)) )){
		/* neither the idle timeout nor the write timeout hit yet =>
		 * re-arm with the time left until the nearest one */
		if (unlikely(tcp_async && _wbufq_non_empty(c)))
			return (ticks_t)MIN_unsigned(c->timeout-t, c->wbuf_q.wr_timeout-t);
		else
			return (ticks_t)(c->timeout - t);
	}
	/* if time out due to write, add it to the blacklist */
	if (tcp_async && _wbufq_non_empty(c) && TICKS_GE(t, c->wbuf_q.wr_timeout)){
		if (unlikely(c->state==S_CONN_CONNECT)){
			/* never got connected => blacklist/account as connect failure */
#ifdef USE_DST_BLACKLIST
			if (cfg_get(core, core_cfg, use_dst_blacklist))
				dst_blacklist_su(BLST_ERR_CONNECT, c->rcv.proto,
									&c->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_CONNECT_TIMEOUT(0, TCP_LADDR(c), TCP_LPORT(c), TCP_PSU(c),
									TCP_PROTO(c));
			TCP_STATS_CONNECT_FAILED();
		}else{
			/* established connection, the queued write never drained */
#ifdef USE_DST_BLACKLIST
			if (cfg_get(core, core_cfg, use_dst_blacklist))
				dst_blacklist_su(BLST_ERR_SEND, c->rcv.proto,
									&c->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
			TCP_EV_SEND_TIMEOUT(0, &c->rcv);
			TCP_STATS_SEND_TIMEOUT();
		}
	}
#else /* ! TCP_ASYNC */
	if (TICKS_LT(t, c->timeout)){
		/* timeout extended, exit */
		return (ticks_t)(c->timeout - t);
	}
#endif /* TCP_ASYNC */
	DBG("tcp_main: timeout for %p\n", c);
	/* the connection has expired: detach it from the hashes (if still
	 * there) so no new user can grab it, then destroy it */
	if (likely(c->flags & F_CONN_HASHED)){
		c->flags&=~(F_CONN_HASHED|F_CONN_MAIN_TIMER);
		c->state=S_CONN_BAD;
		TCPCONN_LOCK;
			_tcpconn_detach(c);
		TCPCONN_UNLOCK;
	}else{
		c->flags&=~F_CONN_MAIN_TIMER;
		LOG(L_CRIT, "BUG: tcp_main: timer: called with unhashed connection %p"
				"\n", c);
		tcpconn_ref(c); /* ugly hack to try to go on */
	}
	fd=c->s;
	if (likely(fd>0)){
		/* stop watching the fd before it gets closed by the destroy below */
		if (likely(c->flags & (F_CONN_READ_W|F_CONN_WRITE_W))){
			io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
			c->flags&=~(F_CONN_READ_W|F_CONN_WRITE_W);
		}
	}
	TCP_EV_IDLE_CONN_CLOSED(0, &c->rcv);
	TCP_STATS_CON_TIMEOUT();
	tcpconn_put_destroy(c);
	return 0; /* do not re-arm the timer */
}
  3468. static inline void tcp_timer_run()
  3469. {
  3470. ticks_t ticks;
  3471. ticks=get_ticks_raw();
  3472. if (unlikely((ticks-tcp_main_prev_ticks)<TCPCONN_TIMEOUT_MIN_RUN)) return;
  3473. tcp_main_prev_ticks=ticks;
  3474. local_timer_run(&tcp_main_ltimer, ticks);
  3475. }
/* destroys every tcp connection in the id hash (shutdown teardown).
 * keep in sync with tcpconn_destroy, the "delete" part should be
 * the same except for io_watch_del..
 * Note: this function is called only on shutdown by the main ser process via
 * cleanup(). However it's also safe to call it from the tcp_main process.
 * => with the ser shutdown exception, it cannot execute in parallel
 *    with tcpconn_add() or tcpconn_destroy()*/
static inline void tcpconn_destroy_all()
{
	struct tcp_connection *c, *next;
	unsigned h;
	int fd;
	
	TCPCONN_LOCK;
	for(h=0; h<TCP_ID_HASH_SIZE; h++){
		c=tcpconn_id_hash[h];
		while(c){
			next=c->id_next; /* save it now, c is removed & freed below */
			if (is_tcp_main){
				/* we cannot close or remove the fd if we are not in the
				 * tcp main proc.*/
				if ((c->flags & F_CONN_MAIN_TIMER)){
					local_timer_del(&tcp_main_ltimer, &c->timer);
					c->flags&=~F_CONN_MAIN_TIMER;
				} /* else still in some reader */
				fd=c->s;
				if (fd>0 && (c->flags & (F_CONN_READ_W|F_CONN_WRITE_W))){
					io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
					c->flags&=~(F_CONN_READ_W|F_CONN_WRITE_W);
				}
			}else{
				fd=-1; /* not tcp_main => leave the fd alone */
			}
#ifdef USE_TLS
			if (fd>0 && c->type==PROTO_TLS)
				tls_close(c, fd);
#endif
			_tcpconn_rm(c);
			if (fd>0) {
#ifdef TCP_FD_CACHE
				/* shutdown() before close(): presumably so the peer sees
				 * the close even if the fd is still duplicated in some
				 * process' fd cache — TODO confirm */
				if (likely(cfg_get(tcp, tcp_cfg, fd_cache)))
					shutdown(fd, SHUT_RDWR);
#endif /* TCP_FD_CACHE */
				close(fd);
			}
			(*tcp_connections_no)--;
			c=next;
		}
	}
	TCPCONN_UNLOCK;
}
/* tcp main loop: initializes the io watch state (listen sockets + unix
 * sockets towards the other processes), then runs the io wait loop matching
 * the configured poll method forever. Does not return on success; on
 * unrecoverable errors it cleans up and exits the whole process. */
void tcp_main_loop()
{
	struct socket_info* si;
	int r;
	
	is_tcp_main=1; /* mark this process as tcp main */
	
	tcp_main_max_fd_no=get_max_open_fds();
	/* init send fd queues (here because we want mem. alloc only in the tcp
	 * process */
#ifdef SEND_FD_QUEUE
	if (init_send_fd_queues()<0){
		LOG(L_CRIT, "ERROR: init_tcp: could not init send fd queues\n");
		goto error;
	}
#endif
	/* init io_wait (here because we want the memory allocated only in
	 * the tcp_main process) */
	if (init_io_wait(&io_h, tcp_main_max_fd_no, tcp_poll_method)<0)
		goto error;
	/* init: start watching all the fds*/
	
	/* init local timer */
	tcp_main_prev_ticks=get_ticks_raw();
	if (init_local_timer(&tcp_main_ltimer, get_ticks_raw())!=0){
		LOG(L_ERR, "ERROR: init_tcp: failed to init local timer\n");
		goto error;
	}
#ifdef TCP_FD_CACHE
	if (cfg_get(tcp, tcp_cfg, fd_cache)) tcp_fd_cache_init();
#endif /* TCP_FD_CACHE */
	
	/* add all the sockets we listen on for connections */
	for (si=tcp_listen; si; si=si->next){
		if ((si->proto==PROTO_TCP) &&(si->socket!=-1)){
			if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
				LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
							"listen socket to the fd list\n");
				goto error;
			}
		}else{
			LOG(L_CRIT, "BUG: tcp_main_loop: non tcp address in tcp_listen\n");
		}
	}
#ifdef USE_TLS
	/* same for the tls listen sockets, when tls is enabled & loaded */
	if (!tls_disable && tls_loaded()){
		for (si=tls_listen; si; si=si->next){
			if ((si->proto==PROTO_TLS) && (si->socket!=-1)){
				if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
					LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
							"tls listen socket to the fd list\n");
					goto error;
				}
			}else{
				LOG(L_CRIT, "BUG: tcp_main_loop: non tls address"
						" in tls_listen\n");
			}
		}
	}
#endif
	/* add all the unix sockets used for communication with other ser
	 * processes (get fd, new connection a.s.o) */
	for (r=1; r<process_no; r++){
		if (pt[r].unix_sock>0) /* we can't have 0, we never close it!*/
			if (io_watch_add(&io_h, pt[r].unix_sock, POLLIN,F_PROC, &pt[r])<0){
				LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
						"process %d unix socket to the fd list\n", r);
				goto error;
			}
	}
	/* add all the unix sockets used for communication with the tcp childs */
	for (r=0; r<tcp_children_no; r++){
		if (tcp_children[r].unix_sock>0)/*we can't have 0, we never close it!*/
			if (io_watch_add(&io_h, tcp_children[r].unix_sock, POLLIN,
									F_TCPCHILD, &tcp_children[r]) <0){
				LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
						"tcp child %d unix socket to the fd list\n", r);
				goto error;
			}
	}
	
	/* initialize the cfg framework */
	if (cfg_child_init()) goto error;
	
	/* main loop: one dedicated infinite loop per poll method; every
	 * iteration waits for & handles io, runs the queued fd sends and
	 * then the local timers */
	switch(io_h.poll_method){
		case POLL_POLL:
			while(1){
				/* wait and process IO */
				io_wait_loop_poll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				/* remove old connections */
				tcp_timer_run();
			}
			break;
#ifdef HAVE_SELECT
		case POLL_SELECT:
			while(1){
				io_wait_loop_select(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_SIGIO_RT
		case POLL_SIGIO_RT:
			while(1){
				io_wait_loop_sigio_rt(&io_h, TCP_MAIN_SELECT_TIMEOUT);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_EPOLL
		case POLL_EPOLL_LT:
			while(1){
				io_wait_loop_epoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
		case POLL_EPOLL_ET:
			while(1){
				io_wait_loop_epoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 1);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_KQUEUE
		case POLL_KQUEUE:
			while(1){
				io_wait_loop_kqueue(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_DEVPOLL
		case POLL_DEVPOLL:
			while(1){
				io_wait_loop_devpoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
		default:
			LOG(L_CRIT, "BUG: tcp_main_loop: no support for poll method "
					" %s (%d)\n",
					poll_method_name(io_h.poll_method), io_h.poll_method);
			goto error;
	}
error:
#ifdef SEND_FD_QUEUE
	destroy_send_fd_queues();
#endif
	destroy_io_wait(&io_h);
	LOG(L_CRIT, "ERROR: tcp_main_loop: exiting...");
	exit(-1);
}
/* cleanup before exit: tears down all tcp connections and frees the shared
 * memory structures, hashes and locks allocated by init_tcp(). Safe to call
 * with a partially initialized state (init_tcp() error path): every free is
 * guarded and the pointer is reset afterwards. */
void destroy_tcp()
{
	if (tcpconn_id_hash){
		if (tcpconn_lock)
			TCPCONN_UNLOCK; /* hack: force-unlock the tcp lock in case
							   some process was terminated while holding
							   it; this will allow an almost gracious
							   shutdown */
		tcpconn_destroy_all();
		shm_free(tcpconn_id_hash);
		tcpconn_id_hash=0;
	}
	DESTROY_TCP_STATS();
	if (tcp_connections_no){
		shm_free(tcp_connections_no);
		tcp_connections_no=0;
	}
#ifdef TCP_ASYNC
	if (tcp_total_wq){
		shm_free(tcp_total_wq);
		tcp_total_wq=0;
	}
#endif /* TCP_ASYNC */
	if (connection_id){
		shm_free(connection_id);
		connection_id=0;
	}
	if (tcpconn_aliases_hash){
		shm_free(tcpconn_aliases_hash);
		tcpconn_aliases_hash=0;
	}
	/* the lock goes last: tcpconn_destroy_all() above still needs it */
	if (tcpconn_lock){
		lock_destroy(tcpconn_lock);
		lock_dealloc((void*)tcpconn_lock);
		tcpconn_lock=0;
	}
	if (tcp_children){
		pkg_free(tcp_children);
		tcp_children=0;
	}
	destroy_local_timer(&tcp_main_ltimer);
}
  3724. int init_tcp()
  3725. {
  3726. char* poll_err;
  3727. tcp_options_check();
  3728. if (tcp_cfg==0){
  3729. BUG("tcp_cfg not initialized\n");
  3730. goto error;
  3731. }
  3732. /* init lock */
  3733. tcpconn_lock=lock_alloc();
  3734. if (tcpconn_lock==0){
  3735. LOG(L_CRIT, "ERROR: init_tcp: could not alloc lock\n");
  3736. goto error;
  3737. }
  3738. if (lock_init(tcpconn_lock)==0){
  3739. LOG(L_CRIT, "ERROR: init_tcp: could not init lock\n");
  3740. lock_dealloc((void*)tcpconn_lock);
  3741. tcpconn_lock=0;
  3742. goto error;
  3743. }
  3744. /* init globals */
  3745. tcp_connections_no=shm_malloc(sizeof(int));
  3746. if (tcp_connections_no==0){
  3747. LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
  3748. goto error;
  3749. }
  3750. *tcp_connections_no=0;
  3751. if (INIT_TCP_STATS()!=0) goto error;
  3752. connection_id=shm_malloc(sizeof(int));
  3753. if (connection_id==0){
  3754. LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
  3755. goto error;
  3756. }
  3757. *connection_id=1;
  3758. #ifdef TCP_ASYNC
  3759. tcp_total_wq=shm_malloc(sizeof(*tcp_total_wq));
  3760. if (tcp_total_wq==0){
  3761. LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
  3762. goto error;
  3763. }
  3764. #endif /* TCP_ASYNC */
  3765. /* alloc hashtables*/
  3766. tcpconn_aliases_hash=(struct tcp_conn_alias**)
  3767. shm_malloc(TCP_ALIAS_HASH_SIZE* sizeof(struct tcp_conn_alias*));
  3768. if (tcpconn_aliases_hash==0){
  3769. LOG(L_CRIT, "ERROR: init_tcp: could not alloc address hashtable\n");
  3770. goto error;
  3771. }
  3772. tcpconn_id_hash=(struct tcp_connection**)shm_malloc(TCP_ID_HASH_SIZE*
  3773. sizeof(struct tcp_connection*));
  3774. if (tcpconn_id_hash==0){
  3775. LOG(L_CRIT, "ERROR: init_tcp: could not alloc id hashtable\n");
  3776. goto error;
  3777. }
  3778. /* init hashtables*/
  3779. memset((void*)tcpconn_aliases_hash, 0,
  3780. TCP_ALIAS_HASH_SIZE * sizeof(struct tcp_conn_alias*));
  3781. memset((void*)tcpconn_id_hash, 0,
  3782. TCP_ID_HASH_SIZE * sizeof(struct tcp_connection*));
  3783. /* fix config variables */
  3784. poll_err=check_poll_method(tcp_poll_method);
  3785. /* set an appropriate poll method */
  3786. if (poll_err || (tcp_poll_method==0)){
  3787. tcp_poll_method=choose_poll_method();
  3788. if (poll_err){
  3789. LOG(L_ERR, "ERROR: init_tcp: %s, using %s instead\n",
  3790. poll_err, poll_method_name(tcp_poll_method));
  3791. }else{
  3792. LOG(L_INFO, "init_tcp: using %s as the io watch method"
  3793. " (auto detected)\n", poll_method_name(tcp_poll_method));
  3794. }
  3795. }else{
  3796. LOG(L_INFO, "init_tcp: using %s io watch method (config)\n",
  3797. poll_method_name(tcp_poll_method));
  3798. }
  3799. return 0;
  3800. error:
  3801. /* clean-up */
  3802. destroy_tcp();
  3803. return -1;
  3804. }
  3805. #ifdef TCP_CHILD_NON_BLOCKING
  3806. /* returns -1 on error */
  3807. static int set_non_blocking(int s)
  3808. {
  3809. int flags;
  3810. /* non-blocking */
  3811. flags=fcntl(s, F_GETFL);
  3812. if (flags==-1){
  3813. LOG(L_ERR, "ERROR: set_non_blocking: fnctl failed: (%d) %s\n",
  3814. errno, strerror(errno));
  3815. goto error;
  3816. }
  3817. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  3818. LOG(L_ERR, "ERROR: set_non_blocking: fcntl: set non-blocking failed:"
  3819. " (%d) %s\n", errno, strerror(errno));
  3820. goto error;
  3821. }
  3822. return 0;
  3823. error:
  3824. return -1;
  3825. }
  3826. #endif
  3827. /* returns -1 on error, 0 on success */
  3828. int tcp_fix_child_sockets(int* fd)
  3829. {
  3830. #ifdef TCP_CHILD_NON_BLOCKING
  3831. if ((set_non_blocking(fd[0])<0) ||
  3832. (set_non_blocking(fd[1])<0)){
  3833. return -1;
  3834. }
  3835. #endif
  3836. return 0;
  3837. }
/* starts the tcp processes: reserves the needed fds and forks
 * tcp_children_no reader children, each running tcp_receive_loop().
 * returns 0 on success, -1 on error (fork or alloc failure) */
int tcp_init_children()
{
	int r;
	int reader_fd_1; /* for comm. with the tcp children read */
	pid_t pid;
	struct socket_info *si;
	
	/* estimate max fd. no:
	 * 1 tcp send unix socket/all_proc,
	 *  + 1 udp sock/udp proc + 1 tcp_child sock/tcp child*
	 *  + no_listen_tcp */
	/* count the tcp (and tls) listen sockets into r */
	for(r=0, si=tcp_listen; si; si=si->next, r++);
#ifdef USE_TLS
	if (! tls_disable)
		for (si=tls_listen; si; si=si->next, r++);
#endif
	register_fds(r+tcp_max_connections+get_max_procs()-1 /* tcp main */);
#if 0
	/* disabled: older static fd estimation, kept for reference */
	tcp_max_fd_no=get_max_procs()*2 +r-1 /* timer */ +3; /* stdin/out/err*/
	/* max connections can be temporarily exceeded with estimated_process_count
	 * - tcp_main (tcpconn_connect called simultaneously in all all the
	 *  processes) */
	tcp_max_fd_no+=tcp_max_connections+get_max_procs()-1 /* tcp main */;
#endif
	/* alloc the children array */
	tcp_children=pkg_malloc(sizeof(struct tcp_child)*tcp_children_no);
	if (tcp_children==0){
		LOG(L_ERR, "ERROR: tcp_init_children: out of memory\n");
		goto error;
	}
	/* create the tcp sock_info structures */
	/* copy the sockets --moved to main_loop*/
	
	/* fork children & create the socket pairs*/
	for(r=0; r<tcp_children_no; r++){
		child_rank++;
		pid=fork_tcp_process(child_rank, "tcp receiver", r, &reader_fd_1);
		if (pid<0){
			LOG(L_ERR, "ERROR: tcp_main: fork failed: %s\n",
					strerror(errno));
			goto error;
		}else if (pid>0){
			/* parent: nothing to do here (bookkeeping presumably done
			 * inside fork_tcp_process() — not visible from this file) */
		}else{
			/* child */
			bind_address=0; /* force a SEGFAULT if someone uses a non-init.
							   bind address on tcp */
			/* the child stays in this loop; presumably it never returns */
			tcp_receive_loop(reader_fd_1);
		}
	}
	return 0;
error:
	return -1;
}
  3891. void tcp_get_info(struct tcp_gen_info *ti)
  3892. {
  3893. ti->tcp_readers=tcp_children_no;
  3894. ti->tcp_max_connections=tcp_max_connections;
  3895. ti->tcp_connections_no=*tcp_connections_no;
  3896. #ifdef TCP_ASYNC
  3897. ti->tcp_write_queued=*tcp_total_wq;
  3898. #else
  3899. ti->tcp_write_queued=0;
  3900. #endif /* TCP_ASYNC */
  3901. }
  3902. #endif