tcp_main.c 116 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000
  1. /*
  2. * $Id$
  3. *
  4. * Copyright (C) 2001-2003 FhG Fokus
  5. *
  6. * This file is part of ser, a free SIP server.
  7. *
  8. * ser is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version
  12. *
  13. * For a license to use the ser software under conditions
  14. * other than those described here, or to purchase support for this
  15. * software, please contact iptel.org by e-mail at the following addresses:
  16. * [email protected]
  17. *
  18. * ser is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License
  24. * along with this program; if not, write to the Free Software
  25. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  26. */
  27. /*
  28. * History:
  29. * --------
  30. * 2002-11-29 created by andrei
  31. * 2002-12-11 added tcp_send (andrei)
  32. * 2003-01-20 locking fixes, hashtables (andrei)
  33. * 2003-02-20 s/lock_t/gen_lock_t/ to avoid a conflict on solaris (andrei)
  34. * 2003-02-25 Nagle is disabled if -DDISABLE_NAGLE (andrei)
  35. * 2003-03-29 SO_REUSEADDR before calling bind to allow
  36. * server restart, Nagle set on the (hopefuly)
  37. * correct socket (jiri)
  38. * 2003-03-31 always try to find the corresponding tcp listen socket for
  39. * a temp. socket and store in in *->bind_address: added
  40. * find_tcp_si, modified tcpconn_connect (andrei)
  41. * 2003-04-14 set sockopts to TOS low delay (andrei)
  42. * 2003-06-30 moved tcp new connect checking & handling to
  43. * handle_new_connect (andrei)
  44. * 2003-07-09 tls_close called before closing the tcp connection (andrei)
  45. * 2003-10-24 converted to the new socket_info lists (andrei)
  46. * 2003-10-27 tcp port aliases support added (andrei)
  47. * 2003-11-04 always lock before manipulating refcnt; sendchild
  48. * does not inc refcnt by itself anymore (andrei)
  49. * 2003-11-07 different unix sockets are used for fd passing
  50. * to/from readers/writers (andrei)
  51. * 2003-11-17 handle_new_connect & tcp_connect will close the
  52. * new socket if tcpconn_new return 0 (e.g. out of mem) (andrei)
  53. * 2003-11-28 tcp_blocking_write & tcp_blocking_connect added (andrei)
  54. * 2004-11-08 dropped find_tcp_si and replaced with find_si (andrei)
  55. * 2005-06-07 new tcp optimized code, supports epoll (LT), sigio + real time
  56. * signals, poll & select (andrei)
  57. * 2005-06-26 *bsd kqueue support (andrei)
  58. * 2005-07-04 solaris /dev/poll support (andrei)
  59. * 2005-07-08 tcp_max_connections, tcp_connection_lifetime, don't accept
  60. * more connections if tcp_max_connections is exceeded (andrei)
  61. * 2005-10-21 cleanup all the open connections on exit
 * decrement the no. of open connections on timeout too (andrei)
 * 2006-01-30 queue send_fd request and execute them at the end of the
  63. * poll loop (#ifdef) (andrei)
  64. * process all children requests, before attempting to send
  65. * them new stuff (fixes some deadlocks) (andrei)
  66. * 2006-02-03 timers are run only once per s (andrei)
  67. * tcp children fds can be non-blocking; send fds are queued on
  68. * EAGAIN; lots of bug fixes (andrei)
  69. * 2006-02-06 better tcp_max_connections checks, tcp_connections_no moved to
  70. * shm (andrei)
  71. * 2006-04-12 tcp_send() changed to use struct dest_info (andrei)
  72. * 2006-11-02 switched to atomic ops for refcnt, locking improvements
  73. * (andrei)
  74. * 2006-11-04 switched to raw ticks (to fix conversion errors which could
  75. * result in inf. lifetime) (andrei)
  76. * 2007-07-25 tcpconn_connect can now bind the socket on a specified
  77. * source addr/port (andrei)
  78. * 2007-07-26 tcp_send() and tcpconn_get() can now use a specified source
  79. * addr./port (andrei)
  80. * 2007-08-23 getsockname() for INADDR_ANY(SI_IS_ANY) sockets (andrei)
  81. * 2007-08-27 split init_sock_opt into a lightweight init_sock_opt_accept()
  82. * used when accepting connections and init_sock_opt used for
  83. * connect/ new sockets (andrei)
  84. * 2007-11-22 always add the connection & clear the coresponding flags before
  85. * io_watch_add-ing its fd - it's safer this way (andrei)
  86. * 2007-11-26 improved tcp timers: switched to local_timer (andrei)
  87. * 2007-11-27 added send fd cache and reader fd reuse (andrei)
  88. * 2007-11-28 added support for TCP_DEFER_ACCEPT, KEEPALIVE, KEEPINTVL,
  89. * KEEPCNT, QUICKACK, SYNCNT, LINGER2 (andrei)
  90. * 2007-12-04 support for queueing write requests (andrei)
  91. * 2007-12-12 destroy connection asap on wbuf. timeout (andrei)
  92. * 2007-12-13 changed the refcnt and destroy scheme, now refcnt is 1 if
  93. * linked into the hash tables (was 0) (andrei)
  94. * 2007-12-21 support for pending connects (connections are added to the
  95. * hash immediately and writes on them are buffered) (andrei)
  96. * 2008-02-05 handle POLLRDHUP (if supported), POLLERR and
  97. * POLLHUP (andrei)
  98. * on write error check if there's still data in the socket
  99. * read buffer and process it first (andrei)
  100. * 2009-02-26 direct blacklist support (andrei)
  101. */
  102. #ifdef USE_TCP
  103. #ifndef SHM_MEM
  104. #error "shared memory support needed (add -DSHM_MEM to Makefile.defs)"
  105. #endif
  106. #define HANDLE_IO_INLINE
  107. #include "io_wait.h" /* include first to make sure the needed features are
  108. turned on (e.g. _GNU_SOURCE for POLLRDHUP) */
  109. #include <sys/time.h>
  110. #include <sys/types.h>
  111. #include <sys/select.h>
  112. #include <sys/socket.h>
  113. #ifdef HAVE_FILIO_H
  114. #include <sys/filio.h> /* needed on solaris 2.x for FIONREAD */
  115. #elif defined __OS_solaris
  116. #define BSD_COMP /* needed on older solaris for FIONREAD */
  117. #endif /* HAVE_FILIO_H / __OS_solaris */
  118. #include <sys/ioctl.h> /* ioctl() used on write error */
  119. #include <netinet/in.h>
  120. #include <netinet/in_systm.h>
  121. #include <netinet/ip.h>
  122. #include <netinet/tcp.h>
  123. #include <sys/uio.h> /* writev*/
  124. #include <netdb.h>
  125. #include <stdlib.h> /*exit() */
  126. #include <unistd.h>
  127. #include <errno.h>
  128. #include <string.h>
  129. #ifdef HAVE_SELECT
  130. #include <sys/select.h>
  131. #endif
  132. #include <sys/poll.h>
  133. #include "ip_addr.h"
  134. #include "pass_fd.h"
  135. #include "tcp_conn.h"
  136. #include "globals.h"
  137. #include "pt.h"
  138. #include "locking.h"
  139. #include "mem/mem.h"
  140. #include "mem/shm_mem.h"
  141. #include "timer.h"
  142. #include "sr_module.h"
  143. #include "tcp_server.h"
  144. #include "tcp_init.h"
  145. #include "tsend.h"
  146. #include "timer_ticks.h"
  147. #include "local_timer.h"
  148. #ifdef CORE_TLS
  149. #include "tls/tls_server.h"
  150. #define tls_loaded() 1
  151. #else
  152. #include "tls_hooks_init.h"
  153. #include "tls_hooks.h"
  154. #endif /* CORE_TLS*/
  155. #ifdef USE_DST_BLACKLIST
  156. #include "dst_blacklist.h"
  157. #endif /* USE_DST_BLACKLIST */
  158. #include "tcp_info.h"
  159. #include "tcp_options.h"
  160. #include "ut.h"
  161. #include "cfg/cfg_struct.h"
  162. #define local_malloc pkg_malloc
  163. #define local_free pkg_free
  164. #include <fcntl.h> /* must be included after io_wait.h if SIGIO_RT is used */
  165. #ifdef NO_MSG_DONTWAIT
  166. #ifndef MSG_DONTWAIT
  167. /* should work inside tcp_main */
  168. #define MSG_DONTWAIT 0
  169. #endif
  170. #endif /*NO_MSG_DONTWAIT */
  171. #define TCP_PASS_NEW_CONNECTION_ON_DATA /* don't pass a new connection
  172. immediately to a child, wait for
  173. some data on it first */
  174. #define TCP_LISTEN_BACKLOG 1024
  175. #define SEND_FD_QUEUE /* queue send fd requests on EAGAIN, instead of sending
  176. them immediately */
  177. #define TCP_CHILD_NON_BLOCKING
  178. #ifdef SEND_FD_QUEUE
  179. #ifndef TCP_CHILD_NON_BLOCKING
  180. #define TCP_CHILD_NON_BLOCKING
  181. #endif
  182. #define MAX_SEND_FD_QUEUE_SIZE tcp_main_max_fd_no
  183. #define SEND_FD_QUEUE_SIZE 128 /* initial size */
  184. #define MAX_SEND_FD_RETRIES 96 /* FIXME: not used for now */
  185. #define SEND_FD_QUEUE_TIMEOUT MS_TO_TICKS(2000) /* 2 s */
  186. #endif
  187. /* maximum accepted lifetime (maximum possible is ~ MAXINT/2) */
  188. #define MAX_TCP_CON_LIFETIME ((1U<<(sizeof(ticks_t)*8-1))-1)
  189. /* minimum interval local_timer_run() is allowed to run, in ticks */
  190. #define TCPCONN_TIMEOUT_MIN_RUN 1 /* once per tick */
  191. #define TCPCONN_WAIT_TIMEOUT 1 /* 1 tick */
  192. #ifdef TCP_BUF_WRITE
  193. #define TCP_WBUF_SIZE 1024 /* FIXME: after debugging switch to 16-32k */
  194. static unsigned int* tcp_total_wq=0;
  195. #endif
  196. enum fd_types { F_NONE, F_SOCKINFO /* a tcp_listen fd */,
  197. F_TCPCONN, F_TCPCHILD, F_PROC };
  198. #ifdef TCP_FD_CACHE
  199. #define TCP_FD_CACHE_SIZE 8
  200. struct fd_cache_entry{
  201. struct tcp_connection* con;
  202. int id;
  203. int fd;
  204. };
  205. static struct fd_cache_entry fd_cache[TCP_FD_CACHE_SIZE];
  206. #endif /* TCP_FD_CACHE */
  207. static int is_tcp_main=0;
  208. int tcp_accept_aliases=0; /* by default don't accept aliases */
  209. /* flags used for adding new aliases */
  210. int tcp_alias_flags=TCP_ALIAS_FORCE_ADD;
  211. /* flags used for adding the default aliases of a new tcp connection */
  212. int tcp_new_conn_alias_flags=TCP_ALIAS_REPLACE;
  213. int tcp_connect_timeout=DEFAULT_TCP_CONNECT_TIMEOUT;
  214. int tcp_send_timeout=DEFAULT_TCP_SEND_TIMEOUT;
  215. int tcp_con_lifetime=DEFAULT_TCP_CONNECTION_LIFETIME;
  216. enum poll_types tcp_poll_method=0; /* by default choose the best method */
  217. int tcp_max_connections=DEFAULT_TCP_MAX_CONNECTIONS;
  218. int tcp_main_max_fd_no=0;
  219. static union sockaddr_union tcp_source_ipv4_addr; /* saved bind/srv v4 addr. */
  220. static union sockaddr_union* tcp_source_ipv4=0;
  221. #ifdef USE_IPV6
  222. static union sockaddr_union tcp_source_ipv6_addr; /* saved bind/src v6 addr. */
  223. static union sockaddr_union* tcp_source_ipv6=0;
  224. #endif
  225. static int* tcp_connections_no=0; /* current open connections */
  226. /* connection hash table (after ip&port) , includes also aliases */
  227. struct tcp_conn_alias** tcpconn_aliases_hash=0;
  228. /* connection hash table (after connection id) */
  229. struct tcp_connection** tcpconn_id_hash=0;
  230. gen_lock_t* tcpconn_lock=0;
  231. struct tcp_child* tcp_children;
  232. static int* connection_id=0; /* unique for each connection, used for
  233. quickly finding the corresponding connection
  234. for a reply */
  235. int unix_tcp_sock;
  236. static int tcp_proto_no=-1; /* tcp protocol number as returned by
  237. getprotobyname */
  238. static io_wait_h io_h;
  239. static struct local_timer tcp_main_ltimer;
  240. static ticks_t tcp_main_prev_ticks;
  241. static ticks_t tcpconn_main_timeout(ticks_t , struct timer_ln* , void* );
  242. inline static int _tcpconn_add_alias_unsafe(struct tcp_connection* c, int port,
  243. struct ip_addr* l_ip, int l_port,
  244. int flags);
  245. /* sets source address used when opening new sockets and no source is specified
  246. * (by default the address is choosen by the kernel)
  247. * Should be used only on init.
  248. * returns -1 on error */
  249. int tcp_set_src_addr(struct ip_addr* ip)
  250. {
  251. switch (ip->af){
  252. case AF_INET:
  253. ip_addr2su(&tcp_source_ipv4_addr, ip, 0);
  254. tcp_source_ipv4=&tcp_source_ipv4_addr;
  255. break;
  256. #ifdef USE_IPV6
  257. case AF_INET6:
  258. ip_addr2su(&tcp_source_ipv6_addr, ip, 0);
  259. tcp_source_ipv6=&tcp_source_ipv6_addr;
  260. break;
  261. #endif
  262. default:
  263. return -1;
  264. }
  265. return 0;
  266. }
  267. static inline int init_sock_keepalive(int s)
  268. {
  269. int optval;
  270. #ifdef HAVE_SO_KEEPALIVE
  271. if (tcp_options.keepalive){
  272. optval=1;
  273. if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &optval,
  274. sizeof(optval))<0){
  275. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to enable"
  276. " SO_KEEPALIVE: %s\n", strerror(errno));
  277. return -1;
  278. }
  279. }
  280. #endif
  281. #ifdef HAVE_TCP_KEEPINTVL
  282. if (tcp_options.keepintvl){
  283. optval=tcp_options.keepintvl;
  284. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &optval,
  285. sizeof(optval))<0){
  286. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  287. " keepalive probes interval: %s\n", strerror(errno));
  288. }
  289. }
  290. #endif
  291. #ifdef HAVE_TCP_KEEPIDLE
  292. if (tcp_options.keepidle){
  293. optval=tcp_options.keepidle;
  294. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &optval,
  295. sizeof(optval))<0){
  296. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  297. " keepalive idle interval: %s\n", strerror(errno));
  298. }
  299. }
  300. #endif
  301. #ifdef HAVE_TCP_KEEPCNT
  302. if (tcp_options.keepcnt){
  303. optval=tcp_options.keepcnt;
  304. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT, &optval,
  305. sizeof(optval))<0){
  306. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  307. " maximum keepalive count: %s\n", strerror(errno));
  308. }
  309. }
  310. #endif
  311. return 0;
  312. }
  313. /* set all socket/fd options for new sockets (e.g. before connect):
  314. * disable nagle, tos lowdelay, reuseaddr, non-blocking
  315. *
  316. * return -1 on error */
  317. static int init_sock_opt(int s)
  318. {
  319. int flags;
  320. int optval;
  321. #ifdef DISABLE_NAGLE
  322. flags=1;
  323. if ( (tcp_proto_no!=-1) && (setsockopt(s, tcp_proto_no , TCP_NODELAY,
  324. &flags, sizeof(flags))<0) ){
  325. LOG(L_WARN, "WARNING: init_sock_opt: could not disable Nagle: %s\n",
  326. strerror(errno));
  327. }
  328. #endif
  329. /* tos*/
  330. optval = tos;
  331. if (setsockopt(s, IPPROTO_IP, IP_TOS, (void*)&optval,sizeof(optval)) ==-1){
  332. LOG(L_WARN, "WARNING: init_sock_opt: setsockopt tos: %s\n",
  333. strerror(errno));
  334. /* continue since this is not critical */
  335. }
  336. #if !defined(TCP_DONT_REUSEADDR)
  337. optval=1;
  338. if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR,
  339. (void*)&optval, sizeof(optval))==-1){
  340. LOG(L_ERR, "ERROR: setsockopt SO_REUSEADDR %s\n",
  341. strerror(errno));
  342. /* continue, not critical */
  343. }
  344. #endif /* !TCP_DONT_REUSEADDR */
  345. #ifdef HAVE_TCP_SYNCNT
  346. if (tcp_options.syncnt){
  347. optval=tcp_options.syncnt;
  348. if (setsockopt(s, IPPROTO_TCP, TCP_SYNCNT, &optval,
  349. sizeof(optval))<0){
  350. LOG(L_WARN, "WARNING: init_sock_opt: failed to set"
  351. " maximum SYN retr. count: %s\n", strerror(errno));
  352. }
  353. }
  354. #endif
  355. #ifdef HAVE_TCP_LINGER2
  356. if (tcp_options.linger2){
  357. optval=tcp_options.linger2;
  358. if (setsockopt(s, IPPROTO_TCP, TCP_LINGER2, &optval,
  359. sizeof(optval))<0){
  360. LOG(L_WARN, "WARNING: init_sock_opt: failed to set"
  361. " maximum LINGER2 timeout: %s\n", strerror(errno));
  362. }
  363. }
  364. #endif
  365. #ifdef HAVE_TCP_QUICKACK
  366. if (tcp_options.delayed_ack){
  367. optval=0; /* reset quick ack => delayed ack */
  368. if (setsockopt(s, IPPROTO_TCP, TCP_QUICKACK, &optval,
  369. sizeof(optval))<0){
  370. LOG(L_WARN, "WARNING: init_sock_opt: failed to reset"
  371. " TCP_QUICKACK: %s\n", strerror(errno));
  372. }
  373. }
  374. #endif /* HAVE_TCP_QUICKACK */
  375. init_sock_keepalive(s);
  376. /* non-blocking */
  377. flags=fcntl(s, F_GETFL);
  378. if (flags==-1){
  379. LOG(L_ERR, "ERROR: init_sock_opt: fnctl failed: (%d) %s\n",
  380. errno, strerror(errno));
  381. goto error;
  382. }
  383. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  384. LOG(L_ERR, "ERROR: init_sock_opt: fcntl: set non-blocking failed:"
  385. " (%d) %s\n", errno, strerror(errno));
  386. goto error;
  387. }
  388. return 0;
  389. error:
  390. return -1;
  391. }
  392. /* set all socket/fd options for "accepted" sockets
  393. * only nonblocking is set since the rest is inherited from the
  394. * "parent" (listening) socket
  395. * Note: setting O_NONBLOCK is required on linux but it's not needed on
  396. * BSD and possibly solaris (where the flag is inherited from the
  397. * parent socket). However since there is no standard document
  398. * requiring a specific behaviour in this case it's safer to always set
  399. * it (at least for now) --andrei
  400. * TODO: check on which OSes O_NONBLOCK is inherited and make this
  401. * function a nop.
  402. *
  403. * return -1 on error */
  404. static int init_sock_opt_accept(int s)
  405. {
  406. int flags;
  407. /* non-blocking */
  408. flags=fcntl(s, F_GETFL);
  409. if (flags==-1){
  410. LOG(L_ERR, "ERROR: init_sock_opt_accept: fnctl failed: (%d) %s\n",
  411. errno, strerror(errno));
  412. goto error;
  413. }
  414. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  415. LOG(L_ERR, "ERROR: init_sock_opt_accept: "
  416. "fcntl: set non-blocking failed: (%d) %s\n",
  417. errno, strerror(errno));
  418. goto error;
  419. }
  420. return 0;
  421. error:
  422. return -1;
  423. }
/* blocking connect on a non-blocking fd; it will timeout after
 * tcp_connect_timeout
 * if BLOCKING_USE_SELECT and HAVE_SELECT are defined it will internally
 * use select() instead of poll (bad if fd > FD_SET_SIZE, poll is preferred)
 */
static int tcp_blocking_connect(int fd, int type,
								const struct sockaddr *servaddr,
								socklen_t addrlen)
{
	int n;
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
	fd_set sel_set;
	fd_set orig_set;
	struct timeval timeout;
#else
	struct pollfd pf;
#endif
	int elapsed;          /* seconds spent so far in this attempt */
	int to;               /* remaining timeout in seconds */
	int ticks;            /* tick count at the start of the attempt */
	int err;              /* SO_ERROR value read back from the socket */
	unsigned int err_len;
	int poll_err;         /* set if poll reported ERR/HUP/NVAL */

	poll_err=0;
	to=tcp_connect_timeout;
	ticks=get_ticks();
again:
	n=connect(fd, servaddr, addrlen);
	if (n==-1){
		if (errno==EINTR){
			/* interrupted by a signal: retry unless the total
			 * timeout already expired */
			elapsed=(get_ticks()-ticks)*TIMER_TICK;
			if (elapsed<to) goto again;
			else goto error_timeout;
		}
		if (errno!=EINPROGRESS && errno!=EALREADY){
			/* hard connect failure (not "still in progress") */
#ifdef USE_DST_BLACKLIST
			if (cfg_get(core, core_cfg, use_dst_blacklist))
				switch(errno){
					case ECONNREFUSED:
					case ENETUNREACH:
					case ETIMEDOUT:
					case ECONNRESET:
					case EHOSTUNREACH:
						dst_blacklist_su(BLST_ERR_CONNECT, type,
										(union sockaddr_union*)servaddr, 0);
						break;
				}
#endif /* USE_DST_BLACKLIST */
			LOG(L_ERR, "ERROR: tcp_blocking_connect %s: (%d) %s\n",
					su2a((union sockaddr_union*)servaddr, addrlen),
					errno, strerror(errno));
			goto error;
		}
	}else goto end; /* connect succeeded immediately */
	/* connect in progress: wait for the fd to become writable (or fail),
	 * re-checking the timeout on every wakeup */
	/* poll/select loop */
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
	FD_ZERO(&orig_set);
	FD_SET(fd, &orig_set);
#else
	pf.fd=fd;
	pf.events=POLLOUT;
#endif
	while(1){
		elapsed=(get_ticks()-ticks)*TIMER_TICK;
		if (elapsed<to)
			/* NOTE(review): 'ticks' is never reset inside the loop, so the
			 * cumulative elapsed time is subtracted from 'to' on every
			 * wakeup; after several wakeups the effective timeout can be
			 * shorter than configured -- confirm this is intended */
			to-=elapsed;
		else
			goto error_timeout;
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
		sel_set=orig_set;
		timeout.tv_sec=to;
		timeout.tv_usec=0;
		n=select(fd+1, 0, &sel_set, 0, &timeout);
#else
		n=poll(&pf, 1, to*1000);
#endif
		if (n<0){
			if (errno==EINTR) continue;
			LOG(L_ERR, "ERROR: tcp_blocking_connect %s: poll/select failed:"
					" (%d) %s\n",
					su2a((union sockaddr_union*)servaddr, addrlen),
					errno, strerror(errno));
			goto error;
		}else if (n==0) /* timeout */ continue;
#if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
		if (FD_ISSET(fd, &sel_set))
#else
		if (pf.revents&(POLLERR|POLLHUP|POLLNVAL)){
			LOG(L_ERR, "ERROR: tcp_blocking_connect %s: poll error: "
					"flags %x\n",
					su2a((union sockaddr_union*)servaddr, addrlen),
					pf.revents);
			poll_err=1;
		}
#endif
		{
			/* read the real connect result via SO_ERROR */
			err_len=sizeof(err);
			getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &err_len);
			if ((err==0) && (poll_err==0)) goto end;
			if (err!=EINPROGRESS && err!=EALREADY){
				LOG(L_ERR, "ERROR: tcp_blocking_connect %s: SO_ERROR (%d) "
						"%s\n",
						su2a((union sockaddr_union*)servaddr, addrlen),
						err, strerror(err));
				goto error;
			}
		}
	}
error_timeout:
	/* timeout */
#ifdef USE_DST_BLACKLIST
	if (cfg_get(core, core_cfg, use_dst_blacklist))
		dst_blacklist_su(BLST_ERR_CONNECT, type,
							(union sockaddr_union*)servaddr, 0);
#endif /* USE_DST_BLACKLIST */
	LOG(L_ERR, "ERROR: tcp_blocking_connect %s: timeout %d s elapsed "
				"from %d s\n", su2a((union sockaddr_union*)servaddr, addrlen),
				elapsed, tcp_connect_timeout);
error:
	return -1;
end:
	return 0;
}
/* forward declaration: non-blocking write helper, defined later in the file */
inline static int _tcpconn_write_nb(int fd, struct tcp_connection* c,
									char* buf, int len);

#ifdef TCP_BUF_WRITE

/* unsafe version (no locking): true if the write queue has no buffers */
#define _wbufq_empty(con) ((con)->wbuf_q.first==0)
/* unsafe version (no locking): true if the write queue has queued buffers */
#define _wbufq_non_empty(con) ((con)->wbuf_q.first!=0)
/* unsafe version, call while holding the connection write lock.
 * Appends 'size' bytes from 'data' to the connection's write-buffer queue,
 * allocating new shm buffers as needed and updating both the per-connection
 * (q->queued) and the global (*tcp_total_wq) byte counters.
 * returns 0 on success, -1 on error (limits exceeded, write timeout on
 * already-queued data, or out of shm memory) */
inline static int _wbufq_add(struct tcp_connection* c, char* data,
							unsigned int size)
{
	struct tcp_wbuffer_queue* q;
	struct tcp_wbuffer* wb;
	unsigned int last_free;  /* free bytes left in the tail buffer */
	unsigned int wb_size;    /* size of a newly allocated buffer */
	unsigned int crt_size;   /* bytes copied in the current iteration */
	ticks_t t;

	q=&c->wbuf_q;
	t=get_ticks_raw();
	/* refuse the write if it would exceed the per-connection or global
	 * queued-bytes limits, or if the oldest queued data has waited past
	 * its write timeout */
	if (unlikely( ((q->queued+size)>tcp_options.tcpconn_wq_max) ||
			((*tcp_total_wq+size)>tcp_options.tcp_wq_max) ||
			(q->first &&
				TICKS_LT(q->wr_timeout, t)) )){
		LOG(L_ERR, "ERROR: wbufq_add(%d bytes): write queue full or timeout "
					" (%d, total %d, last write %d s ago)\n",
					size, q->queued, *tcp_total_wq,
					TICKS_TO_S(t-q->wr_timeout-tcp_options.tcp_wq_timeout));
#ifdef USE_DST_BLACKLIST
		if (q->first && TICKS_LT(q->wr_timeout, t) &&
				cfg_get(core, core_cfg, use_dst_blacklist)){
			ERR("blacklisting, state=%d\n", c->state);
			dst_blacklist_su((c->state==S_CONN_CONNECT)? BLST_ERR_CONNECT:
								BLST_ERR_SEND,
								c->rcv.proto, &c->rcv.src_su, 0);
		}
#endif /* USE_DST_BLACKLIST */
		goto error;
	}

	if (unlikely(q->last==0)){
		/* empty queue: allocate the first buffer (at least TCP_WBUF_SIZE,
		 * or 'size' if the write is larger) and arm the write timeout */
		wb_size=MAX_unsigned(TCP_WBUF_SIZE, size);
		wb=shm_malloc(sizeof(*wb)+wb_size-1);
		if (unlikely(wb==0))
			goto error;
		wb->b_size=wb_size;
		wb->next=0;
		q->last=wb;
		q->first=wb;
		q->last_used=0;
		q->offset=0;
		q->wr_timeout=get_ticks_raw()+tcp_options.tcp_wq_timeout;
	}else{
		wb=q->last;
	}

	/* copy 'data' buffer by buffer, appending new buffers when the tail
	 * fills up.
	 * note: if shm_malloc fails mid-loop, the bytes copied so far remain
	 * queued and accounted for, but -1 is returned */
	while(size){
		last_free=wb->b_size-q->last_used;
		if (last_free==0){
			wb_size=MAX_unsigned(TCP_WBUF_SIZE, size);
			wb=shm_malloc(sizeof(*wb)+wb_size-1);
			if (unlikely(wb==0))
				goto error;
			wb->b_size=wb_size;
			wb->next=0;
			q->last->next=wb;
			q->last=wb;
			q->last_used=0;
			last_free=wb->b_size;
		}
		crt_size=MIN_unsigned(last_free, size);
		memcpy(wb->buf+q->last_used, data, crt_size);
		q->last_used+=crt_size;
		size-=crt_size;
		data+=crt_size;
		q->queued+=crt_size;
		atomic_add_int((int*)tcp_total_wq, crt_size);
	}
	return 0;
error:
	return -1;
}
  626. /* unsafe version, call while holding the connection write lock
  627. * inserts data at the beginning, it ignores the max queue size checks and
  628. * the timeout (use sparingly)
  629. * Note: it should never be called on a write buffer after wbufq_run() */
  630. inline static int _wbufq_insert(struct tcp_connection* c, char* data,
  631. unsigned int size)
  632. {
  633. struct tcp_wbuffer_queue* q;
  634. struct tcp_wbuffer* wb;
  635. q=&c->wbuf_q;
  636. if (likely(q->first==0)) /* if empty, use wbufq_add */
  637. return _wbufq_add(c, data, size);
  638. if (unlikely((*tcp_total_wq+size)>tcp_options.tcp_wq_max)){
  639. LOG(L_ERR, "ERROR: wbufq_insert(%d bytes): write queue full"
  640. " (%d, total %d, last write %d s ago)\n",
  641. size, q->queued, *tcp_total_wq,
  642. TICKS_TO_S(get_ticks_raw()-q->wr_timeout-
  643. tcp_options.tcp_wq_timeout));
  644. goto error;
  645. }
  646. if (unlikely(q->offset)){
  647. LOG(L_CRIT, "BUG: wbufq_insert: non-null offset %d (bad call, should"
  648. "never be called after the wbufq_run())\n", q->offset);
  649. goto error;
  650. }
  651. if ((q->first==q->last) && ((q->last->b_size-q->last_used)>=size)){
  652. /* one block with enough space in it for size bytes */
  653. memmove(q->first->buf+size, q->first->buf, size);
  654. memcpy(q->first->buf, data, size);
  655. q->last_used+=size;
  656. }else{
  657. /* create a size bytes block directly */
  658. wb=shm_malloc(sizeof(*wb)+size-1);
  659. if (unlikely(wb==0))
  660. goto error;
  661. wb->b_size=size;
  662. /* insert it */
  663. wb->next=q->first;
  664. q->first=wb;
  665. memcpy(wb->buf, data, size);
  666. }
  667. q->queued+=size;
  668. atomic_add_int((int*)tcp_total_wq, size);
  669. return 0;
  670. error:
  671. return -1;
  672. }
  673. /* unsafe version, call while holding the connection write lock */
  674. inline static void _wbufq_destroy( struct tcp_wbuffer_queue* q)
  675. {
  676. struct tcp_wbuffer* wb;
  677. struct tcp_wbuffer* next_wb;
  678. int unqueued;
  679. unqueued=0;
  680. if (likely(q->first)){
  681. wb=q->first;
  682. do{
  683. next_wb=wb->next;
  684. unqueued+=(wb==q->last)?q->last_used:wb->b_size;
  685. if (wb==q->first)
  686. unqueued-=q->offset;
  687. shm_free(wb);
  688. wb=next_wb;
  689. }while(wb);
  690. }
  691. memset(q, 0, sizeof(*q));
  692. atomic_add_int((int*)tcp_total_wq, -unqueued);
  693. }
/* tries to empty the queue (safe version, c->write_lock must not be hold)
 * returns -1 on error, bytes written on success (>=0)
 * if the whole queue is emptied => sets *empty*/
inline static int wbufq_run(int fd, struct tcp_connection* c, int* empty)
{
	struct tcp_wbuffer_queue* q;
	struct tcp_wbuffer* wb;
	int n;          /* result of the last non-blocking write */
	int ret;        /* total bytes written, or -1 on hard error */
	int block_size; /* bytes still to be written from the head buffer */
	ticks_t t;
	char* buf;

	*empty=0;
	ret=0;
	t=get_ticks_raw();
	lock_get(&c->write_lock);
	q=&c->wbuf_q;
	while(q->first){
		/* the head buffer's first q->offset bytes were already written;
		 * the tail buffer is only filled up to q->last_used */
		block_size=((q->first==q->last)?q->last_used:q->first->b_size)-
						q->offset;
		buf=q->first->buf+q->offset;
		n=_tcpconn_write_nb(fd, c, buf, block_size);
		if (likely(n>0)){
			ret+=n;
			if (likely(n==block_size)){
				/* whole head buffer written: free it and advance */
				wb=q->first;
				q->first=q->first->next;
				shm_free(wb);
				q->offset=0;
				q->queued-=block_size;
				atomic_add_int((int*)tcp_total_wq, -block_size);
			}else{
				/* partial write: remember the progress and stop
				 * (the write timeout is NOT refreshed in this case) */
				q->offset+=n;
				q->queued-=n;
				atomic_add_int((int*)tcp_total_wq, -n);
				break;
			}
			q->wr_timeout=t+tcp_options.tcp_wq_timeout;
		}else{
			if (n<0){
				/* EINTR is handled inside _tcpconn_write_nb */
				if (!(errno==EAGAIN || errno==EWOULDBLOCK)){
					/* hard write error (not just "would block") */
#ifdef USE_DST_BLACKLIST
					if (cfg_get(core, core_cfg, use_dst_blacklist))
						switch(errno){
							case ENETUNREACH:
							case ECONNRESET:
								/*case EHOSTUNREACH: -- not posix */
								dst_blacklist_su((c->state==S_CONN_CONNECT)?
													BLST_ERR_CONNECT:
													BLST_ERR_SEND,
													c->rcv.proto,
													&c->rcv.src_su, 0);
								break;
						}
#endif /* USE_DST_BLACKLIST */
					ret=-1;
					LOG(L_ERR, "ERROR: wbuf_runq: %s [%d]\n",
							strerror(errno), errno);
				}
			}
			break;
		}
	}
	if (likely(q->first==0)){
		/* the queue was completely flushed */
		q->last=0;
		q->last_used=0;
		q->offset=0;
		*empty=1;
	}
	/* a successful write on a still-connecting socket means the
	 * connection is established */
	if (unlikely(c->state==S_CONN_CONNECT && (ret>0)))
		c->state=S_CONN_OK;
	lock_release(&c->write_lock);
	return ret;
}
  769. #endif /* TCP_BUF_WRITE */
#if 0
/* blocking write even on non-blocking sockets
 * if TCP_TIMEOUT will return with error
 * NOTE: dead code -- disabled with #if 0, kept for reference only */
static int tcp_blocking_write(struct tcp_connection* c, int fd, char* buf,
								unsigned int len)
{
	int n;
	fd_set sel_set;
	struct timeval timeout;
	int ticks;
	int initial_len;

	initial_len=len;
again:
	n=send(fd, buf, len,
#ifdef HAVE_MSG_NOSIGNAL
			MSG_NOSIGNAL
#else
			0
#endif
		);
	if (n<0){
		if (errno==EINTR) goto again;
		else if (errno!=EAGAIN && errno!=EWOULDBLOCK){
			LOG(L_ERR, "tcp_blocking_write: failed to send: (%d) %s\n",
					errno, strerror(errno));
			goto error;
		}
	}else if (n<len){
		/* partial write */
		buf+=n;
		len-=n;
	}else{
		/* success: full write */
		goto end;
	}
	/* wait (with timeout) until the socket is writable again */
	while(1){
		FD_ZERO(&sel_set);
		FD_SET(fd, &sel_set);
		timeout.tv_sec=tcp_send_timeout;
		timeout.tv_usec=0;
		ticks=get_ticks();
		n=select(fd+1, 0, &sel_set, 0, &timeout);
		if (n<0){
			if (errno==EINTR) continue; /* signal, ignore */
			LOG(L_ERR, "ERROR: tcp_blocking_write: select failed: "
					" (%d) %s\n", errno, strerror(errno));
			goto error;
		}else if (n==0){
			/* timeout */
			if (get_ticks()-ticks>=tcp_send_timeout){
				LOG(L_ERR, "ERROR: tcp_blocking_write: send timeout (%d)\n",
						tcp_send_timeout);
				goto error;
			}
			continue;
		}
		if (FD_ISSET(fd, &sel_set)){
			/* we can write again */
			goto again;
		}
	}
error:
	return -1;
end:
	return initial_len;
}
#endif
/* allocates (in shm) and initializes a new tcp connection structure.
 * sock       - already connected/accepted socket
 * su         - peer address (copied into c->rcv.src_su)
 * local_addr - local address if known (takes precedence over ba's address)
 * ba         - the socket_info ("bind address") the connection belongs to
 * type       - PROTO_TCP or PROTO_TLS
 * state      - initial connection state
 * returns the new connection (refcnt 0, c->fd not set) or 0 on error */
struct tcp_connection* tcpconn_new(int sock, union sockaddr_union* su,
									union sockaddr_union* local_addr,
									struct socket_info* ba, int type,
									int state)
{
	struct tcp_connection *c;

	c=(struct tcp_connection*)shm_malloc(sizeof(struct tcp_connection));
	if (c==0){
		LOG(L_ERR, "ERROR: tcpconn_new: mem. allocation failure\n");
		goto error;
	}
	memset(c, 0, sizeof(struct tcp_connection)); /* zero init */
	c->s=sock;
	c->fd=-1; /* not initialized */
	if (lock_init(&c->write_lock)==0){
		LOG(L_ERR, "ERROR: tcpconn_new: init lock failed\n");
		goto error;
	}

	c->rcv.src_su=*su;

	atomic_set(&c->refcnt, 0);
	local_timer_init(&c->timer, tcpconn_main_timeout, c, 0);
	su2ip_addr(&c->rcv.src_ip, su);
	c->rcv.src_port=su_getport(su);
	c->rcv.bind_address=ba;
	if (likely(local_addr)){
		su2ip_addr(&c->rcv.dst_ip, local_addr);
		c->rcv.dst_port=su_getport(local_addr);
	}else if (ba){
		/* no explicit local address: use the bind address */
		c->rcv.dst_ip=ba->address;
		c->rcv.dst_port=ba->port_no;
	}
	print_ip("tcpconn_new: new tcp connection: ", &c->rcv.src_ip, "\n");
	DBG(     "tcpconn_new: on port %d, type %d\n", c->rcv.src_port, type);
	init_tcp_req(&c->req);
	c->id=(*connection_id)++;
	c->rcv.proto_reserved1=0; /* this will be filled before receive_message*/
	c->rcv.proto_reserved2=0;
	c->state=state;
	c->extra_data=0;
#ifdef USE_TLS
	if (type==PROTO_TLS){
		/* TLS init sets c->type/c->rcv.proto/extra_data itself */
		if (tls_tcpconn_init(c, sock)==-1) goto error;
	}else
#endif /* USE_TLS*/
	{
		c->type=PROTO_TCP;
		c->rcv.proto=PROTO_TCP;
		c->timeout=get_ticks_raw()+tcp_con_lifetime;
	}

	return c;

error:
	/* NOTE(review): if we get here after lock_init() succeeded, the lock
	 * is freed without lock_destroy() -- confirm whether that leaks a
	 * system semaphore on some lock implementations */
	if (c) shm_free(c);
	return 0;
}
/* do the actual connect, set sock. options a.s.o
 * returns socket on success, -1 on error
 * sets also *res_local_addr, res_si and state (S_CONN_CONNECT for an
 * unfinished connect and S_CONN_OK for a finished one)*/
inline static int tcp_do_connect(	union sockaddr_union* server,
									union sockaddr_union* from,
									int type,
									union sockaddr_union* res_local_addr,
									struct socket_info** res_si,
									enum tcp_conn_states *state
									)
{
	int s;
	union sockaddr_union my_name;
	socklen_t my_name_len;
	struct ip_addr ip;
#ifdef TCP_BUF_WRITE
	int n;
#endif /* TCP_BUF_WRITE */

	s=socket(AF2PF(server->s.sa_family), SOCK_STREAM, 0);
	if (unlikely(s==-1)){
		LOG(L_ERR, "ERROR: tcp_do_connect %s: socket: (%d) %s\n",
				su2a(server, sizeof(*server)), errno, strerror(errno));
		goto error;
	}
	if (init_sock_opt(s)<0){
		LOG(L_ERR, "ERROR: tcp_do_connect %s: init_sock_opt failed\n",
				su2a(server, sizeof(*server)));
		goto error;
	}
	/* a failed bind to the requested source is only a warning: the
	 * connect will then use a kernel-chosen source address */
	if (unlikely(from && bind(s, &from->s, sockaddru_len(*from)) != 0)){
		LOG(L_WARN, "WARNING: tcp_do_connect: binding to source address"
					" %s failed: %s [%d]\n", su2a(from, sizeof(*from)),
					strerror(errno), errno);
	}
	*state=S_CONN_OK;
#ifdef TCP_BUF_WRITE
	if (likely(tcp_options.tcp_buf_write)){
		/* async mode: start a non-blocking connect; report
		 * S_CONN_CONNECT if it is still in progress */
again:
		n=connect(s, &server->s, sockaddru_len(*server));
		if (unlikely(n==-1)){
			if (errno==EINTR) goto again;
			if (likely(errno==EINPROGRESS))
				*state=S_CONN_CONNECT;
			else if (errno!=EALREADY){
#ifdef USE_DST_BLACKLIST
				if (cfg_get(core, core_cfg, use_dst_blacklist))
					switch(errno){
						case ECONNREFUSED:
						case ENETUNREACH:
						case ETIMEDOUT:
						case ECONNRESET:
						case EHOSTUNREACH:
							dst_blacklist_su(BLST_ERR_CONNECT, type, server,
												0);
							break;
					}
#endif /* USE_DST_BLACKLIST */
				LOG(L_ERR, "ERROR: tcp_do_connect: connect %s: (%d) %s\n",
						su2a(server, sizeof(*server)),
						errno, strerror(errno));
				goto error;
			}
		}
	}else{
#endif /* TCP_BUF_WRITE */
		/* sync mode: wait for the connect to finish (with timeout) */
		if (tcp_blocking_connect(s, type, &server->s,
									sockaddru_len(*server))<0){
			LOG(L_ERR, "ERROR: tcp_do_connect: tcp_blocking_connect %s"
						" failed\n", su2a(server, sizeof(*server)));
			goto error;
		}
#ifdef TCP_BUF_WRITE
	}
#endif /* TCP_BUF_WRITE */
	if (from){
		su2ip_addr(&ip, from);
		if (!ip_addr_any(&ip))
			/* we already know the source ip, skip the sys. call */
			goto find_socket;
	}
	my_name_len=sizeof(my_name);
	if (unlikely(getsockname(s, &my_name.s, &my_name_len)!=0)){
		LOG(L_ERR, "ERROR: tcp_do_connect: getsockname failed: %s(%d)\n",
				strerror(errno), errno);
		*res_si=0;
		goto error;
	}
	from=&my_name; /* update from with the real "from" address */
	su2ip_addr(&ip, &my_name);
find_socket:
	/* map the effective source ip back to a listening socket_info;
	 * fall back to the default tcp send sockets if none matches */
#ifdef USE_TLS
	if (unlikely(type==PROTO_TLS))
		*res_si=find_si(&ip, 0, PROTO_TLS);
	else
#endif
		*res_si=find_si(&ip, 0, PROTO_TCP);

	if (unlikely(*res_si==0)){
		LOG(L_WARN, "WARNING: tcp_do_connect %s: could not find corresponding"
				" listening socket for %s, using default...\n",
					su2a(server, sizeof(*server)), ip_addr2a(&ip));
		if (server->s.sa_family==AF_INET) *res_si=sendipv4_tcp;
#ifdef USE_IPV6
		else *res_si=sendipv6_tcp;
#endif
	}
	*res_local_addr=*from;
	return s;
error:
	if (s!=-1) close(s);
	return -1;
}
  1003. struct tcp_connection* tcpconn_connect( union sockaddr_union* server,
  1004. union sockaddr_union* from,
  1005. int type)
  1006. {
  1007. int s;
  1008. struct socket_info* si;
  1009. union sockaddr_union my_name;
  1010. struct tcp_connection* con;
  1011. enum tcp_conn_states state;
  1012. s=-1;
  1013. if (*tcp_connections_no >= tcp_max_connections){
  1014. LOG(L_ERR, "ERROR: tcpconn_connect: maximum number of connections"
  1015. " exceeded (%d/%d)\n",
  1016. *tcp_connections_no, tcp_max_connections);
  1017. goto error;
  1018. }
  1019. s=tcp_do_connect(server, from, type, &my_name, &si, &state);
  1020. if (s==-1){
  1021. LOG(L_ERR, "ERROR: tcp_do_connect %s: failed (%d) %s\n",
  1022. su2a(server, sizeof(*server)), errno, strerror(errno));
  1023. goto error;
  1024. }
  1025. con=tcpconn_new(s, server, &my_name, si, type, state);
  1026. if (con==0){
  1027. LOG(L_ERR, "ERROR: tcp_connect %s: tcpconn_new failed, closing the "
  1028. " socket\n", su2a(server, sizeof(*server)));
  1029. goto error;
  1030. }
  1031. return con;
  1032. /*FIXME: set sock idx! */
  1033. error:
  1034. if (s!=-1) close(s); /* close the opened socket */
  1035. return 0;
  1036. }
#ifdef TCP_CONNECT_WAIT
/* performs the delayed connect for a connection in TCP_CONNECT_WAIT mode:
 * opens and connects the real socket, updates the connection's bind
 * address and local ip/port, and (re)creates the local-address aliases if
 * the effective local address differs from the requested one.
 * returns the new socket on success, -1 on error */
int tcpconn_finish_connect( struct tcp_connection* c,
							union sockaddr_union* from)
{
	int s;
	int r;
	union sockaddr_union local_addr;
	struct socket_info* si;
	enum tcp_conn_states state;
	struct tcp_conn_alias* a;

	s=tcp_do_connect(&c->rcv.src_su, from, c->type, &local_addr, &si, &state);
	if (unlikely(s==-1)){
		LOG(L_ERR, "ERROR: tcpconn_finish_connect %s: tcp_do_connect for %p"
				" failed\n", su2a(&c->rcv.src_su, sizeof(c->rcv.src_su)),
				c);
		return -1;
	}
	c->rcv.bind_address=si;
	su2ip_addr(&c->rcv.dst_ip, &local_addr);
	c->rcv.dst_port=su_getport(&local_addr);
	/* update aliases if needed */
	if (likely(from==0)){
		/* no source was requested => just add the local-address aliases */
		TCPCONN_LOCK;
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip, 0,
													tcp_new_conn_alias_flags);
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
									c->rcv.dst_port, tcp_new_conn_alias_flags);
		TCPCONN_UNLOCK;
	}else if (su_cmp(from, &local_addr)!=1){
		/* the effective local address differs from the requested one */
		TCPCONN_LOCK;
		/* remove all the aliases except the first one and re-add them
		 * (there shouldn't be more than the 3 default aliases at this
		 * stage) */
		for (r=1; r<c->aliases; r++){
			a=&c->con_aliases[r];
			tcpconn_listrm(tcpconn_aliases_hash[a->hash], a, next, prev);
		}
		c->aliases=1;
		/* add the local_ip:0 and local_ip:local_port aliases */
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
												0, tcp_new_conn_alias_flags);
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
									c->rcv.dst_port, tcp_new_conn_alias_flags);
		TCPCONN_UNLOCK;
	}
	return s;
}
#endif /* TCP_CONNECT_WAIT */
/* adds a tcp connection to the tcpconn hashes
 * Note: it's called _only_ from the tcp_main process */
inline static struct tcp_connection* tcpconn_add(struct tcp_connection *c)
{
	struct ip_addr zero_ip;

	if (likely(c)){
		ip_addr_mk_any(c->rcv.src_ip.af, &zero_ip);
		c->id_hash=tcp_id_hash(c->id);
		c->aliases=0;
		TCPCONN_LOCK;
		c->flags|=F_CONN_HASHED;
		/* add it at the beginning of the list*/
		tcpconn_listadd(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
		/* set the aliases */
		/* first alias is for (peer_ip, peer_port, 0 ,0) -- for finding
		 *  any connection to peer_ip, peer_port
		 * the second alias is for (peer_ip, peer_port, local_addr, 0) -- for
		 *  finding any connection to peer_ip, peer_port from local_addr
		 * the third alias is for (peer_ip, peer_port, local_addr, local_port)
		 *   -- for finding if a fully specified connection exists */
		_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &zero_ip, 0,
													tcp_new_conn_alias_flags);
		if (likely(c->rcv.dst_ip.af && ! ip_addr_any(&c->rcv.dst_ip))){
			/* add the local-address aliases only when the local ip
			 * is actually known and not a wildcard */
			_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip, 0,
													tcp_new_conn_alias_flags);
			_tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
									c->rcv.dst_port, tcp_new_conn_alias_flags);
		}
		/* ignore add_alias errors, there are some valid cases when one
		 * of the add_alias would fail (e.g. first add_alias for 2 connections
		 * with the same destination but different src. ip*/
		TCPCONN_UNLOCK;
		DBG("tcpconn_add: hashes: %d:%d:%d, %d\n",
												c->con_aliases[0].hash,
												c->con_aliases[1].hash,
												c->con_aliases[2].hash,
												c->id_hash);
		return c;
	}else{
		LOG(L_CRIT, "tcpconn_add: BUG: null connection pointer\n");
		return 0;
	}
}
  1129. static inline void _tcpconn_detach(struct tcp_connection *c)
  1130. {
  1131. int r;
  1132. tcpconn_listrm(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
  1133. /* remove all the aliases */
  1134. for (r=0; r<c->aliases; r++)
  1135. tcpconn_listrm(tcpconn_aliases_hash[c->con_aliases[r].hash],
  1136. &c->con_aliases[r], next, prev);
  1137. }
/* frees an already-detached connection: destroys any pending write-buffer
 * queue (updating *tcp_total_wq), the write lock and the TLS state (if
 * any), then releases the shm block.
 * WARNING: the connection must already be removed from all hashes */
static inline void _tcpconn_free(struct tcp_connection* c)
{
#ifdef TCP_BUF_WRITE
	if (unlikely(_wbufq_non_empty(c)))
		_wbufq_destroy(&c->wbuf_q);
#endif
	lock_destroy(&c->write_lock);
#ifdef USE_TLS
	if (unlikely(c->type==PROTO_TLS)) tls_tcpconn_clean(c);
#endif
	shm_free(c);
}
/* unsafe tcpconn_rm version (nolocks): detach from the hashes and free.
 * WARNING: must be called with TCPCONN_LOCK held */
void _tcpconn_rm(struct tcp_connection* c)
{
	_tcpconn_detach(c);
	_tcpconn_free(c);
}
  1156. void tcpconn_rm(struct tcp_connection* c)
  1157. {
  1158. int r;
  1159. TCPCONN_LOCK;
  1160. tcpconn_listrm(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
  1161. /* remove all the aliases */
  1162. for (r=0; r<c->aliases; r++)
  1163. tcpconn_listrm(tcpconn_aliases_hash[c->con_aliases[r].hash],
  1164. &c->con_aliases[r], next, prev);
  1165. TCPCONN_UNLOCK;
  1166. lock_destroy(&c->write_lock);
  1167. #ifdef USE_TLS
  1168. if ((c->type==PROTO_TLS)&&(c->extra_data)) tls_tcpconn_clean(c);
  1169. #endif
  1170. shm_free(c);
  1171. }
/* finds a connection, if id=0 uses the ip addr, port, local_ip and local port
 * (host byte order) and tries to find the connection that matches all of
 * them. Wild cards can be used for local_ip and local_port (a 0 filled
 * ip address and/or a 0 local port).
 * WARNING: unprotected (locks) use tcpconn_get unless you really
 * know what you are doing */
struct tcp_connection* _tcpconn_find(int id, struct ip_addr* ip, int port,
									struct ip_addr* l_ip, int l_port)
{
	struct tcp_connection *c;
	struct tcp_conn_alias* a;
	unsigned hash;
	int is_local_ip_any;

#ifdef EXTRA_DEBUG
	DBG("tcpconn_find: %d  port %d\n",id, port);
	if (ip) print_ip("tcpconn_find: ip ", ip, "\n");
#endif
	if (likely(id)){
		/* lookup by connection id */
		hash=tcp_id_hash(id);
		for (c=tcpconn_id_hash[hash]; c; c=c->id_next){
#ifdef EXTRA_DEBUG
			DBG("c=%p, c->id=%d, port=%d\n",c, c->id, c->rcv.src_port);
			print_ip("ip=", &c->rcv.src_ip, "\n");
#endif
			/* connections in S_CONN_BAD state are never returned */
			if ((id==c->id)&&(c->state!=S_CONN_BAD)) return c;
		}
	}else if (likely(ip)){
		/* lookup by peer address via the alias hash */
		hash=tcp_addr_hash(ip, port, l_ip, l_port);
		is_local_ip_any=ip_addr_any(l_ip);
		for (a=tcpconn_aliases_hash[hash]; a; a=a->next){
#ifdef EXTRA_DEBUG
			DBG("a=%p, c=%p, c->id=%d, alias port= %d port=%d\n", a, a->parent,
					a->parent->id, a->port, a->parent->rcv.src_port);
			print_ip("ip=",&a->parent->rcv.src_ip,"\n");
#endif
			/* match on peer port/ip; local ip/port are wildcarded when
			 * 0 / "any" */
			if ( (a->parent->state!=S_CONN_BAD) && (port==a->port) &&
					((l_port==0) || (l_port==a->parent->rcv.dst_port)) &&
					(ip_addr_cmp(ip, &a->parent->rcv.src_ip)) &&
					(is_local_ip_any ||
						ip_addr_cmp(l_ip, &a->parent->rcv.dst_ip))
				)
				return a->parent;
		}
	}
	return 0;
}
/* _tcpconn_find with locks and timeout
 * local_addr contains the desired local ip:port. If null any local address
 * will be used.  IN*ADDR_ANY or 0 port are wild cards.
 * On success the connection's refcnt is incremented (caller must release). */
struct tcp_connection* tcpconn_get(int id, struct ip_addr* ip, int port,
									union sockaddr_union* local_addr,
									ticks_t timeout)
{
	struct tcp_connection* c;
	struct ip_addr local_ip;
	int local_port;

	local_port=0;
	if (likely(ip)){
		if (unlikely(local_addr)){
			su2ip_addr(&local_ip, local_addr);
			local_port=su_getport(local_addr);
		}else{
			/* no local address requested => full wildcard */
			ip_addr_mk_any(ip->af, &local_ip);
			local_port=0;
		}
	}
	TCPCONN_LOCK;
	c=_tcpconn_find(id, ip, port, &local_ip, local_port);
	if (likely(c)){
		atomic_inc(&c->refcnt);
		/* update the timeout only if the connection is not handled
		 * by a tcp reader (the tcp reader process uses c->timeout for
		 * its own internal timeout and c->timeout will be overwritten
		 * anyway on return to tcp_main) */
		if (likely(c->reader_pid==0))
			c->timeout=get_ticks_raw()+timeout;
	}
	TCPCONN_UNLOCK;
	return c;
}
/* add c->dst:port, local_addr as an alias for the "id" connection,
 * flags: TCP_ALIAS_FORCE_ADD  - add an alias even if a previous one exists
 *        TCP_ALIAS_REPLACE    - if a prev. alias exists, replace it with the
 *                                new one
 * returns 0 on success, <0 on failure ( -1  - null c, -2 too many aliases,
 *  -3 alias already present and pointing to another connection)
 * WARNING: must be called with TCPCONN_LOCK held */
inline static int _tcpconn_add_alias_unsafe(struct tcp_connection* c, int port,
										struct ip_addr* l_ip, int l_port,
										int flags)
{
	unsigned hash;
	struct tcp_conn_alias* a;
	struct tcp_conn_alias* nxt;
	struct tcp_connection* p;
	int is_local_ip_any;
	int i;
	int r;

	a=0;
	is_local_ip_any=ip_addr_any(l_ip);
	if (likely(c)){
		hash=tcp_addr_hash(&c->rcv.src_ip, port, l_ip, l_port);
		/* search the aliases for an already existing one */
		for (a=tcpconn_aliases_hash[hash], nxt=0; a; a=nxt){
			nxt=a->next;
			if ( (a->parent->state!=S_CONN_BAD) && (port==a->port) &&
					( (l_port==0) || (l_port==a->parent->rcv.dst_port)) &&
					(ip_addr_cmp(&c->rcv.src_ip, &a->parent->rcv.src_ip)) &&
					( is_local_ip_any ||
						ip_addr_cmp(&a->parent->rcv.dst_ip, l_ip))
					){
				/* found a matching alias */
				if (unlikely(a->parent!=c)){
					/* it belongs to a different connection */
					if (flags & TCP_ALIAS_FORCE_ADD)
						/* still have to walk the whole list to check if
						 * the alias was not already added */
						continue;
					else if (flags & TCP_ALIAS_REPLACE){
						/* remove the alias =>
						 * remove the current alias and all the following
						 *  ones from the corresponding connection, shift the
						 *  connection aliases array and re-add the other
						 *  aliases (!= current one) */
						p=a->parent;
						/* locate the alias inside p's con_aliases array */
						for (i=0; (i<p->aliases) && (&(p->con_aliases[i])!=a);
								i++);
						if (unlikely(i==p->aliases)){
							LOG(L_CRIT, "BUG: _tcpconn_add_alias_unsafe: "
									" alias %p not found in con %p (id %d)\n",
									a, p, p->id);
							goto error_not_found;
						}
						/* unhook alias i and everything after it (the
						 * array will be compacted below) */
						for (r=i; r<p->aliases; r++){
							tcpconn_listrm(
								tcpconn_aliases_hash[p->con_aliases[r].hash],
								&p->con_aliases[r], next, prev);
						}
						if (likely((i+1)<p->aliases)){
							memmove(&p->con_aliases[i], &p->con_aliases[i+1],
											(p->aliases-i-1)*
												sizeof(p->con_aliases[0]));
						}
						p->aliases--;
						/* re-add the remaining aliases */
						for (r=i; r<p->aliases; r++){
							tcpconn_listadd(
								tcpconn_aliases_hash[p->con_aliases[r].hash],
								&p->con_aliases[r], next, prev);
						}
					}else
						goto error_sec;
				}else goto ok; /* alias already points to c => nothing to do */
			}
		}
		if (unlikely(c->aliases>=TCP_CON_MAX_ALIASES)) goto error_aliases;
		/* append the new alias to c and hook it into the hash */
		c->con_aliases[c->aliases].parent=c;
		c->con_aliases[c->aliases].port=port;
		c->con_aliases[c->aliases].hash=hash;
		tcpconn_listadd(tcpconn_aliases_hash[hash],
								&c->con_aliases[c->aliases], next, prev);
		c->aliases++;
	}else goto error_not_found;
ok:
#ifdef EXTRA_DEBUG
	if (a) DBG("_tcpconn_add_alias_unsafe: alias already present\n");
	else   DBG("_tcpconn_add_alias_unsafe: alias port %d for hash %d, id %d\n",
			port, hash, c->id);
#endif
	return 0;
error_aliases:
	/* too many aliases */
	return -2;
error_not_found:
	/* null connection */
	return -1;
error_sec:
	/* alias already present and pointing to a different connection
	 * (hijack attempt?) */
	return -3;
}
  1353. /* add port as an alias for the "id" connection,
  1354. * returns 0 on success,-1 on failure */
  1355. int tcpconn_add_alias(int id, int port, int proto)
  1356. {
  1357. struct tcp_connection* c;
  1358. int ret;
  1359. struct ip_addr zero_ip;
  1360. int r;
  1361. /* fix the port */
  1362. port=port?port:((proto==PROTO_TLS)?SIPS_PORT:SIP_PORT);
  1363. TCPCONN_LOCK;
  1364. /* check if alias already exists */
  1365. c=_tcpconn_find(id, 0, 0, 0, 0);
  1366. if (likely(c)){
  1367. ip_addr_mk_any(c->rcv.src_ip.af, &zero_ip);
  1368. /* alias src_ip:port, 0, 0 */
  1369. ret=_tcpconn_add_alias_unsafe(c, port, &zero_ip, 0,
  1370. tcp_alias_flags);
  1371. if (ret<0 && ret!=-3) goto error;
  1372. /* alias src_ip:port, local_ip, 0 */
  1373. ret=_tcpconn_add_alias_unsafe(c, port, &c->rcv.dst_ip, 0,
  1374. tcp_alias_flags);
  1375. if (ret<0 && ret!=-3) goto error;
  1376. /* alias src_ip:port, local_ip, local_port */
  1377. ret=_tcpconn_add_alias_unsafe(c, port, &c->rcv.dst_ip, c->rcv.dst_port,
  1378. tcp_alias_flags);
  1379. if (unlikely(ret<0)) goto error;
  1380. }else goto error_not_found;
  1381. TCPCONN_UNLOCK;
  1382. return 0;
  1383. error_not_found:
  1384. TCPCONN_UNLOCK;
  1385. LOG(L_ERR, "ERROR: tcpconn_add_alias: no connection found for id %d\n",id);
  1386. return -1;
  1387. error:
  1388. TCPCONN_UNLOCK;
  1389. switch(ret){
  1390. case -2:
  1391. LOG(L_ERR, "ERROR: tcpconn_add_alias: too many aliases (%d)"
  1392. " for connection %p (id %d) %s:%d <- %d\n",
  1393. c->aliases, c, c->id, ip_addr2a(&c->rcv.src_ip),
  1394. c->rcv.src_port, port);
  1395. for (r=0; r<c->aliases; r++){
  1396. LOG(L_ERR, "ERROR: tcpconn_add_alias: alias %d: for %p (%d)"
  1397. " %s:%d <-%d hash %x\n", r, c, c->id,
  1398. ip_addr2a(&c->rcv.src_ip), c->rcv.src_port,
  1399. c->con_aliases[r].port, c->con_aliases[r].hash);
  1400. }
  1401. break;
  1402. case -3:
  1403. LOG(L_ERR, "ERROR: tcpconn_add_alias: possible port"
  1404. " hijack attempt\n");
  1405. LOG(L_ERR, "ERROR: tcpconn_add_alias: alias for %d port %d already"
  1406. " present and points to another connection \n",
  1407. c->id, port);
  1408. break;
  1409. default:
  1410. LOG(L_ERR, "ERROR: tcpconn_add_alias: unkown error %d\n", ret);
  1411. }
  1412. return -1;
  1413. }
  1414. #ifdef TCP_FD_CACHE
  1415. static void tcp_fd_cache_init()
  1416. {
  1417. int r;
  1418. for (r=0; r<TCP_FD_CACHE_SIZE; r++)
  1419. fd_cache[r].fd=-1;
  1420. }
  1421. inline static struct fd_cache_entry* tcp_fd_cache_get(struct tcp_connection *c)
  1422. {
  1423. int h;
  1424. h=c->id%TCP_FD_CACHE_SIZE;
  1425. if ((fd_cache[h].fd>0) && (fd_cache[h].id==c->id) && (fd_cache[h].con==c))
  1426. return &fd_cache[h];
  1427. return 0;
  1428. }
/* invalidates a cache entry by marking its fd slot empty.
 * NOTE: does not close the fd — the caller is responsible for closing it
 * (see the error path in tcp_send(), which calls close(fd) right after). */
inline static void tcp_fd_cache_rm(struct fd_cache_entry* e)
{
	e->fd=-1;
}
  1433. inline static void tcp_fd_cache_add(struct tcp_connection *c, int fd)
  1434. {
  1435. int h;
  1436. h=c->id%TCP_FD_CACHE_SIZE;
  1437. if (likely(fd_cache[h].fd>0))
  1438. close(fd_cache[h].fd);
  1439. fd_cache[h].fd=fd;
  1440. fd_cache[h].id=c->id;
  1441. fd_cache[h].con=c;
  1442. }
  1443. #endif /* TCP_FD_CACHE */
  1444. inline static int tcpconn_chld_put(struct tcp_connection* tcpconn);
/* finds a tcpconn & sends on it
 * uses the dst members to, proto (TCP|TLS) and id and tries to send
 * from the "from" address (if non null and id==0)
 * returns: number of bytes written (>=0) on success
 *          <0 on error
 *
 * Overall flow:
 *  1. look the connection up by (ip, port) and/or id;
 *  2. if none exists, open a new one (either async via tcp_main with
 *     TCP_CONNECT_WAIT, or synchronously via tcpconn_connect());
 *  3. obtain an fd for the connection (own fd if we are the reader
 *     process, fd cache, or request it from tcp_main via CONN_GET_FD);
 *  4. write (non-blocking + write queue when TCP_BUF_WRITE is on,
 *     blocking otherwise), notifying tcp_main of queued writes/errors. */
int tcp_send(struct dest_info* dst, union sockaddr_union* from,
				char* buf, unsigned len)
{
	struct tcp_connection *c;
	struct tcp_connection *tmp;
	struct ip_addr ip;
	int port;
	int fd;
	long response[2]; /* (connection, command) pair sent to tcp_main */
	int n;
	int do_close_fd; /* whether fd must be closed before returning */
#ifdef TCP_BUF_WRITE
	int enable_write_watch; /* 1 => tell tcp_main to poll for writes */
#endif /* TCP_BUF_WRITE */
#ifdef TCP_FD_CACHE
	struct fd_cache_entry* fd_cache_e;
	int use_fd_cache;

	use_fd_cache=tcp_options.fd_cache;
	fd_cache_e=0;
#endif /* TCP_FD_CACHE */
	do_close_fd=1; /* close the fd on exit */
	port=su_getport(&dst->to);
	if (likely(port)){
		/* lookup by address (and id, if set) */
		su2ip_addr(&ip, &dst->to);
		c=tcpconn_get(dst->id, &ip, port, from, tcp_con_lifetime);
	}else if (likely(dst->id)){
		/* no destination address => lookup by id only */
		c=tcpconn_get(dst->id, 0, 0, 0, tcp_con_lifetime);
	}else{
		LOG(L_CRIT, "BUG: tcp_send called with null id & to\n");
		return -1;
	}

	if (likely(dst->id)){
		if (unlikely(c==0)) {
			if (likely(port)){
				/* try again w/o id */
				c=tcpconn_get(0, &ip, port, from, tcp_con_lifetime);
				goto no_id;
			}else{
				LOG(L_ERR, "ERROR: tcp_send: id %d not found, dropping\n",
						dst->id);
				return -1;
			}
		}else goto get_fd;
	}
no_id:
	if (unlikely(c==0)){
		DBG("tcp_send: no open tcp connection found, opening new one\n");
		/* create tcp connection */
		if (likely(from==0)){
			/* check to see if we have to use a specific source addr. */
			switch (dst->to.s.sa_family) {
				case AF_INET:
					from = tcp_source_ipv4;
					break;
#ifdef USE_IPV6
				case AF_INET6:
					from = tcp_source_ipv6;
					break;
#endif
				default:
					/* error, bad af, ignore ... */
					break;
			}
		}
#if defined(TCP_CONNECT_WAIT) && defined(TCP_BUF_WRITE)
		/* async connect path: create a pending connection, hash it, try a
		 * non-blocking connect+write and hand the fd to tcp_main */
		if (likely(tcp_options.tcp_connect_wait &&
					tcp_options.tcp_buf_write )){
			if (unlikely(*tcp_connections_no >= tcp_max_connections)){
				LOG(L_ERR, "ERROR: tcp_send %s: maximum number of"
						" connections exceeded (%d/%d)\n",
						su2a(&dst->to, sizeof(dst->to)),
						*tcp_connections_no, tcp_max_connections);
				return -1;
			}
			c=tcpconn_new(-1, &dst->to, from, 0, dst->proto,
							S_CONN_PENDING);
			if (unlikely(c==0)){
				LOG(L_ERR, "ERROR: tcp_send %s: could not create new"
						" connection\n",
						su2a(&dst->to, sizeof(dst->to)));
				return -1;
			}
			c->flags|=F_CONN_PENDING|F_CONN_FD_CLOSED;
			atomic_set(&c->refcnt, 2); /* ref from here and from main hash
										 table */
			/* add it to id hash and aliases */
			if (unlikely(tcpconn_add(c)==0)){
				LOG(L_ERR, "ERROR: tcp_send %s: could not add "
								"connection %p\n",
								su2a(&dst->to, sizeof(dst->to)),
								c);
				_tcpconn_free(c);
				n=-1;
				goto end_no_conn;
			}
			/* do connect and if src ip or port changed, update the
			 * aliases */
			if (unlikely((fd=tcpconn_finish_connect(c, from))<0)){
				/* tcpconn_finish_connect will automatically blacklist
				   on error => no need to do it here */
				LOG(L_ERR, "ERROR: tcp_send %s: tcpconn_finish_connect(%p)"
						" failed\n", su2a(&dst->to, sizeof(dst->to)),
						c);
				goto conn_wait_error;
			}
			/* ? TODO: it might be faster just to queue the write directly
			 *  and send to main CONN_NEW_PENDING_WRITE */
			/* delay sending the fd to main after the send */

			/* NOTE: no lock here, because the connection is marked as
			 * pending and nobody else will try to write on it. However
			 * this might produce out-of-order writes. If this is not
			 * desired either lock before the write or use
			 * _wbufq_insert(...) */
			n=_tcpconn_write_nb(fd, c, buf, len);
			if (unlikely(n<(int)len)){
				/* short write: EAGAIN or partial => queue the rest */
				if ((n>=0) || errno==EAGAIN || errno==EWOULDBLOCK){
					DBG("tcp_send: pending write on new connection %p "
						" (%d/%d bytes written)\n", c, n, len);
					if (n<0) n=0;
					/* add to the write queue */
					lock_get(&c->write_lock);
						if (unlikely(_wbufq_insert(c, buf+n, len-n)<0)){
							lock_release(&c->write_lock);
							n=-1;
							LOG(L_ERR, "ERROR: tcp_send %s: EAGAIN and"
									" write queue full or failed for %p\n",
									su2a(&dst->to, sizeof(dst->to)),
									c);
							goto conn_wait_error;
						}
					lock_release(&c->write_lock);
					/* send to tcp_main */
					response[0]=(long)c;
					response[1]=CONN_NEW_PENDING_WRITE;
					if (unlikely(send_fd(unix_tcp_sock, response,
											sizeof(response), fd) <= 0)){
						LOG(L_ERR, "BUG: tcp_send %s: "
									"CONN_NEW_PENDING_WRITE  for %p"
									" failed:" " %s (%d)\n",
									su2a(&dst->to, sizeof(dst->to)),
									c, strerror(errno), errno);
						goto conn_wait_error;
					}
					n=len;
					goto end;
				}
				/* n < 0 and a hard errno => real connect/send failure */
#ifdef USE_DST_BLACKLIST
				if (cfg_get(core, core_cfg, use_dst_blacklist))
					switch(errno){
						case ENETUNREACH:
						case ECONNRESET:
							/*case EHOSTUNREACH: -- not posix */
							/* if first write failed it's most likely a
							   connect error */
							dst_blacklist_add( BLST_ERR_CONNECT, dst, 0);
							break;
					}
#endif /* USE_DST_BLACKLIST */
				/* error: destroy it directly */
				LOG(L_ERR, "ERROR: tcp_send %s: connect & send "
								" for %p failed:" " %s (%d)\n",
								su2a(&dst->to, sizeof(dst->to)),
								c, strerror(errno), errno);
				goto conn_wait_error;
			}
			LOG(L_INFO, "tcp_send: quick connect for %p\n", c);
			/* send to tcp_main */
			response[0]=(long)c;
			response[1]=CONN_NEW_COMPLETE;
			if (unlikely(send_fd(unix_tcp_sock, response,
									sizeof(response), fd) <= 0)){
				LOG(L_ERR, "BUG: tcp_send %s: CONN_NEW_COMPLETE  for %p"
							" failed:" " %s (%d)\n",
							su2a(&dst->to, sizeof(dst->to)),
							c, strerror(errno), errno);
				goto conn_wait_error;
			}
			goto end;
		}
#endif /* TCP_CONNECT_WAIT && TCP_BUF_WRITE */
		/* synchronous (blocking) connect path */
		if (unlikely((c=tcpconn_connect(&dst->to, from, dst->proto))==0)){
			LOG(L_ERR, "ERROR: tcp_send %s: connect failed\n",
							su2a(&dst->to, sizeof(dst->to)));
			return -1;
		}
		atomic_set(&c->refcnt, 2); /* ref. from here and it will also
									  be added in the tcp_main hash */
		fd=c->s;
		c->flags|=F_CONN_FD_CLOSED; /* not yet opened in main */
		/* ? TODO: it might be faster just to queue the write and
		 * send to main a CONN_NEW_PENDING_WRITE */

		/* send the new tcpconn to "tcp main" */
		response[0]=(long)c;
		response[1]=CONN_NEW;
		n=send_fd(unix_tcp_sock, response, sizeof(response), c->s);
		if (unlikely(n<=0)){
			LOG(L_ERR, "BUG: tcp_send %s: failed send_fd: %s (%d)\n",
					su2a(&dst->to, sizeof(dst->to)),
					strerror(errno), errno);
			/* we can safely delete it, it's not referenced by anybody */
			_tcpconn_free(c);
			n=-1;
			goto end_no_conn;
		}
		goto send_it;
	}
get_fd:
#ifdef TCP_BUF_WRITE
	/* if data is already queued, we don't need the fd any more
	 * (tcp_main owns the fd and will flush the queue) */
	if (unlikely(tcp_options.tcp_buf_write && (_wbufq_non_empty(c)
#ifdef TCP_CONNECT_WAIT
					|| (c->state==S_CONN_PENDING)
#endif /* TCP_CONNECT_WAIT */
			) )){
		lock_get(&c->write_lock);
			/* recheck under the lock: the queue might have been flushed
			 * between the unlocked test and acquiring the lock */
			if (likely(_wbufq_non_empty(c)
#ifdef TCP_CONNECT_WAIT
					|| (c->state==S_CONN_PENDING)
#endif /* TCP_CONNECT_WAIT */
				)){
				do_close_fd=0;
				if (unlikely(_wbufq_add(c, buf, len)<0)){
					lock_release(&c->write_lock);
					n=-1;
					goto error;
				}
				n=len;
				lock_release(&c->write_lock);
				goto release_c;
			}
		lock_release(&c->write_lock);
	}
#endif /* TCP_BUF_WRITE */
	/* check if this is not the same reader process holding
	 *  c  and if so send directly on c->fd */
	if (c->reader_pid==my_pid()){
		DBG("tcp_send: send from reader (%d (%d)), reusing fd\n",
				my_pid(), process_no);
		fd=c->fd;
		do_close_fd=0; /* don't close the fd on exit, it's in use */
#ifdef TCP_FD_CACHE
		use_fd_cache=0; /* don't cache: problems would arise due to the
						   close() on cache eviction (if the fd is still
						   used). If it has to be cached then dup() _must_
						   be used */
	}else if (likely(use_fd_cache &&
							((fd_cache_e=tcp_fd_cache_get(c))!=0))){
		fd=fd_cache_e->fd;
		do_close_fd=0;
		DBG("tcp_send: found fd in cache ( %d, %p, %d)\n",
				fd, c, fd_cache_e->id);
#endif /* TCP_FD_CACHE */
	}else{
		/* neither reader-owned nor cached: ask tcp_main for a copy of
		 * the fd over the unix socket pair */
		DBG("tcp_send: tcp connection found (%p), acquiring fd\n", c);
		/* get the fd */
		response[0]=(long)c;
		response[1]=CONN_GET_FD;
		n=send_all(unix_tcp_sock, response, sizeof(response));
		if (unlikely(n<=0)){
			LOG(L_ERR, "BUG: tcp_send: failed to get fd(write):%s (%d)\n",
					strerror(errno), errno);
			n=-1;
			goto release_c;
		}
		DBG("tcp_send, c= %p, n=%d\n", c, n);
		n=receive_fd(unix_tcp_sock, &tmp, sizeof(tmp), &fd, MSG_WAITALL);
		if (unlikely(n<=0)){
			LOG(L_ERR, "BUG: tcp_send: failed to get fd(receive_fd):"
						" %s (%d)\n", strerror(errno), errno);
			n=-1;
			do_close_fd=0;
			goto release_c;
		}
		if (unlikely(c!=tmp)){
			/* answer for a different connection => protocol desync */
			LOG(L_CRIT, "BUG: tcp_send: get_fd: got different connection:"
					" %p (id= %d, refcnt=%d state=%d) != "
					" %p (n=%d)\n",
					c, c->id, atomic_get(&c->refcnt), c->state,
					tmp, n
				);
			n=-1; /* fail */
			goto end;
		}
		DBG("tcp_send: after receive_fd: c= %p n=%d fd=%d\n",c, n, fd);
	}

send_it:
	DBG("tcp_send: sending...\n");
	lock_get(&c->write_lock);
#ifdef TCP_BUF_WRITE
	if (likely(tcp_options.tcp_buf_write)){
		/* if a queue (re)appeared while acquiring the fd, append to it
		 * to preserve write ordering */
		if (_wbufq_non_empty(c)
#ifdef TCP_CONNECT_WAIT
			|| (c->state==S_CONN_PENDING)
#endif /* TCP_CONNECT_WAIT */
			){
			if (unlikely(_wbufq_add(c, buf, len)<0)){
				lock_release(&c->write_lock);
				n=-1;
				goto error;
			}
			lock_release(&c->write_lock);
			n=len;
			goto end;
		}
		n=_tcpconn_write_nb(fd, c, buf, len);
	}else{
#endif /* TCP_BUF_WRITE */
#ifdef USE_TLS
		if (c->type==PROTO_TLS)
			n=tls_blocking_write(c, fd, buf, len);
		else
#endif
			/* n=tcp_blocking_write(c, fd, buf, len); */
			n=tsend_stream(fd, buf, len, tcp_send_timeout*1000);
#ifdef TCP_BUF_WRITE
	}
#else /* ! TCP_BUF_WRITE */
	lock_release(&c->write_lock);
#endif /* TCP_BUF_WRITE */

	DBG("tcp_send: after real write: c= %p n=%d fd=%d\n",c, n, fd);
	DBG("tcp_send: buf=\n%.*s\n", (int)len, buf);
	if (unlikely(n<(int)len)){
#ifdef TCP_BUF_WRITE
		/* short write with buffered writes enabled: queue the remainder
		 * and, if the queue was empty before, ask tcp_main to watch the
		 * fd for writability */
		if (tcp_options.tcp_buf_write &&
				((n>=0) || errno==EAGAIN || errno==EWOULDBLOCK)){
			enable_write_watch=_wbufq_empty(c);
			if (n<0) n=0;
			if (unlikely(_wbufq_add(c, buf+n, len-n)<0)){
				lock_release(&c->write_lock);
				n=-1;
				goto error;
			}
			lock_release(&c->write_lock);
			n=len;
			if (likely(enable_write_watch)){
				response[0]=(long)c;
				response[1]=CONN_QUEUED_WRITE;
				if (send_all(unix_tcp_sock, response, sizeof(response)) <= 0){
					LOG(L_ERR, "BUG: tcp_send: error return failed "
								"(write):%s (%d)\n", strerror(errno), errno);
					n=-1;
					goto error;
				}
			}
			goto end;
		}else{
			lock_release(&c->write_lock);
		}
#endif /* TCP_BUF_WRITE */
#ifdef USE_DST_BLACKLIST
		if (cfg_get(core, core_cfg, use_dst_blacklist))
			switch(errno){
				case ENETUNREACH:
				case ECONNRESET:
					/*case EHOSTUNREACH: -- not posix */
					dst_blacklist_su((c->state==S_CONN_CONNECT)?
										BLST_ERR_CONNECT:
										BLST_ERR_SEND,
										c->rcv.proto,
										&c->rcv.src_su, 0);
					break;
			}
#endif /* USE_DST_BLACKLIST */
		LOG(L_ERR, "ERROR: tcp_send: failed to send on %p (%s:%d->%s): %s (%d)"
					"\n", c, ip_addr2a(&c->rcv.dst_ip), c->rcv.dst_port,
					su2a(&c->rcv.src_su, sizeof(c->rcv.src_su)),
					strerror(errno), errno);
#ifdef TCP_BUF_WRITE
error:
#endif /* TCP_BUF_WRITE */
		/* error on the connection , mark it as bad and set 0 timeout */
		c->state=S_CONN_BAD;
		c->timeout=get_ticks_raw();
		/* tell "main" it should drop this (optional it will t/o anyway?)*/
		response[0]=(long)c;
		response[1]=CONN_ERROR;
		if (send_all(unix_tcp_sock, response, sizeof(response))<=0){
			LOG(L_CRIT, "BUG: tcp_send: error return failed (write):%s (%d)\n",
					strerror(errno), errno);
			tcpconn_chld_put(c); /* deref. it manually */
			n=-1;
		}
		/* CONN_ERROR will auto-dec refcnt => we must not call tcpconn_put
		 * if it succeeds */
#ifdef TCP_FD_CACHE
		if (unlikely(fd_cache_e)){
			/* a cached fd failed => evict it and close it */
			LOG(L_ERR, "ERROR: tcp_send %s: error on cached fd, removing from"
					" the cache (%d, %p, %d)\n",
					su2a(&c->rcv.src_su, sizeof(c->rcv.src_su)),
					fd, fd_cache_e->con, fd_cache_e->id);
			tcp_fd_cache_rm(fd_cache_e);
			close(fd);
		}else
#endif /* TCP_FD_CACHE */
			if (do_close_fd) close(fd);
		return n; /* error return, no tcpconn_put */
	}

#ifdef TCP_BUF_WRITE
	lock_release(&c->write_lock);
	if (likely(tcp_options.tcp_buf_write)){
		/* first successful write => connection is established */
		if (unlikely(c->state==S_CONN_CONNECT))
			c->state=S_CONN_OK;
	}
#endif /* TCP_BUF_WRITE */
end:
#ifdef TCP_FD_CACHE
	if (unlikely((fd_cache_e==0) && use_fd_cache)){
		/* keep the fd for future sends instead of closing it */
		tcp_fd_cache_add(c, fd);
	}else
#endif /* TCP_FD_CACHE */
		if (do_close_fd) close(fd);
release_c:
	tcpconn_chld_put(c); /* release c (dec refcnt & free on 0) */
end_no_conn:
	return n;
#ifdef TCP_CONNECT_WAIT
conn_wait_error:
	/* connect or send failed on newly created connection which was not
	 * yet sent to tcp_main (but was already hashed) => don't send to main,
	 * unhash and destroy directly (if refcnt>2 it will be destroyed when the
	 * last sender releases the connection (tcpconn_chld_put(c))) or when
	 * tcp_main receives a CONN_ERROR it*/
	c->state=S_CONN_BAD;
	TCPCONN_LOCK;
		if (c->flags & F_CONN_HASHED){
			/* if some other parallel tcp_send did send CONN_ERROR to
			 * tcp_main, the connection might be already detached */
			_tcpconn_detach(c);
			c->flags&=~F_CONN_HASHED;
			TCPCONN_UNLOCK;
			tcpconn_put(c);
		}else
			TCPCONN_UNLOCK;
	/* dec refcnt -> mark it for destruction */
	tcpconn_chld_put(c);
	return -1;
#endif /* TCP_CONNET_WAIT */
}
/* initializes a TCP listen socket for sock_info:
 * creates the socket, applies the socket options below (optional
 * TCP_NODELAY, SO_REUSEADDR, IP_TOS, optional TCP_DEFER_ACCEPT /
 * TCP_SYNCNT / TCP_LINGER2, keepalive, optional accept filter),
 * then binds and listens.
 * returns 0 on success, -1 on error (the socket, if created, is closed
 * and reset to -1 on the error path) */
int tcp_init(struct socket_info* sock_info)
{
	union sockaddr_union* addr;
	int optval;
#ifdef HAVE_TCP_ACCEPT_FILTER
	struct accept_filter_arg afa;
#endif /* HAVE_TCP_ACCEPT_FILTER */
#ifdef DISABLE_NAGLE
	int flag;
	struct protoent* pe;

	/* look the TCP protocol number up once and cache it in tcp_proto_no */
	if (tcp_proto_no==-1){ /* if not already set */
		pe=getprotobyname("tcp");
		if (pe==0){
			LOG(L_ERR, "ERROR: tcp_init: could not get TCP protocol number\n");
			tcp_proto_no=-1;
		}else{
			tcp_proto_no=pe->p_proto;
		}
	}
#endif

	addr=&sock_info->su;
	/* sock_info->proto=PROTO_TCP; */
	if (init_su(addr, &sock_info->address, sock_info->port_no)<0){
		LOG(L_ERR, "ERROR: tcp_init: could no init sockaddr_union\n");
		goto error;
	}
	DBG("tcp_init: added %s\n", su2a(addr, sizeof(*addr)));
	sock_info->socket=socket(AF2PF(addr->s.sa_family), SOCK_STREAM, 0);
	if (sock_info->socket==-1){
		LOG(L_ERR, "ERROR: tcp_init: socket: %s\n", strerror(errno));
		goto error;
	}
#ifdef DISABLE_NAGLE
	/* disable Nagle's algorithm (lower latency for small writes);
	 * non-fatal if it fails */
	flag=1;
	if ( (tcp_proto_no!=-1) &&
		 (setsockopt(sock_info->socket, tcp_proto_no , TCP_NODELAY,
					&flag, sizeof(flag))<0) ){
		LOG(L_ERR, "ERROR: tcp_init: could not disable Nagle: %s\n",
				strerror(errno));
	}
#endif
#if  !defined(TCP_DONT_REUSEADDR)
	/* Stevens, "Network Programming", Section 7.5, "Generic Socket
     * Options": "...server started,..a child continues..on existing
	 * connection..listening server is restarted...call to bind fails
	 * ... ALL TCP servers should specify the SO_REUSEADDRE option
	 * to allow the server to be restarted in this situation
	 *
	 * Indeed, without this option, the server can't restart.
	 *   -jiri
	 */
	optval=1;
	if (setsockopt(sock_info->socket, SOL_SOCKET, SO_REUSEADDR,
				(void*)&optval, sizeof(optval))==-1) {
		LOG(L_ERR, "ERROR: tcp_init: setsockopt %s\n",
			strerror(errno));
		goto error;
	}
#endif
	/* tos */
	optval = tos;
	if (setsockopt(sock_info->socket, IPPROTO_IP, IP_TOS, (void*)&optval,
				sizeof(optval)) ==-1){
		LOG(L_WARN, "WARNING: tcp_init: setsockopt tos: %s\n", strerror(errno));
		/* continue since this is not critical */
	}
#ifdef HAVE_TCP_DEFER_ACCEPT
	/* linux only: wake accept() only when data has arrived */
	if (tcp_options.defer_accept){
		optval=tcp_options.defer_accept;
		if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_DEFER_ACCEPT,
					(void*)&optval, sizeof(optval)) ==-1){
			LOG(L_WARN, "WARNING: tcp_init: setsockopt TCP_DEFER_ACCEPT %s\n",
						strerror(errno));
		/* continue since this is not critical */
		}
	}
#endif /* HAVE_TCP_DEFFER_ACCEPT */
#ifdef HAVE_TCP_SYNCNT
	/* limit the number of SYN retransmits (faster connect timeout) */
	if (tcp_options.syncnt){
		optval=tcp_options.syncnt;
		if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_SYNCNT, &optval,
						sizeof(optval))<0){
			LOG(L_WARN, "WARNING: tcp_init: failed to set"
						" maximum SYN retr. count: %s\n", strerror(errno));
		}
	}
#endif
#ifdef HAVE_TCP_LINGER2
	/* limit the FIN_WAIT2 lifetime */
	if (tcp_options.linger2){
		optval=tcp_options.linger2;
		if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_LINGER2, &optval,
						sizeof(optval))<0){
			LOG(L_WARN, "WARNING: tcp_init: failed to set"
						" maximum LINGER2 timeout: %s\n", strerror(errno));
		}
	}
#endif
	init_sock_keepalive(sock_info->socket);
	if (bind(sock_info->socket, &addr->s, sockaddru_len(*addr))==-1){
		LOG(L_ERR, "ERROR: tcp_init: bind(%x, %p, %d) on %s:%d : %s\n",
				sock_info->socket,  &addr->s,
				(unsigned)sockaddru_len(*addr),
				sock_info->address_str.s,
				sock_info->port_no,
				strerror(errno));
		goto error;
	}
	if (listen(sock_info->socket, TCP_LISTEN_BACKLOG)==-1){
		LOG(L_ERR, "ERROR: tcp_init: listen(%x, %p, %d) on %s: %s\n",
				sock_info->socket, &addr->s,
				(unsigned)sockaddru_len(*addr),
				sock_info->address_str.s,
				strerror(errno));
		goto error;
	}
#ifdef HAVE_TCP_ACCEPT_FILTER
	/* freebsd equivalent of TCP_DEFER_ACCEPT (must be set after listen) */
	if (tcp_options.defer_accept){
		memset(&afa, 0, sizeof(afa));
		strcpy(afa.af_name, "dataready");
		if (setsockopt(sock_info->socket, SOL_SOCKET, SO_ACCEPTFILTER,
					(void*)&afa, sizeof(afa)) ==-1){
			LOG(L_WARN, "WARNING: tcp_init: setsockopt SO_ACCEPTFILTER %s\n",
						strerror(errno));
		/* continue since this is not critical */
		}
	}
#endif /* HAVE_TCP_ACCEPT_FILTER */
	return 0;
error:
	if (sock_info->socket!=-1){
		close(sock_info->socket);
		sock_info->socket=-1;
	}
	return -1;
}
  2025. /* close tcp_main's fd from a tcpconn
  2026. * WARNING: call only in tcp_main context */
  2027. inline static void tcpconn_close_main_fd(struct tcp_connection* tcpconn)
  2028. {
  2029. int fd;
  2030. fd=tcpconn->s;
  2031. #ifdef USE_TLS
  2032. /*FIXME: lock ->writelock ? */
  2033. if (tcpconn->type==PROTO_TLS)
  2034. tls_close(tcpconn, fd);
  2035. #endif
  2036. #ifdef TCP_FD_CACHE
  2037. if (likely(tcp_options.fd_cache)) shutdown(fd, SHUT_RDWR);
  2038. #endif /* TCP_FD_CACHE */
  2039. close_again:
  2040. if (unlikely(close(fd)<0)){
  2041. if (errno==EINTR)
  2042. goto close_again;
  2043. LOG(L_ERR, "ERROR: tcpconn_put_destroy; close() failed: %s (%d)\n",
  2044. strerror(errno), errno);
  2045. }
  2046. }
/* dec refcnt & frees the connection if refcnt==0
 * returns 1 if the connection is freed, 0 otherwise
 *
 * WARNING: use only from child processes */
inline static int tcpconn_chld_put(struct tcp_connection* tcpconn)
{
	if (unlikely(atomic_dec_and_test(&tcpconn->refcnt))){
		DBG("tcpconn_chld_put: destroying connection %p (%d, %d) "
				"flags %04x\n", tcpconn, tcpconn->id,
				tcpconn->s, tcpconn->flags);
		/* sanity checks */
		/* a child may only free a connection whose fd is already closed
		 * and which is no longer hashed, timed or watched by tcp_main;
		 * anything else means a refcounting bug => abort */
		membar_read_atomic_op(); /* make sure we see the current flags */
		if (unlikely(!(tcpconn->flags & F_CONN_FD_CLOSED) ||
			(tcpconn->flags &
				(F_CONN_HASHED|F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WRITE_W))
			)){
			LOG(L_CRIT, "BUG: tcpconn_chld_put: %p bad flags = %0x\n",
					tcpconn, tcpconn->flags);
			abort();
		}
		_tcpconn_free(tcpconn); /* destroys also the wbuf_q if still present*/
		return 1;
	}
	return 0;
}
/* simple destroy function (the connection should be already removed
 * from the hashes and the fds should not be watched anymore for IO)
 * Closes the main fd (if still open), decrements the global connection
 * counter and frees the connection unconditionally (no refcnt check —
 * compare with tcpconn_put_destroy()).
 */
inline static void tcpconn_destroy(struct tcp_connection* tcpconn)
{
	DBG("tcpconn_destroy: destroying connection %p (%d, %d) "
			"flags %04x\n", tcpconn, tcpconn->id,
			tcpconn->s, tcpconn->flags);
	if (unlikely(tcpconn->flags & F_CONN_HASHED)){
		/* should never happen: the caller must unhash first; recover by
		 * removing the timer and detaching under the lock */
		LOG(L_CRIT, "BUG: tcpconn_destroy: called with hashed"
					" connection (%p)\n", tcpconn);
		/* try to continue */
		if (likely(tcpconn->flags & F_CONN_MAIN_TIMER))
			local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
		TCPCONN_LOCK;
			_tcpconn_detach(tcpconn);
		TCPCONN_UNLOCK;
	}
	if (likely(!(tcpconn->flags & F_CONN_FD_CLOSED))){
		tcpconn_close_main_fd(tcpconn);
		(*tcp_connections_no)--;
	}
	_tcpconn_free(tcpconn); /* destroys also the wbuf_q if still present*/
}
  2095. /* tries to destroy the connection: dec. refcnt and if 0 destroys the
  2096. * connection, else it will mark it as BAD and close the main fds
  2097. *
  2098. * returns 1 if the connection was destroyed, 0 otherwise
  2099. *
  2100. * WARNING: - the connection _has_ to be removed from the hash and timer
  2101. * first (use tcpconn_try_unhash() for this )
  2102. * - the fd should not be watched anymore (io_watch_del()...)
  2103. * - must be called _only_ from the tcp_main process context
  2104. * (or else the fd will remain open)
  2105. */
  2106. inline static int tcpconn_put_destroy(struct tcp_connection* tcpconn)
  2107. {
  2108. if (unlikely((tcpconn->flags &
  2109. (F_CONN_WRITE_W|F_CONN_HASHED|F_CONN_MAIN_TIMER|F_CONN_READ_W)) )){
  2110. /* sanity check */
  2111. if (unlikely(tcpconn->flags & F_CONN_HASHED)){
  2112. LOG(L_CRIT, "BUG: tcpconn_destroy: called with hashed and/or"
  2113. "on timer connection (%p), flags = %0x\n",
  2114. tcpconn, tcpconn->flags);
  2115. /* try to continue */
  2116. if (likely(tcpconn->flags & F_CONN_MAIN_TIMER))
  2117. local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
  2118. TCPCONN_LOCK;
  2119. _tcpconn_detach(tcpconn);
  2120. TCPCONN_UNLOCK;
  2121. }else{
  2122. LOG(L_CRIT, "BUG: tcpconn_put_destroy: %p flags = %0x\n",
  2123. tcpconn, tcpconn->flags);
  2124. }
  2125. }
  2126. tcpconn->state=S_CONN_BAD;
  2127. /* in case it's still in a reader timer */
  2128. tcpconn->timeout=get_ticks_raw();
  2129. /* fast close: close fds now */
  2130. if (likely(!(tcpconn->flags & F_CONN_FD_CLOSED))){
  2131. tcpconn_close_main_fd(tcpconn);
  2132. tcpconn->flags|=F_CONN_FD_CLOSED;
  2133. (*tcp_connections_no)--;
  2134. }
  2135. /* all the flags / ops on the tcpconn must be done prior to decrementing
  2136. * the refcnt. and at least a membar_write_atomic_op() mem. barrier or
  2137. * a mb_atomic_* op must * be used to make sure all the changed flags are
  2138. * written into memory prior to the new refcnt value */
  2139. if (unlikely(mb_atomic_dec_and_test(&tcpconn->refcnt))){
  2140. _tcpconn_free(tcpconn);
  2141. return 1;
  2142. }
  2143. return 0;
  2144. }
/* try to remove a connection from the hashes and timer.
 * returns 1 if the connection was removed, 0 if not (connection not in
 * hash)
 *
 * WARNING: call it only in the  tcp_main process context or else the
 *  timer removal won't work.
 */
inline static int tcpconn_try_unhash(struct tcp_connection* tcpconn)
{
	if (likely(tcpconn->flags & F_CONN_HASHED)){
		tcpconn->state=S_CONN_BAD;
		if (likely(tcpconn->flags & F_CONN_MAIN_TIMER)){
			local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
			tcpconn->flags&=~F_CONN_MAIN_TIMER;
		}else
			/* in case it's still in a reader timer */
			tcpconn->timeout=get_ticks_raw();
		TCPCONN_LOCK;
			/* re-check the flag under the lock: a parallel tcp_send error
			 * path may have unhashed the connection in the meantime */
			if (tcpconn->flags & F_CONN_HASHED){
				tcpconn->flags&=~F_CONN_HASHED;
				_tcpconn_detach(tcpconn);
				TCPCONN_UNLOCK;
			}else{
				/* tcp_send was faster and did unhash it itself */
				TCPCONN_UNLOCK;
				return 0;
			}
#ifdef TCP_BUF_WRITE
		/* empty possible write buffers (optional) */
		if (unlikely(_wbufq_non_empty(tcpconn))){
			lock_get(&tcpconn->write_lock);
				/* check again, while holding the lock */
				if (likely(_wbufq_non_empty(tcpconn)))
					_wbufq_destroy(&tcpconn->wbuf_q);
			lock_release(&tcpconn->write_lock);
		}
#endif /* TCP_BUF_WRITE */
		return 1;
	}
	return 0;
}
  2186. #ifdef SEND_FD_QUEUE
/* one queued "pass this connection's fd to a child" request */
struct send_fd_info{
	struct tcp_connection* tcp_conn; /* connection whose fd is to be sent */
	ticks_t expire;                  /* give up if not sent by this tick */
	int unix_sock;                   /* destination child's unix socket */
	unsigned int retries; /* debugging */
};

/* growable array used as a retry queue for send_fd() calls that failed
 * with EAGAIN/EWOULDBLOCK (see send_fd_queue_add()/send_fd_queue_run()) */
struct tcp_send_fd_q{
	struct send_fd_info* data; /* buffer */
	struct send_fd_info* crt;  /* pointer inside the buffer */
	struct send_fd_info* end;  /* points after the last valid position */
};

/* queue of fds waiting to be re-sent from tcp_main to child processes */
static struct tcp_send_fd_q send2child_q;
  2199. static int send_fd_queue_init(struct tcp_send_fd_q *q, unsigned int size)
  2200. {
  2201. q->data=pkg_malloc(size*sizeof(struct send_fd_info));
  2202. if (q->data==0){
  2203. LOG(L_ERR, "ERROR: send_fd_queue_init: out of memory\n");
  2204. return -1;
  2205. }
  2206. q->crt=&q->data[0];
  2207. q->end=&q->data[size];
  2208. return 0;
  2209. }
  2210. static void send_fd_queue_destroy(struct tcp_send_fd_q *q)
  2211. {
  2212. if (q->data){
  2213. pkg_free(q->data);
  2214. q->data=0;
  2215. q->crt=q->end=0;
  2216. }
  2217. }
  2218. static int init_send_fd_queues()
  2219. {
  2220. if (send_fd_queue_init(&send2child_q, SEND_FD_QUEUE_SIZE)!=0)
  2221. goto error;
  2222. return 0;
  2223. error:
  2224. LOG(L_ERR, "ERROR: init_send_fd_queues: init failed\n");
  2225. return -1;
  2226. }
/* frees all the fd send retry queues (counterpart of init_send_fd_queues) */
static void destroy_send_fd_queues()
{
	send_fd_queue_destroy(&send2child_q);
}
/* appends a (connection, destination child socket) pair to the retry queue,
 * growing the backing array (doubling, capped at MAX_SEND_FD_QUEUE_SIZE)
 * when it's full.
 * params: q         - the queue
 *         unix_sock - tcp child unix socket the fd must be sent on
 *         t         - connection whose fd will be (re)sent
 * returns 0 on success, -1 on error (queue at max capacity or OOM) */
inline static int send_fd_queue_add( struct tcp_send_fd_q* q,
									int unix_sock,
									struct tcp_connection *t)
{
	struct send_fd_info* tmp;
	unsigned long new_size;
	
	if (q->crt>=q->end){
		/* queue full => grow it: double the capacity, but never beyond
		 * MAX_SEND_FD_QUEUE_SIZE */
		new_size=q->end-&q->data[0];
		if (new_size< MAX_SEND_FD_QUEUE_SIZE/2){
			new_size*=2;
		}else new_size=MAX_SEND_FD_QUEUE_SIZE;
		/* already at (or somehow past) the maximum => refuse */
		if (unlikely(q->crt>=&q->data[new_size])){
			LOG(L_ERR, "ERROR: send_fd_queue_add: queue full: %ld/%ld\n",
					(long)(q->crt-&q->data[0]-1), new_size);
			goto error;
		}
		LOG(L_CRIT, "INFO: send_fd_queue: queue full: %ld, extending to %ld\n",
				(long)(q->end-&q->data[0]), new_size);
		tmp=pkg_realloc(q->data, new_size*sizeof(struct send_fd_info));
		if (unlikely(tmp==0)){
			LOG(L_ERR, "ERROR: send_fd_queue_add: out of memory\n");
			goto error;
		}
		/* rebase crt onto the (possibly moved) new buffer before
		 * overwriting q->data */
		q->crt=(q->crt-&q->data[0])+tmp;
		q->data=tmp;
		q->end=&q->data[new_size];
	}
	/* fill the next free slot; expire bounds how long we keep retrying */
	q->crt->tcp_conn=t;
	q->crt->unix_sock=unix_sock;
	q->crt->expire=get_ticks_raw()+SEND_FD_QUEUE_TIMEOUT;
	q->crt->retries=0;
	q->crt++;
	return 0;
error:
	return -1;
}
/* retries all the queued fd sends; entries that still can't be sent
 * (EAGAIN) and have not expired are compacted in place and kept for a
 * later run; entries that fail for any other reason (or expired) are
 * dropped and their connection is torn down.
 * params: q - the retry queue (scanned from data to crt) */
inline static void send_fd_queue_run(struct tcp_send_fd_q* q)
{
	struct send_fd_info* p;
	struct send_fd_info* t;
	
	/* p scans all pending entries, t is the write cursor used to compact
	 * the survivors to the front of the array */
	for (p=t=&q->data[0]; p<q->crt; p++){
		if (unlikely(send_fd(p->unix_sock, &(p->tcp_conn),
					sizeof(struct tcp_connection*), p->tcp_conn->s)<=0)){
			if ( ((errno==EAGAIN)||(errno==EWOULDBLOCK)) &&
							((s_ticks_t)(p->expire-get_ticks_raw())>0)){
				/* leave in queue for a future try */
				*t=*p;
				t->retries++;
				t++;
			}else{
				/* hard error or entry expired => give up on this send */
				LOG(L_ERR, "ERROR: run_send_fd_queue: send_fd failed"
						   " on socket %d , queue entry %ld, retries %d,"
						   " connection %p, tcp socket %d, errno=%d (%s) \n",
						   p->unix_sock, (long)(p-&q->data[0]), p->retries,
						   p->tcp_conn, p->tcp_conn->s, errno,
						   strerror(errno));
#ifdef TCP_BUF_WRITE
				/* stop watching for writes before destroying */
				if (p->tcp_conn->flags & F_CONN_WRITE_W){
					io_watch_del(&io_h, p->tcp_conn->s, -1, IO_FD_CLOSING);
					p->tcp_conn->flags &=~F_CONN_WRITE_W;
				}
#endif
				/* it never reached a reader */
				p->tcp_conn->flags &= ~F_CONN_READER;
				/* drop the hash reference (if we won the unhash race)
				 * and then the reader's reference */
				if (likely(tcpconn_try_unhash(p->tcp_conn)))
					tcpconn_put(p->tcp_conn);
				tcpconn_put_destroy(p->tcp_conn); /* dec refcnt & destroy */
			}
		}
	}
	/* entries in [data, t) survived; truncate the queue there */
	q->crt=t;
}
  2302. #else
  2303. #define send_fd_queue_run(q)
  2304. #endif
  2305. /* non blocking write() on a tcpconnection, unsafe version (should be called
  2306. * while holding c->write_lock). The fd should be non-blocking.
  2307. * returns number of bytes written on success, -1 on error (and sets errno)
  2308. */
  2309. inline static int _tcpconn_write_nb(int fd, struct tcp_connection* c,
  2310. char* buf, int len)
  2311. {
  2312. int n;
  2313. again:
  2314. #ifdef USE_TLS
  2315. if (unlikely(c->type==PROTO_TLS))
  2316. /* FIXME: tls_nonblocking_write !! */
  2317. n=tls_blocking_write(c, fd, buf, len);
  2318. else
  2319. #endif /* USE_TLS */
  2320. n=send(fd, buf, len,
  2321. #ifdef HAVE_MSG_NOSIGNAL
  2322. MSG_NOSIGNAL
  2323. #else
  2324. 0
  2325. #endif /* HAVE_MSG_NOSIGNAL */
  2326. );
  2327. if (unlikely(n<0)){
  2328. if (errno==EINTR) goto again;
  2329. }
  2330. return n;
  2331. }
  2332. /* handles io from a tcp child process
  2333. * params: tcp_c - pointer in the tcp_children array, to the entry for
  2334. * which an io event was detected
  2335. * fd_i - fd index in the fd_array (usefull for optimizing
  2336. * io_watch_deletes)
  2337. * returns: handle_* return convention: -1 on error, 0 on EAGAIN (no more
  2338. * io events queued), >0 on success. success/error refer only to
  2339. * the reads from the fd.
  2340. */
  2341. inline static int handle_tcp_child(struct tcp_child* tcp_c, int fd_i)
  2342. {
  2343. struct tcp_connection* tcpconn;
  2344. long response[2];
  2345. int cmd;
  2346. int bytes;
  2347. int n;
  2348. ticks_t t;
  2349. ticks_t crt_timeout;
  2350. if (unlikely(tcp_c->unix_sock<=0)){
  2351. /* (we can't have a fd==0, 0 is never closed )*/
  2352. LOG(L_CRIT, "BUG: handle_tcp_child: fd %d for %d "
  2353. "(pid %d, ser no %d)\n", tcp_c->unix_sock,
  2354. (int)(tcp_c-&tcp_children[0]), tcp_c->pid, tcp_c->proc_no);
  2355. goto error;
  2356. }
  2357. /* read until sizeof(response)
  2358. * (this is a SOCK_STREAM so read is not atomic) */
  2359. bytes=recv_all(tcp_c->unix_sock, response, sizeof(response), MSG_DONTWAIT);
  2360. if (unlikely(bytes<(int)sizeof(response))){
  2361. if (bytes==0){
  2362. /* EOF -> bad, child has died */
  2363. DBG("DBG: handle_tcp_child: dead tcp child %d (pid %d, no %d)"
  2364. " (shutting down?)\n", (int)(tcp_c-&tcp_children[0]),
  2365. tcp_c->pid, tcp_c->proc_no );
  2366. /* don't listen on it any more */
  2367. io_watch_del(&io_h, tcp_c->unix_sock, fd_i, 0);
  2368. goto error; /* eof. so no more io here, it's ok to return error */
  2369. }else if (bytes<0){
  2370. /* EAGAIN is ok if we try to empty the buffer
  2371. * e.g.: SIGIO_RT overflow mode or EPOLL ET */
  2372. if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
  2373. LOG(L_CRIT, "ERROR: handle_tcp_child: read from tcp child %ld "
  2374. " (pid %d, no %d) %s [%d]\n",
  2375. (long)(tcp_c-&tcp_children[0]), tcp_c->pid,
  2376. tcp_c->proc_no, strerror(errno), errno );
  2377. }else{
  2378. bytes=0;
  2379. }
  2380. /* try to ignore ? */
  2381. goto end;
  2382. }else{
  2383. /* should never happen */
  2384. LOG(L_CRIT, "BUG: handle_tcp_child: too few bytes received (%d)\n",
  2385. bytes );
  2386. bytes=0; /* something was read so there is no error; otoh if
  2387. receive_fd returned less then requested => the receive
  2388. buffer is empty => no more io queued on this fd */
  2389. goto end;
  2390. }
  2391. }
  2392. DBG("handle_tcp_child: reader response= %lx, %ld from %d \n",
  2393. response[0], response[1], (int)(tcp_c-&tcp_children[0]));
  2394. cmd=response[1];
  2395. tcpconn=(struct tcp_connection*)response[0];
  2396. if (unlikely(tcpconn==0)){
  2397. /* should never happen */
  2398. LOG(L_CRIT, "BUG: handle_tcp_child: null tcpconn pointer received"
  2399. " from tcp child %d (pid %d): %lx, %lx\n",
  2400. (int)(tcp_c-&tcp_children[0]), tcp_c->pid,
  2401. response[0], response[1]) ;
  2402. goto end;
  2403. }
  2404. switch(cmd){
  2405. case CONN_RELEASE:
  2406. tcp_c->busy--;
  2407. if (unlikely(tcpconn_put(tcpconn))){
  2408. tcpconn_destroy(tcpconn);
  2409. break;
  2410. }
  2411. if (unlikely(tcpconn->state==S_CONN_BAD)){
  2412. #ifdef TCP_BUF_WRITE
  2413. if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
  2414. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  2415. tcpconn->flags &= ~F_CONN_WRITE_W;
  2416. }
  2417. #endif /* TCP_BUF_WRITE */
  2418. if (tcpconn_try_unhash(tcpconn))
  2419. tcpconn_put_destroy(tcpconn);
  2420. break;
  2421. }
  2422. /* update the timeout*/
  2423. t=get_ticks_raw();
  2424. tcpconn->timeout=t+tcp_con_lifetime;
  2425. crt_timeout=tcp_con_lifetime;
  2426. #ifdef TCP_BUF_WRITE
  2427. if (unlikely(tcp_options.tcp_buf_write &&
  2428. _wbufq_non_empty(tcpconn) )){
  2429. if (unlikely(TICKS_GE(t, tcpconn->wbuf_q.wr_timeout))){
  2430. DBG("handle_tcp_child: wr. timeout on CONN_RELEASE for %p "
  2431. "refcnt= %d\n", tcpconn,
  2432. atomic_get(&tcpconn->refcnt));
  2433. /* timeout */
  2434. #ifdef USE_DST_BLACKLIST
  2435. if (cfg_get(core, core_cfg, use_dst_blacklist))
  2436. dst_blacklist_su((tcpconn->state==S_CONN_CONNECT)?
  2437. BLST_ERR_CONNECT:
  2438. BLST_ERR_SEND,
  2439. tcpconn->rcv.proto,
  2440. &tcpconn->rcv.src_su, 0);
  2441. #endif /* USE_DST_BLACKLIST */
  2442. if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
  2443. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  2444. tcpconn->flags&=~F_CONN_WRITE_W;
  2445. }
  2446. if (tcpconn_try_unhash(tcpconn))
  2447. tcpconn_put_destroy(tcpconn);
  2448. break;
  2449. }else{
  2450. crt_timeout=MIN_unsigned(tcp_con_lifetime,
  2451. tcpconn->wbuf_q.wr_timeout-t);
  2452. }
  2453. }
  2454. #endif /* TCP_BUF_WRITE */
  2455. /* re-activate the timer */
  2456. tcpconn->timer.f=tcpconn_main_timeout;
  2457. local_timer_reinit(&tcpconn->timer);
  2458. local_timer_add(&tcp_main_ltimer, &tcpconn->timer, crt_timeout, t);
  2459. /* must be after the de-ref*/
  2460. tcpconn->flags|=(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD);
  2461. tcpconn->flags&=~(F_CONN_READER|F_CONN_OOB_DATA);
  2462. #ifdef TCP_BUF_WRITE
  2463. if (unlikely(tcpconn->flags & F_CONN_WRITE_W))
  2464. n=io_watch_chg(&io_h, tcpconn->s, POLLIN| POLLOUT, -1);
  2465. else
  2466. #endif /* TCP_BUF_WRITE */
  2467. n=io_watch_add(&io_h, tcpconn->s, POLLIN, F_TCPCONN, tcpconn);
  2468. if (unlikely(n<0)){
  2469. LOG(L_CRIT, "ERROR: tcp_main: handle_tcp_child: failed to add"
  2470. " new socket to the fd list\n");
  2471. tcpconn->flags&=~F_CONN_READ_W;
  2472. #ifdef TCP_BUF_WRITE
  2473. if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
  2474. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  2475. tcpconn->flags&=~F_CONN_WRITE_W;
  2476. }
  2477. #endif /* TCP_BUF_WRITE */
  2478. if (tcpconn_try_unhash(tcpconn));
  2479. tcpconn_put_destroy(tcpconn);
  2480. break;
  2481. }
  2482. DBG("handle_tcp_child: CONN_RELEASE %p refcnt= %d\n",
  2483. tcpconn, atomic_get(&tcpconn->refcnt));
  2484. break;
  2485. case CONN_ERROR:
  2486. case CONN_DESTROY:
  2487. case CONN_EOF:
  2488. /* WARNING: this will auto-dec. refcnt! */
  2489. tcp_c->busy--;
  2490. /* main doesn't listen on it => we don't have to delete it
  2491. if (tcpconn->s!=-1)
  2492. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  2493. */
  2494. #ifdef TCP_BUF_WRITE
  2495. if ((tcpconn->flags & F_CONN_WRITE_W) && (tcpconn->s!=-1)){
  2496. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  2497. tcpconn->flags&=~F_CONN_WRITE_W;
  2498. }
  2499. #endif /* TCP_BUF_WRITE */
  2500. if (tcpconn_try_unhash(tcpconn))
  2501. tcpconn_put(tcpconn);
  2502. tcpconn_put_destroy(tcpconn); /* deref & delete if refcnt==0 */
  2503. break;
  2504. default:
  2505. LOG(L_CRIT, "BUG: handle_tcp_child: unknown cmd %d"
  2506. " from tcp reader %d\n",
  2507. cmd, (int)(tcp_c-&tcp_children[0]));
  2508. }
  2509. end:
  2510. return bytes;
  2511. error:
  2512. return -1;
  2513. }
/* handles io from a "generic" ser process (get fd or new_fd from a tcp_send)
 *
 * params: p - pointer in the ser processes array (pt[]), to the entry for
 *             which an io event was detected
 *         fd_i - fd index in the fd_array (usefull for optimizing
 *                io_watch_deletes)
 * returns: handle_* return convention:
 *          -1 on error reading from the fd,
 *           0 on EAGAIN or when no more io events are queued
 *            (receive buffer empty),
 *          >0 on successfull reads from the fd (the receive buffer might
 *            be non-empty).
 */
inline static int handle_ser_child(struct process_table* p, int fd_i)
{
	struct tcp_connection* tcpconn;
	long response[2];
	int cmd;
	int bytes;
	int ret;
	int fd;
	int flags;
	ticks_t t;
#ifdef TCP_BUF_WRITE
	ticks_t nxt_timeout;
#endif /* TCP_BUF_WRITE */
	
	ret=-1;
	if (unlikely(p->unix_sock<=0)){
		/* (we can't have a fd==0, 0 is never closed )*/
		LOG(L_CRIT, "BUG: handle_ser_child: fd %d for %d "
				"(pid %d)\n", p->unix_sock, (int)(p-&pt[0]), p->pid);
		goto error;
	}
	/* get all bytes and the fd (if transmitted)
	 * (this is a SOCK_STREAM so read is not atomic) */
	bytes=receive_fd(p->unix_sock, response, sizeof(response), &fd,
						MSG_DONTWAIT);
	if (unlikely(bytes<(int)sizeof(response))){
		/* too few bytes read */
		if (bytes==0){
			/* EOF -> bad, child has died */
			DBG("DBG: handle_ser_child: dead child %d, pid %d"
					" (shutting down?)\n", (int)(p-&pt[0]), p->pid);
			/* don't listen on it any more */
			io_watch_del(&io_h, p->unix_sock, fd_i, 0);
			goto error; /* child dead => no further io events from it */
		}else if (bytes<0){
			/* EAGAIN is ok if we try to empty the buffer
			 * e.g: SIGIO_RT overflow mode or EPOLL ET */
			if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
				LOG(L_CRIT, "ERROR: handle_ser_child: read from child %d  "
						"(pid %d):  %s [%d]\n", (int)(p-&pt[0]), p->pid,
						strerror(errno), errno);
				ret=-1;
			}else{
				ret=0;
			}
			/* try to ignore ? */
			goto end;
		}else{
			/* should never happen */
			LOG(L_CRIT, "BUG: handle_ser_child: too few bytes received (%d)\n",
					bytes );
			ret=0; /* something was read so there is no error; otoh if
					  receive_fd returned less then requested => the receive
					  buffer is empty => no more io queued on this fd */
			goto end;
		}
	}
	ret=1; /* something was received, there might be more queued */
	DBG("handle_ser_child: read response= %lx, %ld, fd %d from %d (%d)\n",
					response[0], response[1], fd, (int)(p-&pt[0]), p->pid);
	cmd=response[1];
	tcpconn=(struct tcp_connection*)response[0];
	if (unlikely(tcpconn==0)){
		LOG(L_CRIT, "BUG: handle_ser_child: null tcpconn pointer received"
				 " from child %d (pid %d): %lx, %lx\n",
				 (int)(p-&pt[0]), p->pid, response[0], response[1]) ;
		goto end;
	}
	switch(cmd){
		case CONN_ERROR:
			/* a writer hit an error on this connection => tear it down */
			LOG(L_ERR, "handle_ser_child: ERROR: received CON_ERROR for %p"
					" (id %d), refcnt %d\n", 
					tcpconn, tcpconn->id, atomic_get(&tcpconn->refcnt));
#ifdef TCP_CONNECT_WAIT
			/* if the connection is pending => it might be on the way of
			 * reaching tcp_main (e.g. CONN_NEW_COMPLETE or 
			 *  CONN_NEW_PENDING_WRITE) =>  it cannot be destroyed here */
			if ( !(tcpconn->flags & F_CONN_PENDING) &&
					tcpconn_try_unhash(tcpconn) )
				tcpconn_put(tcpconn);
#else /* ! TCP_CONNECT_WAIT */
			if ( tcpconn_try_unhash(tcpconn) )
				tcpconn_put(tcpconn);
#endif /* TCP_CONNECT_WAIT */
			/* stop any read/write watching before the final deref */
			if ( ((tcpconn->flags & (F_CONN_WRITE_W|F_CONN_READ_W)) ) &&
					(tcpconn->s!=-1)){
				io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
				tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W);
			}
			tcpconn_put_destroy(tcpconn); /* dec refcnt & destroy on 0 */
			break;
		case CONN_GET_FD:
			/* send the requested FD  */
			/* WARNING: take care of setting refcnt properly to
			 * avoid race conditions */
			if (unlikely(send_fd(p->unix_sock, &tcpconn, sizeof(tcpconn),
									tcpconn->s)<=0)){
				LOG(L_ERR, "ERROR: handle_ser_child: send_fd failed\n");
			}
			break;
		case CONN_NEW:
			/* update the fd in the requested tcpconn*/
			/* WARNING: take care of setting refcnt properly to
			 * avoid race conditions */
			if (unlikely(fd==-1)){
				LOG(L_CRIT, "BUG: handle_ser_child: CONN_NEW:"
							" no fd received\n");
				tcpconn->flags|=F_CONN_FD_CLOSED;
				tcpconn_put_destroy(tcpconn);
				break;
			}
			(*tcp_connections_no)++;
			tcpconn->s=fd;
			/* add tcpconn to the list*/
			tcpconn_add(tcpconn);
			/* update the timeout*/
			t=get_ticks_raw();
			tcpconn->timeout=t+tcp_con_lifetime;
			/* activate the timer (already properly init. in tcpconn_new())
			 * no need for reinit */
			local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
								tcp_con_lifetime, t);
			/* branch-free flag computation: ((int)!cond - 1) is 0 when the
			 * F_CONN_WANTS_WR bit is clear and all-ones when it's set */
			tcpconn->flags|=(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD)
#ifdef TCP_BUF_WRITE
							/* not used for now, the connection is sent to tcp_main
							 * before knowing whether we can write on it or we should 
							 * wait */
							| (((int)!(tcpconn->flags & F_CONN_WANTS_WR)-1)& 
								F_CONN_WRITE_W)
#endif /* TCP_BUF_WRITE */
				;
			tcpconn->flags&=~F_CONN_FD_CLOSED;
			flags=POLLIN 
#ifdef TCP_BUF_WRITE
					/* not used for now, the connection is sent to tcp_main
					 * before knowing if we can write on it or we should 
					 * wait */
					| (((int)!(tcpconn->flags & F_CONN_WANTS_WR)-1) & POLLOUT)
#endif /* TCP_BUF_WRITE */
					;
			if (unlikely(
					io_watch_add(&io_h, tcpconn->s, flags,
												F_TCPCONN, tcpconn)<0)){
				LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed to add"
							" new socket to the fd list\n");
				tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W);
				tcpconn_try_unhash(tcpconn); /*  unhash & dec refcnt */
				tcpconn_put_destroy(tcpconn);
			}
			break;
#ifdef TCP_BUF_WRITE
		case CONN_QUEUED_WRITE:
			/* received only if the wr. queue is empty and a write finishes
			 * with EAGAIN (common after connect())
			 * it should only enable write watching on the fd. The connection
			 * should be  already in the hash. The refcnt is not changed.
			 */
			if (unlikely((tcpconn->state==S_CONN_BAD) || 
							!(tcpconn->flags & F_CONN_HASHED) ))
				break;
			if (!(tcpconn->flags & F_CONN_WANTS_WR)){
				tcpconn->flags|=F_CONN_WANTS_WR;
				t=get_ticks_raw();
				if (likely((tcpconn->flags & F_CONN_MAIN_TIMER) && 
					(TICKS_LT(tcpconn->wbuf_q.wr_timeout, tcpconn->timeout)) &&
						TICKS_LT(t, tcpconn->wbuf_q.wr_timeout) )){
					/* _wbufq_nonempty() is guaranteed here */
					/* update the timer */
					local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
					local_timer_reinit(&tcpconn->timer);
					local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
										tcpconn->wbuf_q.wr_timeout-t, t);
					DBG("tcp_main: handle_ser_child: CONN_QUEUED_WRITE; %p "
							"timeout adjusted to %d s\n", tcpconn, 
							TICKS_TO_S(tcpconn->wbuf_q.wr_timeout-t));
				}
				if (!(tcpconn->flags & F_CONN_WRITE_W)){
					tcpconn->flags|=F_CONN_WRITE_W;
					if (!(tcpconn->flags & F_CONN_READ_W)){
						/* not yet watched at all => add with POLLOUT only */
						if (unlikely(io_watch_add(&io_h, tcpconn->s, POLLOUT,
												F_TCPCONN, tcpconn)<0)){
							LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child:"
										" failed to enable write watch on"
										" socket\n");
							if (tcpconn_try_unhash(tcpconn))
								tcpconn_put_destroy(tcpconn);
							break;
						}
					}else{
						/* already watched for read => just add POLLOUT */
						if (unlikely(io_watch_chg(&io_h, tcpconn->s,
													POLLIN|POLLOUT, -1)<0)){
							LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child:"
									" failed to change socket watch events\n");
							io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
							tcpconn->flags&=~F_CONN_READ_W;
							if (tcpconn_try_unhash(tcpconn))
								tcpconn_put_destroy(tcpconn);
							break;
						}
					}
				}
			}else{
				/* NOTE(review): "hanlder_ser_child" typo lives in the log
				 * string; left untouched since it's runtime output */
				LOG(L_WARN, "tcp_main: hanlder_ser_child: connection %p"
							" already watched for write\n", tcpconn);
			}
			break;
#ifdef TCP_CONNECT_WAIT
		case CONN_NEW_COMPLETE:
		case CONN_NEW_PENDING_WRITE:
				/* received when a pending connect completes in the same
				 * tcp_send() that initiated it
				 * the connection is already in the hash with S_CONN_PENDING
				 * state (added by tcp_send()) and refcnt at least 1 (for the
				 *  hash)*/
			tcpconn->flags&=~(F_CONN_PENDING|F_CONN_FD_CLOSED);
			if (unlikely((tcpconn->state==S_CONN_BAD) || (fd==-1))){
				if (unlikely(fd==-1))
					LOG(L_CRIT, "BUG: handle_ser_child: CONN_NEW_COMPLETE:"
								" no fd received\n");
				else
					LOG(L_WARN, "WARNING: handle_ser_child: CONN_NEW_COMPLETE:"
							" received connection with error\n");
				tcpconn->flags|=F_CONN_FD_CLOSED;
				tcpconn->state=S_CONN_BAD;
				tcpconn_try_unhash(tcpconn);
				tcpconn_put_destroy(tcpconn);
				break;
			}
			(*tcp_connections_no)++;
			tcpconn->s=fd;
			/* update the timeout*/
			t=get_ticks_raw();
			tcpconn->timeout=t+tcp_con_lifetime;
			nxt_timeout=tcp_con_lifetime;
			if (unlikely(cmd==CONN_NEW_COMPLETE)){
				/* connect succeeded and everything was sent (or queued) */
				tcpconn->state=S_CONN_OK;
				/* check if needs to be watched for write */
				lock_get(&tcpconn->write_lock);
					/* if queue non empty watch it for write */
					flags=(_wbufq_empty(tcpconn)-1)&POLLOUT;
				lock_release(&tcpconn->write_lock);
				if (flags){
					if (TICKS_LT(tcpconn->wbuf_q.wr_timeout, tcpconn->timeout)
							&& TICKS_LT(t, tcpconn->wbuf_q.wr_timeout))
						nxt_timeout=tcpconn->wbuf_q.wr_timeout-t;
					tcpconn->flags|=F_CONN_WRITE_W|F_CONN_WANTS_WR;
				}
				/* activate the timer (already properly init. in 
				   tcpconn_new())  no need for reinit */
				local_timer_add(&tcp_main_ltimer, &tcpconn->timer, nxt_timeout,
									t);
				tcpconn->flags|=F_CONN_MAIN_TIMER|F_CONN_READ_W|
								F_CONN_WANTS_RD;
			}else{
				/* CONN_NEW_PENDING_WRITE */
				/* we don't know if we successfully sent anything, but 
				   for sure we haven't sent all what we wanted, so consider
				   the connection in "connecting" state */
				tcpconn->state=S_CONN_CONNECT;
				/* no need to check, we have something queued for write */
				flags=POLLOUT;
				if (TICKS_LT(tcpconn->wbuf_q.wr_timeout, tcpconn->timeout)
						&& TICKS_LT(t, tcpconn->wbuf_q.wr_timeout))
					nxt_timeout=tcpconn->wbuf_q.wr_timeout-t;
				/* activate the timer (already properly init. in 
				   tcpconn_new())  no need for reinit */
				local_timer_add(&tcp_main_ltimer, &tcpconn->timer, nxt_timeout,
									t);
				tcpconn->flags|=F_CONN_MAIN_TIMER|F_CONN_READ_W|
								F_CONN_WANTS_RD |
								F_CONN_WRITE_W|F_CONN_WANTS_WR;
			}
			flags|=POLLIN;
			if (unlikely(
					io_watch_add(&io_h, tcpconn->s, flags,
												F_TCPCONN, tcpconn)<0)){
				LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed to add"
							" new socket to the fd list\n");
				tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W);
				tcpconn_try_unhash(tcpconn); /*  unhash & dec refcnt */
				tcpconn_put_destroy(tcpconn);
			}
			break;
#endif /* TCP_CONNECT_WAIT */
#endif /* TCP_BUF_WRITE */
		default:
			LOG(L_CRIT, "BUG: handle_ser_child: unknown cmd %d\n", cmd);
	}
end:
	return ret;
error:
	return -1;
}
/* sends a tcpconn + fd to a choosen child
 * selects the first idle tcp child (round-robin from where the previous
 * call stopped) or, if all are busy, the least busy one; drains the
 * child's pending messages first to avoid a mutual send deadlock, then
 * passes the connection pointer + its fd over the child's unix socket.
 * returns 0 on success, -1 on failure (fd could not be sent or queued) */
inline static int send2child(struct tcp_connection* tcpconn)
{
	int i;
	int min_busy;
	int idx;
	static int crt=0; /* current child */
	int last;
	
	min_busy=tcp_children[0].busy;
	idx=0;
	last=crt+tcp_children_no;
	/* one full round over the children, starting after the last pick */
	for (; crt<last; crt++){
		i=crt%tcp_children_no;
		if (!tcp_children[i].busy){
			idx=i;
			min_busy=0;
			break;
		}else if (min_busy>tcp_children[i].busy){
			min_busy=tcp_children[i].busy;
			idx=i;
		}
	}
	crt=idx+1; /* next time we start with crt%tcp_children_no */
	
	tcp_children[idx].busy++;
	tcp_children[idx].n_reqs++;
	if (unlikely(min_busy)){
		DBG("WARNING: send2child: no free tcp receiver, "
				" connection passed to the least busy one (%d)\n",
				min_busy);
	}
	DBG("send2child: to tcp child %d %d(%d), %p\n", idx, 
					tcp_children[idx].proc_no,
					tcp_children[idx].pid, tcpconn);
	/* first make sure this child doesn't have pending request for
	 * tcp_main (to avoid a possible deadlock: e.g. child wants to
	 * send a release command, but the master fills its socket buffer
	 * with new connection commands => deadlock) */
	/* answer tcp_send requests first */
	while(handle_ser_child(&pt[tcp_children[idx].proc_no], -1)>0);
	/* process tcp readers requests */
	while(handle_tcp_child(&tcp_children[idx], -1)>0);
		
#ifdef SEND_FD_QUEUE
	/* if queue full, try to queue the io */
	if (unlikely(send_fd(tcp_children[idx].unix_sock, &tcpconn,
							sizeof(tcpconn), tcpconn->s)<=0)){
		if ((errno==EAGAIN)||(errno==EWOULDBLOCK)){
			/* FIXME: remove after debugging */
			 LOG(L_CRIT, "INFO: tcp child %d, socket %d: queue full,"
					 	" %d requests queued (total handled %d)\n",
					idx, tcp_children[idx].unix_sock, min_busy,
					tcp_children[idx].n_reqs-1);
			/* child's socket buffer is full => park the fd send in the
			 * retry queue (send_fd_queue_run() will retry it later) */
			if (send_fd_queue_add(&send2child_q, tcp_children[idx].unix_sock, 
						tcpconn)!=0){
				LOG(L_ERR, "ERROR: send2child: queue send op. failed\n");
				return -1;
			}
		}else{
			LOG(L_ERR, "ERROR: send2child: send_fd failed\n");
			return -1;
		}
	}
#else
	if (unlikely(send_fd(tcp_children[idx].unix_sock, &tcpconn,
						sizeof(tcpconn), tcpconn->s)<=0)){
		LOG(L_ERR, "ERROR: send2child: send_fd failed\n");
		return -1;
	}
#endif
	
	return 0;
}
/* handles a new connection, called internally by tcp_main_loop/handle_io.
 * params: si - pointer to one of the tcp socket_info structures on which
 *              an io event was detected (connection attempt)
 * returns:  handle_* return convention: -1 on error, 0 on EAGAIN (no more
 *           io events queued), >0 on success. success/error refer only to
 *           the accept.
 *
 * accept()s the connection, enforces the global connection limit, sets
 * the socket options, creates the tcp_connection structure and either
 * keeps watching it in tcp_main until data arrives
 * (TCP_PASS_NEW_CONNECTION_ON_DATA) or passes it to a tcp child right
 * away.
 */
static inline int handle_new_connect(struct socket_info* si)
{
	union sockaddr_union su;
	union sockaddr_union sock_name;
	unsigned sock_name_len;
	union sockaddr_union* dst_su;
	struct tcp_connection* tcpconn;
	socklen_t su_len;
	int new_sock;
	
	/* got a connection on r */
	su_len=sizeof(su);
	new_sock=accept(si->socket, &(su.s), &su_len);
	if (unlikely(new_sock==-1)){
		if ((errno==EAGAIN)||(errno==EWOULDBLOCK))
			return 0;
		LOG(L_ERR,  "WARNING: handle_new_connect: error while accepting"
				" connection(%d): %s\n", errno, strerror(errno));
		return -1;
	}
	/* limit check AFTER the accept: over-limit clients get an immediate
	 * close instead of staying in the listen backlog */
	if (unlikely(*tcp_connections_no>=tcp_max_connections)){
		LOG(L_ERR, "ERROR: maximum number of connections exceeded: %d/%d\n",
					*tcp_connections_no, tcp_max_connections);
		close(new_sock);
		return 1; /* success, because the accept was succesfull */
	}
	if (unlikely(init_sock_opt_accept(new_sock)<0)){
		LOG(L_ERR, "ERROR: handle_new_connect: init_sock_opt failed\n");
		close(new_sock);
		return 1; /* success, because the accept was succesfull */
	}
	(*tcp_connections_no)++;
	
	dst_su=&si->su;
	if (unlikely(si->flags & SI_IS_ANY)){
		/* INADDR_ANY => get local dst */
		sock_name_len=sizeof(sock_name);
		if (getsockname(new_sock, &sock_name.s, &sock_name_len)!=0){
			LOG(L_ERR, "ERROR: handle_new_connect:"
						" getsockname failed: %s(%d)\n",
						strerror(errno), errno);
			/* go on with the 0.0.0.0 dst from the sock_info */
		}else{
			dst_su=&sock_name;
		}
	}
	/* add socket to list */
	tcpconn=tcpconn_new(new_sock, &su, dst_su, si, si->proto, S_CONN_ACCEPT);
	if (likely(tcpconn)){
#ifdef TCP_PASS_NEW_CONNECTION_ON_DATA
		/* refcnt==1: only the hash holds a reference; tcp_main keeps
		 * watching the fd and will pass the conn. to a child on first
		 * data */
		atomic_set(&tcpconn->refcnt, 1); /* safe, not yet available to the
											outside world */
		tcpconn_add(tcpconn);
		/* activate the timer */
		local_timer_add(&tcp_main_ltimer, &tcpconn->timer, 
								tcp_con_lifetime, get_ticks_raw());
		tcpconn->flags|=(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD);
		if (unlikely(io_watch_add(&io_h, tcpconn->s, POLLIN, 
													F_TCPCONN, tcpconn)<0)){
			LOG(L_CRIT, "ERROR: tcp_main: handle_new_connect: failed to add"
						" new socket to the fd list\n");
			tcpconn->flags&=~F_CONN_READ_W;
			if (tcpconn_try_unhash(tcpconn))
				tcpconn_put_destroy(tcpconn);
		}
#else
		/* refcnt==2: one reference for the hash, one for the tcp child
		 * the connection is handed to below */
		atomic_set(&tcpconn->refcnt, 2); /* safe, not yet available to the
											outside world */
		/* prepare it for passing to a child */
		tcpconn->flags|=F_CONN_READER;
		tcpconn_add(tcpconn);
		DBG("handle_new_connect: new connection from %s: %p %d flags: %04x\n",
			su2a(&su, sizeof(su)), tcpconn, tcpconn->s, tcpconn->flags);
		if(unlikely(send2child(tcpconn)<0)){
			LOG(L_ERR,"ERROR: handle_new_connect: no children "
					"available\n");
			/* undo both references taken above */
			tcpconn->flags&=~F_CONN_READER;
			tcpconn_put(tcpconn);
			tcpconn_try_unhash(tcpconn);
			tcpconn_put_destroy(tcpconn);
		}
#endif
	}else{ /*tcpconn==0 */
		LOG(L_ERR, "ERROR: handle_new_connect: tcpconn_new failed, "
				"closing socket\n");
		close(new_sock);
		(*tcp_connections_no)--;
	}
	return 1; /* accept() was succesfull */
}
  2984. /* handles an io event on one of the watched tcp connections
  2985. *
  2986. * params: tcpconn - pointer to the tcp_connection for which we have an io ev.
  2987. * fd_i - index in the fd_array table (needed for delete)
  2988. * returns: handle_* return convention, but on success it always returns 0
  2989. * (because it's one-shot, after a succesful execution the fd is
  2990. * removed from tcp_main's watch fd list and passed to a child =>
  2991. * tcp_main is not interested in further io events that might be
  2992. * queued for this fd)
  2993. */
/* Handles an io event (POLLIN/POLLOUT/POLLERR/POLLHUP) on a tcp connection
 * owned by tcp_main.
 * Write events flush the buffered write queue (TCP_BUF_WRITE only); read,
 * error or hangup events hand the connection over to a tcp reader child,
 * after removing it from tcp_main's io watch list and local timer.
 * params: tcpconn - the connection the event was reported for
 *         ev      - poll event mask
 *         fd_i    - index in the fd_array (or -1 if not known)
 * returns 0 on success (tcp_main is no longer interested in queued io events
 * for this fd: it was passed to a child, closed, or fully written), -1 on
 * error. */
inline static int handle_tcpconn_ev(struct tcp_connection* tcpconn, short ev,
										int fd_i)
{
#ifdef TCP_BUF_WRITE
	int empty_q;
	int bytes;
#endif /* TCP_BUF_WRITE */
	/* is refcnt!=0 really necessary?
	 * No, in fact it's a bug: I can have the following situation: a send only
	 * tcp connection used by n processes simultaneously => refcnt = n. In
	 * the same time I can have a read event and this situation is perfectly
	 * valid. -- andrei
	 */
#if 0
	if ((tcpconn->refcnt!=0)){
		/* FIXME: might be valid for sigio_rt iff fd flags are not cleared
		 * (there is a short window in which it could generate a sig
		 * that would be catched by tcp_main) */
		LOG(L_CRIT, "BUG: handle_tcpconn_ev: io event on referenced"
					" tcpconn (%p), refcnt=%d, fd=%d\n",
					tcpconn, tcpconn->refcnt, tcpconn->s);
		return -1;
	}
#endif
	/* pass it to child, so remove it from the io watch list and the local
	 * timer */
#ifdef TCP_BUF_WRITE
	empty_q=0; /* warning fix */
	if (unlikely((ev & (POLLOUT|POLLERR|POLLHUP)) &&
					(tcpconn->flags & F_CONN_WRITE_W))){
		/* write event (or error/hup) on a connection with queued writes:
		 * try to flush the queue; on error/hup or failed flush the
		 * connection is torn down */
		if (unlikely((ev & (POLLERR|POLLHUP)) ||
						(wbufq_run(tcpconn->s, tcpconn, &empty_q)<0))){
			if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)<0)){
				LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(1) failed:"
							" for %p, fd %d\n", tcpconn, tcpconn->s);
			}
			if ((tcpconn->flags & F_CONN_READ_W) && (ev & POLLIN)){
				/* connection is watched for read and there is a read event
				 * (unfortunately if we have POLLIN here we don't know if
				 * there's really any data in the read buffer or the POLLIN
				 * was generated by the error or EOF => to avoid loosing
				 * data it's safer to either directly check the read buffer
				 * or try a read)*/
				/* in most cases the read buffer will be empty, so in general
				 * is cheaper to check it here and then send the
				 * conn. to a child only if needed (another syscall + at
				 * least 2 * syscalls in the reader + ...) */
				if ((ioctl(tcpconn->s, FIONREAD, &bytes)>=0) && (bytes>0)){
					/* pending read data: mark the write error + forced eof
					 * and let a reader child drain the buffer */
					tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W|
										F_CONN_WANTS_RD|F_CONN_WANTS_WR);
					tcpconn->flags|=F_CONN_FORCE_EOF|F_CONN_WR_ERROR;
					goto send_to_child;
				}
				/* if bytes==0 or ioctl failed, destroy the connection now */
			}
			tcpconn->flags&=~(F_CONN_WRITE_W|F_CONN_READ_W|
								F_CONN_WANTS_RD|F_CONN_WANTS_WR);
			if (unlikely(!tcpconn_try_unhash(tcpconn))){
				LOG(L_CRIT, "BUG: tcpconn_ev: unhashed connection %p\n",
							tcpconn);
			}
			tcpconn_put_destroy(tcpconn);
			goto error;
		}
		if (empty_q){
			/* write queue fully flushed: stop watching for POLLOUT */
			tcpconn->flags&=~F_CONN_WANTS_WR;
			if (!(tcpconn->flags & F_CONN_READ_W)){
				/* not watched for reads either => remove the fd entirely */
				if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)==-1)){
					LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(2)"
								" failed:" " for %p, fd %d\n",
								tcpconn, tcpconn->s);
					goto error;
				}
			}else{
				/* keep watching for reads only */
				if (unlikely(io_watch_chg(&io_h, tcpconn->s,
											POLLIN, fd_i)==-1)){
					LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_chg(1)"
								" failed:" " for %p, fd %d\n",
								tcpconn, tcpconn->s);
					goto error;
				}
			}
			tcpconn->flags&=~F_CONN_WRITE_W;
		}
		ev&=~POLLOUT; /* clear POLLOUT */
	}
	if (likely(ev && (tcpconn->flags & F_CONN_READ_W))){
		/* if still some other IO event (POLLIN|POLLHUP|POLLERR) and
		 * connection is still watched in tcp_main for reads, send it to a
		 * child and stop watching it for input (but continue watching for
		 * writes if needed): */
		if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
			/* still writes pending => keep the fd, watch POLLOUT only */
			if (unlikely(io_watch_chg(&io_h, tcpconn->s, POLLOUT, fd_i)==-1)){
				LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_chg(2)"
							" failed:" " for %p, fd %d\n",
							tcpconn, tcpconn->s);
				goto error;
			}
		}else
		/* NOTE: the #else branch below deliberately opens a bare '{' so that
		 * the same io_watch_del() call is the else-body when TCP_BUF_WRITE is
		 * defined and an unconditional block when it is not — do not
		 * "rebalance" these braces */
#else
	{
#endif /* TCP_BUF_WRITE */
			if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)==-1)){
				LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(3)"
							" failed:" " for %p, fd %d\n",
							tcpconn, tcpconn->s);
				goto error;
			}
#ifdef TCP_BUF_WRITE
send_to_child:
#endif
		DBG("tcp: DBG: sendig to child, events %x\n", ev);
		/* set F_CONN_EOF_SEEN iff a hangup/error event was received
		 * (branch-free: !(cond)-1 is all-ones when cond is set, 0 otherwise) */
#ifdef POLLRDHUP
		tcpconn->flags|=((int)!(ev & (POLLRDHUP|POLLHUP|POLLERR)) -1) &
							F_CONN_EOF_SEEN;
#else /* POLLRDHUP */
		tcpconn->flags|=((int)!(ev & (POLLHUP|POLLERR)) -1) & F_CONN_EOF_SEEN;
#endif /* POLLRDHUP */
		/* same trick for out-of-band data */
		tcpconn->flags|= ((int)!(ev & POLLPRI) -1) & F_CONN_OOB_DATA;
		tcpconn->flags|=F_CONN_READER;
		local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
		tcpconn->flags&=~(F_CONN_MAIN_TIMER|F_CONN_READ_W|F_CONN_WANTS_RD);
		tcpconn_ref(tcpconn); /* refcnt ++ */
		if (unlikely(send2child(tcpconn)<0)){
			/* no reader child could take it: undo the hand-over and
			 * destroy the connection */
			LOG(L_ERR,"ERROR: handle_tcpconn_ev: no children available\n");
			tcpconn->flags&=~F_CONN_READER;
#ifdef TCP_BUF_WRITE
			if (tcpconn->flags & F_CONN_WRITE_W){
				if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)<0)){
					LOG(L_ERR, "ERROR: handle_tcpconn_ev: io_watch_del(4)"
								" failed:" " for %p, fd %d\n",
								tcpconn, tcpconn->s);
				}
				tcpconn->flags&=~F_CONN_WRITE_W;
			}
#endif /* TCP_BUF_WRITE */
			tcpconn_put(tcpconn);
			tcpconn_try_unhash(tcpconn);
			tcpconn_put_destroy(tcpconn); /* because of the tcpconn_ref() */
		}
	}
	return 0; /* we are not interested in possibly queued io events, 
				 the fd was either passed to a child, closed, or for writes,
				 everything possible was already written */
error:
	return -1;
}
  3141. /* generic handle io routine, it will call the appropiate
  3142. * handle_xxx() based on the fd_map type
  3143. *
  3144. * params: fm - pointer to a fd hash entry
  3145. * idx - index in the fd_array (or -1 if not known)
  3146. * return: -1 on error
  3147. * 0 on EAGAIN or when by some other way it is known that no more
  3148. * io events are queued on the fd (the receive buffer is empty).
  3149. * Usefull to detect when there are no more io events queued for
  3150. * sigio_rt, epoll_et, kqueue.
  3151. * >0 on successfull read from the fd (when there might be more io
  3152. * queued -- the receive buffer might still be non-empty)
  3153. */
  3154. inline static int handle_io(struct fd_map* fm, short ev, int idx)
  3155. {
  3156. int ret;
  3157. /* update the local config */
  3158. cfg_update();
  3159. switch(fm->type){
  3160. case F_SOCKINFO:
  3161. ret=handle_new_connect((struct socket_info*)fm->data);
  3162. break;
  3163. case F_TCPCONN:
  3164. ret=handle_tcpconn_ev((struct tcp_connection*)fm->data, ev, idx);
  3165. break;
  3166. case F_TCPCHILD:
  3167. ret=handle_tcp_child((struct tcp_child*)fm->data, idx);
  3168. break;
  3169. case F_PROC:
  3170. ret=handle_ser_child((struct process_table*)fm->data, idx);
  3171. break;
  3172. case F_NONE:
  3173. LOG(L_CRIT, "BUG: handle_io: empty fd map: %p {%d, %d, %p},"
  3174. " idx %d\n", fm, fm->fd, fm->type, fm->data, idx);
  3175. goto error;
  3176. default:
  3177. LOG(L_CRIT, "BUG: handle_io: uknown fd type %d\n", fm->type);
  3178. goto error;
  3179. }
  3180. return ret;
  3181. error:
  3182. return -1;
  3183. }
/* timer handler for tcp connections handled by tcp_main:
 * re-arms the timer while the connection's idle timeout (and, with
 * TCP_BUF_WRITE, its write-queue timeout) has not expired; otherwise
 * detaches the connection from the hash, stops watching its fd and
 * destroys it.
 * returns the number of ticks until the next expiry, or 0 when the
 * connection was destroyed (timer not re-armed) */
static ticks_t tcpconn_main_timeout(ticks_t t, struct timer_ln* tl, void* data)
{
	struct tcp_connection *c;
	int fd;

	c=(struct tcp_connection*)data;
	/* or (struct tcp...*)(tl-offset(c->timer)) */

#ifdef TCP_BUF_WRITE
	DBG( "tcp_main: entering timer for %p (ticks=%d, timeout=%d (%d s), "
			"wr_timeout=%d (%d s)), write queue: %d bytes\n",
			c, t, c->timeout, TICKS_TO_S(c->timeout-t),
			c->wbuf_q.wr_timeout, TICKS_TO_S(c->wbuf_q.wr_timeout-t),
			c->wbuf_q.queued);

	/* nothing expired yet => just re-arm for the earliest deadline.
	 * NOTE(review): the bitwise '|' between the conditions looks
	 * intentional (evaluates all three without branches); it relies on
	 * each operand being a 0/1 value -- confirm before changing to '||' */
	if (TICKS_LT(t, c->timeout) && 
			(!tcp_options.tcp_buf_write | _wbufq_empty(c) |
				TICKS_LT(t, c->wbuf_q.wr_timeout)) ){
		if (unlikely(tcp_options.tcp_buf_write && _wbufq_non_empty(c)))
			/* two pending deadlines: wake up at the nearer one */
			return (ticks_t)MIN_unsigned(c->timeout-t, c->wbuf_q.wr_timeout-t);
		else
			return (ticks_t)(c->timeout - t);
	}
#ifdef USE_DST_BLACKLIST
	/* if time out due to write, add it to the blacklist */
	if (tcp_options.tcp_buf_write && _wbufq_non_empty(c) &&
			TICKS_GE(t, c->wbuf_q.wr_timeout) &&
			cfg_get(core, core_cfg, use_dst_blacklist))
		dst_blacklist_su((c->state==S_CONN_CONNECT)? BLST_ERR_CONNECT:
								BLST_ERR_SEND,
							c->rcv.proto, &c->rcv.src_su, 0);
#endif /* USE_DST_BLACKLIST */
#else /* ! TCP_BUF_WRITE */
	if (TICKS_LT(t, c->timeout)){
		/* timeout extended, exit */
		return (ticks_t)(c->timeout - t);
	}
#endif /* TCP_BUF_WRITE */
	DBG("tcp_main: timeout for %p\n", c);
	/* connection expired: unhash it so no other process can grab a new
	 * reference, then destroy it */
	if (likely(c->flags & F_CONN_HASHED)){
		c->flags&=~(F_CONN_HASHED|F_CONN_MAIN_TIMER);
		c->state=S_CONN_BAD;
		TCPCONN_LOCK;
			_tcpconn_detach(c);
		TCPCONN_UNLOCK;
	}else{
		c->flags&=~F_CONN_MAIN_TIMER;
		LOG(L_CRIT, "BUG: tcp_main: timer: called with unhashed connection %p"
				"\n", c);
		tcpconn_ref(c); /* ugly hack to try to go on */
	}
	fd=c->s;
	if (likely(fd>0)){
		/* stop watching the fd before it is closed by the destroy below */
		if (likely(c->flags & (F_CONN_READ_W|F_CONN_WRITE_W))){
			io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
			c->flags&=~(F_CONN_READ_W|F_CONN_WRITE_W);
		}
	}
	tcpconn_put_destroy(c);
	return 0;
}
  3243. static inline void tcp_timer_run()
  3244. {
  3245. ticks_t ticks;
  3246. ticks=get_ticks_raw();
  3247. if (unlikely((ticks-tcp_main_prev_ticks)<TCPCONN_TIMEOUT_MIN_RUN)) return;
  3248. tcp_main_prev_ticks=ticks;
  3249. local_timer_run(&tcp_main_ltimer, ticks);
  3250. }
/* destroys every connection in the id hash (shutdown cleanup).
 * keep in sync with tcpconn_destroy, the "delete" part should be
 * the same except for io_watch_del..
 * Note: this function is called only on shutdown by the main ser process via
 * cleanup(). However it's also safe to call it from the tcp_main process.
 * => with the ser shutdown exception, it cannot execute in parallel
 * with tcpconn_add() or tcpconn_destroy()*/
static inline void tcpconn_destroy_all()
{
	struct tcp_connection *c, *next;
	unsigned h;
	int fd;

	TCPCONN_LOCK; 
	/* walk every bucket of the connection id hash */
	for(h=0; h<TCP_ID_HASH_SIZE; h++){
		c=tcpconn_id_hash[h];
		while(c){
			/* save the next pointer: c is freed below */
			next=c->id_next;
			if (is_tcp_main){
				/* we cannot close or remove the fd if we are not in the
				 * tcp main proc.*/
				if ((c->flags & F_CONN_MAIN_TIMER)){
					local_timer_del(&tcp_main_ltimer, &c->timer);
					c->flags&=~F_CONN_MAIN_TIMER;
				} /* else still in some reader */
				fd=c->s;
				if (fd>0 && (c->flags & (F_CONN_READ_W|F_CONN_WRITE_W))){
					io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
					c->flags&=~(F_CONN_READ_W|F_CONN_WRITE_W);
				}
			}else{
				/* not tcp_main => the fd must not be touched here */
				fd=-1;
			}
#ifdef USE_TLS
			if (fd>0 && c->type==PROTO_TLS)
				tls_close(c, fd);
#endif
			_tcpconn_rm(c);
			if (fd>0) {
#ifdef TCP_FD_CACHE
				/* the fd might be cached in other processes too: force a
				 * full shutdown so cached copies see the close */
				if (likely(tcp_options.fd_cache)) shutdown(fd, SHUT_RDWR);
#endif /* TCP_FD_CACHE */
				close(fd);
			}
			(*tcp_connections_no)--;
			c=next;
		}
	}
	TCPCONN_UNLOCK;
}
/* tcp main loop: initializes the io watch machinery and the local timer,
 * registers every fd tcp_main must watch (tcp/tls listen sockets, the unix
 * sockets of all other ser processes and of the tcp reader children) and
 * then loops forever on the configured poll method, dispatching io events,
 * running the queued send-fd operations and expiring old connections.
 * Never returns on success; exits the process on fatal errors. */
void tcp_main_loop()
{
	struct socket_info* si;
	int r;

	is_tcp_main=1; /* mark this process as tcp main */

	tcp_main_max_fd_no=get_max_open_fds();
	/* init send fd queues (here because we want mem. alloc only in the tcp
	 * process */
#ifdef SEND_FD_QUEUE
	if (init_send_fd_queues()<0){
		LOG(L_CRIT, "ERROR: init_tcp: could not init send fd queues\n");
		goto error;
	}
#endif
	/* init io_wait (here because we want the memory allocated only in
	 * the tcp_main process) */
	if (init_io_wait(&io_h, tcp_main_max_fd_no, tcp_poll_method)<0)
		goto error;
	/* init: start watching all the fds*/

	/* init local timer */
	tcp_main_prev_ticks=get_ticks_raw();
	if (init_local_timer(&tcp_main_ltimer, get_ticks_raw())!=0){
		LOG(L_ERR, "ERROR: init_tcp: failed to init local timer\n");
		goto error;
	}
#ifdef TCP_FD_CACHE
	if (tcp_options.fd_cache) tcp_fd_cache_init();
#endif /* TCP_FD_CACHE */

	/* add all the sockets we listen on for connections */
	for (si=tcp_listen; si; si=si->next){
		if ((si->proto==PROTO_TCP) &&(si->socket!=-1)){
			if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
				LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
							"listen socket to the fd list\n");
				goto error;
			}
		}else{
			LOG(L_CRIT, "BUG: tcp_main_loop: non tcp address in tcp_listen\n");
		}
	}
#ifdef USE_TLS
	if (!tls_disable && tls_loaded()){
		/* same for the tls listen sockets */
		for (si=tls_listen; si; si=si->next){
			if ((si->proto==PROTO_TLS) && (si->socket!=-1)){
				if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
					LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
							"tls listen socket to the fd list\n");
					goto error;
				}
			}else{
				LOG(L_CRIT, "BUG: tcp_main_loop: non tls address"
						" in tls_listen\n");
			}
		}
	}
#endif
	/* add all the unix sockets used for communication with other ser
	 * processes (get fd, new connection a.s.o); r starts at 1: entry 0 is
	 * not watched */
	for (r=1; r<process_no; r++){
		if (pt[r].unix_sock>0) /* we can't have 0, we never close it!*/
			if (io_watch_add(&io_h, pt[r].unix_sock, POLLIN,F_PROC, &pt[r])<0){
					LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
							"process %d unix socket to the fd list\n", r);
					goto error;
			}
	}
	/* add all the unix sockets used for communication with the tcp childs */
	for (r=0; r<tcp_children_no; r++){
		if (tcp_children[r].unix_sock>0)/*we can't have 0, we never close it!*/
			if (io_watch_add(&io_h, tcp_children[r].unix_sock, POLLIN,
									F_TCPCHILD, &tcp_children[r]) <0){
				LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
						"tcp child %d unix socket to the fd list\n", r);
				goto error;
			}
	}

	/* initialize the cfg framework */
	if (cfg_child_init()) goto error;

	/* main loop: one endless loop per poll method; each iteration waits for
	 * io, then runs the queued send-fd operations, then the local timer */
	switch(io_h.poll_method){
		case POLL_POLL:
			while(1){
				/* wait and process IO */
				io_wait_loop_poll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				/* remove old connections */
				tcp_timer_run();
			}
			break;
#ifdef HAVE_SELECT
		case POLL_SELECT:
			while(1){
				io_wait_loop_select(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_SIGIO_RT
		case POLL_SIGIO_RT:
			while(1){
				io_wait_loop_sigio_rt(&io_h, TCP_MAIN_SELECT_TIMEOUT);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_EPOLL
		case POLL_EPOLL_LT:
			while(1){
				io_wait_loop_epoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
		case POLL_EPOLL_ET:
			while(1){
				/* last arg 1 => edge-triggered mode */
				io_wait_loop_epoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 1);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_KQUEUE
		case POLL_KQUEUE:
			while(1){
				io_wait_loop_kqueue(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
#ifdef HAVE_DEVPOLL
		case POLL_DEVPOLL:
			while(1){
				io_wait_loop_devpoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
				send_fd_queue_run(&send2child_q); /* then new io */
				tcp_timer_run();
			}
			break;
#endif
		default:
			LOG(L_CRIT, "BUG: tcp_main_loop: no support for poll method "
					" %s (%d)\n", 
					poll_method_name(io_h.poll_method), io_h.poll_method);
			goto error;
	}
error:
#ifdef SEND_FD_QUEUE
	destroy_send_fd_queues();
#endif
	destroy_io_wait(&io_h);
	LOG(L_CRIT, "ERROR: tcp_main_loop: exiting...");
	exit(-1);
}
/* cleanup before exit: destroys all remaining connections and frees every
 * tcp global (shm counters, hash tables, lock, children array).
 * Safe to call even if init_tcp() failed halfway: each branch checks the
 * pointer and resets it to 0 afterwards, so repeated calls are harmless. */
void destroy_tcp()
{
	if (tcpconn_id_hash){
		if (tcpconn_lock)
			TCPCONN_UNLOCK; /* hack: force-unlock the tcp lock in case
							   some process was terminated while holding 
							   it; this will allow an almost gracious 
							   shutdown */
		/* NOTE: tcpconn_destroy_all() takes the tcp lock, so the lock must
		 * be destroyed only further below */
		tcpconn_destroy_all(); 
		shm_free(tcpconn_id_hash);
		tcpconn_id_hash=0;
	}
	if (tcp_connections_no){
		shm_free(tcp_connections_no);
		tcp_connections_no=0;
	}
#ifdef TCP_BUF_WRITE
	if (tcp_total_wq){
		shm_free(tcp_total_wq);
		tcp_total_wq=0;
	}
#endif /* TCP_BUF_WRITE */
	if (connection_id){
		shm_free(connection_id);
		connection_id=0;
	}
	if (tcpconn_aliases_hash){
		shm_free(tcpconn_aliases_hash);
		tcpconn_aliases_hash=0;
	}
	if (tcpconn_lock){
		lock_destroy(tcpconn_lock);
		lock_dealloc((void*)tcpconn_lock);
		tcpconn_lock=0;
	}
	if (tcp_children){
		pkg_free(tcp_children);
		tcp_children=0;
	}
	destroy_local_timer(&tcp_main_ltimer);
}
  3497. int init_tcp()
  3498. {
  3499. char* poll_err;
  3500. tcp_options_check();
  3501. /* init lock */
  3502. tcpconn_lock=lock_alloc();
  3503. if (tcpconn_lock==0){
  3504. LOG(L_CRIT, "ERROR: init_tcp: could not alloc lock\n");
  3505. goto error;
  3506. }
  3507. if (lock_init(tcpconn_lock)==0){
  3508. LOG(L_CRIT, "ERROR: init_tcp: could not init lock\n");
  3509. lock_dealloc((void*)tcpconn_lock);
  3510. tcpconn_lock=0;
  3511. goto error;
  3512. }
  3513. /* init globals */
  3514. tcp_connections_no=shm_malloc(sizeof(int));
  3515. if (tcp_connections_no==0){
  3516. LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
  3517. goto error;
  3518. }
  3519. *tcp_connections_no=0;
  3520. connection_id=shm_malloc(sizeof(int));
  3521. if (connection_id==0){
  3522. LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
  3523. goto error;
  3524. }
  3525. *connection_id=1;
  3526. #ifdef TCP_BUF_WRITE
  3527. tcp_total_wq=shm_malloc(sizeof(*tcp_total_wq));
  3528. if (tcp_total_wq==0){
  3529. LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
  3530. goto error;
  3531. }
  3532. #endif /* TCP_BUF_WRITE */
  3533. /* alloc hashtables*/
  3534. tcpconn_aliases_hash=(struct tcp_conn_alias**)
  3535. shm_malloc(TCP_ALIAS_HASH_SIZE* sizeof(struct tcp_conn_alias*));
  3536. if (tcpconn_aliases_hash==0){
  3537. LOG(L_CRIT, "ERROR: init_tcp: could not alloc address hashtable\n");
  3538. goto error;
  3539. }
  3540. tcpconn_id_hash=(struct tcp_connection**)shm_malloc(TCP_ID_HASH_SIZE*
  3541. sizeof(struct tcp_connection*));
  3542. if (tcpconn_id_hash==0){
  3543. LOG(L_CRIT, "ERROR: init_tcp: could not alloc id hashtable\n");
  3544. goto error;
  3545. }
  3546. /* init hashtables*/
  3547. memset((void*)tcpconn_aliases_hash, 0,
  3548. TCP_ALIAS_HASH_SIZE * sizeof(struct tcp_conn_alias*));
  3549. memset((void*)tcpconn_id_hash, 0,
  3550. TCP_ID_HASH_SIZE * sizeof(struct tcp_connection*));
  3551. /* fix config variables */
  3552. if (tcp_connect_timeout<0)
  3553. tcp_connect_timeout=DEFAULT_TCP_CONNECT_TIMEOUT;
  3554. if (tcp_send_timeout<0)
  3555. tcp_send_timeout=DEFAULT_TCP_SEND_TIMEOUT;
  3556. if (tcp_con_lifetime<0){
  3557. /* set to max value (~ 1/2 MAX_INT) */
  3558. tcp_con_lifetime=MAX_TCP_CON_LIFETIME;
  3559. }else{
  3560. if ((unsigned)tcp_con_lifetime >
  3561. (unsigned)TICKS_TO_S(MAX_TCP_CON_LIFETIME)){
  3562. LOG(L_WARN, "init_tcp: tcp_con_lifetime too big (%u s), "
  3563. " the maximum value is %u\n", tcp_con_lifetime,
  3564. TICKS_TO_S(MAX_TCP_CON_LIFETIME));
  3565. tcp_con_lifetime=MAX_TCP_CON_LIFETIME;
  3566. }else{
  3567. tcp_con_lifetime=S_TO_TICKS(tcp_con_lifetime);
  3568. }
  3569. }
  3570. poll_err=check_poll_method(tcp_poll_method);
  3571. /* set an appropriate poll method */
  3572. if (poll_err || (tcp_poll_method==0)){
  3573. tcp_poll_method=choose_poll_method();
  3574. if (poll_err){
  3575. LOG(L_ERR, "ERROR: init_tcp: %s, using %s instead\n",
  3576. poll_err, poll_method_name(tcp_poll_method));
  3577. }else{
  3578. LOG(L_INFO, "init_tcp: using %s as the io watch method"
  3579. " (auto detected)\n", poll_method_name(tcp_poll_method));
  3580. }
  3581. }else{
  3582. LOG(L_INFO, "init_tcp: using %s io watch method (config)\n",
  3583. poll_method_name(tcp_poll_method));
  3584. }
  3585. return 0;
  3586. error:
  3587. /* clean-up */
  3588. destroy_tcp();
  3589. return -1;
  3590. }
  3591. #ifdef TCP_CHILD_NON_BLOCKING
  3592. /* returns -1 on error */
  3593. static int set_non_blocking(int s)
  3594. {
  3595. int flags;
  3596. /* non-blocking */
  3597. flags=fcntl(s, F_GETFL);
  3598. if (flags==-1){
  3599. LOG(L_ERR, "ERROR: set_non_blocking: fnctl failed: (%d) %s\n",
  3600. errno, strerror(errno));
  3601. goto error;
  3602. }
  3603. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  3604. LOG(L_ERR, "ERROR: set_non_blocking: fcntl: set non-blocking failed:"
  3605. " (%d) %s\n", errno, strerror(errno));
  3606. goto error;
  3607. }
  3608. return 0;
  3609. error:
  3610. return -1;
  3611. }
  3612. #endif
  3613. /* returns -1 on error, 0 on success */
  3614. int tcp_fix_child_sockets(int* fd)
  3615. {
  3616. #ifdef TCP_CHILD_NON_BLOCKING
  3617. if ((set_non_blocking(fd[0])<0) ||
  3618. (set_non_blocking(fd[1])<0)){
  3619. return -1;
  3620. }
  3621. #endif
  3622. return 0;
  3623. }
/* starts the tcp reader processes: registers the estimated number of fds
 * the tcp layer will need, allocates the tcp_children array and forks one
 * "tcp receiver" process per configured tcp child.
 * returns 0 on success, -1 on error (in the parent); the forked children
 * never return (they enter tcp_receive_loop()) */
int tcp_init_children()
{
	int r;
	int reader_fd_1; /* for comm. with the tcp children read */
	pid_t pid;
	struct socket_info *si;

	/* estimate max fd. no:
	 * 1 tcp send unix socket/all_proc, 
	 *  + 1 udp sock/udp proc + 1 tcp_child sock/tcp child*
	 *  + no_listen_tcp */
	for(r=0, si=tcp_listen; si; si=si->next, r++);
#ifdef USE_TLS
	if (! tls_disable)
		for (si=tls_listen; si; si=si->next, r++);
#endif

	register_fds(r+tcp_max_connections+get_max_procs()-1 /* tcp main */);
#if 0
	tcp_max_fd_no=get_max_procs()*2 +r-1 /* timer */ +3; /* stdin/out/err*/
	/* max connections can be temporarily exceeded with estimated_process_count
	 * - tcp_main (tcpconn_connect called simultaneously in all the
	 * processes) */
	tcp_max_fd_no+=tcp_max_connections+get_max_procs()-1 /* tcp main */;
#endif
	/* alloc the children array */
	tcp_children=pkg_malloc(sizeof(struct tcp_child)*tcp_children_no);
	if (tcp_children==0){
			LOG(L_ERR, "ERROR: tcp_init_children: out of memory\n");
			goto error;
	}
	/* create the tcp sock_info structures */
	/* copy the sockets --moved to main_loop*/

	/* fork children & create the socket pairs*/
	for(r=0; r<tcp_children_no; r++){
		child_rank++;
		pid=fork_tcp_process(child_rank, "tcp receiver", r, &reader_fd_1);
		if (pid<0){
			LOG(L_ERR, "ERROR: tcp_main: fork failed: %s\n",
					strerror(errno));
			goto error;
		}else if (pid>0){
			/* parent: nothing to do, continue forking the next child */
		}else{
			/* child */
			bind_address=0; /* force a SEGFAULT if someone uses a non-init.
							   bind address on tcp */
			tcp_receive_loop(reader_fd_1); /* does not return */
		}
	}
	return 0;
error:
	return -1;
}
  3677. void tcp_get_info(struct tcp_gen_info *ti)
  3678. {
  3679. ti->tcp_readers=tcp_children_no;
  3680. ti->tcp_max_connections=tcp_max_connections;
  3681. ti->tcp_connections_no=*tcp_connections_no;
  3682. #ifdef TCP_BUF_WRITE
  3683. ti->tcp_write_queued=*tcp_total_wq;
  3684. #else
  3685. ti->tcp_write_queued=0;
  3686. #endif /* TCP_BUF_WRITE */
  3687. }
  3688. #endif