tcp_main.c

  1. /*
  2. * $Id$
  3. *
  4. * Copyright (C) 2001-2003 FhG Fokus
  5. *
  6. * This file is part of ser, a free SIP server.
  7. *
  8. * ser is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version
  12. *
  13. * For a license to use the ser software under conditions
  14. * other than those described here, or to purchase support for this
  15. * software, please contact iptel.org by e-mail at the following addresses:
  16. * [email protected]
  17. *
  18. * ser is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License
  24. * along with this program; if not, write to the Free Software
  25. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  26. */
  27. /*
  28. * History:
  29. * --------
  30. * 2002-11-29 created by andrei
  31. * 2002-12-11 added tcp_send (andrei)
  32. * 2003-01-20 locking fixes, hashtables (andrei)
  33. * 2003-02-20 s/lock_t/gen_lock_t/ to avoid a conflict on solaris (andrei)
  34. * 2003-02-25 Nagle is disabled if -DDISABLE_NAGLE (andrei)
  35. * 2003-03-29 SO_REUSEADDR before calling bind to allow
  36. * server restart, Nagle set on the (hopefully)
  37. * correct socket (jiri)
  38. * 2003-03-31 always try to find the corresponding tcp listen socket for
  39. * a temp. socket and store it in *->bind_address: added
  40. * find_tcp_si, modified tcpconn_connect (andrei)
  41. * 2003-04-14 set sockopts to TOS low delay (andrei)
  42. * 2003-06-30 moved tcp new connect checking & handling to
  43. * handle_new_connect (andrei)
  44. * 2003-07-09 tls_close called before closing the tcp connection (andrei)
  45. * 2003-10-24 converted to the new socket_info lists (andrei)
  46. * 2003-10-27 tcp port aliases support added (andrei)
  47. * 2003-11-04 always lock before manipulating refcnt; sendchild
  48. * does not inc refcnt by itself anymore (andrei)
  49. * 2003-11-07 different unix sockets are used for fd passing
  50. * to/from readers/writers (andrei)
  51. * 2003-11-17 handle_new_connect & tcp_connect will close the
  52. * new socket if tcpconn_new returns 0 (e.g. out of mem) (andrei)
  53. * 2003-11-28 tcp_blocking_write & tcp_blocking_connect added (andrei)
  54. * 2004-11-08 dropped find_tcp_si and replaced with find_si (andrei)
  55. * 2005-06-07 new tcp optimized code, supports epoll (LT), sigio + real time
  56. * signals, poll & select (andrei)
  57. * 2005-06-26 *bsd kqueue support (andrei)
  58. * 2005-07-04 solaris /dev/poll support (andrei)
  59. * 2005-07-08 tcp_max_connections, tcp_connection_lifetime, don't accept
  60. * more connections if tcp_max_connections is exceeded (andrei)
  61. * 2005-10-21 cleanup all the open connections on exit
  62. * decrement the no. of open connections on timeout too (andrei)
 * 2006-01-30 queue send_fd requests and execute them at the end of the
  63. * poll loop (#ifdef) (andrei)
  64. * process all children requests, before attempting to send
  65. * them new stuff (fixes some deadlocks) (andrei)
  66. * 2006-02-03 timers are run only once per s (andrei)
  67. * tcp children fds can be non-blocking; send fds are queued on
  68. * EAGAIN; lots of bug fixes (andrei)
  69. * 2006-02-06 better tcp_max_connections checks, tcp_connections_no moved to
  70. * shm (andrei)
  71. * 2006-04-12 tcp_send() changed to use struct dest_info (andrei)
  72. * 2006-11-02 switched to atomic ops for refcnt, locking improvements
  73. * (andrei)
  74. * 2006-11-04 switched to raw ticks (to fix conversion errors which could
  75. * result in inf. lifetime) (andrei)
  76. * 2007-07-25 tcpconn_connect can now bind the socket on a specified
  77. * source addr/port (andrei)
  78. * 2007-07-26 tcp_send() and tcpconn_get() can now use a specified source
  79. * addr./port (andrei)
  80. * 2007-08-23 getsockname() for INADDR_ANY(SI_IS_ANY) sockets (andrei)
  81. * 2007-08-27 split init_sock_opt into a lightweight init_sock_opt_accept()
  82. * used when accepting connections and init_sock_opt used for
  83. * connect/ new sockets (andrei)
  84. * 2007-11-22 always add the connection & clear the corresponding flags before
  85. * io_watch_add-ing its fd - it's safer this way (andrei)
  86. * 2007-11-26 improved tcp timers: switched to local_timer (andrei)
  87. * 2007-11-27 added send fd cache and reader fd reuse (andrei)
  88. * 2007-11-28 added support for TCP_DEFER_ACCEPT, KEEPALIVE, KEEPINTVL,
  89. * KEEPCNT, QUICKACK, SYNCNT, LINGER2 (andrei)
  90. * 2007-12-04 support for queueing write requests (andrei)
  91. */
  92. #ifdef USE_TCP
  93. #ifndef SHM_MEM
  94. #error "shared memory support needed (add -DSHM_MEM to Makefile.defs)"
  95. #endif
  96. #include <sys/time.h>
  97. #include <sys/types.h>
  98. #include <sys/select.h>
  99. #include <sys/socket.h>
  100. #include <netinet/in.h>
  101. #include <netinet/in_systm.h>
  102. #include <netinet/ip.h>
  103. #include <netinet/tcp.h>
  104. #include <sys/uio.h> /* writev*/
  105. #include <netdb.h>
  106. #include <stdlib.h> /*exit() */
  107. #include <unistd.h>
  108. #include <errno.h>
  109. #include <string.h>
  110. #ifdef HAVE_SELECT
  111. #include <sys/select.h>
  112. #endif
  113. #include <sys/poll.h>
  114. #include "ip_addr.h"
  115. #include "pass_fd.h"
  116. #include "tcp_conn.h"
  117. #include "globals.h"
  118. #include "pt.h"
  119. #include "locking.h"
  120. #include "mem/mem.h"
  121. #include "mem/shm_mem.h"
  122. #include "timer.h"
  123. #include "sr_module.h"
  124. #include "tcp_server.h"
  125. #include "tcp_init.h"
  126. #include "tsend.h"
  127. #include "timer_ticks.h"
  128. #include "local_timer.h"
  129. #ifdef CORE_TLS
  130. #include "tls/tls_server.h"
  131. #define tls_loaded() 1
  132. #else
  133. #include "tls_hooks_init.h"
  134. #include "tls_hooks.h"
  135. #endif
  136. #include "tcp_info.h"
  137. #include "tcp_options.h"
  138. #include "ut.h"
  139. #include "cfg/cfg_struct.h"
  140. #define local_malloc pkg_malloc
  141. #define local_free pkg_free
  142. #define HANDLE_IO_INLINE
  143. #include "io_wait.h"
  144. #include <fcntl.h> /* must be included after io_wait.h if SIGIO_RT is used */
  145. #define TCP_PASS_NEW_CONNECTION_ON_DATA /* don't pass a new connection
  146. immediately to a child, wait for
  147. some data on it first */
  148. #define TCP_LISTEN_BACKLOG 1024
  149. #define SEND_FD_QUEUE /* queue send fd requests on EAGAIN, instead of sending
  150. them immediately */
  151. #define TCP_CHILD_NON_BLOCKING
  152. #ifdef SEND_FD_QUEUE
  153. #ifndef TCP_CHILD_NON_BLOCKING
  154. #define TCP_CHILD_NON_BLOCKING
  155. #endif
  156. #define MAX_SEND_FD_QUEUE_SIZE tcp_main_max_fd_no
  157. #define SEND_FD_QUEUE_SIZE 128 /* initial size */
  158. #define MAX_SEND_FD_RETRIES 96 /* FIXME: not used for now */
  159. #define SEND_FD_QUEUE_TIMEOUT MS_TO_TICKS(2000) /* 2 s */
  160. #endif
  161. /* maximum accepted lifetime (maximum possible is ~ MAXINT/2) */
  162. #define MAX_TCP_CON_LIFETIME ((1U<<(sizeof(ticks_t)*8-1))-1)
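/* illustrative note (added): assuming ticks_t is an unsigned 32 bit type,
 * this evaluates to (1U<<31)-1 = 2147483647 ticks */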
  163. /* minimum interval local_timer_run() is allowed to run, in ticks */
  164. #define TCPCONN_TIMEOUT_MIN_RUN 1 /* once per tick */
  165. #define TCPCONN_WAIT_TIMEOUT 1 /* 1 tick */
  166. #ifdef TCP_BUF_WRITE
  167. #define TCP_WBUF_SIZE 1024 /* FIXME: after debugging switch to 16-32k */
  168. static unsigned int* tcp_total_wq=0;
  169. #endif
  170. enum fd_types { F_NONE, F_SOCKINFO /* a tcp_listen fd */,
  171. F_TCPCONN, F_TCPCHILD, F_PROC };
  172. #ifdef TCP_FD_CACHE
  173. #define TCP_FD_CACHE_SIZE 8
  174. struct fd_cache_entry{
  175. struct tcp_connection* con;
  176. int id;
  177. int fd;
  178. };
  179. static struct fd_cache_entry fd_cache[TCP_FD_CACHE_SIZE];
  180. #endif /* TCP_FD_CACHE */
  181. static int is_tcp_main=0;
  182. int tcp_accept_aliases=0; /* by default don't accept aliases */
  183. /* flags used for adding new aliases */
  184. int tcp_alias_flags=TCP_ALIAS_FORCE_ADD;
  185. /* flags used for adding the default aliases of a new tcp connection */
  186. int tcp_new_conn_alias_flags=TCP_ALIAS_REPLACE;
  187. int tcp_connect_timeout=DEFAULT_TCP_CONNECT_TIMEOUT;
  188. int tcp_send_timeout=DEFAULT_TCP_SEND_TIMEOUT;
  189. int tcp_con_lifetime=DEFAULT_TCP_CONNECTION_LIFETIME;
  190. enum poll_types tcp_poll_method=0; /* by default choose the best method */
  191. int tcp_max_connections=DEFAULT_TCP_MAX_CONNECTIONS;
  192. int tcp_main_max_fd_no=0;
  193. static union sockaddr_union tcp_source_ipv4_addr; /* saved bind/srv v4 addr. */
  194. static union sockaddr_union* tcp_source_ipv4=0;
  195. #ifdef USE_IPV6
  196. static union sockaddr_union tcp_source_ipv6_addr; /* saved bind/src v6 addr. */
  197. static union sockaddr_union* tcp_source_ipv6=0;
  198. #endif
  199. static int* tcp_connections_no=0; /* current open connections */
  200. /* connection hash table (after ip&port) , includes also aliases */
  201. struct tcp_conn_alias** tcpconn_aliases_hash=0;
  202. /* connection hash table (after connection id) */
  203. struct tcp_connection** tcpconn_id_hash=0;
  204. gen_lock_t* tcpconn_lock=0;
  205. struct tcp_child* tcp_children;
  206. static int* connection_id=0; /* unique for each connection, used for
  207. quickly finding the corresponding connection
  208. for a reply */
  209. int unix_tcp_sock;
  210. static int tcp_proto_no=-1; /* tcp protocol number as returned by
  211. getprotobyname */
  212. static io_wait_h io_h;
  213. static struct local_timer tcp_main_ltimer;
  214. static ticks_t tcpconn_main_timeout(ticks_t , struct timer_ln* , void* );
  215. inline static int _tcpconn_add_alias_unsafe(struct tcp_connection* c, int port,
  216. struct ip_addr* l_ip, int l_port,
  217. int flags);
  218. /* sets source address used when opening new sockets and no source is specified
  219. * (by default the address is chosen by the kernel)
  220. * Should be used only on init.
  221. * returns -1 on error */
  222. int tcp_set_src_addr(struct ip_addr* ip)
  223. {
  224. switch (ip->af){
  225. case AF_INET:
  226. ip_addr2su(&tcp_source_ipv4_addr, ip, 0);
  227. tcp_source_ipv4=&tcp_source_ipv4_addr;
  228. break;
  229. #ifdef USE_IPV6
  230. case AF_INET6:
  231. ip_addr2su(&tcp_source_ipv6_addr, ip, 0);
  232. tcp_source_ipv6=&tcp_source_ipv6_addr;
  233. break;
  234. #endif
  235. default:
  236. return -1;
  237. }
  238. return 0;
  239. }
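/* Illustrative usage sketch (not part of the original file): called once at
 * startup, e.g. from config handling, with an already parsed address.
 * `src` below is a hypothetical local variable, not a ser global.
 *
 *	struct ip_addr src;
 *	... fill src from the config (af + address bytes) ...
 *	if (tcp_set_src_addr(&src)<0)
 *		LOG(L_WARN, "unsupported address family, the kernel will choose"
 *					" the source address\n");
 */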
  240. static inline int init_sock_keepalive(int s)
  241. {
  242. int optval;
  243. #ifdef HAVE_SO_KEEPALIVE
  244. if (tcp_options.keepalive){
  245. optval=1;
  246. if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &optval,
  247. sizeof(optval))<0){
  248. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to enable"
  249. " SO_KEEPALIVE: %s\n", strerror(errno));
  250. return -1;
  251. }
  252. }
  253. #endif
  254. #ifdef HAVE_TCP_KEEPINTVL
  255. if (tcp_options.keepintvl){
  256. optval=tcp_options.keepintvl;
  257. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &optval,
  258. sizeof(optval))<0){
  259. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  260. " keepalive probes interval: %s\n", strerror(errno));
  261. }
  262. }
  263. #endif
  264. #ifdef HAVE_TCP_KEEPIDLE
  265. if (tcp_options.keepidle){
  266. optval=tcp_options.keepidle;
  267. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &optval,
  268. sizeof(optval))<0){
  269. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  270. " keepalive idle interval: %s\n", strerror(errno));
  271. }
  272. }
  273. #endif
  274. #ifdef HAVE_TCP_KEEPCNT
  275. if (tcp_options.keepcnt){
  276. optval=tcp_options.keepcnt;
  277. if (setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT, &optval,
  278. sizeof(optval))<0){
  279. LOG(L_WARN, "WARNING: init_sock_keepalive: failed to set"
  280. " maximum keepalive count: %s\n", strerror(errno));
  281. }
  282. }
  283. #endif
  284. return 0;
  285. }
  286. /* set all socket/fd options for new sockets (e.g. before connect):
  287. * disable nagle, tos lowdelay, reuseaddr, non-blocking
  288. *
  289. * return -1 on error */
  290. static int init_sock_opt(int s)
  291. {
  292. int flags;
  293. int optval;
  294. #ifdef DISABLE_NAGLE
  295. flags=1;
  296. if ( (tcp_proto_no!=-1) && (setsockopt(s, tcp_proto_no , TCP_NODELAY,
  297. &flags, sizeof(flags))<0) ){
  298. LOG(L_WARN, "WARNING: init_sock_opt: could not disable Nagle: %s\n",
  299. strerror(errno));
  300. }
  301. #endif
  302. /* tos*/
  303. optval = tos;
  304. if (setsockopt(s, IPPROTO_IP, IP_TOS, (void*)&optval,sizeof(optval)) ==-1){
  305. LOG(L_WARN, "WARNING: init_sock_opt: setsockopt tos: %s\n",
  306. strerror(errno));
  307. /* continue since this is not critical */
  308. }
  309. #if !defined(TCP_DONT_REUSEADDR)
  310. optval=1;
  311. if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR,
  312. (void*)&optval, sizeof(optval))==-1){
  313. LOG(L_ERR, "ERROR: setsockopt SO_REUSEADDR %s\n",
  314. strerror(errno));
  315. /* continue, not critical */
  316. }
  317. #endif /* !TCP_DONT_REUSEADDR */
  318. #ifdef HAVE_TCP_SYNCNT
  319. if (tcp_options.syncnt){
  320. optval=tcp_options.syncnt;
  321. if (setsockopt(s, IPPROTO_TCP, TCP_SYNCNT, &optval,
  322. sizeof(optval))<0){
  323. LOG(L_WARN, "WARNING: init_sock_opt: failed to set"
  324. " maximum SYN retr. count: %s\n", strerror(errno));
  325. }
  326. }
  327. #endif
  328. #ifdef HAVE_TCP_LINGER2
  329. if (tcp_options.linger2){
  330. optval=tcp_options.linger2;
  331. if (setsockopt(s, IPPROTO_TCP, TCP_LINGER2, &optval,
  332. sizeof(optval))<0){
  333. LOG(L_WARN, "WARNING: init_sock_opt: failed to set"
  334. " maximum LINGER2 timeout: %s\n", strerror(errno));
  335. }
  336. }
  337. #endif
  338. #ifdef HAVE_TCP_QUICKACK
  339. if (tcp_options.delayed_ack){
  340. optval=0; /* reset quick ack => delayed ack */
  341. if (setsockopt(s, IPPROTO_TCP, TCP_QUICKACK, &optval,
  342. sizeof(optval))<0){
  343. LOG(L_WARN, "WARNING: init_sock_opt: failed to reset"
  344. " TCP_QUICKACK: %s\n", strerror(errno));
  345. }
  346. }
  347. #endif /* HAVE_TCP_QUICKACK */
  348. init_sock_keepalive(s);
  349. /* non-blocking */
  350. flags=fcntl(s, F_GETFL);
  351. if (flags==-1){
  352. LOG(L_ERR, "ERROR: init_sock_opt: fcntl failed: (%d) %s\n",
  353. errno, strerror(errno));
  354. goto error;
  355. }
  356. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  357. LOG(L_ERR, "ERROR: init_sock_opt: fcntl: set non-blocking failed:"
  358. " (%d) %s\n", errno, strerror(errno));
  359. goto error;
  360. }
  361. return 0;
  362. error:
  363. return -1;
  364. }
  365. /* set all socket/fd options for "accepted" sockets
  366. * only nonblocking is set since the rest is inherited from the
  367. * "parent" (listening) socket
  368. * Note: setting O_NONBLOCK is required on linux but it's not needed on
  369. * BSD and possibly solaris (where the flag is inherited from the
  370. * parent socket). However since there is no standard document
  371. * requiring a specific behaviour in this case it's safer to always set
  372. * it (at least for now) --andrei
  373. * TODO: check on which OSes O_NONBLOCK is inherited and make this
  374. * function a nop.
  375. *
  376. * return -1 on error */
  377. static int init_sock_opt_accept(int s)
  378. {
  379. int flags;
  380. /* non-blocking */
  381. flags=fcntl(s, F_GETFL);
  382. if (flags==-1){
  383. LOG(L_ERR, "ERROR: init_sock_opt_accept: fcntl failed: (%d) %s\n",
  384. errno, strerror(errno));
  385. goto error;
  386. }
  387. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  388. LOG(L_ERR, "ERROR: init_sock_opt_accept: "
  389. "fcntl: set non-blocking failed: (%d) %s\n",
  390. errno, strerror(errno));
  391. goto error;
  392. }
  393. return 0;
  394. error:
  395. return -1;
  396. }
  397. /* blocking connect on a non-blocking fd; it will timeout after
  398. * tcp_connect_timeout
  399. * if BLOCKING_USE_SELECT and HAVE_SELECT are defined it will internally
  400. * use select() instead of poll (bad if fd > FD_SETSIZE, poll is preferred)
  401. */
  402. static int tcp_blocking_connect(int fd, const struct sockaddr *servaddr,
  403. socklen_t addrlen)
  404. {
  405. int n;
  406. #if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
  407. fd_set sel_set;
  408. fd_set orig_set;
  409. struct timeval timeout;
  410. #else
  411. struct pollfd pf;
  412. #endif
  413. int elapsed;
  414. int to;
  415. int ticks;
  416. int err;
  417. unsigned int err_len;
  418. int poll_err;
  419. poll_err=0;
  420. to=tcp_connect_timeout;
  421. ticks=get_ticks();
  422. again:
  423. n=connect(fd, servaddr, addrlen);
  424. if (n==-1){
  425. if (errno==EINTR){
  426. elapsed=(get_ticks()-ticks)*TIMER_TICK;
  427. if (elapsed<to) goto again;
  428. else goto error_timeout;
  429. }
  430. if (errno!=EINPROGRESS && errno!=EALREADY){
  431. LOG(L_ERR, "ERROR: tcp_blocking_connect: (%d) %s\n",
  432. errno, strerror(errno));
  433. goto error;
  434. }
  435. }else goto end;
  436. /* poll/select loop */
  437. #if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
  438. FD_ZERO(&orig_set);
  439. FD_SET(fd, &orig_set);
  440. #else
  441. pf.fd=fd;
  442. pf.events=POLLOUT;
  443. #endif
  444. while(1){
  445. elapsed=(get_ticks()-ticks)*TIMER_TICK;
  446. if (elapsed<to)
  447. to-=elapsed;
  448. else
  449. goto error_timeout;
  450. #if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
  451. sel_set=orig_set;
  452. timeout.tv_sec=to;
  453. timeout.tv_usec=0;
  454. n=select(fd+1, 0, &sel_set, 0, &timeout);
  455. #else
  456. n=poll(&pf, 1, to*1000);
  457. #endif
  458. if (n<0){
  459. if (errno==EINTR) continue;
  460. LOG(L_ERR, "ERROR: tcp_blocking_connect: poll/select failed:"
  461. " (%d) %s\n", errno, strerror(errno));
  462. goto error;
  463. }else if (n==0) /* timeout */ continue;
  464. #if defined(HAVE_SELECT) && defined(BLOCKING_USE_SELECT)
  465. if (FD_ISSET(fd, &sel_set))
  466. #else
  467. if (pf.revents&(POLLERR|POLLHUP|POLLNVAL)){
  468. LOG(L_ERR, "ERROR: tcp_blocking_connect: poll error: flags %x\n",
  469. pf.revents);
  470. poll_err=1;
  471. }
  472. #endif
  473. {
  474. err_len=sizeof(err);
  475. getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &err_len);
  476. if ((err==0) && (poll_err==0)) goto end;
  477. if (err!=EINPROGRESS && err!=EALREADY){
  478. LOG(L_ERR, "ERROR: tcp_blocking_connect: SO_ERROR (%d) %s\n",
  479. err, strerror(err));
  480. goto error;
  481. }
  482. }
  483. }
  484. error_timeout:
  485. /* timeout */
  486. LOG(L_ERR, "ERROR: tcp_blocking_connect: timeout %d s elapsed from %d s\n",
  487. elapsed, tcp_connect_timeout);
  488. error:
  489. return -1;
  490. end:
  491. return 0;
  492. }
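/* The sketch below (disabled, illustrative only) restates the core of the
 * pattern used by tcp_blocking_connect() above in plain POSIX calls, without
 * the ser tick bookkeeping or the select() variant; timeout_s is a
 * hypothetical parameter, not a ser config variable. */
#if 0
static int nb_connect_sketch(int fd, const struct sockaddr* addr,
								socklen_t addrlen, int timeout_s)
{
	struct pollfd pf;
	int err;
	socklen_t err_len;

	if (connect(fd, addr, addrlen)==0)
		return 0; /* connected immediately */
	if (errno!=EINPROGRESS)
		return -1; /* hard failure (EINTR retry omitted for brevity) */
	pf.fd=fd;
	pf.events=POLLOUT;
	if (poll(&pf, 1, timeout_s*1000)<=0)
		return -1; /* timeout or poll error */
	/* the connect() result must be read via SO_ERROR, not via errno */
	err=0;
	err_len=sizeof(err);
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &err_len)<0 || err!=0)
		return -1;
	return 0;
}
#endif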
  493. inline static int _tcpconn_write_nb(int fd, struct tcp_connection* c,
  494. char* buf, int len);
  495. #ifdef TCP_BUF_WRITE
  496. inline static int wbufq_add(struct tcp_connection* c, char* data,
  497. unsigned int size)
  498. {
  499. struct tcp_wbuffer_queue* q;
  500. struct tcp_wbuffer* wb;
  501. unsigned int last_free;
  502. unsigned int wb_size;
  503. unsigned int crt_size;
  504. ticks_t t;
  505. q=&c->wbuf_q;
  506. t=get_ticks_raw();
  507. if (unlikely( ((q->queued+size)>tcp_options.tcpconn_wq_max) ||
  508. ((*tcp_total_wq+size)>tcp_options.tcp_wq_max) ||
  509. (q->first &&
  510. TICKS_GT(t, c->last_write+tcp_options.tcp_wq_timeout)) )){
  511. LOG(L_ERR, "ERROR: wbufq_add(%d bytes): write queue full or timeout "
  512. " (%d, total %d, last write %d s ago)\n",
  513. size, q->queued, *tcp_total_wq,
  514. TICKS_TO_S(t-c->last_write));
  515. goto error;
  516. }
  517. if (unlikely(q->last==0)){
  518. wb_size=MAX_unsigned(TCP_WBUF_SIZE, size);
  519. wb=shm_malloc(sizeof(*wb)+wb_size-1);
  520. if (unlikely(wb==0))
  521. goto error;
  522. wb->b_size=wb_size;
  523. wb->next=0;
  524. q->last=wb;
  525. q->first=wb;
  526. q->last_used=0;
  527. q->offset=0;
  528. c->last_write=get_ticks_raw(); /* start with the crt. time */
  529. }else{
  530. wb=q->last;
  531. }
  532. while(size){
  533. last_free=wb->b_size-q->last_used;
  534. if (last_free==0){
  535. wb_size=MAX_unsigned(TCP_WBUF_SIZE, size);
  536. wb=shm_malloc(sizeof(*wb)+wb_size-1);
  537. if (unlikely(wb==0))
  538. goto error;
  539. wb->b_size=wb_size;
  540. wb->next=0;
  541. q->last->next=wb;
  542. q->last=wb;
  543. q->last_used=0;
  544. last_free=wb->b_size;
  545. }
  546. crt_size=MIN_unsigned(last_free, size);
  547. memcpy(wb->buf, data, crt_size);
  548. q->last_used+=crt_size;
  549. size-=crt_size;
  550. data+=crt_size;
  551. q->queued+=crt_size;
  552. atomic_add_int((int*)tcp_total_wq, crt_size);
  553. }
  554. return 0;
  555. error:
  556. return -1;
  557. }
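/* Added note: the shm_malloc(sizeof(*wb)+wb_size-1) calls above rely on the
 * classic "struct hack" -- struct tcp_wbuffer presumably ends in a one byte
 * buf[1] member (hence the -1), so the extra wb_size-1 bytes extend it and
 * wb->buf can hold wb_size bytes of queued data. */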
  558. inline static void wbufq_destroy( struct tcp_wbuffer_queue* q)
  559. {
  560. struct tcp_wbuffer* wb;
  561. struct tcp_wbuffer* next_wb;
  562. int unqueued;
  563. unqueued=0;
  564. if (likely(q->first)){
  565. wb=q->first;
  566. do{
  567. next_wb=wb->next;
  568. unqueued+=(wb==q->last)?q->last_used:wb->b_size;
  569. if (wb==q->first)
  570. unqueued-=q->offset;
  571. shm_free(wb);
  572. wb=next_wb;
  573. }while(wb);
  574. }
  575. memset(q, 0, sizeof(*q));
  576. atomic_add_int((int*)tcp_total_wq, -unqueued);
  577. }
  578. /* tries to empty the queue
  579. * returns -1 on error, bytes written on success (>=0)
  580. * if the whole queue is emptied => sets *empty*/
  581. inline static int wbufq_run(int fd, struct tcp_connection* c, int* empty)
  582. {
  583. struct tcp_wbuffer_queue* q;
  584. struct tcp_wbuffer* wb;
  585. int n;
  586. int ret;
  587. int block_size;
  588. ticks_t t;
  589. char* buf;
  590. *empty=0;
  591. ret=0;
  592. t=get_ticks_raw();
  593. lock_get(&c->write_lock);
  594. q=&c->wbuf_q;
  595. while(q->first){
  596. block_size=((q->first==q->last)?q->last_used:q->first->b_size)-
  597. q->offset;
  598. buf=q->first->buf+q->offset;
  599. n=_tcpconn_write_nb(fd, c, buf, block_size);
  600. if (likely(n>0)){
  601. ret+=n;
  602. if (likely(n==block_size)){
  603. wb=q->first;
  604. q->first=q->first->next;
  605. shm_free(wb);
  606. q->offset=0;
  607. q->queued-=block_size;
  608. atomic_add_int((int*)tcp_total_wq, -block_size);
  609. }else{
  610. q->offset+=n;
  611. q->queued-=n;
  612. atomic_add_int((int*)tcp_total_wq, -n);
  613. break;
  614. }
  615. c->last_write=t;
  616. c->state=S_CONN_OK;
  617. }else{
  618. if (n<0){
  619. /* EINTR is handled inside _tcpconn_write_nb */
  620. if (!(errno==EAGAIN || errno==EWOULDBLOCK)){
  621. ret=-1;
  622. LOG(L_ERR, "ERROR: wbufq_run: %s [%d]\n",
  623. strerror(errno), errno);
  624. }
  625. }
  626. break;
  627. }
  628. }
  629. if (likely(q->first==0)){
  630. q->last=0;
  631. q->last_used=0;
  632. q->offset=0;
  633. *empty=1;
  634. }
  635. if (unlikely(c->state==S_CONN_CONNECT && (ret>0)))
  636. c->state=S_CONN_OK;
  637. lock_release(&c->write_lock);
  638. return ret;
  639. }
  640. #endif /* TCP_BUF_WRITE */
  641. #if 0
  642. /* blocking write even on non-blocking sockets
  643. * if TCP_TIMEOUT will return with error */
  644. static int tcp_blocking_write(struct tcp_connection* c, int fd, char* buf,
  645. unsigned int len)
  646. {
  647. int n;
  648. fd_set sel_set;
  649. struct timeval timeout;
  650. int ticks;
  651. int initial_len;
  652. initial_len=len;
  653. again:
  654. n=send(fd, buf, len,
  655. #ifdef HAVE_MSG_NOSIGNAL
  656. MSG_NOSIGNAL
  657. #else
  658. 0
  659. #endif
  660. );
  661. if (n<0){
  662. if (errno==EINTR) goto again;
  663. else if (errno!=EAGAIN && errno!=EWOULDBLOCK){
  664. LOG(L_ERR, "tcp_blocking_write: failed to send: (%d) %s\n",
  665. errno, strerror(errno));
  666. goto error;
  667. }
  668. }else if (n<len){
  669. /* partial write */
  670. buf+=n;
  671. len-=n;
  672. }else{
  673. /* success: full write */
  674. goto end;
  675. }
  676. while(1){
  677. FD_ZERO(&sel_set);
  678. FD_SET(fd, &sel_set);
  679. timeout.tv_sec=tcp_send_timeout;
  680. timeout.tv_usec=0;
  681. ticks=get_ticks();
  682. n=select(fd+1, 0, &sel_set, 0, &timeout);
  683. if (n<0){
  684. if (errno==EINTR) continue; /* signal, ignore */
  685. LOG(L_ERR, "ERROR: tcp_blocking_write: select failed: "
  686. " (%d) %s\n", errno, strerror(errno));
  687. goto error;
  688. }else if (n==0){
  689. /* timeout */
  690. if (get_ticks()-ticks>=tcp_send_timeout){
  691. LOG(L_ERR, "ERROR: tcp_blocking_write: send timeout (%d)\n",
  692. tcp_send_timeout);
  693. goto error;
  694. }
  695. continue;
  696. }
  697. if (FD_ISSET(fd, &sel_set)){
  698. /* we can write again */
  699. goto again;
  700. }
  701. }
  702. error:
  703. return -1;
  704. end:
  705. return initial_len;
  706. }
  707. #endif
  708. struct tcp_connection* tcpconn_new(int sock, union sockaddr_union* su,
  709. union sockaddr_union* local_addr,
  710. struct socket_info* ba, int type,
  711. int state)
  712. {
  713. struct tcp_connection *c;
  714. c=(struct tcp_connection*)shm_malloc(sizeof(struct tcp_connection));
  715. if (c==0){
  716. LOG(L_ERR, "ERROR: tcpconn_new: mem. allocation failure\n");
  717. goto error;
  718. }
  719. memset(c, 0, sizeof(struct tcp_connection)); /* zero init */
  720. c->s=sock;
  721. c->fd=-1; /* not initialized */
  722. if (lock_init(&c->write_lock)==0){
  723. LOG(L_ERR, "ERROR: tcpconn_new: init lock failed\n");
  724. goto error;
  725. }
  726. c->rcv.src_su=*su;
  727. atomic_set(&c->refcnt, 0);
  728. local_timer_init(&c->timer, tcpconn_main_timeout, c, 0);
  729. su2ip_addr(&c->rcv.src_ip, su);
  730. c->rcv.src_port=su_getport(su);
  731. c->rcv.bind_address=ba;
  732. if (likely(local_addr)){
  733. su2ip_addr(&c->rcv.dst_ip, local_addr);
  734. c->rcv.dst_port=su_getport(local_addr);
  735. }else if (ba){
  736. c->rcv.dst_ip=ba->address;
  737. c->rcv.dst_port=ba->port_no;
  738. }
  739. print_ip("tcpconn_new: new tcp connection: ", &c->rcv.src_ip, "\n");
  740. DBG( "tcpconn_new: on port %d, type %d\n", c->rcv.src_port, type);
  741. init_tcp_req(&c->req);
  742. c->id=(*connection_id)++;
  743. c->rcv.proto_reserved1=0; /* this will be filled before receive_message*/
  744. c->rcv.proto_reserved2=0;
  745. c->state=state;
  746. c->extra_data=0;
  747. #ifdef USE_TLS
  748. if (type==PROTO_TLS){
  749. if (tls_tcpconn_init(c, sock)==-1) goto error;
  750. }else
  751. #endif /* USE_TLS*/
  752. {
  753. c->type=PROTO_TCP;
  754. c->rcv.proto=PROTO_TCP;
  755. c->timeout=get_ticks_raw()+tcp_con_lifetime;
  756. }
  757. c->flags|=F_CONN_REMOVED;
  758. return c;
  759. error:
  760. if (c) shm_free(c);
  761. return 0;
  762. }
  763. struct tcp_connection* tcpconn_connect( union sockaddr_union* server,
  764. union sockaddr_union* from,
  765. int type)
  766. {
  767. int s;
  768. struct socket_info* si;
  769. union sockaddr_union my_name;
  770. socklen_t my_name_len;
  771. struct tcp_connection* con;
  772. struct ip_addr ip;
  773. enum tcp_conn_states state;
  774. #ifdef TCP_BUF_WRITE
  775. int n;
  776. #endif /* TCP_BUF_WRITE */
  777. s=-1;
  778. if (*tcp_connections_no >= tcp_max_connections){
  779. LOG(L_ERR, "ERROR: tcpconn_connect: maximum number of connections"
  780. " exceeded (%d/%d)\n",
  781. *tcp_connections_no, tcp_max_connections);
  782. goto error;
  783. }
  784. s=socket(AF2PF(server->s.sa_family), SOCK_STREAM, 0);
  785. if (s==-1){
  786. LOG(L_ERR, "ERROR: tcpconn_connect: socket: (%d) %s\n",
  787. errno, strerror(errno));
  788. goto error;
  789. }
  790. if (init_sock_opt(s)<0){
  791. LOG(L_ERR, "ERROR: tcpconn_connect: init_sock_opt failed\n");
  792. goto error;
  793. }
  794. if (from && bind(s, &from->s, sockaddru_len(*from)) != 0)
  795. LOG(L_WARN, "WARNING: tcpconn_connect: binding to source address"
  796. " failed: %s [%d]\n", strerror(errno), errno);
  797. state=S_CONN_OK;
  798. #ifdef TCP_BUF_WRITE
  799. if (likely(tcp_options.tcp_buf_write)){
  800. again:
  801. n=connect(s, &server->s, sockaddru_len(*server));
  802. if (unlikely(n==-1)){
  803. if (errno==EINTR) goto again;
  804. if (errno!=EINPROGRESS && errno!=EALREADY){
  805. LOG(L_ERR, "ERROR: tcpconn_connect: connect: (%d) %s\n",
  806. errno, strerror(errno));
  807. goto error;
  808. }
  809. state=S_CONN_CONNECT;
  810. }
  811. }else{
  812. #endif /* TCP_BUF_WRITE */
  813. if (tcp_blocking_connect(s, &server->s, sockaddru_len(*server))<0){
  814. LOG(L_ERR, "ERROR: tcpconn_connect: tcp_blocking_connect"
  815. " failed\n");
  816. goto error;
  817. }
  818. #ifdef TCP_BUF_WRITE
  819. }
  820. #endif /* TCP_BUF_WRITE */
  821. if (from){
  822. su2ip_addr(&ip, from);
  823. if (!ip_addr_any(&ip))
  824. /* we already know the source ip, skip the sys. call */
  825. goto find_socket;
  826. }
  827. my_name_len=sizeof(my_name);
  828. if (getsockname(s, &my_name.s, &my_name_len)!=0){
  829. LOG(L_ERR, "ERROR: tcp_connect: getsockname failed: %s(%d)\n",
  830. strerror(errno), errno);
  831. si=0; /* try to go on */
  832. goto skip;
  833. }
  834. from=&my_name; /* update from with the real "from" address */
  835. su2ip_addr(&ip, &my_name);
  836. find_socket:
  837. #ifdef USE_TLS
  838. if (type==PROTO_TLS)
  839. si=find_si(&ip, 0, PROTO_TLS);
  840. else
  841. #endif
  842. si=find_si(&ip, 0, PROTO_TCP);
  843. skip:
  844. if (si==0){
  845. LOG(L_WARN, "WARNING: tcp_connect: could not find corresponding"
  846. " listening socket, using default...\n");
  847. if (server->s.sa_family==AF_INET) si=sendipv4_tcp;
  848. #ifdef USE_IPV6
  849. else si=sendipv6_tcp;
  850. #endif
  851. }
  852. con=tcpconn_new(s, server, from, si, type, state);
  853. if (con==0){
  854. LOG(L_ERR, "ERROR: tcp_connect: tcpconn_new failed, closing the "
  855. " socket\n");
  856. goto error;
  857. }
  858. return con;
  859. /*FIXME: set sock idx! */
  860. error:
  861. if (s!=-1) close(s); /* close the opened socket */
  862. return 0;
  863. }
  864. /* adds a tcp connection to the tcpconn hashes
  865. * Note: it's called _only_ from the tcp_main process */
  866. inline static struct tcp_connection* tcpconn_add(struct tcp_connection *c)
  867. {
  868. struct ip_addr zero_ip;
  869. if (likely(c)){
  870. ip_addr_mk_any(c->rcv.src_ip.af, &zero_ip);
  871. c->id_hash=tcp_id_hash(c->id);
  872. c->aliases=0;
  873. TCPCONN_LOCK;
  874. /* add it at the beginning of the list */
  875. tcpconn_listadd(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
  876. /* set the aliases */
  877. /* first alias is for (peer_ip, peer_port, 0 ,0) -- for finding
  878. * any connection to peer_ip, peer_port
  879. * the second alias is for (peer_ip, peer_port, local_addr, 0) -- for
  880. * finding any connection to peer_ip, peer_port from local_addr
  881. * the third alias is for (peer_ip, peer_port, local_addr, local_port)
  882. * -- for finding if a fully specified connection exists */
  883. _tcpconn_add_alias_unsafe(c, c->rcv.src_port, &zero_ip, 0,
  884. tcp_new_conn_alias_flags);
  885. _tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip, 0,
  886. tcp_new_conn_alias_flags);
  887. _tcpconn_add_alias_unsafe(c, c->rcv.src_port, &c->rcv.dst_ip,
  888. c->rcv.dst_port, tcp_new_conn_alias_flags);
  889. /* ignore add_alias errors, there are some valid cases when one
  890. * of the add_alias would fail (e.g. first add_alias for 2 connections
  891. * with the same destination but different src. ip) */
  892. TCPCONN_UNLOCK;
  893. DBG("tcpconn_add: hashes: %d:%d:%d, %d\n",
  894. c->con_aliases[0].hash,
  895. c->con_aliases[1].hash,
  896. c->con_aliases[2].hash,
  897. c->id_hash);
  898. return c;
  899. }else{
  900. LOG(L_CRIT, "tcpconn_add: BUG: null connection pointer\n");
  901. return 0;
  902. }
  903. }
  904. static inline void _tcpconn_detach(struct tcp_connection *c)
  905. {
  906. int r;
  907. tcpconn_listrm(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
  908. /* remove all the aliases */
  909. for (r=0; r<c->aliases; r++)
  910. tcpconn_listrm(tcpconn_aliases_hash[c->con_aliases[r].hash],
  911. &c->con_aliases[r], next, prev);
  912. }
  913. static inline void _tcpconn_free(struct tcp_connection* c)
  914. {
  915. #ifdef TCP_BUF_WRITE
  916. if (unlikely(c->wbuf_q.first))
  917. wbufq_destroy(&c->wbuf_q);
  918. #endif
  919. lock_destroy(&c->write_lock);
  920. #ifdef USE_TLS
  921. if (unlikely(c->type==PROTO_TLS)) tls_tcpconn_clean(c);
  922. #endif
  923. shm_free(c);
  924. }
  925. /* unsafe tcpconn_rm version (nolocks) */
  926. void _tcpconn_rm(struct tcp_connection* c)
  927. {
  928. _tcpconn_detach(c);
  929. _tcpconn_free(c);
  930. }
  931. void tcpconn_rm(struct tcp_connection* c)
  932. {
  933. int r;
  934. TCPCONN_LOCK;
  935. tcpconn_listrm(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
  936. /* remove all the aliases */
  937. for (r=0; r<c->aliases; r++)
  938. tcpconn_listrm(tcpconn_aliases_hash[c->con_aliases[r].hash],
  939. &c->con_aliases[r], next, prev);
  940. TCPCONN_UNLOCK;
  941. lock_destroy(&c->write_lock);
  942. #ifdef USE_TLS
  943. if ((c->type==PROTO_TLS)&&(c->extra_data)) tls_tcpconn_clean(c);
  944. #endif
  945. shm_free(c);
  946. }
  947. /* finds a connection, if id=0 uses the ip addr, port, local_ip and local port
  948. * (host byte order) and tries to find the connection that matches all of
  949. * them. Wild cards can be used for local_ip and local_port (a 0 filled
  950. * ip address and/or a 0 local port).
  951. * WARNING: unprotected (locks) use tcpconn_get unless you really
  952. * know what you are doing */
  953. struct tcp_connection* _tcpconn_find(int id, struct ip_addr* ip, int port,
  954. struct ip_addr* l_ip, int l_port)
  955. {
  956. struct tcp_connection *c;
  957. struct tcp_conn_alias* a;
  958. unsigned hash;
  959. int is_local_ip_any;
  960. #ifdef EXTRA_DEBUG
  961. DBG("tcpconn_find: %d port %d\n",id, port);
  962. if (ip) print_ip("tcpconn_find: ip ", ip, "\n");
  963. #endif
  964. if (likely(id)){
  965. hash=tcp_id_hash(id);
  966. for (c=tcpconn_id_hash[hash]; c; c=c->id_next){
  967. #ifdef EXTRA_DEBUG
  968. DBG("c=%p, c->id=%d, port=%d\n",c, c->id, c->rcv.src_port);
  969. print_ip("ip=", &c->rcv.src_ip, "\n");
  970. #endif
  971. if ((id==c->id)&&(c->state!=S_CONN_BAD)) return c;
  972. }
  973. }else if (likely(ip)){
  974. hash=tcp_addr_hash(ip, port, l_ip, l_port);
  975. is_local_ip_any=ip_addr_any(l_ip);
  976. for (a=tcpconn_aliases_hash[hash]; a; a=a->next){
  977. #ifdef EXTRA_DEBUG
  978. DBG("a=%p, c=%p, c->id=%d, alias port= %d port=%d\n", a, a->parent,
  979. a->parent->id, a->port, a->parent->rcv.src_port);
  980. print_ip("ip=",&a->parent->rcv.src_ip,"\n");
  981. #endif
  982. if ( (a->parent->state!=S_CONN_BAD) && (port==a->port) &&
  983. ((l_port==0) || (l_port==a->parent->rcv.dst_port)) &&
  984. (ip_addr_cmp(ip, &a->parent->rcv.src_ip)) &&
  985. (is_local_ip_any ||
  986. ip_addr_cmp(l_ip, &a->parent->rcv.dst_ip))
  987. )
  988. return a->parent;
  989. }
  990. }
  991. return 0;
  992. }
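/* Illustrative lookup (not part of the original file): find any connection
 * to peer_ip:5060, regardless of the local address and port; peer_ip is a
 * hypothetical, already filled struct ip_addr and the caller is assumed to
 * hold TCPCONN_LOCK:
 *
 *	struct ip_addr any_ip;
 *	struct tcp_connection* c;
 *	ip_addr_mk_any(peer_ip.af, &any_ip);
 *	c=_tcpconn_find(0, &peer_ip, 5060, &any_ip, 0);
 */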
  993. /* _tcpconn_find with locks and timeout
  994. * local_addr contains the desired local ip:port. If null any local address
  995. * will be used. IN*ADDR_ANY or 0 port are wild cards.
  996. */
  997. struct tcp_connection* tcpconn_get(int id, struct ip_addr* ip, int port,
  998. union sockaddr_union* local_addr,
  999. ticks_t timeout)
  1000. {
  1001. struct tcp_connection* c;
  1002. struct ip_addr local_ip;
  1003. int local_port;
  1004. local_port=0;
  1005. if (likely(ip)){
  1006. if (unlikely(local_addr)){
  1007. su2ip_addr(&local_ip, local_addr);
  1008. local_port=su_getport(local_addr);
  1009. }else{
  1010. ip_addr_mk_any(ip->af, &local_ip);
  1011. local_port=0;
  1012. }
  1013. }
  1014. TCPCONN_LOCK;
  1015. c=_tcpconn_find(id, ip, port, &local_ip, local_port);
  1016. if (likely(c)){
  1017. atomic_inc(&c->refcnt);
  1018. /* update the timeout only if the connection is not handled
  1019. * by a tcp reader (the tcp reader process uses c->timeout for
  1020. * its own internal timeout and c->timeout will be overwritten
  1021. * anyway on return to tcp_main) */
  1022. if (likely(c->reader_pid==0))
  1023. c->timeout=get_ticks_raw()+timeout;
  1024. }
  1025. TCPCONN_UNLOCK;
  1026. return c;
  1027. }
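/* Illustrative usage (not part of the original file): get (and reference) a
 * connection to peer_ip:5060 from any local address, refreshing its timeout
 * with the default connection lifetime; peer_ip is a hypothetical, already
 * filled struct ip_addr. The returned connection has its refcnt incremented,
 * so the caller must drop the reference when done (via a tcpconn_put()-style
 * helper, assumed to be defined elsewhere in this file).
 *
 *	struct tcp_connection* c;
 *	c=tcpconn_get(0, &peer_ip, 5060, 0, tcp_con_lifetime);
 *	if (c){
 *		... use c ...
 *	}
 */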
  1028. /* add c->dst:port, local_addr as an alias for the "id" connection,
  1029. * flags: TCP_ALIAS_FORCE_ADD - add an alias even if a previous one exists
  1030. * TCP_ALIAS_REPLACE - if a prev. alias exists, replace it with the
  1031. * new one
  1032. * returns 0 on success, <0 on failure ( -1 - null c, -2 too many aliases,
  1033. * -3 alias already present and pointing to another connection)
  1034. * WARNING: must be called with TCPCONN_LOCK held */
  1035. inline static int _tcpconn_add_alias_unsafe(struct tcp_connection* c, int port,
  1036. struct ip_addr* l_ip, int l_port,
  1037. int flags)
  1038. {
  1039. unsigned hash;
  1040. struct tcp_conn_alias* a;
  1041. struct tcp_conn_alias* nxt;
  1042. int is_local_ip_any;
  1043. a=0;
  1044. is_local_ip_any=ip_addr_any(l_ip);
  1045. if (likely(c)){
  1046. hash=tcp_addr_hash(&c->rcv.src_ip, port, l_ip, l_port);
  1047. /* search the aliases for an already existing one */
  1048. for (a=tcpconn_aliases_hash[hash], nxt=0; a; a=nxt){
  1049. nxt=a->next;
  1050. if ( (a->parent->state!=S_CONN_BAD) && (port==a->port) &&
  1051. ( (l_port==0) || (l_port==a->parent->rcv.dst_port)) &&
  1052. (ip_addr_cmp(&c->rcv.src_ip, &a->parent->rcv.src_ip)) &&
  1053. ( is_local_ip_any ||
  1054. ip_addr_cmp(&a->parent->rcv.dst_ip, l_ip))
  1055. ){
  1056. /* found */
  1057. if (unlikely(a->parent!=c)){
  1058. if (flags & TCP_ALIAS_FORCE_ADD)
  1059. /* still have to walk the whole list to check if
  1060. * the alias was not already added */
  1061. continue;
  1062. else if (flags & TCP_ALIAS_REPLACE){
  1063. /* remove the current one */
  1064. tcpconn_listrm(tcpconn_aliases_hash[hash],
  1065. a, next, prev);
  1066. a->next=0;
  1067. a->prev=0;
  1068. }else
  1069. goto error_sec;
  1070. }else goto ok;
  1071. }
  1072. }
  1073. if (unlikely(c->aliases>=TCP_CON_MAX_ALIASES)) goto error_aliases;
  1074. c->con_aliases[c->aliases].parent=c;
  1075. c->con_aliases[c->aliases].port=port;
  1076. c->con_aliases[c->aliases].hash=hash;
  1077. tcpconn_listadd(tcpconn_aliases_hash[hash],
  1078. &c->con_aliases[c->aliases], next, prev);
  1079. c->aliases++;
  1080. }else goto error_not_found;
  1081. ok:
  1082. #ifdef EXTRA_DEBUG
  1083. if (a) DBG("_tcpconn_add_alias_unsafe: alias already present\n");
  1084. else DBG("_tcpconn_add_alias_unsafe: alias port %d for hash %d, id %d\n",
  1085. port, hash, c->id);
  1086. #endif
  1087. return 0;
  1088. error_aliases:
  1089. /* too many aliases */
  1090. return -2;
  1091. error_not_found:
  1092. /* null connection */
  1093. return -1;
  1094. error_sec:
  1095. /* alias already present and pointing to a different connection
  1096. * (hijack attempt?) */
  1097. return -3;
  1098. }
  1099. /* add port as an alias for the "id" connection,
  1100. * returns 0 on success,-1 on failure */
  1101. int tcpconn_add_alias(int id, int port, int proto)
  1102. {
  1103. struct tcp_connection* c;
  1104. int ret;
  1105. struct ip_addr zero_ip;
  1106. /* fix the port */
  1107. port=port?port:((proto==PROTO_TLS)?SIPS_PORT:SIP_PORT);
  1108. TCPCONN_LOCK;
  1109. /* check if alias already exists */
  1110. c=_tcpconn_find(id, 0, 0, 0, 0);
  1111. if (likely(c)){
  1112. ip_addr_mk_any(c->rcv.src_ip.af, &zero_ip);
  1113. /* alias src_ip:port, 0, 0 */
  1114. ret=_tcpconn_add_alias_unsafe(c, port, &zero_ip, 0,
  1115. tcp_alias_flags);
  1116. if (ret<0 && ret!=-3) goto error;
  1117. /* alias src_ip:port, local_ip, 0 */
  1118. ret=_tcpconn_add_alias_unsafe(c, port, &c->rcv.dst_ip, 0,
  1119. tcp_alias_flags);
  1120. if (ret<0 && ret!=-3) goto error;
  1121. /* alias src_ip:port, local_ip, local_port */
  1122. ret=_tcpconn_add_alias_unsafe(c, port, &c->rcv.dst_ip, c->rcv.dst_port,
  1123. tcp_alias_flags);
  1124. if (unlikely(ret<0)) goto error;
  1125. }else goto error_not_found;
  1126. TCPCONN_UNLOCK;
  1127. return 0;
  1128. error_not_found:
  1129. TCPCONN_UNLOCK;
  1130. LOG(L_ERR, "ERROR: tcpconn_add_alias: no connection found for id %d\n",id);
  1131. return -1;
  1132. error:
  1133. TCPCONN_UNLOCK;
  1134. switch(ret){
  1135. case -2:
  1136. LOG(L_ERR, "ERROR: tcpconn_add_alias: too many aliases"
  1137. " for connection %p (%d)\n", c, c->id);
  1138. break;
  1139. case -3:
  1140. LOG(L_ERR, "ERROR: tcpconn_add_alias: possible port"
  1141. " hijack attempt\n");
  1142. LOG(L_ERR, "ERROR: tcpconn_add_alias: alias for %d port %d already"
  1143. " present and points to another connection \n",
  1144. c->id, port);
  1145. break;
  1146. default:
  1147. LOG(L_ERR, "ERROR: tcpconn_add_alias: unknown error %d\n", ret);
  1148. }
  1149. return -1;
  1150. }
  1151. #ifdef TCP_FD_CACHE
  1152. static void tcp_fd_cache_init()
  1153. {
  1154. int r;
  1155. for (r=0; r<TCP_FD_CACHE_SIZE; r++)
  1156. fd_cache[r].fd=-1;
  1157. }
  1158. inline static struct fd_cache_entry* tcp_fd_cache_get(struct tcp_connection *c)
  1159. {
  1160. int h;
  1161. h=c->id%TCP_FD_CACHE_SIZE;
  1162. if ((fd_cache[h].fd>0) && (fd_cache[h].id==c->id) && (fd_cache[h].con==c))
  1163. return &fd_cache[h];
  1164. return 0;
  1165. }
  1166. inline static void tcp_fd_cache_rm(struct fd_cache_entry* e)
  1167. {
  1168. e->fd=-1;
  1169. }
  1170. inline static void tcp_fd_cache_add(struct tcp_connection *c, int fd)
  1171. {
  1172. int h;
  1173. h=c->id%TCP_FD_CACHE_SIZE;
  1174. if (likely(fd_cache[h].fd>0))
  1175. close(fd_cache[h].fd);
  1176. fd_cache[h].fd=fd;
  1177. fd_cache[h].id=c->id;
  1178. fd_cache[h].con=c;
  1179. }
  1180. #endif /* TCP_FD_CACHE */
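/* Added note: the fd cache above is a small direct-mapped table indexed by
 * c->id%TCP_FD_CACHE_SIZE; a hit requires both the id and the connection
 * pointer to match, and tcp_fd_cache_add() closes whatever fd previously
 * occupied the slot, so the cache keeps at most TCP_FD_CACHE_SIZE send fds
 * open per process. */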
  1181. /* finds a tcpconn & sends on it
  1182. * uses the dst members to, proto (TCP|TLS) and id and tries to send
  1183. * from the "from" address (if non null and id==0)
  1184. * returns: number of bytes written (>=0) on success
  1185. * <0 on error */
  1186. int tcp_send(struct dest_info* dst, union sockaddr_union* from,
  1187. char* buf, unsigned len)
  1188. {
  1189. struct tcp_connection *c;
  1190. struct tcp_connection *tmp;
  1191. struct ip_addr ip;
  1192. int port;
  1193. int fd;
  1194. long response[2];
  1195. int n;
  1196. int do_close_fd;
  1197. #ifdef TCP_BUF_WRITE
  1198. int enable_write_watch;
  1199. #endif /* TCP_BUF_WRITE */
  1200. #ifdef TCP_FD_CACHE
  1201. struct fd_cache_entry* fd_cache_e;
  1202. fd_cache_e=0;
  1203. #endif /* TCP_FD_CACHE */
  1204. do_close_fd=1; /* close the fd on exit */
  1205. port=su_getport(&dst->to);
  1206. if (likely(port)){
  1207. su2ip_addr(&ip, &dst->to);
  1208. c=tcpconn_get(dst->id, &ip, port, from, tcp_con_lifetime);
  1209. }else if (likely(dst->id)){
  1210. c=tcpconn_get(dst->id, 0, 0, 0, tcp_con_lifetime);
  1211. }else{
  1212. LOG(L_CRIT, "BUG: tcp_send called with null id & to\n");
  1213. return -1;
  1214. }
  1215. if (likely(dst->id)){
  1216. if (unlikely(c==0)) {
  1217. if (likely(port)){
  1218. /* try again w/o id */
  1219. c=tcpconn_get(0, &ip, port, from, tcp_con_lifetime);
  1220. goto no_id;
  1221. }else{
  1222. LOG(L_ERR, "ERROR: tcp_send: id %d not found, dropping\n",
  1223. dst->id);
  1224. return -1;
  1225. }
  1226. }else goto get_fd;
  1227. }
  1228. no_id:
  1229. if (unlikely(c==0)){
  1230. DBG("tcp_send: no open tcp connection found, opening new one\n");
  1231. /* create tcp connection */
  1232. if (likely(from==0)){
  1233. /* check to see if we have to use a specific source addr. */
  1234. switch (dst->to.s.sa_family) {
  1235. case AF_INET:
  1236. from = tcp_source_ipv4;
  1237. break;
  1238. #ifdef USE_IPV6
  1239. case AF_INET6:
  1240. from = tcp_source_ipv6;
  1241. break;
  1242. #endif
  1243. default:
  1244. /* error, bad af, ignore ... */
  1245. break;
  1246. }
  1247. }
  1248. if (unlikely((c=tcpconn_connect(&dst->to, from, dst->proto))==0)){
  1249. LOG(L_ERR, "ERROR: tcp_send: connect failed\n");
  1250. return -1;
  1251. }
  1252. atomic_set(&c->refcnt, 1); /* ref. only from here for now */
  1253. fd=c->s;
  1254. /* send the new tcpconn to "tcp main" */
  1255. response[0]=(long)c;
  1256. response[1]=CONN_NEW;
  1257. n=send_fd(unix_tcp_sock, response, sizeof(response), c->s);
  1258. if (unlikely(n<=0)){
  1259. LOG(L_ERR, "BUG: tcp_send: failed send_fd: %s (%d)\n",
  1260. strerror(errno), errno);
  1261. n=-1;
  1262. goto end;
  1263. }
  1264. goto send_it;
  1265. }
  1266. get_fd:
  1267. #ifdef TCP_BUF_WRITE
  1268. /* if data is already queued, we don't need the fd any more */
  1269. if (unlikely(tcp_options.tcp_buf_write && c->wbuf_q.first)){
  1270. lock_get(&c->write_lock);
  1271. if (likely(c->wbuf_q.first)){
  1272. do_close_fd=0;
  1273. if (unlikely(wbufq_add(c, buf, len)<0)){
  1274. lock_release(&c->write_lock);
  1275. n=-1;
  1276. goto error;
  1277. }
  1278. n=len;
  1279. lock_release(&c->write_lock);
  1280. goto release_c;
  1281. }
  1282. lock_release(&c->write_lock);
  1283. }
  1284. #endif /* TCP_BUF_WRITE */
1285. /* check if this is the same reader process that holds
1286. * c; if so, send directly on c->fd */
  1287. if (c->reader_pid==my_pid()){
  1288. DBG("tcp_send: send from reader (%d (%d)), reusing fd\n",
  1289. my_pid(), process_no);
  1290. fd=c->fd;
  1291. do_close_fd=0; /* don't close the fd on exit, it's in use */
  1292. #ifdef TCP_FD_CACHE
  1293. }else if (likely(tcp_options.fd_cache &&
  1294. ((fd_cache_e=tcp_fd_cache_get(c))!=0))){
  1295. fd=fd_cache_e->fd;
  1296. do_close_fd=0;
  1297. DBG("tcp_send: found fd in cache ( %d, %p, %d)\n",
  1298. fd, c, fd_cache_e->id);
  1299. #endif /* TCP_FD_CACHE */
  1300. }else{
  1301. DBG("tcp_send: tcp connection found (%p), acquiring fd\n", c);
  1302. /* get the fd */
  1303. response[0]=(long)c;
  1304. response[1]=CONN_GET_FD;
  1305. n=send_all(unix_tcp_sock, response, sizeof(response));
  1306. if (unlikely(n<=0)){
  1307. LOG(L_ERR, "BUG: tcp_send: failed to get fd(write):%s (%d)\n",
  1308. strerror(errno), errno);
  1309. n=-1;
  1310. goto release_c;
  1311. }
  1312. DBG("tcp_send, c= %p, n=%d\n", c, n);
  1313. n=receive_fd(unix_tcp_sock, &tmp, sizeof(tmp), &fd, MSG_WAITALL);
  1314. if (unlikely(n<=0)){
  1315. LOG(L_ERR, "BUG: tcp_send: failed to get fd(receive_fd):"
  1316. " %s (%d)\n", strerror(errno), errno);
  1317. n=-1;
  1318. do_close_fd=0;
  1319. goto release_c;
  1320. }
  1321. if (unlikely(c!=tmp)){
  1322. LOG(L_CRIT, "BUG: tcp_send: get_fd: got different connection:"
  1323. " %p (id= %d, refcnt=%d state=%d) != "
  1324. " %p (n=%d)\n",
  1325. c, c->id, atomic_get(&c->refcnt), c->state,
  1326. tmp, n
  1327. );
  1328. n=-1; /* fail */
  1329. goto end;
  1330. }
  1331. DBG("tcp_send: after receive_fd: c= %p n=%d fd=%d\n",c, n, fd);
  1332. }
  1333. send_it:
  1334. DBG("tcp_send: sending...\n");
  1335. lock_get(&c->write_lock);
  1336. #ifdef TCP_BUF_WRITE
  1337. if (likely(tcp_options.tcp_buf_write)){
  1338. if (c->wbuf_q.first){
  1339. if (unlikely(wbufq_add(c, buf, len)<0)){
  1340. lock_release(&c->write_lock);
  1341. n=-1;
  1342. goto error;
  1343. }
  1344. lock_release(&c->write_lock);
  1345. n=len;
  1346. goto end;
  1347. }
  1348. n=_tcpconn_write_nb(fd, c, buf, len);
  1349. }else{
  1350. #endif /* TCP_BUF_WRITE */
  1351. #ifdef USE_TLS
  1352. if (c->type==PROTO_TLS)
  1353. n=tls_blocking_write(c, fd, buf, len);
  1354. else
  1355. #endif
  1356. /* n=tcp_blocking_write(c, fd, buf, len); */
  1357. n=tsend_stream(fd, buf, len, tcp_send_timeout*1000);
  1358. #ifdef TCP_BUF_WRITE
  1359. }
  1360. #endif /* TCP_BUF_WRITE */
  1361. lock_release(&c->write_lock);
  1362. DBG("tcp_send: after write: c= %p n=%d fd=%d\n",c, n, fd);
  1363. DBG("tcp_send: buf=\n%.*s\n", (int)len, buf);
  1364. if (unlikely(n<0)){
  1365. #ifdef TCP_BUF_WRITE
  1366. if (tcp_options.tcp_buf_write &&
  1367. (errno==EAGAIN || errno==EWOULDBLOCK)){
  1368. lock_get(&c->write_lock);
  1369. enable_write_watch=(c->wbuf_q.first==0);
  1370. if (unlikely(wbufq_add(c, buf, len)<0)){
  1371. lock_release(&c->write_lock);
  1372. n=-1;
  1373. goto error;
  1374. }
  1375. lock_release(&c->write_lock);
  1376. n=len;
  1377. if (enable_write_watch){
  1378. response[0]=(long)c;
  1379. response[1]=CONN_QUEUED_WRITE;
  1380. if (send_all(unix_tcp_sock, response, sizeof(response))<=0){
  1381. LOG(L_ERR, "BUG: tcp_send: error return failed "
  1382. "(write):%s (%d)\n", strerror(errno), errno);
  1383. n=-1;
  1384. goto error;
  1385. }
  1386. }
  1387. goto end;
  1388. }
  1389. error:
  1390. #endif /* TCP_BUF_WRITE */
  1391. LOG(L_ERR, "ERROR: tcp_send: failed to send\n");
1392. /* error on the connection, mark it as bad and set 0 timeout */
  1393. c->state=S_CONN_BAD;
  1394. c->timeout=get_ticks_raw();
1395. /* tell "main" it should drop this (optional, it would time out anyway) */
  1396. response[0]=(long)c;
  1397. response[1]=CONN_ERROR;
  1398. if (send_all(unix_tcp_sock, response, sizeof(response))<=0){
  1399. LOG(L_ERR, "BUG: tcp_send: error return failed (write):%s (%d)\n",
  1400. strerror(errno), errno);
  1401. tcpconn_put(c); /* deref. it manually */
  1402. n=-1;
  1403. }
  1404. /* CONN_ERROR will auto-dec refcnt => we must not call tcpconn_put
  1405. * if it succeeds */
  1406. #ifdef TCP_FD_CACHE
  1407. if (unlikely(fd_cache_e)){
  1408. LOG(L_ERR, "ERROR: tcp_send: error on cached fd, removing from the"
1409. " cache (%d, %p, %d)\n",
  1410. fd, fd_cache_e->con, fd_cache_e->id);
  1411. tcp_fd_cache_rm(fd_cache_e);
  1412. close(fd);
  1413. }else
  1414. #endif /* TCP_FD_CACHE */
  1415. if (do_close_fd) close(fd);
  1416. return n; /* error return, no tcpconn_put */
  1417. }
  1418. #ifdef TCP_BUF_WRITE
  1419. if (likely(tcp_options.tcp_buf_write)){
  1420. if (unlikely(c->state==S_CONN_CONNECT))
  1421. c->state=S_CONN_OK;
  1422. c->last_write=get_ticks_raw();
  1423. }
  1424. #endif /* TCP_BUF_WRITE */
  1425. end:
  1426. #ifdef TCP_FD_CACHE
  1427. if (unlikely((fd_cache_e==0) && tcp_options.fd_cache)){
  1428. tcp_fd_cache_add(c, fd);
  1429. }else
  1430. #endif /* TCP_FD_CACHE */
  1431. if (do_close_fd) close(fd);
  1432. release_c:
  1433. tcpconn_put(c); /* release c (lock; dec refcnt; unlock) */
  1434. return n;
  1435. }
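/* illustrative sketch only (not compiled here): the minimal way a caller is
* expected to invoke tcp_send() above, based on the dest_info members it
* uses (to, id and proto); how dst.to gets filled in is assumed/elided:
*
*   struct dest_info dst;
*   dst.proto=PROTO_TCP;      -- or PROTO_TLS
*   dst.id=0;                 -- 0 => lookup by address / open a new conn.
*   ... fill dst.to (union sockaddr_union) with the destination ...
*   if (tcp_send(&dst, 0, buf, len)<0)
*       LOG(L_ERR, "tcp_send failed\n");
*/
/* initializes the tcp listen socket described by sock_info: creates the
* socket, applies the options handled below (TCP_NODELAY, SO_REUSEADDR, TOS,
* deferred accept/accept filter, SYN count, LINGER2, keepalive), then binds
* and listens; returns 0 on success, -1 on error (the socket is closed) */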
  1436. int tcp_init(struct socket_info* sock_info)
  1437. {
  1438. union sockaddr_union* addr;
  1439. int optval;
  1440. #ifdef HAVE_TCP_ACCEPT_FILTER
  1441. struct accept_filter_arg afa;
  1442. #endif /* HAVE_TCP_ACCEPT_FILTER */
  1443. #ifdef DISABLE_NAGLE
  1444. int flag;
  1445. struct protoent* pe;
  1446. if (tcp_proto_no==-1){ /* if not already set */
  1447. pe=getprotobyname("tcp");
  1448. if (pe==0){
  1449. LOG(L_ERR, "ERROR: tcp_init: could not get TCP protocol number\n");
  1450. tcp_proto_no=-1;
  1451. }else{
  1452. tcp_proto_no=pe->p_proto;
  1453. }
  1454. }
  1455. #endif
  1456. addr=&sock_info->su;
  1457. /* sock_info->proto=PROTO_TCP; */
  1458. if (init_su(addr, &sock_info->address, sock_info->port_no)<0){
1459. LOG(L_ERR, "ERROR: tcp_init: could not init sockaddr_union\n");
  1460. goto error;
  1461. }
  1462. sock_info->socket=socket(AF2PF(addr->s.sa_family), SOCK_STREAM, 0);
  1463. if (sock_info->socket==-1){
  1464. LOG(L_ERR, "ERROR: tcp_init: socket: %s\n", strerror(errno));
  1465. goto error;
  1466. }
  1467. #ifdef DISABLE_NAGLE
  1468. flag=1;
  1469. if ( (tcp_proto_no!=-1) &&
  1470. (setsockopt(sock_info->socket, tcp_proto_no , TCP_NODELAY,
  1471. &flag, sizeof(flag))<0) ){
  1472. LOG(L_ERR, "ERROR: tcp_init: could not disable Nagle: %s\n",
  1473. strerror(errno));
  1474. }
  1475. #endif
  1476. #if !defined(TCP_DONT_REUSEADDR)
  1477. /* Stevens, "Network Programming", Section 7.5, "Generic Socket
  1478. * Options": "...server started,..a child continues..on existing
  1479. * connection..listening server is restarted...call to bind fails
1480. * ... ALL TCP servers should specify the SO_REUSEADDR option
  1481. * to allow the server to be restarted in this situation
  1482. *
  1483. * Indeed, without this option, the server can't restart.
  1484. * -jiri
  1485. */
  1486. optval=1;
  1487. if (setsockopt(sock_info->socket, SOL_SOCKET, SO_REUSEADDR,
  1488. (void*)&optval, sizeof(optval))==-1) {
  1489. LOG(L_ERR, "ERROR: tcp_init: setsockopt %s\n",
  1490. strerror(errno));
  1491. goto error;
  1492. }
  1493. #endif
  1494. /* tos */
  1495. optval = tos;
  1496. if (setsockopt(sock_info->socket, IPPROTO_IP, IP_TOS, (void*)&optval,
  1497. sizeof(optval)) ==-1){
  1498. LOG(L_WARN, "WARNING: tcp_init: setsockopt tos: %s\n", strerror(errno));
  1499. /* continue since this is not critical */
  1500. }
  1501. #ifdef HAVE_TCP_DEFER_ACCEPT
  1502. /* linux only */
  1503. if (tcp_options.defer_accept){
  1504. optval=tcp_options.defer_accept;
  1505. if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_DEFER_ACCEPT,
  1506. (void*)&optval, sizeof(optval)) ==-1){
  1507. LOG(L_WARN, "WARNING: tcp_init: setsockopt TCP_DEFER_ACCEPT %s\n",
  1508. strerror(errno));
  1509. /* continue since this is not critical */
  1510. }
  1511. }
1512. #endif /* HAVE_TCP_DEFER_ACCEPT */
  1513. #ifdef HAVE_TCP_SYNCNT
  1514. if (tcp_options.syncnt){
  1515. optval=tcp_options.syncnt;
  1516. if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_SYNCNT, &optval,
  1517. sizeof(optval))<0){
  1518. LOG(L_WARN, "WARNING: tcp_init: failed to set"
  1519. " maximum SYN retr. count: %s\n", strerror(errno));
  1520. }
  1521. }
  1522. #endif
  1523. #ifdef HAVE_TCP_LINGER2
  1524. if (tcp_options.linger2){
  1525. optval=tcp_options.linger2;
  1526. if (setsockopt(sock_info->socket, IPPROTO_TCP, TCP_LINGER2, &optval,
  1527. sizeof(optval))<0){
  1528. LOG(L_WARN, "WARNING: tcp_init: failed to set"
  1529. " maximum LINGER2 timeout: %s\n", strerror(errno));
  1530. }
  1531. }
  1532. #endif
  1533. init_sock_keepalive(sock_info->socket);
  1534. if (bind(sock_info->socket, &addr->s, sockaddru_len(*addr))==-1){
  1535. LOG(L_ERR, "ERROR: tcp_init: bind(%x, %p, %d) on %s:%d : %s\n",
  1536. sock_info->socket, &addr->s,
  1537. (unsigned)sockaddru_len(*addr),
  1538. sock_info->address_str.s,
  1539. sock_info->port_no,
  1540. strerror(errno));
  1541. goto error;
  1542. }
  1543. if (listen(sock_info->socket, TCP_LISTEN_BACKLOG)==-1){
  1544. LOG(L_ERR, "ERROR: tcp_init: listen(%x, %p, %d) on %s: %s\n",
  1545. sock_info->socket, &addr->s,
  1546. (unsigned)sockaddru_len(*addr),
  1547. sock_info->address_str.s,
  1548. strerror(errno));
  1549. goto error;
  1550. }
  1551. #ifdef HAVE_TCP_ACCEPT_FILTER
  1552. /* freebsd */
  1553. if (tcp_options.defer_accept){
  1554. memset(&afa, 0, sizeof(afa));
  1555. strcpy(afa.af_name, "dataready");
  1556. if (setsockopt(sock_info->socket, SOL_SOCKET, SO_ACCEPTFILTER,
  1557. (void*)&afa, sizeof(afa)) ==-1){
  1558. LOG(L_WARN, "WARNING: tcp_init: setsockopt SO_ACCEPTFILTER %s\n",
  1559. strerror(errno));
  1560. /* continue since this is not critical */
  1561. }
  1562. }
  1563. #endif /* HAVE_TCP_ACCEPT_FILTER */
  1564. return 0;
  1565. error:
  1566. if (sock_info->socket!=-1){
  1567. close(sock_info->socket);
  1568. sock_info->socket=-1;
  1569. }
  1570. return -1;
  1571. }
  1572. /* used internally by tcp_main_loop()
  1573. * tries to destroy a tcp connection (if it cannot it will force a timeout)
  1574. * Note: it's called _only_ from the tcp_main process */
  1575. static void tcpconn_destroy(struct tcp_connection* tcpconn)
  1576. {
  1577. int fd;
  1578. ticks_t t;
  1579. /* always try to remove the timer to protect against tcpconn_destroy
  1580. * being called several times for the same connection
  1581. * (if the timer is already removed, nothing happens) */
  1582. if (likely(!(tcpconn->flags & F_CONN_READER)))
  1583. local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
  1584. #ifdef TCP_BUF_WRITE
  1585. if (unlikely((tcpconn->flags & F_CONN_WRITE_W) ||
  1586. !(tcpconn->flags & F_CONN_REMOVED))){
  1587. LOG(L_CRIT, "tcpconn_destroy: possible BUG: flags = %0x\n",
  1588. tcpconn->flags);
  1589. }
  1590. if (unlikely(tcpconn->wbuf_q.first)){
  1591. lock_get(&tcpconn->write_lock);
  1592. /* check again, while holding the lock */
  1593. if (likely(tcpconn->wbuf_q.first))
  1594. wbufq_destroy(&tcpconn->wbuf_q);
  1595. lock_release(&tcpconn->write_lock);
  1596. }
  1597. #endif /* TCP_BUF_WRITE */
  1598. TCPCONN_LOCK; /*avoid races w/ tcp_send*/
  1599. if (likely(atomic_dec_and_test(&tcpconn->refcnt))){
  1600. _tcpconn_detach(tcpconn);
  1601. TCPCONN_UNLOCK;
  1602. DBG("tcpconn_destroy: destroying connection %p (%d, %d) flags %04x\n",
  1603. tcpconn, tcpconn->id, tcpconn->s, tcpconn->flags);
  1604. fd=tcpconn->s;
  1605. #ifdef USE_TLS
  1606. /*FIXME: lock ->writelock ? */
  1607. if (tcpconn->type==PROTO_TLS)
  1608. tls_close(tcpconn, fd);
  1609. #endif
  1610. _tcpconn_free(tcpconn); /* destroys also the wbuf_q if still present*/
  1611. #ifdef TCP_FD_CACHE
  1612. if (likely(tcp_options.fd_cache)) shutdown(fd, SHUT_RDWR);
  1613. #endif /* TCP_FD_CACHE */
  1614. if (unlikely(close(fd)<0)){
1615. LOG(L_ERR, "ERROR: tcpconn_destroy: close() failed: %s (%d)\n",
  1616. strerror(errno), errno);
  1617. }
  1618. (*tcp_connections_no)--;
  1619. }else{
  1620. TCPCONN_UNLOCK;
  1621. /* force timeout */
  1622. t=get_ticks_raw();
  1623. tcpconn->timeout=t+TCPCONN_WAIT_TIMEOUT;
  1624. tcpconn->state=S_CONN_BAD;
  1625. if (!(tcpconn->flags & F_CONN_READER)){
  1626. /* re-activate the timer only if the connection is handled
  1627. * by tcp_main (and not by a tcp reader)*/
  1628. tcpconn->timer.f=tcpconn_main_timeout;
  1629. local_timer_reinit(&tcpconn->timer);
  1630. local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
  1631. TCPCONN_WAIT_TIMEOUT, t);
  1632. }
  1633. DBG("tcpconn_destroy: delaying (%p, flags %04x) ...\n",
  1634. tcpconn, tcpconn->flags);
  1635. }
  1636. }
  1637. #ifdef SEND_FD_QUEUE
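/* fallback queue for passing a tcp_connection + its fd to a reader child:
* when send_fd() towards the child would block (EAGAIN/EWOULDBLOCK, see
* send2child() below) the operation is queued here and retried from the
* main loop via send_fd_queue_run() */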
  1638. struct send_fd_info{
  1639. struct tcp_connection* tcp_conn;
  1640. ticks_t expire;
  1641. int unix_sock;
  1642. unsigned int retries; /* debugging */
  1643. };
  1644. struct tcp_send_fd_q{
  1645. struct send_fd_info* data; /* buffer */
  1646. struct send_fd_info* crt; /* pointer inside the buffer */
  1647. struct send_fd_info* end; /* points after the last valid position */
  1648. };
  1649. static struct tcp_send_fd_q send2child_q;
  1650. static int send_fd_queue_init(struct tcp_send_fd_q *q, unsigned int size)
  1651. {
  1652. q->data=pkg_malloc(size*sizeof(struct send_fd_info));
  1653. if (q->data==0){
  1654. LOG(L_ERR, "ERROR: send_fd_queue_init: out of memory\n");
  1655. return -1;
  1656. }
  1657. q->crt=&q->data[0];
  1658. q->end=&q->data[size];
  1659. return 0;
  1660. }
  1661. static void send_fd_queue_destroy(struct tcp_send_fd_q *q)
  1662. {
  1663. if (q->data){
  1664. pkg_free(q->data);
  1665. q->data=0;
  1666. q->crt=q->end=0;
  1667. }
  1668. }
  1669. static int init_send_fd_queues()
  1670. {
  1671. if (send_fd_queue_init(&send2child_q, SEND_FD_QUEUE_SIZE)!=0)
  1672. goto error;
  1673. return 0;
  1674. error:
  1675. LOG(L_ERR, "ERROR: init_send_fd_queues: init failed\n");
  1676. return -1;
  1677. }
  1678. static void destroy_send_fd_queues()
  1679. {
  1680. send_fd_queue_destroy(&send2child_q);
  1681. }
  1682. inline static int send_fd_queue_add( struct tcp_send_fd_q* q,
  1683. int unix_sock,
  1684. struct tcp_connection *t)
  1685. {
  1686. struct send_fd_info* tmp;
  1687. unsigned long new_size;
  1688. if (q->crt>=q->end){
  1689. new_size=q->end-&q->data[0];
  1690. if (new_size< MAX_SEND_FD_QUEUE_SIZE/2){
  1691. new_size*=2;
  1692. }else new_size=MAX_SEND_FD_QUEUE_SIZE;
  1693. if (unlikely(q->crt>=&q->data[new_size])){
  1694. LOG(L_ERR, "ERROR: send_fd_queue_add: queue full: %ld/%ld\n",
  1695. (long)(q->crt-&q->data[0]-1), new_size);
  1696. goto error;
  1697. }
  1698. LOG(L_CRIT, "INFO: send_fd_queue: queue full: %ld, extending to %ld\n",
  1699. (long)(q->end-&q->data[0]), new_size);
  1700. tmp=pkg_realloc(q->data, new_size*sizeof(struct send_fd_info));
  1701. if (unlikely(tmp==0)){
  1702. LOG(L_ERR, "ERROR: send_fd_queue_add: out of memory\n");
  1703. goto error;
  1704. }
  1705. q->crt=(q->crt-&q->data[0])+tmp;
  1706. q->data=tmp;
  1707. q->end=&q->data[new_size];
  1708. }
  1709. q->crt->tcp_conn=t;
  1710. q->crt->unix_sock=unix_sock;
  1711. q->crt->expire=get_ticks_raw()+SEND_FD_QUEUE_TIMEOUT;
  1712. q->crt->retries=0;
  1713. q->crt++;
  1714. return 0;
  1715. error:
  1716. return -1;
  1717. }
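/* flushes the queued send_fd() operations: successfully sent entries are
* removed, entries that would still block and have not expired are kept for
* a later run (retries++), expired or otherwise failed entries are logged
* and their connections destroyed */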
  1718. inline static void send_fd_queue_run(struct tcp_send_fd_q* q)
  1719. {
  1720. struct send_fd_info* p;
  1721. struct send_fd_info* t;
  1722. for (p=t=&q->data[0]; p<q->crt; p++){
  1723. if (unlikely(send_fd(p->unix_sock, &(p->tcp_conn),
  1724. sizeof(struct tcp_connection*), p->tcp_conn->s)<=0)){
  1725. if ( ((errno==EAGAIN)||(errno==EWOULDBLOCK)) &&
  1726. ((s_ticks_t)(p->expire-get_ticks_raw())>0)){
  1727. /* leave in queue for a future try */
  1728. *t=*p;
  1729. t->retries++;
  1730. t++;
  1731. }else{
1732. LOG(L_ERR, "ERROR: send_fd_queue_run: send_fd failed"
  1733. " on socket %d , queue entry %ld, retries %d,"
  1734. " connection %p, tcp socket %d, errno=%d (%s) \n",
  1735. p->unix_sock, (long)(p-&q->data[0]), p->retries,
  1736. p->tcp_conn, p->tcp_conn->s, errno,
  1737. strerror(errno));
  1738. #ifdef TCP_BUF_WRITE
  1739. if (p->tcp_conn->flags & F_CONN_WRITE_W){
  1740. io_watch_del(&io_h, p->tcp_conn->s, -1, IO_FD_CLOSING);
  1741. p->tcp_conn->flags &=~F_CONN_WRITE_W;
  1742. }
  1743. #endif
  1744. p->tcp_conn->flags &= ~F_CONN_READER;
  1745. tcpconn_destroy(p->tcp_conn);
  1746. }
  1747. }
  1748. }
  1749. q->crt=t;
  1750. }
  1751. #else
  1752. #define send_fd_queue_run(q)
  1753. #endif
1754. /* non-blocking write() on a tcp connection, unsafe version (should be called
  1755. * while holding c->write_lock). The fd should be non-blocking.
  1756. * returns number of bytes written on success, -1 on error (and sets errno)
  1757. */
  1758. inline static int _tcpconn_write_nb(int fd, struct tcp_connection* c,
  1759. char* buf, int len)
  1760. {
  1761. int n;
  1762. again:
  1763. #ifdef USE_TLS
  1764. if (unlikely(c->type==PROTO_TLS))
  1765. /* FIXME: tls_nonblocking_write !! */
  1766. n=tls_blocking_write(c, fd, buf, len);
  1767. else
  1768. #endif /* USE_TLS */
  1769. n=send(fd, buf, len,
  1770. #ifdef HAVE_MSG_NOSIGNAL
  1771. MSG_NOSIGNAL
  1772. #else
  1773. 0
  1774. #endif /* HAVE_MSG_NOSIGNAL */
  1775. );
  1776. if (unlikely(n<0)){
  1777. if (errno==EINTR) goto again;
  1778. }
  1779. return n;
  1780. }
  1781. /* handles io from a tcp child process
  1782. * params: tcp_c - pointer in the tcp_children array, to the entry for
  1783. * which an io event was detected
1784. * fd_i - fd index in the fd_array (useful for optimizing
  1785. * io_watch_deletes)
  1786. * returns: handle_* return convention: -1 on error, 0 on EAGAIN (no more
  1787. * io events queued), >0 on success. success/error refer only to
  1788. * the reads from the fd.
  1789. */
  1790. inline static int handle_tcp_child(struct tcp_child* tcp_c, int fd_i)
  1791. {
  1792. struct tcp_connection* tcpconn;
  1793. long response[2];
  1794. int cmd;
  1795. int bytes;
  1796. int n;
  1797. ticks_t t;
  1798. if (unlikely(tcp_c->unix_sock<=0)){
  1799. /* (we can't have a fd==0, 0 is never closed )*/
  1800. LOG(L_CRIT, "BUG: handle_tcp_child: fd %d for %d "
  1801. "(pid %d, ser no %d)\n", tcp_c->unix_sock,
  1802. (int)(tcp_c-&tcp_children[0]), tcp_c->pid, tcp_c->proc_no);
  1803. goto error;
  1804. }
  1805. /* read until sizeof(response)
  1806. * (this is a SOCK_STREAM so read is not atomic) */
  1807. bytes=recv_all(tcp_c->unix_sock, response, sizeof(response), MSG_DONTWAIT);
  1808. if (unlikely(bytes<(int)sizeof(response))){
  1809. if (bytes==0){
  1810. /* EOF -> bad, child has died */
  1811. DBG("DBG: handle_tcp_child: dead tcp child %d (pid %d, no %d)"
  1812. " (shutting down?)\n", (int)(tcp_c-&tcp_children[0]),
  1813. tcp_c->pid, tcp_c->proc_no );
  1814. /* don't listen on it any more */
  1815. io_watch_del(&io_h, tcp_c->unix_sock, fd_i, 0);
  1816. goto error; /* eof. so no more io here, it's ok to return error */
  1817. }else if (bytes<0){
  1818. /* EAGAIN is ok if we try to empty the buffer
  1819. * e.g.: SIGIO_RT overflow mode or EPOLL ET */
  1820. if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
  1821. LOG(L_CRIT, "ERROR: handle_tcp_child: read from tcp child %ld "
  1822. " (pid %d, no %d) %s [%d]\n",
  1823. (long)(tcp_c-&tcp_children[0]), tcp_c->pid,
  1824. tcp_c->proc_no, strerror(errno), errno );
  1825. }else{
  1826. bytes=0;
  1827. }
  1828. /* try to ignore ? */
  1829. goto end;
  1830. }else{
  1831. /* should never happen */
  1832. LOG(L_CRIT, "BUG: handle_tcp_child: too few bytes received (%d)\n",
  1833. bytes );
  1834. bytes=0; /* something was read so there is no error; otoh if
1835. recv_all returned less than requested => the receive
  1836. buffer is empty => no more io queued on this fd */
  1837. goto end;
  1838. }
  1839. }
  1840. DBG("handle_tcp_child: reader response= %lx, %ld from %d \n",
  1841. response[0], response[1], (int)(tcp_c-&tcp_children[0]));
  1842. cmd=response[1];
  1843. tcpconn=(struct tcp_connection*)response[0];
  1844. if (unlikely(tcpconn==0)){
  1845. /* should never happen */
  1846. LOG(L_CRIT, "BUG: handle_tcp_child: null tcpconn pointer received"
  1847. " from tcp child %d (pid %d): %lx, %lx\n",
  1848. (int)(tcp_c-&tcp_children[0]), tcp_c->pid,
  1849. response[0], response[1]) ;
  1850. goto end;
  1851. }
  1852. switch(cmd){
  1853. case CONN_RELEASE:
  1854. tcp_c->busy--;
  1855. if (unlikely(tcpconn->state==S_CONN_BAD)){
  1856. #ifdef TCP_BUF_WRITE
  1857. if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
  1858. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  1859. tcpconn->flags &= ~F_CONN_WRITE_W;
  1860. }
  1861. #endif /* TCP_BUF_WRITE */
  1862. tcpconn_destroy(tcpconn);
  1863. break;
  1864. }
  1865. /* update the timeout*/
  1866. t=get_ticks_raw();
  1867. tcpconn->timeout=t+tcp_con_lifetime;
  1868. tcpconn_put(tcpconn);
  1869. /* re-activate the timer */
  1870. tcpconn->timer.f=tcpconn_main_timeout;
  1871. local_timer_reinit(&tcpconn->timer);
  1872. local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
  1873. tcp_con_lifetime, t);
  1874. /* must be after the de-ref*/
  1875. tcpconn->flags&=~(F_CONN_REMOVED|F_CONN_READER);
  1876. #ifdef TCP_BUF_WRITE
  1877. if (unlikely(tcpconn->flags & F_CONN_WRITE_W))
  1878. n=io_watch_chg(&io_h, tcpconn->s, POLLIN| POLLOUT, -1);
  1879. else
  1880. #endif /* TCP_BUF_WRITE */
  1881. n=io_watch_add(&io_h, tcpconn->s, POLLIN, F_TCPCONN, tcpconn);
  1882. if (unlikely(n<0)){
  1883. LOG(L_CRIT, "ERROR: tcp_main: handle_tcp_child: failed to add"
  1884. " new socket to the fd list\n");
  1885. tcpconn->flags|=F_CONN_REMOVED;
  1886. #ifdef TCP_BUF_WRITE
  1887. if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
  1888. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  1889. tcpconn->flags&=~F_CONN_WRITE_W;
  1890. }
  1891. #endif /* TCP_BUF_WRITE */
  1892. tcpconn_destroy(tcpconn); /* closes also the fd */
  1893. }
  1894. DBG("handle_tcp_child: CONN_RELEASE %p refcnt= %d\n",
  1895. tcpconn, atomic_get(&tcpconn->refcnt));
  1896. break;
  1897. case CONN_ERROR:
  1898. case CONN_DESTROY:
  1899. case CONN_EOF:
  1900. /* WARNING: this will auto-dec. refcnt! */
  1901. tcp_c->busy--;
  1902. /* main doesn't listen on it => we don't have to delete it
  1903. if (tcpconn->s!=-1)
  1904. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  1905. */
  1906. #ifdef TCP_BUF_WRITE
  1907. if ((tcpconn->flags & F_CONN_WRITE_W) && (tcpconn->s!=-1)){
  1908. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  1909. tcpconn->flags&=~F_CONN_WRITE_W;
  1910. }
  1911. #endif /* TCP_BUF_WRITE */
  1912. tcpconn_destroy(tcpconn); /* closes also the fd */
  1913. break;
  1914. default:
  1915. LOG(L_CRIT, "BUG: handle_tcp_child: unknown cmd %d"
  1916. " from tcp reader %d\n",
  1917. cmd, (int)(tcp_c-&tcp_children[0]));
  1918. }
  1919. end:
  1920. return bytes;
  1921. error:
  1922. return -1;
  1923. }
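/* note on the unix-socket protocol used above (and in handle_ser_child() and
* tcp_send()): every command is a 2*sizeof(long) message,
*   msg[0] = (long)tcpconn  (the connection the command refers to)
*   msg[1] = the command    (CONN_RELEASE, CONN_ERROR, CONN_DESTROY, CONN_EOF,
*                            CONN_NEW, CONN_GET_FD, CONN_QUEUED_WRITE, ...)
* written with send_all(), or send_fd() when an fd travels with it */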
  1924. /* handles io from a "generic" ser process (get fd or new_fd from a tcp_send)
  1925. *
  1926. * params: p - pointer in the ser processes array (pt[]), to the entry for
  1927. * which an io event was detected
1928. * fd_i - fd index in the fd_array (useful for optimizing
  1929. * io_watch_deletes)
  1930. * returns: handle_* return convention:
  1931. * -1 on error reading from the fd,
  1932. * 0 on EAGAIN or when no more io events are queued
  1933. * (receive buffer empty),
1934. * >0 on successful reads from the fd (the receive buffer might
  1935. * be non-empty).
  1936. */
  1937. inline static int handle_ser_child(struct process_table* p, int fd_i)
  1938. {
  1939. struct tcp_connection* tcpconn;
  1940. long response[2];
  1941. int cmd;
  1942. int bytes;
  1943. int ret;
  1944. int fd;
  1945. int flags;
  1946. ticks_t t;
  1947. ret=-1;
  1948. if (unlikely(p->unix_sock<=0)){
  1949. /* (we can't have a fd==0, 0 is never closed )*/
  1950. LOG(L_CRIT, "BUG: handle_ser_child: fd %d for %d "
  1951. "(pid %d)\n", p->unix_sock, (int)(p-&pt[0]), p->pid);
  1952. goto error;
  1953. }
  1954. /* get all bytes and the fd (if transmitted)
  1955. * (this is a SOCK_STREAM so read is not atomic) */
  1956. bytes=receive_fd(p->unix_sock, response, sizeof(response), &fd,
  1957. MSG_DONTWAIT);
  1958. if (unlikely(bytes<(int)sizeof(response))){
  1959. /* too few bytes read */
  1960. if (bytes==0){
  1961. /* EOF -> bad, child has died */
  1962. DBG("DBG: handle_ser_child: dead child %d, pid %d"
  1963. " (shutting down?)\n", (int)(p-&pt[0]), p->pid);
  1964. /* don't listen on it any more */
  1965. io_watch_del(&io_h, p->unix_sock, fd_i, 0);
  1966. goto error; /* child dead => no further io events from it */
  1967. }else if (bytes<0){
  1968. /* EAGAIN is ok if we try to empty the buffer
  1969. * e.g: SIGIO_RT overflow mode or EPOLL ET */
  1970. if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
  1971. LOG(L_CRIT, "ERROR: handle_ser_child: read from child %d "
  1972. "(pid %d): %s [%d]\n", (int)(p-&pt[0]), p->pid,
  1973. strerror(errno), errno);
  1974. ret=-1;
  1975. }else{
  1976. ret=0;
  1977. }
  1978. /* try to ignore ? */
  1979. goto end;
  1980. }else{
  1981. /* should never happen */
  1982. LOG(L_CRIT, "BUG: handle_ser_child: too few bytes received (%d)\n",
  1983. bytes );
  1984. ret=0; /* something was read so there is no error; otoh if
1985. receive_fd returned less than requested => the receive
  1986. buffer is empty => no more io queued on this fd */
  1987. goto end;
  1988. }
  1989. }
  1990. ret=1; /* something was received, there might be more queued */
  1991. DBG("handle_ser_child: read response= %lx, %ld, fd %d from %d (%d)\n",
  1992. response[0], response[1], fd, (int)(p-&pt[0]), p->pid);
  1993. cmd=response[1];
  1994. tcpconn=(struct tcp_connection*)response[0];
  1995. if (unlikely(tcpconn==0)){
  1996. LOG(L_CRIT, "BUG: handle_ser_child: null tcpconn pointer received"
  1997. " from child %d (pid %d): %lx, %lx\n",
  1998. (int)(p-&pt[0]), p->pid, response[0], response[1]) ;
  1999. goto end;
  2000. }
  2001. switch(cmd){
  2002. case CONN_ERROR:
  2003. if ( (!(tcpconn->flags & F_CONN_REMOVED) ||
  2004. (tcpconn->flags & F_CONN_WRITE_W) ) && (tcpconn->s!=-1)){
  2005. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  2006. tcpconn->flags|=F_CONN_REMOVED;
  2007. tcpconn->flags&=~F_CONN_WRITE_W;
  2008. }
2009. LOG(L_ERR, "handle_ser_child: ERROR: received CONN_ERROR for %p"
  2010. " (id %d), refcnt %d\n",
  2011. tcpconn, tcpconn->id, atomic_get(&tcpconn->refcnt));
  2012. tcpconn_destroy(tcpconn); /* will close also the fd */
  2013. break;
  2014. case CONN_GET_FD:
  2015. /* send the requested FD */
  2016. /* WARNING: take care of setting refcnt properly to
  2017. * avoid race condition */
  2018. if (unlikely(send_fd(p->unix_sock, &tcpconn, sizeof(tcpconn),
  2019. tcpconn->s)<=0)){
  2020. LOG(L_ERR, "ERROR: handle_ser_child: send_fd failed\n");
  2021. }
  2022. break;
  2023. case CONN_NEW:
  2024. /* update the fd in the requested tcpconn*/
  2025. /* WARNING: take care of setting refcnt properly to
  2026. * avoid race condition */
  2027. if (unlikely(fd==-1)){
  2028. LOG(L_CRIT, "BUG: handle_ser_child: CONN_NEW:"
  2029. " no fd received\n");
  2030. break;
  2031. }
  2032. (*tcp_connections_no)++;
  2033. tcpconn->s=fd;
  2034. /* add tcpconn to the list*/
  2035. tcpconn_add(tcpconn);
  2036. /* update the timeout*/
  2037. t=get_ticks_raw();
  2038. tcpconn->timeout=t+tcp_con_lifetime;
  2039. /* activate the timer (already properly init. in tcpconn_new() */
  2040. local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
  2041. tcp_con_lifetime, t);
  2042. tcpconn->flags&=~F_CONN_REMOVED;
  2043. flags=POLLIN
  2044. #ifdef TCP_BUF_WRITE
  2045. /* not used for now, the connection is sent to tcp_main
  2046. * before knowing if we can write on it or we should
  2047. * wait */
  2048. | (((int)!(tcpconn->flags & F_CONN_WRITE_W)-1) & POLLOUT)
  2049. #endif /* TCP_BUF_WRITE */
  2050. ;
  2051. if (unlikely(
  2052. io_watch_add(&io_h, tcpconn->s, flags,
  2053. F_TCPCONN, tcpconn)<0)){
  2054. LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed to add"
  2055. " new socket to the fd list\n");
  2056. tcpconn->flags|=F_CONN_REMOVED;
  2057. tcpconn->flags&=~F_CONN_WRITE_W;
  2058. tcpconn_destroy(tcpconn); /* closes also the fd */
  2059. }
  2060. break;
  2061. #ifdef TCP_BUF_WRITE
  2062. case CONN_QUEUED_WRITE:
  2063. if (!(tcpconn->flags & F_CONN_WRITE_W)){
  2064. if (tcpconn->flags& F_CONN_REMOVED){
  2065. if (unlikely(io_watch_add(&io_h, tcpconn->s, POLLOUT,
  2066. F_TCPCONN, tcpconn)<0)){
  2067. LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed"
  2068. " to enable write watch on socket\n");
  2069. tcpconn_destroy(tcpconn);
  2070. break;
  2071. }
  2072. }else{
  2073. if (unlikely(io_watch_chg(&io_h, tcpconn->s,
  2074. POLLIN|POLLOUT, -1)<0)){
  2075. LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed"
  2076. " to change socket watch events\n");
  2077. io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
  2078. tcpconn->flags|=F_CONN_REMOVED;
  2079. tcpconn_destroy(tcpconn);
  2080. break;
  2081. }
  2082. }
  2083. tcpconn->flags|=F_CONN_WRITE_W;
  2084. }else{
2085. LOG(L_WARN, "tcp_main: handle_ser_child: connection %p"
  2086. " already watched for write\n", tcpconn);
  2087. }
  2088. break;
  2089. #endif /* TCP_BUF_WRITE */
  2090. default:
  2091. LOG(L_CRIT, "BUG: handle_ser_child: unknown cmd %d\n", cmd);
  2092. }
  2093. end:
  2094. return ret;
  2095. error:
  2096. return -1;
  2097. }
2098. /* sends a tcpconn + fd to a chosen child */
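/* child selection: round-robin starting after the last child used, taking the
* first idle child, otherwise the least busy one; the chosen child's pending
* requests are drained first (see below) to avoid a send/release deadlock */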
  2099. inline static int send2child(struct tcp_connection* tcpconn)
  2100. {
  2101. int i;
  2102. int min_busy;
  2103. int idx;
  2104. static int crt=0; /* current child */
  2105. int last;
  2106. min_busy=tcp_children[0].busy;
  2107. idx=0;
  2108. last=crt+tcp_children_no;
  2109. for (; crt<last; crt++){
  2110. i=crt%tcp_children_no;
  2111. if (!tcp_children[i].busy){
  2112. idx=i;
  2113. min_busy=0;
  2114. break;
  2115. }else if (min_busy>tcp_children[i].busy){
  2116. min_busy=tcp_children[i].busy;
  2117. idx=i;
  2118. }
  2119. }
  2120. crt=idx+1; /* next time we start with crt%tcp_children_no */
  2121. tcp_children[idx].busy++;
  2122. tcp_children[idx].n_reqs++;
  2123. if (unlikely(min_busy)){
  2124. DBG("WARNING: send2child: no free tcp receiver, "
  2125. " connection passed to the least busy one (%d)\n",
  2126. min_busy);
  2127. }
  2128. DBG("send2child: to tcp child %d %d(%d), %p\n", idx,
  2129. tcp_children[idx].proc_no,
  2130. tcp_children[idx].pid, tcpconn);
  2131. /* first make sure this child doesn't have pending request for
  2132. * tcp_main (to avoid a possible deadlock: e.g. child wants to
  2133. * send a release command, but the master fills its socket buffer
  2134. * with new connection commands => deadlock) */
  2135. /* answer tcp_send requests first */
  2136. while(handle_ser_child(&pt[tcp_children[idx].proc_no], -1)>0);
  2137. /* process tcp readers requests */
  2138. while(handle_tcp_child(&tcp_children[idx], -1)>0);
  2139. #ifdef SEND_FD_QUEUE
  2140. /* if queue full, try to queue the io */
  2141. if (unlikely(send_fd(tcp_children[idx].unix_sock, &tcpconn,
  2142. sizeof(tcpconn), tcpconn->s)<=0)){
  2143. if ((errno==EAGAIN)||(errno==EWOULDBLOCK)){
  2144. /* FIXME: remove after debugging */
  2145. LOG(L_CRIT, "INFO: tcp child %d, socket %d: queue full,"
  2146. " %d requests queued (total handled %d)\n",
  2147. idx, tcp_children[idx].unix_sock, min_busy,
  2148. tcp_children[idx].n_reqs-1);
  2149. if (send_fd_queue_add(&send2child_q, tcp_children[idx].unix_sock,
  2150. tcpconn)!=0){
  2151. LOG(L_ERR, "ERROR: send2child: queue send op. failed\n");
  2152. return -1;
  2153. }
  2154. }else{
  2155. LOG(L_ERR, "ERROR: send2child: send_fd failed\n");
  2156. return -1;
  2157. }
  2158. }
  2159. #else
  2160. if (unlikely(send_fd(tcp_children[idx].unix_sock, &tcpconn,
  2161. sizeof(tcpconn), tcpconn->s)<=0)){
  2162. LOG(L_ERR, "ERROR: send2child: send_fd failed\n");
  2163. return -1;
  2164. }
  2165. #endif
  2166. return 0;
  2167. }
  2168. /* handles a new connection, called internally by tcp_main_loop/handle_io.
  2169. * params: si - pointer to one of the tcp socket_info structures on which
  2170. * an io event was detected (connection attempt)
  2171. * returns: handle_* return convention: -1 on error, 0 on EAGAIN (no more
  2172. * io events queued), >0 on success. success/error refer only to
  2173. * the accept.
  2174. */
  2175. static inline int handle_new_connect(struct socket_info* si)
  2176. {
  2177. union sockaddr_union su;
  2178. union sockaddr_union sock_name;
  2179. unsigned sock_name_len;
  2180. union sockaddr_union* dst_su;
  2181. struct tcp_connection* tcpconn;
  2182. socklen_t su_len;
  2183. int new_sock;
  2184. /* got a connection on r */
  2185. su_len=sizeof(su);
  2186. new_sock=accept(si->socket, &(su.s), &su_len);
  2187. if (unlikely(new_sock==-1)){
  2188. if ((errno==EAGAIN)||(errno==EWOULDBLOCK))
  2189. return 0;
  2190. LOG(L_ERR, "WARNING: handle_new_connect: error while accepting"
  2191. " connection(%d): %s\n", errno, strerror(errno));
  2192. return -1;
  2193. }
  2194. if (unlikely(*tcp_connections_no>=tcp_max_connections)){
  2195. LOG(L_ERR, "ERROR: maximum number of connections exceeded: %d/%d\n",
  2196. *tcp_connections_no, tcp_max_connections);
  2197. close(new_sock);
2198. return 1; /* success, because the accept was successful */
  2199. }
  2200. if (unlikely(init_sock_opt_accept(new_sock)<0)){
  2201. LOG(L_ERR, "ERROR: handle_new_connect: init_sock_opt failed\n");
  2202. close(new_sock);
2203. return 1; /* success, because the accept was successful */
  2204. }
  2205. (*tcp_connections_no)++;
  2206. dst_su=&si->su;
  2207. if (unlikely(si->flags & SI_IS_ANY)){
  2208. /* INADDR_ANY => get local dst */
  2209. sock_name_len=sizeof(sock_name);
  2210. if (getsockname(new_sock, &sock_name.s, &sock_name_len)!=0){
  2211. LOG(L_ERR, "ERROR: handle_new_connect:"
  2212. " getsockname failed: %s(%d)\n",
  2213. strerror(errno), errno);
  2214. /* go on with the 0.0.0.0 dst from the sock_info */
  2215. }else{
  2216. dst_su=&sock_name;
  2217. }
  2218. }
  2219. /* add socket to list */
  2220. tcpconn=tcpconn_new(new_sock, &su, dst_su, si, si->proto, S_CONN_ACCEPT);
  2221. if (likely(tcpconn)){
  2222. #ifdef TCP_PASS_NEW_CONNECTION_ON_DATA
  2223. tcpconn_add(tcpconn);
  2224. /* activate the timer */
  2225. local_timer_add(&tcp_main_ltimer, &tcpconn->timer,
  2226. tcp_con_lifetime, get_ticks_raw());
  2227. tcpconn->flags&=~F_CONN_REMOVED;
  2228. if (unlikely(io_watch_add(&io_h, tcpconn->s, POLLIN,
  2229. F_TCPCONN, tcpconn)<0)){
  2230. LOG(L_CRIT, "ERROR: tcp_main: handle_new_connect: failed to add"
  2231. " new socket to the fd list\n");
  2232. tcpconn->flags|=F_CONN_REMOVED;
  2233. tcpconn_destroy(tcpconn); /* closes also the fd */
  2234. }
  2235. #else
  2236. atomic_set(&tcpconn->refcnt, 1); /* safe, not yet available to the
  2237. outside world */
  2238. tcpconn_add(tcpconn);
  2239. DBG("handle_new_connect: new connection: %p %d flags: %04x\n",
  2240. tcpconn, tcpconn->s, tcpconn->flags);
  2241. /* pass it to a child */
  2242. tcpconn->flags|=F_CONN_READER;
  2243. if(unlikely(send2child(tcpconn)<0)){
  2244. LOG(L_ERR,"ERROR: handle_new_connect: no children "
  2245. "available\n");
  2246. tcpconn->flags&=~F_CONN_READER;
  2247. tcpconn_destroy(tcpconn);
  2248. }
  2249. #endif
  2250. }else{ /*tcpconn==0 */
  2251. LOG(L_ERR, "ERROR: handle_new_connect: tcpconn_new failed, "
  2252. "closing socket\n");
  2253. close(new_sock);
  2254. (*tcp_connections_no)--;
  2255. }
2256. return 1; /* accept() was successful */
  2257. }
  2258. /* handles an io event on one of the watched tcp connections
  2259. *
  2260. * params: tcpconn - pointer to the tcp_connection for which we have an io ev.
  2261. * fd_i - index in the fd_array table (needed for delete)
  2262. * returns: handle_* return convention, but on success it always returns 0
2263. * (because it's one-shot, after a successful execution the fd is
  2264. * removed from tcp_main's watch fd list and passed to a child =>
  2265. * tcp_main is not interested in further io events that might be
  2266. * queued for this fd)
  2267. */
  2268. inline static int handle_tcpconn_ev(struct tcp_connection* tcpconn, short ev,
  2269. int fd_i)
  2270. {
  2271. #ifdef TCP_BUF_WRITE
  2272. int empty_q;
  2273. #endif /* TCP_BUF_WRITE */
  2274. /* is refcnt!=0 really necessary?
  2275. * No, in fact it's a bug: I can have the following situation: a send only
  2276. * tcp connection used by n processes simultaneously => refcnt = n. In
  2277. * the same time I can have a read event and this situation is perfectly
  2278. * valid. -- andrei
  2279. */
  2280. #if 0
  2281. if ((tcpconn->refcnt!=0)){
  2282. /* FIXME: might be valid for sigio_rt iff fd flags are not cleared
  2283. * (there is a short window in which it could generate a sig
2284. * that would be caught by tcp_main) */
  2285. LOG(L_CRIT, "BUG: handle_tcpconn_ev: io event on referenced"
  2286. " tcpconn (%p), refcnt=%d, fd=%d\n",
  2287. tcpconn, tcpconn->refcnt, tcpconn->s);
  2288. return -1;
  2289. }
  2290. #endif
  2291. /* pass it to child, so remove it from the io watch list and the local
  2292. * timer */
  2293. DBG("handle_tcpconn_ev: ev (%0x) on %p %d\n", ev, tcpconn, tcpconn->s);
  2294. #ifdef TCP_BUF_WRITE
  2295. if (unlikely((ev & POLLOUT) && (tcpconn->flags & F_CONN_WRITE_W))){
  2296. if (unlikely(wbufq_run(tcpconn->s, tcpconn, &empty_q)<0)){
  2297. io_watch_del(&io_h, tcpconn->s, fd_i, 0);
  2298. tcpconn->flags|=F_CONN_REMOVED;
  2299. tcpconn->flags&=~F_CONN_WRITE_W;
  2300. tcpconn_destroy(tcpconn);
  2301. goto error;
  2302. }
  2303. if (empty_q){
  2304. if (tcpconn->flags & F_CONN_REMOVED){
  2305. if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)==-1))
  2306. goto error;
  2307. }else{
  2308. if (unlikely(io_watch_chg(&io_h, tcpconn->s,
  2309. POLLIN, fd_i)==-1))
  2310. goto error;
  2311. }
  2312. tcpconn->flags&=~F_CONN_WRITE_W;
  2313. }
  2314. ev&=~POLLOUT; /* clear POLLOUT */
  2315. }
  2316. if (likely(ev && !(tcpconn->flags & F_CONN_REMOVED))){
  2317. /* if still some other IO event (POLLIN|POLLHUP|POLLERR) and
  2318. * connection is still watched in tcp_main for reads, send it to a
  2319. * child and stop watching it for input (but continue watching for
  2320. * writes if needed): */
  2321. if (unlikely(tcpconn->flags & F_CONN_WRITE_W)){
  2322. if (unlikely(io_watch_chg(&io_h, tcpconn->s, POLLOUT, fd_i)==-1))
  2323. goto error;
  2324. }else
  2325. #else
  2326. {
  2327. #endif /* TCP_BUF_WRITE */
  2328. if (unlikely(io_watch_del(&io_h, tcpconn->s, fd_i, 0)==-1))
  2329. goto error;
  2330. tcpconn->flags|=F_CONN_REMOVED|F_CONN_READER;
  2331. local_timer_del(&tcp_main_ltimer, &tcpconn->timer);
  2332. tcpconn_ref(tcpconn); /* refcnt ++ */
  2333. if (unlikely(send2child(tcpconn)<0)){
  2334. LOG(L_ERR,"ERROR: handle_tcpconn_ev: no children available\n");
  2335. tcpconn->flags&=~F_CONN_READER;
  2336. #ifdef TCP_BUF_WRITE
  2337. if (tcpconn->flags & F_CONN_WRITE_W){
  2338. io_watch_del(&io_h, tcpconn->s, fd_i, 0);
  2339. tcpconn->flags&=~F_CONN_WRITE_W;
  2340. }
  2341. #endif /* TCP_BUF_WRITE */
  2342. tcpconn_destroy(tcpconn);
  2343. }
  2344. }
  2345. return 0; /* we are not interested in possibly queued io events,
  2346. the fd was either passed to a child, closed, or for writes,
  2347. everything possible was already written */
  2348. error:
  2349. return -1;
  2350. }
2351. /* generic handle io routine, it will call the appropriate
  2352. * handle_xxx() based on the fd_map type
  2353. *
  2354. * params: fm - pointer to a fd hash entry
  2355. * idx - index in the fd_array (or -1 if not known)
  2356. * return: -1 on error
  2357. * 0 on EAGAIN or when by some other way it is known that no more
  2358. * io events are queued on the fd (the receive buffer is empty).
2359. * Useful to detect when there are no more io events queued for
  2360. * sigio_rt, epoll_et, kqueue.
2361. * >0 on successful read from the fd (when there might be more io
  2362. * queued -- the receive buffer might still be non-empty)
  2363. */
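/* dispatch summary (see the switch below):
*   F_SOCKINFO -> handle_new_connect() (new connection on a listen socket)
*   F_TCPCONN  -> handle_tcpconn_ev()  (io on a watched tcp connection)
*   F_TCPCHILD -> handle_tcp_child()   (command from a tcp reader child)
*   F_PROC     -> handle_ser_child()   (command from another ser process) */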
  2364. inline static int handle_io(struct fd_map* fm, short ev, int idx)
  2365. {
  2366. int ret;
  2367. /* update the local config */
  2368. cfg_update();
  2369. switch(fm->type){
  2370. case F_SOCKINFO:
  2371. ret=handle_new_connect((struct socket_info*)fm->data);
  2372. break;
  2373. case F_TCPCONN:
  2374. ret=handle_tcpconn_ev((struct tcp_connection*)fm->data, ev, idx);
  2375. break;
  2376. case F_TCPCHILD:
  2377. ret=handle_tcp_child((struct tcp_child*)fm->data, idx);
  2378. break;
  2379. case F_PROC:
  2380. ret=handle_ser_child((struct process_table*)fm->data, idx);
  2381. break;
  2382. case F_NONE:
  2383. LOG(L_CRIT, "BUG: handle_io: empty fd map: %p {%d, %d, %p},"
  2384. " idx %d\n", fm, fm->fd, fm->type, fm->data, idx);
  2385. goto error;
  2386. default:
2387. LOG(L_CRIT, "BUG: handle_io: unknown fd type %d\n", fm->type);
  2388. goto error;
  2389. }
  2390. return ret;
  2391. error:
  2392. return -1;
  2393. }
2394. /* timer handler for tcp connections handled by tcp_main */
  2395. static ticks_t tcpconn_main_timeout(ticks_t t, struct timer_ln* tl, void* data)
  2396. {
  2397. struct tcp_connection *c;
  2398. int fd;
  2399. c=(struct tcp_connection*)data;
  2400. /* or (struct tcp...*)(tl-offset(c->timer)) */
  2401. if (TICKS_LT(t, c->timeout)){
  2402. /* timeout extended, exit */
  2403. return (ticks_t)(c->timeout - t);
  2404. }
  2405. if (likely(atomic_get(&c->refcnt)==0)){
  2406. TCPCONN_LOCK;
  2407. /* check again to avoid races with tcp_send() */
  2408. if (likely(atomic_get(&c->refcnt)==0)){
  2409. /* delete */
  2410. _tcpconn_detach(c);
  2411. TCPCONN_UNLOCK; /* unlock as soon as possible */
  2412. fd=c->s;
  2413. if (likely(fd>0)){
  2414. if (likely(!(c->flags & F_CONN_REMOVED)
  2415. #ifdef TCP_BUF_WRITE
  2416. || (c->flags & F_CONN_WRITE_W)
  2417. #endif /* TCP_BUF_WRITE */
  2418. )){
  2419. io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
  2420. c->flags|=F_CONN_REMOVED;
  2421. #ifdef TCP_BUF_WRITE
  2422. c->flags&=~F_CONN_WRITE_W;
  2423. #endif /* TCP_BUF_WRITE */
  2424. }
  2425. #ifdef USE_TLS
  2426. if (unlikely(c->type==PROTO_TLS ))
  2427. tls_close(c, fd);
  2428. #endif /* USE_TLS */
  2429. _tcpconn_free(c);
  2430. #ifdef TCP_FD_CACHE
  2431. if (likely(tcp_options.fd_cache)) shutdown(fd, SHUT_RDWR);
  2432. #endif /* TCP_FD_CACHE */
  2433. close(fd);
  2434. }
  2435. (*tcp_connections_no)--; /* modified only in tcp_main
  2436. => no lock needed */
  2437. return 0; /* don't prolong the timer anymore */
  2438. }
  2439. TCPCONN_UNLOCK;
  2440. }
  2441. /* if we are here we can't delete the connection, it's still referenced
  2442. * => we just delay deleting it */
  2443. return TCPCONN_WAIT_TIMEOUT;
  2444. }
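/* runs the tcp_main local timer, rate-limited to at most once every
* TCPCONN_TIMEOUT_MIN_RUN ticks; called after each io_wait_loop_* pass */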
  2445. static inline void tcp_timer_run()
  2446. {
  2447. ticks_t ticks;
  2448. static ticks_t prev_ticks=0;
  2449. ticks=get_ticks_raw();
  2450. if (unlikely((ticks-prev_ticks)<TCPCONN_TIMEOUT_MIN_RUN)) return;
  2451. prev_ticks=ticks;
  2452. local_timer_run(&tcp_main_ltimer, ticks);
  2453. }
  2454. /* keep in sync with tcpconn_destroy, the "delete" part should be
  2455. * the same except for io_watch_del..
  2456. * Note: this function is called only on shutdown by the main ser process via
  2457. * cleanup(). However it's also safe to call it from the tcp_main process.
  2458. * => with the ser shutdown exception, it cannot execute in parallel
  2459. * with tcpconn_add() or tcpconn_destroy()*/
  2460. static inline void tcpconn_destroy_all()
  2461. {
  2462. struct tcp_connection *c, *next;
  2463. unsigned h;
  2464. int fd;
  2465. TCPCONN_LOCK;
  2466. for(h=0; h<TCP_ID_HASH_SIZE; h++){
  2467. c=tcpconn_id_hash[h];
  2468. while(c){
  2469. next=c->id_next;
  2470. if (is_tcp_main){
  2471. /* we cannot close or remove the fd if we are not in the
  2472. * tcp main proc.*/
  2473. if (!(c->flags & F_CONN_READER))
  2474. local_timer_del(&tcp_main_ltimer, &c->timer);
  2475. /* else still in some reader */
  2476. fd=c->s;
  2477. if (fd>0 && (!(c->flags & F_CONN_REMOVED)
  2478. #ifdef TCP_BUF_WRITE
  2479. || (c->flags & F_CONN_WRITE_W)
  2480. #endif /* TCP_BUF_WRITE */
  2481. )){
  2482. io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
  2483. c->flags|=F_CONN_REMOVED;
  2484. #ifdef TCP_BUF_WRITE
  2485. c->flags&=~F_CONN_WRITE_W;
  2486. #endif /* TCP_BUF_WRITE */
  2487. }
  2488. }else{
  2489. fd=-1;
  2490. }
  2491. #ifdef USE_TLS
  2492. if (fd>0 && c->type==PROTO_TLS)
  2493. tls_close(c, fd);
  2494. #endif
  2495. _tcpconn_rm(c);
  2496. if (fd>0) {
  2497. #ifdef TCP_FD_CACHE
  2498. if (likely(tcp_options.fd_cache)) shutdown(fd, SHUT_RDWR);
  2499. #endif /* TCP_FD_CACHE */
  2500. close(fd);
  2501. }
  2502. (*tcp_connections_no)--;
  2503. c=next;
  2504. }
  2505. }
  2506. TCPCONN_UNLOCK;
  2507. }
  2508. /* tcp main loop */
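/* sets up the send-fd queue, io_wait, the local timer and (if enabled) the
* fd cache, registers the listen sockets, the per-process unix sockets and
* the tcp reader sockets, then loops forever on the configured poll method:
* wait for io -> flush send2child_q -> expire old connections */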
  2509. void tcp_main_loop()
  2510. {
  2511. struct socket_info* si;
  2512. int r;
  2513. is_tcp_main=1; /* mark this process as tcp main */
  2514. tcp_main_max_fd_no=get_max_open_fds();
  2515. /* init send fd queues (here because we want mem. alloc only in the tcp
2516. * process) */
  2517. #ifdef SEND_FD_QUEUE
  2518. if (init_send_fd_queues()<0){
  2519. LOG(L_CRIT, "ERROR: init_tcp: could not init send fd queues\n");
  2520. goto error;
  2521. }
  2522. #endif
  2523. /* init io_wait (here because we want the memory allocated only in
  2524. * the tcp_main process) */
  2525. if (init_io_wait(&io_h, tcp_main_max_fd_no, tcp_poll_method)<0)
  2526. goto error;
  2527. /* init: start watching all the fds*/
  2528. /* init local timer */
  2529. if (init_local_timer(&tcp_main_ltimer, get_ticks_raw())!=0){
  2530. LOG(L_ERR, "ERROR: init_tcp: failed to init local timer\n");
  2531. goto error;
  2532. }
  2533. #ifdef TCP_FD_CACHE
  2534. if (tcp_options.fd_cache) tcp_fd_cache_init();
  2535. #endif /* TCP_FD_CACHE */
  2536. /* add all the sockets we listen on for connections */
  2537. for (si=tcp_listen; si; si=si->next){
  2538. if ((si->proto==PROTO_TCP) &&(si->socket!=-1)){
  2539. if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
  2540. LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
  2541. "listen socket to the fd list\n");
  2542. goto error;
  2543. }
  2544. }else{
  2545. LOG(L_CRIT, "BUG: tcp_main_loop: non tcp address in tcp_listen\n");
  2546. }
  2547. }
  2548. #ifdef USE_TLS
  2549. if (!tls_disable && tls_loaded()){
  2550. for (si=tls_listen; si; si=si->next){
  2551. if ((si->proto==PROTO_TLS) && (si->socket!=-1)){
  2552. if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
  2553. LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
  2554. "tls listen socket to the fd list\n");
  2555. goto error;
  2556. }
  2557. }else{
  2558. LOG(L_CRIT, "BUG: tcp_main_loop: non tls address"
  2559. " in tls_listen\n");
  2560. }
  2561. }
  2562. }
  2563. #endif
2564. /* add all the unix sockets used for communication with other ser processes
  2565. * (get fd, new connection a.s.o) */
  2566. for (r=1; r<process_no; r++){
  2567. if (pt[r].unix_sock>0) /* we can't have 0, we never close it!*/
  2568. if (io_watch_add(&io_h, pt[r].unix_sock, POLLIN,F_PROC, &pt[r])<0){
  2569. LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
  2570. "process %d unix socket to the fd list\n", r);
  2571. goto error;
  2572. }
  2573. }
2574. /* add all the unix sockets used for communication with the tcp children */
  2575. for (r=0; r<tcp_children_no; r++){
  2576. if (tcp_children[r].unix_sock>0)/*we can't have 0, we never close it!*/
  2577. if (io_watch_add(&io_h, tcp_children[r].unix_sock, POLLIN,
  2578. F_TCPCHILD, &tcp_children[r]) <0){
  2579. LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
  2580. "tcp child %d unix socket to the fd list\n", r);
  2581. goto error;
  2582. }
  2583. }
  2584. /* initialize the cfg framework */
  2585. if (cfg_child_init()) goto error;
  2586. /* main loop */
  2587. switch(io_h.poll_method){
  2588. case POLL_POLL:
  2589. while(1){
  2590. /* wait and process IO */
  2591. io_wait_loop_poll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
  2592. send_fd_queue_run(&send2child_q); /* then new io */
  2593. /* remove old connections */
  2594. tcp_timer_run();
  2595. }
  2596. break;
  2597. #ifdef HAVE_SELECT
  2598. case POLL_SELECT:
  2599. while(1){
  2600. io_wait_loop_select(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
  2601. send_fd_queue_run(&send2child_q); /* then new io */
  2602. tcp_timer_run();
  2603. }
  2604. break;
  2605. #endif
  2606. #ifdef HAVE_SIGIO_RT
  2607. case POLL_SIGIO_RT:
  2608. while(1){
  2609. io_wait_loop_sigio_rt(&io_h, TCP_MAIN_SELECT_TIMEOUT);
  2610. send_fd_queue_run(&send2child_q); /* then new io */
  2611. tcp_timer_run();
  2612. }
  2613. break;
  2614. #endif
  2615. #ifdef HAVE_EPOLL
  2616. case POLL_EPOLL_LT:
  2617. while(1){
  2618. io_wait_loop_epoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
  2619. send_fd_queue_run(&send2child_q); /* then new io */
  2620. tcp_timer_run();
  2621. }
  2622. break;
  2623. case POLL_EPOLL_ET:
  2624. while(1){
  2625. io_wait_loop_epoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 1);
  2626. send_fd_queue_run(&send2child_q); /* then new io */
  2627. tcp_timer_run();
  2628. }
  2629. break;
  2630. #endif
  2631. #ifdef HAVE_KQUEUE
  2632. case POLL_KQUEUE:
  2633. while(1){
  2634. io_wait_loop_kqueue(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
  2635. send_fd_queue_run(&send2child_q); /* then new io */
  2636. tcp_timer_run();
  2637. }
  2638. break;
  2639. #endif
  2640. #ifdef HAVE_DEVPOLL
  2641. case POLL_DEVPOLL:
  2642. while(1){
  2643. io_wait_loop_devpoll(&io_h, TCP_MAIN_SELECT_TIMEOUT, 0);
  2644. send_fd_queue_run(&send2child_q); /* then new io */
  2645. tcp_timer_run();
  2646. }
  2647. break;
  2648. #endif
  2649. default:
  2650. LOG(L_CRIT, "BUG: tcp_main_loop: no support for poll method "
  2651. " %s (%d)\n",
  2652. poll_method_name(io_h.poll_method), io_h.poll_method);
  2653. goto error;
  2654. }
  2655. error:
  2656. #ifdef SEND_FD_QUEUE
  2657. destroy_send_fd_queues();
  2658. #endif
  2659. destroy_io_wait(&io_h);
2660. LOG(L_CRIT, "ERROR: tcp_main_loop: exiting...\n");
  2661. exit(-1);
  2662. }
  2663. /* cleanup before exit */
  2664. void destroy_tcp()
  2665. {
  2666. if (tcpconn_id_hash){
  2667. if (tcpconn_lock)
  2668. TCPCONN_UNLOCK; /* hack: force-unlock the tcp lock in case
  2669. some process was terminated while holding
2670. it; this will allow an almost graceful
  2671. shutdown */
  2672. tcpconn_destroy_all();
  2673. shm_free(tcpconn_id_hash);
  2674. tcpconn_id_hash=0;
  2675. }
  2676. if (tcp_connections_no){
  2677. shm_free(tcp_connections_no);
  2678. tcp_connections_no=0;
  2679. }
  2680. #ifdef TCP_BUF_WRITE
  2681. if (tcp_total_wq){
  2682. shm_free(tcp_total_wq);
  2683. tcp_total_wq=0;
  2684. }
  2685. #endif /* TCP_BUF_WRITE */
  2686. if (connection_id){
  2687. shm_free(connection_id);
  2688. connection_id=0;
  2689. }
  2690. if (tcpconn_aliases_hash){
  2691. shm_free(tcpconn_aliases_hash);
  2692. tcpconn_aliases_hash=0;
  2693. }
  2694. if (tcpconn_lock){
  2695. lock_destroy(tcpconn_lock);
  2696. lock_dealloc((void*)tcpconn_lock);
  2697. tcpconn_lock=0;
  2698. }
  2699. if (tcp_children){
  2700. pkg_free(tcp_children);
  2701. tcp_children=0;
  2702. }
  2703. destroy_local_timer(&tcp_main_ltimer);
  2704. }
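/* allocates and initializes the shared tcp state (connection lock, counters,
* alias and id hash tables), normalizes the timeout config values and picks
* the io poll method; returns 0 on success, -1 on error (partially allocated
* state is released via destroy_tcp()) */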
  2705. int init_tcp()
  2706. {
  2707. char* poll_err;
  2708. tcp_options_check();
  2709. /* init lock */
  2710. tcpconn_lock=lock_alloc();
  2711. if (tcpconn_lock==0){
  2712. LOG(L_CRIT, "ERROR: init_tcp: could not alloc lock\n");
  2713. goto error;
  2714. }
  2715. if (lock_init(tcpconn_lock)==0){
  2716. LOG(L_CRIT, "ERROR: init_tcp: could not init lock\n");
  2717. lock_dealloc((void*)tcpconn_lock);
  2718. tcpconn_lock=0;
  2719. goto error;
  2720. }
  2721. /* init globals */
  2722. tcp_connections_no=shm_malloc(sizeof(int));
  2723. if (tcp_connections_no==0){
  2724. LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
  2725. goto error;
  2726. }
  2727. *tcp_connections_no=0;
  2728. connection_id=shm_malloc(sizeof(int));
  2729. if (connection_id==0){
  2730. LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
  2731. goto error;
  2732. }
  2733. *connection_id=1;
  2734. #ifdef TCP_BUF_WRITE
  2735. tcp_total_wq=shm_malloc(sizeof(*tcp_total_wq));
  2736. if (tcp_total_wq==0){
  2737. LOG(L_CRIT, "ERROR: init_tcp: could not alloc globals\n");
  2738. goto error;
  2739. }
  2740. #endif /* TCP_BUF_WRITE */
  2741. /* alloc hashtables*/
  2742. tcpconn_aliases_hash=(struct tcp_conn_alias**)
  2743. shm_malloc(TCP_ALIAS_HASH_SIZE* sizeof(struct tcp_conn_alias*));
  2744. if (tcpconn_aliases_hash==0){
  2745. LOG(L_CRIT, "ERROR: init_tcp: could not alloc address hashtable\n");
  2746. goto error;
  2747. }
  2748. tcpconn_id_hash=(struct tcp_connection**)shm_malloc(TCP_ID_HASH_SIZE*
  2749. sizeof(struct tcp_connection*));
  2750. if (tcpconn_id_hash==0){
  2751. LOG(L_CRIT, "ERROR: init_tcp: could not alloc id hashtable\n");
  2752. goto error;
  2753. }
  2754. /* init hashtables*/
  2755. memset((void*)tcpconn_aliases_hash, 0,
  2756. TCP_ALIAS_HASH_SIZE * sizeof(struct tcp_conn_alias*));
  2757. memset((void*)tcpconn_id_hash, 0,
  2758. TCP_ID_HASH_SIZE * sizeof(struct tcp_connection*));
  2759. /* fix config variables */
  2760. if (tcp_connect_timeout<0)
  2761. tcp_connect_timeout=DEFAULT_TCP_CONNECT_TIMEOUT;
  2762. if (tcp_send_timeout<0)
  2763. tcp_send_timeout=DEFAULT_TCP_SEND_TIMEOUT;
  2764. if (tcp_con_lifetime<0){
  2765. /* set to max value (~ 1/2 MAX_INT) */
  2766. tcp_con_lifetime=MAX_TCP_CON_LIFETIME;
  2767. }else{
  2768. if ((unsigned)tcp_con_lifetime >
  2769. (unsigned)TICKS_TO_S(MAX_TCP_CON_LIFETIME)){
  2770. LOG(L_WARN, "init_tcp: tcp_con_lifetime too big (%u s), "
  2771. " the maximum value is %u\n", tcp_con_lifetime,
  2772. TICKS_TO_S(MAX_TCP_CON_LIFETIME));
  2773. tcp_con_lifetime=MAX_TCP_CON_LIFETIME;
  2774. }else{
  2775. tcp_con_lifetime=S_TO_TICKS(tcp_con_lifetime);
  2776. }
  2777. }
  2778. poll_err=check_poll_method(tcp_poll_method);
  2779. /* set an appropriate poll method */
  2780. if (poll_err || (tcp_poll_method==0)){
  2781. tcp_poll_method=choose_poll_method();
  2782. if (poll_err){
  2783. LOG(L_ERR, "ERROR: init_tcp: %s, using %s instead\n",
  2784. poll_err, poll_method_name(tcp_poll_method));
  2785. }else{
  2786. LOG(L_INFO, "init_tcp: using %s as the io watch method"
  2787. " (auto detected)\n", poll_method_name(tcp_poll_method));
  2788. }
  2789. }else{
  2790. LOG(L_INFO, "init_tcp: using %s io watch method (config)\n",
  2791. poll_method_name(tcp_poll_method));
  2792. }
  2793. return 0;
  2794. error:
  2795. /* clean-up */
  2796. destroy_tcp();
  2797. return -1;
  2798. }
  2799. #ifdef TCP_CHILD_NON_BLOCKING
  2800. /* returns -1 on error */
  2801. static int set_non_blocking(int s)
  2802. {
  2803. int flags;
  2804. /* non-blocking */
  2805. flags=fcntl(s, F_GETFL);
  2806. if (flags==-1){
2807. LOG(L_ERR, "ERROR: set_non_blocking: fcntl failed: (%d) %s\n",
  2808. errno, strerror(errno));
  2809. goto error;
  2810. }
  2811. if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
  2812. LOG(L_ERR, "ERROR: set_non_blocking: fcntl: set non-blocking failed:"
  2813. " (%d) %s\n", errno, strerror(errno));
  2814. goto error;
  2815. }
  2816. return 0;
  2817. error:
  2818. return -1;
  2819. }
  2820. #endif
  2821. /* returns -1 on error, 0 on success */
  2822. int tcp_fix_child_sockets(int* fd)
  2823. {
  2824. #ifdef TCP_CHILD_NON_BLOCKING
  2825. if ((set_non_blocking(fd[0])<0) ||
  2826. (set_non_blocking(fd[1])<0)){
  2827. return -1;
  2828. }
  2829. #endif
  2830. return 0;
  2831. }
  2832. /* starts the tcp processes */
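/* registers the estimated number of extra fds, allocates the tcp_children
* array and forks tcp_children_no "tcp receiver" processes, each of which
* runs tcp_receive_loop() on the fd obtained from fork_tcp_process();
* returns 0 on success, -1 on error */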
  2833. int tcp_init_children()
  2834. {
  2835. int r;
2836. int reader_fd_1; /* for comm. with the tcp child (reader end) */
  2837. pid_t pid;
  2838. struct socket_info *si;
  2839. /* estimate max fd. no:
  2840. * 1 tcp send unix socket/all_proc,
  2841. * + 1 udp sock/udp proc + 1 tcp_child sock/tcp child*
  2842. * + no_listen_tcp */
  2843. for(r=0, si=tcp_listen; si; si=si->next, r++);
  2844. #ifdef USE_TLS
  2845. if (! tls_disable)
  2846. for (si=tls_listen; si; si=si->next, r++);
  2847. #endif
  2848. register_fds(r+tcp_max_connections+get_max_procs()-1 /* tcp main */);
  2849. #if 0
  2850. tcp_max_fd_no=get_max_procs()*2 +r-1 /* timer */ +3; /* stdin/out/err*/
  2851. /* max connections can be temporarily exceeded with estimated_process_count
2852. * - tcp_main (tcpconn_connect called simultaneously in all the
  2853. * processes) */
  2854. tcp_max_fd_no+=tcp_max_connections+get_max_procs()-1 /* tcp main */;
  2855. #endif
  2856. /* alloc the children array */
  2857. tcp_children=pkg_malloc(sizeof(struct tcp_child)*tcp_children_no);
  2858. if (tcp_children==0){
  2859. LOG(L_ERR, "ERROR: tcp_init_children: out of memory\n");
  2860. goto error;
  2861. }
  2862. /* create the tcp sock_info structures */
  2863. /* copy the sockets --moved to main_loop*/
  2864. /* fork children & create the socket pairs*/
  2865. for(r=0; r<tcp_children_no; r++){
  2866. child_rank++;
  2867. pid=fork_tcp_process(child_rank, "tcp receiver", r, &reader_fd_1);
  2868. if (pid<0){
  2869. LOG(L_ERR, "ERROR: tcp_main: fork failed: %s\n",
  2870. strerror(errno));
  2871. goto error;
  2872. }else if (pid>0){
  2873. /* parent */
  2874. }else{
  2875. /* child */
  2876. bind_address=0; /* force a SEGFAULT if someone uses a non-init.
  2877. bind address on tcp */
  2878. tcp_receive_loop(reader_fd_1);
  2879. }
  2880. }
  2881. return 0;
  2882. error:
  2883. return -1;
  2884. }
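/* fills ti with the current tcp status: number of reader processes, the
* connection limit, the current connection count and (with TCP_BUF_WRITE)
* the value of *tcp_total_wq (total queued write data) */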
  2885. void tcp_get_info(struct tcp_gen_info *ti)
  2886. {
  2887. ti->tcp_readers=tcp_children_no;
  2888. ti->tcp_max_connections=tcp_max_connections;
  2889. ti->tcp_connections_no=*tcp_connections_no;
  2890. #ifdef TCP_BUF_WRITE
  2891. ti->tcp_write_queued=*tcp_total_wq;
  2892. #else
  2893. ti->tcp_write_queued=0;
  2894. #endif /* TCP_BUF_WRITE */
  2895. }
  2896. #endif