t_reply.c

  1. /*
  2. * $Id$
  3. *
  4. *
  5. * Copyright (C) 2001-2003 FhG Fokus
  6. *
  7. * This file is part of ser, a free SIP server.
  8. *
  9. * ser is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version
  13. *
  14. * For a license to use the ser software under conditions
  15. * other than those described here, or to purchase support for this
  16. * software, please contact iptel.org by e-mail at the following addresses:
  17. * [email protected]
  18. *
  19. * ser is distributed in the hope that it will be useful,
  20. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  21. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  22. * GNU General Public License for more details.
  23. *
  24. * You should have received a copy of the GNU General Public License
  25. * along with this program; if not, write to the Free Software
  26. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  27. *
  28. * History:
  29. * --------
  30. * 2003-01-19 faked lump list created in on_reply handlers
  31. * 2003-01-27 next baby-step to removing ZT - PRESERVE_ZT (jiri)
  32. * 2003-02-13 updated to use rb->dst (andrei)
  33. * 2003-02-18 replaced TOTAG_LEN w/ TOTAG_VALUE_LEN (TOTAG_LEN was defined
  34. * twice with different values!) (andrei)
  35. * 2003-02-28 scratchpad compatibility abandoned (jiri)
  36. * 2003-03-01 kr set through a function now (jiri)
  37. * 2003-03-06 saving of to-tags for ACK/200 matching introduced,
  38. * voicemail changes accepted, updated to new callback
  39. * names (jiri)
  40. * 2003-03-10 fixed new to tag bug/typo (if w/o {}) (andrei)
  41. * 2003-03-16 removed _TOTAG (jiri)
  42. * 2003-03-31 200 for INVITE/UAS resent even for UDP (jiri)
  43. * 2003-03-31 removed msg->repl_add_rm (andrei)
  44. * 2003-04-05 s/reply_route/failure_route, onreply_route introduced (jiri)
  45. * 2003-04-14 local acks generated before reply processing to avoid
  46. * delays in lengthy reply processing (like opening a TCP
  47. * connection to an unavailable destination) (jiri)
  48. * 2003-09-11 updates to new build_res_buf_from_sip_req() interface (bogdan)
  49. * 2003-09-11 t_reply_with_body() reshaped to use reply_lumps +
  50. * build_res_buf_from_sip_req() instead of
  51. * build_res_buf_with_body_from_sip_req() (bogdan)
  52. * 2003-11-05 flag context updated from failure/reply handlers back
  53. * to transaction context (jiri)
  54. * 2003-11-11: build_lump_rpl() removed, add_lump_rpl() has flags (bogdan)
  55. * 2003-12-04 global TM callbacks switched to per transaction callbacks
  56. * (bogdan)
  57. * 2004-02-06: support for user pref. added - destroy_avps (bogdan)
  58. * 2003-11-05 flag context updated from failure/reply handlers back
  59. * to transaction context (jiri)
  60. * 2003-11-11: build_lump_rpl() removed, add_lump_rpl() has flags (bogdan)
  61. * 2004-02-13: t->is_invite and t->local replaced with flags (bogdan)
  62. * 2004-02-18 fifo_t_reply imported from vm module (bogdan)
  63. * 2004-08-23 avp list is available from failure/on_reply routes (bogdan)
  64. * 2004-10-01 added a new param.: restart_fr_on_each_reply (andrei)
  65. * 2005-03-01 force the incoming interface of the request for
  66. * stateful replies (bogdan)
  67. * 2005-09-01 reverted to the old way of checking response.dst.send_sock
  68. * in t_retransmit_reply & reply_light (andrei)
  69. * 2005-11-09 updated to the new timers interface (andrei)
  70. * 2006-02-07 named routes support (andrei)
  71. * 2006-09-13 t_pick_branch will skip also over branches with empty reply
  72. * t_should_relay_response will re-pick the branch if failure
  73. * route /handlers added new branches (andrei)
  74. * 2006-10-05 better final reply selection: t_pick_branch will prefer 6xx,
  75. * if no 6xx reply => lowest class/code; if class==4xx =>
  76. * prefer 401, 407, 415, 420 and 484 (andrei)
  77. * 2006-10-12 dns failover when a 503 is received
  78. * replace a 503 final relayed reply by a 500 (andrei)
  79. * 2006-10-16 aggregate all the authorization headers/challenges when
  80. * the final response is 401 or 407 (andrei)
  81. * 2007-03-08 membar_write() used in update_totag_set(...)(andrei)
  82. * 2007-03-15 build_local_ack: removed next_hop and replaced with dst to
  83. * avoid resolving next_hop twice
  84. * added TMCB_ONSEND callbacks support for replies & ACKs (andrei)
  85. * 2007-05-28: build_ack() constructs the ACK from the
  86. * outgoing INVITE instead of the incoming one.
  87. * (it can be disabled with reparse_invite=0) (Miklos)
  88. * 2007-09-03: drop_replies() has been introduced (Miklos)
  89. * 2008-03-12 use cancel_b_method on 6xx (andrei)
  90. * 2008-05-30 make sure the wait timer is started after we don't need t
  91. * anymore to allow safe calls from fr_timer (andrei)
  92. *
  93. */
  94. #ifdef EXTRA_DEBUG
  95. #include <assert.h>
  96. #endif
  97. #include "../../comp_defs.h"
  98. #include "../../hash_func.h"
  99. #include "../../dprint.h"
  100. #include "../../config.h"
  101. #include "../../parser/parser_f.h"
  102. #include "../../ut.h"
  103. #include "../../timer.h"
  104. #include "../../error.h"
  105. #include "../../action.h"
  106. #include "../../dset.h"
  107. #include "../../tags.h"
  108. #include "../../data_lump.h"
  109. #include "../../data_lump_rpl.h"
  110. #include "../../usr_avp.h"
  111. #include "../../atomic_ops.h" /* membar_write() */
  112. #include "../../compiler_opt.h"
  113. #include "../../select_buf.h" /* reset_static_buffer() */
  114. #ifdef USE_DST_BLACKLIST
  115. #include "../../dst_blacklist.h"
  116. #endif
  117. #ifdef USE_DNS_FAILOVER
  118. #include "../../dns_cache.h"
  119. #include "../../cfg_core.h" /* cfg_get(core, core_cfg, use_dns_failover) */
  120. #endif
  121. #include "defs.h"
  122. #include "config.h"
  123. #include "h_table.h"
  124. #include "t_hooks.h"
  125. #include "t_funcs.h"
  126. #include "t_reply.h"
  127. #include "t_cancel.h"
  128. #include "t_msgbuilder.h"
  129. #include "t_lookup.h"
  130. #include "t_fwd.h"
  131. #include "fix_lumps.h"
  132. #include "t_stats.h"
  133. #include "uac.h"
  134. /* are we processing original or shmemed request ? */
  135. enum route_mode rmode=MODE_REQUEST;
  136. /* private place where we create to-tags for replies */
  137. /* janakj: made public, I need to access this value to store it in dialogs */
  138. char tm_tags[TOTAG_VALUE_LEN];
  139. /* bogdan: pack tm_tag buffer and len into a str to pass them to
  140. * build_res_buf_from_sip_req() */
  141. static str tm_tag = {tm_tags,TOTAG_VALUE_LEN};
  142. char *tm_tag_suffix;
  143. /* where to go if there is no positive reply */
  144. static int goto_on_negative=0;
  145. /* where to go on receipt of reply */
  146. static int goto_on_reply=0;
  147. /* where to go on receipt of reply without transaction context */
  148. int goto_on_sl_reply=0;
  149. /* responses priority (used by t_pick_branch)
  150. * 0xx is used only for the initial value (=> should have no chance to be
  151. * selected => the highest value); 1xx is not used */
  152. static unsigned short resp_class_prio[]={
  153. 32000, /* 0-99, special */
  154. 11000, /* 1xx, special, should never be used */
  155. 0, /* 2xx, high priority (not used, 2xx are immediately
  156. forwarded and t_pick_branch will never be called if
  157. a 2xx was received) */
  158. 3000, /* 3xx */
  159. 4000, /* 4xx */
  160. 5000, /* 5xx */
  161. 1000 /* 6xx, highest priority */
  162. };
  163. /* we store the reply_route # in private memory which is
  164. then processed during t_relay; we cannot set this value
  165. before t_relay creates transaction context or after
  166. t_relay when a reply may arrive after we set this
  167. value; that's why we do it how we do it, i.e.,
  168. *inside* t_relay using hints stored in private memory
  169. before t_relay is called
  170. */
  171. void t_on_negative( unsigned int go_to )
  172. {
  173. struct cell *t = get_t();
  174. /* in MODE_REPLY and MODE_ONFAILURE T will be set to current transaction;
  175. * in MODE_REQUEST T will be set only if the transaction was already
  176. * created; if not -> use the static variable */
  177. if (!t || t==T_UNDEFINED )
  178. goto_on_negative=go_to;
  179. else
  180. get_t()->on_negative = go_to;
  181. }
  182. void t_on_reply( unsigned int go_to )
  183. {
  184. struct cell *t = get_t();
  185. /* in MODE_REPLY and MODE_ONFAILURE T will be set to current transaction;
  186. * in MODE_REQUEST T will be set only if the transaction was already
  187. * created; if not -> use the static variable */
  188. if (!t || t==T_UNDEFINED )
  189. goto_on_reply=go_to;
  190. else
  191. get_t()->on_reply = go_to;
  192. }
  193. unsigned int get_on_negative()
  194. {
  195. return goto_on_negative;
  196. }
  197. unsigned int get_on_reply()
  198. {
  199. return goto_on_reply;
  200. }
  201. void tm_init_tags()
  202. {
  203. init_tags(tm_tags, &tm_tag_suffix,
  204. "SER-TM/tags", TM_TAG_SEPARATOR );
  205. }
  206. /* returns 0 if the message was previously acknowledged
  207. * (i.e., no E2EACK callback is needed) and one if the
  208. * callback shall be executed */
  209. int unmatched_totag(struct cell *t, struct sip_msg *ack)
  210. {
  211. struct totag_elem *i;
  212. str *tag;
  213. if (parse_headers(ack, HDR_TO_F,0)==-1 ||
  214. !ack->to ) {
  215. LOG(L_ERR, "ERROR: unmatched_totag: To invalid\n");
  216. return 1;
  217. }
  218. tag=&get_to(ack)->tag_value;
  219. i=t->fwded_totags;
  220. while(i){
  221. membar_depends(); /* make sure we don't see some old i content
  222. (needed on CPUs like Alpha) */
  223. if (i->tag.len==tag->len
  224. && memcmp(i->tag.s, tag->s, tag->len)==0) {
  225. DBG("DEBUG: totag for e2e ACK found: %d\n", i->acked);
  226. /* mark totag as acked and return 0 if this was the first ack
  227. * and 1 otherwise */
  228. return atomic_get_and_set_int(&i->acked, 1);
  229. }
  230. i=i->next;
  231. }
  232. /* surprising: to-tag never sighted before */
  233. return 1;
  234. }
  235. static inline void update_local_tags(struct cell *trans,
  236. struct bookmark *bm, char *dst_buffer,
  237. char *src_buffer /* to which bm refers */)
  238. {
  239. if (bm->to_tag_val.s) {
  240. trans->uas.local_totag.s=bm->to_tag_val.s-src_buffer+dst_buffer;
  241. trans->uas.local_totag.len=bm->to_tag_val.len;
  242. }
  243. }
  244. /* append a newly received tag from a 200/INVITE to
  245. * transaction's set; (only safe if called from within
  246. * a REPLY_LOCK); it returns 1 if such a to tag already
  247. * exists
  248. */
  249. inline static int update_totag_set(struct cell *t, struct sip_msg *ok)
  250. {
  251. struct totag_elem *i, *n;
  252. str *tag;
  253. char *s;
  254. if (!ok->to || !ok->to->parsed) {
  255. LOG(L_ERR, "ERROR: update_totag_set: to not parsed\n");
  256. return 0;
  257. }
  258. tag=&get_to(ok)->tag_value;
  259. if (!tag->s) {
  260. DBG("ERROR: update_totag_set: no tag in to\n");
  261. return 0;
  262. }
  263. for (i=t->fwded_totags; i; i=i->next) {
  264. if (i->tag.len==tag->len
  265. && memcmp(i->tag.s, tag->s, tag->len) ==0 ){
  266. /* to tag already recorded */
  267. #ifdef XL_DEBUG
  268. LOG(L_CRIT, "DEBUG: update_totag_set: totag retransmission\n");
  269. #else
  270. DBG("DEBUG: update_totag_set: totag retransmission\n");
  271. #endif
  272. return 1;
  273. }
  274. }
  275. /* that's a new to-tag -- record it */
  276. shm_lock();
  277. n=(struct totag_elem*) shm_malloc_unsafe(sizeof(struct totag_elem));
  278. s=(char *)shm_malloc_unsafe(tag->len);
  279. shm_unlock();
  280. if (!s || !n) {
  281. LOG(L_ERR, "ERROR: update_totag_set: no memory \n");
  282. if (n) shm_free(n);
  283. if (s) shm_free(s);
  284. return 0;
  285. }
  286. memset(n, 0, sizeof(struct totag_elem));
  287. memcpy(s, tag->s, tag->len );
  288. n->tag.s=s;n->tag.len=tag->len;
  289. n->next=t->fwded_totags;
  290. membar_write(); /* make sure all the changes to n are visible on all cpus
  291. before we update t->fwded_totags. This is needed for
  292. three reasons: the compiler might reorder some of the
  293. writes, the cpu/cache could also reorder them with
  294. respect to the visibility on other cpus
  295. (e.g. some of the changes to n could be visible on
  296. another cpu _after_ seeing t->fwded_totags=n) and
  297. the "readers" (unmatched_tags()) do not use locks and
  298. can be called simultaneously on another cpu.*/
  299. t->fwded_totags=n;
  300. DBG("DEBUG: update_totag_set: new totag \n");
  301. return 0;
  302. }
  303. /*
  304. * Build an ACK to a negative reply
  305. */
  306. static char *build_ack(struct sip_msg* rpl,struct cell *trans,int branch,
  307. unsigned int *ret_len)
  308. {
  309. str to;
  310. if (parse_headers(rpl,HDR_TO_F, 0)==-1 || !rpl->to ) {
  311. LOG(L_ERR, "ERROR: build_ack: "
  312. "cannot generate a HBH ACK if key HFs in reply missing\n");
  313. return NULL;
  314. }
  315. to.s=rpl->to->name.s;
  316. to.len=rpl->to->len;
  317. if (cfg_get(tm, tm_cfg, reparse_invite)) {
  318. /* build the ACK from the INVITE which was sent out */
  319. return build_local_reparse( trans, branch, ret_len,
  320. ACK, ACK_LEN, &to );
  321. } else {
  322. /* build the ACK from the received INVITE */
  323. return build_local( trans, branch, ret_len,
  324. ACK, ACK_LEN, &to );
  325. }
  326. }
  327. /*
  328. * The function builds an ACK to 200 OK of local transactions, honoring the
  329. * route set.
  330. * The destination to which the message should be sent will be returned
  331. * in the dst parameter.
  332. * returns 0 on error and a pkg_malloc'ed buffer with length in ret_len
  333. * and intended destination in dst on success.
  334. */
  335. static char *build_local_ack(struct sip_msg* rpl, struct cell *trans,
  336. int branch, unsigned int *ret_len,
  337. struct dest_info* dst)
  338. {
  339. #ifdef WITH_AS_SUPPORT
  340. struct retr_buf *local_ack, *old_lack;
  341. /* do we have the ACK already cached (previously built)? */
  342. if ((local_ack = trans->uac[0].local_ack) && local_ack->buffer_len) {
  343. DEBUG("reusing ACK retr. buffer.\n");
  344. *ret_len = local_ack->buffer_len;
  345. *dst = local_ack->dst;
  346. return local_ack->buffer;
  347. }
  348. /* the ACK will be built (and cached) by the AS (ack_local_uac()) */
  349. if (trans->flags & T_NO_AUTO_ACK)
  350. return NULL;
  351. if (! (local_ack = local_ack_rb(rpl, trans, branch, /*hdrs*/NULL,
  352. /*body*/NULL))) {
  353. ERR("failed to build local ACK retransmission buffer (T@%p).\n",trans);
  354. return NULL;
  355. }
  356. /* set the new buffer, but only if not already set (concurrent 2xx) */
  357. if ((old_lack = (struct retr_buf *)atomic_cmpxchg_long(
  358. (void *)&trans->uac[0].local_ack, 0, (long)local_ack))) {
  359. /* buffer already set: trash current and use the winning one */
  360. INFO("concurrent 2xx to local INVITE detected (T@%p).\n", trans);
  361. free_local_ack(local_ack);
  362. local_ack = old_lack;
  363. }
  364. *ret_len = local_ack->buffer_len;
  365. *dst = local_ack->dst;
  366. return local_ack->buffer;
  367. #else /* ! WITH_AS_SUPPORT */
  368. return build_dlg_ack(rpl, trans, branch, /*hdrs*/NULL, /*body*/NULL,
  369. ret_len, dst);
  370. #endif /* WITH_AS_SUPPORT */
  371. }
  372. #if 0 /* candidate for removal --andrei */
  373. /*
  374. * The function is used to send a locally generated ACK to an INVITE
  375. * (tm generates the ACK on behalf of an application using UAC)
  376. */
  377. static int send_local_ack(struct sip_msg* msg, str* next_hop,
  378. char* ack, int ack_len)
  379. {
  380. struct dest_info dst;
  381. #ifdef USE_DNS_FAILOVER
  382. struct dns_srv_handle dns_h;
  383. #endif
  384. if (!next_hop) {
  385. LOG(L_ERR, "send_local_ack: Invalid parameter value\n");
  386. return -1;
  387. }
  388. #ifdef USE_DNS_FAILOVER
  389. if (cfg_get(core, core_cfg, use_dns_failover)){
  390. dns_srv_handle_init(&dns_h);
  391. if ((uri2dst(&dns_h, &dst, msg, next_hop, PROTO_NONE)==0) ||
  392. (dst.send_sock==0)){
  393. dns_srv_handle_put(&dns_h);
  394. LOG(L_ERR, "send_local_ack: no socket found\n");
  395. return -1;
  396. }
  397. dns_srv_handle_put(&dns_h); /* not needed anymore */
  398. }else{
  399. if ((uri2dst(0, &dst, msg, next_hop, PROTO_NONE)==0) ||
  400. (dst.send_sock==0)){
  401. LOG(L_ERR, "send_local_ack: no socket found\n");
  402. return -1;
  403. }
  404. }
  405. #else
  406. if ((uri2dst(&dst, msg, next_hop, PROTO_NONE)==0) || (dst.send_sock==0)){
  407. LOG(L_ERR, "send_local_ack: no socket found\n");
  408. return -1;
  409. }
  410. #endif
  411. return msg_send(&dst, ack, ack_len);
  412. }
  413. #endif
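/* start retransmitting the final UAS reply of an INVITE transaction:
 * negative (>=300) replies use the normal retransmission timer, while a
 * locally generated 2xx (relayed_reply_branch==-2) is force-retransmitted
 * even over reliable transports (see the comments inside) */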
  414. inline static void start_final_repl_retr( struct cell *t )
  415. {
  416. if (unlikely(!is_local(t) && t->uas.request->REQ_METHOD==METHOD_INVITE )){
  417. /* crank timers for negative replies */
  418. if (t->uas.status>=300) {
  419. if (start_retr(&t->uas.response)!=0)
  420. LOG(L_CRIT, "BUG: start_final_repl_retr: start retr failed"
  421. " for %p\n", &t->uas.response);
  422. return;
  423. }
  424. /* local UAS retransmits too */
  425. if (t->relayed_reply_branch==-2 && t->uas.status>=200) {
  426. /* we retransmit 200/INVs regardless of transport --
  427. even if TCP used, UDP could be used upstream and
  428. lose the 200, which is not retransmitted by proxies
  429. */
  430. if (force_retr( &t->uas.response )!=0)
  431. LOG(L_CRIT, "BUG: start_final_repl_retr: force retr failed for"
  432. " %p\n", &t->uas.response);
  433. return;
  434. }
  435. }
  436. }
  437. static int _reply_light( struct cell *trans, char* buf, unsigned int len,
  438. unsigned int code, char * text,
  439. char *to_tag, unsigned int to_tag_len, int lock,
  440. struct bookmark *bm )
  441. {
  442. struct retr_buf *rb;
  443. unsigned int buf_len;
  444. branch_bm_t cancel_bitmap;
  445. #ifdef TMCB_ONSEND
  446. struct tmcb_params onsend_params;
  447. #endif
  448. if (!buf)
  449. {
  450. DBG("DEBUG: _reply_light: response building failed\n");
  451. /* determine if there are some branches to be canceled */
  452. if ( is_invite(trans) ) {
  453. if (lock) LOCK_REPLIES( trans );
  454. which_cancel(trans, &cancel_bitmap );
  455. if (lock) UNLOCK_REPLIES( trans );
  456. }
  457. /* and clean-up, including cancellations, if needed */
  458. goto error;
  459. }
  460. cancel_bitmap=0;
  461. if (lock) LOCK_REPLIES( trans );
  462. if ( is_invite(trans) ) which_cancel(trans, &cancel_bitmap );
  463. if (trans->uas.status>=200) {
  464. LOG( L_ERR, "ERROR: _reply_light: can't generate %d reply"
  465. " when a final %d was sent out\n", code, trans->uas.status);
  466. goto error2;
  467. }
  468. rb = & trans->uas.response;
  469. rb->activ_type=code;
  470. trans->uas.status = code;
  471. buf_len = rb->buffer ? len : len + REPLY_OVERBUFFER_LEN;
  472. rb->buffer = (char*)shm_resize( rb->buffer, buf_len );
  473. /* puts the reply's buffer to uas.response */
  474. if (! rb->buffer ) {
  475. LOG(L_ERR, "ERROR: _reply_light: cannot allocate shmem buffer\n");
  476. goto error3;
  477. }
  478. update_local_tags(trans, bm, rb->buffer, buf);
  479. rb->buffer_len = len ;
  480. memcpy( rb->buffer , buf , len );
  481. /* needs to be protected too because which timers are set depends
  482. on the current transaction's status */
  483. /* t_update_timers_after_sending_reply( rb ); */
  484. update_reply_stats( code );
  485. trans->relayed_reply_branch=-2;
  486. t_stats_replied_locally();
  487. if (lock) UNLOCK_REPLIES( trans );
  488. /* do UAC cleanup procedures in case we generated
  489. a final answer while there are still pending UACs */
  490. if (code>=200) {
  491. if ( is_local(trans) ) {
  492. DBG("DEBUG: local transaction completed from _reply\n");
  493. if ( unlikely(has_tran_tmcbs(trans, TMCB_LOCAL_COMPLETED)) )
  494. run_trans_callbacks( TMCB_LOCAL_COMPLETED, trans,
  495. 0, FAKED_REPLY, code);
  496. } else {
  497. if ( unlikely(has_tran_tmcbs(trans, TMCB_RESPONSE_OUT)) )
  498. run_trans_callbacks( TMCB_RESPONSE_OUT, trans,
  499. trans->uas.request, FAKED_REPLY, code);
  500. }
  501. cleanup_uac_timers( trans );
  502. if (is_invite(trans))
  503. cancel_uacs( trans, cancel_bitmap, F_CANCEL_B_KILL );
  504. start_final_repl_retr( trans );
  505. }
  506. /* send it out */
  507. /* first check if we managed to resolve topmost Via -- if
  508. not yet, don't try to retransmit
  509. */
  510. /*
  511. response.dst.send_sock might be unset if the process that created
  512. the original transaction has not finished initialising the
  513. retransmission buffer (see t_newtran/ init_rb).
  514. If reply_to_via is set and via contains a host name (and not an ip)
  515. the chances for this increase a lot.
  516. */
  517. if (!trans->uas.response.dst.send_sock) {
  518. LOG(L_ERR, "ERROR: _reply_light: no resolved dst to send reply to\n");
  519. } else {
  520. #ifdef TMCB_ONSEND
  521. if (SEND_PR_BUFFER( rb, buf, len )>=0)
  522. if (unlikely(has_tran_tmcbs(trans, TMCB_RESPONSE_SENT))){
  523. INIT_TMCB_ONSEND_PARAMS(onsend_params, 0, 0, rb, &rb->dst,
  524. buf, len, TMCB_LOCAL_F, rb->branch, code);
  525. run_onsend_callbacks2(TMCB_RESPONSE_SENT, trans,
  526. &onsend_params);
  527. }
  528. #else
  529. SEND_PR_BUFFER( rb, buf, len );
  530. #endif
  531. DBG("DEBUG: reply sent out. buf=%p: %.9s..., shmem=%p: %.9s\n",
  532. buf, buf, rb->buffer, rb->buffer );
  533. }
  534. if (code>=200) {
  535. /* start wait timer after finishing with t so that this function can
  536. * be safely called from a fr_timer which allows quick timer dels
  537. * (timer_allow_del()) (there's no chance of having the wait handler
  538. * executed while we still need t) --andrei */
  539. put_on_wait(trans);
  540. }
  541. pkg_free( buf ) ;
  542. DBG("DEBUG: _reply_light: finished\n");
  543. return 1;
  544. error3:
  545. error2:
  546. if (lock) UNLOCK_REPLIES( trans );
  547. pkg_free ( buf );
  548. error:
  549. /* do UAC cleanup */
  550. cleanup_uac_timers( trans );
  551. if ( is_invite(trans) )
  552. cancel_uacs( trans, cancel_bitmap, F_CANCEL_B_KILL);
  553. /* we did not succeed -- put the transaction on wait */
  554. put_on_wait(trans);
  555. return -1;
  556. }
  557. /* send a UAS reply
  558. * returns 1 if everything was OK or -1 for error
  559. */
  560. static int _reply( struct cell *trans, struct sip_msg* p_msg,
  561. unsigned int code, char * text, int lock )
  562. {
  563. unsigned int len;
  564. char * buf, *dset;
  565. struct bookmark bm;
  566. int dset_len;
  567. if (code>=200) set_kr(REQ_RPLD);
  568. /* compute the buffer in private memory prior to entering lock;
  569. * create to-tag if needed */
  570. /* if that is a redirection message, dump current message set to it */
  571. if (code>=300 && code<400) {
  572. dset=print_dset(p_msg, &dset_len);
  573. if (dset) {
  574. add_lump_rpl(p_msg, dset, dset_len, LUMP_RPL_HDR);
  575. }
  576. }
  577. if (code>=180 && p_msg->to
  578. && (get_to(p_msg)->tag_value.s==0
  579. || get_to(p_msg)->tag_value.len==0)) {
  580. calc_crc_suffix( p_msg, tm_tag_suffix );
  581. buf = build_res_buf_from_sip_req(code,text, &tm_tag, p_msg, &len, &bm);
  582. return _reply_light( trans, buf, len, code, text,
  583. tm_tag.s, TOTAG_VALUE_LEN, lock, &bm);
  584. } else {
  585. buf = build_res_buf_from_sip_req(code,text, 0 /*no to-tag*/,
  586. p_msg, &len, &bm);
  587. return _reply_light(trans,buf,len,code,text,
  588. 0, 0, /* no to-tag */lock, &bm);
  589. }
  590. }
  591. /* if msg is set -> fake the env. vars to conform to the msg; if NULL,
  592. * the env. will be restored to the original */
  593. void faked_env( struct cell *t,struct sip_msg *msg)
  594. {
  595. static enum route_mode backup_mode;
  596. static struct cell *backup_t;
  597. static unsigned int backup_msgid;
  598. static avp_list_t* backup_user_from, *backup_user_to;
  599. static avp_list_t* backup_domain_from, *backup_domain_to;
  600. static avp_list_t* backup_uri_from, *backup_uri_to;
  601. static struct socket_info* backup_si;
  602. if (msg) {
  603. /* remember we are back in request processing, but process
  604. * a shmem-ed replica of the request; advertise it in rmode;
  605. * for example t_reply needs to know that
  606. */
  607. backup_mode=rmode;
  608. rmode=MODE_ONFAILURE;
  609. /* also, tm actions check at the beginning whether the transaction
  610. * is set -- whether we are called from a reply-processing
  611. * or a timer process, we need to set the current transaction;
  612. * otherwise the actions would attempt to look the transaction
  613. * up (unnecessary overhead, refcounting)
  614. */
  615. /* backup */
  616. backup_t=get_t();
  617. backup_msgid=global_msg_id;
  618. /* fake transaction and message id */
  619. global_msg_id=msg->id;
  620. set_t(t);
  621. /* make available the avp list from transaction */
  622. backup_uri_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, &t->uri_avps_from );
  623. backup_uri_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &t->uri_avps_to );
  624. backup_user_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, &t->user_avps_from );
  625. backup_user_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &t->user_avps_to );
  626. backup_domain_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, &t->domain_avps_from );
  627. backup_domain_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, &t->domain_avps_to );
  628. /* set default send address to the saved value */
  629. backup_si=bind_address;
  630. bind_address=t->uac[0].request.dst.send_sock;
  631. } else {
  632. /* restore original environment */
  633. set_t(backup_t);
  634. global_msg_id=backup_msgid;
  635. rmode=backup_mode;
  636. /* restore original avp list */
  637. set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, backup_user_from );
  638. set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, backup_user_to );
  639. set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, backup_domain_from );
  640. set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, backup_domain_to );
  641. set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, backup_uri_from );
  642. set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, backup_uri_to );
  643. bind_address=backup_si;
  644. }
  645. }
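/* initialize faked_req as a pkg-memory copy of the shmem-ed request;
 * new_uri and dst_uri get private pkg copies, since failure handlers may
 * change them; returns 1 on success and 0 on error */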
  646. int fake_req(struct sip_msg *faked_req,
  647. struct sip_msg *shmem_msg, int extra_flags)
  648. {
  649. /* on_negative_reply faked msg now copied from shmem msg (as opposed
  650. * to zero-ing) -- more "read-only" actions (exec in particular) will
  651. * work from reply_route as they will see msg->from, etc.; caution,
  652. * rw actions may append some pkg stuff to msg, which will possibly
  653. * never be released (shmem is released in a single block) */
  654. memcpy( faked_req, shmem_msg, sizeof(struct sip_msg));
  655. /* if we set msg_id to something different from current's message
  656. * id, the first t_fork will properly clean new branch URIs */
  657. faked_req->id=shmem_msg->id-1;
  658. /* msg->parsed_uri_ok must be reset since msg_parsed_uri is
  659. * not cloned (and cannot be cloned) */
  660. faked_req->parsed_uri_ok = 0;
  661. faked_req->msg_flags|=extra_flags; /* set the extra tm flags */
  662. /* new_uri can change -- make a private copy */
  663. if (shmem_msg->new_uri.s!=0 && shmem_msg->new_uri.len!=0) {
  664. faked_req->new_uri.s=pkg_malloc(shmem_msg->new_uri.len+1);
  665. if (!faked_req->new_uri.s) {
  666. LOG(L_ERR, "ERROR: fake_req: no uri/pkg mem\n");
  667. goto error00;
  668. }
  669. faked_req->new_uri.len=shmem_msg->new_uri.len;
  670. memcpy( faked_req->new_uri.s, shmem_msg->new_uri.s,
  671. faked_req->new_uri.len);
  672. faked_req->new_uri.s[faked_req->new_uri.len]=0;
  673. }
  674. /* dst_uri can change ALSO!!! -- make a private copy */
  675. if (shmem_msg->dst_uri.s!=0 && shmem_msg->dst_uri.len!=0) {
  676. faked_req->dst_uri.s=pkg_malloc(shmem_msg->dst_uri.len+1);
  677. if (!faked_req->dst_uri.s) {
  678. LOG(L_ERR, "ERROR: fake_req: no uri/pkg mem\n");
  679. goto error00;
  680. }
  681. faked_req->dst_uri.len=shmem_msg->dst_uri.len;
  682. memcpy( faked_req->dst_uri.s, shmem_msg->dst_uri.s,
  683. faked_req->dst_uri.len);
  684. faked_req->dst_uri.s[faked_req->dst_uri.len]=0;
  685. }
  686. return 1;
  687. error00:
  688. return 0;
  689. }
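/* undo fake_req(): free the pkg copies of new_uri/dst_uri, the non-shm
 * lumps and reply lumps added by failure handlers, and any header parse
 * results that do not point into the shmem-ed uas.request block */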
  690. void free_faked_req(struct sip_msg *faked_req, struct cell *t)
  691. {
  692. struct hdr_field *hdr;
  693. if (faked_req->new_uri.s) {
  694. pkg_free(faked_req->new_uri.s);
  695. faked_req->new_uri.s = 0;
  696. }
  697. if (faked_req->dst_uri.s) {
  698. pkg_free(faked_req->dst_uri.s);
  699. faked_req->dst_uri.s = 0;
  700. }
  701. /* free all types of lump that were added in failure handlers */
  702. del_nonshm_lump( &(faked_req->add_rm) );
  703. del_nonshm_lump( &(faked_req->body_lumps) );
  704. del_nonshm_lump_rpl( &(faked_req->reply_lump) );
  705. /* free header's parsed structures that were added by failure handlers */
  706. for( hdr=faked_req->headers ; hdr ; hdr=hdr->next ) {
  707. if ( hdr->parsed && hdr_allocs_parse(hdr) &&
  708. (hdr->parsed<(void*)t->uas.request ||
  709. hdr->parsed>=(void*)t->uas.end_request)) {
  710. /* the header's parsed field doesn't point inside the uas.request memory
  711. * chunk -> it was added by failure funcs. -> free it as pkg */
  712. DBG("DBG:free_faked_req: removing hdr->parsed %d\n",
  713. hdr->type);
  714. clean_hdr_field(hdr);
  715. hdr->parsed = 0;
  716. }
  717. }
  718. }
  719. /* runs the failure handlers (failure_route and callbacks); returns 1 on success, 0 on error */
  720. int run_failure_handlers(struct cell *t, struct sip_msg *rpl,
  721. int code, int extra_flags)
  722. {
  723. static struct sip_msg faked_req;
  724. struct sip_msg *shmem_msg = t->uas.request;
  725. int on_failure;
  726. struct run_act_ctx ra_ctx;
  727. /* failure_route for a local UAC? */
  728. if (!shmem_msg) {
  729. LOG(L_WARN,"Warning: run_failure_handlers: no UAC support (%d, %d) \n",
  730. t->on_negative, t->tmcb_hl.reg_types);
  731. return 0;
  732. }
  733. /* don't start faking anything if we don't have to */
  734. if (unlikely(!t->on_negative && !has_tran_tmcbs( t, TMCB_ON_FAILURE))) {
  735. LOG(L_WARN,
  736. "Warning: run_failure_handlers: no negative handler (%d, %d)\n",
  737. t->on_negative,
  738. t->tmcb_hl.reg_types);
  739. return 1;
  740. }
  741. if (!fake_req(&faked_req, shmem_msg, extra_flags)) {
  742. LOG(L_ERR, "ERROR: run_failure_handlers: fake_req failed\n");
  743. return 0;
  744. }
  745. /* fake also the env. conforming to the fake msg */
  746. faked_env( t, &faked_req);
  747. /* DONE with faking ;-) -> run the failure handlers */
  748. if (unlikely(has_tran_tmcbs( t, TMCB_ON_FAILURE)) ) {
  749. run_trans_callbacks( TMCB_ON_FAILURE, t, &faked_req, rpl, code);
  750. }
  751. if (t->on_negative) {
  752. /* avoid recursion -- if failure_route forwards, and does not
  753. * set next failure route, failure_route will not be reentered
  754. * on failure */
  755. on_failure = t->on_negative;
  756. t->on_negative=0;
  757. reset_static_buffer();
  758. /* run a reply_route action if some was marked */
  759. init_run_actions_ctx(&ra_ctx);
  760. if (run_actions(&ra_ctx, failure_rt.rlist[on_failure], &faked_req)<0)
  761. LOG(L_ERR, "ERROR: run_failure_handlers: Error in do_action\n");
  762. }
  763. /* restore original environment and free the fake msg */
  764. faked_env( t, 0);
  765. free_faked_req(&faked_req,t);
  766. /* if failure handler changed flag, update transaction context */
  767. shmem_msg->flags = faked_req.flags;
  768. return 1;
  769. }
  770. /* 401, 407, 415, 420, and 484 have priority over the other 4xx*/
  771. inline static short int get_4xx_prio(unsigned char xx)
  772. {
  773. switch(xx){
  774. case 1:
  775. case 7:
  776. case 15:
  777. case 20:
  778. case 84:
  779. return xx;
  780. break;
  781. }
  782. return 100+xx;
  783. }
  784. /* returns response priority, lower number => highest prio
  785. *
  786. * responses priority val
  787. * 0-99 32000+response (special)
  788. * 1xx 11000+response (special)
  789. * 700-999 10000+response (very low)
  790. * 5xx 5000+xx (low)
  791. * 4xx 4000+xx
  792. * 3xx 3000+xx
  793. * 6xx 1000+xx (high)
  794. * 2xx 0000+xx (highest)
  795. */
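/* worked examples (using resp_class_prio[] and get_4xx_prio() above):
 * get_prio(603) = 1000 +   3 = 1003  (6xx preferred over 3xx/4xx/5xx)
 * get_prio(401) = 4000 +   1 = 4001  (401/407/415/420/484 keep their low xx)
 * get_prio(486) = 4000 + 186 = 4186  (other 4xx are pushed behind them)
 * get_prio(503) = 5000 +   3 = 5003
 * lower value == higher priority when t_pick_branch() compares branches */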
  796. inline static short int get_prio(unsigned int resp)
  797. {
  798. int class;
  799. int xx;
  800. class=resp/100;
  801. if (class<7){
  802. xx=resp%100;
  803. return resp_class_prio[class]+((class==4)?get_4xx_prio(xx):xx);
  804. }
  805. return 10000+resp; /* unknown response class => return very low prio */
  806. }
  807. /* select a branch for forwarding; returns:
  808. * 0..X ... branch number
  809. * -1 ... error
  810. * -2 ... can't decide yet -- incomplete branches present
  811. */
  812. int t_pick_branch(int inc_branch, int inc_code, struct cell *t, int *res_code)
  813. {
  814. int best_b, best_s, b;
  815. best_b=-1; best_s=0;
  816. for ( b=0; b<t->nr_of_outgoings ; b++ ) {
  817. /* "fake" for the currently processed branch */
  818. if (b==inc_branch) {
  819. if (get_prio(inc_code)<get_prio(best_s)) {
  820. best_b=b;
  821. best_s=inc_code;
  822. }
  823. continue;
  824. }
  825. /* skip 'empty branches' */
  826. if (!t->uac[b].request.buffer) continue;
  827. /* there is still an unfinished UAC transaction; wait now! */
  828. if ( t->uac[b].last_received<200 )
  829. return -2;
  830. /* if reply is null => t_send_branch "faked" reply, skip over it */
  831. if ( t->uac[b].reply &&
  832. get_prio(t->uac[b].last_received)<get_prio(best_s) ) {
  833. best_b =b;
  834. best_s = t->uac[b].last_received;
  835. }
  836. } /* find lowest branch */
  837. *res_code=best_s;
  838. return best_b;
  839. }
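/* e.g. for final codes 486, 408 and 503 on three branches, the 408 branch
 * wins (get_prio: 4108 < 4186 < 5003); any branch still below 200 makes
 * the function return -2 ("can't decide yet") instead */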
  840. /* flag indicating whether it is requested
  841. * to drop the already saved replies or not */
  842. static unsigned char drop_replies;
  843. /* This is the neurological point of reply processing -- called
  844. * from within a REPLY_LOCK, t_should_relay_response decides
  845. * how a reply shall be processed and how transaction state is
  846. * affected.
  847. *
  848. * Checks if the new reply (with new_code status) should be sent or not
  849. * based on the current
  850. * transaction status.
  851. * Returns - branch number (0,1,...) which should be relayed
  852. * -1 if nothing to be relayed
  853. */
  854. static enum rps t_should_relay_response( struct cell *Trans , int new_code,
  855. int branch , int *should_store, int *should_relay,
  856. branch_bm_t *cancel_bitmap, struct sip_msg *reply )
  857. {
  858. int branch_cnt;
  859. int picked_branch;
  860. int picked_code;
  861. int new_branch;
  862. int inv_through;
  863. int extra_flags;
  864. int i;
  865. /* note: this code never lets replies to CANCEL go through;
  866. we generate always a local 200 for CANCEL; 200s are
  867. not relayed because it's not an INVITE transaction;
  868. >= 300 are not relayed because 200 was already sent
  869. out
  870. */
  871. DBG("->>>>>>>>> T_code=%d, new_code=%d\n",Trans->uas.status,new_code);
  872. inv_through=new_code>=200 && new_code<300 && is_invite(Trans);
  873. /* if final response sent out, allow only INVITE 2xx */
  874. if ( Trans->uas.status >= 200 ) {
  875. if (inv_through) {
  876. DBG("DBG: t_should_relay_response: 200 INV after final sent\n");
  877. *should_store=0;
  878. Trans->uac[branch].last_received=new_code;
  879. *should_relay=branch;
  880. return RPS_PUSHED_AFTER_COMPLETION;
  881. }
  882. /* except the exception above, too late messages will
  883. be discarded */
  884. goto discard;
  885. }
  886. /* if final response received at this branch, allow only INVITE 2xx */
  887. if (Trans->uac[branch].last_received>=200
  888. && !(inv_through && Trans->uac[branch].last_received<300)) {
  889. /* don't report on retransmissions */
  890. if (Trans->uac[branch].last_received==new_code) {
  891. DBG("DEBUG: final reply retransmission\n");
  892. goto discard;
  893. }
  894. /* if we FR-timed-out and faked a local 408 and a 487 came, or we
  895. * faked a CANCEL on a non-replied branch, don't
  896. * report on it either */
  897. if ((Trans->uac[branch].last_received==487) ||
  898. (Trans->uac[branch].last_received==408 && new_code==487)) {
  899. DBG("DEBUG: %d came for a %d branch (ignored)\n",
  900. new_code, Trans->uac[branch].last_received);
  901. goto discard;
  902. }
  903. /* this, however, looks like a very strange status rewrite attempt;
  904. * report on it */
  905. LOG(L_ERR, "ERROR: t_should_relay_response: status rewrite by UAS: "
  906. "stored: %d, received: %d\n",
  907. Trans->uac[branch].last_received, new_code );
  908. goto discard;
  909. }
  910. /* no final response sent yet */
  911. /* negative replies subject to fork picking */
  912. if (new_code >=300 ) {
  913. Trans->uac[branch].last_received=new_code;
  914. /* if all_final return lowest */
  915. picked_branch=t_pick_branch(branch,new_code, Trans, &picked_code);
  916. if (picked_branch==-2) { /* branches open yet */
  917. *should_store=1;
  918. *should_relay=-1;
  919. if (new_code>=600 && new_code<=699){
  920. if (!(Trans->flags & T_6xx)){
  921. /* cancel only the first time we get a 6xx */
  922. which_cancel(Trans, cancel_bitmap);
  923. Trans->flags|=T_6xx;
  924. }
  925. }
  926. return RPS_STORE;
  927. }
  928. if (picked_branch==-1) {
  929. LOG(L_CRIT, "ERROR: t_should_relay_response: lowest==-1\n");
  930. goto error;
  931. }
  932. /* no more pending branches -- try if that changes after
  933. a callback; save branch count to be able to determine
  934. later if new branches were initiated */
  935. branch_cnt=Trans->nr_of_outgoings;
  936. /* also append the current reply to the transaction to
  937. * make it available in failure routes - a kind of "fake"
  938. * save of the final reply per branch */
  939. Trans->uac[branch].reply = reply;
  940. Trans->flags&=~T_6xx; /* clear the 6xx flag , we want to
  941. allow new branches from the failure route */
  942. drop_replies = 0;
  943. /* run ON_FAILURE handlers ( route and callbacks) */
  944. if (unlikely(has_tran_tmcbs( Trans, TMCB_ON_FAILURE_RO|TMCB_ON_FAILURE)
  945. || Trans->on_negative )) {
  946. extra_flags=
  947. ((Trans->uac[picked_branch].request.flags & F_RB_TIMEOUT)?
  948. FL_TIMEOUT:0) |
  949. ((Trans->uac[picked_branch].request.flags & F_RB_REPLIED)?
  950. FL_REPLIED:0);
  951. run_failure_handlers( Trans, Trans->uac[picked_branch].reply,
  952. picked_code, extra_flags);
  953. if (unlikely(drop_replies)) {
  954. /* drop all the replies that we have already saved */
  955. for (i=0; i<branch_cnt; i++) {
  956. if (Trans->uac[i].reply &&
  957. (Trans->uac[i].reply != FAKED_REPLY) &&
  958. (Trans->uac[i].reply->msg_flags & FL_SHM_CLONE))
  959. /* we have to drop the reply which is already in shm mem */
  960. sip_msg_free(Trans->uac[i].reply);
  961. Trans->uac[i].reply = 0;
  962. }
  963. /* make sure that the selected reply is not relayed even if
  964. there is not any new branch added -- should not happen */
  965. picked_branch = -1;
  966. }
  967. }
  968. /* now reset it; after the failure logic, the reply may
  969. * not be stored any more and we don't want to keep into
  970. * transaction some broken reference */
  971. Trans->uac[branch].reply = 0;
  972. /* check whether the callback perhaps replied the transaction; this also
  973. covers the case in which a transaction is replied locally
  974. on CANCEL -- then it would make no sense to proceed to
  975. new branches below
  976. */
  977. if (Trans->uas.status >= 200) {
  978. *should_store=0;
  979. *should_relay=-1;
  980. /* this might deserve an improvement -- if something
  981. was already replied, it was put on wait and then,
  982. returning RPS_COMPLETED will make t_on_reply
  983. put it on wait again; perhaps splitting put_on_wait
  984. from send_reply or a new RPS_ code would be healthy
  985. */
  986. return RPS_COMPLETED;
  987. }
  988. /* look if the callback/failure_route introduced new branches ... */
  989. if (branch_cnt<Trans->nr_of_outgoings){
  990. /* the new branches might be already "finished" => we
  991. * must use t_pick_branch again */
  992. new_branch=t_pick_branch((drop_replies==0)?
  993. branch :
  994. -1, /* make sure we do not pick
  995. the current branch */
  996. new_code,
  997. Trans,
  998. &picked_code);
  999. if (new_branch<0){
  1000. if (likely(drop_replies==0)) {
  1001. if (new_branch==-2) { /* branches open yet */
  1002. *should_store=1;
  1003. *should_relay=-1;
  1004. return RPS_STORE;
  1005. }
  1006. /* error, use the old picked_branch */
  1007. } else {
  1008. if (new_branch==-2) { /* branches open yet */
  1009. /* we are not allowed to relay the reply */
  1010. *should_store=0;
  1011. *should_relay=-1;
  1012. return RPS_DISCARDED;
  1013. } else {
  1014. /* There are no open branches,
  1015. and all the newly created branches failed
  1016. as well. We are not allowed to send back
  1017. the previously picked-up branch, thus,
  1018. let us reply with an error instead. */
  1019. goto branches_failed;
  1020. }
  1021. }
  1022. }else{
  1023. /* found a new_branch */
  1024. picked_branch=new_branch;
  1025. }
  1026. } else if (unlikely(drop_replies)) {
  1027. /* Either the script writer did not add new branches
  1028. after calling t_drop_replies(), or tm was unable
  1029. to add the new branches to the transaction. */
  1030. goto branches_failed;
  1031. }
  1032. /* really no more pending branches -- return lowest code */
  1033. *should_store=0;
  1034. *should_relay=picked_branch;
  1035. /* we dont need 'which_cancel' here -- all branches
  1036. known to have completed */
  1037. /* which_cancel( Trans, cancel_bitmap ); */
  1038. return RPS_COMPLETED;
  1039. }
  1040. /* not >=300 ... it must be 2xx or provisional 1xx */
  1041. if (new_code>=100) {
  1042. #ifdef WITH_AS_SUPPORT
  1043. /* need a copy of the message for ACK generation */
  1044. *should_store = (inv_through && is_local(Trans) &&
  1045. (Trans->uac[branch].last_received < 200) &&
  1046. (Trans->flags & T_NO_AUTO_ACK)) ? 1 : 0;
  1047. #else
  1048. *should_store=0;
  1049. #endif
  1050. /* 1xx and 2xx except 100 will be relayed */
  1051. Trans->uac[branch].last_received=new_code;
  1052. *should_relay= new_code==100? -1 : branch;
  1053. if (new_code>=200 ) {
  1054. which_cancel( Trans, cancel_bitmap );
  1055. return RPS_COMPLETED;
  1056. } else return RPS_PROVISIONAL;
  1057. }
  1058. error:
  1059. /* reply_status didn't match -- it must be something weird */
  1060. LOG(L_CRIT, "ERROR: Oh my gooosh! We don't know whether to relay %d\n",
  1061. new_code);
  1062. discard:
  1063. *should_store=0;
  1064. *should_relay=-1;
  1065. return RPS_DISCARDED;
  1066. branches_failed:
  1067. *should_store=0;
  1068. *should_relay=-1;
  1069. /* We have hopefully set tm_error in failure_route when
  1070. the branches failed. If not, reply with E_UNSPEC */
  1071. if ((kill_transaction_unsafe(Trans,
  1072. tm_error ? tm_error : E_UNSPEC)
  1073. ) <=0 ){
  1074. LOG(L_ERR, "ERROR: t_should_relay_response: "
  1075. "reply generation failed\n");
  1076. }
  1077. return RPS_COMPLETED;
  1078. }
  1079. /* Retransmits the last sent inbound reply.
  1080. * input: t == transaction whose last sent reply should be retransmitted
  1081. * Returns -1 - error
  1082. * 1 - OK
  1083. */
  1084. int t_retransmit_reply( struct cell *t )
  1085. {
  1086. static char b[BUF_SIZE];
  1087. int len;
  1088. /* first check if we managed to resolve topmost Via -- if
  1089. not yet, don't try to retransmit
  1090. */
  1091. /*
  1092. response.dst.send_sock might be unset if the process that created
  1093. the original transaction has not finished initialising the
  1094. retransmission buffer (see t_newtran/ init_rb).
  1095. If reply_to_via is set and via contains a host name (and not an ip)
  1096. the chances for this increase a lot.
  1097. */
  1098. if (!t->uas.response.dst.send_sock) {
  1099. LOG(L_WARN, "WARNING: t_retransmit_reply: "
  1100. "no resolved dst to retransmit\n");
  1101. return -1;
  1102. }
  1103. /* we need to lock the transaction as messages from
  1104. upstream may change it continuously
  1105. */
  1106. LOCK_REPLIES( t );
  1107. if (!t->uas.response.buffer) {
  1108. DBG("DBG: t_retransmit_reply: nothing to retransmit\n");
  1109. goto error;
  1110. }
  1111. len=t->uas.response.buffer_len;
  1112. if ( len==0 || len>BUF_SIZE ) {
  1113. DBG("DBG: t_retransmit_reply: "
  1114. "zero length or too big to retransmit: %d\n", len);
  1115. goto error;
  1116. }
  1117. memcpy( b, t->uas.response.buffer, len );
  1118. UNLOCK_REPLIES( t );
  1119. SEND_PR_BUFFER( & t->uas.response, b, len );
  1120. #ifdef TMCB_ONSEND
  1121. if (unlikely(has_tran_tmcbs(t, TMCB_RESPONSE_SENT))){
  1122. /* we don't know if it's a retransmission of a local reply or a
  1123. * forwarded reply */
  1124. run_onsend_callbacks(TMCB_RESPONSE_SENT, &t->uas.response, 0, 0,
  1125. TMCB_RETR_F);
  1126. }
  1127. #endif
  1128. DBG("DEBUG: reply retransmitted. buf=%p: %.9s..., shmem=%p: %.9s\n",
  1129. b, b, t->uas.response.buffer, t->uas.response.buffer );
  1130. return 1;
  1131. error:
  1132. UNLOCK_REPLIES(t);
  1133. return -1;
  1134. }
  1135. int t_reply( struct cell *t, struct sip_msg* p_msg, unsigned int code,
  1136. char * text )
  1137. {
  1138. return _reply( t, p_msg, code, text, 1 /* lock replies */ );
  1139. }
  1140. int t_reply_unsafe( struct cell *t, struct sip_msg* p_msg, unsigned int code,
  1141. char * text )
  1142. {
  1143. return _reply( t, p_msg, code, text, 0 /* don't lock replies */ );
  1144. }
  1145. void set_final_timer( struct cell *t )
  1146. {
  1147. start_final_repl_retr(t);
  1148. put_on_wait(t);
  1149. }
  1150. void cleanup_uac_timers( struct cell *t )
  1151. {
  1152. int i;
  1153. /* reset FR/retransmission timers */
  1154. for (i=0; i<t->nr_of_outgoings; i++ ){
  1155. stop_rb_timers(&t->uac[i].request);
  1156. }
  1157. DBG("DEBUG: cleanup_uac_timers: RETR/FR timers reset\n");
  1158. }
  1159. static int store_reply( struct cell *trans, int branch, struct sip_msg *rpl)
  1160. {
  1161. # ifdef EXTRA_DEBUG
  1162. if (trans->uac[branch].reply) {
  1163. LOG(L_ERR, "ERROR: replacing stored reply; aborting\n");
  1164. abort();
  1165. }
  1166. # endif
  1167. /* when we later do things such as challenge aggregation,
  1168. we should parse the message here before we conserve
  1169. it in shared memory; -jiri
  1170. */
  1171. if (rpl==FAKED_REPLY)
  1172. trans->uac[branch].reply=FAKED_REPLY;
  1173. else
  1174. trans->uac[branch].reply = sip_msg_cloner( rpl, 0 );
  1175. if (! trans->uac[branch].reply ) {
  1176. LOG(L_ERR, "ERROR: store_reply: can't alloc' clone memory\n");
  1177. return 0;
  1178. }
  1179. return 1;
  1180. }
  1181. /* returns the number of authenticate replies (401 and 407) received so far
  1182. * (FAKED_REPLYes are excluded)
  1183. * It must be called with the REPLY_LOCK held */
  1184. inline static int auth_reply_count(struct cell *t, struct sip_msg* crt_reply)
  1185. {
  1186. int count;
  1187. int r;
  1188. count=0;
  1189. if (crt_reply && (crt_reply!=FAKED_REPLY) &&
  1190. (crt_reply->REPLY_STATUS ==401 || crt_reply->REPLY_STATUS ==407))
  1191. count=1;
  1192. for (r=0; r<t->nr_of_outgoings; r++){
  1193. if (t->uac[r].reply && (t->uac[r].reply!=FAKED_REPLY) &&
  1194. (t->uac[r].last_received==401 || t->uac[r].last_received==407))
  1195. count++;
  1196. }
  1197. return count;
  1198. }
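/* build the outgoing reply buffer for an aggregated 401/407: the
 * WWW-Authenticate / Proxy-Authenticate headers of all stored 401/407
 * branch replies are temporarily attached to the request as reply lumps,
 * the buffer is built, and the added lumps are removed again */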
  1199. /* must be called with the REPLY_LOCK held */
  1200. inline static char* reply_aggregate_auth(int code, char* txt, str* new_tag,
  1201. struct cell* t, unsigned int* res_len,
  1202. struct bookmark* bm)
  1203. {
  1204. int r;
  1205. struct hdr_field* hdr;
  1206. struct lump_rpl** first;
  1207. struct lump_rpl** crt;
  1208. struct lump_rpl* lst;
  1209. struct lump_rpl* lst_end;
  1210. struct sip_msg* req;
  1211. char* buf;
  1212. first=0;
  1213. lst_end=0;
  1214. req=t->uas.request;
  1215. for (r=0; r<t->nr_of_outgoings; r++){
  1216. if (t->uac[r].reply && (t->uac[r].reply!=FAKED_REPLY) &&
  1217. (t->uac[r].last_received==401 || t->uac[r].last_received==407)){
  1218. for (hdr=t->uac[r].reply->headers; hdr; hdr=hdr->next){
  1219. if (hdr->type==HDR_WWW_AUTHENTICATE_T ||
  1220. hdr->type==HDR_PROXY_AUTHENTICATE_T){
  1221. crt=add_lump_rpl2(req, hdr->name.s, hdr->len,
  1222. LUMP_RPL_HDR|LUMP_RPL_NODUP|LUMP_RPL_NOFREE);
  1223. if (crt==0){
  1224. /* some kind of error, better stop */
  1225. LOG(L_ERR, "ERROR: tm:reply_aggregate_auth:"
  1226. " add_lump_rpl2 failed\n");
  1227. goto skip;
  1228. }
  1229. lst_end=*crt;
  1230. if (first==0) first=crt;
  1231. }
  1232. }
  1233. }
  1234. }
  1235. skip:
  1236. buf=build_res_buf_from_sip_req(code, txt, new_tag, req, res_len, bm);
  1237. /* clean the added lumps */
  1238. if (first){
  1239. lst=*first;
  1240. *first=lst_end->next; /* "detach" the list of added rpl_lumps */
  1241. lst_end->next=0; /* terminate lst */
  1242. del_nonshm_lump_rpl(&lst);
  1243. if (lst){
  1244. LOG(L_CRIT, "BUG: tm: repply_aggregate_auth: rpl_lump list"
  1245. "contains shm alloc'ed lumps\n");
  1246. abort();
  1247. }
  1248. }
  1249. return buf;
  1250. }
/* this is the code which decides what and when shall be relayed
   upstream; note well -- it assumes it is entered locked with
   REPLY_LOCK and it returns unlocked!
   If do_put_on_wait==1 and this is the final reply, the transaction
   wait timer will be started (put_on_wait(t)).
*/
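/* On success the outbound reply (if any) is built, copied to shared
 * memory and sent from a private buffer, and the RPS_* status obtained
 * from t_should_relay_response() is returned; on an internal error a 500
 * is generated via t_reply_unsafe() and RPS_ERROR is returned (in both
 * cases the REPLY_LOCK is released before returning). */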
enum rps relay_reply( struct cell *t, struct sip_msg *p_msg, int branch,
    unsigned int msg_status, branch_bm_t *cancel_bitmap, int do_put_on_wait )
{
    int relay;
    int save_clone;
    char *buf;
    /* length of outbound reply */
    unsigned int res_len;
    int relayed_code;
    struct sip_msg *relayed_msg;
    struct sip_msg *reply_bak;
    struct bookmark bm;
    int totag_retr;
    enum rps reply_status;
    /* retransmission structure of outbound reply and request */
    struct retr_buf *uas_rb;
    str* to_tag;
#ifdef TMCB_ONSEND
    struct tmcb_params onsend_params;
#endif

    /* keep compiler warnings about use of uninit vars silent */
    res_len=0;
    buf=0;
    relayed_msg=0;
    relayed_code=0;
    totag_retr=0;

    /* remember what was sent upstream to know whether we are
     * forwarding a first final reply or not */

    /* *** store and relay message as needed *** */
    reply_status = t_should_relay_response(t, msg_status, branch,
        &save_clone, &relay, cancel_bitmap, p_msg );
    DBG("DEBUG: relay_reply: branch=%d, save=%d, relay=%d\n",
        branch, save_clone, relay );

    /* store the message if needed */
    if (save_clone) /* save for later use, typically branch picking */
    {
        if (!store_reply( t, branch, p_msg ))
            goto error01;
    }

    uas_rb = & t->uas.response;
    if (relay >= 0 ) {
        /* initialize sockets for outbound reply */
        uas_rb->activ_type=msg_status;
        /* only messages known to be relayed immediately will be
         * called on; we do not invoke this callback on messages
         * stored in shmem -- they are fixed and one cannot change them
         * anyway */
        if (unlikely(msg_status<300 && branch==relay
        && has_tran_tmcbs(t,TMCB_RESPONSE_FWDED)) ) {
            run_trans_callbacks( TMCB_RESPONSE_FWDED, t, t->uas.request,
                                    p_msg, msg_status );
        }
        /* try building the outbound reply from either the current
         * or a stored message */
        relayed_msg = branch==relay ? p_msg : t->uac[relay].reply;
        if (relayed_msg==FAKED_REPLY) {
            relayed_code = branch==relay
                ? msg_status : t->uac[relay].last_received;
            /* use the to-tag from the original request or, if it has none,
             * generate a new one */
            if (relayed_code>=180 && t->uas.request->to
                    && (get_to(t->uas.request)->tag_value.s==0
                    || get_to(t->uas.request)->tag_value.len==0)) {
                calc_crc_suffix( t->uas.request, tm_tag_suffix );
                to_tag=&tm_tag;
            } else {
                to_tag=0;
            }
            if (cfg_get(tm, tm_cfg, tm_aggregate_auth) &&
                    (relayed_code==401 || relayed_code==407) &&
                    (auth_reply_count(t, p_msg)>1)){
                /* aggregate 401 & 407 www & proxy authenticate headers in
                 * a "FAKE" reply */
                /* temporarily "store" the current reply */
                reply_bak=t->uac[branch].reply;
                t->uac[branch].reply=p_msg;
                buf=reply_aggregate_auth(relayed_code,
                        error_text(relayed_code), to_tag, t, &res_len, &bm);
                /* revert the temporary "store" above */
                t->uac[branch].reply=reply_bak;
            }else{
                buf = build_res_buf_from_sip_req( relayed_code,
                        error_text(relayed_code), to_tag,
                        t->uas.request, &res_len, &bm );
            }
        } else {
            relayed_code=relayed_msg->REPLY_STATUS;
            if (relayed_code==503){
                /* replace a final 503 with a 500:
                 * generate a "FAKE" reply and a new to-tag (for easier
                 * debugging) */
                relayed_msg=FAKED_REPLY;
                if ((get_to(t->uas.request)->tag_value.s==0 ||
                        get_to(t->uas.request)->tag_value.len==0)) {
                    calc_crc_suffix( t->uas.request, tm_tag_suffix );
                    to_tag=&tm_tag;
                } else {
                    to_tag=0;
                }
                /* don't relay a 503, replace it w/ 500 (rfc3261) */
                buf=build_res_buf_from_sip_req(500, error_text(relayed_code),
                        to_tag, t->uas.request, &res_len, &bm);
                relayed_code=500;
            }else if (cfg_get(tm, tm_cfg, tm_aggregate_auth) &&
                    (relayed_code==401 || relayed_code==407) &&
                    (auth_reply_count(t, p_msg)>1)){
                /* aggregate 401 & 407 www & proxy authenticate headers in
                 * a "FAKE" reply */
                if ((get_to(t->uas.request)->tag_value.s==0 ||
                        get_to(t->uas.request)->tag_value.len==0)) {
                    calc_crc_suffix( t->uas.request, tm_tag_suffix );
                    to_tag=&tm_tag;
                } else {
                    to_tag=0;
                }
                /* temporarily "store" the current reply */
                reply_bak=t->uac[branch].reply;
                t->uac[branch].reply=p_msg;
                buf=reply_aggregate_auth(relayed_code,
                        error_text(relayed_code), to_tag, t, &res_len, &bm);
                /* revert the temporary "store" above */
                t->uac[branch].reply=reply_bak;
                relayed_msg=FAKED_REPLY; /* mark the relayed_msg as a "FAKE" */
            }else{
                buf = build_res_buf_from_sip_res( relayed_msg, &res_len );
                /* if we build a message from shmem, we need to remove
                   via delete lumps which are now stored in the shmem-ed
                   structure
                */
                if (branch!=relay) {
                    free_via_clen_lump(&relayed_msg->add_rm);
                }
            }
        }
        update_reply_stats( relayed_code );
        if (!buf) {
            LOG(L_ERR, "ERROR: relay_reply: "
                "no mem for outbound reply buffer\n");
            goto error02;
        }

        /* attempt to copy the message to UAS's shmem:
           - copy the to-tag for ACK matching as well
           - allocate a little bit more for provisional replies, as
             larger messages are likely to follow and we will be
             able to reuse the memory frag
        */
        uas_rb->buffer = (char*)shm_resize( uas_rb->buffer, res_len +
            (msg_status<200 ? REPLY_OVERBUFFER_LEN : 0));
        if (!uas_rb->buffer) {
            LOG(L_ERR, "ERROR: relay_reply: cannot alloc reply shmem\n");
            goto error03;
        }
        uas_rb->buffer_len = res_len;
        memcpy( uas_rb->buffer, buf, res_len );
        if (relayed_msg==FAKED_REPLY) { /* to-tags for local replies */
            update_local_tags(t, &bm, uas_rb->buffer, buf);
            t_stats_replied_locally();
        }

        /* update the status ... */
        t->uas.status = relayed_code;
        t->relayed_reply_branch = relay;

        if ( unlikely(is_invite(t) && relayed_msg!=FAKED_REPLY
        && relayed_code>=200 && relayed_code < 300
        && has_tran_tmcbs( t,
                TMCB_RESPONSE_OUT|TMCB_E2EACK_IN|TMCB_E2EACK_RETR_IN))) {
            totag_retr=update_totag_set(t, relayed_msg);
        }
    } /* if relay ... */
    UNLOCK_REPLIES( t );

    /* send it now (from the private buffer) */
    if (relay >= 0) {
        /* Set the retransmission timer before the reply is sent out to
         * avoid race conditions.
         *
         * Call start_final_repl_retr/put_on_wait() only if we really send
         * out the reply. It can happen that the reply has already been sent
         * from failure_route or from a callback and the timer has already
         * been started. (Miklos)
         */
        if (reply_status == RPS_COMPLETED) {
            start_final_repl_retr(t);
        }
        if (SEND_PR_BUFFER( uas_rb, buf, res_len )>=0){
            if (unlikely(!totag_retr && has_tran_tmcbs(t, TMCB_RESPONSE_OUT))){
                run_trans_callbacks( TMCB_RESPONSE_OUT, t, t->uas.request,
                    relayed_msg, relayed_code);
            }
#ifdef TMCB_ONSEND
            if (unlikely(has_tran_tmcbs(t, TMCB_RESPONSE_SENT))){
                INIT_TMCB_ONSEND_PARAMS(onsend_params, t->uas.request,
                        relayed_msg, uas_rb, &uas_rb->dst, buf,
                        res_len,
                        (relayed_msg==FAKED_REPLY)?TMCB_LOCAL_F:0,
                        uas_rb->branch, relayed_code);
                run_onsend_callbacks2(TMCB_RESPONSE_SENT, t, &onsend_params);
            }
#endif
        }

        /* Call put_on_wait() only if we really send out
         * the reply. It can happen that the reply has already been sent from
         * failure_route or from a callback and the timer has already been
         * started. (Miklos)
         *
         * put_on_wait() should always be called after we have finished
         * dealing with t, because otherwise the wait timer might fire before
         * we finish with t, and by the time we want to use t it could
         * already be deleted. This could happen only if this function is
         * called from a timer (fr_timer) (the timer doesn't refcnt) and the
         * timer allows quick dels (timer_allow_del()). --andrei
         */
        if (do_put_on_wait && (reply_status == RPS_COMPLETED)) {
            put_on_wait(t);
        }
        pkg_free( buf );
    }

    /* success */
    return reply_status;

error03:
    pkg_free( buf );
error02:
    if (save_clone) {
        if (t->uac[branch].reply!=FAKED_REPLY)
            sip_msg_free( t->uac[branch].reply );
        t->uac[branch].reply = NULL;
    }
error01:
    /* a serious error occurred -- try to send a 500 error reply; it will
       take care of the needed clean-ups */
    t_reply_unsafe( t, t->uas.request, 500, "Reply processing error" );
    *cancel_bitmap=0; /* t_reply_unsafe already canceled everything needed */
    UNLOCK_REPLIES(t);
    /* if (is_invite(t)) cancel_uacs( t, *cancel_bitmap, 0);
     * -- not needed, t_reply_unsafe took care of this */

    /* failure */
    return RPS_ERROR;
}
/* this is the "UAC" above the transaction layer; if a final reply
   is received, it triggers a callback; note well -- it assumes
   it is entered locked with REPLY_LOCK and it returns unlocked!
*/
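/* On error (the reply clone cannot be stored) the open branches are
 * canceled, the UAC timers are cleaned up, the transaction is put on
 * wait and RPS_ERROR is returned; otherwise the RPS_* status from
 * t_should_relay_response() is returned. */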
enum rps local_reply( struct cell *t, struct sip_msg *p_msg, int branch,
    unsigned int msg_status, branch_bm_t *cancel_bitmap)
{
    /* how to deal with replies for a local transaction */
    int local_store, local_winner;
    enum rps reply_status;
    struct sip_msg *winning_msg;
    int winning_code;
    int totag_retr;
    /* branch_bm_t cancel_bitmap; */

    /* keep the 'var might be used uninitialized' warnings silent */
    winning_msg=0;
    winning_code=0;
    totag_retr=0;

    *cancel_bitmap=0;

    reply_status=t_should_relay_response( t, msg_status, branch,
        &local_store, &local_winner, cancel_bitmap, p_msg );
    DBG("DEBUG: local_reply: branch=%d, save=%d, winner=%d\n",
        branch, local_store, local_winner );
    if (local_store) {
        if (!store_reply(t, branch, p_msg))
            goto error;
    }
    if (local_winner>=0) {
        winning_msg= branch==local_winner
            ? p_msg : t->uac[local_winner].reply;
        if (winning_msg==FAKED_REPLY) {
            t_stats_replied_locally();
            winning_code = branch==local_winner
                ? msg_status : t->uac[local_winner].last_received;
        } else {
            winning_code=winning_msg->REPLY_STATUS;
        }
        t->uas.status = winning_code;
        update_reply_stats( winning_code );
        if (unlikely(is_invite(t) && winning_msg!=FAKED_REPLY &&
                    winning_code>=200 && winning_code<300 &&
                    has_tran_tmcbs(t, TMCB_LOCAL_COMPLETED) )) {
            totag_retr=update_totag_set(t, winning_msg);
        }
    }
    UNLOCK_REPLIES(t);

    if (local_winner >= 0
        && cfg_get(tm, tm_cfg, pass_provisional_replies)
        && winning_code < 200) {
        /* no retransmission detection for provisional replies &
         * TMCB_LOCAL_RESPONSE_OUT */
        if (unlikely(has_tran_tmcbs(t, TMCB_LOCAL_RESPONSE_OUT) )) {
            run_trans_callbacks( TMCB_LOCAL_RESPONSE_OUT, t, 0,
                winning_msg, winning_code);
        }
    }

    if (local_winner>=0 && winning_code>=200 ) {
        DBG("DEBUG: local transaction completed\n");
        if (!totag_retr) {
            if (unlikely(has_tran_tmcbs(t,TMCB_LOCAL_COMPLETED) ))
                run_trans_callbacks( TMCB_LOCAL_COMPLETED, t, 0,
                    winning_msg, winning_code );
        }
    }
    return reply_status;

error:
    which_cancel(t, cancel_bitmap);
    UNLOCK_REPLIES(t);
    cleanup_uac_timers(t);
    if (get_cseq(p_msg)->method.len==INVITE_LEN
        && memcmp( get_cseq(p_msg)->method.s, INVITE, INVITE_LEN)==0)
        cancel_uacs( t, *cancel_bitmap, F_CANCEL_B_KILL);
    *cancel_bitmap=0; /* we've already taken care of everything */
    put_on_wait(t);
    return RPS_ERROR;
}
/* This function is called whenever a reply for our module is received;
 * we need to register this function on module initialization;
 * Returns :   0 - core router stops
 *             1 - core router relays statelessly
 */
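/* Overall flow (summarizing the code below): match the transaction,
 * handle replies to locally generated CANCELs, stop/adjust the branch
 * timers, ACK negative INVITE replies, run the on_reply route, apply the
 * optional 503 blacklist / DNS failover handling and finally pass the
 * reply to local_reply() or relay_reply() under REPLY_LOCK. */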
int reply_received( struct sip_msg *p_msg )
{
    int msg_status;
    int last_uac_status;
    char *ack;
    unsigned int ack_len;
    int branch;
    /* has the transaction completed now and we need to clean-up? */
    int reply_status;
    branch_bm_t cancel_bitmap;
    struct ua_client *uac;
    struct cell *t;
    struct dest_info lack_dst;
    avp_list_t* backup_user_from, *backup_user_to;
    avp_list_t* backup_domain_from, *backup_domain_to;
    avp_list_t* backup_uri_from, *backup_uri_to;
    struct run_act_ctx ra_ctx;
#ifdef USE_DNS_FAILOVER
    int branch_ret;
    int prev_branch;
#endif
#ifdef USE_DST_BLACKLIST
    int blst_503_timeout;
    struct dest_info src;
    struct hdr_field* hf;
#endif
#ifdef TMCB_ONSEND
    struct tmcb_params onsend_params;
#endif

    /* make sure we know the associated transaction ... */
    if (t_check( p_msg , &branch )==-1)
        goto trans_not_found;
    /* ... if there is none, tell the core router to fwd statelessly */
    t=get_t();
    if ( (t==0)||(t==T_UNDEFINED))
        goto trans_not_found;

    cancel_bitmap=0;
    msg_status=p_msg->REPLY_STATUS;

    uac=&t->uac[branch];
    DBG("DEBUG: reply_received: org. status uas=%d, "
        "uac[%d]=%d local=%d is_invite=%d\n",
        t->uas.status, branch, uac->last_received,
        is_local(t), is_invite(t));
    last_uac_status=uac->last_received;

    /* is it a reply to a CANCEL ... ? */
    if (get_cseq(p_msg)->method.len==CANCEL_LEN
        && memcmp( get_cseq(p_msg)->method.s, CANCEL, CANCEL_LEN)==0
        /* ... which is not e2e ? ... */
        && is_invite(t) ) {
        /* ... then just stop the timers */
        if ( msg_status >= 200 )
            stop_rb_timers(&uac->local_cancel); /* stop retr & fr */
        else
            stop_rb_retr(&uac->local_cancel);   /* stop only retr */
        DBG("DEBUG: reply to local CANCEL processed\n");
        goto done;
    }

    if ( msg_status >= 200 ){
        /* stop the final response timer & retransmissions only on a final
         * response */
        stop_rb_timers(&uac->request);
        /* acknowledge negative INVITE replies (do it before the detailed
         * on_reply processing, which may take very long, e.g. if it
         * attempts to establish a TCP connection to a fail-over dst) */
        if (is_invite(t)) {
            if (msg_status >= 300) {
                ack = build_ack(p_msg, t, branch, &ack_len);
                if (ack) {
#ifdef TMCB_ONSEND
                    if (SEND_PR_BUFFER(&uac->request, ack, ack_len)>=0)
                        if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_SENT))){
                            INIT_TMCB_ONSEND_PARAMS(onsend_params,
                                    t->uas.request, p_msg, &uac->request,
                                    &uac->request.dst, ack, ack_len,
                                    TMCB_LOCAL_F, branch, TYPE_LOCAL_ACK);
                            run_onsend_callbacks2(TMCB_REQUEST_SENT, t,
                                    &onsend_params);
                        }
#else
                    SEND_PR_BUFFER(&uac->request, ack, ack_len);
#endif
                    shm_free(ack);
                }
            } else if (is_local(t) /*&& 200 <= msg_status < 300*/) {
                ack = build_local_ack(p_msg, t, branch, &ack_len, &lack_dst);
                if (ack) {
                    if (msg_send(&lack_dst, ack, ack_len)<0)
                        LOG(L_ERR, "Error while sending local ACK\n");
#ifdef TMCB_ONSEND
                    else if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_SENT))){
                        INIT_TMCB_ONSEND_PARAMS(onsend_params,
                                t->uas.request, p_msg, &uac->request,
                                &lack_dst, ack, ack_len, TMCB_LOCAL_F,
                                branch, TYPE_LOCAL_ACK);
                        run_onsend_callbacks2(TMCB_REQUEST_SENT, t,
                                &onsend_params);
                    }
#endif
#ifndef WITH_AS_SUPPORT
                    shm_free(ack);
#endif
                }
            }
        }
    }else{
        /* if the branch was already canceled, re-transmit or generate the
         * cancel.
         * TODO: check if it really makes sense to do it for non-INVITEs too */
        if (uac->request.flags & F_RB_CANCELED){
            if (uac->local_cancel.buffer_len){
                membar_read(); /* make sure we get the current value of
                                  local_cancel */
                /* re-transmit if the cancel is already built */
                DBG("tm: reply_received: branch CANCEL retransmit\n");
#ifdef TMCB_ONSEND
                if (SEND_BUFFER( &uac->local_cancel)>=0){
                    if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_SENT)))
                        run_onsend_callbacks(TMCB_REQUEST_SENT,
                                &uac->local_cancel,
                                0, 0, TMCB_LOCAL_F);
                }
#else
                SEND_BUFFER( &uac->local_cancel );
#endif
                /* retransmissions should already be started, so do nothing */
            }else if (atomic_cmpxchg_long((void*)&uac->local_cancel.buffer, 0,
                                        (long)BUSY_BUFFER)==0){
                /* try to rebuild it if empty (not set or marked as BUSY).
                 * if BUSY or set just exit, a cancel will be (or was) sent
                 * shortly on this branch */
                DBG("tm: reply_received: branch CANCEL created\n");
                cancel_branch(t, branch, F_CANCEL_B_FORCE_C);
            }
            goto done; /* nothing more to do */
        }
        if (is_invite(t)){
            /* stop only the retransmissions (and not fr) */
            stop_rb_retr(&uac->request);
        }else{
            /* non-INVITE: increase the retransmission interval (slow now) */
            switch_rb_retr_to_t2(&uac->request);
        }
    }
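
    /* the on_reply (onreply_route) block below runs with the transaction's
     * AVP lists temporarily installed and with the request flags mirrored
     * into the reply message context (and copied back afterwards) */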
    /* processing of the on_reply block */
    if (t->on_reply) {
        rmode=MODE_ONREPLY;
        /* transfer the transaction flags to the message context */
        if (t->uas.request) p_msg->flags=t->uas.request->flags;
        /* use the avp lists of the transaction */
        backup_uri_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI,
                &t->uri_avps_from );
        backup_uri_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI,
                &t->uri_avps_to );
        backup_user_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER,
                &t->user_avps_from );
        backup_user_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER,
                &t->user_avps_to );
        backup_domain_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN,
                &t->domain_avps_from );
        backup_domain_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN,
                &t->domain_avps_to );
        reset_static_buffer();
        init_run_actions_ctx(&ra_ctx);
        if (run_actions(&ra_ctx, onreply_rt.rlist[t->on_reply], p_msg)<0)
            LOG(L_ERR, "ERROR: on_reply processing failed\n");
        /* transfer the current message flags back to t */
        if (t->uas.request) t->uas.request->flags=p_msg->flags;
        /* restore the original avp lists */
        set_avp_list( AVP_TRACK_FROM | AVP_CLASS_URI, backup_uri_from );
        set_avp_list( AVP_TRACK_TO | AVP_CLASS_URI, backup_uri_to );
        set_avp_list( AVP_TRACK_FROM | AVP_CLASS_USER, backup_user_from );
        set_avp_list( AVP_TRACK_TO | AVP_CLASS_USER, backup_user_to );
        set_avp_list( AVP_TRACK_FROM | AVP_CLASS_DOMAIN, backup_domain_from );
        set_avp_list( AVP_TRACK_TO | AVP_CLASS_DOMAIN, backup_domain_to );
    }
#ifdef USE_DST_BLACKLIST
    /* temporarily add the source of a 503 reply to the blacklist */
    if (cfg_get(tm, tm_cfg, tm_blst_503)
        && cfg_get(core, core_cfg, use_dst_blacklist)
        && (msg_status==503)
    ){
        blst_503_timeout=cfg_get(tm, tm_cfg, tm_blst_503_default);
        if ((parse_headers(p_msg, HDR_RETRY_AFTER_F, 0)==0) &&
            (p_msg->parsed_flag & HDR_RETRY_AFTER_F)){
            for (hf=p_msg->headers; hf; hf=hf->next)
                if (hf->type==HDR_RETRY_AFTER_T){
                    /* found */
                    blst_503_timeout=(unsigned)(unsigned long)hf->parsed;
                    blst_503_timeout=MAX_unsigned(blst_503_timeout,
                        cfg_get(tm, tm_cfg, tm_blst_503_min));
                    blst_503_timeout=MIN_unsigned(blst_503_timeout,
                        cfg_get(tm, tm_cfg, tm_blst_503_max));
                    break;
                }
        }
        if (blst_503_timeout){
            src.send_sock=0;
            src.to=p_msg->rcv.src_su;
            src.id=p_msg->rcv.proto_reserved1;
            src.proto=p_msg->rcv.proto;
            dst_blacklist_add_to(BLST_503, &src, p_msg,
                                S_TO_TICKS(blst_503_timeout));
        }
    }
#endif /* USE_DST_BLACKLIST */
#ifdef USE_DNS_FAILOVER
    /* if this is a 503 reply and the destination resolves to more IPs,
     * add another branch/uac.
     * This code is outside LOCK_REPLIES() to minimize the time the
     * reply lock is held (the lock won't be held while sending the
     * message) */
    if (cfg_get(core, core_cfg, use_dns_failover) && (msg_status==503)) {
        branch_ret=add_uac_dns_fallback(t, t->uas.request, uac, 1);
        prev_branch=-1;
        while((branch_ret>=0) && (branch_ret!=prev_branch)){
            prev_branch=branch_ret;
            branch_ret=t_send_branch(t, branch_ret, t->uas.request , 0, 1);
        }
    }
#endif
    LOCK_REPLIES( t );
    if ( is_local(t) ) {
        reply_status=local_reply( t, p_msg, branch, msg_status,
            &cancel_bitmap );
        if (reply_status == RPS_COMPLETED) {
            /* no more UAC FR/RETR (even if a 2xx was received, there may
             * still be pending branches ...) */
            cleanup_uac_timers( t );
            if (is_invite(t)) cancel_uacs(t, cancel_bitmap, F_CANCEL_B_KILL);
            /* There is no need to call set_final_timer because we know
             * that the transaction is local */
            put_on_wait(t);
        }else if (cancel_bitmap){
            /* cancel everything, even non-INVITEs (e.g. in case of 6xx), use
             * cancel_b_method for canceling unreplied branches */
            cancel_uacs(t, cancel_bitmap, cfg_get(tm,tm_cfg, cancel_b_flags));
        }
    } else {
        reply_status=relay_reply( t, p_msg, branch, msg_status,
            &cancel_bitmap, 1 );
        if (reply_status == RPS_COMPLETED) {
            /* no more UAC FR/RETR (even if a 2xx was received, there may
             * still be pending branches ...) */
            cleanup_uac_timers( t );
            /* 2xx is a special case: we can have a COMPLETED request
             * with branches still open => we have to cancel them */
            if (is_invite(t) && cancel_bitmap)
                cancel_uacs( t, cancel_bitmap, F_CANCEL_B_KILL);
            /* FR for negative INVITEs, WAIT for anything else */
            /* The call to set_final_timer is embedded in relay_reply to avoid
             * race conditions when the reply is sent out and an ACK to stop
             * retransmissions arrives before the retransmission timer is
             * set. */
        }else if (cancel_bitmap){
            /* cancel everything, even non-INVITEs (e.g. in case of 6xx), use
             * cancel_b_method for canceling unreplied branches */
            cancel_uacs(t, cancel_bitmap, cfg_get(tm,tm_cfg, cancel_b_flags));
        }
    }
    uac->request.flags|=F_RB_REPLIED;

    if (reply_status==RPS_ERROR)
        goto done;

    /* update the FR/RETR timers on provisional replies */
    if (is_invite(t) && msg_status<200 &&
        ( cfg_get(tm, tm_cfg, restart_fr_on_each_reply) ||
            ( (last_uac_status<msg_status) &&
                ((msg_status>=180) || (last_uac_status==0)) )
        ) ) { /* provisional now */
        restart_rb_fr(& uac->request, t->fr_inv_timeout);
        uac->request.flags|=F_RB_FR_INV; /* mark fr_inv */
    } /* provisional replies */

done:
    /* we are done with the transaction, so unref it - the reference
     * was incremented by the t_check() function -bogdan */
    t_unref(p_msg);
    /* don't try to relay statelessly either on success
       (we forwarded statefully) or on error; on trouble,
       simply do nothing; that will make the other party
       retransmit; hopefully, we'll then be better off */
    return 0;

trans_not_found:
    /* the transaction context was not found */
    if (goto_on_sl_reply) {
        /* the script writer has a chance to decide whether to
         * forward the reply or not */
        init_run_actions_ctx(&ra_ctx);
        return run_actions(&ra_ctx, onreply_rt.rlist[goto_on_sl_reply], p_msg);
    } else {
        /* let the core forward the reply */
        return 1;
    }
}
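
/* builds and sends a reply with optional extra headers and body on
 * behalf of the transaction; note that it unrefs the transaction before
 * returning (see the comment near the UNREF() call below) */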
int t_reply_with_body( struct cell *trans, unsigned int code,
    char * text, char * body, char * new_header, char * to_tag )
{
    struct lump_rpl *hdr_lump;
    struct lump_rpl *body_lump;
    str s_to_tag;
    str rpl;
    int ret;
    struct bookmark bm;

    s_to_tag.s = to_tag;
    if(to_tag)
        s_to_tag.len = strlen(to_tag);
    else
        s_to_tag.len = 0;

    /* mark the transaction as replied */
    if (code>=200) set_kr(REQ_RPLD);

    /* add the lumps for new_header and for body (by bogdan) */
    if (new_header && strlen(new_header)) {
        hdr_lump = add_lump_rpl( trans->uas.request, new_header,
            strlen(new_header), LUMP_RPL_HDR );
        if ( !hdr_lump ) {
            LOG(L_ERR,"ERROR:tm:t_reply_with_body: cannot add hdr lump\n");
            goto error;
        }
    } else {
        hdr_lump = 0;
    }

    /* body lump */
    if(body && strlen(body)) {
        body_lump = add_lump_rpl( trans->uas.request, body, strlen(body),
            LUMP_RPL_BODY );
        if (body_lump==0) {
            LOG(L_ERR,"ERROR:tm:t_reply_with_body: cannot add body lump\n");
            goto error_1;
        }
    } else {
        body_lump = 0;
    }

    rpl.s = build_res_buf_from_sip_req(
        code, text, &s_to_tag,
        trans->uas.request, (unsigned int*)&rpl.len, &bm);

    /* since the msg (trans->uas.request) is a clone in shm memory, to avoid
     * a memory leak or crashing (lumps are created in private memory) I will
     * remove the lumps by myself here (bogdan) */
    if ( hdr_lump ) {
        unlink_lump_rpl( trans->uas.request, hdr_lump);
        free_lump_rpl( hdr_lump );
    }
    if( body_lump ) {
        unlink_lump_rpl( trans->uas.request, body_lump);
        free_lump_rpl( body_lump );
    }

    if (rpl.s==0) {
        LOG(L_ERR,"ERROR:tm:t_reply_with_body: failed in doing "
            "build_res_buf_from_sip_req()\n");
        goto error;
    }

    DBG("t_reply_with_body: buffer computed\n");
    /* frees 'rpl.s' ... no panic! */
    ret=_reply_light( trans, rpl.s, rpl.len, code, text,
        s_to_tag.s, s_to_tag.len, 1 /* lock replies */, &bm );
    /* this is an ugly hack -- the function caller may wish to continue with
     * the transaction and I unref; however, there is now only one use from
     * vm/fifo_vm_reply and I'm currently too lazy to export UNREF; -jiri
     */
    UNREF(trans);

    return ret;

error_1:
    if ( hdr_lump ) {
        unlink_lump_rpl( trans->uas.request, hdr_lump);
        free_lump_rpl( hdr_lump );
    }
error:
    return -1;
}
/* drops all the replies to make sure
 * that none of them is picked up again
 */
void t_drop_replies(void)
{
    /* It is too risky to free the replies that are in shm mem
       in the middle of the failure_route block, because other functions
       might need them as well. And it can also happen that the current
       reply is not yet in shm mem, we are just going to clone it. So it is
       better to set a flag and check it after failure_route has ended.
       (Miklos) */
    drop_replies = 1;
}
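
/* disabled (#if 0) str-based variant of t_reply_with_body */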
#if 0
static int send_reply(struct cell *trans, unsigned int code, str* text,
        str* body, str* headers, str* to_tag)
{
    struct lump_rpl *hdr_lump, *body_lump;
    str rpl;
    int ret;
    struct bookmark bm;

    /* mark the transaction as replied */
    if (code >= 200) set_kr(REQ_RPLD);

    /* add the lumps for new_header and for body (by bogdan) */
    if (headers && headers->len) {
        hdr_lump = add_lump_rpl(trans->uas.request, headers->s, headers->len,
                LUMP_RPL_HDR);
        if (!hdr_lump) {
            LOG(L_ERR, "send_reply: cannot add hdr lump\n");
            goto sr_error;
        }
    } else {
        hdr_lump = 0;
    }

    /* body lump */
    if (body && body->len) {
        body_lump = add_lump_rpl(trans->uas.request, body->s, body->len,
                LUMP_RPL_BODY);
        if (body_lump == 0) {
            LOG(L_ERR, "send_reply: cannot add body lump\n");
            goto sr_error_1;
        }
    } else {
        body_lump = 0;
    }

    /* We can safely zero-terminate the text here, because it is followed
     * by the next line in the received message
     */
    text->s[text->len] = '\0';
    rpl.s = build_res_buf_from_sip_req(code, text->s, to_tag,
            trans->uas.request, (unsigned int*)&rpl.len, &bm);

    /* since the msg (trans->uas.request) is a clone in shm memory, to avoid
     * a memory leak or crashing (lumps are created in private memory) I will
     * remove the lumps by myself here (bogdan) */
    if (hdr_lump) {
        unlink_lump_rpl(trans->uas.request, hdr_lump);
        free_lump_rpl(hdr_lump);
    }
    if (body_lump) {
        unlink_lump_rpl(trans->uas.request, body_lump);
        free_lump_rpl(body_lump);
    }

    if (rpl.s == 0) {
        LOG(L_ERR, "send_reply: failed in build_res_buf_from_sip_req\n");
        goto sr_error;
    }

    ret = _reply_light(trans, rpl.s, rpl.len, code, text->s, to_tag->s,
            to_tag->len, 1 /* lock replies */, &bm);
    /* this is an ugly hack -- the function caller may wish to continue with
     * the transaction and I unref; however, there is now only one use from
     * vm/fifo_vm_reply and I'm currently too lazy to export UNREF; -jiri
     */
    UNREF(trans);

    return ret;

sr_error_1:
    if (hdr_lump) {
        unlink_lump_rpl(trans->uas.request, hdr_lump);
        free_lump_rpl(hdr_lump);
    }
sr_error:
    return -1;
}
#endif
const char* rpc_reply_doc[2] = {
    "Reply transaction",
    0
};


/*
  Syntax:

  ":tm.reply:[response file]\n
  code\n
  reason\n
  trans_id\n
  to_tag\n
  [new headers]\n
  \n
  [Body]\n
  .\n
  \n"
 */
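/* The RPC parameters are read below in this order: reply code, reason
 * phrase, transaction id in "hash_index:label" form, to-tag, and the
 * optional extra headers and body. A hypothetical invocation (the exact
 * client syntax depends on the RPC transport in use) could look like:
 *   tm.reply 403 Forbidden 29071:1362052277 a6c8f.2 "" ""
 * where the trans_id and to-tag values above are purely illustrative. */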
void rpc_reply(rpc_t* rpc, void* c)
{
    int ret;
    struct cell *trans;
    unsigned int hash_index, label, code;
    str ti;
    char* reason, *body, *headers, *tag;

    if (rpc->scan(c, "d", &code) < 1) {
        rpc->fault(c, 400, "Reply code expected");
        return;
    }

    if (rpc->scan(c, "s", &reason) < 1) {
        rpc->fault(c, 400, "Reason phrase expected");
        return;
    }

    if (rpc->scan(c, "s", &ti.s) < 1) {
        rpc->fault(c, 400, "Transaction ID expected");
        return;
    }
    ti.len = strlen(ti.s);

    if (rpc->scan(c, "s", &tag) < 1) {
        rpc->fault(c, 400, "To tag expected");
        return;
    }

    if (rpc->scan(c, "s", &headers) < 0) return;
    if (rpc->scan(c, "s", &body) < 0) return;

    if(sscanf(ti.s, "%u:%u", &hash_index, &label) != 2) {
        ERR("Invalid trans_id (%s)\n", ti.s);
        rpc->fault(c, 400, "Invalid transaction ID");
        return;
    }
    DBG("hash_index=%u label=%u\n", hash_index, label);

    if( t_lookup_ident(&trans, hash_index, label) < 0 ) {
        ERR("Lookup failed\n");
        rpc->fault(c, 481, "No such transaction");
        return;
    }

    /* it's refcounted now, t_reply_with_body unrefs for me -- I can
     * continue but may not use T anymore */
    ret = t_reply_with_body(trans, code, reason, body, headers, tag);
    if (ret < 0) {
        ERR("Reply failed\n");
        rpc->fault(c, 500, "Reply failed");
        return;
    }
}
  2072. }