  1. /*
  2. * $Id$
  3. *
  4. *
  5. * Copyright (C) 2001-2003 FhG Fokus
  6. *
  7. * This file is part of ser, a free SIP server.
  8. *
  9. * ser is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version
  13. *
  14. * For a license to use the ser software under conditions
  15. * other than those described here, or to purchase support for this
  16. * software, please contact iptel.org by e-mail at the following addresses:
  17. * [email protected]
  18. *
  19. * ser is distributed in the hope that it will be useful,
  20. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  21. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  22. * GNU General Public License for more details.
  23. *
  24. * You should have received a copy of the GNU General Public License
  25. * along with this program; if not, write to the Free Software
  26. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  27. *
  28. * History:
  29. * --------
  30. * 2003-01-19 faked lump list created in on_reply handlers
  31. * 2003-01-27 next baby-step to removing ZT - PRESERVE_ZT (jiri)
  32. * 2003-02-13 updated to use rb->dst (andrei)
  33. * 2003-02-18 replaced TOTAG_LEN w/ TOTAG_VALUE_LEN (TOTAG_LEN was defined
  34. * twice with different values!) (andrei)
  35. * 2003-02-28 scratchpad compatibility abandoned (jiri)
  36. * 2003-03-01 kr set through a function now (jiri)
  37. * 2003-03-06 saving of to-tags for ACK/200 matching introduced,
  38. * voicemail changes accepted, updated to new callback
  39. * names (jiri)
  40. * 2003-03-10 fixed new to tag bug/typo (if w/o {}) (andrei)
  41. * 2003-03-16 removed _TOTAG (jiri)
  42. * 2003-03-31 200 for INVITE/UAS resent even for UDP (jiri)
  43. * 2003-03-31 removed msg->repl_add_rm (andrei)
  44. * 2003-04-05 s/reply_route/failure_route, onreply_route introduced (jiri)
  45. * 2003-04-14 local acks generated before reply processing to avoid
  46. * delays in length reply processing (like opening TCP
  47. * connection to an unavailable destination) (jiri)
  48. * 2003-09-11 updates to new build_res_buf_from_sip_req() interface (bogdan)
  49. * 2003-09-11 t_reply_with_body() reshaped to use reply_lumps +
  50. * build_res_buf_from_sip_req() instead of
  51. * build_res_buf_with_body_from_sip_req() (bogdan)
  52. * 2003-11-05 flag context updated from failure/reply handlers back
  53. * to transaction context (jiri)
  54. * 2003-11-11: build_lump_rpl() removed, add_lump_rpl() has flags (bogdan)
  55. * 2003-12-04 global TM callbacks switched to per transaction callbacks
  56. * (bogdan)
  57. * 2004-02-06: support for user pref. added - destroy_avps (bogdan)
  58. * 2003-11-05 flag context updated from failure/reply handlers back
  59. * to transaction context (jiri)
  60. * 2003-11-11: build_lump_rpl() removed, add_lump_rpl() has flags (bogdan)
  61. * 2004-02-13: t->is_invite and t->local replaced with flags (bogdan)
  62. * 2004-02-18 fifo_t_reply imported from vm module (bogdan)
  63. * 2004-08-23 avp list is available from failure/on_reply routes (bogdan)
  64. * 2004-10-01 added a new param.: restart_fr_on_each_reply (andrei)
  65. * 2005-03-01 force for statefull replies the incoming interface of
  66. * the request (bogdan)
  67. * 2005-09-01 reverted to the old way of checking response.dst.send_sock
  68. * in t_retransmit_reply & reply_light (andrei)
  69. * 2005-11-09 updated to the new timers interface (andrei)
  70. * 2006-02-07 named routes support (andrei)
  71. * 2006-09-13 t_pick_branch will skip also over branches with empty reply
  72. * t_should_relay_response will re-pick the branch if failure
  73. * route /handlers added new branches (andrei)
  74. * 2006-10-05 better final reply selection: t_pick_branch will prefer 6xx,
  75. * if no 6xx reply => lowest class/code; if class==4xx =>
  76. * prefer 401, 407, 415, 420 and 484 (andrei)
  77. * 2006-10-12 dns failover when a 503 is received
  78. * replace a 503 final relayed reply by a 500 (andrei)
  79. * 2006-10-16 aggregate all the authorization headers/challenges when
  80. * the final response is 401 or 407 (andrei)
  81. * 2007-03-08 membar_write() used in update_totag_set(...)(andrei)
  82. * 2007-03-15 build_local_ack: removed next_hop and replaced with dst to
  83. * avoid resolving next_hop twice
  84. * added TMCB_ONSEND callbacks support for replies & ACKs (andrei)
  85. * 2007-05-28: build_ack() constructs the ACK from the
  86. * outgoing INVITE instead of the incomming one.
  87. * (it can be disabled with reparse_invite=0) (Miklos)
  88. * 2007-09-03: drop_replies() has been introduced (Miklos)
  89. * 2008-03-12 use cancel_b_method on 6xx (andrei)
  90. * 2008-05-30 make sure the wait timer is started after we don't need t
  91. * anymore to allow safe calls from fr_timer (andrei)
  92. * 2009-06-01 Pre- and post-script callbacks of branch route are
  93. * executed (Miklos)
  94. * 2009-12-10 reply route is executed under lock to protect the avps (andrei)
  95. * 2010-02-22 _reply() will cleanup any reply lumps that it might have added
  96. * (andrei)
  97. * 2010-02-26 added experimental support for final reply dropping, not
  98. * enabled by default (performance hit) (andrei)
  99. *
  100. */
  101. /* Defines:
  102. * TM_ONREPLY_FINAL_DROP_OK - allows dropping the final reply
  103. * from the tm onreply_routes, but comes with a small performance
  104. * hit (extra unlock()/lock() for each final reply when a onreply
  105. * route is set).
  106. */
  107. #ifdef EXTRA_DEBUG
  108. #include <assert.h>
  109. #endif
  110. #include "../../comp_defs.h"
  111. #include "../../hash_func.h"
  112. #include "../../dprint.h"
  113. #include "../../config.h"
  114. #include "../../parser/parser_f.h"
  115. #include "../../parser/parse_to.h"
  116. #include "../../ut.h"
  117. #include "../../timer.h"
  118. #include "../../error.h"
  119. #include "../../action.h"
  120. #include "../../script_cb.h"
  121. #include "../../dset.h"
  122. #include "../../tags.h"
  123. #include "../../route.h"
  124. #include "../../data_lump.h"
  125. #include "../../data_lump_rpl.h"
  126. #include "../../usr_avp.h"
  127. #ifdef WITH_XAVP
  128. #include "../../usr_avp.h"
  129. #endif
  130. #include "../../atomic_ops.h" /* membar_write() */
  131. #include "../../compiler_opt.h"
  132. #ifdef USE_DST_BLACKLIST
  133. #include "../../dst_blacklist.h"
  134. #endif
  135. #ifdef USE_DNS_FAILOVER
  136. #include "../../dns_cache.h"
  137. #include "../../cfg_core.h" /* cfg_get(core, core_cfg, use_dns_failover) */
  138. #endif
  139. #include "defs.h"
  140. #include "config.h"
  141. #include "h_table.h"
  142. #include "t_hooks.h"
  143. #include "t_funcs.h"
  144. #include "t_reply.h"
  145. #include "t_cancel.h"
  146. #include "t_msgbuilder.h"
  147. #include "t_lookup.h"
  148. #include "t_fwd.h"
  149. #include "../../fix_lumps.h"
  150. #include "../../sr_compat.h"
  151. #include "t_stats.h"
  152. #include "uac.h"
  153. #ifdef NO_TM_ONREPLY_FINAL_DROP_OK
  154. #undef TM_ONREPLY_FINAL_DROP_OK
  155. #endif
/* private place where we create to-tags for replies */
/* janakj: made public, I need to access this value to store it in dialogs */
char tm_tags[TOTAG_VALUE_LEN];
/* bogdan: pack tm_tag buffer and len into a str to pass them to
 * build_res_buf_from_sip_req() */
static str tm_tag = {tm_tags,TOTAG_VALUE_LEN};
char *tm_tag_suffix;

/* where to go if there is no positive reply
 * (failure route index; 0 = no route set) */
static int goto_on_negative=0;
/* where to go on receipt of reply (onreply route index; 0 = none) */
static int goto_on_reply=0;
/* where to go on receipt of reply without transaction context */
int goto_on_sl_reply=0;

/* responses priority (used by t_pick_branch)
 * NOTE: a LOWER value means a HIGHER priority (0 for 2xx, 1000 for 6xx).
 * 0xx is used only for the initial value (=> should have no chance to be
 * selected => the highest value); 1xx is not used */
static unsigned short resp_class_prio[]={
	32000, /* 0-99, special */
	11000, /* 1xx, special, should never be used */
	0,     /* 2xx, high priority (not used, 2xx are immediately
	          forwarded and t_pick_branch will never be called if
	          a 2xx was received) */
	3000,  /* 3xx */
	4000,  /* 4xx */
	5000,  /* 5xx */
	1000   /* 6xx, highest priority */
};
  183. int t_get_reply_totag(struct sip_msg *msg, str *totag)
  184. {
  185. if(msg==NULL || totag==NULL) {
  186. return -1;
  187. }
  188. calc_crc_suffix(msg, tm_tag_suffix);
  189. *totag = tm_tag;
  190. return 1;
  191. }
  192. static int picked_branch = -1;
  193. /*! \brief returns the picked branch */
  194. int t_get_picked_branch(void)
  195. {
  196. return picked_branch;
  197. }
  198. /* we store the reply_route # in private memory which is
  199. then processed during t_relay; we cannot set this value
  200. before t_relay creates transaction context or after
  201. t_relay when a reply may arrive after we set this
  202. value; that's why we do it how we do it, i.e.,
  203. *inside* t_relay using hints stored in private memory
  204. before t_relay is called
  205. */
  206. void t_on_negative( unsigned int go_to )
  207. {
  208. struct cell *t = get_t();
  209. /* in REPLY_ROUTE and FAILURE_ROUTE T will be set to current transaction;
  210. * in REQUEST_ROUTE T will be set only if the transaction was already
  211. * created; if not -> use the static variable */
  212. if (!t || t==T_UNDEFINED )
  213. goto_on_negative=go_to;
  214. else
  215. get_t()->on_negative = go_to;
  216. }
  217. void t_on_reply( unsigned int go_to )
  218. {
  219. struct cell *t = get_t();
  220. /* in REPLY_ROUTE and FAILURE_ROUTE T will be set to current transaction;
  221. * in REQUEST_ROUTE T will be set only if the transaction was already
  222. * created; if not -> use the static variable */
  223. if (!t || t==T_UNDEFINED )
  224. goto_on_reply=go_to;
  225. else
  226. get_t()->on_reply = go_to;
  227. }
  228. unsigned int get_on_negative()
  229. {
  230. return goto_on_negative;
  231. }
  232. unsigned int get_on_reply()
  233. {
  234. return goto_on_reply;
  235. }
  236. void tm_init_tags()
  237. {
  238. init_tags(tm_tags, &tm_tag_suffix,
  239. "SER-TM/tags", TM_TAG_SEPARATOR );
  240. }
/* returns 0 if the message was previously acknowledged
 * (i.e., no E2EACK callback is needed) and one if the
 * callback shall be executed
 *
 * Walks t->fwded_totags looking for the To-tag of the e2e ACK. The list
 * is read WITHOUT a lock (writers publish new nodes in update_totag_set()
 * with membar_write()), hence the read dependency barrier below.
 * On a parse failure the To-tag cannot be matched, so the ACK is treated
 * as unmatched (returns 1). */
int unmatched_totag(struct cell *t, struct sip_msg *ack)
{
	struct totag_elem *i;
	str *tag;

	if (parse_headers(ack, HDR_TO_F,0)==-1 ||
			!ack->to ) {
		LOG(L_ERR, "ERROR: unmatched_totag: To invalid\n");
		return 1;
	}
	tag=&get_to(ack)->tag_value;
	i=t->fwded_totags;
	while(i){
		membar_depends(); /* make sure we don't see some old i content
		                     (needed on CPUs like Alpha) */
		if (i->tag.len==tag->len
				&& memcmp(i->tag.s, tag->s, tag->len)==0) {
			DBG("DEBUG: totag for e2e ACK found: %d\n", i->acked);
			/* mark totag as acked and return 1 if this was the first ack
			 * and 0 otherwise */
			return (atomic_get_and_set_int(&i->acked, 1)==0);
		}
		i=i->next;
	}
	/* surprising: to-tag never sighted before */
	return 1;
}
  270. static inline void update_local_tags(struct cell *trans,
  271. struct bookmark *bm, char *dst_buffer,
  272. char *src_buffer /* to which bm refers */)
  273. {
  274. if (bm->to_tag_val.s) {
  275. trans->uas.local_totag.s=bm->to_tag_val.s-src_buffer+dst_buffer;
  276. trans->uas.local_totag.len=bm->to_tag_val.len;
  277. }
  278. }
/* append a newly received tag from a 200/INVITE to
 * transaction's set; (only safe if called from within
 * a REPLY_LOCK); it returns 1 if such a to tag already
 * exists
 *
 * Returns 0 for a new (recorded) tag or on error (unparsed To, missing
 * tag, out of shm memory); 1 only for a retransmitted to-tag.
 * The new node is published to the lock-free readers (unmatched_totag())
 * with membar_write() BEFORE t->fwded_totags is updated -- do not reorder
 * the tail of this function. */
inline static int update_totag_set(struct cell *t, struct sip_msg *ok)
{
	struct totag_elem *i, *n;
	str *tag;
	char *s;

	if (!ok->to || !ok->to->parsed) {
		LOG(L_ERR, "ERROR: update_totag_set: to not parsed\n");
		return 0;
	}
	tag=&get_to(ok)->tag_value;
	if (!tag->s) {
		DBG("ERROR: update_totag_set: no tag in to\n");
		return 0;
	}
	/* linear scan: the set holds the to-tags of 2xx's already forwarded */
	for (i=t->fwded_totags; i; i=i->next) {
		if (i->tag.len==tag->len
				&& memcmp(i->tag.s, tag->s, tag->len) ==0 ){
			/* to tag already recorded */
#ifdef XL_DEBUG
			LOG(L_CRIT, "DEBUG: update_totag_set: totag retransmission\n");
#else
			DBG("DEBUG: update_totag_set: totag retransmission\n");
#endif
			return 1;
		}
	}
	/* that's a new to-tag -- record it */
	/* both allocations under one shm lock; _unsafe variants skip
	   the per-call locking */
	shm_lock();
	n=(struct totag_elem*) shm_malloc_unsafe(sizeof(struct totag_elem));
	s=(char *)shm_malloc_unsafe(tag->len);
	shm_unlock();
	if (!s || !n) {
		LOG(L_ERR, "ERROR: update_totag_set: no memory \n");
		if (n) shm_free(n);
		if (s) shm_free(s);
		return 0;
	}
	memset(n, 0, sizeof(struct totag_elem));
	memcpy(s, tag->s, tag->len );
	n->tag.s=s;n->tag.len=tag->len;
	n->next=t->fwded_totags;
	membar_write(); /* make sure all the changes to n are visible on all cpus
	                   before we update t->fwded_totags. This is needed for
	                   three reasons: the compiler might reorder some of the
	                   writes, the cpu/cache could also reorder them with
	                   respect to the visibility on other cpus
	                   (e.g. some of the changes to n could be visible on
	                   another cpu _after_ seeing t->fwded_totags=n) and
	                   the "readers" (unmatched_tags()) do not use locks and
	                   can be called simultaneously on another cpu.*/
	t->fwded_totags=n;
	DBG("DEBUG: update_totag_set: new totag \n");
	return 0;
}
  338. /*
  339. * Build an ACK to a negative reply
  340. */
  341. static char *build_ack(struct sip_msg* rpl,struct cell *trans,int branch,
  342. unsigned int *ret_len)
  343. {
  344. str to;
  345. if (parse_headers(rpl,HDR_TO_F, 0)==-1 || !rpl->to ) {
  346. LOG(L_ERR, "ERROR: build_ack: "
  347. "cannot generate a HBH ACK if key HFs in reply missing\n");
  348. return NULL;
  349. }
  350. to.s=rpl->to->name.s;
  351. to.len=rpl->to->len;
  352. if (cfg_get(tm, tm_cfg, reparse_invite)) {
  353. /* build the ACK from the INVITE which was sent out */
  354. return build_local_reparse( trans, branch, ret_len,
  355. ACK, ACK_LEN, &to );
  356. } else {
  357. /* build the ACK from the reveived INVITE */
  358. return build_local( trans, branch, ret_len,
  359. ACK, ACK_LEN, &to );
  360. }
  361. }
/*
 * The function builds an ACK to 200 OK of local transactions, honoring the
 * route set.
 * The destination to which the message should be sent will be returned
 * in the dst parameter.
 * returns 0 on error and a pkg_malloc'ed buffer with length in ret_len
 * and intended destination in dst on success.
 *
 * With WITH_AS_SUPPORT the ACK is cached in trans->uac[0].local_ack and
 * installed with an atomic compare-and-swap so concurrent 2xx branches
 * agree on a single winning buffer. NOTE(review): despite the comment
 * above, the WITH_AS_SUPPORT path returns a cached/shared buffer, not a
 * pkg_malloc'ed one -- callers must not free it; confirm against callers.
 */
static char *build_local_ack(struct sip_msg* rpl, struct cell *trans,
	int branch, unsigned int *ret_len,
	struct dest_info* dst)
{
#ifdef WITH_AS_SUPPORT
	struct retr_buf *local_ack, *old_lack;

	/* do we have the ACK cache, previously build? */
	if ((local_ack = trans->uac[0].local_ack) && local_ack->buffer_len) {
		DEBUG("reusing ACK retr. buffer.\n");
		*ret_len = local_ack->buffer_len;
		*dst = local_ack->dst;
		return local_ack->buffer;
	}

	/* the ACK will be built (and cached) by the AS (ack_local_uac()) */
	if (trans->flags & T_NO_AUTO_ACK)
		return NULL;

	if (! (local_ack = local_ack_rb(rpl, trans, branch, /*hdrs*/NULL,
			/*body*/NULL))) {
		ERR("failed to build local ACK retransmission buffer (T@%p).\n",trans);
		return NULL;
	}

	/* set the new buffer, but only if not already set (concurrent 2xx) */
	/* a memory write barrier is needed to make sure the local_ack
	   content is fully written, before we try to add it to the transaction
	   -- andrei */
	membar_write_atomic_op();
	if ((old_lack = (struct retr_buf *)atomic_cmpxchg_long(
			(void *)&trans->uac[0].local_ack, 0, (long)local_ack))) {
		/* buffer already set: trash current and use the winning one */
		INFO("concurrent 2xx to local INVITE detected (T@%p).\n", trans);
		free_local_ack(local_ack);
		local_ack = old_lack;
	}

	*ret_len = local_ack->buffer_len;
	*dst = local_ack->dst;
	return local_ack->buffer;
#else /* ! WITH_AS_SUPPORT */
	return build_dlg_ack(rpl, trans, branch, /*hdrs*/NULL, /*body*/NULL,
			ret_len, dst);
#endif /* WITH_AS_SUPPORT */
}
#if 0 /* candidate for removal --andrei */
/* NOTE(review): dead code -- compiled out; kept only until removal is
 * confirmed. Do not extend. */
/*
 * The function is used to send a localy generated ACK to INVITE
 * (tm generates the ACK on behalf of application using UAC
 */
static int send_local_ack(struct sip_msg* msg, str* next_hop,
	char* ack, int ack_len)
{
	struct dest_info dst;
#ifdef USE_DNS_FAILOVER
	struct dns_srv_handle dns_h;
#endif

	if (!next_hop) {
		LOG(L_ERR, "send_local_ack: Invalid parameter value\n");
		return -1;
	}
#ifdef USE_DNS_FAILOVER
	if (cfg_get(core, core_cfg, use_dns_failover)){
		dns_srv_handle_init(&dns_h);
		if ((uri2dst(&dns_h, &dst, msg, next_hop, PROTO_NONE)==0) ||
				(dst.send_sock==0)){
			dns_srv_handle_put(&dns_h);
			LOG(L_ERR, "send_local_ack: no socket found\n");
			return -1;
		}
		dns_srv_handle_put(&dns_h); /* not needed anymore */
	}else{
		if ((uri2dst(0, &dst, msg, next_hop, PROTO_NONE)==0) ||
				(dst.send_sock==0)){
			LOG(L_ERR, "send_local_ack: no socket found\n");
			return -1;
		}
	}
#else
	if ((uri2dst(&dst, msg, next_hop, PROTO_NONE)==0) || (dst.send_sock==0)){
		LOG(L_ERR, "send_local_ack: no socket found\n");
		return -1;
	}
#endif
	return msg_send(&dst, ack, ack_len);
}
#endif
  453. inline static void start_final_repl_retr( struct cell *t )
  454. {
  455. if (unlikely(!is_local(t) && t->uas.request->REQ_METHOD==METHOD_INVITE )){
  456. /* crank timers for negative replies */
  457. if (t->uas.status>=300) {
  458. if (start_retr(&t->uas.response)!=0)
  459. LOG(L_CRIT, "BUG: start_final_repl_retr: start retr failed"
  460. " for %p\n", &t->uas.response);
  461. return;
  462. }
  463. /* local UAS retransmits too */
  464. if (t->relayed_reply_branch==-2 && t->uas.status>=200) {
  465. /* we retransmit 200/INVs regardless of transport --
  466. even if TCP used, UDP could be used upstream and
  467. loose the 200, which is not retransmitted by proxies
  468. */
  469. if (force_retr( &t->uas.response )!=0)
  470. LOG(L_CRIT, "BUG: start_final_repl_retr: force retr failed for"
  471. " %p\n", &t->uas.response);
  472. return;
  473. }
  474. }
  475. }
/* Core UAS reply sender: installs the pre-built reply buffer `buf`
 * (pkg memory, length `len`, status `code`) as the transaction's UAS
 * response, sends it out and starts the appropriate timers.
 * `lock` selects whether REPLY_LOCK is taken here (0 when the caller
 * already holds it). `bm` is the bookmark into `buf` used to relocate
 * the local to-tag into the shmem copy. `to_tag`/`to_tag_len` are
 * unused here -- presumably kept for interface compatibility; confirm
 * against callers. Consumes (pkg_free's) `buf` on all paths.
 * Returns 1 on success, -1 on error. */
static int _reply_light( struct cell *trans, char* buf, unsigned int len,
			 unsigned int code, char * text,
			 char *to_tag, unsigned int to_tag_len, int lock,
			 struct bookmark *bm )
{
	struct retr_buf *rb;
	unsigned int buf_len;
	branch_bm_t cancel_bitmap;
#ifdef TMCB_ONSEND
	struct tmcb_params onsend_params;
#endif

	if (!buf)
	{
		DBG("DEBUG: _reply_light: response building failed\n");
		/* determine if there are some branches to be canceled */
		if ( is_invite(trans) ) {
			prepare_to_cancel(trans, &cancel_bitmap, 0);
		}
		/* and clean-up, including cancellations, if needed */
		goto error;
	}

	cancel_bitmap=0;
	if (lock) LOCK_REPLIES( trans );
	/* refuse to send anything after a final reply already went out */
	if (trans->uas.status>=200) {
		LOG( L_ERR, "ERROR: _reply_light: can't generate %d reply"
			" when a final %d was sent out\n", code, trans->uas.status);
		goto error2;
	}

	rb = & trans->uas.response;
	rb->activ_type=code;
	trans->uas.status = code;
	/* over-allocate on first use so later (larger) replies can reuse
	   the same shmem buffer */
	buf_len = rb->buffer ? len : len + REPLY_OVERBUFFER_LEN;
	rb->buffer = (char*)shm_resize( rb->buffer, buf_len );
	/* puts the reply's buffer to uas.response */
	if (! rb->buffer ) {
		LOG(L_ERR, "ERROR: _reply_light: cannot allocate shmem buffer\n");
		goto error3;
	}
	/* re-anchor the to-tag pointers from buf into the shmem copy */
	update_local_tags(trans, bm, rb->buffer, buf);

	rb->buffer_len = len ;
	memcpy( rb->buffer , buf , len );
	/* needs to be protected too because what timers are set depends
	   on current transactions status */
	/* t_update_timers_after_sending_reply( rb ); */
	update_reply_stats( code );
	trans->relayed_reply_branch=-2; /* -2 marks a locally generated reply */
	t_stats_replied_locally();
	if (lock) UNLOCK_REPLIES( trans );

	/* do UAC cleanup procedures in case we generated
	   a final answer whereas there are pending UACs */
	if (code>=200) {
		if (unlikely(is_local(trans) &&
			has_tran_tmcbs(trans, TMCB_LOCAL_COMPLETED) ))
			run_trans_callbacks(TMCB_LOCAL_COMPLETED, trans,
								0, FAKED_REPLY, code);
		cleanup_uac_timers( trans );
		if (is_invite(trans)){
			prepare_to_cancel(trans, &cancel_bitmap, 0);
			cancel_uacs( trans, cancel_bitmap, F_CANCEL_B_KILL );
		}
		start_final_repl_retr( trans );
	}

	/* send it out */
	/* first check if we managed to resolve topmost Via -- if
	   not yet, don't try to retransmit
	*/
	/*
	   response.dst.send_sock might be unset if the process that created
	   the original transaction has not finished initialising the
	   retransmission buffer (see t_newtran/ init_rb).
	   If reply_to_via is set and via contains a host name (and not an ip)
	   the chances for this increase a lot.
	 */
	if (!trans->uas.response.dst.send_sock) {
		LOG(L_ERR, "ERROR: _reply_light: no resolved dst to send reply to\n");
	} else {
		if (likely(SEND_PR_BUFFER( rb, buf, len )>=0)){
			if (unlikely(code>=200 && !is_local(trans) &&
				has_tran_tmcbs(trans, TMCB_RESPONSE_OUT)) )
				run_trans_callbacks(TMCB_RESPONSE_OUT, trans,
									trans->uas.request, FAKED_REPLY, code);
#ifdef TMCB_ONSEND
			if (unlikely(has_tran_tmcbs(trans, TMCB_RESPONSE_SENT))){
				INIT_TMCB_ONSEND_PARAMS(onsend_params, trans->uas.request,
								FAKED_REPLY, rb, &rb->dst,
								buf, len, TMCB_LOCAL_F, rb->branch, code);
				run_onsend_callbacks2(TMCB_RESPONSE_SENT, trans,
										&onsend_params);
			}
#endif /* TMCB_ONSEND */
		}
		DBG("DEBUG: reply sent out. buf=%p: %.20s..., shmem=%p: %.20s\n",
			buf, buf, rb->buffer, rb->buffer );
	}
	if (code>=200) {
		/* start wait timer after finishing with t so that this function can
		 * be safely called from a fr_timer which allows quick timer dels
		 * (timer_allow_del()) (there's no chance of having the wait handler
		 * executed while we still need t) --andrei */
		put_on_wait(trans);
	}
	pkg_free( buf ) ;
	DBG("DEBUG: _reply_light: finished\n");
	return 1;

	/* goto-based cleanup: each label unwinds one more acquired step */
error3:
	prepare_to_cancel(trans, &cancel_bitmap, 0);
error2:
	if (lock) UNLOCK_REPLIES( trans );
	pkg_free ( buf );
error:
	/* do UAC cleanup */
	cleanup_uac_timers( trans );
	if ( is_invite(trans) && cancel_bitmap )
		cancel_uacs( trans, cancel_bitmap, F_CANCEL_B_KILL);
	/* we did not succeed -- put the transaction on wait */
	put_on_wait(trans);
	return -1;
}
  594. /* send a UAS reply
  595. * returns 1 if everything was OK or -1 for error
  596. */
  597. static int _reply( struct cell *trans, struct sip_msg* p_msg,
  598. unsigned int code, char * text, int lock )
  599. {
  600. unsigned int len;
  601. char * buf, *dset;
  602. struct bookmark bm;
  603. int dset_len;
  604. struct lump_rpl* rpl_l;
  605. rpl_l=0;
  606. if (code>=200) set_kr(REQ_RPLD);
  607. /* compute the buffer in private memory prior to entering lock;
  608. * create to-tag if needed */
  609. /* if that is a redirection message, dump current message set to it */
  610. if (code>=300 && code<400) {
  611. dset=print_dset(p_msg, &dset_len);
  612. if (dset) {
  613. add_lump_rpl(p_msg, dset, dset_len, LUMP_RPL_HDR);
  614. }
  615. }
  616. if (code>=180 && p_msg->to
  617. && (get_to(p_msg)->tag_value.s==0
  618. || get_to(p_msg)->tag_value.len==0)) {
  619. calc_crc_suffix( p_msg, tm_tag_suffix );
  620. buf = build_res_buf_from_sip_req(code,text, &tm_tag, p_msg, &len, &bm);
  621. if (unlikely(rpl_l)){
  622. unlink_lump_rpl(p_msg, rpl_l);
  623. free_lump_rpl(rpl_l);
  624. }
  625. return _reply_light( trans, buf, len, code, text,
  626. tm_tag.s, TOTAG_VALUE_LEN, lock, &bm);
  627. } else {
  628. buf = build_res_buf_from_sip_req(code,text, 0 /*no to-tag*/,
  629. p_msg, &len, &bm);
  630. if (unlikely(rpl_l)){
  631. unlink_lump_rpl(p_msg, rpl_l);
  632. free_lump_rpl(rpl_l);
  633. }
  634. return _reply_light(trans,buf,len,code,text,
  635. 0, 0, /* no to-tag */lock, &bm);
  636. }
  637. }
/** create or restore a "fake environment" for running a failure_route.
 * If msg is set -> it will fake the env. vars conforming with the msg; if NULL
 * the env. will be restored to original.
 *
 * NOTE(review): not reentrant -- the backup lives in function-static
 * variables, so each faked_env(t, msg) must be paired with faked_env(t, 0)
 * in the same process before faking again.
 */
void faked_env( struct cell *t, struct sip_msg *msg)
{
	/* static backup slots for the process-global state faked below */
	static int backup_route_type;
	static struct cell *backup_t;
	static int backup_branch;
	static unsigned int backup_msgid;
	static avp_list_t* backup_user_from, *backup_user_to;
	static avp_list_t* backup_domain_from, *backup_domain_to;
	static avp_list_t* backup_uri_from, *backup_uri_to;
#ifdef WITH_XAVP
	static sr_xavp_t **backup_xavps;
#endif
	static struct socket_info* backup_si;

	if (msg) {
		/* remember we are back in request processing, but process
		 * a shmem-ed replica of the request; advertise it in route type;
		 * for example t_reply needs to know that
		 */
		backup_route_type=get_route_type();
		set_route_type(FAILURE_ROUTE);
		/* also, tm actions look in beginning whether transaction is
		 * set -- whether we are called from a reply-processing
		 * or a timer process, we need to set current transaction;
		 * otherwise the actions would attempt to look the transaction
		 * up (unnecessary overhead, refcounting)
		 */
		/* backup the current transaction/message identity */
		backup_t=get_t();
		backup_branch=get_t_branch();
		backup_msgid=global_msg_id;
		/* fake transaction and message id */
		global_msg_id=msg->id;
		set_t(t, T_BR_UNDEFINED);
		/* make available the avp lists from the transaction; each
		 * set_avp_list() returns the previous head, which we keep */
		backup_uri_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, &t->uri_avps_from );
		backup_uri_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &t->uri_avps_to );
		backup_user_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, &t->user_avps_from );
		backup_user_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &t->user_avps_to );
		backup_domain_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, &t->domain_avps_from );
		backup_domain_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, &t->domain_avps_to );
#ifdef WITH_XAVP
		backup_xavps = xavp_set_list(&t->xavps_list);
#endif
		/* set default send address to the saved value (first branch's
		 * outgoing socket) */
		backup_si=bind_address;
		bind_address=t->uac[0].request.dst.send_sock;
	} else {
		/* restore original environment */
		set_t(backup_t, backup_branch);
		global_msg_id=backup_msgid;
		set_route_type(backup_route_type);
		/* restore original avp lists */
		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, backup_user_from );
		set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, backup_user_to );
		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, backup_domain_from );
		set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, backup_domain_to );
		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, backup_uri_from );
		set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, backup_uri_to );
#ifdef WITH_XAVP
		xavp_set_list(backup_xavps);
#endif
		bind_address=backup_si;
	}
}
  706. int fake_req(struct sip_msg *faked_req,
  707. struct sip_msg *shmem_msg, int extra_flags, struct ua_client *uac)
  708. {
  709. /* on_negative_reply faked msg now copied from shmem msg (as opposed
  710. * to zero-ing) -- more "read-only" actions (exec in particular) will
  711. * work from reply_route as they will see msg->from, etc.; caution,
  712. * rw actions may append some pkg stuff to msg, which will possibly be
  713. * never released (shmem is released in a single block) */
  714. memcpy( faked_req, shmem_msg, sizeof(struct sip_msg));
  715. /* if we set msg_id to something different from current's message
  716. * id, the first t_fork will properly clean new branch URIs */
  717. faked_req->id=shmem_msg->id-1;
  718. /* msg->parsed_uri_ok must be reset since msg_parsed_uri is
  719. * not cloned (and cannot be cloned) */
  720. faked_req->parsed_uri_ok = 0;
  721. faked_req->msg_flags|=extra_flags; /* set the extra tm flags */
  722. /* dst_uri can change ALSO!!! -- make a private copy */
  723. if (shmem_msg->dst_uri.s!=0 && shmem_msg->dst_uri.len!=0) {
  724. faked_req->dst_uri.s=pkg_malloc(shmem_msg->dst_uri.len+1);
  725. if (!faked_req->dst_uri.s) {
  726. LOG(L_ERR, "ERROR: fake_req: no uri/pkg mem\n");
  727. goto error01;
  728. }
  729. faked_req->dst_uri.len=shmem_msg->dst_uri.len;
  730. memcpy( faked_req->dst_uri.s, shmem_msg->dst_uri.s,
  731. faked_req->dst_uri.len);
  732. faked_req->dst_uri.s[faked_req->dst_uri.len]=0;
  733. }else{
  734. /* in case len==0, but shmem_msg->dst_uri.s!=0 (extra safety) */
  735. faked_req->dst_uri.s = 0;
  736. }
  737. /* new_uri can change -- make a private copy */
  738. if (shmem_msg->new_uri.s!=0 && shmem_msg->new_uri.len!=0) {
  739. faked_req->new_uri.s=pkg_malloc(shmem_msg->new_uri.len+1);
  740. if (!faked_req->new_uri.s) {
  741. LOG(L_ERR, "ERROR: fake_req: no uri/pkg mem\n");
  742. goto error00;
  743. }
  744. faked_req->new_uri.len=shmem_msg->new_uri.len;
  745. memcpy( faked_req->new_uri.s, shmem_msg->new_uri.s,
  746. faked_req->new_uri.len);
  747. faked_req->new_uri.s[faked_req->new_uri.len]=0;
  748. }else{
  749. /* in case len==0, but shmem_msg->new_uri.s!=0 (extra safety)*/
  750. faked_req->new_uri.s = 0;
  751. }
  752. if(uac) setbflagsval(0, uac->branch_flags);
  753. else setbflagsval(0, 0);
  754. return 1;
  755. error00:
  756. if (faked_req->dst_uri.s) {
  757. pkg_free(faked_req->dst_uri.s);
  758. faked_req->dst_uri.s = 0;
  759. }
  760. error01:
  761. return 0;
  762. }
  763. void free_faked_req(struct sip_msg *faked_req, struct cell *t)
  764. {
  765. struct hdr_field *hdr;
  766. if (faked_req->new_uri.s) {
  767. pkg_free(faked_req->new_uri.s);
  768. faked_req->new_uri.s = 0;
  769. }
  770. if (faked_req->dst_uri.s) {
  771. pkg_free(faked_req->dst_uri.s);
  772. faked_req->dst_uri.s = 0;
  773. }
  774. /* free all types of lump that were added in failure handlers */
  775. del_nonshm_lump( &(faked_req->add_rm) );
  776. del_nonshm_lump( &(faked_req->body_lumps) );
  777. del_nonshm_lump_rpl( &(faked_req->reply_lump) );
  778. /* free header's parsed structures that were added by failure handlers */
  779. for( hdr=faked_req->headers ; hdr ; hdr=hdr->next ) {
  780. if ( hdr->parsed && hdr_allocs_parse(hdr) &&
  781. (hdr->parsed<(void*)t->uas.request ||
  782. hdr->parsed>=(void*)t->uas.end_request)) {
  783. /* header parsed filed doesn't point inside uas.request memory
  784. * chunck -> it was added by failure funcs.-> free it as pkg */
  785. DBG("DBG:free_faked_req: removing hdr->parsed %d\n",
  786. hdr->type);
  787. clean_hdr_field(hdr);
  788. hdr->parsed = 0;
  789. }
  790. }
  791. }
/* Run the failure handlers (failure_route script + TMCB_ON_FAILURE
 * callbacks) for a transaction whose picked branch got a negative reply.
 *
 * Fakes a pkg-mem copy of the UAS request and the global environment,
 * runs the handlers on it, then restores everything and propagates any
 * flag changes back into the shmem request.
 *
 * NOTE(review): relies on the process-global 'picked_branch' being set
 * by the caller (t_should_relay_response) before invocation.
 *
 * return 1 if a failure_route processed (or none was needed), 0 on error */
int run_failure_handlers(struct cell *t, struct sip_msg *rpl,
	int code, int extra_flags)
{
	/* static: one fake request per process; safe because tm runs the
	 * handlers sequentially within a process */
	static struct sip_msg faked_req;
	struct sip_msg *shmem_msg = t->uas.request;
	int on_failure;

	/* failure_route for a local UAC? (locally generated requests have
	 * no stored UAS request to fake) */
	if (!shmem_msg) {
		LOG(L_WARN,"Warning: run_failure_handlers: no UAC support (%d, %d) \n",
			t->on_negative, t->tmcb_hl.reg_types);
		return 0;
	}

	/* don't start faking anything if we don't have to */
	if (unlikely(!t->on_negative && !has_tran_tmcbs( t, TMCB_ON_FAILURE))) {
		LOG(L_WARN,
			"Warning: run_failure_handlers: no negative handler (%d, %d)\n",
			t->on_negative,
			t->tmcb_hl.reg_types);
		return 1;
	}

	if (!fake_req(&faked_req, shmem_msg, extra_flags, &t->uac[picked_branch])) {
		LOG(L_ERR, "ERROR: run_failure_handlers: fake_req failed\n");
		return 0;
	}
	/* fake also the env. conforming to the fake msg */
	faked_env( t, &faked_req);
	/* DONE with faking ;-) -> run the failure handlers */

	if (unlikely(has_tran_tmcbs( t, TMCB_ON_FAILURE)) ) {
		run_trans_callbacks( TMCB_ON_FAILURE, t, &faked_req, rpl, code);
	}
	if (t->on_negative) {
		/* avoid recursion -- if failure_route forwards, and does not
		 * set next failure route, failure_route will not be reentered
		 * on failure */
		on_failure = t->on_negative;
		t->on_negative=0;
		if (exec_pre_script_cb(&faked_req, FAILURE_CB_TYPE)>0) {
			/* run a reply_route action if some was marked */
			if (run_top_route(failure_rt.rlist[on_failure], &faked_req, 0)<0)
				LOG(L_ERR, "ERROR: run_failure_handlers: Error in run_top_route\n");
			exec_post_script_cb(&faked_req, FAILURE_CB_TYPE);
		}
	}

	/* restore original environment and free the fake msg */
	faked_env( t, 0);
	free_faked_req(&faked_req,t);

	/* if failure handler changed flags, update transaction context */
	shmem_msg->flags = faked_req.flags;
	return 1;
}
  843. /* 401, 407, 415, 420, and 484 have priority over the other 4xx*/
  844. inline static short int get_4xx_prio(unsigned char xx)
  845. {
  846. switch(xx){
  847. case 1:
  848. case 7:
  849. case 15:
  850. case 20:
  851. case 84:
  852. return xx;
  853. break;
  854. }
  855. return 100+xx;
  856. }
  857. /* returns response priority, lower number => highest prio
  858. *
  859. * responses priority val
  860. * 0-99 32000+reponse (special)
  861. * 1xx 11000+reponse (special)
  862. * 700-999 10000+response (very low)
  863. * 5xx 5000+xx (low)
  864. * 4xx 4000+xx
  865. * 3xx 3000+xx
  866. * 6xx 1000+xx (high)
  867. * 2xx 0000+xx (highest)
  868. */
  869. inline static short int get_prio(unsigned int resp)
  870. {
  871. int class;
  872. int xx;
  873. class=resp/100;
  874. if (class<7){
  875. xx=resp%100;
  876. return resp_class_prio[class]+((class==4)?get_4xx_prio(xx):xx);
  877. }
  878. return 10000+resp; /* unknown response class => return very low prio */
  879. }
  880. /* select a branch for forwarding; returns:
  881. * 0..X ... branch number
  882. * -1 ... error
  883. * -2 ... can't decide yet -- incomplete branches present
  884. */
  885. int t_pick_branch(int inc_branch, int inc_code, struct cell *t, int *res_code)
  886. {
  887. int best_b, best_s, b;
  888. best_b=-1; best_s=0;
  889. for ( b=0; b<t->nr_of_outgoings ; b++ ) {
  890. /* "fake" for the currently processed branch */
  891. if (b==inc_branch) {
  892. if (get_prio(inc_code)<get_prio(best_s)) {
  893. best_b=b;
  894. best_s=inc_code;
  895. }
  896. continue;
  897. }
  898. /* skip 'empty branches' */
  899. if (!t->uac[b].request.buffer) continue;
  900. /* there is still an unfinished UAC transaction; wait now! */
  901. if ( t->uac[b].last_received<200 )
  902. return -2;
  903. /* if reply is null => t_send_branch "faked" reply, skip over it */
  904. if ( t->uac[b].reply &&
  905. get_prio(t->uac[b].last_received)<get_prio(best_s) ) {
  906. best_b =b;
  907. best_s = t->uac[b].last_received;
  908. }
  909. } /* find lowest branch */
  910. *res_code=best_s;
  911. return best_b;
  912. }
  913. /* The same as t_pick_branch(), but allows also
  914. * blind branches to be picked up.
  915. * This function should be used only in failure_route
  916. * to check which response has been
  917. * picked up by t_pick_branch().
  918. * returns:
  919. * 0..X ... branch number
  920. * -1 ... error
  921. * -2 ... can't decide yet -- incomplete branches present
  922. */
  923. int t_pick_branch_blind(struct cell *t, int *res_code)
  924. {
  925. int best_b, best_s, b;
  926. best_b=-1; best_s=0;
  927. for ( b=0; b<t->nr_of_outgoings ; b++ ) {
  928. /* there is still an unfinished UAC transaction; wait now! */
  929. if ( t->uac[b].last_received<200 )
  930. return -2;
  931. /* if reply is null => t_send_branch "faked" reply, skip over it */
  932. if ( t->uac[b].reply &&
  933. get_prio(t->uac[b].last_received)<get_prio(best_s) ) {
  934. best_b = b;
  935. best_s = t->uac[b].last_received;
  936. }
  937. } /* find lowest branch */
  938. *res_code=best_s;
  939. return best_b;
  940. }
/* flag indicating whether it is requested
 * to drop the already saved replies or not;
 * read/reset in t_should_relay_response(): 3 => drop only if new branches
 * were added, 2 => drop starting from the last TM_UAC_FLAG_FB branch,
 * other non-zero => drop all (presumably set via a t_drop_replies()-style
 * exported function -- TODO confirm against the module exports) */
static unsigned char drop_replies;
/* This is the neurological point of reply processing -- called
 * from within a REPLY_LOCK, t_should_relay_response decides
 * how a reply shall be processed and how transaction state is
 * affected.
 *
 * Checks if the new reply (with new_code status) should be sent or not
 * based on the current transaction status.
 *
 * Outputs:
 *   *should_store  - 1 if the reply must be cloned into the transaction
 *   *should_relay  - branch number (0,1,...) whose reply should be
 *                    relayed upstream, or -1 if nothing is to be relayed
 * Side effects: may update uac[branch].last_received, set the global
 * 'picked_branch', run the failure handlers and request branch
 * cancellation via *cancel_bitmap.
 */
static enum rps t_should_relay_response( struct cell *Trans , int new_code,
	int branch , int *should_store, int *should_relay,
	branch_bm_t *cancel_bitmap, struct sip_msg *reply )
{
	int branch_cnt;
	int picked_code;
	int new_branch;
	int inv_through;
	int extra_flags;
	int i;
	int replies_dropped;

	/* note: this code never lets replies to CANCEL go through;
	   we generate always a local 200 for CANCEL; 200s are
	   not relayed because it's not an INVITE transaction;
	   >= 300 are not relayed because 200 was already sent
	   out
	*/
	DBG("->>>>>>>>> T_code=%d, new_code=%d\n",Trans->uas.status,new_code);
	/* INVITE 2xx may pass through even after completion (downstream
	 * forks can still answer) */
	inv_through=new_code>=200 && new_code<300 && is_invite(Trans);
	/* if final response sent out, allow only INVITE 2xx */
	if ( Trans->uas.status >= 200 ) {
		if (inv_through) {
			DBG("DBG: t_should_relay_response: 200 INV after final sent\n");
			*should_store=0;
			Trans->uac[branch].last_received=new_code;
			*should_relay=branch;
			return RPS_PUSHED_AFTER_COMPLETION;
		}
		/* except the exception above, too late messages will
		   be discarded */
		goto discard;
	}

	/* if final response received at this branch, allow only INVITE 2xx */
	if (Trans->uac[branch].last_received>=200
			&& !(inv_through && Trans->uac[branch].last_received<300)) {
		/* don't report on retransmissions */
		if (Trans->uac[branch].last_received==new_code) {
			DBG("DEBUG: final reply retransmission\n");
			goto discard;
		}
		/* if you FR-timed-out, faked a local 408 and 487 came or
		 * faked a CANCEL on a non-replied branch don't
		 * report on it either */
		if ((Trans->uac[branch].last_received==487) ||
				(Trans->uac[branch].last_received==408 && new_code==487)) {
			DBG("DEBUG: %d came for a %d branch (ignored)\n",
					new_code, Trans->uac[branch].last_received);
			goto discard;
		}
		/* this looks however how a very strange status rewrite attempt;
		 * report on it */
		LOG(L_ERR, "ERROR: t_should_relay_response: status rewrite by UAS: "
			"stored: %d, received: %d\n",
			Trans->uac[branch].last_received, new_code );
		goto discard;
	}

	/* no final response sent yet */
	/* negative replies subject to fork picking */
	if (new_code >=300 ) {
		Trans->uac[branch].last_received=new_code;
		/* if all branches are final, pick the lowest-priority one */
		picked_branch=t_pick_branch(branch,new_code, Trans, &picked_code);
		if (picked_branch==-2) { /* branches open yet */
			*should_store=1;
			*should_relay=-1;
			if (new_code>=600 && new_code<=699){
				if (!(Trans->flags & (T_6xx | T_DISABLE_6xx))){
					/* cancel only the first time we get a 6xx and only
					   if the 6xx handling is not disabled */
					prepare_to_cancel(Trans, cancel_bitmap, 0);
					Trans->flags|=T_6xx;
				}
			}
			return RPS_STORE;
		}
		if (picked_branch==-1) {
			LOG(L_CRIT, "ERROR: t_should_relay_response: lowest==-1\n");
			goto error;
		}

		/* no more pending branches -- try if that changes after
		   a callback; save branch count to be able to determine
		   later if new branches were initiated */
		branch_cnt=Trans->nr_of_outgoings;
		/* also append the current reply to the transaction to
		 * make it available in failure routes - a kind of "fake"
		 * save of the final reply per branch */
		Trans->uac[branch].reply = reply;
		Trans->flags&=~T_6xx; /* clear the 6xx flag , we want to
								 allow new branches from the failure route */

		/* kamailio compat mode drops old replies when the failure
		 * route adds new branches (see drop_replies handling below) */
		if(sr_cfg_compat==SR_COMPAT_KAMAILIO)
			drop_replies = 3;
		else
			drop_replies = 0;
		replies_dropped = 0;
		/* run ON_FAILURE handlers ( route and callbacks) */
		if (unlikely(has_tran_tmcbs( Trans, TMCB_ON_FAILURE_RO|TMCB_ON_FAILURE)
						|| Trans->on_negative )) {
			/* propagate timeout/reply info of the picked branch as
			 * message flags into the faked request */
			extra_flags=
				((Trans->uac[picked_branch].request.flags & F_RB_TIMEOUT)?
					FL_TIMEOUT:0) |
				((Trans->uac[picked_branch].request.flags & F_RB_REPLIED)?
					FL_REPLIED:0);
			run_failure_handlers( Trans, Trans->uac[picked_branch].reply,
									picked_code, extra_flags);
			if (unlikely((drop_replies==3 && branch_cnt<Trans->nr_of_outgoings) ||
							(drop_replies!=0 && drop_replies!=3))
					) {
				/* drop all the replies that we have already saved */
				i = 0;
				if(drop_replies==2)
				{
					/* start dropping from the last branch flagged as
					 * "first branch" (TM_UAC_FLAG_FB) */
					for(i=branch_cnt-1; i>=0; i--)
						if(Trans->uac[i].flags&TM_UAC_FLAG_FB)
							break;
					if(i<0) i=0;
				}
				for (; i<branch_cnt; i++) {
					if (Trans->uac[i].reply &&
					(Trans->uac[i].reply != FAKED_REPLY) &&
					(Trans->uac[i].reply->msg_flags & FL_SHM_CLONE))
						/* we have to drop the reply which is already
						 * in shm mem */
						sip_msg_free(Trans->uac[i].reply);

					Trans->uac[i].reply = 0;
				}
				/* make sure that the selected reply is not relayed even if
				there is not any new branch added -- should not happen */
				picked_branch = -1;
				replies_dropped = 1;
			}
		}

		/* now reset it; after the failure logic, the reply may
		 * not be stored any more and we don't want to keep into
		 * transaction some broken reference */
		Trans->uac[branch].reply = 0;

		/* look if the callback perhaps replied transaction; it also
		   covers the case in which a transaction is replied localy
		   on CANCEL -- then it would make no sense to proceed to
		   new branches bellow
		*/
		if (Trans->uas.status >= 200) {
			*should_store=0;
			*should_relay=-1;
			/* this might deserve an improvement -- if something
			   was already replied, it was put on wait and then,
			   returning RPS_COMPLETED will make t_on_reply
			   put it on wait again; perhaps splitting put_on_wait
			   from send_reply or a new RPS_ code would be healthy
			*/
			return RPS_COMPLETED;
		}
		/* look if the callback/failure_route introduced new branches ... */
		if (branch_cnt<Trans->nr_of_outgoings){
			/* the new branches might be already "finished" => we
			 * must use t_pick_branch again */
			new_branch=t_pick_branch((replies_dropped==0)?
							branch :
							-1, /* make sure we do not pick
								   the current branch */
						new_code,
						Trans,
						&picked_code);

			if (new_branch<0){
				if (likely(replies_dropped==0)) {
					if (new_branch==-2) { /* branches open yet */
						*should_store=1;
						*should_relay=-1;
						return RPS_STORE;
					}
					/* error, use the old picked_branch */
				} else {
					if (new_branch==-2) { /* branches open yet */
						/* we are not allowed to relay the reply */
						*should_store=0;
						*should_relay=-1;
						return RPS_DISCARDED;
					} else {
						/* There are no open branches,
						and all the newly created branches failed
						as well. We are not allowed to send back
						the previously picked-up branch, thus,
						let us reply with an error instead. */
						goto branches_failed;
					}
				}
			}else{
				/* found a new_branch */
				picked_branch=new_branch;
			}
		} else if (unlikely(replies_dropped)) {
			/* Either the script writer did not add new branches
			after calling t_drop_replies(), or tm was unable
			to add the new branches to the transaction. */
			goto branches_failed;
		}

		/* really no more pending branches -- return lowest code */
		*should_store=0;
		*should_relay=picked_branch;
		/* we dont need 'prepare_to_cancel' here -- all branches
		   known to have completed */
		/* prepare_to_cancel( Trans, cancel_bitmap, 0 ); */
		return RPS_COMPLETED;
	}

	/* not >=300 ... it must be 2xx or provisional 1xx */
	if (new_code>=100) {
#ifdef WITH_AS_SUPPORT
		/* need a copy of the message for ACK generation */
		*should_store = (inv_through && is_local(Trans) &&
				(Trans->uac[branch].last_received < 200) &&
				(Trans->flags & T_NO_AUTO_ACK)) ? 1 : 0;
#else
		*should_store=0;
#endif
		/* 1xx and 2xx except 100 will be relayed */
		Trans->uac[branch].last_received=new_code;
		*should_relay= new_code==100? -1 : branch;
		if (new_code>=200 ) {
			/* 2xx received -> cancel the remaining branches */
			prepare_to_cancel( Trans, cancel_bitmap, 0);
			return RPS_COMPLETED;
		} else return RPS_PROVISIONAL;
	}

error:
	/* reply_status didn't match -- it must be something weird */
	LOG(L_CRIT, "ERROR: Oh my gooosh! We don't know whether to relay %d\n",
		new_code);
discard:
	*should_store=0;
	*should_relay=-1;
	return RPS_DISCARDED;

branches_failed:
	*should_store=0;
	if (is_local(Trans)){
		/* for local transactions use the current reply */
		*should_relay=branch;
	}else{
		*should_relay=-1;
		/* We have hopefully set tm_error in failure_route when
		   the branches failed. If not, reply with E_UNSPEC */
		if ((kill_transaction_unsafe(Trans,
					tm_error ? tm_error : E_UNSPEC)) <=0 ){
			LOG(L_ERR, "ERROR: t_should_relay_response: "
					"reply generation failed\n");
		}
	}
	return RPS_COMPLETED;
}
/* Retransmits the last sent inbound reply.
 * input: t == transaction whose stored UAS reply should be retransmitted
 * Returns -1 - error
 *          1 - OK
 */
int t_retransmit_reply( struct cell *t )
{
	/* static per-process scratch buffer: the stored reply is copied here
	 * under the lock so the send can happen unlocked */
	static char b[BUF_SIZE];
	int len;

	/* first check if we managed to resolve topmost Via -- if
	   not yet, don't try to retransmit
	*/
	/*
	   response.dst.send_sock might be unset if the process that created
	   the original transaction has not finished initialising the
	   retransmission buffer (see t_newtran/ init_rb).
	   If reply_to_via is set and via contains a host name (and not an ip)
	   the chances for this increase a lot.
	*/
	if (!t->uas.response.dst.send_sock) {
		LOG(L_WARN, "WARNING: t_retransmit_reply: "
			"no resolved dst to retransmit\n");
		return -1;
	}

	/* we need to lock the transaction as messages from
	   upstream may change it continuously
	*/
	LOCK_REPLIES( t );

	if (!t->uas.response.buffer) {
		DBG("DBG: t_retransmit_reply: nothing to retransmit\n");
		goto error;
	}

	len=t->uas.response.buffer_len;
	if ( len==0 || len>BUF_SIZE )  {
		DBG("DBG: t_retransmit_reply: "
			"zero length or too big to retransmit: %d\n", len);
		goto error;
	}
	/* snapshot the reply while locked, then release before sending */
	memcpy( b, t->uas.response.buffer, len );
	UNLOCK_REPLIES( t );
	SEND_PR_BUFFER( & t->uas.response, b, len );
#ifdef TMCB_ONSEND
	if (unlikely(has_tran_tmcbs(t, TMCB_RESPONSE_SENT))){
		/* we don't know if it's a retransmission of a local reply or a
		 * forwarded reply */
		run_onsend_callbacks(TMCB_RESPONSE_SENT, &t->uas.response, 0, 0,
								TMCB_RETR_F);
	}
#endif
	DBG("DEBUG: reply retransmitted. buf=%p: %.9s..., shmem=%p: %.9s\n",
		b, b, t->uas.response.buffer, t->uas.response.buffer );
	return 1;

error:
	UNLOCK_REPLIES(t);
	return -1;
}
/* Send a UAS reply (locked variant): builds and sends a <code> <text>
 * reply for transaction t, taking the REPLY_LOCK internally.
 * returns 1 on success, -1 on error */
int t_reply( struct cell *t, struct sip_msg* p_msg, unsigned int code,
	char * text )
{
	return _reply( t, p_msg, code, text, 1 /* lock replies */ );
}
/* Send a UAS reply without taking the REPLY_LOCK -- for callers that
 * already hold it.
 * returns 1 on success, -1 on error */
int t_reply_unsafe( struct cell *t, struct sip_msg* p_msg, unsigned int code,
	char * text )
{
	return _reply( t, p_msg, code, text, 0 /* don't lock replies */ );
}
/* Arm the final-reply retransmission timer and put the transaction on
 * the wait timer (scheduling its eventual deletion). */
void set_final_timer( struct cell *t )
{
	start_final_repl_retr(t);
	put_on_wait(t);
}
  1271. void cleanup_uac_timers( struct cell *t )
  1272. {
  1273. int i;
  1274. /* reset FR/retransmission timers */
  1275. for (i=0; i<t->nr_of_outgoings; i++ ){
  1276. stop_rb_timers(&t->uac[i].request);
  1277. }
  1278. DBG("DEBUG: cleanup_uac_timers: RETR/FR timers reset\n");
  1279. }
  1280. static int store_reply( struct cell *trans, int branch, struct sip_msg *rpl)
  1281. {
  1282. # ifdef EXTRA_DEBUG
  1283. if (trans->uac[branch].reply) {
  1284. LOG(L_ERR, "ERROR: replacing stored reply; aborting\n");
  1285. abort();
  1286. }
  1287. # endif
  1288. /* when we later do things such as challenge aggregation,
  1289. we should parse the message here before we conserve
  1290. it in shared memory; -jiri
  1291. */
  1292. if (rpl==FAKED_REPLY)
  1293. trans->uac[branch].reply=FAKED_REPLY;
  1294. else
  1295. trans->uac[branch].reply = sip_msg_cloner( rpl, 0 );
  1296. if (! trans->uac[branch].reply ) {
  1297. LOG(L_ERR, "ERROR: store_reply: can't alloc' clone memory\n");
  1298. return 0;
  1299. }
  1300. return 1;
  1301. }
  1302. /* returns the number of authenticate replies (401 and 407) received so far
  1303. * (FAKED_REPLYes are excluded)
  1304. * It must be called with the REPLY_LOCK held */
  1305. inline static int auth_reply_count(struct cell *t, struct sip_msg* crt_reply)
  1306. {
  1307. int count;
  1308. int r;
  1309. count=0;
  1310. if (crt_reply && (crt_reply!=FAKED_REPLY) &&
  1311. (crt_reply->REPLY_STATUS ==401 || crt_reply->REPLY_STATUS ==407))
  1312. count=1;
  1313. for (r=0; r<t->nr_of_outgoings; r++){
  1314. if (t->uac[r].reply && (t->uac[r].reply!=FAKED_REPLY) &&
  1315. (t->uac[r].last_received==401 || t->uac[r].last_received==407))
  1316. count++;
  1317. }
  1318. return count;
  1319. }
/* Build a reply buffer aggregating the WWW-/Proxy-Authenticate headers of
 * all stored 401/407 branch replies: the headers are temporarily attached
 * to the UAS request as reply lumps, the buffer is built, then the lumps
 * are detached and freed again.
 * must be called with the REPLY_LOCK held
 * returns the pkg-allocated buffer (length in *res_len) or 0 on error */
inline static char* reply_aggregate_auth(int code, char* txt, str* new_tag,
									struct cell* t, unsigned int* res_len,
									struct bookmark* bm)
{
	int r;
	struct hdr_field* hdr;
	struct lump_rpl** first;
	struct lump_rpl** crt;
	struct lump_rpl* lst;
	struct lump_rpl* lst_end;
	struct sip_msg* req;
	char* buf;

	first=0;
	lst_end=0;
	req=t->uas.request;

	for (r=0; r<t->nr_of_outgoings; r++){
		if (t->uac[r].reply && (t->uac[r].reply!=FAKED_REPLY) &&
			(t->uac[r].last_received==401 || t->uac[r].last_received==407)){
			for (hdr=t->uac[r].reply->headers; hdr; hdr=hdr->next){
				if (hdr->type==HDR_WWW_AUTHENTICATE_T ||
						hdr->type==HDR_PROXY_AUTHENTICATE_T){
					/* NODUP|NOFREE: the lump borrows the header bytes
					 * from the stored shm reply, so it must be detached
					 * before that reply could go away */
					crt=add_lump_rpl2(req, hdr->name.s, hdr->len,
							LUMP_RPL_HDR|LUMP_RPL_NODUP|LUMP_RPL_NOFREE);
					if (crt==0){
						/* some kind of error, better stop */
						LOG(L_ERR, "ERROR: tm:reply_aggregate_auth:"
									" add_lump_rpl2 failed\n");
						goto skip;
					}
					lst_end=*crt;
					if (first==0) first=crt;
				}
			}
		}
	}
skip:
	buf=build_res_buf_from_sip_req(code, txt, new_tag, req, res_len, bm);
	/* clean the added lumps */
	if (first){
		lst=*first;
		*first=lst_end->next; /* "detach" the list of added rpl_lumps */
		lst_end->next=0; /* terminate lst */
		del_nonshm_lump_rpl(&lst);
		if (lst){
			LOG(L_CRIT, "BUG: tm: repply_aggregate_auth: rpl_lump list"
					"contains shm alloc'ed lumps\n");
			abort();
		}
	}
	return buf;
}
/* this is the code which decides what and when shall be relayed
   upstream; note well -- it assumes it is entered locked with
   REPLY_LOCK and it returns unlocked!
   If do_put_on_wait==1 and this is the final reply, the transaction
   wait timer will be started (put_on_wait(t)).

   Params:
     t             - the transaction the reply belongs to
     p_msg         - the received reply (may be FAKED_REPLY for locally
                     generated replies)
     branch        - index into t->uac[] of the branch the reply came on
     msg_status    - the reply status code
     cancel_bitmap - out: branches that should be canceled; zeroed on the
                     error path (t_reply_unsafe handles cancellation then)
     do_put_on_wait- if 1 and the reply is final, start the wait timer
   Returns: the rps value from t_should_relay_response(), or RPS_ERROR on
   failure (in which case a 500 is attempted via t_reply_unsafe()).
*/
enum rps relay_reply( struct cell *t, struct sip_msg *p_msg, int branch,
	unsigned int msg_status, branch_bm_t *cancel_bitmap, int do_put_on_wait )
{
	int relay;
	int save_clone;
	char *buf;
	/* length of outbound reply */
	unsigned int res_len;
	int relayed_code;
	struct sip_msg *relayed_msg;
	struct sip_msg *reply_bak;
	struct bookmark bm;
	int totag_retr;
	enum rps reply_status;
	/* retransmission structure of outbound reply and request */
	struct retr_buf *uas_rb;
	str* to_tag;
#ifdef TMCB_ONSEND
	struct tmcb_params onsend_params;
#endif

	/* keep compiler warnings about use of uninit vars silent */
	res_len=0;
	buf=0;
	relayed_msg=0;
	relayed_code=0;
	totag_retr=0;

	/* remember, what was sent upstream to know whether we are
	 * forwarding a first final reply or not */

	/* *** store and relay message as needed *** */
	reply_status = t_should_relay_response(t, msg_status, branch,
		&save_clone, &relay, cancel_bitmap, p_msg );
	DBG("DEBUG: relay_reply: branch=%d, save=%d, relay=%d\n",
		branch, save_clone, relay );

	/* store the message if needed */
	if (save_clone) /* save for later use, typically branch picking */
	{
		if (!store_reply( t, branch, p_msg ))
			goto error01;
	}

	uas_rb = & t->uas.response;
	/* relay>=0 means: relay the reply stored on branch 'relay' upstream */
	if (relay >= 0 ) {
		/* initialize sockets for outbound reply */
		uas_rb->activ_type=msg_status;
		/* only messages known to be relayed immediately will be
		 * be called on; we do not evoke this callback on messages
		 * stored in shmem -- they are fixed and one cannot change them
		 * anyway */
		if (unlikely(msg_status<300 && branch==relay
		&& has_tran_tmcbs(t,TMCB_RESPONSE_FWDED)) ) {
			run_trans_callbacks( TMCB_RESPONSE_FWDED, t, t->uas.request,
				p_msg, msg_status );
		}

		/* try building the outbound reply from either the current
		 * or a stored message */
		relayed_msg = branch==relay ? p_msg : t->uac[relay].reply;
		if (relayed_msg==FAKED_REPLY) {
			/* locally generated reply: build it from the stored request */
			relayed_code = branch==relay
				? msg_status : t->uac[relay].last_received;
			/* use to_tag from the original request, or if not present,
			 * generate a new one */
			if (relayed_code>=180 && t->uas.request->to
					&& (get_to(t->uas.request)->tag_value.s==0
					|| get_to(t->uas.request)->tag_value.len==0)) {
				calc_crc_suffix( t->uas.request, tm_tag_suffix );
				to_tag=&tm_tag;
			} else {
				to_tag=0;
			}
			if (cfg_get(tm, tm_cfg, tm_aggregate_auth) &&
					(relayed_code==401 || relayed_code==407) &&
					(auth_reply_count(t, p_msg)>1)){
				/* aggregate 401 & 407 www & proxy authenticate headers in
				 * a "FAKE" reply*/

				/* temporarily "store" the current reply so that
				 * reply_aggregate_auth() can see it on this branch too */
				reply_bak=t->uac[branch].reply;
				t->uac[branch].reply=p_msg;
				buf=reply_aggregate_auth(relayed_code,
					error_text(relayed_code), to_tag, t, &res_len, &bm);
				/* revert the temporary "store" reply above */
				t->uac[branch].reply=reply_bak;
			}else{
				buf = build_res_buf_from_sip_req( relayed_code,
						error_text(relayed_code), to_tag,
						t->uas.request, &res_len, &bm );
			}
		} else {
			relayed_code=relayed_msg->REPLY_STATUS;
			if (relayed_code==503){
				/* replace a final 503 with a 500:
				 * generate a "FAKE" reply and a new to_tag (for easier
				 * debugging)*/
				relayed_msg=FAKED_REPLY;
				if ((get_to(t->uas.request)->tag_value.s==0 ||
						get_to(t->uas.request)->tag_value.len==0)) {
					calc_crc_suffix( t->uas.request, tm_tag_suffix );
					to_tag=&tm_tag;
				} else {
					to_tag=0;
				}
				/* don't relay a 503, replace it w/ 500 (rfc3261) */
				buf=build_res_buf_from_sip_req(500, error_text(relayed_code),
						to_tag, t->uas.request, &res_len, &bm);
				relayed_code=500;
			}else if (cfg_get(tm, tm_cfg, tm_aggregate_auth) &&
					(relayed_code==401 || relayed_code==407) &&
					(auth_reply_count(t, p_msg)>1)){
				/* aggregate 401 & 407 www & proxy authenticate headers in
				 * a "FAKE" reply*/
				if ((get_to(t->uas.request)->tag_value.s==0 ||
						get_to(t->uas.request)->tag_value.len==0)) {
					calc_crc_suffix( t->uas.request, tm_tag_suffix );
					to_tag=&tm_tag;
				} else {
					to_tag=0;
				}
				/* temporarily "store" the current reply */
				reply_bak=t->uac[branch].reply;
				t->uac[branch].reply=p_msg;
				buf=reply_aggregate_auth(relayed_code,
					error_text(relayed_code), to_tag, t, &res_len, &bm);
				/* revert the temporary "store" reply above */
				t->uac[branch].reply=reply_bak;;
				relayed_msg=FAKED_REPLY; /* mark the relayed_msg as a "FAKE" */
			}else{
				buf = build_res_buf_from_sip_res( relayed_msg, &res_len );
				/* if we build a message from shmem, we need to remove
				   via delete lumps which are now stirred in the shmem-ed
				   structure
				*/
				if (branch!=relay) {
					free_via_clen_lump(&relayed_msg->add_rm);
				}
				/* update send_flags with possible additions from the
				   reply route */
				SND_FLAGS_OR(&uas_rb->dst.send_flags, &uas_rb->dst.send_flags,
						&relayed_msg->rpl_send_flags);
			}
		}
		update_reply_stats( relayed_code );
		if (!buf) {
			LOG(L_ERR, "ERROR: relay_reply: "
				"no mem for outbound reply buffer\n");
			goto error02;
		}

		/* attempt to copy the message to UAS's shmem:
		   - copy to-tag for ACK matching as well
		   - allocate little a bit more for provisional as
		     larger messages are likely to follow and we will be
		     able to reuse the memory frag
		*/
		uas_rb->buffer = (char*)shm_resize( uas_rb->buffer, res_len +
			(msg_status<200 ? REPLY_OVERBUFFER_LEN : 0));
		if (!uas_rb->buffer) {
			LOG(L_ERR, "ERROR: relay_reply: cannot alloc reply shmem\n");
			goto error03;
		}
		uas_rb->buffer_len = res_len;
		memcpy( uas_rb->buffer, buf, res_len );
		if (relayed_msg==FAKED_REPLY) { /* to-tags for local replies */
			update_local_tags(t, &bm, uas_rb->buffer, buf);
			t_stats_replied_locally();
		}

		/* update the status ... */
		t->uas.status = relayed_code;
		t->relayed_reply_branch = relay;

		if ( unlikely(is_invite(t) && relayed_msg!=FAKED_REPLY
		&& relayed_code>=200 && relayed_code < 300
		&& has_tran_tmcbs( t,
				TMCB_RESPONSE_OUT|TMCB_E2EACK_IN|TMCB_E2EACK_RETR_IN))) {
			totag_retr=update_totag_set(t, relayed_msg);
		}
	}; /* if relay ... */
	UNLOCK_REPLIES( t );

	/* send it now (from the private buffer) */
	if (relay >= 0) {
		/* Set retransmission timer before the reply is sent out to avoid
		 * race conditions
		 *
		 * Call start_final_repl_retr/put_on_wait() only if we really send out
		 * the reply. It can happen that the reply has been already sent from
		 * failure_route or from a callback and the timer has been already
		 * started. (Miklos)
		 */
		if (reply_status == RPS_COMPLETED) {
			start_final_repl_retr(t);
		}
		if (SEND_PR_BUFFER( uas_rb, buf, res_len )>=0){
			if (unlikely(!totag_retr && has_tran_tmcbs(t, TMCB_RESPONSE_OUT))){
				run_trans_callbacks( TMCB_RESPONSE_OUT, t, t->uas.request,
					relayed_msg, relayed_code);
			}
#ifdef TMCB_ONSEND
			if (unlikely(has_tran_tmcbs(t, TMCB_RESPONSE_SENT))){
				INIT_TMCB_ONSEND_PARAMS(onsend_params, t->uas.request,
					relayed_msg, uas_rb, &uas_rb->dst, buf,
					res_len,
					(relayed_msg==FAKED_REPLY)?TMCB_LOCAL_F:0,
					uas_rb->branch, relayed_code);
				run_onsend_callbacks2(TMCB_RESPONSE_SENT, t, &onsend_params);
			}
#endif
		}
		/* Call put_on_wait() only if we really send out
		 * the reply. It can happen that the reply has been already sent from
		 * failure_route or from a callback and the timer has been already
		 * started. (Miklos)
		 *
		 * put_on_wait() should always be called after we finished dealling
		 * with t, because otherwise the wait timer might fire before we
		 * finish with t, and by the time we want to use t it could
		 * be already deleted. This could happen only if this function is
		 * called from timer (fr_timer) (the timer doesn't refcnt) and the
		 * timer allows quick dels (timer_allow_del()). --andrei
		 */
		if (do_put_on_wait && (reply_status == RPS_COMPLETED)) {
			put_on_wait(t);
		}
		pkg_free( buf );
	}

	/* success */
	return reply_status;

error03:
	pkg_free( buf );
error02:
	/* undo the clone stored above so the branch slot is consistent again */
	if (save_clone) {
		if (t->uac[branch].reply!=FAKED_REPLY)
			sip_msg_free( t->uac[branch].reply );
		t->uac[branch].reply = NULL;
	}
error01:
	t_reply_unsafe( t, t->uas.request, 500, "Reply processing error" );
	*cancel_bitmap=0; /* t_reply_unsafe already canceled everything needed */
	UNLOCK_REPLIES(t);
	/* if (is_invite(t)) cancel_uacs( t, *cancel_bitmap, 0);
	 * -- not needed, t_reply_unsafe took care of this */

	/* a serious error occurred -- attempt to send an error reply;
	   it will take care of clean-ups */

	/* failure */
	return RPS_ERROR;
}
/* this is the "UAC" above transaction layer; if a final reply
   is received, it triggers a callback; note well -- it assumes
   it is entered locked with REPLY_LOCK and it returns unlocked!

   Used for locally originated transactions (is_local(t)): no reply is
   relayed upstream, only stored/inspected and callbacks are run.
   Params mirror relay_reply(): t/p_msg/branch/msg_status identify the
   incoming reply; *cancel_bitmap is zeroed here and filled by
   t_should_relay_response() with branches to cancel.
   Returns the rps value from t_should_relay_response(), or RPS_ERROR
   (after preparing cancels and putting t on the wait timer).
*/
enum rps local_reply( struct cell *t, struct sip_msg *p_msg, int branch,
	unsigned int msg_status, branch_bm_t *cancel_bitmap)
{
	/* how to deal with replies for local transaction */
	int local_store, local_winner;
	enum rps reply_status;
	struct sip_msg *winning_msg;
	int winning_code;
	int totag_retr;
	/* branch_bm_t cancel_bitmap; */

	/* keep warning 'var might be used un-inited' silent */
	winning_msg=0;
	winning_code=0;
	totag_retr=0;

	*cancel_bitmap=0;

	reply_status=t_should_relay_response( t, msg_status, branch,
		&local_store, &local_winner, cancel_bitmap, p_msg );
	DBG("DEBUG: local_reply: branch=%d, save=%d, winner=%d\n",
		branch, local_store, local_winner );
	if (local_store) {
		if (!store_reply(t, branch, p_msg))
			goto error;
	}
	if (local_winner>=0) {
		/* pick the winning reply: the current one or a stored one */
		winning_msg= branch==local_winner
			? p_msg : t->uac[local_winner].reply;
		if (winning_msg==FAKED_REPLY) {
			t_stats_replied_locally();
			winning_code = branch==local_winner
				? msg_status : t->uac[local_winner].last_received;
		} else {
			winning_code=winning_msg->REPLY_STATUS;
		}
		t->uas.status = winning_code;
		update_reply_stats( winning_code );
		if (unlikely(is_invite(t) && winning_msg!=FAKED_REPLY &&
				winning_code>=200 && winning_code <300 &&
				has_tran_tmcbs(t, TMCB_LOCAL_COMPLETED) )) {
			totag_retr=update_totag_set(t, winning_msg);
		}
	}
	UNLOCK_REPLIES(t);
	/* run callbacks only after the reply lock is released */
	if (local_winner >= 0
		&& cfg_get(tm, tm_cfg, pass_provisional_replies)
		&& winning_code < 200) {
		/* no retr. detection for provisional replies &
		 * TMCB_LOCAL_RESPONSE_OUT */
		if (unlikely(has_tran_tmcbs(t, TMCB_LOCAL_RESPONSE_OUT) )) {
			run_trans_callbacks( TMCB_LOCAL_RESPONSE_OUT, t, 0,
					winning_msg, winning_code);
		}
	}
	if (local_winner>=0 && winning_code>=200 ) {
		DBG("DEBUG: local transaction completed\n");
		/* totag_retr!=0 means this to-tag was already seen -> retransmission,
		 * so don't re-run the completion callback */
		if (!totag_retr) {
			if (unlikely(has_tran_tmcbs(t,TMCB_LOCAL_COMPLETED) ))
				run_trans_callbacks( TMCB_LOCAL_COMPLETED, t, 0,
						winning_msg, winning_code );
		}
	}
	return reply_status;

error:
	prepare_to_cancel(t, cancel_bitmap, 0);
	UNLOCK_REPLIES(t);
	cleanup_uac_timers(t);
	if (p_msg && p_msg!=FAKED_REPLY && get_cseq(p_msg)->method.len==INVITE_LEN
		&& memcmp( get_cseq(p_msg)->method.s, INVITE, INVITE_LEN)==0)
		cancel_uacs( t, *cancel_bitmap, F_CANCEL_B_KILL);
	*cancel_bitmap=0; /* we've already took care of everything */
	put_on_wait(t);
	return RPS_ERROR;
}
/* This function is called whenever a reply for our module is received;
 * we need to register this function on module initialization;
 * Returns : 0 - core router stops
 *           1 - core router relay statelessly
 *
 * Main reply entry point: matches the reply to a transaction (t_check),
 * handles local-CANCEL replies, (re)builds/sends ACKs for INVITE final
 * replies, runs the configured onreply_route, optionally blacklists 503
 * sources and attempts DNS failover, and finally hands the reply to
 * local_reply() or relay_reply() depending on the transaction type.
 */
int reply_received( struct sip_msg *p_msg )
{
	int msg_status;
	int last_uac_status;
	char *ack;
	unsigned int ack_len;
	int branch;
	/* has the transaction completed now and we need to clean-up? */
	int reply_status;
	int onreply_route;
	branch_bm_t cancel_bitmap;
	struct ua_client *uac;
	struct cell *t;
	struct dest_info lack_dst;
	avp_list_t* backup_user_from, *backup_user_to;
	avp_list_t* backup_domain_from, *backup_domain_to;
	avp_list_t* backup_uri_from, *backup_uri_to;
#ifdef WITH_XAVP
	sr_xavp_t **backup_xavps;
#endif
	int replies_locked;
#ifdef USE_DNS_FAILOVER
	int branch_ret;
	int prev_branch;
#endif
#ifdef USE_DST_BLACKLIST
	int blst_503_timeout;
	struct hdr_field* hf;
#endif
#ifdef TMCB_ONSEND
	struct tmcb_params onsend_params;
#endif
	struct run_act_ctx ctx;

	/* make sure we know the associated transaction ... */
	if (t_check( p_msg , &branch )==-1)
		goto trans_not_found;
	/*... if there is none, tell the core router to fwd statelessly */
	t=get_t();
	if ( (t==0)||(t==T_UNDEFINED))
		goto trans_not_found;
	if (unlikely(branch==T_BR_UNDEFINED))
		BUG("invalid branch, please report to [email protected]\n");
	tm_ctx_set_branch_index(branch);
	cancel_bitmap=0;
	msg_status=p_msg->REPLY_STATUS;
	replies_locked=0;

	uac=&t->uac[branch];
	DBG("DEBUG: reply_received: org. status uas=%d, "
		"uac[%d]=%d local=%d is_invite=%d)\n",
		t->uas.status, branch, uac->last_received,
		is_local(t), is_invite(t));
	last_uac_status=uac->last_received;

	/* it's a cancel ... ? */
	if (get_cseq(p_msg)->method.len==CANCEL_LEN
		&& memcmp( get_cseq(p_msg)->method.s, CANCEL, CANCEL_LEN)==0
		/* .. which is not e2e ? ... */
		&& is_invite(t) ) {
		/* ... then just stop timers */
		if ( msg_status >= 200 )
			stop_rb_timers(&uac->local_cancel); /* stop retr & fr */
		else
			stop_rb_retr(&uac->local_cancel); /* stop only retr */
		DBG("DEBUG: reply to local CANCEL processed\n");
		goto done;
	}

	onreply_route=t->on_reply;
	if ( msg_status >= 200 ){
#ifdef TM_ONREPLY_FINAL_DROP_OK
#warning Experimental tm onreply_route final reply DROP support active
		if (onreply_route)
			/* stop only retr., but leave the final reply timers on, in case
			   the final reply is dropped in the on_reply route */
			stop_rb_retr(&uac->request);
		else
#endif /* TM_ONREPLY_FINAL_DROP_OK */
		/* stop final response timer & retr. if I got a
		   final response */
		stop_rb_timers(&uac->request);
		/* acknowledge negative INVITE replies (do it before detailed
		 * on_reply processing, which may take very long, like if it
		 * is attempted to establish a TCP connection to a fail-over dst */
		if (is_invite(t)) {
			if (msg_status >= 300) {
				/* hop-by-hop ACK for negative final replies */
				ack = build_ack(p_msg, t, branch, &ack_len);
				if (ack) {
#ifdef TMCB_ONSEND
					if (SEND_PR_BUFFER(&uac->request, ack, ack_len)>=0)
						if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_SENT))){
							INIT_TMCB_ONSEND_PARAMS(onsend_params,
								t->uas.request, p_msg, &uac->request,
								&uac->request.dst, ack, ack_len,
								TMCB_LOCAL_F, branch, TYPE_LOCAL_ACK);
							run_onsend_callbacks2(TMCB_REQUEST_SENT, t,
								&onsend_params);
						}
#else
					SEND_PR_BUFFER(&uac->request, ack, ack_len);
#endif
					shm_free(ack);
				}
			} else if (is_local(t) /*&& 200 <= msg_status < 300*/) {
				/* locally originated INVITE: ACK 2xx replies ourselves */
				ack = build_local_ack(p_msg, t, branch, &ack_len, &lack_dst);
				if (ack) {
					if (msg_send(&lack_dst, ack, ack_len)<0)
						LOG(L_ERR, "Error while sending local ACK\n");
#ifdef TMCB_ONSEND
					else if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_SENT))){
						INIT_TMCB_ONSEND_PARAMS(onsend_params,
							t->uas.request, p_msg, &uac->request,
							&lack_dst, ack, ack_len, TMCB_LOCAL_F,
							branch, TYPE_LOCAL_ACK);
						run_onsend_callbacks2(TMCB_REQUEST_SENT, t,
							&onsend_params);
					}
#endif
#ifndef WITH_AS_SUPPORT
					shm_free(ack);
#endif
				}
			}
		}
	}else{
		/* if branch already canceled re-transmit or generate cancel
		 * TODO: check if it really makes sense to do it for non-invites too */
		if (uac->request.flags & F_RB_CANCELED){
			if (uac->local_cancel.buffer_len){
				membar_read(); /* make sure we get the current value of
						local_cancel */
				/* re-transmit if cancel already built */
				DBG("tm: reply_received: branch CANCEL retransmit\n");
#ifdef TMCB_ONSEND
				if (SEND_BUFFER( &uac->local_cancel)>=0){
					if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_SENT)))
						run_onsend_callbacks(TMCB_REQUEST_SENT,
							&uac->local_cancel,
							0, 0, TMCB_LOCAL_F);
				}
#else
				SEND_BUFFER( &uac->local_cancel );
#endif
				/* retrs. should be already started so do nothing */
			}else if (atomic_cmpxchg_long((void*)&uac->local_cancel.buffer, 0,
						(long)BUSY_BUFFER)==0){
				/* try to rebuild it if empty (not set or marked as BUSY).
				 * if BUSY or set just exit, a cancel will be (or was) sent
				 * shortly on this branch */
				DBG("tm: reply_received: branch CANCEL created\n");
				cancel_branch(t, branch, F_CANCEL_B_FORCE_C);
			}
			goto done; /* nothing to do */
		}
		if (is_invite(t)){
			/* stop only retr. (and not fr) */
			stop_rb_retr(&uac->request);
		}else{
			/* non-invite: increase retransmissions interval (slow now) */
			switch_rb_retr_to_t2(&uac->request);
		}
	}
	/* pre-set the ignore BLST_503 flag in the message, if the
	   corresponding branch had it set on send */
	p_msg->fwd_send_flags.blst_imask|=
		uac->request.dst.send_flags.blst_imask & BLST_503;
	/* processing of on_reply block */
	if (onreply_route) {
		set_route_type(TM_ONREPLY_ROUTE);
		/* transfer transaction flag to message context */
		if (t->uas.request) p_msg->flags=t->uas.request->flags;
		/* set the as avp_list the one from transaction */

		backup_uri_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, &t->uri_avps_from );
		backup_uri_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &t->uri_avps_to );
		backup_user_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, &t->user_avps_from );
		backup_user_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &t->user_avps_to );
		backup_domain_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, &t->domain_avps_from );
		backup_domain_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, &t->domain_avps_to );
#ifdef WITH_XAVP
		backup_xavps = xavp_set_list(&t->xavps_list);
#endif
		setbflagsval(0, uac->branch_flags);
		/* Pre- and post-script callbacks have already
		 * been executed by the core. (Miklos)
		 */
		/* lock onreply_route, for safe avp usage */
		LOCK_REPLIES( t );
		replies_locked=1;
		run_top_route(onreply_rt.rlist[onreply_route], p_msg, &ctx);
		/* transfer current message context back to t */
		if (t->uas.request) t->uas.request->flags=p_msg->flags;
		getbflagsval(0, &uac->branch_flags);
		/* restore original avp list */
		set_avp_list( AVP_TRACK_FROM | AVP_CLASS_URI, backup_uri_from );
		set_avp_list( AVP_TRACK_TO | AVP_CLASS_URI, backup_uri_to );
		set_avp_list( AVP_TRACK_FROM | AVP_CLASS_USER, backup_user_from );
		set_avp_list( AVP_TRACK_TO | AVP_CLASS_USER, backup_user_to );
		set_avp_list( AVP_TRACK_FROM | AVP_CLASS_DOMAIN, backup_domain_from );
		set_avp_list( AVP_TRACK_TO | AVP_CLASS_DOMAIN, backup_domain_to );
#ifdef WITH_XAVP
		xavp_set_list(backup_xavps);
#endif
		/* handle a possible DROP in the script, but only if this
		   is not a final reply (final replies already stop the timers
		   and droping them might leave a transaction living forever) */
#ifdef TM_ONREPLY_FINAL_DROP_OK
		if (unlikely(ctx.run_flags&DROP_R_F))
#else
		if (unlikely((ctx.run_flags&DROP_R_F) && (msg_status<200)))
#endif /* TM_ONREPLY_FINAL_DROP_OK */
		{
			if (likely(replies_locked)) {
				replies_locked = 0;
				UNLOCK_REPLIES( t );
			}
			goto done;
		}
#ifdef TM_ONREPLY_FINAL_DROP_OK
		if (msg_status >= 200) {
			/* stop final reply timers, now that we executed the onreply route
			   and the reply was not DROPed */
			if (likely(replies_locked)){
				/* if final reply => we have to execute stop_rb_timers, but
				   with replies unlocked to avoid a possible deadlock
				   (if the timer is currently running, stop_rb_timers()
				   will wait until the timer handler ends, but the
				   final_response_handler() will try to lock replies =>
				   deadlock).
				*/
				UNLOCK_REPLIES( t );
				replies_locked=0;
			}
			stop_rb_timers(&uac->request);
		}
#endif /* TM_ONREPLY_FINAL_DROP_OK */
	}
#ifdef USE_DST_BLACKLIST
	/* add temporary to the blacklist the source of a 503 reply */
	if ( (msg_status==503) &&
		cfg_get(tm, tm_cfg, tm_blst_503) &&
		/* check if the request sent on the branch had the the
		   blst 503 ignore flags set or it was set in the onreply_r*/
		should_blacklist_su(BLST_503, &p_msg->fwd_send_flags,
					p_msg->rcv.proto, &p_msg->rcv.src_su)
		){
		blst_503_timeout=cfg_get(tm, tm_cfg, tm_blst_503_default);
		/* honor a Retry-After header if present, clamped to min/max */
		if ((parse_headers(p_msg, HDR_RETRY_AFTER_F, 0)==0) &&
			(p_msg->parsed_flag & HDR_RETRY_AFTER_F)){
			for (hf=p_msg->headers; hf; hf=hf->next)
				if (hf->type==HDR_RETRY_AFTER_T){
					/* found */
					blst_503_timeout=(unsigned)(unsigned long)hf->parsed;
					blst_503_timeout=MAX_unsigned(blst_503_timeout,
						cfg_get(tm, tm_cfg, tm_blst_503_min));
					blst_503_timeout=MIN_unsigned(blst_503_timeout,
						cfg_get(tm, tm_cfg, tm_blst_503_max));
					break;
				}
		}
		if (blst_503_timeout){
			dst_blacklist_force_su_to(BLST_503, p_msg->rcv.proto,
						&p_msg->rcv.src_su, p_msg,
						S_TO_TICKS(blst_503_timeout));
		}
	}
#endif /* USE_DST_BLACKLIST */
#ifdef USE_DNS_FAILOVER
	/* if this is a 503 reply, and the destination resolves to more ips,
	 * add another branch/uac.
	 * This code is out of LOCK_REPLIES() to minimize the time the
	 * reply lock is held (the lock won't be held while sending the
	 * message)*/
	if (cfg_get(core, core_cfg, use_dns_failover) && (msg_status==503)) {
		branch_ret=add_uac_dns_fallback(t, t->uas.request,
							uac, !replies_locked);
		prev_branch=-1;
		/* unlock replies to avoid sending() while holding a lock */
		if (unlikely(replies_locked)) {
			UNLOCK_REPLIES( t );
			replies_locked = 0;
		}
		while((branch_ret>=0) &&(branch_ret!=prev_branch)){
			prev_branch=branch_ret;
			branch_ret=t_send_branch(t, branch_ret, t->uas.request , 0, 1);
		}
	}
#endif
	/* local_reply()/relay_reply() expect to be entered with the
	 * reply lock held (they unlock it themselves) */
	if (unlikely(!replies_locked)){
		LOCK_REPLIES( t );
		replies_locked=1;
	}
	if ( is_local(t) ) {
		reply_status=local_reply( t, p_msg, branch, msg_status, &cancel_bitmap );
		if (reply_status == RPS_COMPLETED) {
			/* no more UAC FR/RETR (if I received a 2xx, there may
			 * be still pending branches ...
			 */
			cleanup_uac_timers( t );
			if (is_invite(t)) cancel_uacs(t, cancel_bitmap, F_CANCEL_B_KILL);
			/* There is no need to call set_final_timer because we know
			 * that the transaction is local */
			put_on_wait(t);
		}else if (cancel_bitmap){
			/* cancel everything, even non-INVITEs (e.g in case of 6xx), use
			 * cancel_b_method for canceling unreplied branches */
			cancel_uacs(t, cancel_bitmap, cfg_get(tm,tm_cfg, cancel_b_flags));
		}
	} else {
		reply_status=relay_reply( t, p_msg, branch, msg_status,
					&cancel_bitmap, 1 );
		if (reply_status == RPS_COMPLETED) {
			/* no more UAC FR/RETR (if I received a 2xx, there may
			   be still pending branches ...
			*/
			cleanup_uac_timers( t );
			/* 2xx is a special case: we can have a COMPLETED request
			 * with branches still open => we have to cancel them */
			if (is_invite(t) && cancel_bitmap)
				cancel_uacs( t, cancel_bitmap, F_CANCEL_B_KILL);
			/* FR for negative INVITES, WAIT anything else */
			/* Call to set_final_timer is embedded in relay_reply to avoid
			 * race conditions when reply is sent out and an ACK to stop
			 * retransmissions comes before retransmission timer is set.*/
		}else if (cancel_bitmap){
			/* cancel everything, even non-INVITEs (e.g in case of 6xx), use
			 * cancel_b_method for canceling unreplied branches */
			cancel_uacs(t, cancel_bitmap, cfg_get(tm,tm_cfg, cancel_b_flags));
		}
	}
	uac->request.flags|=F_RB_REPLIED;

	if (reply_status==RPS_ERROR)
		goto done;

	/* update FR/RETR timers on provisional replies */
	if (is_invite(t) && msg_status<200 &&
		( cfg_get(tm, tm_cfg, restart_fr_on_each_reply) ||
		( (last_uac_status<msg_status) &&
			((msg_status>=180) || (last_uac_status==0)) )
		) ) { /* provisional now */
		restart_rb_fr(& uac->request, t->fr_inv_timeout);
		uac->request.flags|=F_RB_FR_INV; /* mark fr_inv */
	} /* provisional replies */

done:
	tm_ctx_set_branch_index(0);
	/* we are done with the transaction, so unref it - the reference
	 * was incremented by t_check() function -bogdan*/
	t_unref(p_msg);
	/* don't try to relay statelessly neither on success
	   (we forwarded statefully) nor on error; on troubles,
	   simply do nothing; that will make the other party to
	   retransmit; hopefully, we'll then be better off */
	return 0;

trans_not_found:
	/* transaction context was not found */
	if (goto_on_sl_reply) {
		/* The script writer has a chance to decide whether to
		 * forward the reply or not.
		 * Pre- and post-script callbacks have already
		 * been executed by the core. (Miklos)
		 */
		return run_top_route(onreply_rt.rlist[goto_on_sl_reply], p_msg, 0);
	} else {
		/* let the core forward the reply */
		return 1;
	}
}
  2061. int t_reply_with_body( struct cell *trans, unsigned int code,
  2062. char * text, char * body, char * new_header, char * to_tag )
  2063. {
  2064. struct lump_rpl *hdr_lump;
  2065. struct lump_rpl *body_lump;
  2066. str s_to_tag;
  2067. str rpl;
  2068. int ret;
  2069. struct bookmark bm;
  2070. s_to_tag.s = to_tag;
  2071. if(to_tag)
  2072. s_to_tag.len = strlen(to_tag);
  2073. else
  2074. s_to_tag.len = 0;
  2075. /* mark the transaction as replied */
  2076. if (code>=200) set_kr(REQ_RPLD);
  2077. /* add the lumps for new_header and for body (by bogdan) */
  2078. if (new_header && strlen(new_header)) {
  2079. hdr_lump = add_lump_rpl( trans->uas.request, new_header,
  2080. strlen(new_header), LUMP_RPL_HDR );
  2081. if ( !hdr_lump ) {
  2082. LOG(L_ERR,"ERROR:tm:t_reply_with_body: cannot add hdr lump\n");
  2083. goto error;
  2084. }
  2085. } else {
  2086. hdr_lump = 0;
  2087. }
  2088. /* body lump */
  2089. if(body && strlen(body)) {
  2090. body_lump = add_lump_rpl( trans->uas.request, body, strlen(body),
  2091. LUMP_RPL_BODY );
  2092. if (body_lump==0) {
  2093. LOG(L_ERR,"ERROR:tm:t_reply_with_body: cannot add body lump\n");
  2094. goto error_1;
  2095. }
  2096. } else {
  2097. body_lump = 0;
  2098. }
  2099. rpl.s = build_res_buf_from_sip_req(
  2100. code, text, &s_to_tag,
  2101. trans->uas.request, (unsigned int*)&rpl.len, &bm);
  2102. /* since the msg (trans->uas.request) is a clone into shm memory, to avoid
  2103. * memory leak or crashing (lumps are create in private memory) I will
  2104. * remove the lumps by myself here (bogdan) */
  2105. if ( hdr_lump ) {
  2106. unlink_lump_rpl( trans->uas.request, hdr_lump);
  2107. free_lump_rpl( hdr_lump );
  2108. }
  2109. if( body_lump ) {
  2110. unlink_lump_rpl( trans->uas.request, body_lump);
  2111. free_lump_rpl( body_lump );
  2112. }
  2113. if (rpl.s==0) {
  2114. LOG(L_ERR,"ERROR:tm:t_reply_with_body: failed in doing "
  2115. "build_res_buf_from_sip_req()\n");
  2116. goto error;
  2117. }
  2118. DBG("t_reply_with_body: buffer computed\n");
  2119. // frees 'res.s' ... no panic !
  2120. ret=_reply_light( trans, rpl.s, rpl.len, code, text,
  2121. s_to_tag.s, s_to_tag.len, 1 /* lock replies */, &bm );
  2122. /* this is ugly hack -- the function caller may wish to continue with
  2123. * transaction and I unref; however, there is now only one use from
  2124. * vm/fifo_vm_reply and I'm currently to lazy to export UNREF; -jiri
  2125. */
  2126. UNREF(trans);
  2127. return ret;
  2128. error_1:
  2129. if ( hdr_lump ) {
  2130. unlink_lump_rpl( trans->uas.request, hdr_lump);
  2131. free_lump_rpl( hdr_lump );
  2132. }
  2133. error:
  2134. return -1;
  2135. }
/* drops all the replies to make sure
 * that none of them is picked up again
 *
 * Param v: value stored in the module-global 'drop_replies' flag;
 * the flag is checked elsewhere after failure_route finishes.
 */
void t_drop_replies(int v)
{
	/* It is too risky to free the replies that are in shm mem
	at the middle of failure_route block, because other functions might
	need them as well. And it can also happen that the current reply is not yet
	in shm mem, we are just going to clone it. So better to set a flag
	and check it after failure_route has ended. (Miklos) */
	drop_replies = v;
}
/* NOTE(review): the whole send_reply() below is compiled out (#if 0);
 * it is an unused str-based variant of t_reply_with_body() kept for
 * reference only. */
#if 0
static int send_reply(struct cell *trans, unsigned int code, str* text, str* body, str* headers, str* to_tag)
{
	struct lump_rpl *hdr_lump, *body_lump;
	str rpl;
	int ret;
	struct bookmark bm;

	/* mark the transaction as replied */
	if (code >= 200) set_kr(REQ_RPLD);

	/* add the lumps for new_header and for body (by bogdan) */
	if (headers && headers->len) {
		hdr_lump = add_lump_rpl(trans->uas.request, headers->s, headers->len, LUMP_RPL_HDR);
		if (!hdr_lump) {
			LOG(L_ERR, "send_reply: cannot add hdr lump\n");
			goto sr_error;
		}
	} else {
		hdr_lump = 0;
	}

	/* body lump */
	if (body && body->len) {
		body_lump = add_lump_rpl(trans->uas.request, body->s, body->len, LUMP_RPL_BODY);
		if (body_lump == 0) {
			LOG(L_ERR,"send_reply: cannot add body lump\n");
			goto sr_error_1;
		}
	} else {
		body_lump = 0;
	}

	/* We can safely zero-terminate the text here, because it is followed
	 * by next line in the received message
	 */
	text->s[text->len] = '\0';
	rpl.s = build_res_buf_from_sip_req(code, text->s, to_tag, trans->uas.request, (unsigned int*)&rpl.len, &bm);

	/* since the msg (trans->uas.request) is a clone into shm memory, to avoid
	 * memory leak or crashing (lumps are create in private memory) I will
	 * remove the lumps by myself here (bogdan) */
	if (hdr_lump) {
		unlink_lump_rpl(trans->uas.request, hdr_lump);
		free_lump_rpl(hdr_lump);
	}
	if (body_lump) {
		unlink_lump_rpl(trans->uas.request, body_lump);
		free_lump_rpl(body_lump);
	}

	if (rpl.s == 0) {
		LOG(L_ERR,"send_reply: failed in build_res_buf_from_sip_req\n");
		goto sr_error;
	}

	ret = _reply_light(trans, rpl.s, rpl.len, code, text->s, to_tag->s, to_tag->len, 1 /* lock replies */, &bm);
	/* this is ugly hack -- the function caller may wish to continue with
	 * transaction and I unref; however, there is now only one use from
	 * vm/fifo_vm_reply and I'm currently to lazy to export UNREF; -jiri
	 */
	UNREF(trans);
	return ret;

sr_error_1:
	if (hdr_lump) {
		unlink_lump_rpl(trans->uas.request, hdr_lump);
		free_lump_rpl(hdr_lump);
	}
sr_error:
	return -1;
}
#endif
/* Documentation strings for the "tm.reply" RPC command (0-terminated). */
const char* rpc_reply_doc[2] = {
	"Reply transaction",
	0
};
  2217. /*
  2218. Syntax:
  2219. ":tm.reply:[response file]\n
  2220. code\n
  2221. reason\n
  2222. trans_id\n
  2223. to_tag\n
  2224. [new headers]\n
  2225. \n
  2226. [Body]\n
  2227. .\n
  2228. \n"
  2229. */
  2230. void rpc_reply(rpc_t* rpc, void* c)
  2231. {
  2232. int ret;
  2233. struct cell *trans;
  2234. unsigned int hash_index, label, code;
  2235. str ti;
  2236. char* reason, *body, *headers, *tag;
  2237. if (rpc->scan(c, "d", &code) < 1) {
  2238. rpc->fault(c, 400, "Reply code expected");
  2239. return;
  2240. }
  2241. if (rpc->scan(c, "s", &reason) < 1) {
  2242. rpc->fault(c, 400, "Reason phrase expected");
  2243. return;
  2244. }
  2245. if (rpc->scan(c, "s", &ti.s) < 1) {
  2246. rpc->fault(c, 400, "Transaction ID expected");
  2247. return;
  2248. }
  2249. ti.len = strlen(ti.s);
  2250. if (rpc->scan(c, "s", &tag) < 1) {
  2251. rpc->fault(c, 400, "To tag expected");
  2252. return;
  2253. }
  2254. if (rpc->scan(c, "s", &headers) < 0) return;
  2255. if (rpc->scan(c, "s", &body) < 0) return;
  2256. if(sscanf(ti.s,"%u:%u", &hash_index, &label) != 2) {
  2257. ERR("Invalid trans_id (%s)\n", ti.s);
  2258. rpc->fault(c, 400, "Invalid transaction ID");
  2259. return;
  2260. }
  2261. DBG("hash_index=%u label=%u\n", hash_index, label);
  2262. if( t_lookup_ident(&trans, hash_index, label) < 0 ) {
  2263. ERR("Lookup failed\n");
  2264. rpc->fault(c, 481, "No such transaction");
  2265. return;
  2266. }
  2267. /* it's refcounted now, t_reply_with body unrefs for me -- I can
  2268. * continue but may not use T anymore */
  2269. ret = t_reply_with_body(trans, code, reason, body, headers, tag);
  2270. if (ret < 0) {
  2271. ERR("Reply failed\n");
  2272. rpc->fault(c, 500, "Reply failed");
  2273. return;
  2274. }
  2275. }