/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"
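
/*
 * Detach all of this wsi's scheduled-event entries (timeout, hrtimer and
 * validity) from their owning per-thread sul lists.
 */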
void
__lws_wsi_remove_from_sul(struct lws *wsi)
{
	//struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	//lwsl_notice("%s: wsi %p, to %p, hr %p\n", __func__, wsi,
	//	      &wsi->sul_timeout.list, &wsi->sul_hrtimer.list);

	// lws_dll2_describe(&pt->pt_sul_owner, "pre-remove");
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_dll2_remove(&wsi->sul_hrtimer.list);
	lws_dll2_remove(&wsi->sul_validity.list);
	// lws_dll2_describe(&pt->pt_sul_owner, "post-remove");
}

/*
 * hrtimer
 */

static void
lws_sul_hrtimer_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_hrtimer);

	if (wsi->a.protocol &&
	    wsi->a.protocol->callback(wsi, LWS_CALLBACK_TIMER,
				      wsi->user_space, NULL, 0))
		__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				     "hrtimer cb errored");
}

void
__lws_set_timer_usecs(struct lws *wsi, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	wsi->sul_hrtimer.cb = lws_sul_hrtimer_cb;
	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
			    &wsi->sul_hrtimer, us);
}

void
lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
{
	__lws_set_timer_usecs(wsi, usecs);
}
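
/*
 * Illustrative sketch (not part of this file): how a protocol callback
 * might use lws_set_timer_usecs().  When the scheduled time elapses,
 * LWS_CALLBACK_TIMER is delivered to the wsi's protocol callback;
 * returning nonzero from that callback closes the wsi (see
 * lws_sul_hrtimer_cb() above).  The callback_myproto name and the 5s
 * interval are arbitrary example choices.
 *
 *	static int
 *	callback_myproto(struct lws *wsi, enum lws_callback_reasons reason,
 *			 void *user, void *in, size_t len)
 *	{
 *		switch (reason) {
 *		case LWS_CALLBACK_ESTABLISHED:
 *			lws_set_timer_usecs(wsi, 5 * LWS_US_PER_SEC);
 *			break;
 *		case LWS_CALLBACK_TIMER:
 *			// do the periodic work, then re-arm if still wanted
 *			lws_set_timer_usecs(wsi, 5 * LWS_US_PER_SEC);
 *			break;
 *		default:
 *			break;
 *		}
 *
 *		return 0;
 *	}
 */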

/*
 * wsi timeout
 */

static void
lws_sul_wsitimeout_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_timeout);
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lws_stats_bump(pt, LWSSTATS_C_TIMEOUTS, 1);

	/* no need to log normal idle keepalive timeout */
	// if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_info("wsi %p: TIMEDOUT WAITING on %d "
			  "(did hdr %d, ah %p, wl %d)\n",
			  (void *)wsi, wsi->pending_timeout,
			  wsi->hdr_parsing_completed, wsi->http.ah,
			  pt->http.ah_wait_list_length);
#if defined(LWS_WITH_CGI)
	if (wsi->http.cgi)
		lwsl_notice("CGI timeout: %s\n", wsi->http.cgi->summary);
#endif
#else
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_info("wsi %p: TIMEDOUT WAITING on %d ", (void *)wsi,
			  wsi->pending_timeout);
#endif
	/* cgi timeout */
	if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
		/*
		 * Since he failed a timeout, he already had a chance to
		 * do something and was unable to... that includes
		 * situations like half closed connections. So process
		 * this "failed timeout" close as a violent death and
		 * don't try to do protocol cleanup like flush partials.
		 */
		wsi->socket_is_permanently_unusable = 1;

#if defined(LWS_WITH_CLIENT)
	if (lwsi_state(wsi) == LRS_WAITING_SSL)
		lws_inform_client_conn_fail(wsi,
				(void *)"Timed out waiting SSL", 21);
#endif

	lws_pt_lock(pt, __func__);
	__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "timeout");
	lws_pt_unlock(pt);
}
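
/*
 * The leading-underscore variant assumes the caller already holds the pt
 * lock; the public lws_set_timeout() wrapper below takes the context and
 * pt locks itself before calling in here.
 */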
void
__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	wsi->sul_timeout.cb = lws_sul_wsitimeout_cb;
	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
			    &wsi->sul_timeout,
			    ((lws_usec_t)secs) * LWS_US_PER_SEC);

	lwsl_debug("%s: %p: %d secs, reason %d\n", __func__, wsi, secs, reason);

	wsi->pending_timeout = reason;
}

void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!secs)
		goto bail;

	if (secs == LWS_TO_KILL_SYNC) {
		lwsl_debug("synchronously killing %p\n", wsi);
		lws_context_unlock(pt->context);
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "to sync kill");
		return;
	}

	if (secs == LWS_TO_KILL_ASYNC)
		secs = 0;

	// assert(!secs || !wsi->mux_stream_immortal);
	if (secs && wsi->mux_stream_immortal)
		lwsl_err("%s: on immortal stream %d %d\n", __func__, reason,
			 secs);

	lws_pt_lock(pt, __func__);
	__lws_set_timeout(wsi, reason, secs);
	lws_pt_unlock(pt);

bail:
	lws_context_unlock(pt->context);
}
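
/*
 * Illustrative sketch (not part of this file): typical uses of
 * lws_set_timeout() from user code.  A secs value of 0 cancels any
 * pending timeout, LWS_TO_KILL_ASYNC arranges for the wsi to be closed
 * via the timeout path on a following service loop pass, and
 * LWS_TO_KILL_SYNC closes it synchronously before returning.  The 30s
 * value below is just an example choice.
 *
 *	lws_set_timeout(wsi, PENDING_TIMEOUT_USER_OK, 30);
 *	lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
 *	lws_set_timeout(wsi, PENDING_TIMEOUT_USER_OK, LWS_TO_KILL_ASYNC);
 */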

void
lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!us)
		return;

	lws_pt_lock(pt, __func__);
	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
			    &wsi->sul_timeout, us);

	lwsl_notice("%s: %p: %llu us, reason %d\n", __func__, wsi,
		    (unsigned long long)us, reason);

	wsi->pending_timeout = reason;
	lws_pt_unlock(pt);
}

#if defined(LWS_WITH_DEPRECATED_THINGS)

/* requires context + vh lock */

int
__lws_timed_callback_remove(struct lws_vhost *vh, struct lws_timed_vh_protocol *p)
{
	lws_start_foreach_llp_safe(struct lws_timed_vh_protocol **, pt,
				   vh->timed_vh_protocol_list, next) {
		if (*pt == p) {
			*pt = p->next;
			lws_dll2_remove(&p->sul.list);
			lws_free(p);

			return 0;
		}
	} lws_end_foreach_llp_safe(pt);

	return 1;
}

void
lws_sul_timed_callback_vh_protocol_cb(lws_sorted_usec_list_t *sul)
{
	struct lws_timed_vh_protocol *tvp = lws_container_of(sul,
					struct lws_timed_vh_protocol, sul);
	lws_fakewsi_def_plwsa(&tvp->vhost->context->pt[0]);

	lws_fakewsi_prep_plwsa_ctx(tvp->vhost->context);

	plwsa->vhost = tvp->vhost; /* not a real bound wsi */
	plwsa->protocol = tvp->protocol;

	lwsl_debug("%s: timed cb: vh %s, protocol %s, reason %d\n", __func__,
		   tvp->vhost->name, tvp->protocol->name, tvp->reason);

	tvp->protocol->callback((struct lws *)plwsa, tvp->reason, NULL, NULL, 0);

	__lws_timed_callback_remove(tvp->vhost, tvp);
}

int
lws_timed_callback_vh_protocol_us(struct lws_vhost *vh,
				  const struct lws_protocols *prot, int reason,
				  lws_usec_t us)
{
	struct lws_timed_vh_protocol *p = (struct lws_timed_vh_protocol *)
			lws_malloc(sizeof(*p), "timed_vh");

	if (!p)
		return 1;

	memset(p, 0, sizeof(*p));

	p->tsi_req = lws_pthread_self_to_tsi(vh->context);
	if (p->tsi_req < 0) /* not called from a service thread --> tsi 0 */
		p->tsi_req = 0;

	lws_context_lock(vh->context, __func__); /* context ----------------- */

	p->protocol = prot;
	p->reason = reason;
	p->vhost = vh;

	p->sul.cb = lws_sul_timed_callback_vh_protocol_cb;
	/* list is always at the very top of the sul */
	__lws_sul_insert(&vh->context->pt[p->tsi_req].pt_sul_owner,
			 (lws_sorted_usec_list_t *)&p->sul.list, us);

	// lwsl_notice("%s: %s.%s %d\n", __func__, vh->name, prot->name, secs);

	lws_vhost_lock(vh); /* vhost ---------------------------------------- */
	p->next = vh->timed_vh_protocol_list;
	vh->timed_vh_protocol_list = p;
	lws_vhost_unlock(vh); /* -------------------------------------- vhost */

	lws_context_unlock(vh->context); /* ------------------------- context */

	return 0;
}

int
lws_timed_callback_vh_protocol(struct lws_vhost *vh,
			       const struct lws_protocols *prot, int reason,
			       int secs)
{
	return lws_timed_callback_vh_protocol_us(vh, prot, reason,
					((lws_usec_t)secs) * LWS_US_PER_SEC);
}

#endif
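
/*
 * Illustrative sketch (not part of this file): the deprecated timed
 * vhost-protocol callback above is driven like this.  The reason value
 * is user-chosen (LWS_CALLBACK_USER + n is the usual convention) and is
 * delivered to the protocol callback on a fake, vhost-bound wsi after
 * roughly the requested delay.  Only available when the library is
 * built with LWS_WITH_DEPRECATED_THINGS.
 *
 *	lws_timed_callback_vh_protocol(vhost, protocol,
 *				       LWS_CALLBACK_USER + 1, 3);
 */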

static void
lws_validity_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_validity);
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	const lws_retry_bo_t *rbo = wsi->retry_policy;

	/* one of either the ping or hangup validity threshold was crossed */

	if (wsi->validity_hup) {
		lwsl_info("%s: wsi %p: validity too old\n", __func__, wsi);
		__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				     "validity timeout");
		return;
	}

	/* schedule a protocol-dependent ping */

	lwsl_info("%s: wsi %p: scheduling validity check\n", __func__, wsi);

	if (wsi->role_ops && wsi->role_ops->issue_keepalive)
		wsi->role_ops->issue_keepalive(wsi, 0);

	/*
	 * We arrange to come back here after the additional ping to hangup time
	 * and do the hangup, unless we get validated (by, eg, a PONG) and
	 * reset the timer
	 */

	assert(rbo->secs_since_valid_hangup > rbo->secs_since_valid_ping);

	wsi->validity_hup = 1;
	__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
			    &wsi->sul_validity,
			    ((uint64_t)rbo->secs_since_valid_hangup -
				rbo->secs_since_valid_ping) * LWS_US_PER_SEC);
}
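
/*
 * Illustrative sketch (not part of this file): a retry / validity policy
 * that drives the scheduling above.  With these example numbers, if
 * nothing confirms the connection for 30s a keepalive ping is issued,
 * and if validity is still not confirmed 35s after that (65s total) the
 * connection is hung up on.  The field values are arbitrary examples;
 * the hangup threshold must exceed the ping threshold (see the assert
 * in lws_validity_cb() above).
 *
 *	static const lws_retry_bo_t retry = {
 *		.secs_since_valid_ping		= 30,
 *		.secs_since_valid_hangup	= 65,
 *	};
 */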

/*
 * The role calls this back to actually confirm validity on a particular wsi
 * (which may not be the original wsi)
 */

void
_lws_validity_confirmed_role(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	const lws_retry_bo_t *rbo = wsi->retry_policy;

	if (!rbo || !rbo->secs_since_valid_hangup)
		return;

	wsi->validity_hup = 0;
	wsi->sul_validity.cb = lws_validity_cb;

	wsi->validity_hup = rbo->secs_since_valid_ping >=
			    rbo->secs_since_valid_hangup;

	lwsl_info("%s: wsi %p: setting validity timer %ds (hup %d)\n",
		  __func__, wsi,
		  wsi->validity_hup ? rbo->secs_since_valid_hangup :
				      rbo->secs_since_valid_ping,
		  wsi->validity_hup);

	__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
			    &wsi->sul_validity,
			    ((uint64_t)(wsi->validity_hup ?
				rbo->secs_since_valid_hangup :
				rbo->secs_since_valid_ping)) * LWS_US_PER_SEC);
}

void
lws_validity_confirmed(struct lws *wsi)
{
	/*
	 * This may be a stream inside a muxed network connection... leave it
	 * to the role to figure out who actually needs to understand their
	 * validity was confirmed.
	 */
	if (!wsi->h2_stream_carries_ws && /* only if not encapsulated */
	    wsi->role_ops && wsi->role_ops->issue_keepalive)
		wsi->role_ops->issue_keepalive(wsi, 1);
}