libuv.c
/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"
#include "private-lib-event-libs-libuv.h"

#define pt_to_priv_uv(_pt) ((struct lws_pt_eventlibs_libuv *)(_pt)->evlib_pt)
#define wsi_to_priv_uv(_w) ((struct lws_wsi_eventlibs_libuv *)(_w)->evlib_wsi)

static void
lws_uv_sultimer_cb(uv_timer_t *timer
#if UV_VERSION_MAJOR == 0
		, int status
#endif
)
{
	struct lws_pt_eventlibs_libuv *ptpr = lws_container_of(timer,
				struct lws_pt_eventlibs_libuv, sultimer);
	struct lws_context_per_thread *pt = ptpr->pt;
	lws_usec_t us;

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us)
		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS(us), 0);
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}

static void
lws_uv_idle(uv_idle_t *handle
#if UV_VERSION_MAJOR == 0
		, int status
#endif
)
{
	struct lws_pt_eventlibs_libuv *ptpr = lws_container_of(handle,
				struct lws_pt_eventlibs_libuv, idle);
	struct lws_context_per_thread *pt = ptpr->pt;
	lws_usec_t us;

	lws_service_do_ripe_rxflow(pt);

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);

	/* account for sultimer */
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us)
		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS(us), 0);

	/* there is nobody who needs service forcing, shut down idle */
	uv_idle_stop(handle);

	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}

static void
lws_io_cb(uv_poll_t *watcher, int status, int revents)
{
	struct lws *wsi = (struct lws *)((uv_handle_t *)watcher)->data;
	struct lws_context *context = wsi->a.context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pollfd eventfd;

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	if (pt->is_destroyed)
		goto bail;

#if defined(WIN32) || defined(_WIN32)
	eventfd.fd = watcher->socket;
#else
	eventfd.fd = watcher->io_watcher.fd;
#endif
	eventfd.events = 0;
	eventfd.revents = 0;

	if (status < 0) {
		/*
		 * At this point status will be an UV error, like UV_EBADF,
		 * we treat all errors as LWS_POLLHUP
		 *
		 * You might want to return; instead of servicing the fd in
		 * some cases
		 */
		if (status == UV_EAGAIN)
			goto bail;

		eventfd.events |= LWS_POLLHUP;
		eventfd.revents |= LWS_POLLHUP;
	} else {
		if (revents & UV_READABLE) {
			eventfd.events |= LWS_POLLIN;
			eventfd.revents |= LWS_POLLIN;
		}
		if (revents & UV_WRITABLE) {
			eventfd.events |= LWS_POLLOUT;
			eventfd.revents |= LWS_POLLOUT;
		}
	}

	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);

	lws_service_fd_tsi(context, &eventfd, wsi->tsi);

	if (pt->destroy_self) {
		lws_context_destroy(pt->context);
		return;
	}

	uv_idle_start(&pt_to_priv_uv(pt)->idle, lws_uv_idle);

	return;

bail:
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}
/*
 * This does not actually stop the event loop.  The reason is we have to
 * pass libuv handle closures through its event loop.  So this tries to
 * close all wsi and sets a flag; when all the wsi closures have
 * finalized, we actually stop the libuv event loops.
 */
static void
lws_libuv_stop(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m;

	lwsl_err("%s\n", __func__);

	if (context->requested_kill) {
		lwsl_err("%s: ignoring\n", __func__);
		return;
	}

	context->requested_kill = 1;

	m = context->count_threads;
	context->being_destroyed = 1;

	/*
	 * Phase 1: start the close of every dynamic uv handle
	 */

	while (m--) {
		pt = &context->pt[m];

		if (pt->pipe_wsi) {
			uv_poll_stop(wsi_to_priv_uv(pt->pipe_wsi)->w_read.pwatcher);
			lws_destroy_event_pipe(pt->pipe_wsi);
			pt->pipe_wsi = NULL;
		}

		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);

			if (!wsi)
				continue;
			lws_close_free_wsi(wsi,
				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
				__func__ /* no protocol close */);
			n--;
		}
	}

	lwsl_info("%s: started closing all wsi\n", __func__);

	/* we cannot have completed... there are at least the cancel pipes */
}

static void
lws_uv_signal_handler(uv_signal_t *watcher, int signum)
{
	struct lws_context *context = watcher->data;

	if (context->eventlib_signal_cb) {
		context->eventlib_signal_cb((void *)watcher, signum);

		return;
	}

	lwsl_err("internal signal handler caught signal %d\n", signum);
	lws_libuv_stop(watcher->data);
}

static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };

/*
 * Closing Phase 2: Close callback for a static UV asset
 */
static void
lws_uv_close_cb_sa(uv_handle_t *handle)
{
	struct lws_context *context =
			LWS_UV_REFCOUNT_STATIC_HANDLE_TO_CONTEXT(handle);
	int n;

	lwsl_info("%s: sa left %d: dyn left: %d\n", __func__,
		  context->count_event_loop_static_asset_handles,
		  context->count_wsi_allocated);

	/* any static assets left? */
	if (LWS_UV_REFCOUNT_STATIC_HANDLE_DESTROYED(handle) ||
	    context->count_wsi_allocated)
		return;

	/*
	 * That's it... all wsi were down, and now every
	 * static asset lws had a UV handle for is down.
	 *
	 * Stop the loop so we can get out of here.
	 */

	for (n = 0; n < context->count_threads; n++) {
		struct lws_context_per_thread *pt = &context->pt[n];

		if (pt_to_priv_uv(pt)->io_loop && !pt->event_loop_foreign)
			uv_stop(pt_to_priv_uv(pt)->io_loop);
	}

	if (!context->pt[0].event_loop_foreign) {
		lwsl_info("%s: calling lws_context_destroy2\n", __func__);
		lws_context_destroy2(context);
	}

	lwsl_info("%s: all done\n", __func__);
}

/*
 * These must be called by protocols that want to use libuv objects directly...
 *
 * .... when the libuv object is created...
 */
void
lws_libuv_static_refcount_add(uv_handle_t *h, struct lws_context *context)
{
	LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(h, context);
}

/*
 * ... and in the close callback when the object is closed.
 */
void
lws_libuv_static_refcount_del(uv_handle_t *h)
{
	lws_uv_close_cb_sa(h);
}
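
/*
 * Illustrative sketch (not compiled, not part of lws): a protocol that
 * opens its own uv_timer_t on the lws-managed loop refcounts it with the
 * two helpers above, so context destruction can wait for libuv to finish
 * closing it.  The example_* names are hypothetical.
 */
#if 0
static uv_timer_t example_timer;

static void
example_timer_close_cb(uv_handle_t *h)
{
	/* ...and drop the refcount once libuv has really closed it */
	lws_libuv_static_refcount_del(h);
}

static void
example_open(struct lws_context *context)
{
	uv_timer_init(lws_uv_getloop(context, 0), &example_timer);
	/* take the refcount as soon as the handle exists... */
	lws_libuv_static_refcount_add((uv_handle_t *)&example_timer, context);
}

static void
example_close(void)
{
	uv_close((uv_handle_t *)&example_timer, example_timer_close_cb);
}
#endif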
static void
lws_uv_close_cb(uv_handle_t *handle)
{
}

static void
lws_uv_walk_cb(uv_handle_t *handle, void *arg)
{
	if (!uv_is_closing(handle))
		uv_close(handle, lws_uv_close_cb);
}

void
lws_close_all_handles_in_loop(uv_loop_t *loop)
{
	uv_walk(loop, lws_uv_walk_cb, NULL);
}

void
lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
{
	if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
		uv_stop(pt_to_priv_uv(&context->pt[tsi])->io_loop);
}

uv_loop_t *
lws_uv_getloop(struct lws_context *context, int tsi)
{
	if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
		return pt_to_priv_uv(&context->pt[tsi])->io_loop;

	return NULL;
}

int
lws_libuv_check_watcher_active(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi_to_priv_uv(wsi)->w_read.pwatcher;

	if (!h)
		return 0;

	return uv_is_active(h);
}

static int
elops_init_context_uv(struct lws_context *context,
		      const struct lws_context_creation_info *info)
{
	int n;

	context->eventlib_signal_cb = info->signal_cb;

	for (n = 0; n < context->count_threads; n++)
		pt_to_priv_uv(&context->pt[n])->w_sigint.context = context;

	return 0;
}
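
/*
 * Illustrative sketch (not compiled, not part of lws): an application can
 * override the internal signal handling above by passing its own handler
 * as info.signal_cb at context creation; lws_uv_signal_handler then hands
 * it the uv_signal_t as an opaque pointer.  example_signal_cb is a
 * hypothetical name.
 */
#if 0
static void
example_signal_cb(void *handle, int signum)
{
	/* handle is the uv_signal_t * that fired */
	lwsl_notice("example: caught signal %d\n", signum);
	/* ...do an orderly application shutdown here... */
}

/* at context creation time: info.signal_cb = example_signal_cb; */
#endif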
static int
elops_destroy_context1_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m = 0;

	for (n = 0; n < context->count_threads; n++) {
		int budget = 10000;

		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign) {
			while (budget-- && (m = uv_run(pt_to_priv_uv(pt)->io_loop,
						       UV_RUN_NOWAIT)))
				;
			if (m)
				lwsl_info("%s: tsi %d: not all closed\n",
					  __func__, n);
		}
	}

	/* call destroy2 if internal loop */
	return !context->pt[0].event_loop_foreign;
}

static int
elops_destroy_context2_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, internal = 0;

	for (n = 0; n < context->count_threads; n++) {
		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign && pt_to_priv_uv(pt)->io_loop) {
			internal = 1;
			if (!context->finalize_destroy_after_internal_loops_stopped)
				uv_stop(pt_to_priv_uv(pt)->io_loop);
			else {
#if UV_VERSION_MAJOR > 0
				uv_loop_close(pt_to_priv_uv(pt)->io_loop);
#endif
				lws_free_set_NULL(pt_to_priv_uv(pt)->io_loop);
			}
		}
	}

	return internal;
}
static int
elops_wsi_logical_close_uv(struct lws *wsi)
{
	if (!lws_socket_is_valid(wsi->desc.sockfd) &&
	    wsi->role_ops && strcmp(wsi->role_ops->name, "raw-file"))
		return 0;

	if (wsi->listener || wsi->event_pipe) {
		lwsl_debug("%s: %p: %d %d stop listener / pipe poll\n",
			   __func__, wsi, wsi->listener, wsi->event_pipe);
		if (wsi_to_priv_uv(wsi)->w_read.pwatcher)
			uv_poll_stop(wsi_to_priv_uv(wsi)->w_read.pwatcher);
	}

	lwsl_debug("%s: lws_libuv_closehandle: wsi %p\n", __func__, wsi);

	/*
	 * libuv has to do its own close handle processing asynchronously
	 */
	lws_libuv_closehandle(wsi);

	return 1; /* do not complete the wsi close, uv close cb will do it */
}
static int
elops_check_client_connect_ok_uv(struct lws *wsi)
{
	if (lws_libuv_check_watcher_active(wsi)) {
		lwsl_warn("Waiting for libuv watcher to close\n");
		return 1;
	}

	return 0;
}

static void
lws_libuv_closewsi_m(uv_handle_t *handle)
{
	lws_sockfd_type sockfd = (lws_sockfd_type)(lws_intptr_t)handle->data;

	lwsl_debug("%s: sockfd %d\n", __func__, sockfd);
	compatible_close(sockfd);
	lws_free(handle);
}

static void
elops_close_handle_manually_uv(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi_to_priv_uv(wsi)->w_read.pwatcher;

	lwsl_debug("%s: lws_libuv_closehandle: wsi %p\n", __func__, wsi);

	/*
	 * the "manual" variant only closes the handle itself and the
	 * related fd.  handle->data is the fd.
	 */
	h->data = (void *)(lws_intptr_t)wsi->desc.sockfd;

	/*
	 * We take responsibility to close / destroy these now.
	 * Remove any trace from the wsi.
	 */

	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi_to_priv_uv(wsi)->w_read.pwatcher = NULL;
	wsi->told_event_loop_closed = 1;

	uv_close(h, lws_libuv_closewsi_m);
}

static int
elops_accept_uv(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_io_watcher_libuv *w_read = &wsi_to_priv_uv(wsi)->w_read;

	w_read->context = wsi->a.context;

	w_read->pwatcher = lws_malloc(sizeof(*w_read->pwatcher), "uvh");
	if (!w_read->pwatcher)
		return -1;

	if (wsi->role_ops->file_handle)
		uv_poll_init(pt_to_priv_uv(pt)->io_loop, w_read->pwatcher,
			     (int)(lws_intptr_t)wsi->desc.filefd);
	else
		uv_poll_init_socket(pt_to_priv_uv(pt)->io_loop,
				    w_read->pwatcher, wsi->desc.sockfd);

	((uv_handle_t *)w_read->pwatcher)->data = (void *)wsi;

	return 0;
}
static void
elops_io_uv(struct lws *wsi, int flags)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_io_watcher_libuv *w = &(wsi_to_priv_uv(wsi)->w_read);
	int current_events = w->actual_events & (UV_READABLE | UV_WRITABLE);

	lwsl_debug("%s: %p: %d\n", __func__, wsi, flags);

	/* w->context is set after the loop is initialized */

	if (!pt_to_priv_uv(pt)->io_loop || !w->context) {
		lwsl_info("%s: no io loop yet\n", __func__);
		return;
	}

	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_err("%s: assert: flags %d\n", __func__, flags);
		assert(0);
	}

	if (!w->pwatcher || wsi->told_event_loop_closed) {
		lwsl_info("%s: no watcher\n", __func__);
		return;
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(w->pwatcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(w->pwatcher);
		else
			uv_poll_start(w->pwatcher, current_events, lws_io_cb);
	}

	w->actual_events = current_events;
}
static int
elops_init_vhost_listen_wsi_uv(struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	struct lws_io_watcher_libuv *w_read;
	int n;

	if (!wsi)
		return 0;

	w_read = &wsi_to_priv_uv(wsi)->w_read;

	if (w_read->context)
		return 0;

	pt = &wsi->a.context->pt[(int)wsi->tsi];
	if (!pt_to_priv_uv(pt)->io_loop)
		return 0;

	w_read->context = wsi->a.context;

	w_read->pwatcher = lws_malloc(sizeof(*w_read->pwatcher), "uvh");
	if (!w_read->pwatcher)
		return -1;

	n = uv_poll_init_socket(pt_to_priv_uv(pt)->io_loop,
				w_read->pwatcher, wsi->desc.sockfd);
	if (n) {
		lwsl_err("uv_poll_init failed %d, sockfd=%p\n", n,
			 (void *)(lws_intptr_t)wsi->desc.sockfd);

		return -1;
	}

	((uv_handle_t *)w_read->pwatcher)->data = (void *)wsi;

	elops_io_uv(wsi, LWS_EV_START | LWS_EV_READ);

	return 0;
}

static void
elops_run_pt_uv(struct lws_context *context, int tsi)
{
	if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
		uv_run(pt_to_priv_uv(&context->pt[tsi])->io_loop, 0);
}

static void
elops_destroy_pt_uv(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	int m, ns;

	lwsl_info("%s: %d\n", __func__, tsi);

	if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
		return;

	if (!pt_to_priv_uv(pt)->io_loop)
		return;

	if (pt->event_loop_destroy_processing_done)
		return;

	pt->event_loop_destroy_processing_done = 1;

	if (!pt->event_loop_foreign) {
		uv_signal_stop(&pt_to_priv_uv(pt)->w_sigint.watcher);

		ns = LWS_ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		for (m = 0; m < ns; m++) {
			uv_signal_stop(&pt_to_priv_uv(pt)->signals[m]);
			uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->signals[m],
				 lws_uv_close_cb_sa);
		}
	} else
		lwsl_debug("%s: not closing pt signals\n", __func__);

	uv_timer_stop(&pt_to_priv_uv(pt)->sultimer);
	uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->sultimer, lws_uv_close_cb_sa);

	uv_idle_stop(&pt_to_priv_uv(pt)->idle);
	uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->idle, lws_uv_close_cb_sa);
}

/*
 * This needs to be called after vhosts have been defined.
 *
 * If later, after server start, another vhost is added, this must be
 * called again to bind the vhost
 */
int
elops_init_pt_uv(struct lws_context *context, void *_loop, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_vhost *vh = context->vhost_list;
	int status = 0, n, ns, first = 1;
	uv_loop_t *loop = (uv_loop_t *)_loop;

	ptpriv->pt = pt;

	if (!ptpriv->io_loop) {
		if (!loop) {
			loop = lws_malloc(sizeof(*loop), "libuv loop");
			if (!loop) {
				lwsl_err("OOM\n");
				return -1;
			}
#if UV_VERSION_MAJOR > 0
			uv_loop_init(loop);
#else
			lwsl_err("This libuv is too old to work...\n");
			return 1;
#endif
			pt->event_loop_foreign = 0;
		} else {
			lwsl_notice(" Using foreign event loop...\n");
			pt->event_loop_foreign = 1;
		}

		ptpriv->io_loop = loop;
		uv_idle_init(loop, &ptpriv->idle);
		LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->idle, context);
		uv_idle_start(&ptpriv->idle, lws_uv_idle);

		ns = LWS_ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		if (!pt->event_loop_foreign) {
			assert(ns <= (int)LWS_ARRAY_SIZE(ptpriv->signals));
			for (n = 0; n < ns; n++) {
				uv_signal_init(loop, &ptpriv->signals[n]);
				LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(
						&ptpriv->signals[n], context);
				ptpriv->signals[n].data = pt->context;
				uv_signal_start(&ptpriv->signals[n],
						lws_uv_signal_handler, sigs[n]);
			}
		}
	} else
		first = 0;

	/*
	 * Initialize the accept wsi read watcher with all the listening
	 * sockets and register a callback for read operations
	 *
	 * We have to do it here because the uv loop(s) are not
	 * initialized until after context creation.
	 */
	while (vh) {
		if (elops_init_vhost_listen_wsi_uv(vh->lserv_wsi) == -1)
			return -1;
		vh = vh->vhost_next;
	}

	if (!first)
		return status;

	uv_timer_init(ptpriv->io_loop, &ptpriv->sultimer);
	LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->sultimer, context);

	return status;
}
static void
lws_libuv_closewsi(uv_handle_t *handle)
{
	struct lws *wsi = (struct lws *)handle->data;
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
#if defined(LWS_WITH_SERVER)
	int lspd = 0;
#endif

	lwsl_info("%s: %p\n", __func__, wsi);

	lws_context_lock(context, __func__);

	/*
	 * We get called back here for every wsi that closes
	 */

#if defined(LWS_WITH_SERVER)
	if (wsi->role_ops && !strcmp(wsi->role_ops->name, "listen") &&
	    wsi->a.context->deprecated) {
		lspd = 1;
		context->deprecation_pending_listen_close_count--;
		if (!context->deprecation_pending_listen_close_count)
			lspd = 2;
	}
#endif

	lws_pt_lock(pt, __func__);
	__lws_close_free_wsi_final(wsi);
	lws_pt_unlock(pt);

	/* it's our job to close the handle finally */
	lws_free(handle);

#if defined(LWS_WITH_SERVER)
	if (lspd == 2 && context->deprecation_cb) {
		lwsl_notice("calling deprecation callback\n");
		context->deprecation_cb();
	}
#endif

	lwsl_info("%s: sa left %d: dyn left: %d (rk %d)\n", __func__,
		  context->count_event_loop_static_asset_handles,
		  context->count_wsi_allocated, context->requested_kill);

	/*
	 * eventually, we closed all the wsi...
	 */
	if (context->requested_kill && !context->count_wsi_allocated) {
		struct lws_vhost *vh = context->vhost_list;
		int m;

		/*
		 * Start Closing Phase 2: close of static handles
		 */

		lwsl_info("%s: all lws dynamic handles down, closing static\n",
			  __func__);

		for (m = 0; m < context->count_threads; m++)
			elops_destroy_pt_uv(context, m);

		/* protocols may have initialized libuv objects */

		while (vh) {
			lws_vhost_destroy1(vh);
			vh = vh->vhost_next;
		}

		if (!context->count_event_loop_static_asset_handles &&
		    context->pt[0].event_loop_foreign) {
			lwsl_info("%s: call lws_context_destroy2\n", __func__);
			lws_context_unlock(context);
			lws_context_destroy2(context);

			return;
		}
	}

	lws_context_unlock(context);
}

void
lws_libuv_closehandle(struct lws *wsi)
{
	uv_handle_t *handle;
	struct lws_io_watcher_libuv *w_read = &wsi_to_priv_uv(wsi)->w_read;

	if (!w_read->pwatcher)
		return;

	if (wsi->told_event_loop_closed) {
		// assert(0);
		return;
	}

	lwsl_debug("%s: %p\n", __func__, wsi);

	wsi->told_event_loop_closed = 1;

	/*
	 * The normal close path attaches the related wsi as the
	 * handle->data.
	 */

	handle = (uv_handle_t *)w_read->pwatcher;

	/* ensure we can only do this once */

	w_read->pwatcher = NULL;

	uv_close(handle, lws_libuv_closewsi);
}

static const struct lws_event_loop_ops event_loop_ops_uv = {
	/* name */			"libuv",
	/* init_context */		elops_init_context_uv,
	/* destroy_context1 */		elops_destroy_context1_uv,
	/* destroy_context2 */		elops_destroy_context2_uv,
	/* init_vhost_listen_wsi */	elops_init_vhost_listen_wsi_uv,
	/* init_pt */			elops_init_pt_uv,
	/* wsi_logical_close */		elops_wsi_logical_close_uv,
	/* check_client_connect_ok */	elops_check_client_connect_ok_uv,
	/* close_handle_manually */	elops_close_handle_manually_uv,
	/* accept */			elops_accept_uv,
	/* io */			elops_io_uv,
	/* run_pt */			elops_run_pt_uv,
	/* destroy_pt */		elops_destroy_pt_uv,
	/* destroy wsi */		NULL,

	/* flags */			0,

	/* evlib_size_ctx */	sizeof(struct lws_context_eventlibs_libuv),
	/* evlib_size_pt */	sizeof(struct lws_pt_eventlibs_libuv),
	/* evlib_size_vh */	0,
	/* evlib_size_wsi */	sizeof(struct lws_io_watcher_libuv),
};

#if defined(LWS_WITH_EVLIB_PLUGINS)
LWS_VISIBLE
#endif
const lws_plugin_evlib_t evlib_uv = {
	.hdr = {
		"libuv event loop",
		"lws_evlib_plugin",
		LWS_PLUGIN_API_MAGIC
	},

	.ops	= &event_loop_ops_uv
};
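
/*
 * Usage sketch (not compiled, not part of this file): binding lws to an
 * application-owned "foreign" libuv loop via the public creation info.
 * LWS_SERVER_OPTION_LIBUV and info.foreign_loops are public lws api;
 * ports, protocols and error handling are elided for brevity.
 */
#if 0
#include <libwebsockets.h>
#include <uv.h>
#include <string.h>

int main(void)
{
	struct lws_context_creation_info info;
	struct lws_context *context;
	uv_loop_t loop;
	void *foreign_loops[1];

	uv_loop_init(&loop);

	memset(&info, 0, sizeof(info));
	info.options = LWS_SERVER_OPTION_LIBUV;
	foreign_loops[0] = &loop;
	info.foreign_loops = foreign_loops;

	context = lws_create_context(&info);
	if (!context)
		return 1;

	/* the application, not lws, runs the loop */
	uv_run(&loop, UV_RUN_DEFAULT);

	lws_context_destroy(context);
	uv_loop_close(&loop);

	return 0;
}
#endif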