/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

int
lws_callback_as_writeable(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	int n, m;

	lws_stats_bump(pt, LWSSTATS_C_WRITEABLE_CB, 1);
#if defined(LWS_WITH_STATS)
	if (wsi->active_writable_req_us) {
		uint64_t ul = lws_now_usecs() -
			      wsi->active_writable_req_us;

		lws_stats_bump(pt, LWSSTATS_US_WRITABLE_DELAY_AVG, ul);
		lws_stats_max(pt, LWSSTATS_US_WORST_WRITABLE_DELAY, ul);
		wsi->active_writable_req_us = 0;
	}
#endif
#if defined(LWS_WITH_DETAILED_LATENCY)
	if (wsi->a.context->detailed_latency_cb && lwsi_state_est(wsi)) {
		lws_usec_t us = lws_now_usecs();

		wsi->detlat.earliest_write_req_pre_write =
					wsi->detlat.earliest_write_req;
		wsi->detlat.earliest_write_req = 0;
		wsi->detlat.latencies[LAT_DUR_PROXY_RX_TO_ONWARD_TX] =
			((uint32_t)us - wsi->detlat.earliest_write_req_pre_write);
	}
#endif

	n = wsi->role_ops->writeable_cb[lwsi_role_server(wsi)];
	m = user_callback_handle_rxflow(wsi->a.protocol->callback,
					wsi, (enum lws_callback_reasons) n,
					wsi->user_space, NULL, 0);

	return m;
}

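/*
 * Illustrative sketch (not part of this file's build): the writeable
 * notification issued above arrives in the user protocol callback, which
 * typically writes one buffer and asks for another callback if more remains.
 * The buffer and flag names here are hypothetical.
 *
 *	case LWS_CALLBACK_SERVER_WRITEABLE:
 *		if (lws_write(wsi, buf + LWS_PRE, len, LWS_WRITE_TEXT) < (int)len)
 *			return -1;
 *		if (more_to_send)
 *			lws_callback_on_writable(wsi);
 *		break;
 */
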
int
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
{
	volatile struct lws *vwsi = (volatile struct lws *)wsi;
	int n;

	// lwsl_notice("%s: %p\n", __func__, wsi);

	vwsi->leave_pollout_active = 0;
	vwsi->handling_pollout = 1;
	/*
	 * if another thread wants POLLOUT on us, from here on while
	 * handling_pollout is set, he will only set leave_pollout_active.
	 * If we are going to disable POLLOUT, we will check that first.
	 */
	wsi->could_have_pending = 0; /* clear back-to-back write detection */

	/*
	 * user callback is lowest priority to get these notifications
	 * actually, since other pending things cannot be disordered
	 *
	 * Priority 1: pending truncated sends are incomplete ws fragments
	 *	       If anything else sent first the protocol would be
	 *	       corrupted.
	 *
	 *	       These are post- any compression transform
	 */

	if (lws_has_buffered_out(wsi)) {
		//lwsl_notice("%s: completing partial\n", __func__);
		if (lws_issue_raw(wsi, NULL, 0) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			goto bail_die;
		}
		/* leave POLLOUT active either way */
		goto bail_ok;
	} else
		if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
			wsi->socket_is_permanently_unusable = 1;
			goto bail_die; /* retry closing now */
		}

	/* Priority 2: pre- compression transform */

#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
	if (wsi->http.comp_ctx.buflist_comp ||
	    wsi->http.comp_ctx.may_have_more) {
		enum lws_write_protocol wp = LWS_WRITE_HTTP;

		lwsl_info("%s: completing comp partial (buflist_comp %p, may %d)\n",
			  __func__, wsi->http.comp_ctx.buflist_comp,
			  wsi->http.comp_ctx.may_have_more);

		if (wsi->role_ops->write_role_protocol(wsi, NULL, 0, &wp) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			goto bail_die;
		}
		lws_callback_on_writable(wsi);

		goto bail_ok;
	}
#endif

#ifdef LWS_WITH_CGI
	/*
	 * A cgi master's wire protocol remains h1 or h2. He is just getting
	 * his data from his child cgis.
	 */
	if (wsi->http.cgi) {
		/* also one shot */
		if (pollfd)
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");
				return 1;
			}
		goto user_service_go_again;
	}
#endif

	/* if we got here, we should have wire protocol ops set on the wsi */
	assert(wsi->role_ops);

	if (!wsi->role_ops->handle_POLLOUT)
		goto bail_ok;

	n = wsi->role_ops->handle_POLLOUT(wsi);
	switch (n) {
	case LWS_HP_RET_BAIL_OK:
		goto bail_ok;
	case LWS_HP_RET_BAIL_DIE:
		goto bail_die;
	case LWS_HP_RET_DROP_POLLOUT:
	case LWS_HP_RET_USER_SERVICE:
		break;
	default:
		assert(0);
	}

	/* one shot */

	if (pollfd) {
		int eff = vwsi->leave_pollout_active;

		if (!eff) {
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");
				goto bail_die;
			}
		}

		vwsi->handling_pollout = 0;

		/* cannot get leave_pollout_active set after the above */
		if (!eff && wsi->leave_pollout_active) {
			/*
			 * got set in between sampling eff and clearing
			 * handling_pollout, force POLLOUT on
			 */
			lwsl_debug("leave_pollout_active\n");
			if (lws_change_pollfd(wsi, 0, LWS_POLLOUT)) {
				lwsl_info("failed at set pollfd\n");
				goto bail_die;
			}
		}

		vwsi->leave_pollout_active = 0;
	}

	if (lwsi_role_client(wsi) && !wsi->hdr_parsing_completed &&
	     lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS &&
	     lwsi_state(wsi) != LRS_ISSUE_HTTP_BODY)
		goto bail_ok;

	if (n == LWS_HP_RET_DROP_POLLOUT)
		goto bail_ok;

#ifdef LWS_WITH_CGI
user_service_go_again:
#endif

	if (wsi->role_ops->perform_user_POLLOUT) {
		if (wsi->role_ops->perform_user_POLLOUT(wsi) == -1)
			goto bail_die;
		else
			goto bail_ok;
	}

	lwsl_debug("%s: %p: non mux: wsistate 0x%lx, ops %s\n", __func__, wsi,
		   (unsigned long)wsi->wsistate, wsi->role_ops->name);

	vwsi = (volatile struct lws *)wsi;
	vwsi->leave_pollout_active = 0;

	n = lws_callback_as_writeable(wsi);
	vwsi->handling_pollout = 0;

	if (vwsi->leave_pollout_active)
		if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
			goto bail_die;

	return n;

	/*
	 * since these don't disable the POLLOUT, they are always doing the
	 * right thing for leave_pollout_active whether it was set or not.
	 */

bail_ok:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return 0;

bail_die:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return -1;
}

int
lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	uint8_t *buffered;
	size_t blen;
	int ret = LWSRXFC_CACHED, m;

	/* his RX is flowcontrolled, don't send remaining now */
	blen = lws_buflist_next_segment_len(&wsi->buflist, &buffered);
	if (blen) {
		if (buf >= buffered && buf + len <= buffered + blen &&
		    blen != (size_t)len) {
			/*
			 * rxflow while we were spilling prev rxflow
			 *
			 * len indicates how much was unused, then... so trim
			 * the head buflist to match that situation
			 */
			lws_buflist_use_segment(&wsi->buflist, blen - len);
			lwsl_debug("%s: trim existing rxflow %d -> %d\n",
				   __func__, (int)blen, (int)len);

			return LWSRXFC_TRIMMED;
		}
		ret = LWSRXFC_ADDITIONAL;
	}

	/* a new rxflow, buffer it and warn caller */

	lwsl_debug("%s: rxflow append %d\n", __func__, len - n);
	m = lws_buflist_append_segment(&wsi->buflist, buf + n, len - n);

	if (m < 0)
		return LWSRXFC_ERROR;
	if (m) {
		lwsl_debug("%s: added %p to rxflow list\n", __func__, wsi);
		if (lws_dll2_is_detached(&wsi->dll_buflist))
			lws_dll2_add_head(&wsi->dll_buflist,
					  &pt->dll_buflist_owner);
	}

	return ret;
}

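/*
 * Usage sketch (illustrative, not part of this file): the caching above is
 * what lets user code throttle RX without losing data, roughly:
 *
 *	lws_rx_flow_control(wsi, 0);	... subsequent RX gets cached on the buflist
 *	do_slow_work();			... hypothetical application work
 *	lws_rx_flow_control(wsi, 1);	... cached RX is drained again
 */
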
/* this is used by the platform service code to stop us waiting for network
 * activity in poll() when we have something that already needs service
 */

int
lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;

	if (!context)
		return 1;

#if defined(LWS_WITH_SYS_SMD)
	if (!tsi && lws_smd_message_pending(context)) {
		lws_smd_msg_distribute(context);
		if (lws_smd_message_pending(context))
			return 0;
	}
#endif

	pt = &context->pt[tsi];

#if defined(LWS_WITH_EXTERNAL_POLL)
	{
		lws_usec_t u = __lws_sul_service_ripe(pt->pt_sul_owner,
				LWS_COUNT_PT_SUL_OWNERS, lws_now_usecs());

		if (u < timeout_ms * 1000)
			timeout_ms = u / 1000;
	}
#endif

	/*
	 * Figure out if we really want to wait in poll()... we only need to
	 * wait if really nothing already to do and we have to wait for
	 * something from network
	 */
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
	/* 1) if we know we are draining rx ext, do not wait in poll */
	if (pt->ws.rx_draining_ext_list)
		return 0;
#endif

#if defined(LWS_WITH_TLS)
	/* 2) if we know we have non-network pending data,
	 *    do not wait in poll */

	if (pt->context->tls_ops &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered(pt))
		return 0;
#endif

	/*
	 * 4) If there is any wsi with rxflow buffered and in a state to process
	 *    it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (!lws_is_flowcontrolled(wsi) &&
		     lwsi_state(wsi) != LRS_DEFERRING_ACTION)
			return 0;

	/*
	 * 5) If any guys with http compression to spill, we shouldn't wait in
	 *    poll but hurry along and service them
	 */

	} lws_end_foreach_dll(d);

	return timeout_ms;
}

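/*
 * Illustrative caller sketch (simplified; the real callers are the platform
 * service backends, and the pt->fds / pt->fds_count usage here is assumed):
 *
 *	timeout_ms = lws_service_adjust_timeout(context, timeout_ms, tsi);
 *	n = poll(pt->fds, pt->fds_count, timeout_ms);
 */
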
/*
 * POLLIN said there is something... we must read it, and either use it; or
 * if other material already in the buflist append it and return the buflist
 * head material.
 */
int
lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
		       struct lws_tokens *ebuf, char fr, const char *hint)
{
	int n, e, bns;
	uint8_t *ep, *b;

	// lwsl_debug("%s: wsi %p: %s: prior %d\n", __func__, wsi, hint, prior);
	// lws_buflist_describe(&wsi->buflist, wsi, __func__);

	(void)hint;

	if (!ebuf->token)
		ebuf->token = pt->serv_buf + LWS_PRE;
	if (!ebuf->len ||
	    (unsigned int)ebuf->len > wsi->a.context->pt_serv_buf_size - LWS_PRE)
		ebuf->len = wsi->a.context->pt_serv_buf_size - LWS_PRE;

	e = ebuf->len;
	ep = ebuf->token;

	/* h2 or muxed stream... must force the read due to HOL blocking */

	if (wsi->mux_substream)
		fr = 1;

	/* there's something on the buflist? */

	bns = (int)lws_buflist_next_segment_len(&wsi->buflist, &ebuf->token);
	b = ebuf->token;

	if (!fr && bns)
		goto buflist_material;

	/* we're going to read something */

	ebuf->token = ep;
	ebuf->len = n = lws_ssl_capable_read(wsi, ep, e);

	lwsl_debug("%s: wsi %p: %s: ssl_capable_read %d\n", __func__,
		   wsi, hint, ebuf->len);

	if (!bns && /* only acknowledge error when we handled buflist content */
	    n == LWS_SSL_CAPABLE_ERROR) {
		lwsl_debug("%s: SSL_CAPABLE_ERROR\n", __func__);
		return -1;
	}

	if (n <= 0 && bns)
		/*
		 * There wasn't anything to read yet, but there's something
		 * on the buflist to give him
		 */
		goto buflist_material;

	/* we read something */

	if (fr && bns) {
		/*
		 * Stash what we read, since there's earlier buflist material
		 */
		n = lws_buflist_append_segment(&wsi->buflist, ebuf->token,
					       ebuf->len);
		if (n < 0)
			return -1;
		if (n && lws_dll2_is_detached(&wsi->dll_buflist))
			lws_dll2_add_head(&wsi->dll_buflist,
					  &pt->dll_buflist_owner);

		goto buflist_material;
	}

	/*
	 * directly return what we read
	 */

	return 0;

buflist_material:

	ebuf->token = b;
	if (e < bns)
		/* restrict to e, if more than e available */
		ebuf->len = e;
	else
		ebuf->len = bns;

	return 1; /* from buflist */
}

int
lws_buflist_aware_finished_consuming(struct lws *wsi, struct lws_tokens *ebuf,
				     int used, int buffered, const char *hint)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	int m;

	//lwsl_debug("%s %s consuming buffered %d used %zu / %zu\n", __func__, hint,
	//	     buffered, (size_t)used, (size_t)ebuf->len);
	// lws_buflist_describe(&wsi->buflist, wsi, __func__);

	/* it's in the buflist; we didn't use any */

	if (!used && buffered)
		return 0;

	if (used && buffered) {
		if (wsi->buflist) {
			m = (int)lws_buflist_use_segment(&wsi->buflist,
							 (size_t)used);
			// lwsl_notice("%s: used %d, next %d\n", __func__, used, m);
			// lws_buflist_describe(&wsi->buflist, wsi, __func__);
			if (m)
				return 0;
		}

		lwsl_info("%s: removed %p from dll_buflist\n", __func__, wsi);
		lws_dll2_remove(&wsi->dll_buflist);

		return 0;
	}

	/* any remainder goes on the buflist */

	if (used != ebuf->len) {
		// lwsl_notice("%s %s bac appending %d\n", __func__, hint,
		//	       ebuf->len - used);
		m = lws_buflist_append_segment(&wsi->buflist,
					       ebuf->token + used,
					       ebuf->len - used);
		if (m < 0)
			return 1; /* OOM */
		if (m) {
			lwsl_debug("%s: added %p to rxflow list\n",
				   __func__, wsi);
			if (lws_dll2_is_detached(&wsi->dll_buflist))
				lws_dll2_add_head(&wsi->dll_buflist,
						  &pt->dll_buflist_owner);
		}
		// lws_buflist_describe(&wsi->buflist, wsi, __func__);
	}

	return 0;
}

void
lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt)
{
	struct lws_pollfd pfd;

	if (!pt->dll_buflist_owner.head)
		return;

	/*
	 * service all guys with pending rxflow that reached a state they can
	 * accept the pending data
	 */

	lws_pt_lock(pt, __func__);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		pfd.events = LWS_POLLIN;
		pfd.revents = LWS_POLLIN;
		pfd.fd = -1;

		lwsl_debug("%s: rxflow processing: %p fc=%d, 0x%lx\n", __func__,
			   wsi, lws_is_flowcontrolled(wsi),
			   (unsigned long)wsi->wsistate);

		if (!lws_is_flowcontrolled(wsi) &&
		    lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
			pt->inside_lws_service = 1;
			if ((wsi->role_ops->handle_POLLIN)(pt, wsi, &pfd) ==
						   LWS_HPI_RET_PLEASE_CLOSE_ME)
				lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
						   "close_and_handled");
			pt->inside_lws_service = 0;
		}

	} lws_end_foreach_dll_safe(d, d1);

	lws_pt_unlock(pt);
}

/*
 * guys that need POLLIN service again without waiting for network action
 * can force POLLIN here if not flowcontrolled, so they will get service.
 *
 * Return nonzero if anybody got their POLLIN faked
 */
int
lws_service_flag_pending(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt;
	int forced = 0;

	if (!context)
		return 1;

	pt = &context->pt[tsi];

	lws_pt_lock(pt, __func__);

	/*
	 * 1) If there is any wsi with a buflist and in a state to process
	 *    it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (!lws_is_flowcontrolled(wsi) &&
		     lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
			forced = 1;
			break;
		}
	} lws_end_foreach_dll(d);

#if defined(LWS_ROLE_WS)
	forced |= role_ops_ws.service_flag_pending(context, tsi);
#endif

#if defined(LWS_WITH_TLS)
	/*
	 * 2) For all guys with buffered SSL read data already saved up, if they
	 * are not flowcontrolled, fake their POLLIN status so they'll get
	 * service to use up the buffered incoming data, even though their
	 * network socket may have nothing
	 */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
			lws_dll2_get_head(&pt->tls.dll_pending_tls_owner)) {
		struct lws *wsi = lws_container_of(p, struct lws,
						   tls.dll_pending_tls);

		if (wsi->position_in_fds_table >= 0) {

			pt->fds[wsi->position_in_fds_table].revents |=
				pt->fds[wsi->position_in_fds_table].events &
								LWS_POLLIN;
			if (pt->fds[wsi->position_in_fds_table].revents &
								LWS_POLLIN)
				/*
				 * We're not going to remove the wsi from the
				 * pending tls list.  The processing will have
				 * to do it if he exhausts the pending tls.
				 */
				forced = 1;
		}

	} lws_end_foreach_dll_safe(p, p1);
#endif

	lws_pt_unlock(pt);

	return forced;
}

int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd,
		   int tsi)
{
	struct lws_context_per_thread *pt;
	struct lws *wsi;

	if (!context || context->being_destroyed1)
		return -1;

	pt = &context->pt[tsi];

	if (!pollfd) {
		/*
		 * calling with NULL pollfd for periodic background processing
		 * is no longer needed and is now illegal.
		 */
		assert(pollfd);
		return -1;
	}
	assert(lws_socket_is_valid(pollfd->fd));

	/* no, here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

#if LWS_MAX_SMP > 1
	if (wsi->undergoing_init_from_other_pt)
		/*
		 * Temporary situation that other service thread is initializing
		 * this wsi right now for use on our service thread.
		 */
		return 0;
#endif

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

	/* handle session socket closed */

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		wsi->socket_is_permanently_unusable = 1;
		lwsl_debug("Session Socket %p (fd=%d) dead\n",
			   (void *)wsi, pollfd->fd);

		goto close_and_handled;
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		lwsl_debug("pollhup\n");
		wsi->socket_is_permanently_unusable = 1;
		goto close_and_handled;
	}

#if defined(LWS_WITH_TLS)
	if (lwsi_state(wsi) == LRS_SHUTDOWN &&
	    lws_is_ssl(wsi) && wsi->tls.ssl) {
		switch (__lws_tls_shutdown(wsi)) {
		case LWS_SSL_CAPABLE_DONE:
		case LWS_SSL_CAPABLE_ERROR:
			goto close_and_handled;

		case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
		case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			goto handled;
		}
	}
#endif
	wsi->could_have_pending = 0; /* clear back-to-back write detection */
	pt->inside_lws_service = 1;

	/* okay, what we came here to do... */

	/* if we got here, we should have wire protocol ops set on the wsi */
	assert(wsi->role_ops);

	// lwsl_notice("%s: %s: wsistate 0x%x\n", __func__, wsi->role_ops->name,
	//	       wsi->wsistate);

	switch ((wsi->role_ops->handle_POLLIN)(pt, wsi, pollfd)) {
	case LWS_HPI_RET_WSI_ALREADY_DIED:
		pt->inside_lws_service = 0;
		return 1;
	case LWS_HPI_RET_HANDLED:
		break;
	case LWS_HPI_RET_PLEASE_CLOSE_ME:
		//lwsl_notice("%s: %s pollin says please close me\n", __func__,
		//	      wsi->role_ops->name);
close_and_handled:
		lwsl_debug("%p: Close and handled\n", wsi);
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "close_and_handled");
#if defined(_DEBUG) && defined(LWS_WITH_LIBUV)
		/*
		 * confirm close has no problem being called again while
		 * it waits for libuv service to complete the first async
		 * close
		 */
		if (!strcmp(context->event_loop_ops->name, "libuv"))
			lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
					   "close_and_handled uv repeat test");
#endif
		/*
		 * pollfd may point to something else after the close
		 * due to pollfd swapping scheme on delete on some platforms
		 * we can't clear revents now because it'd be the wrong guy's
		 * revents
		 */
		pt->inside_lws_service = 0;
		return 1;
	default:
		assert(0);
	}
#if defined(LWS_WITH_TLS)
handled:
#endif
	pollfd->revents = 0;
	pt->inside_lws_service = 0;

	return 0;
}

int
lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
{
	return lws_service_fd_tsi(context, pollfd, 0);
}

int
lws_service(struct lws_context *context, int timeout_ms)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!context)
		return 1;

	pt = &context->pt[0];
	pt->inside_service = 1;

	if (context->event_loop_ops->run_pt) {
		/* we are configured for an event loop */
		context->event_loop_ops->run_pt(context, 0);

		pt->inside_service = 0;

		return 1;
	}

	n = lws_plat_service(context, timeout_ms);

	pt->inside_service = 0;

	return n;
}

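/*
 * Illustrative application usage (assuming a context already created with
 * lws_create_context() and a hypothetical "interrupted" flag set by a signal
 * handler):
 *
 *	while (!interrupted)
 *		if (lws_service(context, 0) < 0)
 *			break;
 */
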
int
lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!context)
		return 1;

	pt = &context->pt[tsi];
	pt->inside_service = 1;
#if LWS_MAX_SMP > 1
	pt->self = pthread_self();
#endif

	if (context->event_loop_ops->run_pt) {
		/* we are configured for an event loop */
		context->event_loop_ops->run_pt(context, tsi);

		pt->inside_service = 0;

		return 1;
	}

	n = _lws_plat_service_tsi(context, timeout_ms, tsi);

	pt->inside_service = 0;

	return n;
}
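
/*
 * Illustrative SMP usage (hypothetical thread function and arg struct): when
 * built with LWS_MAX_SMP > 1, each service thread drives its own tsi:
 *
 *	static void *service_thread(void *arg)
 *	{
 *		struct thread_info *t = (struct thread_info *)arg;
 *
 *		while (!t->interrupted)
 *			if (lws_service_tsi(t->context, 0, t->tsi) < 0)
 *				break;
 *
 *		return NULL;
 *	}
 */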