pollfd.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657
  1. /*
  2. * libwebsockets - small server side websockets and web server implementation
  3. *
  4. * Copyright (C) 2010 - 2020 Andy Green <[email protected]>
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to
  8. * deal in the Software without restriction, including without limitation the
  9. * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  10. * sell copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  19. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  21. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  22. * IN THE SOFTWARE.
  23. */
  24. #include "private-lib-core.h"
/*
 * Apply (events & ~_and) | _or to the pollfd we track for this wsi, and
 * report the old / new event masks in *pa.
 *
 * Safe to call from a foreign (non-service) thread: if the service thread
 * is inside poll() wait, the change is queued on an ordered list and the
 * service thread is woken to apply it itself.
 *
 * Returns 0 on success (including benign no-op cases), -1 on error.
 */
int
_lws_change_pollfd(struct lws *wsi, int _and, int _or, struct lws_pollargs *pa)
{
#if !defined(LWS_WITH_EVENT_LIBS)
	volatile struct lws_context_per_thread *vpt;
#endif
	struct lws_context_per_thread *pt;
	struct lws_context *context;
	int ret = 0, pa_events;
	struct lws_pollfd *pfd;
	int sampled_tid, tid;

	if (!wsi)
		return 0;

	assert(wsi->position_in_fds_table == LWS_NO_FDS_POS ||
	       wsi->position_in_fds_table >= 0);

	/* not in the fds table at all: nothing to change */
	if (wsi->position_in_fds_table == LWS_NO_FDS_POS)
		return 0;

	if (((volatile struct lws *)wsi)->handling_pollout &&
	    !_and && _or == LWS_POLLOUT) {
		/*
		 * Happening alongside service thread handling POLLOUT.
		 * The danger is when he is finished, he will disable POLLOUT,
		 * countermanding what we changed here.
		 *
		 * Instead of changing the fds, inform the service thread
		 * what happened, and ask it to leave POLLOUT active on exit
		 */
		((volatile struct lws *)wsi)->leave_pollout_active = 1;
		/*
		 * by definition service thread is not in poll wait, so no need
		 * to cancel service
		 */

		lwsl_debug("%s: using leave_pollout_active\n", __func__);

		return 0;
	}

	context = wsi->a.context;
	pt = &context->pt[(int)wsi->tsi];

	assert(wsi->position_in_fds_table < (int)pt->fds_count);

#if !defined(LWS_WITH_EVENT_LIBS)
	/*
	 * This only applies when we use the default poll() event loop.
	 *
	 * BSD can revert pa->events at any time, when the kernel decides to
	 * exit from poll().  We can't protect against it using locking.
	 *
	 * Therefore we must check first if the service thread is in poll()
	 * wait; if so, we know we must be being called from a foreign thread,
	 * and we must keep a strictly ordered list of changes we made instead
	 * of trying to apply them, since when poll() exits, which may happen
	 * at any time it would revert our changes.
	 *
	 * The plat code will apply them when it leaves the poll() wait
	 * before doing anything else.
	 */

	vpt = (volatile struct lws_context_per_thread *)pt;

	/* flag + barrier pair the inside_poll check with the list update */
	vpt->foreign_spinlock = 1;
	lws_memory_barrier();

	if (vpt->inside_poll) {
		struct lws_foreign_thread_pollfd *ftp, **ftp1;
		/*
		 * We are certainly a foreign thread trying to change events
		 * while the service thread is in the poll() wait.
		 *
		 * Create a list of changes to be applied after poll() exit,
		 * instead of trying to apply them now.
		 */
		ftp = lws_malloc(sizeof(*ftp), "ftp");
		if (!ftp) {
			/* release the flag before bailing */
			vpt->foreign_spinlock = 0;
			lws_memory_barrier();
			ret = -1;
			goto bail;
		}
		ftp->_and = _and;
		ftp->_or = _or;
		ftp->fd_index = wsi->position_in_fds_table;
		ftp->next = NULL;

		lws_pt_lock(pt, __func__);

		/* place at END of list to maintain order */
		ftp1 = (struct lws_foreign_thread_pollfd **)
						&vpt->foreign_pfd_list;
		while (*ftp1)
			ftp1 = &((*ftp1)->next);

		*ftp1 = ftp;

		vpt->foreign_spinlock = 0;
		lws_memory_barrier();

		lws_pt_unlock(pt);

		/* wake the service thread out of poll() to apply the list */
		lws_cancel_service_pt(wsi);

		return 0;
	}

	vpt->foreign_spinlock = 0;
	lws_memory_barrier();
#endif

#if !defined(__linux__)
	/* OSX couldn't see close on stdin pipe side otherwise */
	_or |= LWS_POLLHUP;
#endif

	pfd = &pt->fds[wsi->position_in_fds_table];
	pa->fd = wsi->desc.sockfd;
	lwsl_debug("%s: wsi %p: fd %d events %d -> %d\n", __func__, wsi,
		   pa->fd, pfd->events, (pfd->events & ~_and) | _or);
	pa->prev_events = pfd->events;
	pa->events = pfd->events = (pfd->events & ~_and) | _or;

	/* mux substreams don't get a pollfd of their own */
	if (wsi->mux_substream)
		return 0;

#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->a.vhost &&
	    wsi->a.vhost->protocols[0].callback(wsi,
					LWS_CALLBACK_CHANGE_MODE_POLL_FD,
					wsi->user_space, (void *)pa, 0)) {
		ret = -1;
		goto bail;
	}
#endif

	if (context->event_loop_ops->io) {
		/* mirror the poll flag deltas into the event lib io() hooks */
		if (_and & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
					LWS_EV_STOP | LWS_EV_READ);

		if (_or & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
					LWS_EV_START | LWS_EV_READ);

		if (_and & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
					LWS_EV_STOP | LWS_EV_WRITE);

		if (_or & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
					LWS_EV_START | LWS_EV_WRITE);
	}

	/*
	 * if we changed something in this pollfd...
	 *   ... and we're running in a different thread context
	 *     than the service thread...
	 *       ... and the service thread is waiting ...
	 *         then cancel it to force a restart with our changed events
	 */
	pa_events = pa->prev_events != pa->events;
	if (pa_events) {
		if (lws_plat_change_pollfd(context, wsi, pfd)) {
			lwsl_info("%s failed\n", __func__);
			ret = -1;
			goto bail;
		}
		sampled_tid = pt->service_tid;
		if (sampled_tid && wsi->a.vhost) {
			tid = wsi->a.vhost->protocols[0].callback(wsi,
				LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
			if (tid == -1) {
				ret = -1;
				goto bail;
			}
			if (tid != sampled_tid)
				lws_cancel_service_pt(wsi);
		}
	}

bail:
	return ret;
}
  182. #if defined(LWS_WITH_SERVER)
  183. /*
  184. * Enable or disable listen sockets on this pt globally...
  185. * it's modulated according to the pt having space for a new accept.
  186. */
  187. static void
  188. lws_accept_modulation(struct lws_context *context,
  189. struct lws_context_per_thread *pt, int allow)
  190. {
  191. struct lws_vhost *vh = context->vhost_list;
  192. struct lws_pollargs pa1;
  193. while (vh) {
  194. if (vh->lserv_wsi) {
  195. if (allow)
  196. _lws_change_pollfd(vh->lserv_wsi,
  197. 0, LWS_POLLIN, &pa1);
  198. else
  199. _lws_change_pollfd(vh->lserv_wsi,
  200. LWS_POLLIN, 0, &pa1);
  201. }
  202. vh = vh->vhost_next;
  203. }
  204. }
  205. #endif
  206. #if _LWS_ENABLED_LOGS & LLL_WARN
  207. void
  208. __dump_fds(struct lws_context_per_thread *pt, const char *s)
  209. {
  210. unsigned int n;
  211. lwsl_warn("%s: fds_count %u, %s\n", __func__, pt->fds_count, s);
  212. for (n = 0; n < pt->fds_count; n++) {
  213. struct lws *wsi = wsi_from_fd(pt->context, pt->fds[n].fd);
  214. lwsl_warn(" %d: fd %d, wsi %p, pos_in_fds: %d\n",
  215. n + 1, pt->fds[n].fd, wsi,
  216. wsi ? wsi->position_in_fds_table : -1);
  217. }
  218. }
  219. #else
  220. #define __dump_fds(x, y)
  221. #endif
  222. int
  223. __insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi)
  224. {
  225. #if defined(LWS_WITH_EXTERNAL_POLL)
  226. struct lws_pollargs pa = { wsi->desc.sockfd, LWS_POLLIN, 0 };
  227. #endif
  228. struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
  229. int ret = 0;
  230. // __dump_fds(pt, "pre insert");
  231. lws_pt_assert_lock_held(pt);
  232. lwsl_debug("%s: %p: tsi=%d, sock=%d, pos-in-fds=%d\n",
  233. __func__, wsi, wsi->tsi, wsi->desc.sockfd, pt->fds_count);
  234. if ((unsigned int)pt->fds_count >= context->fd_limit_per_thread) {
  235. lwsl_err("Too many fds (%d vs %d)\n", context->max_fds,
  236. context->fd_limit_per_thread );
  237. return 1;
  238. }
  239. #if !defined(_WIN32)
  240. if (!wsi->a.context->max_fds_unrelated_to_ulimit &&
  241. wsi->desc.sockfd - lws_plat_socket_offset() >= context->max_fds) {
  242. lwsl_err("Socket fd %d is too high (%d) offset %d\n",
  243. wsi->desc.sockfd, context->max_fds,
  244. lws_plat_socket_offset());
  245. return 1;
  246. }
  247. #endif
  248. assert(wsi);
  249. assert(wsi->event_pipe || wsi->a.vhost);
  250. assert(lws_socket_is_valid(wsi->desc.sockfd));
  251. #if defined(LWS_WITH_EXTERNAL_POLL)
  252. if (wsi->a.vhost &&
  253. wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
  254. wsi->user_space, (void *) &pa, 1))
  255. return -1;
  256. #endif
  257. if (insert_wsi(context, wsi))
  258. return -1;
  259. pt->count_conns++;
  260. wsi->position_in_fds_table = pt->fds_count;
  261. pt->fds[wsi->position_in_fds_table].fd = wsi->desc.sockfd;
  262. pt->fds[wsi->position_in_fds_table].events = LWS_POLLIN;
  263. #if defined(LWS_WITH_EXTERNAL_POLL)
  264. pa.events = pt->fds[pt->fds_count].events;
  265. #endif
  266. lws_plat_insert_socket_into_fds(context, wsi);
  267. #if defined(LWS_WITH_EXTERNAL_POLL)
  268. /* external POLL support via protocol 0 */
  269. if (wsi->a.vhost &&
  270. wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_ADD_POLL_FD,
  271. wsi->user_space, (void *) &pa, 0))
  272. ret = -1;
  273. #endif
  274. #if defined(LWS_WITH_SERVER)
  275. /* if no more room, defeat accepts on this service thread */
  276. if ((unsigned int)pt->fds_count == context->fd_limit_per_thread - 1)
  277. lws_accept_modulation(context, pt, 0);
  278. #endif
  279. #if defined(LWS_WITH_EXTERNAL_POLL)
  280. if (wsi->a.vhost &&
  281. wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
  282. wsi->user_space, (void *)&pa, 1))
  283. ret = -1;
  284. #endif
  285. // __dump_fds(pt, "post insert");
  286. return ret;
  287. }
/*
 * Remove this wsi's socket from the pt's fds table, compacting the table
 * by moving the last entry into the vacated slot (and fixing up that
 * wsi's recorded position), then inform the event lib / external poll
 * layer.  Caller must be holding the pt lock.
 *
 * Returns 0 on success, nonzero on failure.
 */
int
__remove_wsi_socket_from_fds(struct lws *wsi)
{
	struct lws_context *context = wsi->a.context;
#if defined(LWS_WITH_EXTERNAL_POLL)
	struct lws_pollargs pa = { wsi->desc.sockfd, 0, 0 };
#endif
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws *end_wsi;
	int v, m, ret = 0;

	lws_pt_assert_lock_held(pt);

	// __dump_fds(pt, "pre remove");

#if !defined(_WIN32)
	/* same fd-range sanity check as at insert time */
	if (!wsi->a.context->max_fds_unrelated_to_ulimit &&
	    wsi->desc.sockfd - lws_plat_socket_offset() > context->max_fds) {
		lwsl_err("fd %d too high (%d)\n", wsi->desc.sockfd,
			 context->max_fds);

		return 1;
	}
#endif

#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->a.vhost && wsi->a.vhost->protocols &&
	    wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					wsi->user_space, (void *)&pa, 1))
		return -1;
#endif

	/* take him off any same-vh-protocol list he is on */
	__lws_same_vh_protocol_remove(wsi);

	/* the guy who is to be deleted's slot index in pt->fds */
	m = wsi->position_in_fds_table;

	/* these are the only valid possibilities for position_in_fds_table */
	assert(m == LWS_NO_FDS_POS ||
	       (m >= 0 && (unsigned int)m < pt->fds_count));

	if (context->event_loop_ops->io)
		/* have the event lib stand down its watchers first */
		context->event_loop_ops->io(wsi,
					    LWS_EV_STOP | LWS_EV_READ |
					    LWS_EV_WRITE |
					    LWS_EV_PREPARE_DELETION);

	/*
	lwsl_notice("%s: wsi=%p, skt=%d, fds pos=%d, end guy pos=%d, endfd=%d\n",
		    __func__, wsi, wsi->desc.sockfd, wsi->position_in_fds_table,
		    pt->fds_count, pt->fds[pt->fds_count - 1].fd); */

	if (m != LWS_NO_FDS_POS) {
		char fixup = 0;

		assert(pt->fds_count &&
		       (unsigned int)m != pt->fds_count);

		/* deletion guy's lws_lookup entry needs nuking */
		delete_from_fd(context, wsi->desc.sockfd);

		if ((unsigned int)m != pt->fds_count - 1) {
			/* have the last guy take up the now vacant slot */
			pt->fds[m] = pt->fds[pt->fds_count - 1];
			fixup = 1;
		}

		pt->fds[pt->fds_count - 1].fd = -1;

		/* this decrements pt->fds_count */
		lws_plat_delete_socket_from_fds(context, wsi, m);
		pt->count_conns--;
		if (fixup) {
			v = (int) pt->fds[m].fd;
			/* old end guy's "position in fds table" is now the
			 * deletion guy's old one */
			end_wsi = wsi_from_fd(context, v);
			if (!end_wsi) {
				/* lookup is inconsistent: log but carry on */
				lwsl_err("no wsi for fd %d pos %d, "
					 "pt->fds_count=%d\n",
					 (int)pt->fds[m].fd, m, pt->fds_count);
				// assert(0);
			} else
				end_wsi->position_in_fds_table = m;
		}

		/* removed wsi has no position any more */
		wsi->position_in_fds_table = LWS_NO_FDS_POS;
	}

#if defined(LWS_WITH_EXTERNAL_POLL)
	/* remove also from external POLL support via protocol 0 */
	if (lws_socket_is_valid(wsi->desc.sockfd) && wsi->a.vhost &&
	    wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_DEL_POLL_FD,
					wsi->user_space, (void *) &pa, 0))
		ret = -1;
#endif

#if defined(LWS_WITH_SERVER)
	if (!context->being_destroyed &&
	    /* if this made some room, accept connects on this thread */
	    (unsigned int)pt->fds_count < context->fd_limit_per_thread - 1)
		lws_accept_modulation(context, pt, 1);
#endif

#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->a.vhost &&
	    wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					wsi->user_space, (void *) &pa, 1))
		ret = -1;
#endif

	// __dump_fds(pt, "post remove");

	return ret;
}
  379. int
  380. __lws_change_pollfd(struct lws *wsi, int _and, int _or)
  381. {
  382. struct lws_context *context;
  383. struct lws_pollargs pa;
  384. int ret = 0;
  385. if (!wsi || (!wsi->a.protocol && !wsi->event_pipe) ||
  386. wsi->position_in_fds_table == LWS_NO_FDS_POS)
  387. return 0;
  388. context = lws_get_context(wsi);
  389. if (!context)
  390. return 1;
  391. #if defined(LWS_WITH_EXTERNAL_POLL)
  392. if (wsi->a.vhost &&
  393. wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
  394. wsi->user_space, (void *) &pa, 0))
  395. return -1;
  396. #endif
  397. ret = _lws_change_pollfd(wsi, _and, _or, &pa);
  398. #if defined(LWS_WITH_EXTERNAL_POLL)
  399. if (wsi->a.vhost &&
  400. wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
  401. wsi->user_space, (void *) &pa, 0))
  402. ret = -1;
  403. #endif
  404. return ret;
  405. }
  406. int
  407. lws_change_pollfd(struct lws *wsi, int _and, int _or)
  408. {
  409. struct lws_context_per_thread *pt;
  410. int ret = 0;
  411. pt = &wsi->a.context->pt[(int)wsi->tsi];
  412. lws_pt_lock(pt, __func__);
  413. ret = __lws_change_pollfd(wsi, _and, _or);
  414. lws_pt_unlock(pt);
  415. return ret;
  416. }
/*
 * Ask for a writeable callback on this wsi: either let the role's own
 * callback_on_writable handler deal with it, or mark POLLOUT on the
 * (network) wsi's pollfd.
 *
 * Returns 1 when the request was accepted, 0 when ignored (connection
 * shutting down / unusable), -1 on error.
 */
int
lws_callback_on_writable(struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	struct lws *w = wsi;

	/* no point requesting writeable on a dying connection */
	if (lwsi_state(wsi) == LRS_SHUTDOWN)
		return 0;

	if (wsi->socket_is_permanently_unusable)
		return 0;

	pt = &wsi->a.context->pt[(int)wsi->tsi];

#if defined(LWS_WITH_DETAILED_LATENCY)
	/* record when the earliest outstanding write request was made */
	if (!wsi->detlat.earliest_write_req)
		wsi->detlat.earliest_write_req = lws_now_usecs();
#endif

	lws_stats_bump(pt, LWSSTATS_C_WRITEABLE_CB_REQ, 1);
#if defined(LWS_WITH_STATS)
	/* count only the first request while one is already outstanding */
	if (!wsi->active_writable_req_us) {
		wsi->active_writable_req_us = lws_now_usecs();
		lws_stats_bump(pt, LWSSTATS_C_WRITEABLE_CB_EFF_REQ, 1);
	}
#endif

	if (wsi->role_ops->callback_on_writable) {
		int q = wsi->role_ops->callback_on_writable(wsi);

		//lwsl_notice("%s: rops_cow says %d\n", __func__, q);

		/* nonzero: the role handled the request itself */
		if (q)
			return 1;

		/* otherwise set POLLOUT on the underlying network wsi */
		w = lws_get_network_wsi(wsi);
	} else
		/* only checked when the role has no cow handler */
		if (w->position_in_fds_table == LWS_NO_FDS_POS) {
			lwsl_debug("%s: failed to find socket %d\n", __func__,
				   wsi->desc.sockfd);
			return -1;
		}

	//lwsl_notice("%s: marking for POLLOUT %p (wsi %p)\n", __func__, w, wsi);

	if (__lws_change_pollfd(w, 0, LWS_POLLOUT))
		return -1;

	return 1;
}
  455. /*
  456. * stitch protocol choice into the vh protocol linked list
  457. * We always insert ourselves at the start of the list
  458. *
  459. * X <-> B
  460. * X <-> pAn <-> pB
  461. *
  462. * Illegal to attach more than once without detach inbetween
  463. */
  464. void
  465. lws_same_vh_protocol_insert(struct lws *wsi, int n)
  466. {
  467. lws_vhost_lock(wsi->a.vhost);
  468. lws_dll2_remove(&wsi->same_vh_protocol);
  469. lws_dll2_add_head(&wsi->same_vh_protocol,
  470. &wsi->a.vhost->same_vh_protocol_owner[n]);
  471. wsi->bound_vhost_index = n;
  472. lws_vhost_unlock(wsi->a.vhost);
  473. }
  474. void
  475. __lws_same_vh_protocol_remove(struct lws *wsi)
  476. {
  477. if (wsi->a.vhost && wsi->a.vhost->same_vh_protocol_owner)
  478. lws_dll2_remove(&wsi->same_vh_protocol);
  479. }
  480. void
  481. lws_same_vh_protocol_remove(struct lws *wsi)
  482. {
  483. if (!wsi->a.vhost)
  484. return;
  485. lws_vhost_lock(wsi->a.vhost);
  486. __lws_same_vh_protocol_remove(wsi);
  487. lws_vhost_unlock(wsi->a.vhost);
  488. }
/*
 * Request a writeable callback for every connection on this vhost that is
 * bound to the given protocol.  The protocol pointer must come from this
 * vhost's own protocols[] array.
 *
 * Returns 0 on success, -1 if protocol is not from this vhost.
 */
int
lws_callback_on_writable_all_protocol_vhost(const struct lws_vhost *vhost,
					    const struct lws_protocols *protocol)
{
	struct lws *wsi;
	int n;

	/* the protocol pointer must lie inside vhost->protocols[] */
	if (protocol < vhost->protocols ||
	    protocol >= (vhost->protocols + vhost->count_protocols)) {
		/*
		 * NOTE(review): the 2nd and 3rd args here look swapped vs
		 * the format text ("vhost %p" prints vhost->protocols) —
		 * confirm intended before touching the log
		 */
		lwsl_err("%s: protocol %p is not from vhost %p (%p - %p)\n",
			 __func__, protocol, vhost->protocols, vhost,
			 (vhost->protocols + vhost->count_protocols));

		return -1;
	}

	/* protocol's index into the vhost's protocols[] */
	n = (int)(protocol - vhost->protocols);

	/* _safe iteration: the callback may alter list membership */
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
			lws_dll2_get_head(&vhost->same_vh_protocol_owner[n])) {
		wsi = lws_container_of(d, struct lws, same_vh_protocol);

		assert(wsi->a.protocol == protocol);
		lws_callback_on_writable(wsi);

	} lws_end_foreach_dll_safe(d, d1);

	return 0;
}
  511. int
  512. lws_callback_on_writable_all_protocol(const struct lws_context *context,
  513. const struct lws_protocols *protocol)
  514. {
  515. struct lws_vhost *vhost;
  516. int n;
  517. if (!context)
  518. return 0;
  519. vhost = context->vhost_list;
  520. while (vhost) {
  521. for (n = 0; n < vhost->count_protocols; n++)
  522. if (protocol->callback ==
  523. vhost->protocols[n].callback &&
  524. !strcmp(protocol->name, vhost->protocols[n].name))
  525. break;
  526. if (n != vhost->count_protocols)
  527. lws_callback_on_writable_all_protocol_vhost(
  528. vhost, &vhost->protocols[n]);
  529. vhost = vhost->vhost_next;
  530. }
  531. return 0;
  532. }