/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

#if defined (_DEBUG)
void lwsi_set_role(struct lws *wsi, lws_wsi_state_t role)
{
	wsi->wsistate = (wsi->wsistate & (~LWSI_ROLE_MASK)) | role;

	lwsl_debug("lwsi_set_role(%p, 0x%lx)\n", wsi,
		   (unsigned long)wsi->wsistate);
}

void lwsi_set_state(struct lws *wsi, lws_wsi_state_t lrs)
{
	wsi->wsistate = (wsi->wsistate & (~LRS_MASK)) | lrs;

	lwsl_debug("lwsi_set_state(%p, 0x%lx)\n", wsi,
		   (unsigned long)wsi->wsistate);
}
#endif
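
/*
 * Bind / unbind a wsi to its vhost and keep the vhost's count of bound wsi
 * in step under the context lock.  When the last bound wsi goes away while
 * the vhost is already being destroyed, unbinding finalizes the deferred
 * vhost destruction.
 */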
void
lws_vhost_bind_wsi(struct lws_vhost *vh, struct lws *wsi)
{
	if (wsi->a.vhost == vh)
		return;

	lws_context_lock(vh->context, __func__); /* ---------- context { */
	wsi->a.vhost = vh;
	vh->count_bound_wsi++;
	lws_context_unlock(vh->context); /* } context ---------- */

	lwsl_debug("%s: vh %s: wsi %s/%s, count_bound_wsi %d\n", __func__,
		   vh->name, wsi->role_ops ? wsi->role_ops->name : "none",
		   wsi->a.protocol ? wsi->a.protocol->name : "none",
		   vh->count_bound_wsi);
	assert(wsi->a.vhost->count_bound_wsi > 0);
}

void
lws_vhost_unbind_wsi(struct lws *wsi)
{
	if (!wsi->a.vhost)
		return;

	lws_context_lock(wsi->a.context, __func__); /* ---------- context { */

	assert(wsi->a.vhost->count_bound_wsi > 0);
	wsi->a.vhost->count_bound_wsi--;
	lwsl_debug("%s: vh %s: count_bound_wsi %d\n", __func__,
		   wsi->a.vhost->name, wsi->a.vhost->count_bound_wsi);

	if (!wsi->a.vhost->count_bound_wsi &&
	    wsi->a.vhost->being_destroyed) {
		/*
		 * We have closed all wsi that were bound to this vhost
		 * by any pt: nothing can be servicing any wsi belonging
		 * to it any more.
		 *
		 * Finalize the vh destruction
		 */
		__lws_vhost_destroy2(wsi->a.vhost);
	}

	wsi->a.vhost = NULL;

	lws_context_unlock(wsi->a.context); /* } context ---------- */
}
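
/*
 * For a mux substream (h2 or mqtt), walk up the parent chain to the wsi that
 * owns the actual network connection; a plain wsi is its own network wsi.
 */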
struct lws *
lws_get_network_wsi(struct lws *wsi)
{
	if (!wsi)
		return NULL;

#if defined(LWS_WITH_HTTP2) || defined(LWS_ROLE_MQTT)
	if (!wsi->mux_substream
#if defined(LWS_WITH_CLIENT)
	    && !wsi->client_mux_substream
#endif
	)
		return wsi;

	while (wsi->mux.parent_wsi)
		wsi = wsi->mux.parent_wsi;
#endif

	return wsi;
}

const struct lws_protocols *
lws_vhost_name_to_protocol(struct lws_vhost *vh, const char *name)
{
	int n;

	for (n = 0; n < vh->count_protocols; n++)
		if (vh->protocols[n].name && !strcmp(name, vh->protocols[n].name))
			return &vh->protocols[n];

	return NULL;
}
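
/*
 * Walk every service thread's fd table and deliver the given callback reason
 * to each live wsi bound to the given protocol (and, for the _vhost variants
 * below, to the given vhost).
 */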
int
lws_callback_all_protocol(struct lws_context *context,
			  const struct lws_protocols *protocol, int reason)
{
	struct lws_context_per_thread *pt = &context->pt[0];
	unsigned int n, m = context->count_threads;
	struct lws *wsi;

	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi)
				continue;
			if (wsi->a.protocol == protocol)
				protocol->callback(wsi, reason,
						   wsi->user_space, NULL, 0);
		}
		pt++;
	}

	return 0;
}

int
lws_callback_all_protocol_vhost_args(struct lws_vhost *vh,
				     const struct lws_protocols *protocol,
				     int reason, void *argp, size_t len)
{
	struct lws_context *context = vh->context;
	struct lws_context_per_thread *pt = &context->pt[0];
	unsigned int n, m = context->count_threads;
	struct lws *wsi;

	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi)
				continue;
			if (wsi->a.vhost == vh && (wsi->a.protocol == protocol ||
						   !protocol))
				wsi->a.protocol->callback(wsi, reason,
							  wsi->user_space,
							  argp, len);
		}
		pt++;
	}

	return 0;
}

int
lws_callback_all_protocol_vhost(struct lws_vhost *vh,
				const struct lws_protocols *protocol,
				int reason)
{
	return lws_callback_all_protocol_vhost_args(vh, protocol, reason,
						    NULL, 0);
}

int
lws_callback_vhost_protocols(struct lws *wsi, int reason, void *in, int len)
{
	int n;

	for (n = 0; n < wsi->a.vhost->count_protocols; n++)
		if (wsi->a.vhost->protocols[n].callback(wsi, reason, NULL,
							in, len))
			return 1;

	return 0;
}

int
lws_callback_vhost_protocols_vhost(struct lws_vhost *vh, int reason, void *in,
				   size_t len)
{
	int n;
	struct lws *wsi = lws_zalloc(sizeof(*wsi), "fake wsi");

	if (!wsi)
		return 1;

	wsi->a.context = vh->context;
	lws_vhost_bind_wsi(vh, wsi);

	for (n = 0; n < wsi->a.vhost->count_protocols; n++) {
		wsi->a.protocol = &vh->protocols[n];
		if (wsi->a.protocol->callback(wsi, reason, NULL, in, len)) {
			lws_free(wsi);
			return 1;
		}
	}

	lws_free(wsi);

	return 0;
}
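
/*
 * Apply or remove rx flow control on a wsi.  _enable is either a simple
 * boolean (0 = throttle rx, 1 = allow rx), which is converted to the
 * LWS_RXFLOW_REASON_USER_BOOL reason, or a reason bitmap tagged with
 * LWS_RXFLOW_REASON_APPLIES; rx is only re-enabled once no reason bits
 * remain set in rxflow_bitmap.
 */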
int
lws_rx_flow_control(struct lws *wsi, int _enable)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	int en = _enable;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	lwsl_info("%s: %p 0x%x\n", __func__, wsi, _enable);

	if (!(_enable & LWS_RXFLOW_REASON_APPLIES)) {
		/*
		 * convert user bool style to bitmap style... in user simple
		 * bool style _enable = 0 = flow control it, = 1 = allow rx
		 */
		en = LWS_RXFLOW_REASON_APPLIES | LWS_RXFLOW_REASON_USER_BOOL;
		if (_enable & 1)
			en |= LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT;
	}

	lws_pt_lock(pt, __func__);

	/* any bit set in rxflow_bitmap DISABLEs rxflow control */
	if (en & LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT)
		wsi->rxflow_bitmap &= ~(en & 0xff);
	else
		wsi->rxflow_bitmap |= en & 0xff;

	if ((LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap)) ==
	    wsi->rxflow_change_to)
		goto skip;

	wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE |
				(!wsi->rxflow_bitmap);

	lwsl_info("%s: %p: bitmap 0x%x: en 0x%x, ch 0x%x\n", __func__, wsi,
		  wsi->rxflow_bitmap, en, wsi->rxflow_change_to);

	if (_enable & LWS_RXFLOW_REASON_FLAG_PROCESS_NOW ||
	    !wsi->rxflow_will_be_applied) {
		en = __lws_rx_flow_control(wsi);
		lws_pt_unlock(pt);

		return en;
	}

skip:
	lws_pt_unlock(pt);

	return 0;
}

void
lws_rx_flow_allow_all_protocol(const struct lws_context *context,
			       const struct lws_protocols *protocol)
{
	const struct lws_context_per_thread *pt = &context->pt[0];
	struct lws *wsi;
	unsigned int n, m = context->count_threads;

	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi)
				continue;
			if (wsi->a.protocol == protocol)
				lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
		}
		pt++;
	}
}

int user_callback_handle_rxflow(lws_callback_function callback_function,
				struct lws *wsi,
				enum lws_callback_reasons reason, void *user,
				void *in, size_t len)
{
	int n;

	wsi->rxflow_will_be_applied = 1;
	n = callback_function(wsi, reason, user, in, len);
	wsi->rxflow_will_be_applied = 0;
	if (!n)
		n = __lws_rx_flow_control(wsi);

	return n;
}
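
/*
 * Internal helper that actually applies any pending rxflow change to the
 * wsi's POLLIN state, first recursing into any child wsi with their own
 * pending changes.
 */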
int
__lws_rx_flow_control(struct lws *wsi)
{
	struct lws *wsic = wsi->child_list;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	/* if he has children, do those if they were changed */
	while (wsic) {
		if (wsic->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)
			__lws_rx_flow_control(wsic);

		wsic = wsic->sibling_list;
	}

	/* there is no pending change */
	if (!(wsi->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE))
		return 0;

	/* stuff is still buffered, not ready to really accept new input */
	if (lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
		/* get ourselves called back to deal with stashed buffer */
		lws_callback_on_writable(wsi);
		// return 0;
	}

	/* now the pending is cleared, we can change rxflow state */

	wsi->rxflow_change_to &= ~LWS_RXFLOW_PENDING_CHANGE;

	lwsl_info("rxflow: wsi %p change_to %d\n", wsi,
		  wsi->rxflow_change_to & LWS_RXFLOW_ALLOW);

	/* adjust the pollfd for this wsi */

	if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
		lwsl_info("%s: reenable POLLIN\n", __func__);
		// lws_buflist_describe(&wsi->buflist, NULL, __func__);
		if (__lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
			lwsl_info("%s: fail\n", __func__);
			return -1;
		}
	} else
		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
			return -1;

	return 0;
}

const struct lws_protocols *
lws_get_protocol(struct lws *wsi)
{
	return wsi->a.protocol;
}

int
lws_ensure_user_space(struct lws *wsi)
{
	if (!wsi->a.protocol)
		return 0;

	/* allocate the per-connection user memory (if any) */
	if (wsi->a.protocol->per_session_data_size && !wsi->user_space) {
		wsi->user_space = lws_zalloc(
			wsi->a.protocol->per_session_data_size, "user space");
		if (wsi->user_space == NULL) {
			lwsl_err("%s: OOM\n", __func__);
			return 1;
		}
	} else
		lwsl_debug("%s: %p protocol pss %lu, user_space=%p\n", __func__,
			   wsi, (long)wsi->a.protocol->per_session_data_size,
			   wsi->user_space);
	return 0;
}

void *
lws_adjust_protocol_psds(struct lws *wsi, size_t new_size)
{
	((struct lws_protocols *)lws_get_protocol(wsi))->per_session_data_size =
		new_size;

	if (lws_ensure_user_space(wsi))
		return NULL;

	return wsi->user_space;
}

int
lws_get_tsi(struct lws *wsi)
{
	return (int)wsi->tsi;
}

int
lws_is_ssl(struct lws *wsi)
{
#if defined(LWS_WITH_TLS)
	return wsi->tls.use_ssl & LCCSCF_USE_SSL;
#else
	(void)wsi;
	return 0;
#endif
}

#if defined(LWS_WITH_TLS) && !defined(LWS_WITH_MBEDTLS)
lws_tls_conn*
lws_get_ssl(struct lws *wsi)
{
	return wsi->tls.ssl;
}
#endif

int
lws_partial_buffered(struct lws *wsi)
{
	return lws_has_buffered_out(wsi);
}

lws_fileofs_t
lws_get_peer_write_allowance(struct lws *wsi)
{
	if (!wsi->role_ops->tx_credit)
		return -1;
	return wsi->role_ops->tx_credit(wsi, LWSTXCR_US_TO_PEER, 0);
}

void
lws_role_transition(struct lws *wsi, enum lwsi_role role, enum lwsi_state state,
		    const struct lws_role_ops *ops)
{
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	const char *name = "(unset)";
#endif
	wsi->wsistate = role | state;
	if (ops)
		wsi->role_ops = ops;
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	if (wsi->role_ops)
		name = wsi->role_ops->name;
	lwsl_debug("%s: %p: wsistate 0x%lx, ops %s\n", __func__, wsi,
		   (unsigned long)wsi->wsistate, name);
#endif
}
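
/*
 * Cut a URI up in-place into protocol, address, port and path.  The source
 * string is modified (NULs are inserted) and the returned pointers point
 * into it, so it must stay writable and in scope.  A '+' prefix on the
 * address part marks a unix domain socket path.
 *
 * Illustrative use:
 *
 *	char uri[] = "wss://example.com:8443/chat";
 *	const char *prot, *ads, *path;
 *	int port;
 *
 *	lws_parse_uri(uri, &prot, &ads, &port, &path);
 *	// prot "wss", ads "example.com", port 8443, path "chat"
 *	// (path points just past the '/'; with no path it is "/")
 */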
int
lws_parse_uri(char *p, const char **prot, const char **ads, int *port,
	      const char **path)
{
	const char *end;
	char unix_skt = 0;

	/* cut up the location into address, port and path */
	*prot = p;
	while (*p && (*p != ':' || p[1] != '/' || p[2] != '/'))
		p++;
	if (!*p) {
		end = p;
		p = (char *)*prot;
		*prot = end;
	} else {
		*p = '\0';
		p += 3;
	}
	if (*p == '+') /* unix skt */
		unix_skt = 1;

	*ads = p;
	if (!strcmp(*prot, "http") || !strcmp(*prot, "ws"))
		*port = 80;
	else if (!strcmp(*prot, "https") || !strcmp(*prot, "wss"))
		*port = 443;

	if (*p == '[') {
		++(*ads);
		while (*p && *p != ']')
			p++;
		if (*p)
			*p++ = '\0';
	} else
		while (*p && *p != ':' && (unix_skt || *p != '/'))
			p++;

	if (*p == ':') {
		*p++ = '\0';
		*port = atoi(p);
		while (*p && *p != '/')
			p++;
	}
	*path = "/";
	if (*p) {
		*p++ = '\0';
		if (*p)
			*path = p;
	}

	return 0;
}

/* ... */
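
/*
 * Find a query-string argument by name: each URI_ARGS fragment is copied
 * into buf and compared against the leading part of name, so callers
 * normally pass a name ending in '=' and receive a pointer into buf just
 * past it, ie, the value.  For example, with "?id=123",
 * lws_get_urlarg_by_name(wsi, "id=", buf, sizeof(buf)) returns "123".
 */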
const char *
lws_get_urlarg_by_name(struct lws *wsi, const char *name, char *buf, int len)
{
	int n = 0, sl = (int)strlen(name);

	while (lws_hdr_copy_fragment(wsi, buf, len,
				     WSI_TOKEN_HTTP_URI_ARGS, n) >= 0) {

		if (!strncmp(buf, name, sl))
			return buf + sl;

		n++;
	}

	return NULL;
}

#if defined(LWS_WITHOUT_EXTENSIONS)

/* we need to provide dummy callbacks for internal exts
 * so user code runs when faced with a lib compiled with
 * extensions disabled.
 */

int
lws_extension_callback_pm_deflate(struct lws_context *context,
				  const struct lws_extension *ext,
				  struct lws *wsi,
				  enum lws_extension_callback_reasons reason,
				  void *user, void *in, size_t len)
{
	(void)context;
	(void)ext;
	(void)wsi;
	(void)reason;
	(void)user;
	(void)in;
	(void)len;

	return 0;
}

int
lws_set_extension_option(struct lws *wsi, const char *ext_name,
			 const char *opt_name, const char *opt_val)
{
	return -1;
}
#endif

int
lws_is_cgi(struct lws *wsi) {
#ifdef LWS_WITH_CGI
	return !!wsi->http.cgi;
#else
	return 0;
#endif
}

const struct lws_protocol_vhost_options *
lws_pvo_search(const struct lws_protocol_vhost_options *pvo, const char *name)
{
	while (pvo) {
		if (!strcmp(pvo->name, name))
			break;

		pvo = pvo->next;
	}

	return pvo;
}

int
lws_pvo_get_str(void *in, const char *name, const char **result)
{
	const struct lws_protocol_vhost_options *pv =
		lws_pvo_search((const struct lws_protocol_vhost_options *)in,
			       name);

	if (!pv)
		return 1;

	*result = (const char *)pv->value;

	return 0;
}
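
/*
 * Deliver a callback reason to every protocol of every vhost, using a
 * stack "fake" wsi so no real connection is implied; the return is nonzero
 * if any of the callbacks returned nonzero.
 */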
int
lws_broadcast(struct lws_context_per_thread *pt, int reason, void *in, size_t len)
{
	struct lws_vhost *v = pt->context->vhost_list;
	lws_fakewsi_def_plwsa(pt);
	int n, ret = 0;

	lws_fakewsi_prep_plwsa_ctx(pt->context);
#if !defined(LWS_PLAT_FREERTOS) && LWS_MAX_SMP > 1
	((struct lws *)plwsa)->tsi = (int)(pt - &pt->context->pt[0]);
#endif

	while (v) {
		const struct lws_protocols *p = v->protocols;

		plwsa->vhost = v; /* not a real bound wsi */

		for (n = 0; n < v->count_protocols; n++) {
			plwsa->protocol = p;
			if (p->callback &&
			    p->callback((struct lws *)plwsa, reason, NULL,
					in, len))
				ret |= 1;
			p++;
		}

		v = v->vhost_next;
	}

	return ret;
}

void *
lws_wsi_user(struct lws *wsi)
{
	return wsi->user_space;
}

int
lws_wsi_tsi(struct lws *wsi)
{
	return wsi->tsi;
}

void
lws_set_wsi_user(struct lws *wsi, void *data)
{
	if (!wsi->user_space_externally_allocated && wsi->user_space)
		lws_free(wsi->user_space);

	wsi->user_space_externally_allocated = 1;
	wsi->user_space = data;
}

struct lws *
lws_get_parent(const struct lws *wsi)
{
	return wsi->parent;
}

struct lws *
lws_get_child(const struct lws *wsi)
{
	return wsi->child_list;
}

void *
lws_get_opaque_parent_data(const struct lws *wsi)
{
	return wsi->opaque_parent_data;
}

void
lws_set_opaque_parent_data(struct lws *wsi, void *data)
{
	wsi->opaque_parent_data = data;
}

void *
lws_get_opaque_user_data(const struct lws *wsi)
{
	return wsi->a.opaque_user_data;
}

void
lws_set_opaque_user_data(struct lws *wsi, void *data)
{
	wsi->a.opaque_user_data = data;
}

int
lws_get_child_pending_on_writable(const struct lws *wsi)
{
	return wsi->parent_pending_cb_on_writable;
}

void
lws_clear_child_pending_on_writable(struct lws *wsi)
{
	wsi->parent_pending_cb_on_writable = 0;
}

const char *
lws_get_vhost_name(struct lws_vhost *vhost)
{
	return vhost->name;
}

int
lws_get_vhost_port(struct lws_vhost *vhost)
{
	return vhost->listen_port;
}

void *
lws_get_vhost_user(struct lws_vhost *vhost)
{
	return vhost->user;
}

const char *
lws_get_vhost_iface(struct lws_vhost *vhost)
{
	return vhost->iface;
}

lws_sockfd_type
lws_get_socket_fd(struct lws *wsi)
{
	if (!wsi)
		return -1;
	return wsi->desc.sockfd;
}

struct lws_vhost *
lws_vhost_get(struct lws *wsi)
{
	return wsi->a.vhost;
}

struct lws_vhost *
lws_get_vhost(struct lws *wsi)
{
	return wsi->a.vhost;
}

const struct lws_protocols *
lws_protocol_get(struct lws *wsi)
{
	return wsi->a.protocol;
}

#if defined(LWS_WITH_UDP)
const struct lws_udp *
lws_get_udp(const struct lws *wsi)
{
	return wsi->udp;
}
#endif

struct lws_context *
lws_get_context(const struct lws *wsi)
{
	return wsi->a.context;
}

#if defined(LWS_WITH_CLIENT)
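/*
 * Called when a transaction on a pipelined / queued client connection
 * completes.  Returns 0 if nothing else is queued (the wsi idles or closes
 * as usual), or 1 after bequeathing the socket, tls and queue to the next
 * queued wsi, in which case *_wsi is updated to the wsi that now holds the
 * connection.
 */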
int
_lws_generic_transaction_completed_active_conn(struct lws **_wsi,
					       char take_vh_lock)
{
	struct lws *wnew, *wsi = *_wsi;

	/*
	 * Are we constitutionally capable of having a queue, ie, we are on
	 * the "active client connections" list?
	 *
	 * If not, that's it for us.
	 */

	if (lws_dll2_is_detached(&wsi->dll_cli_active_conns))
		return 0; /* no new transaction */

	/*
	 * With h1 queuing, the original "active client" moves his attributes
	 * like fd, ssl, queue and active client list entry to the next guy in
	 * the queue before closing... it's because the user code knows the
	 * individual wsi and the action must take place in the correct wsi
	 * context.  Note this means we don't truly pipeline headers.
	 *
	 * Trying to keep the original "active client" in place to do the work
	 * of the wsi breaks down when dealing with queued POSTs otherwise; it's
	 * also competing with the real mux child arrangements and complicating
	 * the code.
	 *
	 * For that reason, see if we have any queued child now...
	 */
	if (!wsi->dll2_cli_txn_queue_owner.head) {
		/*
		 * Nothing pipelined... we should hang around a bit
		 * in case something turns up... otherwise we'll close
		 */
		lwsl_info("%s: nothing pipelined waiting\n", __func__);
		lwsi_set_state(wsi, LRS_IDLING);

		lws_set_timeout(wsi, PENDING_TIMEOUT_CLIENT_CONN_IDLE,
				wsi->keep_warm_secs);

		return 0; /* no new transaction right now */
	}

	/*
	 * We have a queued child wsi we should bequeath our assets to, before
	 * closing ourself
	 */

	if (take_vh_lock)
		lws_vhost_lock(wsi->a.vhost);

	wnew = lws_container_of(wsi->dll2_cli_txn_queue_owner.head, struct lws,
				dll2_cli_txn_queue);

	assert(wsi != wnew);

	lws_dll2_remove(&wnew->dll2_cli_txn_queue);

	assert(lws_socket_is_valid(wsi->desc.sockfd));

	__lws_change_pollfd(wsi, LWS_POLLOUT | LWS_POLLIN, 0);

	/* copy the fd */
	wnew->desc = wsi->desc;

	assert(lws_socket_is_valid(wnew->desc.sockfd));

	/* disconnect the fd from association with old wsi */

	if (__remove_wsi_socket_from_fds(wsi))
		return -1;

	sanity_assert_no_wsi_traces(wsi->a.context, wsi);
	sanity_assert_no_sockfd_traces(wsi->a.context, wsi->desc.sockfd);
	wsi->desc.sockfd = LWS_SOCK_INVALID;

	__lws_wsi_remove_from_sul(wsi);

	/*
	 * ... we're doing some magic here in terms of handing off the socket
	 * that has been active to a wsi that has not yet itself been active...
	 * depending on the event lib we may need to give a magic spark to the
	 * new guy and snuff out the old guy's magic spark at that level as well
	 */

#if defined(LWS_WITH_EVENT_LIBS)
	if (wsi->a.context->event_loop_ops->destroy_wsi)
		wsi->a.context->event_loop_ops->destroy_wsi(wsi);
	if (wsi->a.context->event_loop_ops->sock_accept)
		wsi->a.context->event_loop_ops->sock_accept(wnew);
#endif

	/* point the fd table entry to new guy */

	assert(lws_socket_is_valid(wnew->desc.sockfd));

	if (__insert_wsi_socket_into_fds(wsi->a.context, wnew))
		return -1;

#if defined(LWS_WITH_TLS)
	/* pass on the tls */

	wnew->tls = wsi->tls;
	wsi->tls.client_bio = NULL;
	wsi->tls.ssl = NULL;
	wsi->tls.use_ssl = 0;
#endif

	/* take over his copy of his endpoint as an active connection */

	wnew->cli_hostname_copy = wsi->cli_hostname_copy;
	wsi->cli_hostname_copy = NULL;
	wnew->keep_warm_secs = wsi->keep_warm_secs;

	/*
	 * selected queued guy now replaces the original leader on the
	 * active client conn list
	 */

	lws_dll2_remove(&wsi->dll_cli_active_conns);
	lws_dll2_add_tail(&wnew->dll_cli_active_conns,
			  &wsi->a.vhost->dll_cli_active_conns_owner);

	/* move any queued guys to queue on new active conn */

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   wsi->dll2_cli_txn_queue_owner.head) {
		struct lws *ww = lws_container_of(d, struct lws,
						  dll2_cli_txn_queue);
		lws_dll2_remove(&ww->dll2_cli_txn_queue);
		lws_dll2_add_tail(&ww->dll2_cli_txn_queue,
				  &wnew->dll2_cli_txn_queue_owner);

	} lws_end_foreach_dll_safe(d, d1);

	if (take_vh_lock)
		lws_vhost_unlock(wsi->a.vhost);

	/*
	 * The original leader who passed on all his powers already can die...
	 * in the call stack above us there are guys who still want to touch
	 * him, so have him die next time around the event loop, not now.
	 */

	wsi->already_did_cce = 1; /* so the close doesn't trigger a CCE */
	lws_set_timeout(wsi, 1, LWS_TO_KILL_ASYNC);

	/* after the first one, they can only be coming from the queue */
	wnew->transaction_from_pipeline_queue = 1;

	lwsl_notice("%s: pipeline queue passed wsi %p on to queued wsi %p\n",
		    __func__, wsi, wnew);

	*_wsi = wnew; /* inform caller we swapped */

	return 1; /* new transaction */
}
#endif

int LWS_WARN_UNUSED_RESULT
lws_raw_transaction_completed(struct lws *wsi)
{
	if (lws_has_buffered_out(wsi)) {
		/*
		 * ...so he tried to send something large, but it went out
		 * as a partial, but he immediately called us to say he wants
		 * to close the connection.
		 *
		 * Defer the close until the last part of the partial is sent.
		 */
		lwsl_debug("%s: %p: deferring due to partial\n", __func__, wsi);
		wsi->close_when_buffered_out_drained = 1;
		lws_callback_on_writable(wsi);

		return 0;
	}

	return -1;
}
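
/*
 * Rebind the wsi to protocol p: if it is currently bound, issue the role's
 * protocol unbind callback and drop any per-session user space we allocated,
 * then bind to p, allocate fresh user space and issue the bind callback.
 * protocol_bind_balance tracks whether an unbind is owed.
 */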
int
lws_bind_protocol(struct lws *wsi, const struct lws_protocols *p,
		  const char *reason)
{
//	if (wsi->a.protocol == p)
//		return 0;
	const struct lws_protocols *vp = wsi->a.vhost->protocols, *vpo;

	if (wsi->a.protocol && wsi->protocol_bind_balance) {
		wsi->a.protocol->callback(wsi,
			wsi->role_ops->protocol_unbind_cb[!!lwsi_role_server(wsi)],
			wsi->user_space, (void *)reason, 0);
		wsi->protocol_bind_balance = 0;
	}

	if (!wsi->user_space_externally_allocated)
		lws_free_set_NULL(wsi->user_space);

	lws_same_vh_protocol_remove(wsi);

	wsi->a.protocol = p;
	if (!p)
		return 0;

	if (lws_ensure_user_space(wsi))
		return 1;

	if (p > vp && p < &vp[wsi->a.vhost->count_protocols])
		lws_same_vh_protocol_insert(wsi, (int)(p - vp));
	else {
		int n = wsi->a.vhost->count_protocols;
		int hit = 0;

		vpo = vp;

		while (n--) {
			if (p->name && vp->name && !strcmp(p->name, vp->name)) {
				hit = 1;
				lws_same_vh_protocol_insert(wsi, (int)(vp - vpo));
				break;
			}
			vp++;
		}
		if (!hit)
			lwsl_err("%s: %p is not in vhost '%s' protocols list\n",
				 __func__, p, wsi->a.vhost->name);
	}

	if (wsi->a.protocol->callback(wsi, wsi->role_ops->protocol_bind_cb[
				      !!lwsi_role_server(wsi)],
				      wsi->user_space, NULL, 0))
		return 1;

	wsi->protocol_bind_balance = 1;

	return 0;
}
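
/*
 * An "immortal" mux substream (eg, one carrying SSE) suppresses the normal
 * keepalive timeout on the network connection that carries it.  The network
 * wsi counts its immortal children and only gets a normal timeout again
 * when the last of them closes.
 */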
void
lws_http_close_immortal(struct lws *wsi)
{
	struct lws *nwsi;

	if (!wsi->mux_substream)
		return;

	assert(wsi->mux_stream_immortal);
	wsi->mux_stream_immortal = 0;

	nwsi = lws_get_network_wsi(wsi);
	lwsl_debug("%s: %p %p %d\n", __func__, wsi, nwsi,
		   nwsi->immortal_substream_count);
	assert(nwsi->immortal_substream_count);
	nwsi->immortal_substream_count--;
	if (!nwsi->immortal_substream_count)
		/*
		 * since we closed the only immortal stream on this nwsi, we
		 * need to reapply a normal timeout regime to the nwsi
		 */
		lws_set_timeout(nwsi, PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE,
				wsi->a.vhost->keepalive_timeout ?
					wsi->a.vhost->keepalive_timeout : 31);
}

void
lws_mux_mark_immortal(struct lws *wsi)
{
	struct lws *nwsi;

	lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);

	if (!wsi->mux_substream
#if defined(LWS_WITH_CLIENT)
	    && !wsi->client_mux_substream
#endif
	) {
		lwsl_err("%s: not h2 substream\n", __func__);
		return;
	}

	nwsi = lws_get_network_wsi(wsi);
	if (!nwsi)
		return;

	lwsl_debug("%s: %p %p %d\n", __func__, wsi, nwsi,
		   nwsi->immortal_substream_count);

	wsi->mux_stream_immortal = 1;
	assert(nwsi->immortal_substream_count < 255); /* largest count */
	nwsi->immortal_substream_count++;
	if (nwsi->immortal_substream_count == 1)
		lws_set_timeout(nwsi, NO_PENDING_TIMEOUT, 0);
}

int
lws_http_mark_sse(struct lws *wsi)
{
	if (!wsi)
		return 0;

	lws_http_headers_detach(wsi);
	lws_mux_mark_immortal(wsi);

	if (wsi->mux_substream)
		wsi->h2_stream_carries_sse = 1;

	return 0;
}

#if defined(LWS_WITH_CLIENT)

const char *
lws_wsi_client_stash_item(struct lws *wsi, int stash_idx, int hdr_idx)
{
	/* try the generic client stash */
	if (wsi->stash)
		return wsi->stash->cis[stash_idx];

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* if not, use the ah stash if applicable */
	return lws_hdr_simple_ptr(wsi, hdr_idx);
#else
	return NULL;
#endif
}
#endif

#if defined(LWS_ROLE_H2) || defined(LWS_ROLE_MQTT)
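/*
 * Add wsi as a mux child (stream) of parent_wsi under stream id sid, at the
 * head of the parent's child list.
 */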
void
lws_wsi_mux_insert(struct lws *wsi, struct lws *parent_wsi, int sid)
{
	lwsl_info("%s: wsi %p, par %p: assign sid %d (curr %d)\n", __func__,
		  wsi, parent_wsi, sid, wsi->mux.my_sid);

	if (wsi->mux.my_sid && wsi->mux.my_sid != (unsigned int)sid)
		assert(0);

	wsi->mux.my_sid = sid;
	wsi->mux.parent_wsi = parent_wsi;
	wsi->role_ops = parent_wsi->role_ops;

	/* new guy's sibling is whoever was the first child before */
	wsi->mux.sibling_list = parent_wsi->mux.child_list;

	/* first child is now the new guy */
	parent_wsi->mux.child_list = wsi;

	parent_wsi->mux.child_count++;
}

struct lws *
lws_wsi_mux_from_id(struct lws *parent_wsi, unsigned int sid)
{
	lws_start_foreach_ll(struct lws *, wsi, parent_wsi->mux.child_list) {
		if (wsi->mux.my_sid == sid)
			return wsi;
	} lws_end_foreach_ll(wsi, mux.sibling_list);

	return NULL;
}

void
lws_wsi_mux_dump_children(struct lws *wsi)
{
#if defined(_DEBUG)
	if (!wsi->mux.parent_wsi || !lwsl_visible(LLL_INFO))
		return;

	lws_start_foreach_llp(struct lws **, w,
			      wsi->mux.parent_wsi->mux.child_list) {
		lwsl_info(" \\---- child %s %p\n",
			  (*w)->role_ops ? (*w)->role_ops->name : "?", *w);
		assert(*w != (*w)->mux.sibling_list);
	} lws_end_foreach_llp(w, mux.sibling_list);
#endif
}

void
lws_wsi_mux_close_children(struct lws *wsi, int reason)
{
	struct lws *wsi2;
	struct lws **w;

	if (!wsi->mux.child_list)
		return;

	w = &wsi->mux.child_list;
	while (*w) {
		lwsl_info(" closing child %p\n", *w);
		/* disconnect from siblings */
		wsi2 = (*w)->mux.sibling_list;
		assert (wsi2 != *w);
		(*w)->mux.sibling_list = NULL;
		(*w)->socket_is_permanently_unusable = 1;
		__lws_close_free_wsi(*w, reason, "mux child recurse");
		*w = wsi2;
	}
}

void
lws_wsi_mux_sibling_disconnect(struct lws *wsi)
{
	struct lws *wsi2;

	lws_start_foreach_llp(struct lws **, w,
			      wsi->mux.parent_wsi->mux.child_list) {

		/* disconnect from siblings */
		if (*w == wsi) {
			wsi2 = (*w)->mux.sibling_list;
			(*w)->mux.sibling_list = NULL;
			*w = wsi2;
			lwsl_debug(" %p disentangled from sibling %p\n",
				   wsi, wsi2);
			break;
		}
	} lws_end_foreach_llp(w, mux.sibling_list);
	wsi->mux.parent_wsi->mux.child_count--;

	wsi->mux.parent_wsi = NULL;
}

void
lws_wsi_mux_dump_waiting_children(struct lws *wsi)
{
#if defined(_DEBUG)
	lwsl_info("%s: %p: children waiting for POLLOUT service:\n",
		  __func__, wsi);

	wsi = wsi->mux.child_list;
	while (wsi) {
		lwsl_info(" %c %p: sid %u: 0x%x %s %s\n",
			  wsi->mux.requested_POLLOUT ? '*' : ' ',
			  wsi, wsi->mux.my_sid, lwsi_state(wsi),
			  wsi->role_ops->name,
			  wsi->a.protocol ? wsi->a.protocol->name : "noprotocol");

		wsi = wsi->mux.sibling_list;
	}
#endif
}

int
lws_wsi_mux_mark_parents_needing_writeable(struct lws *wsi)
{
	struct lws /* *network_wsi = lws_get_network_wsi(wsi), */ *wsi2;
	//int already = network_wsi->mux.requested_POLLOUT;

	/* mark everybody above him as requesting pollout */

	wsi2 = wsi;
	while (wsi2) {
		wsi2->mux.requested_POLLOUT = 1;
		lwsl_info("%s: mark wsi: %p, sid %u, pending writable\n",
			  __func__, wsi2, wsi2->mux.my_sid);
		wsi2 = wsi2->mux.parent_wsi;
	}

	return 0; // already;
}
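
/*
 * Move the child at *wsi2 to the tail of its parent's sibling list, so that
 * POLLOUT service effectively round-robins between mux children, and clear
 * the chosen child's requested_POLLOUT flag before returning it.
 */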
struct lws *
lws_wsi_mux_move_child_to_tail(struct lws **wsi2)
{
	struct lws *w = *wsi2;

	while (w) {
		if (!w->mux.sibling_list) { /* w is the current last */
			lwsl_debug("w=%p, *wsi2 = %p\n", w, *wsi2);

			if (w == *wsi2) /* we are already last */
				break;

			/* last points to us as new last */
			w->mux.sibling_list = *wsi2;

			/* guy pointing to us until now points to
			 * our old next */
			*wsi2 = (*wsi2)->mux.sibling_list;

			/* we point to nothing because we are last */
			w->mux.sibling_list->mux.sibling_list = NULL;

			/* w becomes us */
			w = w->mux.sibling_list;
			break;
		}
		w = w->mux.sibling_list;
	}

	/* clear the waiting for POLLOUT on the guy that was chosen */
	if (w)
		w->mux.requested_POLLOUT = 0;

	return w;
}

int
lws_wsi_mux_action_pending_writeable_reqs(struct lws *wsi)
{
	struct lws *w = wsi->mux.child_list;

	while (w) {
		if (w->mux.requested_POLLOUT) {
			if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
				return -1;
			return 0;
		}
		w = w->mux.sibling_list;
	}

	if (lws_change_pollfd(wsi, LWS_POLLOUT, 0))
		return -1;

	return 0;
}
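
/*
 * Track whether the tx credit window has run dry ("skint"): while tx_cr is
 * not positive we cannot send DATA regardless of POLLOUT, so log the
 * transitions and return nonzero while skint.
 */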
int
lws_wsi_txc_check_skint(struct lws_tx_credit *txc, int32_t tx_cr)
{
	if (txc->tx_cr <= 0) {
		/*
		 * The other side is not able to cope with us sending any
		 * DATA, so it does not matter whether we have POLLOUT on our
		 * side if it's DATA we want to send.
		 */
		if (!txc->skint)
			lwsl_info("%s: %p: skint (%d)\n", __func__, txc,
				  (int)txc->tx_cr);

		txc->skint = 1;

		return 1;
	}

	if (txc->skint)
		lwsl_info("%s: %p: unskint (%d)\n", __func__, txc,
			  (int)txc->tx_cr);

	txc->skint = 0;

	return 0;
}

#if defined(_DEBUG)
void
lws_wsi_txc_describe(struct lws_tx_credit *txc, const char *at, uint32_t sid)
{
	lwsl_info("%s: %p: %s: sid %d: %speer-to-us: %d, us-to-peer: %d\n",
		  __func__, txc, at, (int)sid, txc->skint ? "SKINT, " : "",
		  (int)txc->peer_tx_cr_est, (int)txc->tx_cr);
}
#endif

int
lws_wsi_tx_credit(struct lws *wsi, char peer_to_us, int add)
{
	if (wsi->role_ops && wsi->role_ops->tx_credit)
		return wsi->role_ops->tx_credit(wsi, peer_to_us, add);

	return 0;
}

/*
 * Let the protocol know about incoming tx credit window updates if it's
 * managing the flow control manually (it may want to proxy this information)
 */

int
lws_wsi_txc_report_manual_txcr_in(struct lws *wsi, int32_t bump)
{
	if (!wsi->txc.manual)
		/*
		 * If we don't care about managing it manually, no need to
		 * report it
		 */
		return 0;

	return user_callback_handle_rxflow(wsi->a.protocol->callback,
					   wsi, LWS_CALLBACK_WSI_TX_CREDIT_GET,
					   wsi->user_space, NULL, (size_t)bump);
}

#if defined(LWS_WITH_CLIENT)

int
lws_wsi_mux_apply_queue(struct lws *wsi)
{
	/* we have a transaction queue that wants to pipeline */

	lws_vhost_lock(wsi->a.vhost);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   wsi->dll2_cli_txn_queue_owner.head) {
		struct lws *w = lws_container_of(d, struct lws,
						 dll2_cli_txn_queue);

#if defined(LWS_ROLE_H2)
		if (lwsi_role_http(wsi) &&
		    lwsi_state(w) == LRS_H2_WAITING_TO_SEND_HEADERS) {
			lwsl_info("%s: cli pipeq %p to be h2\n", __func__, w);

			lwsi_set_state(w, LRS_H1C_ISSUE_HANDSHAKE2);

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an h2 stream */
			lws_wsi_h2_adopt(wsi, w);
		}
#endif

#if defined(LWS_ROLE_MQTT)
		if (lwsi_role_mqtt(wsi) &&
		    lwsi_state(wsi) == LRS_ESTABLISHED) {
			lwsl_info("%s: cli pipeq %p to be mqtt\n", __func__, w);

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an mqtt child stream */
			lws_wsi_mqtt_adopt(wsi, w);
		}
#endif

	} lws_end_foreach_dll_safe(d, d1);

	lws_vhost_unlock(wsi->a.vhost);

	return 0;
}
#endif
#endif