// basisu_enc.cpp
// Copyright (C) 2019 Binomial LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "basisu_enc.h"
#include "lodepng.h"
#include "basisu_resampler.h"
#include "basisu_resampler_filters.h"
#include "basisu_etc.h"
#include "transcoder/basisu_transcoder.h"

#if defined(_WIN32)
// For QueryPerformanceCounter/QueryPerformanceFrequency
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#elif defined(__APPLE__) || defined(__GNUC__)
// For gettimeofday()
#include <sys/time.h>
#endif

namespace basisu
{
	uint64_t interval_timer::g_init_ticks, interval_timer::g_freq;
	double interval_timer::g_timer_freq;

	uint8_t g_hamming_dist[256] =
	{
		0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
		1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
		1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
		1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
		2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
		3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
		3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
		4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
	};
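
	// g_hamming_dist is a byte popcount table: g_hamming_dist[a ^ b] gives the Hamming
	// distance (the number of differing bits) between any two bytes a and b.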

	// Encoder library initialization (just call once at startup)
	void basisu_encoder_init()
	{
		basist::basisu_transcoder_init();
	}

	void error_printf(const char *pFmt, ...)
	{
		char buf[2048];

		va_list args;
		va_start(args, pFmt);
#ifdef _WIN32
		vsprintf_s(buf, sizeof(buf), pFmt, args);
#else
		vsnprintf(buf, sizeof(buf), pFmt, args);
#endif
		va_end(args);

		fprintf(stderr, "ERROR: %s", buf);
	}

#if defined(_WIN32)
	inline void query_counter(timer_ticks* pTicks)
	{
		QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER*>(pTicks));
	}
	inline void query_counter_frequency(timer_ticks* pTicks)
	{
		QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER*>(pTicks));
	}
#elif defined(__APPLE__) || defined(__GNUC__)
	inline void query_counter(timer_ticks* pTicks)
	{
		struct timeval cur_time;
		gettimeofday(&cur_time, NULL);
		*pTicks = static_cast<unsigned long long>(cur_time.tv_sec) * 1000000ULL + static_cast<unsigned long long>(cur_time.tv_usec);
	}
	inline void query_counter_frequency(timer_ticks* pTicks)
	{
		*pTicks = 1000000;
	}
#else
#error TODO
#endif

	interval_timer::interval_timer() : m_start_time(0), m_stop_time(0), m_started(false), m_stopped(false)
	{
		if (!g_timer_freq)
			init();
	}

	void interval_timer::start()
	{
		query_counter(&m_start_time);
		m_started = true;
		m_stopped = false;
	}

	void interval_timer::stop()
	{
		assert(m_started);
		query_counter(&m_stop_time);
		m_stopped = true;
	}

	double interval_timer::get_elapsed_secs() const
	{
		assert(m_started);
		if (!m_started)
			return 0;

		timer_ticks stop_time = m_stop_time;
		if (!m_stopped)
			query_counter(&stop_time);

		timer_ticks delta = stop_time - m_start_time;
		return delta * g_timer_freq;
	}

	void interval_timer::init()
	{
		if (!g_timer_freq)
		{
			query_counter_frequency(&g_freq);

			// Compute the reciprocal in double precision (g_timer_freq is a double).
			g_timer_freq = 1.0 / g_freq;

			query_counter(&g_init_ticks);
		}
	}

	timer_ticks interval_timer::get_ticks()
	{
		if (!g_timer_freq)
			init();
		timer_ticks ticks;
		query_counter(&ticks);
		return ticks - g_init_ticks;
	}

	double interval_timer::ticks_to_secs(timer_ticks ticks)
	{
		if (!g_timer_freq)
			init();
		return ticks * g_timer_freq;
	}

	bool load_png(const char* pFilename, image& img)
	{
		std::vector<uint8_t> buffer;
		unsigned err = lodepng::load_file(buffer, std::string(pFilename));
		if (err)
			return false;

		unsigned w = 0, h = 0;

		if (sizeof(void *) == sizeof(uint32_t))
		{
			// Inspect the image first on 32-bit builds, to see if the image would require too much memory.
			lodepng::State state;
			err = lodepng_inspect(&w, &h, &state, &buffer[0], buffer.size());
			if ((err != 0) || (!w) || (!h))
				return false;

			// Compute the expected allocation size in 64 bits so huge dimensions can't overflow.
			const uint64_t expected_alloc_size = (uint64_t)w * (uint64_t)h * sizeof(uint32_t);

			// If the file is too large on 32-bit builds then just bail now, to prevent causing a memory exception.
			const uint64_t MAX_ALLOC_SIZE = 250000000;
			if (expected_alloc_size >= MAX_ALLOC_SIZE)
			{
				error_printf("Image \"%s\" is too large (%ux%u) to process in a 32-bit build!\n", pFilename, w, h);
				return false;
			}

			w = h = 0;
		}

		std::vector<uint8_t> out;
		err = lodepng::decode(out, w, h, &buffer[0], buffer.size());
		if ((err != 0) || (!w) || (!h))
			return false;

		if (out.size() != ((size_t)w * h * 4))
			return false;

		img.resize(w, h);
		memcpy(img.get_ptr(), &out[0], out.size());

		return true;
	}

	bool save_png(const char* pFilename, const image &img, uint32_t image_save_flags, uint32_t grayscale_comp)
	{
		if (!img.get_total_pixels())
			return false;

		std::vector<uint8_t> out;
		unsigned err = 0;

		if (image_save_flags & cImageSaveGrayscale)
		{
			uint8_vec g_pixels(img.get_width() * img.get_height());
			uint8_t *pDst = &g_pixels[0];

			for (uint32_t y = 0; y < img.get_height(); y++)
				for (uint32_t x = 0; x < img.get_width(); x++)
					*pDst++ = img(x, y)[grayscale_comp];

			err = lodepng::encode(out, (const uint8_t*)&g_pixels[0], img.get_width(), img.get_height(), LCT_GREY, 8);
		}
		else
		{
			bool has_alpha = img.has_alpha();
			if ((!has_alpha) || ((image_save_flags & cImageSaveIgnoreAlpha) != 0))
			{
				uint8_vec rgb_pixels(img.get_width() * 3 * img.get_height());
				uint8_t *pDst = &rgb_pixels[0];

				for (uint32_t y = 0; y < img.get_height(); y++)
				{
					for (uint32_t x = 0; x < img.get_width(); x++)
					{
						const color_rgba& c = img(x, y);
						pDst[0] = c.r;
						pDst[1] = c.g;
						pDst[2] = c.b;
						pDst += 3;
					}
				}

				err = lodepng::encode(out, (const uint8_t*)&rgb_pixels[0], img.get_width(), img.get_height(), LCT_RGB, 8);
			}
			else
			{
				err = lodepng::encode(out, (const uint8_t*)img.get_ptr(), img.get_width(), img.get_height(), LCT_RGBA, 8);
			}
		}

		// Don't silently ignore encode failures (err was previously overwritten by save_file).
		if (err)
			return false;

		err = lodepng::save_file(out, std::string(pFilename));
		if (err)
			return false;

		return true;
	}
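
	// Usage sketch (illustrative only): round-trip an image through the PNG helpers.
	// The arguments beyond the filename/image are assumed to default as declared in
	// basisu_enc.h.
	//
	//   image img;
	//   if (load_png("input.png", img))
	//       save_png("output.png", img, cImageSaveIgnoreAlpha);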

	bool read_file_to_vec(const char* pFilename, uint8_vec& data)
	{
		FILE* pFile = nullptr;
#ifdef _WIN32
		fopen_s(&pFile, pFilename, "rb");
#else
		pFile = fopen(pFilename, "rb");
#endif
		if (!pFile)
			return false;

		fseek(pFile, 0, SEEK_END);
#ifdef _WIN32
		int64_t filesize = _ftelli64(pFile);
#else
		int64_t filesize = ftello(pFile);
#endif
		if (filesize < 0)
		{
			fclose(pFile);
			return false;
		}
		fseek(pFile, 0, SEEK_SET);

		if (sizeof(size_t) == sizeof(uint32_t))
		{
			if (filesize > 0x70000000)
			{
				// File might be too big to load safely in one alloc
				fclose(pFile);
				return false;
			}
		}

		data.resize((size_t)filesize);

		if (filesize)
		{
			if (fread(&data[0], 1, (size_t)filesize, pFile) != (size_t)filesize)
			{
				fclose(pFile);
				return false;
			}
		}

		fclose(pFile);
		return true;
	}

	bool write_data_to_file(const char* pFilename, const void* pData, size_t len)
	{
		FILE* pFile = nullptr;
#ifdef _WIN32
		fopen_s(&pFile, pFilename, "wb");
#else
		pFile = fopen(pFilename, "wb");
#endif
		if (!pFile)
			return false;

		if (len)
		{
			if (fwrite(pData, 1, len, pFile) != len)
			{
				fclose(pFile);
				return false;
			}
		}

		return fclose(pFile) != EOF;
	}
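
	// Usage sketch (illustrative only): copy a file through the I/O helpers.
	//
	//   uint8_vec data;
	//   if (read_file_to_vec("blob.bin", data))
	//       write_data_to_file("copy.bin", data.size() ? &data[0] : nullptr, data.size());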

	float linear_to_srgb(float l)
	{
		assert(l >= 0.0f && l <= 1.0f);
		if (l < .0031308f)
			return saturate(l * 12.92f);
		else
			return saturate(1.055f * powf(l, 1.0f/2.4f) - .055f);
	}

	float srgb_to_linear(float s)
	{
		assert(s >= 0.0f && s <= 1.0f);
		if (s < .04045f)
			return saturate(s * (1.0f/12.92f));
		else
			return saturate(powf((s + .055f) * (1.0f/1.055f), 2.4f));
	}
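
	// These are the standard piecewise sRGB transfer functions, so the two are
	// (approximate) inverses of each other: e.g. srgb_to_linear(0.5f) is roughly 0.214f,
	// and linear_to_srgb(0.214f) maps back to roughly 0.5f, up to float rounding.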

	bool image_resample(const image &src, image &dst, bool srgb,
		const char *pFilter, float filter_scale,
		bool wrapping,
		uint32_t first_comp, uint32_t num_comps)
	{
		assert((first_comp + num_comps) <= 4);

		const int cMaxComps = 4;

		const uint32_t src_w = src.get_width(), src_h = src.get_height();
		const uint32_t dst_w = dst.get_width(), dst_h = dst.get_height();

		if (maximum(src_w, src_h) > BASISU_RESAMPLER_MAX_DIMENSION)
		{
			printf("Image is too large!\n");
			return false;
		}

		if (!src_w || !src_h || !dst_w || !dst_h)
			return false;

		if ((num_comps < 1) || (num_comps > cMaxComps))
			return false;

		if ((minimum(dst_w, dst_h) < 1) || (maximum(dst_w, dst_h) > BASISU_RESAMPLER_MAX_DIMENSION))
		{
			printf("Image is too large!\n");
			return false;
		}

		if ((src_w == dst_w) && (src_h == dst_h))
		{
			dst = src;
			return true;
		}

		float srgb_to_linear_table[256];
		if (srgb)
		{
			for (int i = 0; i < 256; ++i)
				srgb_to_linear_table[i] = srgb_to_linear((float)i * (1.0f/255.0f));
		}

		const int LINEAR_TO_SRGB_TABLE_SIZE = 8192;
		uint8_t linear_to_srgb_table[LINEAR_TO_SRGB_TABLE_SIZE];

		if (srgb)
		{
			for (int i = 0; i < LINEAR_TO_SRGB_TABLE_SIZE; ++i)
				linear_to_srgb_table[i] = (uint8_t)clamp<int>((int)(255.0f * linear_to_srgb((float)i * (1.0f / (LINEAR_TO_SRGB_TABLE_SIZE - 1))) + .5f), 0, 255);
		}

		std::vector<float> samples[cMaxComps];
		Resampler *resamplers[cMaxComps];

		resamplers[0] = new Resampler(src_w, src_h, dst_w, dst_h,
			wrapping ? Resampler::BOUNDARY_WRAP : Resampler::BOUNDARY_CLAMP, 0.0f, 1.0f,
			pFilter, nullptr, nullptr, filter_scale, filter_scale, 0, 0);
		samples[0].resize(src_w);

		for (uint32_t i = 1; i < num_comps; ++i)
		{
			resamplers[i] = new Resampler(src_w, src_h, dst_w, dst_h,
				wrapping ? Resampler::BOUNDARY_WRAP : Resampler::BOUNDARY_CLAMP, 0.0f, 1.0f,
				pFilter, resamplers[0]->get_clist_x(), resamplers[0]->get_clist_y(), filter_scale, filter_scale, 0, 0);
			samples[i].resize(src_w);
		}

		uint32_t dst_y = 0;

		for (uint32_t src_y = 0; src_y < src_h; ++src_y)
		{
			const color_rgba *pSrc = &src(0, src_y);

			// Put source lines into resampler(s)
			for (uint32_t x = 0; x < src_w; ++x)
			{
				for (uint32_t c = 0; c < num_comps; ++c)
				{
					const uint32_t comp_index = first_comp + c;
					const uint32_t v = (*pSrc)[comp_index];

					if (!srgb || (comp_index == 3))
						samples[c][x] = v * (1.0f / 255.0f);
					else
						samples[c][x] = srgb_to_linear_table[v];
				}

				pSrc++;
			}

			for (uint32_t c = 0; c < num_comps; ++c)
			{
				if (!resamplers[c]->put_line(&samples[c][0]))
				{
					for (uint32_t i = 0; i < num_comps; i++)
						delete resamplers[i];
					return false;
				}
			}

			// Now retrieve any output lines
			for (;;)
			{
				uint32_t c;
				for (c = 0; c < num_comps; ++c)
				{
					const uint32_t comp_index = first_comp + c;

					const float *pOutput_samples = resamplers[c]->get_line();
					if (!pOutput_samples)
						break;

					const bool linear_flag = !srgb || (comp_index == 3);

					color_rgba *pDst = &dst(0, dst_y);

					for (uint32_t x = 0; x < dst_w; x++)
					{
						// TODO: Add dithering
						if (linear_flag)
						{
							int j = (int)(255.0f * pOutput_samples[x] + .5f);
							(*pDst)[comp_index] = (uint8_t)clamp<int>(j, 0, 255);
						}
						else
						{
							int j = (int)((LINEAR_TO_SRGB_TABLE_SIZE - 1) * pOutput_samples[x] + .5f);
							(*pDst)[comp_index] = linear_to_srgb_table[clamp<int>(j, 0, LINEAR_TO_SRGB_TABLE_SIZE - 1)];
						}

						pDst++;
					}
				}
				if (c < num_comps)
					break;

				++dst_y;
			}
		}

		for (uint32_t i = 0; i < num_comps; ++i)
			delete resamplers[i];

		return true;
	}
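
	// Usage sketch (illustrative only): downsample an sRGB RGBA image to half size.
	// "kaiser" is one of the filter names registered in basisu_resampler_filters.cpp;
	// the trailing parameters are assumed to default as declared in basisu_enc.h.
	//
	//   image half_img(src.get_width() / 2, src.get_height() / 2);
	//   if (!image_resample(src, half_img, true, "kaiser"))
	//       error_printf("image_resample() failed!\n");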

	void canonical_huffman_calculate_minimum_redundancy(sym_freq *A, int num_syms)
	{
		// See the paper "In-Place Calculation of Minimum Redundancy Codes" by Moffat and Katajainen
		if (!num_syms)
			return;

		if (1 == num_syms)
		{
			A[0].m_key = 1;
			return;
		}

		A[0].m_key += A[1].m_key;

		int s = 2, r = 0, next;
		for (next = 1; next < (num_syms - 1); ++next)
		{
			if ((s >= num_syms) || (A[r].m_key < A[s].m_key))
			{
				A[next].m_key = A[r].m_key;
				A[r].m_key = static_cast<uint16_t>(next);
				++r;
			}
			else
			{
				A[next].m_key = A[s].m_key;
				++s;
			}

			if ((s >= num_syms) || ((r < next) && A[r].m_key < A[s].m_key))
			{
				A[next].m_key = static_cast<uint16_t>(A[next].m_key + A[r].m_key);
				A[r].m_key = static_cast<uint16_t>(next);
				++r;
			}
			else
			{
				A[next].m_key = static_cast<uint16_t>(A[next].m_key + A[s].m_key);
				++s;
			}
		}

		A[num_syms - 2].m_key = 0;

		for (next = num_syms - 3; next >= 0; --next)
		{
			A[next].m_key = 1 + A[A[next].m_key].m_key;
		}

		int num_avail = 1, num_used = 0, depth = 0;
		r = num_syms - 2;
		next = num_syms - 1;

		while (num_avail > 0)
		{
			for ( ; (r >= 0) && ((int)A[r].m_key == depth); ++num_used, --r )
				;

			for ( ; num_avail > num_used; --next, --num_avail)
				A[next].m_key = static_cast<uint16_t>(depth);

			num_avail = 2 * num_used;
			num_used = 0;
			++depth;
		}
	}

	void canonical_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
	{
		int i;
		uint32_t total = 0;

		if (code_list_len <= 1)
			return;

		for (i = max_code_size + 1; i <= cHuffmanMaxSupportedInternalCodeSize; i++)
			pNum_codes[max_code_size] += pNum_codes[i];

		for (i = max_code_size; i > 0; i--)
			total += (((uint32_t)pNum_codes[i]) << (max_code_size - i));

		while (total != (1UL << max_code_size))
		{
			pNum_codes[max_code_size]--;

			for (i = max_code_size - 1; i > 0; i--)
			{
				if (pNum_codes[i])
				{
					pNum_codes[i]--;
					pNum_codes[i + 1] += 2;
					break;
				}
			}

			total--;
		}
	}

	sym_freq *canonical_huffman_radix_sort_syms(uint32_t num_syms, sym_freq *pSyms0, sym_freq *pSyms1)
	{
		uint32_t total_passes = 2, pass_shift, pass, i, hist[256 * 2];
		sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;

		clear_obj(hist);

		for (i = 0; i < num_syms; i++)
		{
			uint32_t freq = pSyms0[i].m_key;
			hist[freq & 0xFF]++;
			hist[256 + ((freq >> 8) & 0xFF)]++;
		}

		while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
			total_passes--;

		for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
		{
			const uint32_t *pHist = &hist[pass << 8];

			uint32_t offsets[256], cur_ofs = 0;
			for (i = 0; i < 256; i++)
			{
				offsets[i] = cur_ofs;
				cur_ofs += pHist[i];
			}

			for (i = 0; i < num_syms; i++)
				pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];

			sym_freq *t = pCur_syms;
			pCur_syms = pNew_syms;
			pNew_syms = t;
		}

		return pCur_syms;
	}

	bool huffman_encoding_table::init(uint32_t num_syms, const uint16_t *pFreq, uint32_t max_code_size)
	{
		if (max_code_size > cHuffmanMaxSupportedCodeSize)
			return false;

		if ((!num_syms) || (num_syms > cHuffmanMaxSyms))
			return false;

		uint32_t total_used_syms = 0;
		for (uint32_t i = 0; i < num_syms; i++)
			if (pFreq[i])
				total_used_syms++;

		if (!total_used_syms)
			return false;

		std::vector<sym_freq> sym_freq0(total_used_syms), sym_freq1(total_used_syms);
		for (uint32_t i = 0, j = 0; i < num_syms; i++)
		{
			if (pFreq[i])
			{
				sym_freq0[j].m_key = pFreq[i];
				sym_freq0[j++].m_sym_index = static_cast<uint16_t>(i);
			}
		}

		sym_freq *pSym_freq = canonical_huffman_radix_sort_syms(total_used_syms, &sym_freq0[0], &sym_freq1[0]);

		canonical_huffman_calculate_minimum_redundancy(pSym_freq, total_used_syms);

		int num_codes[cHuffmanMaxSupportedInternalCodeSize + 1];
		clear_obj(num_codes);

		for (uint32_t i = 0; i < total_used_syms; i++)
		{
			if (pSym_freq[i].m_key > cHuffmanMaxSupportedInternalCodeSize)
				return false;

			num_codes[pSym_freq[i].m_key]++;
		}

		canonical_huffman_enforce_max_code_size(num_codes, total_used_syms, max_code_size);

		m_code_sizes.resize(0);
		m_code_sizes.resize(num_syms);

		m_codes.resize(0);
		m_codes.resize(num_syms);

		for (uint32_t i = 1, j = total_used_syms; i <= max_code_size; i++)
			for (uint32_t l = num_codes[i]; l > 0; l--)
				m_code_sizes[pSym_freq[--j].m_sym_index] = static_cast<uint8_t>(i);

		uint32_t next_code[cHuffmanMaxSupportedInternalCodeSize + 1];

		next_code[1] = 0;
		for (uint32_t j = 0, i = 2; i <= max_code_size; i++)
			next_code[i] = j = ((j + num_codes[i - 1]) << 1);

		for (uint32_t i = 0; i < num_syms; i++)
		{
			uint32_t rev_code = 0, code, code_size;

			if ((code_size = m_code_sizes[i]) == 0)
				continue;
			if (code_size > cHuffmanMaxSupportedInternalCodeSize)
				return false;

			code = next_code[code_size]++;

			for (uint32_t l = code_size; l > 0; l--, code >>= 1)
				rev_code = (rev_code << 1) | (code & 1);

			m_codes[i] = static_cast<uint16_t>(rev_code);
		}

		return true;
	}

	bool huffman_encoding_table::init(uint32_t num_syms, const uint32_t *pSym_freq, uint32_t max_code_size)
	{
		if ((!num_syms) || (num_syms > cHuffmanMaxSyms))
			return false;

		uint16_vec sym_freq(num_syms);

		uint32_t max_freq = 0;
		for (uint32_t i = 0; i < num_syms; i++)
			max_freq = maximum(max_freq, pSym_freq[i]);

		if (max_freq < UINT16_MAX)
		{
			for (uint32_t i = 0; i < num_syms; i++)
				sym_freq[i] = static_cast<uint16_t>(pSym_freq[i]);
		}
		else
		{
			// Scale the frequencies down to 16 bits, with rounding, ensuring every used
			// symbol keeps a nonzero frequency.
			for (uint32_t i = 0; i < num_syms; i++)
				if (pSym_freq[i])
					sym_freq[i] = static_cast<uint16_t>(maximum<uint32_t>((pSym_freq[i] * 65534U + (max_freq >> 1)) / max_freq, 1));
		}

		return init(num_syms, &sym_freq[0], max_code_size);
	}
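
	// Usage sketch (illustrative only): build a code table from raw 32-bit symbol
	// frequencies, then serialize the table plus one code with a bitwise_coder,
	// mirroring the pattern used by huffman_test() below.
	//
	//   const uint32_t freqs[4] = { 10, 5, 1, 1 };
	//   huffman_encoding_table tab;
	//   if (tab.init(4, freqs, 16))
	//   {
	//       bitwise_coder c;
	//       c.init(1024);
	//       c.emit_huffman_table(tab);
	//       c.put_code(0, tab);
	//       c.flush();
	//   }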

	void bitwise_coder::end_nonzero_run(uint16_vec &syms, uint32_t &run_size, uint32_t len)
	{
		if (run_size)
		{
			if (run_size < cHuffmanSmallRepeatSizeMin)
			{
				while (run_size--)
					syms.push_back(static_cast<uint16_t>(len));
			}
			else if (run_size <= cHuffmanSmallRepeatSizeMax)
			{
				syms.push_back(static_cast<uint16_t>(cHuffmanSmallRepeatCode | ((run_size - cHuffmanSmallRepeatSizeMin) << 6)));
			}
			else
			{
				assert((run_size >= cHuffmanBigRepeatSizeMin) && (run_size <= cHuffmanBigRepeatSizeMax));
				syms.push_back(static_cast<uint16_t>(cHuffmanBigRepeatCode | ((run_size - cHuffmanBigRepeatSizeMin) << 6)));
			}
		}

		run_size = 0;
	}

	void bitwise_coder::end_zero_run(uint16_vec &syms, uint32_t &run_size)
	{
		if (run_size)
		{
			if (run_size < cHuffmanSmallZeroRunSizeMin)
			{
				while (run_size--)
					syms.push_back(0);
			}
			else if (run_size <= cHuffmanSmallZeroRunSizeMax)
			{
				syms.push_back(static_cast<uint16_t>(cHuffmanSmallZeroRunCode | ((run_size - cHuffmanSmallZeroRunSizeMin) << 6)));
			}
			else
			{
				assert((run_size >= cHuffmanBigZeroRunSizeMin) && (run_size <= cHuffmanBigZeroRunSizeMax));
				syms.push_back(static_cast<uint16_t>(cHuffmanBigZeroRunCode | ((run_size - cHuffmanBigZeroRunSizeMin) << 6)));
			}
		}

		run_size = 0;
	}

	uint32_t bitwise_coder::emit_huffman_table(const huffman_encoding_table &tab)
	{
		const uint64_t start_bits = m_total_bits;

		const uint8_vec &code_sizes = tab.get_code_sizes();

		uint32_t total_used = tab.get_total_used_codes();
		put_bits(total_used, cHuffmanMaxSymsLog2);

		if (!total_used)
			return 0;

		uint16_vec syms;
		syms.reserve(total_used + 16);

		uint32_t prev_code_len = UINT_MAX, zero_run_size = 0, nonzero_run_size = 0;

		for (uint32_t i = 0; i <= total_used; ++i)
		{
			const uint32_t code_len = (i == total_used) ? 0xFF : code_sizes[i];
			assert((code_len == 0xFF) || (code_len <= 16));

			if (code_len)
			{
				end_zero_run(syms, zero_run_size);

				if (code_len != prev_code_len)
				{
					end_nonzero_run(syms, nonzero_run_size, prev_code_len);
					if (code_len != 0xFF)
						syms.push_back(static_cast<uint16_t>(code_len));
				}
				else if (++nonzero_run_size == cHuffmanBigRepeatSizeMax)
					end_nonzero_run(syms, nonzero_run_size, prev_code_len);
			}
			else
			{
				end_nonzero_run(syms, nonzero_run_size, prev_code_len);

				if (++zero_run_size == cHuffmanBigZeroRunSizeMax)
					end_zero_run(syms, zero_run_size);
			}

			prev_code_len = code_len;
		}

		histogram h(cHuffmanTotalCodelengthCodes);
		for (uint32_t i = 0; i < syms.size(); i++)
			h.inc(syms[i] & 63);

		huffman_encoding_table ct;
		if (!ct.init(h, 7))
			return 0;

		assert(cHuffmanTotalSortedCodelengthCodes == cHuffmanTotalCodelengthCodes);

		uint32_t total_codelength_codes;
		for (total_codelength_codes = cHuffmanTotalSortedCodelengthCodes; total_codelength_codes > 0; total_codelength_codes--)
			if (ct.get_code_sizes()[g_huffman_sorted_codelength_codes[total_codelength_codes - 1]])
				break;

		assert(total_codelength_codes);

		put_bits(total_codelength_codes, 5);
		for (uint32_t i = 0; i < total_codelength_codes; i++)
			put_bits(ct.get_code_sizes()[g_huffman_sorted_codelength_codes[i]], 3);

		for (uint32_t i = 0; i < syms.size(); ++i)
		{
			const uint32_t l = syms[i] & 63, e = syms[i] >> 6;

			put_code(l, ct);

			if (l == cHuffmanSmallZeroRunCode)
				put_bits(e, cHuffmanSmallZeroRunExtraBits);
			else if (l == cHuffmanBigZeroRunCode)
				put_bits(e, cHuffmanBigZeroRunExtraBits);
			else if (l == cHuffmanSmallRepeatCode)
				put_bits(e, cHuffmanSmallRepeatExtraBits);
			else if (l == cHuffmanBigRepeatCode)
				put_bits(e, cHuffmanBigRepeatExtraBits);
		}

		return (uint32_t)(m_total_bits - start_bits);
	}

	bool huffman_test(int rand_seed)
	{
		histogram h(19);

		// Feed in a Fibonacci sequence to force large codesizes
		h[0] += 1; h[1] += 1; h[2] += 2; h[3] += 3;
		h[4] += 5; h[5] += 8; h[6] += 13; h[7] += 21;
		h[8] += 34; h[9] += 55; h[10] += 89; h[11] += 144;
		h[12] += 233; h[13] += 377; h[14] += 610; h[15] += 987;
		h[16] += 1597; h[17] += 2584; h[18] += 4181;

		huffman_encoding_table etab;
		etab.init(h, 16);

		{
			bitwise_coder c;
			c.init(1024);

			c.emit_huffman_table(etab);
			for (int i = 0; i < 19; i++)
				c.put_code(i, etab);

			c.flush();

			basist::bitwise_decoder d;
			d.init(&c.get_bytes()[0], static_cast<uint32_t>(c.get_bytes().size()));

			basist::huffman_decoding_table dtab;
			bool success = d.read_huffman_table(dtab);
			if (!success)
			{
				assert(0);
				printf("Failure 1 (read_huffman_table)\n");
				return false;
			}

			for (uint32_t i = 0; i < 19; i++)
			{
				uint32_t s = d.decode_huffman(dtab);
				if (s != i)
				{
					assert(0);
					printf("Failure 2 (decoded symbol mismatch)\n");
					return false;
				}
			}
		}

		basisu::rand r;
		r.seed(rand_seed);

		for (int iter = 0; iter < 500000; iter++)
		{
			printf("%d\n", iter);

			uint32_t max_sym = r.irand(0, 8193);
			uint32_t num_codes = r.irand(1, 10000);

			uint_vec syms(num_codes);
			for (uint32_t i = 0; i < num_codes; i++)
			{
				if (r.bit())
					syms[i] = r.irand(0, max_sym);
				else
				{
					int s = (int)(r.gaussian((float)max_sym / 2, (float)maximum<int>(1, max_sym / 2)) + .5f);
					s = basisu::clamp<int>(s, 0, max_sym);
					syms[i] = s;
				}
			}

			histogram h1(max_sym + 1);
			for (uint32_t i = 0; i < num_codes; i++)
				h1[syms[i]]++;

			huffman_encoding_table etab2;
			if (!etab2.init(h1, 16))
			{
				assert(0);
				printf("Failure 3 (huffman_encoding_table::init)\n");
				return false;
			}

			bitwise_coder c;
			c.init(1024);

			c.emit_huffman_table(etab2);
			for (uint32_t i = 0; i < num_codes; i++)
				c.put_code(syms[i], etab2);
			c.flush();

			basist::bitwise_decoder d;
			d.init(&c.get_bytes()[0], (uint32_t)c.get_bytes().size());

			basist::huffman_decoding_table dtab;
			bool success = d.read_huffman_table(dtab);
			if (!success)
			{
				assert(0);
				printf("Failure 4 (read_huffman_table)\n");
				return false;
			}

			for (uint32_t i = 0; i < num_codes; i++)
			{
				uint32_t s = d.decode_huffman(dtab);
				if (s != syms[i])
				{
					assert(0);
					printf("Failure 5 (decoded symbol mismatch)\n");
					return false;
				}
			}
		}

		return true;
	}

	void palette_index_reorderer::init(uint32_t num_indices, const uint32_t *pIndices, uint32_t num_syms, pEntry_dist_func pDist_func, void *pCtx, float dist_func_weight)
	{
		assert((num_syms > 0) && (num_indices > 0));
		assert((dist_func_weight >= 0.0f) && (dist_func_weight <= 1.0f));

		clear();

		m_remap_table.resize(num_syms);
		m_entries_picked.reserve(num_syms);
		m_total_count_to_picked.resize(num_syms);

		if (num_indices <= 1)
			return;

		prepare_hist(num_syms, num_indices, pIndices);
		find_initial(num_syms);

		while (m_entries_to_do.size())
		{
			// Find the best entry to move into the picked list.
			uint32_t best_entry;
			double best_count;
			find_next_entry(best_entry, best_count, pDist_func, pCtx, dist_func_weight);

			// We've now chosen an entry to place in the picked list; determine which side it goes on.
			const uint32_t entry_to_move = m_entries_to_do[best_entry];

			float side = pick_side(num_syms, entry_to_move, pDist_func, pCtx, dist_func_weight);

			// Put entry_to_move either on the "left" or "right" side of the picked entries
			if (side <= 0)
				m_entries_picked.push_back(entry_to_move);
			else
				m_entries_picked.insert(m_entries_picked.begin(), entry_to_move);

			// Erase best_entry from the todo list
			m_entries_to_do.erase(m_entries_to_do.begin() + best_entry);

			// We've just moved best_entry to the picked list, so update m_total_count_to_picked[] to factor in the additional counts to best_entry.
			for (uint32_t i = 0; i < m_entries_to_do.size(); i++)
				m_total_count_to_picked[m_entries_to_do[i]] += get_hist(m_entries_to_do[i], entry_to_move, num_syms);
		}

		for (uint32_t i = 0; i < num_syms; i++)
			m_remap_table[m_entries_picked[i]] = i;
	}

	void palette_index_reorderer::prepare_hist(uint32_t num_syms, uint32_t num_indices, const uint32_t *pIndices)
	{
		m_hist.resize(0);
		m_hist.resize(num_syms * num_syms);

		for (uint32_t i = 0; i < num_indices; i++)
		{
			const uint32_t idx = pIndices[i];
			inc_hist(idx, (i < (num_indices - 1)) ? pIndices[i + 1] : -1, num_syms);
			inc_hist(idx, (i > 0) ? pIndices[i - 1] : -1, num_syms);
		}
	}

	void palette_index_reorderer::find_initial(uint32_t num_syms)
	{
		uint32_t max_count = 0, max_index = 0;
		for (uint32_t i = 0; i < num_syms * num_syms; i++)
			if (m_hist[i] > max_count)
				max_count = m_hist[i], max_index = i;

		uint32_t a = max_index / num_syms, b = max_index % num_syms;

		m_entries_picked.push_back(a);
		m_entries_picked.push_back(b);

		for (uint32_t i = 0; i < num_syms; i++)
			if ((i != b) && (i != a))
				m_entries_to_do.push_back(i);

		for (uint32_t i = 0; i < m_entries_to_do.size(); i++)
			for (uint32_t j = 0; j < m_entries_picked.size(); j++)
				m_total_count_to_picked[m_entries_to_do[i]] += get_hist(m_entries_to_do[i], m_entries_picked[j], num_syms);
	}

	void palette_index_reorderer::find_next_entry(uint32_t &best_entry, double &best_count, pEntry_dist_func pDist_func, void *pCtx, float dist_func_weight)
	{
		best_entry = 0;
		best_count = 0;

		for (uint32_t i = 0; i < m_entries_to_do.size(); i++)
		{
			const uint32_t u = m_entries_to_do[i];
			double total_count = m_total_count_to_picked[u];

			if (pDist_func)
			{
				float w = maximum<float>((*pDist_func)(u, m_entries_picked.front(), pCtx), (*pDist_func)(u, m_entries_picked.back(), pCtx));
				assert((w >= 0.0f) && (w <= 1.0f));
				total_count = (total_count + 1.0f) * lerp(1.0f - dist_func_weight, 1.0f + dist_func_weight, w);
			}

			if (total_count <= best_count)
				continue;

			best_entry = i;
			best_count = total_count;
		}
	}

	float palette_index_reorderer::pick_side(uint32_t num_syms, uint32_t entry_to_move, pEntry_dist_func pDist_func, void *pCtx, float dist_func_weight)
	{
		float which_side = 0;

		int l_count = 0, r_count = 0;
		for (uint32_t j = 0; j < m_entries_picked.size(); j++)
		{
			const int count = get_hist(entry_to_move, m_entries_picked[j], num_syms), r = ((int)m_entries_picked.size() + 1 - 2 * (j + 1));

			which_side += static_cast<float>(r * count);

			if (r >= 0)
				l_count += r * count;
			else
				r_count += -r * count;
		}

		if (pDist_func)
		{
			float w_left = lerp(1.0f - dist_func_weight, 1.0f + dist_func_weight, (*pDist_func)(entry_to_move, m_entries_picked.front(), pCtx));
			float w_right = lerp(1.0f - dist_func_weight, 1.0f + dist_func_weight, (*pDist_func)(entry_to_move, m_entries_picked.back(), pCtx));

			which_side = w_left * l_count - w_right * r_count;
		}

		return which_side;
	}
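
	// Usage sketch (illustrative only): reorder a 16-entry palette so that symbols which
	// frequently appear next to one another in the index stream receive nearby indices.
	// get_remap_table() is the accessor assumed to be declared in basisu_enc.h.
	//
	//   palette_index_reorderer reorderer;
	//   reorderer.init(num_indices, pIndices, 16, nullptr, nullptr, 0.0f);
	//   // new_index = reorderer.get_remap_table()[old_index]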

	void image_metrics::calc(const image &a, const image &b, uint32_t first_chan, uint32_t total_chans, bool avg_comp_error, bool use_601_luma)
	{
		assert((first_chan < 4U) && (first_chan + total_chans <= 4U));

		const uint32_t width = std::min(a.get_width(), b.get_width());
		const uint32_t height = std::min(a.get_height(), b.get_height());

		double hist[256];
		clear_obj(hist);

		for (uint32_t y = 0; y < height; y++)
		{
			for (uint32_t x = 0; x < width; x++)
			{
				const color_rgba &ca = a(x, y), &cb = b(x, y);

				if (total_chans)
				{
					for (uint32_t c = 0; c < total_chans; c++)
						hist[iabs(ca[first_chan + c] - cb[first_chan + c])]++;
				}
				else
				{
					if (use_601_luma)
						hist[iabs(ca.get_601_luma() - cb.get_601_luma())]++;
					else
						hist[iabs(ca.get_709_luma() - cb.get_709_luma())]++;
				}
			}
		}

		m_max = 0;
		double sum = 0.0, sum2 = 0.0;
		for (uint32_t i = 0; i < 256; i++)
		{
			if (hist[i])
			{
				m_max = std::max<float>(m_max, (float)i);
				double v = i * hist[i];
				sum += v;
				sum2 += i * v;
			}
		}

		double total_values = (double)width * (double)height;

		if (avg_comp_error)
			total_values *= (double)clamp<uint32_t>(total_chans, 1, 4);

		m_mean = (float)clamp<double>(sum / total_values, 0.0, 255.0);
		m_mean_squared = (float)clamp<double>(sum2 / total_values, 0.0, 255.0 * 255.0);
		m_rms = (float)sqrt(m_mean_squared);
		m_psnr = m_rms ? (float)clamp<double>(log10(255.0 / m_rms) * 20.0, 0.0, 300.0) : 1e+10f;
	}
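
	// Usage sketch (illustrative only): compute RGB error metrics between two images;
	// the last two parameters are assumed to default as declared in basisu_enc.h.
	//
	//   image_metrics im;
	//   im.calc(a, b, 0, 3);
	//   printf("RGB PSNR: %3.3f dB\n", im.m_psnr);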

	void fill_buffer_with_random_bytes(void *pBuf, size_t size, uint32_t seed)
	{
		rand r(seed);

		uint8_t *pDst = static_cast<uint8_t *>(pBuf);

		while (size >= sizeof(uint32_t))
		{
			*(uint32_t *)pDst = r.urand32();
			pDst += sizeof(uint32_t);
			size -= sizeof(uint32_t);
		}

		while (size)
		{
			*pDst++ = r.byte();
			size--;
		}
	}

	uint32_t hash_hsieh(const uint8_t *pBuf, size_t len)
	{
		if (!pBuf || !len)
			return 0;

		uint32_t h = static_cast<uint32_t>(len);

		const uint32_t bytes_left = len & 3;
		len >>= 2;

		while (len--)
		{
			const uint16_t *pWords = reinterpret_cast<const uint16_t *>(pBuf);

			h += pWords[0];

			const uint32_t t = (pWords[1] << 11) ^ h;
			h = (h << 16) ^ t;

			pBuf += sizeof(uint32_t);

			h += h >> 11;
		}

		switch (bytes_left)
		{
		case 1:
			h += *reinterpret_cast<const signed char*>(pBuf);
			h ^= h << 10;
			h += h >> 1;
			break;
		case 2:
			h += *reinterpret_cast<const uint16_t *>(pBuf);
			h ^= h << 11;
			h += h >> 17;
			break;
		case 3:
			h += *reinterpret_cast<const uint16_t *>(pBuf);
			h ^= h << 16;
			h ^= (static_cast<signed char>(pBuf[sizeof(uint16_t)])) << 18;
			h += h >> 11;
			break;
		default:
			break;
		}

		h ^= h << 3;
		h += h >> 5;
		h ^= h << 4;
		h += h >> 17;
		h ^= h << 25;
		h += h >> 6;

		return h;
	}
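
	// Usage sketch (illustrative only): hash_hsieh is Paul Hsieh's "SuperFastHash".
	//
	//   uint8_vec buf(1024);
	//   fill_buffer_with_random_bytes(&buf[0], buf.size(), 1000);
	//   const uint32_t h = hash_hsieh(&buf[0], buf.size());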

	job_pool::job_pool(uint32_t num_threads) :
		m_kill_flag(false),
		m_num_active_jobs(0)
	{
		assert(num_threads >= 1U);

		debug_printf("job_pool::job_pool: %u total threads\n", num_threads);

		if (num_threads > 1)
		{
			m_threads.resize(num_threads - 1);

			for (int i = 0; i < ((int)num_threads - 1); i++)
				m_threads[i] = std::thread([this, i] { job_thread(i); });
		}
	}

	job_pool::~job_pool()
	{
		debug_printf("job_pool::~job_pool\n");

		// Notify all workers that they need to die right now.
		m_kill_flag = true;
		m_has_work.notify_all();

		// Wait for all workers to die.
		for (uint32_t i = 0; i < m_threads.size(); i++)
			m_threads[i].join();
	}

	void job_pool::add_job(const std::function<void()>& job)
	{
		std::unique_lock<std::mutex> lock(m_mutex);

		m_queue.emplace_back(job);

		const size_t queue_size = m_queue.size();

		lock.unlock();

		if (queue_size > 1)
			m_has_work.notify_one();
	}

	void job_pool::add_job(std::function<void()>&& job)
	{
		std::unique_lock<std::mutex> lock(m_mutex);

		m_queue.emplace_back(std::move(job));

		const size_t queue_size = m_queue.size();

		lock.unlock();

		if (queue_size > 1)
			m_has_work.notify_one();
	}

	void job_pool::wait_for_all()
	{
		std::unique_lock<std::mutex> lock(m_mutex);

		// Drain the job queue on the calling thread.
		while (!m_queue.empty())
		{
			std::function<void()> job(m_queue.back());
			m_queue.pop_back();

			lock.unlock();

			job();

			lock.lock();
		}

		// The queue is empty, now wait for all active jobs to finish up.
		m_no_more_jobs.wait(lock, [this]{ return !m_num_active_jobs; } );
	}

	void job_pool::job_thread(uint32_t index)
	{
		debug_printf("job_pool::job_thread: starting %u\n", index);

		while (true)
		{
			std::unique_lock<std::mutex> lock(m_mutex);

			// Wait for any jobs to be issued.
			m_has_work.wait(lock, [this] { return m_kill_flag || m_queue.size(); } );

			// Check to see if we're supposed to exit.
			if (m_kill_flag)
				break;

			// Get the job and execute it.
			std::function<void()> job(m_queue.back());
			m_queue.pop_back();

			++m_num_active_jobs;

			lock.unlock();

			job();

			lock.lock();

			--m_num_active_jobs;

			// Now check if there are no more jobs remaining.
			const bool all_done = m_queue.empty() && !m_num_active_jobs;

			lock.unlock();

			if (all_done)
				m_no_more_jobs.notify_all();
		}

		debug_printf("job_pool::job_thread: exiting\n");
	}
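
	// Usage sketch (illustrative only): run some jobs on a pool and block until all of
	// them (including any the calling thread helps execute) have completed.
	//
	//   job_pool pool(maximum<uint32_t>(1, std::thread::hardware_concurrency()));
	//   for (uint32_t i = 0; i < 16; i++)
	//       pool.add_job([i] { printf("job %u\n", i); });
	//   pool.wait_for_all();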

} // namespace basisu