// runner.odin
  1. #+private
  2. package testing
  3. /*
  4. (c) Copyright 2024 Feoramund <[email protected]>.
  5. Made available under Odin's BSD-3 license.
  6. List of contributors:
  7. Ginger Bill: Initial implementation.
  8. Feoramund: Total rewrite.
  9. */
  10. import "base:intrinsics"
  11. import "base:runtime"
  12. import "core:bytes"
  13. import "core:encoding/ansi"
  14. @require import "core:encoding/base64"
  15. @require import "core:encoding/json"
  16. import "core:fmt"
  17. import "core:io"
  18. @require import "core:log"
  19. import "core:math/rand"
  20. import "core:mem"
  21. import "core:os"
  22. import "core:slice"
  23. @require import "core:strings"
  24. import "core:sync/chan"
  25. import "core:thread"
  26. import "core:time"
// Specify how many threads to use when running tests.
// 0 (the default) means one thread per processor core.
TEST_THREADS        : int    : #config(ODIN_TEST_THREADS, 0)
// Track the memory used by each test.
TRACKING_MEMORY     : bool   : #config(ODIN_TEST_TRACK_MEMORY, true)
// Always report how much memory is used, even when there are no leaks or bad frees.
ALWAYS_REPORT_MEMORY : bool  : #config(ODIN_TEST_ALWAYS_REPORT_MEMORY, false)
// Treat memory leaks and bad frees as errors.
FAIL_ON_BAD_MEMORY  : bool   : #config(ODIN_TEST_FAIL_ON_BAD_MEMORY, false)
// Specify how much memory each thread allocator starts with.
PER_THREAD_MEMORY   : int    : #config(ODIN_TEST_THREAD_MEMORY, mem.ROLLBACK_STACK_DEFAULT_BLOCK_SIZE)
// Select a specific set of tests to run by name.
// Each test is separated by a comma and may optionally include the package name.
// This may be useful when running tests on multiple packages with `-all-packages`.
// The format is: `package.test_name,test_name_only,...`
TEST_NAMES          : string : #config(ODIN_TEST_NAMES, "")
// Show the fancy animated progress report.
FANCY_OUTPUT        : bool   : #config(ODIN_TEST_FANCY, true)
// Copy failed tests to the clipboard when done.
USE_CLIPBOARD       : bool   : #config(ODIN_TEST_CLIPBOARD, false)
// How many test results to show at a time per package.
PROGRESS_WIDTH      : int    : #config(ODIN_TEST_PROGRESS_WIDTH, 24)
// This is the random seed that will be sent to each test.
// If it is unspecified, it will be set to the system cycle counter at startup.
SHARED_RANDOM_SEED  : u64    : #config(ODIN_TEST_RANDOM_SEED, 0)
// Set the lowest log level for this test run.
LOG_LEVEL_DEFAULT   : string : "debug" when ODIN_DEBUG else "info"
LOG_LEVEL           : string : #config(ODIN_TEST_LOG_LEVEL, LOG_LEVEL_DEFAULT)
// Show only the most necessary logging information.
USING_SHORT_LOGS    : bool   : #config(ODIN_TEST_SHORT_LOGS, false)
// Output a report of the tests to the given path.
JSON_REPORT         : string : #config(ODIN_TEST_JSON_REPORT, "")
// Map the `LOG_LEVEL` configuration string to its `runtime.Logger_Level`
// value. The chain is resolved entirely at compile time via `when`, and an
// unrecognized level string is a compile-time error via `#panic`.
get_log_level :: #force_inline proc() -> runtime.Logger_Level {
	when LOG_LEVEL == "debug"   { return .Debug   } else
	when LOG_LEVEL == "info"    { return .Info    } else
	when LOG_LEVEL == "warning" { return .Warning } else
	when LOG_LEVEL == "error"   { return .Error   } else
	when LOG_LEVEL == "fatal"   { return .Fatal   } else {
		#panic("Unknown `ODIN_TEST_LOG_LEVEL`: \"" + LOG_LEVEL + "\", possible levels are: \"debug\", \"info\", \"warning\", \"error\", or \"fatal\".")
	}
}
// Top-level shape of the report written when `ODIN_TEST_JSON_REPORT` is set.
JSON :: struct {
	total:    int,                           // count of tests in the run
	success:  int,                           // count of successful tests
	duration: time.Duration,                 // wall-clock duration of the run
	packages: map[string][dynamic]JSON_Test, // results grouped by package name
}
// One test's entry in the JSON report.
JSON_Test :: struct {
	success: bool,   // whether the test passed
	name:    string, // the test's name
}
  77. end_t :: proc(t: ^T) {
  78. for i := len(t.cleanups)-1; i >= 0; i -= 1 {
  79. #no_bounds_check c := t.cleanups[i]
  80. context = c.ctx
  81. c.procedure(c.user_data)
  82. }
  83. delete(t.cleanups)
  84. t.cleanups = {}
  85. }
when TRACKING_MEMORY && FAIL_ON_BAD_MEMORY {
	// Per-task state passed to `run_test_task`. When memory problems are
	// treated as test failures, the task itself also needs a pointer to its
	// tracking allocator so it can count leaks and bad frees after the test
	// body returns.
	Task_Data :: struct {
		it: Internal_Test,
		t: T,
		allocator_index: int,
		tracking_allocator: ^mem.Tracking_Allocator,
	}
} else {
	// Per-task state passed to `run_test_task`.
	Task_Data :: struct {
		it: Internal_Test,
		t: T,
		allocator_index: int,
	}
}
// A pending fail-timeout for a running test, queued by an
// `Event_Set_Fail_Timeout` and checked periodically by the runner loop.
Task_Timeout :: struct {
	test_index: int,                          // index into `internal_tests`
	at_time:    time.Time,                    // deadline; the test fails once this time has passed
	location:   runtime.Source_Code_Location, // source location used when logging the timeout
}
// Executes a single test on a pool thread: announces which test this thread
// is running, sets up the test's context (assertion handler, logger, RNG),
// runs the test procedure and its cleanups, then reports the final state
// back to the runner over the test's channel.
run_test_task :: proc(task: thread.Task) {
	data := cast(^Task_Data)(task.data)

	setup_task_signal_handler(task.user_index)

	// Tell the runner which test this channel's subsequent events refer to.
	chan.send(data.t.channel, Event_New_Test {
		test_index = task.user_index,
	})

	chan.send(data.t.channel, Event_State_Change {
		new_state = .Running,
	})

	// Assertion failures and log messages are routed back through the test's
	// state (`data.t`) rather than printed directly.
	context.assertion_failure_proc = test_assertion_failure_proc
	context.logger = {
		procedure = test_logger_proc,
		data = &data.t,
		lowest_level = get_log_level(),
		options = Default_Test_Logger_Opts,
	}

	// Every test is seeded from the same shared seed (`data.t.seed`) so runs
	// are reproducible.
	random_generator_state: runtime.Default_Random_State
	context.random_generator = {
		procedure = runtime.default_random_generator_proc,
		data = &random_generator_state,
	}
	rand.reset(data.t.seed)

	// Tasks are reused across tests; start with a clean temp allocator.
	free_all(context.temp_allocator)

	// Run the actual test procedure, then its registered cleanups.
	data.it.p(&data.t)

	end_t(&data.t)

	when TRACKING_MEMORY && FAIL_ON_BAD_MEMORY {
		// NOTE(Feoramund): The simplest way to handle treating memory failures
		// as errors is to allow the test task runner to access the tracking
		// allocator itself.
		//
		// This way, it's still able to send up a log message, which will be
		// used in the end summary, and it can set the test state to `Failed`
		// under the usual conditions.
		//
		// No outside intervention needed.
		memory_leaks := len(data.tracking_allocator.allocation_map)
		bad_frees := len(data.tracking_allocator.bad_free_array)
		memory_is_in_bad_state := memory_leaks + bad_frees > 0
		// Fold memory problems into the error count so the test is treated
		// as failed below.
		data.t.error_count += memory_leaks + bad_frees
		if memory_is_in_bad_state {
			log.errorf("Memory failure in `%s.%s` with %i leak%s and %i bad free%s.",
				data.it.pkg, data.it.name,
				memory_leaks, "" if memory_leaks == 1 else "s",
				bad_frees, "" if bad_frees == 1 else "s")
		}
	}

	new_state : Test_State = .Failed if failed(&data.t) else .Successful

	chan.send(data.t.channel, Event_State_Change {
		new_state = new_state,
	})
}
  156. runner :: proc(internal_tests: []Internal_Test) -> bool {
  157. BATCH_BUFFER_SIZE :: 32 * mem.Kilobyte
  158. POOL_BLOCK_SIZE :: 16 * mem.Kilobyte
  159. CLIPBOARD_BUFFER_SIZE :: 16 * mem.Kilobyte
  160. BUFFERED_EVENTS_PER_CHANNEL :: 16
  161. RESERVED_LOG_MESSAGES :: 64
  162. RESERVED_TEST_FAILURES :: 64
  163. ERROR_STRING_TIMEOUT : string : "Test timed out."
  164. ERROR_STRING_UNKNOWN : string : "Test failed for unknown reasons."
  165. OSC_WINDOW_TITLE : string : ansi.OSC + ansi.WINDOW_TITLE + ";Odin test runner (%i/%i)" + ansi.ST
  166. safe_delete_string :: proc(s: string, allocator := context.allocator) {
  167. // Guard against bad frees on static strings.
  168. switch raw_data(s) {
  169. case raw_data(ERROR_STRING_TIMEOUT), raw_data(ERROR_STRING_UNKNOWN):
  170. return
  171. case:
  172. delete(s, allocator)
  173. }
  174. }
  175. when ODIN_OS == .Windows {
  176. console_ansi_init()
  177. }
  178. stdout := io.to_writer(os.stream_from_handle(os.stdout))
  179. stderr := io.to_writer(os.stream_from_handle(os.stderr))
  180. // -- Prepare test data.
  181. alloc_error: mem.Allocator_Error
  182. when TEST_NAMES != "" {
  183. select_internal_tests: [dynamic]Internal_Test
  184. defer delete(select_internal_tests)
  185. {
  186. index_list := TEST_NAMES
  187. for selector in strings.split_iterator(&index_list, ",") {
  188. // Temp allocator is fine since we just need to identify which test it's referring to.
  189. split_selector := strings.split(selector, ".", context.temp_allocator)
  190. found := false
  191. switch len(split_selector) {
  192. case 1:
  193. // Only the test name?
  194. #no_bounds_check name := split_selector[0]
  195. find_test_by_name: for it in internal_tests {
  196. if it.name == name {
  197. found = true
  198. _, alloc_error = append(&select_internal_tests, it)
  199. fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
  200. break find_test_by_name
  201. }
  202. }
  203. case 2:
  204. #no_bounds_check pkg := split_selector[0]
  205. #no_bounds_check name := split_selector[1]
  206. find_test_by_pkg_and_name: for it in internal_tests {
  207. if it.pkg == pkg && it.name == name {
  208. found = true
  209. _, alloc_error = append(&select_internal_tests, it)
  210. fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
  211. break find_test_by_pkg_and_name
  212. }
  213. }
  214. }
  215. if !found {
  216. fmt.wprintfln(stderr, "No test found for the name: %q", selector)
  217. }
  218. }
  219. }
  220. // `-vet` needs parameters to be shadowed by themselves first as an
  221. // explicit declaration, to allow the next line to work.
  222. internal_tests := internal_tests
  223. // Intentional shadow with user-specified tests.
  224. internal_tests = select_internal_tests[:]
  225. }
  226. total_failure_count := 0
  227. total_success_count := 0
  228. total_done_count := 0
  229. total_test_count := len(internal_tests)
  230. when !FANCY_OUTPUT {
  231. // This is strictly for updating the window title when the progress
  232. // report is disabled. We're otherwise able to depend on the call to
  233. // `needs_to_redraw`.
  234. last_done_count := -1
  235. }
  236. if total_test_count == 0 {
  237. // Exit early.
  238. fmt.wprintln(stdout, "No tests to run.")
  239. return true
  240. }
  241. for it in internal_tests {
  242. // NOTE(Feoramund): The old test runner skipped over tests with nil
  243. // procedures, but I couldn't find any case where they occurred.
  244. // This assert stands to prevent any oversight on my part.
  245. fmt.assertf(it.p != nil, "Test %s.%s has <nil> procedure.", it.pkg, it.name)
  246. }
  247. slice.sort_by(internal_tests, proc(a, b: Internal_Test) -> bool {
  248. if a.pkg == b.pkg {
  249. return a.name < b.name
  250. } else {
  251. return a.pkg < b.pkg
  252. }
  253. })
  254. // -- Set thread count.
  255. when TEST_THREADS == 0 {
  256. thread_count := os.processor_core_count()
  257. } else {
  258. thread_count := max(1, TEST_THREADS)
  259. }
  260. thread_count = min(thread_count, total_test_count)
  261. // -- Allocate.
  262. pool_stack: mem.Rollback_Stack
  263. alloc_error = mem.rollback_stack_init(&pool_stack, POOL_BLOCK_SIZE)
  264. fmt.assertf(alloc_error == nil, "Error allocating memory for thread pool: %v", alloc_error)
  265. defer mem.rollback_stack_destroy(&pool_stack)
  266. pool: thread.Pool
  267. thread.pool_init(&pool, mem.rollback_stack_allocator(&pool_stack), thread_count)
  268. defer thread.pool_destroy(&pool)
  269. task_channels: []Task_Channel = ---
  270. task_channels, alloc_error = make([]Task_Channel, thread_count)
  271. fmt.assertf(alloc_error == nil, "Error allocating memory for update channels: %v", alloc_error)
  272. defer delete(task_channels)
  273. for &task_channel, index in task_channels {
  274. task_channel.channel, alloc_error = chan.create_buffered(Update_Channel, BUFFERED_EVENTS_PER_CHANNEL, context.allocator)
  275. fmt.assertf(alloc_error == nil, "Error allocating memory for update channel #%i: %v", index, alloc_error)
  276. }
  277. defer for &task_channel in task_channels {
  278. chan.destroy(&task_channel.channel)
  279. }
  280. // This buffer is used to batch writes to STDOUT or STDERR, to help reduce
  281. // screen flickering.
  282. batch_buffer: bytes.Buffer
  283. bytes.buffer_init_allocator(&batch_buffer, 0, BATCH_BUFFER_SIZE)
  284. batch_writer := io.to_writer(bytes.buffer_to_stream(&batch_buffer))
  285. defer bytes.buffer_destroy(&batch_buffer)
  286. report: Report = ---
  287. report, alloc_error = make_report(internal_tests)
  288. fmt.assertf(alloc_error == nil, "Error allocating memory for test report: %v", alloc_error)
  289. defer destroy_report(&report)
  290. when FANCY_OUTPUT {
  291. // We cannot make use of the ANSI save/restore cursor codes, because they
  292. // work by absolute screen coordinates. This will cause unnecessary
  293. // scrollback if we print at the bottom of someone's terminal.
  294. ansi_redraw_string := fmt.aprintf(
  295. // ANSI for "go up N lines then erase the screen from the cursor forward."
  296. ansi.CSI + "%i" + ansi.CPL + ansi.CSI + ansi.ED +
  297. // We'll combine this with the window title format string, since it
  298. // can be printed at the same time.
  299. "%s",
  300. // 1 extra line for the status bar.
  301. 1 + len(report.packages), OSC_WINDOW_TITLE)
  302. assert(len(ansi_redraw_string) > 0, "Error allocating ANSI redraw string.")
  303. defer delete(ansi_redraw_string)
  304. thread_count_status_string: string = ---
  305. {
  306. PADDING :: PROGRESS_COLUMN_SPACING + PROGRESS_WIDTH
  307. unpadded := fmt.tprintf("%i thread%s", thread_count, "" if thread_count == 1 else "s")
  308. thread_count_status_string = fmt.aprintf("%- *[1]s", unpadded, report.pkg_column_len + PADDING)
  309. assert(len(thread_count_status_string) > 0, "Error allocating thread count status string.")
  310. }
  311. defer delete(thread_count_status_string)
  312. }
  313. task_data_slots: []Task_Data = ---
  314. task_data_slots, alloc_error = make([]Task_Data, thread_count)
  315. fmt.assertf(alloc_error == nil, "Error allocating memory for task data slots: %v", alloc_error)
  316. defer delete(task_data_slots)
  317. // Tests rotate through these allocators as they finish.
  318. task_allocators: []mem.Rollback_Stack = ---
  319. task_allocators, alloc_error = make([]mem.Rollback_Stack, thread_count)
  320. fmt.assertf(alloc_error == nil, "Error allocating memory for task allocators: %v", alloc_error)
  321. defer delete(task_allocators)
  322. when TRACKING_MEMORY {
  323. task_memory_trackers: []mem.Tracking_Allocator = ---
  324. task_memory_trackers, alloc_error = make([]mem.Tracking_Allocator, thread_count)
  325. fmt.assertf(alloc_error == nil, "Error allocating memory for memory trackers: %v", alloc_error)
  326. defer delete(task_memory_trackers)
  327. }
  328. #no_bounds_check for i in 0 ..< thread_count {
  329. alloc_error = mem.rollback_stack_init(&task_allocators[i], PER_THREAD_MEMORY)
  330. fmt.assertf(alloc_error == nil, "Error allocating memory for task allocator #%i: %v", i, alloc_error)
  331. when TRACKING_MEMORY {
  332. mem.tracking_allocator_init(&task_memory_trackers[i], mem.rollback_stack_allocator(&task_allocators[i]))
  333. task_memory_trackers[i].bad_free_callback = mem.tracking_allocator_bad_free_callback_add_to_array
  334. }
  335. }
  336. defer #no_bounds_check for i in 0 ..< thread_count {
  337. when TRACKING_MEMORY {
  338. mem.tracking_allocator_destroy(&task_memory_trackers[i])
  339. }
  340. mem.rollback_stack_destroy(&task_allocators[i])
  341. }
  342. task_timeouts: [dynamic]Task_Timeout = ---
  343. task_timeouts, alloc_error = make([dynamic]Task_Timeout, 0, thread_count)
  344. fmt.assertf(alloc_error == nil, "Error allocating memory for task timeouts: %v", alloc_error)
  345. defer delete(task_timeouts)
  346. failed_test_reason_map: map[int]string = ---
  347. failed_test_reason_map, alloc_error = make(map[int]string, RESERVED_TEST_FAILURES)
  348. fmt.assertf(alloc_error == nil, "Error allocating memory for failed test reasons: %v", alloc_error)
  349. defer delete(failed_test_reason_map)
  350. log_messages: [dynamic]Log_Message = ---
  351. log_messages, alloc_error = make([dynamic]Log_Message, 0, RESERVED_LOG_MESSAGES)
  352. fmt.assertf(alloc_error == nil, "Error allocating memory for log message queue: %v", alloc_error)
  353. defer delete(log_messages)
  354. sorted_failed_test_reasons: [dynamic]int = ---
  355. sorted_failed_test_reasons, alloc_error = make([dynamic]int, 0, RESERVED_TEST_FAILURES)
  356. fmt.assertf(alloc_error == nil, "Error allocating memory for sorted failed test reasons: %v", alloc_error)
  357. defer delete(sorted_failed_test_reasons)
  358. when USE_CLIPBOARD {
  359. clipboard_buffer: bytes.Buffer
  360. bytes.buffer_init_allocator(&clipboard_buffer, 0, CLIPBOARD_BUFFER_SIZE)
  361. defer bytes.buffer_destroy(&clipboard_buffer)
  362. }
  363. when SHARED_RANDOM_SEED == 0 {
  364. shared_random_seed := cast(u64)intrinsics.read_cycle_counter()
  365. } else {
  366. shared_random_seed := SHARED_RANDOM_SEED
  367. }
  368. // -- Setup initial tasks.
  369. // NOTE(Feoramund): This is the allocator that will be used by threads to
  370. // persist log messages past their lifetimes. It has its own variable name
  371. // in the event it needs to be changed from `context.allocator` without
  372. // digging through the source to divine everywhere it is used for that.
  373. shared_log_allocator := context.allocator
  374. context.logger = {
  375. procedure = runner_logger_proc,
  376. data = &log_messages,
  377. lowest_level = get_log_level(),
  378. options = Default_Test_Logger_Opts - {.Short_File_Path, .Line, .Procedure},
  379. }
  380. run_index: int
  381. setup_tasks: for &data, task_index in task_data_slots {
  382. setup_next_test: for run_index < total_test_count {
  383. #no_bounds_check it := internal_tests[run_index]
  384. defer run_index += 1
  385. data.it = it
  386. data.t.seed = shared_random_seed
  387. #no_bounds_check data.t.channel = chan.as_send(task_channels[task_index].channel)
  388. data.t._log_allocator = shared_log_allocator
  389. data.allocator_index = task_index
  390. #no_bounds_check when TRACKING_MEMORY {
  391. task_allocator := mem.tracking_allocator(&task_memory_trackers[task_index])
  392. when FAIL_ON_BAD_MEMORY {
  393. data.tracking_allocator = &task_memory_trackers[task_index]
  394. }
  395. } else {
  396. task_allocator := mem.rollback_stack_allocator(&task_allocators[task_index])
  397. }
  398. thread.pool_add_task(&pool, task_allocator, run_test_task, &data, run_index)
  399. continue setup_tasks
  400. }
  401. }
  402. // -- Run tests.
  403. setup_signal_handler()
  404. fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_HIDE)
  405. when FANCY_OUTPUT {
  406. signals_were_raised := false
  407. redraw_report(stdout, report)
  408. draw_status_bar(stdout, thread_count_status_string, total_done_count, total_test_count)
  409. }
  410. when TEST_THREADS == 0 {
  411. log.infof("Starting test runner with %i thread%s. Set with -define:ODIN_TEST_THREADS=n.",
  412. thread_count,
  413. "" if thread_count == 1 else "s")
  414. } else {
  415. log.infof("Starting test runner with %i thread%s.",
  416. thread_count,
  417. "" if thread_count == 1 else "s")
  418. }
  419. when SHARED_RANDOM_SEED == 0 {
  420. log.infof("The random seed sent to every test is: %v. Set with -define:ODIN_TEST_RANDOM_SEED=n.", shared_random_seed)
  421. } else {
  422. log.infof("The random seed sent to every test is: %v.", shared_random_seed)
  423. }
  424. when TRACKING_MEMORY {
  425. when ALWAYS_REPORT_MEMORY {
  426. log.info("Memory tracking is enabled. Tests will log their memory usage when complete.")
  427. } else {
  428. log.info("Memory tracking is enabled. Tests will log their memory usage if there's an issue.")
  429. }
  430. log.info("< Final Mem/ Total Mem> < Peak Mem> (#Free/Alloc) :: [package.test_name]")
  431. } else {
  432. when ALWAYS_REPORT_MEMORY {
  433. log.warn("ODIN_TEST_ALWAYS_REPORT_MEMORY is true, but ODIN_TEST_TRACK_MEMORY is false.")
  434. }
  435. when FAIL_ON_BAD_MEMORY {
  436. log.warn("ODIN_TEST_FAIL_ON_BAD_MEMORY is true, but ODIN_TEST_TRACK_MEMORY is false.")
  437. }
  438. }
  439. start_time := time.now()
  440. thread.pool_start(&pool)
  441. main_loop: for !thread.pool_is_empty(&pool) {
  442. {
  443. events_pending := thread.pool_num_done(&pool) > 0
  444. if !events_pending {
  445. poll_tasks: for &task_channel in task_channels {
  446. if chan.len(task_channel.channel) > 0 {
  447. events_pending = true
  448. break poll_tasks
  449. }
  450. }
  451. }
  452. if !events_pending {
  453. // Keep the main thread from pegging a core at 100% usage.
  454. time.sleep(1 * time.Microsecond)
  455. }
  456. }
  457. cycle_pool: for task in thread.pool_pop_done(&pool) {
  458. data := cast(^Task_Data)(task.data)
  459. when TRACKING_MEMORY {
  460. #no_bounds_check tracker := &task_memory_trackers[data.allocator_index]
  461. memory_is_in_bad_state := len(tracker.allocation_map) + len(tracker.bad_free_array) > 0
  462. when ALWAYS_REPORT_MEMORY {
  463. should_report := true
  464. } else {
  465. should_report := memory_is_in_bad_state
  466. }
  467. if should_report {
  468. write_memory_report(batch_writer, tracker, data.it.pkg, data.it.name)
  469. when FAIL_ON_BAD_MEMORY {
  470. log.log(.Error if memory_is_in_bad_state else .Info, bytes.buffer_to_string(&batch_buffer))
  471. } else {
  472. log.log(.Warning if memory_is_in_bad_state else .Info, bytes.buffer_to_string(&batch_buffer))
  473. }
  474. bytes.buffer_reset(&batch_buffer)
  475. }
  476. mem.tracking_allocator_reset(tracker)
  477. }
  478. free_all(task.allocator)
  479. if run_index < total_test_count {
  480. #no_bounds_check it := internal_tests[run_index]
  481. defer run_index += 1
  482. data.it = it
  483. data.t.seed = shared_random_seed
  484. data.t.error_count = 0
  485. data.t._fail_now_called = false
  486. thread.pool_add_task(&pool, task.allocator, run_test_task, data, run_index)
  487. }
  488. }
  489. handle_events: for &task_channel in task_channels {
  490. for ev in chan.try_recv(task_channel.channel) {
  491. switch event in ev {
  492. case Event_New_Test:
  493. task_channel.test_index = event.test_index
  494. case Event_State_Change:
  495. #no_bounds_check report.all_test_states[task_channel.test_index] = event.new_state
  496. #no_bounds_check it := internal_tests[task_channel.test_index]
  497. #no_bounds_check pkg := report.packages_by_name[it.pkg]
  498. #partial switch event.new_state {
  499. case .Failed:
  500. if task_channel.test_index not_in failed_test_reason_map {
  501. failed_test_reason_map[task_channel.test_index] = ERROR_STRING_UNKNOWN
  502. }
  503. total_failure_count += 1
  504. total_done_count += 1
  505. case .Successful:
  506. total_success_count += 1
  507. total_done_count += 1
  508. }
  509. when ODIN_DEBUG {
  510. log.debugf("Test #%i %s.%s changed state to %v.", task_channel.test_index, it.pkg, it.name, event.new_state)
  511. }
  512. pkg.last_change_state = event.new_state
  513. pkg.last_change_name = it.name
  514. pkg.frame_ready = false
  515. case Event_Set_Fail_Timeout:
  516. _, alloc_error = append(&task_timeouts, Task_Timeout {
  517. test_index = task_channel.test_index,
  518. at_time = event.at_time,
  519. location = event.location,
  520. })
  521. fmt.assertf(alloc_error == nil, "Error appending to task timeouts: %v", alloc_error)
  522. case Event_Log_Message:
  523. _, alloc_error = append(&log_messages, Log_Message {
  524. level = event.level,
  525. text = event.formatted_text,
  526. time = event.time,
  527. allocator = shared_log_allocator,
  528. })
  529. fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)
  530. if event.level >= .Error {
  531. // Save the message for the final summary.
  532. if old_error, ok := failed_test_reason_map[task_channel.test_index]; ok {
  533. safe_delete_string(old_error, shared_log_allocator)
  534. }
  535. failed_test_reason_map[task_channel.test_index] = event.text
  536. } else {
  537. delete(event.text, shared_log_allocator)
  538. }
  539. }
  540. }
  541. }
  542. check_timeouts: for i := len(task_timeouts) - 1; i >= 0; i -= 1 {
  543. #no_bounds_check timeout := &task_timeouts[i]
  544. if time.since(timeout.at_time) < 0 {
  545. continue check_timeouts
  546. }
  547. defer unordered_remove(&task_timeouts, i)
  548. #no_bounds_check if report.all_test_states[timeout.test_index] > .Running {
  549. continue check_timeouts
  550. }
  551. if !thread.pool_stop_task(&pool, timeout.test_index) {
  552. // The task may have stopped a split second after we started
  553. // checking, but we haven't handled the new state yet.
  554. continue check_timeouts
  555. }
  556. #no_bounds_check report.all_test_states[timeout.test_index] = .Failed
  557. #no_bounds_check it := internal_tests[timeout.test_index]
  558. #no_bounds_check pkg := report.packages_by_name[it.pkg]
  559. pkg.frame_ready = false
  560. if old_error, ok := failed_test_reason_map[timeout.test_index]; ok {
  561. safe_delete_string(old_error, shared_log_allocator)
  562. }
  563. failed_test_reason_map[timeout.test_index] = ERROR_STRING_TIMEOUT
  564. total_failure_count += 1
  565. total_done_count += 1
  566. now := time.now()
  567. _, alloc_error = append(&log_messages, Log_Message {
  568. level = .Error,
  569. text = format_log_text(.Error, ERROR_STRING_TIMEOUT, Default_Test_Logger_Opts, timeout.location, now),
  570. time = now,
  571. allocator = context.allocator,
  572. })
  573. fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)
  574. find_task_data_for_timeout: for &data in task_data_slots {
  575. if data.it.pkg == it.pkg && data.it.name == it.name {
  576. end_t(&data.t)
  577. break find_task_data_for_timeout
  578. }
  579. }
  580. }
  581. if should_stop_runner() {
  582. fmt.wprintln(stderr, "\nCaught interrupt signal. Stopping all tests.")
  583. thread.pool_shutdown(&pool)
  584. break main_loop
  585. }
  586. when FANCY_OUTPUT {
  587. // Because the bounds checking procs send directly to STDERR with
  588. // no way to redirect or handle them, we need to at least try to
  589. // let the user see those messages when using the animated progress
  590. // report. This flag may be set by the block of code below if a
  591. // signal is raised.
  592. //
  593. // It'll be purely by luck if the output is interleaved properly,
  594. // given the nature of non-thread-safe printing.
  595. //
  596. // At worst, if Odin did not print any error for this signal, we'll
  597. // just re-display the progress report. The fatal log error message
  598. // should be enough to clue the user in that something dire has
  599. // occurred.
  600. bypass_progress_overwrite := false
  601. }
  602. if test_index, reason, ok := should_stop_test(); ok {
  603. #no_bounds_check report.all_test_states[test_index] = .Failed
  604. #no_bounds_check it := internal_tests[test_index]
  605. #no_bounds_check pkg := report.packages_by_name[it.pkg]
  606. pkg.frame_ready = false
  607. found := thread.pool_stop_task(&pool, test_index)
  608. fmt.assertf(found, "A signal (%v) was raised to stop test #%i %s.%s, but it was unable to be found.",
  609. reason, test_index, it.pkg, it.name)
  610. // The order this is handled in is a little particular.
  611. task_data: ^Task_Data
  612. find_task_data_for_stop_signal: for &data in task_data_slots {
  613. if data.it.pkg == it.pkg && data.it.name == it.name {
  614. task_data = &data
  615. break find_task_data_for_stop_signal
  616. }
  617. }
  618. fmt.assertf(task_data != nil, "A signal (%v) was raised to stop test #%i %s.%s, but its task data is missing.",
  619. reason, test_index, it.pkg, it.name)
  620. if !task_data.t._fail_now_called {
  621. if test_index not_in failed_test_reason_map {
  622. // We only write a new error message here if there wasn't one
  623. // already, because the message we can provide based only on
  624. // the signal won't be very useful, whereas asserts and panics
  625. // will provide a user-written error message.
  626. failed_test_reason_map[test_index] = fmt.aprintf("Signal caught: %v", reason, allocator = shared_log_allocator)
  627. log.fatalf("Caught signal to stop test #%i %s.%s for: %v.", test_index, it.pkg, it.name, reason)
  628. }
  629. when FANCY_OUTPUT {
  630. bypass_progress_overwrite = true
  631. signals_were_raised = true
  632. }
  633. }
  634. end_t(&task_data.t)
  635. total_failure_count += 1
  636. total_done_count += 1
  637. }
		// -- Redraw.

		when FANCY_OUTPUT {
			// Nothing new to show: no pending log lines and the report
			// itself hasn't changed since the last frame.
			if len(log_messages) == 0 && !needs_to_redraw(report) {
				continue main_loop
			}

			if !bypass_progress_overwrite {
				fmt.wprintf(stdout, ansi_redraw_string, total_done_count, total_test_count)
			}
		} else {
			// Plain mode: only the terminal window title tracks progress,
			// and it is rewritten only when the done-count advances.
			if total_done_count != last_done_count {
				fmt.wprintf(stdout, OSC_WINDOW_TITLE, total_done_count, total_test_count)
				last_done_count = total_done_count
			}

			if len(log_messages) == 0 {
				continue main_loop
			}
		}

		// Because each thread has its own messenger channel, log messages
		// arrive in chunks that are in-order, but when they're merged with the
		// logs from other threads, they become out-of-order.
		slice.stable_sort_by(log_messages[:], proc(a, b: Log_Message) -> bool {
			return time.diff(a.time, b.time) > 0
		})

		// Batch the sorted messages into one buffer, then emit them to
		// stderr in a single write; each message's text is owned by its
		// logging allocator and is released here.
		for message in log_messages {
			fmt.wprintln(batch_writer, message.text)
			delete(message.text, message.allocator)
		}

		fmt.wprint(stderr, bytes.buffer_to_string(&batch_buffer))
		clear(&log_messages)
		bytes.buffer_reset(&batch_buffer)

		when FANCY_OUTPUT {
			// Repaint the animated report and status bar after the log
			// dump; note this batch goes to stdout, not stderr.
			redraw_report(batch_writer, report)
			draw_status_bar(batch_writer, thread_count_status_string, total_done_count, total_test_count)
			fmt.wprint(stdout, bytes.buffer_to_string(&batch_buffer))
			bytes.buffer_reset(&batch_buffer)
		}
	}
	// -- All tests are complete, or the runner has been interrupted.

	// NOTE(Feoramund): If you've arrived here after receiving signal 11 or
	// SIGSEGV on the main runner thread, while using a UNIX-like platform,
	// there is the possibility that you may have encountered a rare edge case
	// involving the joining of threads.
	//
	// At the time of writing, the thread library is undergoing a rewrite that
	// should solve this problem; it is not an issue with the test runner itself.
	thread.pool_join(&pool)

	finished_in := time.since(start_time)

	when !FANCY_OUTPUT {
		// One line to space out the results, since we don't have the status
		// bar in plain mode.
		fmt.wprintln(batch_writer)
	}

	// Summary line, with singular/plural agreement on "test(s)".
	fmt.wprintf(batch_writer,
		"Finished %i test%s in %v.",
		total_done_count,
		"" if total_done_count == 1 else "s",
		finished_in)

	// If the runner was interrupted, report how many tests never ran.
	if total_done_count != total_test_count {
		not_run_count := total_test_count - total_done_count
		fmt.wprintf(batch_writer,
			" " + SGR_READY + "%i" + SGR_RESET + " %s left undone.",
			not_run_count,
			"test was" if not_run_count == 1 else "tests were")
	}
	if total_success_count == total_test_count {
		// Every registered test passed.
		fmt.wprintfln(batch_writer,
			" %s " + SGR_SUCCESS + "successful." + SGR_RESET,
			"The test was" if total_test_count == 1 else "All tests were")
	} else if total_failure_count > 0 {
		if total_failure_count == total_test_count {
			fmt.wprintfln(batch_writer,
				" %s " + SGR_FAILED + "failed." + SGR_RESET,
				"The test" if total_test_count == 1 else "All tests")
		} else {
			fmt.wprintfln(batch_writer,
				" " + SGR_FAILED + "%i" + SGR_RESET + " test%s failed.",
				total_failure_count,
				"" if total_failure_count == 1 else "s")
		}

		// Map iteration order is unspecified, so collect the failed
		// indices first and sort them for a stable, readable listing.
		for test_index in failed_test_reason_map {
			_, alloc_error = append(&sorted_failed_test_reasons, test_index)
			fmt.assertf(alloc_error == nil, "Error appending to sorted failed test reasons: %v", alloc_error)
		}

		slice.sort(sorted_failed_test_reasons[:])

		for test_index in sorted_failed_test_reasons {
			#no_bounds_check last_error := failed_test_reason_map[test_index]
			#no_bounds_check it := internal_tests[test_index]
			pkg_and_name := fmt.tprintf("%s.%s", it.pkg, it.name)
			// Left-pad the package.name column so the error messages align.
			fmt.wprintfln(batch_writer, " - %- *[1]s\t%s",
				pkg_and_name,
				report.pkg_column_len + report.test_column_len,
				last_error)
			// The reason string was allocated on the shared log allocator;
			// it is no longer needed once printed.
			safe_delete_string(last_error, shared_log_allocator)
		}

		// Only offer the re-run hint when it would narrow the selection,
		// i.e. when at least one test also succeeded.
		if total_success_count > 0 {
			when USE_CLIPBOARD {
				// Build the comma-separated name list, base64-encode it,
				// and push it to the user's clipboard via OSC 52.
				clipboard_writer := io.to_writer(bytes.buffer_to_stream(&clipboard_buffer))
				fmt.wprint(clipboard_writer, "-define:ODIN_TEST_NAMES=")
				for test_index in sorted_failed_test_reasons {
					#no_bounds_check it := internal_tests[test_index]
					fmt.wprintf(clipboard_writer, "%s.%s,", it.pkg, it.name)
				}

				encoded_names := base64.encode(bytes.buffer_to_bytes(&clipboard_buffer), allocator = context.temp_allocator)

				fmt.wprintf(batch_writer,
					ansi.OSC + ansi.CLIPBOARD + ";c;%s" + ansi.ST +
					"\nThe name%s of the failed test%s been copied to your clipboard.",
					encoded_names,
					"" if total_failure_count == 1 else "s",
					" has" if total_failure_count == 1 else "s have")
			} else {
				fmt.wprintf(batch_writer, "\nTo run only the failed test%s, use:\n\t-define:ODIN_TEST_NAMES=",
					"" if total_failure_count == 1 else "s")
				for test_index in sorted_failed_test_reasons {
					#no_bounds_check it := internal_tests[test_index]
					fmt.wprintf(batch_writer, "%s.%s,", it.pkg, it.name)
				}
				fmt.wprint(batch_writer, "\n\nIf your terminal supports OSC 52, you may use -define:ODIN_TEST_CLIPBOARD to have this copied directly to your clipboard.")
			}

			fmt.wprintln(batch_writer)
		}
	}
	// Restore the terminal cursor, which the fancy display may have hidden.
	fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_SHOW)

	when FANCY_OUTPUT {
		if signals_were_raised {
			fmt.wprintln(batch_writer, `
Signals were raised during this test run. Log messages are likely to have collided with each other.
To partly mitigate this, redirect STDERR to a file or use the -define:ODIN_TEST_FANCY=false option.`)
		}
	}

	// Flush the accumulated summary to stderr in a single write.
	fmt.wprintln(stderr, bytes.buffer_to_string(&batch_buffer))
  768. when JSON_REPORT != "" {
  769. json_report: JSON
  770. mode: int
  771. when ODIN_OS != .Windows {
  772. mode = os.S_IRUSR|os.S_IWUSR|os.S_IRGRP|os.S_IROTH
  773. }
  774. json_fd, err := os.open(JSON_REPORT, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
  775. fmt.assertf(err == nil, "unable to open file %q for writing of JSON report, error: %v", JSON_REPORT, err)
  776. defer os.close(json_fd)
  777. for test, i in report.all_tests {
  778. #no_bounds_check state := report.all_test_states[i]
  779. if test.pkg not_in json_report.packages {
  780. json_report.packages[test.pkg] = {}
  781. }
  782. tests := &json_report.packages[test.pkg]
  783. append(tests, JSON_Test{name = test.name, success = state == .Successful})
  784. }
  785. json_report.total = len(internal_tests)
  786. json_report.success = total_success_count
  787. json_report.duration = finished_in
  788. err := json.marshal_to_writer(os.stream_from_handle(json_fd), json_report, &{ pretty = true })
  789. fmt.assertf(err == nil, "Error writing JSON report: %v", err)
  790. }
	// The run is a success only if every registered test both ran and passed.
	return total_success_count == total_test_count
}