# run_thread.gd
extends Node
# Executes a node-graph "thread" of CDP audio processes built in a GraphEdit.
# All scene references below are injected by init() from the main scene.
var control_script # main scene script; provides delete_intermediate_outputs, cdpprogs_location, output_audio_player
var progress_label # Label inside the progress window (current stage text)
var progress_bar # ProgressBar inside the progress window
var graph_edit # GraphEdit containing the node graph to execute
var console_output # RichTextLabel for console text; set by init() — not read in this chunk
var progress_window # popup Window shown while a thread runs
var console_window # popup Window holding the console output and KillProcess button
var process_successful #tracks if the last run process was successful
var process_info = {} #tracks the data of the currently running process
var process_running := false #tracks if a process is currently running
var process_cancelled = false #checks if the currently running process has been cancelled
var final_output_dir # path of the final rendered output file (set during the final output stage)
# Called when the node enters the scene tree for the first time.
func _ready() -> void:
	pass # No setup at ready time; all dependencies arrive via init().
  17. func init(main_node: Node, progresswindow: Window, progresslabel: Label, progressbar: ProgressBar, graphedit: GraphEdit, consolewindow: Window, consoleoutput: RichTextLabel) -> void:
  18. control_script = main_node
  19. progress_window = progresswindow
  20. progress_label = progresslabel
  21. progress_bar = progressbar
  22. graph_edit = graphedit
  23. console_window = consolewindow
  24. console_output = consoleoutput
# Walks the GraphEdit node graph, validates it, topologically sorts it, then
# executes every CDP process in order — matching sample rates/bit depths,
# splitting stereo for mono-only and pvoc processes — and finally merges,
# renames and cleans up so the result lands at Global.outfile + ".wav".
# NOTE(review): this body was re-indented during review; nesting was
# reconstructed from control-flow keywords and comments — verify branch
# placement against the original revision before relying on edge cases.
func run_thread_with_branches():
	process_cancelled = false
	process_successful = true
	progress_bar.value = 0
	progress_label.text = "Initialising Inputs"
	console_window.find_child("KillProcess").disabled = false
	# Detect platform: Determine if the OS is Windows
	var is_windows := OS.get_name() == "Windows"
	# Choose appropriate commands based on OS
	var delete_cmd = "del" if is_windows else "rm"
	var rename_cmd = "ren" if is_windows else "mv"
	var path_sep := "/" # Always use forward slash for paths (NOTE(review): unused in this function)
	# Get all node connections in the GraphEdit
	var connections = graph_edit.get_connection_list()
	# Prepare data structures for graph traversal
	var graph = {} # forward adjacency list
	var reverse_graph = {} # reverse adjacency list (for input lookup)
	var indegree = {} # used for topological sort
	var all_nodes = {} # map of node name -> GraphNode reference
	#store input nodes for sample rate and stereo matching
	var input_nodes = []
	var nodes_with_sample_rates = []
	var processing_sample_rate = 0 #sample rate that processing is being done at after input file is normalised, if this stays at 0 only synthesis exists in thread and highest value from that should be used
	var processing_bit_depth = 1 #stores the file type and bit-depth in the format used by the copysfx cdp function 1: 16-bit 2: 32-bit int 3: 32-bit float 4: 24-bit
	var intermediate_files = [] # Files to delete later
	var breakfiles = [] #breakfiles to delete later
	log_console("Mapping thread.", true)
	await get_tree().process_frame # Let UI update
	#Step 0: check thread is valid
	var is_valid = path_exists_through_all_nodes()
	if is_valid == false:
		log_console("[color=#9c2828][b]Error: Valid Thread not found[/b][/color]", true)
		log_console("Threads must contain at least one processing node and a valid path from the Input File or Synthesis node to the Output File.", true)
		await get_tree().process_frame # Let UI update
		if progress_window.visible:
			progress_window.hide()
		if !console_window.visible:
			console_window.popup_centered()
		return
	else:
		log_console("[color=#638382][b]Valid Thread found[/b][/color]", true)
		await get_tree().process_frame # Let UI update
	# Step 1: Gather nodes from the GraphEdit
	var inputcount = 0 # used for tracking the number of input nodes and trims on input files for progress bar
	for child in graph_edit.get_children():
		if child is GraphNode:
			var includenode = true
			# NOTE(review): local 'name' shadows Node.name — works, but triggers a shadowing warning.
			var name = str(child.name)
			all_nodes[name] = child
			if child.has_meta("utility"):
				includenode = false
			else:
				#check if node has inputs
				if child.get_input_port_count() > 0:
					#if it does scan through those inputs
					for i in range(child.get_input_port_count()):
						#check if it can find any valid connections
						var connected = false
						for conn in connections:
							if conn["to_node"] == name and conn["to_port"] == i:
								connected = true
								break
						#if no valid connections are found break the for loop to skip checking other inputs and set include to false
						if connected == false:
							log_console(name + " input is not connected, skipping node.", true)
							includenode = false
							break
				#check if node has outputs
				if child.get_output_port_count() > 0:
					#if it does scan through those outputs
					for i in range(child.get_output_port_count()):
						#check if it can find any valid connections
						var connected = false
						for conn in connections:
							if conn["from_node"] == name and conn["from_port"] == i:
								connected = true
								break
						#if no valid connections are found break the for loop to skip checking other inputs and set include to false
						if connected == false:
							log_console(name + " output is not connected, skipping node.", true)
							includenode = false
							break
			if includenode == true:
				graph[name] = []
				reverse_graph[name] = []
				indegree[name] = 0 # Start with zero incoming edges
				if child.get_meta("command") == "inputfile":
					inputcount -= 1
					input_nodes.append(child)
					if child.get_node("AudioPlayer").get_meta("trimfile"):
						inputcount += 1
				#check if node has internal sample rate, e.g. synthesis nodes and add to array for checking if this is set correctly
				if child.has_meta("node_sets_sample_rate") and child.get_meta("node_sets_sample_rate") == true:
					nodes_with_sample_rates.append(child)
	#do calculations for progress bar
	var progress_step
	# NOTE(review): integer division — 100 / n truncates in GDScript; 100.0 would give smoother progress.
	progress_step = 100 / (graph.size() + 3 + inputcount)
	#check if input file sample rates and bit depths match
	if input_nodes.size() > 1:
		var match_input_files = await match_input_file_sample_rates_and_bit_depths(input_nodes)
		var stereo = [] # NOTE(review): unused local
		if control_script.delete_intermediate_outputs:
			for f in match_input_files[0]:
				intermediate_files.append(f)
		processing_sample_rate = match_input_files[1]
		processing_bit_depth = match_input_files[2]
	elif input_nodes.size() == 1:
		#reset upsampled if it has previously been set on this node
		input_nodes[0].get_node("AudioPlayer").set_meta("upsampled", false)
		#get sample rate and bit-depth so that any synthesis nodes can have the correct sample rate set
		processing_sample_rate = input_nodes[0].get_node("AudioPlayer").get_meta("sample_rate")
		var soundfile_properties = get_soundfile_properties(input_nodes[0].get_node("AudioPlayer").get_meta("inputfile"))
		processing_bit_depth = classify_format(soundfile_properties["format"], soundfile_properties["bitdepth"])
	#check if the sample rate of synthesis nodes match and if they match any files in the input file nodes
	if (nodes_with_sample_rates.size() > 0 and input_nodes.size() > 0) or nodes_with_sample_rates.size() > 1:
		var sythesis_sample_rates = []
		var highest_synthesis_sample_rate
		var final_synthesis_sample_rate
		var change_synthesis_sample_rate
		for node in nodes_with_sample_rates:
			#get the sample rate from the meta and add to an array
			var sample_rate_option_button = node.get_node("samplerate")
			sythesis_sample_rates.append(int(sample_rate_option_button.get_item_text(sample_rate_option_button.selected)))
		#Check if all sample rates are the same
		if sythesis_sample_rates.all(func(v): return v == sythesis_sample_rates[0]):
			highest_synthesis_sample_rate = sythesis_sample_rates[0]
			if processing_sample_rate != 0 and processing_sample_rate != highest_synthesis_sample_rate:
				change_synthesis_sample_rate = true
				final_synthesis_sample_rate = processing_sample_rate
		else:
			#if not find the highest sample rate
			change_synthesis_sample_rate = true
			highest_synthesis_sample_rate = sythesis_sample_rates.max()
			if processing_sample_rate != 0 and processing_sample_rate != highest_synthesis_sample_rate:
				final_synthesis_sample_rate = processing_sample_rate
			else:
				final_synthesis_sample_rate = highest_synthesis_sample_rate
		if change_synthesis_sample_rate:
			log_console("Sample rate in synthesis nodes do not match the rest of the thread. Adjusting values to " + str(final_synthesis_sample_rate) + "Hz", true)
			for node in nodes_with_sample_rates:
				#flag the node so make_process uses the adjusted rate
				node.get_node("samplerate").set_meta("adjusted_sample_rate", true)
				node.get_node("samplerate").set_meta("new_sample_rate", final_synthesis_sample_rate)
	# Step 2: Build graph relationships from connections
	if process_cancelled:
		progress_label.text = "Thread Stopped"
		log_console("[b]Thread Stopped[/b]", true)
		return
	else:
		progress_label.text = "Building Thread"
	for conn in connections:
		var from = str(conn["from_node"])
		var to = str(conn["to_node"])
		if graph.has(from) and graph.has(to):
			graph[from].append(to)
			reverse_graph[to].append(from)
			indegree[to] += 1 # Count incoming edges
	# check for loops
	var has_cycle := detect_cycles(graph, {}) # pass loop_nodes list later
	if has_cycle:
		log_console("[color=#9c2828][b]Error: Thread not valid, Threads cannot contain loops.[/b][/color]", true)
		if progress_window.visible:
			progress_window.hide()
		if !console_window.visible:
			console_window.popup_centered()
		return
	# Step 3: Topological sort to get execution order (Kahn's algorithm)
	var sorted = [] # Sorted list of node names
	var queue = [] # Queue of nodes with 0 indegree
	for node in graph.keys():
		if indegree[node] == 0:
			queue.append(node)
	while not queue.is_empty():
		var current = queue.pop_front()
		sorted.append(current)
		for neighbor in graph[current]:
			indegree[neighbor] -= 1
			if indegree[neighbor] == 0:
				queue.append(neighbor)
	# If not all nodes were processed, there's a cycle
	#if sorted.size() != graph.size():
		#log_console("[color=#9c2828][b]Error: Thread not valid[/b][/color]", true)
		#log_console("Threads cannot contain loops.", true)
		#return
	progress_bar.value = progress_step
	# Step 4: Start processing audio
	# Dictionary to keep track of each node's output file
	var output_files = {}
	var process_count = 0
	#var current_infile
	# Iterate over the processing nodes in topological order
	for node_name in sorted:
		var node = all_nodes[node_name]
		if process_cancelled:
			progress_label.text = "Thread Stopped"
			log_console("[b]Thread Stopped[/b]", true)
			break
		else:
			progress_label.text = "Running process: " + node.get_title()
		# Find upstream nodes connected to the current node
		# Build an array of all inlet connections
		var input_connections := []
		for conn in connections:
			if conn["to_node"] == node_name:
				input_connections.append(conn)
		input_connections.sort_custom(func(a, b): return a["to_port"] < b["to_port"])
		#build a dictionary with all inputs sorted by inlet number
		var inlet_inputs = {}
		for conn in input_connections:
			var inlet_idx = conn["to_port"]
			var upstream_node = conn["from_node"]
			if output_files.has(upstream_node):
				if not inlet_inputs.has(inlet_idx):
					inlet_inputs[inlet_idx] = []
				inlet_inputs[inlet_idx].append(output_files[upstream_node])
		# Merge inputs if inlet has more than one input and build infile dictionary
		var current_infiles = {} #dictionary to store input files by inlet number
		for inlet_idx in inlet_inputs.keys():
			var files = inlet_inputs[inlet_idx]
			if files.size() > 1: #if more than one file mix them together
				var runmerge = await merge_many_files(inlet_idx, process_count, files)
				var merge_output = runmerge[0] #mixed output file name
				var converted_files = runmerge[1] #intermediate files created from merge
				current_infiles[inlet_idx] = merge_output #input filename added to dictionary sorted by inlet number
				#add intermediate files to delete list if toggled
				if control_script.delete_intermediate_outputs:
					intermediate_files.append(merge_output)
					for f in converted_files:
						intermediate_files.append(f)
			elif files.size() == 1:
				current_infiles[inlet_idx] = files[0] #only one file, do not merge add to dictionary
		#if the dictionary has more than one entry there is more than one inlet and files need to be matched
		#however this should only be done to nodes with audio files rather than pvoc nodes
		if current_infiles.size() > 1 and node.get_slot_type_left(0) == 0:
			#check all files in dictionary have the same sample rate and channel count and fix if not
			var all_files = current_infiles.values()
			var match_channels = await match_file_channels(0, process_count, all_files)
			var matched_files = match_channels[0]
			#add intermediate files
			if control_script.delete_intermediate_outputs:
				for f in match_channels[1]:
					intermediate_files.append(f)
			#replace files in dictionary with matched files
			var idx = 0
			for key in current_infiles.keys():
				current_infiles[key] = matched_files[idx]
				idx += 1
		#check if node is some form of input node
		if node.get_input_port_count() == 0:
			if node.get_meta("command") == "inputfile":
				var loadedfile
				#get the inputfile from the nodes meta
				if node.get_node("AudioPlayer").get_meta("upsampled"):
					loadedfile = node.get_node("AudioPlayer").get_meta("upsampled_file")
				else:
					loadedfile = node.get_node("AudioPlayer").get_meta("inputfile")
				#get wether trim has been enabled
				var trimfile = node.get_node("AudioPlayer").get_meta("trimfile")
				#if trim is enabled trim the file
				if trimfile == true:
					#get the start and end points
					var start = node.get_node("AudioPlayer").get_meta("trimpoints")[0]
					var end = node.get_node("AudioPlayer").get_meta("trimpoints")[1]
					if process_cancelled:
						#exit out of process if cancelled
						progress_label.text = "Thread Stopped"
						log_console("[b]Thread Stopped[/b]", true)
						return
					else:
						progress_label.text = "Trimming input audio"
					await run_command(control_script.cdpprogs_location + "/sfedit", ["cut", "1", loadedfile, "%s_%d_input_trim.wav" % [Global.outfile, process_count], str(start), str(end)])
					output_files[node_name] = "%s_%d_input_trim.wav" % [Global.outfile, process_count]
					# Mark trimmed file for cleanup if needed
					if control_script.delete_intermediate_outputs:
						intermediate_files.append("%s_%d_input_trim.wav" % [Global.outfile, process_count])
					progress_bar.value += progress_step
				else:
					#if trim not enabled pass the loaded file
					output_files[node_name] = loadedfile
				process_count += 1
			else: #not an audio file must be synthesis
				var slider_data = _get_slider_values_ordered(node)
				var makeprocess = await make_process(node, process_count, [], slider_data)
				# run the command
				await run_command(makeprocess[0], makeprocess[3])
				await get_tree().process_frame
				var output_file = makeprocess[1]
				#check if bitdepth matches other files in thread and convert if needed
				var soundfile_properties = get_soundfile_properties(output_file)
				if processing_bit_depth != classify_format(soundfile_properties["format"], soundfile_properties["bitdepth"]):
					var bit_convert_output = output_file.get_basename() + "_bit_depth_convert.wav"
					await run_command(control_script.cdpprogs_location + "/copysfx", ["-h0", "-s" + str(processing_bit_depth), output_file, bit_convert_output])
					#store converted output file path for this node
					output_files[node_name] = bit_convert_output
					#mark for cleanup if needed
					if control_script.delete_intermediate_outputs:
						intermediate_files.append(bit_convert_output)
				else:
					# Store original output file path for this node
					output_files[node_name] = output_file
				# Mark file for cleanup if needed
				if control_script.delete_intermediate_outputs:
					for file in makeprocess[2]:
						breakfiles.append(file)
					intermediate_files.append(output_file)
				process_count += 1
		else:
			# Build the command for the current node's audio processing
			var slider_data = _get_slider_values_ordered(node)
			if node.get_slot_type_right(0) == 1: #detect if process outputs pvoc data
				if is_pvoc_stereo(current_infiles): #check if infiles contain an array meaning at least one input pvoc process has be processed in dual mono mode
					var split_files = await process_dual_mono_pvoc(current_infiles, node, process_count, slider_data)
					var pvoc_stereo_files = split_files[0]
					# Mark file for cleanup if needed
					if control_script.delete_intermediate_outputs:
						for file in split_files[1]:
							breakfiles.append(file)
						for file in pvoc_stereo_files:
							intermediate_files.append(file)
					process_count += 1
					output_files[node_name] = pvoc_stereo_files
				else:
					var input_stereo = await is_stereo(current_infiles.values()[0])
					if input_stereo == true:
						#audio file is stereo and needs to be split for pvoc processing
						var pvoc_stereo_files = []
						##Split stereo to c1/c2 and process
						var split_files = await stereo_split_and_process(current_infiles.values(), node, process_count, slider_data)
						pvoc_stereo_files = split_files[0]
						# Mark file for cleanup if needed
						if control_script.delete_intermediate_outputs:
							for file in split_files[1]:
								breakfiles.append(file)
							for file in pvoc_stereo_files:
								intermediate_files.append(file)
						#Delete c1 and c2 because they can be in the wrong folder and if the same infile is used more than once
						#with this stereo process CDP will throw errors in the console even though its fine
						var files_to_delete = split_files[2] + split_files[3]
						for file in files_to_delete:
							if is_windows:
								file = file.replace("/", "\\")
							await run_command(delete_cmd, [file])
						#advance process count to match the advancement in the stereo_split_and_process function
						process_count += 1
						# Store output file path for this node
						output_files[node_name] = pvoc_stereo_files
					else:
						#input file is mono run through process
						var makeprocess = await make_process(node, process_count, current_infiles.values(), slider_data)
						# run the command
						await run_command(makeprocess[0], makeprocess[3])
						await get_tree().process_frame
						var output_file = makeprocess[1]
						# Store output file path for this node
						output_files[node_name] = output_file
						# Mark file for cleanup if needed
						if control_script.delete_intermediate_outputs:
							for file in makeprocess[2]:
								breakfiles.append(file)
							intermediate_files.append(output_file)
						# Increase the process step count
						process_count += 1
			else:
				#Process outputs audio
				#check if this is the last pvoc process in a stereo processing chain and check if infile is an array meaning that the last pvoc process was run in dual mono mode
				if node.get_meta("command") == "pvoc_synth" and is_pvoc_stereo(current_infiles):
					var split_files = await process_dual_mono_pvoc(current_infiles, node, process_count, slider_data)
					var pvoc_stereo_files = split_files[0]
					# Mark file for cleanup if needed
					if control_script.delete_intermediate_outputs:
						for file in split_files[1]:
							breakfiles.append(file)
						for file in pvoc_stereo_files:
							intermediate_files.append(file)
					process_count += 1
					#interleave left and right
					var output_file = Global.outfile.get_basename() + str(process_count) + "_interleaved.wav"
					await run_command(control_script.cdpprogs_location + "/submix", ["interleave", pvoc_stereo_files[0], pvoc_stereo_files[1], output_file])
					# Store output file path for this node
					output_files[node_name] = output_file
					# Mark file for cleanup if needed
					if control_script.delete_intermediate_outputs:
						intermediate_files.append(output_file)
				elif node.get_meta("command") == "preview":
					var preview_audioplayer = node.get_child(1)
					var preview_file = current_infiles.values()[0]
					preview_audioplayer._on_file_selected(preview_file)
					if preview_file in intermediate_files:
						intermediate_files.erase(preview_file)
				else:
					#Detect if input file is mono or stereo
					var input_stereo = await is_stereo(current_infiles.values()[0])
					#var input_stereo = true #bypassing stereo check just for testing need to reimplement
					if input_stereo == true:
						if node.get_meta("stereo_input") == true: #audio file is stereo and process is stereo, run file through process
							#current_infile = current_infiles.values()
							var makeprocess = await make_process(node, process_count, current_infiles.values(), slider_data)
							# run the command
							await run_command(makeprocess[0], makeprocess[3])
							await get_tree().process_frame
							var output_file = makeprocess[1]
							# Store output file path for this node
							output_files[node_name] = output_file
							# Mark file for cleanup if needed
							if control_script.delete_intermediate_outputs:
								for file in makeprocess[2]:
									breakfiles.append(file)
								intermediate_files.append(output_file)
						else: #audio file is stereo and process is mono, split stereo, process and recombine
							##Split stereo to c1/c2 and process
							var split_files = await stereo_split_and_process(current_infiles.values(), node, process_count, slider_data)
							var dual_mono_output = split_files[0]
							# Mark file for cleanup if needed
							if control_script.delete_intermediate_outputs:
								for file in split_files[1]:
									breakfiles.append(file)
								for file in dual_mono_output:
									intermediate_files.append(file)
							#Delete c1 and c2 because they can be in the wrong folder and if the same infile is used more than once
							#with this stereo process CDP will throw errors in the console even though its fine
							var files_to_delete = split_files[2] + split_files[3]
							for file in files_to_delete:
								if is_windows:
									file = file.replace("/", "\\")
								await run_command(delete_cmd, [file])
							#advance process count to match the advancement in the stereo_split_and_process function
							process_count += 1
							var output_file = Global.outfile.get_basename() + str(process_count) + "_interleaved.wav"
							await run_command(control_script.cdpprogs_location + "/submix", ["interleave", dual_mono_output[0], dual_mono_output[1], output_file])
							# Store output file path for this node
							output_files[node_name] = output_file
							# Mark file for cleanup if needed
							if control_script.delete_intermediate_outputs:
								intermediate_files.append(output_file)
					else: #audio file is mono, run through the process
						var makeprocess = await make_process(node, process_count, current_infiles.values(), slider_data)
						# run the command
						await run_command(makeprocess[0], makeprocess[3])
						await get_tree().process_frame
						var output_file = makeprocess[1]
						# Store output file path for this node
						output_files[node_name] = output_file
						# Mark file for cleanup if needed
						if control_script.delete_intermediate_outputs:
							for file in makeprocess[2]:
								breakfiles.append(file)
							intermediate_files.append(output_file)
				# Increase the process step count
				# NOTE(review): reconstructed placement — runs once after whichever audio-output branch executed.
				process_count += 1
		# NOTE(review): reconstructed placement — one progress step per processed node.
		progress_bar.value += progress_step
	# FINAL OUTPUT STAGE
	# Collect all nodes that are connected to the outputfile node
	if process_cancelled:
		progress_label.text = "Thread Stopped"
		log_console("[b]Thread Stopped[/b]", true)
		return
	else:
		progress_label.text = "Finalising output"
	var output_inputs := []
	for conn in connections:
		var to_node = str(conn["to_node"])
		if all_nodes.has(to_node) and all_nodes[to_node].get_meta("command") == "outputfile":
			output_inputs.append(str(conn["from_node"]))
	# List to hold the final output files to be merged (if needed)
	var final_outputs := []
	for node_name in output_inputs:
		if output_files.has(node_name):
			final_outputs.append(output_files[node_name])
	# If multiple outputs go to the outputfile node, merge them
	if final_outputs.size() > 1:
		var runmerge = await merge_many_files(0, process_count, final_outputs)
		final_output_dir = runmerge[0]
		var converted_files = runmerge[1]
		if control_script.delete_intermediate_outputs:
			for f in converted_files:
				intermediate_files.append(f)
	# Only one output, no merge needed
	elif final_outputs.size() == 1:
		var single_output = final_outputs[0]
		final_output_dir = single_output
		intermediate_files.erase(single_output)
	progress_bar.value += progress_step
	# CLEANUP: Delete intermediate files after processing, rename final output and reset upsampling meta
	if process_cancelled:
		progress_label.text = "Thread Stopped"
		log_console("[b]Thread Stopped[/b]", true)
		return
	else:
		log_console("Cleaning up intermediate files.", true)
		progress_label.text = "Cleaning up"
	for file_path in intermediate_files:
		# Adjust file path format for Windows if needed
		var fixed_path = file_path
		if is_windows:
			fixed_path = fixed_path.replace("/", "\\")
		await run_command(delete_cmd, [fixed_path])
		await get_tree().process_frame
	#delete break files
	for file_path in breakfiles:
		# Adjust file path format for Windows if needed
		var fixed_path = file_path
		if is_windows:
			fixed_path = fixed_path.replace("/", "\\")
		await run_command(delete_cmd, [fixed_path])
		await get_tree().process_frame
	var final_filename = "%s.wav" % Global.outfile
	var final_output_dir_fixed_path = final_output_dir
	if is_windows:
		# Windows 'ren' takes the new name only, not a full path
		final_output_dir_fixed_path = final_output_dir_fixed_path.replace("/", "\\")
		await run_command(rename_cmd, [final_output_dir_fixed_path, final_filename.get_file()])
	else:
		await run_command(rename_cmd, [final_output_dir_fixed_path, "%s.wav" % Global.outfile])
	final_output_dir = Global.outfile + ".wav"
	control_script.output_audio_player.play_outfile(final_output_dir)
	Global.cdpoutput = final_output_dir
	progress_bar.value = 100.0
	var interface_settings = ConfigHandler.load_interface_settings() #checks if close console is enabled and closes console on a success
	progress_window.hide()
	progress_bar.value = 0
	progress_label.text = ""
	console_window.find_child("KillProcess").disabled = true
	if interface_settings.auto_close_console and process_successful == true:
		console_window.hide()
  548. func stereo_split_and_process(files: Array, node: Node, process_count: int, slider_data: Array) -> Array:
  549. var dual_mono_output:= []
  550. var left:= []
  551. var right:= []
  552. var intermediate_files:= []
  553. for file in files:
  554. await run_command(control_script.cdpprogs_location + "/housekeep",["chans", "2", file])
  555. left.append(file.get_basename() + "_%s.%s" % ["c1", file.get_extension()])
  556. right.append(file.get_basename() + "_%s.%s" % ["c2", file.get_extension()])
  557. #loop through the left and right arrays and make and run the process for each of them
  558. for channel in [left, right]:
  559. var makeprocess = await make_process(node, process_count, channel, slider_data)
  560. # run the command
  561. await run_command(makeprocess[0], makeprocess[3])
  562. await get_tree().process_frame
  563. var output_file = makeprocess[1]
  564. dual_mono_output.append(output_file)
  565. for file in makeprocess[2]:
  566. intermediate_files.append(file)
  567. #advance process count to maintain unique file names
  568. process_count += 1
  569. #return the two output files, any breakfiles generated and the split files for deletion
  570. return [dual_mono_output, intermediate_files, left, right]
  571. func process_dual_mono_pvoc(current_infiles: Dictionary, node: Node, process_count: int, slider_data: Array) -> Array:
  572. match_pvoc_channels(current_infiles) #normalise dictionary to ensure that all entries are dual mono (any mono only processes are duplicated to both left and right)
  573. var infiles_left = []
  574. var infiles_right = []
  575. var pvoc_stereo_files = []
  576. var intermediate_files = []
  577. # extract left and right infiles from dictionary
  578. for value in current_infiles.values():
  579. infiles_left.append(value[0])
  580. infiles_right.append(value[1])
  581. for infiles in [infiles_left, infiles_right]:
  582. var makeprocess = await make_process(node, process_count, infiles, slider_data)
  583. # run the command
  584. await run_command(makeprocess[0], makeprocess[3])
  585. await get_tree().process_frame
  586. var output_file = makeprocess[1]
  587. pvoc_stereo_files.append(output_file)
  588. for file in makeprocess[2]:
  589. intermediate_files.append(file)
  590. #advance process count to maintain unique file names
  591. process_count += 1
  592. return [pvoc_stereo_files, intermediate_files]
  593. func is_stereo(file: String) -> bool:
  594. var soundfile_properties = get_soundfile_properties(file)
  595. if soundfile_properties["channels"] == 2:
  596. return true
  597. else:
  598. return false
  599. func is_pvoc_stereo(current_infiles: Dictionary) -> bool:
  600. for value in current_infiles.values():
  601. if value is Array:
  602. return true
  603. return false
  604. ## Returns properties of a WAV file as a Dictionary:
  605. ## {
  606. ## "format": 1 or 3,
  607. ## "channels": number of channels,
  608. ## "samplerate": sample rate in Hz,
  609. ## "bitdepth": bits per sample,
  610. ## "duration": length in seconds
  611. ## }
  612. func get_soundfile_properties(file: String) -> Dictionary:
  613. var soundfile_properties:= {
  614. "format": 0,
  615. "channels": 0,
  616. "samplerate": 0,
  617. "bitdepth": 0,
  618. "duration": 0.0
  619. }
  620. #open the audio file
  621. var f = FileAccess.open(file, FileAccess.READ)
  622. if f == null:
  623. log_console("Could not find file: " + file, true)
  624. return soundfile_properties # couldn't open
  625. #Skip the RIFF header (12 bytes: "RIFF", file size, "WAVE")
  626. f.seek(12)
  627. var audio_chunk_size = 0
  628. #read through file until end of file if needed
  629. while f.get_position() + 8 <= f.get_length():
  630. #read the 4 byte chunk id to identify what this chunk is
  631. var chunk_id = f.get_buffer(4).get_string_from_ascii()
  632. #read how big this chunk is
  633. var chunk_size = f.get_32()
  634. if chunk_id == "fmt ":
  635. #found the format chunk
  636. #fmt chunk layout:
  637. #2 bytes: Audio format (1 = PCM, 3 = IEEE float, etc.)
  638. #2 bytes: Number of channels (1 = mono, 2 = stereo, ...)
  639. #4 bytes: Sample rate
  640. #4 bytes: Byte rate
  641. #2 bytes: Block align
  642. #2 bytes: Bits per sample
  643. #potentially misc other stuff depending on format
  644. soundfile_properties["format"] = f.get_16() #format 2 bytes: 1 = int PCM, 3 = float
  645. soundfile_properties["channels"] = f.get_16() #num of channels 2 bytes
  646. soundfile_properties["samplerate"] = f.get_32() #sample rate 4 bytes
  647. f.seek(f.get_position() + 6)
  648. soundfile_properties["bitdepth"] = f.get_16() #bitdepth 2 bytes
  649. #check if we have already found the data chunk (not likely) and break the loop
  650. if audio_chunk_size > 0:
  651. f.close()
  652. break
  653. #skip to the end of the fmt chunk - max protects against skipping weirdly if wav is malformed and we have already moved too far into the file
  654. f.seek(f.get_position() + (max(chunk_size - 16, 0)))
  655. elif chunk_id == "data":
  656. #this is where the audio is stored
  657. audio_chunk_size = chunk_size
  658. #check if we have already found the fmt chunk and break loop
  659. if soundfile_properties["format"] > 0:
  660. f.close()
  661. break
  662. #skip the rest of the chunk
  663. f.seek(f.get_position() + chunk_size)
  664. else:
  665. #don't care about any other data in the file skip it
  666. f.seek(f.get_position() + chunk_size)
  667. #close the file
  668. f.close()
  669. if audio_chunk_size > 0 and soundfile_properties["channels"] > 0 and soundfile_properties["bitdepth"] > 0 and soundfile_properties["samplerate"] > 0:
  670. #(channels * bitdepth) / 8 - div 8 to convet bits to bytes
  671. var block_align = int((soundfile_properties["channels"] * soundfile_properties["bitdepth"]) / 8)
  672. #number of frames = size of audio chunk / block size in bytes
  673. var num_frames = int(audio_chunk_size / block_align)
  674. #length in seconds = number of frames / sample rate
  675. soundfile_properties.duration = (num_frames) / soundfile_properties["samplerate"]
  676. else:
  677. #something = 0 and something has gone wrong
  678. log_console("No valid fmt chunk found in wav file, unable to establish, format, channel count, samplerate or bit-depth", true)
  679. for key in soundfile_properties:
  680. #normalise dictionary to 0 so code can detect errors later even if some values have ended up in the dictionary
  681. soundfile_properties[key] = 0
  682. return soundfile_properties #no fmt chunk found, invalid wav file
  683. return soundfile_properties
  684. func merge_many_files(inlet_id: int, process_count: int, input_files: Array) -> Array:
  685. var merge_output = "%s_merge_%d_%d.wav" % [Global.outfile.get_basename(), inlet_id, process_count]
  686. var converted_files := [] # Track any mono->stereo converted files or upsampled files
  687. #check if there are a mix of mono and stereo files and interleave mono files if required
  688. var match_channels = await match_file_channels(inlet_id, process_count, input_files)
  689. input_files = match_channels[0]
  690. converted_files += match_channels[1]
  691. # Merge all input files (converted or original)
  692. log_console("Mixing files to combined input.", true)
  693. var command := ["mergemany"]
  694. command += input_files
  695. command.append(merge_output)
  696. await run_command(control_script.cdpprogs_location + "/submix", command)
  697. if process_successful == false:
  698. log_console("Failed to to merge files to" + merge_output, true)
  699. return [merge_output, converted_files]
  700. func match_input_file_sample_rates_and_bit_depths(input_nodes: Array) -> Array:
  701. var sample_rates := []
  702. var input_files := [] #used to track input files so that the same file is not upsampled more than once should it be loaded into more than one input node
  703. var converted_files := []
  704. var highest_sample_rate
  705. var bit_depths:= []
  706. var file_types:= []
  707. var highest_bit_depth
  708. var int_float
  709. var final_format
  710. #get the sample rate, bit depth and file type (int/float) for each file and add to arrays
  711. for node in input_nodes:
  712. var soundfile_props = get_soundfile_properties(node.get_node("AudioPlayer").get_meta("inputfile"))
  713. file_types.append(soundfile_props["format"])
  714. sample_rates.append(soundfile_props["samplerate"])
  715. bit_depths.append(soundfile_props["bitdepth"])
  716. #set upsampled meta to false to allow for repeat runs of thread
  717. node.get_node("AudioPlayer").set_meta("upsampled", false)
  718. #Check if all sample rates are the same
  719. if sample_rates.all(func(v): return v == sample_rates[0]):
  720. highest_sample_rate = sample_rates[0]
  721. pass
  722. else:
  723. #if not find the highest sample rate
  724. highest_sample_rate = sample_rates.max()
  725. log_console("Different sample rates found in input files, upsampling files to match highest sample rate (" + str(highest_sample_rate) + "Hz) before processing.", true)
  726. #move through all input files and compare match their index to the sample_rate array
  727. for node in input_nodes:
  728. #check if sample rate of current node is less than the highest sample rate
  729. if node.get_node("AudioPlayer").get_meta("sample_rate") < highest_sample_rate:
  730. var input_file = node.get_node("AudioPlayer").get_meta("inputfile")
  731. #up sample it to the highest sample rate if so
  732. var upsample_output = Global.outfile + "_" + input_file.get_file().get_slice(".wav", 0) + "_" + str(highest_sample_rate) + ".wav"
  733. #check if file has previously been upsampled and if not upsample it
  734. if !input_files.has(input_file):
  735. input_files.append(input_file)
  736. await run_command(control_script.cdpprogs_location + "/housekeep", ["respec", "1", input_file, upsample_output, str(highest_sample_rate)])
  737. #add to converted files for cleanup if needed
  738. converted_files.append(upsample_output)
  739. node.get_node("AudioPlayer").set_meta("upsampled", true)
  740. node.get_node("AudioPlayer").set_meta("upsampled_file", upsample_output)
  741. input_files = [] #clear input files array for reuse with bitdepths
  742. #check if all file types and bit-depths are the same
  743. if file_types.all(func(v): return v == sample_rates[0]) and bit_depths.all(func(v): return v == sample_rates[0]):
  744. highest_bit_depth = bit_depths[0]
  745. int_float = file_types[0]
  746. #convert this to the value cdp uses in copysfx for potential use with synthesis nodes later
  747. final_format = classify_format(int_float, highest_bit_depth)
  748. else:
  749. highest_bit_depth = bit_depths.max()
  750. int_float = file_types.max()
  751. #convert this to the value cdp needs to convert file types using copysfx
  752. final_format = classify_format(int_float, highest_bit_depth)
  753. log_console("Different bit-depths found in input files, converting files to match highest bit-depth (" + str(highest_bit_depth) + "-bit) before processing.", true)
  754. #move through all input file nodes and compare them to the highest bit depth and file type
  755. var index = 0
  756. for node in input_nodes:
  757. if classify_format(file_types[index], bit_depths[index]) != final_format:
  758. var input_file
  759. #check if input file has already been upsampled and respec that file instead
  760. if node.get_node("AudioPlayer").get_meta("upsampled") == true:
  761. input_file = node.get_node("AudioPlayer").get_meta("upsampled_file")
  762. else:
  763. input_file = node.get_node("AudioPlayer").get_meta("inputfile")
  764. #build unique output name
  765. var bit_convert_output = Global.outfile + "_" + input_file.get_file().get_slice(".wav", 0) + "_" + str(highest_bit_depth) + "-bit" + ".wav"
  766. #check if this file has already been respeced (two input nodes with the same file loaded for some reason)
  767. if !input_files.has(input_file):
  768. input_files.append(input_file)
  769. await run_command(control_script.cdpprogs_location + "/copysfx", ["-h0", "-s" + str(final_format), input_file, bit_convert_output])
  770. #add to converted files for cleanup if needed
  771. converted_files.append(bit_convert_output)
  772. node.get_node("AudioPlayer").set_meta("upsampled", true)
  773. node.get_node("AudioPlayer").set_meta("upsampled_file", bit_convert_output)
  774. index += 1
  775. return [converted_files, highest_sample_rate, final_format]
  776. func classify_format(file_type: int, bit_depth: int) -> int:
  777. #takes the bitdepth and file type (int/float) of a wav file and outputs a number that can be used by the cdp process copysfx to respec a files bit-depth
  778. match [file_type, bit_depth]:
  779. [1, 16]:
  780. return 1
  781. [1, 32]:
  782. return 2
  783. [3, 32]:
  784. return 3
  785. [1, 24]:
  786. return 4
  787. _:
  788. return -1
  789. #need to remove this function as not needed
  790. #func match_file_sample_rates(inlet_id: int, process_count: int, input_files: Array) -> Array:
  791. #var sample_rates := []
  792. #var converted_files := []
  793. #
  794. ##Get all sample rates
  795. #for f in input_files:
  796. #var samplerate = await get_samplerate(f)
  797. #sample_rates.append(samplerate)
  798. #
  799. ##Check if all sample rates are the same
  800. #if sample_rates.all(func(v): return v == sample_rates[0]):
  801. #pass
  802. #else:
  803. #log_console("Different sample rates found, upsampling files to match highest current sample rate before processing.", true)
  804. ##if not find the highest sample rate
  805. #var highest_sample_rate = sample_rates.max()
  806. #var index = 0
  807. ##move through all input files and compare match their index to the sample_rate array
  808. #for f in input_files:
  809. ##check if sample rate of current file is less than the highest sample rate
  810. #if sample_rates[index] < highest_sample_rate:
  811. ##up sample it to the highest sample rate if so
  812. #var upsample_output = Global.outfile + "_" + str(inlet_id) + "_" + str(process_count) + f.get_file().get_slice(".wav", 0) + "_" + str(highest_sample_rate) + ".wav"
  813. #await run_command(control_script.cdpprogs_location + "/housekeep", ["respec", "1", f, upsample_output, str(highest_sample_rate)])
  814. ##replace the file in the input_file index with the new upsampled file
  815. #input_files[index] = upsample_output
  816. #converted_files.append(upsample_output)
  817. #
  818. #index += 1
  819. #return [input_files, converted_files]
  820. func match_file_channels(inlet_id: int, process_count: int, input_files: Array) -> Array:
  821. var converted_files := []
  822. var channel_counts := []
  823. # Check each file's channel count and build channel count array
  824. for f in input_files:
  825. var stereo = await is_stereo(f)
  826. channel_counts.append(stereo)
  827. # Check if there is a mix of mono and stereo files
  828. if channel_counts.has(true) and channel_counts.has(false):
  829. log_console("Mix of mono and stereo files found, interleaving mono files to stereo before mixing.", true)
  830. var index = 0
  831. for f in input_files:
  832. if channel_counts[index] == false: #file is mono
  833. var stereo_file = Global.outfile + "_" + str(inlet_id) + "_" + str(process_count) + f.get_file().get_slice(".wav", 0) + "_stereo.wav"
  834. await run_command(control_script.cdpprogs_location + "/submix", ["interleave", f, f, stereo_file])
  835. if process_successful == false:
  836. log_console("Failed to interleave mono file: %s" % f, true)
  837. else:
  838. converted_files.append(stereo_file)
  839. input_files[index] = stereo_file
  840. index += 1
  841. return [input_files, converted_files]
  842. func match_pvoc_channels(dict: Dictionary) -> void:
  843. #work through dictionary of files and make all entries dual arrays for stereo pvoc processing
  844. for key in dict.keys():
  845. var value = dict[key]
  846. if value is String:
  847. dict[key] = [value, value]
  848. func _get_slider_values_ordered(node: Node) -> Array:
  849. var results := []
  850. for child in node.get_children():
  851. if child is Range:
  852. var flag = child.get_meta("flag") if child.has_meta("flag") else ""
  853. var time = child.get_meta("time")
  854. var brk_data = []
  855. var min_slider = child.min_value
  856. var max_slider = child.max_value
  857. var exp = child.exp_edit
  858. if child.has_meta("brk_data"):
  859. brk_data = child.get_meta("brk_data")
  860. results.append(["slider", flag, child.value, time, brk_data, min_slider, max_slider, exp])
  861. elif child is CheckButton:
  862. var flag = child.get_meta("flag") if child.has_meta("flag") else ""
  863. results.append(["checkbutton", flag, child.button_pressed])
  864. elif child is OptionButton:
  865. var flag = child.get_meta("flag") if child.has_meta("flag") else ""
  866. var value = child.get_item_text(child.selected)
  867. #check if there has been a sample rate mismatch in the thread and adjust the this parameter to match the threads sample rate
  868. if child.has_meta("adjusted_sample_rate") and child.get_meta("adjusted_sample_rate"):
  869. value = str(child.get_meta("new_sample_rate"))
  870. child.set_meta("adjusted_sample_rate", false)
  871. results.append(["optionbutton", flag, value])
  872. #call this function recursively to find any nested sliders in scenes
  873. if child.get_child_count() > 0:
  874. var nested := _get_slider_values_ordered(child)
  875. results.append_array(nested)
  876. return results
  877. func make_process(node: Node, process_count: int, current_infile: Array, slider_data: Array) -> Array:
  878. var args:= []
  879. var command
  880. var cleanup = []
  881. # Determine output extension: .wav or .ana based on the node's slot type
  882. var extension = ".wav" if node.get_slot_type_right(0) == 0 else ".ana"
  883. # Construct output filename for this step
  884. var output_file = "%s_%d%s" % [Global.outfile.get_basename(), process_count, extension]
  885. #special case for morph glide as it requires spec grab to have been run first
  886. if node.get_meta("command") == "morph_glide":
  887. #get slider values nothing else needed
  888. var window1 = slider_data[0][2]
  889. var window2 = slider_data[1][2]
  890. var duration = slider_data[2][2]
  891. #get length of the two input files
  892. var soundfile_1_props = get_soundfile_properties(current_infile[0])
  893. var infile_1_length = soundfile_1_props["duration"]
  894. var soundfile_2_props = get_soundfile_properties(current_infile[1])
  895. var infile_2_length = soundfile_2_props["duration"]
  896. if window1 == 100:
  897. #if slider is set to 100% default to 10 milliseconds before the end of the file to stop cdp moaning about rounding errors
  898. window1 = infile_1_length - 0.1
  899. else:
  900. window1 = infile_1_length * (window1 / 100) #calculate percentage time of the input file
  901. if window2 == 100:
  902. #if slider is set to 100% default to 10 milliseconds before the end of the file to stop cdp moaning about rounding errors
  903. window2 = infile_2_length - 0.1
  904. else:
  905. window2 = infile_2_length * (window2 / 100) #calculate percentage time of the input file
  906. #run spec grab to extract the chosen windows
  907. var window1_outfile = "%s_%d_%s%s" % [Global.outfile.get_basename(), process_count, "window1", extension]
  908. run_command("%s/%s" %[control_script.cdpprogs_location, "spec"], ["grab", current_infile[0], window1_outfile, str(window1)])
  909. cleanup.append(window1_outfile)
  910. var window2_outfile = "%s_%d_%s%s" % [Global.outfile.get_basename(), process_count, "window2", extension]
  911. run_command("%s/%s" %[control_script.cdpprogs_location, "spec"], ["grab", current_infile[1], window2_outfile, str(window2)])
  912. cleanup.append(window2_outfile)
  913. #build actual glide command
  914. command = "%s/%s" %[control_script.cdpprogs_location, "morph"]
  915. args = ["glide", window1_outfile, window2_outfile, output_file, duration]
  916. else:
  917. # Get the command name from metadata
  918. var command_name = str(node.get_meta("command"))
  919. if command_name.find("_") != -1:
  920. command_name = command_name.split("_", true, 1)
  921. command = "%s/%s" %[control_script.cdpprogs_location, command_name[0]]
  922. args = command_name[1].split("_", true, 1)
  923. else:
  924. command = "%s/%s" %[control_script.cdpprogs_location, command_name]
  925. if current_infile.size() > 0:
  926. #check if input is empty, e.g. synthesis nodes, otherwise append input file to arguments
  927. for file in current_infile:
  928. args.append(file)
  929. args.append(output_file)
  930. # Append parameter values from the sliders, include flags if present
  931. var slider_count = 0
  932. for entry in slider_data:
  933. if entry[0] == "slider":
  934. var flag = entry[1]
  935. var value = entry[2]
  936. #if value == int(value):
  937. #value = int(value)
  938. var time = entry[3] #checks if slider is a time percentage slider
  939. var brk_data = entry[4]
  940. var min_slider = entry[5]
  941. var max_slider = entry[6]
  942. var exp = entry[7]
  943. if brk_data.size() > 0: #if breakpoint data is present on slider
  944. #Sort all points by time
  945. var sorted_brk_data = []
  946. sorted_brk_data = brk_data.duplicate()
  947. sorted_brk_data.sort_custom(sort_points)
  948. var calculated_brk = []
  949. #get length of input file in seconds
  950. var infile_length = 1 #set infile length to dummy value just incase it does get used where it shouldn't to avoid crashes
  951. if current_infile.size() > 0:
  952. var soundfile_props = get_soundfile_properties(current_infile[0])
  953. infile_length = soundfile_props["duration"]
  954. #scale values from automation window to the right length for file and correct slider values
  955. #if node has an output duration then breakpoint files should be x = outputduration y= slider value else x=input duration, y=value
  956. if node.has_meta("outputduration"):
  957. for i in range(sorted_brk_data.size()):
  958. var point = sorted_brk_data[i]
  959. var new_x = float(node.get_meta("outputduration")) * (point.x / 700) #output time
  960. if i == sorted_brk_data.size() - 1: #check if this is last automation point
  961. new_x = float(node.get_meta("outputduration")) + 0.1 # force last point's x to infile_length + 100ms to make sure the file is defo over
  962. var new_y
  963. #check if slider is exponential and scale automation
  964. if exp:
  965. new_y = remap_y_to_log_scale(point.y, 0.0, 255.0, min_slider, max_slider)
  966. else:
  967. new_y = remap(point.y, 255, 0, min_slider, max_slider) #slider value
  968. if time: #check if this is a time slider and convert to percentage of input file
  969. new_y = infile_length * (new_y / 100)
  970. calculated_brk.append(Vector2(new_x, new_y))
  971. else:
  972. for i in range(sorted_brk_data.size()):
  973. var point = sorted_brk_data[i]
  974. var new_x = infile_length * (point.x / 700) #time
  975. if i == sorted_brk_data.size() - 1: #check if this is last automation point
  976. new_x = infile_length + 0.1 # force last point's x to infile_length + 100ms to make sure the file is defo over
  977. var new_y
  978. #check if slider is exponential and scale automation
  979. if exp:
  980. new_y = remap_y_to_log_scale(point.y, 0.0, 255.0, min_slider, max_slider)
  981. else:
  982. new_y = remap(point.y, 255, 0, min_slider, max_slider) #slider value
  983. calculated_brk.append(Vector2(new_x, new_y))
  984. #make text file
  985. var brk_file_path = output_file.get_basename() + "_" + str(slider_count) + ".txt"
  986. write_breakfile(calculated_brk, brk_file_path)
  987. #add breakfile to cleanup before adding flag
  988. cleanup.append(brk_file_path)
  989. #append text file in place of value
  990. #include flag if this param has a flag
  991. if flag.begins_with("-"):
  992. brk_file_path = flag + brk_file_path
  993. args.append(brk_file_path)
  994. else: #no break file append slider value
  995. if time == true:
  996. var soundfile_props = get_soundfile_properties(current_infile[0])
  997. var infile_length = soundfile_props["duration"]
  998. if value == 100:
  999. #if slider is set to 100% default to a millisecond before the end of the file to stop cdp moaning about rounding errors
  1000. value = infile_length - 0.1
  1001. else:
  1002. value = infile_length * (value / 100) #calculate percentage time of the input file
  1003. #line += ("%s%.2f " % [flag, value]) if flag.begins_with("-") else ("%.2f " % value)
  1004. args.append(("%s%.2f " % [flag, value]) if flag.begins_with("-") else str(value))
  1005. elif entry[0] == "checkbutton":
  1006. var flag = entry[1]
  1007. var value = entry[2]
  1008. #if button is pressed add the flag to the arguments list
  1009. if value == true:
  1010. args.append(flag)
  1011. elif entry[0] == "optionbutton":
  1012. var flag = entry[1]
  1013. var value = entry[2]
  1014. args.append(("%s%.2f " % [flag, value]) if flag.begins_with("-") else str(value))
  1015. slider_count += 1
  1016. return [command, output_file, cleanup, args]
  1017. #return [line.strip_edges(), output_file, cleanup]
  1018. func remap_y_to_log_scale(y: float, min_y: float, max_y: float, min_val: float, max_val: float) -> float:
  1019. var t = clamp((y - min_y) / (max_y - min_y), 0.0, 1.0)
  1020. # Since y goes top-down (0 = top, 255 = bottom), we invert t
  1021. t = 1.0 - t
  1022. var log_min = log(min_val) / log(10)
  1023. var log_max = log(max_val) / log(10)
  1024. var log_val = lerp(log_min, log_max, t)
  1025. return pow(10.0, log_val)
  1026. func sort_points(a, b):
  1027. return a.x < b.x
  1028. func write_breakfile(points: Array, path: String):
  1029. var file = FileAccess.open(path, FileAccess.WRITE)
  1030. if file:
  1031. for point in points:
  1032. var line = str(point.x) + " " + str(point.y) + "\n"
  1033. file.store_string(line)
  1034. file.close()
  1035. else:
  1036. log_console("Failed to open file to write breakfile", true)
  1037. func _on_kill_process_button_down() -> void:
  1038. if process_running and process_info.has("pid"):
  1039. progress_window.hide()
  1040. # Terminate the process by PID
  1041. OS.kill(process_info["pid"])
  1042. process_running = false
  1043. process_cancelled = true
  1044. func path_exists_through_all_nodes() -> bool:
  1045. var graph = {}
  1046. var input_node_names = []
  1047. var output_node_name = ""
  1048. # Gather nodes and initialize adjacency list
  1049. for child in graph_edit.get_children():
  1050. if child is GraphNode:
  1051. var name = str(child.name)
  1052. var command = child.get_meta("command")
  1053. var input = child.get_meta("input")
  1054. if input:
  1055. input_node_names.append(name)
  1056. elif command == "outputfile":
  1057. output_node_name = name
  1058. graph[name] = []
  1059. # Add edges
  1060. for conn in graph_edit.get_connection_list():
  1061. var from_node = str(conn["from_node"])
  1062. var to_node = str(conn["to_node"])
  1063. if graph.has(from_node):
  1064. graph[from_node].append(to_node)
  1065. # BFS from each input node
  1066. for input_node in input_node_names:
  1067. var queue = [[input_node]] # store paths, not just nodes
  1068. while queue.size() > 0:
  1069. var path = queue.pop_front()
  1070. var current = path[-1]
  1071. if current == output_node_name:
  1072. # Candidate path found; validate multi-inlets
  1073. if validate_path_inlets(path, graph, input_node_names):
  1074. return true # fully valid path found
  1075. for neighbor in graph.get(current, []):
  1076. if neighbor in path:
  1077. continue # avoid cycles
  1078. var new_path = path.duplicate()
  1079. new_path.append(neighbor)
  1080. queue.append(new_path)
  1081. return false
  1082. # Validate all nodes along a candidate path for multi-inlets
  1083. func validate_path_inlets(path: Array, graph: Dictionary, input_node_names: Array) -> bool:
  1084. for node_name in path:
  1085. var child = graph_edit.get_node(node_name)
  1086. var input_count = child.get_input_port_count()
  1087. if input_count <= 1:
  1088. continue # single-inlet nodes are trivially valid
  1089. # Check each inlet
  1090. for i in range(input_count):
  1091. var inlet_valid = false
  1092. for conn in graph_edit.get_connection_list():
  1093. if str(conn["to_node"]) == node_name and conn["to_port"] == i:
  1094. var src_node = str(conn["from_node"])
  1095. if path_has_input(src_node, graph, input_node_names):
  1096. inlet_valid = true
  1097. break
  1098. if not inlet_valid:
  1099. return false # this inlet cannot reach any input
  1100. return true
  1101. # Step backwards from a node to see if a path exists to any input node
  1102. func path_has_input(current: String, graph: Dictionary, input_node_names: Array, visited: Dictionary = {}) -> bool:
  1103. if current in input_node_names:
  1104. return true
  1105. if current in visited:
  1106. return false
  1107. visited[current] = true
  1108. # Check all nodes that lead to current
  1109. for conn in graph_edit.get_connection_list():
  1110. if str(conn["to_node"]) == current:
  1111. var src_node = str(conn["from_node"])
  1112. if path_has_input(src_node, graph, input_node_names, visited.duplicate()):
  1113. return true
  1114. return false
  1115. #func path_exists_through_all_nodes() -> bool:
  1116. #var graph = {}
  1117. #var input_node_names = []
  1118. #var output_node_name = ""
  1119. #
  1120. ## Gather nodes and build empty graph
  1121. #for child in graph_edit.get_children():
  1122. #if child is GraphNode:
  1123. #var name = str(child.name)
  1124. #var command = child.get_meta("command")
  1125. #var input = child.get_meta("input")
  1126. #
  1127. #if input:
  1128. #input_node_names.append(name)
  1129. #elif command == "outputfile":
  1130. #output_node_name = name
  1131. #
  1132. #graph[name] = [] # Initialize adjacency list
  1133. #
  1134. ## Add connections (edges)
  1135. #for conn in graph_edit.get_connection_list():
  1136. #var from = str(conn["from_node"])
  1137. #var to = str(conn["to_node"])
  1138. #if graph.has(from):
  1139. #graph[from].append(to)
  1140. #
  1141. ## BFS to check if any input node reaches the output
  1142. #for input_node in input_node_names:
  1143. #var visited = {}
  1144. #var queue = [input_node]
  1145. #
  1146. #while queue.size() > 0:
  1147. #var current = queue.pop_front()
  1148. #
  1149. #if current == output_node_name:
  1150. #return true # Path found
  1151. #
  1152. #if current in visited:
  1153. #continue
  1154. #visited[current] = true
  1155. #
  1156. #for neighbor in graph.get(current, []):
  1157. #queue.append(neighbor)
  1158. #
  1159. ## No path from any input node to output
  1160. #return false
  1161. func log_console(text: String, update: bool) -> void:
  1162. console_output.append_text(text + "\n \n")
  1163. console_output.scroll_to_line(console_output.get_line_count() - 1)
  1164. if update == true:
  1165. await get_tree().process_frame # Optional: ensure UI updates
  1166. func run_command(command: String, args: Array) -> String:
  1167. var is_windows = OS.get_name() == "Windows"
  1168. console_output.append_text(command + " " + " ".join(args) + "\n")
  1169. console_output.scroll_to_line(console_output.get_line_count() - 1)
  1170. await get_tree().process_frame
  1171. if is_windows and (command == "del" or command == "ren"): #checks if the command is a windows system command and runs it through cmd.exe
  1172. args.insert(0, command)
  1173. args.insert(0, "/C")
  1174. process_info = OS.execute_with_pipe("cmd.exe", args, false)
  1175. else:
  1176. process_info = OS.execute_with_pipe(command, args, false)
  1177. # Check if the process was successfully started
  1178. if !process_info.has("pid"):
  1179. log_console("Failed to start process]", true)
  1180. return ""
  1181. process_running = true
  1182. # Start monitoring the process output and status
  1183. return await monitor_process(process_info["pid"], process_info["stdio"], process_info["stderr"])
  1184. func monitor_process(pid: int, stdout: FileAccess, stderr: FileAccess) -> String:
  1185. var output := ""
  1186. while OS.is_process_running(pid):
  1187. await get_tree().process_frame
  1188. while stdout.get_position() < stdout.get_length():
  1189. var line = stdout.get_line()
  1190. output += line
  1191. console_output.append_text(line + "\n")
  1192. console_output.scroll_to_line(console_output.get_line_count() - 1)
  1193. while stderr.get_position() < stderr.get_length():
  1194. var line = stderr.get_line()
  1195. output += line
  1196. console_output.append_text(line + "\n")
  1197. console_output.scroll_to_line(console_output.get_line_count() - 1)
  1198. var exit_code = OS.get_process_exit_code(pid)
  1199. if exit_code == 0:
  1200. if output.contains("ERROR:"): #checks if CDP reported an error but passed exit code 0 anyway
  1201. console_output.append_text("[color=#9c2828][b]Processes failed[/b][/color]\n\n")
  1202. console_output.scroll_to_line(console_output.get_line_count() - 1)
  1203. process_successful = false
  1204. if process_cancelled == false:
  1205. progress_window.hide()
  1206. if !console_window.visible:
  1207. console_window.popup_centered()
  1208. else:
  1209. console_output.append_text("[color=#638382]Processes ran successfully[/color]\n\n")
  1210. console_output.scroll_to_line(console_output.get_line_count() - 1)
  1211. else:
  1212. console_output.append_text("[color=#9c2828][b]Processes failed with exit code: %d[/b][/color]\n" % exit_code + "\n")
  1213. console_output.scroll_to_line(console_output.get_line_count() - 1)
  1214. process_successful = false
  1215. if process_cancelled == false:
  1216. progress_window.hide()
  1217. if !console_window.visible:
  1218. console_window.popup_centered()
  1219. if output.contains("as an internal or external command"): #check for cdprogs location error on windows
  1220. console_output.append_text("[color=#9c2828][b]Please make sure your cdprogs folder is set to the correct location in the Settings menu. The default location is C:\\CDPR8\\_cdp\\_cdprogs[/b][/color]\n\n")
  1221. console_output.scroll_to_line(console_output.get_line_count() - 1)
  1222. if output.contains("command not found"): #check for cdprogs location error on unix systems
  1223. console_output.append_text("[color=#9c2828][b]Please make sure your cdprogs folder is set to the correct location in the Settings menu. The default location is ~/cdpr8/_cdp/_cdprogs[/b][/color]\n\n")
  1224. console_output.scroll_to_line(console_output.get_line_count() - 1)
  1225. process_running = false
  1226. return output
  1227. # Main cycle detection
  1228. func detect_cycles(graph: Dictionary, loop_nodes: Dictionary) -> bool:
  1229. var visited := {}
  1230. var stack := {}
  1231. for node in graph.keys():
  1232. if _dfs_cycle(node, graph, visited, stack, loop_nodes):
  1233. return true
  1234. return false
# Recursive DFS step for detect_cycles. `visited` marks nodes ever explored;
# `stack` marks nodes on the current recursion path, so meeting a neighbor
# already in `stack` is a back edge, i.e. a cycle. A detected cycle is only
# reported (returns true) when the edge that revealed it touches no node in
# `loop_nodes` — cycles through designated loop nodes are deliberately allowed.
# NOTE(review): assumes every reachable neighbor is also a key of `graph`;
# a neighbor missing from `graph` would error on `graph[node]` in the
# recursive call — verify against the graph builder.
func _dfs_cycle(node: String, graph: Dictionary, visited: Dictionary, stack: Dictionary, loop_nodes: Dictionary) -> bool:
	if not visited.has(node):
		visited[node] = true
		stack[node] = true
		for neighbor in graph[node]:
			# If neighbor hasn't been visited, recurse
			if not visited.has(neighbor):
				if _dfs_cycle(neighbor, graph, visited, stack, loop_nodes):
					# Cycle found down this path
					if not (loop_nodes.has(node) or loop_nodes.has(neighbor)):
						return true
			elif stack.has(neighbor):
				# Back edge found → cycle
				if not (loop_nodes.has(node) or loop_nodes.has(neighbor)):
					return true
	# Done exploring this node
	stack.erase(node)
	return false