alffplay.cpp

  1. /*
  2. * An example showing how to play a stream sync'd to video, using ffmpeg.
  3. *
  4. * Requires C++14.
  5. */
  6. #include <condition_variable>
  7. #include <functional>
  8. #include <algorithm>
  9. #include <iostream>
  10. #include <utility>
  11. #include <iomanip>
  12. #include <cstdint>
  13. #include <cstring>
  14. #include <cstdlib>
  15. #include <atomic>
  16. #include <cerrno>
  17. #include <chrono>
  18. #include <cstdio>
  19. #include <memory>
  20. #include <string>
  21. #include <thread>
  22. #include <vector>
  23. #include <array>
  24. #include <cmath>
  25. #include <deque>
  26. #include <mutex>
  27. #include <ratio>
  28. extern "C" {
  29. #ifdef __GNUC__
  30. _Pragma("GCC diagnostic push")
  31. _Pragma("GCC diagnostic ignored \"-Wconversion\"")
  32. _Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
  33. #endif
  34. #include "libavcodec/avcodec.h"
  35. #include "libavformat/avformat.h"
  36. #include "libavformat/avio.h"
  37. #include "libavformat/version.h"
  38. #include "libavutil/avutil.h"
  39. #include "libavutil/error.h"
  40. #include "libavutil/frame.h"
  41. #include "libavutil/mem.h"
  42. #include "libavutil/pixfmt.h"
  43. #include "libavutil/rational.h"
  44. #include "libavutil/samplefmt.h"
  45. #include "libavutil/time.h"
  46. #include "libavutil/version.h"
  47. #include "libavutil/channel_layout.h"
  48. #include "libswscale/swscale.h"
  49. #include "libswresample/swresample.h"
  50. constexpr auto AVNoPtsValue = AV_NOPTS_VALUE;
  51. constexpr auto AVErrorEOF = AVERROR_EOF;
  52. struct SwsContext;
  53. #ifdef __GNUC__
  54. _Pragma("GCC diagnostic pop")
  55. #endif
  56. }
  57. #include "SDL.h"
  58. #include "AL/alc.h"
  59. #include "AL/al.h"
  60. #include "AL/alext.h"
  61. #include "common/alhelpers.h"
  62. extern "C" {
  63. /* Undefine this to disable use of experimental extensions. Don't use for
  64. * production code! Interfaces and behavior may change prior to being
  65. * finalized.
  66. */
  67. #define ALLOW_EXPERIMENTAL_EXTS
  68. #ifdef ALLOW_EXPERIMENTAL_EXTS
  69. #ifndef AL_SOFT_callback_buffer
  70. #define AL_SOFT_callback_buffer
  71. typedef unsigned int ALbitfieldSOFT;
  72. #define AL_BUFFER_CALLBACK_FUNCTION_SOFT 0x19A0
  73. #define AL_BUFFER_CALLBACK_USER_PARAM_SOFT 0x19A1
  74. typedef ALsizei (AL_APIENTRY*LPALBUFFERCALLBACKTYPESOFT)(ALvoid *userptr, ALvoid *sampledata, ALsizei numsamples);
  75. typedef void (AL_APIENTRY*LPALBUFFERCALLBACKSOFT)(ALuint buffer, ALenum format, ALsizei freq, LPALBUFFERCALLBACKTYPESOFT callback, ALvoid *userptr, ALbitfieldSOFT flags);
  76. typedef void (AL_APIENTRY*LPALGETBUFFERPTRSOFT)(ALuint buffer, ALenum param, ALvoid **value);
  77. typedef void (AL_APIENTRY*LPALGETBUFFER3PTRSOFT)(ALuint buffer, ALenum param, ALvoid **value1, ALvoid **value2, ALvoid **value3);
  78. typedef void (AL_APIENTRY*LPALGETBUFFERPTRVSOFT)(ALuint buffer, ALenum param, ALvoid **values);
  79. #endif
  80. #endif /* ALLOW_EXPERIMENTAL_EXTS */
  81. }
  82. namespace {
  83. inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept { return static_cast<int64_t>(n); }
  84. #ifndef M_PI
  85. #define M_PI (3.14159265358979323846)
  86. #endif
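/* 32.32 fixed-point duration type. The AL_SAMPLE_OFFSET_LATENCY_SOFT and
 * AL_SAMPLE_OFFSET_CLOCK_SOFT queries report the source offset as a 64-bit
 * value with 32 fractional bits, so this lets duration_cast turn those values
 * into nanoseconds directly.
 */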
  87. using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1_i64<<32)>>;
  88. using nanoseconds = std::chrono::nanoseconds;
  89. using microseconds = std::chrono::microseconds;
  90. using milliseconds = std::chrono::milliseconds;
  91. using seconds = std::chrono::seconds;
  92. using seconds_d64 = std::chrono::duration<double>;
  93. using std::chrono::duration_cast;
  94. const std::string AppName{"alffplay"};
  95. ALenum DirectOutMode{AL_FALSE};
  96. bool EnableWideStereo{false};
  97. bool DisableVideo{false};
  98. LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
  99. LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
  100. #ifdef AL_SOFT_events
  101. LPALEVENTCONTROLSOFT alEventControlSOFT;
  102. LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
  103. #endif
  104. #ifdef AL_SOFT_callback_buffer
  105. LPALBUFFERCALLBACKSOFT alBufferCallbackSOFT;
  106. #endif
  107. const seconds AVNoSyncThreshold{10};
  108. #define VIDEO_PICTURE_QUEUE_SIZE 24
  109. const seconds_d64 AudioSyncThreshold{0.03};
  110. const milliseconds AudioSampleCorrectionMax{50};
  111. /* Averaging filter coefficient for audio sync. */
  112. #define AUDIO_DIFF_AVG_NB 20
  113. const double AudioAvgFilterCoeff{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB)};
  114. /* Per-buffer size, in time */
  115. constexpr milliseconds AudioBufferTime{20};
  116. /* Buffer total size, in time (should be divisible by the buffer time) */
  117. constexpr milliseconds AudioBufferTotalTime{800};
  118. constexpr auto AudioBufferCount = AudioBufferTotalTime / AudioBufferTime;
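/* With 20ms buffers and 800ms total, AudioBufferCount works out to 40 queued
 * buffers when not using a single callback buffer.
 */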
  119. enum {
  120. FF_MOVIE_DONE_EVENT = SDL_USEREVENT
  121. };
  122. enum class SyncMaster {
  123. Audio,
  124. Video,
  125. External,
  126. Default = External
  127. };
  128. inline microseconds get_avtime()
  129. { return microseconds{av_gettime()}; }
  130. /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
  131. struct AVIOContextDeleter {
  132. void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
  133. };
  134. using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;
  135. struct AVFormatCtxDeleter {
  136. void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
  137. };
  138. using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;
  139. struct AVCodecCtxDeleter {
  140. void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
  141. };
  142. using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;
  143. struct AVFrameDeleter {
  144. void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
  145. };
  146. using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;
  147. struct SwrContextDeleter {
  148. void operator()(SwrContext *ptr) { swr_free(&ptr); }
  149. };
  150. using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;
  151. struct SwsContextDeleter {
  152. void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
  153. };
  154. using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
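/* A thread-safe FIFO of demuxed packets for one stream, capped at SizeLimit
 * bytes. The parser thread adds packets with put() (or marks the end with
 * setFinished()), while the decoder thread drains them via sendTo(), which
 * forwards the front packet to avcodec_send_packet and sends a null packet to
 * flush the decoder once the queue is finished.
 */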
  155. template<size_t SizeLimit>
  156. class PacketQueue {
  157. std::mutex mMutex;
  158. std::condition_variable mCondVar;
  159. std::deque<AVPacket> mPackets;
  160. size_t mTotalSize{0};
  161. bool mFinished{false};
  162. AVPacket *getPacket(std::unique_lock<std::mutex> &lock)
  163. {
  164. while(mPackets.empty() && !mFinished)
  165. mCondVar.wait(lock);
  166. return mPackets.empty() ? nullptr : &mPackets.front();
  167. }
  168. void pop()
  169. {
  170. AVPacket *pkt = &mPackets.front();
  171. mTotalSize -= static_cast<unsigned int>(pkt->size);
  172. av_packet_unref(pkt);
  173. mPackets.pop_front();
  174. }
  175. public:
  176. ~PacketQueue()
  177. {
  178. for(AVPacket &pkt : mPackets)
  179. av_packet_unref(&pkt);
  180. mPackets.clear();
  181. mTotalSize = 0;
  182. }
  183. int sendTo(AVCodecContext *codecctx)
  184. {
  185. std::unique_lock<std::mutex> lock{mMutex};
  186. AVPacket *pkt{getPacket(lock)};
  187. if(!pkt) return avcodec_send_packet(codecctx, nullptr);
  188. const int ret{avcodec_send_packet(codecctx, pkt)};
  189. if(ret != AVERROR(EAGAIN))
  190. {
  191. if(ret < 0)
  192. std::cerr<< "Failed to send packet: "<<ret <<std::endl;
  193. pop();
  194. }
  195. return ret;
  196. }
  197. void setFinished()
  198. {
  199. {
  200. std::lock_guard<std::mutex> _{mMutex};
  201. mFinished = true;
  202. }
  203. mCondVar.notify_one();
  204. }
  205. bool put(const AVPacket *pkt)
  206. {
  207. {
  208. std::unique_lock<std::mutex> lock{mMutex};
  209. if(mTotalSize >= SizeLimit)
  210. return false;
  211. mPackets.push_back(AVPacket{});
  212. if(av_packet_ref(&mPackets.back(), pkt) != 0)
  213. {
  214. mPackets.pop_back();
  215. return true;
  216. }
  217. mTotalSize += static_cast<unsigned int>(mPackets.back().size);
  218. }
  219. mCondVar.notify_one();
  220. return true;
  221. }
  222. };
  223. struct MovieState;
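/* Decodes the audio stream and feeds OpenAL, keeping the audio clock. Samples
 * are either queued onto the source as AudioBufferCount streaming buffers, or,
 * when AL_SOFT_callback_buffer is usable, pulled by the mixer from the
 * mBufferData ring buffer through bufferCallback().
 */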
  224. struct AudioState {
  225. MovieState &mMovie;
  226. AVStream *mStream{nullptr};
  227. AVCodecCtxPtr mCodecCtx;
  228. PacketQueue<2*1024*1024> mPackets;
  229. /* Used for clock difference average computation */
  230. seconds_d64 mClockDiffAvg{0};
  231. /* Time of the next sample to be buffered */
  232. nanoseconds mCurrentPts{0};
  233. /* Device clock time that the stream started at. */
  234. nanoseconds mDeviceStartTime{nanoseconds::min()};
  235. /* Decompressed sample frame, and swresample context for conversion */
  236. AVFramePtr mDecodedFrame;
  237. SwrContextPtr mSwresCtx;
  238. /* Conversion format, for what gets fed to OpenAL */
  239. uint64_t mDstChanLayout{0};
  240. AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};
  241. /* Storage of converted samples */
  242. uint8_t *mSamples{nullptr};
  243. int mSamplesLen{0}; /* In samples */
  244. int mSamplesPos{0};
  245. int mSamplesMax{0};
  246. std::unique_ptr<uint8_t[]> mBufferData;
  247. size_t mBufferDataSize{0};
  248. std::atomic<size_t> mReadPos{0};
  249. std::atomic<size_t> mWritePos{0};
  250. /* OpenAL format */
  251. ALenum mFormat{AL_NONE};
  252. ALuint mFrameSize{0};
  253. std::mutex mSrcMutex;
  254. std::condition_variable mSrcCond;
  255. std::atomic_flag mConnected;
  256. ALuint mSource{0};
  257. std::array<ALuint,AudioBufferCount> mBuffers{};
  258. ALuint mBufferIdx{0};
  259. AudioState(MovieState &movie) : mMovie(movie)
  260. { mConnected.test_and_set(std::memory_order_relaxed); }
  261. ~AudioState()
  262. {
  263. if(mSource)
  264. alDeleteSources(1, &mSource);
  265. if(mBuffers[0])
  266. alDeleteBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());
  267. av_freep(&mSamples);
  268. }
  269. #ifdef AL_SOFT_events
  270. static void AL_APIENTRY EventCallback(ALenum eventType, ALuint object, ALuint param,
  271. ALsizei length, const ALchar *message, void *userParam);
  272. #endif
  273. #ifdef AL_SOFT_callback_buffer
  274. static ALsizei AL_APIENTRY bufferCallbackC(void *userptr, void *data, ALsizei size)
  275. { return static_cast<AudioState*>(userptr)->bufferCallback(data, size); }
  276. ALsizei bufferCallback(void *data, ALsizei size);
  277. #endif
  278. nanoseconds getClockNoLock();
  279. nanoseconds getClock()
  280. {
  281. std::lock_guard<std::mutex> lock{mSrcMutex};
  282. return getClockNoLock();
  283. }
  284. bool startPlayback();
  285. int getSync();
  286. int decodeFrame();
  287. bool readAudio(uint8_t *samples, unsigned int length, int &sample_skip);
  288. void readAudio(int sample_skip);
  289. int handler();
  290. };
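/* Decodes the video stream into a fixed ring of pictures (mPictQ). The main
 * thread consumes them in updateVideo(), showing the latest frame whose pts is
 * not ahead of the master clock.
 */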
  291. struct VideoState {
  292. MovieState &mMovie;
  293. AVStream *mStream{nullptr};
  294. AVCodecCtxPtr mCodecCtx;
  295. PacketQueue<14*1024*1024> mPackets;
296. /* The pts of the currently displayed frame, and the time (av_gettime) it
297. * was last updated - used to derive a running video pts between updates.
298. */
  299. nanoseconds mDisplayPts{0};
  300. microseconds mDisplayPtsTime{microseconds::min()};
  301. std::mutex mDispPtsMutex;
  302. /* Swscale context for format conversion */
  303. SwsContextPtr mSwscaleCtx;
  304. struct Picture {
  305. AVFramePtr mFrame{};
  306. nanoseconds mPts{nanoseconds::min()};
  307. };
  308. std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
  309. std::atomic<size_t> mPictQRead{0u}, mPictQWrite{1u};
  310. std::mutex mPictQMutex;
  311. std::condition_variable mPictQCond;
  312. SDL_Texture *mImage{nullptr};
  313. int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
  314. bool mFirstUpdate{true};
  315. std::atomic<bool> mEOS{false};
  316. std::atomic<bool> mFinalUpdate{false};
  317. VideoState(MovieState &movie) : mMovie(movie) { }
  318. ~VideoState()
  319. {
  320. if(mImage)
  321. SDL_DestroyTexture(mImage);
  322. mImage = nullptr;
  323. }
  324. nanoseconds getClock();
  325. void display(SDL_Window *screen, SDL_Renderer *renderer);
  326. void updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw);
  327. int handler();
  328. };
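/* Ties everything together: owns the demuxer context, the audio and video
 * stream states, and the parser/decoder threads, and provides the master clock
 * the streams synchronize to.
 */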
  329. struct MovieState {
  330. AVIOContextPtr mIOContext;
  331. AVFormatCtxPtr mFormatCtx;
  332. SyncMaster mAVSyncType{SyncMaster::Default};
  333. microseconds mClockBase{microseconds::min()};
  334. std::atomic<bool> mQuit{false};
  335. AudioState mAudio;
  336. VideoState mVideo;
  337. std::thread mParseThread;
  338. std::thread mAudioThread;
  339. std::thread mVideoThread;
  340. std::string mFilename;
  341. MovieState(std::string fname)
  342. : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
  343. { }
  344. ~MovieState()
  345. {
  346. mQuit = true;
  347. if(mParseThread.joinable())
  348. mParseThread.join();
  349. }
  350. static int decode_interrupt_cb(void *ctx);
  351. bool prepare();
  352. void setTitle(SDL_Window *window);
  353. nanoseconds getClock();
  354. nanoseconds getMasterClock();
  355. nanoseconds getDuration();
  356. int streamComponentOpen(unsigned int stream_index);
  357. int parse_handler();
  358. };
  359. nanoseconds AudioState::getClockNoLock()
  360. {
  361. // The audio clock is the timestamp of the sample currently being heard.
  362. if(alcGetInteger64vSOFT)
  363. {
  364. // If device start time = min, we aren't playing yet.
  365. if(mDeviceStartTime == nanoseconds::min())
  366. return nanoseconds::zero();
  367. // Get the current device clock time and latency.
  368. auto device = alcGetContextsDevice(alcGetCurrentContext());
  369. ALCint64SOFT devtimes[2]{0,0};
  370. alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
  371. auto latency = nanoseconds{devtimes[1]};
  372. auto device_time = nanoseconds{devtimes[0]};
  373. // The clock is simply the current device time relative to the recorded
374. // start time. We can also subtract the latency to get a more accurate
  375. // position of where the audio device actually is in the output stream.
  376. return device_time - mDeviceStartTime - latency;
  377. }
  378. if(mBufferDataSize > 0)
  379. {
  380. if(mDeviceStartTime == nanoseconds::min())
  381. return nanoseconds::zero();
  382. /* With a callback buffer and no device clock, mDeviceStartTime is
  383. * actually the timestamp of the first sample frame played. The audio
  384. * clock, then, is that plus the current source offset.
  385. */
  386. ALint64SOFT offset[2];
  387. if(alGetSourcei64vSOFT)
  388. alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
  389. else
  390. {
  391. ALint ioffset;
  392. alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
  393. offset[0] = ALint64SOFT{ioffset} << 32;
  394. offset[1] = 0;
  395. }
  396. /* NOTE: The source state must be checked last, in case an underrun
  397. * occurs and the source stops between getting the state and retrieving
  398. * the offset+latency.
  399. */
  400. ALint status;
  401. alGetSourcei(mSource, AL_SOURCE_STATE, &status);
  402. nanoseconds pts{};
  403. if(status == AL_PLAYING || status == AL_PAUSED)
  404. pts = mDeviceStartTime - nanoseconds{offset[1]} +
  405. duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
  406. else
  407. {
  408. /* If the source is stopped, the pts of the next sample to be heard
  409. * is the pts of the next sample to be buffered, minus the amount
  410. * already in the buffer ready to play.
  411. */
  412. const size_t woffset{mWritePos.load(std::memory_order_acquire)};
  413. const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
  414. const size_t readable{((woffset >= roffset) ? woffset : (mBufferDataSize+woffset)) -
  415. roffset};
  416. pts = mCurrentPts - nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
  417. }
  418. return pts;
  419. }
  420. /* The source-based clock is based on 4 components:
  421. * 1 - The timestamp of the next sample to buffer (mCurrentPts)
  422. * 2 - The length of the source's buffer queue
  423. * (AudioBufferTime*AL_BUFFERS_QUEUED)
  424. * 3 - The offset OpenAL is currently at in the source (the first value
  425. * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
  426. * 4 - The latency between OpenAL and the DAC (the second value from
  427. * AL_SAMPLE_OFFSET_LATENCY_SOFT)
  428. *
  429. * Subtracting the length of the source queue from the next sample's
  430. * timestamp gives the timestamp of the sample at the start of the source
  431. * queue. Adding the source offset to that results in the timestamp for the
  432. * sample at OpenAL's current position, and subtracting the source latency
  433. * from that gives the timestamp of the sample currently at the DAC.
  434. */
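/* In short, while the source is playing:
 * pts = mCurrentPts - AudioBufferTime*queued + source_offset - latency.
 */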
  435. nanoseconds pts{mCurrentPts};
  436. if(mSource)
  437. {
  438. ALint64SOFT offset[2];
  439. if(alGetSourcei64vSOFT)
  440. alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
  441. else
  442. {
  443. ALint ioffset;
  444. alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
  445. offset[0] = ALint64SOFT{ioffset} << 32;
  446. offset[1] = 0;
  447. }
  448. ALint queued, status;
  449. alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
  450. alGetSourcei(mSource, AL_SOURCE_STATE, &status);
  451. /* If the source is AL_STOPPED, then there was an underrun and all
  452. * buffers are processed, so ignore the source queue. The audio thread
  453. * will put the source into an AL_INITIAL state and clear the queue
  454. * when it starts recovery.
  455. */
  456. if(status != AL_STOPPED)
  457. {
  458. pts -= AudioBufferTime*queued;
  459. pts += duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
  460. }
  461. /* Don't offset by the latency if the source isn't playing. */
  462. if(status == AL_PLAYING)
  463. pts -= nanoseconds{offset[1]};
  464. }
  465. return std::max(pts, nanoseconds::zero());
  466. }
  467. bool AudioState::startPlayback()
  468. {
  469. const size_t woffset{mWritePos.load(std::memory_order_acquire)};
  470. const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
  471. const size_t readable{((woffset >= roffset) ? woffset : (mBufferDataSize+woffset)) -
  472. roffset};
  473. if(mBufferDataSize > 0)
  474. {
  475. if(readable == 0)
  476. return false;
  477. if(!alcGetInteger64vSOFT)
  478. mDeviceStartTime = mCurrentPts -
  479. nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
  480. }
  481. else
  482. {
  483. ALint queued{};
  484. alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
  485. if(queued == 0) return false;
  486. }
  487. alSourcePlay(mSource);
  488. if(alcGetInteger64vSOFT)
  489. {
  490. /* Subtract the total buffer queue time from the current pts to get the
  491. * pts of the start of the queue.
  492. */
  493. int64_t srctimes[2]{0,0};
  494. alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
  495. auto device_time = nanoseconds{srctimes[1]};
  496. auto src_offset = duration_cast<nanoseconds>(fixed32{srctimes[0]}) /
  497. mCodecCtx->sample_rate;
  498. /* The mixer may have ticked and incremented the device time and sample
  499. * offset, so subtract the source offset from the device time to get
  500. * the device time the source started at. Also subtract startpts to get
  501. * the device time the stream would have started at to reach where it
  502. * is now.
  503. */
  504. if(mBufferDataSize > 0)
  505. {
  506. nanoseconds startpts{mCurrentPts -
  507. nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate};
  508. mDeviceStartTime = device_time - src_offset - startpts;
  509. }
  510. else
  511. {
  512. nanoseconds startpts{mCurrentPts - AudioBufferTotalTime};
  513. mDeviceStartTime = device_time - src_offset - startpts;
  514. }
  515. }
  516. return true;
  517. }
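/* Returns how many sample frames the audio should skip (positive, the audio
 * clock is behind the master clock) or duplicate (negative, it's ahead), using
 * a running exponential average of the clock difference and capping each skip
 * at AudioSampleCorrectionMax.
 */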
  518. int AudioState::getSync()
  519. {
  520. if(mMovie.mAVSyncType == SyncMaster::Audio)
  521. return 0;
  522. auto ref_clock = mMovie.getMasterClock();
  523. auto diff = ref_clock - getClockNoLock();
  524. if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
  525. {
  526. /* Difference is TOO big; reset accumulated average */
  527. mClockDiffAvg = seconds_d64::zero();
  528. return 0;
  529. }
  530. /* Accumulate the diffs */
  531. mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
  532. auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
  533. if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
  534. return 0;
  535. /* Constrain the per-update difference to avoid exceedingly large skips */
  536. diff = std::min<nanoseconds>(diff, AudioSampleCorrectionMax);
  537. return static_cast<int>(duration_cast<seconds>(diff*mCodecCtx->sample_rate).count());
  538. }
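/* Decodes one frame of audio, converts it to the OpenAL target format with
 * swresample, and returns the number of converted sample frames (0 at the end
 * of the stream or on quit).
 */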
  539. int AudioState::decodeFrame()
  540. {
  541. while(!mMovie.mQuit.load(std::memory_order_relaxed))
  542. {
  543. int ret;
  544. while((ret=avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get())) == AVERROR(EAGAIN))
  545. mPackets.sendTo(mCodecCtx.get());
  546. if(ret != 0)
  547. {
  548. if(ret == AVErrorEOF) break;
  549. std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
  550. continue;
  551. }
  552. if(mDecodedFrame->nb_samples <= 0)
  553. continue;
554. /* Update the current pts from the frame's best-effort timestamp, if provided. */
  555. if(mDecodedFrame->best_effort_timestamp != AVNoPtsValue)
  556. mCurrentPts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
  557. static_cast<double>(mDecodedFrame->best_effort_timestamp)});
  558. if(mDecodedFrame->nb_samples > mSamplesMax)
  559. {
  560. av_freep(&mSamples);
  561. av_samples_alloc(&mSamples, nullptr, mCodecCtx->channels, mDecodedFrame->nb_samples,
  562. mDstSampleFmt, 0);
  563. mSamplesMax = mDecodedFrame->nb_samples;
  564. }
  565. /* Return the amount of sample frames converted */
  566. int data_size{swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
  567. const_cast<const uint8_t**>(mDecodedFrame->data), mDecodedFrame->nb_samples)};
  568. av_frame_unref(mDecodedFrame.get());
  569. return data_size;
  570. }
  571. return 0;
  572. }
573. /* Duplicates the sample frame at `in` into `out`, `count` times. The frame
574. * size is a multiple of the template type's size.
575. */
  576. template<typename T>
  577. static void sample_dup(uint8_t *out, const uint8_t *in, size_t count, size_t frame_size)
  578. {
  579. auto *sample = reinterpret_cast<const T*>(in);
  580. auto *dst = reinterpret_cast<T*>(out);
  581. if(frame_size == sizeof(T))
  582. std::fill_n(dst, count, *sample);
  583. else
  584. {
  585. /* NOTE: frame_size is a multiple of sizeof(T). */
  586. size_t type_mult{frame_size / sizeof(T)};
  587. size_t i{0};
  588. std::generate_n(dst, count*type_mult,
  589. [sample,type_mult,&i]() -> T
  590. {
  591. T ret = sample[i];
  592. i = (i+1)%type_mult;
  593. return ret;
  594. }
  595. );
  596. }
  597. }
  598. bool AudioState::readAudio(uint8_t *samples, unsigned int length, int &sample_skip)
  599. {
  600. unsigned int audio_size{0};
601. /* Read the next chunk of decoded data and fill the provided sample
602. * buffer; the caller queues it on the source. */
  603. length /= mFrameSize;
  604. while(mSamplesLen > 0 && audio_size < length)
  605. {
  606. unsigned int rem{length - audio_size};
  607. if(mSamplesPos >= 0)
  608. {
  609. const auto len = static_cast<unsigned int>(mSamplesLen - mSamplesPos);
  610. if(rem > len) rem = len;
  611. std::copy_n(mSamples + static_cast<unsigned int>(mSamplesPos)*mFrameSize,
  612. rem*mFrameSize, samples);
  613. }
  614. else
  615. {
  616. rem = std::min(rem, static_cast<unsigned int>(-mSamplesPos));
  617. /* Add samples by copying the first sample */
  618. if((mFrameSize&7) == 0)
  619. sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
  620. else if((mFrameSize&3) == 0)
  621. sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
  622. else if((mFrameSize&1) == 0)
  623. sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
  624. else
  625. sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
  626. }
  627. mSamplesPos += rem;
  628. mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
  629. samples += rem*mFrameSize;
  630. audio_size += rem;
  631. while(mSamplesPos >= mSamplesLen)
  632. {
  633. int frame_len = decodeFrame();
  634. if(frame_len <= 0) break;
  635. mSamplesLen = frame_len;
  636. mSamplesPos = std::min(mSamplesLen, sample_skip);
  637. sample_skip -= mSamplesPos;
  638. // Adjust the device start time and current pts by the amount we're
  639. // skipping/duplicating, so that the clock remains correct for the
  640. // current stream position.
  641. auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
  642. mDeviceStartTime -= skip;
  643. mCurrentPts += skip;
  644. continue;
  645. }
  646. }
  647. if(audio_size <= 0)
  648. return false;
  649. if(audio_size < length)
  650. {
  651. const unsigned int rem{length - audio_size};
  652. std::fill_n(samples, rem*mFrameSize,
  653. (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
  654. mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
  655. audio_size += rem;
  656. }
  657. return true;
  658. }
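/* Variant used with the callback buffer: writes converted samples directly
 * into the mBufferData ring buffer. One byte is always left unused so that
 * mReadPos == mWritePos unambiguously means the buffer is empty.
 */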
  659. void AudioState::readAudio(int sample_skip)
  660. {
  661. size_t woffset{mWritePos.load(std::memory_order_acquire)};
  662. while(mSamplesLen > 0)
  663. {
  664. const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
  665. if(mSamplesPos < 0)
  666. {
  667. size_t rem{(((roffset > woffset) ? roffset-1
  668. : ((roffset == 0) ? mBufferDataSize-1
  669. : mBufferDataSize)) - woffset) / mFrameSize};
  670. rem = std::min<size_t>(rem, static_cast<ALuint>(-mSamplesPos));
  671. if(rem == 0) break;
  672. auto *splout{&mBufferData[woffset]};
  673. if((mFrameSize&7) == 0)
  674. sample_dup<uint64_t>(splout, mSamples, rem, mFrameSize);
  675. else if((mFrameSize&3) == 0)
  676. sample_dup<uint32_t>(splout, mSamples, rem, mFrameSize);
  677. else if((mFrameSize&1) == 0)
  678. sample_dup<uint16_t>(splout, mSamples, rem, mFrameSize);
  679. else
  680. sample_dup<uint8_t>(splout, mSamples, rem, mFrameSize);
  681. woffset += rem * mFrameSize;
  682. if(woffset == mBufferDataSize)
  683. woffset = 0;
  684. mWritePos.store(woffset, std::memory_order_release);
  685. mSamplesPos += static_cast<int>(rem);
  686. mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
  687. continue;
  688. }
  689. const size_t boffset{static_cast<ALuint>(mSamplesPos) * size_t{mFrameSize}};
  690. const size_t nbytes{static_cast<ALuint>(mSamplesLen)*size_t{mFrameSize} -
  691. boffset};
  692. if(roffset > woffset)
  693. {
  694. const size_t writable{roffset-woffset-1};
  695. if(writable < nbytes) break;
  696. memcpy(&mBufferData[woffset], mSamples+boffset, nbytes);
  697. woffset += nbytes;
  698. }
  699. else
  700. {
  701. const size_t writable{mBufferDataSize+roffset-woffset-1};
  702. if(writable < nbytes) break;
  703. const size_t todo1{std::min<size_t>(nbytes, mBufferDataSize-woffset)};
  704. const size_t todo2{nbytes - todo1};
  705. memcpy(&mBufferData[woffset], mSamples+boffset, todo1);
  706. woffset += todo1;
  707. if(woffset == mBufferDataSize)
  708. {
  709. woffset = 0;
  710. if(todo2 > 0)
  711. {
  712. memcpy(&mBufferData[woffset], mSamples+boffset+todo1, todo2);
  713. woffset += todo2;
  714. }
  715. }
  716. }
  717. mWritePos.store(woffset, std::memory_order_release);
  718. mCurrentPts += nanoseconds{seconds{mSamplesLen-mSamplesPos}} / mCodecCtx->sample_rate;
  719. do {
  720. mSamplesLen = decodeFrame();
  721. if(mSamplesLen <= 0) break;
  722. mSamplesPos = std::min(mSamplesLen, sample_skip);
  723. sample_skip -= mSamplesPos;
  724. auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
  725. mDeviceStartTime -= skip;
  726. mCurrentPts += skip;
  727. } while(mSamplesPos >= mSamplesLen);
  728. }
  729. }
  730. #ifdef AL_SOFT_events
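/* AL event handler: wakes the audio thread when a buffer completes so it can
 * refill the queue promptly, and flags the state as disconnected when the
 * device is lost.
 */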
  731. void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALuint param,
  732. ALsizei length, const ALchar *message, void *userParam)
  733. {
  734. auto self = static_cast<AudioState*>(userParam);
  735. if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
  736. {
737. /* Temporarily lock the source mutex to ensure the audio thread isn't
738. * between checking the processed count and going to sleep.
  739. */
  740. std::unique_lock<std::mutex>{self->mSrcMutex}.unlock();
  741. self->mSrcCond.notify_one();
  742. return;
  743. }
  744. std::cout<< "\n---- AL Event on AudioState "<<self<<" ----\nEvent: ";
  745. switch(eventType)
  746. {
  747. case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
  748. case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
  749. case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
  750. default:
  751. std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<std::dec<<
  752. std::setw(0)<<std::setfill(' '); break;
  753. }
  754. std::cout<< "\n"
  755. "Object ID: "<<object<<"\n"
  756. "Parameter: "<<param<<"\n"
  757. "Message: "<<std::string{message, static_cast<ALuint>(length)}<<"\n----"<<
  758. std::endl;
  759. if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
  760. {
  761. {
  762. std::lock_guard<std::mutex> lock{self->mSrcMutex};
  763. self->mConnected.clear(std::memory_order_release);
  764. }
  765. self->mSrcCond.notify_one();
  766. }
  767. }
  768. #endif
  769. #ifdef AL_SOFT_callback_buffer
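/* Called by the OpenAL mixer to pull up to `size` bytes of audio. Copies what
 * is currently available from the mBufferData ring buffer and returns the
 * number of bytes provided.
 */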
  770. ALsizei AudioState::bufferCallback(void *data, ALsizei size)
  771. {
  772. ALsizei got{0};
  773. size_t roffset{mReadPos.load(std::memory_order_acquire)};
  774. while(got < size)
  775. {
  776. const size_t woffset{mWritePos.load(std::memory_order_relaxed)};
  777. if(woffset == roffset) break;
  778. size_t todo{((woffset < roffset) ? mBufferDataSize : woffset) - roffset};
  779. todo = std::min<size_t>(todo, static_cast<ALuint>(size-got));
  780. memcpy(data, &mBufferData[roffset], todo);
  781. data = static_cast<ALbyte*>(data) + todo;
  782. got += static_cast<ALsizei>(todo);
  783. roffset += todo;
  784. if(roffset == mBufferDataSize)
  785. roffset = 0;
  786. }
  787. mReadPos.store(roffset, std::memory_order_release);
  788. return got;
  789. }
  790. #endif
  791. int AudioState::handler()
  792. {
  793. std::unique_lock<std::mutex> srclock{mSrcMutex, std::defer_lock};
  794. milliseconds sleep_time{AudioBufferTime / 3};
  795. ALenum fmt;
  796. #ifdef AL_SOFT_events
  797. const std::array<ALenum,3> evt_types{{
  798. AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
  799. AL_EVENT_TYPE_DISCONNECTED_SOFT}};
  800. if(alEventControlSOFT)
  801. {
  802. alEventControlSOFT(evt_types.size(), evt_types.data(), AL_TRUE);
  803. alEventCallbackSOFT(EventCallback, this);
  804. sleep_time = AudioBufferTotalTime;
  805. }
  806. #endif
  807. #ifdef AL_SOFT_bformat_ex
  808. const bool has_bfmt_ex{alIsExtensionPresent("AL_SOFT_bformat_ex") != AL_FALSE};
  809. ALenum ambi_layout{AL_FUMA_SOFT};
  810. ALenum ambi_scale{AL_FUMA_SOFT};
  811. #endif
  812. /* Find a suitable format for OpenAL. */
  813. mDstChanLayout = 0;
  814. mFormat = AL_NONE;
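/* Selection strategy: prefer 32-bit float when AL_EXT_FLOAT32 is available (or
 * 8-bit for U8 sources), otherwise fall back to 16-bit. Within each, use the
 * source's 7.1/5.1/mono layout when AL_EXT_MCFORMATS provides a matching
 * format, treat a blank layout with 4+ channels as B-Format via AL_EXT_BFORMAT,
 * and otherwise downmix to stereo.
 */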
  815. if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
  816. alIsExtensionPresent("AL_EXT_FLOAT32"))
  817. {
  818. mDstSampleFmt = AV_SAMPLE_FMT_FLT;
  819. mFrameSize = 4;
  820. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
  821. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  822. (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
  823. {
  824. mDstChanLayout = mCodecCtx->channel_layout;
  825. mFrameSize *= 8;
  826. mFormat = fmt;
  827. }
  828. if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
  829. mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
  830. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  831. (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
  832. {
  833. mDstChanLayout = mCodecCtx->channel_layout;
  834. mFrameSize *= 6;
  835. mFormat = fmt;
  836. }
  837. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
  838. {
  839. mDstChanLayout = mCodecCtx->channel_layout;
  840. mFrameSize *= 1;
  841. mFormat = AL_FORMAT_MONO_FLOAT32;
  842. }
  843. /* Assume 3D B-Format (ambisonics) if the channel layout is blank and
844. * there are 4 or more channels. FFmpeg/libavcodec otherwise seems to
  845. * have no way to specify if the source is actually B-Format (let alone
  846. * if it's 2D or 3D).
  847. */
  848. if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
  849. alIsExtensionPresent("AL_EXT_BFORMAT") &&
  850. (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32")) != AL_NONE && fmt != -1)
  851. {
  852. int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
  853. if((order+1)*(order+1) == mCodecCtx->channels ||
  854. (order+1)*(order+1) + 2 == mCodecCtx->channels)
  855. {
  856. /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
  857. * is 4 channels for 3D buffers.
  858. */
  859. mFrameSize *= 4;
  860. mFormat = fmt;
  861. }
  862. }
  863. if(!mFormat)
  864. {
  865. mDstChanLayout = AV_CH_LAYOUT_STEREO;
  866. mFrameSize *= 2;
  867. mFormat = AL_FORMAT_STEREO_FLOAT32;
  868. }
  869. }
  870. if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
  871. {
  872. mDstSampleFmt = AV_SAMPLE_FMT_U8;
  873. mFrameSize = 1;
  874. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
  875. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  876. (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
  877. {
  878. mDstChanLayout = mCodecCtx->channel_layout;
  879. mFrameSize *= 8;
  880. mFormat = fmt;
  881. }
  882. if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
  883. mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
  884. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  885. (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
  886. {
  887. mDstChanLayout = mCodecCtx->channel_layout;
  888. mFrameSize *= 6;
  889. mFormat = fmt;
  890. }
  891. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
  892. {
  893. mDstChanLayout = mCodecCtx->channel_layout;
  894. mFrameSize *= 1;
  895. mFormat = AL_FORMAT_MONO8;
  896. }
  897. if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
  898. alIsExtensionPresent("AL_EXT_BFORMAT") &&
  899. (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D8")) != AL_NONE && fmt != -1)
  900. {
  901. int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
  902. if((order+1)*(order+1) == mCodecCtx->channels ||
  903. (order+1)*(order+1) + 2 == mCodecCtx->channels)
  904. {
  905. mFrameSize *= 4;
  906. mFormat = fmt;
  907. }
  908. }
  909. if(!mFormat)
  910. {
  911. mDstChanLayout = AV_CH_LAYOUT_STEREO;
  912. mFrameSize *= 2;
  913. mFormat = AL_FORMAT_STEREO8;
  914. }
  915. }
  916. if(!mFormat)
  917. {
  918. mDstSampleFmt = AV_SAMPLE_FMT_S16;
  919. mFrameSize = 2;
  920. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
  921. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  922. (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
  923. {
  924. mDstChanLayout = mCodecCtx->channel_layout;
  925. mFrameSize *= 8;
  926. mFormat = fmt;
  927. }
  928. if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
  929. mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
  930. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  931. (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
  932. {
  933. mDstChanLayout = mCodecCtx->channel_layout;
  934. mFrameSize *= 6;
  935. mFormat = fmt;
  936. }
  937. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
  938. {
  939. mDstChanLayout = mCodecCtx->channel_layout;
  940. mFrameSize *= 1;
  941. mFormat = AL_FORMAT_MONO16;
  942. }
  943. if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
  944. alIsExtensionPresent("AL_EXT_BFORMAT") &&
  945. (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D16")) != AL_NONE && fmt != -1)
  946. {
  947. int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
  948. if((order+1)*(order+1) == mCodecCtx->channels ||
  949. (order+1)*(order+1) + 2 == mCodecCtx->channels)
  950. {
  951. mFrameSize *= 4;
  952. mFormat = fmt;
  953. }
  954. }
  955. if(!mFormat)
  956. {
  957. mDstChanLayout = AV_CH_LAYOUT_STEREO;
  958. mFrameSize *= 2;
  959. mFormat = AL_FORMAT_STEREO16;
  960. }
  961. }
  962. void *samples{nullptr};
  963. ALsizei buffer_len{0};
  964. mSamples = nullptr;
  965. mSamplesMax = 0;
  966. mSamplesPos = 0;
  967. mSamplesLen = 0;
  968. mDecodedFrame.reset(av_frame_alloc());
  969. if(!mDecodedFrame)
  970. {
  971. std::cerr<< "Failed to allocate audio frame" <<std::endl;
  972. goto finish;
  973. }
  974. if(!mDstChanLayout)
  975. {
  976. /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
  977. * we have to drop any extra channels.
  978. */
  979. mSwresCtx.reset(swr_alloc_set_opts(nullptr,
  980. (1_i64<<4)-1, mDstSampleFmt, mCodecCtx->sample_rate,
  981. (1_i64<<mCodecCtx->channels)-1, mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
  982. 0, nullptr));
  983. /* Note that ffmpeg/libavcodec has no method to check the ambisonic
  984. * channel order and normalization, so we can only assume AmbiX as the
985. * de facto standard. This is not true for .amb files, which use FuMa.
  986. */
  987. std::vector<double> mtx(64*64, 0.0);
  988. #ifdef AL_SOFT_bformat_ex
  989. ambi_layout = AL_ACN_SOFT;
  990. ambi_scale = AL_SN3D_SOFT;
  991. if(has_bfmt_ex)
  992. {
  993. /* An identity matrix that doesn't remix any channels. */
  994. std::cout<< "Found AL_SOFT_bformat_ex" <<std::endl;
  995. mtx[0 + 0*64] = 1.0;
  996. mtx[1 + 1*64] = 1.0;
  997. mtx[2 + 2*64] = 1.0;
  998. mtx[3 + 3*64] = 1.0;
  999. }
  1000. else
  1001. #endif
  1002. {
  1003. std::cout<< "Found AL_EXT_BFORMAT" <<std::endl;
  1004. /* Without AL_SOFT_bformat_ex, OpenAL only supports FuMa channel
  1005. * ordering and normalization, so a custom matrix is needed to
  1006. * scale and reorder the source from AmbiX.
  1007. */
  1008. mtx[0 + 0*64] = std::sqrt(0.5);
  1009. mtx[3 + 1*64] = 1.0;
  1010. mtx[1 + 2*64] = 1.0;
  1011. mtx[2 + 3*64] = 1.0;
  1012. }
  1013. swr_set_matrix(mSwresCtx.get(), mtx.data(), 64);
  1014. }
  1015. else
  1016. mSwresCtx.reset(swr_alloc_set_opts(nullptr,
  1017. static_cast<int64_t>(mDstChanLayout), mDstSampleFmt, mCodecCtx->sample_rate,
  1018. mCodecCtx->channel_layout ? static_cast<int64_t>(mCodecCtx->channel_layout)
  1019. : av_get_default_channel_layout(mCodecCtx->channels),
  1020. mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
  1021. 0, nullptr));
  1022. if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
  1023. {
  1024. std::cerr<< "Failed to initialize audio converter" <<std::endl;
  1025. goto finish;
  1026. }
  1027. alGenBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());
  1028. alGenSources(1, &mSource);
  1029. if(DirectOutMode)
  1030. alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, DirectOutMode);
  1031. if(EnableWideStereo)
  1032. {
  1033. const float angles[2]{static_cast<float>(M_PI / 3.0), static_cast<float>(-M_PI / 3.0)};
  1034. alSourcefv(mSource, AL_STEREO_ANGLES, angles);
  1035. }
  1036. #ifdef AL_SOFT_bformat_ex
  1037. if(has_bfmt_ex)
  1038. {
  1039. for(ALuint bufid : mBuffers)
  1040. {
  1041. alBufferi(bufid, AL_AMBISONIC_LAYOUT_SOFT, ambi_layout);
  1042. alBufferi(bufid, AL_AMBISONIC_SCALING_SOFT, ambi_scale);
  1043. }
  1044. }
  1045. #endif
  1046. if(alGetError() != AL_NO_ERROR)
  1047. goto finish;
  1048. #ifdef AL_SOFT_callback_buffer
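/* With the callback-buffer extension, a single AL buffer streams audio on
 * demand through bufferCallback() and the mBufferData ring buffer (sized for
 * AudioBufferTotalTime). Otherwise, samples are read into a temporary buffer
 * and queued as AudioBufferCount buffers of AudioBufferTime each.
 */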
  1049. if(alBufferCallbackSOFT)
  1050. {
  1051. alBufferCallbackSOFT(mBuffers[0], mFormat, mCodecCtx->sample_rate, bufferCallbackC, this,
  1052. 0);
  1053. alSourcei(mSource, AL_BUFFER, static_cast<ALint>(mBuffers[0]));
  1054. if(alGetError() != AL_NO_ERROR)
  1055. {
  1056. fprintf(stderr, "Failed to set buffer callback\n");
  1057. alSourcei(mSource, AL_BUFFER, 0);
  1058. buffer_len = static_cast<int>(duration_cast<seconds>(mCodecCtx->sample_rate *
  1059. AudioBufferTime).count() * mFrameSize);
  1060. }
  1061. else
  1062. {
  1063. mBufferDataSize = static_cast<size_t>(duration_cast<seconds>(mCodecCtx->sample_rate *
  1064. AudioBufferTotalTime).count()) * mFrameSize;
  1065. mBufferData.reset(new uint8_t[mBufferDataSize]);
  1066. mReadPos.store(0, std::memory_order_relaxed);
  1067. mWritePos.store(0, std::memory_order_relaxed);
  1068. ALCint refresh{};
  1069. alcGetIntegerv(alcGetContextsDevice(alcGetCurrentContext()), ALC_REFRESH, 1, &refresh);
  1070. sleep_time = milliseconds{seconds{1}} / refresh;
  1071. }
  1072. }
  1073. else
  1074. #endif
  1075. buffer_len = static_cast<int>(duration_cast<seconds>(mCodecCtx->sample_rate *
  1076. AudioBufferTime).count() * mFrameSize);
  1077. if(buffer_len > 0)
  1078. samples = av_malloc(static_cast<ALuint>(buffer_len));
  1079. /* Prefill the codec buffer. */
  1080. do {
  1081. const int ret{mPackets.sendTo(mCodecCtx.get())};
  1082. if(ret == AVERROR(EAGAIN) || ret == AVErrorEOF)
  1083. break;
  1084. } while(1);
  1085. srclock.lock();
  1086. if(alcGetInteger64vSOFT)
  1087. {
  1088. int64_t devtime{};
  1089. alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT,
  1090. 1, &devtime);
  1091. mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
  1092. }
  1093. mSamplesLen = decodeFrame();
  1094. if(mSamplesLen > 0)
  1095. {
  1096. mSamplesPos = std::min(mSamplesLen, getSync());
  1097. auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
  1098. mDeviceStartTime -= skip;
  1099. mCurrentPts += skip;
  1100. }
  1101. while(!mMovie.mQuit.load(std::memory_order_relaxed)
  1102. && mConnected.test_and_set(std::memory_order_relaxed))
  1103. {
  1104. ALenum state;
  1105. if(mBufferDataSize > 0)
  1106. {
  1107. alGetSourcei(mSource, AL_SOURCE_STATE, &state);
  1108. readAudio(getSync());
  1109. }
  1110. else
  1111. {
  1112. ALint processed, queued;
  1113. /* First remove any processed buffers. */
  1114. alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
  1115. while(processed > 0)
  1116. {
  1117. ALuint bid;
  1118. alSourceUnqueueBuffers(mSource, 1, &bid);
  1119. --processed;
  1120. }
  1121. /* Refill the buffer queue. */
  1122. int sync_skip{getSync()};
  1123. alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
  1124. while(static_cast<ALuint>(queued) < mBuffers.size())
  1125. {
  1126. /* Read the next chunk of data, filling the buffer, and queue
  1127. * it on the source.
  1128. */
  1129. const bool got_audio{readAudio(static_cast<uint8_t*>(samples),
  1130. static_cast<ALuint>(buffer_len), sync_skip)};
  1131. if(!got_audio) break;
  1132. const ALuint bufid{mBuffers[mBufferIdx]};
  1133. mBufferIdx = static_cast<ALuint>((mBufferIdx+1) % mBuffers.size());
  1134. alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);
  1135. alSourceQueueBuffers(mSource, 1, &bufid);
  1136. ++queued;
  1137. }
  1138. /* Check that the source is playing. */
  1139. alGetSourcei(mSource, AL_SOURCE_STATE, &state);
  1140. if(state == AL_STOPPED)
  1141. {
  1142. /* AL_STOPPED means there was an underrun. Clear the buffer
  1143. * queue since this likely means we're late, and rewind the
  1144. * source to get it back into an AL_INITIAL state.
  1145. */
  1146. alSourceRewind(mSource);
  1147. alSourcei(mSource, AL_BUFFER, 0);
  1148. if(alcGetInteger64vSOFT)
  1149. {
  1150. /* Also update the device start time with the current
  1151. * device clock, so the decoder knows we're running behind.
  1152. */
  1153. int64_t devtime{};
  1154. alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
  1155. ALC_DEVICE_CLOCK_SOFT, 1, &devtime);
  1156. mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
  1157. }
  1158. continue;
  1159. }
  1160. }
  1161. /* (re)start the source if needed, and wait for a buffer to finish */
  1162. if(state != AL_PLAYING && state != AL_PAUSED)
  1163. {
  1164. if(!startPlayback())
  1165. break;
  1166. }
  1167. if(alGetError() != AL_NO_ERROR)
  1168. return false;
  1169. mSrcCond.wait_for(srclock, sleep_time);
  1170. }
  1171. alSourceRewind(mSource);
  1172. alSourcei(mSource, AL_BUFFER, 0);
  1173. srclock.unlock();
  1174. finish:
  1175. av_freep(&samples);
  1176. #ifdef AL_SOFT_events
  1177. if(alEventControlSOFT)
  1178. {
  1179. alEventControlSOFT(evt_types.size(), evt_types.data(), AL_FALSE);
  1180. alEventCallbackSOFT(nullptr, nullptr);
  1181. }
  1182. #endif
  1183. return 0;
  1184. }
  1185. nanoseconds VideoState::getClock()
  1186. {
  1187. /* NOTE: This returns incorrect times while not playing. */
  1188. std::lock_guard<std::mutex> _{mDispPtsMutex};
  1189. if(mDisplayPtsTime == microseconds::min())
  1190. return nanoseconds::zero();
  1191. auto delta = get_avtime() - mDisplayPtsTime;
  1192. return mDisplayPts + delta;
  1193. }
  1194. /* Called by VideoState::updateVideo to display the next video frame. */
  1195. void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
  1196. {
  1197. if(!mImage)
  1198. return;
  1199. double aspect_ratio;
  1200. int win_w, win_h;
  1201. int w, h, x, y;
  1202. if(mCodecCtx->sample_aspect_ratio.num == 0)
  1203. aspect_ratio = 0.0;
  1204. else
  1205. {
  1206. aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
  1207. mCodecCtx->height;
  1208. }
  1209. if(aspect_ratio <= 0.0)
  1210. aspect_ratio = static_cast<double>(mCodecCtx->width) / mCodecCtx->height;
  1211. SDL_GetWindowSize(screen, &win_w, &win_h);
  1212. h = win_h;
  1213. w = (static_cast<int>(std::rint(h * aspect_ratio)) + 3) & ~3;
  1214. if(w > win_w)
  1215. {
  1216. w = win_w;
  1217. h = (static_cast<int>(std::rint(w / aspect_ratio)) + 3) & ~3;
  1218. }
  1219. x = (win_w - w) / 2;
  1220. y = (win_h - h) / 2;
  1221. SDL_Rect src_rect{ 0, 0, mWidth, mHeight };
  1222. SDL_Rect dst_rect{ x, y, w, h };
  1223. SDL_RenderCopy(renderer, mImage, &src_rect, &dst_rect);
  1224. SDL_RenderPresent(renderer);
  1225. }
  1226. /* Called regularly on the main thread where the SDL_Renderer was created. It
  1227. * handles updating the textures of decoded frames and displaying the latest
  1228. * frame.
  1229. */
  1230. void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw)
  1231. {
  1232. size_t read_idx{mPictQRead.load(std::memory_order_relaxed)};
  1233. Picture *vp{&mPictQ[read_idx]};
  1234. auto clocktime = mMovie.getMasterClock();
  1235. bool updated{false};
  1236. while(1)
  1237. {
  1238. size_t next_idx{(read_idx+1)%mPictQ.size()};
  1239. if(next_idx == mPictQWrite.load(std::memory_order_acquire))
  1240. break;
  1241. Picture *nextvp{&mPictQ[next_idx]};
  1242. if(clocktime < nextvp->mPts)
  1243. break;
  1244. vp = nextvp;
  1245. updated = true;
  1246. read_idx = next_idx;
  1247. }
  1248. if(mMovie.mQuit.load(std::memory_order_relaxed))
  1249. {
  1250. if(mEOS)
  1251. mFinalUpdate = true;
  1252. mPictQRead.store(read_idx, std::memory_order_release);
  1253. std::unique_lock<std::mutex>{mPictQMutex}.unlock();
  1254. mPictQCond.notify_one();
  1255. return;
  1256. }
  1257. if(updated)
  1258. {
  1259. mPictQRead.store(read_idx, std::memory_order_release);
  1260. std::unique_lock<std::mutex>{mPictQMutex}.unlock();
  1261. mPictQCond.notify_one();
  1262. /* allocate or resize the buffer! */
  1263. bool fmt_updated{false};
  1264. if(!mImage || mWidth != mCodecCtx->width || mHeight != mCodecCtx->height)
  1265. {
  1266. fmt_updated = true;
  1267. if(mImage)
  1268. SDL_DestroyTexture(mImage);
  1269. mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
  1270. mCodecCtx->coded_width, mCodecCtx->coded_height);
  1271. if(!mImage)
  1272. std::cerr<< "Failed to create YV12 texture!" <<std::endl;
  1273. mWidth = mCodecCtx->width;
  1274. mHeight = mCodecCtx->height;
  1275. if(mFirstUpdate && mWidth > 0 && mHeight > 0)
  1276. {
  1277. /* For the first update, set the window size to the video size. */
  1278. mFirstUpdate = false;
  1279. int w{mWidth};
  1280. int h{mHeight};
  1281. if(mCodecCtx->sample_aspect_ratio.den != 0)
  1282. {
  1283. double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
  1284. if(aspect_ratio >= 1.0)
  1285. w = static_cast<int>(w*aspect_ratio + 0.5);
  1286. else if(aspect_ratio > 0.0)
  1287. h = static_cast<int>(h/aspect_ratio + 0.5);
  1288. }
  1289. SDL_SetWindowSize(screen, w, h);
  1290. }
  1291. }
        if(mImage)
        {
            AVFrame *frame{vp->mFrame.get()};
            void *pixels{nullptr};
            int pitch{0};

            if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
                SDL_UpdateYUVTexture(mImage, nullptr,
                    frame->data[0], frame->linesize[0],
                    frame->data[1], frame->linesize[1],
                    frame->data[2], frame->linesize[2]
                );
            else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0)
                std::cerr<< "Failed to lock texture" <<std::endl;
            else
            {
                // Convert the image into YUV format that SDL uses
                int coded_w{mCodecCtx->coded_width};
                int coded_h{mCodecCtx->coded_height};
                int w{mCodecCtx->width};
                int h{mCodecCtx->height};
                if(!mSwscaleCtx || fmt_updated)
                {
                    mSwscaleCtx.reset(sws_getContext(
                        w, h, mCodecCtx->pix_fmt,
                        w, h, AV_PIX_FMT_YUV420P, 0,
                        nullptr, nullptr, nullptr
                    ));
                }

                /* point pict at the queue */
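                /* IYUV stores a full-resolution Y plane followed by
                 * quarter-size U and V planes, so the chroma rows use half
                 * the luma pitch.
                 */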
                uint8_t *pict_data[3];
                pict_data[0] = static_cast<uint8_t*>(pixels);
                pict_data[1] = pict_data[0] + coded_w*coded_h;
                pict_data[2] = pict_data[1] + coded_w*coded_h/4;

                int pict_linesize[3];
                pict_linesize[0] = pitch;
                pict_linesize[1] = pitch / 2;
                pict_linesize[2] = pitch / 2;

                sws_scale(mSwscaleCtx.get(), reinterpret_cast<uint8_t**>(frame->data), frame->linesize,
                    0, h, pict_data, pict_linesize);
                SDL_UnlockTexture(mImage);
            }
        }

        redraw = true;
    }

    if(redraw)
    {
        /* Show the picture! */
        display(screen, renderer);
    }
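    /* Record the PTS of the frame just shown and the time it was displayed,
     * so the video clock can be extrapolated between updates.
     */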
    if(updated)
    {
        auto disp_time = get_avtime();

        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPts = vp->mPts;
        mDisplayPtsTime = disp_time;
    }
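    /* At end of stream, once the picture queue has been drained, flag the
     * final update so the decoder thread can finish.
     */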
    if(mEOS.load(std::memory_order_acquire))
    {
        if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire))
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>{mPictQMutex}.unlock();
            mPictQCond.notify_one();
        }
    }
}
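/* Video decoding thread. Receives frames from the codec, timestamps them, and
 * places them in the picture queue for the main thread to display.
 */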
int VideoState::handler()
{
    std::for_each(mPictQ.begin(), mPictQ.end(),
        [](Picture &pict) -> void
        { pict.mFrame = AVFramePtr{av_frame_alloc()}; });

    /* Prefill the codec buffer. */
    do {
        const int ret{mPackets.sendTo(mCodecCtx.get())};
        if(ret == AVERROR(EAGAIN) || ret == AVErrorEOF)
            break;
    } while(1);

    {
        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPtsTime = get_avtime();
    }

    auto current_pts = nanoseconds::zero();
    while(!mMovie.mQuit.load(std::memory_order_relaxed))
    {
        size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)};
        Picture *vp{&mPictQ[write_idx]};

        /* Retrieve video frame. */
        AVFrame *decoded_frame{vp->mFrame.get()};
        int ret;
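        /* Keep feeding packets until the decoder produces a frame; EAGAIN
         * means it needs more input before it can output anything.
         */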
        while((ret=avcodec_receive_frame(mCodecCtx.get(), decoded_frame)) == AVERROR(EAGAIN))
            mPackets.sendTo(mCodecCtx.get());
        if(ret != 0)
        {
            if(ret == AVErrorEOF) break;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
            continue;
        }

        /* Get the PTS for this frame. */
        if(decoded_frame->best_effort_timestamp != AVNoPtsValue)
            current_pts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
                static_cast<double>(decoded_frame->best_effort_timestamp)});
        vp->mPts = current_pts;

        /* Update the video clock to the next expected PTS. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
        current_pts += duration_cast<nanoseconds>(seconds_d64{frame_delay});

        /* Put the frame in the queue to be loaded into a texture and displayed
         * by the rendering thread.
         */
        write_idx = (write_idx+1)%mPictQ.size();
        mPictQWrite.store(write_idx, std::memory_order_release);

        /* Send a packet now so it's hopefully ready by the time it's needed. */
        mPackets.sendTo(mCodecCtx.get());

        if(write_idx == mPictQRead.load(std::memory_order_acquire))
        {
            /* Wait until we have space for a new pic */
            std::unique_lock<std::mutex> lock{mPictQMutex};
            while(write_idx == mPictQRead.load(std::memory_order_acquire) &&
                !mMovie.mQuit.load(std::memory_order_relaxed))
                mPictQCond.wait(lock);
        }
    }
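    /* Decoding is done. Flag end-of-stream and wait for the main thread to
     * display the last frame before returning.
     */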
    mEOS = true;

    std::unique_lock<std::mutex> lock{mPictQMutex};
    while(!mFinalUpdate) mPictQCond.wait(lock);

    return 0;
}
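/* Called by libavformat during blocking I/O; a non-zero return aborts the
 * operation so the parser can shut down promptly.
 */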
int MovieState::decode_interrupt_cb(void *ctx)
{
    return static_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
}

bool MovieState::prepare()
{
    AVIOContext *avioctx{nullptr};
    AVIOInterruptCB intcb{decode_interrupt_cb, this};
    if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mIOContext.reset(avioctx);

    /* Open movie file. If avformat_open_input fails it will automatically free
     * this context, so don't set it onto a smart pointer yet.
     */
    AVFormatContext *fmtctx{avformat_alloc_context()};
    fmtctx->pb = mIOContext.get();
    fmtctx->interrupt_callback = intcb;
    if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mFormatCtx.reset(fmtctx);

    /* Retrieve stream information */
    if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
    {
        std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
        return false;
    }

    /* Dump information about the file onto standard error */
    av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);

    mParseThread = std::thread{std::mem_fn(&MovieState::parse_handler), this};
    return true;
}

void MovieState::setTitle(SDL_Window *window)
{
    auto pos1 = mFilename.rfind('/');
    auto pos2 = mFilename.rfind('\\');
    auto fpos = ((pos1 == std::string::npos) ? pos2 :
                 (pos2 == std::string::npos) ? pos1 :
                 std::max(pos1, pos2)) + 1;
    SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
}

nanoseconds MovieState::getClock()
{
    if(mClockBase == microseconds::min())
        return nanoseconds::zero();
    return get_avtime() - mClockBase;
}

nanoseconds MovieState::getMasterClock()
{
    if(mAVSyncType == SyncMaster::Video)
        return mVideo.getClock();
    if(mAVSyncType == SyncMaster::Audio)
        return mAudio.getClock();
    return getClock();
}

nanoseconds MovieState::getDuration()
{ return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }

int MovieState::streamComponentOpen(unsigned int stream_index)
{
    if(stream_index >= mFormatCtx->nb_streams)
        return -1;

    /* Get a pointer to the codec context for the stream, and open the
     * associated codec.
     */
    AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)};
    if(!avctx) return -1;

    if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
        return -1;

    AVCodec *codec{avcodec_find_decoder(avctx->codec_id)};
    if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
    {
        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
            << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        return -1;
    }

    /* Initialize and start the media type handler */
    switch(avctx->codec_type)
    {
    case AVMEDIA_TYPE_AUDIO:
        mAudio.mStream = mFormatCtx->streams[stream_index];
        mAudio.mCodecCtx = std::move(avctx);
        break;

    case AVMEDIA_TYPE_VIDEO:
        mVideo.mStream = mFormatCtx->streams[stream_index];
        mVideo.mCodecCtx = std::move(avctx);
        break;

    default:
        return -1;
    }

    return static_cast<int>(stream_index);
}
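/* Parsing thread. Opens the A/V streams, starts their decoder threads, and
 * feeds demuxed packets into the appropriate queues until the file ends.
 */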
int MovieState::parse_handler()
{
    auto &audio_queue = mAudio.mPackets;
    auto &video_queue = mVideo.mPackets;

    int video_index{-1};
    int audio_index{-1};

    /* Find the first video and audio streams */
    for(unsigned int i{0u};i < mFormatCtx->nb_streams;i++)
    {
        auto codecpar = mFormatCtx->streams[i]->codecpar;
        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0)
            video_index = streamComponentOpen(i);
        else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = streamComponentOpen(i);
    }

    if(video_index < 0 && audio_index < 0)
    {
        std::cerr<< mFilename<<": could not open codecs" <<std::endl;
        mQuit = true;
    }

    /* Set the base time 750ms ahead of the current av time. */
    mClockBase = get_avtime() + milliseconds{750};

    if(audio_index >= 0)
        mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio};
    if(video_index >= 0)
        mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo};

    /* Main packet reading/dispatching loop */
    while(!mQuit.load(std::memory_order_relaxed))
    {
        AVPacket packet;
        if(av_read_frame(mFormatCtx.get(), &packet) < 0)
            break;

        /* Copy the packet into the queue it's meant for. */
        if(packet.stream_index == video_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(&packet))
                std::this_thread::sleep_for(milliseconds{100});
        }
        else if(packet.stream_index == audio_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(&packet))
                std::this_thread::sleep_for(milliseconds{100});
        }

        av_packet_unref(&packet);
    }
    /* Finish the queues so the receivers know nothing more is coming. */
    if(mVideo.mCodecCtx) video_queue.setFinished();
    if(mAudio.mCodecCtx) audio_queue.setFinished();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock{mVideo.mPictQMutex};
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}


// Helper class+method to print the time with human-readable formatting.
struct PrettyTime {
    seconds mTime;
};
std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;

    seconds t{rhs.mTime};
    if(t.count() < 0)
    {
        os << '-';
        t *= -1;
    }

    // Only handle up to hour formatting
    if(t >= hours{1})
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
           << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
       << std::setfill(' ');
    return os;
}

} // namespace


int main(int argc, char *argv[])
{
    std::unique_ptr<MovieState> movState;

    if(argc < 2)
    {
        std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
        return 1;
    }
    /* Register all formats and codecs */
#if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100))
    av_register_all();
#endif
    /* Initialize networking protocols */
    avformat_network_init();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS))
    {
        std::cerr<< "Could not initialize SDL - "<<SDL_GetError() <<std::endl;
        return 1;
    }

    /* Make a window to put our video */
    SDL_Window *screen{SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE)};
    if(!screen)
    {
        std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
        return 1;
    }
    /* Make a renderer to handle the texture image surface and rendering. */
    Uint32 render_flags{SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC};
    SDL_Renderer *renderer{SDL_CreateRenderer(screen, -1, render_flags)};
    if(renderer)
    {
        SDL_RendererInfo rinf{};
        bool ok{false};

        /* Make sure the renderer supports IYUV textures. If not, fall back to
         * a software renderer. */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)
        {
            for(Uint32 i{0u};!ok && i < rinf.num_texture_formats;i++)
                ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
        }
        if(!ok)
        {
            std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
            SDL_DestroyRenderer(renderer);
            renderer = nullptr;
        }
    }
    if(!renderer)
    {
        render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
        renderer = SDL_CreateRenderer(screen, -1, render_flags);
    }
    if(!renderer)
    {
        std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
        return 1;
    }
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderFillRect(renderer, nullptr);
    SDL_RenderPresent(renderer);

    /* Open an audio device */
    ++argv; --argc;
    if(InitAL(&argv, &argc))
    {
        std::cerr<< "Failed to set up audio device" <<std::endl;
        return 1;
    }
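    /* Query the optional OpenAL/ALC extensions used for device timing, source
     * latency, events, and buffer callbacks.
     */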
    {
        auto device = alcGetContextsDevice(alcGetCurrentContext());
        if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
        {
            std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
            alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
                alcGetProcAddress(device, "alcGetInteger64vSOFT")
            );
        }
    }

    if(alIsExtensionPresent("AL_SOFT_source_latency"))
    {
        std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
        alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
            alGetProcAddress("alGetSourcei64vSOFT")
        );
    }
#ifdef AL_SOFT_events
    if(alIsExtensionPresent("AL_SOFT_events"))
    {
        std::cout<< "Found AL_SOFT_events" <<std::endl;
        alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
            alGetProcAddress("alEventControlSOFT"));
        alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
            alGetProcAddress("alEventCallbackSOFT"));
    }
#endif
#ifdef AL_SOFT_callback_buffer
    if(alIsExtensionPresent("AL_SOFTX_callback_buffer"))
    {
        std::cout<< "Found AL_SOFT_callback_buffer" <<std::endl;
        alBufferCallbackSOFT = reinterpret_cast<LPALBUFFERCALLBACKSOFT>(
            alGetProcAddress("alBufferCallbackSOFT"));
    }
#endif
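    /* Parse the remaining command line options. The first argument that isn't
     * a recognized option starts the list of files to play.
     */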
    int fileidx{0};
    for(;fileidx < argc;++fileidx)
    {
        if(strcmp(argv[fileidx], "-direct") == 0)
        {
            if(alIsExtensionPresent("AL_SOFT_direct_channels_remix"))
            {
                std::cout<< "Found AL_SOFT_direct_channels_remix" <<std::endl;
                DirectOutMode = AL_REMIX_UNMATCHED_SOFT;
            }
            else if(alIsExtensionPresent("AL_SOFT_direct_channels"))
            {
                std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
                DirectOutMode = AL_DROP_UNMATCHED_SOFT;
            }
            else
                std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
        }
        else if(strcmp(argv[fileidx], "-wide") == 0)
        {
            if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
                std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
            else
            {
                std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
                EnableWideStereo = true;
            }
        }
        else if(strcmp(argv[fileidx], "-novideo") == 0)
            DisableVideo = true;
        else
            break;
    }

    while(fileidx < argc && !movState)
    {
        movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
        if(!movState->prepare()) movState = nullptr;
    }
    if(!movState)
    {
        std::cerr<< "Could not start a video" <<std::endl;
        return 1;
    }
    movState->setTitle(screen);

    /* Default to going to the next movie at the end of one. */
    enum class EomAction {
        Next, Quit
    } eom_action{EomAction::Next};
    seconds last_time{seconds::min()};
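    /* Main event loop: wait briefly for SDL events, report the playback
     * position, and update the video texture on the main thread.
     */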
    while(1)
    {
        SDL_Event event{};
        int have_evt{SDL_WaitEventTimeout(&event, 10)};

        auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
        if(cur_time != last_time)
        {
            auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
            std::cout<< " \r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
            last_time = cur_time;
        }

        bool force_redraw{false};
        if(have_evt) do {
            switch(event.type)
            {
            case SDL_KEYDOWN:
                switch(event.key.keysym.sym)
                {
                case SDLK_ESCAPE:
                    movState->mQuit = true;
                    eom_action = EomAction::Quit;
                    break;

                case SDLK_n:
                    movState->mQuit = true;
                    eom_action = EomAction::Next;
                    break;

                default:
                    break;
                }
                break;

            case SDL_WINDOWEVENT:
                switch(event.window.event)
                {
                case SDL_WINDOWEVENT_RESIZED:
                    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                    SDL_RenderFillRect(renderer, nullptr);
                    force_redraw = true;
                    break;

                case SDL_WINDOWEVENT_EXPOSED:
                    force_redraw = true;
                    break;

                default:
                    break;
                }
                break;

            case SDL_QUIT:
                movState->mQuit = true;
                eom_action = EomAction::Quit;
                break;

            case FF_MOVIE_DONE_EVENT:
                std::cout<<'\n';
                last_time = seconds::min();
                if(eom_action != EomAction::Quit)
                {
                    movState = nullptr;
                    while(fileidx < argc && !movState)
                    {
                        movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
                        if(!movState->prepare()) movState = nullptr;
                    }
                    if(movState)
                    {
                        movState->setTitle(screen);
                        break;
                    }
                }

                /* Nothing more to play. Shut everything down and quit. */
                movState = nullptr;

                CloseAL();

                SDL_DestroyRenderer(renderer);
                renderer = nullptr;
                SDL_DestroyWindow(screen);
                screen = nullptr;

                SDL_Quit();
                exit(0);

            default:
                break;
            }
        } while(SDL_PollEvent(&event));

        movState->mVideo.updateVideo(screen, renderer, force_redraw);
    }

    std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
    return 1;
}