alffplay.cpp
  1. /*
  2. * An example showing how to play a stream sync'd to video, using ffmpeg.
  3. *
  4. * Requires C++11.
  5. */
  6. #include <condition_variable>
  7. #include <functional>
  8. #include <algorithm>
  9. #include <iostream>
  10. #include <iomanip>
  11. #include <cstring>
  12. #include <limits>
  13. #include <thread>
  14. #include <chrono>
  15. #include <atomic>
  16. #include <vector>
  17. #include <mutex>
  18. #include <deque>
  19. #include <array>
  20. #include <cmath>
  21. #include <string>
  22. extern "C" {
  23. #include "libavcodec/avcodec.h"
  24. #include "libavformat/avformat.h"
  25. #include "libavformat/avio.h"
  26. #include "libavutil/time.h"
  27. #include "libavutil/pixfmt.h"
  28. #include "libavutil/avstring.h"
  29. #include "libavutil/channel_layout.h"
  30. #include "libswscale/swscale.h"
  31. #include "libswresample/swresample.h"
  32. }
  33. #include "SDL.h"
  34. #include "AL/alc.h"
  35. #include "AL/al.h"
  36. #include "AL/alext.h"
  37. #include "common/alhelpers.h"
  38. extern "C" {
  39. /* Undefine this to disable use of experimental extensions. Don't use for
  40. * production code! Interfaces and behavior may change prior to being
  41. * finalized.
  42. */
  43. #define ALLOW_EXPERIMENTAL_EXTS
  44. #ifdef ALLOW_EXPERIMENTAL_EXTS
  45. #ifndef AL_SOFT_map_buffer
  46. #define AL_SOFT_map_buffer 1
  47. typedef unsigned int ALbitfieldSOFT;
  48. #define AL_MAP_READ_BIT_SOFT 0x00000001
  49. #define AL_MAP_WRITE_BIT_SOFT 0x00000002
  50. #define AL_MAP_PERSISTENT_BIT_SOFT 0x00000004
  51. #define AL_PRESERVE_DATA_BIT_SOFT 0x00000008
  52. typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
  53. typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
  54. typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
  55. typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length);
  56. #endif
  57. #ifndef AL_SOFT_events
  58. #define AL_SOFT_events 1
  59. #define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x1220
  60. #define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x1221
  61. #define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x1222
  62. #define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x1223
  63. #define AL_EVENT_TYPE_ERROR_SOFT 0x1224
  64. #define AL_EVENT_TYPE_PERFORMANCE_SOFT 0x1225
  65. #define AL_EVENT_TYPE_DEPRECATED_SOFT 0x1226
  66. #define AL_EVENT_TYPE_DISCONNECTED_SOFT 0x1227
  67. typedef void (AL_APIENTRY*ALEVENTPROCSOFT)(ALenum eventType, ALuint object, ALuint param,
  68. ALsizei length, const ALchar *message,
  69. void *userParam);
  70. typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count, const ALenum *types, ALboolean enable);
  71. typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback, void *userParam);
  72. typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname);
  73. typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
  74. #endif
  75. #endif /* ALLOW_EXPERIMENTAL_EXTS */
  76. }
  77. namespace {
  78. #ifndef M_PI
  79. #define M_PI (3.14159265358979323846)
  80. #endif
  81. using nanoseconds = std::chrono::nanoseconds;
  82. using microseconds = std::chrono::microseconds;
  83. using milliseconds = std::chrono::milliseconds;
  84. using seconds = std::chrono::seconds;
  85. using seconds_d64 = std::chrono::duration<double>;
  86. const std::string AppName("alffplay");
  87. bool EnableDirectOut = false;
  88. bool EnableWideStereo = false;
  89. LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
  90. LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
  91. #ifdef AL_SOFT_map_buffer
  92. LPALBUFFERSTORAGESOFT alBufferStorageSOFT;
  93. LPALMAPBUFFERSOFT alMapBufferSOFT;
  94. LPALUNMAPBUFFERSOFT alUnmapBufferSOFT;
  95. #endif
  96. #ifdef AL_SOFT_events
  97. LPALEVENTCONTROLSOFT alEventControlSOFT;
  98. LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
  99. #endif
  100. const seconds AVNoSyncThreshold(10);
  101. const milliseconds VideoSyncThreshold(10);
  102. #define VIDEO_PICTURE_QUEUE_SIZE 16
  103. const seconds_d64 AudioSyncThreshold(0.03);
  104. const milliseconds AudioSampleCorrectionMax(50);
  105. /* Averaging filter coefficient for audio sync. */
  106. #define AUDIO_DIFF_AVG_NB 20
  107. const double AudioAvgFilterCoeff = std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB);
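/* (Illustrative: with AUDIO_DIFF_AVG_NB = 20 this is 0.01^(1/20) ~= 0.794, so
 * a diff's weight decays to about 1% after 20 further updates.) */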
  108. /* Per-buffer size, in time */
  109. const milliseconds AudioBufferTime(20);
  110. /* Buffer total size, in time (should be divisible by the buffer time) */
  111. const milliseconds AudioBufferTotalTime(800);
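/* (800ms / 20ms = 40 OpenAL buffers are generated and kept queued on the
 * source; see mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0).) */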
  112. #define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
  113. enum {
  114. FF_UPDATE_EVENT = SDL_USEREVENT,
  115. FF_REFRESH_EVENT,
  116. FF_MOVIE_DONE_EVENT
  117. };
  118. enum class SyncMaster {
  119. Audio,
  120. Video,
  121. External,
  122. Default = External
  123. };
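/* The clock that drives A/V sync. External is the wall clock offset by
 * mClockBase (see MovieState::getMasterClock) and is the default. */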
  124. inline microseconds get_avtime()
  125. { return microseconds(av_gettime()); }
  126. /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
  127. struct AVIOContextDeleter {
  128. void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
  129. };
  130. using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;
  131. struct AVFormatCtxDeleter {
  132. void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
  133. };
  134. using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;
  135. struct AVCodecCtxDeleter {
  136. void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
  137. };
  138. using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;
  139. struct AVFrameDeleter {
  140. void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
  141. };
  142. using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;
  143. struct SwrContextDeleter {
  144. void operator()(SwrContext *ptr) { swr_free(&ptr); }
  145. };
  146. using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;
  147. struct SwsContextDeleter {
  148. void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
  149. };
  150. using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
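/* A simple FIFO of demuxed packets. put() stores a new reference to the given
 * packet, pop() releases the front one, and mTotalSize tracks the queued
 * compressed bytes for the MAX_QUEUE_SIZE check in the parse loop. */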
  151. class PacketQueue {
  152. std::deque<AVPacket> mPackets;
  153. size_t mTotalSize{0};
  154. public:
  155. ~PacketQueue() { clear(); }
  156. bool empty() const noexcept { return mPackets.empty(); }
  157. size_t totalSize() const noexcept { return mTotalSize; }
  158. void put(const AVPacket *pkt)
  159. {
  160. mPackets.push_back(AVPacket{});
  161. if(av_packet_ref(&mPackets.back(), pkt) != 0)
  162. mPackets.pop_back();
  163. else
  164. mTotalSize += mPackets.back().size;
  165. }
  166. AVPacket *front() noexcept
  167. { return &mPackets.front(); }
  168. void pop()
  169. {
  170. AVPacket *pkt = &mPackets.front();
  171. mTotalSize -= pkt->size;
  172. av_packet_unref(pkt);
  173. mPackets.pop_front();
  174. }
  175. void clear()
  176. {
  177. for(AVPacket &pkt : mPackets)
  178. av_packet_unref(&pkt);
  179. mPackets.clear();
  180. mTotalSize = 0;
  181. }
  182. };
  183. struct MovieState;
  184. struct AudioState {
  185. MovieState &mMovie;
  186. AVStream *mStream{nullptr};
  187. AVCodecCtxPtr mCodecCtx;
  188. std::mutex mQueueMtx;
  189. std::condition_variable mQueueCond;
  190. /* Used for clock difference average computation */
  191. seconds_d64 mClockDiffAvg{0};
  192. /* Time of the next sample to be buffered */
  193. nanoseconds mCurrentPts{0};
  194. /* Device clock time that the stream started at. */
  195. nanoseconds mDeviceStartTime{nanoseconds::min()};
  196. /* Decompressed sample frame, and swresample context for conversion */
  197. AVFramePtr mDecodedFrame;
  198. SwrContextPtr mSwresCtx;
  199. /* Conversion format, for what gets fed to OpenAL */
  200. int mDstChanLayout{0};
  201. AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};
  202. /* Storage of converted samples */
  203. uint8_t *mSamples{nullptr};
  204. int mSamplesLen{0}; /* In samples */
  205. int mSamplesPos{0};
  206. int mSamplesMax{0};
  207. /* OpenAL format */
  208. ALenum mFormat{AL_NONE};
  209. ALsizei mFrameSize{0};
  210. std::mutex mSrcMutex;
  211. std::condition_variable mSrcCond;
  212. std::atomic_flag mConnected;
  213. ALuint mSource{0};
  214. std::vector<ALuint> mBuffers;
  215. ALsizei mBufferIdx{0};
  216. AudioState(MovieState &movie) : mMovie(movie)
  217. { mConnected.test_and_set(std::memory_order_relaxed); }
  218. ~AudioState()
  219. {
  220. if(mSource)
  221. alDeleteSources(1, &mSource);
  222. if(!mBuffers.empty())
  223. alDeleteBuffers(mBuffers.size(), mBuffers.data());
  224. av_freep(&mSamples);
  225. }
  226. #ifdef AL_SOFT_events
  227. static void AL_APIENTRY EventCallback(ALenum eventType, ALuint object, ALuint param,
  228. ALsizei length, const ALchar *message,
  229. void *userParam);
  230. #endif
  231. nanoseconds getClockNoLock();
  232. nanoseconds getClock()
  233. {
  234. std::lock_guard<std::mutex> lock(mSrcMutex);
  235. return getClockNoLock();
  236. }
  237. bool isBufferFilled();
  238. void startPlayback();
  239. int getSync();
  240. int decodeFrame();
  241. bool readAudio(uint8_t *samples, int length);
  242. int handler();
  243. };
  244. struct VideoState {
  245. MovieState &mMovie;
  246. AVStream *mStream{nullptr};
  247. AVCodecCtxPtr mCodecCtx;
  248. std::mutex mQueueMtx;
  249. std::condition_variable mQueueCond;
  250. nanoseconds mClock{0};
  251. nanoseconds mFrameTimer{0};
  252. nanoseconds mFrameLastPts{0};
  253. nanoseconds mFrameLastDelay{0};
  254. nanoseconds mCurrentPts{0};
  255. /* time (av_gettime) at which we updated mCurrentPts; used to keep a running video pts */
  256. microseconds mCurrentPtsTime{0};
  257. /* Decompressed video frame, and swscale context for conversion */
  258. AVFramePtr mDecodedFrame;
  259. SwsContextPtr mSwscaleCtx;
  260. struct Picture {
  261. SDL_Texture *mImage{nullptr};
  262. int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
  263. std::atomic<bool> mUpdated{false};
  264. nanoseconds mPts{0};
  265. ~Picture()
  266. {
  267. if(mImage)
  268. SDL_DestroyTexture(mImage);
  269. mImage = nullptr;
  270. }
  271. };
  272. std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
  273. size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0};
  274. std::mutex mPictQMutex;
  275. std::condition_variable mPictQCond;
  276. bool mFirstUpdate{true};
  277. std::atomic<bool> mEOS{false};
  278. std::atomic<bool> mFinalUpdate{false};
  279. VideoState(MovieState &movie) : mMovie(movie) { }
  280. nanoseconds getClock();
  281. bool isBufferFilled();
  282. static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
  283. void schedRefresh(milliseconds delay);
  284. void display(SDL_Window *screen, SDL_Renderer *renderer);
  285. void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
  286. void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
  287. int queuePicture(nanoseconds pts);
  288. int handler();
  289. };
  290. struct MovieState {
  291. AVIOContextPtr mIOContext;
  292. AVFormatCtxPtr mFormatCtx;
  293. SyncMaster mAVSyncType{SyncMaster::Default};
  294. microseconds mClockBase{0};
  295. std::atomic<bool> mPlaying{false};
  296. std::mutex mSendMtx;
  297. std::condition_variable mSendCond;
  298. /* NOTE: false/clear = need data, true/set = no data needed */
  299. std::atomic_flag mSendDataGood;
  300. std::atomic<bool> mQuit{false};
  301. AudioState mAudio;
  302. VideoState mVideo;
  303. std::thread mParseThread;
  304. std::thread mAudioThread;
  305. std::thread mVideoThread;
  306. std::string mFilename;
  307. MovieState(std::string fname)
  308. : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
  309. { }
  310. ~MovieState()
  311. {
  312. mQuit = true;
  313. if(mParseThread.joinable())
  314. mParseThread.join();
  315. }
  316. static int decode_interrupt_cb(void *ctx);
  317. bool prepare();
  318. void setTitle(SDL_Window *window);
  319. nanoseconds getClock();
  320. nanoseconds getMasterClock();
  321. nanoseconds getDuration();
  322. int streamComponentOpen(int stream_index);
  323. int parse_handler();
  324. };
  325. nanoseconds AudioState::getClockNoLock()
  326. {
  327. // The audio clock is the timestamp of the sample currently being heard.
  328. if(alcGetInteger64vSOFT)
  329. {
  330. // If device start time = min, we aren't playing yet.
  331. if(mDeviceStartTime == nanoseconds::min())
  332. return nanoseconds::zero();
  333. // Get the current device clock time and latency.
  334. auto device = alcGetContextsDevice(alcGetCurrentContext());
  335. ALCint64SOFT devtimes[2] = {0,0};
  336. alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
  337. auto latency = nanoseconds(devtimes[1]);
  338. auto device_time = nanoseconds(devtimes[0]);
  339. // The clock is simply the current device time relative to the recorded
  340. // start time. We can also subtract the latency to get a more accurate
  341. // position of where the audio device actually is in the output stream.
  342. return device_time - mDeviceStartTime - latency;
  343. }
  344. /* The source-based clock is based on 4 components:
  345. * 1 - The timestamp of the next sample to buffer (mCurrentPts)
  346. * 2 - The length of the source's buffer queue
  347. * (AudioBufferTime*AL_BUFFERS_QUEUED)
  348. * 3 - The offset OpenAL is currently at in the source (the first value
  349. * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
  350. * 4 - The latency between OpenAL and the DAC (the second value from
  351. * AL_SAMPLE_OFFSET_LATENCY_SOFT)
  352. *
  353. * Subtracting the length of the source queue from the next sample's
  354. * timestamp gives the timestamp of the sample at the start of the source
  355. * queue. Adding the source offset to that results in the timestamp for the
  356. * sample at OpenAL's current position, and subtracting the source latency
  357. * from that gives the timestamp of the sample currently at the DAC.
  358. */
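/* (Illustrative numbers only: with mCurrentPts = 2.000s, 40 queued 20ms
 * buffers, a 0.250s source offset, and 0.020s of latency, the clock is
 * 2.000 - 0.800 + 0.250 - 0.020 = 1.430s.) */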
  359. nanoseconds pts = mCurrentPts;
  360. if(mSource)
  361. {
  362. ALint64SOFT offset[2];
  363. ALint queued;
  364. ALint status;
  365. /* NOTE: The source state must be checked last, in case an underrun
  366. * occurs and the source stops between retrieving the offset+latency
  367. * and getting the state. */
  368. if(alGetSourcei64vSOFT)
  369. alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
  370. else
  371. {
  372. ALint ioffset;
  373. alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
  374. offset[0] = (ALint64SOFT)ioffset << 32;
  375. offset[1] = 0;
  376. }
  377. alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
  378. alGetSourcei(mSource, AL_SOURCE_STATE, &status);
  379. /* If the source is AL_STOPPED, then there was an underrun and all
  380. * buffers are processed, so ignore the source queue. The audio thread
  381. * will put the source into an AL_INITIAL state and clear the queue
  382. * when it starts recovery. */
  383. if(status != AL_STOPPED)
  384. {
  385. using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
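/* offset[0] is a 32.32 fixed-point sample offset (AL_SOFT_source_latency);
 * dividing by the sample rate and reading it as a fixed32 duration yields the
 * time offset into the queued buffers. The AL_SAMPLE_OFFSET fallback above
 * shifts by 32 to match this format. */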
  386. pts -= AudioBufferTime*queued;
  387. pts += std::chrono::duration_cast<nanoseconds>(
  388. fixed32(offset[0] / mCodecCtx->sample_rate)
  389. );
  390. }
  391. /* Don't offset by the latency if the source isn't playing. */
  392. if(status == AL_PLAYING)
  393. pts -= nanoseconds(offset[1]);
  394. }
  395. return std::max(pts, nanoseconds::zero());
  396. }
  397. bool AudioState::isBufferFilled()
  398. {
  399. /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as
  400. * does the source gen. So when we're able to grab the lock and the source
  401. * is valid, the queue must be full.
  402. */
  403. std::lock_guard<std::mutex> lock(mSrcMutex);
  404. return mSource != 0;
  405. }
  406. void AudioState::startPlayback()
  407. {
  408. alSourcePlay(mSource);
  409. if(alcGetInteger64vSOFT)
  410. {
  411. using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
  412. // Subtract the total buffer queue time from the current pts to get the
  413. // pts of the start of the queue.
  414. nanoseconds startpts = mCurrentPts - AudioBufferTotalTime;
  415. int64_t srctimes[2]={0,0};
  416. alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
  417. auto device_time = nanoseconds(srctimes[1]);
  418. auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32(srctimes[0])) /
  419. mCodecCtx->sample_rate;
  420. // The mixer may have ticked and incremented the device time and sample
  421. // offset, so subtract the source offset from the device time to get
  422. // the device time the source started at. Also subtract startpts to get
  423. // the device time the stream would have started at to reach where it
  424. // is now.
  425. mDeviceStartTime = device_time - src_offset - startpts;
  426. }
  427. }
  428. int AudioState::getSync()
  429. {
  430. if(mMovie.mAVSyncType == SyncMaster::Audio)
  431. return 0;
  432. auto ref_clock = mMovie.getMasterClock();
  433. auto diff = ref_clock - getClockNoLock();
  434. if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
  435. {
  436. /* Difference is TOO big; reset accumulated average */
  437. mClockDiffAvg = seconds_d64::zero();
  438. return 0;
  439. }
  440. /* Accumulate the diffs */
  441. mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
  442. auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
  443. if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
  444. return 0;
  445. /* Constrain the per-update difference to avoid exceedingly large skips */
  446. diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax),
  447. AudioSampleCorrectionMax);
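/* diff (in seconds) * sample_rate is the number of sample frames to skip or
 * duplicate; e.g. a +20ms difference at 48kHz asks readAudio to skip 960
 * frames. */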
  448. return (int)std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count();
  449. }
  450. int AudioState::decodeFrame()
  451. {
  452. while(!mMovie.mQuit.load(std::memory_order_relaxed))
  453. {
  454. std::unique_lock<std::mutex> lock(mQueueMtx);
  455. int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
  456. if(ret == AVERROR(EAGAIN))
  457. {
  458. mMovie.mSendDataGood.clear(std::memory_order_relaxed);
  459. std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
  460. mMovie.mSendCond.notify_one();
  461. do {
  462. mQueueCond.wait(lock);
  463. ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
  464. } while(ret == AVERROR(EAGAIN));
  465. }
  466. lock.unlock();
  467. if(ret == AVERROR_EOF) break;
  468. mMovie.mSendDataGood.clear(std::memory_order_relaxed);
  469. mMovie.mSendCond.notify_one();
  470. if(ret < 0)
  471. {
  472. std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
  473. return 0;
  474. }
  475. if(mDecodedFrame->nb_samples <= 0)
  476. {
  477. av_frame_unref(mDecodedFrame.get());
  478. continue;
  479. }
  480. /* If provided, update w/ pts */
  481. if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
  482. mCurrentPts = std::chrono::duration_cast<nanoseconds>(
  483. seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
  484. );
  485. if(mDecodedFrame->nb_samples > mSamplesMax)
  486. {
  487. av_freep(&mSamples);
  488. av_samples_alloc(
  489. &mSamples, nullptr, mCodecCtx->channels,
  490. mDecodedFrame->nb_samples, mDstSampleFmt, 0
  491. );
  492. mSamplesMax = mDecodedFrame->nb_samples;
  493. }
  494. /* Return the amount of sample frames converted */
  495. int data_size = swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
  496. (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
  497. );
  498. av_frame_unref(mDecodedFrame.get());
  499. return data_size;
  500. }
  501. return 0;
  502. }
  503. /* Duplicates the sample at in to out, count times. The frame size is a
  504. * multiple of the template type size.
  505. */
  506. template<typename T>
  507. static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
  508. {
  509. const T *sample = reinterpret_cast<const T*>(in);
  510. T *dst = reinterpret_cast<T*>(out);
  511. if(frame_size == sizeof(T))
  512. std::fill_n(dst, count, *sample);
  513. else
  514. {
  515. /* NOTE: frame_size is a multiple of sizeof(T). */
  516. int type_mult = frame_size / sizeof(T);
  517. int i = 0;
  518. std::generate_n(dst, count*type_mult,
  519. [sample,type_mult,&i]() -> T
  520. {
  521. T ret = sample[i];
  522. i = (i+1)%type_mult;
  523. return ret;
  524. }
  525. );
  526. }
  527. }
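/* (e.g. a 16-bit stereo frame is 4 bytes, so readAudio picks
 * sample_dup<uint32_t> and each copy duplicates one whole frame.) */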
  528. bool AudioState::readAudio(uint8_t *samples, int length)
  529. {
  530. int sample_skip = getSync();
  531. int audio_size = 0;
  532. /* Read the next chunk of data, refill the buffer, and queue it
  533. * on the source */
  534. length /= mFrameSize;
  535. while(audio_size < length)
  536. {
  537. if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
  538. {
  539. int frame_len = decodeFrame();
  540. if(frame_len <= 0) break;
  541. mSamplesLen = frame_len;
  542. mSamplesPos = std::min(mSamplesLen, sample_skip);
  543. sample_skip -= mSamplesPos;
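// A negative sync correction leaves mSamplesPos negative here; the else
// branch below then repeats the first decoded sample to pad the output.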
  544. // Adjust the device start time and current pts by the amount we're
  545. // skipping/duplicating, so that the clock remains correct for the
  546. // current stream position.
  547. auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate;
  548. mDeviceStartTime -= skip;
  549. mCurrentPts += skip;
  550. continue;
  551. }
  552. int rem = length - audio_size;
  553. if(mSamplesPos >= 0)
  554. {
  555. int len = mSamplesLen - mSamplesPos;
  556. if(rem > len) rem = len;
  557. memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
  558. }
  559. else
  560. {
  561. rem = std::min(rem, -mSamplesPos);
  562. /* Add samples by copying the first sample */
  563. if((mFrameSize&7) == 0)
  564. sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
  565. else if((mFrameSize&3) == 0)
  566. sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
  567. else if((mFrameSize&1) == 0)
  568. sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
  569. else
  570. sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
  571. }
  572. mSamplesPos += rem;
  573. mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
  574. samples += rem*mFrameSize;
  575. audio_size += rem;
  576. }
  577. if(audio_size <= 0)
  578. return false;
  579. if(audio_size < length)
  580. {
  581. int rem = length - audio_size;
  582. std::fill_n(samples, rem*mFrameSize,
  583. (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
  584. mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
  585. audio_size += rem;
  586. }
  587. return true;
  588. }
  589. #ifdef AL_SOFT_events
  590. void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALuint param,
  591. ALsizei length, const ALchar *message,
  592. void *userParam)
  593. {
  594. AudioState *self = reinterpret_cast<AudioState*>(userParam);
  595. if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
  596. {
  597. /* Temporarily lock the source mutex to ensure it's not between
  598. * checking the processed count and going to sleep.
  599. */
  600. std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
  601. self->mSrcCond.notify_one();
  602. return;
  603. }
  604. std::cout<< "\n---- AL Event on AudioState "<<self<<" ----\nEvent: ";
  605. switch(eventType)
  606. {
  607. case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
  608. case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
  609. case AL_EVENT_TYPE_ERROR_SOFT: std::cout<< "API error"; break;
  610. case AL_EVENT_TYPE_PERFORMANCE_SOFT: std::cout<< "Performance"; break;
  611. case AL_EVENT_TYPE_DEPRECATED_SOFT: std::cout<< "Deprecated"; break;
  612. case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
  613. default: std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<
  614. std::dec<<std::setw(0)<<std::setfill(' '); break;
  615. }
  616. std::cout<< "\n"
  617. "Object ID: "<<object<<"\n"
  618. "Parameter: "<<param<<"\n"
  619. "Message: "<<std::string(message, length)<<"\n----"<<
  620. std::endl;
  621. if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
  622. {
  623. { std::lock_guard<std::mutex> lock(self->mSrcMutex);
  624. self->mConnected.clear(std::memory_order_release);
  625. }
  626. std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
  627. self->mSrcCond.notify_one();
  628. }
  629. }
  630. #endif
  631. int AudioState::handler()
  632. {
  633. std::unique_lock<std::mutex> lock(mSrcMutex);
  634. milliseconds sleep_time = AudioBufferTime / 3;
  635. ALenum fmt;
  636. #ifdef AL_SOFT_events
  637. const std::array<ALenum,6> evt_types{{
  638. AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
  639. AL_EVENT_TYPE_ERROR_SOFT, AL_EVENT_TYPE_PERFORMANCE_SOFT, AL_EVENT_TYPE_DEPRECATED_SOFT,
  640. AL_EVENT_TYPE_DISCONNECTED_SOFT
  641. }};
  642. if(alEventControlSOFT)
  643. {
  644. alEventControlSOFT(evt_types.size(), evt_types.data(), AL_TRUE);
  645. alEventCallbackSOFT(EventCallback, this);
  646. sleep_time = AudioBufferTotalTime;
  647. }
  648. #endif
  649. /* Find a suitable format for OpenAL. */
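/* The checks below prefer an OpenAL sample type matching the decoded format
 * (8-bit, float32 via AL_EXT_FLOAT32, otherwise 16-bit), and a 7.1/5.1
 * layout via AL_EXT_MCFORMATS, mono, or stereo as the fallback (swresample
 * handles the layout conversion). */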
  650. mDstChanLayout = 0;
  651. if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
  652. {
  653. mDstSampleFmt = AV_SAMPLE_FMT_U8;
  654. mFrameSize = 1;
  655. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
  656. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  657. (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
  658. {
  659. mDstChanLayout = mCodecCtx->channel_layout;
  660. mFrameSize *= 8;
  661. mFormat = fmt;
  662. }
  663. if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
  664. mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
  665. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  666. (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
  667. {
  668. mDstChanLayout = mCodecCtx->channel_layout;
  669. mFrameSize *= 6;
  670. mFormat = fmt;
  671. }
  672. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
  673. {
  674. mDstChanLayout = mCodecCtx->channel_layout;
  675. mFrameSize *= 1;
  676. mFormat = AL_FORMAT_MONO8;
  677. }
  678. if(!mDstChanLayout)
  679. {
  680. mDstChanLayout = AV_CH_LAYOUT_STEREO;
  681. mFrameSize *= 2;
  682. mFormat = AL_FORMAT_STEREO8;
  683. }
  684. }
  685. if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
  686. alIsExtensionPresent("AL_EXT_FLOAT32"))
  687. {
  688. mDstSampleFmt = AV_SAMPLE_FMT_FLT;
  689. mFrameSize = 4;
  690. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
  691. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  692. (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
  693. {
  694. mDstChanLayout = mCodecCtx->channel_layout;
  695. mFrameSize *= 8;
  696. mFormat = fmt;
  697. }
  698. if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
  699. mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
  700. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  701. (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
  702. {
  703. mDstChanLayout = mCodecCtx->channel_layout;
  704. mFrameSize *= 6;
  705. mFormat = fmt;
  706. }
  707. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
  708. {
  709. mDstChanLayout = mCodecCtx->channel_layout;
  710. mFrameSize *= 1;
  711. mFormat = AL_FORMAT_MONO_FLOAT32;
  712. }
  713. if(!mDstChanLayout)
  714. {
  715. mDstChanLayout = AV_CH_LAYOUT_STEREO;
  716. mFrameSize *= 2;
  717. mFormat = AL_FORMAT_STEREO_FLOAT32;
  718. }
  719. }
  720. if(!mDstChanLayout)
  721. {
  722. mDstSampleFmt = AV_SAMPLE_FMT_S16;
  723. mFrameSize = 2;
  724. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
  725. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  726. (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
  727. {
  728. mDstChanLayout = mCodecCtx->channel_layout;
  729. mFrameSize *= 8;
  730. mFormat = fmt;
  731. }
  732. if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
  733. mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
  734. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  735. (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
  736. {
  737. mDstChanLayout = mCodecCtx->channel_layout;
  738. mFrameSize *= 6;
  739. mFormat = fmt;
  740. }
  741. if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
  742. {
  743. mDstChanLayout = mCodecCtx->channel_layout;
  744. mFrameSize *= 1;
  745. mFormat = AL_FORMAT_MONO16;
  746. }
  747. if(!mDstChanLayout)
  748. {
  749. mDstChanLayout = AV_CH_LAYOUT_STEREO;
  750. mFrameSize *= 2;
  751. mFormat = AL_FORMAT_STEREO16;
  752. }
  753. }
  754. void *samples = nullptr;
  755. ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>(
  756. mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize;
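/* (e.g. 48000Hz * 20ms = 960 sample frames per buffer, times the bytes per
 * frame.) */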
  757. mSamples = NULL;
  758. mSamplesMax = 0;
  759. mSamplesPos = 0;
  760. mSamplesLen = 0;
  761. mDecodedFrame.reset(av_frame_alloc());
  762. if(!mDecodedFrame)
  763. {
  764. std::cerr<< "Failed to allocate audio frame" <<std::endl;
  765. goto finish;
  766. }
  767. mSwresCtx.reset(swr_alloc_set_opts(nullptr,
  768. mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
  769. mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
  770. (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
  771. mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
  772. 0, nullptr
  773. ));
  774. if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
  775. {
  776. std::cerr<< "Failed to initialize audio converter" <<std::endl;
  777. goto finish;
  778. }
  779. mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0);
  780. alGenBuffers(mBuffers.size(), mBuffers.data());
  781. alGenSources(1, &mSource);
  782. if(EnableDirectOut)
  783. alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
  784. if(EnableWideStereo)
  785. {
  786. ALfloat angles[2] = { (ALfloat)(M_PI/3.0), (ALfloat)(-M_PI/3.0) };
  787. alSourcefv(mSource, AL_STEREO_ANGLES, angles);
  788. }
  789. if(alGetError() != AL_NO_ERROR)
  790. goto finish;
  791. #ifdef AL_SOFT_map_buffer
  792. if(alBufferStorageSOFT)
  793. {
  794. for(ALuint bufid : mBuffers)
  795. alBufferStorageSOFT(bufid, mFormat, nullptr, buffer_len, mCodecCtx->sample_rate,
  796. AL_MAP_WRITE_BIT_SOFT);
  797. if(alGetError() != AL_NO_ERROR)
  798. {
  799. fprintf(stderr, "Failed to use mapped buffers\n");
  800. samples = av_malloc(buffer_len);
  801. }
  802. }
  803. else
  804. #endif
  805. samples = av_malloc(buffer_len);
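/* Without usable mapped buffers, 'samples' is a staging buffer that readAudio
 * fills and alBufferData uploads; otherwise readAudio writes directly into
 * the mapped buffer storage. */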
  806. while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed) &&
  807. mConnected.test_and_set(std::memory_order_relaxed))
  808. {
  809. /* First remove any processed buffers. */
  810. ALint processed;
  811. alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
  812. while(processed > 0)
  813. {
  814. std::array<ALuint,4> bids;
  815. alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed),
  816. bids.data());
  817. processed -= std::min<ALsizei>(bids.size(), processed);
  818. }
  819. /* Refill the buffer queue. */
  820. ALint queued;
  821. alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
  822. while((ALuint)queued < mBuffers.size())
  823. {
  824. ALuint bufid = mBuffers[mBufferIdx];
  825. uint8_t *ptr = reinterpret_cast<uint8_t*>(samples
  826. #ifdef AL_SOFT_map_buffer
  827. ? samples : alMapBufferSOFT(bufid, 0, buffer_len, AL_MAP_WRITE_BIT_SOFT)
  828. #endif
  829. );
  830. if(!ptr) break;
  831. /* Read the next chunk of data, filling the buffer, and queue it on
  832. * the source */
  833. bool got_audio = readAudio(ptr, buffer_len);
  834. #ifdef AL_SOFT_map_buffer
  835. if(!samples) alUnmapBufferSOFT(bufid);
  836. #endif
  837. if(!got_audio) break;
  838. if(samples)
  839. alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);
  840. alSourceQueueBuffers(mSource, 1, &bufid);
  841. mBufferIdx = (mBufferIdx+1) % mBuffers.size();
  842. ++queued;
  843. }
  844. if(queued == 0)
  845. break;
  846. /* Check that the source is playing. */
  847. ALint state;
  848. alGetSourcei(mSource, AL_SOURCE_STATE, &state);
  849. if(state == AL_STOPPED)
  850. {
  851. /* AL_STOPPED means there was an underrun. Clear the buffer queue
  852. * since this likely means we're late, and rewind the source to get
  853. * it back into an AL_INITIAL state.
  854. */
  855. alSourceRewind(mSource);
  856. alSourcei(mSource, AL_BUFFER, 0);
  857. continue;
  858. }
  859. /* (re)start the source if needed, and wait for a buffer to finish */
  860. if(state != AL_PLAYING && state != AL_PAUSED &&
  861. mMovie.mPlaying.load(std::memory_order_relaxed))
  862. startPlayback();
  863. mSrcCond.wait_for(lock, sleep_time);
  864. }
  865. alSourceRewind(mSource);
  866. alSourcei(mSource, AL_BUFFER, 0);
  867. finish:
  868. av_freep(&samples);
  869. #ifdef AL_SOFT_events
  870. if(alEventControlSOFT)
  871. {
  872. alEventControlSOFT(evt_types.size(), evt_types.data(), AL_FALSE);
  873. alEventCallbackSOFT(nullptr, nullptr);
  874. }
  875. #endif
  876. return 0;
  877. }
  878. nanoseconds VideoState::getClock()
  879. {
  880. /* NOTE: This returns incorrect times while not playing. */
  881. auto delta = get_avtime() - mCurrentPtsTime;
  882. return mCurrentPts + delta;
  883. }
  884. bool VideoState::isBufferFilled()
  885. {
  886. std::unique_lock<std::mutex> lock(mPictQMutex);
  887. return mPictQSize >= mPictQ.size();
  888. }
  889. Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
  890. {
  891. SDL_Event evt{};
  892. evt.user.type = FF_REFRESH_EVENT;
  893. evt.user.data1 = opaque;
  894. SDL_PushEvent(&evt);
  895. return 0; /* 0 means stop timer */
  896. }
  897. /* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
  898. void VideoState::schedRefresh(milliseconds delay)
  899. {
  900. SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this);
  901. }
  902. /* Called by VideoState::refreshTimer to display the next video frame. */
  903. void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
  904. {
  905. Picture *vp = &mPictQ[mPictQRead];
  906. if(!vp->mImage)
  907. return;
  908. float aspect_ratio;
  909. int win_w, win_h;
  910. int w, h, x, y;
  911. if(mCodecCtx->sample_aspect_ratio.num == 0)
  912. aspect_ratio = 0.0f;
  913. else
  914. {
  915. aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
  916. mCodecCtx->height;
  917. }
  918. if(aspect_ratio <= 0.0f)
  919. aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
  920. SDL_GetWindowSize(screen, &win_w, &win_h);
  921. h = win_h;
  922. w = ((int)rint(h * aspect_ratio) + 3) & ~3;
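/* (n + 3) & ~3 rounds the scaled dimension up to a multiple of 4. */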
  923. if(w > win_w)
  924. {
  925. w = win_w;
  926. h = ((int)rint(w / aspect_ratio) + 3) & ~3;
  927. }
  928. x = (win_w - w) / 2;
  929. y = (win_h - h) / 2;
  930. SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
  931. SDL_Rect dst_rect{ x, y, w, h };
  932. SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
  933. SDL_RenderPresent(renderer);
  934. }
  935. /* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
  936. * was created. It handles the display of the next decoded video frame (if not
  937. * falling behind), and sets up the timer for the following video frame.
  938. */
  939. void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
  940. {
  941. if(!mStream)
  942. {
  943. if(mEOS)
  944. {
  945. mFinalUpdate = true;
  946. std::unique_lock<std::mutex>(mPictQMutex).unlock();
  947. mPictQCond.notify_all();
  948. return;
  949. }
  950. schedRefresh(milliseconds(100));
  951. return;
  952. }
  953. if(!mMovie.mPlaying.load(std::memory_order_relaxed))
  954. {
  955. schedRefresh(milliseconds(1));
  956. return;
  957. }
  958. std::unique_lock<std::mutex> lock(mPictQMutex);
  959. retry:
  960. if(mPictQSize == 0)
  961. {
  962. if(mEOS)
  963. mFinalUpdate = true;
  964. else
  965. schedRefresh(milliseconds(1));
  966. lock.unlock();
  967. mPictQCond.notify_all();
  968. return;
  969. }
  970. Picture *vp = &mPictQ[mPictQRead];
  971. mCurrentPts = vp->mPts;
  972. mCurrentPtsTime = get_avtime();
  973. /* Get delay using the frame pts and the pts from last frame. */
  974. auto delay = vp->mPts - mFrameLastPts;
  975. if(delay <= seconds::zero() || delay >= seconds(1))
  976. {
  977. /* If incorrect delay, use previous one. */
  978. delay = mFrameLastDelay;
  979. }
  980. /* Save for next frame. */
  981. mFrameLastDelay = delay;
  982. mFrameLastPts = vp->mPts;
  983. /* Update delay to sync to clock if not master source. */
  984. if(mMovie.mAVSyncType != SyncMaster::Video)
  985. {
  986. auto ref_clock = mMovie.getMasterClock();
  987. auto diff = vp->mPts - ref_clock;
  988. /* Skip or repeat the frame. Take delay into account. */
  989. auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold);
  990. if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
  991. {
  992. if(diff <= -sync_threshold)
  993. delay = nanoseconds::zero();
  994. else if(diff >= sync_threshold)
  995. delay *= 2;
  996. }
  997. }
  998. mFrameTimer += delay;
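/* mFrameTimer accumulates the target display time in absolute av_gettime()
 * terms; actual_delay below is how far in the future that target still is. */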
  999. /* Compute the REAL delay. */
  1000. auto actual_delay = mFrameTimer - get_avtime();
  1001. if(!(actual_delay >= VideoSyncThreshold))
  1002. {
  1003. /* We don't have time to handle this picture, just skip to the next one. */
  1004. mPictQRead = (mPictQRead+1)%mPictQ.size();
  1005. mPictQSize--;
  1006. goto retry;
  1007. }
  1008. schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay));
  1009. /* Show the picture! */
  1010. display(screen, renderer);
  1011. /* Update queue for next picture. */
  1012. mPictQRead = (mPictQRead+1)%mPictQ.size();
  1013. mPictQSize--;
  1014. lock.unlock();
  1015. mPictQCond.notify_all();
  1016. }
  1017. /* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
  1018. * main thread where the renderer was created.
  1019. */
  1020. void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
  1021. {
  1022. Picture *vp = &mPictQ[mPictQWrite];
  1023. bool fmt_updated = false;
  1024. /* allocate or resize the buffer! */
  1025. if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
  1026. {
  1027. fmt_updated = true;
  1028. if(vp->mImage)
  1029. SDL_DestroyTexture(vp->mImage);
  1030. vp->mImage = SDL_CreateTexture(
  1031. renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
  1032. mCodecCtx->coded_width, mCodecCtx->coded_height
  1033. );
  1034. if(!vp->mImage)
  1035. std::cerr<< "Failed to create IYUV texture!" <<std::endl;
  1036. vp->mWidth = mCodecCtx->width;
  1037. vp->mHeight = mCodecCtx->height;
  1038. if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
  1039. {
  1040. /* For the first update, set the window size to the video size. */
  1041. mFirstUpdate = false;
  1042. int w = vp->mWidth;
  1043. int h = vp->mHeight;
  1044. if(mCodecCtx->sample_aspect_ratio.den != 0)
  1045. {
  1046. double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
  1047. if(aspect_ratio >= 1.0)
  1048. w = (int)(w*aspect_ratio + 0.5);
  1049. else if(aspect_ratio > 0.0)
  1050. h = (int)(h/aspect_ratio + 0.5);
  1051. }
  1052. SDL_SetWindowSize(screen, w, h);
  1053. }
  1054. }
  1055. if(vp->mImage)
  1056. {
  1057. AVFrame *frame = mDecodedFrame.get();
  1058. void *pixels = nullptr;
  1059. int pitch = 0;
  1060. if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
  1061. SDL_UpdateYUVTexture(vp->mImage, nullptr,
  1062. frame->data[0], frame->linesize[0],
  1063. frame->data[1], frame->linesize[1],
  1064. frame->data[2], frame->linesize[2]
  1065. );
  1066. else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
  1067. std::cerr<< "Failed to lock texture" <<std::endl;
  1068. else
  1069. {
  1070. // Convert the image into YUV format that SDL uses
  1071. int coded_w = mCodecCtx->coded_width;
  1072. int coded_h = mCodecCtx->coded_height;
  1073. int w = mCodecCtx->width;
  1074. int h = mCodecCtx->height;
  1075. if(!mSwscaleCtx || fmt_updated)
  1076. {
  1077. mSwscaleCtx.reset(sws_getContext(
  1078. w, h, mCodecCtx->pix_fmt,
  1079. w, h, AV_PIX_FMT_YUV420P, 0,
  1080. nullptr, nullptr, nullptr
  1081. ));
  1082. }
  1083. /* point pict at the queue */
  1084. uint8_t *pict_data[3];
  1085. pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
  1086. pict_data[1] = pict_data[0] + coded_w*coded_h;
  1087. pict_data[2] = pict_data[1] + coded_w*coded_h/4;
  1088. int pict_linesize[3];
  1089. pict_linesize[0] = pitch;
  1090. pict_linesize[1] = pitch / 2;
  1091. pict_linesize[2] = pitch / 2;
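/* IYUV is planar: a full-size Y plane followed by quarter-size U and V
 * planes, so the chroma planes start after coded_w*coded_h bytes and use
 * half the Y pitch. */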
  1092. sws_scale(mSwscaleCtx.get(), (const uint8_t**)frame->data,
  1093. frame->linesize, 0, h, pict_data, pict_linesize);
  1094. SDL_UnlockTexture(vp->mImage);
  1095. }
  1096. }
  1097. vp->mUpdated.store(true, std::memory_order_release);
  1098. std::unique_lock<std::mutex>(mPictQMutex).unlock();
  1099. mPictQCond.notify_one();
  1100. }
  1101. int VideoState::queuePicture(nanoseconds pts)
  1102. {
  1103. /* Wait until we have space for a new pic */
  1104. std::unique_lock<std::mutex> lock(mPictQMutex);
  1105. while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed))
  1106. mPictQCond.wait(lock);
  1107. lock.unlock();
  1108. if(mMovie.mQuit.load(std::memory_order_relaxed))
  1109. return -1;
  1110. Picture *vp = &mPictQ[mPictQWrite];
  1111. /* We have to create/update the picture in the main thread */
  1112. vp->mUpdated.store(false, std::memory_order_relaxed);
  1113. SDL_Event evt{};
  1114. evt.user.type = FF_UPDATE_EVENT;
  1115. evt.user.data1 = this;
  1116. SDL_PushEvent(&evt);
  1117. /* Wait until the picture is updated. */
  1118. lock.lock();
  1119. while(!vp->mUpdated.load(std::memory_order_relaxed))
  1120. {
  1121. if(mMovie.mQuit.load(std::memory_order_relaxed))
  1122. return -1;
  1123. mPictQCond.wait(lock);
  1124. }
  1125. if(mMovie.mQuit.load(std::memory_order_relaxed))
  1126. return -1;
  1127. vp->mPts = pts;
  1128. mPictQWrite = (mPictQWrite+1)%mPictQ.size();
  1129. mPictQSize++;
  1130. lock.unlock();
  1131. return 0;
  1132. }
  1133. int VideoState::handler()
  1134. {
  1135. mDecodedFrame.reset(av_frame_alloc());
  1136. while(!mMovie.mQuit.load(std::memory_order_relaxed))
  1137. {
  1138. std::unique_lock<std::mutex> lock(mQueueMtx);
  1139. /* Decode video frame */
  1140. int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
  1141. if(ret == AVERROR(EAGAIN))
  1142. {
  1143. mMovie.mSendDataGood.clear(std::memory_order_relaxed);
  1144. std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
  1145. mMovie.mSendCond.notify_one();
  1146. do {
  1147. mQueueCond.wait(lock);
  1148. ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
  1149. } while(ret == AVERROR(EAGAIN));
  1150. }
  1151. lock.unlock();
  1152. if(ret == AVERROR_EOF) break;
  1153. mMovie.mSendDataGood.clear(std::memory_order_relaxed);
  1154. mMovie.mSendCond.notify_one();
  1155. if(ret < 0)
  1156. {
  1157. std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
  1158. continue;
  1159. }
  1160. /* Get the PTS for this frame. */
  1161. nanoseconds pts;
  1162. if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
  1163. mClock = std::chrono::duration_cast<nanoseconds>(
  1164. seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
  1165. );
  1166. pts = mClock;
  1167. /* Update the video clock to the next expected PTS. */
  1168. auto frame_delay = av_q2d(mCodecCtx->time_base);
  1169. frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
  1170. mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay));
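/* (repeat_pict extends a frame's display time by half a frame duration per
 * unit, so it's folded into the next expected PTS.) */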
  1171. if(queuePicture(pts) < 0)
  1172. break;
  1173. av_frame_unref(mDecodedFrame.get());
  1174. }
  1175. mEOS = true;
  1176. std::unique_lock<std::mutex> lock(mPictQMutex);
  1177. if(mMovie.mQuit.load(std::memory_order_relaxed))
  1178. {
  1179. mPictQRead = 0;
  1180. mPictQWrite = 0;
  1181. mPictQSize = 0;
  1182. }
  1183. while(!mFinalUpdate)
  1184. mPictQCond.wait(lock);
  1185. return 0;
  1186. }
  1187. int MovieState::decode_interrupt_cb(void *ctx)
  1188. {
  1189. return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
  1190. }
  1191. bool MovieState::prepare()
  1192. {
  1193. AVIOContext *avioctx = nullptr;
  1194. AVIOInterruptCB intcb = { decode_interrupt_cb, this };
  1195. if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
  1196. {
  1197. std::cerr<< "Failed to open "<<mFilename <<std::endl;
  1198. return false;
  1199. }
  1200. mIOContext.reset(avioctx);
  1201. /* Open movie file. If avformat_open_input fails it will automatically free
  1202. * this context, so don't set it onto a smart pointer yet.
  1203. */
  1204. AVFormatContext *fmtctx = avformat_alloc_context();
  1205. fmtctx->pb = mIOContext.get();
  1206. fmtctx->interrupt_callback = intcb;
  1207. if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
  1208. {
  1209. std::cerr<< "Failed to open "<<mFilename <<std::endl;
  1210. return false;
  1211. }
  1212. mFormatCtx.reset(fmtctx);
  1213. /* Retrieve stream information */
  1214. if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
  1215. {
  1216. std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
  1217. return false;
  1218. }
  1219. mVideo.schedRefresh(milliseconds(40));
  1220. mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
  1221. return true;
  1222. }
  1223. void MovieState::setTitle(SDL_Window *window)
  1224. {
  1225. auto pos1 = mFilename.rfind('/');
  1226. auto pos2 = mFilename.rfind('\\');
  1227. auto fpos = ((pos1 == std::string::npos) ? pos2 :
  1228. (pos2 == std::string::npos) ? pos1 :
  1229. std::max(pos1, pos2)) + 1;
  1230. SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
  1231. }
  1232. nanoseconds MovieState::getClock()
  1233. {
  1234. if(!mPlaying.load(std::memory_order_relaxed))
  1235. return nanoseconds::zero();
  1236. return get_avtime() - mClockBase;
  1237. }
  1238. nanoseconds MovieState::getMasterClock()
  1239. {
  1240. if(mAVSyncType == SyncMaster::Video)
  1241. return mVideo.getClock();
  1242. if(mAVSyncType == SyncMaster::Audio)
  1243. return mAudio.getClock();
  1244. return getClock();
  1245. }
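/* AVFormatContext::duration is in AV_TIME_BASE units (microseconds); the
 * duration type below converts it to nanoseconds on return. */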
  1246. nanoseconds MovieState::getDuration()
  1247. { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
  1248. int MovieState::streamComponentOpen(int stream_index)
  1249. {
  1250. if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
  1251. return -1;
  1252. /* Get a pointer to the codec context for the stream, and open the
  1253. * associated codec.
  1254. */
  1255. AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr));
  1256. if(!avctx) return -1;
  1257. if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
  1258. return -1;
  1259. AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
  1260. if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
  1261. {
  1262. std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
  1263. << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
  1264. return -1;
  1265. }
  1266. /* Initialize and start the media type handler */
  1267. switch(avctx->codec_type)
  1268. {
  1269. case AVMEDIA_TYPE_AUDIO:
  1270. mAudio.mStream = mFormatCtx->streams[stream_index];
  1271. mAudio.mCodecCtx = std::move(avctx);
  1272. mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
  1273. break;
  1274. case AVMEDIA_TYPE_VIDEO:
  1275. mVideo.mStream = mFormatCtx->streams[stream_index];
  1276. mVideo.mCodecCtx = std::move(avctx);
  1277. mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
  1278. break;
  1279. default:
  1280. return -1;
  1281. }
  1282. return stream_index;
  1283. }
  1284. int MovieState::parse_handler()
  1285. {
  1286. int video_index = -1;
  1287. int audio_index = -1;
  1288. /* Dump information about file onto standard error */
  1289. av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
  1290. /* Find the first video and audio streams */
  1291. for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
  1292. {
  1293. auto codecpar = mFormatCtx->streams[i]->codecpar;
  1294. if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
  1295. video_index = streamComponentOpen(i);
  1296. else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
  1297. audio_index = streamComponentOpen(i);
  1298. }
  1299. if(video_index < 0 && audio_index < 0)
  1300. {
  1301. std::cerr<< mFilename<<": could not open codecs" <<std::endl;
  1302. mQuit = true;
  1303. }
  1304. PacketQueue audio_queue, video_queue;
  1305. bool input_finished = false;
  1306. /* Main packet reading/dispatching loop */
  1307. while(!mQuit.load(std::memory_order_relaxed) && !input_finished)
  1308. {
  1309. AVPacket packet;
  1310. if(av_read_frame(mFormatCtx.get(), &packet) < 0)
  1311. input_finished = true;
  1312. else
  1313. {
  1314. /* Copy the packet into the queue it's meant for. */
  1315. if(packet.stream_index == video_index)
  1316. video_queue.put(&packet);
  1317. else if(packet.stream_index == audio_index)
  1318. audio_queue.put(&packet);
  1319. av_packet_unref(&packet);
  1320. }
        do {
            /* Send whatever queued packets we have. */
            if(!audio_queue.empty())
            {
                std::unique_lock<std::mutex> lock(mAudio.mQueueMtx);
                int ret;
                do {
                    ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front());
                    if(ret != AVERROR(EAGAIN)) audio_queue.pop();
                } while(ret != AVERROR(EAGAIN) && !audio_queue.empty());
                lock.unlock();
                mAudio.mQueueCond.notify_one();
            }
            if(!video_queue.empty())
            {
                std::unique_lock<std::mutex> lock(mVideo.mQueueMtx);
                int ret;
                do {
                    ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front());
                    if(ret != AVERROR(EAGAIN)) video_queue.pop();
                } while(ret != AVERROR(EAGAIN) && !video_queue.empty());
                lock.unlock();
                mVideo.mQueueCond.notify_one();
            }

            /* If the queues are completely empty, or it's not full and there's
             * more input to read, go get more.
             */
            size_t queue_size = audio_queue.totalSize() + video_queue.totalSize();
            if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished))
                break;
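
            /* Start playback once both decoders have filled their buffers. */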
            if(!mPlaying.load(std::memory_order_relaxed))
            {
                if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) &&
                   (!mVideo.mCodecCtx || mVideo.isBufferFilled()))
                {
                    /* Set the base time 50ms ahead of the current av time. */
                    mClockBase = get_avtime() + milliseconds(50);
                    mVideo.mCurrentPtsTime = mClockBase;
                    mVideo.mFrameTimer = mVideo.mCurrentPtsTime;
                    mAudio.startPlayback();
                    mPlaying.store(true, std::memory_order_release);
                }
            }

            /* Nothing to send or get for now, wait a bit and try again. */
            { std::unique_lock<std::mutex> lock(mSendMtx);
                if(mSendDataGood.test_and_set(std::memory_order_relaxed))
                    mSendCond.wait_for(lock, milliseconds(10));
            }
        } while(!mQuit.load(std::memory_order_relaxed));
    }

    /* Pass a null packet to finish the send buffers (the receive functions
     * will get AVERROR_EOF when emptied).
     */
    if(mVideo.mCodecCtx)
    {
        { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx);
            avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr);
        }
        mVideo.mQueueCond.notify_one();
    }
    if(mAudio.mCodecCtx)
    {
        { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx);
            avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr);
        }
        mAudio.mQueueCond.notify_one();
    }
    video_queue.clear();
    audio_queue.clear();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();
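
    /* Mark end-of-stream on the video state and wait for its final picture
     * update before telling the main thread the movie is done. */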
    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}

// Helper class+method to print the time with human-readable formatting.
struct PrettyTime {
    seconds mTime;
};

inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;
    using std::chrono::duration_cast;

    seconds t = rhs.mTime;
    if(t.count() < 0)
    {
        os << '-';
        t *= -1;
    }

    // Only handle up to hour formatting
    if(t >= hours(1))
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
           << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
       << std::setfill(' ');
    return os;
}

} // namespace

int main(int argc, char *argv[])
{
    std::unique_ptr<MovieState> movState;

    if(argc < 2)
    {
        std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] [-wide] <files...>" <<std::endl;
        return 1;
    }
    /* Register all formats and codecs */
    av_register_all();
    /* Initialize networking protocols */
    avformat_network_init();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
    {
        std::cerr<< "Could not initialize SDL - "<<SDL_GetError() <<std::endl;
        return 1;
    }

    /* Make a window to put our video */
    SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
    if(!screen)
    {
        std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
        return 1;
    }

    /* Make a renderer to handle the texture image surface and rendering. */
    Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC;
    SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags);
    if(renderer)
    {
        SDL_RendererInfo rinf{};
        bool ok = false;

        /* Make sure the renderer supports IYUV textures. If not, fallback to a
         * software renderer. */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)
        {
            for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
                ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
        }
        if(!ok)
        {
            std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
            SDL_DestroyRenderer(renderer);
            renderer = nullptr;
        }
    }
    if(!renderer)
    {
        render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
        renderer = SDL_CreateRenderer(screen, -1, render_flags);
    }
    if(!renderer)
    {
        std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
        return 1;
    }
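
    /* Clear the window to black. */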
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderFillRect(renderer, nullptr);
    SDL_RenderPresent(renderer);

    /* Open an audio device */
    ++argv; --argc;
    if(InitAL(&argv, &argc))
    {
        std::cerr<< "Failed to set up audio device" <<std::endl;
        return 1;
    }
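
    /* Load the ALC_SOFT_device_clock extension function if the device supports
     * it, for querying the audio device clock. */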
    { auto device = alcGetContextsDevice(alcGetCurrentContext());
        if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
        {
            std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
            alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
                alcGetProcAddress(device, "alcGetInteger64vSOFT")
            );
        }
    }

    if(alIsExtensionPresent("AL_SOFT_source_latency"))
    {
        std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
        alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
            alGetProcAddress("alGetSourcei64vSOFT")
        );
    }
#ifdef AL_SOFT_map_buffer
    if(alIsExtensionPresent("AL_SOFTX_map_buffer"))
    {
        std::cout<< "Found AL_SOFT_map_buffer" <<std::endl;
        alBufferStorageSOFT = reinterpret_cast<LPALBUFFERSTORAGESOFT>(
            alGetProcAddress("alBufferStorageSOFT"));
        alMapBufferSOFT = reinterpret_cast<LPALMAPBUFFERSOFT>(
            alGetProcAddress("alMapBufferSOFT"));
        alUnmapBufferSOFT = reinterpret_cast<LPALUNMAPBUFFERSOFT>(
            alGetProcAddress("alUnmapBufferSOFT"));
    }
#endif
#ifdef AL_SOFT_events
    if(alIsExtensionPresent("AL_SOFTX_events"))
    {
        std::cout<< "Found AL_SOFT_events" <<std::endl;
        alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
            alGetProcAddress("alEventControlSOFT"));
        alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
            alGetProcAddress("alEventCallbackSOFT"));
    }
#endif
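
    /* Handle leading command-line options; the remaining arguments are the
     * files to play. */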
    int fileidx = 0;
    for(;fileidx < argc;++fileidx)
    {
        if(strcmp(argv[fileidx], "-direct") == 0)
        {
            if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
                std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
            else
            {
                std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
                EnableDirectOut = true;
            }
        }
        else if(strcmp(argv[fileidx], "-wide") == 0)
        {
            if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
                std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
            else
            {
                std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
                EnableWideStereo = true;
            }
        }
        else
            break;
    }
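
    /* Open the first movie that prepares successfully. */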
    while(fileidx < argc && !movState)
    {
        movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
        if(!movState->prepare()) movState = nullptr;
    }
    if(!movState)
    {
        std::cerr<< "Could not start a video" <<std::endl;
        return 1;
    }
    movState->setTitle(screen);

    /* Default to going to the next movie at the end of one. */
    enum class EomAction {
        Next, Quit
    } eom_action = EomAction::Next;
    seconds last_time(-1);
    SDL_Event event;
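
    /* Main event loop: wait briefly for SDL events, updating the displayed
     * playback time whenever it changes. */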
    while(1)
    {
        int have_evt = SDL_WaitEventTimeout(&event, 10);

        auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
        if(cur_time != last_time)
        {
            auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
            std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
            last_time = cur_time;
        }
        if(!have_evt) continue;

        switch(event.type)
        {
            case SDL_KEYDOWN:
                switch(event.key.keysym.sym)
                {
                    case SDLK_ESCAPE:
                        movState->mQuit = true;
                        eom_action = EomAction::Quit;
                        break;

                    case SDLK_n:
                        movState->mQuit = true;
                        eom_action = EomAction::Next;
                        break;

                    default:
                        break;
                }
                break;

            case SDL_WINDOWEVENT:
                switch(event.window.event)
                {
                    case SDL_WINDOWEVENT_RESIZED:
                        SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                        SDL_RenderFillRect(renderer, nullptr);
                        break;

                    default:
                        break;
                }
                break;

            case SDL_QUIT:
                movState->mQuit = true;
                eom_action = EomAction::Quit;
                break;

            case FF_UPDATE_EVENT:
                reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
                    screen, renderer
                );
                break;

            case FF_REFRESH_EVENT:
                reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
                    screen, renderer
                );
                break;
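
            /* The current movie is done; either advance to the next file or
             * shut everything down. */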
            case FF_MOVIE_DONE_EVENT:
                std::cout<<'\n';
                last_time = seconds(-1);
                if(eom_action != EomAction::Quit)
                {
                    movState = nullptr;
                    while(fileidx < argc && !movState)
                    {
                        movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
                        if(!movState->prepare()) movState = nullptr;
                    }
                    if(movState)
                    {
                        movState->setTitle(screen);
                        break;
                    }
                }

                /* Nothing more to play. Shut everything down and quit. */
                movState = nullptr;

                CloseAL();

                SDL_DestroyRenderer(renderer);
                renderer = nullptr;
                SDL_DestroyWindow(screen);
                screen = nullptr;

                SDL_Quit();
                exit(0);

            default:
                break;
        }
    }

    std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
    return 1;
}