alffplay.c

  1. /*
  2. * alffplay.c
  3. *
  4. * A pedagogical video player that really works! Now with seeking features.
  5. *
  6. * Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, and a tutorial by
  7. * Martin Bohme <[email protected]>.
  8. *
  9. * Requires C99.
  10. */
  11. #include <stdio.h>
  12. #include <math.h>
#include <string.h> /* memset/memmove/memcpy (explicit include; may also come in via SDL's headers) */
  13. #include <libavcodec/avcodec.h>
  14. #include <libavformat/avformat.h>
  15. #include <libavformat/avio.h>
  16. #include <libavutil/time.h>
  17. #include <libavutil/avstring.h>
  18. #include <libavutil/channel_layout.h>
  19. #include <libswscale/swscale.h>
  20. #include <libswresample/swresample.h>
  21. #include <SDL.h>
  22. #include <SDL_thread.h>
  23. #include <SDL_video.h>
  24. #include "threads.h"
  25. #include "bool.h"
  26. #include "AL/al.h"
  27. #include "AL/alc.h"
  28. #include "AL/alext.h"
  29. static bool has_latency_check = false;
  30. static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;
  31. #define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
  32. #define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
  33. #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) /* Bytes of compressed audio data to keep queued */
  34. #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) /* Bytes of compressed video data to keep queued */
  35. #define AV_SYNC_THRESHOLD 0.01
  36. #define AV_NOSYNC_THRESHOLD 10.0
  37. #define SAMPLE_CORRECTION_MAX_DIFF 0.1
  38. #define AUDIO_DIFF_AVG_NB 20
  39. #define VIDEO_PICTURE_QUEUE_SIZE 16
  40. enum {
  41. FF_UPDATE_EVENT = SDL_USEREVENT,
  42. FF_REFRESH_EVENT,
  43. FF_QUIT_EVENT
  44. };
  45. typedef struct PacketQueue {
  46. AVPacketList *first_pkt, *last_pkt;
  47. volatile int nb_packets;
  48. volatile int size;
  49. volatile bool flushing;
  50. almtx_t mutex;
  51. alcnd_t cond;
  52. } PacketQueue;
  53. typedef struct VideoPicture {
  54. SDL_Texture *bmp;
  55. int width, height; /* Logical image size (actual size may be larger) */
  56. volatile bool updated;
  57. double pts;
  58. } VideoPicture;
  59. typedef struct AudioState {
  60. AVStream *st;
  61. PacketQueue q;
  62. AVPacket pkt;
  63. /* Used for clock difference average computation */
  64. double diff_accum;
  65. double diff_avg_coef;
  66. double diff_threshold;
  67. /* Time (in seconds) of the next sample to be buffered */
  68. double current_pts;
  69. /* Decompressed sample frame, and swresample context for conversion */
  70. AVFrame *decoded_aframe;
  71. struct SwrContext *swres_ctx;
  72. /* Conversion format, for what gets fed to OpenAL */
  73. int dst_ch_layout;
  74. enum AVSampleFormat dst_sample_fmt;
  75. /* Storage of converted samples */
  76. uint8_t *samples;
  77. ssize_t samples_len; /* In samples */
  78. ssize_t samples_pos;
  79. int samples_max;
  80. /* OpenAL format */
  81. ALenum format;
  82. ALint frame_size;
  83. ALuint source;
  84. ALuint buffer[AUDIO_BUFFER_QUEUE_SIZE];
  85. ALuint buffer_idx;
  86. almtx_t src_mutex;
  87. althrd_t thread;
  88. } AudioState;
  89. typedef struct VideoState {
  90. AVStream *st;
  91. PacketQueue q;
  92. double clock;
  93. double frame_timer;
  94. double frame_last_pts;
  95. double frame_last_delay;
  96. double current_pts;
  97. /* time (av_gettime) at which we updated current_pts - used to have running video pts */
  98. int64_t current_pts_time;
  99. /* Decompressed video frame, and swscale context for conversion */
  100. AVFrame *decoded_vframe;
  101. struct SwsContext *swscale_ctx;
  102. VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
  103. int pictq_size, pictq_rindex, pictq_windex;
  104. almtx_t pictq_mutex;
  105. alcnd_t pictq_cond;
  106. althrd_t thread;
  107. } VideoState;
  108. typedef struct MovieState {
  109. AVFormatContext *pFormatCtx;
  110. int videoStream, audioStream;
  111. volatile bool seek_req;
  112. int64_t seek_pos;
  113. int av_sync_type;
  114. int64_t external_clock_base;
  115. AudioState audio;
  116. VideoState video;
  117. althrd_t parse_thread;
  118. char filename[1024];
  119. volatile bool quit;
  120. } MovieState;
  121. enum {
  122. AV_SYNC_AUDIO_MASTER,
  123. AV_SYNC_VIDEO_MASTER,
  124. AV_SYNC_EXTERNAL_MASTER,
  125. DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER
  126. };
  127. static AVPacket flush_pkt = { .data = (uint8_t*)"FLUSH" };
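/* Initialize an empty packet queue along with the mutex and condition
 * variable that protect it. */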
  128. static void packet_queue_init(PacketQueue *q)
  129. {
  130. memset(q, 0, sizeof(PacketQueue));
  131. almtx_init(&q->mutex, almtx_plain);
  132. alcnd_init(&q->cond);
  133. }
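/* Append a packet to the end of the queue and wake any thread waiting in
 * packet_queue_get. Returns 0 on success, -1 on allocation failure. */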
  134. static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
  135. {
  136. AVPacketList *pkt1;
  137. if(pkt != &flush_pkt && !pkt->buf && av_dup_packet(pkt) < 0)
  138. return -1;
  139. pkt1 = av_malloc(sizeof(AVPacketList));
  140. if(!pkt1) return -1;
  141. pkt1->pkt = *pkt;
  142. pkt1->next = NULL;
  143. almtx_lock(&q->mutex);
  144. if(!q->last_pkt)
  145. q->first_pkt = pkt1;
  146. else
  147. q->last_pkt->next = pkt1;
  148. q->last_pkt = pkt1;
  149. q->nb_packets++;
  150. q->size += pkt1->pkt.size;
  151. almtx_unlock(&q->mutex);
  152. alcnd_signal(&q->cond);
  153. return 0;
  154. }
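/* Remove and return the next packet from the queue, waiting for one to
 * arrive if necessary. Returns 1 if a packet was returned, 0 if the queue is
 * flushing and empty, or -1 if the player is quitting. */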
  155. static int packet_queue_get(PacketQueue *q, AVPacket *pkt, MovieState *state)
  156. {
  157. AVPacketList *pkt1;
  158. int ret = -1;
  159. almtx_lock(&q->mutex);
  160. while(!state->quit)
  161. {
  162. pkt1 = q->first_pkt;
  163. if(pkt1)
  164. {
  165. q->first_pkt = pkt1->next;
  166. if(!q->first_pkt)
  167. q->last_pkt = NULL;
  168. q->nb_packets--;
  169. q->size -= pkt1->pkt.size;
  170. *pkt = pkt1->pkt;
  171. av_free(pkt1);
  172. ret = 1;
  173. break;
  174. }
  175. if(q->flushing)
  176. {
  177. ret = 0;
  178. break;
  179. }
  180. alcnd_wait(&q->cond, &q->mutex);
  181. }
  182. almtx_unlock(&q->mutex);
  183. return ret;
  184. }
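/* Free every queued packet (the flush packet's static data is left alone)
 * and reset the queue's counters. */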
  185. static void packet_queue_clear(PacketQueue *q)
  186. {
  187. AVPacketList *pkt, *pkt1;
  188. almtx_lock(&q->mutex);
  189. for(pkt = q->first_pkt;pkt != NULL;pkt = pkt1)
  190. {
  191. pkt1 = pkt->next;
  192. if(pkt->pkt.data != flush_pkt.data)
  193. av_free_packet(&pkt->pkt);
  194. av_freep(&pkt);
  195. }
  196. q->last_pkt = NULL;
  197. q->first_pkt = NULL;
  198. q->nb_packets = 0;
  199. q->size = 0;
  200. almtx_unlock(&q->mutex);
  201. }
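/* Mark the queue as flushing so readers return instead of waiting once the
 * remaining packets are consumed. */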
  202. static void packet_queue_flush(PacketQueue *q)
  203. {
  204. almtx_lock(&q->mutex);
  205. q->flushing = true;
  206. almtx_unlock(&q->mutex);
  207. alcnd_signal(&q->cond);
  208. }
  209. static void packet_queue_deinit(PacketQueue *q)
  210. {
  211. packet_queue_clear(q);
  212. alcnd_destroy(&q->cond);
  213. almtx_destroy(&q->mutex);
  214. }
  215. static double get_audio_clock(AudioState *state)
  216. {
  217. double pts;
  218. almtx_lock(&state->src_mutex);
  219. /* The audio clock is the timestamp of the sample currently being heard.
  220. * It's based on 4 components:
  221. * 1 - The timestamp of the next sample to buffer (state->current_pts)
  222. * 2 - The length of the source's buffer queue (AL_SEC_LENGTH_SOFT)
  223. * 3 - The offset OpenAL is currently at in the source (the first value
  224. * from AL_SEC_OFFSET_LATENCY_SOFT)
  225. * 4 - The latency between OpenAL and the DAC (the second value from
  226. * AL_SEC_OFFSET_LATENCY_SOFT)
  227. *
  228. * Subtracting the length of the source queue from the next sample's
  229. * timestamp gives the timestamp of the sample at the start of the source
  230. * queue. Adding the source offset to that results in the timestamp for
  231. * OpenAL's current position, and subtracting the source latency from that
  232. * gives the timestamp of the sample currently at the DAC.
  233. */
  234. pts = state->current_pts;
  235. if(state->source)
  236. {
  237. ALdouble offset[2] = { 0.0, 0.0 };
  238. ALdouble queue_len = 0.0;
  239. ALint status;
  240. /* NOTE: The source state must be checked last, in case an underrun
  241. * occurs and the source stops between retrieving the offset+latency
  242. * and getting the state. */
  243. if(has_latency_check)
  244. {
  245. alGetSourcedvSOFT(state->source, AL_SEC_OFFSET_LATENCY_SOFT, offset);
  246. alGetSourcedvSOFT(state->source, AL_SEC_LENGTH_SOFT, &queue_len);
  247. }
  248. else
  249. {
  250. ALint ioffset, ilen;
  251. alGetSourcei(state->source, AL_SAMPLE_OFFSET, &ioffset);
  252. alGetSourcei(state->source, AL_SAMPLE_LENGTH_SOFT, &ilen);
  253. offset[0] = (double)ioffset / state->st->codec->sample_rate;
  254. queue_len = (double)ilen / state->st->codec->sample_rate;
  255. }
  256. alGetSourcei(state->source, AL_SOURCE_STATE, &status);
  257. /* If the source is AL_STOPPED, then there was an underrun and all
  258. * buffers are processed, so ignore the source queue. The audio thread
  259. * will put the source into an AL_INITIAL state and clear the queue
  260. * when it starts recovery. */
  261. if(status != AL_STOPPED)
  262. pts = pts - queue_len + offset[0];
  263. if(status == AL_PLAYING)
  264. pts = pts - offset[1];
  265. }
  266. almtx_unlock(&state->src_mutex);
  267. return (pts >= 0.0) ? pts : 0.0;
  268. }
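/* The video clock is the pts of the current picture plus the real time that
 * has passed since that pts was set. */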
  269. static double get_video_clock(VideoState *state)
  270. {
  271. double delta = (av_gettime() - state->current_pts_time) / 1000000.0;
  272. return state->current_pts + delta;
  273. }
  274. static double get_external_clock(MovieState *movState)
  275. {
  276. return (av_gettime()-movState->external_clock_base) / 1000000.0;
  277. }
  278. double get_master_clock(MovieState *movState)
  279. {
  280. if(movState->av_sync_type == AV_SYNC_VIDEO_MASTER)
  281. return get_video_clock(&movState->video);
  282. if(movState->av_sync_type == AV_SYNC_AUDIO_MASTER)
  283. return get_audio_clock(&movState->audio);
  284. return get_external_clock(movState);
  285. }
  286. /* Return how many samples to skip to maintain sync (negative means to
  287. * duplicate samples). */
  288. static int synchronize_audio(MovieState *movState)
  289. {
  290. double diff, avg_diff;
  291. double ref_clock;
  292. if(movState->av_sync_type == AV_SYNC_AUDIO_MASTER)
  293. return 0;
  294. ref_clock = get_master_clock(movState);
  295. diff = ref_clock - get_audio_clock(&movState->audio);
  296. if(!(fabs(diff) < AV_NOSYNC_THRESHOLD))
  297. {
  298. /* Difference is TOO big; reset diff stuff */
  299. movState->audio.diff_accum = 0.0;
  300. return 0;
  301. }
  302. /* Accumulate the diffs */
  303. movState->audio.diff_accum = movState->audio.diff_accum*movState->audio.diff_avg_coef + diff;
  304. avg_diff = movState->audio.diff_accum*(1.0 - movState->audio.diff_avg_coef);
  305. if(fabs(avg_diff) < movState->audio.diff_threshold)
  306. return 0;
  307. /* Constrain the per-update difference to avoid exceedingly large skips */
  308. if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
  309. diff = SAMPLE_CORRECTION_MAX_DIFF;
  310. else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
  311. diff = -SAMPLE_CORRECTION_MAX_DIFF;
  312. return (int)(diff*movState->audio.st->codec->sample_rate);
  313. }
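/* Decode the next audio frame from the queued packets and convert it to the
 * format chosen for OpenAL. Returns the number of converted sample frames,
 * or -1 on error or after handling a flush packet. */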
  314. static int audio_decode_frame(MovieState *movState)
  315. {
  316. AVPacket *pkt = &movState->audio.pkt;
  317. while(!movState->quit)
  318. {
  319. while(!movState->quit && pkt->size == 0)
  320. {
  321. av_free_packet(pkt);
  322. /* Get the next packet */
  323. int err;
  324. if((err=packet_queue_get(&movState->audio.q, pkt, movState)) <= 0)
  325. {
  326. if(err == 0)
  327. break;
  328. return err;
  329. }
  330. if(pkt->data == flush_pkt.data)
  331. {
  332. avcodec_flush_buffers(movState->audio.st->codec);
  333. movState->audio.diff_accum = 0.0;
  334. movState->audio.current_pts = av_q2d(movState->audio.st->time_base)*pkt->pts;
  335. alSourceRewind(movState->audio.source);
  336. alSourcei(movState->audio.source, AL_BUFFER, 0);
  337. av_new_packet(pkt, 0);
  338. return -1;
  339. }
  340. /* If provided, update w/ pts */
  341. if(pkt->pts != AV_NOPTS_VALUE)
  342. movState->audio.current_pts = av_q2d(movState->audio.st->time_base)*pkt->pts;
  343. }
  344. AVFrame *frame = movState->audio.decoded_aframe;
  345. int got_frame = 0;
  346. int len1 = avcodec_decode_audio4(movState->audio.st->codec, frame,
  347. &got_frame, pkt);
  348. if(len1 < 0) break;
  349. if(len1 <= pkt->size)
  350. {
  351. /* Move the unread data to the front and clear the end bits */
  352. int remaining = pkt->size - len1;
  353. memmove(pkt->data, &pkt->data[len1], remaining);
  354. av_shrink_packet(pkt, remaining);
  355. }
  356. if(!got_frame || frame->nb_samples <= 0)
  357. {
  358. av_frame_unref(frame);
  359. continue;
  360. }
  361. if(frame->nb_samples > movState->audio.samples_max)
  362. {
  363. av_freep(&movState->audio.samples);
  364. av_samples_alloc(
  365. &movState->audio.samples, NULL, movState->audio.st->codec->channels,
  366. frame->nb_samples, movState->audio.dst_sample_fmt, 0
  367. );
  368. movState->audio.samples_max = frame->nb_samples;
  369. }
  370. /* Return the number of sample frames converted */
  371. int data_size = swr_convert(movState->audio.swres_ctx,
  372. &movState->audio.samples, frame->nb_samples,
  373. (const uint8_t**)frame->data, frame->nb_samples
  374. );
  375. av_frame_unref(frame);
  376. return data_size;
  377. }
  378. return -1;
  379. }
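/* Fill 'samples' with up to 'length' bytes of converted audio, skipping or
 * duplicating sample frames as needed to stay in sync with the master clock.
 * Returns the number of bytes written, or -1 on error. */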
  380. static int read_audio(MovieState *movState, uint8_t *samples, int length)
  381. {
  382. int sample_skip = synchronize_audio(movState);
  383. int audio_size = 0;
  384. /* Read the next chunk of decoded data and copy it into the output
  385. * buffer */
  386. length /= movState->audio.frame_size;
  387. while(audio_size < length)
  388. {
  389. if(movState->audio.samples_len <= 0 || movState->audio.samples_pos >= movState->audio.samples_len)
  390. {
  391. int frame_len = audio_decode_frame(movState);
  392. if(frame_len < 0) return -1;
  393. movState->audio.samples_len = frame_len;
  394. if(movState->audio.samples_len == 0)
  395. break;
  396. movState->audio.samples_pos = (movState->audio.samples_len < sample_skip) ?
  397. movState->audio.samples_len : sample_skip;
  398. sample_skip -= movState->audio.samples_pos;
  399. movState->audio.current_pts += (double)movState->audio.samples_pos /
  400. (double)movState->audio.st->codec->sample_rate;
  401. continue;
  402. }
  403. int rem = length - audio_size;
  404. if(movState->audio.samples_pos >= 0)
  405. {
  406. int n = movState->audio.frame_size;
  407. int len = movState->audio.samples_len - movState->audio.samples_pos;
  408. if(rem > len) rem = len;
  409. memcpy(samples + audio_size*n,
  410. movState->audio.samples + movState->audio.samples_pos*n,
  411. rem*n);
  412. }
  413. else
  414. {
  415. int n = movState->audio.frame_size;
  416. int len = -movState->audio.samples_pos;
  417. if(rem > len) rem = len;
  418. /* Add samples by copying the first sample */
  419. if(n == 1)
  420. {
  421. uint8_t sample = ((uint8_t*)movState->audio.samples)[0];
  422. uint8_t *q = (uint8_t*)samples + audio_size;
  423. for(int i = 0;i < rem;i++)
  424. *(q++) = sample;
  425. }
  426. else if(n == 2)
  427. {
  428. uint16_t sample = ((uint16_t*)movState->audio.samples)[0];
  429. uint16_t *q = (uint16_t*)samples + audio_size;
  430. for(int i = 0;i < rem;i++)
  431. *(q++) = sample;
  432. }
  433. else if(n == 4)
  434. {
  435. uint32_t sample = ((uint32_t*)movState->audio.samples)[0];
  436. uint32_t *q = (uint32_t*)samples + audio_size;
  437. for(int i = 0;i < rem;i++)
  438. *(q++) = sample;
  439. }
  440. else if(n == 8)
  441. {
  442. uint64_t sample = ((uint64_t*)movState->audio.samples)[0];
  443. uint64_t *q = (uint64_t*)samples + audio_size;
  444. for(int i = 0;i < rem;i++)
  445. *(q++) = sample;
  446. }
  447. else
  448. {
  449. uint8_t *sample = movState->audio.samples;
  450. uint8_t *q = samples + audio_size*n;
  451. for(int i = 0;i < rem;i++)
  452. {
  453. memcpy(q, sample, n);
  454. q += n;
  455. }
  456. }
  457. }
  458. movState->audio.samples_pos += rem;
  459. movState->audio.current_pts += (double)rem / movState->audio.st->codec->sample_rate;
  460. audio_size += rem;
  461. }
  462. return audio_size * movState->audio.frame_size;
  463. }
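/* Audio playback thread. Sets up the OpenAL source and buffers and the
 * swresample converter, then keeps the source's buffer queue topped up with
 * decoded audio until playback ends. */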
  464. static int audio_thread(void *userdata)
  465. {
  466. MovieState *movState = (MovieState*)userdata;
  467. uint8_t *samples = NULL;
  468. ALsizei buffer_len;
  469. ALenum fmt;
  470. alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, movState->audio.buffer);
  471. alGenSources(1, &movState->audio.source);
  472. alSourcei(movState->audio.source, AL_SOURCE_RELATIVE, AL_TRUE);
  473. alSourcei(movState->audio.source, AL_ROLLOFF_FACTOR, 0);
  474. av_new_packet(&movState->audio.pkt, 0);
  475. /* Find a suitable format for OpenAL. */
  476. movState->audio.format = AL_NONE;
  477. if(movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_U8 ||
  478. movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_U8P)
  479. {
  480. movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_U8;
  481. movState->audio.frame_size = 1;
  482. if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_7POINT1 &&
  483. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  484. (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
  485. {
  486. movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
  487. movState->audio.frame_size *= 8;
  488. movState->audio.format = fmt;
  489. }
  490. if((movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1 ||
  491. movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
  492. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  493. (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
  494. {
  495. movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
  496. movState->audio.frame_size *= 6;
  497. movState->audio.format = fmt;
  498. }
  499. if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
  500. {
  501. movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
  502. movState->audio.frame_size *= 1;
  503. movState->audio.format = AL_FORMAT_MONO8;
  504. }
  505. if(movState->audio.format == AL_NONE)
  506. {
  507. movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
  508. movState->audio.frame_size *= 2;
  509. movState->audio.format = AL_FORMAT_STEREO8;
  510. }
  511. }
  512. if((movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_FLT ||
  513. movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
  514. alIsExtensionPresent("AL_EXT_FLOAT32"))
  515. {
  516. movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_FLT;
  517. movState->audio.frame_size = 4;
  518. if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_7POINT1 &&
  519. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  520. (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
  521. {
  522. movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
  523. movState->audio.frame_size *= 8;
  524. movState->audio.format = fmt;
  525. }
  526. if((movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1 ||
  527. movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
  528. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  529. (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
  530. {
  531. movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
  532. movState->audio.frame_size *= 6;
  533. movState->audio.format = fmt;
  534. }
  535. if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
  536. {
  537. movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
  538. movState->audio.frame_size *= 1;
  539. movState->audio.format = AL_FORMAT_MONO_FLOAT32;
  540. }
  541. if(movState->audio.format == AL_NONE)
  542. {
  543. movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
  544. movState->audio.frame_size *= 2;
  545. movState->audio.format = AL_FORMAT_STEREO_FLOAT32;
  546. }
  547. }
  548. if(movState->audio.format == AL_NONE)
  549. {
  550. movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_S16;
  551. movState->audio.frame_size = 2;
  552. if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_7POINT1 &&
  553. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  554. (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
  555. {
  556. movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
  557. movState->audio.frame_size *= 8;
  558. movState->audio.format = fmt;
  559. }
  560. if((movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1 ||
  561. movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
  562. alIsExtensionPresent("AL_EXT_MCFORMATS") &&
  563. (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
  564. {
  565. movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
  566. movState->audio.frame_size *= 6;
  567. movState->audio.format = fmt;
  568. }
  569. if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
  570. {
  571. movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
  572. movState->audio.frame_size *= 1;
  573. movState->audio.format = AL_FORMAT_MONO16;
  574. }
  575. if(movState->audio.format == AL_NONE)
  576. {
  577. movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
  578. movState->audio.frame_size *= 2;
  579. movState->audio.format = AL_FORMAT_STEREO16;
  580. }
  581. }
  582. buffer_len = AUDIO_BUFFER_TIME * movState->audio.st->codec->sample_rate / 1000 *
  583. movState->audio.frame_size;
  584. samples = av_malloc(buffer_len);
  585. movState->audio.samples = NULL;
  586. movState->audio.samples_max = 0;
  587. movState->audio.samples_pos = 0;
  588. movState->audio.samples_len = 0;
  589. if(!(movState->audio.decoded_aframe=av_frame_alloc()))
  590. {
  591. fprintf(stderr, "Failed to allocate audio frame\n");
  592. goto finish;
  593. }
  594. movState->audio.swres_ctx = swr_alloc_set_opts(NULL,
  595. movState->audio.dst_ch_layout,
  596. movState->audio.dst_sample_fmt,
  597. movState->audio.st->codec->sample_rate,
  598. movState->audio.st->codec->channel_layout ?
  599. movState->audio.st->codec->channel_layout :
  600. (uint64_t)av_get_default_channel_layout(movState->audio.st->codec->channels),
  601. movState->audio.st->codec->sample_fmt,
  602. movState->audio.st->codec->sample_rate,
  603. 0, NULL
  604. );
  605. if(!movState->audio.swres_ctx || swr_init(movState->audio.swres_ctx) != 0)
  606. {
  607. fprintf(stderr, "Failed to initialize audio converter\n");
  608. goto finish;
  609. }
  610. almtx_lock(&movState->audio.src_mutex);
  611. while(alGetError() == AL_NO_ERROR && !movState->quit)
  612. {
  613. /* First remove any processed buffers. */
  614. ALint processed;
  615. alGetSourcei(movState->audio.source, AL_BUFFERS_PROCESSED, &processed);
  616. alSourceUnqueueBuffers(movState->audio.source, processed, (ALuint[AUDIO_BUFFER_QUEUE_SIZE]){});
  617. /* Refill the buffer queue. */
  618. ALint queued;
  619. alGetSourcei(movState->audio.source, AL_BUFFERS_QUEUED, &queued);
  620. while(queued < AUDIO_BUFFER_QUEUE_SIZE)
  621. {
  622. int audio_size;
  623. /* Read the next chunk of data, fill the buffer, and queue it on
  624. * the source */
  625. audio_size = read_audio(movState, samples, buffer_len);
  626. if(audio_size < 0) break;
  627. ALuint bufid = movState->audio.buffer[movState->audio.buffer_idx++];
  628. movState->audio.buffer_idx %= AUDIO_BUFFER_QUEUE_SIZE;
  629. alBufferData(bufid, movState->audio.format, samples, audio_size,
  630. movState->audio.st->codec->sample_rate);
  631. alSourceQueueBuffers(movState->audio.source, 1, &bufid);
  632. queued++;
  633. }
  634. /* Check that the source is playing. */
  635. ALint state;
  636. alGetSourcei(movState->audio.source, AL_SOURCE_STATE, &state);
  637. if(state == AL_STOPPED)
  638. {
  639. /* AL_STOPPED means there was an underrun. Double-check that all
  640. * processed buffers are removed, then rewind the source to get it
  641. * back into an AL_INITIAL state. */
  642. alGetSourcei(movState->audio.source, AL_BUFFERS_PROCESSED, &processed);
  643. alSourceUnqueueBuffers(movState->audio.source, processed, (ALuint[AUDIO_BUFFER_QUEUE_SIZE]){});
  644. alSourceRewind(movState->audio.source);
  645. continue;
  646. }
  647. almtx_unlock(&movState->audio.src_mutex);
  648. /* (re)start the source if needed, and wait for a buffer to finish */
  649. if(state != AL_PLAYING && state != AL_PAUSED)
  650. {
  651. alGetSourcei(movState->audio.source, AL_BUFFERS_QUEUED, &queued);
  652. if(queued > 0) alSourcePlay(movState->audio.source);
  653. }
  654. SDL_Delay(AUDIO_BUFFER_TIME);
  655. almtx_lock(&movState->audio.src_mutex);
  656. }
  657. almtx_unlock(&movState->audio.src_mutex);
  658. finish:
  659. av_frame_free(&movState->audio.decoded_aframe);
  660. swr_free(&movState->audio.swres_ctx);
  661. av_freep(&samples);
  662. av_freep(&movState->audio.samples);
  663. alDeleteSources(1, &movState->audio.source);
  664. alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, movState->audio.buffer);
  665. return 0;
  666. }
  667. static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
  668. {
  669. (void)interval;
  670. SDL_PushEvent(&(SDL_Event){ .user={.type=FF_REFRESH_EVENT, .data1=opaque} });
  671. return 0; /* 0 means stop timer */
  672. }
  673. /* Schedule a video refresh in 'delay' ms */
  674. static void schedule_refresh(MovieState *movState, int delay)
  675. {
  676. SDL_AddTimer(delay, sdl_refresh_timer_cb, movState);
  677. }
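/* Draw the current picture, letterboxed to preserve the stream's aspect
 * ratio within the window. */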
  678. static void video_display(MovieState *movState, SDL_Window *screen, SDL_Renderer *renderer)
  679. {
  680. VideoPicture *vp = &movState->video.pictq[movState->video.pictq_rindex];
  681. if(!vp->bmp)
  682. return;
  683. float aspect_ratio;
  684. int win_w, win_h;
  685. int w, h, x, y;
  686. if(movState->video.st->codec->sample_aspect_ratio.num == 0)
  687. aspect_ratio = 0.0f;
  688. else
  689. {
  690. aspect_ratio = av_q2d(movState->video.st->codec->sample_aspect_ratio) *
  691. movState->video.st->codec->width /
  692. movState->video.st->codec->height;
  693. }
  694. if(aspect_ratio <= 0.0f)
  695. {
  696. aspect_ratio = (float)movState->video.st->codec->width /
  697. (float)movState->video.st->codec->height;
  698. }
  699. SDL_GetWindowSize(screen, &win_w, &win_h);
  700. h = win_h;
  701. w = ((int)rint(h * aspect_ratio) + 3) & ~3;
  702. if(w > win_w)
  703. {
  704. w = win_w;
  705. h = ((int)rint(w / aspect_ratio) + 3) & ~3;
  706. }
  707. x = (win_w - w) / 2;
  708. y = (win_h - h) / 2;
  709. SDL_RenderCopy(renderer, vp->bmp,
  710. &(SDL_Rect){ .x=0, .y=0, .w=vp->width, .h=vp->height },
  711. &(SDL_Rect){ .x=x, .y=y, .w=w, .h=h }
  712. );
  713. SDL_RenderPresent(renderer);
  714. }
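/* Handler for FF_REFRESH_EVENT. Times the next queued picture against the
 * master clock, displays it (or skips it if it's too late), and schedules
 * the next refresh. */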
  715. static void video_refresh_timer(MovieState *movState, SDL_Window *screen, SDL_Renderer *renderer)
  716. {
  717. if(!movState->video.st)
  718. {
  719. schedule_refresh(movState, 100);
  720. return;
  721. }
  722. almtx_lock(&movState->video.pictq_mutex);
  723. retry:
  724. if(movState->video.pictq_size == 0)
  725. schedule_refresh(movState, 1);
  726. else
  727. {
  728. VideoPicture *vp = &movState->video.pictq[movState->video.pictq_rindex];
  729. double actual_delay, delay, sync_threshold, ref_clock, diff;
  730. movState->video.current_pts = vp->pts;
  731. movState->video.current_pts_time = av_gettime();
  732. delay = vp->pts - movState->video.frame_last_pts; /* the pts from last time */
  733. if(delay <= 0 || delay >= 1.0)
  734. {
  735. /* if incorrect delay, use previous one */
  736. delay = movState->video.frame_last_delay;
  737. }
  738. /* save for next time */
  739. movState->video.frame_last_delay = delay;
  740. movState->video.frame_last_pts = vp->pts;
  741. /* Update delay to sync to clock if not master source. */
  742. if(movState->av_sync_type != AV_SYNC_VIDEO_MASTER)
  743. {
  744. ref_clock = get_master_clock(movState);
  745. diff = vp->pts - ref_clock;
  746. /* Skip or repeat the frame. Take delay into account. */
  747. sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
  748. if(fabs(diff) < AV_NOSYNC_THRESHOLD)
  749. {
  750. if(diff <= -sync_threshold)
  751. delay = 0;
  752. else if(diff >= sync_threshold)
  753. delay = 2 * delay;
  754. }
  755. }
  756. movState->video.frame_timer += delay;
  757. /* Compute the REAL delay. */
  758. actual_delay = movState->video.frame_timer - (av_gettime() / 1000000.0);
  759. if(!(actual_delay >= 0.010))
  760. {
  761. /* We don't have time to handle this picture, just skip to the next one. */
  762. movState->video.pictq_rindex = (movState->video.pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE;
  763. movState->video.pictq_size--;
  764. alcnd_signal(&movState->video.pictq_cond);
  765. goto retry;
  766. }
  767. schedule_refresh(movState, (int)(actual_delay*1000.0 + 0.5));
  768. /* Show the picture! */
  769. video_display(movState, screen, renderer);
  770. /* Update queue for next picture. */
  771. movState->video.pictq_rindex = (movState->video.pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE;
  772. movState->video.pictq_size--;
  773. alcnd_signal(&movState->video.pictq_cond);
  774. }
  775. almtx_unlock(&movState->video.pictq_mutex);
  776. }
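/* Handler for FF_UPDATE_EVENT, run in the main thread. (Re)creates the
 * texture if needed and uploads the decoded frame, converting with swscale
 * when the source isn't already YUV420P. */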
  777. static void update_picture(MovieState *movState, bool *first_update, SDL_Window *screen, SDL_Renderer *renderer)
  778. {
  779. VideoPicture *vp = &movState->video.pictq[movState->video.pictq_windex];
  780. /* allocate or resize the buffer! */
  781. if(!vp->bmp || vp->width != movState->video.st->codec->width ||
  782. vp->height != movState->video.st->codec->height)
  783. {
  784. if(vp->bmp)
  785. SDL_DestroyTexture(vp->bmp);
  786. vp->bmp = SDL_CreateTexture(
  787. renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
  788. movState->video.st->codec->coded_width, movState->video.st->codec->coded_height
  789. );
  790. if(!vp->bmp)
  791. fprintf(stderr, "Failed to create YV12 texture!\n");
  792. vp->width = movState->video.st->codec->width;
  793. vp->height = movState->video.st->codec->height;
  794. if(*first_update && vp->width > 0 && vp->height > 0)
  795. {
  796. /* For the first update, set the window size to the video size. */
  797. *first_update = false;
  798. int w = vp->width;
  799. int h = vp->height;
  800. if(movState->video.st->codec->sample_aspect_ratio.num != 0 &&
  801. movState->video.st->codec->sample_aspect_ratio.den != 0)
  802. {
  803. double aspect_ratio = av_q2d(movState->video.st->codec->sample_aspect_ratio);
  804. if(aspect_ratio >= 1.0)
  805. w = (int)(w*aspect_ratio + 0.5);
  806. else if(aspect_ratio > 0.0)
  807. h = (int)(h/aspect_ratio + 0.5);
  808. }
  809. SDL_SetWindowSize(screen, w, h);
  810. }
  811. }
  812. if(vp->bmp)
  813. {
  814. AVFrame *frame = movState->video.decoded_vframe;
  815. void *pixels = NULL;
  816. int pitch = 0;
  817. if(movState->video.st->codec->pix_fmt == PIX_FMT_YUV420P)
  818. SDL_UpdateYUVTexture(vp->bmp, NULL,
  819. frame->data[0], frame->linesize[0],
  820. frame->data[1], frame->linesize[1],
  821. frame->data[2], frame->linesize[2]
  822. );
  823. else if(SDL_LockTexture(vp->bmp, NULL, &pixels, &pitch) != 0)
  824. fprintf(stderr, "Failed to lock texture\n");
  825. else
  826. {
  827. // Convert the image into YUV format that SDL uses
  828. int coded_w = movState->video.st->codec->coded_width;
  829. int coded_h = movState->video.st->codec->coded_height;
  830. int w = movState->video.st->codec->width;
  831. int h = movState->video.st->codec->height;
  832. if(!movState->video.swscale_ctx)
  833. movState->video.swscale_ctx = sws_getContext(
  834. w, h, movState->video.st->codec->pix_fmt,
  835. w, h, PIX_FMT_YUV420P, SWS_X, NULL, NULL, NULL
  836. );
  837. /* point pict at the queue */
  838. AVPicture pict;
  839. pict.data[0] = pixels;
  840. pict.data[2] = pict.data[0] + coded_w*coded_h;
  841. pict.data[1] = pict.data[2] + coded_w*coded_h/4;
  842. pict.linesize[0] = pitch;
  843. pict.linesize[2] = pitch / 2;
  844. pict.linesize[1] = pitch / 2;
  845. sws_scale(movState->video.swscale_ctx, (const uint8_t**)frame->data,
  846. frame->linesize, 0, h, pict.data, pict.linesize);
  847. SDL_UnlockTexture(vp->bmp);
  848. }
  849. }
  850. almtx_lock(&movState->video.pictq_mutex);
  851. vp->updated = true;
  852. almtx_unlock(&movState->video.pictq_mutex);
  853. alcnd_signal(&movState->video.pictq_cond);
  854. }
  855. static int queue_picture(MovieState *movState, double pts)
  856. {
  857. /* Wait until we have space for a new pic */
  858. almtx_lock(&movState->video.pictq_mutex);
  859. while(movState->video.pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !movState->quit)
  860. alcnd_wait(&movState->video.pictq_cond, &movState->video.pictq_mutex);
  861. almtx_unlock(&movState->video.pictq_mutex);
  862. if(movState->quit)
  863. return -1;
  864. VideoPicture *vp = &movState->video.pictq[movState->video.pictq_windex];
  865. /* We have to create/update the picture in the main thread */
  866. vp->updated = false;
  867. SDL_PushEvent(&(SDL_Event){ .user={.type=FF_UPDATE_EVENT, .data1=movState} });
  868. /* Wait until the picture is updated. */
  869. almtx_lock(&movState->video.pictq_mutex);
  870. while(!vp->updated && !movState->quit)
  871. alcnd_wait(&movState->video.pictq_cond, &movState->video.pictq_mutex);
  872. almtx_unlock(&movState->video.pictq_mutex);
  873. if(movState->quit)
  874. return -1;
  875. vp->pts = pts;
  876. movState->video.pictq_windex = (movState->video.pictq_windex+1)%VIDEO_PICTURE_QUEUE_SIZE;
  877. almtx_lock(&movState->video.pictq_mutex);
  878. movState->video.pictq_size++;
  879. almtx_unlock(&movState->video.pictq_mutex);
  880. return 0;
  881. }
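/* Keep the video clock in step with the decoded frame's pts, and return the
 * pts value to use for display. */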
  882. static double synchronize_video(MovieState *movState, double pts)
  883. {
  884. double frame_delay;
  885. if(pts == 0.0) /* if we aren't given a pts, set it to the clock */
  886. pts = movState->video.clock;
  887. else /* if we have pts, set video clock to it */
  888. movState->video.clock = pts;
  889. /* update the video clock */
  890. frame_delay = av_q2d(movState->video.st->codec->time_base);
  891. /* if we are repeating a frame, adjust clock accordingly */
  892. frame_delay += movState->video.decoded_vframe->repeat_pict * (frame_delay * 0.5);
  893. movState->video.clock += frame_delay;
  894. return pts;
  895. }
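/* Video decoding thread. Pulls packets from the video queue, decodes them,
 * works out each frame's pts, and pushes finished frames onto the picture
 * queue. */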
  896. int video_thread(void *arg)
  897. {
  898. MovieState *movState = (MovieState*)arg;
  899. AVPacket *packet = (AVPacket[1]){};
  900. int64_t saved_pts, pkt_pts;
  901. int frameFinished;
  902. movState->video.decoded_vframe = av_frame_alloc();
  903. while(packet_queue_get(&movState->video.q, packet, movState) >= 0)
  904. {
  905. if(packet->data == flush_pkt.data)
  906. {
  907. avcodec_flush_buffers(movState->video.st->codec);
  908. almtx_lock(&movState->video.pictq_mutex);
  909. movState->video.pictq_size = 0;
  910. movState->video.pictq_rindex = 0;
  911. movState->video.pictq_windex = 0;
  912. almtx_unlock(&movState->video.pictq_mutex);
  913. movState->video.clock = av_q2d(movState->video.st->time_base)*packet->pts;
  914. movState->video.current_pts = movState->video.clock;
  915. movState->video.current_pts_time = av_gettime();
  916. continue;
  917. }
  918. pkt_pts = packet->pts;
  919. /* Decode video frame */
  920. avcodec_decode_video2(movState->video.st->codec, movState->video.decoded_vframe,
  921. &frameFinished, packet);
  922. if(pkt_pts != AV_NOPTS_VALUE && !movState->video.decoded_vframe->opaque)
  923. {
  924. /* Store the packet's original pts in the frame, in case the frame
  925. * is not finished decoding yet. */
  926. saved_pts = pkt_pts;
  927. movState->video.decoded_vframe->opaque = &saved_pts;
  928. }
  929. av_free_packet(packet);
  930. if(frameFinished)
  931. {
  932. double pts = av_q2d(movState->video.st->time_base);
  933. if(packet->dts != AV_NOPTS_VALUE)
  934. pts *= packet->dts;
  935. else if(movState->video.decoded_vframe->opaque)
  936. pts *= *(int64_t*)movState->video.decoded_vframe->opaque;
  937. else
  938. pts *= 0.0;
  939. movState->video.decoded_vframe->opaque = NULL;
  940. pts = synchronize_video(movState, pts);
  941. if(queue_picture(movState, pts) < 0)
  942. break;
  943. }
  944. }
  945. sws_freeContext(movState->video.swscale_ctx);
  946. movState->video.swscale_ctx = NULL;
  947. av_frame_free(&movState->video.decoded_vframe);
  948. return 0;
  949. }
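/* Open the decoder for the given stream and start the corresponding audio or
 * video thread. */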
  950. static int stream_component_open(MovieState *movState, int stream_index)
  951. {
  952. AVFormatContext *pFormatCtx = movState->pFormatCtx;
  953. AVCodecContext *codecCtx;
  954. AVCodec *codec;
  955. if(stream_index < 0 || (unsigned int)stream_index >= pFormatCtx->nb_streams)
  956. return -1;
  957. /* Get a pointer to the codec context for the video stream, and open the
  958. * associated codec */
  959. codecCtx = pFormatCtx->streams[stream_index]->codec;
  960. codec = avcodec_find_decoder(codecCtx->codec_id);
  961. if(!codec || avcodec_open2(codecCtx, codec, NULL) < 0)
  962. {
  963. fprintf(stderr, "Unsupported codec!\n");
  964. return -1;
  965. }
  966. /* Initialize and start the media type handler */
  967. switch(codecCtx->codec_type)
  968. {
  969. case AVMEDIA_TYPE_AUDIO:
  970. movState->audioStream = stream_index;
  971. movState->audio.st = pFormatCtx->streams[stream_index];
  972. /* Averaging filter for audio sync */
  973. movState->audio.diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
  974. /* Only correct the audio if the error is larger than this */
  975. movState->audio.diff_threshold = 2.0 * 0.050 /* 50 ms */;
  976. memset(&movState->audio.pkt, 0, sizeof(movState->audio.pkt));
  977. if(althrd_create(&movState->audio.thread, audio_thread, movState) != althrd_success)
  978. {
  979. movState->audioStream = -1;
  980. movState->audio.st = NULL;
  981. }
  982. break;
  983. case AVMEDIA_TYPE_VIDEO:
  984. movState->videoStream = stream_index;
  985. movState->video.st = pFormatCtx->streams[stream_index];
  986. movState->video.current_pts_time = av_gettime();
  987. movState->video.frame_timer = (double)movState->video.current_pts_time /
  988. 1000000.0;
  989. movState->video.frame_last_delay = 40e-3;
  990. if(althrd_create(&movState->video.thread, video_thread, movState) != althrd_success)
  991. {
  992. movState->videoStream = -1;
  993. movState->video.st = NULL;
  994. }
  995. break;
  996. default:
  997. break;
  998. }
  999. return 0;
  1000. }
  1001. static int decode_interrupt_cb(void *ctx)
  1002. {
  1003. return ((MovieState*)ctx)->quit;
  1004. }
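/* Demuxer/parser thread. Finds the audio and video streams, services seek
 * requests, and feeds demuxed packets into the per-stream queues until EOF
 * or quit. */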
  1005. int decode_thread(void *arg)
  1006. {
  1007. MovieState *movState = (MovieState *)arg;
  1008. AVFormatContext *fmtCtx = movState->pFormatCtx;
  1009. AVPacket *packet = (AVPacket[1]){};
  1010. int video_index = -1;
  1011. int audio_index = -1;
  1012. movState->videoStream = -1;
  1013. movState->audioStream = -1;
  1014. /* Dump information about file onto standard error */
  1015. av_dump_format(fmtCtx, 0, movState->filename, 0);
  1016. /* Find the first video and audio streams */
  1017. for(unsigned int i = 0;i < fmtCtx->nb_streams;i++)
  1018. {
  1019. if(fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
  1020. video_index = i;
  1021. else if(fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
  1022. audio_index = i;
  1023. }
  1024. movState->external_clock_base = av_gettime();
  1025. if(audio_index >= 0)
  1026. stream_component_open(movState, audio_index);
  1027. if(video_index >= 0)
  1028. stream_component_open(movState, video_index);
  1029. if(movState->videoStream < 0 && movState->audioStream < 0)
  1030. {
  1031. fprintf(stderr, "%s: could not open codecs\n", movState->filename);
  1032. goto fail;
  1033. }
  1034. /* Main packet handling loop */
  1035. while(!movState->quit)
  1036. {
  1037. if(movState->seek_req)
  1038. {
  1039. int64_t seek_target = movState->seek_pos;
  1040. int stream_index= -1;
  1041. /* Prefer seeking on the video stream. */
  1042. if(movState->videoStream >= 0)
  1043. stream_index = movState->videoStream;
  1044. else if(movState->audioStream >= 0)
  1045. stream_index = movState->audioStream;
  1046. /* Get a seek timestamp for the appropriate stream. */
  1047. int64_t timestamp = seek_target;
  1048. if(stream_index >= 0)
  1049. timestamp = av_rescale_q(seek_target, AV_TIME_BASE_Q, fmtCtx->streams[stream_index]->time_base);
  1050. if(av_seek_frame(movState->pFormatCtx, stream_index, timestamp, 0) < 0)
  1051. fprintf(stderr, "%s: error while seeking\n", movState->pFormatCtx->filename);
  1052. else
  1053. {
  1054. /* Seek successful, clear the packet queues and send a special
  1055. * 'flush' packet with the new stream clock time. */
  1056. if(movState->audioStream >= 0)
  1057. {
  1058. packet_queue_clear(&movState->audio.q);
  1059. flush_pkt.pts = av_rescale_q(seek_target, AV_TIME_BASE_Q,
  1060. fmtCtx->streams[movState->audioStream]->time_base
  1061. );
  1062. packet_queue_put(&movState->audio.q, &flush_pkt);
  1063. }
  1064. if(movState->videoStream >= 0)
  1065. {
  1066. packet_queue_clear(&movState->video.q);
  1067. flush_pkt.pts = av_rescale_q(seek_target, AV_TIME_BASE_Q,
  1068. fmtCtx->streams[movState->videoStream]->time_base
  1069. );
  1070. packet_queue_put(&movState->video.q, &flush_pkt);
  1071. }
  1072. movState->external_clock_base = av_gettime() - seek_target;
  1073. }
  1074. movState->seek_req = false;
  1075. }
  1076. if(movState->audio.q.size >= MAX_AUDIOQ_SIZE ||
  1077. movState->video.q.size >= MAX_VIDEOQ_SIZE)
  1078. {
  1079. SDL_Delay(10);
  1080. continue;
  1081. }
  1082. if(av_read_frame(movState->pFormatCtx, packet) < 0)
  1083. {
  1084. packet_queue_flush(&movState->video.q);
  1085. packet_queue_flush(&movState->audio.q);
  1086. break;
  1087. }
  1088. /* Place the packet in the queue it's meant for, or discard it. */
  1089. if(packet->stream_index == movState->videoStream)
  1090. packet_queue_put(&movState->video.q, packet);
  1091. else if(packet->stream_index == movState->audioStream)
  1092. packet_queue_put(&movState->audio.q, packet);
  1093. else
  1094. av_free_packet(packet);
  1095. }
  1096. /* all done - wait for it */
  1097. while(!movState->quit)
  1098. {
  1099. if(movState->audio.q.nb_packets == 0 && movState->video.q.nb_packets == 0)
  1100. break;
  1101. SDL_Delay(100);
  1102. }
  1103. fail:
  1104. movState->quit = true;
  1105. packet_queue_flush(&movState->video.q);
  1106. packet_queue_flush(&movState->audio.q);
  1107. if(movState->videoStream >= 0)
  1108. althrd_join(movState->video.thread, NULL);
  1109. if(movState->audioStream >= 0)
  1110. althrd_join(movState->audio.thread, NULL);
  1111. SDL_PushEvent(&(SDL_Event){ .user={.type=FF_QUIT_EVENT, .data1=movState} });
  1112. return 0;
  1113. }
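/* Request a seek of 'incr' seconds relative to the current master clock
 * position. */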
  1114. static void stream_seek(MovieState *movState, double incr)
  1115. {
  1116. if(!movState->seek_req)
  1117. {
  1118. double newtime = get_master_clock(movState)+incr;
  1119. if(newtime <= 0.0) movState->seek_pos = 0;
  1120. else movState->seek_pos = (int64_t)(newtime * AV_TIME_BASE);
  1121. movState->seek_req = true;
  1122. }
  1123. }
  1124. int main(int argc, char *argv[])
  1125. {
  1126. SDL_Event event;
  1127. MovieState *movState;
  1128. bool first_update = true;
  1129. SDL_Window *screen;
  1130. SDL_Renderer *renderer;
  1131. ALCdevice *device;
  1132. ALCcontext *context;
  1133. if(argc < 2)
  1134. {
  1135. fprintf(stderr, "Usage: %s <file>\n", argv[0]);
  1136. return 1;
  1137. }
  1138. /* Register all formats and codecs */
  1139. av_register_all();
  1140. /* Initialize networking protocols */
  1141. avformat_network_init();
  1142. if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
  1143. {
  1144. fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
  1145. return 1;
  1146. }
  1147. /* Make a window to put our video */
  1148. screen = SDL_CreateWindow("alffplay", 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
  1149. if(!screen)
  1150. {
  1151. fprintf(stderr, "SDL: could not set video mode - exiting\n");
  1152. return 1;
  1153. }
  1154. /* Make a renderer to handle the texture image surface and rendering. */
  1155. renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
  1156. if(renderer)
  1157. {
  1158. SDL_RendererInfo rinf;
  1159. bool ok = false;
  1160. /* Make sure the renderer supports YV12 textures. If not, fall back to a
  1161. * software renderer. */
  1162. if(SDL_GetRendererInfo(renderer, &rinf) == 0)
  1163. {
  1164. for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
  1165. ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_YV12);
  1166. }
  1167. if(!ok)
  1168. {
  1169. fprintf(stderr, "YV12 pixelformat textures not supported on renderer %s\n", rinf.name);
  1170. SDL_DestroyRenderer(renderer);
  1171. renderer = NULL;
  1172. }
  1173. }
  1174. if(!renderer)
  1175. renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);
  1176. if(!renderer)
  1177. {
  1178. fprintf(stderr, "SDL: could not create renderer - exiting\n");
  1179. return 1;
  1180. }
  1181. SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
  1182. SDL_RenderFillRect(renderer, NULL);
  1183. SDL_RenderPresent(renderer);
  1184. /* Open an audio device */
  1185. device = alcOpenDevice(NULL);
  1186. if(!device)
  1187. {
  1188. fprintf(stderr, "OpenAL: could not open device - exiting\n");
  1189. return 1;
  1190. }
  1191. context = alcCreateContext(device, NULL);
  1192. if(!context)
  1193. {
  1194. fprintf(stderr, "OpenAL: could not create context - exiting\n");
  1195. return 1;
  1196. }
  1197. if(alcMakeContextCurrent(context) == ALC_FALSE)
  1198. {
  1199. fprintf(stderr, "OpenAL: could not make context current - exiting\n");
  1200. return 1;
  1201. }
  1202. if(!alIsExtensionPresent("AL_SOFT_source_length"))
  1203. {
  1204. fprintf(stderr, "Required AL_SOFT_source_length not supported - exiting\n");
  1205. return 1;
  1206. }
  1207. if(!alIsExtensionPresent("AL_SOFT_source_latency"))
  1208. fprintf(stderr, "AL_SOFT_source_latency not supported, audio may be a bit laggy.\n");
  1209. else
  1210. {
  1211. alGetSourcedvSOFT = alGetProcAddress("alGetSourcedvSOFT");
  1212. has_latency_check = true;
  1213. }
  1214. movState = av_mallocz(sizeof(MovieState));
  1215. av_strlcpy(movState->filename, argv[1], sizeof(movState->filename));
  1216. packet_queue_init(&movState->audio.q);
  1217. packet_queue_init(&movState->video.q);
  1218. almtx_init(&movState->video.pictq_mutex, almtx_plain);
  1219. alcnd_init(&movState->video.pictq_cond);
  1220. almtx_init(&movState->audio.src_mutex, almtx_recursive);
  1221. movState->av_sync_type = DEFAULT_AV_SYNC_TYPE;
  1222. movState->pFormatCtx = avformat_alloc_context();
  1223. movState->pFormatCtx->interrupt_callback = (AVIOInterruptCB){.callback=decode_interrupt_cb, .opaque=movState};
  1224. if(avio_open2(&movState->pFormatCtx->pb, movState->filename, AVIO_FLAG_READ,
  1225. &movState->pFormatCtx->interrupt_callback, NULL))
  1226. {
  1227. fprintf(stderr, "Failed to open %s\n", movState->filename);
  1228. return 1;
  1229. }
  1230. /* Open movie file */
  1231. if(avformat_open_input(&movState->pFormatCtx, movState->filename, NULL, NULL) != 0)
  1232. {
  1233. fprintf(stderr, "Failed to open %s\n", movState->filename);
  1234. return 1;
  1235. }
  1236. /* Retrieve stream information */
  1237. if(avformat_find_stream_info(movState->pFormatCtx, NULL) < 0)
  1238. {
  1239. fprintf(stderr, "%s: failed to find stream info\n", movState->filename);
  1240. return 1;
  1241. }
  1242. schedule_refresh(movState, 40);
  1243. if(althrd_create(&movState->parse_thread, decode_thread, movState) != althrd_success)
  1244. {
  1245. fprintf(stderr, "Failed to create parse thread!\n");
  1246. return 1;
  1247. }
  1248. while(SDL_WaitEvent(&event) == 1)
  1249. {
  1250. switch(event.type)
  1251. {
  1252. case SDL_KEYDOWN:
  1253. switch(event.key.keysym.sym)
  1254. {
  1255. case SDLK_ESCAPE:
  1256. movState->quit = true;
  1257. break;
  1258. case SDLK_LEFT:
  1259. stream_seek(movState, -10.0);
  1260. break;
  1261. case SDLK_RIGHT:
  1262. stream_seek(movState, 10.0);
  1263. break;
  1264. case SDLK_UP:
  1265. stream_seek(movState, 30.0);
  1266. break;
  1267. case SDLK_DOWN:
  1268. stream_seek(movState, -30.0);
  1269. break;
  1270. default:
  1271. break;
  1272. }
  1273. break;
  1274. case SDL_WINDOWEVENT:
  1275. switch(event.window.event)
  1276. {
  1277. case SDL_WINDOWEVENT_RESIZED:
  1278. SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
  1279. SDL_RenderFillRect(renderer, NULL);
  1280. break;
  1281. default:
  1282. break;
  1283. }
  1284. break;
  1285. case SDL_QUIT:
  1286. movState->quit = true;
  1287. break;
  1288. case FF_UPDATE_EVENT:
  1289. update_picture(event.user.data1, &first_update, screen, renderer);
  1290. break;
  1291. case FF_REFRESH_EVENT:
  1292. video_refresh_timer(event.user.data1, screen, renderer);
  1293. break;
  1294. case FF_QUIT_EVENT:
  1295. althrd_join(movState->parse_thread, NULL);
  1296. avformat_close_input(&movState->pFormatCtx);
  1297. almtx_destroy(&movState->audio.src_mutex);
  1298. almtx_destroy(&movState->video.pictq_mutex);
  1299. alcnd_destroy(&movState->video.pictq_cond);
  1300. packet_queue_deinit(&movState->video.q);
  1301. packet_queue_deinit(&movState->audio.q);
  1302. alcMakeContextCurrent(NULL);
  1303. alcDestroyContext(context);
  1304. alcCloseDevice(device);
  1305. SDL_Quit();
  1306. exit(0);
  1307. default:
  1308. break;
  1309. }
  1310. }
  1311. fprintf(stderr, "SDL_WaitEvent error - %s\n", SDL_GetError());
  1312. return 1;
  1313. }