Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
godotengine
GitHub Repository: godotengine/godot
Path: blob/master/modules/theora/video_stream_theora.cpp
10277 views
1
/**************************************************************************/
2
/* video_stream_theora.cpp */
3
/**************************************************************************/
4
/* This file is part of: */
5
/* GODOT ENGINE */
6
/* https://godotengine.org */
7
/**************************************************************************/
8
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
9
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
10
/* */
11
/* Permission is hereby granted, free of charge, to any person obtaining */
12
/* a copy of this software and associated documentation files (the */
13
/* "Software"), to deal in the Software without restriction, including */
14
/* without limitation the rights to use, copy, modify, merge, publish, */
15
/* distribute, sublicense, and/or sell copies of the Software, and to */
16
/* permit persons to whom the Software is furnished to do so, subject to */
17
/* the following conditions: */
18
/* */
19
/* The above copyright notice and this permission notice shall be */
20
/* included in all copies or substantial portions of the Software. */
21
/* */
22
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
23
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
24
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
25
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
26
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
27
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
28
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
29
/**************************************************************************/
30
31
#include "video_stream_theora.h"
32
33
#include "core/config/project_settings.h"
34
#include "core/io/image.h"
35
#include "scene/resources/image_texture.h"
36
37
#include "thirdparty/misc/yuv2rgb.h"
38
39
int VideoStreamPlaybackTheora::buffer_data() {
40
char *buffer = ogg_sync_buffer(&oy, 4096);
41
42
uint64_t bytes = file->get_buffer((uint8_t *)buffer, 4096);
43
ogg_sync_wrote(&oy, bytes);
44
return bytes;
45
}
46
47
int VideoStreamPlaybackTheora::queue_page(ogg_page *page) {
	// Hand the page to both streams; libogg ignores pages whose serial
	// number doesn't match the stream, so it's safe to offer it to each.
	ogg_stream_pagein(&to, page);
	theora_eos = theora_eos || (to.e_o_s != 0);
	if (has_audio) {
		ogg_stream_pagein(&vo, page);
		vorbis_eos = vorbis_eos || (vo.e_o_s != 0);
	}
	return 0;
}
60
61
int VideoStreamPlaybackTheora::read_page(ogg_page *page) {
	// Keep feeding file data into the sync layer until a complete page
	// comes out. Returns >0 on success, 0 when the file runs out first.
	for (;;) {
		int ret = ogg_sync_pageout(&oy, page);
		if (ret > 0) {
			return ret;
		}
		if (buffer_data() == 0) {
			// End of file before a complete page was available.
			return 0;
		}
	}
}
76
77
// Returns the stream time of the given page, or -1 if the page belongs to
// neither stream or carries no completed packet.
double VideoStreamPlaybackTheora::get_page_time(ogg_page *page) {
	// ogg_page_granulepos() returns a signed ogg_int64_t and uses -1 to mean
	// "no packet ends on this page". Keep it signed instead of stuffing it in
	// a uint64_t, which only round-trips the sentinel by unsigned wrap-around.
	int64_t granulepos = ogg_page_granulepos(page);
	int page_serialno = ogg_page_serialno(page);
	double page_time = -1;

	if (page_serialno == to.serialno) {
		page_time = th_granule_time(td, granulepos);
	}
	if (has_audio && page_serialno == vo.serialno) {
		page_time = vorbis_granule_time(&vd, granulepos);
	}

	return page_time;
}
91
92
// Read one buffer worth of pages and feed them to the streams.
93
int VideoStreamPlaybackTheora::feed_pages() {
94
int pages = 0;
95
ogg_page og;
96
97
while (pages == 0) {
98
while (ogg_sync_pageout(&oy, &og) > 0) {
99
queue_page(&og);
100
pages++;
101
}
102
if (pages == 0) {
103
int bytes = buffer_data();
104
if (bytes == 0) {
105
break;
106
}
107
}
108
}
109
110
return pages;
111
}
112
113
// Seek the video and audio streams simultaneously to find the granulepos where we should start decoding.
// It will return the position where we should start reading pages, and the video and audio granulepos.
// p_time == -1 is a special mode used to locate the last pages (for computing stream length).
// Returns -1 if the whole data section was scanned without finding usable pages.
int64_t VideoStreamPlaybackTheora::seek_streams(double p_time, int64_t &cur_video_granulepos, int64_t &cur_audio_granulepos) {
	// Backtracking less than this is probably a waste of time.
	const int64_t min_seek = 512 * 1024;
	int64_t target_video_granulepos;
	int64_t target_audio_granulepos;
	double target_time = 0;
	int64_t seek_pos;

	// Make a guess where we should start reading in the file, and scan from there.
	// We base the guess on the mean bitrate of the streams. It would be theoretically faster to use the bisect method but
	// in practice there's a lot of linear scanning to do to find the right pages.
	// We want to catch the previous keyframe to the seek time. Since we only know the max GOP, we use that.
	if (p_time == -1) { // This is a special case to find the last packets and calculate the video length.
		seek_pos = MAX(stream_data_size - min_seek, stream_data_offset);
		target_video_granulepos = INT64_MAX;
		target_audio_granulepos = INT64_MAX;
	} else {
		int64_t video_frame = (int64_t)(p_time / frame_duration);
		// Target the latest possible keyframe at or before the requested frame
		// (keyframe number is encoded in the upper bits of the granulepos).
		target_video_granulepos = MAX(1LL, video_frame - (1LL << ti.keyframe_granule_shift)) << ti.keyframe_granule_shift;
		target_audio_granulepos = 0;
		// Byte-position guess assuming a constant mean bitrate over the data section.
		seek_pos = MAX(((target_video_granulepos >> ti.keyframe_granule_shift) - 1) * frame_duration * stream_data_size / stream_length, stream_data_offset);
		target_time = th_granule_time(td, target_video_granulepos);
		if (has_audio) {
			target_audio_granulepos = video_frame * frame_duration * vi.rate;
			target_time = MIN(target_time, vorbis_granule_time(&vd, target_audio_granulepos));
		}
	}

	int64_t video_seek_pos = seek_pos;
	int64_t audio_seek_pos = seek_pos;
	double backtrack_time = 0;
	bool video_catch = false;
	bool audio_catch = false;
	int64_t last_video_granule_seek_pos = seek_pos;
	int64_t last_audio_granule_seek_pos = seek_pos;

	// -1 means "no page with a granulepos seen yet" for that stream.
	cur_video_granulepos = -1;
	cur_audio_granulepos = -1;

	while (!video_catch || (has_audio && !audio_catch)) { // Backtracking loop
		if (seek_pos < stream_data_offset) {
			seek_pos = stream_data_offset;
		}
		file->seek(seek_pos);
		ogg_sync_reset(&oy);

		backtrack_time = 0;
		last_video_granule_seek_pos = seek_pos;
		last_audio_granule_seek_pos = seek_pos;
		while (!video_catch || (has_audio && !audio_catch)) { // Page scanning loop
			ogg_page page;
			// File offset of the start of the next page: current read position
			// minus whatever is still buffered but unconsumed in the sync layer.
			uint64_t last_seek_pos = file->get_position() - oy.fill + oy.returned;
			int ret = read_page(&page);
			if (ret <= 0) { // End of file.
				if (seek_pos < stream_data_offset) { // We've already searched the whole file
					return -1;
				}
				seek_pos -= min_seek;
				break;
			}
			int64_t cur_granulepos = ogg_page_granulepos(&page);
			if (cur_granulepos >= 0) {
				int page_serialno = ogg_page_serialno(&page);
				if (!video_catch && page_serialno == to.serialno) {
					if (cur_granulepos >= target_video_granulepos) {
						video_catch = true;
						if (cur_video_granulepos < 0) {
							// We overshot without ever seeing an earlier page; remember
							// how far past the target we landed so we can backtrack.
							// Adding 1s helps catching the start of the page and avoids backtrack_time = 0.
							backtrack_time = MAX(backtrack_time, 1 + th_granule_time(td, cur_granulepos) - target_time);
						}
					} else {
						video_seek_pos = last_video_granule_seek_pos;
						cur_video_granulepos = cur_granulepos;
					}
					last_video_granule_seek_pos = last_seek_pos;
				}
				if ((has_audio && !audio_catch) && page_serialno == vo.serialno) {
					if (cur_granulepos >= target_audio_granulepos) {
						audio_catch = true;
						if (cur_audio_granulepos < 0) {
							// Adding 1s helps catching the start of the page and avoids backtrack_time = 0.
							backtrack_time = MAX(backtrack_time, 1 + vorbis_granule_time(&vd, cur_granulepos) - target_time);
						}
					} else {
						audio_seek_pos = last_audio_granule_seek_pos;
						cur_audio_granulepos = cur_granulepos;
					}
					last_audio_granule_seek_pos = last_seek_pos;
				}
			}
		}
		if (backtrack_time > 0) {
			if (seek_pos <= stream_data_offset) {
				break;
			}
			// Jump back proportionally to how far past the target we landed.
			int64_t delta_seek = MAX(backtrack_time * stream_data_size / stream_length, min_seek);
			seek_pos -= delta_seek;
		}
		video_catch = cur_video_granulepos != -1;
		audio_catch = cur_audio_granulepos != -1;
	}

	// Target precedes the first keyframe: start from the beginning of the data.
	if (cur_video_granulepos < (1LL << ti.keyframe_granule_shift)) {
		video_seek_pos = stream_data_offset;
		cur_video_granulepos = 1LL << ti.keyframe_granule_shift;
	}
	if (has_audio) {
		if (cur_audio_granulepos == -1) {
			audio_seek_pos = stream_data_offset;
			cur_audio_granulepos = 0;
		}
		// Read from whichever stream needs the earlier byte position.
		seek_pos = MIN(video_seek_pos, audio_seek_pos);
	} else {
		seek_pos = video_seek_pos;
	}

	return seek_pos;
}
233
234
// Convert the decoded YCbCr planes to RGBA8 and push them to the texture.
// Only the visible picture region (ti.pic_*) is converted; the chroma offset
// depends on the stream's pixel format subsampling.
void VideoStreamPlaybackTheora::video_write(th_ycbcr_buffer yuv) {
	uint8_t *dst = frame_data.ptrw();
	uint32_t y_offset = region.position.y * yuv[0].stride + region.position.x;
	uint32_t uv_offset = 0;

	switch (px_fmt) {
		case TH_PF_444: // No chroma subsampling.
			uv_offset = region.position.y * yuv[1].stride + region.position.x;
			yuv444_2_rgb8888(dst, (uint8_t *)yuv[0].data + y_offset, (uint8_t *)yuv[1].data + uv_offset, (uint8_t *)yuv[2].data + uv_offset, region.size.x, region.size.y, yuv[0].stride, yuv[1].stride, region.size.x << 2);
			break;
		case TH_PF_422: // Chroma halved horizontally.
			uv_offset = region.position.y * yuv[1].stride + region.position.x / 2;
			yuv422_2_rgb8888(dst, (uint8_t *)yuv[0].data + y_offset, (uint8_t *)yuv[1].data + uv_offset, (uint8_t *)yuv[2].data + uv_offset, region.size.x, region.size.y, yuv[0].stride, yuv[1].stride, region.size.x << 2);
			break;
		case TH_PF_420: // Chroma halved in both dimensions.
			uv_offset = region.position.y * yuv[1].stride / 2 + region.position.x / 2;
			yuv420_2_rgb8888(dst, (uint8_t *)yuv[0].data + y_offset, (uint8_t *)yuv[1].data + uv_offset, (uint8_t *)yuv[2].data + uv_offset, region.size.x, region.size.y, yuv[0].stride, yuv[1].stride, region.size.x << 2);
			break;
		default:
			// Unsupported formats were warned about in set_file(); leave the frame as-is.
			break;
	}

	Ref<Image> img;
	img.instantiate(region.size.x, region.size.y, false, Image::FORMAT_RGBA8, frame_data); //zero copy image creation

	texture->update(img); // Zero-copy send to rendering server.
}
256
257
// Tear down all decoder and stream state so set_file() can be called again.
// Safe to call when nothing was ever opened (all guards check the has_* flags).
void VideoStreamPlaybackTheora::clear() {
	if (!file.is_null()) {
		file.unref();
	}
	if (has_audio) {
		vorbis_block_clear(&vb);
		vorbis_dsp_clear(&vd);
		vorbis_comment_clear(&vc);
		vorbis_info_clear(&vi);
		ogg_stream_clear(&vo);
		if (audio_buffer_size) {
			memdelete_arr(audio_buffer);
		}
	}
	if (has_video) {
		th_decode_free(td);
		th_comment_clear(&tc);
		th_info_clear(&ti);
		ogg_stream_clear(&to);
		// The sync state is only initialized when a video stream was found,
		// so it is cleared under the same condition.
		ogg_sync_clear(&oy);
	}

	// Reset flags so a later clear()/set_file() doesn't double-free.
	audio_buffer = nullptr;
	playing = false;
	has_video = false;
	has_audio = false;
	theora_eos = false;
	vorbis_eos = false;
}
286
287
// Scan the beginning-of-stream (BOS) pages and latch onto the first Theora
// stream and the audio_track-th Vorbis stream found. Sets has_video/has_audio
// and initializes `to`/`vo` by taking over the matching test stream state.
void VideoStreamPlaybackTheora::find_streams(th_setup_info *&ts) {
	ogg_stream_state test;
	ogg_packet op;
	ogg_page og;
	int stateflag = 0;
	// Number of Vorbis streams to skip to honor the selected audio track.
	int audio_track_skip = audio_track;

	/* Only interested in Vorbis/Theora streams */
	while (!stateflag) {
		int ret = buffer_data();
		if (!ret) {
			break;
		}
		while (ogg_sync_pageout(&oy, &og) > 0) {
			/* is this a mandated initial header? If not, stop parsing */
			if (!ogg_page_bos(&og)) {
				/* don't leak the page; get it into the appropriate stream */
				queue_page(&og);
				stateflag = 1;
				break;
			}

			// Each BOS page starts a new logical stream; probe its first packet.
			ogg_stream_init(&test, ogg_page_serialno(&og));
			ogg_stream_pagein(&test, &og);
			ogg_stream_packetout(&test, &op);

			/* identify the codec: try theora */
			if (!has_video && th_decode_headerin(&ti, &tc, &ts, &op) >= 0) {
				/* it is theora */
				memcpy(&to, &test, sizeof(test));
				has_video = true;
			} else if (!has_audio && vorbis_synthesis_headerin(&vi, &vc, &op) >= 0) {
				/* it is vorbis */
				if (audio_track_skip) {
					// Not the requested track: discard and re-init the probe state.
					vorbis_info_clear(&vi);
					vorbis_comment_clear(&vc);
					ogg_stream_clear(&test);
					vorbis_info_init(&vi);
					vorbis_comment_init(&vc);
					audio_track_skip--;
				} else {
					memcpy(&vo, &test, sizeof(test));
					has_audio = true;
				}
			} else {
				/* whatever it is, we don't care about it */
				ogg_stream_clear(&test);
			}
		}
	}
}
338
339
// Consume the remaining codec header packets (each codec mandates three).
// The first header of each stream was already consumed by find_streams(),
// hence the counters start at 1. On exit has_video/has_audio reflect whether
// all three headers of the respective codec were successfully parsed.
void VideoStreamPlaybackTheora::read_headers(th_setup_info *&ts) {
	ogg_packet op;
	int theora_header_packets = 1;
	int vorbis_header_packets = 1;

	/* we're expecting more header packets. */
	while (theora_header_packets < 3 || (has_audio && vorbis_header_packets < 3)) {
		/* look for further theora headers */
		// The API says there can be more than three but only three are mandatory.
		while (theora_header_packets < 3 && ogg_stream_packetout(&to, &op) > 0) {
			if (th_decode_headerin(&ti, &tc, &ts, &op) > 0) {
				theora_header_packets++;
			}
		}

		/* look for more vorbis header packets */
		while (has_audio && vorbis_header_packets < 3 && ogg_stream_packetout(&vo, &op) > 0) {
			// vorbis_synthesis_headerin() returns 0 on success.
			if (!vorbis_synthesis_headerin(&vi, &vc, &op)) {
				vorbis_header_packets++;
			}
		}

		/* The header pages/packets will arrive before anything else we care about, or the stream is not obeying spec */
		if (theora_header_packets < 3 || (has_audio && vorbis_header_packets < 3)) {
			ogg_page page;
			if (read_page(&page)) {
				queue_page(&page);
			} else {
				fprintf(stderr, "End of file while searching for codec headers.\n");
				break;
			}
		}
	}

	has_video = theora_header_packets == 3;
	has_audio = vorbis_header_packets == 3;
}
376
377
// Open an Ogg Theora file, parse its headers, set up the decoders, measure the
// stream length, and leave playback positioned at the start. Must not be
// called while playing. On failure (no video stream), all state is released
// and the file handle is dropped.
void VideoStreamPlaybackTheora::set_file(const String &p_file) {
	ERR_FAIL_COND(playing);
	th_setup_info *ts = nullptr;

	clear();

	file = FileAccess::open(p_file, FileAccess::READ);
	ERR_FAIL_COND_MSG(file.is_null(), "Cannot open file '" + p_file + "'.");

	file_name = p_file;

	ogg_sync_init(&oy);

	/* init supporting Vorbis structures needed in header parsing */
	vorbis_info_init(&vi);
	vorbis_comment_init(&vc);

	/* init supporting Theora structures needed in header parsing */
	th_comment_init(&tc);
	th_info_init(&ti);

	/* Zero stream state structs so they can be checked later. */
	memset(&to, 0, sizeof(to));
	memset(&vo, 0, sizeof(vo));

	/* Ogg file open; parse the headers */
	find_streams(ts);
	read_headers(ts);

	// Audio is optional: drop partially-initialized Vorbis state if unused.
	if (!has_audio) {
		vorbis_comment_clear(&vc);
		vorbis_info_clear(&vi);
		if (!ogg_stream_check(&vo)) {
			ogg_stream_clear(&vo);
		}
	}

	// One video stream is mandatory.
	if (!has_video) {
		th_setup_free(ts);
		th_comment_clear(&tc);
		th_info_clear(&ti);
		if (!ogg_stream_check(&to)) {
			ogg_stream_clear(&to);
		}
		file.unref();
		return;
	}

	/* And now we have it all. Initialize decoders. */
	td = th_decode_alloc(&ti, ts);
	th_setup_free(ts);
	px_fmt = ti.pixel_fmt;
	switch (ti.pixel_fmt) {
		case TH_PF_420:
		case TH_PF_422:
		case TH_PF_444:
			break;
		default:
			WARN_PRINT(" video\n (UNKNOWN Chroma sampling!)\n");
			break;
	}
	// Start with post-processing disabled; update() adjusts it adaptively.
	th_decode_ctl(td, TH_DECCTL_GET_PPLEVEL_MAX, &pp_level_max, sizeof(pp_level_max));
	pp_level = 0;
	th_decode_ctl(td, TH_DECCTL_SET_PPLEVEL, &pp_level, sizeof(pp_level));
	pp_inc = 0;

	// Full encoded frame size vs. the visible picture region within it.
	size.x = ti.frame_width;
	size.y = ti.frame_height;
	region.position.x = ti.pic_x;
	region.position.y = ti.pic_y;
	region.size.x = ti.pic_width;
	region.size.y = ti.pic_height;

	Ref<Image> img = Image::create_empty(region.size.x, region.size.y, false, Image::FORMAT_RGBA8);
	texture->set_image(img);
	frame_data.resize(region.size.x * region.size.y * 4);

	frame_duration = (double)ti.fps_denominator / ti.fps_numerator;

	if (has_audio) {
		vorbis_synthesis_init(&vd, &vi);
		vorbis_block_init(&vd, &vb);
		audio_buffer_size = MIN(vi.channels, 8) * 1024;
		audio_buffer = memnew_arr(float, audio_buffer_size);
	}

	// Where the data section starts: current read position minus what is
	// still buffered but unconsumed in the sync layer.
	stream_data_offset = file->get_position() - oy.fill + oy.returned;
	stream_data_size = file->get_length() - stream_data_offset;

	// Sync to last page to find video length.
	int64_t seek_pos = MAX(stream_data_offset, (int64_t)file->get_length() - 64 * 1024);
	int64_t video_granulepos = INT64_MAX;
	int64_t audio_granulepos = INT64_MAX;
	file->seek(seek_pos);
	seek_pos = seek_streams(-1, video_granulepos, audio_granulepos);
	file->seek(seek_pos);
	ogg_sync_reset(&oy);

	stream_length = 0;
	ogg_page page;
	while (read_page(&page) > 0) {
		// Use MAX because, even though pages are ordered, page time can be -1
		// for pages without full frames. Streams could be truncated too.
		stream_length = MAX(stream_length, get_page_time(&page));
	}

	seek(0);
}
486
487
// Playback clock used to pace audio/video against wall time, with the
// user-configured delay compensation applied.
double VideoStreamPlaybackTheora::get_time() const {
	// FIXME: AudioServer output latency was fixed in af9bb0e, previously it used to
	// systematically return 0. Now that it gives a proper latency, it broke this
	// code where the delay compensation likely never really worked.
	return time - /* AudioServer::get_singleton()->get_output_latency() - */ delay_compensation;
}
493
494
// Texture holding the current video frame; updated in place by video_write().
Ref<Texture2D> VideoStreamPlaybackTheora::get_texture() const {
	return texture;
}
497
498
// Advance playback by p_delta seconds: decode and mix audio up to the current
// time, decode video until a frame later than "now" is found, then present
// the pending frame once its display time has been reached.
void VideoStreamPlaybackTheora::update(double p_delta) {
	if (file.is_null()) {
		return;
	}

	if (!playing || paused) {
		return;
	}

	time += p_delta;

	double comp_time = get_time();
	bool audio_ready = false;

	// Read data until we fill the audio buffer and get a new video frame.
	while ((!audio_ready && !audio_done) || (!video_ready && !video_done)) {
		ogg_packet op;

		while (!audio_ready && !audio_done) {
			// Send remaining frames
			if (!send_audio()) {
				// Mix buffer is full; audio is ahead for now.
				audio_ready = true;
				break;
			}

			float **pcm;
			int ret = vorbis_synthesis_pcmout(&vd, &pcm);
			if (ret > 0) {
				// Interleave the planar PCM into audio_buffer in chunks that
				// fit the buffer, pushing each chunk to the mixer.
				int frames_read = 0;
				while (frames_read < ret) {
					int m = MIN(audio_buffer_size / vi.channels, ret - frames_read);
					int count = 0;
					for (int j = 0; j < m; j++) {
						for (int i = 0; i < vi.channels; i++) {
							audio_buffer[count++] = pcm[i][frames_read + j];
						}
					}
					frames_read += m;
					audio_ptr_end = m;
					if (!send_audio()) {
						audio_ready = true;
						break;
					}
				}
				// Tell the decoder how many samples we actually consumed.
				vorbis_synthesis_read(&vd, frames_read);
			} else {
				/* no pending audio; is there a pending packet to decode? */
				if (ogg_stream_packetout(&vo, &op) > 0) {
					if (vorbis_synthesis(&vb, &op) == 0) { /* test for success! */
						vorbis_synthesis_blockin(&vd, &vb);
					}
				} else { /* we need more data; break out to suck in another page */
					audio_done = vorbis_eos;
					break;
				}
			}
		}

		while (!video_ready && !video_done) {
			if (ogg_stream_packetout(&to, &op) > 0) {
				// Keep the decoder's granule position in sync with the stream.
				if (op.granulepos >= 0) {
					th_decode_ctl(td, TH_DECCTL_SET_GRANPOS, &op.granulepos, sizeof(op.granulepos));
				}
				int64_t videobuf_granulepos;
				int ret = th_decode_packetin(td, &op, &videobuf_granulepos);
				if (ret == 0 || ret == TH_DUPFRAME) {
					next_frame_time = th_granule_time(td, videobuf_granulepos);
					if (next_frame_time > comp_time) {
						dup_frame = (ret == TH_DUPFRAME);
						video_ready = true;
					} else {
						/*If we are too slow, reduce the pp level.*/
						pp_inc = pp_level > 0 ? -1 : 0;
					}
				}
			} else { /* we need more data; break out to suck in another page */
				video_done = theora_eos;
				break;
			}
		}

		if (!video_ready || !audio_ready) {
			int ret = feed_pages();
			if (ret == 0) {
				// True end of file: force both streams to finish.
				vorbis_eos = true;
				theora_eos = true;
				break;
			}
		}

		double tdiff = next_frame_time - comp_time;
		/*If we have lots of extra time, increase the post-processing level.*/
		if (tdiff > ti.fps_denominator * 0.25 / ti.fps_numerator) {
			pp_inc = pp_level < pp_level_max ? 1 : 0;
		} else if (tdiff < ti.fps_denominator * 0.05 / ti.fps_numerator) {
			pp_inc = pp_level > 0 ? -1 : 0;
		}
	}

	if (!video_ready && video_done && audio_done) {
		stop();
		return;
	}

	// Wait for the last frame to end before rendering the next one.
	if (video_ready && comp_time >= current_frame_time) {
		// Duplicate frames carry no new pixel data, so skip the conversion.
		if (!dup_frame) {
			th_ycbcr_buffer yuv;
			th_decode_ycbcr_out(td, yuv);
			video_write(yuv);
		}
		dup_frame = false;
		video_ready = false;
		current_frame_time = next_frame_time;
	}
}
614
615
// Start playback; no-op if already playing.
void VideoStreamPlaybackTheora::play() {
	if (playing) {
		return;
	}

	playing = true;
	// Project setting is in milliseconds; convert to seconds.
	delay_compensation = GLOBAL_GET("audio/video/video_delay_compensation_ms");
	delay_compensation /= 1000.0;
}
624
625
// Stop playback and rewind so a subsequent play() starts from the beginning.
void VideoStreamPlaybackTheora::stop() {
	playing = false;
	seek(0);
}
629
630
// True while playback is active (set in play(), cleared in stop()/clear()).
bool VideoStreamPlaybackTheora::is_playing() const {
	return playing;
}
633
634
// Pause/unpause; while paused, update() returns early without decoding.
void VideoStreamPlaybackTheora::set_paused(bool p_paused) {
	paused = p_paused;
}
637
638
// Current pause state.
bool VideoStreamPlaybackTheora::is_paused() const {
	return paused;
}
641
642
// Total stream length in seconds, measured in set_file().
double VideoStreamPlaybackTheora::get_length() const {
	return stream_length;
}
645
646
// Current playback position in seconds (delay-compensated clock).
double VideoStreamPlaybackTheora::get_playback_position() const {
	return get_time();
}
649
650
// Seek to p_time seconds: reset the stream/decoder state, locate a file
// position before the preceding keyframe via seek_streams(), then decode
// forward (discarding output) until both streams reach the target time.
void VideoStreamPlaybackTheora::seek(double p_time) {
	if (file.is_null()) {
		return;
	}
	if (p_time >= stream_length) {
		return;
	}

	// Reset per-playback decode state.
	video_ready = false;
	next_frame_time = 0;
	current_frame_time = -1;
	dup_frame = false;
	video_done = false;
	audio_done = !has_audio;
	theora_eos = false;
	vorbis_eos = false;
	audio_ptr_start = 0;
	audio_ptr_end = 0;

	ogg_stream_reset(&to);
	if (has_audio) {
		ogg_stream_reset(&vo);
		vorbis_synthesis_restart(&vd);
	}

	int64_t seek_pos;
	int64_t video_granulepos;
	int64_t audio_granulepos;
	// Find the granules we need so we can start playing at the seek time.
	seek_pos = seek_streams(p_time, video_granulepos, audio_granulepos);
	if (seek_pos < 0) {
		return;
	}
	file->seek(seek_pos);
	ogg_sync_reset(&oy);

	time = p_time;

	double last_audio_time = 0;
	double last_video_time = 0;
	bool first_frame_decoded = false;
	// start_* become true once the corresponding stream reached its target
	// granule and decoding (or sample tracking) may begin.
	bool start_audio = (audio_granulepos == 0);
	bool start_video = (video_granulepos == (1LL << ti.keyframe_granule_shift));
	bool keyframe_found = false;
	uint64_t current_frame = 0;

	// Read from the streams skipping pages until we reach the granules we want. We won't skip pages from both video and
	// audio streams, only one of them, until decoding of both starts.
	// video_granulepos and audio_granulepos are guaranteed to be found by checking the granulepos in the packets, no
	// need to keep track of packets with granulepos == -1 until decoding starts.
	while ((has_audio && last_audio_time < p_time) || (last_video_time <= p_time)) {
		ogg_packet op;
		if (feed_pages() == 0) {
			break;
		}
		while (has_audio && last_audio_time < p_time && ogg_stream_packetout(&vo, &op) > 0) {
			if (start_audio) {
				if (vorbis_synthesis(&vb, &op) == 0) { /* test for success! */
					vorbis_synthesis_blockin(&vd, &vb);
					float **pcm;
					// Consume (and discard) only as many samples as separate
					// us from the seek target.
					int samples_left = ceil((p_time - last_audio_time) * vi.rate);
					int samples_read = vorbis_synthesis_pcmout(&vd, &pcm);
					int samples_consumed = MIN(samples_left, samples_read);
					vorbis_synthesis_read(&vd, samples_consumed);
					last_audio_time += (double)samples_consumed / vi.rate;
				}
			} else if (op.granulepos >= audio_granulepos) {
				last_audio_time = vorbis_granule_time(&vd, op.granulepos);
				// Start tracking audio now. This won't produce any samples but will update the decoder state.
				if (vorbis_synthesis_trackonly(&vb, &op) == 0) {
					vorbis_synthesis_blockin(&vd, &vb);
				}
				start_audio = true;
			}
		}
		while (last_video_time <= p_time && ogg_stream_packetout(&to, &op) > 0) {
			if (!start_video && (op.granulepos >= video_granulepos || video_granulepos == (1LL << ti.keyframe_granule_shift))) {
				if (op.granulepos > 0) {
					current_frame = th_granule_frame(td, op.granulepos);
				}
				start_video = true;
			}
			// Don't start decoding until a keyframe is found, but count frames.
			if (start_video) {
				if (!keyframe_found && th_packet_iskeyframe(&op)) {
					keyframe_found = true;
					// Seed the decoder's granule position at the keyframe.
					int64_t cur_granulepos = (current_frame + 1) << ti.keyframe_granule_shift;
					th_decode_ctl(td, TH_DECCTL_SET_GRANPOS, &cur_granulepos, sizeof(cur_granulepos));
				}
				if (keyframe_found) {
					int64_t videobuf_granulepos;
					if (op.granulepos >= 0) {
						th_decode_ctl(td, TH_DECCTL_SET_GRANPOS, &op.granulepos, sizeof(op.granulepos));
					}
					int ret = th_decode_packetin(td, &op, &videobuf_granulepos);
					if (ret == 0 || ret == TH_DUPFRAME) {
						last_video_time = th_granule_time(td, videobuf_granulepos);
						first_frame_decoded = true;
					}
				} else {
					current_frame++;
				}
			}
		}
	}

	if (first_frame_decoded) {
		if (is_playing()) {
			// Draw the current frame.
			th_ycbcr_buffer yuv;
			th_decode_ycbcr_out(td, yuv);
			video_write(yuv);
			current_frame_time = last_video_time;
		} else {
			// Not playing: defer presentation to the next update().
			next_frame_time = current_frame_time;
			video_ready = true;
		}
	}
}
769
770
// Channel count of the selected Vorbis audio stream.
int VideoStreamPlaybackTheora::get_channels() const {
	return vi.channels;
}
773
774
// Select which Vorbis stream to use; takes effect on the next set_file().
void VideoStreamPlaybackTheora::set_audio_track(int p_idx) {
	audio_track = p_idx;
}
777
778
// Sample rate (Hz) of the selected Vorbis audio stream.
int VideoStreamPlaybackTheora::get_mix_rate() const {
	return vi.rate;
}
781
782
VideoStreamPlaybackTheora::VideoStreamPlaybackTheora() {
	// Create the output texture once; frames are written into it in place.
	texture.instantiate();
}
785
786
VideoStreamPlaybackTheora::~VideoStreamPlaybackTheora() {
	// Releases decoders, stream states, and the audio buffer.
	clear();
}
789
790
void VideoStreamTheora::_bind_methods() {}
791
792
// Load an .ogv file as a VideoStreamTheora resource.
// The file is probed first so a missing/unreadable path reports ERR_CANT_OPEN.
Ref<Resource> ResourceFormatLoaderTheora::load(const String &p_path, const String &p_original_path, Error *r_error, bool p_use_sub_threads, float *r_progress, CacheMode p_cache_mode) {
	Ref<FileAccess> f = FileAccess::open(p_path, FileAccess::READ);
	if (f.is_null()) {
		if (r_error) {
			*r_error = ERR_CANT_OPEN;
		}
		return Ref<Resource>();
	}

	Ref<VideoStreamTheora> ogv_stream;
	ogv_stream.instantiate();
	ogv_stream->set_file(p_path);

	if (r_error) {
		*r_error = OK;
	}

	return ogv_stream;
}
812
813
// This loader only handles Ogg Theora video files.
void ResourceFormatLoaderTheora::get_recognized_extensions(List<String> *p_extensions) const {
	p_extensions->push_back("ogv");
}
816
817
// Accept any VideoStream-derived type request.
bool ResourceFormatLoaderTheora::handles_type(const String &p_type) const {
	return ClassDB::is_parent_class(p_type, "VideoStream");
}
820
821
// Map a file path to its resource type; only .ogv is recognized here.
String ResourceFormatLoaderTheora::get_resource_type(const String &p_path) const {
	const String extension = p_path.get_extension().to_lower();
	return (extension == "ogv") ? String("VideoStreamTheora") : String("");
}
828
829