Merge pull request #101958 from berarma/theora_fixes

Fix Theora video issues
Rémi Verschelde 2025-02-17 09:47:36 +01:00
commit 032cec51a7
4 changed files with 91 additions and 231 deletions


@@ -39,31 +39,9 @@
int VideoStreamPlaybackTheora::buffer_data() {
char *buffer = ogg_sync_buffer(&oy, 4096);
#ifdef THEORA_USE_THREAD_STREAMING
int read;
do {
thread_sem->post();
read = MIN(ring_buffer.data_left(), 4096);
if (read) {
ring_buffer.read((uint8_t *)buffer, read);
ogg_sync_wrote(&oy, read);
} else {
OS::get_singleton()->delay_usec(100);
}
} while (read == 0);
return read;
#else
uint64_t bytes = file->get_buffer((uint8_t *)buffer, 4096);
ogg_sync_wrote(&oy, bytes);
return (bytes);
#endif
}
int VideoStreamPlaybackTheora::queue_page(ogg_page *page) {
@@ -82,34 +60,24 @@ int VideoStreamPlaybackTheora::queue_page(ogg_page *page) {
return 0;
}
void VideoStreamPlaybackTheora::video_write() {
th_ycbcr_buffer yuv;
th_decode_ycbcr_out(td, yuv);
int pitch = 4;
frame_data.resize(size.x * size.y * pitch);
{
void VideoStreamPlaybackTheora::video_write(th_ycbcr_buffer yuv) {
uint8_t *w = frame_data.ptrw();
char *dst = (char *)w;
uint32_t y_offset = region.position.y * yuv[0].stride + region.position.x;
uint32_t uv_offset = region.position.y * yuv[1].stride + region.position.x;
if (px_fmt == TH_PF_444) {
yuv444_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data, (uint8_t *)yuv[1].data, (uint8_t *)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x << 2);
yuv444_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data + y_offset, (uint8_t *)yuv[1].data + uv_offset, (uint8_t *)yuv[2].data + uv_offset, region.size.x, region.size.y, yuv[0].stride, yuv[1].stride, region.size.x << 2);
} else if (px_fmt == TH_PF_422) {
yuv422_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data, (uint8_t *)yuv[1].data, (uint8_t *)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x << 2);
yuv422_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data + y_offset, (uint8_t *)yuv[1].data + uv_offset, (uint8_t *)yuv[2].data + uv_offset, region.size.x, region.size.y, yuv[0].stride, yuv[1].stride, region.size.x << 2);
} else if (px_fmt == TH_PF_420) {
yuv420_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data, (uint8_t *)yuv[1].data, (uint8_t *)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x << 2);
yuv420_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data + y_offset, (uint8_t *)yuv[1].data + uv_offset, (uint8_t *)yuv[2].data + uv_offset, region.size.x, region.size.y, yuv[0].stride, yuv[1].stride, region.size.x << 2);
}
format = Image::FORMAT_RGBA8;
}
Ref<Image> img = memnew(Image(size.x, size.y, false, Image::FORMAT_RGBA8, frame_data)); //zero copy image creation
Ref<Image> img;
img.instantiate(region.size.x, region.size.y, false, Image::FORMAT_RGBA8, frame_data); //zero copy image creation
texture->update(img); //zero copy send to rendering server
frames_pending = 1;
}
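
For readers skimming the hunk above: the conversion now starts inside the padded Theora frame rather than at its origin. A minimal standalone sketch of that offset arithmetic, not part of the patch and using made-up plane dimensions:

#include <cstdio>

int main() {
    // Illustrative values only; the real numbers come from th_info's
    // pic_x/pic_y and the stride libtheora reports for the decoded Y plane.
    const int stride = 640;          // bytes per row of the padded Y plane
    const int pic_x = 8, pic_y = 4;  // top-left corner of the visible region
    const int y_offset = pic_y * stride + pic_x;
    // The converter reads yuv[0].data + y_offset, so the first pixel written
    // to frame_data is the first visible luma sample, not encoder padding.
    printf("first visible Y sample is %d bytes into the plane\n", y_offset);
    return 0;
}

The destination image is likewise allocated at region.size (the visible pic_width by pic_height) instead of the padded frame size, which is why frame_data and the texture are created from region.size further down in set_file().
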
void VideoStreamPlaybackTheora::clear() {
@@ -136,20 +104,15 @@ void VideoStreamPlaybackTheora::clear() {
}
ogg_sync_clear(&oy);
#ifdef THEORA_USE_THREAD_STREAMING
thread_exit = true;
thread_sem->post(); //just in case
thread.wait_to_finish();
ring_buffer.clear();
#endif
theora_p = 0;
vorbis_p = 0;
videobuf_ready = 0;
frames_pending = 0;
videobuf_time = 0;
next_frame_time = 0;
current_frame_time = 0;
theora_eos = false;
vorbis_eos = false;
video_ready = false;
video_done = false;
audio_done = false;
file.unref();
playing = false;
@@ -164,17 +127,6 @@ void VideoStreamPlaybackTheora::set_file(const String &p_file) {
file = FileAccess::open(p_file, FileAccess::READ);
ERR_FAIL_COND_MSG(file.is_null(), "Cannot open file '" + p_file + "'.");
#ifdef THEORA_USE_THREAD_STREAMING
thread_exit = false;
thread_eof = false;
//pre-fill buffer
int to_read = ring_buffer.space_left();
uint64_t read = file->get_buffer(read_buffer.ptr(), to_read);
ring_buffer.write(read_buffer.ptr(), read);
thread.start(_streaming_thread, this);
#endif
ogg_sync_init(&oy);
/* init supporting Vorbis structures needed in header parsing */
@@ -327,16 +279,18 @@ void VideoStreamPlaybackTheora::set_file(const String &p_file) {
th_decode_ctl(td, TH_DECCTL_SET_PPLEVEL, &pp_level, sizeof(pp_level));
pp_inc = 0;
int w;
int h;
w = ((ti.pic_x + ti.frame_width + 1) & ~1) - (ti.pic_x & ~1);
h = ((ti.pic_y + ti.frame_height + 1) & ~1) - (ti.pic_y & ~1);
size.x = w;
size.y = h;
size.x = ti.frame_width;
size.y = ti.frame_height;
region.position.x = ti.pic_x;
region.position.y = ti.pic_y;
region.size.x = ti.pic_width;
region.size.y = ti.pic_height;
Ref<Image> img = Image::create_empty(w, h, false, Image::FORMAT_RGBA8);
Ref<Image> img = Image::create_empty(region.size.x, region.size.y, false, Image::FORMAT_RGBA8);
texture->set_image(img);
frame_data.resize(region.size.x * region.size.y * 4);
frame_duration = (double)ti.fps_denominator / ti.fps_numerator;
} else {
/* tear down the partial theora setup */
th_info_clear(&ti);
@@ -358,7 +312,8 @@ void VideoStreamPlaybackTheora::set_file(const String &p_file) {
playing = false;
buffering = true;
time = 0;
audio_frames_wrote = 0;
video_done = !theora_p;
audio_done = !vorbis_p;
}
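
One small detail from the set_file() changes above: frame_duration is now derived from the stream's rational frame rate, i.e. fps_denominator / fps_numerator seconds per frame. A throwaway check, not part of the patch, with an assumed NTSC-style rate:

#include <cstdio>

int main() {
    // Assumed example rate; real streams supply these values in th_info.
    const unsigned fps_numerator = 30000;
    const unsigned fps_denominator = 1001; // 30000/1001 ≈ 29.97 fps
    const double frame_duration = (double)fps_denominator / fps_numerator;
    printf("frame_duration = %.6f s (%.3f fps)\n", frame_duration, 1.0 / frame_duration);
    return 0;
}

which prints roughly 0.033367 s per frame. The update() rework below compares the slack before the next frame against a quarter of this interval when deciding whether to raise the post-processing level again.
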
double VideoStreamPlaybackTheora::get_time() const {
@@ -378,68 +333,41 @@ void VideoStreamPlaybackTheora::update(double p_delta) {
}
if (!playing || paused) {
//printf("not playing\n");
return;
}
#ifdef THEORA_USE_THREAD_STREAMING
thread_sem->post();
#endif
time += p_delta;
if (videobuf_time > get_time()) {
return; //no new frames need to be produced
}
bool frame_done = false;
bool audio_done = !vorbis_p;
while (!frame_done || (!audio_done && !vorbis_eos)) {
//a frame needs to be produced
double comp_time = get_time();
bool audio_ready = false;
// Read data until we fill the audio buffer and get a new video frame.
while ((!audio_ready && !audio_done) || (!video_ready && !video_done)) {
ogg_packet op;
bool no_theora = false;
bool buffer_full = false;
while (vorbis_p && !audio_done && !buffer_full) {
int ret;
while (!audio_ready && !audio_done) {
float **pcm;
/* if there's pending, decoded audio, grab it */
ret = vorbis_synthesis_pcmout(&vd, &pcm);
int ret = vorbis_synthesis_pcmout(&vd, &pcm);
if (ret > 0) {
const int AUXBUF_LEN = 4096;
int to_read = ret;
float aux_buffer[AUXBUF_LEN];
while (to_read) {
int m = MIN(AUXBUF_LEN / vi.channels, to_read);
int count = 0;
for (int j = 0; j < m; j++) {
for (int i = 0; i < vi.channels; i++) {
aux_buffer[count++] = pcm[i][j];
}
}
if (mix_callback) {
int mixed = mix_callback(mix_udata, aux_buffer, m);
to_read -= mixed;
if (mixed != m) { //could mix no more
buffer_full = true;
audio_ready = true;
break;
}
} else {
to_read -= m; //just pretend we sent the audio
}
}
vorbis_synthesis_read(&vd, ret - to_read);
audio_frames_wrote += ret - to_read;
} else {
/* no pending audio; is there a pending packet to decode? */
if (ogg_stream_packetout(&vo, &op) > 0) {
@@ -447,19 +375,13 @@ void VideoStreamPlaybackTheora::update(double p_delta) {
vorbis_synthesis_blockin(&vd, &vb);
}
} else { /* we need more data; break out to suck in another page */
audio_done = vorbis_eos;
break;
}
}
audio_done = videobuf_time < (audio_frames_wrote / float(vi.rate));
if (buffer_full) {
break;
}
}
while (theora_p && !frame_done) {
/* theora is one in, one out... */
while (!video_ready && !video_done) {
if (ogg_stream_packetout(&to, &op) > 0) {
/*HACK: This should be set after a seek or a gap, but we might not have
a granulepos for the first packet (we only have them for the last
@@ -472,62 +394,37 @@ void VideoStreamPlaybackTheora::update(double p_delta) {
sizeof(op.granulepos));
}
ogg_int64_t videobuf_granulepos;
if (th_decode_packetin(td, &op, &videobuf_granulepos) == 0) {
videobuf_time = th_granule_time(td, videobuf_granulepos);
//printf("frame time %f, play time %f, ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
/* is it already too old to be useful? This is only actually
useful cosmetically after a SIGSTOP. Note that we have to
decode the frame even if we don't show it (for now) due to
keyframing. Soon enough libtheora will be able to deal
with non-keyframe seeks. */
if (videobuf_time >= get_time()) {
frame_done = true;
int ret = th_decode_packetin(td, &op, &videobuf_granulepos);
if (ret == 0 || ret == TH_DUPFRAME) {
next_frame_time = th_granule_time(td, videobuf_granulepos);
if (next_frame_time > comp_time) {
dup_frame = (ret == TH_DUPFRAME);
video_ready = true;
} else {
/*If we are too slow, reduce the pp level.*/
pp_inc = pp_level > 0 ? -1 : 0;
}
}
} else {
no_theora = true;
} else { /* we need more data; break out to suck in another page */
video_done = theora_eos;
break;
}
}
#ifdef THEORA_USE_THREAD_STREAMING
if (file.is_valid() && thread_eof && no_theora && theora_eos && ring_buffer.data_left() == 0) {
#else
if (file.is_valid() && /*!videobuf_ready && */ no_theora && theora_eos) {
#endif
//printf("video done, stopping\n");
stop();
return;
}
if (!frame_done || !audio_done) {
//what's the point of waiting for audio to grab a page?
buffer_data();
if (!video_ready || !audio_ready) {
int ret = buffer_data();
if (ret > 0) {
while (ogg_sync_pageout(&oy, &og) > 0) {
queue_page(&og);
}
}
/* If playback has begun, top audio buffer off immediately. */
//if(stateflag) audio_write_nonblocking();
/* are we at or past time for this video frame? */
if (videobuf_ready && videobuf_time <= get_time()) {
//video_write();
//videobuf_ready=0;
} else {
//printf("frame at %f not ready (time %f), ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
vorbis_eos = true;
theora_eos = true;
break;
}
}
double tdiff = videobuf_time - get_time();
double tdiff = next_frame_time - comp_time;
/*If we have lots of extra time, increase the post-processing level.*/
if (tdiff > ti.fps_denominator * 0.25 / ti.fps_numerator) {
pp_inc = pp_level < pp_level_max ? 1 : 0;
@@ -536,7 +433,22 @@ void VideoStreamPlaybackTheora::update(double p_delta) {
}
}
video_write();
if (!video_ready && video_done && audio_done) {
stop();
return;
}
// Wait for the last frame to end before rendering the next one.
if (video_ready && comp_time >= current_frame_time) {
if (!dup_frame) {
th_ycbcr_buffer yuv;
th_decode_ycbcr_out(td, yuv);
video_write(yuv);
}
dup_frame = false;
video_ready = false;
current_frame_time = next_frame_time;
}
}
void VideoStreamPlaybackTheora::play() {
@@ -596,44 +508,11 @@ int VideoStreamPlaybackTheora::get_mix_rate() const {
return vi.rate;
}
#ifdef THEORA_USE_THREAD_STREAMING
void VideoStreamPlaybackTheora::_streaming_thread(void *ud) {
VideoStreamPlaybackTheora *vs = static_cast<VideoStreamPlaybackTheora *>(ud);
while (!vs->thread_exit) {
//just fill back the buffer
if (!vs->thread_eof) {
int to_read = vs->ring_buffer.space_left();
if (to_read > 0) {
uint64_t read = vs->file->get_buffer(vs->read_buffer.ptr(), to_read);
vs->ring_buffer.write(vs->read_buffer.ptr(), read);
vs->thread_eof = vs->file->eof_reached();
}
}
vs->thread_sem->wait();
}
}
#endif
VideoStreamPlaybackTheora::VideoStreamPlaybackTheora() {
texture.instantiate();
#ifdef THEORA_USE_THREAD_STREAMING
int rb_power = nearest_shift(RB_SIZE_KB * 1024);
ring_buffer.resize(rb_power);
read_buffer.resize(RB_SIZE_KB * 1024);
thread_sem = Semaphore::create();
#endif
}
VideoStreamPlaybackTheora::~VideoStreamPlaybackTheora() {
#ifdef THEORA_USE_THREAD_STREAMING
memdelete(thread_sem);
#endif
clear();
}


@@ -41,27 +41,20 @@
class ImageTexture;
//#define THEORA_USE_THREAD_STREAMING
class VideoStreamPlaybackTheora : public VideoStreamPlayback {
GDCLASS(VideoStreamPlaybackTheora, VideoStreamPlayback);
enum {
MAX_FRAMES = 4,
};
//Image frames[MAX_FRAMES];
Image::Format format = Image::Format::FORMAT_L8;
Vector<uint8_t> frame_data;
int frames_pending = 0;
Ref<FileAccess> file;
String file_name;
int audio_frames_wrote = 0;
Point2i size;
Rect2i region;
int buffer_data();
int queue_page(ogg_page *page);
void video_write();
void video_write(th_ycbcr_buffer yuv);
double get_time() const;
bool theora_eos = false;
@@ -79,43 +72,30 @@ class VideoStreamPlaybackTheora : public VideoStreamPlayback {
vorbis_block vb;
vorbis_comment vc;
th_pixel_fmt px_fmt;
double videobuf_time = 0;
int pp_inc = 0;
double frame_duration;
int theora_p = 0;
int vorbis_p = 0;
int pp_level_max = 0;
int pp_level = 0;
int videobuf_ready = 0;
int pp_inc = 0;
bool playing = false;
bool buffering = false;
bool paused = false;
bool dup_frame = false;
bool video_ready = false;
bool video_done = false;
bool audio_done = false;
double last_update_time = 0;
double time = 0;
double next_frame_time = 0;
double current_frame_time = 0;
double delay_compensation = 0;
Ref<ImageTexture> texture;
bool paused = false;
#ifdef THEORA_USE_THREAD_STREAMING
enum {
RB_SIZE_KB = 1024
};
RingBuffer<uint8_t> ring_buffer;
Vector<uint8_t> read_buffer;
bool thread_eof = false;
Semaphore *thread_sem = nullptr;
Thread thread;
SafeFlag thread_exit;
static void _streaming_thread(void *ud);
#endif
int audio_track = 0;
protected:


@@ -158,6 +158,7 @@ void VideoStreamPlayer::_notification(int p_notification) {
playback->update(delta); // playback->is_playing() returns false in the last video frame
if (!playback->is_playing()) {
resampler.flush();
if (loop) {
play();
return;


@@ -86,7 +86,7 @@ public:
} else if (w < r) {
space = r - w - 1;
} else {
space = (rb_len - r) + w - 1;
space = (rb_len - w) + (r - 1);
}
return space;
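
The final hunk corrects the branch of the ring buffer free-space computation where the write position is ahead of the read position: the writable area is the tail of the buffer plus the slots before the reader, minus the one slot conventionally kept empty to distinguish full from empty. A standalone sanity check, not part of the patch, with invented positions:

#include <cassert>

int main() {
    // 8-slot buffer, reader at 2, writer at 5: slots 2, 3, 4 hold data and
    // one slot stays reserved, so 8 - 3 - 1 = 4 slots should be writable.
    const int rb_len = 8, r = 2, w = 5;
    const int expected = rb_len - (w - r) - 1;

    const int fixed_expr = (rb_len - w) + (r - 1); // new expression: 3 + 1 = 4
    const int old_expr = (rb_len - r) + w - 1;     // old expression: 6 + 4 = 10, larger than the buffer itself

    assert(fixed_expr == expected);
    assert(old_expr != expected);
    return 0;
}

The old expression grew with the amount of buffered data instead of shrinking, so the free-space query (presumably the space_left() the streaming code above relies on) could report more room than the buffer actually had whenever the writer was ahead of the reader.
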