[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F...
From: Sandro Santilli
Subject: [Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F...
Date: Tue, 27 May 2008 11:58:46 +0000
CVSROOT: /sources/gnash
Module name: gnash
Changes by: Sandro Santilli <strk> 08/05/27 11:58:46
Modified files:
. : ChangeLog
libbase : FLVParser.cpp FLVParser.h
server/asobj : NetConnection.cpp NetConnection.h
NetStreamFfmpeg.cpp NetStreamFfmpeg.h
Log message:
* libbase/FLVParser.{cpp,h}: take ownership of the input stream.
* server/asobj/NetConnection.{cpp,h}: drop getConnectedParser().
* server/asobj/NetStreamFfmpeg.{cpp,h}: load, parse and decode
in main thread, consume audio in audio buffers in sdl thread.
Have a compile-time macro to enable load&parsing in a separate
thread
CVSWeb URLs:
http://cvs.savannah.gnu.org/viewcvs/gnash/ChangeLog?cvsroot=gnash&r1=1.6720&r2=1.6721
http://cvs.savannah.gnu.org/viewcvs/gnash/libbase/FLVParser.cpp?cvsroot=gnash&r1=1.41&r2=1.42
http://cvs.savannah.gnu.org/viewcvs/gnash/libbase/FLVParser.h?cvsroot=gnash&r1=1.31&r2=1.32
http://cvs.savannah.gnu.org/viewcvs/gnash/server/asobj/NetConnection.cpp?cvsroot=gnash&r1=1.61&r2=1.62
http://cvs.savannah.gnu.org/viewcvs/gnash/server/asobj/NetConnection.h?cvsroot=gnash&r1=1.42&r2=1.43
http://cvs.savannah.gnu.org/viewcvs/gnash/server/asobj/NetStreamFfmpeg.cpp?cvsroot=gnash&r1=1.135&r2=1.136
http://cvs.savannah.gnu.org/viewcvs/gnash/server/asobj/NetStreamFfmpeg.h?cvsroot=gnash&r1=1.69&r2=1.70
Patches:
Index: ChangeLog
===================================================================
RCS file: /sources/gnash/gnash/ChangeLog,v
retrieving revision 1.6720
retrieving revision 1.6721
diff -u -b -r1.6720 -r1.6721
--- ChangeLog 27 May 2008 09:55:25 -0000 1.6720
+++ ChangeLog 27 May 2008 11:58:45 -0000 1.6721
@@ -1,3 +1,12 @@
+2008-05-27 Sandro Santilli <address@hidden>
+
+ * libbase/FLVParser.{cpp,h}: take ownership of the input stream.
+ * server/asobj/NetConnection.{cpp,h}: drop getConnectedParser().
+ * server/asobj/NetStreamFfmpeg.{cpp,h}: load, parse and decode
+ in main thread, consume audio in audio buffers in sdl thread.
+ Have a compile-time macro to enable load&parsing in a separate
+ thread
+
2008-05-27 Benjamin Wolsey <address@hidden>
* libbase/log.{cpp,h}: header cleanup, rename DEBUGLEVEL to
Index: libbase/FLVParser.cpp
===================================================================
RCS file: /sources/gnash/gnash/libbase/FLVParser.cpp,v
retrieving revision 1.41
retrieving revision 1.42
diff -u -b -r1.41 -r1.42
--- libbase/FLVParser.cpp 26 May 2008 20:24:21 -0000 1.41
+++ libbase/FLVParser.cpp 27 May 2008 11:58:45 -0000 1.42
@@ -93,7 +93,7 @@
return frame;
}
-FLVParser::FLVParser(tu_file& lt)
+FLVParser::FLVParser(std::auto_ptr<tu_file> lt)
:
_lt(lt),
_lastParsedPosition(0),
@@ -208,7 +208,7 @@
FLVAudioFrameInfo* frameInfo = peekNextAudioFrameInfo();
if ( ! frameInfo ) return 0;
- std::auto_ptr<FLVFrame> frame = makeAudioFrame(_lt, *frameInfo);
+ std::auto_ptr<FLVFrame> frame = makeAudioFrame(*_lt, *frameInfo);
if ( ! frame.get() )
{
log_error("Could not make audio frame %d", _nextAudioFrame);
@@ -249,7 +249,7 @@
FLVFrame* FLVParser::nextVideoFrame()
{
FLVVideoFrameInfo* frameInfo = peekNextVideoFrameInfo();
- std::auto_ptr<FLVFrame> frame = makeVideoFrame(_lt, *frameInfo);
+ std::auto_ptr<FLVFrame> frame = makeVideoFrame(*_lt, *frameInfo);
if ( ! frame.get() )
{
log_error("Could not make video frame %d", _nextVideoFrame);
@@ -456,7 +456,7 @@
if (_lastParsedPosition == 0 && !parseHeader()) return false;
// Seek to next frame and skip the size of the last tag
- if ( _lt.set_position(_lastParsedPosition+4) )
+ if ( _lt->set_position(_lastParsedPosition+4) )
{
log_error("FLVParser::parseNextTag: can't seek to %d",
_lastParsedPosition+4);
_parsingComplete=true;
@@ -465,12 +465,12 @@
// Read the tag info
boost::uint8_t tag[12];
- int actuallyRead = _lt.read_bytes(tag, 12);
+ int actuallyRead = _lt->read_bytes(tag, 12);
if ( actuallyRead < 12 )
{
if ( actuallyRead )
log_error("FLVParser::parseNextTag: can't read tag info
(needed 12 bytes, only got %d)", actuallyRead);
- // else { assert(_lt.get_eof(); } ?
+ // else { assert(_lt->get_eof(); } ?
_parsingComplete=true;
return false;
}
@@ -489,7 +489,7 @@
FLVAudioFrameInfo* frame = new FLVAudioFrameInfo;
frame->dataSize = bodyLength - 1;
frame->timestamp = timestamp;
- frame->dataPosition = _lt.get_position();
+ frame->dataPosition = _lt->get_position();
_audioFrames.push_back(frame);
// If this is the first audioframe no info about the
@@ -516,7 +516,7 @@
FLVVideoFrameInfo* frame = new FLVVideoFrameInfo;
frame->dataSize = bodyLength - 1;
frame->timestamp = timestamp;
- frame->dataPosition = _lt.get_position();
+ frame->dataPosition = _lt->get_position();
frame->frameType = (tag[11] & 0xf0) >> 4;
_videoFrames.push_back(frame);
@@ -531,14 +531,14 @@
// Extract the video size from the videodata header
if (codec == VIDEO_CODEC_H263) {
- if ( _lt.set_position(frame->dataPosition) )
+ if ( _lt->set_position(frame->dataPosition) )
{
log_error(" Couldn't seek to VideoTag
data position");
_parsingComplete=true;
return false;
}
boost::uint8_t videohead[12];
- int actuallyRead = _lt.read_bytes(videohead,
12);
+ int actuallyRead = _lt->read_bytes(videohead,
12);
if ( actuallyRead < 12 )
{
log_error("FLVParser::parseNextTag: can't read H263 video
header (needed 12 bytes, only got %d)", actuallyRead);
@@ -589,9 +589,9 @@
} else if (tag[0] == META_TAG) {
LOG_ONCE( log_unimpl("FLV MetaTag parser") );
// Extract information from the meta tag
- /*_lt.seek(_lastParsedPosition+16);
+ /*_lt->seek(_lastParsedPosition+16);
char* metaTag = new char[bodyLength];
- size_t actuallyRead = _lt.read(metaTag, bodyLength);
+ size_t actuallyRead = _lt->read(metaTag, bodyLength);
if ( actuallyRead < bodyLength )
{
log_error("FLVParser::parseNextTag: can't read metaTag
(%d) body (needed %d bytes, only got %d)",
@@ -614,11 +614,11 @@
bool FLVParser::parseHeader()
{
// seek to the begining of the file
- _lt.set_position(0); // seek back ? really ?
+ _lt->set_position(0); // seek back ? really ?
// Read the header
boost::uint8_t header[9];
- if ( _lt.read_bytes(header, 9) != 9 )
+ if ( _lt->read_bytes(header, 9) != 9 )
{
log_error("FLVParser::parseHeader: couldn't read 9 bytes of
header");
return false;
@@ -654,7 +654,7 @@
boost::uint64_t
FLVParser::getBytesTotal() const
{
- return _lt.get_size();
+ return _lt->get_size();
}
FLVFrame* FLVParser::nextMediaFrame()
@@ -693,7 +693,7 @@
FLVAudioFrameInfo* frameInfo = _audioFrames[_nextAudioFrame];
- std::auto_ptr<FLVFrame> frame = makeAudioFrame(_lt, *frameInfo);
+ std::auto_ptr<FLVFrame> frame = makeAudioFrame(*_lt,
*frameInfo);
if ( ! frame.get() )
{
log_error("Could not make audio frame %d",
_nextAudioFrame);
@@ -706,7 +706,7 @@
} else {
FLVVideoFrameInfo* frameInfo = _videoFrames[_nextVideoFrame];
- std::auto_ptr<FLVFrame> frame = makeVideoFrame(_lt, *frameInfo);
+ std::auto_ptr<FLVFrame> frame = makeVideoFrame(*_lt,
*frameInfo);
if ( ! frame.get() )
{
log_error("Could not make video frame %d",
_nextVideoFrame);
Index: libbase/FLVParser.h
===================================================================
RCS file: /sources/gnash/gnash/libbase/FLVParser.h,v
retrieving revision 1.31
retrieving revision 1.32
diff -u -b -r1.31 -r1.32
--- libbase/FLVParser.h 24 May 2008 22:03:32 -0000 1.31
+++ libbase/FLVParser.h 27 May 2008 11:58:45 -0000 1.32
@@ -202,10 +202,9 @@
//
/// @param lt
/// LoadThread to use for input.
- /// Ownership left to the caller.
- /// TODO: take ownership
+ /// Ownership transferred.
///
- FLVParser(tu_file& lt);
+ FLVParser(std::auto_ptr<tu_file> lt);
/// Kills the parser...
~FLVParser();
@@ -360,7 +359,7 @@
inline boost::uint32_t getUInt24(boost::uint8_t* in);
/// The interface to the file, externally owned
- tu_file& _lt;
+ std::auto_ptr<tu_file> _lt;
// NOTE: FLVVideoFrameInfo is a relatively small structure,
// chances are keeping by value here would reduce
Index: server/asobj/NetConnection.cpp
===================================================================
RCS file: /sources/gnash/gnash/server/asobj/NetConnection.cpp,v
retrieving revision 1.61
retrieving revision 1.62
diff -u -b -r1.61 -r1.62
--- server/asobj/NetConnection.cpp 9 May 2008 15:33:45 -0000 1.61
+++ server/asobj/NetConnection.cpp 27 May 2008 11:58:45 -0000 1.62
@@ -234,12 +234,13 @@
std::auto_ptr<FLVParser>
-NetConnection::getConnectedParser() const
+NetConnection::getConnectedParser()
{
std::auto_ptr<FLVParser> ret;
if ( _loader.get() ) {
- ret.reset( new FLVParser(*_loader) );
+ ret.reset( new FLVParser(_loader) ); // transfer loader ownership
+ assert(!_loader.get());
}
return ret;
Index: server/asobj/NetConnection.h
===================================================================
RCS file: /sources/gnash/gnash/server/asobj/NetConnection.h,v
retrieving revision 1.42
retrieving revision 1.43
diff -u -b -r1.42 -r1.43
--- server/asobj/NetConnection.h 9 May 2008 15:33:45 -0000 1.42
+++ server/asobj/NetConnection.h 27 May 2008 11:58:46 -0000 1.43
@@ -145,7 +145,7 @@
//
/// If not connected, a NULL auto_ptr is returned.
///
- std::auto_ptr<FLVParser> getConnectedParser() const;
+ std::auto_ptr<FLVParser> getConnectedParser();
/// Returns whether the load is complete
//
Index: server/asobj/NetStreamFfmpeg.cpp
===================================================================
RCS file: /sources/gnash/gnash/server/asobj/NetStreamFfmpeg.cpp,v
retrieving revision 1.135
retrieving revision 1.136
diff -u -b -r1.135 -r1.136
--- server/asobj/NetStreamFfmpeg.cpp 26 May 2008 20:30:52 -0000 1.135
+++ server/asobj/NetStreamFfmpeg.cpp 27 May 2008 11:58:46 -0000 1.136
@@ -32,7 +32,7 @@
#include "movie_root.h"
#include "sound_handler.h"
#include "VideoDecoderFfmpeg.h"
-#include "ClockTime.h" // TODO: use the VirtualClock instead ?
+#include "SystemClock.h"
#include "FLVParser.h"
@@ -48,24 +48,31 @@
#endif
/// Define this to add debugging prints for locking
-#define GNASH_DEBUG_THREADS
+//#define GNASH_DEBUG_THREADS
// Define the following macro to have status notification handling debugged
//#define GNASH_DEBUG_STATUS
+// Define the following macro to have decoding activity debugged
+//#define GNASH_DEBUG_DECODING
+
+namespace {
+
// Used to free data in the AVPackets we create our self
-static void avpacket_destruct(AVPacket* av)
+void avpacket_destruct(AVPacket* av)
{
delete [] av->data;
}
+} // anonymous namespace
+
namespace gnash {
-NetStreamFfmpeg::NetStreamFfmpeg():
+NetStreamFfmpeg::NetStreamFfmpeg()
+ :
- _playback_state(PLAY_NONE),
_decoding_state(DEC_NONE),
m_video_index(-1),
@@ -76,16 +83,19 @@
m_FormatCtx(NULL),
m_Frame(NULL),
- _decodeThread(NULL),
- _decodeThreadBarrier(2), // main and decoder threads
- _qFillerKillRequest(false),
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ _parserThread(NULL),
+ _parserThreadBarrier(2), // main and decoder threads
+ _parserKillRequest(false),
+#endif
m_last_video_timestamp(0),
m_last_audio_timestamp(0),
- m_current_timestamp(0),
+
+ _playbackClock(new InterruptableVirtualClock(new SystemClock)),
+ _playHead(_playbackClock.get()),
+
m_unqueued_data(NULL),
- m_time_of_pause(0),
- m_start_onbuffer(false),
_decoderBuffer(0),
_soundHandler(get_sound_handler())
@@ -107,13 +117,11 @@
void NetStreamFfmpeg::pause( PauseMode mode )
{
log_debug("::pause(%d) called ", mode);
- switch ( mode ) {
+ switch ( mode )
+ {
case pauseModeToggle:
- if ( playbackStatus() == PLAY_PAUSED ) {
- unpausePlayback();
- } else {
- pausePlayback();
- }
+ if ( _playHead.getState() == PlayHead::PLAY_PAUSED)
unpausePlayback();
+ else pausePlayback();
break;
case pauseModePause:
pausePlayback();
@@ -131,7 +139,9 @@
{
GNASH_REPORT_FUNCTION;
- killDecodeThread();
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ killParserThread();
+#endif
// When closing gnash before playback is finished, the soundhandler
// seems to be removed before netstream is destroyed.
@@ -165,11 +175,6 @@
delete m_unqueued_data;
m_unqueued_data = NULL;
- boost::mutex::scoped_lock lock(_qMutex);
-
- m_qvideo.clear();
- m_qaudio.clear();
-
delete [] ByteIOCxt.buffer;
}
@@ -180,10 +185,12 @@
{
NetStreamFfmpeg* ns = static_cast<NetStreamFfmpeg*>(opaque);
- boost::intrusive_ptr<NetConnection> nc = ns->_netCon;
- size_t ret = nc->read(static_cast<void*>(buf), buf_size);
- ns->inputPos += ret;
+ assert( ns->_inputStream.get() );
+ tu_file& in = *(ns->_inputStream);
+
+ size_t ret = in.read_bytes(static_cast<void*>(buf), buf_size);
+ ns->inputPos += ret; // what for ??
return ret;
}
@@ -194,21 +201,21 @@
{
NetStreamFfmpeg* ns = static_cast<NetStreamFfmpeg*>(opaque);
- boost::intrusive_ptr<NetConnection> nc = ns->_netCon;
+ tu_file& in = *(ns->_inputStream);
// Offset is absolute new position in the file
if (whence == SEEK_SET)
{
- nc->seek(offset);
- ns->inputPos = offset;
+ in.set_position(offset);
+ ns->inputPos = offset; // what for ?!
// New position is offset + old position
}
else if (whence == SEEK_CUR)
{
- nc->seek(ns->inputPos + offset);
- ns->inputPos = ns->inputPos + offset;
+ in.set_position(ns->inputPos + offset);
+ ns->inputPos = ns->inputPos + offset; // what for ?!
// New position is offset + end of file
}
@@ -216,23 +223,22 @@
{
// This is (most likely) a streamed file, so we can't seek to
the end!
// Instead we seek to 50.000 bytes... seems to work fine...
- nc->seek(50000);
- ns->inputPos = 50000;
+ in.set_position(50000);
+ ns->inputPos = 50000; // what for ?!
}
- return ns->inputPos;
+ return ns->inputPos; // ah, thats why ! :/
}
void
NetStreamFfmpeg::play(const std::string& c_url)
{
-
// Is it already playing ?
- if (playbackStatus() != PLAY_NONE && playbackStatus() != PLAY_STOPPED)
+ if ( m_parser.get() )
{
- log_error("NetStream.play() called already playing ?"); //
TODO: fix this case
- //unpausePlayback(); // will check for playbackStatus itself..
+ // TODO: check what to do in these cases
+ log_error("FIXME: NetStream.play() called while already
streaming");
return;
}
@@ -245,13 +251,34 @@
return;
}
- if (url.size() == 0) url += c_url;
+ url = c_url;
+
// Remove any "mp3:" prefix. Maybe should use this to mark as audio-only
if (url.compare(0, 4, std::string("mp3:")) == 0)
{
url = url.substr(4);
}
+ // TODO: check what is this needed for, I'm not sure it would be
needed..
+ url = _netCon->validateURL(url);
+ if (url.empty())
+ {
+ log_error("Couldn't load URL %s", c_url);
+ return;
+ }
+
+ log_security( _("Connecting to movie: %s"), url );
+
+ StreamProvider& streamProvider = StreamProvider::getDefaultInstance();
+ _inputStream.reset( streamProvider.getStream( url ) );
+
+ if ( ! _inputStream.get() )
+ {
+ log_error( _("Gnash could not open this url: %s"), url );
+ setStatus(streamNotFound);
+ return;
+ }
+
// We need to start playback
if (!startPlayback())
{
@@ -259,15 +286,15 @@
return;
}
- //decodingStatus(DEC_BUFFERING);
-
// We need to restart the audio
if (_soundHandler)
_soundHandler->attach_aux_streamer(audio_streamer, this);
- // This starts the decoding thread
- _decodeThread = new
boost::thread(boost::bind(NetStreamFfmpeg::av_streamer, this));
- _decodeThreadBarrier.wait();
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ // This starts the parser thread
+ _parserThread = new
boost::thread(boost::bind(NetStreamFfmpeg::parseAllInput, this));
+ _parserThreadBarrier.wait();
+#endif // LOAD_MEDIA_IN_A_SEPARATE_THREAD
return;
}
@@ -354,6 +381,7 @@
FLVAudioInfo* audioInfo = parser.getAudioInfo();
if (!audioInfo)
{
+ log_debug("No audio in FLV stream");
return NULL;
}
@@ -407,43 +435,38 @@
bool
NetStreamFfmpeg::startPlayback()
{
+ assert(_inputStream.get());
+ assert(_inputStream->get_position() == 0);
- boost::intrusive_ptr<NetConnection> nc = _netCon;
- assert(nc);
-
- // Pass stuff from/to the NetConnection object.
- if ( !nc->openConnection(url) )
- {
- log_error(_("Gnash could not open movie: %s"), url.c_str());
- setStatus(streamNotFound);
- return false;
- }
-
- nc->seek(0);
inputPos = 0;
// Check if the file is a FLV, in which case we use our own parser
char head[4] = {0, 0, 0, 0};
- if (nc->read(head, 3) < 3)
+ if (_inputStream->read_bytes(head, 3) < 3)
{
+ log_error(_("Could not read 3 bytes from NetStream input"));
+ // not really correct, the stream was found, just wasn't what
we expected..
setStatus(streamNotFound);
return false;
}
- nc->seek(0);
+
+ _inputStream->set_position(0);
if (std::string(head) == "FLV")
{
m_isFLV = true;
- if (!m_parser.get())
- {
- m_parser = nc->getConnectedParser();
+ assert ( !m_parser.get() );
+
+ m_parser.reset( new FLVParser(_inputStream) );
+ assert(! _inputStream.get() ); // TODO: when ownership will be
transferred...
+
if (! m_parser.get() )
{
- setStatus(streamNotFound);
log_error(_("Gnash could not open FLV movie:
%s"), url.c_str());
+ // not really correct, the stream was found, just
wasn't what we expected..
+ setStatus(streamNotFound);
return false;
}
- }
// Init the avdecoder-decoder
avcodec_init();
@@ -459,8 +482,9 @@
m_ACodecCtx = initFlvAudio(*m_parser);
if (!m_ACodecCtx)
{
- log_error(_("Failed to initialize FLV audio codec"));
- return false;
+ // There might simply be no audio, no problem...
+ //log_error(_("Failed to initialize FLV audio codec"));
+ //return false;
}
// We just define the indexes here, they're not really used when
@@ -468,8 +492,6 @@
m_video_index = 0;
m_audio_index = 1;
- m_start_onbuffer = true;
-
// Allocate a frame to store the decoded frame in
m_Frame = avcodec_alloc_frame();
}
@@ -490,7 +512,9 @@
}
// After the format probe, reset to the beginning of the file.
- nc->seek(0);
+ // TODO: have this done by probeStream !
+ // (actually, have the whole thing done by MediaParser)
+ _inputStream->set_position(0);
// Setup the filereader/seeker mechanism. 7th argument (NULL)
is the writer function,
// which isn't needed.
@@ -603,8 +627,17 @@
}
}
- playbackStatus(PLAY_PLAYING);
- m_start_clock = clocktime::getTicks();
+ //_playHead.init(m_VCodecCtx!=0, false); // second arg should be
m_ACodecCtx!=0, but we're testing video only for now
+ _playHead.init(m_VCodecCtx!=0, m_ACodecCtx!=0);
+ _playHead.setState(PlayHead::PLAY_PLAYING);
+
+ decodingStatus(DEC_BUFFERING);
+
+#ifdef GNASH_DEBUG_STATUS
+ log_debug("Setting playStart status");
+#endif // GNASH_DEBUG_STATUS
+ setStatus(playStart);
+
return true;
}
@@ -632,115 +665,59 @@
}
}
-// decoder thread
-void NetStreamFfmpeg::av_streamer(NetStreamFfmpeg* ns)
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+// to be run in parser thread
+void NetStreamFfmpeg::parseAllInput(NetStreamFfmpeg* ns)
{
//GNASH_REPORT_FUNCTION;
- ns->_decodeThreadBarrier.wait();
-
- //assert (ns->m_ACodecCtx); // is only set if audio decoder could be
initialized
- //assert (ns->m_VCodecCtx); // is only set if video decder could be
initialized
- //assert (ns->m_FormatCtx); // is only set for non-flv
-
- ns->setStatus(playStart);
-
- ns->m_last_video_timestamp = 0;
- ns->m_last_audio_timestamp = 0;
- ns->m_current_timestamp = 0;
+ ns->_parserThreadBarrier.wait();
- ns->m_start_clock = clocktime::getTicks();
-
- ns->m_unqueued_data = NULL;
-
- // Loop until killed
- while ( ! ns->decodeThreadKillRequested() ) // locks _qMutex
- {
- unsigned long int sleepTime = 1000;
-
- {
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: waiting for lock in av_streamer");
-#endif
- boost::mutex::scoped_lock lock(ns->_qMutex);
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: lock obtained in av_streamer");
-#endif
-
- if ( ns->decodingStatus() == DEC_STOPPED )
- {
- log_debug("Dec stopped (eof), waiting on qNeedRefill
condition");
- ns->_qFillerResume.wait(lock);
- continue; // will release the lock for a moment
- }
-
-#ifdef GNASH_DEBUG_THREADS
- log_debug("Decoding iteration. bufferTime=%lu, bufferLen=%lu,
videoFrames=%lu, audioFrames=%lu",
- ns->bufferTime(), ns->bufferLength(),
ns->m_qvideo.size(), ns->m_qaudio.size());
-#endif
-
- if (ns->m_isFLV)
- {
- // If any of the two queues are full don't bother
fetching more
- // (next consumer will wake us up)
- //
- if ( ns->m_qvideo.full() || ns->m_qaudio.full() )
+ // Parse in a thread...
+ while ( 1 )
{
- ns->decodingStatus(DEC_DECODING); // that's to
say: not buffering anymore
+ // this one will lock _parserKillRequestMutex
+ if ( ns->parserThreadKillRequested() ) break;
- // Instead wait till waked up by short-queues
event
- log_debug("Queues full, waiting on qNeedRefill
condition");
- ns->_qFillerResume.wait(lock);
- }
- else
- {
- log_debug("Calling decodeFLVFrame");
- bool successDecoding = ns->decodeFLVFrame();
- //log_debug("decodeFLVFrame returned %d",
successDecoding);
- if ( ! successDecoding )
- {
- // Possible failures:
- // 1. could not decode frame... lot's
of possible
- // reasons...
- // 2. EOF reached
- if ( ns->m_videoFrameFormat !=
render::NONE )
{
- log_error("Could not decode FLV
frame");
- }
- // else it's expected, we'll keep going
anyway
+ boost::mutex::scoped_lock lock(ns->_parserMutex);
+ if ( ns->m_parser->parsingCompleted() ) break;
+ ns->m_parser->parseNextTag();
}
+ usleep(10); // task switch (after lock was released!)
}
+}
- }
- else
- {
+void
+NetStreamFfmpeg::killParserThread()
+{
+ GNASH_REPORT_FUNCTION;
- // If we have problems with decoding - break
- if (ns->decodeMediaFrame() == false &&
ns->m_start_onbuffer == false && ns->m_qvideo.size() == 0 &&
ns->m_qaudio.size() == 0)
{
- break;
- }
-
- }
-
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: releasing lock in av_streamer");
-#endif
+ boost::mutex::scoped_lock lock(_parserKillRequestMutex);
+ _parserKillRequest = true;
}
- //log_debug("Sleeping %d microseconds", sleepTime);
- usleep(sleepTime); // Sleep 1ms to avoid busying the processor.
-
+ // might as well be never started
+ if ( _parserThread )
+ {
+ _parserThread->join();
}
-//#ifdef GNASH_DEBUG_THREADS
- log_debug("Out of decoding loop. playbackStatus:%d, decodingStatus:%d",
ns->playbackStatus(), ns->decodingStatus());
-//#endif
- ns->decodingStatus(DEC_STOPPED);
+ delete _parserThread;
+ _parserThread = NULL;
+}
+bool
+NetStreamFfmpeg::parserThreadKillRequested()
+{
+ boost::mutex::scoped_lock lock(_parserKillRequestMutex);
+ return _parserKillRequest;
}
+#endif // LOAD_MEDIA_IN_A_SEPARATE_THREAD
+
// audio callback is running in sound handler thread
bool NetStreamFfmpeg::audio_streamer(void *owner, boost::uint8_t *stream, int
len)
{
@@ -748,32 +725,24 @@
NetStreamFfmpeg* ns = static_cast<NetStreamFfmpeg*>(owner);
- PlaybackState pbStatus = ns->playbackStatus();
- if (pbStatus != PLAY_PLAYING)
- {
- log_debug("playback status is paused, won't consume audio
frames");
- return false;
- }
+ boost::mutex::scoped_lock lock(ns->_audioQueueMutex);
+
+#if 0
+ log_debug("audio_streamer called, audioQueue size: %d, "
+ "requested %d bytes of fill-up",
+ ns->_audioQueue.size(), len);
+#endif
+
while (len > 0)
{
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: waiting for lock in audio_streamer");
-#endif
- boost::mutex::scoped_lock lock(ns->_qMutex);
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: lock obtained in audio_streamer");
-#endif
- if ( ns->m_qaudio.empty() )
+ if ( ns->_audioQueue.empty() )
{
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: releasing lock in audio_streamer");
-#endif
break;
}
- media::raw_mediadata_t* samples = ns->m_qaudio.front();
+ media::raw_mediadata_t* samples = ns->_audioQueue.front();
int n = std::min<int>(samples->m_size, len);
memcpy(stream, samples->m_ptr, n);
@@ -782,26 +751,156 @@
samples->m_size -= n;
len -= n;
- ns->m_current_timestamp = samples->m_pts;
-
if (samples->m_size == 0)
{
- ns->m_qaudio.pop();
delete samples;
-
- // wake up filler (TODO: do only if decoder is running)
- ns->_qFillerResume.notify_all();
+ ns->_audioQueue.pop_front();
}
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: releasing lock in audio_streamer");
-#endif
}
+
return true;
}
-bool NetStreamFfmpeg::decodeFLVFrame()
+media::raw_mediadata_t*
+NetStreamFfmpeg::getDecodedVideoFrame(boost::uint32_t ts)
{
+ assert(m_parser.get());
+ if ( ! m_parser.get() )
+ {
+ log_error("getDecodedVideoFrame: no parser available");
+ return 0; // no parser, no party
+ }
+
+ FLVVideoFrameInfo* info = m_parser->peekNextVideoFrameInfo();
+ if ( ! info )
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("getDecodedVideoFrame(%d): no more video frames in
input (peekNextVideoFrameInfo returned false)");
+#endif // GNASH_DEBUG_DECODING
+ decodingStatus(DEC_STOPPED);
+ return 0;
+ }
+
+ if ( info->timestamp > ts )
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.getDecodedVideoFrame(%d): next video frame is in
the future (%d)",
+ this, ts, info->timestamp);
+#endif // GNASH_DEBUG_DECODING
+ return 0; // next frame is in the future
+ }
+
+ // Loop until a good frame is found
+ media::raw_mediadata_t* video = 0;
+ while ( 1 )
+ {
+ video = decodeNextVideoFrame();
+ if ( ! video )
+ {
+ log_error("peekNextVideoFrameInfo returned some info, "
+ "but decodeNextVideoFrame returned null, "
+ "I don't think this should ever happen");
+ break;
+ }
+
+ FLVVideoFrameInfo* info = m_parser->peekNextVideoFrameInfo();
+ if ( ! info )
+ {
+ // the one we decoded was the last one
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.getDecodedVideoFrame(%d): last video
frame decoded "
+ "(should set playback status to STOP?)", this,
ts);
+#endif // GNASH_DEBUG_DECODING
+ break;
+ }
+ if ( info->timestamp > ts )
+ {
+ // the next one is in the future, we'll return this one.
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.getDecodedVideoFrame(%d): "
+ "next video frame is in the future, "
+ "we'll return this one",
+ this, ts);
+#endif // GNASH_DEBUG_DECODING
+ break; // the one we decoded
+ }
+ }
+
+ return video;
+}
+
+media::raw_mediadata_t*
+NetStreamFfmpeg::decodeNextVideoFrame()
+{
+ if ( ! m_parser.get() )
+ {
+ log_error("decodeNextVideoFrame: no parser available");
+ return 0; // no parser, no party
+ }
+
+ FLVFrame* frame = m_parser->nextVideoFrame();
+ if (frame == NULL)
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.decodeNextVideoFrame(): "
+ "no more video frames in input",
+ this);
+#endif // GNASH_DEBUG_DECODING
+ return 0;
+ }
+ assert (frame->type == videoFrame);
+
+ AVPacket packet;
+
+ packet.destruct = avpacket_destruct; // needed ?
+ packet.size = frame->dataSize;
+ packet.data = frame->data;
+ // FIXME: is this the right value for packet.dts?
+ packet.pts = packet.dts = static_cast<boost::int64_t>(frame->timestamp);
+ assert (frame->type == videoFrame);
+ packet.stream_index = 0;
+
+ return decodeVideo(&packet);
+}
+
+media::raw_mediadata_t*
+NetStreamFfmpeg::decodeNextAudioFrame()
+{
+ assert ( m_parser.get() );
+
+ FLVFrame* frame = m_parser->nextAudioFrame();
+ if (frame == NULL)
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.decodeNextAudioFrame: "
+ "no more video frames in input",
+ this);
+#endif // GNASH_DEBUG_DECODING
+ return 0;
+ }
+ assert (frame->type == audioFrame);
+
+ AVPacket packet;
+
+ packet.destruct = avpacket_destruct;
+ packet.size = frame->dataSize;
+ packet.data = frame->data;
+ // FIXME: is this the right value for packet.dts?
+ packet.pts = packet.dts = static_cast<boost::int64_t>(frame->timestamp);
+ assert(frame->type == audioFrame);
+ packet.stream_index = 1;
+
+ return decodeAudio(&packet);
+}
+
+bool
+NetStreamFfmpeg::decodeFLVFrame()
+{
+#if 1
+ abort();
+ return false;
+#else
FLVFrame* frame = m_parser->nextMediaFrame(); // we don't care which
one, do we ?
if (frame == NULL)
@@ -823,21 +922,40 @@
if (frame->type == videoFrame)
{
packet.stream_index = 0;
- return decodeVideo(&packet);
+ media::raw_mediadata_t* video = decodeVideo(&packet);
+ assert (m_isFLV);
+ if (video)
+ {
+ // NOTE: Caller is assumed to have locked _qMutex
already
+ if ( ! m_qvideo.push(video) )
+ {
+ log_error("Video queue full !");
+ }
+ }
}
else
{
assert(frame->type == audioFrame);
packet.stream_index = 1;
- return decodeAudio(&packet);
+ media::raw_mediadata_t* audio = decodeAudio(&packet);
+ if ( audio )
+ {
+ if ( ! m_qaudio.push(audio) )
+ {
+ log_error("Audio queue full!");
+ }
+ }
}
+ return true;
+#endif
}
-bool NetStreamFfmpeg::decodeAudio( AVPacket* packet )
+media::raw_mediadata_t*
+NetStreamFfmpeg::decodeAudio( AVPacket* packet )
{
- if (!m_ACodecCtx) return false;
+ if (!m_ACodecCtx) return 0;
int frame_size;
//static const unsigned int bufsize = (AVCODEC_MAX_AUDIO_FRAME_SIZE *
3) / 2;
@@ -945,26 +1063,21 @@
m_last_audio_timestamp += frame_delay;
- if (m_isFLV)
- {
- if ( ! m_qaudio.push(raw) )
- {
- log_error("Audio queue full!");
- }
+ return raw;
}
- else m_unqueued_data = m_qaudio.push(raw) ? NULL : raw;
- }
- return true;
+ return 0;
}
-bool NetStreamFfmpeg::decodeVideo(AVPacket* packet)
+media::raw_mediadata_t*
+NetStreamFfmpeg::decodeVideo(AVPacket* packet)
{
- if (!m_VCodecCtx) return false;
+ if (!m_VCodecCtx) return NULL;
+ if (!m_Frame) return NULL;
int got = 0;
avcodec_decode_video(m_VCodecCtx, m_Frame, &got, packet->data,
packet->size);
- if (!got) return false;
+ if (!got) return NULL;
// This tmpImage is really only used to compute proper size of the
video data...
// stupid isn't it ?
@@ -983,7 +1096,7 @@
if (m_videoFrameFormat == render::NONE)
{
// NullGui?
- return false;
+ return NULL;
}
else if (m_videoFrameFormat == render::YUV && m_VCodecCtx->pix_fmt !=
PIX_FMT_YUV420P)
@@ -998,7 +1111,7 @@
rgbpicture =
media::VideoDecoderFfmpeg::convertRGB24(m_VCodecCtx, *m_Frame);
if (!rgbpicture.data[0])
{
- return false;
+ return NULL;
}
}
@@ -1089,21 +1202,14 @@
}
- // NOTE: Caller is assumed to have locked _qMutex already
- if (m_isFLV)
- {
- if ( ! m_qvideo.push(video) )
- {
- log_error("Video queue full !");
- }
- }
- else m_unqueued_data = m_qvideo.push(video) ? NULL : video;
-
- return true;
+ return video;
}
bool NetStreamFfmpeg::decodeMediaFrame()
{
+ return false;
+
+#if 0 // Only FLV for now (non-FLV should be threated the same as FLV, using a
MediaParser in place of the FLVParser)
if (m_unqueued_data)
{
@@ -1133,20 +1239,24 @@
{
if (packet.stream_index == m_audio_index && _soundHandler)
{
- if (!decodeAudio(&packet))
+ media::raw_mediadata_t* audio = decodeAudio(&packet);
+ if (!audio)
{
log_error(_("Problems decoding audio frame"));
return false;
}
+ m_unqueued_data = m_qaudio.push(audio) ? NULL : audio;
}
else
if (packet.stream_index == m_video_index)
{
- if (!decodeVideo(&packet))
+ media::raw_mediadata_t* video = decodeVideo(&packet);
+ if (!video)
{
log_error(_("Problems decoding video frame"));
return false;
}
+ m_unqueued_data = m_qvideo.push(video) ? NULL : video;
}
av_free_packet(&packet);
}
@@ -1157,6 +1267,7 @@
}
return true;
+#endif
}
void
@@ -1164,30 +1275,31 @@
{
GNASH_REPORT_FUNCTION;
- // We'll mess with the queues here
- boost::mutex::scoped_lock lock(_qMutex);
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ boost::mutex::scoped_lock lock(_parserMutex);
+#endif // LOAD_MEDIA_IN_A_SEPARATE_THREAD
- long newpos = 0;
- double timebase = 0;
+ // We'll mess with the input here
+ if ( ! m_parser.get() )
+ {
+ log_debug("NetStreamFfmpeg::seek(%d): no parser, no party",
posSeconds);
+ return;
+ }
// Don't ask me why, but NetStream::seek() takes seconds...
boost::uint32_t pos = posSeconds*1000;
+ long newpos = 0;
+ double timebase = 0;
+
// Seek to new position
if (m_isFLV)
{
- if (m_parser.get())
- {
newpos = m_parser->seek(pos);
- }
- else
- {
- newpos = 0;
- }
+ log_debug("m_parser->seek(%d) returned %d", pos, newpos);
}
else if (m_FormatCtx)
{
-
AVStream* videostream = m_FormatCtx->streams[m_video_index];
timebase = static_cast<double>(videostream->time_base.num /
videostream->time_base.den);
newpos = static_cast<long>(pos / timebase);
@@ -1209,20 +1321,11 @@
{
m_last_video_timestamp = 0;
m_last_audio_timestamp = 0;
- m_current_timestamp = 0;
-
- m_start_clock = clocktime::getTicks();
-
}
else if (m_isFLV)
{
-
- if (m_VCodecCtx) m_start_clock += m_last_video_timestamp -
newpos;
- else m_start_clock += m_last_audio_timestamp - newpos;
-
if (m_ACodecCtx) m_last_audio_timestamp = newpos;
if (m_VCodecCtx) m_last_video_timestamp = newpos;
- m_current_timestamp = newpos;
}
else
{
@@ -1244,101 +1347,243 @@
av_free_packet( &Packet );
av_seek_frame(m_FormatCtx, m_video_index, newpos, 0);
- boost::uint32_t newtime_ms =
static_cast<boost::int32_t>(newtime / 1000.0);
- m_start_clock += m_last_audio_timestamp - newtime_ms;
+ newpos = static_cast<boost::int32_t>(newtime / 1000.0);
- m_last_audio_timestamp = newtime_ms;
- m_last_video_timestamp = newtime_ms;
- m_current_timestamp = newtime_ms;
+ m_last_audio_timestamp = newpos;
+ m_last_video_timestamp = newpos;
}
- // Flush the queues
- m_qvideo.clear();
- m_qaudio.clear();
-
- decodingStatus(DEC_DECODING); // or ::refreshVideoFrame will send a
STOPPED again
- if ( playbackStatus() == PLAY_STOPPED )
+ { // cleanup audio queue, so won't be consumed while seeking
+ boost::mutex::scoped_lock lock(_audioQueueMutex);
+ for (AudioQueue::iterator i=_audioQueue.begin(),
e=_audioQueue.end();
+ i!=e; ++i)
{
- // restart playback (if not paused)
- playbackStatus(PLAY_PLAYING);
+ delete (*i);
+ }
+ _audioQueue.clear();
}
+
+ // 'newpos' will always be on a keyframe (supposedly)
+ _playHead.seekTo(newpos);
+
+ decodingStatus(DEC_BUFFERING); // make sure we have enough things in
buffer
	_qFillerResume.notify_all(); // wake it up if the decoder is sleeping
+ refreshVideoFrame(true);
+}
+
+void
+NetStreamFfmpeg::parseNextChunk()
+{
+ // TODO: parse as much as possible w/out blocking
+ // (will always block currently..)
+ const int tagsPerChunk = 2;
+ for (int i=0; i<tagsPerChunk; ++i)
+ m_parser->parseNextTag();
}
void
-NetStreamFfmpeg::refreshVideoFrame()
+NetStreamFfmpeg::refreshAudioBuffer()
{
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: waiting for lock in refreshVideoFrame");
+ assert ( m_parser.get() );
+
+#ifdef GNASH_DEBUG_DECODING
+ // bufferLength() would lock the mutex (which we already hold),
+ // so this is to avoid that.
+ boost::uint32_t parserTime = m_parser->getBufferLength();
+ boost::uint32_t playHeadTime = time();
+ boost::uint32_t bufferLen = parserTime > playHeadTime ?
parserTime-playHeadTime : 0;
#endif
- boost::mutex::scoped_lock lock(_qMutex);
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: lock obtained in refreshVideoFrame");
+
+ if ( _playHead.getState() == PlayHead::PLAY_PAUSED )
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.refreshAudioBuffer: doing nothing as playhead is
paused - "
+ "bufferLength=%d, bufferTime=%d",
+ this, bufferLen, m_bufferTime);
+#endif // GNASH_DEBUG_DECODING
+ return;
+ }
+
+ if ( _playHead.isAudioConsumed() )
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.refreshAudioBuffer: doing nothing "
+ "as current position was already decoded - "
+ "bufferLength=%d, bufferTime=%d",
+ this, bufferLen, m_bufferTime);
+#endif // GNASH_DEBUG_DECODING
+ return;
+ }
+
+ // Calculate the current time
+ boost::uint64_t curPos = _playHead.getPosition();
+
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.refreshAudioBuffer: currentPosition=%d, playHeadState=%d,
bufferLength=%d, bufferTime=%d",
+ this, curPos, _playHead.getState(), bufferLen, m_bufferTime);
+#endif // GNASH_DEBUG_DECODING
+
+ // TODO: here we should fetch all frames up to the one with timestamp
>= curPos
+ // and push them into the buffer to be consumed by audio_streamer
+ pushDecodedAudioFrames(curPos);
+}
+
+void
+NetStreamFfmpeg::pushDecodedAudioFrames(boost::uint32_t ts)
+{
+ assert(m_parser.get());
+
+ bool consumed = false;
+
+ while ( 1 )
+ {
+ FLVAudioFrameInfo* info = m_parser->peekNextAudioFrameInfo();
+ if ( ! info )
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.pushDecodedAudioFrames(%d): "
+ "no more audio frames in input "
+ "(peekNextAudioFrameInfo returned false)",
+ this, ts);
+#endif // GNASH_DEBUG_DECODING
+ consumed = true;
+ decodingStatus(DEC_STOPPED);
+#ifdef GNASH_DEBUG_STATUS
+ log_debug("Setting playStop status");
#endif
+ setStatus(playStop);
+ break;
+ }
+
+ if ( info->timestamp > ts )
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.pushDecodedAudioFrames(%d): "
+ "next audio frame is in the future (%d)",
+ this, ts, info->timestamp);
+#endif // GNASH_DEBUG_DECODING
+ consumed = true;
+ break; // next frame is in the future
+ }
+
+ boost::mutex::scoped_lock lock(_audioQueueMutex);
+
+ static const int bufferLimit = 20;
+ if ( _audioQueue.size() > bufferLimit )
+ {
+                        // we won't buffer more than 'bufferLimit' frames in
the queue
+ // to avoid ending up with a huge queue which will take
some
+ // time before being consumed by audio mixer, but still
marked
+ // as "consumed". Keeping decoded frames buffer low
would also
+ // reduce memory use.
+ //
+ // The alternative would be always decode on demand
from the
+ // audio consumer thread, but would introduce a lot of
thread-safety
+ // issues: playhead would need protection, input would
need protection.
+ //
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.pushDecodedAudioFrames(%d) : queue size
over limit (%d), "
+ "audio won't be consumed (buffer overrun?)",
+ this, ts, bufferLimit);
+#endif // GNASH_DEBUG_DECODING
+ return;
+ }
- // If we're paused (and we got the first imageframe), there is no need
to do this
- if (playbackStatus() == PLAY_PAUSED && m_imageframe)
+ media::raw_mediadata_t* audio = decodeNextAudioFrame();
+ if ( ! audio )
{
- log_debug("refreshVideoFrame doing nothing as playback is
paused and we have an image frame already");
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: releasing lock in refreshVideoFrame");
+ log_error("peekNextAudioFrameInfo returned some info, "
+ "but decodeNextAudioFrame returned null, "
+ "I don't think this should ever happen");
+ break;
+ }
+
+ _audioQueue.push_back(audio);
+ }
+
+ // If we consumed audio of current position, feel free to advance if
needed
+ if ( consumed ) _playHead.setAudioConsumed();
+}
+
+
+void
+NetStreamFfmpeg::refreshVideoFrame(bool alsoIfPaused)
+{
+ assert ( m_parser.get() );
+
+#ifdef GNASH_DEBUG_DECODING
+ // bufferLength() would lock the mutex (which we already hold),
+ // so this is to avoid that.
+ boost::uint32_t parserTime = m_parser->getBufferLength();
+ boost::uint32_t playHeadTime = time();
+ boost::uint32_t bufferLen = parserTime > playHeadTime ?
parserTime-playHeadTime : 0;
#endif
+
+ if ( ! alsoIfPaused && _playHead.getState() == PlayHead::PLAY_PAUSED )
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.refreshVideoFrame: doing nothing as playhead is
paused - "
+ "bufferLength=%d, bufferTime=%d",
+ this, bufferLen, m_bufferTime);
+#endif // GNASH_DEBUG_DECODING
return;
}
- // Loop until a good frame is found
- do
+ if ( _playHead.isVideoConsumed() )
{
- // Get video frame from queue, will have the lowest timestamp
- // will return NULL if empty(). See multithread_queue::front
- media::raw_mediadata_t* video = m_qvideo.front();
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.refreshVideoFrame: doing nothing "
+ "as current position was already decoded - "
+ "bufferLength=%d, bufferTime=%d",
+ this, bufferLen, m_bufferTime);
+#endif // GNASH_DEBUG_DECODING
+ return;
+ }
+
+ // Calculate the current time
+ boost::uint64_t curPos = _playHead.getPosition();
+
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.refreshVideoFrame: currentPosition=%d, playHeadState=%d,
bufferLength=%d, bufferTime=%d",
+ this, curPos, _playHead.getState(), bufferLen, m_bufferTime);
+#endif // GNASH_DEBUG_DECODING
+
+ // Get next decoded video frame from parser, will have the lowest
timestamp
+ media::raw_mediadata_t* video = getDecodedVideoFrame(curPos);
- // If the queue is empty either we're waiting for more data
// to be decoded or we're out of data
if (!video)
{
- log_debug("refreshVideoFrame:: No more video frames in
queue");
-
if ( decodingStatus() == DEC_STOPPED )
{
- if ( playbackStatus() != PLAY_STOPPED )
- {
- playbackStatus(PLAY_STOPPED);
-//#ifdef GNASH_DEBUG_STATUS
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.refreshVideoFrame(): "
+ "no more video frames to decode, "
+ "sending STOP event",
+ this);
+#endif // GNASH_DEBUG_DECODING
+#ifdef GNASH_DEBUG_STATUS
log_debug("Setting playStop status");
-//#endif
+#endif
setStatus(playStop);
}
- }
else
{
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.refreshVideoFrame(): "
+ "last video frame was good enough "
+ "for current position",
+ this);
+#endif // GNASH_DEBUG_DECODING
// There no video but decoder is still running
// not much to do here except wait for next call
//assert(decodingStatus() == DEC_BUFFERING);
}
- break;
- }
-
- // Caclulate the current time
- boost::uint32_t current_clock;
- if (m_ACodecCtx && _soundHandler)
- {
- current_clock = m_current_timestamp;
}
else
{
- current_clock = clocktime::getTicks() - m_start_clock;
- m_current_timestamp = current_clock;
- }
-
- boost::uint32_t video_clock = video->m_pts;
-
- // If the timestamp on the videoframe is smaller than the
- // current time, we put it in the output image.
- if (current_clock >= video_clock)
- {
if (m_videoFrameFormat == render::YUV)
{
@@ -1354,147 +1599,184 @@
}
// Delete the frame from the queue
- m_qvideo.pop();
delete video;
- // wake up filler (TODO: do only if decoder is running)
- // TODO2: resume only at end of loop ?
- _qFillerResume.notify_all();
-
// A frame is ready for pickup
m_newFrameReady = true;
-
- }
- else
- {
- // The timestamp on the first frame in the queue is
greater
- // than the current time, so no need to do anything.
- break;
}
- } while(!m_qvideo.empty());
+ // We consumed video of current position, feel free to advance if needed
+ _playHead.setVideoConsumed();
+
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: releasing lock in refreshVideoFrame");
-#endif
}
void
NetStreamFfmpeg::advance()
{
- //log_debug("advance");
-
// Check if there are any new status messages, and if we should
// pass them to a event handler
processStatusNotifications();
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ // stop parser thread while advancing
+ boost::mutex::scoped_lock lock(_parserMutex);
+#endif // LOAD_MEDIA_IN_A_SEPARATE_THREAD
+
+ // Nothing to do if we don't have a parser
+ if ( ! m_parser.get() ) return;
+
+#ifndef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ // Parse some input no matter what
+ parseNextChunk();
+#endif // LOAD_MEDIA_IN_A_SEPARATE_THREAD
+
+ // bufferLength() would lock the mutex (which we already hold),
+ // so this is to avoid that.
+ boost::uint32_t parserTime = m_parser->getBufferLength();
+ boost::uint32_t playHeadTime = time();
+ boost::uint32_t bufferLen = parserTime > playHeadTime ?
parserTime-playHeadTime : 0;
+
+
+ // Check decoding status
+ if ( decodingStatus() == DEC_DECODING && bufferLen == 0 )
+ {
+ if ( ! m_parser->parsingCompleted() )
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.advance: buffer empty while decoding,"
+ " setting buffer to buffering and pausing
playback clock",
+ this);
+#endif // GNASH_DEBUG_DECODING
+#ifdef GNASH_DEBUG_STATUS
+ log_debug("Setting bufferEmpty status");
+#endif
+ setStatus(bufferEmpty);
+ decodingStatus(DEC_BUFFERING);
+ _playbackClock->pause();
+ }
+ else
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.advance : bufferLength=%d, parsing
completed",
+ this, bufferLen);
+#endif // GNASH_DEBUG_DECODING
+ // set playStop ? (will be done later for now)
+ }
+ }
+
+ if ( decodingStatus() == DEC_BUFFERING )
+ {
+ if ( bufferLen < m_bufferTime && ! m_parser->parsingCompleted()
)
+ {
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.advance: buffering"
+ " - position=%d, buffer=%d/%d",
+ this, _playHead.getPosition(), bufferLen,
m_bufferTime);
+#endif // GNASH_DEBUG_DECODING
+ return;
+ }
+
+#ifdef GNASH_DEBUG_DECODING
+ log_debug("%p.advance: buffer full (or parsing completed),
resuming playback clock"
+ " - position=%d, buffer=%d/%d",
+ this, _playHead.getPosition(), bufferLen, m_bufferTime);
+#endif // GNASH_DEBUG_DECODING
+
+ setStatus(bufferFull);
+ decodingStatus(DEC_DECODING);
+ _playbackClock->resume();
+ }
+
// Find video frame with the most suited timestamp in the video queue,
// and put it in the output image frame.
refreshVideoFrame();
+
+ // Refill audio buffer to consume all samples
+ // up to current playhead
+ refreshAudioBuffer();
}
boost::int32_t
NetStreamFfmpeg::time()
{
-
- if (m_FormatCtx && m_FormatCtx->nb_streams > 0)
- {
- double time = (double)m_FormatCtx->streams[0]->time_base.num /
(double)m_FormatCtx->streams[0]->time_base.den *
(double)m_FormatCtx->streams[0]->cur_dts;
- return static_cast<boost::int32_t>(time);
- }
- else if
- (m_isFLV)
- {
- return m_current_timestamp;
- }
- else
- {
- return 0;
- }
+ return _playHead.getPosition();
}
void NetStreamFfmpeg::pausePlayback()
{
GNASH_REPORT_FUNCTION;
- if (playbackStatus() == PLAY_PAUSED) return;
+ PlayHead::PlaybackStatus oldStatus =
_playHead.setState(PlayHead::PLAY_PAUSED);
- playbackStatus(PLAY_PAUSED);
-
- // Save the current time so we later can tell how long the pause lasted
- m_time_of_pause = clocktime::getTicks();
-
- // Disconnect the soundhandler so we don't play while paused
- if ( _soundHandler ) _soundHandler->detach_aux_streamer((void*)this);
+ // Disconnect the soundhandler if we were playing before
+ if ( oldStatus == PlayHead::PLAY_PLAYING && _soundHandler )
+ {
+ _soundHandler->detach_aux_streamer((void*)this);
+ }
}
void NetStreamFfmpeg::unpausePlayback()
{
GNASH_REPORT_FUNCTION;
- if (playbackStatus() == PLAY_PLAYING) // already playing
- {
- log_debug("unpausePlayback: already playing");
- return;
- }
-
- playbackStatus(PLAY_PLAYING);
+ PlayHead::PlaybackStatus oldStatus =
_playHead.setState(PlayHead::PLAY_PLAYING);
- if (m_current_timestamp == 0)
+ // Re-connect to the soundhandler if we were paused before
+ if ( oldStatus == PlayHead::PLAY_PAUSED && _soundHandler )
{
- m_start_clock = clocktime::getTicks();
+ _soundHandler->attach_aux_streamer(audio_streamer, (void*)
this);
}
- else
- {
- // Add the paused time to the start time so that the playhead
doesn't
- // noticed that we have been paused
- m_start_clock += clocktime::getTicks() - m_time_of_pause;
- }
-
- // (re)-connect to the soundhandler.
- // It was disconnected in ::pausePlayback to avoid to keep playing
sound while paused
- if ( _soundHandler ) _soundHandler->attach_aux_streamer(audio_streamer,
(void*) this);
}
long
NetStreamFfmpeg::bytesLoaded ()
{
- long ret_val = 0;
-
- if ( _netCon )
+ if ( ! m_parser.get() )
{
- ret_val = _netCon->getBytesLoaded();
+ log_debug("bytesLoaded: no parser, no party");
+ return 0;
}
- return ret_val;
+ return m_parser->getBytesLoaded();
}
-
long
-NetStreamFfmpeg::bytesTotal ()
+NetStreamFfmpeg::bufferLength ()
{
- long ret_val = 0;
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ boost::mutex::scoped_lock lock(_parserMutex);
+#endif // LOAD_MEDIA_IN_A_SEPARATE_THREAD
- if ( _netCon )
+ if ( ! m_parser.get() )
{
- ret_val = _netCon->getBytesTotal();
+ log_debug("bytesTotal: no parser, no party");
+ return 0;
}
- return ret_val;
+ boost::uint32_t maxTimeInBuffer = m_parser->getBufferLength();
+ boost::uint64_t curPos = _playHead.getPosition();
+
+ if ( maxTimeInBuffer < curPos ) return 0;
+ return maxTimeInBuffer-curPos;
}
-NetStreamFfmpeg::PlaybackState
-NetStreamFfmpeg::playbackStatus(PlaybackState newstate)
+long
+NetStreamFfmpeg::bytesTotal ()
{
- boost::mutex::scoped_lock lock(_state_mutex);
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ boost::mutex::scoped_lock lock(_parserMutex);
+#endif // LOAD_MEDIA_IN_A_SEPARATE_THREAD
- if (newstate != PLAY_NONE) {
- _playback_state = newstate;
+ if ( ! m_parser.get() )
+ {
+ log_debug("bytesTotal: no parser, no party");
+ return 0;
}
- return _playback_state;
+ return m_parser->getBytesTotal();
}
NetStreamFfmpeg::DecodingState
@@ -1509,41 +1791,6 @@
return _decoding_state;
}
-void
-NetStreamFfmpeg::killDecodeThread()
-{
- GNASH_REPORT_FUNCTION;
-
- {
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: waiting for lock in killDecodeThread");
-#endif
- boost::mutex::scoped_lock lock(_qMutex);
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: lock obtained in killDecodeThread");
-#endif
-
- _qFillerKillRequest = true;
- _qFillerResume.notify_all(); // wake it up if waiting..
- }
-
- // might as well be never started
- if ( _decodeThread )
- {
- _decodeThread->join();
- }
-
- delete _decodeThread;
- _decodeThread = NULL;
-}
-
-bool
-NetStreamFfmpeg::decodeThreadKillRequested()
-{
- boost::mutex::scoped_lock lock(_qMutex);
- return _qFillerKillRequest;
-}
-
} // gnash namespace
#endif // USE_FFMPEG
Index: server/asobj/NetStreamFfmpeg.h
===================================================================
RCS file: /sources/gnash/gnash/server/asobj/NetStreamFfmpeg.h,v
retrieving revision 1.69
retrieving revision 1.70
diff -u -b -r1.69 -r1.70
--- server/asobj/NetStreamFfmpeg.h 24 May 2008 22:03:32 -0000 1.69
+++ server/asobj/NetStreamFfmpeg.h 27 May 2008 11:58:46 -0000 1.70
@@ -36,6 +36,9 @@
#include <boost/thread/condition.hpp>
#include <boost/thread/barrier.hpp>
+#include <memory>
+#include <cassert>
+
#include "impl.h"
#ifdef HAVE_FFMPEG_AVFORMAT_H
@@ -53,9 +56,13 @@
#include "image.h"
#include "StreamProvider.h"
#include "NetStream.h" // for inheritance
+#include "VirtualClock.h"
#include "ffmpegNetStreamUtil.h"
+/// Uncomment the following to load media in a separate thread
+//#define LOAD_MEDIA_IN_A_SEPARATE_THREAD
+
namespace gnash {
@@ -94,8 +101,10 @@
// Used for ffmpeg data read and seek callbacks with non-FLV
static offset_t seekMedia(void *opaque, offset_t offset, int whence);
- /// The decoding thread. Sets up the decoder, and decodes.
- static void av_streamer(NetStreamFfmpeg* ns);
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ /// The parsing thread. Sets up the decoder, and decodes.
+ static void parseAllInput(NetStreamFfmpeg* ns);
+#endif
/// Callback used by sound_handler to get audio data
//
@@ -109,6 +118,7 @@
long bytesTotal();
+ long bufferLength();
private:
enum PlaybackState {
@@ -125,7 +135,6 @@
DEC_BUFFERING,
};
- PlaybackState _playback_state;
DecodingState _decoding_state;
// Mutex protecting _playback_state and _decoding_state
@@ -163,7 +172,15 @@
/// is that refreshVideoFrame() is called right before get_video().
This is important
/// to ensure timing is correct..
///
- void refreshVideoFrame();
+ /// @param alsoIfPaused
+ /// If true, video is consumed/refreshed even if playhead is paused.
+ /// By default this is false, but will be used on ::seek
(user-requested)
+ ///
+ void refreshVideoFrame(bool alsoIfPaused=false);
+
+ /// Refill audio buffers, so to contain new frames since last run
+ /// and up to current timestamp
+ void refreshAudioBuffer();
// Used to decode and push the next available (non-FLV) frame to the
audio or video queue
bool decodeMediaFrame();
@@ -197,35 +214,56 @@
///
bool decodeFLVFrame();
- /// Used to decode a video frame and push it on the videoqueue
+       /// Decode next video frame, fetching it from the MediaParser cursor
//
- /// Also updates m_imageframe (why !??)
+ /// @return 0 on EOF or error, a decoded video otherwise
///
- /// This is a blocking call.
- /// If no Video decoding context exists (m_VCodecCtx), false is
returned.
- /// On decoding (or converting) error, false is returned.
- /// If renderer requested video format is render::NONE, false is
returned.
- /// In any other case, true is returned.
+ media::raw_mediadata_t* decodeNextVideoFrame();
+
+       /// Decode next audio frame, fetching it from the MediaParser cursor
+ //
+ /// @return 0 on EOF or error, a decoded audio frame otherwise
+ ///
+ media::raw_mediadata_t* decodeNextAudioFrame();
+
+ /// \brief
+ /// Decode input audio frames with timestamp <= ts
+ /// and push them to the output audio queue
+ void pushDecodedAudioFrames(boost::uint32_t ts);
+
+ /// Decode input frames up to the one with timestamp <= ts.
+ //
+ /// Decoding starts from "next" element in the parser cursor.
///
- /// NOTE: (FIXME) if video queue is full,
- /// we'd still return true w/out pushing anything new there
+ /// Return 0 if:
+ /// 1. there's no parser active.
+ /// 2. parser cursor is already on last frame.
+       /// 3. next element in cursor has timestamp > ts
+ /// 4. there was an error decoding
+ ///
+ media::raw_mediadata_t* getDecodedVideoFrame(boost::uint32_t ts);
+
+ /// Used to decode a video frame
+ //
+ /// This is a blocking call.
+ /// If no Video decoding context exists (m_VCodecCtx), 0 is returned.
+ /// On decoding (or converting) error, 0 is returned.
+ /// If renderer requested video format is render::NONE, 0 is returned.
+ /// In any other case, a decoded video frame is returned.
///
/// TODO: return a more informative value to tell what happened.
///
- bool decodeVideo( AVPacket* packet );
+ media::raw_mediadata_t* decodeVideo( AVPacket* packet );
- /// Used to decode a audio frame and push it on the audioqueue
+ /// Used to decode an audio frame
//
/// This is a blocking call.
- /// If no Video decoding context exists (m_ACodecCtx), false is
returned.
- /// In any other case, true is returned.
- ///
- /// NOTE: (FIXME) if audio queue is full,
- /// we'd still return true w/out pushing anything new there
+ /// If no Video decoding context exists (m_ACodecCtx), 0 is returned.
+ /// In any other case, a decoded audio frame is returned.
///
/// TODO: return a more informative value to tell what happened.
///
- bool decodeAudio( AVPacket* packet );
+ media::raw_mediadata_t* decodeAudio( AVPacket* packet );
// Used to calculate a decimal value from a ffmpeg fraction
inline double as_double(AVRational time)
@@ -233,7 +271,6 @@
return time.num / (double) time.den;
}
- PlaybackState playbackStatus(PlaybackState newstate = PLAY_NONE);
DecodingState decodingStatus(DecodingState newstate = DEC_NONE);
int m_video_index;
@@ -255,11 +292,16 @@
// Use for resampling audio
media::AudioResampler _resampler;
- // The decoding thread
- boost::thread* _decodeThread;
-
- // Barrier to synchronize thread and thread starter
- boost::barrier _decodeThreadBarrier;
+#ifdef LOAD_MEDIA_IN_A_SEPARATE_THREAD
+ /// The parser thread
+ boost::thread* _parserThread;
+
+ /// Barrier to synchronize thread and thread starter
+ boost::barrier _parserThreadBarrier;
+
+ /// Mutex serializing access to parser,
+ /// when reading from a separate thread
+ boost::mutex _parserMutex;
/// Kill decoder thread, if any
//
@@ -269,15 +311,18 @@
///
/// Uses the _qMutex
///
- void killDecodeThread();
+ void killParserThread();
- /// Return true if kill of decoder thread
- /// was requested
- //
- bool decodeThreadKillRequested();
+ /// Return true if kill of parser thread was requested
+ bool parserThreadKillRequested();
+
+ /// Protected by _parserKillRequestMutex
+ bool _parserKillRequest;
+
+ /// Mutex protecting _parserKillRequest
+ boost::mutex _parserKillRequestMutex;
- /// Protected by _qMutex
- bool _qFillerKillRequest;
+#endif // LOAD_MEDIA_IN_A_SEPARATE_THREAD
// The timestamp of the last decoded video frame, in seconds.
@@ -286,24 +331,14 @@
// The timestamp of the last decoded audio frame, in seconds.
volatile boost::uint32_t m_last_audio_timestamp;
- // The timestamp of the last played audio (default) or video (if no
audio) frame.
- // Misured in seconds.
- boost::uint32_t m_current_timestamp;
-
- /// The queues of audio and video data.
- typedef media::ElementsOwningQueue<media::raw_mediadata_t*> MediaQueue;
-
- MediaQueue m_qaudio;
- MediaQueue m_qvideo;
-
- /// Mutex protecting access to queues
- boost::mutex _qMutex;
-
/// Queues filler will wait on this condition when queues are full
boost::condition _qFillerResume;
- // The time we started playing in seconds (since VM start ?)
- volatile boost::uint64_t m_start_clock;
+ /// Virtual clock used as playback clock source
+ std::auto_ptr<InterruptableVirtualClock> _playbackClock;
+
+ /// Playback control device
+ PlayHead _playHead;
// When the queues are full, this is where we keep the audio/video frame
// there wasn't room for on its queue
@@ -311,16 +346,34 @@
ByteIOContext ByteIOCxt;
- // Time of when pause started, in seconds since VM started
- volatile boost::uint64_t m_time_of_pause;
-
- bool m_start_onbuffer;
-
// Decoder buffer
boost::uint8_t* _decoderBuffer;
// Current sound handler
media::sound_handler* _soundHandler;
+
+ /// Parse a chunk of input
+ /// Currently blocks, ideally should parse as much
+ /// as possible w/out blocking
+ void parseNextChunk();
+
+ /// Input stream
+ //
+ /// This should just be a temporary variable, transferred
+ /// to MediaParser constructor.
+ ///
+ std::auto_ptr<tu_file> _inputStream;
+
+ typedef std::deque<media::raw_mediadata_t*> AudioQueue;
+
+ /// This is where audio frames are pushed by ::advance
+ /// and consumed by sound_handler callback (audio_streamer)
+ AudioQueue _audioQueue;
+
+ /// The queue needs to be protected as sound_handler callback
+ /// is invoked by a separate thread (dunno if it makes sense actually)
+ boost::mutex _audioQueueMutex;
+
};
- [Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F..., Sandro Santilli, 2008/05/09
- [Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F..., Sandro Santilli, 2008/05/11
- [Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F..., Sandro Santilli, 2008/05/11
- [Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F..., Sandro Santilli, 2008/05/12
- [Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F..., Sandro Santilli, 2008/05/12
- [Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F..., Sandro Santilli, 2008/05/23
- [Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F..., Sandro Santilli, 2008/05/24
- [Gnash-commit] gnash ChangeLog libbase/FLVParser.cpp libbase/F...,
Sandro Santilli <=