[Gnash-commit] /srv/bzr/gnash/trunk r10693: Attempt to render RGBA video


From: Benjamin Wolsey
Subject: [Gnash-commit] /srv/bzr/gnash/trunk r10693: Attempt to render RGBA video, fix illegal read on probe of h264 (and potentially other formats), remove unnecessary memcopy in VideoDecoderFfmpeg.
Date: Fri, 13 Mar 2009 11:45:40 +0100
User-agent: Bazaar (1.5)

------------------------------------------------------------
revno: 10693
committer: Benjamin Wolsey <address@hidden>
branch nick: trunk
timestamp: Fri 2009-03-13 11:45:40 +0100
message:
  Attempt to render RGBA video, fix illegal read on probe of h264 (and
  potentially other formats), remove unnecessary memcopy in VideoDecoderFfmpeg.
modified:
  backend/render_handler_agg.cpp
  libbase/GnashImage.h
  libbase/GnashImageJpeg.cpp
  libmedia/ffmpeg/MediaParserFfmpeg.cpp
  libmedia/ffmpeg/VideoDecoderFfmpeg.cpp
    ------------------------------------------------------------
    revno: 10691.1.1
    committer: Benjamin Wolsey <address@hidden>
    branch nick: work
    timestamp: Thu 2009-03-12 15:19:00 +0100
    message:
      Remove gratuitously rude comment about exceptions.
    modified:
      libbase/GnashImageJpeg.cpp
    ------------------------------------------------------------
    revno: 10691.1.2
    committer: Benjamin Wolsey <address@hidden>
    branch nick: work
    timestamp: Fri 2009-03-13 09:58:57 +0100
    message:
      At least attempt to render RGBA video, even if the alpha data aren't
      there.
    modified:
      backend/render_handler_agg.cpp
    ------------------------------------------------------------
    revno: 10691.1.3
    committer: Benjamin Wolsey <address@hidden>
    branch nick: work
    timestamp: Fri 2009-03-13 10:04:17 +0100
    message:
      As a VideoRenderer is constructed for each frame, drop the mutator
      functions for smoothing and quality and pass them in the ctor.
    modified:
      backend/render_handler_agg.cpp
    ------------------------------------------------------------
    revno: 10691.1.4
    committer: Benjamin Wolsey <address@hidden>
    branch nick: work
    timestamp: Fri 2009-03-13 10:54:50 +0100
    message:
      Add ctor from pre-existing data for ImageRGBA (for consistency with ImageRGB).
      Allow ffmpeg to write video frames directly to new image data instead of
      copying each frame.
    modified:
      libbase/GnashImage.h
      libmedia/ffmpeg/VideoDecoderFfmpeg.cpp
    ------------------------------------------------------------
    revno: 10691.1.5
    committer: Benjamin Wolsey <address@hidden>
    branch nick: work
    timestamp: Fri 2009-03-13 11:22:20 +0100
    message:
      Make sure the probe buffer is padded and zeroed out, as the ffmpeg probe
      reads past the number of bytes it's told about.
    modified:
      libmedia/ffmpeg/MediaParserFfmpeg.cpp
=== modified file 'backend/render_handler_agg.cpp'
--- a/backend/render_handler_agg.cpp    2009-03-12 09:23:59 +0000
+++ b/backend/render_handler_agg.cpp    2009-03-13 09:04:17 +0000
@@ -182,7 +182,6 @@
 
 class AlphaMask;
 
-
 typedef std::vector<agg::path_storage> AggPaths;
 typedef std::vector<geometry::Range2d<int> > ClipBounds;
 typedef std::vector<AlphaMask*> AlphaMasks;
@@ -478,30 +477,19 @@
 
     typedef agg::trans_affine Matrix;
 
-    VideoRenderer(const ClipBounds& clipbounds, GnashImage* frame,
-            Matrix& mat, Quality quality)
+    VideoRenderer(const ClipBounds& clipbounds, GnashImage& frame,
+            Matrix& mat, Quality quality, bool smooth)
         :
-        _buf(frame->data(), frame->width(), frame->height(),
-                frame->pitch()),
+        _buf(frame.data(), frame.width(), frame.height(),
+                frame.pitch()),
         _pixf(_buf),
         _accessor(_pixf),
         _interpolator(mat),
         _clipbounds(clipbounds),
-        _quality(quality)
+        _quality(quality),
+        _smoothing(smooth)
     {}
 
-    /// Change the rendering quality required.
-    void setQuality(Quality quality)
-    {
-        _quality = quality;
-    }
-
-    /// Set whether smoothing is requested
-    void smooth(bool b)
-    {
-        _smoothing = b;
-    }
-
     void render(agg::path_storage& path, Renderer& rbase,
             const AlphaMasks& masks)
     {
@@ -563,7 +551,7 @@
     // rendering buffer is used to access the frame pixels here        
     agg::rendering_buffer _buf;
 
-    SourceFormat _pixf;
+    const SourceFormat _pixf;
     
     Accessor _accessor;
          
@@ -574,7 +562,7 @@
     const ClipBounds& _clipbounds;
 
     /// Quality of renderering
-    Quality _quality;
+    const Quality _quality;
 
     /// Whether smoothing is required.
     bool _smoothing;
@@ -602,6 +590,22 @@
         return new agg_bitmap_info(im);
     }
 
+    template<typename SourceFormat, typename Matrix>
+    void renderVideo(GnashImage& frame, Matrix& img_mtx,
+            agg::path_storage path, bool smooth)
+    {
+
+        // renderer base for the stage buffer (not the frame image!)
+        renderer_base& rbase = *m_rbase;
+
+        VideoRenderer<PixelFormat, SourceFormat> vr(_clipbounds, frame,
+                img_mtx, _quality, smooth);
+
+        // If smoothing is requested and _quality is set to HIGH or BEST,
+        // use high-quality interpolation.
+        vr.render(path, rbase, _alphaMasks);
+    }
+
     void drawVideoFrame(GnashImage* frame, const SWFMatrix* source_mat, 
         const rect* bounds, bool smooth)
     {
@@ -609,14 +613,6 @@
         // NOTE: Assuming that the source image is RGB 8:8:8
         // TODO: keep heavy instances alive accross frames for performance!
         // TODO: Maybe implement specialization for 1:1 scaled videos
-        
-        if (frame->type() == GNASH_IMAGE_RGBA) {
-                LOG_ONCE(log_error(_("Can't render videos with alpha")));
-                return;
-        }
-
-        assert(frame->type() == GNASH_IMAGE_RGB);
-        
         SWFMatrix mat = stage_matrix;
         mat.concatenate(*source_mat);
         
@@ -629,14 +625,14 @@
         
         // convert Gnash SWFMatrix to AGG SWFMatrix and scale down to
         // pixel coordinates while we're at it
-        agg::trans_affine img_mtx(mat.sx / 65536.0, mat.shx / 65536.0, 
+        agg::trans_affine mtx(mat.sx / 65536.0, mat.shx / 65536.0, 
             mat.shy / 65536.0, mat.sy / 65536.0, mat.tx, mat.ty);        
         
         // invert SWFMatrix since this is used for the image source
-        img_mtx.invert();
+        mtx.invert();
         
         // Apply video scale
-        img_mtx *= agg::trans_affine_scaling(1.0 / vscaleX, 1.0 / vscaleY);
+        mtx *= agg::trans_affine_scaling(1.0 / vscaleX, 1.0 / vscaleY);
         
         // make a path for the video outline
         point a, b, c, d;
@@ -652,19 +648,19 @@
         path.line_to(d.x, d.y);
         path.line_to(a.x, a.y);
 
-        // renderer base for the stage buffer (not the frame image!)
-        renderer_base& rbase = *m_rbase;
-        
-        // TODO: keep this alive and only update image / matrix? I've no
-        // idea how much reallocation that would save.
-        VideoRenderer<PixelFormat> vr(_clipbounds, frame, img_mtx, _quality);
-
-        vr.smooth(smooth);
-
-        // If smoothing is requested and _quality is set to HIGH or BEST,
-        // use high-quality interpolation.
-        vr.render(path, rbase, _alphaMasks);
-                
+        switch (frame->type())
+        {
+            case GNASH_IMAGE_RGBA:
+                renderVideo<agg::pixfmt_rgba32_pre>(*frame, mtx, path, smooth);
+                break;
+            case GNASH_IMAGE_RGB:
+                renderVideo<agg::pixfmt_rgb24_pre>(*frame, mtx, path, smooth);
+                break;
+            default:
+                log_error("Can't render this type of frame");
+                break;
+        }
+
     } 
 
   // Constructor

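[Editor's note, not part of the commit] The interesting part of the render_handler_agg.cpp change is how a runtime image type (GNASH_IMAGE_RGB vs GNASH_IMAGE_RGBA) is mapped onto a compile-time AGG pixel format through the new renderVideo<>() template. A minimal, self-contained sketch of that dispatch pattern follows; the types below are hypothetical stand-ins, not the real Gnash or AGG classes.

// Simplified sketch: map a runtime image type onto a compile-time
// pixel-format template parameter, as the switch in drawVideoFrame()
// above does with agg::pixfmt_rgb24_pre and agg::pixfmt_rgba32_pre.
#include <iostream>

enum ImageType { IMAGE_RGB, IMAGE_RGBA };

struct PixfmtRGB24  { static const char* name() { return "rgb24";  } };
struct PixfmtRGBA32 { static const char* name() { return "rgba32"; } };

// One instantiation of the renderer per source pixel format.
template<typename SourceFormat>
void renderVideo()
{
    std::cout << "rendering frame with a " << SourceFormat::name()
              << " pixel accessor\n";
}

// Runtime dispatcher: chooses the instantiation from the frame's type.
void drawVideoFrame(ImageType type)
{
    switch (type) {
        case IMAGE_RGBA: renderVideo<PixfmtRGBA32>(); break;
        case IMAGE_RGB:  renderVideo<PixfmtRGB24>();  break;
        default:         std::cerr << "can't render this type of frame\n"; break;
    }
}

int main()
{
    drawVideoFrame(IMAGE_RGB);
    drawVideoFrame(IMAGE_RGBA);
    return 0;
}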
=== modified file 'libbase/GnashImage.h'
--- a/libbase/GnashImage.h      2009-02-25 22:33:03 +0000
+++ b/libbase/GnashImage.h      2009-03-13 09:54:50 +0000
@@ -222,7 +222,8 @@
     {}
 
     ImageRGB(boost::uint8_t* data, int width, int height, int stride)
-        : GnashImage(data, width, height, stride, GNASH_IMAGE_RGB)
+        :
+        GnashImage(data, width, height, stride, GNASH_IMAGE_RGB)
     {}
 
     ~ImageRGB();
@@ -247,6 +248,11 @@
         GnashImage(o)
     {}
 
+    ImageRGBA(boost::uint8_t* data, int width, int height, int stride)
+        :
+        GnashImage(data, width, height, stride, GNASH_IMAGE_RGBA)
+    {}
+    
     ~ImageRGBA();
 
     /// Set pixel value 

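[Editor's note, not part of the commit] The new ImageRGBA constructor mirrors the existing ImageRGB one: it wraps a pixel buffer that already exists instead of allocating its own, which is what lets the ffmpeg decoder below write frames in place. A rough stand-in sketch of that two-constructor pattern follows; the real GnashImage ownership semantics are not visible in this diff, so ownership transfer is assumed here.

// Simplified stand-in, not the real GnashImage API.
#include <cstring>
#include <memory>

class ImageRGBA
{
public:
    // Allocate a zero-initialised width * height RGBA buffer.
    ImageRGBA(int width, int height)
        :
        _stride(width * 4),
        _height(height),
        _data(new unsigned char[width * 4 * height]())
    {}

    // Adopt a pre-existing buffer (ownership transfer assumed).
    ImageRGBA(unsigned char* data, int width, int height, int stride)
        :
        _stride(stride),
        _height(height),
        _data(data)
    {}

    unsigned char* data() { return _data.get(); }
    int stride() const { return _stride; }
    int height() const { return _height; }

private:
    int _stride;
    int _height;
    std::unique_ptr<unsigned char[]> _data;
};

int main()
{
    // A decoder can fill this buffer and hand it over without a copy.
    unsigned char* raw = new unsigned char[64 * 48 * 4];
    std::memset(raw, 0, 64 * 48 * 4);
    ImageRGBA adopted(raw, 64, 48, 64 * 4);

    ImageRGBA allocated(64, 48);
    return (allocated.data() && adopted.data()) ? 0 : 1;
}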
=== modified file 'libbase/GnashImageJpeg.cpp'
--- a/libbase/GnashImageJpeg.cpp        2009-02-25 22:33:03 +0000
+++ b/libbase/GnashImageJpeg.cpp        2009-03-12 14:19:00 +0000
@@ -517,7 +517,6 @@
                if (dest->m_out_stream.write(dest->m_buffer, IO_BUF_SIZE) != IO_BUF_SIZE)
                {
                        // Error.
-                       // @@ bah, exceptions suck.  TODO consider alternatives.
                        log_error(_("jpeg::rw_dest_IOChannel couldn't write 
data."));
                        return false;
                }

=== modified file 'libmedia/ffmpeg/MediaParserFfmpeg.cpp'
--- a/libmedia/ffmpeg/MediaParserFfmpeg.cpp     2009-02-11 21:58:37 +0000
+++ b/libmedia/ffmpeg/MediaParserFfmpeg.cpp     2009-03-13 10:22:20 +0000
@@ -55,27 +55,32 @@
 AVInputFormat*
 MediaParserFfmpeg::probeStream()
 {
-       boost::scoped_array<boost::uint8_t> buffer(new boost::uint8_t[2048]);
+    const size_t probeSize = 2048;
+    const size_t bufSize = probeSize + FF_INPUT_BUFFER_PADDING_SIZE;
 
-       // Probe the file to detect the format
-       AVProbeData probe_data;
-       probe_data.filename = "";
-       probe_data.buf = buffer.get();
-       probe_data.buf_size = 2048;
+       boost::scoped_array<boost::uint8_t> buffer(new boost::uint8_t[bufSize]);
 
        assert(_stream->tell() == static_cast<std::streampos>(0));
-       size_t actuallyRead = _stream->read(probe_data.buf, probe_data.buf_size);
+       size_t actuallyRead = _stream->read(buffer.get(), probeSize);
+    
+    // Fill any padding with 0s.
+    std::fill(buffer.get() + actuallyRead, buffer.get() + bufSize, 0);
+
        _stream->seek(0);
 
        if (actuallyRead < 1)
        {
                throw IOException(_("MediaParserFfmpeg could not read probe data "
                     "from input"));
-               return 0;
        }
 
-       probe_data.buf_size = actuallyRead; // right ?
-       AVInputFormat* ret = av_probe_input_format(&probe_data, 1);
+       // Probe the file to detect the format
+       AVProbeData probe_data;
+       probe_data.filename = "";
+       probe_data.buf = buffer.get();
+    probe_data.buf_size = actuallyRead;
+       
+    AVInputFormat* ret = av_probe_input_format(&probe_data, 1);
        return ret;
 }
 

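[Editor's note, not part of the commit] The probe fix boils down to one rule: allocate more bytes than the prober is told about and zero the tail, because av_probe_input_format() may peek a little past buf_size. A small ffmpeg-free sketch of the same pattern follows; the stream type and the padding value are stand-ins for Gnash's IOChannel and libavcodec's FF_INPUT_BUFFER_PADDING_SIZE.

// ffmpeg-free sketch of a padded, zeroed probe buffer.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <istream>
#include <sstream>
#include <vector>

std::vector<unsigned char>
readProbeBuffer(std::istream& in, std::size_t probeSize, std::size_t padding)
{
    // Probe bytes plus padding, all zero-initialised up front.
    std::vector<unsigned char> buffer(probeSize + padding, 0);

    in.read(reinterpret_cast<char*>(&buffer[0]), probeSize);
    const std::size_t actuallyRead = static_cast<std::size_t>(in.gcount());

    // Redundant here because the vector is already zeroed, but mirrors the
    // explicit std::fill in the commit: everything past the bytes actually
    // read, including the padding, must be 0.
    std::fill(buffer.begin() + actuallyRead, buffer.end(), 0);
    return buffer;
}

int main()
{
    std::istringstream input("FLV\x01 short header");
    std::vector<unsigned char> probe = readProbeBuffer(input, 2048, 16);
    std::cout << probe.size() << " bytes allocated for a 2048-byte probe\n";
    return 0;
}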
=== modified file 'libmedia/ffmpeg/VideoDecoderFfmpeg.cpp'
--- a/libmedia/ffmpeg/VideoDecoderFfmpeg.cpp    2009-02-26 02:30:27 +0000
+++ b/libmedia/ffmpeg/VideoDecoderFfmpeg.cpp    2009-03-13 09:54:50 +0000
@@ -216,41 +216,27 @@
                                  const AVFrame& srcFrame)
 {
 
-    // Adjust to next highest 4-pixel value.
     const int width = srcCtx->width;
     const int height = srcCtx->height;
 
-    PixelFormat pixFmt;
+#ifdef FFMPEG_VP6A
+    PixelFormat pixFmt = (srcCtx->codec->id == CODEC_ID_VP6A) ?
+        PIX_FMT_RGBA : PIX_FMT_RGB24;
+#else 
+    PixelFormat pixFmt = PIX_FMT_RGB24;
+#endif 
+
     std::auto_ptr<GnashImage> im;
 
-#ifdef FFMPEG_VP6A
-    if (srcCtx->codec->id == CODEC_ID_VP6A) {
-        // Expect RGBA data
-        //log_debug("alpha image");
-        pixFmt = PIX_FMT_RGBA;
-        im.reset(new ImageRGBA(width, height));        
-    } else {
-        // Expect RGB data
-        pixFmt = PIX_FMT_RGB24;
-        im.reset(new ImageRGB(width, height));
-    }
-#else // ndef FFMPEG_VPA6
-    // Expect RGB data
-    pixFmt = PIX_FMT_RGB24;
-    im.reset(new ImageRGB(width, height));
-#endif // def FFMPEG_VP6A
-
 #ifdef HAVE_SWSCALE_H
     // Check whether the context wrapper exists
     // already.
     if (!_swsContext.get()) {
 
-        _swsContext.reset(
-                        new SwsContextWrapper(
-                                sws_getContext(width, height, srcCtx->pix_fmt,
-                                width, height, pixFmt,
-                                SWS_BILINEAR, NULL, NULL, NULL)
-                        ));
+        _swsContext.reset(new SwsContextWrapper(
+            sws_getContext(width, height, srcCtx->pix_fmt, width, height,
+                pixFmt, SWS_BILINEAR, NULL, NULL, NULL)
+        ));
         
         // Check that the context was assigned.
         if (!_swsContext->getContext()) {
@@ -260,24 +246,33 @@
             _swsContext.reset();
             
             // Can't do anything now, though.
-            im.reset();
             return im;
         }
     }
 #endif
 
     int bufsize = avpicture_get_size(pixFmt, width, height);
-            if (bufsize == -1) {
-                im.reset();
-                return im;
-            }
+    if (bufsize == -1) return im;
 
-    boost::scoped_array<boost::uint8_t> buffer ( new boost::uint8_t[bufsize] );
+    switch (pixFmt)
+    {
+        case PIX_FMT_RGBA:
+            im.reset(new ImageRGBA(width, height));
+            break;
+        case PIX_FMT_RGB24:
+            im.reset(new ImageRGB(width, height));
+            break;
+        default:
+            log_error("Pixel format not handled");
+            return im;
+    }
 
     AVPicture picture;
-    picture.data[0] = NULL;
 
-    avpicture_fill(&picture, buffer.get(), pixFmt, width, height);
+    // Let ffmpeg write directly to the GnashImage data. It is an uninitialized
+    // buffer here, so do not return the image if there is any error in
+    // conversion.
+    avpicture_fill(&picture, im->data(), pixFmt, width, height);
 
 #ifndef HAVE_SWSCALE_H
     img_convert(&picture, PIX_FMT_RGB24, (AVPicture*) &srcFrame,
@@ -297,9 +292,9 @@
         im.reset();
         return im;
     }
+
 #endif
 
-    im->update(picture.data[0]);
     return im;
 
 }


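[Editor's note, not part of the commit] The VideoDecoderFfmpeg change removes a per-frame copy: the GnashImage is allocated first and its buffer is handed to avpicture_fill(), so the colour-space conversion writes straight into the image instead of into a scratch buffer that then had to be copied back with update(). A toy sketch of that in-place pattern follows, with no ffmpeg dependency; convertPixels() is a hypothetical stand-in for the sws_scale()/img_convert() call in the real code.

// Toy sketch of converting directly into the destination image's buffer.
#include <cstddef>
#include <cstring>
#include <iostream>
#include <memory>
#include <vector>

// Stand-in converter: writes `bytes` converted bytes into dst.
// (The real code converts YUV to RGB/RGBA here.)
void convertPixels(const std::vector<unsigned char>& src, unsigned char* dst,
                   std::size_t bytes)
{
    std::memcpy(dst, &src[0], bytes);
}

struct Image
{
    explicit Image(std::size_t bytes) : pixels(bytes) {}
    unsigned char* data() { return &pixels[0]; }
    std::vector<unsigned char> pixels;
};

int main()
{
    const std::size_t frameBytes = 4 * 4 * 3;          // a tiny 4x4 RGB frame
    std::vector<unsigned char> decoded(frameBytes, 0x7f);

    // The commit's approach: hand the image's own buffer to the converter,
    // so no scratch buffer and no per-frame copy are needed.
    std::unique_ptr<Image> im(new Image(frameBytes));
    convertPixels(decoded, im->data(), frameBytes);

    std::cout << "converted " << frameBytes << " bytes in place\n";
    return 0;
}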