From b99adee49c8cf17c938dba66af8081b4c19c3928 Mon Sep 17 00:00:00 2001
From: Julian Ospald
Date: Sun, 22 May 2016 13:06:08 +0200
Subject: [PATCH] OpenSceneGraph: options for ffmpeg/libav

Also fixes build with latest ffmpeg-3.0.
---
 ...es-0 => OpenSceneGraph-3.4.0-r1.exheres-0} |   5 +-
 .../OpenSceneGraph-3.4.0-ffmpeg-3.0.patch     | 174 ++++++++++++++++++
 2 files changed, 178 insertions(+), 1 deletion(-)
 rename packages/dev-games/OpenSceneGraph/{OpenSceneGraph-3.4.0.exheres-0 => OpenSceneGraph-3.4.0-r1.exheres-0} (93%)
 create mode 100644 packages/dev-games/OpenSceneGraph/files/OpenSceneGraph-3.4.0-ffmpeg-3.0.patch

diff --git a/packages/dev-games/OpenSceneGraph/OpenSceneGraph-3.4.0.exheres-0 b/packages/dev-games/OpenSceneGraph/OpenSceneGraph-3.4.0-r1.exheres-0
similarity index 93%
rename from packages/dev-games/OpenSceneGraph/OpenSceneGraph-3.4.0.exheres-0
rename to packages/dev-games/OpenSceneGraph/OpenSceneGraph-3.4.0-r1.exheres-0
index dfc426c..061b830 100644
--- a/packages/dev-games/OpenSceneGraph/OpenSceneGraph-3.4.0.exheres-0
+++ b/packages/dev-games/OpenSceneGraph/OpenSceneGraph-3.4.0-r1.exheres-0
@@ -25,6 +25,7 @@ LICENCES="wxWinLL-3 LGPL-2.1"
 SLOT="0"
 PLATFORMS="~amd64 ~x86"
 MYOPTIONS="
+    ( providers: ffmpeg libav ) [[ number-selected = exactly-one ]]
     ( providers: ijg-jpeg jpeg-turbo ) [[ number-selected = exactly-one ]]
 "
@@ -34,7 +35,6 @@ DEPENDENCIES="
     build+run:
         app-text/poppler[cairo]
         dev-libs/glib:2
-        media/ffmpeg
         media-libs/freetype:2
         media-libs/giflib:=
         media-libs/gstreamer:1.0
@@ -52,12 +52,15 @@ DEPENDENCIES="
         x11-libs/libX11
         x11-libs/libXrandr
         x11-libs/qtbase:5
+        providers:ffmpeg? ( media/ffmpeg )
         providers:ijg-jpeg? ( media-libs/jpeg:= )
         providers:jpeg-turbo? ( media-libs/libjpeg-turbo )
+        providers:libav? ( media/libav )
 "
 
 DEFAULT_SRC_PREPARE_PATCHES=(
     "${FILES}"/${PNV}-Fix-wrong-jpeg-boolean-handling.patch
+    "${FILES}"/${PNV}-ffmpeg-3.0.patch
 )
 
 CMAKE_SRC_CONFIGURE_PARAMS=(
diff --git a/packages/dev-games/OpenSceneGraph/files/OpenSceneGraph-3.4.0-ffmpeg-3.0.patch b/packages/dev-games/OpenSceneGraph/files/OpenSceneGraph-3.4.0-ffmpeg-3.0.patch
new file mode 100644
index 0000000..ec45dab
--- /dev/null
+++ b/packages/dev-games/OpenSceneGraph/files/OpenSceneGraph-3.4.0-ffmpeg-3.0.patch
@@ -0,0 +1,174 @@
+From 2203cdc9c5657c7521dcaeaf4cb39cd1b8e720b6 Mon Sep 17 00:00:00 2001
+From: Ben Woods
+Date: Wed, 27 Apr 2016 11:09:59 +0200
+Subject: [PATCH] Replace deprecated FFmpeg API. Author: Andreas Cadhalpun.
+
+---
+ src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp |  3 +-
+ src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp | 48 +++++++++++++---------------
+ src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp |  4 +--
+ src/osgPlugins/ffmpeg/FFmpegParameters.cpp   |  2 +-
+ 4 files changed, 26 insertions(+), 31 deletions(-)
+
+diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp b/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp
+index 665c68f..636bddd 100644
+--- a/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp
++++ b/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp
+@@ -227,8 +227,7 @@ printf("### CONVERTING from sample format %s TO %s\n\t\tFROM %d TO %d channels\n
+     if (avcodec_open2(m_context, p_codec, NULL) < 0)
+         throw std::runtime_error("avcodec_open() failed");
+ 
+-    m_context->get_buffer = avcodec_default_get_buffer;
+-    m_context->release_buffer = avcodec_default_release_buffer;
++    m_context->get_buffer2 = avcodec_default_get_buffer2;
+ 
+ }
+ 
+diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
+index 9375657..083d3db 100644
+--- a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
++++ b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
+@@ -71,7 +71,7 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
+     findAspectRatio();
+ 
+     // Find out whether we support Alpha channel
+-    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);
++    m_alpha_channel = (m_context->pix_fmt == AV_PIX_FMT_YUVA420P);
+ 
+     // Find out the framerate
+ #if LIBAVCODEC_VERSION_MAJOR >= 56
+@@ -95,20 +95,19 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
+         throw std::runtime_error("avcodec_open() failed");
+ 
+     // Allocate video frame
+-    m_frame.reset(avcodec_alloc_frame());
++    m_frame.reset(av_frame_alloc());
+ 
+     // Allocate converted RGB frame
+-    m_frame_rgba.reset(avcodec_alloc_frame());
+-    m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB24, width(), height()));
++    m_frame_rgba.reset(av_frame_alloc());
++    m_buffer_rgba[0].resize(avpicture_get_size(AV_PIX_FMT_RGB24, width(), height()));
+     m_buffer_rgba[1].resize(m_buffer_rgba[0].size());
+ 
+     // Assign appropriate parts of the buffer to image planes in m_frame_rgba
+-    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB24, width(), height());
++    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], AV_PIX_FMT_RGB24, width(), height());
+ 
+     // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
+     m_context->opaque = this;
+-    m_context->get_buffer = getBuffer;
+-    m_context->release_buffer = releaseBuffer;
++    m_context->get_buffer2 = getBuffer;
+ }
+ 
+ 
+@@ -267,8 +266,8 @@ int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
+ #ifdef USE_SWSCALE
+     if (m_swscale_ctx==0)
+     {
+-        m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
+-                                      src_width, src_height, (PixelFormat) dst_pix_fmt,
++        m_swscale_ctx = sws_getContext(src_width, src_height, (AVPixelFormat) src_pix_fmt,
++                                      src_width, src_height, (AVPixelFormat) dst_pix_fmt,
+                                       /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
+     }
+ 
+@@ -315,14 +314,14 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
+     AVPicture * const dst = (AVPicture *) m_frame_rgba.get();
+ 
+     // Assign appropriate parts of the buffer to image planes in m_frame_rgba
+-    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB24, width(), height());
++    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], AV_PIX_FMT_RGB24, width(), height());
+ 
+     // Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine
+ 
+-    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
++    if (m_context->pix_fmt == AV_PIX_FMT_YUVA420P)
+         yuva420pToRgba(dst, src, width(), height());
+     else
+-        convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width(), height());
++        convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width(), height());
+ 
+     // Wait 'delay' seconds before publishing the picture.
+     int i_delay = static_cast<int>(delay * 1000000 + 0.5);
+@@ -349,7 +348,7 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
+ 
+ void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
+ {
+-    convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
++    convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
+ 
+     const size_t bpp = 4;
+ 
+@@ -367,31 +366,28 @@ void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const
+     }
+ }
+ 
+-
+-
+-int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
++int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture, int flags)
+ {
++    AVBufferRef *ref;
+     const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo *>(context->opaque);
+ 
+-    const int result = avcodec_default_get_buffer(context, picture);
++    const int result = avcodec_default_get_buffer2(context, picture, flags);
+     int64_t * p_pts = reinterpret_cast<int64_t *>( av_malloc(sizeof(int64_t)) );
+ 
+     *p_pts = this_->m_packet_pts;
+     picture->opaque = p_pts;
+ 
++    ref = av_buffer_create((uint8_t *)picture->opaque, sizeof(int64_t), FFmpegDecoderVideo::freeBuffer, picture->buf[0], flags);
++    picture->buf[0] = ref;
++
+     return result;
+ }
+ 
+-
+-
+-void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
++void FFmpegDecoderVideo::freeBuffer(void *opaque, uint8_t *data)
+ {
+-    if (picture != 0)
+-        av_freep(&picture->opaque);
+-
+-    avcodec_default_release_buffer(context, picture);
++    AVBufferRef *ref = (AVBufferRef *)opaque;
++    av_buffer_unref(&ref);
++    av_free(data);
+ }
+ 
+-
+-
+ } // namespace osgFFmpeg
+diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
+index 7883b17..778c1a9 100644
+--- a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
++++ b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
+@@ -94,8 +94,8 @@ class FFmpegDecoderVideo : public OpenThreads::Thread
+                 int src_pix_fmt, int src_width, int src_height);
+ 
+ 
+-    static int getBuffer(AVCodecContext * context, AVFrame * picture);
+-    static void releaseBuffer(AVCodecContext * context, AVFrame * picture);
++    static int getBuffer(AVCodecContext * context, AVFrame * picture, int flags);
++    static void freeBuffer(void * opaque, uint8_t *data);
+ 
+     PacketQueue & m_packets;
+     FFmpegClocks & m_clocks;
+diff --git a/src/osgPlugins/ffmpeg/FFmpegParameters.cpp b/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
+index 288e440..5915ab8 100644
+--- a/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
++++ b/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
+@@ -19,7 +19,7 @@ extern "C"
+     #include <libavutil/pixdesc.h>
+ }
+ 
+-inline PixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
++inline AVPixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
+ 
+ 
+ namespace osgFFmpeg {
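
Note on the API change carried by the vendored patch: FFmpeg 3.0 removed the deprecated get_buffer()/release_buffer() callback pair, so the plugin now installs a get_buffer2() callback and attaches the per-frame PTS through a reference-counted AVBufferRef whose free callback releases both the PTS allocation and the wrapped default buffer. The sketch below is illustration only and not part of the patch; PtsTracker and install_pts_tracker are invented names, while avcodec_default_get_buffer2, av_buffer_create, av_buffer_unref, av_malloc and av_free are the FFmpeg calls actually used above.

// Minimal sketch of the get_buffer2/AVBufferRef pattern adopted by the patch.
// PtsTracker and install_pts_tracker are hypothetical names for illustration;
// only the FFmpeg functions are real API.
extern "C"
{
    #include <libavcodec/avcodec.h>
    #include <libavutil/buffer.h>
    #include <libavutil/mem.h>
}

#include <cstdint>

struct PtsTracker
{
    int64_t current_pts = 0;   // PTS of the packet currently being decoded

    // Called by av_buffer_unref() when the last reference to the frame's
    // buffer is dropped: release the wrapped default buffer, then the PTS.
    static void free_buffer(void *opaque, uint8_t *data)
    {
        AVBufferRef *wrapped_default = static_cast<AVBufferRef *>(opaque);
        av_buffer_unref(&wrapped_default);
        av_free(data);
    }

    // Replacement for the removed get_buffer()/release_buffer() pair:
    // let FFmpeg allocate the frame, then attach the current PTS as a
    // reference-counted side buffer so it travels with the frame.
    static int get_buffer(AVCodecContext *ctx, AVFrame *frame, int flags)
    {
        PtsTracker *self = static_cast<PtsTracker *>(ctx->opaque);

        const int result = avcodec_default_get_buffer2(ctx, frame, flags);
        if (result < 0)
            return result;

        int64_t *p_pts = static_cast<int64_t *>(av_malloc(sizeof(int64_t)));
        *p_pts = self->current_pts;
        frame->opaque = p_pts;

        // Wrap the default buffer so free_buffer() runs when the frame dies
        // (the patch forwards the get_buffer2() flags here; this mirrors it).
        AVBufferRef *wrapped_default = frame->buf[0];
        frame->buf[0] = av_buffer_create(reinterpret_cast<uint8_t *>(p_pts),
                                         sizeof(int64_t),
                                         &PtsTracker::free_buffer,
                                         wrapped_default, flags);
        return result;
    }
};

// Hook the tracker into a codec context before avcodec_open2().
inline void install_pts_tracker(AVCodecContext *ctx, PtsTracker *tracker)
{
    ctx->opaque = tracker;
    ctx->get_buffer2 = &PtsTracker::get_buffer;
}

Because the side allocation is owned by the AVBufferRef, it is freed automatically when the last reference to the frame is released, which is what makes the old releaseBuffer() hook unnecessary.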