Compare commits
50 Commits
temp-enum- ... main

SHA1:
d3f7ed08e7
e9d6f2164e
bec8e436a1
444a8cbc2f
7616f4ae57
a68f5456e4
cea80f1add
02a6be5443
7eb3e77b94
13ab6b7bb6
00ffe02820
19c0666d40
14308b0a5e
fe4cbe62df
f522e8db22
3b7a520d88
2c7efe5ac0
3c6a981fd9
46daaec425
7573e45103
129798cacb
32658bb5c9
174f39bd03
27e3265267
cf9cacd091
03d5c8b4ed
1b8d33b18c
baa7a53974
b4a81d8053
b61f4fd8da
bc64801857
ba7f110753
e24765d7d3
5730112174
d354e6a846
706c7d775d
db909281f2
e1d2485fd3
0b577e977b
5a612097ab
dbb002387d
c1d67abcd8
b38eb31050
8eef2f83e9
cc5392b4d9
9d134104e4
7edbe463d2
ddb2d71438
84da05a8b8
383bc8d9bc
@@ -15,6 +15,15 @@ if(WITH_WINDOWS_BUNDLE_CRT)
  include(InstallRequiredSystemLibraries)

  # ucrtbase(d).dll cannot be in the manifest, due to the way windows 10 handles
  # redirects for this dll, for details see T88813.
  foreach(lib ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS})
    string(FIND ${lib} "ucrtbase" pos)
    if(NOT pos EQUAL -1)
      list(REMOVE_ITEM CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS ${lib})
      install(FILES ${lib} DESTINATION . COMPONENT Libraries)
    endif()
  endforeach()

  # Install the CRT to the blender.crt Sub folder.
  install(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS} DESTINATION ./blender.crt COMPONENT Libraries)
@@ -22,10 +22,17 @@

#include <libavformat/avformat.h>

/* check our ffmpeg is new enough, avoids user complaints */
#if (LIBAVFORMAT_VERSION_MAJOR < 52) || \
    ((LIBAVFORMAT_VERSION_MAJOR == 52) && (LIBAVFORMAT_VERSION_MINOR <= 64))
#  error "FFmpeg 0.7 or newer is needed, Upgrade your FFmpeg or disable it"
/* Check if our ffmpeg is new enough, avoids user complaints.
 * Minimum supported version is currently 3.2.0 which means the following library versions:
 * libavutil   > 55.30
 * libavcodec  > 57.60
 * libavformat > 57.50
 *
 * We only check for one of these as they are usually updated in tandem.
 */
#if (LIBAVFORMAT_VERSION_MAJOR < 57) || \
    ((LIBAVFORMAT_VERSION_MAJOR == 57) && (LIBAVFORMAT_VERSION_MINOR <= 50))
#  error "FFmpeg 3.2.0 or newer is needed, Upgrade your FFmpeg or disable it"
#endif
/* end sanity check */
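Each FFmpeg library exports MAJOR/MINOR/MICRO version macros, and libavutil provides AV_VERSION_INT(a, b, c) to pack such a triple into one integer so whole versions compare with a single operator. A minimal sketch of roughly the same 3.2.0 gate written with that helper (an illustration of the macro, not what the patch itself does):

#include <libavformat/avformat.h> /* LIBAVFORMAT_VERSION_INT */
#include <libavutil/version.h>    /* AV_VERSION_INT(a, b, c) */

/* Packs (a, b, c) as (a << 16) | (b << 8) | c; approximately the same gate
 * as the (major < 57 || (major == 57 && minor <= 50)) test above. */
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(57, 51, 0)
#  error "FFmpeg 3.2.0 or newer is needed, Upgrade your FFmpeg or disable it"
#endif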
@@ -36,520 +43,83 @@
#  define FFMPEG_INLINE static inline
#endif

#include <libavcodec/avcodec.h>
#include <libavutil/mathematics.h>
#include <libavutil/opt.h>
#include <libavutil/rational.h>
#if (LIBAVFORMAT_VERSION_MAJOR < 58) || \
    ((LIBAVFORMAT_VERSION_MAJOR == 58) && (LIBAVFORMAT_VERSION_MINOR < 76))
#  define FFMPEG_USE_DURATION_WORKAROUND 1

#if (LIBAVFORMAT_VERSION_MAJOR > 52) || \
    ((LIBAVFORMAT_VERSION_MAJOR >= 52) && (LIBAVFORMAT_VERSION_MINOR >= 101))
#  define FFMPEG_HAVE_PARSE_UTILS 1
#  include <libavutil/parseutils.h>
#endif

#include <libswscale/swscale.h>

#if (LIBAVFORMAT_VERSION_MAJOR > 52) || \
    ((LIBAVFORMAT_VERSION_MAJOR >= 52) && (LIBAVFORMAT_VERSION_MINOR >= 105))
#  define FFMPEG_HAVE_AVIO 1
#endif

#if (LIBAVCODEC_VERSION_MAJOR > 53) || \
    ((LIBAVCODEC_VERSION_MAJOR == 53) && (LIBAVCODEC_VERSION_MINOR > 1)) || \
    ((LIBAVCODEC_VERSION_MAJOR == 53) && (LIBAVCODEC_VERSION_MINOR == 1) && \
     (LIBAVCODEC_VERSION_MICRO >= 1)) || \
    ((LIBAVCODEC_VERSION_MAJOR == 52) && (LIBAVCODEC_VERSION_MINOR >= 121))
#  define FFMPEG_HAVE_DEFAULT_VAL_UNION 1
#endif

#if (LIBAVFORMAT_VERSION_MAJOR > 52) || \
    ((LIBAVFORMAT_VERSION_MAJOR >= 52) && (LIBAVFORMAT_VERSION_MINOR >= 101))
#  define FFMPEG_HAVE_AV_DUMP_FORMAT 1
#endif

#if (LIBAVFORMAT_VERSION_MAJOR > 52) || \
    ((LIBAVFORMAT_VERSION_MAJOR >= 52) && (LIBAVFORMAT_VERSION_MINOR >= 45))
#  define FFMPEG_HAVE_AV_GUESS_FORMAT 1
#endif

#if (LIBAVCODEC_VERSION_MAJOR > 52) || \
    ((LIBAVCODEC_VERSION_MAJOR >= 52) && (LIBAVCODEC_VERSION_MINOR >= 23))
#  define FFMPEG_HAVE_DECODE_AUDIO3 1
#  define FFMPEG_HAVE_DECODE_VIDEO2 1
#endif

#if (LIBAVCODEC_VERSION_MAJOR > 52) || \
    ((LIBAVCODEC_VERSION_MAJOR >= 52) && (LIBAVCODEC_VERSION_MINOR >= 64))
#  define FFMPEG_HAVE_AVMEDIA_TYPES 1
#endif

#if ((LIBAVCODEC_VERSION_MAJOR > 52) || \
     (LIBAVCODEC_VERSION_MAJOR >= 52) && (LIBAVCODEC_VERSION_MINOR >= 29)) && \
    ((LIBSWSCALE_VERSION_MAJOR > 0) || \
     (LIBSWSCALE_VERSION_MAJOR >= 0) && (LIBSWSCALE_VERSION_MINOR >= 10))
#  define FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
#endif

#if ((LIBAVCODEC_VERSION_MAJOR > 54) || \
     (LIBAVCODEC_VERSION_MAJOR >= 54) && (LIBAVCODEC_VERSION_MINOR > 14))
#  define FFMPEG_HAVE_CANON_H264_RESOLUTION_FIX
#endif

#if ((LIBAVCODEC_VERSION_MAJOR > 53) || \
     (LIBAVCODEC_VERSION_MAJOR >= 53) && (LIBAVCODEC_VERSION_MINOR >= 60))
#  define FFMPEG_HAVE_ENCODE_AUDIO2
#endif

#if ((LIBAVCODEC_VERSION_MAJOR > 53) || \
     (LIBAVCODEC_VERSION_MAJOR >= 53) && (LIBAVCODEC_VERSION_MINOR >= 42))
#  define FFMPEG_HAVE_DECODE_AUDIO4
#endif

#if ((LIBAVCODEC_VERSION_MAJOR > 54) || \
     (LIBAVCODEC_VERSION_MAJOR >= 54) && (LIBAVCODEC_VERSION_MINOR >= 13))
#  define FFMPEG_HAVE_AVFRAME_SAMPLE_RATE
#endif

#if ((LIBAVUTIL_VERSION_MAJOR > 51) || \
     (LIBAVUTIL_VERSION_MAJOR == 51) && (LIBAVUTIL_VERSION_MINOR >= 21))
#  define FFMPEG_FFV1_ALPHA_SUPPORTED
#  define FFMPEG_SAMPLE_FMT_S16P_SUPPORTED
#else
/* Before ffmpeg 4.4, package duration calculation used deprecated variables to calculate the
 * packet duration. Use the function from commit
 * github.com/FFmpeg/FFmpeg/commit/1c0885334dda9ee8652e60c586fa2e3674056586
 * to calculate the correct framerate for ffmpeg < 4.4.
 */

FFMPEG_INLINE
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
void my_guess_pkt_duration(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
  /* no planar formats in FFmpeg < 0.9 */
  (void)sample_fmt;
  return 0;
}

#endif
/* XXX TODO Probably fix to correct modern flags in code? Not sure how old FFMPEG we want to
 * support though, so for now this will do. */

#ifndef FF_MIN_BUFFER_SIZE
#  ifdef AV_INPUT_BUFFER_MIN_SIZE
#    define FF_MIN_BUFFER_SIZE AV_INPUT_BUFFER_MIN_SIZE
#  endif
#endif

#ifndef FF_INPUT_BUFFER_PADDING_SIZE
#  ifdef AV_INPUT_BUFFER_PADDING_SIZE
#    define FF_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE
#  endif
#endif

#ifndef CODEC_FLAG_GLOBAL_HEADER
#  ifdef AV_CODEC_FLAG_GLOBAL_HEADER
#    define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER
#  endif
#endif

#ifndef CODEC_FLAG_INTERLACED_DCT
#  ifdef AV_CODEC_FLAG_INTERLACED_DCT
#    define CODEC_FLAG_INTERLACED_DCT AV_CODEC_FLAG_INTERLACED_DCT
#  endif
#endif

#ifndef CODEC_FLAG_INTERLACED_ME
#  ifdef AV_CODEC_FLAG_INTERLACED_ME
#    define CODEC_FLAG_INTERLACED_ME AV_CODEC_FLAG_INTERLACED_ME
#  endif
#endif

/* FFmpeg upstream 1.0 is the first who added AV_ prefix. */
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 59, 100)
#  define AV_CODEC_ID_NONE CODEC_ID_NONE
#  define AV_CODEC_ID_MPEG4 CODEC_ID_MPEG4
#  define AV_CODEC_ID_MJPEG CODEC_ID_MJPEG
#  define AV_CODEC_ID_DNXHD CODEC_ID_DNXHD
#  define AV_CODEC_ID_MPEG2VIDEO CODEC_ID_MPEG2VIDEO
#  define AV_CODEC_ID_MPEG1VIDEO CODEC_ID_MPEG1VIDEO
#  define AV_CODEC_ID_DVVIDEO CODEC_ID_DVVIDEO
#  define AV_CODEC_ID_THEORA CODEC_ID_THEORA
#  define AV_CODEC_ID_PNG CODEC_ID_PNG
#  define AV_CODEC_ID_QTRLE CODEC_ID_QTRLE
#  define AV_CODEC_ID_FFV1 CODEC_ID_FFV1
#  define AV_CODEC_ID_HUFFYUV CODEC_ID_HUFFYUV
#  define AV_CODEC_ID_H264 CODEC_ID_H264
#  define AV_CODEC_ID_FLV1 CODEC_ID_FLV1

#  define AV_CODEC_ID_AAC CODEC_ID_AAC
#  define AV_CODEC_ID_AC3 CODEC_ID_AC3
#  define AV_CODEC_ID_MP3 CODEC_ID_MP3
#  define AV_CODEC_ID_MP2 CODEC_ID_MP2
#  define AV_CODEC_ID_FLAC CODEC_ID_FLAC
#  define AV_CODEC_ID_PCM_U8 CODEC_ID_PCM_U8
#  define AV_CODEC_ID_PCM_S16LE CODEC_ID_PCM_S16LE
#  define AV_CODEC_ID_PCM_S24LE CODEC_ID_PCM_S24LE
#  define AV_CODEC_ID_PCM_S32LE CODEC_ID_PCM_S32LE
#  define AV_CODEC_ID_PCM_F32LE CODEC_ID_PCM_F32LE
#  define AV_CODEC_ID_PCM_F64LE CODEC_ID_PCM_F64LE
#  define AV_CODEC_ID_VORBIS CODEC_ID_VORBIS
#endif

FFMPEG_INLINE
int av_get_cropped_height_from_codec(AVCodecContext *pCodecCtx)
{
  int y = pCodecCtx->height;

#ifndef FFMPEG_HAVE_CANON_H264_RESOLUTION_FIX
  /* Really bad hack to remove this dreadful black bar at the bottom
     with Canon footage and old ffmpeg versions.
     (To fix this properly in older ffmpeg versions one has to write a new
     demuxer...)

     See the actual fix here for reference:

     http://git.libav.org/?p=libav.git;a=commit;h=30f515091c323da59c0f1b533703dedca2f4b95d

     We do our best to apply this only to matching footage.
   */
  if (pCodecCtx->width == 1920 && pCodecCtx->height == 1088 &&
      pCodecCtx->pix_fmt == PIX_FMT_YUVJ420P && pCodecCtx->codec_id == AV_CODEC_ID_H264) {
    y = 1080;
  if (pkt->duration < 0 && st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
    av_log(s,
           AV_LOG_WARNING,
           "Packet with invalid duration %" PRId64 " in stream %d\n",
           pkt->duration,
           pkt->stream_index);
    pkt->duration = 0;
  }
#endif

  return y;
}
  if (pkt->duration) {
    return;
  }

#if ((LIBAVUTIL_VERSION_MAJOR < 51) || \
     (LIBAVUTIL_VERSION_MAJOR == 51) && (LIBAVUTIL_VERSION_MINOR < 22))
FFMPEG_INLINE
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
{
  const AVOption *rv = NULL;
  (void)search_flags;
  av_set_string3(obj, name, val, 1, &rv);
  return rv != NULL;
}

FFMPEG_INLINE
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
{
  const AVOption *rv = NULL;
  (void)search_flags;
  rv = av_set_int(obj, name, val);
  return rv != NULL;
}

FFMPEG_INLINE
int av_opt_set_double(void *obj, const char *name, double val, int search_flags)
{
  const AVOption *rv = NULL;
  (void)search_flags;
  rv = av_set_double(obj, name, val);
  return rv != NULL;
}

#  define AV_OPT_TYPE_INT FF_OPT_TYPE_INT
#  define AV_OPT_TYPE_INT64 FF_OPT_TYPE_INT64
#  define AV_OPT_TYPE_STRING FF_OPT_TYPE_STRING
#  define AV_OPT_TYPE_CONST FF_OPT_TYPE_CONST
#  define AV_OPT_TYPE_DOUBLE FF_OPT_TYPE_DOUBLE
#  define AV_OPT_TYPE_FLOAT FF_OPT_TYPE_FLOAT
#endif

#if ((LIBAVUTIL_VERSION_MAJOR < 51) || \
     (LIBAVUTIL_VERSION_MAJOR == 51) && (LIBAVUTIL_VERSION_MINOR < 54))
FFMPEG_INLINE
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
{
  if (sample_fmt < 0 || sample_fmt >= AV_SAMPLE_FMT_NB)
    return AV_SAMPLE_FMT_NONE;
  return sample_fmt;
}
#endif

#if ((LIBAVCODEC_VERSION_MAJOR < 53) || \
     (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR < 35))
FFMPEG_INLINE
int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
{
  /* TODO: no options are taken into account */
  (void)options;
  return avcodec_open(avctx, codec);
}
#endif

#if ((LIBAVFORMAT_VERSION_MAJOR < 53) || \
     (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR < 21))
FFMPEG_INLINE
AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
{
  /* TODO: no codec is taken into account */
  (void)c;
  return av_new_stream(s, 0);
}

FFMPEG_INLINE
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
{
  /* TODO: no options are taken into account */
  (void)options;
  return av_find_stream_info(ic);
}
#endif

#if ((LIBAVFORMAT_VERSION_MAJOR > 53) || \
     ((LIBAVFORMAT_VERSION_MAJOR == 53) && (LIBAVFORMAT_VERSION_MINOR > 32)) || \
     ((LIBAVFORMAT_VERSION_MAJOR == 53) && (LIBAVFORMAT_VERSION_MINOR == 24) && \
      (LIBAVFORMAT_VERSION_MICRO >= 100)))
FFMPEG_INLINE
void my_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
{
  int i;

  for (i = 0; i < s->nb_streams; i++) {
    AVStream *st = s->streams[i];

    st->cur_dts = av_rescale(timestamp,
                             st->time_base.den * (int64_t)ref_st->time_base.num,
                             st->time_base.num * (int64_t)ref_st->time_base.den);
  switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
      if (st->avg_frame_rate.num > 0 && st->avg_frame_rate.den > 0) {
        pkt->duration = av_rescale_q(1, av_inv_q(st->avg_frame_rate), st->time_base);
      }
      else if (st->time_base.num * 1000LL > st->time_base.den) {
        pkt->duration = 1;
      }
      break;
    case AVMEDIA_TYPE_AUDIO: {
      int frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
      if (frame_size && st->codecpar->sample_rate) {
        pkt->duration = av_rescale_q(
            frame_size, (AVRational){1, st->codecpar->sample_rate}, st->time_base);
      }
      break;
    }
    default:
      break;
  }
}

FFMPEG_INLINE
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
{
  my_update_cur_dts(s, ref_st, timestamp);
}
#endif
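The new my_guess_pkt_duration() is scattered across the interleaved old/new lines above: its signature near the top of the hunk, its body fragments inside av_get_cropped_height_from_codec() and my_update_cur_dts(). Reassembled, it reads roughly as below; this is a sketch pieced together from those fragments and the FFmpeg commit linked in the comment, not a verbatim copy of the new file:

FFMPEG_INLINE
void my_guess_pkt_duration(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
  /* Negative durations are invalid: warn and reset (subtitles excepted). */
  if (pkt->duration < 0 && st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
    av_log(s,
           AV_LOG_WARNING,
           "Packet with invalid duration %" PRId64 " in stream %d\n",
           pkt->duration,
           pkt->stream_index);
    pkt->duration = 0;
  }

  /* A duration is already set: nothing to guess. */
  if (pkt->duration) {
    return;
  }

  switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
      /* One frame at the stream's average frame rate, in stream time base units. */
      if (st->avg_frame_rate.num > 0 && st->avg_frame_rate.den > 0) {
        pkt->duration = av_rescale_q(1, av_inv_q(st->avg_frame_rate), st->time_base);
      }
      else if (st->time_base.num * 1000LL > st->time_base.den) {
        pkt->duration = 1;
      }
      break;
    case AVMEDIA_TYPE_AUDIO: {
      /* Derive the duration from the audio frame size and sample rate. */
      int frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
      if (frame_size && st->codecpar->sample_rate) {
        pkt->duration = av_rescale_q(
            frame_size, (AVRational){1, st->codecpar->sample_rate}, st->time_base);
      }
      break;
    }
    default:
      break;
  }
}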
#if ((LIBAVCODEC_VERSION_MAJOR < 54) || \
     (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR < 28))
FFMPEG_INLINE
void avcodec_free_frame(AVFrame **frame)
{
  /* don't need to do anything with old AVFrame
   * since it does not have malloced members */
  (void)frame;
}
#endif

#if ((LIBAVCODEC_VERSION_MAJOR > 54) || \
     (LIBAVCODEC_VERSION_MAJOR >= 54) && (LIBAVCODEC_VERSION_MINOR >= 13))
#  define FFMPEG_HAVE_AVFRAME_SAMPLE_RATE
#endif

#if ((LIBAVCODEC_VERSION_MAJOR > 54) || \
     (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 13))
#  define FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
#endif

#ifndef FFMPEG_HAVE_AVIO
#  define AVIO_FLAG_WRITE URL_WRONLY
#  define avio_open url_fopen
#  define avio_tell url_ftell
#  define avio_close url_fclose
#  define avio_size url_fsize
#endif

/* There are some versions in between, which have avio_... functions but no
 * AVIO_FLAG_... */
#ifndef AVIO_FLAG_WRITE
#  define AVIO_FLAG_WRITE URL_WRONLY
#endif

#ifndef AV_PKT_FLAG_KEY
#  define AV_PKT_FLAG_KEY PKT_FLAG_KEY
#endif

#ifndef FFMPEG_HAVE_AV_DUMP_FORMAT
#  define av_dump_format dump_format
#endif

#ifndef FFMPEG_HAVE_AV_GUESS_FORMAT
#  define av_guess_format guess_format
#endif

#ifndef FFMPEG_HAVE_PARSE_UTILS
#  define av_parse_video_rate av_parse_video_frame_rate
#endif

#ifdef FFMPEG_HAVE_DEFAULT_VAL_UNION
#  define FFMPEG_DEF_OPT_VAL_INT(OPT) OPT->default_val.i64
#  define FFMPEG_DEF_OPT_VAL_DOUBLE(OPT) OPT->default_val.dbl
#else
#  define FFMPEG_DEF_OPT_VAL_INT(OPT) OPT->default_val
#  define FFMPEG_DEF_OPT_VAL_DOUBLE(OPT) OPT->default_val
#endif

#ifndef FFMPEG_HAVE_AVMEDIA_TYPES
#  define AVMEDIA_TYPE_VIDEO CODEC_TYPE_VIDEO
#  define AVMEDIA_TYPE_AUDIO CODEC_TYPE_AUDIO
#endif

#ifndef FFMPEG_HAVE_DECODE_AUDIO3
FFMPEG_INLINE
int avcodec_decode_audio3(AVCodecContext *avctx,
                          int16_t *samples,
                          int *frame_size_ptr,
                          AVPacket *avpkt)
{
  return avcodec_decode_audio2(avctx, samples, frame_size_ptr, avpkt->data, avpkt->size);
}
#endif

#ifndef FFMPEG_HAVE_DECODE_VIDEO2
FFMPEG_INLINE
int avcodec_decode_video2(AVCodecContext *avctx,
                          AVFrame *picture,
                          int *got_picture_ptr,
                          AVPacket *avpkt)
{
  return avcodec_decode_video(avctx, picture, got_picture_ptr, avpkt->data, avpkt->size);
}
#endif

FFMPEG_INLINE
int64_t av_get_pts_from_frame(AVFormatContext *avctx, AVFrame *picture)
int64_t timestamp_from_pts_or_dts(int64_t pts, int64_t dts)
{
  int64_t pts;
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(55, 34, 100)
  pts = picture->pts;
#else
  pts = picture->pkt_pts;
#endif

  /* Some videos do not have any pts values, use dts instead in those cases if
   * possible. Usually when this happens dts can act as pts because all frames
   * should then be presented in their decode order, i.e. pts == dts. */
  if (pts == AV_NOPTS_VALUE) {
    pts = picture->pkt_dts;
    return dts;
  }
  if (pts == AV_NOPTS_VALUE) {
    pts = 0;
  }

  (void)avctx;
  return pts;
}

/* obsolete constant formerly defined in FFmpeg libavcodec/avcodec.h */
#ifndef AVCODEC_MAX_AUDIO_FRAME_SIZE
#  define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000  // 1 second of 48khz 32bit audio
#endif
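A quick check on that constant: one second of 48 kHz audio at 32 bits per sample is 48000 samples × 4 bytes = 192000 bytes, exactly the value defined above.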
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 1, 0)
FFMPEG_INLINE
int avcodec_encode_video2(AVCodecContext *avctx,
                          AVPacket *pkt,
                          const AVFrame *frame,
                          int *got_output)
int64_t av_get_pts_from_frame(AVFrame *picture)
{
  int outsize, ret;

  ret = av_new_packet(pkt, avctx->width * avctx->height * 7 + 10000);
  if (ret < 0)
    return ret;

  outsize = avcodec_encode_video(avctx, pkt->data, pkt->size, frame);
  if (outsize <= 0) {
    *got_output = 0;
    av_free_packet(pkt);
  }
  else {
    *got_output = 1;
    av_shrink_packet(pkt, outsize);
    if (avctx->coded_frame) {
      pkt->pts = avctx->coded_frame->pts;
      if (avctx->coded_frame->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    }
  }

  return outsize >= 0 ? 0 : outsize;
  return timestamp_from_pts_or_dts(picture->pts, picture->pkt_dts);
}

#endif
/* -------------------------------------------------------------------- */
/** \name Deinterlace code block
 *
 * NOTE: The code in this block is from FFmpeg 2.6.4, which is licensed by LGPL.
 * \{ */

#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 17, 0)
FFMPEG_INLINE
void avformat_close_input(AVFormatContext **ctx)
{
  av_close_input_file(*ctx);
  *ctx = NULL;
}
#endif
#define MAX_NEG_CROP 1024

#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 8, 0)
FFMPEG_INLINE
AVFrame *av_frame_alloc(void)
{
  return avcodec_alloc_frame();
}

FFMPEG_INLINE
void av_frame_free(AVFrame **frame)
{
  av_freep(frame);
}
#endif

FFMPEG_INLINE
const char *av_get_metadata_key_value(AVDictionary *metadata, const char *key)
{
  if (metadata == NULL) {
    return NULL;
  }
  AVDictionaryEntry *tag = NULL;
  while ((tag = av_dict_get(metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
    if (!strcmp(tag->key, key)) {
      return tag->value;
    }
  }
  return NULL;
}

FFMPEG_INLINE
bool av_check_encoded_with_ffmpeg(AVFormatContext *ctx)
{
  const char *encoder = av_get_metadata_key_value(ctx->metadata, "ENCODER");
  if (encoder != NULL && !strncmp(encoder, "Lavf", 4)) {
    return true;
  }
  return false;
}

#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51, 32, 0)
#  define AV_OPT_SEARCH_FAKE_OBJ 0
#endif

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 59, 100)
#  define FFMPEG_HAVE_DEPRECATED_FLAGS2
#endif

/* Since FFmpeg-1.1 this constant has the AV_ prefix. */
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 3, 100)
#  define AV_PIX_FMT_BGR32 PIX_FMT_BGR32
#  define AV_PIX_FMT_YUV422P PIX_FMT_YUV422P
#  define AV_PIX_FMT_BGRA PIX_FMT_BGRA
#  define AV_PIX_FMT_ARGB PIX_FMT_ARGB
#  define AV_PIX_FMT_RGBA PIX_FMT_RGBA
#endif

/* New API from FFmpeg-2.0 which soon became the recommended one. */
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 38, 100)
#  define av_frame_alloc avcodec_alloc_frame
#  define av_frame_free avcodec_free_frame
#  define av_frame_unref avcodec_get_frame_defaults
#endif

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 24, 102)

/* NOTE: The code in this block is from FFmpeg 2.6.4, which is licensed by LGPL. */

#  define MAX_NEG_CROP 1024

#  define times4(x) x, x, x, x
#  define times256(x) times4(times4(times4(times4(times4(x)))))
#define times4(x) x, x, x, x
#define times256(x) times4(times4(times4(times4(times4(x)))))

static const uint8_t ff_compat_crop_tab[256 + 2 * MAX_NEG_CROP] = {
    times256(0x00), 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
@@ -575,8 +145,8 @@ static const uint8_t ff_compat_crop_tab[256 + 2 * MAX_NEG_CROP] = {
    0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA,
    0xFB, 0xFC, 0xFD, 0xFE, 0xFF, times256(0xFF)};

#  undef times4
#  undef times256
#undef times4
#undef times256

/* filter parameters: [-1 4 2 4 -1] // 8 */
FFMPEG_INLINE
@@ -668,8 +238,9 @@ int deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap, int width, int
  uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  int y;
  uint8_t *buf = (uint8_t *)av_malloc(width);
  if (!buf)
  if (!buf) {
    return AVERROR(ENOMEM);
  }

  src_m1 = src1;
  memcpy(buf, src_m1, width);
@@ -689,24 +260,21 @@ int deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap, int width, int
  return 0;
}

#  ifdef __GNUC__
#    pragma GCC diagnostic push
#    pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#  endif

FFMPEG_INLINE
int avpicture_deinterlace(
    AVPicture *dst, const AVPicture *src, enum AVPixelFormat pix_fmt, int width, int height)
int av_image_deinterlace(
    AVFrame *dst, const AVFrame *src, enum AVPixelFormat pix_fmt, int width, int height)
{
  int i, ret;

  if (pix_fmt != AV_PIX_FMT_YUV420P && pix_fmt != AV_PIX_FMT_YUVJ420P &&
      pix_fmt != AV_PIX_FMT_YUV422P && pix_fmt != AV_PIX_FMT_YUVJ422P &&
      pix_fmt != AV_PIX_FMT_YUV444P && pix_fmt != AV_PIX_FMT_YUV411P &&
      pix_fmt != AV_PIX_FMT_GRAY8)
      pix_fmt != AV_PIX_FMT_GRAY8) {
    return -1;
  if ((width & 3) != 0 || (height & 3) != 0)
  }
  if ((width & 3) != 0 || (height & 3) != 0) {
    return -1;
  }

  for (i = 0; i < 3; i++) {
    if (i == 1) {
@@ -732,8 +300,9 @@ int avpicture_deinterlace(
    }
    if (src == dst) {
      ret = deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i], width, height);
      if (ret < 0)
      if (ret < 0) {
        return ret;
      }
    }
    else {
      deinterlace_bottom_field(
@@ -743,10 +312,6 @@ int avpicture_deinterlace(
  return 0;
}

#  ifdef __GNUC__
#    pragma GCC diagnostic pop
#  endif

#endif
/* --- Deinterlace code block end --- */

#endif
@@ -40,6 +40,25 @@
    </screenshot>
  </screenshots>
  <releases>
    <release version="2.93" date="2021-06-02">
      <description>
        <p>New features:</p>
        <ul>
          <li>Mesh primitive nodes</li>
          <li>Line Art</li>
          <li>EEVEE Realistic depth of field and volumetrics</li>
          <li>Spreadsheet editor</li>
        </ul>
        <p>Enhancements:</p>
        <ul>
          <li>Geometry Nodes - 22 new nodes and improved attribute search</li>
          <li>Mask loops, textures and patterns for sculpting</li>
          <li>Grease pencil interpolate refactored and SVG and PDF support</li>
          <li>Persistent Data rendering settings for Cycles</li>
          <li>Video Sequencer Editor auto-proxy system</li>
        </ul>
      </description>
    </release>
    <release version="2.92" date="2021-02-25">
      <description>
        <p>New features:</p>
@@ -560,9 +560,17 @@ class Text(bpy_types.ID):
        self.write(string)

    def as_module(self):
        from os.path import splitext
        import bpy
        from os.path import splitext, join
        from types import ModuleType
        mod = ModuleType(splitext(self.name)[0])
        name = self.name
        mod = ModuleType(splitext(name)[0])
        # This is a fake file-path, set this since some scripts check `__file__`,
        # error messages may include this as well.
        # NOTE: the file path may be a blank string if the file hasn't been saved.
        mod.__dict__.update({
            "__file__": join(bpy.data.filepath, name),
        })
        # TODO: We could use Text.compiled (C struct member)
        # if this is called often it will be much faster.
        exec(self.as_string(), mod.__dict__)
@@ -25,6 +25,10 @@ class CollectionButtonsPanel:
    bl_region_type = 'WINDOW'
    bl_context = "collection"

    @classmethod
    def poll(cls, context):
        return context.collection != context.scene.collection


def lineart_make_line_type_entry(col, line_type, text_disp, expand, search_from):
    col.prop(line_type, "use", text=text_disp)
@@ -38,12 +42,6 @@ def lineart_make_line_type_entry(col, line_type, text_disp, expand, search_from)
class COLLECTION_PT_collection_flags(CollectionButtonsPanel, Panel):
    bl_label = "Restrictions"

    @classmethod
    def poll(cls, context):
        vl = context.view_layer
        vlc = vl.active_layer_collection
        return (vlc.name != 'Master Collection')

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
@@ -914,6 +914,12 @@ class IMAGE_PT_active_mask_point(MASK_PT_point, Panel):
    bl_category = "Mask"


class IMAGE_PT_mask_display(MASK_PT_display, Panel):
    bl_space_type = 'IMAGE_EDITOR'
    bl_region_type = 'UI'
    bl_category = "Mask"


# --- end mask ---

class IMAGE_PT_snapping(Panel):
@@ -1616,6 +1622,7 @@ classes = (
    IMAGE_PT_active_tool,
    IMAGE_PT_mask,
    IMAGE_PT_mask_layers,
    IMAGE_PT_mask_display,
    IMAGE_PT_active_mask_spline,
    IMAGE_PT_active_mask_point,
    IMAGE_PT_snapping,
@@ -1381,7 +1381,6 @@ class SEQUENCER_PT_source(SequencerButtonsPanel, Panel):
            col = layout.column()
            col.prop(strip, "filepath", text="")
            col.prop(strip.colorspace_settings, "name", text="Color Space")
            col.prop(strip, "mpeg_preseek")
            col.prop(strip, "stream_index")
            col.prop(strip, "use_deinterlace")
@@ -2529,7 +2529,7 @@ class VIEW3D_MT_object_context_menu(Menu):
            layout.operator_menu_enum("gpencil.convert", "type", text="Convert To")

        if (
            obj.type in {'MESH', 'CURVE', 'SURFACE', 'GPENCIL', 'LATTICE', 'ARMATURE', 'META'} or
            obj.type in {'MESH', 'CURVE', 'SURFACE', 'GPENCIL', 'LATTICE', 'ARMATURE', 'META', 'FONT'} or
            (obj.type == 'EMPTY' and obj.instance_collection is not None)
        ):
            layout.operator_context = 'INVOKE_REGION_WIN'
@@ -506,7 +506,7 @@ void blf_glyph_render(FontBLF *font, GlyphCacheBLF *gc, GlyphBLF *g, float x, fl
    if (gc->texture) {
      GPU_texture_free(gc->texture);
    }
    gc->texture = GPU_texture_create_1d_array(__func__, w, h, 1, GPU_R8, NULL);
    gc->texture = GPU_texture_create_2d(__func__, w, h, 1, GPU_R8, NULL);

    gc->bitmap_len_landed = 0;
  }
@@ -33,9 +33,9 @@ extern "C" {
/* Blender major and minor version. */
#define BLENDER_VERSION 293
/* Blender patch version for bugfix releases. */
#define BLENDER_VERSION_PATCH 0
#define BLENDER_VERSION_PATCH 2
/** Blender release cycle stage: alpha/beta/rc/release. */
#define BLENDER_VERSION_CYCLE beta
#define BLENDER_VERSION_CYCLE rc

/* Blender file format version. */
#define BLENDER_FILE_VERSION BLENDER_VERSION
@@ -1301,15 +1301,18 @@ void ntreeCompositOutputFileUniqueLayer(struct ListBase *list,
void ntreeCompositColorBalanceSyncFromLGG(bNodeTree *ntree, bNode *node);
void ntreeCompositColorBalanceSyncFromCDL(bNodeTree *ntree, bNode *node);

void ntreeCompositCryptomatteSyncFromAdd(bNode *node);
void ntreeCompositCryptomatteSyncFromAdd(const Scene *scene, bNode *node);
void ntreeCompositCryptomatteSyncFromRemove(bNode *node);
bNodeSocket *ntreeCompositCryptomatteAddSocket(bNodeTree *ntree, bNode *node);
int ntreeCompositCryptomatteRemoveSocket(bNodeTree *ntree, bNode *node);
void ntreeCompositCryptomatteLayerPrefix(const bNode *node, char *r_prefix, size_t prefix_len);
void ntreeCompositCryptomatteLayerPrefix(const Scene *scene,
                                         const bNode *node,
                                         char *r_prefix,
                                         size_t prefix_len);
/* Update the runtime layer names with the cryptomatte layer names of the referenced
 * render layer or image. */
void ntreeCompositCryptomatteUpdateLayerNames(bNode *node);
struct CryptomatteSession *ntreeCompositCryptomatteSession(bNode *node);
void ntreeCompositCryptomatteUpdateLayerNames(const Scene *scene, bNode *node);
struct CryptomatteSession *ntreeCompositCryptomatteSession(const Scene *scene, bNode *node);

/** \} */
@@ -583,10 +583,6 @@ if(WITH_CODEC_FFMPEG)
    ${FFMPEG_LIBRARIES}
  )
  add_definitions(-DWITH_FFMPEG)

  remove_strict_c_flags_file(
    intern/writeffmpeg.c
  )
endif()

if(WITH_PYTHON)
@@ -623,7 +623,8 @@ static void clamp_bounds_in_domain(FluidDomainSettings *fds,
static bool is_static_object(Object *ob)
{
  /* Check if the object has modifiers that might make the object "dynamic". */
  ModifierData *md = ob->modifiers.first;
  VirtualModifierData virtualModifierData;
  ModifierData *md = BKE_modifiers_get_virtual_modifierlist(ob, &virtualModifierData);
  for (; md; md = md->next) {
    if (ELEM(md->type,
             eModifierType_Cloth,
@@ -631,7 +632,8 @@ static bool is_static_object(Object *ob)
             eModifierType_Explode,
             eModifierType_Ocean,
             eModifierType_ShapeKey,
             eModifierType_Softbody)) {
             eModifierType_Softbody,
             eModifierType_Nodes)) {
      return false;
    }
  }
@@ -1367,16 +1367,16 @@ void BKE_texpaint_slots_refresh_object(Scene *scene, struct Object *ob)
}

struct FindTexPaintNodeData {
  bNode *node;
  short iter_index;
  short index;
  Image *ima;
  bNode *r_node;
};

static bool texpaint_slot_node_find_cb(bNode *node, void *userdata)
{
  struct FindTexPaintNodeData *find_data = userdata;
  if (find_data->iter_index++ == find_data->index) {
    find_data->node = node;
  Image *ima = (Image *)node->id;
  if (find_data->ima == ima) {
    find_data->r_node = node;
    return false;
  }

@@ -1385,10 +1385,10 @@ static bool texpaint_slot_node_find_cb(bNode *node, void *userdata)

bNode *BKE_texpaint_slot_material_find_node(Material *ma, short texpaint_slot)
{
  struct FindTexPaintNodeData find_data = {NULL, 0, texpaint_slot};
  struct FindTexPaintNodeData find_data = {ma->texpaintslot[texpaint_slot].ima, NULL};
  ntree_foreach_texnode_recursive(ma->nodetree, texpaint_slot_node_find_cb, &find_data);

  return find_data.node;
  return find_data.r_node;
}

/* r_col = current value, col = new value, (fac == 0) is no change */
@@ -729,7 +729,6 @@ Object *BKE_modifiers_is_deformed_by_armature(Object *ob)
  ArmatureGpencilModifierData *agmd = NULL;
  GpencilModifierData *gmd = BKE_gpencil_modifiers_get_virtual_modifierlist(
      ob, &gpencilvirtualModifierData);
  gmd = ob->greasepencil_modifiers.first;

  /* return the first selected armature, this lets us use multiple armatures */
  for (; gmd; gmd = gmd->next) {
@@ -749,7 +748,6 @@ Object *BKE_modifiers_is_deformed_by_armature(Object *ob)
  VirtualModifierData virtualModifierData;
  ArmatureModifierData *amd = NULL;
  ModifierData *md = BKE_modifiers_get_virtual_modifierlist(ob, &virtualModifierData);
  md = ob->modifiers.first;

  /* return the first selected armature, this lets us use multiple armatures */
  for (; md; md = md->next) {
@@ -56,6 +56,7 @@
#  include <libavcodec/avcodec.h>
#  include <libavformat/avformat.h>
#  include <libavutil/imgutils.h>
#  include <libavutil/opt.h>
#  include <libavutil/rational.h>
#  include <libavutil/samplefmt.h>
#  include <libswscale/swscale.h>
@@ -80,6 +81,8 @@ typedef struct FFMpegContext {
  int ffmpeg_preset; /* see eFFMpegPreset */

  AVFormatContext *outfile;
  AVCodecContext *video_codec;
  AVCodecContext *audio_codec;
  AVStream *video_stream;
  AVStream *audio_stream;
  AVFrame *current_frame; /* Image frame in output pixel format. */
@@ -91,10 +94,6 @@ typedef struct FFMpegContext {
  uint8_t *audio_input_buffer;
  uint8_t *audio_deinterleave_buffer;
  int audio_input_samples;
#  ifndef FFMPEG_HAVE_ENCODE_AUDIO2
  uint8_t *audio_output_buffer;
  int audio_outbuf_size;
#  endif
  double audio_time;
  bool audio_deinterleave;
  int audio_sample_size;
@@ -141,33 +140,22 @@ static int request_float_audio_buffer(int codec_id)
}

#  ifdef WITH_AUDASPACE

static int write_audio_frame(FFMpegContext *context)
{
  AVCodecContext *c = NULL;
  AVPacket pkt;
  AVFrame *frame = NULL;
  int got_output = 0;

  c = context->audio_stream->codec;

  av_init_packet(&pkt);
  pkt.size = 0;
  pkt.data = NULL;
  AVCodecContext *c = context->audio_codec;

  AUD_Device_read(
      context->audio_mixdown_device, context->audio_input_buffer, context->audio_input_samples);
  context->audio_time += (double)context->audio_input_samples / (double)c->sample_rate;

#  ifdef FFMPEG_HAVE_ENCODE_AUDIO2
  frame = av_frame_alloc();
  av_frame_unref(frame);
  frame->pts = context->audio_time / av_q2d(c->time_base);
  frame->nb_samples = context->audio_input_samples;
  frame->format = c->sample_fmt;
  frame->channels = c->channels;
#  ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
  frame->channel_layout = c->channel_layout;
#  endif

  if (context->audio_deinterleave) {
    int channel, i;
@@ -195,61 +183,48 @@ static int write_audio_frame(FFMpegContext *context)
                 context->audio_input_samples * c->channels * context->audio_sample_size,
                 1);

  if (avcodec_encode_audio2(c, &pkt, frame, &got_output) < 0) {
    // XXX error("Error writing audio packet");
    return -1;
  int success = 0;

  int ret = avcodec_send_frame(c, frame);
  if (ret < 0) {
    /* Can't send frame to encoder. This shouldn't happen. */
    fprintf(stderr, "Can't send audio frame: %s\n", av_err2str(ret));
    success = -1;
  }

  if (!got_output) {
    av_frame_free(&frame);
    return 0;
  }
#  else
  pkt.size = avcodec_encode_audio(c,
                                  context->audio_output_buffer,
                                  context->audio_outbuf_size,
                                  (short *)context->audio_input_buffer);
  AVPacket *pkt = av_packet_alloc();

  if (pkt.size < 0) {
    // XXX error("Error writing audio packet");
    return -1;
  }
  while (ret >= 0) {

  pkt.data = context->audio_output_buffer;
  got_output = 1;
    ret = avcodec_receive_packet(c, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      break;
    }
    if (ret < 0) {
      fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
      success = -1;
    }

    pkt->stream_index = context->audio_stream->index;
    av_packet_rescale_ts(pkt, c->time_base, context->audio_stream->time_base);
#  ifdef FFMPEG_USE_DURATION_WORKAROUND
    my_guess_pkt_duration(context->outfile, context->audio_stream, pkt);
#  endif

  if (got_output) {
    if (pkt.pts != AV_NOPTS_VALUE) {
      pkt.pts = av_rescale_q(pkt.pts, c->time_base, context->audio_stream->time_base);
    }
    if (pkt.dts != AV_NOPTS_VALUE) {
      pkt.dts = av_rescale_q(pkt.dts, c->time_base, context->audio_stream->time_base);
    }
    if (pkt.duration > 0) {
      pkt.duration = av_rescale_q(pkt.duration, c->time_base, context->audio_stream->time_base);
    }
    pkt->flags |= AV_PKT_FLAG_KEY;

    pkt.stream_index = context->audio_stream->index;

    pkt.flags |= AV_PKT_FLAG_KEY;

    if (av_interleaved_write_frame(context->outfile, &pkt) != 0) {
      fprintf(stderr, "Error writing audio packet!\n");
      if (frame) {
        av_frame_free(&frame);
      }
      return -1;
    int write_ret = av_interleaved_write_frame(context->outfile, pkt);
    if (write_ret != 0) {
      fprintf(stderr, "Error writing audio packet: %s\n", av_err2str(write_ret));
      success = -1;
      break;
    }

    av_free_packet(&pkt);
  }

  if (frame) {
    av_frame_free(&frame);
  }
  av_packet_free(&pkt);
  av_frame_free(&frame);

  return 0;
  return success;
}
#  endif /* #ifdef WITH_AUDASPACE */
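Both write_audio_frame() and the write_video_frame() hunk below move to FFmpeg's push/pull encode API: one avcodec_send_frame() call feeds a raw frame, then a drain loop around avcodec_receive_packet() collects every packet the encoder has ready (EAGAIN simply means "send more input first"). A condensed sketch of the pattern; the function name and error handling here are illustrative, not taken from the patch:

/* Minimal sketch of the modern FFmpeg encode loop used by this patch:
 * send one frame, then drain all packets the encoder has ready. */
static int encode_frame_sketch(AVCodecContext *c, AVStream *st,
                               AVFormatContext *out, AVFrame *frame)
{
  int ret = avcodec_send_frame(c, frame); /* frame == NULL flushes the encoder. */
  if (ret < 0) {
    return ret;
  }

  AVPacket *pkt = av_packet_alloc();
  while (ret >= 0) {
    ret = avcodec_receive_packet(c, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      ret = 0; /* Not an error: the encoder has no packet ready yet. */
      break;
    }
    if (ret < 0) {
      break; /* Real encoding error. */
    }
    pkt->stream_index = st->index;
    /* Packet timestamps are in the codec time base; the muxer expects
     * the stream time base. */
    av_packet_rescale_ts(pkt, c->time_base, st->time_base);
    ret = av_interleaved_write_frame(out, pkt);
    av_packet_unref(pkt); /* Safe: no-op if the muxer consumed the reference. */
  }
  av_packet_free(&pkt);
  return ret;
}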
@@ -265,14 +240,15 @@ static AVFrame *alloc_picture(int pix_fmt, int width, int height)
  if (!f) {
    return NULL;
  }
  size = avpicture_get_size(pix_fmt, width, height);
  size = av_image_get_buffer_size(pix_fmt, width, height, 1);
  /* allocate the actual picture buffer */
  buf = MEM_mallocN(size, "AVFrame buffer");
  if (!buf) {
    free(f);
    return NULL;
  }
  avpicture_fill((AVPicture *)f, buf, pix_fmt, width, height);

  av_image_fill_arrays(f->data, f->linesize, buf, pix_fmt, width, height, 1);
  f->format = pix_fmt;
  f->width = width;
  f->height = height;
|
||||
}
|
||||
|
||||
/* Write a frame to the output file */
|
||||
static int write_video_frame(
|
||||
FFMpegContext *context, const RenderData *rd, int cfra, AVFrame *frame, ReportList *reports)
|
||||
static int write_video_frame(FFMpegContext *context, int cfra, AVFrame *frame, ReportList *reports)
|
||||
{
|
||||
int got_output;
|
||||
int ret, success = 1;
|
||||
AVCodecContext *c = context->video_stream->codec;
|
||||
AVPacket packet = {0};
|
||||
AVPacket *packet = av_packet_alloc();
|
||||
|
||||
av_init_packet(&packet);
|
||||
AVCodecContext *c = context->video_codec;
|
||||
|
||||
frame->pts = cfra;
|
||||
|
||||
ret = avcodec_encode_video2(c, &packet, frame, &got_output);
|
||||
|
||||
if (ret >= 0 && got_output) {
|
||||
if (packet.pts != AV_NOPTS_VALUE) {
|
||||
packet.pts = av_rescale_q(packet.pts, c->time_base, context->video_stream->time_base);
|
||||
PRINT("Video Frame PTS: %d\n", (int)packet.pts);
|
||||
}
|
||||
else {
|
||||
PRINT("Video Frame PTS: not set\n");
|
||||
}
|
||||
if (packet.dts != AV_NOPTS_VALUE) {
|
||||
packet.dts = av_rescale_q(packet.dts, c->time_base, context->video_stream->time_base);
|
||||
PRINT("Video Frame DTS: %d\n", (int)packet.dts);
|
||||
}
|
||||
else {
|
||||
PRINT("Video Frame DTS: not set\n");
|
||||
}
|
||||
|
||||
packet.stream_index = context->video_stream->index;
|
||||
ret = av_interleaved_write_frame(context->outfile, &packet);
|
||||
success = (ret == 0);
|
||||
ret = avcodec_send_frame(c, frame);
|
||||
if (ret < 0) {
|
||||
/* Can't send frame to encoder. This shouldn't happen. */
|
||||
fprintf(stderr, "Can't send video frame: %s\n", av_err2str(ret));
|
||||
success = -1;
|
||||
}
|
||||
else if (ret < 0) {
|
||||
success = 0;
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_packet(c, packet);
|
||||
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
|
||||
/* No more packets available. */
|
||||
break;
|
||||
}
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame: %s\n", av_err2str(ret));
|
||||
break;
|
||||
}
|
||||
|
||||
packet->stream_index = context->video_stream->index;
|
||||
av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
|
||||
# ifdef FFMPEG_USE_DURATION_WORKAROUND
|
||||
my_guess_pkt_duration(context->outfile, context->video_stream, packet);
|
||||
# endif
|
||||
|
||||
if (av_interleaved_write_frame(context->outfile, packet) != 0) {
|
||||
success = -1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!success) {
|
||||
BKE_report(reports, RPT_ERROR, "Error writing frame");
|
||||
PRINT("Error writing frame: %s\n", av_err2str(ret));
|
||||
}
|
||||
|
||||
av_packet_free(&packet);
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
/* read and encode a frame of audio from the buffer */
|
||||
static AVFrame *generate_video_frame(FFMpegContext *context,
|
||||
const uint8_t *pixels,
|
||||
ReportList *reports)
|
||||
static AVFrame *generate_video_frame(FFMpegContext *context, const uint8_t *pixels)
|
||||
{
|
||||
AVCodecContext *c = context->video_stream->codec;
|
||||
int height = c->height;
|
||||
AVCodecParameters *codec = context->video_stream->codecpar;
|
||||
int height = codec->height;
|
||||
AVFrame *rgb_frame;
|
||||
|
||||
if (context->img_convert_frame != NULL) {
|
||||
@@ -438,7 +417,7 @@ static AVFrame *generate_video_frame(FFMpegContext *context,
|
||||
(const uint8_t *const *)rgb_frame->data,
|
||||
rgb_frame->linesize,
|
||||
0,
|
||||
c->height,
|
||||
codec->height,
|
||||
context->current_frame->data,
|
||||
context->current_frame->linesize);
|
||||
}
|
||||
@@ -446,9 +425,7 @@ static AVFrame *generate_video_frame(FFMpegContext *context,
|
||||
return context->current_frame;
|
||||
}
|
||||
|
||||
static void set_ffmpeg_property_option(AVCodecContext *c,
|
||||
IDProperty *prop,
|
||||
AVDictionary **dictionary)
|
||||
static void set_ffmpeg_property_option(IDProperty *prop, AVDictionary **dictionary)
|
||||
{
|
||||
char name[128];
|
||||
char *param;
|
||||
@@ -536,11 +513,53 @@ static void set_ffmpeg_properties(RenderData *rd,
|
||||
|
||||
for (curr = prop->data.group.first; curr; curr = curr->next) {
|
||||
if (ffmpeg_proprty_valid(c, prop_name, curr)) {
|
||||
set_ffmpeg_property_option(c, curr, dictionary);
|
||||
set_ffmpeg_property_option(curr, dictionary);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static AVRational calc_time_base(uint den, double num, int codec_id)
|
||||
{
|
||||
/* Convert the input 'num' to an integer. Simply shift the decimal places until we get an integer
|
||||
* (within a floating point error range).
|
||||
* For example if we have den = 3 and num = 0.1 then the fps is: den/num = 30 fps.
|
||||
* When converthing this to a ffmpeg time base, we want num to be an integer.
|
||||
* So we simply move the decimal places of both numbers. IE den = 30, num = 1.*/
|
||||
float eps = FLT_EPSILON;
|
||||
const uint DENUM_MAX = (codec_id == AV_CODEC_ID_MPEG4) ? (1UL << 16) - 1 : (1UL << 31) - 1;
|
||||
|
||||
/* Calculate the precision of the initial floating point number. */
|
||||
if (num > 1.0) {
|
||||
const uint num_integer_bits = log2_floor_u((unsigned int)num);
|
||||
|
||||
/* Formula for calculating the epsilon value: (power of two range) / (pow mantissa bits)
|
||||
* For example, a float has 23 manitissa bits and the float value 3.5f as a pow2 range of
|
||||
* (4-2=2):
|
||||
* (2) / pow2(23) = floating point precision for 3.5f
|
||||
*/
|
||||
eps = (float)(1 << num_integer_bits) * FLT_EPSILON;
|
||||
}
|
||||
|
||||
/* Calculate how many decimal shifts we can do until we run out of precision. */
|
||||
const int max_num_shift = fabsf(log10f(eps));
|
||||
/* Calculate how many times we can shift the denominator. */
|
||||
const int max_den_shift = log10f(DENUM_MAX) - log10f(den);
|
||||
const int max_iter = min_ii(max_num_shift, max_den_shift);
|
||||
|
||||
for (int i = 0; i < max_iter && fabs(num - round(num)) > eps; i++) {
|
||||
/* Increase the number and denominator until both are integers. */
|
||||
num *= 10;
|
||||
den *= 10;
|
||||
eps *= 10;
|
||||
}
|
||||
|
||||
AVRational time_base;
|
||||
time_base.den = den;
|
||||
time_base.num = (int)num;
|
||||
|
||||
return time_base;
|
||||
}
|
||||
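Working the comment's own numbers through calc_time_base() (an illustration, not code from the patch): den = 3 and num = 0.1 encode 3/0.1 = 30 fps; one decimal shift makes both values integral (den = 30, num = 1), so the function returns a time base of 1/30 s per frame.

/* Hypothetical call, tracing the example from the comment above. */
AVRational tb = calc_time_base(3, 0.1, AV_CODEC_ID_H264);
/* tb.num == 1, tb.den == 30, i.e. one frame every 1/30th of a second. */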
|
||||
/* prepare a video stream for the output file */
|
||||
|
||||
static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
@@ -553,7 +572,6 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
int error_size)
|
||||
{
|
||||
AVStream *st;
|
||||
AVCodecContext *c;
|
||||
AVCodec *codec;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
@@ -567,17 +585,29 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
|
||||
/* Set up the codec context */
|
||||
|
||||
c = st->codec;
|
||||
context->video_codec = avcodec_alloc_context3(NULL);
|
||||
AVCodecContext *c = context->video_codec;
|
||||
c->codec_id = codec_id;
|
||||
c->codec_type = AVMEDIA_TYPE_VIDEO;
|
||||
|
||||
codec = avcodec_find_encoder(c->codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Couldn't find valid video codec\n");
|
||||
avcodec_free_context(&c);
|
||||
context->video_codec = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Load codec defaults into 'c'. */
|
||||
avcodec_get_context_defaults3(c, codec);
|
||||
|
||||
/* Get some values from the current render settings */
|
||||
|
||||
c->width = rectx;
|
||||
c->height = recty;
|
||||
|
||||
/* FIXME: Really bad hack (tm) for NTSC support */
|
||||
if (context->ffmpeg_type == FFMPEG_DV && rd->frs_sec != 25) {
|
||||
/* FIXME: Really bad hack (tm) for NTSC support */
|
||||
c->time_base.den = 2997;
|
||||
c->time_base.num = 100;
|
||||
}
|
||||
@@ -585,21 +615,23 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
c->time_base.den = rd->frs_sec;
|
||||
c->time_base.num = (int)rd->frs_sec_base;
|
||||
}
|
||||
else if (compare_ff(rd->frs_sec_base, 1.001f, 0.000001f)) {
|
||||
/* This converts xx/1.001 (which is used in presets) to xx000/1001 (which is used in the rest
|
||||
* of the world, including FFmpeg). */
|
||||
c->time_base.den = (int)(rd->frs_sec * 1000);
|
||||
c->time_base.num = (int)(rd->frs_sec_base * 1000);
|
||||
}
|
||||
else {
|
||||
/* This calculates a fraction (DENUM_MAX / num) which approximates the scene frame rate
|
||||
* (frs_sec / frs_sec_base). It uses the maximum denominator allowed by FFmpeg.
|
||||
*/
|
||||
const double DENUM_MAX = (codec_id == AV_CODEC_ID_MPEG4) ? (1UL << 16) - 1 : (1UL << 31) - 1;
|
||||
const double num = (DENUM_MAX / (double)rd->frs_sec) * rd->frs_sec_base;
|
||||
c->time_base = calc_time_base(rd->frs_sec, rd->frs_sec_base, codec_id);
|
||||
}
|
||||
|
||||
c->time_base.den = (int)DENUM_MAX;
|
||||
c->time_base.num = (int)num;
|
||||
/* As per the timebase documentation here:
|
||||
* https://www.ffmpeg.org/ffmpeg-codecs.html#Codec-Options
|
||||
* We want to set the time base to (1 / fps) for fixed frame rate video.
|
||||
* If it is not possible, we want to set the timebase numbers to something as
|
||||
* small as possible.
|
||||
*/
|
||||
if (c->time_base.num != 1) {
|
||||
AVRational new_time_base;
|
||||
if (av_reduce(
|
||||
&new_time_base.num, &new_time_base.den, c->time_base.num, c->time_base.den, INT_MAX)) {
|
||||
/* Exact reduction was possible. Use the new value. */
|
||||
c->time_base = new_time_base;
|
||||
}
|
||||
}
|
||||
|
||||
st->time_base = c->time_base;
|
||||
@@ -611,6 +643,11 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
ffmpeg_dict_set_int(&opts, "lossless", 1);
|
||||
}
|
||||
else if (context->ffmpeg_crf >= 0) {
|
||||
/* As per https://trac.ffmpeg.org/wiki/Encode/VP9 we must set the bit rate to zero when
|
||||
* encoding with vp9 in crf mode.
|
||||
* Set this to always be zero for other codecs as well.
|
||||
* We don't care about bit rate in crf mode. */
|
||||
c->bit_rate = 0;
|
||||
ffmpeg_dict_set_int(&opts, "crf", context->ffmpeg_crf);
|
||||
}
|
||||
else {
|
||||
@@ -650,14 +687,6 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
}
|
||||
}
|
||||
|
||||
/* Deprecated and not doing anything since July 2015, deleted in recent ffmpeg */
|
||||
// c->me_method = ME_EPZS;
|
||||
|
||||
codec = avcodec_find_encoder(c->codec_id);
|
||||
if (!codec) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Be sure to use the correct pixel format(e.g. RGB, YUV) */
|
||||
|
||||
if (codec->pix_fmts) {
|
||||
@@ -674,12 +703,6 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
|
||||
}
|
||||
|
||||
if (codec_id == AV_CODEC_ID_H264) {
|
||||
/* correct wrong default ffmpeg param which crash x264 */
|
||||
c->qmin = 10;
|
||||
c->qmax = 51;
|
||||
}
|
||||
|
||||
/* Keep lossless encodes in the RGB domain. */
|
||||
if (codec_id == AV_CODEC_ID_HUFFYUV) {
|
||||
if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
|
||||
@@ -714,7 +737,7 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
|
||||
if ((of->oformat->flags & AVFMT_GLOBALHEADER)) {
|
||||
PRINT("Using global header\n");
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
|
||||
/* xasp & yasp got float lately... */
|
||||
@@ -739,9 +762,14 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
c->thread_type = FF_THREAD_SLICE;
|
||||
}
|
||||
|
||||
if (avcodec_open2(c, codec, &opts) < 0) {
|
||||
int ret = avcodec_open2(c, codec, &opts);
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Couldn't initialize video codec: %s\n", av_err2str(ret));
|
||||
BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
|
||||
av_dict_free(&opts);
|
||||
avcodec_free_context(&c);
|
||||
context->video_codec = NULL;
|
||||
return NULL;
|
||||
}
|
||||
av_dict_free(&opts);
|
||||
@@ -769,6 +797,8 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
|
||||
NULL);
|
||||
}
|
||||
|
||||
avcodec_parameters_from_context(st->codecpar, c);
|
||||
|
||||
return st;
|
||||
}
|
||||
|
||||
@@ -780,7 +810,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
|
||||
int error_size)
|
||||
{
|
||||
AVStream *st;
|
||||
AVCodecContext *c;
|
||||
AVCodec *codec;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
@@ -792,19 +821,30 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
|
||||
}
|
||||
st->id = 1;
|
||||
|
||||
c = st->codec;
|
||||
context->audio_codec = avcodec_alloc_context3(NULL);
|
||||
AVCodecContext *c = context->audio_codec;
|
||||
c->thread_count = BLI_system_thread_count();
|
||||
c->thread_type = FF_THREAD_SLICE;
|
||||
|
||||
c->codec_id = codec_id;
|
||||
c->codec_type = AVMEDIA_TYPE_AUDIO;
|
||||
|
||||
codec = avcodec_find_encoder(c->codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Couldn't find valid audio codec\n");
|
||||
avcodec_free_context(&c);
|
||||
context->audio_codec = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Load codec defaults into 'c'. */
|
||||
avcodec_get_context_defaults3(c, codec);
|
||||
|
||||
c->sample_rate = rd->ffcodecdata.audio_mixrate;
|
||||
c->bit_rate = context->ffmpeg_audio_bitrate * 1000;
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
c->channels = rd->ffcodecdata.audio_channels;
|
||||
|
||||
# ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
|
||||
switch (rd->ffcodecdata.audio_channels) {
|
||||
case FFM_CHANNELS_MONO:
|
||||
c->channel_layout = AV_CH_LAYOUT_MONO;
|
||||
@@ -822,7 +862,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
|
||||
c->channel_layout = AV_CH_LAYOUT_7POINT1;
|
||||
break;
|
||||
}
|
||||
# endif
|
||||
|
||||
if (request_float_audio_buffer(codec_id)) {
|
||||
/* mainly for AAC codec which is experimental */
|
||||
@@ -830,12 +869,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
|
||||
c->sample_fmt = AV_SAMPLE_FMT_FLT;
|
||||
}
|
||||
|
||||
codec = avcodec_find_encoder(c->codec_id);
|
||||
if (!codec) {
|
||||
// XXX error("Couldn't find a valid audio codec");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (codec->sample_fmts) {
|
||||
/* Check if the preferred sample format for this codec is supported.
|
||||
* this is because, depending on the version of libav,
|
||||
@@ -844,13 +877,13 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
|
||||
* Float samples in particular are not always supported. */
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
for (; *p != -1; p++) {
|
||||
if (*p == st->codec->sample_fmt) {
|
||||
if (*p == c->sample_fmt) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (*p == -1) {
|
||||
/* sample format incompatible with codec. Defaulting to a format known to work */
|
||||
st->codec->sample_fmt = codec->sample_fmts[0];
|
||||
c->sample_fmt = codec->sample_fmts[0];
|
||||
}
|
||||
}
|
||||
|
||||
@@ -859,52 +892,48 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
    int best = 0;
    int best_dist = INT_MAX;
    for (; *p; p++) {
      int dist = abs(st->codec->sample_rate - *p);
      int dist = abs(c->sample_rate - *p);
      if (dist < best_dist) {
        best_dist = dist;
        best = *p;
      }
    }
    /* best is the closest supported sample rate (same as selected if best_dist == 0) */
    st->codec->sample_rate = best;
    c->sample_rate = best;
  }

  if (of->oformat->flags & AVFMT_GLOBALHEADER) {
    c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
  }

  set_ffmpeg_properties(rd, c, "audio", &opts);

  if (avcodec_open2(c, codec, &opts) < 0) {
    // XXX error("Couldn't initialize audio codec");
  int ret = avcodec_open2(c, codec, &opts);

  if (ret < 0) {
    fprintf(stderr, "Couldn't initialize audio codec: %s\n", av_err2str(ret));
    BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
    av_dict_free(&opts);
    avcodec_free_context(&c);
    context->audio_codec = NULL;
    return NULL;
  }
  av_dict_free(&opts);

  /* need to prevent floating point exception when using vorbis audio codec,
   * initialize this value in the same way as it's done in FFmpeg itself (sergey) */
  st->codec->time_base.num = 1;
  st->codec->time_base.den = st->codec->sample_rate;

# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
  context->audio_outbuf_size = FF_MIN_BUFFER_SIZE;
# endif
  c->time_base.num = 1;
  c->time_base.den = c->sample_rate;

  if (c->frame_size == 0) {
    /* Used to be if ((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))
     * not sure if that is needed anymore, so let's try out if there are any
     * complaints regarding some FFmpeg versions users might have. */
    context->audio_input_samples = FF_MIN_BUFFER_SIZE * 8 / c->bits_per_coded_sample / c->channels;
    context->audio_input_samples = AV_INPUT_BUFFER_MIN_SIZE * 8 / c->bits_per_coded_sample /
                                   c->channels;
  }
  else {
    context->audio_input_samples = c->frame_size;
# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
    if (c->frame_size * c->channels * sizeof(int16_t) * 4 > context->audio_outbuf_size) {
      context->audio_outbuf_size = c->frame_size * c->channels * sizeof(int16_t) * 4;
    }
# endif
  }

  context->audio_deinterleave = av_sample_fmt_is_planar(c->sample_fmt);
@@ -913,10 +942,6 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,

  context->audio_input_buffer = (uint8_t *)av_malloc(context->audio_input_samples * c->channels *
                                                     context->audio_sample_size);
# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
  context->audio_output_buffer = (uint8_t *)av_malloc(context->audio_outbuf_size);
# endif

  if (context->audio_deinterleave) {
    context->audio_deinterleave_buffer = (uint8_t *)av_malloc(
        context->audio_input_samples * c->channels * context->audio_sample_size);
@@ -924,6 +949,8 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,

  context->audio_time = 0.0f;

  avcodec_parameters_from_context(st->codecpar, c);

  return st;
}
/* essential functions -- start, append, end */
@@ -949,7 +976,7 @@ static void ffmpeg_dict_set_float(AVDictionary **dict, const char *key, float va
static void ffmpeg_add_metadata_callback(void *data,
                                         const char *propname,
                                         char *propvalue,
                                         int len)
                                         int UNUSED(len))
{
  AVDictionary **metadata = (AVDictionary **)data;
  av_dict_set(metadata, propname, propvalue, 0);
@@ -1040,7 +1067,7 @@ static int start_ffmpeg_impl(FFMpegContext *context,

  fmt->audio_codec = context->ffmpeg_audio_codec;

  BLI_strncpy(of->filename, name, sizeof(of->filename));
  of->url = av_strdup(name);
  /* set the codec to the user's selection */
  switch (context->ffmpeg_type) {
    case FFMPEG_AVI:
@@ -1105,9 +1132,11 @@ static int start_ffmpeg_impl(FFMpegContext *context,
  if (!context->video_stream) {
    if (error[0]) {
      BKE_report(reports, RPT_ERROR, error);
      PRINT("Video stream error: %s\n", error);
    }
    else {
      BKE_report(reports, RPT_ERROR, "Error initializing video stream");
      PRINT("Error initializing video stream");
    }
    goto fail;
  }
@@ -1119,9 +1148,11 @@ static int start_ffmpeg_impl(FFMpegContext *context,
  if (!context->audio_stream) {
    if (error[0]) {
      BKE_report(reports, RPT_ERROR, error);
      PRINT("Audio stream error: %s\n", error);
    }
    else {
      BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
      PRINT("Error initializing audio stream");
    }
    goto fail;
  }
@@ -1129,6 +1160,7 @@ static int start_ffmpeg_impl(FFMpegContext *context,
  if (!(fmt->flags & AVFMT_NOFILE)) {
    if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
      BKE_report(reports, RPT_ERROR, "Could not open file for writing");
      PRINT("Could not open file for writing\n");
      goto fail;
    }
  }
@@ -1138,10 +1170,12 @@ static int start_ffmpeg_impl(FFMpegContext *context,
        &of->metadata, context->stamp_data, ffmpeg_add_metadata_callback, false);
  }

  if (avformat_write_header(of, NULL) < 0) {
  int ret = avformat_write_header(of, NULL);
  if (ret < 0) {
    BKE_report(reports,
               RPT_ERROR,
               "Could not initialize streams, probably unsupported codec combination");
    PRINT("Could not write media header: %s\n", av_err2str(ret));
    goto fail;
  }

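A recurring pattern in these hunks is replacing raw numeric error codes with av_err2str(). That macro wraps av_strerror() with a temporary buffer; in contexts where the macro is awkward (it relies on a C compound literal), calling av_strerror() directly works too. A small sketch, with the helper name report_av_error being ours:

#include <stdio.h>
#include <libavutil/error.h>

static void report_av_error(const char *what, int err)
{
  char errbuf[AV_ERROR_MAX_STRING_SIZE];
  /* av_strerror() fills errbuf with a description of the AVERROR code. */
  if (av_strerror(err, errbuf, sizeof(errbuf)) < 0) {
    snprintf(errbuf, sizeof(errbuf), "unknown error %d", err);
  }
  fprintf(stderr, "%s: %s\n", what, errbuf);
}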
@@ -1156,13 +1190,11 @@ fail:
    avio_close(of->pb);
  }

  if (context->video_stream && context->video_stream->codec) {
    avcodec_close(context->video_stream->codec);
  if (context->video_stream) {
    context->video_stream = NULL;
  }

  if (context->audio_stream && context->audio_stream->codec) {
    avcodec_close(context->audio_stream->codec);
  if (context->audio_stream) {
    context->audio_stream = NULL;
  }

@@ -1190,46 +1222,39 @@ fail:
 */
static void flush_ffmpeg(FFMpegContext *context)
{
  int ret = 0;
  AVCodecContext *c = context->video_codec;
  AVPacket *packet = av_packet_alloc();

  AVCodecContext *c = context->video_stream->codec;
  /* get the delayed frames */
  while (1) {
    int got_output;
    AVPacket packet = {0};
    av_init_packet(&packet);
  avcodec_send_frame(c, NULL);

    ret = avcodec_encode_video2(c, &packet, NULL, &got_output);
  /* Get the packets frames. */
  int ret = 1;
  while (ret >= 0) {
    ret = avcodec_receive_packet(c, packet);

    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      /* No more packets to flush. */
      break;
    }
    if (ret < 0) {
      fprintf(stderr, "Error encoding delayed frame %d\n", ret);
      fprintf(stderr, "Error encoding delayed frame: %s\n", av_err2str(ret));
      break;
    }
    if (!got_output) {
      break;
    }
    if (packet.pts != AV_NOPTS_VALUE) {
      packet.pts = av_rescale_q(packet.pts, c->time_base, context->video_stream->time_base);
      PRINT("Video Frame PTS: %d\n", (int)packet.pts);
    }
    else {
      PRINT("Video Frame PTS: not set\n");
    }
    if (packet.dts != AV_NOPTS_VALUE) {
      packet.dts = av_rescale_q(packet.dts, c->time_base, context->video_stream->time_base);
      PRINT("Video Frame DTS: %d\n", (int)packet.dts);
    }
    else {
      PRINT("Video Frame DTS: not set\n");
    }

    packet.stream_index = context->video_stream->index;
    ret = av_interleaved_write_frame(context->outfile, &packet);
    if (ret != 0) {
      fprintf(stderr, "Error writing delayed frame %d\n", ret);
    packet->stream_index = context->video_stream->index;
    av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
# ifdef FFMPEG_USE_DURATION_WORKAROUND
    my_guess_pkt_duration(context->outfile, context->video_stream, packet);
# endif

    int write_ret = av_interleaved_write_frame(context->outfile, packet);
    if (write_ret != 0) {
      fprintf(stderr, "Error writing delayed frame: %s\n", av_err2str(write_ret));
      break;
    }
  }
  avcodec_flush_buffers(context->video_stream->codec);

  av_packet_free(&packet);
}
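The rewritten flush loop above is the standard send/receive drain idiom for FFmpeg's modern encoding API: a NULL frame puts the encoder into draining mode, then packets are pulled until EAGAIN or EOF. A generic sketch of the same pattern, assuming an already-opened encoder context, output context and stream, with error handling trimmed:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static void drain_encoder(AVCodecContext *c, AVFormatContext *fmt_ctx, AVStream *stream)
{
  AVPacket *pkt = av_packet_alloc();
  avcodec_send_frame(c, NULL); /* NULL frame enters draining mode. */
  while (avcodec_receive_packet(c, pkt) == 0) {
    /* Convert timestamps from the encoder time base to the stream time base. */
    av_packet_rescale_ts(pkt, c->time_base, stream->time_base);
    pkt->stream_index = stream->index;
    av_interleaved_write_frame(fmt_ctx, pkt);
  }
  av_packet_free(&pkt);
}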

/* **********************************************************************
@@ -1327,7 +1352,8 @@ int BKE_ffmpeg_start(void *context_v,
  success = start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
# ifdef WITH_AUDASPACE
  if (context->audio_stream) {
    AVCodecContext *c = context->audio_stream->codec;
    AVCodecContext *c = context->audio_codec;

    AUD_DeviceSpecs specs;
    specs.channels = c->channels;

@@ -1354,10 +1380,6 @@ int BKE_ffmpeg_start(void *context_v,
    specs.rate = rd->ffcodecdata.audio_mixrate;
    context->audio_mixdown_device = BKE_sound_mixdown(
        scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
# ifdef FFMPEG_CODEC_TIME_BASE
    c->time_base.den = specs.rate;
    c->time_base.num = 1;
# endif
  }
# endif
  return success;
@@ -1398,8 +1420,8 @@ int BKE_ffmpeg_append(void *context_v,
  // write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));

  if (context->video_stream) {
    avframe = generate_video_frame(context, (unsigned char *)pixels, reports);
    success = (avframe && write_video_frame(context, rd, frame - start_frame, avframe, reports));
    avframe = generate_video_frame(context, (unsigned char *)pixels);
    success = (avframe && write_video_frame(context, frame - start_frame, avframe, reports));

    if (context->ffmpeg_autosplit) {
      if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
@@ -1430,7 +1452,7 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
  }
# endif

  if (context->video_stream && context->video_stream->codec) {
  if (context->video_stream) {
    PRINT("Flushing delayed frames...\n");
    flush_ffmpeg(context);
  }
@@ -1441,14 +1463,12 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)

  /* Close the video codec */

  if (context->video_stream != NULL && context->video_stream->codec != NULL) {
    avcodec_close(context->video_stream->codec);
  if (context->video_stream != NULL) {
    PRINT("zero video stream %p\n", context->video_stream);
    context->video_stream = NULL;
  }

  if (context->audio_stream != NULL && context->audio_stream->codec != NULL) {
    avcodec_close(context->audio_stream->codec);
  if (context->audio_stream != NULL) {
    context->audio_stream = NULL;
  }

@@ -1467,6 +1487,16 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
      avio_close(context->outfile->pb);
    }
  }

  if (context->video_codec != NULL) {
    avcodec_free_context(&context->video_codec);
    context->video_codec = NULL;
  }
  if (context->audio_codec != NULL) {
    avcodec_free_context(&context->audio_codec);
    context->audio_codec = NULL;
  }

  if (context->outfile != NULL) {
    avformat_free_context(context->outfile);
    context->outfile = NULL;
@@ -1475,12 +1505,6 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
    av_free(context->audio_input_buffer);
    context->audio_input_buffer = NULL;
  }
# ifndef FFMPEG_HAVE_ENCODE_AUDIO2
  if (context->audio_output_buffer != NULL) {
    av_free(context->audio_output_buffer);
    context->audio_output_buffer = NULL;
  }
# endif

  if (context->audio_deinterleave_buffer != NULL) {
    av_free(context->audio_deinterleave_buffer);
@@ -1560,12 +1584,12 @@ static IDProperty *BKE_ffmpeg_property_add(RenderData *rd,
  switch (o->type) {
    case AV_OPT_TYPE_INT:
    case AV_OPT_TYPE_INT64:
      val.i = FFMPEG_DEF_OPT_VAL_INT(o);
      val.i = o->default_val.i64;
      idp_type = IDP_INT;
      break;
    case AV_OPT_TYPE_DOUBLE:
    case AV_OPT_TYPE_FLOAT:
      val.f = FFMPEG_DEF_OPT_VAL_DOUBLE(o);
      val.f = o->default_val.dbl;
      idp_type = IDP_FLOAT;
      break;
    case AV_OPT_TYPE_STRING:
@@ -1669,56 +1693,7 @@ static void ffmpeg_set_expert_options(RenderData *rd)
    IDP_FreePropertyContent(rd->ffcodecdata.properties);
  }

  if (codec_id == AV_CODEC_ID_H264) {
    /*
     * All options here are for x264, but must be set via ffmpeg.
     * The names are therefore different - Search for "x264 to FFmpeg option mapping"
     * to get a list.
     */

    /*
     * Use CABAC coder. Using "coder:1", which should be equivalent,
     * crashes Blender for some reason. Either way - this is no big deal.
     */
    BKE_ffmpeg_property_add_string(rd, "video", "coder:vlc");

    /*
     * The other options were taken from the libx264-default.preset
     * included in the ffmpeg distribution.
     */

    /* This breaks compatibility for QT. */
    // BKE_ffmpeg_property_add_string(rd, "video", "flags:loop");
    BKE_ffmpeg_property_add_string(rd, "video", "cmp:chroma");
    BKE_ffmpeg_property_add_string(rd, "video", "partitions:parti4x4"); /* Deprecated. */
    BKE_ffmpeg_property_add_string(rd, "video", "partitions:partp8x8"); /* Deprecated. */
    BKE_ffmpeg_property_add_string(rd, "video", "partitions:partb8x8"); /* Deprecated. */
    BKE_ffmpeg_property_add_string(rd, "video", "me:hex");
    BKE_ffmpeg_property_add_string(rd, "video", "subq:6");
    BKE_ffmpeg_property_add_string(rd, "video", "me_range:16");
    BKE_ffmpeg_property_add_string(rd, "video", "qdiff:4");
    BKE_ffmpeg_property_add_string(rd, "video", "keyint_min:25");
    BKE_ffmpeg_property_add_string(rd, "video", "sc_threshold:40");
    BKE_ffmpeg_property_add_string(rd, "video", "i_qfactor:0.71");
    BKE_ffmpeg_property_add_string(rd, "video", "b_strategy:1");
    BKE_ffmpeg_property_add_string(rd, "video", "bf:3");
    BKE_ffmpeg_property_add_string(rd, "video", "refs:2");
    BKE_ffmpeg_property_add_string(rd, "video", "qcomp:0.6");

    BKE_ffmpeg_property_add_string(rd, "video", "trellis:0");
    BKE_ffmpeg_property_add_string(rd, "video", "weightb:1");
# ifdef FFMPEG_HAVE_DEPRECATED_FLAGS2
    BKE_ffmpeg_property_add_string(rd, "video", "flags2:dct8x8");
    BKE_ffmpeg_property_add_string(rd, "video", "directpred:3");
    BKE_ffmpeg_property_add_string(rd, "video", "flags2:fastpskip");
    BKE_ffmpeg_property_add_string(rd, "video", "flags2:wpred");
# else
    BKE_ffmpeg_property_add_string(rd, "video", "8x8dct:1");
    BKE_ffmpeg_property_add_string(rd, "video", "fast-pskip:1");
    BKE_ffmpeg_property_add_string(rd, "video", "wpredp:2");
# endif
  }
  else if (codec_id == AV_CODEC_ID_DNXHD) {
  if (codec_id == AV_CODEC_ID_DNXHD) {
    if (rd->ffcodecdata.flags & FFMPEG_LOSSLESS_OUTPUT) {
      BKE_ffmpeg_property_add_string(rd, "video", "mbd:rd");
    }
@@ -1871,14 +1846,12 @@ bool BKE_ffmpeg_alpha_channel_is_supported(const RenderData *rd)
{
  int codec = rd->ffcodecdata.codec;

# ifdef FFMPEG_FFV1_ALPHA_SUPPORTED
  /* Visual Studio 2019 doesn't like #ifdef within ELEM(). */
  if (codec == AV_CODEC_ID_FFV1) {
    return true;
  }
# endif

  return ELEM(codec, AV_CODEC_ID_QTRLE, AV_CODEC_ID_PNG, AV_CODEC_ID_VP9, AV_CODEC_ID_HUFFYUV);
  return ELEM(codec,
              AV_CODEC_ID_FFV1,
              AV_CODEC_ID_QTRLE,
              AV_CODEC_ID_PNG,
              AV_CODEC_ID_VP9,
              AV_CODEC_ID_HUFFYUV);
}

void *BKE_ffmpeg_context_create(void)
@@ -553,7 +553,8 @@ static void bm_uuidwalk_pass_add(UUIDWalk *uuidwalk,

static int bm_face_len_cmp(const void *v1, const void *v2)
{
  const BMFace *f1 = v1, *f2 = v2;
  const BMFace *f1 = *((BMFace **)v1);
  const BMFace *f2 = *((BMFace **)v2);

  if (f1->len > f2->len) {
    return 1;
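The comparator fix above is the classic qsort() pointer-level bug: qsort() hands the comparator pointers to the array elements, so for an array of BMFace pointers the void pointers are really BMFace **. A minimal standalone sketch of the corrected pattern, with Face being an illustrative stand-in type:

#include <stdlib.h>

typedef struct Face {
  int len;
} Face;

/* Elements are Face *, so dereference the void pointers as Face ** first. */
static int face_len_cmp(const void *v1, const void *v2)
{
  const Face *f1 = *(Face *const *)v1;
  const Face *f2 = *(Face *const *)v2;
  /* Branch-free equivalent of the 1 / -1 / 0 comparison above. */
  return (f1->len > f2->len) - (f1->len < f2->len);
}

/* Usage: qsort(faces, faces_len, sizeof(Face *), face_len_cmp); */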
@@ -77,10 +77,10 @@ void CryptomatteBaseNode::convertToOperations(NodeConverter &converter,

/** \name Cryptomatte V2
 * \{ */
static std::string prefix_from_node(const bNode &node)
static std::string prefix_from_node(const CompositorContext &context, const bNode &node)
{
  char prefix[MAX_NAME];
  ntreeCompositCryptomatteLayerPrefix(&node, prefix, sizeof(prefix));
  ntreeCompositCryptomatteLayerPrefix(context.getScene(), &node, prefix, sizeof(prefix));
  return std::string(prefix, BLI_strnlen(prefix, sizeof(prefix)));
}

@@ -118,9 +118,9 @@ void CryptomatteNode::input_operations_from_render_source(
    return;
  }

  const short cryptomatte_layer_id = 0;
  const std::string prefix = prefix_from_node(node);
  LISTBASE_FOREACH (ViewLayer *, view_layer, &scene->view_layers) {
  short view_layer_id = 0;
  const std::string prefix = prefix_from_node(context, node);
  LISTBASE_FOREACH_INDEX (ViewLayer *, view_layer, &scene->view_layers, view_layer_id) {
    RenderLayer *render_layer = RE_GetRenderLayer(render_result, view_layer->name);
    if (render_layer) {
      LISTBASE_FOREACH (RenderPass *, render_pass, &render_layer->passes) {
@@ -129,7 +129,7 @@ void CryptomatteNode::input_operations_from_render_source(
        RenderLayersProg *op = new RenderLayersProg(
            render_pass->name, DataType::Color, render_pass->channels);
        op->setScene(scene);
        op->setLayerId(cryptomatte_layer_id);
        op->setLayerId(view_layer_id);
        op->setRenderData(context.getRenderData());
        op->setViewName(context.getViewName());
        r_input_operations.append(op);
@@ -177,7 +177,7 @@ void CryptomatteNode::input_operations_from_image_source(
    }
  }

  const std::string prefix = prefix_from_node(node);
  const std::string prefix = prefix_from_node(context, node);
  int layer_index;
  LISTBASE_FOREACH_INDEX (RenderLayer *, render_layer, &image->rr->layers, layer_index) {
    if (!blender::StringRef(prefix).startswith(blender::StringRef(
@@ -79,7 +79,7 @@ bool EEVEE_renderpasses_only_first_sample_pass_active(EEVEE_Data *vedata)
 * type the rest of the bits are used for the name hash. */
int EEVEE_renderpasses_aov_hash(const ViewLayerAOV *aov)
{
  int hash = BLI_hash_string(aov->name);
  int hash = BLI_hash_string(aov->name) << 1;
  SET_FLAG_FROM_TEST(hash, aov->type == AOV_TYPE_COLOR, EEVEE_AOV_HASH_COLOR_TYPE_MASK);
  return hash;
}
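The one-bit shift above makes the hash layout explicit: bit 0 carries the AOV type flag and the remaining bits carry the name hash, so a value AOV and a color AOV with the same name can no longer collide. An illustrative standalone sketch of the same packing, with a stand-in string hash instead of BLI_hash_string():

#include <stdbool.h>

static unsigned int hash_string(const char *s)
{
  unsigned int h = 5381; /* djb2-style toy hash, for illustration only. */
  for (; *s; s++) {
    h = h * 33 + (unsigned char)*s;
  }
  return h;
}

static unsigned int aov_hash(const char *name, bool is_color)
{
  unsigned int hash = hash_string(name) << 1; /* Reserve bit 0 for the type. */
  if (is_color) {
    hash |= 1u; /* The real code sets this through a mask constant. */
  }
  return hash;
}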
@@ -297,7 +297,9 @@ typedef struct GPENCIL_PrivateData {
  int v3d_color_type;
  /* Current frame */
  int cfra;
  /* If we are rendering for final render (F12). */
  /* If we are rendering for final render (F12).
   * NOTE: set to false for viewport and opengl rendering (including VSE scene rendering), but set
   * to true when rendering in `OB_RENDER` shading mode (viewport or opengl rendering) */
  bool is_render;
  /* If we are in viewport display (used for VFX). */
  bool is_viewport;
@@ -118,7 +118,8 @@ static bool eyedropper_init(bContext *C, wmOperator *op)
  RNA_property_float_get_array(&eye->ptr, eye->prop, col);
  if (eye->ptr.type == &RNA_CompositorNodeCryptomatteV2) {
    eye->crypto_node = (bNode *)eye->ptr.data;
    eye->cryptomatte_session = ntreeCompositCryptomatteSession(eye->crypto_node);
    eye->cryptomatte_session = ntreeCompositCryptomatteSession(CTX_data_scene(C),
                                                               eye->crypto_node);
    eye->draw_handle_sample_text = WM_draw_cb_activate(CTX_wm_window(C), eyedropper_draw_cb, eye);
  }

@@ -199,6 +200,57 @@ static bool eyedropper_cryptomatte_sample_renderlayer_fl(RenderLayer *render_lay

  return false;
}
static bool eyedropper_cryptomatte_sample_render_fl(const bNode *node,
                                                    const char *prefix,
                                                    const float fpos[2],
                                                    float r_col[3])
{
  bool success = false;
  Scene *scene = (Scene *)node->id;
  BLI_assert(GS(scene->id.name) == ID_SCE);
  Render *re = RE_GetSceneRender(scene);

  if (re) {
    RenderResult *rr = RE_AcquireResultRead(re);
    if (rr) {
      LISTBASE_FOREACH (ViewLayer *, view_layer, &scene->view_layers) {
        RenderLayer *render_layer = RE_GetRenderLayer(rr, view_layer->name);
        success = eyedropper_cryptomatte_sample_renderlayer_fl(render_layer, prefix, fpos, r_col);
        if (success) {
          break;
        }
      }
    }
    RE_ReleaseResult(re);
  }
  return success;
}

static bool eyedropper_cryptomatte_sample_image_fl(const bNode *node,
                                                   NodeCryptomatte *crypto,
                                                   const char *prefix,
                                                   const float fpos[2],
                                                   float r_col[3])
{
  bool success = false;
  Image *image = (Image *)node->id;
  BLI_assert(GS(image->id.name) == ID_IM);
  ImageUser *iuser = &crypto->iuser;

  if (image && image->type == IMA_TYPE_MULTILAYER) {
    ImBuf *ibuf = BKE_image_acquire_ibuf(image, iuser, NULL);
    if (image->rr) {
      LISTBASE_FOREACH (RenderLayer *, render_layer, &image->rr->layers) {
        success = eyedropper_cryptomatte_sample_renderlayer_fl(render_layer, prefix, fpos, r_col);
        if (success) {
          break;
        }
      }
    }
    BKE_image_release_ibuf(image, ibuf, NULL);
  }
  return success;
}

static bool eyedropper_cryptomatte_sample_fl(
    bContext *C, Eyedropper *eye, int mx, int my, float r_col[3])
@@ -255,53 +307,19 @@ static bool eyedropper_cryptomatte_sample_fl(
    return false;
  }

  bool success = false;
  /* TODO(jbakker): Migrate this file to cc and use std::string as return param. */
  char prefix[MAX_NAME + 1];
  ntreeCompositCryptomatteLayerPrefix(node, prefix, sizeof(prefix) - 1);
  const Scene *scene = CTX_data_scene(C);
  ntreeCompositCryptomatteLayerPrefix(scene, node, prefix, sizeof(prefix) - 1);
  prefix[MAX_NAME] = '\0';

  if (node->custom1 == CMP_CRYPTOMATTE_SRC_RENDER) {
    Scene *scene = (Scene *)node->id;
    BLI_assert(GS(scene->id.name) == ID_SCE);
    Render *re = RE_GetSceneRender(scene);

    if (re) {
      RenderResult *rr = RE_AcquireResultRead(re);
      if (rr) {
        LISTBASE_FOREACH (ViewLayer *, view_layer, &scene->view_layers) {
          RenderLayer *render_layer = RE_GetRenderLayer(rr, view_layer->name);
          success = eyedropper_cryptomatte_sample_renderlayer_fl(
              render_layer, prefix, fpos, r_col);
          if (success) {
            break;
          }
        }
      }
      RE_ReleaseResult(re);
    }
    return eyedropper_cryptomatte_sample_render_fl(node, prefix, fpos, r_col);
  }
  else if (node->custom1 == CMP_CRYPTOMATTE_SRC_IMAGE) {
    Image *image = (Image *)node->id;
    BLI_assert(GS(image->id.name) == ID_IM);
    ImageUser *iuser = &crypto->iuser;

    if (image && image->type == IMA_TYPE_MULTILAYER) {
      ImBuf *ibuf = BKE_image_acquire_ibuf(image, iuser, NULL);
      if (image->rr) {
        LISTBASE_FOREACH (RenderLayer *, render_layer, &image->rr->layers) {
          success = eyedropper_cryptomatte_sample_renderlayer_fl(
              render_layer, prefix, fpos, r_col);
          if (success) {
            break;
          }
        }
      }
      BKE_image_release_ibuf(image, ibuf, NULL);
    }
    return eyedropper_cryptomatte_sample_image_fl(node, crypto, prefix, fpos, r_col);
  }

  return success;
  return false;
}

/**
@@ -3890,6 +3890,13 @@ static void ui_do_but_textedit_select(
/** \name Button Number Editing (various types)
 * \{ */

static void ui_numedit_begin_set_values(uiBut *but, uiHandleButtonData *data)
{
  data->startvalue = ui_but_value_get(but);
  data->origvalue = data->startvalue;
  data->value = data->origvalue;
}

static void ui_numedit_begin(uiBut *but, uiHandleButtonData *data)
{
  if (but->type == UI_BTYPE_CURVE) {
@@ -3915,16 +3922,12 @@ static void ui_numedit_begin(uiBut *but, uiHandleButtonData *data)
    but->editvec = data->vec;
  }
  else {
    float softrange, softmin, softmax;

    data->startvalue = ui_but_value_get(but);
    data->origvalue = data->startvalue;
    data->value = data->origvalue;
    ui_numedit_begin_set_values(but, data);
    but->editval = &data->value;

    softmin = but->softmin;
    softmax = but->softmax;
    softrange = softmax - softmin;
    float softmin = but->softmin;
    float softmax = but->softmax;
    float softrange = softmax - softmin;

    if ((but->type == UI_BTYPE_NUM) && (ui_but_is_cursor_warp(but) == false)) {
      uiButNumber *number_but = (uiButNumber *)but;
@@ -4643,7 +4643,7 @@ static int edbm_select_random_exec(bContext *C, wmOperator *op)
      }
    }

    BLI_array_randomize(elem_map, sizeof(*elem_map), elem_map_len, seed);
    BLI_array_randomize(elem_map, sizeof(*elem_map), elem_map_len, seed_iter);
    const int count_select = elem_map_len * randfac;
    for (int i = 0; i < count_select; i++) {
      BM_vert_select_set(em->bm, elem_map[i], select);
@@ -4659,7 +4659,7 @@ static int edbm_select_random_exec(bContext *C, wmOperator *op)
        elem_map[elem_map_len++] = eed;
      }
    }
    BLI_array_randomize(elem_map, sizeof(*elem_map), elem_map_len, seed);
    BLI_array_randomize(elem_map, sizeof(*elem_map), elem_map_len, seed_iter);
    const int count_select = elem_map_len * randfac;
    for (int i = 0; i < count_select; i++) {
      BM_edge_select_set(em->bm, elem_map[i], select);
@@ -4675,7 +4675,7 @@ static int edbm_select_random_exec(bContext *C, wmOperator *op)
        elem_map[elem_map_len++] = efa;
      }
    }
    BLI_array_randomize(elem_map, sizeof(*elem_map), elem_map_len, seed);
    BLI_array_randomize(elem_map, sizeof(*elem_map), elem_map_len, seed_iter);
    const int count_select = elem_map_len * randfac;
    for (int i = 0; i < count_select; i++) {
      BM_face_select_set(em->bm, elem_map[i], select);
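The switch from seed to seed_iter matters in multi-object edit mode: reusing a single seed gives every object the same "random" selection pattern. A hedged sketch of the idea, using srand()/rand() and an index-offset seed purely for illustration; Blender's own code uses BLI_array_randomize() and may derive seed_iter differently:

#include <stdlib.h>

/* Fisher-Yates shuffle of an index array, seeded per call. */
static void shuffle_indices(int *idx, int count, unsigned int seed)
{
  srand(seed);
  for (int i = count - 1; i > 0; i--) {
    int j = rand() % (i + 1);
    int tmp = idx[i];
    idx[i] = idx[j];
    idx[j] = tmp;
  }
}

static void select_random_multi(int **obj_elems, const int *counts, int objects_len,
                                unsigned int seed)
{
  for (int ob = 0; ob < objects_len; ob++) {
    unsigned int seed_iter = seed + (unsigned int)ob; /* Different stream per object. */
    shuffle_indices(obj_elems[ob], counts[ob], seed_iter);
    /* The first fraction of each shuffled array becomes the selection. */
  }
}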
@@ -1416,6 +1416,14 @@ ScrArea *ED_screen_state_toggle(bContext *C, wmWindow *win, ScrArea *area, const
  BLI_assert(CTX_wm_screen(C) == screen);
  BLI_assert(CTX_wm_area(C) == NULL); /* May have been freed. */

  /* Setting the area is only needed for Python scripts that call
   * operators in succession before returning to the main event loop.
   * Without this, scripts can't run any operators that require
   * an area after toggling full-screen for example (see: T89526).
   * NOTE: an old comment stated this was "bad code",
   * however it doesn't cause problems so leave as-is. */
  CTX_wm_area_set(C, screen->areabase.first);

  return screen->areabase.first;
}

@@ -1191,7 +1191,7 @@ static void paint_line_strokes_spacing(bContext *C,

  const bool use_scene_spacing = paint_stroke_use_scene_spacing(brush, mode);

  float mouse[2], dmouse[2];
  float mouse[3], dmouse[2];
  float length;
  float d_world_space_position[3] = {0.0f};
  float world_space_position_old[3], world_space_position_new[3];
@@ -154,7 +154,9 @@ static bool buttons_context_path_world(ButsContextPath *path)
  return false;
}

static bool buttons_context_path_collection(ButsContextPath *path, wmWindow *window)
static bool buttons_context_path_collection(const bContext *C,
                                            ButsContextPath *path,
                                            wmWindow *window)
{
  PointerRNA *ptr = &path->ptr[path->len - 1];

@@ -162,10 +164,19 @@ static bool buttons_context_path_collection(ButsContextPath *path, wmWindow *win
  if (RNA_struct_is_a(ptr->type, &RNA_Collection)) {
    return true;
  }

  Scene *scene = CTX_data_scene(C);

  /* if we have a view layer, use the view layer's active collection */
  if (buttons_context_path_view_layer(path, window)) {
    ViewLayer *view_layer = path->ptr[path->len - 1].data;
    Collection *c = view_layer->active_collection->collection;

    /* Do not show collection tab for master collection. */
    if (c == scene->master_collection) {
      return false;
    }

    if (c) {
      RNA_id_pointer_create(&c->id, &path->ptr[path->len]);
      path->len++;
@@ -600,7 +611,7 @@ static bool buttons_context_path(
      found = buttons_context_path_world(path);
      break;
    case BCONTEXT_COLLECTION: /* This is for Line Art collection flags */
      found = buttons_context_path_collection(path, window);
      found = buttons_context_path_collection(C, path, window);
      break;
    case BCONTEXT_TOOL:
      found = true;
@@ -202,11 +202,11 @@ int ED_buttons_tabs_list(SpaceProperties *sbuts, short *context_tabs_array)
    context_tabs_array[length] = BCONTEXT_WORLD;
    length++;
  }
  if (length != 0) {
    context_tabs_array[length] = -1;
    length++;
  }
  if (sbuts->pathflag & (1 << BCONTEXT_COLLECTION)) {
    if (length != 0) {
      context_tabs_array[length] = -1;
      length++;
    }
    context_tabs_array[length] = BCONTEXT_COLLECTION;
    length++;
  }
@@ -928,6 +928,7 @@ static void v3d_editvertex_buts(uiLayout *layout, View3D *v3d, Object *ob, float

  if (apply_vcos) {
    EDBM_mesh_normals_update(em);
    BKE_editmesh_looptri_calc(em);
  }

  /* Edges */
@@ -2029,6 +2029,9 @@ ImBuf *ED_view3d_draw_offscreen_imbuf_simple(Depsgraph *depsgraph,
  }

  v3d.flag2 = V3D_HIDE_OVERLAYS;
  /* HACK: When rendering gpencil objects this opacity is used to mix vertex colors in when not in
   * render mode. */
  v3d.overlay.gpencil_vertex_paint_opacity = 1.0f;

  if (draw_flags & V3D_OFSDRAW_SHOW_ANNOTATION) {
    v3d.flag2 |= V3D_SHOW_ANNOTATION;
@@ -1471,7 +1471,7 @@ static int uv_hide_exec(bContext *C, wmOperator *op)
      if (EDBM_mesh_hide(em, swap)) {
        EDBM_update_generic(ob->data, true, false);
      }
      return OPERATOR_FINISHED;
      continue;
    }

    BM_ITER_MESH (efa, &iter, em->bm, BM_FACES_OF_MESH) {
@@ -1609,7 +1609,7 @@ static int uv_reveal_exec(bContext *C, wmOperator *op)
      if (EDBM_mesh_reveal(em, select)) {
        EDBM_update_generic(ob->data, true, false);
      }
      return OPERATOR_FINISHED;
      continue;
    }
    if (use_face_center) {
      if (em->selectmode == SCE_SELECT_FACE) {
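The return-to-continue change above is a common multi-object edit-mode fix: returning from inside the object loop processes only the first object that hits the fast path, while continue lets the remaining objects be handled too. A self-contained sketch with illustrative stand-in types and helpers, not Blender API:

#include <stdbool.h>
#include <stdio.h>

typedef struct Object {
  const char *name;
  bool hidden;
} Object;

static bool reveal_fast_path(Object *ob)
{
  return ob->hidden;
}

static void reveal_all(Object **objects, int objects_len)
{
  for (int i = 0; i < objects_len; i++) {
    Object *ob = objects[i];
    if (reveal_fast_path(ob)) {
      ob->hidden = false;
      continue; /* Was `return` in the buggy version: later objects were skipped. */
    }
    printf("slow path for %s\n", ob->name);
  }
}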
@@ -748,10 +748,10 @@ static void lineart_triangle_cull_single(LineartRenderBuffer *rb,
  e = new_e;

#define INCREASE_RL \
  e_count++; \
  v1_obi = e->v1_obindex; \
  v2_obi = e->v2_obindex; \
  new_e = &((LineartEdge *)e_eln->pointer)[e_count]; \
  e_count++; \
  e = new_e; \
  e->v1_obindex = v1_obi; \
  e->v2_obindex = v2_obi; \
@@ -3656,6 +3656,8 @@ bool MOD_lineart_compute_feature_lines(Depsgraph *depsgraph, LineartGpencilModif
  Scene *scene = DEG_get_evaluated_scene(depsgraph);
  int intersections_only = 0; /* Not used right now, but preserve for future. */

  BKE_scene_camera_switch_update(scene);

  if (!scene->camera) {
    return false;
  }
@@ -359,7 +359,6 @@ typedef enum eGPUBuiltinShader {
  GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, /* Uniformly scaled */
  /* grease pencil drawing */
  GPU_SHADER_GPENCIL_STROKE,
  GPU_SHADER_GPENCIL_FILL,
  /* specialized for widget drawing */
  GPU_SHADER_2D_AREA_EDGES,
  GPU_SHADER_2D_WIDGET_BASE,
@@ -38,6 +38,17 @@ namespace blender::gpu {
/** \name Platform
 * \{ */

static bool match_renderer(StringRef renderer, const Vector<std::string> &items)
{
  for (const std::string &item : items) {
    const std::string wrapped = " " + item + " ";
    if (renderer.endswith(item) || renderer.find(wrapped) != StringRef::not_found) {
      return true;
    }
  }
  return false;
}

void GLBackend::platform_init()
{
  BLI_assert(!GPG.initialized);
@@ -282,14 +293,25 @@ static void detect_workarounds()
   * The work around uses `GPU_RGBA16I`.
   */
  if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_OFFICIAL)) {
    if (strstr(renderer, " RX 460 ") || strstr(renderer, " RX 470 ") ||
        strstr(renderer, " RX 480 ") || strstr(renderer, " RX 490 ") ||
        strstr(renderer, " RX 560 ") || strstr(renderer, " RX 560X ") ||
        strstr(renderer, " RX 570 ") || strstr(renderer, " RX 580 ") ||
        strstr(renderer, " RX 580X ") || strstr(renderer, " RX 590 ") ||
        strstr(renderer, " RX550/550 ") || strstr(renderer, "(TM) 520 ") ||
        strstr(renderer, "(TM) 530 ") || strstr(renderer, "(TM) 535 ") ||
        strstr(renderer, " R5 ") || strstr(renderer, " R7 ") || strstr(renderer, " R9 ")) {
    const Vector<std::string> matches = {"RX 460",
                                         "RX 470",
                                         "RX 480",
                                         "RX 490",
                                         "RX 560",
                                         "RX 560X",
                                         "RX 570",
                                         "RX 580",
                                         "RX 580X",
                                         "RX 590",
                                         "RX550/550",
                                         "(TM) 520",
                                         "(TM) 530",
                                         "(TM) 535",
                                         "R5",
                                         "R7",
                                         "R9"};

    if (match_renderer(renderer, matches)) {
      GCaps.use_hq_normals_workaround = true;
    }
  }
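The point of the " item " wrapping in match_renderer() above is whole-token matching: "R5" should not fire inside "RX 580". A plain-C transcription of the same two checks (ends-with, or contains the space-wrapped token), purely illustrative since the real helper uses blender::StringRef and Vector:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool renderer_matches(const char *renderer, const char *const *items, int items_len)
{
  for (int i = 0; i < items_len; i++) {
    char wrapped[128];
    snprintf(wrapped, sizeof(wrapped), " %s ", items[i]);
    size_t len = strlen(renderer);
    size_t ilen = strlen(items[i]);
    /* endswith(item) || contains(" item ") -- same checks as match_renderer(). */
    if ((len >= ilen && strcmp(renderer + len - ilen, items[i]) == 0) ||
        strstr(renderer, wrapped) != NULL) {
      return true;
    }
  }
  return false;
}

/* "AMD Radeon RX 580 Series" matches "RX 580" via " RX 580 ", but not "R5". */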
@@ -7,7 +7,7 @@ flat in int interp_size;

out vec4 fragColor;

uniform sampler1DArray glyph;
uniform sampler2D glyph;

const vec2 offsets4[4] = vec2[4](
    vec2(-0.5, 0.5), vec2(0.5, 0.5), vec2(-0.5, -0.5), vec2(-0.5, -0.5));
@@ -166,12 +166,6 @@ if(WITH_CODEC_FFMPEG)
    ${OPENJPEG_LIBRARIES}
  )
  add_definitions(-DWITH_FFMPEG)

  remove_strict_c_flags_file(
    intern/anim_movie.c
    intern/indexer.c
    intern/util.c
  )
endif()

if(WITH_IMAGE_DDS)
@@ -380,8 +380,6 @@ bool IMB_anim_can_produce_frames(const struct anim *anim);
 */

int ismovie(const char *filepath);
void IMB_anim_set_preseek(struct anim *anim, int preseek);
int IMB_anim_get_preseek(struct anim *anim);
int IMB_anim_get_image_width(struct anim *anim);
int IMB_anim_get_image_height(struct anim *anim);

@@ -87,7 +87,7 @@ struct anim_index;
struct anim {
  int ib_flags;
  int curtype;
  int curposition; /* index 0 = 1e, 1 = 2e, enz. */
  int cur_position; /* index 0 = 1e, 1 = 2e, enz. */
  int duration_in_frames;
  int frs_sec;
  double frs_sec_base;
@@ -105,7 +105,6 @@ struct anim {
  int orientation;
  size_t framesize;
  int interlacing;
  int preseek;
  int streamindex;

  /* avi */
@@ -132,10 +131,10 @@ struct anim {
  struct SwsContext *img_convert_ctx;
  int videoStream;

  struct ImBuf *last_frame;
  int64_t last_pts;
  int64_t next_pts;
  AVPacket next_packet;
  struct ImBuf *cur_frame_final;
  int64_t cur_pts;
  int64_t cur_key_frame_pts;
  AVPacket *cur_packet;
#endif

  char index_dir[768];
@@ -49,6 +49,7 @@
typedef struct anim_index_entry {
  int frameno;
  uint64_t seek_pos;
  uint64_t seek_pos_pts;
  uint64_t seek_pos_dts;
  uint64_t pts;
} anim_index_entry;
@@ -77,14 +78,19 @@ typedef struct anim_index_builder {
} anim_index_builder;

anim_index_builder *IMB_index_builder_create(const char *name);
void IMB_index_builder_add_entry(
    anim_index_builder *fp, int frameno, uint64_t seek_pos, uint64_t seek_pos_dts, uint64_t pts);
void IMB_index_builder_add_entry(anim_index_builder *fp,
                                 int frameno,
                                 uint64_t seek_pos,
                                 uint64_t seek_pos_pts,
                                 uint64_t seek_pos_dts,
                                 uint64_t pts);

void IMB_index_builder_proc_frame(anim_index_builder *fp,
                                  unsigned char *buffer,
                                  int data_size,
                                  int frameno,
                                  uint64_t seek_pos,
                                  uint64_t seek_pos_pts,
                                  uint64_t seek_pos_dts,
                                  uint64_t pts);

@@ -92,6 +98,7 @@ void IMB_index_builder_finish(anim_index_builder *fp, int rollback);

struct anim_index *IMB_indexer_open(const char *name);
uint64_t IMB_indexer_get_seek_pos(struct anim_index *idx, int frame_index);
uint64_t IMB_indexer_get_seek_pos_pts(struct anim_index *idx, int frame_index);
uint64_t IMB_indexer_get_seek_pos_dts(struct anim_index *idx, int frame_index);

int IMB_indexer_get_frame_index(struct anim_index *idx, int frameno);
@@ -79,6 +79,7 @@

# include <libavcodec/avcodec.h>
# include <libavformat/avformat.h>
# include <libavutil/imgutils.h>
# include <libavutil/rational.h>
# include <libswscale/swscale.h>

@@ -432,8 +433,7 @@ static int startavi(struct anim *anim)
  anim->orientation = 0;
  anim->framesize = anim->x * anim->y * 4;

  anim->curposition = 0;
  anim->preseek = 0;
  anim->cur_position = 0;

# if 0
  printf("x:%d y:%d size:%d interl:%d dur:%d\n",
@@ -519,12 +519,10 @@ static int startffmpeg(struct anim *anim)
  double frs_den;
  int streamcount;

# ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
  /* The following for color space determination */
  int srcRange, dstRange, brightness, contrast, saturation;
  int *table;
  const int *inv_table;
# endif

  if (anim == NULL) {
    return (-1);
@@ -547,7 +545,7 @@ static int startffmpeg(struct anim *anim)
  video_stream_index = -1;

  for (i = 0; i < pFormatCtx->nb_streams; i++) {
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
    if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
      if (streamcount > 0) {
        streamcount--;
        continue;
@@ -563,16 +561,17 @@ static int startffmpeg(struct anim *anim)
  }

  video_stream = pFormatCtx->streams[video_stream_index];
  pCodecCtx = video_stream->codec;

  /* Find the decoder for the video stream */
  pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
  pCodec = avcodec_find_decoder(video_stream->codecpar->codec_id);
  if (pCodec == NULL) {
    avformat_close_input(&pFormatCtx);
    return -1;
  }

  pCodecCtx->workaround_bugs = 1;
  pCodecCtx = avcodec_alloc_context3(NULL);
  avcodec_parameters_to_context(pCodecCtx, video_stream->codecpar);
  pCodecCtx->workaround_bugs = FF_BUG_AUTODETECT;

  if (pCodec->capabilities & AV_CODEC_CAP_AUTO_THREADS) {
    pCodecCtx->thread_count = 0;
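The hunk above follows the modern (FFmpeg 3.x and later) decoder setup: the codec context is no longer embedded in AVStream, it is allocated separately and filled from the stream's AVCodecParameters. A condensed sketch of the same sequence, assuming fmt_ctx was opened with avformat_open_input() and stream_index points at a video stream:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static AVCodecContext *open_decoder(AVFormatContext *fmt_ctx, int stream_index)
{
  AVStream *stream = fmt_ctx->streams[stream_index];
  const AVCodec *decoder = avcodec_find_decoder(stream->codecpar->codec_id);
  if (decoder == NULL) {
    return NULL;
  }
  AVCodecContext *codec_ctx = avcodec_alloc_context3(NULL);
  /* Copy width/height/pix_fmt/extradata etc. from the stream parameters. */
  avcodec_parameters_to_context(codec_ctx, stream->codecpar);
  if (avcodec_open2(codec_ctx, decoder, NULL) < 0) {
    avcodec_free_context(&codec_ctx);
    return NULL;
  }
  return codec_ctx;
}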
@@ -593,7 +592,7 @@ static int startffmpeg(struct anim *anim)
    return -1;
  }
  if (pCodecCtx->pix_fmt == AV_PIX_FMT_NONE) {
    avcodec_close(anim->pCodecCtx);
    avcodec_free_context(&anim->pCodecCtx);
    avformat_close_input(&pFormatCtx);
    return -1;
  }
@@ -639,7 +638,7 @@ static int startffmpeg(struct anim *anim)
  anim->params = 0;

  anim->x = pCodecCtx->width;
  anim->y = av_get_cropped_height_from_codec(pCodecCtx);
  anim->y = pCodecCtx->height;

  anim->pFormatCtx = pFormatCtx;
  anim->pCodecCtx = pCodecCtx;
@@ -650,11 +649,12 @@ static int startffmpeg(struct anim *anim)
  anim->orientation = 0;
  anim->framesize = anim->x * anim->y * 4;

  anim->curposition = -1;
  anim->last_frame = 0;
  anim->last_pts = -1;
  anim->next_pts = -1;
  anim->next_packet.stream_index = -1;
  anim->cur_position = -1;
  anim->cur_frame_final = 0;
  anim->cur_pts = -1;
  anim->cur_key_frame_pts = -1;
  anim->cur_packet = av_packet_alloc();
  anim->cur_packet->stream_index = -1;

  anim->pFrame = av_frame_alloc();
  anim->pFrameComplete = false;
@@ -668,8 +668,9 @@ static int startffmpeg(struct anim *anim)

    if (av_frame_get_buffer(anim->pFrameRGB, 32) < 0) {
      fprintf(stderr, "Could not allocate frame data.\n");
      avcodec_close(anim->pCodecCtx);
      avcodec_free_context(&anim->pCodecCtx);
      avformat_close_input(&anim->pFormatCtx);
      av_packet_free(&anim->cur_packet);
      av_frame_free(&anim->pFrameRGB);
      av_frame_free(&anim->pFrameDeinterlaced);
      av_frame_free(&anim->pFrame);
@@ -678,10 +679,11 @@ static int startffmpeg(struct anim *anim)
    }
  }

  if (avpicture_get_size(AV_PIX_FMT_RGBA, anim->x, anim->y) != anim->x * anim->y * 4) {
  if (av_image_get_buffer_size(AV_PIX_FMT_RGBA, anim->x, anim->y, 1) != anim->x * anim->y * 4) {
    fprintf(stderr, "ffmpeg has changed alloc scheme ... ARGHHH!\n");
    avcodec_close(anim->pCodecCtx);
    avcodec_free_context(&anim->pCodecCtx);
    avformat_close_input(&anim->pFormatCtx);
    av_packet_free(&anim->cur_packet);
    av_frame_free(&anim->pFrameRGB);
    av_frame_free(&anim->pFrameDeinterlaced);
    av_frame_free(&anim->pFrame);
@@ -690,21 +692,17 @@ static int startffmpeg(struct anim *anim)
  }

  if (anim->ib_flags & IB_animdeinterlace) {
    avpicture_fill((AVPicture *)anim->pFrameDeinterlaced,
                   MEM_callocN(avpicture_get_size(anim->pCodecCtx->pix_fmt,
                                                  anim->pCodecCtx->width,
                                                  anim->pCodecCtx->height),
                               "ffmpeg deinterlace"),
                   anim->pCodecCtx->pix_fmt,
                   anim->pCodecCtx->width,
                   anim->pCodecCtx->height);
  }

  if (pCodecCtx->has_b_frames) {
    anim->preseek = 25; /* FIXME: detect gopsize ... */
  }
  else {
    anim->preseek = 0;
    av_image_fill_arrays(anim->pFrameDeinterlaced->data,
                         anim->pFrameDeinterlaced->linesize,
                         MEM_callocN(av_image_get_buffer_size(anim->pCodecCtx->pix_fmt,
                                                              anim->pCodecCtx->width,
                                                              anim->pCodecCtx->height,
                                                              1),
                                     "ffmpeg deinterlace"),
                         anim->pCodecCtx->pix_fmt,
                         anim->pCodecCtx->width,
                         anim->pCodecCtx->height,
                         1);
  }

  anim->img_convert_ctx = sws_getContext(anim->x,
@@ -713,15 +711,16 @@ static int startffmpeg(struct anim *anim)
                                         anim->x,
                                         anim->y,
                                         AV_PIX_FMT_RGBA,
                                         SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
                                         SWS_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
                                         NULL,
                                         NULL,
                                         NULL);

  if (!anim->img_convert_ctx) {
    fprintf(stderr, "Can't transform color space??? Bailing out...\n");
    avcodec_close(anim->pCodecCtx);
    avcodec_free_context(&anim->pCodecCtx);
    avformat_close_input(&anim->pFormatCtx);
    av_packet_free(&anim->cur_packet);
    av_frame_free(&anim->pFrameRGB);
    av_frame_free(&anim->pFrameDeinterlaced);
    av_frame_free(&anim->pFrame);
@@ -729,7 +728,6 @@ static int startffmpeg(struct anim *anim)
    return -1;
  }

# ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
  /* Try do detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
  if (!sws_getColorspaceDetails(anim->img_convert_ctx,
                                (int **)&inv_table,
@@ -756,7 +754,6 @@ static int startffmpeg(struct anim *anim)
  else {
    fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
  }
# endif

  return 0;
}
@@ -764,13 +761,13 @@ static int startffmpeg(struct anim *anim)
/* postprocess the image in anim->pFrame and do color conversion
 * and deinterlacing stuff.
 *
 * Output is anim->last_frame
 * Output is anim->cur_frame_final
 */

static void ffmpeg_postprocess(struct anim *anim)
{
  AVFrame *input = anim->pFrame;
  ImBuf *ibuf = anim->last_frame;
  ImBuf *ibuf = anim->cur_frame_final;
  int filter_y = 0;

  if (!anim->pFrameComplete) {
@@ -795,11 +792,11 @@ static void ffmpeg_postprocess(struct anim *anim)
                 input->data[3]);

  if (anim->ib_flags & IB_animdeinterlace) {
    if (avpicture_deinterlace((AVPicture *)anim->pFrameDeinterlaced,
                              (const AVPicture *)anim->pFrame,
                              anim->pCodecCtx->pix_fmt,
                              anim->pCodecCtx->width,
                              anim->pCodecCtx->height) < 0) {
    if (av_image_deinterlace(anim->pFrameDeinterlaced,
                             anim->pFrame,
                             anim->pCodecCtx->pix_fmt,
                             anim->pCodecCtx->width,
                             anim->pCodecCtx->height) < 0) {
      filter_y = true;
    }
    else {
@@ -808,11 +805,13 @@ static void ffmpeg_postprocess(struct anim *anim)
  }

  if (!need_aligned_ffmpeg_buffer(anim)) {
    avpicture_fill((AVPicture *)anim->pFrameRGB,
                   (unsigned char *)ibuf->rect,
                   AV_PIX_FMT_RGBA,
                   anim->x,
                   anim->y);
    av_image_fill_arrays(anim->pFrameRGB->data,
                         anim->pFrameRGB->linesize,
                         (unsigned char *)ibuf->rect,
                         AV_PIX_FMT_RGBA,
                         anim->x,
                         anim->y,
                         1);
  }

# if defined(__x86_64__) || defined(_M_X64)
@@ -895,7 +894,7 @@ static void ffmpeg_postprocess(struct anim *anim)
    }
  }

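The deprecated-API migration pattern used throughout this file maps avpicture_get_size()/avpicture_fill() onto av_image_get_buffer_size()/av_image_fill_arrays(), with an explicit alignment argument where 1 means tightly packed rows, matching the old behavior. A small sketch under those assumptions; wrap_rgba_buffer is our own helper name:

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

static uint8_t *wrap_rgba_buffer(AVFrame *frame, int width, int height)
{
  int size = av_image_get_buffer_size(AV_PIX_FMT_RGBA, width, height, 1);
  uint8_t *buffer = av_malloc(size);
  if (buffer) {
    /* Point frame->data/linesize at the caller-owned buffer, rows packed. */
    av_image_fill_arrays(frame->data, frame->linesize, buffer, AV_PIX_FMT_RGBA, width, height, 1);
  }
  return buffer;
}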
/* decode one video frame also considering the packet read into next_packet */
/* decode one video frame also considering the packet read into cur_packet */

static int ffmpeg_decode_video_frame(struct anim *anim)
{
@@ -903,82 +902,76 @@ static int ffmpeg_decode_video_frame(struct anim *anim)

  av_log(anim->pFormatCtx, AV_LOG_DEBUG, " DECODE VIDEO FRAME\n");

  if (anim->next_packet.stream_index == anim->videoStream) {
    av_free_packet(&anim->next_packet);
    anim->next_packet.stream_index = -1;
  if (anim->cur_packet->stream_index == anim->videoStream) {
    av_packet_unref(anim->cur_packet);
    anim->cur_packet->stream_index = -1;
  }

  while ((rval = av_read_frame(anim->pFormatCtx, &anim->next_packet)) >= 0) {
  while ((rval = av_read_frame(anim->pFormatCtx, anim->cur_packet)) >= 0) {
    av_log(anim->pFormatCtx,
           AV_LOG_DEBUG,
           "%sREAD: strID=%d (VID: %d) dts=%" PRId64 " pts=%" PRId64 " %s\n",
           (anim->next_packet.stream_index == anim->videoStream) ? "->" : " ",
           anim->next_packet.stream_index,
           (anim->cur_packet->stream_index == anim->videoStream) ? "->" : " ",
           anim->cur_packet->stream_index,
           anim->videoStream,
           (anim->next_packet.dts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->next_packet.dts,
           (anim->next_packet.pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->next_packet.pts,
           (anim->next_packet.flags & AV_PKT_FLAG_KEY) ? " KEY" : "");
    if (anim->next_packet.stream_index == anim->videoStream) {
           (anim->cur_packet->dts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->cur_packet->dts,
           (anim->cur_packet->pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->cur_packet->pts,
           (anim->cur_packet->flags & AV_PKT_FLAG_KEY) ? " KEY" : "");
    if (anim->cur_packet->stream_index == anim->videoStream) {
      anim->pFrameComplete = 0;

      avcodec_decode_video2(
          anim->pCodecCtx, anim->pFrame, &anim->pFrameComplete, &anim->next_packet);
      avcodec_send_packet(anim->pCodecCtx, anim->cur_packet);
      anim->pFrameComplete = avcodec_receive_frame(anim->pCodecCtx, anim->pFrame) == 0;

      if (anim->pFrameComplete) {
        anim->next_pts = av_get_pts_from_frame(anim->pFormatCtx, anim->pFrame);
        anim->cur_pts = av_get_pts_from_frame(anim->pFrame);

        if (anim->pFrame->key_frame) {
          anim->cur_key_frame_pts = anim->cur_pts;
        }
        av_log(anim->pFormatCtx,
               AV_LOG_DEBUG,
               " FRAME DONE: next_pts=%" PRId64 " pkt_pts=%" PRId64 ", guessed_pts=%" PRId64 "\n",
               " FRAME DONE: cur_pts=%" PRId64 ", guessed_pts=%" PRId64 "\n",
               (anim->pFrame->pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->pFrame->pts,
               (anim->pFrame->pkt_pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->pFrame->pkt_pts,
               (int64_t)anim->next_pts);
               (int64_t)anim->cur_pts);
        break;
      }
    }
    av_free_packet(&anim->next_packet);
    anim->next_packet.stream_index = -1;
    av_packet_unref(anim->cur_packet);
    anim->cur_packet->stream_index = -1;
  }

  if (rval == AVERROR_EOF) {
    /* this sets size and data fields to zero,
     * which is necessary to decode the remaining data
     * in the decoder engine after EOF. It also prevents a memory
     * leak, since av_read_frame spills out a full size packet even
     * on EOF... (and: it's safe to call on NULL packets) */

    av_free_packet(&anim->next_packet);

    anim->next_packet.size = 0;
    anim->next_packet.data = 0;

    /* Flush any remaining frames out of the decoder. */
    anim->pFrameComplete = 0;

    avcodec_decode_video2(
        anim->pCodecCtx, anim->pFrame, &anim->pFrameComplete, &anim->next_packet);
    avcodec_send_packet(anim->pCodecCtx, NULL);
    anim->pFrameComplete = avcodec_receive_frame(anim->pCodecCtx, anim->pFrame) == 0;

    if (anim->pFrameComplete) {
      anim->next_pts = av_get_pts_from_frame(anim->pFormatCtx, anim->pFrame);
      anim->cur_pts = av_get_pts_from_frame(anim->pFrame);

      if (anim->pFrame->key_frame) {
        anim->cur_key_frame_pts = anim->cur_pts;
      }
      av_log(anim->pFormatCtx,
             AV_LOG_DEBUG,
             " FRAME DONE (after EOF): next_pts=%" PRId64 " pkt_pts=%" PRId64
             ", guessed_pts=%" PRId64 "\n",
             " FRAME DONE (after EOF): cur_pts=%" PRId64 ", guessed_pts=%" PRId64 "\n",
             (anim->pFrame->pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->pFrame->pts,
             (anim->pFrame->pkt_pts == AV_NOPTS_VALUE) ? -1 : (int64_t)anim->pFrame->pkt_pts,
             (int64_t)anim->next_pts);
             (int64_t)anim->cur_pts);
      rval = 0;
    }
  }

  if (rval < 0) {
    anim->next_packet.stream_index = -1;
    av_packet_unref(anim->cur_packet);
    anim->cur_packet->stream_index = -1;

    av_log(anim->pFormatCtx,
           AV_LOG_ERROR,
           " DECODE READ FAILED: av_read_frame() "
           "returned error: %d\n",
           rval);
           "returned error: %s\n",
           av_err2str(rval));
  }

  return (rval >= 0);
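The decode side mirrors the encoder drain shown earlier: avcodec_decode_video2() becomes a send_packet/receive_frame pair, where a NULL packet starts draining and EAGAIN means the decoder needs more input. A generic sketch of that loop, with error handling trimmed to the essentials:

#include <libavcodec/avcodec.h>

/* Returns 0 when no frame is available (yet), 0 with `frame` filled on success,
 * or a negative AVERROR code on failure. */
static int decode_packet(AVCodecContext *ctx, AVPacket *pkt, AVFrame *frame)
{
  int ret = avcodec_send_packet(ctx, pkt); /* pkt == NULL starts draining. */
  if (ret < 0 && ret != AVERROR_EOF) {
    return ret;
  }
  ret = avcodec_receive_frame(ctx, frame);
  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
    return 0; /* No frame available yet, or stream fully drained. */
  }
  return ret; /* 0 on success: `frame` holds a decoded picture. */
}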
@@ -1026,101 +1019,130 @@ static int ffmpeg_seek_by_byte(AVFormatContext *pFormatCtx)
  return false;
}

static int64_t ffmpeg_get_seek_pos(struct anim *anim, int position)
{
  AVStream *v_st = anim->pFormatCtx->streams[anim->videoStream];
  double frame_rate = av_q2d(av_guess_frame_rate(anim->pFormatCtx, v_st, NULL));
  int64_t st_time = anim->pFormatCtx->start_time;
  int64_t pos = (int64_t)(position)*AV_TIME_BASE;
  /* Step back half a time base position to make sure that we get the requested
   * frame and not the one after it.
   */
  pos -= (AV_TIME_BASE / 2);
  pos /= frame_rate;

  av_log(anim->pFormatCtx,
         AV_LOG_DEBUG,
         "NO INDEX seek pos = %" PRId64 ", st_time = %" PRId64 "\n",
         pos,
         (st_time != AV_NOPTS_VALUE) ? st_time : 0);

  if (pos < 0) {
    pos = 0;
  }

  if (st_time != AV_NOPTS_VALUE) {
    pos += st_time;
  }

  return pos;
}

/* This gives us an estimate of which pts our requested frame will have.
 * Note that this might be off a bit in certain video files, but it should still be close enough.
 */
static int64_t ffmpeg_get_pts_to_search(struct anim *anim,
                                        struct anim_index *tc_index,
                                        int position)
{
  int64_t pts_to_search;
  int64_t st_time = anim->pFormatCtx->start_time;
  AVStream *v_st = anim->pFormatCtx->streams[anim->videoStream];
  double frame_rate = av_q2d(av_guess_frame_rate(anim->pFormatCtx, v_st, NULL));
  double pts_time_base = av_q2d(v_st->time_base);

  if (tc_index) {
    int new_frame_index = IMB_indexer_get_frame_index(tc_index, position);
    pts_to_search = IMB_indexer_get_pts(tc_index, new_frame_index);
  }
  else {
    pts_to_search = (long long)floor(((double)position) / pts_time_base / frame_rate + 0.5);
    int64_t st_time = anim->pFormatCtx->start_time;
    AVStream *v_st = anim->pFormatCtx->streams[anim->videoStream];
    AVRational frame_rate = av_guess_frame_rate(anim->pFormatCtx, v_st, NULL);
    AVRational time_base = v_st->time_base;

    if (st_time != AV_NOPTS_VALUE) {
      pts_to_search += st_time / pts_time_base / AV_TIME_BASE;
    int64_t steps_per_frame = (frame_rate.den * time_base.den) / (frame_rate.num * time_base.num);
    pts_to_search = position * steps_per_frame;

    if (st_time != AV_NOPTS_VALUE && st_time != 0) {
      int64_t start_frame = (double)st_time / AV_TIME_BASE * av_q2d(frame_rate);
      pts_to_search += start_frame * steps_per_frame;
    }
  }
  return pts_to_search;
}

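A worked example of the steps_per_frame arithmetic above, with typical values: a 25 fps stream (frame_rate = 25/1) and a 1/12800 stream time base give steps_per_frame = (1 * 12800) / (25 * 1) = 512, so frame N is expected at pts = N * 512. If the container reports a start_time of half a second (500000 in AV_TIME_BASE units), start_frame = 0.5 * 25 = 12 and the search pts shifts by 12 * 512 = 6144. These numbers are illustrative; real files may use other time bases.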
/* Check if the pts will get us the same frame that we already have in memory from last decode. */
static bool ffmpeg_pts_matches_last_frame(struct anim *anim, int64_t pts_to_search)
{
  return anim->last_frame && anim->last_pts <= pts_to_search && anim->next_pts > pts_to_search;
}

/* Requested video frame is expected to be found within different GOP as last decoded frame.
 * Seeking to new position and scanning is fastest way to get requested frame.
 * Check whether ffmpeg_can_scan() and ffmpeg_pts_matches_last_frame() is false before using this
 * function. */
static bool ffmpeg_can_seek(struct anim *anim, int position)
{
  return position != anim->curposition + 1;
}

/* Requested video frame is expected to be found within same GOP as last decoded frame.
 * Decoding frames in sequence until frame matches requested one is fastest way to get it. */
static bool ffmpeg_can_scan(struct anim *anim, int position, struct anim_index *tc_index)
{
  if (position > anim->curposition + 1 && anim->preseek && !tc_index &&
      position - (anim->curposition + 1) < anim->preseek) {
    return true;
  if (anim->pFrame && anim->cur_frame_final) {
    int64_t diff = pts_to_search - anim->cur_pts;
    return diff >= 0 && diff < anim->pFrame->pkt_duration;
  }

  if (tc_index == NULL) {
    return false;
  }

  int new_frame_index = IMB_indexer_get_frame_index(tc_index, position);
  int old_frame_index = IMB_indexer_get_frame_index(tc_index, anim->curposition);
  return IMB_indexer_can_scan(tc_index, old_frame_index, new_frame_index);
  return false;
}

static bool ffmpeg_is_first_frame_decode(struct anim *anim, int position)
{
  return position == 0 && anim->curposition == -1;
  return position == 0 && anim->cur_position == -1;
}

/* Decode frames one by one until its PTS matches pts_to_search. */
static void ffmpeg_decode_video_frame_scan(struct anim *anim, int64_t pts_to_search)
{
  av_log(anim->pFormatCtx, AV_LOG_DEBUG, "FETCH: within preseek interval\n");

  /* there seem to exist *very* silly GOP lengths out in the wild... */
  int count = 1000;
  av_log(anim->pFormatCtx, AV_LOG_DEBUG, "FETCH: within current GOP\n");

  av_log(anim->pFormatCtx,
         AV_LOG_DEBUG,
         "SCAN start: considering pts=%" PRId64 " in search of %" PRId64 "\n",
         (int64_t)anim->next_pts,
         (int64_t)anim->cur_pts,
         (int64_t)pts_to_search);

  while (count > 0 && anim->next_pts < pts_to_search) {
  int64_t start_gop_frame = anim->cur_key_frame_pts;
  bool scan_fuzzy = false;

  while (anim->cur_pts < pts_to_search) {
    av_log(anim->pFormatCtx,
           AV_LOG_DEBUG,
           " WHILE: pts=%" PRId64 " in search of %" PRId64 "\n",
           (int64_t)anim->next_pts,
           (int64_t)anim->cur_pts,
           (int64_t)pts_to_search);
    if (!ffmpeg_decode_video_frame(anim)) {
      break;
    }
    count--;

    if (start_gop_frame != anim->cur_key_frame_pts) {
      break;
    }

    if (anim->cur_pts < pts_to_search &&
        anim->cur_pts + anim->pFrame->pkt_duration > pts_to_search) {
      /* Our estimate of the pts was a bit off, but we have the frame we want. */
      av_log(anim->pFormatCtx, AV_LOG_DEBUG, "SCAN fuzzy frame match\n");
      scan_fuzzy = true;
      break;
    }
  }
  if (count == 0) {

  if (start_gop_frame != anim->cur_key_frame_pts) {
    /* We went into an other GOP frame. This should never happen as we should have positioned us
     * correctly by seeking into the GOP frame that contains the frame we want. */
    av_log(anim->pFormatCtx,
           AV_LOG_ERROR,
           "SCAN failed: completely lost in stream, "
           "bailing out at PTS=%" PRId64 ", searching for PTS=%" PRId64 "\n",
           (int64_t)anim->next_pts,
           (int64_t)anim->cur_pts,
           (int64_t)pts_to_search);
  }
  if (anim->next_pts == pts_to_search) {

  if (scan_fuzzy || anim->cur_pts == pts_to_search) {
    av_log(anim->pFormatCtx, AV_LOG_DEBUG, "SCAN HAPPY: we found our PTS!\n");
  }
  else {
@@ -1128,22 +1150,25 @@ static void ffmpeg_decode_video_frame_scan(struct anim *anim, int64_t pts_to_sea
  }
}

/* Wrapper over av_seek_frame(), for formats that doesn't have it's own read_seek() or read_seek2()
 * functions defined. When seeking in these formats, rule to seek to last necessary I-frame is not
 * honored. It is not even guaranteed that I-frame, that must be decoded will be read. See
 * https://trac.ffmpeg.org/ticket/1607 and https://developer.blender.org/T86944. */
static int ffmpeg_generic_seek_workaround(struct anim *anim, int64_t requested_pos)
/* Wrapper over av_seek_frame(), for formats that doesn't have its own read_seek() or
 * read_seek2() functions defined. When seeking in these formats, rule to seek to last
 * necessary I-frame is not honored. It is not even guaranteed that I-frame, that must be
 * decoded will be read. See https://trac.ffmpeg.org/ticket/1607 and
 * https://developer.blender.org/T86944. */
static int ffmpeg_generic_seek_workaround(struct anim *anim,
                                          int64_t *requested_pos,
                                          int64_t pts_to_search)
{
  AVStream *v_st = anim->pFormatCtx->streams[anim->videoStream];
  double frame_rate = av_q2d(av_guess_frame_rate(anim->pFormatCtx, v_st, NULL));
  int64_t current_pos = requested_pos;
  int64_t current_pos = *requested_pos;
  int64_t offset = 0;

  /* This time offset maximum limit is arbitrary. If some files fails to decode it may be
   * increased. Seek performance will be negatively affected. Small initial offset is necessary
   * because encoder can re-arrange frames as it needs but within it's delay, which is usually
   * small. */
  for (int offset = 5; offset < 25; offset++) {
    current_pos = requested_pos - ((int64_t)(offset)*AV_TIME_BASE / frame_rate);
  int64_t cur_pts, prev_pts = -1;

  /* Step backward frame by frame until we find the key frame we are looking for. */
  while (current_pos != 0) {
    current_pos = *requested_pos - ((int64_t)(offset)*AV_TIME_BASE / frame_rate);
    current_pos = max_ii(current_pos, 0);

    /* Seek to timestamp. */
@@ -1152,73 +1177,90 @@ static int ffmpeg_generic_seek_workaround(struct anim *anim, int64_t requested_p
    }

    /* Read first video stream packet. */
    AVPacket read_packet = {0};
    while (av_read_frame(anim->pFormatCtx, &read_packet) >= 0) {
      if (anim->next_packet.stream_index == anim->videoStream) {
    AVPacket *read_packet = av_packet_alloc();
    while (av_read_frame(anim->pFormatCtx, read_packet) >= 0) {
      if (read_packet->stream_index == anim->videoStream) {
|
||||
break;
|
||||
}
|
||||
av_packet_unref(read_packet);
|
||||
}
|
||||
|
||||
/* If this packet contains an I-frame, this could be the frame that we need. */
|
||||
bool is_key_frame = read_packet->flags & AV_PKT_FLAG_KEY;
|
||||
/* We need to check the packet timestamp as the key frame could be for a GOP forward in the the
|
||||
* video stream. So if it has a larger timestamp than the frame we want, ignore it.
|
||||
*/
|
||||
cur_pts = timestamp_from_pts_or_dts(read_packet->pts, read_packet->dts);
|
||||
av_packet_free(&read_packet);
|
||||
|
||||
if (is_key_frame) {
|
||||
if (cur_pts <= pts_to_search) {
|
||||
/* We found the I-frame we were looking for! */
|
||||
break;
|
||||
}
|
||||
if (cur_pts == prev_pts) {
|
||||
/* We got the same key frame packet twice.
|
||||
* This probably means that we have hit the beginning of the stream. */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* If this packet contains I-frame, exit loop. This should be the frame that we need. */
|
||||
if (read_packet.flags & AV_PKT_FLAG_KEY) {
|
||||
break;
|
||||
}
|
||||
prev_pts = cur_pts;
|
||||
offset++;
|
||||
}
|
||||
|
||||
*requested_pos = current_pos;
|
||||
|
||||
/* Re-seek to timestamp that gave I-frame, so it can be read by decode function. */
|
||||
return av_seek_frame(anim->pFormatCtx, -1, current_pos, AVSEEK_FLAG_BACKWARD);
|
||||
}
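
The scan and seek paths above lean on the helper timestamp_from_pts_or_dts() to get a usable timestamp even when a packet carries no PTS. The hunks never show its body, so here is a minimal sketch of what such a helper can look like, using the FFMPEG_INLINE macro from the compat header; the fallback order (PTS first, DTS otherwise) is the assumption that matters:

FFMPEG_INLINE
int64_t timestamp_from_pts_or_dts(int64_t pts, int64_t dts)
{
  /* Some containers only set DTS on packets; fall back to it when PTS is missing. */
  if (pts == AV_NOPTS_VALUE) {
    return dts;
  }
  return pts;
}
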
/* Seek to last necessary I-frame and scan-decode until requested frame is found. */
static void ffmpeg_seek_and_decode(struct anim *anim, int position, struct anim_index *tc_index)
/* Seek to last necessary key frame. */
static int ffmpeg_seek_to_key_frame(struct anim *anim,
int position,
struct anim_index *tc_index,
int64_t pts_to_search)
{
AVStream *v_st = anim->pFormatCtx->streams[anim->videoStream];
double frame_rate = av_q2d(av_guess_frame_rate(anim->pFormatCtx, v_st, NULL));
int64_t st_time = anim->pFormatCtx->start_time;

int64_t pts_to_search = ffmpeg_get_pts_to_search(anim, tc_index, position);

int64_t pos;
int ret;

if (tc_index) {
/* We can use timestamps generated from our indexer to seek. */
int new_frame_index = IMB_indexer_get_frame_index(tc_index, position);
int old_frame_index = IMB_indexer_get_frame_index(tc_index, anim->cur_position);

if (IMB_indexer_can_scan(tc_index, old_frame_index, new_frame_index)) {
/* No need to seek, return early. */
return 0;
}
uint64_t pts;
uint64_t dts;

pos = IMB_indexer_get_seek_pos(tc_index, new_frame_index);
pts = IMB_indexer_get_seek_pos_pts(tc_index, new_frame_index);
dts = IMB_indexer_get_seek_pos_dts(tc_index, new_frame_index);

anim->cur_key_frame_pts = timestamp_from_pts_or_dts(pts, dts);

av_log(anim->pFormatCtx, AV_LOG_DEBUG, "TC INDEX seek pos = %" PRId64 "\n", pos);
av_log(anim->pFormatCtx, AV_LOG_DEBUG, "TC INDEX seek pts = %" PRIu64 "\n", pts);
av_log(anim->pFormatCtx, AV_LOG_DEBUG, "TC INDEX seek dts = %" PRIu64 "\n", dts);

if (ffmpeg_seek_by_byte(anim->pFormatCtx)) {
av_log(anim->pFormatCtx, AV_LOG_DEBUG, "... using BYTE pos\n");

ret = av_seek_frame(anim->pFormatCtx, -1, pos, AVSEEK_FLAG_BYTE);
av_update_cur_dts(anim->pFormatCtx, v_st, dts);
}
else {
av_log(anim->pFormatCtx, AV_LOG_DEBUG, "... using DTS pos\n");
ret = av_seek_frame(anim->pFormatCtx, anim->videoStream, dts, AVSEEK_FLAG_BACKWARD);
av_log(anim->pFormatCtx, AV_LOG_DEBUG, "... using PTS pos\n");
ret = av_seek_frame(
anim->pFormatCtx, anim->videoStream, anim->cur_key_frame_pts, AVSEEK_FLAG_BACKWARD);
}
}
else {
pos = (int64_t)(position)*AV_TIME_BASE / frame_rate;

av_log(anim->pFormatCtx,
AV_LOG_DEBUG,
"NO INDEX seek pos = %" PRId64 ", st_time = %" PRId64 "\n",
pos,
(st_time != AV_NOPTS_VALUE) ? st_time : 0);

if (pos < 0) {
pos = 0;
}

if (st_time != AV_NOPTS_VALUE) {
pos += st_time;
}

/* We have to manually seek with ffmpeg to get to the key frame we want to start decoding from.
*/
pos = ffmpeg_get_seek_pos(anim, position);
av_log(anim->pFormatCtx, AV_LOG_DEBUG, "NO INDEX final seek pos = %" PRId64 "\n", pos);

AVFormatContext *format_ctx = anim->pFormatCtx;
@@ -1227,7 +1269,49 @@ static void ffmpeg_seek_and_decode(struct anim *anim, int position, struct anim_index *tc_index)
ret = av_seek_frame(anim->pFormatCtx, -1, pos, AVSEEK_FLAG_BACKWARD);
}
else {
ret = ffmpeg_generic_seek_workaround(anim, pos);
ret = ffmpeg_generic_seek_workaround(anim, &pos, pts_to_search);
av_log(anim->pFormatCtx, AV_LOG_DEBUG, "Adjusted final seek pos = %" PRId64 "\n", pos);
}

if (ret >= 0) {
/* Double check if we need to seek and decode all packets. */
AVPacket *current_gop_start_packet = av_packet_alloc();
while (av_read_frame(anim->pFormatCtx, current_gop_start_packet) >= 0) {
if (current_gop_start_packet->stream_index == anim->videoStream) {
break;
}
av_packet_unref(current_gop_start_packet);
}
int64_t gop_pts = timestamp_from_pts_or_dts(current_gop_start_packet->pts,
current_gop_start_packet->dts);

av_packet_free(&current_gop_start_packet);
bool same_gop = gop_pts == anim->cur_key_frame_pts;

if (same_gop && position > anim->cur_position) {
/* Change back to our old frame position so we can simply continue decoding from there. */
int64_t cur_pts = timestamp_from_pts_or_dts(anim->cur_packet->pts, anim->cur_packet->dts);

if (cur_pts == gop_pts) {
/* We are already at the correct position. */
return 0;
}
AVPacket *temp = av_packet_alloc();

while (av_read_frame(anim->pFormatCtx, temp) >= 0) {
int64_t temp_pts = timestamp_from_pts_or_dts(temp->pts, temp->dts);
if (temp->stream_index == anim->videoStream && temp_pts == cur_pts) {
break;
}
av_packet_unref(temp);
}
av_packet_free(&temp);
return 0;
}

anim->cur_key_frame_pts = gop_pts;
/* Seek back so we are at the correct position after we decoded a frame. */
av_seek_frame(anim->pFormatCtx, -1, pos, AVSEEK_FLAG_BACKWARD);
}
}

@@ -1239,25 +1323,21 @@ static void ffmpeg_seek_and_decode(struct anim *anim, int position, struct anim_index *tc_index)
"): errcode = %d\n",
pos,
position,
(int64_t)pts_to_search,
pts_to_search,
ret);
}
/* Flush the internal buffers of ffmpeg. This needs to be done after seeking to avoid decoding
* errors. */
avcodec_flush_buffers(anim->pCodecCtx);

anim->next_pts = -1;
anim->cur_pts = -1;

if (anim->next_packet.stream_index == anim->videoStream) {
av_free_packet(&anim->next_packet);
anim->next_packet.stream_index = -1;
if (anim->cur_packet->stream_index == anim->videoStream) {
av_packet_unref(anim->cur_packet);
anim->cur_packet->stream_index = -1;
}

/* memset(anim->pFrame, ...) ?? */
if (ret < 0) {
/* Seek failed. */
return;
}

ffmpeg_decode_video_frame_scan(anim, pts_to_search);
return ret;
}

static ImBuf *ffmpeg_fetchibuf(struct anim *anim, int position, IMB_Timecode_Type tc)
@@ -1287,25 +1367,22 @@ static ImBuf *ffmpeg_fetchibuf(struct anim *anim, int position, IMB_Timecode_Type tc)
if (ffmpeg_pts_matches_last_frame(anim, pts_to_search)) {
av_log(anim->pFormatCtx,
AV_LOG_DEBUG,
"FETCH: frame repeat: last: %" PRId64 " next: %" PRId64 "\n",
(int64_t)anim->last_pts,
(int64_t)anim->next_pts);
IMB_refImBuf(anim->last_frame);
anim->curposition = position;
return anim->last_frame;
"FETCH: frame repeat: pts: %" PRId64 "\n",
(int64_t)anim->cur_pts);
IMB_refImBuf(anim->cur_frame_final);
anim->cur_position = position;
return anim->cur_frame_final;
}

if (ffmpeg_can_scan(anim, position, tc_index) || ffmpeg_is_first_frame_decode(anim, position)) {
if (position == anim->cur_position + 1 || ffmpeg_is_first_frame_decode(anim, position)) {
av_log(anim->pFormatCtx, AV_LOG_DEBUG, "FETCH: no seek necessary, just continue...\n");
ffmpeg_decode_video_frame(anim);
}
else if (ffmpeg_seek_to_key_frame(anim, position, tc_index, pts_to_search) >= 0) {
ffmpeg_decode_video_frame_scan(anim, pts_to_search);
}
else if (ffmpeg_can_seek(anim, position)) {
ffmpeg_seek_and_decode(anim, position, tc_index);
}
else {
av_log(anim->pFormatCtx, AV_LOG_DEBUG, "FETCH: no seek necessary, just continue...\n");
}

IMB_freeImBuf(anim->last_frame);
IMB_freeImBuf(anim->cur_frame_final);

/* Certain versions of FFmpeg have a bug in libswscale which ends up in a crash
* when the destination buffer is not properly aligned. For example, this happens
@@ -1325,23 +1402,20 @@ static ImBuf *ffmpeg_fetchibuf(struct anim *anim, int position, IMB_Timecode_Type tc)
*
* The issue was reported to FFmpeg under ticket #8747 in the FFmpeg tracker
* and is fixed in versions newer than 4.3.1. */
anim->last_frame = IMB_allocImBuf(anim->x, anim->y, 32, 0);
anim->last_frame->rect = MEM_mallocN_aligned((size_t)4 * anim->x * anim->y, 32, "ffmpeg ibuf");
anim->last_frame->mall |= IB_rect;
anim->cur_frame_final = IMB_allocImBuf(anim->x, anim->y, 32, 0);
anim->cur_frame_final->rect = MEM_mallocN_aligned(
(size_t)4 * anim->x * anim->y, 32, "ffmpeg ibuf");
anim->cur_frame_final->mall |= IB_rect;

anim->last_frame->rect_colorspace = colormanage_colorspace_get_named(anim->colorspace);
anim->cur_frame_final->rect_colorspace = colormanage_colorspace_get_named(anim->colorspace);

ffmpeg_postprocess(anim);

anim->last_pts = anim->next_pts;
anim->cur_position = position;

ffmpeg_decode_video_frame(anim);
IMB_refImBuf(anim->cur_frame_final);

anim->curposition = position;

IMB_refImBuf(anim->last_frame);

return anim->last_frame;
return anim->cur_frame_final;
}

static void free_anim_ffmpeg(struct anim *anim)
@@ -1351,32 +1425,30 @@ static void free_anim_ffmpeg(struct anim *anim)
}

if (anim->pCodecCtx) {
avcodec_close(anim->pCodecCtx);
avcodec_free_context(&anim->pCodecCtx);
avformat_close_input(&anim->pFormatCtx);
av_packet_free(&anim->cur_packet);

/* Special case here: pFrame could share pointers with codec,
* so in order to avoid double-free we don't use av_frame_free()
* to free the frame.
*
* Could it be a bug in FFmpeg?
*/
av_free(anim->pFrame);
av_frame_free(&anim->pFrame);

if (!need_aligned_ffmpeg_buffer(anim)) {
/* If there's no need for own aligned buffer it means that FFmpeg's
* frame shares the same buffer as temporary ImBuf. In this case we
* should not free the buffer when freeing the FFmpeg buffer.
*/
avpicture_fill((AVPicture *)anim->pFrameRGB, NULL, AV_PIX_FMT_RGBA, anim->x, anim->y);
av_image_fill_arrays(anim->pFrameRGB->data,
anim->pFrameRGB->linesize,
NULL,
AV_PIX_FMT_RGBA,
anim->x,
anim->y,
1);
}
av_frame_free(&anim->pFrameRGB);
av_frame_free(&anim->pFrameDeinterlaced);

sws_freeContext(anim->img_convert_ctx);
IMB_freeImBuf(anim->last_frame);
if (anim->next_packet.stream_index != -1) {
av_free_packet(&anim->next_packet);
}
IMB_freeImBuf(anim->cur_frame_final);
}
anim->duration_in_frames = 0;
}
@@ -1510,13 +1582,13 @@ struct ImBuf *IMB_anim_absolute(struct anim *anim,
an_stringenc(anim->name, head, tail, digits, pic);
ibuf = IMB_loadiffname(anim->name, IB_rect, anim->colorspace);
if (ibuf) {
anim->curposition = position;
anim->cur_position = position;
}
break;
case ANIM_MOVIE:
ibuf = movie_fetchibuf(anim, position);
if (ibuf) {
anim->curposition = position;
anim->cur_position = position;
IMB_convert_rgba_to_abgr(ibuf);
}
break;
@@ -1524,7 +1596,7 @@ struct ImBuf *IMB_anim_absolute(struct anim *anim,
case ANIM_AVI:
ibuf = avi_fetchibuf(anim, position);
if (ibuf) {
anim->curposition = position;
anim->cur_position = position;
}
break;
#endif
@@ -1532,7 +1604,7 @@ struct ImBuf *IMB_anim_absolute(struct anim *anim,
case ANIM_FFMPEG:
ibuf = ffmpeg_fetchibuf(anim, position, tc);
if (ibuf) {
anim->curposition = position;
anim->cur_position = position;
}
filter_y = 0; /* done internally */
break;
@@ -1543,7 +1615,7 @@ struct ImBuf *IMB_anim_absolute(struct anim *anim,
if (filter_y) {
IMB_filtery(ibuf);
}
BLI_snprintf(ibuf->name, sizeof(ibuf->name), "%s.%04d", anim->name, anim->curposition + 1);
BLI_snprintf(ibuf->name, sizeof(ibuf->name), "%s.%04d", anim->name, anim->cur_position + 1);
}
return ibuf;
}
@@ -1598,16 +1670,6 @@ bool IMB_anim_get_fps(struct anim *anim, short *frs_sec, float *frs_sec_base, bool no_av_base)
return false;
}

void IMB_anim_set_preseek(struct anim *anim, int preseek)
{
anim->preseek = preseek;
}

int IMB_anim_get_preseek(struct anim *anim)
{
return anim->preseek;
}

int IMB_anim_get_image_width(struct anim *anim)
{
return anim->x;

@@ -48,9 +48,10 @@

#ifdef WITH_FFMPEG
# include "ffmpeg_compat.h"
# include <libavutil/imgutils.h>
#endif

static const char magic[] = "BlenMIdx";
static const char binary_header_str[] = "BlenMIdx";
static const char temp_ext[] = "_part";

static const int proxy_sizes[] = {IMB_PROXY_25, IMB_PROXY_50, IMB_PROXY_75, IMB_PROXY_100};
@@ -65,7 +66,7 @@ static int tc_types[] = {
};
#endif

#define INDEX_FILE_VERSION 1
#define INDEX_FILE_VERSION 2

/* ----------------------------------------------------------------------
* - time code index functions
@@ -96,16 +97,25 @@ anim_index_builder *IMB_index_builder_create(const char *name)
return NULL;
}

fprintf(rv->fp, "%s%c%.3d", magic, (ENDIAN_ORDER == B_ENDIAN) ? 'V' : 'v', INDEX_FILE_VERSION);
fprintf(rv->fp,
"%s%c%.3d",
binary_header_str,
(ENDIAN_ORDER == B_ENDIAN) ? 'V' : 'v',
INDEX_FILE_VERSION);

return rv;
}

void IMB_index_builder_add_entry(
anim_index_builder *fp, int frameno, uint64_t seek_pos, uint64_t seek_pos_dts, uint64_t pts)
void IMB_index_builder_add_entry(anim_index_builder *fp,
int frameno,
uint64_t seek_pos,
uint64_t seek_pos_pts,
uint64_t seek_pos_dts,
uint64_t pts)
{
fwrite(&frameno, sizeof(int), 1, fp->fp);
fwrite(&seek_pos, sizeof(uint64_t), 1, fp->fp);
fwrite(&seek_pos_pts, sizeof(uint64_t), 1, fp->fp);
fwrite(&seek_pos_dts, sizeof(uint64_t), 1, fp->fp);
fwrite(&pts, sizeof(uint64_t), 1, fp->fp);
}
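
Bumping INDEX_FILE_VERSION to 2 changes the on-disk record: every entry now carries the key frame's PTS next to its DTS. As a hedged sketch (the struct name and reader below are hypothetical, only the field order is taken from the fwrite() calls above), a version-2 entry reads back like this; reading field by field also sidesteps struct padding:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical reader for one version-2 index entry; the field order must
 * match the fwrite() sequence in IMB_index_builder_add_entry() exactly. */
typedef struct IndexEntryV2 {
  int frameno;           /* frame number in the stream */
  uint64_t seek_pos;     /* byte position of the seek packet */
  uint64_t seek_pos_pts; /* PTS of the seek packet (new in version 2) */
  uint64_t seek_pos_dts; /* DTS of the seek packet */
  uint64_t pts;          /* presentation timestamp of the frame itself */
} IndexEntryV2;

static int index_entry_v2_read(FILE *fp, IndexEntryV2 *r)
{
  return fread(&r->frameno, sizeof(int), 1, fp) == 1 &&
         fread(&r->seek_pos, sizeof(uint64_t), 1, fp) == 1 &&
         fread(&r->seek_pos_pts, sizeof(uint64_t), 1, fp) == 1 &&
         fread(&r->seek_pos_dts, sizeof(uint64_t), 1, fp) == 1 &&
         fread(&r->pts, sizeof(uint64_t), 1, fp) == 1;
}
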
@@ -115,6 +125,7 @@ void IMB_index_builder_proc_frame(anim_index_builder *fp,
int data_size,
int frameno,
uint64_t seek_pos,
uint64_t seek_pos_pts,
uint64_t seek_pos_dts,
uint64_t pts)
{
@@ -122,13 +133,14 @@ void IMB_index_builder_proc_frame(anim_index_builder *fp,
anim_index_entry e;
e.frameno = frameno;
e.seek_pos = seek_pos;
e.seek_pos_pts = seek_pos_pts;
e.seek_pos_dts = seek_pos_dts;
e.pts = pts;

fp->proc_frame(fp, buffer, data_size, &e);
}
else {
IMB_index_builder_add_entry(fp, frameno, seek_pos, seek_pos_dts, pts);
IMB_index_builder_add_entry(fp, frameno, seek_pos, seek_pos_pts, seek_pos_dts, pts);
}
}

@@ -159,22 +171,26 @@ struct anim_index *IMB_indexer_open(const char *name)
int i;

if (!fp) {
fprintf(stderr, "Couldn't open indexer file: %s\n", name);
return NULL;
}

if (fread(header, 12, 1, fp) != 1) {
fprintf(stderr, "Couldn't read indexer file: %s\n", name);
fclose(fp);
return NULL;
}

header[12] = 0;

if (memcmp(header, magic, 8) != 0) {
if (memcmp(header, binary_header_str, 8) != 0) {
fprintf(stderr, "Error reading %s: Binary file type string mismatch\n", name);
fclose(fp);
return NULL;
}

if (atoi(header + 9) != INDEX_FILE_VERSION) {
fprintf(stderr, "Error reading %s: File version mismatch\n", name);
fclose(fp);
return NULL;
}
@@ -187,6 +203,7 @@ struct anim_index *IMB_indexer_open(const char *name)

idx->num_entries = (ftell(fp) - 12) / (sizeof(int) + /* framepos */
sizeof(uint64_t) + /* seek_pos */
sizeof(uint64_t) + /* seek_pos_pts */
sizeof(uint64_t) + /* seek_pos_dts */
sizeof(uint64_t) /* pts */
);
@@ -200,12 +217,13 @@ struct anim_index *IMB_indexer_open(const char *name)
for (i = 0; i < idx->num_entries; i++) {
items_read += fread(&idx->entries[i].frameno, sizeof(int), 1, fp);
items_read += fread(&idx->entries[i].seek_pos, sizeof(uint64_t), 1, fp);
items_read += fread(&idx->entries[i].seek_pos_pts, sizeof(uint64_t), 1, fp);
items_read += fread(&idx->entries[i].seek_pos_dts, sizeof(uint64_t), 1, fp);
items_read += fread(&idx->entries[i].pts, sizeof(uint64_t), 1, fp);
}

if (UNLIKELY(items_read != idx->num_entries * 4)) {
perror("error reading animation index file");
if (UNLIKELY(items_read != idx->num_entries * 5)) {
fprintf(stderr, "Error: Element data size mismatch in: %s\n", name);
MEM_freeN(idx->entries);
MEM_freeN(idx);
fclose(fp);
@@ -216,6 +234,7 @@ struct anim_index *IMB_indexer_open(const char *name)
for (i = 0; i < idx->num_entries; i++) {
BLI_endian_switch_int32(&idx->entries[i].frameno);
BLI_endian_switch_uint64(&idx->entries[i].seek_pos);
BLI_endian_switch_uint64(&idx->entries[i].seek_pos_pts);
BLI_endian_switch_uint64(&idx->entries[i].seek_pos_dts);
BLI_endian_switch_uint64(&idx->entries[i].pts);
}
@@ -237,6 +256,17 @@ uint64_t IMB_indexer_get_seek_pos(struct anim_index *idx, int frame_index)
return idx->entries[frame_index].seek_pos;
}

uint64_t IMB_indexer_get_seek_pos_pts(struct anim_index *idx, int frame_index)
{
if (frame_index < 0) {
frame_index = 0;
}
if (frame_index >= idx->num_entries) {
frame_index = idx->num_entries - 1;
}
return idx->entries[frame_index].seek_pos_pts;
}

uint64_t IMB_indexer_get_seek_pos_dts(struct anim_index *idx, int frame_index)
{
if (frame_index < 0) {
@@ -318,8 +348,7 @@ int IMB_proxy_size_to_array_index(IMB_Proxy_Size pr_size)
{
switch (pr_size) {
case IMB_PROXY_NONE:
/* if we got here, something is broken anyways, so sane defaults... */
return 0;
return -1;
case IMB_PROXY_25:
return 0;
case IMB_PROXY_50:
@@ -329,16 +358,16 @@ int IMB_proxy_size_to_array_index(IMB_Proxy_Size pr_size)
case IMB_PROXY_100:
return 3;
default:
return 0;
BLI_assert(!"Unhandled proxy size enum!");
return -1;
}
}

int IMB_timecode_to_array_index(IMB_Timecode_Type tc)
{
switch (tc) {
case IMB_TC_NONE: /* if we got here, something is broken anyways,
* so sane defaults... */
return 0;
case IMB_TC_NONE:
return -1;
case IMB_TC_RECORD_RUN:
return 0;
case IMB_TC_FREE_RUN:
@@ -348,7 +377,8 @@ int IMB_timecode_to_array_index(IMB_Timecode_Type tc)
case IMB_TC_RECORD_RUN_NO_GAPS:
return 3;
default:
return 0;
BLI_assert(!"Unhandled timecode type enum!");
return -1;
}
}

@@ -384,6 +414,8 @@ static bool get_proxy_filename(struct anim *anim,
char index_dir[FILE_MAXDIR];
int i = IMB_proxy_size_to_array_index(preview_size);

BLI_assert(i >= 0);

char proxy_name[256];
char stream_suffix[20];
const char *name = (temp) ? "proxy_%d%s_part.avi" : "proxy_%d%s.avi";
@@ -415,6 +447,9 @@ static void get_tc_filename(struct anim *anim, IMB_Timecode_Type tc, char *fname)
{
char index_dir[FILE_MAXDIR];
int i = IMB_timecode_to_array_index(tc);

BLI_assert(i >= 0);

const char *index_names[] = {
"record_run%s%s.blen_tc",
"free_run%s%s.blen_tc",
@@ -465,13 +500,6 @@ struct proxy_output_ctx {
struct anim *anim;
};

// work around stupid swscaler 16 bytes alignment bug...

static int round_up(int x, int mod)
{
return x + ((mod - (x % mod)) % mod);
}

static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
struct anim *anim, AVStream *st, int proxy_size, int width, int height, int quality)
{
@@ -488,23 +516,16 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
rv->of = avformat_alloc_context();
rv->of->oformat = av_guess_format("avi", NULL, NULL);

BLI_strncpy(rv->of->filename, fname, sizeof(rv->of->filename));
rv->of->url = av_strdup(fname);

fprintf(stderr, "Starting work on proxy: %s\n", rv->of->filename);
fprintf(stderr, "Starting work on proxy: %s\n", rv->of->url);

rv->st = avformat_new_stream(rv->of, NULL);
rv->st->id = 0;

rv->c = rv->st->codec;
rv->c = avcodec_alloc_context3(NULL);
rv->c->codec_type = AVMEDIA_TYPE_VIDEO;
rv->c->codec_id = AV_CODEC_ID_H264;
rv->c->width = width;
rv->c->height = height;
rv->c->gop_size = 10;
rv->c->max_b_frames = 0;
/* Correct wrong default ffmpeg param which crashes x264. */
rv->c->qmin = 10;
rv->c->qmax = 51;

rv->of->oformat->video_codec = rv->c->codec_id;
rv->codec = avcodec_find_encoder(rv->c->codec_id);
@@ -513,10 +534,19 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
fprintf(stderr,
"No ffmpeg encoder available? "
"Proxy not built!\n");
av_free(rv->of);
avcodec_free_context(&rv->c);
avformat_free_context(rv->of);
MEM_freeN(rv);
return NULL;
}

avcodec_get_context_defaults3(rv->c, rv->codec);

rv->c->width = width;
rv->c->height = height;
rv->c->gop_size = 10;
rv->c->max_b_frames = 0;

if (rv->codec->pix_fmts) {
rv->c->pix_fmt = rv->codec->pix_fmts[0];
}
@@ -524,7 +554,7 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
rv->c->pix_fmt = AV_PIX_FMT_YUVJ420P;
}

rv->c->sample_aspect_ratio = rv->st->sample_aspect_ratio = st->codec->sample_aspect_ratio;
rv->c->sample_aspect_ratio = rv->st->sample_aspect_ratio = st->sample_aspect_ratio;

rv->c->time_base.den = 25;
rv->c->time_base.num = 1;
@@ -539,8 +569,11 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
AVDictionary *codec_opts = NULL;
/* High quality preset value. */
av_dict_set_int(&codec_opts, "crf", crf, 0);
/* Prefer smaller file-size. */
av_dict_set(&codec_opts, "preset", "slow", 0);
/* Prefer smaller file-size. Presets from veryslow to veryfast produce output with very
* similar file-size, but there is a big difference in performance. In some cases the
* veryfast preset will produce the smallest file-size. */
av_dict_set(&codec_opts, "preset", "veryfast", 0);
av_dict_set(&codec_opts, "tune", "fastdecode", 0);

if (rv->codec->capabilities & AV_CODEC_CAP_AUTO_THREADS) {
rv->c->thread_count = 0;
@@ -557,34 +590,58 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
}

if (rv->of->flags & AVFMT_GLOBALHEADER) {
rv->c->flags |= CODEC_FLAG_GLOBAL_HEADER;
rv->c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

if (avio_open(&rv->of->pb, fname, AVIO_FLAG_WRITE) < 0) {
avcodec_parameters_from_context(rv->st->codecpar, rv->c);

int ret = avio_open(&rv->of->pb, fname, AVIO_FLAG_WRITE);

if (ret < 0) {
fprintf(stderr,
"Couldn't open outputfile! "
"Proxy not built!\n");
av_free(rv->of);
return 0;
"Couldn't open IO: %s\n"
"Proxy not built!\n",
av_err2str(ret));
avcodec_free_context(&rv->c);
avformat_free_context(rv->of);
MEM_freeN(rv);
return NULL;
}

avcodec_open2(rv->c, rv->codec, &codec_opts);
ret = avcodec_open2(rv->c, rv->codec, &codec_opts);
if (ret < 0) {
fprintf(stderr,
"Couldn't open codec: %s\n"
"Proxy not built!\n",
av_err2str(ret));
avcodec_free_context(&rv->c);
avformat_free_context(rv->of);
MEM_freeN(rv);
return NULL;
}

rv->orig_height = av_get_cropped_height_from_codec(st->codec);
rv->orig_height = st->codecpar->height;

if (st->codec->width != width || st->codec->height != height ||
st->codec->pix_fmt != rv->c->pix_fmt) {
if (st->codecpar->width != width || st->codecpar->height != height ||
st->codecpar->format != rv->c->pix_fmt) {
rv->frame = av_frame_alloc();
avpicture_fill((AVPicture *)rv->frame,
MEM_mallocN(avpicture_get_size(rv->c->pix_fmt, round_up(width, 16), height),
"alloc proxy output frame"),
rv->c->pix_fmt,
round_up(width, 16),
height);

rv->sws_ctx = sws_getContext(st->codec->width,
av_image_fill_arrays(rv->frame->data,
rv->frame->linesize,
MEM_mallocN(av_image_get_buffer_size(rv->c->pix_fmt, width, height, 1),
"alloc proxy output frame"),
rv->c->pix_fmt,
width,
height,
1);

rv->frame->format = rv->c->pix_fmt;
rv->frame->width = width;
rv->frame->height = height;

rv->sws_ctx = sws_getContext(st->codecpar->width,
rv->orig_height,
st->codec->pix_fmt,
st->codecpar->format,
width,
height,
rv->c->pix_fmt,
@@ -594,26 +651,30 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
NULL);
}

if (avformat_write_header(rv->of, NULL) < 0) {
ret = avformat_write_header(rv->of, NULL);
if (ret < 0) {
fprintf(stderr,
"Couldn't set output parameters? "
"Proxy not built!\n");
av_free(rv->of);
return 0;
"Couldn't write header: %s\n"
"Proxy not built!\n",
av_err2str(ret));

if (rv->frame) {
av_frame_free(&rv->frame);
}

avcodec_free_context(&rv->c);
avformat_free_context(rv->of);
MEM_freeN(rv);
return NULL;
}

return rv;
}

static int add_to_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, AVFrame *frame)
static void add_to_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, AVFrame *frame)
{
AVPacket packet = {0};
int ret, got_output;

av_init_packet(&packet);

if (!ctx) {
return 0;
return;
}

if (ctx->sws_ctx && frame &&
@@ -633,35 +694,49 @@ static void add_to_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, AVFrame *frame)
frame->pts = ctx->cfra++;
}

ret = avcodec_encode_video2(ctx->c, &packet, frame, &got_output);
int ret = avcodec_send_frame(ctx->c, frame);
if (ret < 0) {
fprintf(stderr, "Error encoding proxy frame %d for '%s'\n", ctx->cfra - 1, ctx->of->filename);
return 0;
/* Can't send frame to encoder. This shouldn't happen. */
fprintf(stderr, "Can't send video frame: %s\n", av_err2str(ret));
return;
}
AVPacket *packet = av_packet_alloc();

if (got_output) {
if (packet.pts != AV_NOPTS_VALUE) {
packet.pts = av_rescale_q(packet.pts, ctx->c->time_base, ctx->st->time_base);
while (ret >= 0) {
ret = avcodec_receive_packet(ctx->c, packet);

if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
/* No more packets to flush. */
break;
}
if (packet.dts != AV_NOPTS_VALUE) {
packet.dts = av_rescale_q(packet.dts, ctx->c->time_base, ctx->st->time_base);
if (ret < 0) {
fprintf(stderr,
"Error encoding proxy frame %d for '%s': %s\n",
ctx->cfra - 1,
ctx->of->url,
av_err2str(ret));
break;
}

packet.stream_index = ctx->st->index;
packet->stream_index = ctx->st->index;
av_packet_rescale_ts(packet, ctx->c->time_base, ctx->st->time_base);
# ifdef FFMPEG_USE_DURATION_WORKAROUND
my_guess_pkt_duration(ctx->of, ctx->st, packet);
# endif

if (av_interleaved_write_frame(ctx->of, &packet) != 0) {
int write_ret = av_interleaved_write_frame(ctx->of, packet);
if (write_ret != 0) {
fprintf(stderr,
"Error writing proxy frame %d "
"into '%s'\n",
"into '%s': %s\n",
ctx->cfra - 1,
ctx->of->filename);
return 0;
ctx->of->url,
av_err2str(write_ret));
break;
}

return 1;
}

return 0;
av_packet_free(&packet);
}
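
The rewrite above replaces the deprecated avcodec_encode_video2() with the send/receive API, where one input frame may yield zero or more packets and a NULL frame puts the encoder into flush mode. A self-contained sketch of that pattern follows; the function name and error handling are illustrative, not Blender code:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Sketch of the send/receive encode loop. Pass frame == NULL once at the end
 * of the stream to drain the encoder. */
static int encode_and_mux(AVFormatContext *of, AVStream *st, AVCodecContext *enc, AVFrame *frame)
{
  int ret = avcodec_send_frame(enc, frame);
  if (ret < 0) {
    return ret;
  }
  AVPacket *pkt = av_packet_alloc();
  while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
    /* Encoder and muxer may run on different time bases. */
    av_packet_rescale_ts(pkt, enc->time_base, st->time_base);
    pkt->stream_index = st->index;
    if ((ret = av_interleaved_write_frame(of, pkt)) < 0) {
      break;
    }
  }
  av_packet_free(&pkt);
  /* EAGAIN (encoder wants more input) and EOF (fully drained) are expected. */
  return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
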
static void free_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, int rollback)
@@ -674,15 +749,15 @@ static void free_proxy_output_ffmpeg(struct proxy_output_ctx *ctx, int rollback)
}

if (!rollback) {
while (add_to_proxy_output_ffmpeg(ctx, NULL)) {
}
/* Flush the remaining packets. */
add_to_proxy_output_ffmpeg(ctx, NULL);
}

avcodec_flush_buffers(ctx->c);

av_write_trailer(ctx->of);

avcodec_close(ctx->c);
avcodec_free_context(&ctx->c);

if (ctx->of->oformat) {
if (!(ctx->of->oformat->flags & AVFMT_NOFILE)) {
@@ -731,9 +806,10 @@ typedef struct FFmpegIndexBuilderContext {
IMB_Proxy_Size proxy_sizes_in_use;

uint64_t seek_pos;
uint64_t last_seek_pos;
uint64_t seek_pos_dts;
uint64_t seek_pos_pts;
uint64_t seek_pos_dts;
uint64_t last_seek_pos;
uint64_t last_seek_pos_pts;
uint64_t last_seek_pos_dts;
uint64_t start_pts;
double frame_rate;
@@ -777,7 +853,7 @@ static IndexBuildContext *index_ffmpeg_create_context(struct anim *anim,
/* Find the video stream */
context->videoStream = -1;
for (i = 0; i < context->iFormatCtx->nb_streams; i++) {
if (context->iFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
if (context->iFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
if (streamcount > 0) {
streamcount--;
continue;
@@ -794,9 +870,8 @@ static IndexBuildContext *index_ffmpeg_create_context(struct anim *anim,
}

context->iStream = context->iFormatCtx->streams[context->videoStream];
context->iCodecCtx = context->iStream->codec;

context->iCodec = avcodec_find_decoder(context->iCodecCtx->codec_id);
context->iCodec = avcodec_find_decoder(context->iStream->codecpar->codec_id);

if (context->iCodec == NULL) {
avformat_close_input(&context->iFormatCtx);
@@ -804,7 +879,9 @@ static IndexBuildContext *index_ffmpeg_create_context(struct anim *anim,
return NULL;
}

context->iCodecCtx->workaround_bugs = 1;
context->iCodecCtx = avcodec_alloc_context3(NULL);
avcodec_parameters_to_context(context->iCodecCtx, context->iStream->codecpar);
context->iCodecCtx->workaround_bugs = FF_BUG_AUTODETECT;

if (context->iCodec->capabilities & AV_CODEC_CAP_AUTO_THREADS) {
context->iCodecCtx->thread_count = 0;
@@ -822,19 +899,19 @@ static IndexBuildContext *index_ffmpeg_create_context(struct anim *anim,

if (avcodec_open2(context->iCodecCtx, context->iCodec, NULL) < 0) {
avformat_close_input(&context->iFormatCtx);
avcodec_free_context(&context->iCodecCtx);
MEM_freeN(context);
return NULL;
}

for (i = 0; i < num_proxy_sizes; i++) {
if (proxy_sizes_in_use & proxy_sizes[i]) {
context->proxy_ctx[i] = alloc_proxy_output_ffmpeg(
anim,
context->iStream,
proxy_sizes[i],
context->iCodecCtx->width * proxy_fac[i],
av_get_cropped_height_from_codec(context->iCodecCtx) * proxy_fac[i],
quality);
context->proxy_ctx[i] = alloc_proxy_output_ffmpeg(anim,
context->iStream,
proxy_sizes[i],
context->iCodecCtx->width * proxy_fac[i],
context->iCodecCtx->height * proxy_fac[i],
quality);
if (!context->proxy_ctx[i]) {
proxy_sizes_in_use &= ~proxy_sizes[i];
}
@@ -873,7 +950,7 @@ static void index_rebuild_ffmpeg_finish(FFmpegIndexBuilderContext *context, int stop)
}
}

avcodec_close(context->iCodecCtx);
avcodec_free_context(&context->iCodecCtx);
avformat_close_input(&context->iFormatCtx);

MEM_freeN(context);
@@ -885,8 +962,9 @@ static void index_rebuild_ffmpeg_proc_decoded_frame(FFmpegIndexBuilderContext *context,
{
int i;
uint64_t s_pos = context->seek_pos;
uint64_t s_pts = context->seek_pos_pts;
uint64_t s_dts = context->seek_pos_dts;
uint64_t pts = av_get_pts_from_frame(context->iFormatCtx, in_frame);
uint64_t pts = av_get_pts_from_frame(in_frame);

for (i = 0; i < context->num_proxy_sizes; i++) {
add_to_proxy_output_ffmpeg(context->proxy_ctx[i], in_frame);
@@ -900,15 +978,15 @@ static void index_rebuild_ffmpeg_proc_decoded_frame(FFmpegIndexBuilderContext *context,
context->frameno = floor(
(pts - context->start_pts) * context->pts_time_base * context->frame_rate + 0.5);

/* decoding starts *always* on I-Frames,
* so: P-Frames won't work, even if all the
* information is in place, when we seek
* to the I-Frame presented *after* the P-Frame,
* but located before the P-Frame within
* the stream */
int64_t seek_pos_pts = timestamp_from_pts_or_dts(context->seek_pos_pts, context->seek_pos_dts);

if (pts < context->seek_pos_pts) {
if (pts < seek_pos_pts) {
/* Decoding starts *always* on I-Frames. In this case our position is
* before our seek I-Frame. So we need to pick the previous available
* I-Frame to be able to decode this one properly.
*/
s_pos = context->last_seek_pos;
s_pts = context->last_seek_pos_pts;
s_dts = context->last_seek_pos_dts;
}

@@ -925,6 +1003,7 @@ static void index_rebuild_ffmpeg_proc_decoded_frame(FFmpegIndexBuilderContext *context,
curr_packet->size,
tc_frameno,
s_pos,
s_pts,
s_dts,
pts);
}
@@ -938,23 +1017,18 @@ static int index_rebuild_ffmpeg(FFmpegIndexBuilderContext *context,
short *do_update,
float *progress)
{
AVFrame *in_frame = 0;
AVPacket next_packet;
AVFrame *in_frame = av_frame_alloc();
AVPacket *next_packet = av_packet_alloc();
uint64_t stream_size;

memset(&next_packet, 0, sizeof(AVPacket));

in_frame = av_frame_alloc();

stream_size = avio_size(context->iFormatCtx->pb);

context->frame_rate = av_q2d(av_guess_frame_rate(context->iFormatCtx, context->iStream, NULL));
context->pts_time_base = av_q2d(context->iStream->time_base);

while (av_read_frame(context->iFormatCtx, &next_packet) >= 0) {
int frame_finished = 0;
while (av_read_frame(context->iFormatCtx, next_packet) >= 0) {
float next_progress =
(float)((int)floor(((double)next_packet.pos) * 100 / ((double)stream_size) + 0.5)) / 100;
(float)((int)floor(((double)next_packet->pos) * 100 / ((double)stream_size) + 0.5)) / 100;

if (*progress != next_progress) {
*progress = next_progress;
@@ -962,50 +1036,62 @@ static int index_rebuild_ffmpeg(FFmpegIndexBuilderContext *context,
}

if (*stop) {
av_free_packet(&next_packet);
break;
}

if (next_packet.stream_index == context->videoStream) {
if (next_packet.flags & AV_PKT_FLAG_KEY) {
if (next_packet->stream_index == context->videoStream) {
if (next_packet->flags & AV_PKT_FLAG_KEY) {
context->last_seek_pos = context->seek_pos;
context->last_seek_pos_pts = context->seek_pos_pts;
context->last_seek_pos_dts = context->seek_pos_dts;
context->seek_pos = next_packet.pos;
context->seek_pos_dts = next_packet.dts;
context->seek_pos_pts = next_packet.pts;

context->seek_pos = next_packet->pos;
context->seek_pos_pts = next_packet->pts;
context->seek_pos_dts = next_packet->dts;
}

avcodec_decode_video2(context->iCodecCtx, in_frame, &frame_finished, &next_packet);
}
int ret = avcodec_send_packet(context->iCodecCtx, next_packet);
while (ret >= 0) {
ret = avcodec_receive_frame(context->iCodecCtx, in_frame);

if (frame_finished) {
index_rebuild_ffmpeg_proc_decoded_frame(context, &next_packet, in_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
/* No more frames to flush. */
break;
}
if (ret < 0) {
fprintf(stderr, "Error decoding proxy frame: %s\n", av_err2str(ret));
break;
}
index_rebuild_ffmpeg_proc_decoded_frame(context, next_packet, in_frame);
}
}
av_free_packet(&next_packet);
av_packet_unref(next_packet);
}

/* process pictures still stuck in decoder engine after EOF
* according to ffmpeg docs using 0-size packets.
* according to ffmpeg docs using NULL packets.
*
* At least, if we haven't already stopped... */

/* this creates the 0-size packet and prevents a memory leak. */
av_free_packet(&next_packet);

if (!*stop) {
int frame_finished;
int ret = avcodec_send_packet(context->iCodecCtx, NULL);

do {
frame_finished = 0;
while (ret >= 0) {
ret = avcodec_receive_frame(context->iCodecCtx, in_frame);

avcodec_decode_video2(context->iCodecCtx, in_frame, &frame_finished, &next_packet);

if (frame_finished) {
index_rebuild_ffmpeg_proc_decoded_frame(context, &next_packet, in_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
/* No more frames to flush. */
break;
}
} while (frame_finished);
if (ret < 0) {
fprintf(stderr, "Error flushing proxy frame: %s\n", av_err2str(ret));
break;
}
index_rebuild_ffmpeg_proc_decoded_frame(context, next_packet, in_frame);
}
}

av_packet_free(&next_packet);
av_free(in_frame);

return 1;
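
The same send/receive migration happens on the decode side: avcodec_decode_video2() and its frame_finished flag give way to avcodec_send_packet() plus an inner avcodec_receive_frame() loop, with a NULL packet draining frames buffered in the decoder after EOF. A minimal sketch of the pattern, independent of the indexer specifics above (function name and callback are illustrative):

#include <libavcodec/avcodec.h>

/* Sketch of the send/receive decode loop. Call once per demuxed packet and
 * once more with pkt == NULL after EOF to flush the decoder. */
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame,
                         void (*handle_frame)(AVFrame *))
{
  int ret = avcodec_send_packet(dec, pkt);
  if (ret < 0) {
    return ret;
  }
  while ((ret = avcodec_receive_frame(dec, frame)) >= 0) {
    handle_frame(frame);
  }
  /* EAGAIN means the decoder wants more input, EOF means it is drained. */
  return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
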
@@ -1343,6 +1429,10 @@ struct anim *IMB_anim_open_proxy(struct anim *anim, IMB_Proxy_Size preview_size)
char fname[FILE_MAX];
int i = IMB_proxy_size_to_array_index(preview_size);

if (i < 0) {
return NULL;
}

if (anim->proxy_anim[i]) {
return anim->proxy_anim[i];
}
@@ -1366,6 +1456,10 @@ struct anim_index *IMB_anim_open_index(struct anim *anim, IMB_Timecode_Type tc)
char fname[FILE_MAX];
int i = IMB_timecode_to_array_index(tc);

if (i < 0) {
return NULL;
}

if (anim->curr_idx[i]) {
return anim->curr_idx[i];
}

@@ -245,7 +245,6 @@ static void ffmpeg_log_callback(void *ptr, int level, const char *format, va_list arg)

void IMB_ffmpeg_init(void)
{
av_register_all();
avdevice_register_all();

ffmpeg_last_error[0] = '\0';
@@ -269,7 +268,6 @@ static int isffmpeg(const char *filepath)
unsigned int i;
int videoStream;
AVCodec *pCodec;
AVCodecContext *pCodecCtx;

if (BLI_path_extension_check_n(filepath,
".swf",
@@ -310,8 +308,8 @@ static int isffmpeg(const char *filepath)
/* Find the first video stream */
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i] && pFormatCtx->streams[i]->codec &&
(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)) {
if (pFormatCtx->streams[i] && pFormatCtx->streams[i]->codecpar &&
(pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)) {
videoStream = i;
break;
}
@@ -322,21 +320,15 @@ static int isffmpeg(const char *filepath)
return 0;
}

pCodecCtx = pFormatCtx->streams[videoStream]->codec;
AVCodecParameters *codec_par = pFormatCtx->streams[videoStream]->codecpar;

/* Find the decoder for the video stream */
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
pCodec = avcodec_find_decoder(codec_par->codec_id);
if (pCodec == NULL) {
avformat_close_input(&pFormatCtx);
return 0;
}

if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
avformat_close_input(&pFormatCtx);
return 0;
}

avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);

return 1;
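
isffmpeg() now probes streams through AVCodecParameters instead of the removed AVStream::codec, and no longer needs to open a codec context at all: finding a decoder for the stream parameters is enough. The same probing idea in isolation, as a hypothetical helper rather than part of the patch:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Return the index of the first video stream with a known decoder, or -1. */
static int find_decodable_video_stream(AVFormatContext *fmt)
{
  for (unsigned int i = 0; i < fmt->nb_streams; i++) {
    const AVCodecParameters *par = fmt->streams[i]->codecpar;
    if (par && par->codec_type == AVMEDIA_TYPE_VIDEO &&
        avcodec_find_decoder(par->codec_id) != NULL) {
      return (int)i;
    }
  }
  return -1;
}
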
@@ -172,7 +172,7 @@ typedef struct Sequence {
float sat;
float mul, handsize;

short anim_preseek;
short anim_preseek; /* UNUSED. */
/** Streamindex for movie or sound files with several streams. */
short streamindex;
/** For multicam source selection. */

@@ -3892,7 +3892,7 @@ static void rna_NodeCryptomatte_layer_name_set(PointerRNA *ptr, int new_value)
}
}

static const EnumPropertyItem *rna_NodeCryptomatte_layer_name_itemf(bContext *UNUSED(C),
static const EnumPropertyItem *rna_NodeCryptomatte_layer_name_itemf(bContext *C,
PointerRNA *ptr,
PropertyRNA *UNUSED(prop),
bool *r_free)
@@ -3903,7 +3903,7 @@ static const EnumPropertyItem *rna_NodeCryptomatte_layer_name_itemf(bContext *UNUSED(C),
EnumPropertyItem template = {0, "", 0, "", ""};
int totitem = 0;

ntreeCompositCryptomatteUpdateLayerNames(node);
ntreeCompositCryptomatteUpdateLayerNames(CTX_data_scene(C), node);
int layer_index;
LISTBASE_FOREACH_INDEX (CryptomatteLayer *, layer, &storage->runtime.layers, layer_index) {
template.value = layer_index;
@@ -3995,7 +3995,7 @@ static void rna_NodeCryptomatte_matte_set(PointerRNA *ptr, const char *value)

static void rna_NodeCryptomatte_update_add(Main *bmain, Scene *scene, PointerRNA *ptr)
{
ntreeCompositCryptomatteSyncFromAdd(ptr->data);
ntreeCompositCryptomatteSyncFromAdd(scene, ptr->data);
rna_Node_update(bmain, scene, ptr);
}

@@ -2417,12 +2417,6 @@ static void rna_def_movie(BlenderRNA *brna)
RNA_def_struct_ui_text(srna, "Movie Sequence", "Sequence strip to load a video");
RNA_def_struct_sdna(srna, "Sequence");

prop = RNA_def_property(srna, "mpeg_preseek", PROP_INT, PROP_NONE);
RNA_def_property_int_sdna(prop, NULL, "anim_preseek");
RNA_def_property_range(prop, 0, 50);
RNA_def_property_ui_text(prop, "MPEG Preseek", "For MPEG movies, preseek this many frames");
RNA_def_property_update(prop, NC_SCENE | ND_SEQUENCER, NULL);

prop = RNA_def_property(srna, "stream_index", PROP_INT, PROP_NONE);
RNA_def_property_int_sdna(prop, NULL, "streamindex");
RNA_def_property_range(prop, 0, 20);

@@ -1381,6 +1381,10 @@ static void modifyGeometry(ModifierData *md,
BKE_modifier_set_error(ctx->object, md, "Node group has cycles");
return;
}
if (tree.has_undefined_nodes_or_sockets()) {
BKE_modifier_set_error(ctx->object, md, "Node group has undefined nodes or sockets");
return;
}

const NodeTreeRef &root_tree_ref = tree.root_context().tree();
Span<const NodeRef *> input_nodes = root_tree_ref.nodes_by_type("NodeGroupInput");

@@ -90,6 +90,46 @@

#include "bmesh.h"

/* -------------------------------------------------------------------- */
/** \name Generic BMesh Utilities
* \{ */

static void vert_face_normal_mark_set(BMVert *v)
{
BMIter iter;
BMFace *f;
BM_ITER_ELEM (f, &iter, v, BM_FACES_OF_VERT) {
f->no[0] = FLT_MAX;
}
}

static void vert_face_normal_mark_update(BMVert *v)
{
BMIter iter;
BMFace *f;
BM_ITER_ELEM (f, &iter, v, BM_FACES_OF_VERT) {
if (f->no[0] == FLT_MAX) {
BM_face_normal_update(f);
}
}
}

/**
* Recalculate the normals of all faces connected to `verts`.
*/
static void vert_array_face_normal_update(BMVert **verts, int verts_len)
{
for (int i = 0; i < verts_len; i++) {
vert_face_normal_mark_set(verts[i]);
}

for (int i = 0; i < verts_len; i++) {
vert_face_normal_mark_update(verts[i]);
}
}

/** \} */
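
The two-pass structure matters here: marking all faces first and updating in a second sweep means a face shared by several verts in the array is recalculated only once, since BM_face_normal_update() overwrites the FLT_MAX marker on the first visit. A usage sketch with hypothetical verts:

/* After moving v_a and v_b (hypothetical verts), refresh only the affected
 * face normals instead of walking the whole mesh. */
BMVert *changed[2] = {v_a, v_b};
vert_array_face_normal_update(changed, 2);
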
|
||||
|
||||
typedef struct {
|
||||
float mat[3][3];
|
||||
/* Vert that edge is pointing away from, no relation to
|
||||
@@ -1352,13 +1392,25 @@ static void skin_fix_hole_no_good_verts(BMesh *bm, Frame *frame, BMFace *split_f
|
||||
split_face = collapse_face_corners(bm, split_face, 4, vert_buf);
|
||||
}
|
||||
|
||||
/* Done with dynamic array, split_face must now be a quad */
|
||||
BLI_array_free(vert_buf);
|
||||
/* `split_face` should now be a quad. */
|
||||
BLI_assert(split_face->len == 4);
|
||||
|
||||
/* Account for the highly unlikely case that it's not a quad. */
|
||||
if (split_face->len != 4) {
|
||||
/* Reuse `vert_buf` for updating normals. */
|
||||
BLI_array_clear(vert_buf);
|
||||
BLI_array_grow_items(vert_buf, split_face->len);
|
||||
|
||||
BM_iter_as_array(bm, BM_FACES_OF_VERT, split_face, (void **)vert_buf, split_face->len);
|
||||
|
||||
vert_array_face_normal_update(vert_buf, split_face->len);
|
||||
BLI_array_free(vert_buf);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Done with dynamic array. */
|
||||
BLI_array_free(vert_buf);
|
||||
|
||||
/* Get split face's verts */
|
||||
// BM_iter_as_array(bm, BM_VERTS_OF_FACE, split_face, (void **)verts, 4);
|
||||
BM_face_as_array_vert_quad(split_face, verts);
|
||||
@@ -1373,6 +1425,8 @@ static void skin_fix_hole_no_good_verts(BMesh *bm, Frame *frame, BMFace *split_f
|
||||
}
|
||||
BMO_op_exec(bm, &op);
|
||||
BMO_op_finish(bm, &op);
|
||||
|
||||
vert_array_face_normal_update(frame->verts, 4);
|
||||
}
|
||||
|
||||
/* If the frame has some vertices that are inside the hull (detached)
|
||||
@@ -1731,6 +1785,11 @@ static void skin_smooth_hulls(BMesh *bm,
|
||||
|
||||
/* Done with original coordinates */
|
||||
BM_data_layer_free_n(bm, &bm->vdata, CD_SHAPEKEY, skey);
|
||||
|
||||
BMFace *f;
|
||||
BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
|
||||
BM_face_normal_update(f);
|
||||
}
|
||||
}
|
||||
|
||||
/* Returns true if all hulls are successfully built, false otherwise */
|
||||
|
@@ -170,6 +170,7 @@ class DerivedNodeTree {
|
||||
Span<const NodeTreeRef *> used_node_tree_refs() const;
|
||||
|
||||
bool has_link_cycles() const;
|
||||
bool has_undefined_nodes_or_sockets() const;
|
||||
void foreach_node(FunctionRef<void(DNode)> callback) const;
|
||||
|
||||
std::string to_dot() const;
|
||||
|
@@ -125,6 +125,7 @@ class SocketRef : NonCopyable, NonMovable {
|
||||
bNodeTree *btree() const;
|
||||
|
||||
bool is_available() const;
|
||||
bool is_undefined() const;
|
||||
|
||||
void *default_value() const;
|
||||
template<typename T> T *default_value() const;
|
||||
@@ -197,6 +198,7 @@ class NodeRef : NonCopyable, NonMovable {
|
||||
bool is_group_output_node() const;
|
||||
bool is_muted() const;
|
||||
bool is_frame() const;
|
||||
bool is_undefined() const;
|
||||
|
||||
void *storage() const;
|
||||
template<typename T> T *storage() const;
|
||||
@@ -260,6 +262,7 @@ class NodeTreeRef : NonCopyable, NonMovable {
|
||||
Span<const LinkRef *> links() const;
|
||||
|
||||
bool has_link_cycles() const;
|
||||
bool has_undefined_nodes_or_sockets() const;
|
||||
|
||||
bNodeTree *btree() const;
|
||||
StringRefNull name() const;
|
||||
@@ -417,6 +420,11 @@ inline bool SocketRef::is_available() const
|
||||
return (bsocket_->flag & SOCK_UNAVAIL) == 0;
|
||||
}
|
||||
|
||||
inline bool SocketRef::is_undefined() const
|
||||
{
|
||||
return bsocket_->typeinfo == &NodeSocketTypeUndefined;
|
||||
}
|
||||
|
||||
inline void *SocketRef::default_value() const
|
||||
{
|
||||
return bsocket_->default_value;
|
||||
@@ -554,6 +562,11 @@ inline bool NodeRef::is_frame() const
|
||||
return bnode_->type == NODE_FRAME;
|
||||
}
|
||||
|
||||
inline bool NodeRef::is_undefined() const
|
||||
{
|
||||
return bnode_->typeinfo == &NodeTypeUndefined;
|
||||
}
|
||||
|
||||
inline bool NodeRef::is_muted() const
|
||||
{
|
||||
return (bnode_->flag & NODE_MUTED) != 0;
|
||||
|
@@ -40,61 +40,74 @@
 
 /** \name Cryptomatte
  * \{ */
+static blender::bke::cryptomatte::CryptomatteSessionPtr cryptomatte_init_from_node_render(
+    const bNode &node, const bool use_meta_data)
+{
+  blender::bke::cryptomatte::CryptomatteSessionPtr session;
+
+  Scene *scene = (Scene *)node.id;
+  if (!scene) {
+    return session;
+  }
+  BLI_assert(GS(scene->id.name) == ID_SCE);
+
+  if (use_meta_data) {
+    Render *render = (scene) ? RE_GetSceneRender(scene) : nullptr;
+    RenderResult *render_result = render ? RE_AcquireResultRead(render) : nullptr;
+    if (render_result) {
+      session = blender::bke::cryptomatte::CryptomatteSessionPtr(
+          BKE_cryptomatte_init_from_render_result(render_result));
+    }
+    if (render) {
+      RE_ReleaseResult(render);
+    }
+  }
+
+  if (session == nullptr) {
+    session = blender::bke::cryptomatte::CryptomatteSessionPtr(
+        BKE_cryptomatte_init_from_scene(scene));
+  }
+  return session;
+}
+
+static blender::bke::cryptomatte::CryptomatteSessionPtr cryptomatte_init_from_node_image(
+    const Scene &scene, const bNode &node)
+{
+  blender::bke::cryptomatte::CryptomatteSessionPtr session;
+  Image *image = (Image *)node.id;
+  if (!image) {
+    return session;
+  }
+  BLI_assert(GS(image->id.name) == ID_IM);
+
+  NodeCryptomatte *node_cryptomatte = static_cast<NodeCryptomatte *>(node.storage);
+  ImageUser *iuser = &node_cryptomatte->iuser;
+  BKE_image_user_frame_calc(image, iuser, scene.r.cfra);
+  ImBuf *ibuf = BKE_image_acquire_ibuf(image, iuser, nullptr);
+  RenderResult *render_result = image->rr;
+  if (render_result) {
+    session = blender::bke::cryptomatte::CryptomatteSessionPtr(
+        BKE_cryptomatte_init_from_render_result(render_result));
+  }
+  BKE_image_release_ibuf(image, ibuf, nullptr);
+  return session;
+}
+
 static blender::bke::cryptomatte::CryptomatteSessionPtr cryptomatte_init_from_node(
-    const bNode &node, const int frame_number, const bool use_meta_data)
+    const Scene &scene, const bNode &node, const bool use_meta_data)
 {
   blender::bke::cryptomatte::CryptomatteSessionPtr session;
   if (node.type != CMP_NODE_CRYPTOMATTE) {
     return session;
   }
 
-  NodeCryptomatte *node_cryptomatte = static_cast<NodeCryptomatte *>(node.storage);
   switch (node.custom1) {
     case CMP_CRYPTOMATTE_SRC_RENDER: {
-      Scene *scene = (Scene *)node.id;
-      if (!scene) {
-        return session;
-      }
-      BLI_assert(GS(scene->id.name) == ID_SCE);
-
-      if (use_meta_data) {
-        Render *render = (scene) ? RE_GetSceneRender(scene) : nullptr;
-        RenderResult *render_result = render ? RE_AcquireResultRead(render) : nullptr;
-        if (render_result) {
-          session = blender::bke::cryptomatte::CryptomatteSessionPtr(
-              BKE_cryptomatte_init_from_render_result(render_result));
-        }
-        if (render) {
-          RE_ReleaseResult(render);
-        }
-      }
-
-      if (session == nullptr) {
-        session = blender::bke::cryptomatte::CryptomatteSessionPtr(
-            BKE_cryptomatte_init_from_scene(scene));
-      }
-
-      break;
+      return cryptomatte_init_from_node_render(node, use_meta_data);
     }
 
     case CMP_CRYPTOMATTE_SRC_IMAGE: {
-      Image *image = (Image *)node.id;
-      if (!image) {
-        break;
-      }
-      BLI_assert(GS(image->id.name) == ID_IM);
-
-      ImageUser *iuser = &node_cryptomatte->iuser;
-      BKE_image_user_frame_calc(image, iuser, frame_number);
-      ImBuf *ibuf = BKE_image_acquire_ibuf(image, iuser, nullptr);
-      RenderResult *render_result = image->rr;
-      if (render_result) {
-        session = blender::bke::cryptomatte::CryptomatteSessionPtr(
-            BKE_cryptomatte_init_from_render_result(render_result));
-      }
-      BKE_image_release_ibuf(image, ibuf, nullptr);
-      break;
+      return cryptomatte_init_from_node_image(scene, node);
     }
   }
   return session;

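The refactor above reduces cryptomatte_init_from_node() to a dispatcher and threads the scene through, so image-sequence Cryptomatte nodes resolve the current frame via scene.r.cfra instead of a hard-coded frame 0. A minimal call-site sketch (the local variable names are assumptions; the argument order comes from the hunks below):

  blender::bke::cryptomatte::CryptomatteSessionPtr session = cryptomatte_init_from_node(
      *scene, *node, /*use_meta_data=*/true);
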
@@ -111,7 +124,10 @@ static CryptomatteEntry *cryptomatte_find(const NodeCryptomatte &n, float encoded_hash)
   return nullptr;
 }
 
-static void cryptomatte_add(bNode &node, NodeCryptomatte &node_cryptomatte, float encoded_hash)
+static void cryptomatte_add(const Scene &scene,
+                            bNode &node,
+                            NodeCryptomatte &node_cryptomatte,
+                            float encoded_hash)
 {
   /* Check if entry already exist. */
   if (cryptomatte_find(node_cryptomatte, encoded_hash)) {

@@ -121,9 +137,8 @@ static void cryptomatte_add(bNode &node, NodeCryptomatte &node_cryptomatte, float encoded_hash)
   CryptomatteEntry *entry = static_cast<CryptomatteEntry *>(
       MEM_callocN(sizeof(CryptomatteEntry), __func__));
   entry->encoded_hash = encoded_hash;
-  /* TODO(jbakker): Get current frame from scene. */
   blender::bke::cryptomatte::CryptomatteSessionPtr session = cryptomatte_init_from_node(
-      node, 0, true);
+      scene, node, true);
   if (session) {
     BKE_cryptomatte_find_name(session.get(), encoded_hash, entry->name, sizeof(entry->name));
   }

@@ -151,12 +166,12 @@ static bNodeSocketTemplate cmp_node_cryptomatte_out[] = {
     {-1, ""},
 };
 
-void ntreeCompositCryptomatteSyncFromAdd(bNode *node)
+void ntreeCompositCryptomatteSyncFromAdd(const Scene *scene, bNode *node)
 {
   BLI_assert(ELEM(node->type, CMP_NODE_CRYPTOMATTE, CMP_NODE_CRYPTOMATTE_LEGACY));
   NodeCryptomatte *n = static_cast<NodeCryptomatte *>(node->storage);
   if (n->runtime.add[0] != 0.0f) {
-    cryptomatte_add(*node, *n, n->runtime.add[0]);
+    cryptomatte_add(*scene, *node, *n, n->runtime.add[0]);
     zero_v3(n->runtime.add);
   }
 }

@@ -170,14 +185,14 @@ void ntreeCompositCryptomatteSyncFromRemove(bNode *node)
     zero_v3(n->runtime.remove);
   }
 }
-void ntreeCompositCryptomatteUpdateLayerNames(bNode *node)
+void ntreeCompositCryptomatteUpdateLayerNames(const Scene *scene, bNode *node)
 {
   BLI_assert(node->type == CMP_NODE_CRYPTOMATTE);
   NodeCryptomatte *n = static_cast<NodeCryptomatte *>(node->storage);
   BLI_freelistN(&n->runtime.layers);
 
   blender::bke::cryptomatte::CryptomatteSessionPtr session = cryptomatte_init_from_node(
-      *node, 0, false);
+      *scene, *node, false);
 
   if (session) {
     for (blender::StringRef layer_name :

@@ -190,12 +205,15 @@ void ntreeCompositCryptomatteUpdateLayerNames(bNode *node)
   }
 }
 
-void ntreeCompositCryptomatteLayerPrefix(const bNode *node, char *r_prefix, size_t prefix_len)
+void ntreeCompositCryptomatteLayerPrefix(const Scene *scene,
+                                         const bNode *node,
+                                         char *r_prefix,
+                                         size_t prefix_len)
 {
   BLI_assert(node->type == CMP_NODE_CRYPTOMATTE);
   NodeCryptomatte *node_cryptomatte = (NodeCryptomatte *)node->storage;
   blender::bke::cryptomatte::CryptomatteSessionPtr session = cryptomatte_init_from_node(
-      *node, 0, false);
+      *scene, *node, false);
   std::string first_layer_name;
 
   if (session) {

@@ -216,10 +234,10 @@ void ntreeCompositCryptomatteLayerPrefix(const bNode *node, char *r_prefix, size_t prefix_len)
   BLI_strncpy(r_prefix, cstr, prefix_len);
 }
 
-CryptomatteSession *ntreeCompositCryptomatteSession(bNode *node)
+CryptomatteSession *ntreeCompositCryptomatteSession(const Scene *scene, bNode *node)
 {
   blender::bke::cryptomatte::CryptomatteSessionPtr session_ptr = cryptomatte_init_from_node(
-      *node, 0, true);
+      *scene, *node, true);
   return session_ptr.release();
 }
 

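Note that ntreeCompositCryptomatteSession() hands ownership to its caller via session_ptr.release(). A sketch of the implied contract, assuming BKE_cryptomatte_free() is the matching destructor for the raw pointer:

  CryptomatteSession *session = ntreeCompositCryptomatteSession(scene, node);
  if (session) {
    /* ... look up layers and names ... */
    BKE_cryptomatte_free(session); /* caller owns the session */
  }
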
@@ -84,6 +84,16 @@ bool DerivedNodeTree::has_link_cycles() const
   return false;
 }
 
+bool DerivedNodeTree::has_undefined_nodes_or_sockets() const
+{
+  for (const NodeTreeRef *tree_ref : used_node_tree_refs_) {
+    if (tree_ref->has_undefined_nodes_or_sockets()) {
+      return true;
+    }
+  }
+  return false;
+}
+
 /* Calls the given callback on all nodes in the (possibly nested) derived node tree. */
 void DerivedNodeTree::foreach_node(FunctionRef<void(DNode)> callback) const
 {

@@ -346,6 +346,21 @@ bool NodeTreeRef::has_link_cycles() const
   return false;
 }
 
+bool NodeTreeRef::has_undefined_nodes_or_sockets() const
+{
+  for (const NodeRef *node : nodes_by_id_) {
+    if (node->is_undefined()) {
+      return true;
+    }
+  }
+  for (const SocketRef *socket : sockets_by_id_) {
+    if (socket->is_undefined()) {
+      return true;
+    }
+  }
+  return false;
+}
+
 std::string NodeTreeRef::to_dot() const
 {
   dot::DirectedGraph digraph;

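With this tree-level check available alongside has_link_cycles(), a caller can cheaply validate a tree before evaluating it. A hypothetical guard, not part of this patch:

  static bool tree_is_evaluable(const blender::nodes::NodeTreeRef &tree)
  {
    /* Reject trees containing unknown (newer-version) nodes/sockets or link cycles. */
    return !tree.has_undefined_nodes_or_sockets() && !tree.has_link_cycles();
  }
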
@@ -43,8 +43,9 @@ static int node_shader_gpu_output_aov(GPUMaterial *mat,
 {
   GPUNodeLink *outlink;
   NodeShaderOutputAOV *aov = (NodeShaderOutputAOV *)node->storage;
-  /* Keep in sync with `renderpass_lib.glsl#render_pass_aov_hash`. */
-  unsigned int hash = BLI_hash_string(aov->name) & ~1;
+  /* Keep in sync with `renderpass_lib.glsl#render_pass_aov_hash` and
+   * `EEVEE_renderpasses_aov_hash`. */
+  unsigned int hash = BLI_hash_string(aov->name) << 1;
   GPU_stack_link(mat, node, "node_output_aov", in, out, &outlink);
   GPU_material_add_output_link_aov(mat, outlink, hash);
 

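The old mask `& ~1` made two name hashes that differ only in bit 0 collide; shifting left instead keeps the hash distinct (only the top bit is lost) and matches the scheme in `EEVEE_renderpasses_aov_hash`, while still leaving bit 0 clear. A sketch of my reading of it; the use of the low bit as a flag is an assumption, not shown in this hunk:

  unsigned int hash = BLI_hash_string(name) << 1; /* bit 0 is always 0 */
  unsigned int flagged = hash | 1u;               /* same AOV, low bit used as a flag */
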
@@ -1065,26 +1065,18 @@ static PyObject *bpy_bmesh_to_mesh(BPy_BMesh *self, PyObject *args)
   Py_RETURN_NONE;
 }
 
-PyDoc_STRVAR(
-    bpy_bmesh_from_object_doc,
-    ".. method:: from_object(object, depsgraph, deform=True, cage=False, face_normals=True)\n"
-    "\n"
-    "   Initialize this bmesh from existing object datablock (currently only meshes are "
-    "supported).\n"
-    "\n"
-    "   :arg object: The object data to load.\n"
-    "   :type object: :class:`Object`\n"
-    "   :arg deform: Apply deformation modifiers.\n"
-    "   :type deform: boolean\n"
-    "   :arg cage: Get the mesh as a deformed cage.\n"
-    "   :type cage: boolean\n"
-    "   :arg face_normals: Calculate face normals.\n"
-    "   :type face_normals: boolean\n"
-    "\n"
-    "   .. deprecated:: 2.93\n"
-    "\n"
-    "      The deform parameter is deprecated, assumed to be True, and will be removed in version "
-    "3.0.\n");
+PyDoc_STRVAR(bpy_bmesh_from_object_doc,
+             ".. method:: from_object(object, depsgraph, cage=False, face_normals=True)\n"
+             "\n"
+             "   Initialize this bmesh from existing object data-block (only meshes are currently "
+             "supported).\n"
+             "\n"
+             "   :arg object: The object data to load.\n"
+             "   :type object: :class:`Object`\n"
+             "   :arg cage: Get the mesh as a deformed cage.\n"
+             "   :type cage: boolean\n"
+             "   :arg face_normals: Calculate face normals.\n"
+             "   :type face_normals: boolean\n");
 static PyObject *bpy_bmesh_from_object(BPy_BMesh *self, PyObject *args, PyObject *kw)
 {
   static const char *kwlist[] = {"object", "depsgraph", "deform", "cage", "face_normals", NULL};

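Since "deform" remains in kwlist, existing scripts that pass the argument keep parsing; only the docstring drops it. A sketch of how the deprecated argument could be rejected in the body (use_deform is a hypothetical local, not code from this patch):

  if (use_deform == false) {
    PyErr_SetString(PyExc_ValueError,
                    "from_object(...): deform=False is no longer supported");
    return NULL;
  }
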
@@ -479,7 +479,7 @@ static TriTessFace *mesh_calc_tri_tessface(Mesh *me, bool tangent, Mesh *me_eval
     loop_normals = CustomData_get_layer(&me_eval->ldata, CD_NORMAL);
   }
 
-  const float *precomputed_normals = CustomData_get_layer(&me->pdata, CD_NORMAL);
+  const float(*precomputed_normals)[3] = CustomData_get_layer(&me->pdata, CD_NORMAL);
   const bool calculate_normal = precomputed_normals ? false : true;
 
   for (i = 0; i < tottri; i++) {

@@ -511,7 +511,7 @@ static TriTessFace *mesh_calc_tri_tessface(Mesh *me, bool tangent, Mesh *me_eval
       copy_v3_v3(triangles[i].normal, no);
     }
     else {
-      copy_v3_v3(triangles[i].normal, &precomputed_normals[lt->poly]);
+      copy_v3_v3(triangles[i].normal, precomputed_normals[lt->poly]);
     }
   }
 

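The two bake hunks above are one fix: the CD_NORMAL layer holds per-poly float[3] vectors, so typing the pointer as const float(*)[3] makes indexing step one whole normal at a time, and precomputed_normals[lt->poly] then decays to the const float * that copy_v3_v3() expects. An illustrative restatement using the names from the hunks:

  const float(*normals)[3] = CustomData_get_layer(&me->pdata, CD_NORMAL);
  /* normals[i] is the i-th 12-byte normal, not the i-th float. */
  copy_v3_v3(triangles[i].normal, normals[lt->poly]);
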
@@ -1111,8 +1111,6 @@ static ImBuf *seq_render_movie_strip_view(const SeqRenderData *context,
   ImBuf *ibuf = NULL;
   IMB_Proxy_Size psize = SEQ_rendersize_to_proxysize(context->preview_render_size);
 
-  IMB_anim_set_preseek(sanim->anim, seq->anim_preseek);
-
   if (SEQ_can_use_proxy(context, seq, psize)) {
     /* Try to get a proxy image.
      * Movie proxies are handled by ImBuf module with exception of `custom file` setting. */

@@ -549,7 +549,6 @@ Sequence *SEQ_add_movie_strip(Main *bmain, Scene *scene, ListBase *seqbase, SeqL
   seq->blend_mode = SEQ_TYPE_CROSS; /* so alpha adjustment fade to the strip below */
 
   if (anim_arr[0] != NULL) {
-    seq->anim_preseek = IMB_anim_get_preseek(anim_arr[0]);
     seq->len = IMB_anim_get_duration(anim_arr[0], IMB_TC_RECORD_RUN);
 
     IMB_anim_load_metadata(anim_arr[0]);

@@ -691,8 +690,6 @@ void SEQ_add_reload_new_file(Main *bmain, Scene *scene, Sequence *seq, const boo
     seq->len = IMB_anim_get_duration(
         sanim->anim, seq->strip->proxy ? seq->strip->proxy->tc : IMB_TC_RECORD_RUN);
 
-    seq->anim_preseek = IMB_anim_get_preseek(sanim->anim);
-
     seq->len -= seq->anim_startofs;
     seq->len -= seq->anim_endofs;
    if (seq->len < 0) {