提交 d517327b 编写于 作者: M Matt Oliver

Merge remote-tracking branch 'upstream/master'

......@@ -5939,7 +5939,7 @@ enabled libwebp && {
enabled libwebp_encoder && require_pkg_config libwebp "libwebp >= 0.2.0" webp/encode.h WebPGetEncoderVersion
enabled libwebp_anim_encoder && check_pkg_config libwebp_anim_encoder "libwebpmux >= 0.4.0" webp/mux.h WebPAnimEncoderOptionsInit; }
enabled libx264 && { check_pkg_config libx264 x264 "stdint.h x264.h" x264_encoder_encode ||
{ require libx264 "stdint.h x264.h" x264_encoder_encode -lx264 &&
{ require libx264 "stdint.h x264.h" x264_encoder_encode "-lx264 $pthreads_extralibs $libm_extralibs" &&
warn "using libx264 without pkg-config"; } } &&
require_cpp_condition x264.h "X264_BUILD >= 118" &&
{ check_cpp_condition x264.h "X264_MPEG2" &&
......@@ -6010,15 +6010,13 @@ fi
if enabled sdl2; then
SDL2_CONFIG="${cross_prefix}sdl2-config"
if test_pkg_config sdl2 "sdl2 >= 2.0.1 sdl2 < 2.1.0" SDL_events.h SDL_PollEvent; then
check_func_headers SDL.h SDL_Init $sdl2_extralibs $sdl2_cflags ||
disable sdl2
elif "${SDL2_CONFIG}" --version > /dev/null 2>&1; then
test_pkg_config sdl2 "sdl2 >= 2.0.1 sdl2 < 2.1.0" SDL_events.h SDL_PollEvent
if disabled sdl2 && "${SDL2_CONFIG}" --version > /dev/null 2>&1; then
sdl2_cflags=$("${SDL2_CONFIG}" --cflags)
sdl2_extralibs=$("${SDL2_CONFIG}" --libs)
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x020001" $sdl2_cflags &&
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) < 0x020100" $sdl2_cflags &&
check_func_headers SDL.h SDL_Init $sdl2_extralibs $sdl2_cflags &&
check_func_headers SDL_events.h SDL_PollEvent $sdl2_extralibs $sdl2_cflags &&
enable sdl2
fi
if test $target_os = "mingw32"; then
......
......@@ -64,7 +64,6 @@ to find an optimal combination by adding or subtracting a specific value from
all quantizers and adjusting some individual quantizer a little. Will tune
itself based on whether @option{aac_is}, @option{aac_ms} and @option{aac_pns}
are enabled.
This is the default choice for a coder.
@item anmr
Average noise to mask ratio (ANMR) trellis-based solution.
......@@ -77,10 +76,10 @@ Not currently recommended.
@item fast
Constant quantizer method.
This method sets a constant quantizer for all bands. This is the fastest of all
the methods and has no rate control or support for @option{aac_is} or
@option{aac_pns}.
Not recommended.
Uses a cheaper version of twoloop algorithm that doesn't try to do as many
clever adjustments. Worse with low bitrates (less than 64kbps), but is better
and much faster at higher bitrates.
This is the default choice for a coder.
@end table
......
......@@ -217,8 +217,7 @@ close:
fclose(fout);
av_frame_free(&sw_frame);
av_frame_free(&hw_frame);
if (avctx)
avcodec_free_context(&avctx);
avcodec_free_context(&avctx);
av_buffer_unref(&hw_device_ctx);
return err;
......
......@@ -1084,7 +1084,36 @@ Set output gain.
@item f
Set coefficients format.
Can be @code{tf} - transfer function or @code{zp} - Z-plane zeros/poles.
@table @samp
@item tf
transfer function
@item zp
Z-plane zeros/poles, cartesian (default)
@item pr
Z-plane zeros/poles, polar radians
@item pd
Z-plane zeros/poles, polar degrees
@end table
@item r
Set kind of processing.
Can be @code{d} - direct or @code{s} - serial cascading. Default is @code{s}.
@item e
Set filtering precision.
@table @samp
@item dbl
double-precision floating-point (default)
@item flt
single-precision floating-point
@item i32
32-bit integers
@item i16
16-bit integers
@end table
@end table
Coefficients in @code{tf} format are separated by spaces and are in ascending
......@@ -1104,13 +1133,13 @@ used for all remaining channels.
@item
Apply 2-pole elliptic notch at around 5000 Hz for 48000 Hz sample rate:
@example
aiir=k=1:z=7.957584807809675810E-1 -2.575128568908332300 3.674839853930788710 -2.57512875289799137 7.957586296317130880E-1:p=1 -2.86950072432325953 3.63022088054647218 -2.28075678147272232 6.361362326477423500E-1:f=tf
aiir=k=1:z=7.957584807809675810E-1 -2.575128568908332300 3.674839853930788710 -2.57512875289799137 7.957586296317130880E-1:p=1 -2.86950072432325953 3.63022088054647218 -2.28075678147272232 6.361362326477423500E-1:f=tf:r=d
@end example
@item
Same as above but in @code{zp} format:
@example
aiir=k=0.79575848078096756:z=0.80918701+0.58773007i 0.80918701-0.58773007i 0.80884700+0.58784055i 0.80884700-0.58784055i:p=0.63892345+0.59951235i 0.63892345-0.59951235i 0.79582691+0.44198673i 0.79582691-0.44198673i:f=zp
aiir=k=0.79575848078096756:z=0.80918701+0.58773007i 0.80918701-0.58773007i 0.80884700+0.58784055i 0.80884700-0.58784055i:p=0.63892345+0.59951235i 0.63892345-0.59951235i 0.79582691+0.44198673i 0.79582691-0.44198673i:f=zp:r=s
@end example
@end itemize
......@@ -12785,7 +12814,7 @@ sequential number of the input frame, starting from 1
Mean Square Error pixel-by-pixel average difference of the compared
frames, averaged over all the image components.
@item mse_y, mse_u, mse_v, mse_r, mse_g, mse_g, mse_a
@item mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
Mean Square Error pixel-by-pixel average difference of the compared
frames for the component specified by the suffix.
......
......@@ -1118,10 +1118,10 @@ fail:
#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
static const AVOption aacenc_options[] = {
{"aac_coder", "Coding algorithm", offsetof(AACEncContext, options.coder), AV_OPT_TYPE_INT, {.i64 = AAC_CODER_TWOLOOP}, 0, AAC_CODER_NB-1, AACENC_FLAGS, "coder"},
{"aac_coder", "Coding algorithm", offsetof(AACEncContext, options.coder), AV_OPT_TYPE_INT, {.i64 = AAC_CODER_FAST}, 0, AAC_CODER_NB-1, AACENC_FLAGS, "coder"},
{"anmr", "ANMR method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_ANMR}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
{"twoloop", "Two loop searching method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_TWOLOOP}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
{"fast", "Constant quantizer", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_FAST}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
{"fast", "Default fast search", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_FAST}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
{"aac_ms", "Force M/S stereo coding", offsetof(AACEncContext, options.mid_side), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AACENC_FLAGS},
{"aac_is", "Intensity stereo coding", offsetof(AACEncContext, options.intensity_stereo), AV_OPT_TYPE_BOOL, {.i64 = 1}, -1, 1, AACENC_FLAGS},
{"aac_pns", "Perceptual noise substitution", offsetof(AACEncContext, options.pns), AV_OPT_TYPE_BOOL, {.i64 = 1}, -1, 1, AACENC_FLAGS},
......
......@@ -336,11 +336,11 @@ function ff_sbr_hf_apply_noise_0_neon, export=1
vld1.32 {d0}, [r0,:64]
vld1.32 {d6}, [lr,:64]
vld1.32 {d2[]}, [r1,:32]!
vld1.32 {d3[]}, [r2,:32]!
vld1.32 {d18[]}, [r2,:32]!
vceq.f32 d4, d2, #0
veor d2, d2, d3
vmov d1, d0
vmla.f32 d0, d6, d3
vmla.f32 d0, d6, d18
vadd.f32 s2, s2, s4
vbif d0, d1, d4
vst1.32 {d0}, [r0,:64]!
......
......@@ -767,7 +767,7 @@ typedef struct AVCodecDescriptor {
* Note: If the first 23 bits of the additional bytes are not 0, then damaged
* MPEG bitstreams could cause overread and segfault.
*/
#define AV_INPUT_BUFFER_PADDING_SIZE 32
#define AV_INPUT_BUFFER_PADDING_SIZE 64
/**
* @ingroup lavc_encoding
......
此差异已折叠。
......@@ -105,10 +105,10 @@ void ff_spatial_idwt_slice2(DWTContext *d, int y);
(int)(((unsigned)(b2) - ((int)(-b0 + 9U*b1 + 9U*b3 - b4 + 16) >> 5)))
#define COMPOSE_HAARiL0(b0, b1)\
(b0 - ((b1 + 1) >> 1))
((int)(b0 - (unsigned)((int)(b1 + 1U) >> 1)))
#define COMPOSE_HAARiH0(b0, b1)\
(b0 + b1)
((int)(b0 + (unsigned)(b1)))
#define COMPOSE_FIDELITYiL0(b0, b1, b2, b3, b4, b5, b6, b7, b8)\
((unsigned)b4 - ((int)(-8*(b0+(unsigned)b8) + 21*(b1+(unsigned)b7) - 46*(b2+(unsigned)b6) + 161*(b3+(unsigned)b5) + 128) >> 8))
......
......@@ -509,16 +509,16 @@ static inline void codeblock(DiracContext *s, SubBand *b,
}
if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
int quant = b->quant;
int quant;
if (is_arith)
quant += dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
quant = dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
else
quant += dirac_get_se_golomb(gb);
if (quant < 0) {
quant = dirac_get_se_golomb(gb);
if (quant > INT_MAX - b->quant || b->quant + quant < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
return;
}
b->quant = quant;
b->quant += quant;
}
if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
......
......@@ -630,10 +630,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
s->bits_per_raw_sample = 16;
s->use32bit = 1;
s->version = FFMAX(s->version, 1);
if (avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
av_log(avctx, AV_LOG_ERROR, "16bit RGB is experimental and under development, only use it for experiments\n");
return AVERROR_INVALIDDATA;
}
break;
case AV_PIX_FMT_0RGB32:
s->colorspace = 1;
......
......@@ -35,10 +35,10 @@ static void FUNCC(ff_h264_add_pixels4)(uint8_t *_dst, int16_t *_src, int stride)
stride /= sizeof(pixel);
for (i = 0; i < 4; i++) {
dst[0] += src[0];
dst[1] += src[1];
dst[2] += src[2];
dst[3] += src[3];
dst[0] += (unsigned)src[0];
dst[1] += (unsigned)src[1];
dst[2] += (unsigned)src[2];
dst[3] += (unsigned)src[3];
dst += stride;
src += 4;
......@@ -55,14 +55,14 @@ static void FUNCC(ff_h264_add_pixels8)(uint8_t *_dst, int16_t *_src, int stride)
stride /= sizeof(pixel);
for (i = 0; i < 8; i++) {
dst[0] += src[0];
dst[1] += src[1];
dst[2] += src[2];
dst[3] += src[3];
dst[4] += src[4];
dst[5] += src[5];
dst[6] += src[6];
dst[7] += src[7];
dst[0] += (unsigned)src[0];
dst[1] += (unsigned)src[1];
dst[2] += (unsigned)src[2];
dst[3] += (unsigned)src[3];
dst[4] += (unsigned)src[4];
dst[5] += (unsigned)src[5];
dst[6] += (unsigned)src[6];
dst[7] += (unsigned)src[7];
dst += stride;
src += 8;
......
......@@ -64,9 +64,9 @@ static void ict_int(void *_src0, void *_src1, void *_src2, int csize)
int i;
for (i = 0; i < csize; i++) {
i0 = *src0 + *src2 + (((26345 * *src2) + (1 << 15)) >> 16);
i0 = *src0 + *src2 + ((int)((26345U * *src2) + (1 << 15)) >> 16);
i1 = *src0 - ((int)(((unsigned)i_ict_params[1] * *src1) + (1 << 15)) >> 16)
- (((i_ict_params[2] * *src2) + (1 << 15)) >> 16);
- ((int)(((unsigned)i_ict_params[2] * *src2) + (1 << 15)) >> 16);
i2 = *src0 + (2 * *src1) + ((int)((-14942U * *src1) + (1 << 15)) >> 16);
*src0++ = i0;
*src1++ = i1;
......
......@@ -115,11 +115,11 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
ctx->params->sourceHeight = avctx->height;
ctx->params->bEnablePsnr = !!(avctx->flags & AV_CODEC_FLAG_PSNR);
if ((avctx->color_primaries <= AVCOL_PRI_BT2020 &&
if ((avctx->color_primaries <= AVCOL_PRI_SMPTE432 &&
avctx->color_primaries != AVCOL_PRI_UNSPECIFIED) ||
(avctx->color_trc <= AVCOL_TRC_BT2020_12 &&
(avctx->color_trc <= AVCOL_TRC_ARIB_STD_B67 &&
avctx->color_trc != AVCOL_TRC_UNSPECIFIED) ||
(avctx->colorspace <= AVCOL_SPC_BT2020_CL &&
(avctx->colorspace <= AVCOL_SPC_ICTCP &&
avctx->colorspace != AVCOL_SPC_UNSPECIFIED)) {
ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;
......
......@@ -566,13 +566,14 @@ void ff_celt_bitalloc(CeltFrame *f, OpusRangeCoder *rc, int encode)
int bits2[CELT_MAX_BANDS];
/* Spread */
if (opus_rc_tell(rc) + 4 <= f->framebits)
if (opus_rc_tell(rc) + 4 <= f->framebits) {
if (encode)
ff_opus_rc_enc_cdf(rc, f->spread, ff_celt_model_spread);
else
f->spread = ff_opus_rc_dec_cdf(rc, ff_celt_model_spread);
else
} else {
f->spread = CELT_SPREAD_NORMAL;
}
/* Initialize static allocation caps */
for (i = 0; i < CELT_MAX_BANDS; i++)
......
......@@ -43,6 +43,7 @@ static const uint8_t *parse_opus_ts_header(const uint8_t *start, int *payload_le
const uint8_t *buf = start + 1;
int start_trim_flag, end_trim_flag, control_extension_flag, control_extension_length;
uint8_t flags;
uint64_t payload_len_tmp;
GetByteContext gb;
bytestream2_init(&gb, buf, buf_len);
......@@ -52,11 +53,11 @@ static const uint8_t *parse_opus_ts_header(const uint8_t *start, int *payload_le
end_trim_flag = (flags >> 3) & 1;
control_extension_flag = (flags >> 2) & 1;
*payload_len = 0;
payload_len_tmp = *payload_len = 0;
while (bytestream2_peek_byte(&gb) == 0xff)
*payload_len += bytestream2_get_byte(&gb);
payload_len_tmp += bytestream2_get_byte(&gb);
*payload_len += bytestream2_get_byte(&gb);
payload_len_tmp += bytestream2_get_byte(&gb);
if (start_trim_flag)
bytestream2_skip(&gb, 2);
......@@ -67,6 +68,11 @@ static const uint8_t *parse_opus_ts_header(const uint8_t *start, int *payload_le
bytestream2_skip(&gb, control_extension_length);
}
if (bytestream2_tell(&gb) + payload_len_tmp > buf_len)
return NULL;
*payload_len = payload_len_tmp;
return buf + bytestream2_tell(&gb);
}
......@@ -104,6 +110,10 @@ static int opus_find_frame_end(AVCodecParserContext *ctx, AVCodecContext *avctx,
state = (state << 8) | payload[i];
if ((state & OPUS_TS_MASK) == OPUS_TS_HEADER) {
payload = parse_opus_ts_header(payload, &payload_len, buf_size - i);
if (!payload) {
av_log(avctx, AV_LOG_ERROR, "Error parsing Ogg TS header.\n");
return AVERROR_INVALIDDATA;
}
*header_len = payload - buf;
start_found = 1;
break;
......
......@@ -119,7 +119,7 @@ static void step_collect_psy_metrics(OpusPsyContext *s, int index)
for (j = 0; j < range; j++) {
const float c_s = coeffs[j]*coeffs[j];
dist_dev = (avg_c_s - c_s)*(avg_c_s - c_s);
dist_dev += (avg_c_s - c_s)*(avg_c_s - c_s);
}
st->tone[ch][i] += sqrtf(dist_dev);
......
......@@ -608,7 +608,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
ff_lock_avcodec(avctx, codec);
avctx->internal = av_mallocz(sizeof(AVCodecInternal));
avctx->internal = av_mallocz(sizeof(*avctx->internal));
if (!avctx->internal) {
ret = AVERROR(ENOMEM);
goto end;
......@@ -1099,7 +1099,7 @@ void avsubtitle_free(AVSubtitle *sub)
av_freep(&sub->rects);
memset(sub, 0, sizeof(AVSubtitle));
memset(sub, 0, sizeof(*sub));
}
av_cold int avcodec_close(AVCodecContext *avctx)
......
......@@ -198,7 +198,7 @@ HEVC_SAO_BAND_FILTER 64, 2
;******************************************************************************
%define MAX_PB_SIZE 64
%define PADDING_SIZE 32 ; AV_INPUT_BUFFER_PADDING_SIZE
%define PADDING_SIZE 64 ; AV_INPUT_BUFFER_PADDING_SIZE
%define EDGE_SRCSTRIDE 2 * MAX_PB_SIZE + PADDING_SIZE
%macro HEVC_SAO_EDGE_FILTER_INIT 0
......
......@@ -190,7 +190,7 @@ HEVC_SAO_BAND_FILTER 12, 64, 4
;******************************************************************************
%define MAX_PB_SIZE 64
%define PADDING_SIZE 32 ; AV_INPUT_BUFFER_PADDING_SIZE
%define PADDING_SIZE 64 ; AV_INPUT_BUFFER_PADDING_SIZE
%define EDGE_SRCSTRIDE 2 * MAX_PB_SIZE + PADDING_SIZE
%macro PMINUW 4
......
......@@ -235,7 +235,9 @@ gdigrab_read_header(AVFormatContext *s1)
AVStream *st = NULL;
int bpp;
int horzres;
int vertres;
int desktophorzres;
int desktopvertres;
RECT virtual_rect;
RECT clip_rect;
......@@ -279,11 +281,13 @@ gdigrab_read_header(AVFormatContext *s1)
GetClientRect(hwnd, &virtual_rect);
} else {
/* desktop -- get the right height and width for scaling DPI */
horzres = GetDeviceCaps(source_hdc, HORZRES);
vertres = GetDeviceCaps(source_hdc, VERTRES);
desktophorzres = GetDeviceCaps(source_hdc, DESKTOPHORZRES);
desktopvertres = GetDeviceCaps(source_hdc, DESKTOPVERTRES);
virtual_rect.left = GetSystemMetrics(SM_XVIRTUALSCREEN);
virtual_rect.top = GetSystemMetrics(SM_YVIRTUALSCREEN);
virtual_rect.right = (virtual_rect.left + GetSystemMetrics(SM_CXVIRTUALSCREEN)) * desktopvertres / vertres;
virtual_rect.right = (virtual_rect.left + GetSystemMetrics(SM_CXVIRTUALSCREEN)) * desktophorzres / horzres;
virtual_rect.bottom = (virtual_rect.top + GetSystemMetrics(SM_CYVIRTUALSCREEN)) * desktopvertres / vertres;
}
......@@ -447,7 +451,9 @@ static void paint_mouse_pointer(AVFormatContext *s1, struct gdigrab *gdigrab)
POINT pos;
RECT clip_rect = gdigrab->clip_rect;
HWND hwnd = gdigrab->hwnd;
int horzres = GetDeviceCaps(gdigrab->source_hdc, HORZRES);
int vertres = GetDeviceCaps(gdigrab->source_hdc, VERTRES);
int desktophorzres = GetDeviceCaps(gdigrab->source_hdc, DESKTOPHORZRES);
int desktopvertres = GetDeviceCaps(gdigrab->source_hdc, DESKTOPVERTRES);
info.hbmMask = NULL;
info.hbmColor = NULL;
......@@ -483,7 +489,7 @@ static void paint_mouse_pointer(AVFormatContext *s1, struct gdigrab *gdigrab)
}
//that would keep the correct location of mouse with hidpi screens
pos.x = pos.x * desktopvertres / vertres;
pos.x = pos.x * desktophorzres / horzres;
pos.y = pos.y * desktopvertres / vertres;
av_log(s1, AV_LOG_DEBUG, "Cursor pos (%li,%li) -> (%li,%li)\n",
......
此差异已折叠。
......@@ -39,8 +39,6 @@
#include "internal.h"
#include "video.h"
#define N_SRCE 3
typedef struct FrameRateContext {
const AVClass *class;
// parameters
......@@ -55,30 +53,25 @@ typedef struct FrameRateContext {
int line_size[4]; ///< bytes of pixel data per line for each plane
int vsub;
int frst, next, prev, crnt, last;
int pending_srce_frames; ///< how many input frames are still waiting to be processed
int flush; ///< are we flushing final frames
int pending_end_frame; ///< flag indicating we are waiting to call filter_frame()
AVRational srce_time_base; ///< timebase of source
AVRational dest_time_base; ///< timebase of destination
int32_t dest_frame_num;
int64_t last_dest_frame_pts; ///< pts of the last frame output
int64_t average_srce_pts_dest_delta;///< average input pts delta converted from input rate to output rate
int64_t average_dest_pts_delta; ///< calculated average output pts delta
av_pixelutils_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
double prev_mafd; ///< previous MAFD (scene detect only)
AVFrame *srce[N_SRCE]; ///< buffered source frames
int64_t srce_pts_dest[N_SRCE]; ///< pts for source frames scaled to output timebase
double srce_score[N_SRCE]; ///< scene change score compared to the next srce frame
int64_t pts; ///< pts of frame we are working on
int max;
int bitdepth;
AVFrame *work;
AVFrame *f0; ///< last frame
AVFrame *f1; ///< current frame
int64_t pts0; ///< last frame pts in dest_time_base
int64_t pts1; ///< current frame pts in dest_time_base
int64_t delta; ///< pts1 to pts0 delta
double score; ///< scene change score (f0 to f1)
int flush; ///< 1 if the filter is being flushed
int64_t start_pts; ///< pts of the first output frame
int64_t n; ///< output frame counter
} FrameRateContext;
#define OFFSET(x) offsetof(FrameRateContext, x)
......@@ -102,27 +95,6 @@ static const AVOption framerate_options[] = {
AVFILTER_DEFINE_CLASS(framerate);
static void next_source(AVFilterContext *ctx)
{
FrameRateContext *s = ctx->priv;
int i;
ff_dlog(ctx, "next_source()\n");
if (s->srce[s->last] && s->srce[s->last] != s->srce[s->last-1]) {
ff_dlog(ctx, "next_source() unlink %d\n", s->last);
av_frame_free(&s->srce[s->last]);
}
for (i = s->last; i > s->frst; i--) {
ff_dlog(ctx, "next_source() copy %d to %d\n", i - 1, i);
s->srce[i] = s->srce[i - 1];
s->srce_score[i] = s->srce_score[i - 1];
}
ff_dlog(ctx, "next_source() make %d null\n", s->frst);
s->srce[s->frst] = NULL;
s->srce_score[s->frst] = -1.0;
}
static av_always_inline int64_t sad_8x8_16(const uint16_t *src1, ptrdiff_t stride1,
const uint16_t *src2, ptrdiff_t stride2)
{
......@@ -307,28 +279,25 @@ static int filter_slice16(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
return 0;
}
static int blend_frames(AVFilterContext *ctx, int interpolate,
int src1, int src2)
static int blend_frames(AVFilterContext *ctx, int interpolate)
{
FrameRateContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
double interpolate_scene_score = 0;
if ((s->flags & FRAMERATE_FLAG_SCD) && s->srce[src1] && s->srce[src2]) {
int i1 = src1 < src2 ? src1 : src2;
int i2 = src1 < src2 ? src2 : src1;
if (i2 == i1 + 1 && s->srce_score[i1] >= 0.0)
interpolate_scene_score = s->srce_score[i1];
if ((s->flags & FRAMERATE_FLAG_SCD)) {
if (s->score >= 0.0)
interpolate_scene_score = s->score;
else
interpolate_scene_score = s->srce_score[i1] = get_scene_score(ctx, s->srce[i1], s->srce[i2]);
interpolate_scene_score = s->score = get_scene_score(ctx, s->f0, s->f1);
ff_dlog(ctx, "blend_frames() interpolate scene score:%f\n", interpolate_scene_score);
}
// decide if the shot-change detection allows us to blend two frames
if (interpolate_scene_score < s->scene_score && s->srce[src2]) {
if (interpolate_scene_score < s->scene_score) {
ThreadData td;
td.copy_src1 = s->srce[src1];
td.copy_src2 = s->srce[src2];
td.src2_factor = FFABS(interpolate);
td.copy_src1 = s->f0;
td.copy_src2 = s->f1;
td.src2_factor = interpolate;
td.src1_factor = s->max - td.src2_factor;
// get work-space for output frame
......@@ -336,7 +305,7 @@ static int blend_frames(AVFilterContext *ctx, int interpolate,
if (!s->work)
return AVERROR(ENOMEM);
av_frame_copy_props(s->work, s->srce[s->crnt]);
av_frame_copy_props(s->work, s->f0);
ff_dlog(ctx, "blend_frames() INTERPOLATE to create work frame\n");
ctx->internal->execute(ctx, s->bitdepth == 8 ? filter_slice8 : filter_slice16, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
......@@ -345,198 +314,65 @@ static int blend_frames(AVFilterContext *ctx, int interpolate,
return 0;
}
static int process_work_frame(AVFilterContext *ctx, int stop)
static int process_work_frame(AVFilterContext *ctx)
{
FrameRateContext *s = ctx->priv;
int64_t work_next_pts;
int64_t work_pts;
int interpolate;
int src1, src2;
ff_dlog(ctx, "process_work_frame()\n");
ff_dlog(ctx, "process_work_frame() pending_input_frames %d\n", s->pending_srce_frames);
if (s->srce[s->prev]) ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);
if (s->srce[s->crnt]) ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
if (s->srce[s->next]) ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);
int ret;
if (!s->srce[s->crnt]) {
// the filter cannot do anything
ff_dlog(ctx, "process_work_frame() no current frame cached: move on to next frame, do not output a frame\n");
next_source(ctx);
if (!s->f1)
return 0;
}
work_next_pts = s->pts + s->average_dest_pts_delta;
ff_dlog(ctx, "process_work_frame() work crnt pts:%"PRId64"\n", s->pts);
ff_dlog(ctx, "process_work_frame() work next pts:%"PRId64"\n", work_next_pts);
if (s->srce[s->prev])
ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64" at dest time base:%u/%u\n",
s->srce_pts_dest[s->prev], s->dest_time_base.num, s->dest_time_base.den);
if (s->srce[s->crnt])
ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64" at dest time base:%u/%u\n",
s->srce_pts_dest[s->crnt], s->dest_time_base.num, s->dest_time_base.den);
if (s->srce[s->next])
ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64" at dest time base:%u/%u\n",
s->srce_pts_dest[s->next], s->dest_time_base.num, s->dest_time_base.den);
av_assert0(s->srce[s->next]);
// should filter be skipping input frame (output frame rate is lower than input frame rate)
if (!s->flush && s->pts >= s->srce_pts_dest[s->next]) {
ff_dlog(ctx, "process_work_frame() work crnt pts >= srce next pts: SKIP FRAME, move on to next frame, do not output a frame\n");
next_source(ctx);
s->pending_srce_frames--;
if (!s->f0 && !s->flush)
return 0;
}
// calculate interpolation
interpolate = av_rescale(s->pts - s->srce_pts_dest[s->crnt], s->max, s->average_srce_pts_dest_delta);
ff_dlog(ctx, "process_work_frame() interpolate:%d/%d\n", interpolate, s->max);
src1 = s->crnt;
if (interpolate > s->interp_end) {
ff_dlog(ctx, "process_work_frame() source is:NEXT\n");
src1 = s->next;
}
if (s->srce[s->prev] && interpolate < -s->interp_end) {
ff_dlog(ctx, "process_work_frame() source is:PREV\n");
src1 = s->prev;
}
work_pts = s->start_pts + av_rescale_q(s->n, av_inv_q(s->dest_frame_rate), s->dest_time_base);
if (work_pts >= s->pts1 && !s->flush)
return 0;
// decide whether to blend two frames
if ((interpolate >= s->interp_start && interpolate <= s->interp_end) || (interpolate <= -s->interp_start && interpolate >= -s->interp_end)) {
if (interpolate > 0) {
ff_dlog(ctx, "process_work_frame() interpolate source is:NEXT\n");
src2 = s->next;
if (!s->f0) {
s->work = av_frame_clone(s->f1);
} else {
if (work_pts >= s->pts1 + s->delta && s->flush)
return 0;
interpolate = av_rescale(work_pts - s->pts0, s->max, s->delta);
ff_dlog(ctx, "process_work_frame() interpolate:%d/%d\n", interpolate, s->max);
if (interpolate > s->interp_end) {
s->work = av_frame_clone(s->f1);
} else if (interpolate < s->interp_start) {
s->work = av_frame_clone(s->f0);
} else {
ff_dlog(ctx, "process_work_frame() interpolate source is:PREV\n");
src2 = s->prev;
ret = blend_frames(ctx, interpolate);
if (ret < 0)
return ret;
if (ret == 0)
s->work = av_frame_clone(interpolate > (s->max >> 1) ? s->f1 : s->f0);
}
if (blend_frames(ctx, interpolate, src1, src2))
goto copy_done;
else
ff_dlog(ctx, "process_work_frame() CUT - DON'T INTERPOLATE\n");
}
ff_dlog(ctx, "process_work_frame() COPY to the work frame\n");
// copy the frame we decided is our base source
s->work = av_frame_clone(s->srce[src1]);
if (!s->work)
return AVERROR(ENOMEM);
copy_done:
s->work->pts = s->pts;
// should filter be re-using input frame (output frame rate is higher than input frame rate)
if (!s->flush && (work_next_pts + s->average_dest_pts_delta) < (s->srce_pts_dest[s->crnt] + s->average_srce_pts_dest_delta)) {
ff_dlog(ctx, "process_work_frame() REPEAT FRAME\n");
} else {
ff_dlog(ctx, "process_work_frame() CONSUME FRAME, move to next frame\n");
s->pending_srce_frames--;
next_source(ctx);
}
ff_dlog(ctx, "process_work_frame() output a frame\n");
s->dest_frame_num++;
if (stop)
s->pending_end_frame = 0;
s->last_dest_frame_pts = s->work->pts;
s->work->pts = work_pts;
s->n++;
return 1;
}
static void set_srce_frame_dest_pts(AVFilterContext *ctx)
{
FrameRateContext *s = ctx->priv;
ff_dlog(ctx, "set_srce_frame_output_pts()\n");
// scale the input pts from the timebase difference between input and output
if (s->srce[s->prev])
s->srce_pts_dest[s->prev] = av_rescale_q(s->srce[s->prev]->pts, s->srce_time_base, s->dest_time_base);
if (s->srce[s->crnt])
s->srce_pts_dest[s->crnt] = av_rescale_q(s->srce[s->crnt]->pts, s->srce_time_base, s->dest_time_base);
if (s->srce[s->next])
s->srce_pts_dest[s->next] = av_rescale_q(s->srce[s->next]->pts, s->srce_time_base, s->dest_time_base);
}
static void set_work_frame_pts(AVFilterContext *ctx)
{
FrameRateContext *s = ctx->priv;
int64_t pts, average_srce_pts_delta = 0;
ff_dlog(ctx, "set_work_frame_pts()\n");
av_assert0(s->srce[s->next]);
av_assert0(s->srce[s->crnt]);
ff_dlog(ctx, "set_work_frame_pts() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
ff_dlog(ctx, "set_work_frame_pts() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);
if (s->srce[s->prev])
ff_dlog(ctx, "set_work_frame_pts() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);
average_srce_pts_delta = s->average_srce_pts_dest_delta;
ff_dlog(ctx, "set_work_frame_pts() initial average srce pts:%"PRId64"\n", average_srce_pts_delta);
set_srce_frame_dest_pts(ctx);
// calculate the PTS delta
if ((pts = (s->srce_pts_dest[s->next] - s->srce_pts_dest[s->crnt]))) {
average_srce_pts_delta = average_srce_pts_delta?((average_srce_pts_delta+pts)>>1):pts;
} else if (s->srce[s->prev] && (pts = (s->srce_pts_dest[s->crnt] - s->srce_pts_dest[s->prev]))) {
average_srce_pts_delta = average_srce_pts_delta?((average_srce_pts_delta+pts)>>1):pts;
}
s->average_srce_pts_dest_delta = average_srce_pts_delta;
ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64"\n", average_srce_pts_delta);
ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64" at dest time base:%u/%u\n",
s->average_srce_pts_dest_delta, s->dest_time_base.num, s->dest_time_base.den);
if (ctx->inputs[0] && !s->average_dest_pts_delta) {
int64_t d = av_q2d(av_inv_q(av_mul_q(s->dest_time_base, s->dest_frame_rate)));
s->average_dest_pts_delta = d;
ff_dlog(ctx, "set_work_frame_pts() average dest pts delta:%"PRId64"\n", s->average_dest_pts_delta);
}
if (!s->dest_frame_num) {
s->pts = s->last_dest_frame_pts = s->srce_pts_dest[s->crnt];
} else {
s->pts = s->last_dest_frame_pts + s->average_dest_pts_delta;
}
ff_dlog(ctx, "set_work_frame_pts() calculated pts:%"PRId64" at dest time base:%u/%u\n",
s->pts, s->dest_time_base.num, s->dest_time_base.den);
}
static av_cold int init(AVFilterContext *ctx)
{
FrameRateContext *s = ctx->priv;
int i;
s->dest_frame_num = 0;
s->crnt = (N_SRCE)>>1;
s->last = N_SRCE - 1;
s->next = s->crnt - 1;
s->prev = s->crnt + 1;
for (i = 0; i < N_SRCE; i++)
s->srce_score[i] = -1.0;
s->start_pts = AV_NOPTS_VALUE;
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
FrameRateContext *s = ctx->priv;
int i;
for (i = s->frst; i < s->last; i++) {
if (s->srce[i] && (s->srce[i] != s->srce[i + 1]))
av_frame_free(&s->srce[i]);
}
av_frame_free(&s->srce[s->last]);
av_frame_free(&s->f0);
av_frame_free(&s->f1);
}
static int query_formats(AVFilterContext *ctx)
......@@ -593,28 +429,48 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
int ret;
AVFilterContext *ctx = inlink->dst;
FrameRateContext *s = ctx->priv;
// we have one new frame
s->pending_srce_frames++;
int64_t pts;
if (inpicref->interlaced_frame)
av_log(ctx, AV_LOG_WARNING, "Interlaced frame found - the output will not be correct.\n");
// store the pointer to the new frame
av_frame_free(&s->srce[s->frst]);
s->srce[s->frst] = inpicref;
if (inpicref->pts == AV_NOPTS_VALUE) {
av_log(ctx, AV_LOG_WARNING, "Ignoring frame without PTS.\n");
return 0;
}
if (!s->pending_end_frame && s->srce[s->crnt]) {
set_work_frame_pts(ctx);
s->pending_end_frame = 1;
} else {
set_srce_frame_dest_pts(ctx);
pts = av_rescale_q(inpicref->pts, s->srce_time_base, s->dest_time_base);
if (s->f1 && pts == s->pts1) {
av_log(ctx, AV_LOG_WARNING, "Ignoring frame with same PTS.\n");
return 0;
}
ret = process_work_frame(ctx, 1);
if (ret < 0)
return ret;
return ret ? ff_filter_frame(ctx->outputs[0], s->work) : 0;
av_frame_free(&s->f0);
s->f0 = s->f1;
s->pts0 = s->pts1;
s->f1 = inpicref;
s->pts1 = pts;
s->delta = s->pts1 - s->pts0;
s->score = -1.0;
if (s->delta < 0) {
av_log(ctx, AV_LOG_WARNING, "PTS discontinuity.\n");
s->start_pts = s->pts1;
s->n = 0;
av_frame_free(&s->f0);
}
if (s->start_pts == AV_NOPTS_VALUE)
s->start_pts = s->pts1;
do {
ret = process_work_frame(ctx);
if (ret <= 0)
return ret;
ret = ff_filter_frame(ctx->outputs[0], s->work);
} while (ret >= 0);
return ret;
}
static int config_output(AVFilterLink *outlink)
......@@ -666,50 +522,21 @@ static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FrameRateContext *s = ctx->priv;
int ret, i;
int ret;
ff_dlog(ctx, "request_frame()\n");
// if there is no "next" frame AND we are not in flush then get one from our input filter
if (!s->srce[s->frst] && !s->flush)
goto request;
ff_dlog(ctx, "request_frame() REPEAT or FLUSH\n");
if (s->pending_srce_frames <= 0) {
ff_dlog(ctx, "request_frame() nothing else to do, return:EOF\n");
return AVERROR_EOF;
}
// otherwise, make brand-new frame and pass to our output filter
ff_dlog(ctx, "request_frame() FLUSH\n");
// back fill at end of file when source has no more frames
for (i = s->last; i > s->frst; i--) {
if (!s->srce[i - 1] && s->srce[i]) {
ff_dlog(ctx, "request_frame() copy:%d to:%d\n", i, i - 1);
s->srce[i - 1] = s->srce[i];
}
}
set_work_frame_pts(ctx);
ret = process_work_frame(ctx, 0);
if (ret < 0)
return ret;
if (ret)
return ff_filter_frame(ctx->outputs[0], s->work);
request:
ff_dlog(ctx, "request_frame() call source's request_frame()\n");
ret = ff_request_frame(ctx->inputs[0]);
if (ret < 0 && (ret != AVERROR_EOF)) {
ff_dlog(ctx, "request_frame() source's request_frame() returned error:%d\n", ret);
return ret;
} else if (ret == AVERROR_EOF) {
if (ret == AVERROR_EOF && s->f1 && !s->flush) {
s->flush = 1;
ret = process_work_frame(ctx);
if (ret < 0)
return ret;
ret = ret ? ff_filter_frame(ctx->outputs[0], s->work) : AVERROR_EOF;
}
ff_dlog(ctx, "request_frame() source's request_frame() returned:%d\n", ret);
return 0;
return ret;
}
static const AVFilterPad framerate_inputs[] = {
......
......@@ -250,6 +250,7 @@ fail_kernel_arg:
kernel_arg, cle);
err = AVERROR(EIO);
fail:
av_frame_free(&output);
return err;
}
......
......@@ -116,12 +116,14 @@ static int program_opencl_run(AVFilterContext *avctx)
if (cle != CL_SUCCESS) {
av_log(avctx, AV_LOG_ERROR, "Failed to set kernel "
"destination image argument: %d.\n", cle);
err = AVERROR_UNKNOWN;
goto fail;
}
cle = clSetKernelArg(ctx->kernel, 1, sizeof(cl_uint), &ctx->index);
if (cle != CL_SUCCESS) {
av_log(avctx, AV_LOG_ERROR, "Failed to set kernel "
"index argument: %d.\n", cle);
err = AVERROR_UNKNOWN;
goto fail;
}
......@@ -135,6 +137,7 @@ static int program_opencl_run(AVFilterContext *avctx)
if (cle != CL_SUCCESS) {
av_log(avctx, AV_LOG_ERROR, "Failed to set kernel "
"source image argument %d: %d.\n", input, cle);
err = AVERROR_UNKNOWN;
goto fail;
}
}
......
......@@ -39,6 +39,20 @@ SECTION .text
pcmpeq%1 m6, m6
test hq, mmsize
je .loop
;process 1 * mmsize
movu m0, [mrefq+hq]
pavg%1 m0, [prefq+hq]
pxor m0, m6
pxor m2, m6, [srcq+hq]
pavg%1 m0, m2
pxor m0, m6
mova [dstq+hq], m0
add hq, mmsize
jge .end
.loop:
movu m0, [mrefq+hq]
movu m1, [mrefq+hq+mmsize]
......@@ -57,7 +71,9 @@ SECTION .text
add hq, 2*mmsize
jl .loop
REP_RET
.end:
REP_RET
%endmacro
%macro LOWPASS_LINE 0
......@@ -201,5 +217,10 @@ LOWPASS_LINE
INIT_XMM avx
LOWPASS_LINE
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
LOWPASS_LINE
%endif
INIT_XMM sse2
LOWPASS_LINE_COMPLEX
......@@ -32,6 +32,9 @@ void ff_lowpass_line_sse2(uint8_t *dstp, ptrdiff_t linesize,
void ff_lowpass_line_avx (uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
ptrdiff_t pref, int clip_max);
void ff_lowpass_line_avx2 (uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
ptrdiff_t pref, int clip_max);
void ff_lowpass_line_16_sse2(uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
......@@ -39,6 +42,9 @@ void ff_lowpass_line_16_sse2(uint8_t *dstp, ptrdiff_t linesize,
void ff_lowpass_line_16_avx (uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
ptrdiff_t pref, int clip_max);
void ff_lowpass_line_16_avx2 (uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
ptrdiff_t pref, int clip_max);
void ff_lowpass_line_complex_sse2(uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
......@@ -62,6 +68,9 @@ av_cold void ff_interlace_init_x86(InterlaceContext *s, int depth)
if (EXTERNAL_AVX(cpu_flags))
if (s->lowpass == VLPF_LIN)
s->lowpass_line = ff_lowpass_line_16_avx;
if (EXTERNAL_AVX2_FAST(cpu_flags))
if (s->lowpass == VLPF_LIN)
s->lowpass_line = ff_lowpass_line_16_avx2;
} else {
if (EXTERNAL_SSE2(cpu_flags)) {
if (s->lowpass == VLPF_LIN)
......@@ -72,5 +81,8 @@ av_cold void ff_interlace_init_x86(InterlaceContext *s, int depth)
if (EXTERNAL_AVX(cpu_flags))
if (s->lowpass == VLPF_LIN)
s->lowpass_line = ff_lowpass_line_avx;
if (EXTERNAL_AVX2_FAST(cpu_flags))
if (s->lowpass == VLPF_LIN)
s->lowpass_line = ff_lowpass_line_avx2;
}
}
......@@ -33,6 +33,9 @@ void ff_lowpass_line_sse2(uint8_t *dstp, ptrdiff_t linesize,
void ff_lowpass_line_avx (uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
ptrdiff_t pref, int clip_max);
void ff_lowpass_line_avx2 (uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
ptrdiff_t pref, int clip_max);
void ff_lowpass_line_16_sse2(uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
......@@ -40,6 +43,9 @@ void ff_lowpass_line_16_sse2(uint8_t *dstp, ptrdiff_t linesize,
void ff_lowpass_line_16_avx (uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
ptrdiff_t pref, int clip_max);
void ff_lowpass_line_16_avx2 (uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
ptrdiff_t pref, int clip_max);
void ff_lowpass_line_complex_sse2(uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp, ptrdiff_t mref,
......@@ -63,6 +69,11 @@ av_cold void ff_tinterlace_init_x86(TInterlaceContext *s)
if (EXTERNAL_AVX(cpu_flags))
if (!(s->flags & TINTERLACE_FLAG_CVLPF))
s->lowpass_line = ff_lowpass_line_16_avx;
if (EXTERNAL_AVX2_FAST(cpu_flags)) {
if (!(s->flags & TINTERLACE_FLAG_CVLPF)) {
s->lowpass_line = ff_lowpass_line_16_avx2;
}
}
} else {
if (EXTERNAL_SSE2(cpu_flags)) {
if (!(s->flags & TINTERLACE_FLAG_CVLPF))
......@@ -73,5 +84,10 @@ av_cold void ff_tinterlace_init_x86(TInterlaceContext *s)
if (EXTERNAL_AVX(cpu_flags))
if (!(s->flags & TINTERLACE_FLAG_CVLPF))
s->lowpass_line = ff_lowpass_line_avx;
if (EXTERNAL_AVX2_FAST(cpu_flags)) {
if (!(s->flags & TINTERLACE_FLAG_CVLPF)) {
s->lowpass_line = ff_lowpass_line_avx2;
}
}
}
}
......@@ -81,11 +81,10 @@ static void get_meta(AVFormatContext *s, const char *key, int size)
av_free(str);
return;
}
size += (size&1)-res;
size -= res;
str[res] = 0;
av_dict_set(&s->metadata, key, str, AV_DICT_DONT_STRDUP_VAL);
}else
size+= size&1;
}
avio_skip(s->pb, size);
}
......
......@@ -824,6 +824,7 @@ static int parse_cookie(HTTPContext *s, const char *p, AVDictionary **cookies)
}
}
}
av_dict_free(&new_params);
// duplicate the cookie name (dict will dupe the value)
if (!(eql = strchr(p, '='))) return AVERROR(EINVAL);
......
......@@ -21,6 +21,14 @@
#include <libopenmpt/libopenmpt.h>
#include <libopenmpt/libopenmpt_stream_callbacks_file.h>
#include <libopenmpt/libopenmpt_version.h>
/* Shims to support libopenmpt < 0.3.0 (as documented by libopenmpt) */
#if !defined(OPENMPT_API_VERSION_MAKE)
#define OPENMPT_API_VERSION_MAKE(major, minor, patch) (((major)<<24)|((minor)<<16)|((patch)<<0))
#endif
#if !defined(OPENMPT_API_VERSION_AT_LEAST)
#define OPENMPT_API_VERSION_AT_LEAST(major, minor, patch) (OPENMPT_API_VERSION >= OPENMPT_API_VERSION_MAKE((major), (minor), (patch)))
#endif
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
......@@ -72,13 +80,17 @@ static int read_header_openmpt(AVFormatContext *s)
{
AVStream *st;
OpenMPTContext *openmpt = s->priv_data;
int64_t size = avio_size(s->pb);
if (size <= 0)
return AVERROR_INVALIDDATA;
char *buf = av_malloc(size);
int64_t size;
char *buf;
#if OPENMPT_API_VERSION_AT_LEAST(0,3,0)
int error;
#endif
int ret;
size = avio_size(s->pb);
if (size <= 0)
return AVERROR_INVALIDDATA;
buf = av_malloc(size);
if (!buf)
return AVERROR(ENOMEM);
size = avio_read(s->pb, buf, size);
......@@ -88,10 +100,24 @@ static int read_header_openmpt(AVFormatContext *s)
return size;
}
#if OPENMPT_API_VERSION_AT_LEAST(0,3,0)
error = OPENMPT_ERROR_OK;
openmpt->module = openmpt_module_create_from_memory2(buf, size, openmpt_logfunc, s, NULL, NULL, &error, NULL, NULL);
av_freep(&buf);
if (!openmpt->module) {
if (error == OPENMPT_ERROR_OUT_OF_MEMORY)
return AVERROR(ENOMEM);
else if (error >= OPENMPT_ERROR_GENERAL)
return AVERROR_INVALIDDATA;
else
return AVERROR_UNKNOWN;
}
#else
openmpt->module = openmpt_module_create_from_memory(buf, size, openmpt_logfunc, s, NULL);
av_freep(&buf);
if (!openmpt->module)
return AVERROR_INVALIDDATA;
#endif
openmpt->channels = av_get_channel_layout_nb_channels(openmpt->layout);
......
......@@ -24,7 +24,6 @@
#include <stdlib.h>
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/bytestream.h"
......
......@@ -822,6 +822,7 @@ static int w64_read_header(AVFormatContext *s)
samples = avio_rl64(pb);
if (samples > 0)
st->duration = samples;
avio_skip(pb, FFALIGN(size, INT64_C(8)) - 32);
} else if (!memcmp(guid, ff_w64_guid_data, 16)) {
wav->data_end = avio_tell(pb) + size - 24;
......
......@@ -103,7 +103,7 @@
#define AVRESAMPLE_MAX_CHANNELS 32
typedef attribute_deprecated struct AVAudioResampleContext AVAudioResampleContext;
typedef struct AVAudioResampleContext AVAudioResampleContext;
/**
* @deprecated use libswresample
......
/**
/*
* Copyright (c) 2016 Neil Birkbeck <neil.birkbeck@gmail.com>
*
* This file is part of FFmpeg.
......
......@@ -154,7 +154,7 @@ fate-aac-aref-encode: CMD = enc_dec_pcm adts wav s16le $(REF) -c:a aac -aac_is 0
fate-aac-aref-encode: CMP = stddev
fate-aac-aref-encode: REF = ./tests/data/asynth-44100-2.wav
fate-aac-aref-encode: CMP_SHIFT = -4096
fate-aac-aref-encode: CMP_TARGET = 669
fate-aac-aref-encode: CMP_TARGET = 596
fate-aac-aref-encode: SIZE_TOLERANCE = 2464
fate-aac-aref-encode: FUZZ = 89
......@@ -163,7 +163,7 @@ fate-aac-ln-encode: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audio-ref
fate-aac-ln-encode: CMP = stddev
fate-aac-ln-encode: REF = $(SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav
fate-aac-ln-encode: CMP_SHIFT = -4096
fate-aac-ln-encode: CMP_TARGET = 61
fate-aac-ln-encode: CMP_TARGET = 72
fate-aac-ln-encode: SIZE_TOLERANCE = 3560
fate-aac-ln-encode: FUZZ = 30
......@@ -172,7 +172,7 @@ fate-aac-ln-encode-128k: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audi
fate-aac-ln-encode-128k: CMP = stddev
fate-aac-ln-encode-128k: REF = $(SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav
fate-aac-ln-encode-128k: CMP_SHIFT = -4096
fate-aac-ln-encode-128k: CMP_TARGET = 800
fate-aac-ln-encode-128k: CMP_TARGET = 622
fate-aac-ln-encode-128k: SIZE_TOLERANCE = 3560
fate-aac-ln-encode-128k: FUZZ = 5
......@@ -181,7 +181,7 @@ fate-aac-pns-encode: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audio-re
fate-aac-pns-encode: CMP = stddev
fate-aac-pns-encode: REF = $(SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav
fate-aac-pns-encode: CMP_SHIFT = -4096
fate-aac-pns-encode: CMP_TARGET = 616
fate-aac-pns-encode: CMP_TARGET = 655
fate-aac-pns-encode: SIZE_TOLERANCE = 3560
fate-aac-pns-encode: FUZZ = 74
......@@ -190,7 +190,7 @@ fate-aac-tns-encode: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audio-re
fate-aac-tns-encode: CMP = stddev
fate-aac-tns-encode: REF = $(SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav
fate-aac-tns-encode: CMP_SHIFT = -4096
fate-aac-tns-encode: CMP_TARGET = 817
fate-aac-tns-encode: CMP_TARGET = 637
fate-aac-tns-encode: FUZZ = 7
fate-aac-tns-encode: SIZE_TOLERANCE = 3560
......@@ -199,7 +199,7 @@ fate-aac-is-encode: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audio-ref
fate-aac-is-encode: CMP = stddev
fate-aac-is-encode: REF = $(SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav
fate-aac-is-encode: CMP_SHIFT = -4096
fate-aac-is-encode: CMP_TARGET = 615
fate-aac-is-encode: CMP_TARGET = 514
fate-aac-is-encode: SIZE_TOLERANCE = 3560
fate-aac-is-encode: FUZZ = 10
......@@ -208,26 +208,17 @@ fate-aac-ms-encode: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audio-ref
fate-aac-ms-encode: CMP = stddev
fate-aac-ms-encode: REF = $(SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav
fate-aac-ms-encode: CMP_SHIFT = -4096
fate-aac-ms-encode: CMP_TARGET = 675
fate-aac-ms-encode: CMP_TARGET = 558
fate-aac-ms-encode: SIZE_TOLERANCE = 3560
fate-aac-ms-encode: FUZZ = 15
FATE_AAC_ENCODE += fate-aac-ltp-encode
fate-aac-ltp-encode: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav -strict -2 -c:a aac -profile:a aac_ltp -aac_pns 0 -aac_is 0 -aac_ms 0 -aac_tns 0 -b:a 36k -fflags +bitexact -flags +bitexact
fate-aac-ltp-encode: CMP = stddev
fate-aac-ltp-encode: REF = $(SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav
fate-aac-ltp-encode: CMP_SHIFT = -4096
fate-aac-ltp-encode: CMP_TARGET = 1270
fate-aac-ltp-encode: SIZE_TOLERANCE = 3560
fate-aac-ltp-encode: FUZZ = 17
#Ticket1784
FATE_AAC_ENCODE += fate-aac-yoraw-encode
fate-aac-yoraw-encode: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audio-reference/yo.raw-short.wav -c:a aac -fflags +bitexact -flags +bitexact
fate-aac-yoraw-encode: CMP = stddev
fate-aac-yoraw-encode: REF = $(SAMPLES)/audio-reference/yo.raw-short.wav
fate-aac-yoraw-encode: CMP_SHIFT = -12288
fate-aac-yoraw-encode: CMP_TARGET = 259
fate-aac-yoraw-encode: CMP_TARGET = 226
fate-aac-yoraw-encode: SIZE_TOLERANCE = 3560
fate-aac-yoraw-encode: FUZZ = 17
......@@ -237,7 +228,7 @@ fate-aac-pred-encode: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audio-r
fate-aac-pred-encode: CMP = stddev
fate-aac-pred-encode: REF = $(SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav
fate-aac-pred-encode: CMP_SHIFT = -4096
fate-aac-pred-encode: CMP_TARGET = 841
fate-aac-pred-encode: CMP_TARGET = 662
fate-aac-pred-encode: FUZZ = 12
fate-aac-pred-encode: SIZE_TOLERANCE = 3560
......
......@@ -104,14 +104,14 @@ fate-acodec-dca: tests/data/asynth-44100-2.wav
fate-acodec-dca: SRC = tests/data/asynth-44100-2.wav
fate-acodec-dca: CMD = md5 -i $(TARGET_PATH)/$(SRC) -c:a dca -strict -2 -f dts -flags +bitexact
fate-acodec-dca: CMP = oneline
fate-acodec-dca: REF = 7cd79a3717943a06b217f1130223a86f
fate-acodec-dca: REF = 2aa580ac67820fce4f581b96ebb34acc
FATE_ACODEC-$(call ENCDEC, DCA, WAV) += fate-acodec-dca2
fate-acodec-dca2: CMD = enc_dec_pcm dts wav s16le $(SRC) -c:a dca -strict -2 -flags +bitexact
fate-acodec-dca2: REF = $(SRC)
fate-acodec-dca2: CMP = stddev
fate-acodec-dca2: CMP_SHIFT = -2048
fate-acodec-dca2: CMP_TARGET = 527
fate-acodec-dca2: CMP_TARGET = 535
fate-acodec-dca2: SIZE_TOLERANCE = 1632
FATE_ACODEC-$(call ENCDEC, FLAC, FLAC) += fate-acodec-flac fate-acodec-flac-exact-rice
......
......@@ -62,3 +62,4 @@
0, 56, 56, 1, 307200, 0x8cf55128
0, 57, 57, 1, 307200, 0x4e740b42
0, 58, 58, 1, 307200, 0x8e7e705c
0, 59, 59, 1, 307200, 0xe73f29ef
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册