author     Michael Niedermayer <michaelni@gmx.at>   2012-04-10 22:06:53 +0200
committer  Michael Niedermayer <michaelni@gmx.at>   2012-04-10 22:53:25 +0200
commit     e387c9d5dd56e1f29470ee933027ee3d92f9cfd6 (patch)
tree       daa5876aa5b6515b3c92b6ee45e552852345e35b
parent     b1ef4dc406e8a0bd9acea40d880aa4e74412075b (diff)
parent     2130bd8f5b6504ea14cd41e33f5d4f431eb724f3 (diff)
download   ffmpeg-e387c9d5dd56e1f29470ee933027ee3d92f9cfd6.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master: (22 commits)
  rv40dsp x86: use only one register, for both increment and loop counter
  rv40dsp: implement prescaled versions for biweight.
  avconv: use default channel layouts when they are unknown
  avconv: parse channel layout string
  nutdec: K&R formatting cosmetics
  vda: Signal 4 byte NAL headers to the decoder regardless of what's in the extradata
  mem: Consistently return NULL for av_malloc(0)
  vf_overlay: implement poll_frame()
  vf_scale: support named constants for sws flags.
  lavc doxy: add all installed headers to doxy groups.
  lavc doxy: add avfft to the main lavc group.
  lavc doxy: add remaining avcodec.h functions to a misc doxygen group.
  lavc doxy: add AVPicture functions to a doxy group.
  lavc doxy: add resampling functions to a doxy group.
  lavc doxy: replace \ with /
  lavc doxy: add encoding functions to a doxy group.
  lavc doxy: add decoding functions to a doxy group.
  lavc doxy: fix formatting of AV_PKT_DATA_{PARAM_CHANGE,H263_MB_INFO}
  lavc doxy: add AVPacket-related stuff to a separate doxy group.
  lavc doxy: add core functions/definitions to a doxy group.
  ...

Conflicts:
	ffmpeg.c
	libavcodec/avcodec.h
	libavcodec/vda.c
	libavcodec/x86/rv40dsp.asm
	libavfilter/vf_scale.c
	libavformat/nutdec.c
	libavutil/mem.c
	tests/ref/acodec/pcm_s24daud

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat:
 -rw-r--r--  ffmpeg.c                             |   96
 -rw-r--r--  libavcodec/arm/rv40dsp_init_neon.c   |    4
 -rw-r--r--  libavcodec/avcodec.h                 | 1647
 -rw-r--r--  libavcodec/avfft.h                   |   17
 -rw-r--r--  libavcodec/dxva2.h                   |   17
 -rw-r--r--  libavcodec/ppc/gmc_altivec.c         |    2
 -rw-r--r--  libavcodec/ppc/int_altivec.c         |    3
 -rw-r--r--  libavcodec/rv34.c                    |   58
 -rw-r--r--  libavcodec/rv34.h                    |    2
 -rw-r--r--  libavcodec/rv34dsp.h                 |    7
 -rw-r--r--  libavcodec/rv40dsp.c                 |   20
 -rw-r--r--  libavcodec/vaapi.h                   |   10
 -rw-r--r--  libavcodec/vda.c                     |   35
 -rw-r--r--  libavcodec/vda.h                     |   17
 -rw-r--r--  libavcodec/vdpau.h                   |   12
 -rw-r--r--  libavcodec/version.h                 |    6
 -rw-r--r--  libavcodec/x86/rv40dsp.asm           |  107
 -rw-r--r--  libavcodec/x86/rv40dsp_init.c        |   30
 -rw-r--r--  libavcodec/xvmc.h                    |   17
 -rw-r--r--  libavfilter/vf_overlay.c             |   15
 -rw-r--r--  libavfilter/vf_scale.c               |   11
 -rw-r--r--  libavformat/nutdec.c                 |  936
 -rw-r--r--  tests/ref/acodec/pcm_f32le           |    2
 -rw-r--r--  tests/ref/acodec/pcm_f64le           |    2
 -rw-r--r--  tests/ref/acodec/pcm_s24daud         |    2
 -rw-r--r--  tests/ref/acodec/pcm_s24le           |    2
 -rw-r--r--  tests/ref/acodec/pcm_s32le           |    2
 -rw-r--r--  tests/ref/lavf/caf                   |    4
28 files changed, 1765 insertions, 1318 deletions
diff --git a/ffmpeg.c b/ffmpeg.c
index 0a2375d38a..66f96ccebd 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -1282,7 +1282,7 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
swr_set_compensation(ost->swr, comp, enc->sample_rate);
}
}
- } else
+ } else if (audio_sync_method == 0)
ost->sync_opts = lrintf(get_sync_ipts(ost, ist->pts) * enc->sample_rate) -
av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
@@ -2328,10 +2328,57 @@ static void print_sdp(OutputFile *output_files, int n)
av_freep(&avc);
}
+static void get_default_channel_layouts(OutputStream *ost, InputStream *ist)
+{
+ char layout_name[256];
+ AVCodecContext *enc = ost->st->codec;
+ AVCodecContext *dec = ist->st->codec;
+
+ if (!dec->channel_layout) {
+ if (enc->channel_layout && dec->channels == enc->channels) {
+ dec->channel_layout = enc->channel_layout;
+ } else {
+ dec->channel_layout = av_get_default_channel_layout(dec->channels);
+
+ if (!dec->channel_layout) {
+ av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
+ "layout for Input Stream #%d.%d\n", ist->file_index,
+ ist->st->index);
+ exit_program(1);
+ }
+ }
+ av_get_channel_layout_string(layout_name, sizeof(layout_name),
+ dec->channels, dec->channel_layout);
+ av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
+ "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
+ }
+ if (!enc->channel_layout) {
+ if (dec->channels == enc->channels) {
+ enc->channel_layout = dec->channel_layout;
+ return;
+ } else {
+ enc->channel_layout = av_get_default_channel_layout(enc->channels);
+ }
+ if (!enc->channel_layout) {
+ av_log(NULL, AV_LOG_FATAL, "Unable to find default channel layout "
+ "for Output Stream #%d.%d\n", ost->file_index,
+ ost->st->index);
+ exit_program(1);
+ }
+ av_get_channel_layout_string(layout_name, sizeof(layout_name),
+ enc->channels, enc->channel_layout);
+ av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Output Stream "
+ "#%d.%d : %s\n", ost->file_index, ost->st->index, layout_name);
+ }
+}
+
+
static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams,
char *error, int error_len)
{
InputStream *ist = &input_streams[ist_index];
+ int i;
+
if (ist->decoding_needed) {
AVCodec *codec = ist->dec;
if (!codec) {
@@ -2356,6 +2403,17 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb
}
assert_codec_experimental(ist->st->codec, 0);
assert_avoptions(ist->opts);
+
+ if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = &output_streams[i];
+ if (ost->source_index == ist_index) {
+ if (!ist->st->codec->channel_layout || !ost->st->codec->channel_layout)
+ get_default_channel_layouts(ost, ist);
+ break;
+ }
+ }
+ }
}
ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
@@ -4943,6 +5001,41 @@ static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
opt_cpuflags("cpuflags", argv[idx + 1]);
}
+static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
+{
+ char layout_str[32];
+ char *stream_str;
+ char *ac_str;
+ int ret, channels, ac_str_size;
+ uint64_t layout;
+
+ layout = av_get_channel_layout(arg);
+ if (!layout) {
+ av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
+ return AVERROR(EINVAL);
+ }
+ snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
+ ret = opt_default(opt, layout_str);
+ if (ret < 0)
+ return ret;
+
+ /* set 'ac' option based on channel layout */
+ channels = av_get_channel_layout_nb_channels(layout);
+ snprintf(layout_str, sizeof(layout_str), "%d", channels);
+ stream_str = strchr(opt, ':');
+ ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
+ ac_str = av_mallocz(ac_str_size);
+ if (!ac_str)
+ return AVERROR(ENOMEM);
+ av_strlcpy(ac_str, "ac", 3);
+ if (stream_str)
+ av_strlcat(ac_str, stream_str, ac_str_size);
+ ret = parse_option(o, ac_str, layout_str, options);
+ av_free(ac_str);
+
+ return ret;
+}
+
#define OFFSET(x) offsetof(OptionsContext, x)
static const OptionDef options[] = {
/* main options */
@@ -5051,6 +5144,7 @@ static const OptionDef options[] = {
{ "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
{ "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
{ "rmvol", HAS_ARG | OPT_AUDIO | OPT_FLOAT | OPT_SPEC, {.off = OFFSET(rematrix_volume)}, "rematrix volume (as factor)", "volume" },
+ { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
/* subtitle options */
{ "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
diff --git a/libavcodec/arm/rv40dsp_init_neon.c b/libavcodec/arm/rv40dsp_init_neon.c
index 650ef61878..2ce50a2073 100644
--- a/libavcodec/arm/rv40dsp_init_neon.c
+++ b/libavcodec/arm/rv40dsp_init_neon.c
@@ -128,8 +128,8 @@ void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_neon;
c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_neon;
- c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_neon;
- c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_neon;
+ c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_16_neon;
+ c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_8_neon;
c->rv40_loop_filter_strength[0] = ff_rv40_h_loop_filter_strength_neon;
c->rv40_loop_filter_strength[1] = ff_rv40_v_loop_filter_strength_neon;
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 56fc15cad1..d905a019e0 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -68,6 +68,14 @@
*
*/
+/**
+ * @defgroup lavc_core Core functions/structures.
+ * @ingroup libavc
+ *
+ * Basic definitions, functions for querying libavcodec capabilities,
+ * allocating core structures, etc.
+ * @{
+ */
/**
* Identify the syntax and semantics of the bitstream.
@@ -438,6 +446,7 @@ enum CodecID {
#endif
/**
+ * @ingroup lavc_decoding
* Required number of additionally allocated bytes at the end of the input bitstream for decoding.
* This is mainly needed because some optimized bitstream readers read
* 32 or 64 bit at once and could read over the end.<br>
@@ -447,6 +456,7 @@ enum CodecID {
#define FF_INPUT_BUFFER_PADDING_SIZE 16
/**
+ * @ingroup lavc_encoding
* minimum encoding buffer size
* Used to avoid some checks during header writing.
*/
@@ -454,6 +464,7 @@ enum CodecID {
/**
+ * @ingroup lavc_encoding
* motion estimation type.
*/
enum Motion_Est_ID {
@@ -469,6 +480,9 @@ enum Motion_Est_ID {
ME_TESA, ///< transformed exhaustive search algorithm
};
+/**
+ * @ingroup lavc_decoding
+ */
enum AVDiscard{
/* We leave some space between them for extensions (drop some
* keyframes for intra-only or drop just some bidir frames). */
@@ -548,6 +562,9 @@ enum AVAudioServiceType {
AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI
};
+/**
+ * @ingroup lavc_encoding
+ */
typedef struct RcOverride{
int start_frame;
int end_frame;
@@ -773,10 +790,52 @@ typedef struct AVPanScan{
#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
enum AVPacketSideDataType {
AV_PKT_DATA_PALETTE,
AV_PKT_DATA_NEW_EXTRADATA,
+
+ /**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * @code
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ * @endcode
+ */
AV_PKT_DATA_PARAM_CHANGE,
+
+ /**
+ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+ * structures with info about macroblocks relevant to splitting the
+ * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+ * That is, it does not necessarily contain info about all macroblocks,
+ * as long as the distance between macroblocks in the info is smaller
+ * than the target payload size.
+ * Each MB info structure is 12 bytes, and is laid out as follows:
+ * @code
+ * u32le bit offset from the start of the packet
+ * u8 current quantizer at the start of the macroblock
+ * u8 GOB number
+ * u16le macroblock address within the GOB
+ * u8 horizontal MV predictor
+ * u8 vertical MV predictor
+ * u8 horizontal MV predictor for block number 3
+ * u8 vertical MV predictor for block number 3
+ * @endcode
+ */
AV_PKT_DATA_H263_MB_INFO,
};
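
The byte layout documented above for AV_PKT_DATA_PARAM_CHANGE can be consumed with the little-endian read macros from libavutil/intreadwrite.h. A hedged sketch of such a reader follows (illustration only; AV_RL32/AV_RL64 and av_log are assumed from libavutil):

/* Sketch: parse an AV_PKT_DATA_PARAM_CHANGE blob per the layout above. */
#include <inttypes.h>
#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>
#include <libavutil/log.h>

static void log_param_change(AVPacket *pkt)
{
    int size;
    uint8_t *d = av_packet_get_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, &size);
    if (!d || size < 4)
        return;

    uint32_t flags = AV_RL32(d); d += 4; size -= 4;

    if ((flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) && size >= 4) {
        av_log(NULL, AV_LOG_INFO, "channel_count %d\n", (int)AV_RL32(d));
        d += 4; size -= 4;
    }
    if ((flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) && size >= 8) {
        av_log(NULL, AV_LOG_INFO, "channel_layout 0x%"PRIx64"\n", AV_RL64(d));
        d += 8; size -= 8;
    }
    if ((flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) && size >= 4) {
        av_log(NULL, AV_LOG_INFO, "sample_rate %d\n", (int)AV_RL32(d));
        d += 4; size -= 4;
    }
    if ((flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) && size >= 8) {
        av_log(NULL, AV_LOG_INFO, "size %dx%d\n",
               (int)AV_RL32(d), (int)AV_RL32(d + 4));
    }
}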
@@ -846,44 +905,15 @@ typedef struct AVPacket {
#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
-/**
- * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
- * u32le param_flags
- * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
- * s32le channel_count
- * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
- * u64le channel_layout
- * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
- * s32le sample_rate
- * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
- * s32le width
- * s32le height
- */
-
-/**
- * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
- * structures with info about macroblocks relevant to splitting the
- * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
- * That is, it does not necessarily contain info about all macroblocks,
- * as long as the distance between macroblocks in the info is smaller
- * than the target payload size.
- * Each MB info structure is 12 bytes, and is laid out as follows:
- * u32le bit offset from the start of the packet
- * u8 current quantizer at the start of the macroblock
- * u8 GOB number
- * u16le macroblock address within the GOB
- * u8 horizontal MV predictor
- * u8 vertical MV predictor
- * u8 horizontal MV predictor for block number 3
- * u8 vertical MV predictor for block number 3
- */
-
enum AVSideDataParamChangeFlags {
AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
};
+/**
+ * @}
+ */
/**
* Audio Video Frame.
@@ -982,7 +1012,7 @@ typedef struct AVFrame {
uint8_t *base[AV_NUM_DATA_POINTERS];
/**
- * sample aspect ratio for the video frame, 0/1 if unknown\unspecified
+ * sample aspect ratio for the video frame, 0/1 if unknown/unspecified
* - encoding: unused
* - decoding: Read by user.
*/
@@ -3049,6 +3079,13 @@ typedef struct AVHWAccel {
} AVHWAccel;
/**
+ * @defgroup lavc_picture AVPicture
+ *
+ * Functions for working with AVPicture
+ * @{
+ */
+
+/**
* four components are given, that's all.
* the last component is alpha
*/
@@ -3057,6 +3094,10 @@ typedef struct AVPicture {
int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line
} AVPicture;
+/**
+ * @}
+ */
+
#define AVPALETTE_SIZE 1024
#define AVPALETTE_COUNT 256
@@ -3111,544 +3152,374 @@ typedef struct AVSubtitle {
int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
} AVSubtitle;
-/* packet functions */
-
-/**
- * @deprecated use NULL instead
- */
-attribute_deprecated void av_destruct_packet_nofree(AVPacket *pkt);
-
/**
- * Default packet destructor.
+ * If c is NULL, returns the first registered codec,
+ * if c is non-NULL, returns the next registered codec after c,
+ * or NULL if c is the last one.
*/
-void av_destruct_packet(AVPacket *pkt);
+AVCodec *av_codec_next(AVCodec *c);
/**
- * Initialize optional fields of a packet with default values.
- *
- * @param pkt packet
+ * Return the LIBAVCODEC_VERSION_INT constant.
*/
-void av_init_packet(AVPacket *pkt);
+unsigned avcodec_version(void);
/**
- * Allocate the payload of a packet and initialize its fields with
- * default values.
- *
- * @param pkt packet
- * @param size wanted payload size
- * @return 0 if OK, AVERROR_xxx otherwise
+ * Return the libavcodec build-time configuration.
*/
-int av_new_packet(AVPacket *pkt, int size);
+const char *avcodec_configuration(void);
/**
- * Reduce packet size, correctly zeroing padding
- *
- * @param pkt packet
- * @param size new size
+ * Return the libavcodec license.
*/
-void av_shrink_packet(AVPacket *pkt, int size);
+const char *avcodec_license(void);
/**
- * Increase packet size, correctly zeroing padding
+ * Register the codec codec and initialize libavcodec.
*
- * @param pkt packet
- * @param grow_by number of bytes by which to increase the size of the packet
- */
-int av_grow_packet(AVPacket *pkt, int grow_by);
-
-/**
- * @warning This is a hack - the packet memory allocation stuff is broken. The
- * packet is allocated if it was not really allocated.
- */
-int av_dup_packet(AVPacket *pkt);
-
-/**
- * Free a packet.
+ * @warning either this function or avcodec_register_all() must be called
+ * before any other libavcodec functions.
*
- * @param pkt packet to free
+ * @see avcodec_register_all()
*/
-void av_free_packet(AVPacket *pkt);
+void avcodec_register(AVCodec *codec);
/**
- * Allocate new information of a packet.
+ * Register all the codecs, parsers and bitstream filters which were enabled at
+ * configuration time. If you do not call this function you can select exactly
+ * which formats you want to support, by using the individual registration
+ * functions.
*
- * @param pkt packet
- * @param type side information type
- * @param size side information size
- * @return pointer to fresh allocated data or NULL otherwise
+ * @see avcodec_register
+ * @see av_register_codec_parser
+ * @see av_register_bitstream_filter
*/
-uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
- int size);
+void avcodec_register_all(void);
-/**
- * Shrink the already allocated side data buffer
- *
- * @param pkt packet
- * @param type side information type
- * @param size new side information size
- * @return 0 on success, < 0 on failure
- */
-int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
- int size);
+#if FF_API_ALLOC_CONTEXT
/**
- * Get side information from packet.
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct can be deallocated by simply calling av_free().
*
- * @param pkt packet
- * @param type desired side information type
- * @param size pointer for side information size to store (optional)
- * @return pointer to data if present or NULL otherwise
- */
-uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
- int *size);
-
-int av_packet_merge_side_data(AVPacket *pkt);
-
-int av_packet_split_side_data(AVPacket *pkt);
-
-
-/* resample.c */
-
-struct ReSampleContext;
-struct AVResampleContext;
-
-typedef struct ReSampleContext ReSampleContext;
-
-/**
- * Initialize audio resampling context.
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
*
- * @param output_channels number of output channels
- * @param input_channels number of input channels
- * @param output_rate output sample rate
- * @param input_rate input sample rate
- * @param sample_fmt_out requested output sample format
- * @param sample_fmt_in input sample format
- * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency
- * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
- * @param linear if 1 then the used FIR filter will be linearly interpolated
- between the 2 closest, if 0 the closest will be used
- * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
- * @return allocated ReSampleContext, NULL if error occurred
+ * @deprecated use avcodec_alloc_context3()
*/
-ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
- int output_rate, int input_rate,
- enum AVSampleFormat sample_fmt_out,
- enum AVSampleFormat sample_fmt_in,
- int filter_length, int log2_phase_count,
- int linear, double cutoff);
+attribute_deprecated
+AVCodecContext *avcodec_alloc_context(void);
-int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);
+/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API!
+ * we WILL change its arguments and name a few times! */
+attribute_deprecated
+AVCodecContext *avcodec_alloc_context2(enum AVMediaType);
/**
- * Free resample context.
+ * Set the fields of the given AVCodecContext to default values.
*
- * @param s a non-NULL pointer to a resample context previously
- * created with av_audio_resample_init()
- */
-void audio_resample_close(ReSampleContext *s);
-
-
-/**
- * Initialize an audio resampler.
- * Note, if either rate is not an integer then simply scale both rates up so they are.
- * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq
- * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
- * @param linear If 1 then the used FIR filter will be linearly interpolated
- between the 2 closest, if 0 the closest will be used
- * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
- */
-struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);
-
-/**
- * Resample an array of samples using a previously configured context.
- * @param src an array of unconsumed samples
- * @param consumed the number of samples of src which have been consumed are returned here
- * @param src_size the number of unconsumed samples available
- * @param dst_size the amount of space in samples available in dst
- * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.
- * @return the number of samples written in dst or -1 if an error occurred
+ * @param s The AVCodecContext of which the fields should be set to default values.
+ * @deprecated use avcodec_get_context_defaults3
*/
-int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx);
+attribute_deprecated
+void avcodec_get_context_defaults(AVCodecContext *s);
+/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API!
+ * we WILL change its arguments and name a few times! */
+attribute_deprecated
+void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType);
+#endif
/**
- * Compensate samplerate/timestamp drift. The compensation is done by changing
- * the resampler parameters, so no audible clicks or similar distortions occur
- * @param compensation_distance distance in output samples over which the compensation should be performed
- * @param sample_delta number of output samples which should be output less
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct can be deallocated by calling avcodec_close() on it followed
+ * by av_free().
*
- * example: av_resample_compensate(c, 10, 500)
- * here instead of 510 samples only 500 samples would be output
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ * for the given codec. It is illegal to then call avcodec_open2()
+ * with a different codec.
+ * If NULL, then the codec-specific defaults won't be initialized,
+ * which may result in suboptimal default settings (this is
+ * important mainly for encoders, e.g. libx264).
*
- * note, due to rounding the actual compensation might be slightly different,
- * especially if the compensation_distance is large and the in_rate used during init is small
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
*/
-void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);
-void av_resample_close(struct AVResampleContext *c);
+AVCodecContext *avcodec_alloc_context3(AVCodec *codec);
/**
- * Allocate memory for a picture. Call avpicture_free() to free it.
- *
- * @see avpicture_fill()
+ * Set the fields of the given AVCodecContext to default values corresponding
+ * to the given codec (defaults may be codec-dependent).
*
- * @param picture the picture to be filled in
- * @param pix_fmt the format of the picture
- * @param width the width of the picture
- * @param height the height of the picture
- * @return zero if successful, a negative value if not
+ * Do not call this function if a non-NULL codec has been passed
+ * to avcodec_alloc_context3() that allocated this AVCodecContext.
+ * If codec is non-NULL, it is illegal to call avcodec_open2() with a
+ * different codec on this AVCodecContext.
*/
-int avpicture_alloc(AVPicture *picture, enum PixelFormat pix_fmt, int width, int height);
+int avcodec_get_context_defaults3(AVCodecContext *s, AVCodec *codec);
/**
- * Free a picture previously allocated by avpicture_alloc().
- * The data buffer used by the AVPicture is freed, but the AVPicture structure
- * itself is not.
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
*
- * @param picture the AVPicture to be freed
+ * @see av_opt_find().
*/
-void avpicture_free(AVPicture *picture);
+const AVClass *avcodec_get_class(void);
/**
- * Fill in the AVPicture fields.
- * The fields of the given AVPicture are filled in by using the 'ptr' address
- * which points to the image data buffer. Depending on the specified picture
- * format, one or multiple image data pointers and line sizes will be set.
- * If a planar format is specified, several pointers will be set pointing to
- * the different picture planes and the line sizes of the different planes
- * will be stored in the lines_sizes array.
- * Call with ptr == NULL to get the required size for the ptr buffer.
- *
- * To allocate the buffer and fill in the AVPicture fields in one call,
- * use avpicture_alloc().
+ * Get the AVClass for AVFrame. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
*
- * @param picture AVPicture whose fields are to be filled in
- * @param ptr Buffer which will contain or contains the actual image data
- * @param pix_fmt The format in which the picture data is stored.
- * @param width the width of the image in pixels
- * @param height the height of the image in pixels
- * @return size of the image data in bytes
+ * @see av_opt_find().
*/
-int avpicture_fill(AVPicture *picture, uint8_t *ptr,
- enum PixelFormat pix_fmt, int width, int height);
+const AVClass *avcodec_get_frame_class(void);
/**
- * Copy pixel data from an AVPicture into a buffer.
- * The data is stored compactly, without any gaps for alignment or padding
- * which may be applied by avpicture_fill().
- *
- * @see avpicture_get_size()
+ * Copy the settings of the source AVCodecContext into the destination
+ * AVCodecContext. The resulting destination codec context will be
+ * unopened, i.e. you are required to call avcodec_open2() before you
+ * can use this AVCodecContext to decode/encode video/audio data.
*
- * @param[in] src AVPicture containing image data
- * @param[in] pix_fmt The format in which the picture data is stored.
- * @param[in] width the width of the image in pixels.
- * @param[in] height the height of the image in pixels.
- * @param[out] dest A buffer into which picture data will be copied.
- * @param[in] dest_size The size of 'dest'.
- * @return The number of bytes written to dest, or a negative value (error code) on error.
+ * @param dest target codec context, should be initialized with
+ * avcodec_alloc_context3(), but otherwise uninitialized
+ * @param src source codec context
+ * @return AVERROR() on error (e.g. memory allocation error), 0 on success
*/
-int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width, int height,
- unsigned char *dest, int dest_size);
+int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
/**
- * Calculate the size in bytes that a picture of the given width and height
- * would occupy if stored in the given picture format.
- * Note that this returns the size of a compact representation as generated
- * by avpicture_layout(), which can be smaller than the size required for e.g.
- * avpicture_fill().
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct can be deallocated by simply calling av_free().
*
- * @param pix_fmt the given picture format
- * @param width the width of the image
- * @param height the height of the image
- * @return Image data size in bytes or -1 on error (e.g. too large dimensions).
- */
-int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height);
-void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift);
-
-/**
- * Get the name of a codec.
- * @return a static string identifying the codec; never NULL
- */
-const char *avcodec_get_name(enum CodecID id);
-
-void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
-
-/**
- * Return a value representing the fourCC code associated to the
- * pixel format pix_fmt, or 0 if no associated fourCC code can be
- * found.
+ * @return An AVFrame filled with default values or NULL on failure.
+ * @see avcodec_get_frame_defaults
*/
-unsigned int avcodec_pix_fmt_to_codec_tag(enum PixelFormat pix_fmt);
+AVFrame *avcodec_alloc_frame(void);
/**
- * Put a string representing the codec tag codec_tag in buf.
+ * Set the fields of the given AVFrame to default values.
*
- * @param buf_size size in bytes of buf
- * @return the length of the string that would have been generated if
- * enough space had been available, excluding the trailing null
+ * @param pic The AVFrame of which the fields should be set to default values.
*/
-size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);
-
-#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
-#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
-#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
-#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
-#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
-#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+void avcodec_get_frame_defaults(AVFrame *pic);
+#if FF_API_AVCODEC_OPEN
/**
- * Compute what kind of losses will occur when converting from one specific
- * pixel format to another.
- * When converting from one pixel format to another, information loss may occur.
- * For example, when converting from RGB24 to GRAY, the color information will
- * be lost. Similarly, other losses occur when converting from some formats to
- * other formats. These losses can involve loss of chroma, but also loss of
- * resolution, loss of color depth, loss due to the color space conversion, loss
- * of the alpha bits or loss due to color quantization.
- * avcodec_get_fix_fmt_loss() informs you about the various types of losses
- * which will occur when converting from one pixel format to another.
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated.
*
- * @param[in] dst_pix_fmt destination pixel format
- * @param[in] src_pix_fmt source pixel format
- * @param[in] has_alpha Whether the source pixel format alpha channel is used.
- * @return Combination of flags informing you what kind of losses will occur
- * (maximum loss for an invalid dst_pix_fmt).
- */
-int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_pix_fmt,
- int has_alpha);
-
-/**
- * Find the best pixel format to convert to given a certain source pixel
- * format. When converting from one pixel format to another, information loss
- * may occur. For example, when converting from RGB24 to GRAY, the color
- * information will be lost. Similarly, other losses occur when converting from
- * some formats to other formats. avcodec_find_best_pix_fmt() searches which of
- * the given pixel formats should be used to suffer the least amount of loss.
- * The pixel formats from which it chooses one, are determined by the
- * pix_fmt_mask parameter.
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
*
- * Note, only the first 64 pixel formats will fit in pix_fmt_mask.
+ * @warning This function is not thread safe!
*
* @code
- * src_pix_fmt = PIX_FMT_YUV420P;
- * pix_fmt_mask = (1 << PIX_FMT_YUV422P) | (1 << PIX_FMT_RGB24);
- * dst_pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src_pix_fmt, alpha, &loss);
+ * avcodec_register_all();
+ * codec = avcodec_find_decoder(CODEC_ID_H264);
+ * if (!codec)
+ * exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open(context, codec) < 0)
+ * exit(1);
* @endcode
*
- * @param[in] pix_fmt_mask bitmask determining which pixel format to choose from
- * @param[in] src_pix_fmt source pixel format
- * @param[in] has_alpha Whether the source pixel format alpha channel is used.
- * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
- * @return The best pixel format to convert to or -1 if none was found.
+ * @param avctx The context which will be set up to use the given codec.
+ * @param codec The codec to use within the context.
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_close
+ *
+ * @deprecated use avcodec_open2
*/
-enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt,
- int has_alpha, int *loss_ptr);
+attribute_deprecated
+int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
+#endif
/**
- * Find the best pixel format to convert to given a certain source pixel
- * format and a selection of two destination pixel formats. When converting from
- * one pixel format to another, information loss may occur. For example, when converting
- * from RGB24 to GRAY, the color information will be lost. Similarly, other losses occur when
- * converting from some formats to other formats. avcodec_find_best_pix_fmt2() selects which of
- * the given pixel formats should be used to suffer the least amount of loss.
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
*
- * If one of the destination formats is PIX_FMT_NONE the other pixel format (if valid) will be
- * returned.
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
*
* @code
- * src_pix_fmt = PIX_FMT_YUV420P;
- * dst_pix_fmt1= PIX_FMT_RGB24;
- * dst_pix_fmt2= PIX_FMT_GRAY8;
- * dst_pix_fmt3= PIX_FMT_RGB8;
- * loss= FF_LOSS_CHROMA; // don't care about chroma loss, so chroma loss will be ignored.
- * dst_pix_fmt = avcodec_find_best_pix_fmt2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, alpha, &loss);
- * dst_pix_fmt = avcodec_find_best_pix_fmt2(dst_pix_fmt, dst_pix_fmt3, src_pix_fmt, alpha, &loss);
+ * avcodec_register_all();
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(CODEC_ID_H264);
+ * if (!codec)
+ * exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, opts) < 0)
+ * exit(1);
* @endcode
*
- * @param[in] dst_pix_fmt1 One of the two destination pixel formats to choose from
- * @param[in] dst_pix_fmt2 The other of the two destination pixel formats to choose from
- * @param[in] src_pix_fmt Source pixel format
- * @param[in] has_alpha Whether the source pixel format alpha channel is used.
- * @param[in, out] loss_ptr Combination of loss flags. In: selects which of the losses to ignore, i.e.
- * NULL or value of zero means we care about all losses. Out: the loss
- * that occurs when converting from src to selected dst pixel format.
- * @return The best pixel format to convert to or -1 if none was found.
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ * previously passed to avcodec_alloc_context3() or
+ * avcodec_get_context_defaults3() for this context, then this
+ * parameter MUST be either NULL or equal to the previously passed
+ * codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private options.
+ * On return this object will be filled with options that were not found.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
+ * av_dict_set(), av_opt_find().
*/
-enum PixelFormat avcodec_find_best_pix_fmt2(enum PixelFormat dst_pix_fmt1, enum PixelFormat dst_pix_fmt2,
- enum PixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
-
+int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);
-/* deinterlace a picture */
-/* deinterlace - if not supported return -1 */
-int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
- enum PixelFormat pix_fmt, int width, int height);
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() /
+ * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
+ * do nothing.
+ */
+int avcodec_close(AVCodecContext *avctx);
-/* external high level API */
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle *sub);
/**
- * If c is NULL, returns the first registered codec,
- * if c is non-NULL, returns the next registered codec after c,
- * or NULL if c is the last one.
+ * @}
*/
-AVCodec *av_codec_next(AVCodec *c);
/**
- * Return the LIBAVCODEC_VERSION_INT constant.
+ * @addtogroup lavc_packet
+ * @{
*/
-unsigned avcodec_version(void);
/**
- * Return the libavcodec build-time configuration.
+ * @deprecated use NULL instead
*/
-const char *avcodec_configuration(void);
+attribute_deprecated void av_destruct_packet_nofree(AVPacket *pkt);
/**
- * Return the libavcodec license.
+ * Default packet destructor.
*/
-const char *avcodec_license(void);
+void av_destruct_packet(AVPacket *pkt);
/**
- * Register the codec codec and initialize libavcodec.
- *
- * @warning either this function or avcodec_register_all() must be called
- * before any other libavcodec functions.
+ * Initialize optional fields of a packet with default values.
*
- * @see avcodec_register_all()
+ * @param pkt packet
*/
-void avcodec_register(AVCodec *codec);
+void av_init_packet(AVPacket *pkt);
/**
- * Find a registered encoder with a matching codec ID.
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
*
- * @param id CodecID of the requested encoder
- * @return An encoder if one was found, NULL otherwise.
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
*/
-AVCodec *avcodec_find_encoder(enum CodecID id);
+int av_new_packet(AVPacket *pkt, int size);
/**
- * Find a registered encoder with the specified name.
+ * Reduce packet size, correctly zeroing padding
*
- * @param name name of the requested encoder
- * @return An encoder if one was found, NULL otherwise.
+ * @param pkt packet
+ * @param size new size
*/
-AVCodec *avcodec_find_encoder_by_name(const char *name);
+void av_shrink_packet(AVPacket *pkt, int size);
/**
- * Find a registered decoder with a matching codec ID.
+ * Increase packet size, correctly zeroing padding
*
- * @param id CodecID of the requested decoder
- * @return A decoder if one was found, NULL otherwise.
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
*/
-AVCodec *avcodec_find_decoder(enum CodecID id);
+int av_grow_packet(AVPacket *pkt, int grow_by);
/**
- * Find a registered decoder with the specified name.
- *
- * @param name name of the requested decoder
- * @return A decoder if one was found, NULL otherwise.
+ * @warning This is a hack - the packet memory allocation stuff is broken. The
+ * packet is allocated if it was not really allocated.
*/
-AVCodec *avcodec_find_decoder_by_name(const char *name);
-void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+int av_dup_packet(AVPacket *pkt);
/**
- * Return a name for the specified profile, if available.
+ * Free a packet.
*
- * @param codec the codec that is searched for the given profile
- * @param profile the profile value for which a name is requested
- * @return A name for the profile if found, NULL otherwise.
+ * @param pkt packet to free
*/
-const char *av_get_profile_name(const AVCodec *codec, int profile);
+void av_free_packet(AVPacket *pkt);
-#if FF_API_ALLOC_CONTEXT
/**
- * Set the fields of the given AVCodecContext to default values.
+ * Allocate new information of a packet.
*
- * @param s The AVCodecContext of which the fields should be set to default values.
- * @deprecated use avcodec_get_context_defaults3
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to fresh allocated data or NULL otherwise
*/
-attribute_deprecated
-void avcodec_get_context_defaults(AVCodecContext *s);
-
-/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API!
- * we WILL change its arguments and name a few times! */
-attribute_deprecated
-void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType);
-#endif
+uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
/**
- * Set the fields of the given AVCodecContext to default values corresponding
- * to the given codec (defaults may be codec-dependent).
+ * Shrink the already allocated side data buffer
*
- * Do not call this function if a non-NULL codec has been passed
- * to avcodec_alloc_context3() that allocated this AVCodecContext.
- * If codec is non-NULL, it is illegal to call avcodec_open2() with a
- * different codec on this AVCodecContext.
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
*/
-int avcodec_get_context_defaults3(AVCodecContext *s, AVCodec *codec);
+int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
-#if FF_API_ALLOC_CONTEXT
/**
- * Allocate an AVCodecContext and set its fields to default values. The
- * resulting struct can be deallocated by simply calling av_free().
- *
- * @return An AVCodecContext filled with default values or NULL on failure.
- * @see avcodec_get_context_defaults
+ * Get side information from packet.
*
- * @deprecated use avcodec_alloc_context3()
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size pointer for side information size to store (optional)
+ * @return pointer to data if present or NULL otherwise
*/
-attribute_deprecated
-AVCodecContext *avcodec_alloc_context(void);
+uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int *size);
+
+int av_packet_merge_side_data(AVPacket *pkt);
+
+int av_packet_split_side_data(AVPacket *pkt);
-/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API!
- * we WILL change its arguments and name a few times! */
-attribute_deprecated
-AVCodecContext *avcodec_alloc_context2(enum AVMediaType);
-#endif
/**
- * Allocate an AVCodecContext and set its fields to default values. The
- * resulting struct can be deallocated by calling avcodec_close() on it followed
- * by av_free().
- *
- * @param codec if non-NULL, allocate private data and initialize defaults
- * for the given codec. It is illegal to then call avcodec_open2()
- * with a different codec.
- * If NULL, then the codec-specific defaults won't be initialized,
- * which may result in suboptimal default settings (this is
- * important mainly for encoders, e.g. libx264).
- *
- * @return An AVCodecContext filled with default values or NULL on failure.
- * @see avcodec_get_context_defaults
+ * @}
*/
-AVCodecContext *avcodec_alloc_context3(AVCodec *codec);
/**
- * Copy the settings of the source AVCodecContext into the destination
- * AVCodecContext. The resulting destination codec context will be
- * unopened, i.e. you are required to call avcodec_open2() before you
- * can use this AVCodecContext to decode/encode video/audio data.
- *
- * @param dest target codec context, should be initialized with
- * avcodec_alloc_context3(), but otherwise uninitialized
- * @param src source codec context
- * @return AVERROR() on error (e.g. memory allocation error), 0 on success
+ * @addtogroup lavc_decoding
+ * @{
*/
-int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
/**
- * Set the fields of the given AVFrame to default values.
+ * Find a registered decoder with a matching codec ID.
*
- * @param pic The AVFrame of which the fields should be set to default values.
+ * @param id CodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
*/
-void avcodec_get_frame_defaults(AVFrame *pic);
+AVCodec *avcodec_find_decoder(enum CodecID id);
/**
- * Allocate an AVFrame and set its fields to default values. The resulting
- * struct can be deallocated by simply calling av_free().
+ * Find a registered decoder with the specified name.
*
- * @return An AVFrame filled with default values or NULL on failure.
- * @see avcodec_get_frame_defaults
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
*/
-AVFrame *avcodec_alloc_frame(void);
+AVCodec *avcodec_find_decoder_by_name(const char *name);
int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
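
The relocated lavc_packet documentation above covers the packet lifecycle helpers (av_init_packet, av_new_packet, av_packet_new_side_data, av_free_packet). A minimal sketch of that lifecycle, for illustration only (the payload and side-data sizes are arbitrary):

/* Sketch: basic AVPacket lifecycle using the lavc_packet functions above. */
#include <string.h>
#include <libavcodec/avcodec.h>

static int build_packet(const uint8_t *payload, int payload_size)
{
    AVPacket pkt;
    uint8_t *sd;
    int ret;

    av_init_packet(&pkt);                    /* defaults for optional fields */
    ret = av_new_packet(&pkt, payload_size); /* padded payload allocation */
    if (ret < 0)
        return ret;
    memcpy(pkt.data, payload, payload_size);

    /* attach a side-data block, e.g. replacement extradata */
    sd = av_packet_new_side_data(&pkt, AV_PKT_DATA_NEW_EXTRADATA, 16);
    if (!sd) {
        av_free_packet(&pkt);
        return AVERROR(ENOMEM);
    }
    memset(sd, 0, 16);

    /* ... hand the packet to a decoder or muxer here ... */

    av_free_packet(&pkt);                    /* frees payload and side data */
    return 0;
}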
@@ -3662,6 +3533,7 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
* @return Required padding in pixels.
*/
unsigned avcodec_get_edge_width(void);
+
/**
* Modify width and height values so that they will result in a memory
* buffer that is acceptable for the codec if you do not use any horizontal
@@ -3672,6 +3544,7 @@ unsigned avcodec_get_edge_width(void);
* according to avcodec_get_edge_width() before.
*/
void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+
/**
* Modify width and height values so that they will result in a memory
* buffer that is acceptable for the codec if you also ensure that all
@@ -3684,84 +3557,6 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
int linesize_align[AV_NUM_DATA_POINTERS]);
-enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
-
-int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
-int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
-//FIXME func typedef
-
-#if FF_API_AVCODEC_OPEN
-/**
- * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
- * function the context has to be allocated.
- *
- * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
- * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
- * retrieving a codec.
- *
- * @warning This function is not thread safe!
- *
- * @code
- * avcodec_register_all();
- * codec = avcodec_find_decoder(CODEC_ID_H264);
- * if (!codec)
- * exit(1);
- *
- * context = avcodec_alloc_context3(codec);
- *
- * if (avcodec_open(context, codec) < 0)
- * exit(1);
- * @endcode
- *
- * @param avctx The context which will be set up to use the given codec.
- * @param codec The codec to use within the context.
- * @return zero on success, a negative value on error
- * @see avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_close
- *
- * @deprecated use avcodec_open2
- */
-attribute_deprecated
-int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
-#endif
-
-/**
- * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
- * function the context has to be allocated with avcodec_alloc_context3().
- *
- * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
- * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
- * retrieving a codec.
- *
- * @warning This function is not thread safe!
- *
- * @code
- * avcodec_register_all();
- * av_dict_set(&opts, "b", "2.5M", 0);
- * codec = avcodec_find_decoder(CODEC_ID_H264);
- * if (!codec)
- * exit(1);
- *
- * context = avcodec_alloc_context3(codec);
- *
- * if (avcodec_open2(context, codec, opts) < 0)
- * exit(1);
- * @endcode
- *
- * @param avctx The context to initialize.
- * @param codec The codec to open this context for. If a non-NULL codec has been
- * previously passed to avcodec_alloc_context3() or
- * avcodec_get_context_defaults3() for this context, then this
- * parameter MUST be either NULL or equal to the previously passed
- * codec.
- * @param options A dictionary filled with AVCodecContext and codec-private options.
- * On return this object will be filled with options that were not found.
- *
- * @return zero on success, a negative value on error
- * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
- * av_dict_set(), av_opt_find().
- */
-int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);
-
#if FF_API_OLD_DECODE_AUDIO
/**
* Wrapper function which calls avcodec_decode_audio4.
@@ -3924,236 +3719,10 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
AVPacket *avpkt);
/**
- * Free all allocated data in the given subtitle struct.
- *
- * @param sub AVSubtitle to free.
- */
-void avsubtitle_free(AVSubtitle *sub);
-
-#if FF_API_OLD_ENCODE_AUDIO
-/**
- * Encode an audio frame from samples into buf.
- *
- * @deprecated Use avcodec_encode_audio2 instead.
- *
- * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
- * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
- * will know how much space is needed because it depends on the value passed
- * in buf_size as described below. In that case a lower value can be used.
- *
- * @param avctx the codec context
- * @param[out] buf the output buffer
- * @param[in] buf_size the output buffer size
- * @param[in] samples the input buffer containing the samples
- * The number of samples read from this buffer is frame_size*channels,
- * both of which are defined in avctx.
- * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of
- * samples read from samples is equal to:
- * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))
- * This also implies that av_get_bits_per_sample() must not return 0 for these
- * codecs.
- * @return On error a negative value is returned, on success zero or the number
- * of bytes used to encode the data read from the input buffer.
- */
-int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
- uint8_t *buf, int buf_size,
- const short *samples);
-#endif
-
-/**
- * Encode a frame of audio.
- *
- * Takes input samples from frame and writes the next output packet, if
- * available, to avpkt. The output packet does not necessarily contain data for
- * the most recent frame, as encoders can delay, split, and combine input frames
- * internally as needed.
- *
- * @param avctx codec context
- * @param avpkt output AVPacket.
- * The user can supply an output buffer by setting
- * avpkt->data and avpkt->size prior to calling the
- * function, but if the size of the user-provided data is not
- * large enough, encoding will fail. All other AVPacket fields
- * will be reset by the encoder using av_init_packet(). If
- * avpkt->data is NULL, the encoder will allocate it.
- * The encoder will set avpkt->size to the size of the
- * output packet.
- *
- * If this function fails or produces no output, avpkt will be
- * freed using av_free_packet() (i.e. avpkt->destruct will be
- * called to free the user supplied buffer).
- * @param[in] frame AVFrame containing the raw audio data to be encoded.
- * May be NULL when flushing an encoder that has the
- * CODEC_CAP_DELAY capability set.
- * There are 2 codec capabilities that affect the allowed
- * values of frame->nb_samples.
- * If CODEC_CAP_SMALL_LAST_FRAME is set, then only the final
- * frame may be smaller than avctx->frame_size, and all other
- * frames must be equal to avctx->frame_size.
- * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
- * can have any number of samples.
- * If neither is set, frame->nb_samples must be equal to
- * avctx->frame_size for all frames.
- * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
- * output packet is non-empty, and to 0 if it is
- * empty. If the function returns an error, the
- * packet can be assumed to be invalid, and the
- * value of got_packet_ptr is undefined and should
- * not be used.
- * @return 0 on success, negative error code on failure
- */
-int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
- const AVFrame *frame, int *got_packet_ptr);
-
-/**
- * Fill audio frame data and linesize.
- * AVFrame extended_data channel pointers are allocated if necessary for
- * planar audio.
- *
- * @param frame the AVFrame
- * frame->nb_samples must be set prior to calling the
- * function. This function fills in frame->data,
- * frame->extended_data, frame->linesize[0].
- * @param nb_channels channel count
- * @param sample_fmt sample format
- * @param buf buffer to use for frame data
- * @param buf_size size of buffer
- * @param align plane size sample alignment (0 = default)
- * @return 0 on success, negative error code on failure
- */
-int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
- enum AVSampleFormat sample_fmt, const uint8_t *buf,
- int buf_size, int align);
-
-#if FF_API_OLD_ENCODE_VIDEO
-/**
- * @deprecated use avcodec_encode_video2() instead.
- *
- * Encode a video frame from pict into buf.
- * The input picture should be
- * stored using a specific format, namely avctx.pix_fmt.
- *
- * @param avctx the codec context
- * @param[out] buf the output buffer for the bitstream of encoded frame
- * @param[in] buf_size the size of the output buffer in bytes
- * @param[in] pict the input picture to encode
- * @return On error a negative value is returned, on success zero or the number
- * of bytes used from the output buffer.
- */
-attribute_deprecated
-int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
- const AVFrame *pict);
-#endif
-
-/**
- * Encode a frame of video.
- *
- * Takes input raw video data from frame and writes the next output packet, if
- * available, to avpkt. The output packet does not necessarily contain data for
- * the most recent frame, as encoders can delay and reorder input frames
- * internally as needed.
- *
- * @param avctx codec context
- * @param avpkt output AVPacket.
- * The user can supply an output buffer by setting
- * avpkt->data and avpkt->size prior to calling the
- * function, but if the size of the user-provided data is not
- * large enough, encoding will fail. All other AVPacket fields
- * will be reset by the encoder using av_init_packet(). If
- * avpkt->data is NULL, the encoder will allocate it.
- * The encoder will set avpkt->size to the size of the
- * output packet. The returned data (if any) belongs to the
- * caller, he is responsible for freeing it.
- *
- * If this function fails or produces no output, avpkt will be
- * freed using av_free_packet() (i.e. avpkt->destruct will be
- * called to free the user supplied buffer).
- * @param[in] frame AVFrame containing the raw video data to be encoded.
- * May be NULL when flushing an encoder that has the
- * CODEC_CAP_DELAY capability set.
- * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
- * output packet is non-empty, and to 0 if it is
- * empty. If the function returns an error, the
- * packet can be assumed to be invalid, and the
- * value of got_packet_ptr is undefined and should
- * not be used.
- * @return 0 on success, negative error code on failure
- */
-int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
- const AVFrame *frame, int *got_packet_ptr);
-
-int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
- const AVSubtitle *sub);
-
-/**
- * Close a given AVCodecContext and free all the data associated with it
- * (but not the AVCodecContext itself).
- *
- * Calling this function on an AVCodecContext that hasn't been opened will free
- * the codec-specific data allocated in avcodec_alloc_context3() /
- * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
- * do nothing.
- */
-int avcodec_close(AVCodecContext *avctx);
-
-/**
- * Register all the codecs, parsers and bitstream filters which were enabled at
- * configuration time. If you do not call this function you can select exactly
- * which formats you want to support, by using the individual registration
- * functions.
- *
- * @see avcodec_register
- * @see av_register_codec_parser
- * @see av_register_bitstream_filter
- */
-void avcodec_register_all(void);
-
-/**
- * Flush buffers, should be called when seeking or when switching to a different stream.
- */
-void avcodec_flush_buffers(AVCodecContext *avctx);
-
-void avcodec_default_free_buffers(AVCodecContext *s);
-
-/* misc useful functions */
-
-/**
- * Return codec bits per sample.
- *
- * @param[in] codec_id the codec
- * @return Number of bits per sample or zero if unknown for the given codec.
- */
-int av_get_bits_per_sample(enum CodecID codec_id);
-
-/**
- * Return the PCM codec associated with a sample format.
- * @param be endianness, 0 for little, 1 for big,
- * -1 (or anything else) for native
- * @return CODEC_ID_PCM_* or CODEC_ID_NONE
- */
-enum CodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
-
-/**
- * Return codec bits per sample.
- * Only return non-zero if the bits per sample is exactly correct, not an
- * approximation.
- *
- * @param[in] codec_id the codec
- * @return Number of bits per sample or zero if unknown for the given codec.
- */
-int av_get_exact_bits_per_sample(enum CodecID codec_id);
-
-/**
- * Return audio frame duration.
- *
- * @param avctx codec context
- * @param frame_bytes size of the frame, or 0 if unknown
- * @return frame duration, in samples, if known. 0 if not able to
- * determine.
+ * @defgroup lavc_parsing Frame parsing
+ * @{
*/
-int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
-/* frame parsing */
typedef struct AVCodecParserContext {
void *priv_data;
struct AVCodecParser *parser;
@@ -4348,6 +3917,576 @@ int av_parser_change(AVCodecParserContext *s,
const uint8_t *buf, int buf_size, int keyframe);
void av_parser_close(AVCodecParserContext *s);
+/**
+ * @}
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_encoding
+ * @{
+ */
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id CodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder(enum CodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder_by_name(const char *name);
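
A minimal lookup-and-open sketch for the two encoder-search functions above (illustrative only, not part of the commit); the CODEC_ID_MP2 / "libmp3lame" choices and the stream parameters are arbitrary examples:

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

static AVCodecContext *open_encoder_sketch(void)
{
    AVCodec *codec;
    AVCodecContext *ctx;

    avcodec_register_all();

    /* look up either by ID ... */
    codec = avcodec_find_encoder(CODEC_ID_MP2);
    /* ... or by name (only found if that encoder was compiled in) */
    if (!codec)
        codec = avcodec_find_encoder_by_name("libmp3lame");
    if (!codec)
        return NULL;

    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;

    ctx->sample_rate = 44100;
    ctx->channels    = 2;
    ctx->bit_rate    = 128000;
    ctx->sample_fmt  = codec->sample_fmts ? codec->sample_fmts[0]
                                          : AV_SAMPLE_FMT_S16;

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        av_freep(&ctx);
        return NULL;
    }
    return ctx;
}
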
+
+#if FF_API_OLD_ENCODE_AUDIO
+/**
+ * Encode an audio frame from samples into buf.
+ *
+ * @deprecated Use avcodec_encode_audio2 instead.
+ *
+ * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
+ * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
+ * will know how much space is needed because it depends on the value passed
+ * in buf_size as described below. In that case a lower value can be used.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer
+ * @param[in] buf_size the output buffer size
+ * @param[in] samples the input buffer containing the samples
+ * The number of samples read from this buffer is frame_size*channels,
+ * both of which are defined in avctx.
+ * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of
+ * samples read from samples is equal to:
+ * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))
+ * This also implies that av_get_bits_per_sample() must not return 0 for these
+ * codecs.
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used to encode the data read from the input buffer.
+ */
+int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size,
+ const short *samples);
+#endif
+
+/**
+ * Encode a frame of audio.
+ *
+ * Takes input samples from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay, split, and combine input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. All other AVPacket fields
+ * will be reset by the encoder using av_init_packet(). If
+ * avpkt->data is NULL, the encoder will allocate it.
+ * The encoder will set avpkt->size to the size of the
+ * output packet.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_free_packet() (i.e. avpkt->destruct will be
+ * called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw audio data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * CODEC_CAP_DELAY capability set.
+ * There are 2 codec capabilities that affect the allowed
+ * values of frame->nb_samples.
+ * If CODEC_CAP_SMALL_LAST_FRAME is set, then only the final
+ * frame may be smaller than avctx->frame_size, and all other
+ * frames must be equal to avctx->frame_size.
+ * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ * can have any number of samples.
+ * If neither is set, frame->nb_samples must be equal to
+ * avctx->frame_size for all frames.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
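
A minimal calling sketch for avcodec_encode_audio2() as documented above (illustrative, not part of the commit); it assumes avctx is an opened audio encoder and frame already holds avctx->frame_size samples:

#include <stdio.h>
#include <libavcodec/avcodec.h>

static int encode_audio_frame_sketch(AVCodecContext *avctx, AVFrame *frame,
                                     FILE *out)
{
    AVPacket pkt;
    int got_packet = 0, ret;

    av_init_packet(&pkt);
    pkt.data = NULL; /* let the encoder allocate the packet buffer */
    pkt.size = 0;

    ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet);
    if (ret < 0)
        return ret; /* per the documentation above, pkt was already freed */

    if (got_packet) {
        fwrite(pkt.data, 1, pkt.size, out);
        av_free_packet(&pkt);
    }
    return 0;
}
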
+
+#if FF_API_OLD_ENCODE_VIDEO
+/**
+ * @deprecated use avcodec_encode_video2() instead.
+ *
+ * Encode a video frame from pict into buf.
+ * The input picture should be
+ * stored using a specific format, namely avctx.pix_fmt.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer for the bitstream of encoded frame
+ * @param[in] buf_size the size of the output buffer in bytes
+ * @param[in] pict the input picture to encode
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used from the output buffer.
+ */
+attribute_deprecated
+int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVFrame *pict);
+#endif
+
+/**
+ * Encode a frame of video.
+ *
+ * Takes input raw video data from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay and reorder input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. All other AVPacket fields
+ * will be reset by the encoder using av_init_packet(). If
+ * avpkt->data is NULL, the encoder will allocate it.
+ * The encoder will set avpkt->size to the size of the
+ * output packet. The returned data (if any) belongs to the
+ *                 caller, who is responsible for freeing it.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_free_packet() (i.e. avpkt->destruct will be
+ * called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw video data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * CODEC_CAP_DELAY capability set.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
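
A draining sketch for avcodec_encode_video2() (illustrative, not part of the commit), showing the NULL-frame flush allowed for encoders with CODEC_CAP_DELAY:

#include <stdio.h>
#include <libavcodec/avcodec.h>

static int flush_video_encoder_sketch(AVCodecContext *avctx, FILE *out)
{
    AVPacket pkt;
    int got_packet, ret;

    if (!(avctx->codec->capabilities & CODEC_CAP_DELAY))
        return 0; /* nothing can be buffered inside this encoder */

    do {
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        ret = avcodec_encode_video2(avctx, &pkt, NULL, &got_packet);
        if (ret < 0)
            return ret;
        if (got_packet) {
            fwrite(pkt.data, 1, pkt.size, out);
            av_free_packet(&pkt);
        }
    } while (got_packet);

    return 0;
}
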
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub);
+
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_resample Audio resampling
+ * @ingroup libavc
+ *
+ * @{
+ */
+struct ReSampleContext;
+struct AVResampleContext;
+
+typedef struct ReSampleContext ReSampleContext;
+
+/**
+ * Initialize audio resampling context.
+ *
+ * @param output_channels number of output channels
+ * @param input_channels number of input channels
+ * @param output_rate output sample rate
+ * @param input_rate input sample rate
+ * @param sample_fmt_out requested output sample format
+ * @param sample_fmt_in input sample format
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear if 1 then the used FIR filter will be linearly interpolated
+ *               between the 2 closest, if 0 the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ * @return allocated ReSampleContext, NULL if error occurred
+ */
+ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
+ int output_rate, int input_rate,
+ enum AVSampleFormat sample_fmt_out,
+ enum AVSampleFormat sample_fmt_in,
+ int filter_length, int log2_phase_count,
+ int linear, double cutoff);
+
+int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);
+
+/**
+ * Free resample context.
+ *
+ * @param s a non-NULL pointer to a resample context previously
+ * created with av_audio_resample_init()
+ */
+void audio_resample_close(ReSampleContext *s);
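
A short sketch of the ReSampleContext API above (illustrative, not part of the commit); the 48 kHz to 44.1 kHz stereo S16 conversion and the filter parameters (16, 10, 0, 0.8) are arbitrary but conventional choices:

#include <libavcodec/avcodec.h>

static int resample_s16_sketch(short *in, int in_samples, short *out)
{
    ReSampleContext *rs;
    int out_samples;

    rs = av_audio_resample_init(2, 2,            /* output/input channels */
                                44100, 48000,    /* output/input rates    */
                                AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16,
                                16, 10, 0, 0.8); /* filter_length, log2_phase_count,
                                                    linear, cutoff */
    if (!rs)
        return -1;

    /* out must be sized for the rate-converted result plus a small margin */
    out_samples = audio_resample(rs, out, in, in_samples);

    audio_resample_close(rs);
    return out_samples;
}
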
+
+
+/**
+ * Initialize an audio resampler.
+ * Note, if either rate is not an integer then simply scale both rates up so they are.
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear If 1 then the used FIR filter will be linearly interpolated
+ *               between the 2 closest, if 0 the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ */
+struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);
+
+/**
+ * Resample an array of samples using a previously configured context.
+ * @param src an array of unconsumed samples
+ * @param consumed the number of samples of src which have been consumed is returned here
+ * @param src_size the number of unconsumed samples available
+ * @param dst_size the amount of space in samples available in dst
+ * @param update_ctx If this is 0 then the context will not be modified, so that several channels can be resampled with the same context.
+ * @return the number of samples written in dst or -1 if an error occurred
+ */
+int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx);
+
+
+/**
+ * Compensate sample rate/timestamp drift. The compensation is done by changing
+ * the resampler parameters, so no audible clicks or similar distortions occur.
+ * @param compensation_distance distance in output samples over which the compensation should be performed
+ * @param sample_delta number of output samples which should be output less
+ *
+ * Example: av_resample_compensate(c, 10, 500)
+ * here, instead of 510 samples, only 500 samples would be output.
+ *
+ * Note: due to rounding, the actual compensation might be slightly different,
+ * especially if the compensation_distance is large and the in_rate used during init is small.
+ */
+void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);
+void av_resample_close(struct AVResampleContext *c);
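
A sketch of the lower-level AVResampleContext API above for a single channel (illustrative, not part of the commit); the compensation values are the ones from the example in the documentation:

#include <libavcodec/avcodec.h>

static int mono_resample_sketch(short *dst, int dst_size,
                                short *src, int src_size)
{
    struct AVResampleContext *c;
    int consumed = 0, written;

    c = av_resample_init(44100, 48000, /* out_rate, in_rate */
                         16, 10, 0, 0.8);
    if (!c)
        return -1;

    /* optional drift compensation: output 500 samples where 510 were due */
    av_resample_compensate(c, 10, 500);

    /* update_ctx = 1 because only this one channel uses the context */
    written = av_resample(c, dst, src, &consumed, src_size, dst_size, 1);

    av_resample_close(c);
    return written;
}
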
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_picture
+ * @{
+ */
+
+/**
+ * Allocate memory for a picture. Call avpicture_free() to free it.
+ *
+ * @see avpicture_fill()
+ *
+ * @param picture the picture to be filled in
+ * @param pix_fmt the format of the picture
+ * @param width the width of the picture
+ * @param height the height of the picture
+ * @return zero if successful, a negative value if not
+ */
+int avpicture_alloc(AVPicture *picture, enum PixelFormat pix_fmt, int width, int height);
+
+/**
+ * Free a picture previously allocated by avpicture_alloc().
+ * The data buffer used by the AVPicture is freed, but the AVPicture structure
+ * itself is not.
+ *
+ * @param picture the AVPicture to be freed
+ */
+void avpicture_free(AVPicture *picture);
+
+/**
+ * Fill in the AVPicture fields.
+ * The fields of the given AVPicture are filled in by using the 'ptr' address
+ * which points to the image data buffer. Depending on the specified picture
+ * format, one or multiple image data pointers and line sizes will be set.
+ * If a planar format is specified, several pointers will be set pointing to
+ * the different picture planes and the line sizes of the different planes
+ * will be stored in the lines_sizes array.
+ * Call with ptr == NULL to get the required size for the ptr buffer.
+ *
+ * To allocate the buffer and fill in the AVPicture fields in one call,
+ * use avpicture_alloc().
+ *
+ * @param picture AVPicture whose fields are to be filled in
+ * @param ptr Buffer which will contain or contains the actual image data
+ * @param pix_fmt The format in which the picture data is stored.
+ * @param width the width of the image in pixels
+ * @param height the height of the image in pixels
+ * @return size of the image data in bytes
+ */
+int avpicture_fill(AVPicture *picture, uint8_t *ptr,
+ enum PixelFormat pix_fmt, int width, int height);
+
+/**
+ * Copy pixel data from an AVPicture into a buffer.
+ * The data is stored compactly, without any gaps for alignment or padding
+ * which may be applied by avpicture_fill().
+ *
+ * @see avpicture_get_size()
+ *
+ * @param[in] src AVPicture containing image data
+ * @param[in] pix_fmt The format in which the picture data is stored.
+ * @param[in] width the width of the image in pixels.
+ * @param[in] height the height of the image in pixels.
+ * @param[out] dest A buffer into which picture data will be copied.
+ * @param[in] dest_size The size of 'dest'.
+ * @return The number of bytes written to dest, or a negative value (error code) on error.
+ */
+int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width, int height,
+ unsigned char *dest, int dest_size);
+
+/**
+ * Calculate the size in bytes that a picture of the given width and height
+ * would occupy if stored in the given picture format.
+ * Note that this returns the size of a compact representation as generated
+ * by avpicture_layout(), which can be smaller than the size required for e.g.
+ * avpicture_fill().
+ *
+ * @param pix_fmt the given picture format
+ * @param width the width of the image
+ * @param height the height of the image
+ * @return Image data size in bytes or -1 on error (e.g. too large dimensions).
+ */
+int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height);
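
A sketch combining avpicture_get_size() and avpicture_fill() as documented above (illustrative, not part of the commit); PIX_FMT_YUV420P is an arbitrary choice:

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

static uint8_t *wrap_picture_buffer_sketch(AVPicture *pic, int w, int h)
{
    enum PixelFormat fmt = PIX_FMT_YUV420P;
    int size = avpicture_get_size(fmt, w, h);
    uint8_t *buf;

    if (size < 0)
        return NULL;
    buf = av_malloc(size);
    if (!buf)
        return NULL;

    /* sets pic->data[] and pic->linesize[] to point into buf */
    avpicture_fill(pic, buf, fmt, w, h);
    return buf; /* caller owns buf and must av_free() it when done */
}
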
+
+/**
+ * Deinterlace the image; return -1 if deinterlacing is not supported.
+ */
+int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
+ enum PixelFormat pix_fmt, int width, int height);
+/**
+ * Copy image src to dst. Wraps av_picture_data_copy() above.
+ */
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+ enum PixelFormat pix_fmt, int width, int height);
+
+/**
+ * Crop image top and left side.
+ */
+int av_picture_crop(AVPicture *dst, const AVPicture *src,
+ enum PixelFormat pix_fmt, int top_band, int left_band);
+
+/**
+ * Pad image.
+ */
+int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum PixelFormat pix_fmt,
+ int padtop, int padbottom, int padleft, int padright, int *color);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_misc Utility functions
+ * @ingroup libavc
+ *
+ * Miscellaneous utility functions related to both encoding and decoding
+ * (or neither).
+ * @{
+ */
+
+/**
+ * @defgroup lavc_misc_pixfmt Pixel formats
+ *
+ * Functions for working with pixel formats.
+ * @{
+ */
+
+void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift);
+
+/**
+ * Return a value representing the fourCC code associated with the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum PixelFormat pix_fmt);
+
+#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
+#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
+#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
+#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
+#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
+#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+
+/**
+ * Compute what kind of losses will occur when converting from one specific
+ * pixel format to another.
+ * When converting from one pixel format to another, information loss may occur.
+ * For example, when converting from RGB24 to GRAY, the color information will
+ * be lost. Similarly, other losses occur when converting from some formats to
+ * other formats. These losses can involve loss of chroma, but also loss of
+ * resolution, loss of color depth, loss due to the color space conversion, loss
+ * of the alpha bits or loss due to color quantization.
+ * avcodec_get_pix_fmt_loss() informs you about the various types of losses
+ * which will occur when converting from one pixel format to another.
+ *
+ * @param[in] dst_pix_fmt destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @return Combination of flags informing you what kind of losses will occur
+ * (maximum loss for an invalid dst_pix_fmt).
+ */
+int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_pix_fmt,
+ int has_alpha);
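
A small sketch of checking the loss mask returned by avcodec_get_pix_fmt_loss() (illustrative, not part of the commit):

#include <libavcodec/avcodec.h>

static int conversion_keeps_chroma_sketch(enum PixelFormat dst,
                                          enum PixelFormat src)
{
    /* e.g. RGB24 -> GRAY8 would report at least FF_LOSS_CHROMA */
    int loss = avcodec_get_pix_fmt_loss(dst, src, 0 /* source has no alpha */);

    return !(loss & FF_LOSS_CHROMA);
}
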
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt() selects the pixel
+ * format that incurs the least amount of loss. The candidate pixel formats
+ * it chooses from are determined by the pix_fmt_mask parameter.
+ *
+ * Note, only the first 64 pixel formats will fit in pix_fmt_mask.
+ *
+ * @code
+ * src_pix_fmt = PIX_FMT_YUV420P;
+ * pix_fmt_mask = (1 << PIX_FMT_YUV422P) | (1 << PIX_FMT_RGB24);
+ * dst_pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src_pix_fmt, alpha, &loss);
+ * @endcode
+ *
+ * @param[in] pix_fmt_mask bitmask determining which pixel format to choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format and a selection of two destination pixel formats. When converting from
+ * one pixel format to another, information loss may occur. For example, when converting
+ * from RGB24 to GRAY, the color information will be lost. Similarly, other losses occur when
+ * converting from some formats to other formats. avcodec_find_best_pix_fmt2() selects
+ * whichever of the two given pixel formats incurs the least amount of loss.
+ *
+ * If one of the destination formats is PIX_FMT_NONE the other pixel format (if valid) will be
+ * returned.
+ *
+ * @code
+ * src_pix_fmt = PIX_FMT_YUV420P;
+ * dst_pix_fmt1= PIX_FMT_RGB24;
+ * dst_pix_fmt2= PIX_FMT_GRAY8;
+ * dst_pix_fmt3= PIX_FMT_RGB8;
+ * loss= FF_LOSS_CHROMA; // don't care about chroma loss, so chroma loss will be ignored.
+ * dst_pix_fmt = avcodec_find_best_pix_fmt2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, alpha, &loss);
+ * dst_pix_fmt = avcodec_find_best_pix_fmt2(dst_pix_fmt, dst_pix_fmt3, src_pix_fmt, alpha, &loss);
+ * @endcode
+ *
+ * @param[in] dst_pix_fmt1 One of the two destination pixel formats to choose from
+ * @param[in] dst_pix_fmt2 The other of the two destination pixel formats to choose from
+ * @param[in] src_pix_fmt Source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[in, out] loss_ptr Combination of loss flags. In: selects which of the losses to ignore, i.e.
+ * NULL or value of zero means we care about all losses. Out: the loss
+ * that occurs when converting from src to selected dst pixel format.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum PixelFormat avcodec_find_best_pix_fmt2(enum PixelFormat dst_pix_fmt1, enum PixelFormat dst_pix_fmt2,
+ enum PixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
+
+enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
+
+/**
+ * @}
+ */
+
+void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
+
+/**
+ * Put a string representing the codec tag codec_tag in buf.
+ *
+ * @param buf_size size in bytes of buf
+ * @return the length of the string that would have been generated if
+ * enough space had been available, excluding the trailing null
+ */
+size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);
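
A one-line usage sketch for av_get_codec_tag_string() (illustrative, not part of the commit); the 'avc1' tag is an arbitrary example:

#include <stdio.h>
#include <libavcodec/avcodec.h>

static void print_codec_tag_sketch(void)
{
    char buf[32];

    av_get_codec_tag_string(buf, sizeof(buf), MKTAG('a', 'v', 'c', '1'));
    printf("codec tag: %s\n", buf);
}
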
+
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char *av_get_profile_name(const AVCodec *codec, int profile);
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
+//FIXME func typedef
+
+/**
+ * Fill audio frame data and linesize.
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame the AVFrame
+ * frame->nb_samples must be set prior to calling the
+ * function. This function fills in frame->data,
+ * frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt sample format
+ * @param buf buffer to use for frame data
+ * @param buf_size size of buffer
+ * @param align plane size sample alignment (0 = default)
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t *buf,
+ int buf_size, int align);
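
A sketch pairing avcodec_fill_audio_frame() with av_samples_get_buffer_size() from libavutil (illustrative, not part of the commit):

#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>
#include <libavutil/mem.h>

static AVFrame *alloc_audio_frame_sketch(int channels, int nb_samples,
                                         enum AVSampleFormat fmt)
{
    AVFrame *frame = avcodec_alloc_frame();
    uint8_t *buf;
    int size;

    if (!frame)
        return NULL;
    frame->nb_samples = nb_samples; /* must be set before the fill call */

    size = av_samples_get_buffer_size(NULL, channels, nb_samples, fmt, 0);
    if (size < 0)
        goto fail;
    buf = av_malloc(size);
    if (!buf)
        goto fail;

    if (avcodec_fill_audio_frame(frame, channels, fmt, buf, size, 0) < 0) {
        av_free(buf);
        goto fail;
    }
    return frame; /* frame->data/extended_data now reference buf */

fail:
    av_freep(&frame);
    return NULL;
}
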
+
+/**
+ * Flush buffers, should be called when seeking or when switching to a different stream.
+ */
+void avcodec_flush_buffers(AVCodecContext *avctx);
+
+void avcodec_default_free_buffers(AVCodecContext *s);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum CodecID codec_id);
+
+/**
+ * Return the PCM codec associated with a sample format.
+ * @param be endianness, 0 for little, 1 for big,
+ * -1 (or anything else) for native
+ * @return CODEC_ID_PCM_* or CODEC_ID_NONE
+ */
+enum CodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum CodecID codec_id);
+
+/**
+ * Return audio frame duration.
+ *
+ * @param avctx codec context
+ * @param frame_bytes size of the frame, or 0 if unknown
+ * @return frame duration, in samples, if known. 0 if not able to
+ * determine.
+ */
+int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
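
A sketch combining the helpers above to derive a PCM packet's duration in samples (illustrative, not part of the commit); avctx is assumed to describe an opened PCM stream:

#include <libavcodec/avcodec.h>

static int pcm_packet_duration_sketch(AVCodecContext *avctx, int packet_bytes)
{
    int bps = av_get_bits_per_sample(avctx->codec_id);

    /* for PCM the duration can be derived by hand ... */
    if (bps > 0 && avctx->channels > 0)
        return packet_bytes * 8 / (bps * avctx->channels);

    /* ... but av_get_audio_frame_duration() covers many more codecs */
    return av_get_audio_frame_duration(avctx, packet_bytes);
}
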
+
typedef struct AVBitStreamFilterContext {
void *priv_data;
@@ -4411,24 +4550,6 @@ void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
/**
- * Copy image src to dst. Wraps av_picture_data_copy() above.
- */
-void av_picture_copy(AVPicture *dst, const AVPicture *src,
- enum PixelFormat pix_fmt, int width, int height);
-
-/**
- * Crop image top and left side.
- */
-int av_picture_crop(AVPicture *dst, const AVPicture *src,
- enum PixelFormat pix_fmt, int top_band, int left_band);
-
-/**
- * Pad image.
- */
-int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum PixelFormat pix_fmt,
- int padtop, int padbottom, int padleft, int padright, int *color);
-
-/**
* Encode extradata length to a buffer. Used by xiph codecs.
*
* @param s buffer to write to; must be at least (v/255+1) bytes long
@@ -4505,20 +4626,10 @@ int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));
enum AVMediaType avcodec_get_type(enum CodecID codec_id);
/**
- * Get the AVClass for AVCodecContext. It can be used in combination with
- * AV_OPT_SEARCH_FAKE_OBJ for examining options.
- *
- * @see av_opt_find().
- */
-const AVClass *avcodec_get_class(void);
-
-/**
- * Get the AVClass for AVFrame. It can be used in combination with
- * AV_OPT_SEARCH_FAKE_OBJ for examining options.
- *
- * @see av_opt_find().
+ * Get the name of a codec.
+ * @return a static string identifying the codec; never NULL
*/
-const AVClass *avcodec_get_frame_class(void);
+const char *avcodec_get_name(enum CodecID id);
/**
* @return a positive value if s is open (i.e. avcodec_open2() was called on it
@@ -4536,4 +4647,8 @@ int av_codec_is_encoder(AVCodec *codec);
*/
int av_codec_is_decoder(AVCodec *codec);
+/**
+ * @}
+ */
+
#endif /* AVCODEC_AVCODEC_H */
diff --git a/libavcodec/avfft.h b/libavcodec/avfft.h
index be2d9c7e10..2d20a45f87 100644
--- a/libavcodec/avfft.h
+++ b/libavcodec/avfft.h
@@ -19,6 +19,19 @@
#ifndef AVCODEC_AVFFT_H
#define AVCODEC_AVFFT_H
+/**
+ * @file
+ * @ingroup lavc_fft
+ * FFT functions
+ */
+
+/**
+ * @defgroup lavc_fft FFT functions
+ * @ingroup lavc_misc
+ *
+ * @{
+ */
+
typedef float FFTSample;
typedef struct FFTComplex {
@@ -96,4 +109,8 @@ DCTContext *av_dct_init(int nbits, enum DCTTransformType type);
void av_dct_calc(DCTContext *s, FFTSample *data);
void av_dct_end (DCTContext *s);
+/**
+ * @}
+ */
+
#endif /* AVCODEC_AVFFT_H */
diff --git a/libavcodec/dxva2.h b/libavcodec/dxva2.h
index fc99560830..7d27ca5af7 100644
--- a/libavcodec/dxva2.h
+++ b/libavcodec/dxva2.h
@@ -23,11 +23,24 @@
#ifndef AVCODEC_DXVA_H
#define AVCODEC_DXVA_H
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_dxva2
+ * Public libavcodec DXVA2 header.
+ */
+
#include <stdint.h>
#include <d3d9.h>
#include <dxva2api.h>
+/**
+ * @defgroup lavc_codec_hwaccel_dxva2 DXVA2
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards
/**
@@ -68,4 +81,8 @@ struct dxva_context {
unsigned report_id;
};
+/**
+ * @}
+ */
+
#endif /* AVCODEC_DXVA_H */
diff --git a/libavcodec/ppc/gmc_altivec.c b/libavcodec/ppc/gmc_altivec.c
index f966165436..1341594ee5 100644
--- a/libavcodec/ppc/gmc_altivec.c
+++ b/libavcodec/ppc/gmc_altivec.c
@@ -48,7 +48,7 @@ void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int
unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
- tempA = vec_ld(0, (unsigned short*)ABCD);
+ tempA = vec_ld(0, (const unsigned short*)ABCD);
Av = vec_splat(tempA, 0);
Bv = vec_splat(tempA, 1);
Cv = vec_splat(tempA, 2);
diff --git a/libavcodec/ppc/int_altivec.c b/libavcodec/ppc/int_altivec.c
index 1d7aa1840e..30ae14fa5b 100644
--- a/libavcodec/ppc/int_altivec.c
+++ b/libavcodec/ppc/int_altivec.c
@@ -79,7 +79,8 @@ static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
return u.score[3];
}
-static int32_t scalarproduct_int16_altivec(const int16_t * v1, const int16_t * v2, int order, const int shift)
+static int32_t scalarproduct_int16_altivec(int16_t *v1, const int16_t *v2,
+ int order, const int shift)
{
int i;
LOAD_ZERO;
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index 1af67eecdb..263ad94b17 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -521,7 +521,7 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int
*/
static int calc_add_mv(RV34DecContext *r, int dir, int val)
{
- int mul = dir ? -r->weight2 : r->weight1;
+ int mul = dir ? -r->mv_weight2 : r->mv_weight1;
return (val * mul + 0x2000) >> 14;
}
@@ -776,24 +776,24 @@ static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
static void rv4_weight(RV34DecContext *r)
{
- r->rdsp.rv40_weight_pixels_tab[0](r->s.dest[0],
- r->tmp_b_block_y[0],
- r->tmp_b_block_y[1],
- r->weight1,
- r->weight2,
- r->s.linesize);
- r->rdsp.rv40_weight_pixels_tab[1](r->s.dest[1],
- r->tmp_b_block_uv[0],
- r->tmp_b_block_uv[2],
- r->weight1,
- r->weight2,
- r->s.uvlinesize);
- r->rdsp.rv40_weight_pixels_tab[1](r->s.dest[2],
- r->tmp_b_block_uv[1],
- r->tmp_b_block_uv[3],
- r->weight1,
- r->weight2,
- r->s.uvlinesize);
+ r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
+ r->tmp_b_block_y[0],
+ r->tmp_b_block_y[1],
+ r->weight1,
+ r->weight2,
+ r->s.linesize);
+ r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
+ r->tmp_b_block_uv[0],
+ r->tmp_b_block_uv[2],
+ r->weight1,
+ r->weight2,
+ r->s.uvlinesize);
+ r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
+ r->tmp_b_block_uv[1],
+ r->tmp_b_block_uv[3],
+ r->weight1,
+ r->weight2,
+ r->s.uvlinesize);
}
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
@@ -1707,11 +1707,21 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
- if (!refdist) {
- r->weight1 = r->weight2 = 8192;
- } else {
- r->weight1 = (dist0 << 14) / refdist;
- r->weight2 = (dist1 << 14) / refdist;
+ if(!refdist){
+ r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
+ r->scaled_weight = 0;
+ }else{
+ r->mv_weight1 = (dist0 << 14) / refdist;
+ r->mv_weight2 = (dist1 << 14) / refdist;
+ if((r->mv_weight1|r->mv_weight2) & 511){
+ r->weight1 = r->mv_weight1;
+ r->weight2 = r->mv_weight2;
+ r->scaled_weight = 0;
+ }else{
+ r->weight1 = r->mv_weight1 >> 9;
+ r->weight2 = r->mv_weight2 >> 9;
+ r->scaled_weight = 1;
+ }
}
}
s->mb_x = s->mb_y = 0;
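
The hunk above separates the motion-vector weights (mv_weight1/2, always full 0.14 precision) from the pixel weights (weight1/2), which are prescaled by 1/512 whenever both are multiples of 2^9 so that the cheaper "nornd" DSP variant selected by scaled_weight can be used. A standalone sketch of that decision (illustrative only):

/* Decide whether 0.14 fixed-point B-frame weights can be prescaled by 1/512. */
static void pick_weights_sketch(int mv_w1, int mv_w2,
                                int *w1, int *w2, int *scaled)
{
    if ((mv_w1 | mv_w2) & 511) {  /* not both multiples of 2^9: keep full precision */
        *w1     = mv_w1;
        *w2     = mv_w2;
        *scaled = 0;              /* selects rv40_weight_pixels_tab[0][...] ("rnd") */
    } else {
        *w1     = mv_w1 >> 9;
        *w2     = mv_w2 >> 9;
        *scaled = 1;              /* selects rv40_weight_pixels_tab[1][...] ("nornd") */
    }
}
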
diff --git a/libavcodec/rv34.h b/libavcodec/rv34.h
index 4207ab45dd..6b8d36556f 100644
--- a/libavcodec/rv34.h
+++ b/libavcodec/rv34.h
@@ -106,7 +106,9 @@ typedef struct RV34DecContext{
int rpr; ///< one field size in RV30 slice header
int cur_pts, last_pts, next_pts;
+ int scaled_weight;
int weight1, weight2; ///< B frame distance fractions (0.14) used in motion compensation
+ int mv_weight1, mv_weight2;
uint16_t *cbp_luma; ///< CBP values for luma subblocks
uint8_t *cbp_chroma; ///< CBP values for chroma subblocks
diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h
index c70194cc20..58da59f038 100644
--- a/libavcodec/rv34dsp.h
+++ b/libavcodec/rv34dsp.h
@@ -58,7 +58,12 @@ typedef struct RV34DSPContext {
qpel_mc_func avg_pixels_tab[4][16];
h264_chroma_mc_func put_chroma_pixels_tab[3];
h264_chroma_mc_func avg_chroma_pixels_tab[3];
- rv40_weight_func rv40_weight_pixels_tab[2];
+ /**
+ * Biweight functions, first dimension is transform size (16/8),
+ * second is whether the weight is prescaled by 1/512 to skip
+ * the intermediate shifting.
+ */
+ rv40_weight_func rv40_weight_pixels_tab[2][2];
rv34_inv_transform_func rv34_inv_transform;
rv34_inv_transform_func rv34_inv_transform_dc;
rv34_idct_add_func rv34_idct_add;
diff --git a/libavcodec/rv40dsp.c b/libavcodec/rv40dsp.c
index fbe59ef7b3..8ba10beac8 100644
--- a/libavcodec/rv40dsp.c
+++ b/libavcodec/rv40dsp.c
@@ -278,7 +278,7 @@ RV40_CHROMA_MC(put_, op_put)
RV40_CHROMA_MC(avg_, op_avg)
#define RV40_WEIGHT_FUNC(size) \
-static void rv40_weight_func_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
+static void rv40_weight_func_rnd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
int i, j;\
\
@@ -289,6 +289,18 @@ static void rv40_weight_func_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src
src2 += stride;\
dst += stride;\
}\
+}\
+static void rv40_weight_func_nornd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
+{\
+ int i, j;\
+\
+ for (j = 0; j < size; j++) {\
+ for (i = 0; i < size; i++)\
+ dst[i] = (w2 * src1[i] + w1 * src2[i] + 0x10) >> 5;\
+ src1 += stride;\
+ src2 += stride;\
+ dst += stride;\
+ }\
}
RV40_WEIGHT_FUNC(16)
@@ -578,8 +590,10 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) {
c->avg_chroma_pixels_tab[0] = avg_rv40_chroma_mc8_c;
c->avg_chroma_pixels_tab[1] = avg_rv40_chroma_mc4_c;
- c->rv40_weight_pixels_tab[0] = rv40_weight_func_16;
- c->rv40_weight_pixels_tab[1] = rv40_weight_func_8;
+ c->rv40_weight_pixels_tab[0][0] = rv40_weight_func_rnd_16;
+ c->rv40_weight_pixels_tab[0][1] = rv40_weight_func_rnd_8;
+ c->rv40_weight_pixels_tab[1][0] = rv40_weight_func_nornd_16;
+ c->rv40_weight_pixels_tab[1][1] = rv40_weight_func_nornd_8;
c->rv40_weak_loop_filter[0] = rv40_h_weak_loop_filter;
c->rv40_weak_loop_filter[1] = rv40_v_weak_loop_filter;
diff --git a/libavcodec/vaapi.h b/libavcodec/vaapi.h
index 4c3bb9bb52..815a27e226 100644
--- a/libavcodec/vaapi.h
+++ b/libavcodec/vaapi.h
@@ -24,11 +24,17 @@
#ifndef AVCODEC_VAAPI_H
#define AVCODEC_VAAPI_H
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vaapi
+ * Public libavcodec VA API header.
+ */
+
#include <stdint.h>
/**
- * @defgroup VAAPI_Decoding VA API Decoding
- * @ingroup Decoder
+ * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding
+ * @ingroup lavc_codec_hwaccel
* @{
*/
diff --git a/libavcodec/vda.c b/libavcodec/vda.c
index a2814d7024..e910ce1332 100644
--- a/libavcodec/vda.c
+++ b/libavcodec/vda.c
@@ -149,20 +149,55 @@ int ff_vda_create_decoder(struct vda_context *vda_ctx,
pthread_mutex_init(&vda_ctx->queue_mutex, NULL);
+<<<<<<< HEAD
if (extradata[4]==0xFE) {
// convert 3 byte NAL sizes to 4 byte
extradata[4] = 0xFF;
}
+||||||| merged common ancestors
+=======
+    /* Each VCL NAL in the bitstream sent to the decoder
+     * is preceded by a 4-byte length header.
+     * Change the avcC atom header if needed, to signal 4-byte headers. */
+ if (extradata_size >= 4 && (extradata[4] & 0x03) != 0x03) {
+ uint8_t *rw_extradata;
+
+ if (!(rw_extradata = av_malloc(extradata_size)))
+ return AVERROR(ENOMEM);
+
+ memcpy(rw_extradata, extradata, extradata_size);
+
+ rw_extradata[4] |= 0x03;
+
+ avc_data = CFDataCreate(kCFAllocatorDefault, rw_extradata, extradata_size);
+
+ av_freep(&rw_extradata);
+ } else {
+ avc_data = CFDataCreate(kCFAllocatorDefault, extradata, extradata_size);
+ }
+
+>>>>>>> qatar/master
config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
4,
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
+<<<<<<< HEAD
height = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->height);
width = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->width);
format = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->format);
avc_data = CFDataCreate(kCFAllocatorDefault, extradata, extradata_size);
+||||||| merged common ancestors
+ height = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->height);
+ width = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->width);
+ format = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->format);
+ avc_data = CFDataCreate(kCFAllocatorDefault, extradata, extradata_size);
+=======
+ height = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->height);
+ width = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->width);
+ format = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->format);
+>>>>>>> qatar/master
CFDictionarySetValue(config_info, kVDADecoderConfiguration_Height, height);
CFDictionarySetValue(config_info, kVDADecoderConfiguration_Width, width);
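
For reference, the qatar/master side of the conflict above rewrites the avcC extradata so that its lengthSizeMinusOne field (the low two bits of byte 4) reads 3, i.e. 4-byte NAL length prefixes. A standalone sketch of that rewrite, using plain libc allocation instead of FFmpeg's allocators (illustrative only):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Return a copy of avcC extradata whose NAL length field signals 4 bytes. */
static uint8_t *force_4byte_nal_lengths_sketch(const uint8_t *extradata, int size)
{
    uint8_t *copy;

    if (size < 5)
        return NULL;
    copy = malloc(size);
    if (!copy)
        return NULL;
    memcpy(copy, extradata, size);
    copy[4] |= 0x03; /* lengthSizeMinusOne = 3 -> 4-byte length headers */
    return copy;
}
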
diff --git a/libavcodec/vda.h b/libavcodec/vda.h
index 6e9de9cd0a..4ea0e9f3b2 100644
--- a/libavcodec/vda.h
+++ b/libavcodec/vda.h
@@ -23,6 +23,12 @@
#ifndef AVCODEC_VDA_H
#define AVCODEC_VDA_H
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vda
+ * Public libavcodec VDA header.
+ */
+
#include <pthread.h>
#include <stdint.h>
@@ -35,6 +41,13 @@
#undef Picture
/**
+ * @defgroup lavc_codec_hwaccel_vda VDA
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+/**
* This structure is used to store a decoded frame information and data.
*/
typedef struct {
@@ -165,4 +178,8 @@ vda_frame *ff_vda_queue_pop(struct vda_context *vda_ctx);
/** Release the given frame. */
void ff_vda_release_vda_frame(vda_frame *frame);
+/**
+ * @}
+ */
+
#endif /* AVCODEC_VDA_H */
diff --git a/libavcodec/vdpau.h b/libavcodec/vdpau.h
index f3a547184d..23394b57a3 100644
--- a/libavcodec/vdpau.h
+++ b/libavcodec/vdpau.h
@@ -25,7 +25,15 @@
#define AVCODEC_VDPAU_H
/**
- * @defgroup Decoder VDPAU Decoder and Renderer
+ * @file
+ * @ingroup lavc_codec_hwaccel_vdpau
+ * Public libavcodec VDPAU header.
+ */
+
+
+/**
+ * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer
+ * @ingroup lavc_codec_hwaccel
*
* VDPAU hardware acceleration has two modules
* - VDPAU decoding
@@ -38,8 +46,6 @@
* and rendering (API calls) are done as part of the VDPAU
* presentation (vo_vdpau.c) module.
*
- * @defgroup VDPAU_Decoding VDPAU Decoding
- * @ingroup Decoder
* @{
*/
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 6c631d8045..2429eb5e64 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -20,6 +20,12 @@
#ifndef AVCODEC_VERSION_H
#define AVCODEC_VERSION_H
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
#define LIBAVCODEC_VERSION_MAJOR 54
#define LIBAVCODEC_VERSION_MINOR 14
#define LIBAVCODEC_VERSION_MICRO 101
diff --git a/libavcodec/x86/rv40dsp.asm b/libavcodec/x86/rv40dsp.asm
index c13e9f03d9..e8acfb25fe 100644
--- a/libavcodec/x86/rv40dsp.asm
+++ b/libavcodec/x86/rv40dsp.asm
@@ -32,13 +32,14 @@ SECTION .text
; %1=5bits weights?, %2=dst %3=src1 %4=src3 %5=stride if sse2
%macro RV40_WCORE 4-5
- movh m4, [%3 + 0]
- movh m5, [%4 + 0]
+ movh m4, [%3 + r6 + 0]
+ movh m5, [%4 + r6 + 0]
%if %0 == 4
-%define OFFSET mmsize / 2
+%define OFFSET r6 + mmsize / 2
%else
; 8x8 block and sse2, stride was provided
-%define OFFSET %5
+%define OFFSET r6
+ add r6, r5
%endif
movh m6, [%3 + OFFSET]
movh m7, [%4 + OFFSET]
@@ -99,10 +100,12 @@ SECTION .text
packuswb m4, m6
%if %0 == 5
; Only called for 8x8 blocks and sse2
- movh [%2 + 0], m4
- movhps [%2 + %5], m4
+ sub r6, r5
+ movh [%2 + r6], m4
+ add r6, r5
+ movhps [%2 + r6], m4
%else
- mova [%2], m4
+ mova [%2 + r6], m4
%endif
%endmacro
@@ -115,93 +118,79 @@ SECTION .text
%endif
; Prepare for next loop
- add r0, r5
- add r1, r5
- add r2, r5
+ add r6, r5
%else
%ifidn %1, 8
RV40_WCORE %2, r0, r1, r2, r5
; Prepare 2 next lines
- lea r0, [r0 + 2 * r5]
- lea r1, [r1 + 2 * r5]
- lea r2, [r2 + 2 * r5]
+ add r6, r5
%else
RV40_WCORE %2, r0, r1, r2
; Prepare single next line
- add r0, r5
- add r1, r5
- add r2, r5
+ add r6, r5
%endif
%endif
- dec r6
%endmacro
; rv40_weight_func_%1(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, int stride)
; %1=size %2=num of xmm regs
-%macro RV40_WEIGHT 2
-cglobal rv40_weight_func_%1, 6, 7, %2
+; The weights are FP0.14 notation of fractions depending on pts.
+; For timebases without rounding error (i.e. PAL), the fractions
+; can be simplified, and several operations can be avoided.
+; Therefore, we check here whether they are multiples of 2^9 for
+; those simplifications to occur.
+%macro RV40_WEIGHT 3
+cglobal rv40_weight_func_%1_%2, 6, 7, 8
%if cpuflag(ssse3)
mova m1, [shift_round]
%else
mova m1, [pw_16]
%endif
pxor m0, m0
- mov r6, r3
- or r6, r4
- ; The weights are FP0.14 notation of fractions depending on pts.
- ; For timebases without rounding error (i.e. PAL), the fractions
- ; can be simplified, and several operations can be avoided.
- ; Therefore, we check here whether they are multiples of 2^9 for
- ; those simplifications to occur.
- and r6, 0x1FF
; Set loop counter and increments
-%if mmsize == 8
- mov r6, %1
-%else
- mov r6, (%1 * %1) / mmsize
-%endif
+ mov r6, r5
+ shl r6, %3
+ add r0, r6
+ add r1, r6
+ add r2, r6
+ neg r6
- ; Use result of test now
- jz .loop_512
movd m2, r3d
movd m3, r4d
+%ifidn %1,rnd
+%define RND 0
SPLATW m2, m2
- SPLATW m3, m3
-
-.loop:
- MAIN_LOOP %1, 0
- jnz .loop
- REP_RET
-
- ; Weights are multiple of 512, which allows some shortcuts
-.loop_512:
- sar r3, 9
- sar r4, 9
- movd m2, r3d
- movd m3, r4d
+%else
+%define RND 1
%if cpuflag(ssse3)
punpcklbw m3, m2
- SPLATW m3, m3
%else
SPLATW m2, m2
- SPLATW m3, m3
%endif
-.loop2:
- MAIN_LOOP %1, 1
- jnz .loop2
- REP_RET
+%endif
+ SPLATW m3, m3
+.loop:
+ MAIN_LOOP %2, RND
+ jnz .loop
+ REP_RET
%endmacro
INIT_MMX mmx
-RV40_WEIGHT 8, 0
-RV40_WEIGHT 16, 0
+RV40_WEIGHT rnd, 8, 3
+RV40_WEIGHT rnd, 16, 4
+RV40_WEIGHT nornd, 8, 3
+RV40_WEIGHT nornd, 16, 4
INIT_XMM sse2
-RV40_WEIGHT 8, 8
-RV40_WEIGHT 16, 8
+RV40_WEIGHT rnd, 8, 3
+RV40_WEIGHT rnd, 16, 4
+RV40_WEIGHT nornd, 8, 3
+RV40_WEIGHT nornd, 16, 4
INIT_XMM ssse3
-RV40_WEIGHT 8, 8
-RV40_WEIGHT 16, 8
+RV40_WEIGHT rnd, 8, 3
+RV40_WEIGHT rnd, 16, 4
+RV40_WEIGHT nornd, 8, 3
+RV40_WEIGHT nornd, 16, 4
diff --git a/libavcodec/x86/rv40dsp_init.c b/libavcodec/x86/rv40dsp_init.c
index 79c70f78c3..df468aa9e5 100644
--- a/libavcodec/x86/rv40dsp_init.c
+++ b/libavcodec/x86/rv40dsp_init.c
@@ -41,10 +41,14 @@ void ff_avg_rv40_chroma_mc4_3dnow(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
#define DECLARE_WEIGHT(opt) \
-void ff_rv40_weight_func_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
- int w1, int w2, ptrdiff_t stride); \
-void ff_rv40_weight_func_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
- int w1, int w2, ptrdiff_t stride);
+void ff_rv40_weight_func_rnd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
+ int w1, int w2, ptrdiff_t stride); \
+void ff_rv40_weight_func_rnd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
+ int w1, int w2, ptrdiff_t stride); \
+void ff_rv40_weight_func_nornd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
+ int w1, int w2, ptrdiff_t stride); \
+void ff_rv40_weight_func_nornd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
+ int w1, int w2, ptrdiff_t stride);
DECLARE_WEIGHT(mmx)
DECLARE_WEIGHT(sse2)
DECLARE_WEIGHT(ssse3)
@@ -57,8 +61,10 @@ void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp)
if (mm_flags & AV_CPU_FLAG_MMX) {
c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx;
c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx;
- c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_mmx;
- c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_mmx;
+ c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_mmx;
+ c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmx;
+ c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmx;
+ c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmx;
}
if (mm_flags & AV_CPU_FLAG_MMX2) {
c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_mmx2;
@@ -68,12 +74,16 @@ void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp)
c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow;
}
if (mm_flags & AV_CPU_FLAG_SSE2) {
- c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_sse2;
- c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_sse2;
+ c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2;
+ c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2;
+ c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2;
+ c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2;
}
if (mm_flags & AV_CPU_FLAG_SSSE3) {
- c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_ssse3;
- c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_ssse3;
+ c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3;
+ c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3;
+ c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3;
+ c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3;
}
#endif
}
diff --git a/libavcodec/xvmc.h b/libavcodec/xvmc.h
index 93ad8bb9a5..fdaea7eab7 100644
--- a/libavcodec/xvmc.h
+++ b/libavcodec/xvmc.h
@@ -21,10 +21,23 @@
#ifndef AVCODEC_XVMC_H
#define AVCODEC_XVMC_H
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_xvmc
+ * Public libavcodec XvMC header.
+ */
+
#include <X11/extensions/XvMC.h>
#include "avcodec.h"
+/**
+ * @defgroup lavc_codec_hwaccel_xvmc XvMC
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct
the number is 1337 speak for the letters IDCT MCo (motion compensation) */
@@ -148,4 +161,8 @@ struct xvmc_pix_fmt {
int next_free_data_block_num;
};
+/**
+ * @}
+ */
+
#endif /* AVCODEC_XVMC_H */
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index 04cacad635..8227d35755 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -510,6 +510,18 @@ static void null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) {
static void null_end_frame(AVFilterLink *inlink) { }
+static int poll_frame(AVFilterLink *link)
+{
+ AVFilterContext *s = link->src;
+ OverlayContext *over = s->priv;
+ int ret = avfilter_poll_frame(s->inputs[OVERLAY]);
+
+ if (ret == AVERROR_EOF)
+ ret = !!over->overpicref;
+
+ return ret && avfilter_poll_frame(s->inputs[MAIN]);
+}
+
AVFilter avfilter_vf_overlay = {
.name = "overlay",
.description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
@@ -541,6 +553,7 @@ AVFilter avfilter_vf_overlay = {
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_output, },
+ .config_props = config_output,
+ .poll_frame = poll_frame },
{ .name = NULL}},
};
diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c
index 86be86f3c9..810a6d3ef1 100644
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -27,6 +27,7 @@
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
@@ -92,7 +93,15 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
if (args) {
sscanf(args, "%255[^:]:%255[^:]", scale->w_expr, scale->h_expr);
p = strstr(args,"flags=");
- if (p) scale->flags = strtoul(p+6, NULL, 0);
+ if (p) {
+ const AVClass *class = sws_get_class();
+ const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
+ AV_OPT_SEARCH_FAKE_OBJ);
+ int ret = av_opt_eval_flags(&class, o, p + 6, &scale->flags);
+
+ if (ret < 0)
+ return ret;
+ }
if(strstr(args,"interl=1")){
scale->interlaced=1;
}else if(strstr(args,"interl=-1"))
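
With this change the value after "flags=" is evaluated through libswscale's sws_flags option table (av_opt_eval_flags()) instead of strtoul(), so named constants such as "flags=bicubic" or combinations like "flags=lanczos+accurate_rnd" become valid scale-filter arguments while plain numeric values keep working; the exact constant names are the ones defined in libswscale's option table.
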
diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c
index 99e036d4a4..6d51950e11 100644
--- a/libavformat/nutdec.c
+++ b/libavformat/nutdec.c
@@ -33,79 +33,97 @@
#define NUT_MAX_STREAMS 256 /* arbitrary sanity check value */
-static int get_str(AVIOContext *bc, char *string, unsigned int maxlen){
- unsigned int len= ffio_read_varlen(bc);
+static int get_str(AVIOContext *bc, char *string, unsigned int maxlen)
+{
+ unsigned int len = ffio_read_varlen(bc);
- if(len && maxlen)
+ if (len && maxlen)
avio_read(bc, string, FFMIN(len, maxlen));
- while(len > maxlen){
+ while (len > maxlen) {
avio_r8(bc);
len--;
}
- if(maxlen)
- string[FFMIN(len, maxlen-1)]= 0;
+ if (maxlen)
+ string[FFMIN(len, maxlen - 1)] = 0;
- if(maxlen == len)
+ if (maxlen == len)
return -1;
else
return 0;
}
-static int64_t get_s(AVIOContext *bc){
+static int64_t get_s(AVIOContext *bc)
+{
int64_t v = ffio_read_varlen(bc) + 1;
- if (v&1) return -(v>>1);
- else return (v>>1);
+ if (v & 1)
+ return -(v >> 1);
+ else
+ return (v >> 1);
}
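
For reference, the reformatted get_s() above decodes NUT's signed variable-length integers; the first few raw values map as follows (illustrative worked example):

    raw varlen   v = raw + 1   decoded
    0            1              0
    1            2              1
    2            3             -1
    3            4              2
    4            5             -2
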
-static uint64_t get_fourcc(AVIOContext *bc){
- unsigned int len= ffio_read_varlen(bc);
+static uint64_t get_fourcc(AVIOContext *bc)
+{
+ unsigned int len = ffio_read_varlen(bc);
- if (len==2) return avio_rl16(bc);
- else if(len==4) return avio_rl32(bc);
- else return -1;
+ if (len == 2)
+ return avio_rl16(bc);
+ else if (len == 4)
+ return avio_rl32(bc);
+ else
+ return -1;
}
#ifdef TRACE
-static inline uint64_t get_v_trace(AVIOContext *bc, char *file, char *func, int line){
- uint64_t v= ffio_read_varlen(bc);
+static inline uint64_t get_v_trace(AVIOContext *bc, char *file,
+ char *func, int line)
+{
+ uint64_t v = ffio_read_varlen(bc);
- av_log(NULL, AV_LOG_DEBUG, "get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ av_log(NULL, AV_LOG_DEBUG, "get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n",
+ v, v, file, func, line);
return v;
}
-static inline int64_t get_s_trace(AVIOContext *bc, char *file, char *func, int line){
- int64_t v= get_s(bc);
+static inline int64_t get_s_trace(AVIOContext *bc, char *file,
+ char *func, int line)
+{
+ int64_t v = get_s(bc);
- av_log(NULL, AV_LOG_DEBUG, "get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ av_log(NULL, AV_LOG_DEBUG, "get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n",
+ v, v, file, func, line);
return v;
}
-static inline uint64_t get_vb_trace(AVIOContext *bc, char *file, char *func, int line){
- uint64_t v= get_vb(bc);
+static inline uint64_t get_vb_trace(AVIOContext *bc, char *file,
+ char *func, int line)
+{
+ uint64_t v = get_vb(bc);
- av_log(NULL, AV_LOG_DEBUG, "get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ av_log(NULL, AV_LOG_DEBUG, "get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n",
+ v, v, file, func, line);
return v;
}
-#define ffio_read_varlen(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
-#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
-#define get_vb(bc) get_vb_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define ffio_read_varlen(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_vb(bc) get_vb_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif
-static int get_packetheader(NUTContext *nut, AVIOContext *bc, int calculate_checksum, uint64_t startcode)
+static int get_packetheader(NUTContext *nut, AVIOContext *bc,
+ int calculate_checksum, uint64_t startcode)
{
int64_t size;
-// start= avio_tell(bc) - 8;
+// start = avio_tell(bc) - 8;
- startcode= av_be2ne64(startcode);
- startcode= ff_crc04C11DB7_update(0, (uint8_t*)&startcode, 8);
+ startcode = av_be2ne64(startcode);
+ startcode = ff_crc04C11DB7_update(0, (uint8_t*) &startcode, 8);
ffio_init_checksum(bc, ff_crc04C11DB7_update, startcode);
- size= ffio_read_varlen(bc);
- if(size > 4096)
+ size = ffio_read_varlen(bc);
+ if (size > 4096)
avio_rb32(bc);
- if(ffio_get_checksum(bc) && size > 4096)
+ if (ffio_get_checksum(bc) && size > 4096)
return -1;
ffio_init_checksum(bc, calculate_checksum ? ff_crc04C11DB7_update : NULL, 0);
@@ -113,17 +131,19 @@ static int get_packetheader(NUTContext *nut, AVIOContext *bc, int calculate_chec
return size;
}
-static uint64_t find_any_startcode(AVIOContext *bc, int64_t pos){
- uint64_t state=0;
-
- if(pos >= 0)
- avio_seek(bc, pos, SEEK_SET); //note, this may fail if the stream is not seekable, but that should not matter, as in this case we simply start where we currently are
-
- while(!url_feof(bc)){
- state= (state<<8) | avio_r8(bc);
- if((state>>56) != 'N')
+static uint64_t find_any_startcode(AVIOContext *bc, int64_t pos)
+{
+ uint64_t state = 0;
+
+ if (pos >= 0)
+ /* Note, this may fail if the stream is not seekable, but that should
+ * not matter, as in this case we simply start where we currently are */
+ avio_seek(bc, pos, SEEK_SET);
+ while (!url_feof(bc)) {
+ state = (state << 8) | avio_r8(bc);
+ if ((state >> 56) != 'N')
continue;
- switch(state){
+ switch (state) {
case MAIN_STARTCODE:
case STREAM_STARTCODE:
case SYNCPOINT_STARTCODE:
@@ -142,20 +162,22 @@ static uint64_t find_any_startcode(AVIOContext *bc, int64_t pos){
* @param pos the start position of the search, or -1 if the current position
* @return the position of the startcode or -1 if not found
*/
-static int64_t find_startcode(AVIOContext *bc, uint64_t code, int64_t pos){
- for(;;){
- uint64_t startcode= find_any_startcode(bc, pos);
- if(startcode == code)
+static int64_t find_startcode(AVIOContext *bc, uint64_t code, int64_t pos)
+{
+ for (;;) {
+ uint64_t startcode = find_any_startcode(bc, pos);
+ if (startcode == code)
return avio_tell(bc) - 8;
- else if(startcode == 0)
+ else if (startcode == 0)
return -1;
- pos=-1;
+ pos = -1;
}
}
-static int nut_probe(AVProbeData *p){
+static int nut_probe(AVProbeData *p)
+{
int i;
- uint64_t code= 0;
+ uint64_t code = 0;
for (i = 0; i < p->buf_size; i++) {
code = (code << 8) | p->buf[i];
@@ -165,225 +187,248 @@ static int nut_probe(AVProbeData *p){
return 0;
}
-#define GET_V(dst, check) \
- tmp= ffio_read_varlen(bc);\
- if(!(check)){\
- av_log(s, AV_LOG_ERROR, "Error " #dst " is (%"PRId64")\n", tmp);\
- return -1;\
- }\
- dst= tmp;
+#define GET_V(dst, check) \
+ tmp = ffio_read_varlen(bc); \
+ if (!(check)) { \
+ av_log(s, AV_LOG_ERROR, "Error " #dst " is (%"PRId64")\n", tmp); \
+ return -1; \
+ } \
+ dst = tmp;
-static int skip_reserved(AVIOContext *bc, int64_t pos){
+static int skip_reserved(AVIOContext *bc, int64_t pos)
+{
pos -= avio_tell(bc);
- if(pos<0){
+ if (pos < 0) {
avio_seek(bc, pos, SEEK_CUR);
return -1;
- }else{
- while(pos--)
+ } else {
+ while (pos--)
avio_r8(bc);
return 0;
}
}
-static int decode_main_header(NUTContext *nut){
- AVFormatContext *s= nut->avf;
- AVIOContext *bc = s->pb;
+static int decode_main_header(NUTContext *nut)
+{
+ AVFormatContext *s = nut->avf;
+ AVIOContext *bc = s->pb;
uint64_t tmp, end;
unsigned int stream_count;
- int i, j, tmp_stream, tmp_mul, tmp_pts, tmp_size, count, tmp_res, tmp_head_idx;
+ int i, j, count;
+ int tmp_stream, tmp_mul, tmp_pts, tmp_size, tmp_res, tmp_head_idx;
- end= get_packetheader(nut, bc, 1, MAIN_STARTCODE);
+ end = get_packetheader(nut, bc, 1, MAIN_STARTCODE);
end += avio_tell(bc);
- GET_V(tmp , tmp >=2 && tmp <= 3)
- GET_V(stream_count , tmp > 0 && tmp <= NUT_MAX_STREAMS)
+ GET_V(tmp, tmp >= 2 && tmp <= 3)
+ GET_V(stream_count, tmp > 0 && tmp <= NUT_MAX_STREAMS)
nut->max_distance = ffio_read_varlen(bc);
- if(nut->max_distance > 65536){
+ if (nut->max_distance > 65536) {
av_log(s, AV_LOG_DEBUG, "max_distance %d\n", nut->max_distance);
- nut->max_distance= 65536;
+ nut->max_distance = 65536;
}
- GET_V(nut->time_base_count, tmp>0 && tmp<INT_MAX / sizeof(AVRational))
- nut->time_base= av_malloc(nut->time_base_count * sizeof(AVRational));
+ GET_V(nut->time_base_count, tmp > 0 && tmp < INT_MAX / sizeof(AVRational))
+ nut->time_base = av_malloc(nut->time_base_count * sizeof(AVRational));
- for(i=0; i<nut->time_base_count; i++){
- GET_V(nut->time_base[i].num, tmp>0 && tmp<(1ULL<<31))
- GET_V(nut->time_base[i].den, tmp>0 && tmp<(1ULL<<31))
- if(av_gcd(nut->time_base[i].num, nut->time_base[i].den) != 1){
+ for (i = 0; i < nut->time_base_count; i++) {
+ GET_V(nut->time_base[i].num, tmp > 0 && tmp < (1ULL << 31))
+ GET_V(nut->time_base[i].den, tmp > 0 && tmp < (1ULL << 31))
+ if (av_gcd(nut->time_base[i].num, nut->time_base[i].den) != 1) {
av_log(s, AV_LOG_ERROR, "time base invalid\n");
return AVERROR_INVALIDDATA;
}
}
- tmp_pts=0;
- tmp_mul=1;
- tmp_stream=0;
- tmp_head_idx= 0;
- for(i=0; i<256;){
- int tmp_flags = ffio_read_varlen(bc);
- int tmp_fields= ffio_read_varlen(bc);
- if(tmp_fields>0) tmp_pts = get_s(bc);
- if(tmp_fields>1) tmp_mul = ffio_read_varlen(bc);
- if(tmp_fields>2) tmp_stream= ffio_read_varlen(bc);
- if(tmp_fields>3) tmp_size = ffio_read_varlen(bc);
- else tmp_size = 0;
- if(tmp_fields>4) tmp_res = ffio_read_varlen(bc);
- else tmp_res = 0;
- if(tmp_fields>5) count = ffio_read_varlen(bc);
- else count = tmp_mul - tmp_size;
- if(tmp_fields>6) get_s(bc);
- if(tmp_fields>7) tmp_head_idx= ffio_read_varlen(bc);
-
- while(tmp_fields-- > 8)
- ffio_read_varlen(bc);
-
- if(count == 0 || i+count > 256){
+ tmp_pts = 0;
+ tmp_mul = 1;
+ tmp_stream = 0;
+ tmp_head_idx = 0;
+ for (i = 0; i < 256;) {
+ int tmp_flags = ffio_read_varlen(bc);
+ int tmp_fields = ffio_read_varlen(bc);
+
+ if (tmp_fields > 0)
+ tmp_pts = get_s(bc);
+ if (tmp_fields > 1)
+ tmp_mul = ffio_read_varlen(bc);
+ if (tmp_fields > 2)
+ tmp_stream = ffio_read_varlen(bc);
+ if (tmp_fields > 3)
+ tmp_size = ffio_read_varlen(bc);
+ else
+ tmp_size = 0;
+ if (tmp_fields > 4)
+ tmp_res = ffio_read_varlen(bc);
+ else
+ tmp_res = 0;
+ if (tmp_fields > 5)
+ count = ffio_read_varlen(bc);
+ else
+ count = tmp_mul - tmp_size;
+ if (tmp_fields > 6)
+ get_s(bc);
+ if (tmp_fields > 7)
+ tmp_head_idx = ffio_read_varlen(bc);
+
+ while (tmp_fields-- > 8)
+ ffio_read_varlen(bc);
+
+ if (count == 0 || i + count > 256) {
av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i);
return AVERROR_INVALIDDATA;
}
- if(tmp_stream >= stream_count){
+ if (tmp_stream >= stream_count) {
av_log(s, AV_LOG_ERROR, "illegal stream number\n");
return AVERROR_INVALIDDATA;
}
- for(j=0; j<count; j++,i++){
+ for (j = 0; j < count; j++, i++) {
if (i == 'N') {
- nut->frame_code[i].flags= FLAG_INVALID;
+ nut->frame_code[i].flags = FLAG_INVALID;
j--;
continue;
}
- nut->frame_code[i].flags = tmp_flags ;
- nut->frame_code[i].pts_delta = tmp_pts ;
- nut->frame_code[i].stream_id = tmp_stream;
- nut->frame_code[i].size_mul = tmp_mul ;
- nut->frame_code[i].size_lsb = tmp_size+j;
- nut->frame_code[i].reserved_count = tmp_res ;
- nut->frame_code[i].header_idx = tmp_head_idx;
+ nut->frame_code[i].flags = tmp_flags;
+ nut->frame_code[i].pts_delta = tmp_pts;
+ nut->frame_code[i].stream_id = tmp_stream;
+ nut->frame_code[i].size_mul = tmp_mul;
+ nut->frame_code[i].size_lsb = tmp_size + j;
+ nut->frame_code[i].reserved_count = tmp_res;
+ nut->frame_code[i].header_idx = tmp_head_idx;
}
}
assert(nut->frame_code['N'].flags == FLAG_INVALID);
- if(end > avio_tell(bc) + 4){
- int rem= 1024;
- GET_V(nut->header_count, tmp<128U)
+ if (end > avio_tell(bc) + 4) {
+ int rem = 1024;
+ GET_V(nut->header_count, tmp < 128U)
nut->header_count++;
- for(i=1; i<nut->header_count; i++){
- GET_V(nut->header_len[i], tmp>0 && tmp<256);
+ for (i = 1; i < nut->header_count; i++) {
+ GET_V(nut->header_len[i], tmp > 0 && tmp < 256);
rem -= nut->header_len[i];
- if(rem < 0){
+ if (rem < 0) {
av_log(s, AV_LOG_ERROR, "invalid elision header\n");
return AVERROR_INVALIDDATA;
}
- nut->header[i]= av_malloc(nut->header_len[i]);
+ nut->header[i] = av_malloc(nut->header_len[i]);
avio_read(bc, nut->header[i], nut->header_len[i]);
}
- assert(nut->header_len[0]==0);
+ assert(nut->header_len[0] == 0);
}
- if(skip_reserved(bc, end) || ffio_get_checksum(bc)){
+ if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
av_log(s, AV_LOG_ERROR, "main header checksum mismatch\n");
return AVERROR_INVALIDDATA;
}
- nut->stream = av_mallocz(sizeof(StreamContext)*stream_count);
- for(i=0; i<stream_count; i++){
+ nut->stream = av_mallocz(sizeof(StreamContext) * stream_count);
+ for (i = 0; i < stream_count; i++)
avformat_new_stream(s, NULL);
- }
return 0;
}
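Each of the 256 frame-code table entries filled above pairs a size_mul with a size_lsb. When a frame later arrives with FLAG_SIZE_MSB set, decode_frame_header() (further down) starts from the table's size_lsb and adds size_mul times an explicitly coded MSB part, i.e. size = size_lsb + size_mul * size_msb. A toy illustration of that arithmetic; the field values are made up:

#include <stdio.h>

/* Rebuild a NUT frame payload size from a frame-code table entry plus an
 * optional explicitly coded MSB part, mirroring the size_lsb/size_mul/
 * FLAG_SIZE_MSB handling in decode_frame_header(). Values are illustrative. */
int main(void)
{
    int size_lsb = 37;    /* from the frame-code table entry                 */
    int size_mul = 512;   /* from the same entry                             */
    int size_msb = 3;     /* read with ffio_read_varlen() when FLAG_SIZE_MSB
                             is set in the frame's flags                     */
    int size     = size_lsb + size_mul * size_msb;

    printf("frame payload size: %d bytes\n", size);  /* 1573 */
    return 0;
}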
-static int decode_stream_header(NUTContext *nut){
- AVFormatContext *s= nut->avf;
- AVIOContext *bc = s->pb;
+static int decode_stream_header(NUTContext *nut)
+{
+ AVFormatContext *s = nut->avf;
+ AVIOContext *bc = s->pb;
StreamContext *stc;
int class, stream_id;
uint64_t tmp, end;
AVStream *st;
- end= get_packetheader(nut, bc, 1, STREAM_STARTCODE);
+ end = get_packetheader(nut, bc, 1, STREAM_STARTCODE);
end += avio_tell(bc);
GET_V(stream_id, tmp < s->nb_streams && !nut->stream[tmp].time_base);
- stc= &nut->stream[stream_id];
-
- st = s->streams[stream_id];
+ stc = &nut->stream[stream_id];
+ st = s->streams[stream_id];
if (!st)
return AVERROR(ENOMEM);
- class = ffio_read_varlen(bc);
- tmp = get_fourcc(bc);
- st->codec->codec_tag= tmp;
- switch(class)
- {
- case 0:
- st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
- st->codec->codec_id = av_codec_get_id(
- (const AVCodecTag * const []) { ff_codec_bmp_tags, ff_nut_video_tags, 0 },
- tmp);
- break;
- case 1:
- st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
- st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, tmp);
- break;
- case 2:
- st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
- st->codec->codec_id = ff_codec_get_id(ff_nut_subtitle_tags, tmp);
- break;
- case 3:
- st->codec->codec_type = AVMEDIA_TYPE_DATA;
- break;
- default:
- av_log(s, AV_LOG_ERROR, "unknown stream class (%d)\n", class);
- return -1;
+ class = ffio_read_varlen(bc);
+ tmp = get_fourcc(bc);
+ st->codec->codec_tag = tmp;
+ switch (class) {
+ case 0:
+ st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+ st->codec->codec_id = av_codec_get_id((const AVCodecTag * const []) {
+ ff_codec_bmp_tags,
+ ff_nut_video_tags,
+ 0
+ },
+ tmp);
+ break;
+ case 1:
+ st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
+ st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, tmp);
+ break;
+ case 2:
+ st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
+ st->codec->codec_id = ff_codec_get_id(ff_nut_subtitle_tags, tmp);
+ break;
+ case 3:
+ st->codec->codec_type = AVMEDIA_TYPE_DATA;
+ break;
+ default:
+ av_log(s, AV_LOG_ERROR, "unknown stream class (%d)\n", class);
+ return -1;
}
- if(class<3 && st->codec->codec_id == CODEC_ID_NONE)
- av_log(s, AV_LOG_ERROR, "Unknown codec tag '0x%04x' for stream number %d\n",
- (unsigned int)tmp, stream_id);
-
- GET_V(stc->time_base_id , tmp < nut->time_base_count);
- GET_V(stc->msb_pts_shift , tmp < 16);
- stc->max_pts_distance= ffio_read_varlen(bc);
- GET_V(stc->decode_delay , tmp < 1000); //sanity limit, raise this if Moore's law is true
- st->codec->has_b_frames= stc->decode_delay;
- ffio_read_varlen(bc); //stream flags
-
- GET_V(st->codec->extradata_size, tmp < (1<<30));
- if(st->codec->extradata_size){
- st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (class < 3 && st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_ERROR,
+ "Unknown codec tag '0x%04x' for stream number %d\n",
+ (unsigned int) tmp, stream_id);
+
+ GET_V(stc->time_base_id, tmp < nut->time_base_count);
+ GET_V(stc->msb_pts_shift, tmp < 16);
+ stc->max_pts_distance = ffio_read_varlen(bc);
+ GET_V(stc->decode_delay, tmp < 1000); // sanity limit, raise this if Moore's law is true
+ st->codec->has_b_frames = stc->decode_delay;
+ ffio_read_varlen(bc); // stream flags
+
+ GET_V(st->codec->extradata_size, tmp < (1 << 30));
+ if (st->codec->extradata_size) {
+ st->codec->extradata = av_mallocz(st->codec->extradata_size +
+ FF_INPUT_BUFFER_PADDING_SIZE);
avio_read(bc, st->codec->extradata, st->codec->extradata_size);
}
- if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
- GET_V(st->codec->width , tmp > 0)
+ if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+ GET_V(st->codec->width, tmp > 0)
GET_V(st->codec->height, tmp > 0)
- st->sample_aspect_ratio.num= ffio_read_varlen(bc);
- st->sample_aspect_ratio.den= ffio_read_varlen(bc);
- if((!st->sample_aspect_ratio.num) != (!st->sample_aspect_ratio.den)){
- av_log(s, AV_LOG_ERROR, "invalid aspect ratio %d/%d\n", st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);
+ st->sample_aspect_ratio.num = ffio_read_varlen(bc);
+ st->sample_aspect_ratio.den = ffio_read_varlen(bc);
+ if ((!st->sample_aspect_ratio.num) != (!st->sample_aspect_ratio.den)) {
+ av_log(s, AV_LOG_ERROR, "invalid aspect ratio %d/%d\n",
+ st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);
return -1;
}
ffio_read_varlen(bc); /* csp type */
- }else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO){
- GET_V(st->codec->sample_rate , tmp > 0)
+ } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+ GET_V(st->codec->sample_rate, tmp > 0)
ffio_read_varlen(bc); // samplerate_den
GET_V(st->codec->channels, tmp > 0)
}
- if(skip_reserved(bc, end) || ffio_get_checksum(bc)){
- av_log(s, AV_LOG_ERROR, "stream header %d checksum mismatch\n", stream_id);
+ if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
+ av_log(s, AV_LOG_ERROR,
+ "stream header %d checksum mismatch\n", stream_id);
return -1;
}
- stc->time_base= &nut->time_base[stc->time_base_id];
- avpriv_set_pts_info(s->streams[stream_id], 63, stc->time_base->num, stc->time_base->den);
+ stc->time_base = &nut->time_base[stc->time_base_id];
+ avpriv_set_pts_info(s->streams[stream_id], 63, stc->time_base->num,
+ stc->time_base->den);
return 0;
}
-static void set_disposition_bits(AVFormatContext* avf, char* value, int stream_id){
+static void set_disposition_bits(AVFormatContext *avf, char *value,
+ int stream_id)
+{
int flag = 0, i;
- for (i=0; ff_nut_dispositions[i].flag; ++i) {
+
+ for (i = 0; ff_nut_dispositions[i].flag; ++i)
if (!strcmp(ff_nut_dispositions[i].str, value))
flag = ff_nut_dispositions[i].flag;
- }
if (!flag)
av_log(avf, AV_LOG_INFO, "unknown disposition type '%s'\n", value);
for (i = 0; i < avf->nb_streams; ++i)
@@ -391,61 +436,63 @@ static void set_disposition_bits(AVFormatContext* avf, char* value, int stream_i
avf->streams[i]->disposition |= flag;
}
-static int decode_info_header(NUTContext *nut){
- AVFormatContext *s= nut->avf;
- AVIOContext *bc = s->pb;
+static int decode_info_header(NUTContext *nut)
+{
+ AVFormatContext *s = nut->avf;
+ AVIOContext *bc = s->pb;
uint64_t tmp, chapter_start, chapter_len;
unsigned int stream_id_plus1, count;
int chapter_id, i;
int64_t value, end;
char name[256], str_value[1024], type_str[256];
const char *type;
- AVChapter *chapter= NULL;
- AVStream *st= NULL;
+ AVChapter *chapter = NULL;
+ AVStream *st = NULL;
AVDictionary **metadata = NULL;
- end= get_packetheader(nut, bc, 1, INFO_STARTCODE);
+ end = get_packetheader(nut, bc, 1, INFO_STARTCODE);
end += avio_tell(bc);
GET_V(stream_id_plus1, tmp <= s->nb_streams)
- chapter_id = get_s(bc);
- chapter_start= ffio_read_varlen(bc);
- chapter_len = ffio_read_varlen(bc);
- count = ffio_read_varlen(bc);
-
- if(chapter_id && !stream_id_plus1){
- int64_t start= chapter_start / nut->time_base_count;
- chapter= avpriv_new_chapter(s, chapter_id,
- nut->time_base[chapter_start % nut->time_base_count],
- start, start + chapter_len, NULL);
+ chapter_id = get_s(bc);
+ chapter_start = ffio_read_varlen(bc);
+ chapter_len = ffio_read_varlen(bc);
+ count = ffio_read_varlen(bc);
+
+ if (chapter_id && !stream_id_plus1) {
+ int64_t start = chapter_start / nut->time_base_count;
+ chapter = avpriv_new_chapter(s, chapter_id,
+ nut->time_base[chapter_start %
+ nut->time_base_count],
+ start, start + chapter_len, NULL);
metadata = &chapter->metadata;
- } else if(stream_id_plus1) {
- st= s->streams[stream_id_plus1 - 1];
+ } else if (stream_id_plus1) {
+ st = s->streams[stream_id_plus1 - 1];
metadata = &st->metadata;
} else
metadata = &s->metadata;
- for(i=0; i<count; i++){
+ for (i = 0; i < count; i++) {
get_str(bc, name, sizeof(name));
- value= get_s(bc);
- if(value == -1){
- type= "UTF-8";
+ value = get_s(bc);
+ if (value == -1) {
+ type = "UTF-8";
get_str(bc, str_value, sizeof(str_value));
- }else if(value == -2){
+ } else if (value == -2) {
get_str(bc, type_str, sizeof(type_str));
- type= type_str;
+ type = type_str;
get_str(bc, str_value, sizeof(str_value));
- }else if(value == -3){
- type= "s";
- value= get_s(bc);
- }else if(value == -4){
- type= "t";
- value= ffio_read_varlen(bc);
- }else if(value < -4){
- type= "r";
+ } else if (value == -3) {
+ type = "s";
+ value = get_s(bc);
+ } else if (value == -4) {
+ type = "t";
+ value = ffio_read_varlen(bc);
+ } else if (value < -4) {
+ type = "r";
get_s(bc);
- }else{
- type= "v";
+ } else {
+ type = "v";
}
if (stream_id_plus1 > s->nb_streams) {
@@ -453,143 +500,143 @@ static int decode_info_header(NUTContext *nut){
continue;
}
- if(!strcmp(type, "UTF-8")){
- if(chapter_id==0 && !strcmp(name, "Disposition")) {
+ if (!strcmp(type, "UTF-8")) {
+ if (chapter_id == 0 && !strcmp(name, "Disposition")) {
set_disposition_bits(s, str_value, stream_id_plus1 - 1);
continue;
}
- if(metadata && av_strcasecmp(name,"Uses")
- && av_strcasecmp(name,"Depends") && av_strcasecmp(name,"Replaces"))
+ if (metadata && av_strcasecmp(name, "Uses") &&
+ av_strcasecmp(name, "Depends") && av_strcasecmp(name, "Replaces"))
av_dict_set(metadata, name, str_value, 0);
}
}
- if(skip_reserved(bc, end) || ffio_get_checksum(bc)){
+ if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
av_log(s, AV_LOG_ERROR, "info header checksum mismatch\n");
return -1;
}
return 0;
}
-static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr){
- AVFormatContext *s= nut->avf;
- AVIOContext *bc = s->pb;
+static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr)
+{
+ AVFormatContext *s = nut->avf;
+ AVIOContext *bc = s->pb;
int64_t end, tmp;
- nut->last_syncpoint_pos= avio_tell(bc)-8;
+ nut->last_syncpoint_pos = avio_tell(bc) - 8;
- end= get_packetheader(nut, bc, 1, SYNCPOINT_STARTCODE);
+ end = get_packetheader(nut, bc, 1, SYNCPOINT_STARTCODE);
end += avio_tell(bc);
- tmp= ffio_read_varlen(bc);
- *back_ptr= nut->last_syncpoint_pos - 16*ffio_read_varlen(bc);
- if(*back_ptr < 0)
+ tmp = ffio_read_varlen(bc);
+ *back_ptr = nut->last_syncpoint_pos - 16 * ffio_read_varlen(bc);
+ if (*back_ptr < 0)
return -1;
- ff_nut_reset_ts(nut, nut->time_base[tmp % nut->time_base_count], tmp / nut->time_base_count);
+ ff_nut_reset_ts(nut, nut->time_base[tmp % nut->time_base_count],
+ tmp / nut->time_base_count);
- if(skip_reserved(bc, end) || ffio_get_checksum(bc)){
+ if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
av_log(s, AV_LOG_ERROR, "sync point checksum mismatch\n");
return -1;
}
- *ts= tmp / s->nb_streams * av_q2d(nut->time_base[tmp % s->nb_streams])*AV_TIME_BASE;
+ *ts = tmp / s->nb_streams *
+ av_q2d(nut->time_base[tmp % s->nb_streams]) * AV_TIME_BASE;
ff_nut_add_sp(nut, nut->last_syncpoint_pos, *back_ptr, *ts);
return 0;
}
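decode_syncpoint() shows how NUT folds the time base into the timestamp itself: the coded value is pts * time_base_count + time_base_id, so one division and one modulo recover the pair (that is what the ff_nut_reset_ts() call above does). A small standalone sketch of the packing, with hypothetical values:

#include <inttypes.h>
#include <stdio.h>

/* NUT "universal" timestamps multiplex the time-base index into the value:
 *     coded = pts * time_base_count + time_base_id
 * The reader splits them back out with / and %. Numbers below are made up. */
int main(void)
{
    uint64_t time_base_count = 3;      /* from the main header */
    uint64_t pts             = 90000;  /* in time base #1      */
    uint64_t time_base_id    = 1;

    uint64_t coded = pts * time_base_count + time_base_id;

    printf("coded=%"PRIu64" -> pts=%"PRIu64", time_base_id=%"PRIu64"\n",
           coded, coded / time_base_count, coded % time_base_count);
    return 0;
}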
-static int find_and_decode_index(NUTContext *nut){
- AVFormatContext *s= nut->avf;
- AVIOContext *bc = s->pb;
+static int find_and_decode_index(NUTContext *nut)
+{
+ AVFormatContext *s = nut->avf;
+ AVIOContext *bc = s->pb;
uint64_t tmp, end;
int i, j, syncpoint_count;
- int64_t filesize= avio_size(bc);
+ int64_t filesize = avio_size(bc);
int64_t *syncpoints;
int8_t *has_keyframe;
- int ret= -1;
+ int ret = -1;
- avio_seek(bc, filesize-12, SEEK_SET);
- avio_seek(bc, filesize-avio_rb64(bc), SEEK_SET);
- if(avio_rb64(bc) != INDEX_STARTCODE){
+ avio_seek(bc, filesize - 12, SEEK_SET);
+ avio_seek(bc, filesize - avio_rb64(bc), SEEK_SET);
+ if (avio_rb64(bc) != INDEX_STARTCODE) {
av_log(s, AV_LOG_ERROR, "no index at the end\n");
return -1;
}
- end= get_packetheader(nut, bc, 1, INDEX_STARTCODE);
+ end = get_packetheader(nut, bc, 1, INDEX_STARTCODE);
end += avio_tell(bc);
- ffio_read_varlen(bc); //max_pts
- GET_V(syncpoint_count, tmp < INT_MAX/8 && tmp > 0)
- syncpoints= av_malloc(sizeof(int64_t)*syncpoint_count);
- has_keyframe= av_malloc(sizeof(int8_t)*(syncpoint_count+1));
- for(i=0; i<syncpoint_count; i++){
+ ffio_read_varlen(bc); // max_pts
+ GET_V(syncpoint_count, tmp < INT_MAX / 8 && tmp > 0)
+ syncpoints = av_malloc(sizeof(int64_t) * syncpoint_count);
+ has_keyframe = av_malloc(sizeof(int8_t) * (syncpoint_count + 1));
+ for (i = 0; i < syncpoint_count; i++) {
syncpoints[i] = ffio_read_varlen(bc);
- if(syncpoints[i] <= 0)
+ if (syncpoints[i] <= 0)
goto fail;
- if(i)
- syncpoints[i] += syncpoints[i-1];
- }
-
- for(i=0; i<s->nb_streams; i++){
- int64_t last_pts= -1;
- for(j=0; j<syncpoint_count;){
- uint64_t x= ffio_read_varlen(bc);
- int type= x&1;
- int n= j;
- x>>=1;
- if(type){
- int flag= x&1;
- x>>=1;
- if(n+x >= syncpoint_count + 1){
+ if (i)
+ syncpoints[i] += syncpoints[i - 1];
+ }
+
+ for (i = 0; i < s->nb_streams; i++) {
+ int64_t last_pts = -1;
+ for (j = 0; j < syncpoint_count;) {
+ uint64_t x = ffio_read_varlen(bc);
+ int type = x & 1;
+ int n = j;
+ x >>= 1;
+ if (type) {
+ int flag = x & 1;
+ x >>= 1;
+ if (n + x >= syncpoint_count + 1) {
av_log(s, AV_LOG_ERROR, "index overflow A\n");
goto fail;
}
- while(x--)
- has_keyframe[n++]= flag;
- has_keyframe[n++]= !flag;
- }else{
- while(x != 1){
- if(n>=syncpoint_count + 1){
+ while (x--)
+ has_keyframe[n++] = flag;
+ has_keyframe[n++] = !flag;
+ } else {
+ while (x != 1) {
+ if (n >= syncpoint_count + 1) {
av_log(s, AV_LOG_ERROR, "index overflow B\n");
goto fail;
}
- has_keyframe[n++]= x&1;
- x>>=1;
+ has_keyframe[n++] = x & 1;
+ x >>= 1;
}
}
- if(has_keyframe[0]){
+ if (has_keyframe[0]) {
av_log(s, AV_LOG_ERROR, "keyframe before first syncpoint in index\n");
goto fail;
}
- assert(n<=syncpoint_count+1);
- for(; j<n && j<syncpoint_count; j++){
- if(has_keyframe[j]){
- uint64_t B, A= ffio_read_varlen(bc);
- if(!A){
- A= ffio_read_varlen(bc);
- B= ffio_read_varlen(bc);
- //eor_pts[j][i] = last_pts + A + B
- }else
- B= 0;
- av_add_index_entry(
- s->streams[i],
- 16*syncpoints[j-1],
- last_pts + A,
- 0,
- 0,
- AVINDEX_KEYFRAME);
+ assert(n <= syncpoint_count + 1);
+ for (; j < n && j < syncpoint_count; j++) {
+ if (has_keyframe[j]) {
+ uint64_t B, A = ffio_read_varlen(bc);
+ if (!A) {
+ A = ffio_read_varlen(bc);
+ B = ffio_read_varlen(bc);
+ // eor_pts[j][i] = last_pts + A + B
+ } else
+ B = 0;
+ av_add_index_entry(s->streams[i], 16 * syncpoints[j - 1],
+ last_pts + A, 0, 0, AVINDEX_KEYFRAME);
last_pts += A + B;
}
}
}
}
- if(skip_reserved(bc, end) || ffio_get_checksum(bc)){
+ if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
av_log(s, AV_LOG_ERROR, "index checksum mismatch\n");
goto fail;
}
- ret= 0;
+ ret = 0;
+
fail:
av_free(syncpoints);
av_free(has_keyframe);
@@ -603,53 +650,53 @@ static int nut_read_header(AVFormatContext *s)
int64_t pos;
int initialized_stream_count;
- nut->avf= s;
+ nut->avf = s;
/* main header */
- pos=0;
- do{
- pos= find_startcode(bc, MAIN_STARTCODE, pos)+1;
- if (pos<0+1){
+ pos = 0;
+ do {
+ pos = find_startcode(bc, MAIN_STARTCODE, pos) + 1;
+ if (pos < 0 + 1) {
av_log(s, AV_LOG_ERROR, "No main startcode found.\n");
return AVERROR_INVALIDDATA;
}
- }while(decode_main_header(nut) < 0);
+ } while (decode_main_header(nut) < 0);
/* stream headers */
- pos=0;
- for(initialized_stream_count=0; initialized_stream_count < s->nb_streams;){
- pos= find_startcode(bc, STREAM_STARTCODE, pos)+1;
- if (pos<0+1){
+ pos = 0;
+ for (initialized_stream_count = 0; initialized_stream_count < s->nb_streams;) {
+ pos = find_startcode(bc, STREAM_STARTCODE, pos) + 1;
+ if (pos < 0 + 1) {
av_log(s, AV_LOG_ERROR, "Not all stream headers found.\n");
return AVERROR_INVALIDDATA;
}
- if(decode_stream_header(nut) >= 0)
+ if (decode_stream_header(nut) >= 0)
initialized_stream_count++;
}
/* info headers */
- pos=0;
- for(;;){
- uint64_t startcode= find_any_startcode(bc, pos);
- pos= avio_tell(bc);
+ pos = 0;
+ for (;;) {
+ uint64_t startcode = find_any_startcode(bc, pos);
+ pos = avio_tell(bc);
- if(startcode==0){
+ if (startcode == 0) {
av_log(s, AV_LOG_ERROR, "EOF before video frames\n");
return AVERROR_INVALIDDATA;
- }else if(startcode == SYNCPOINT_STARTCODE){
- nut->next_startcode= startcode;
+ } else if (startcode == SYNCPOINT_STARTCODE) {
+ nut->next_startcode = startcode;
break;
- }else if(startcode != INFO_STARTCODE){
+ } else if (startcode != INFO_STARTCODE) {
continue;
}
decode_info_header(nut);
}
- s->data_offset= pos-8;
+ s->data_offset = pos - 8;
- if(bc->seekable){
- int64_t orig_pos= avio_tell(bc);
+ if (bc->seekable) {
+ int64_t orig_pos = avio_tell(bc);
find_and_decode_index(nut);
avio_seek(bc, orig_pos, SEEK_SET);
}
@@ -660,15 +707,19 @@ static int nut_read_header(AVFormatContext *s)
return 0;
}
-static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id, uint8_t *header_idx, int frame_code){
- AVFormatContext *s= nut->avf;
- AVIOContext *bc = s->pb;
+static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id,
+ uint8_t *header_idx, int frame_code)
+{
+ AVFormatContext *s = nut->avf;
+ AVIOContext *bc = s->pb;
StreamContext *stc;
int size, flags, size_mul, pts_delta, i, reserved_count;
uint64_t tmp;
- if(avio_tell(bc) > nut->last_syncpoint_pos + nut->max_distance){
- av_log(s, AV_LOG_ERROR, "Last frame must have been damaged %"PRId64" > %"PRId64" + %d\n", avio_tell(bc), nut->last_syncpoint_pos, nut->max_distance);
+ if (avio_tell(bc) > nut->last_syncpoint_pos + nut->max_distance) {
+ av_log(s, AV_LOG_ERROR,
+ "Last frame must have been damaged %"PRId64" > %"PRId64" + %d\n",
+ avio_tell(bc), nut->last_syncpoint_pos, nut->max_distance);
return AVERROR_INVALIDDATA;
}
@@ -680,86 +731,88 @@ static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id, ui
reserved_count = nut->frame_code[frame_code].reserved_count;
*header_idx = nut->frame_code[frame_code].header_idx;
- if(flags & FLAG_INVALID)
+ if (flags & FLAG_INVALID)
return AVERROR_INVALIDDATA;
- if(flags & FLAG_CODED)
+ if (flags & FLAG_CODED)
flags ^= ffio_read_varlen(bc);
- if(flags & FLAG_STREAM_ID){
+ if (flags & FLAG_STREAM_ID) {
GET_V(*stream_id, tmp < s->nb_streams)
}
- stc= &nut->stream[*stream_id];
- if(flags&FLAG_CODED_PTS){
- int coded_pts= ffio_read_varlen(bc);
-//FIXME check last_pts validity?
- if(coded_pts < (1<<stc->msb_pts_shift)){
- *pts=ff_lsb2full(stc, coded_pts);
- }else
- *pts=coded_pts - (1<<stc->msb_pts_shift);
- }else
- *pts= stc->last_pts + pts_delta;
- if(flags&FLAG_SIZE_MSB){
- size += size_mul*ffio_read_varlen(bc);
- }
- if(flags&FLAG_MATCH_TIME)
+ stc = &nut->stream[*stream_id];
+ if (flags & FLAG_CODED_PTS) {
+ int coded_pts = ffio_read_varlen(bc);
+ // FIXME check last_pts validity?
+ if (coded_pts < (1 << stc->msb_pts_shift)) {
+ *pts = ff_lsb2full(stc, coded_pts);
+ } else
+ *pts = coded_pts - (1 << stc->msb_pts_shift);
+ } else
+ *pts = stc->last_pts + pts_delta;
+ if (flags & FLAG_SIZE_MSB)
+ size += size_mul * ffio_read_varlen(bc);
+ if (flags & FLAG_MATCH_TIME)
get_s(bc);
- if(flags&FLAG_HEADER_IDX)
- *header_idx= ffio_read_varlen(bc);
- if(flags&FLAG_RESERVED)
- reserved_count= ffio_read_varlen(bc);
- for(i=0; i<reserved_count; i++)
+ if (flags & FLAG_HEADER_IDX)
+ *header_idx = ffio_read_varlen(bc);
+ if (flags & FLAG_RESERVED)
+ reserved_count = ffio_read_varlen(bc);
+ for (i = 0; i < reserved_count; i++)
ffio_read_varlen(bc);
- if(*header_idx >= (unsigned)nut->header_count){
+ if (*header_idx >= (unsigned)nut->header_count) {
av_log(s, AV_LOG_ERROR, "header_idx invalid\n");
return AVERROR_INVALIDDATA;
}
- if(size > 4096)
- *header_idx=0;
+ if (size > 4096)
+ *header_idx = 0;
size -= nut->header_len[*header_idx];
- if(flags&FLAG_CHECKSUM){
- avio_rb32(bc); //FIXME check this
- }else if(size > 2*nut->max_distance || FFABS(stc->last_pts - *pts) > stc->max_pts_distance){
+ if (flags & FLAG_CHECKSUM) {
+ avio_rb32(bc); // FIXME check this
+ } else if (size > 2 * nut->max_distance || FFABS(stc->last_pts - *pts) >
+ stc->max_pts_distance) {
av_log(s, AV_LOG_ERROR, "frame size > 2max_distance and no checksum\n");
return AVERROR_INVALIDDATA;
}
- stc->last_pts= *pts;
- stc->last_flags= flags;
+ stc->last_pts = *pts;
+ stc->last_flags = flags;
return size;
}
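decode_frame_header() stores most timestamps as LSBs only: if the coded pts is below 1 << msb_pts_shift it is expanded relative to the stream's last_pts via ff_lsb2full(), otherwise the full pts was transmitted with an offset of 1 << msb_pts_shift. The helper itself lives in libavformat/nut.c and is not part of this diff; the sketch below is a reconstruction consistent with that check, picking the value with the given low bits that lies in a window around last_pts, and the example numbers are made up:

#include <stdint.h>
#include <stdio.h>

/* Expand an LSB-only pts: choose the timestamp whose low msb_pts_shift bits
 * equal lsb and which lies near last_pts. Reconstruction for illustration;
 * the demuxer uses ff_lsb2full() from libavformat/nut.c. */
static int64_t lsb2full(int64_t last_pts, int64_t lsb, int msb_pts_shift)
{
    int64_t mask  = (1LL << msb_pts_shift) - 1;
    int64_t delta = last_pts - mask / 2;

    return ((lsb - delta) & mask) + delta;
}

int main(void)
{
    /* last_pts = 1000, shift = 4: a coded LSB of 10 means "the pts near 1000
     * whose low 4 bits are 10", i.e. 1002. */
    printf("%lld\n", (long long)lsb2full(1000, 10, 4));
    return 0;
}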
-static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code){
- AVFormatContext *s= nut->avf;
- AVIOContext *bc = s->pb;
+static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code)
+{
+ AVFormatContext *s = nut->avf;
+ AVIOContext *bc = s->pb;
int size, stream_id, discard;
int64_t pts, last_IP_pts;
StreamContext *stc;
uint8_t header_idx;
- size= decode_frame_header(nut, &pts, &stream_id, &header_idx, frame_code);
- if(size < 0)
+ size = decode_frame_header(nut, &pts, &stream_id, &header_idx, frame_code);
+ if (size < 0)
return size;
- stc= &nut->stream[stream_id];
+ stc = &nut->stream[stream_id];
if (stc->last_flags & FLAG_KEY)
- stc->skip_until_key_frame=0;
-
- discard= s->streams[ stream_id ]->discard;
- last_IP_pts= s->streams[ stream_id ]->last_IP_pts;
- if( (discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY))
- ||(discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && last_IP_pts > pts)
- || discard >= AVDISCARD_ALL
- || stc->skip_until_key_frame){
+ stc->skip_until_key_frame = 0;
+
+ discard = s->streams[stream_id]->discard;
+ last_IP_pts = s->streams[stream_id]->last_IP_pts;
+ if ((discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY)) ||
+ (discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE &&
+ last_IP_pts > pts) ||
+ discard >= AVDISCARD_ALL ||
+ stc->skip_until_key_frame) {
avio_skip(bc, size);
return 1;
}
av_new_packet(pkt, size + nut->header_len[header_idx]);
memcpy(pkt->data, nut->header[header_idx], nut->header_len[header_idx]);
- pkt->pos= avio_tell(bc); //FIXME
+ pkt->pos = avio_tell(bc); // FIXME
avio_read(bc, pkt->data + nut->header_len[header_idx], size);
pkt->stream_index = stream_id;
@@ -774,135 +827,146 @@ static int nut_read_packet(AVFormatContext *s, AVPacket *pkt)
{
NUTContext *nut = s->priv_data;
AVIOContext *bc = s->pb;
- int i, frame_code=0, ret, skip;
+ int i, frame_code = 0, ret, skip;
int64_t ts, back_ptr;
- for(;;){
- int64_t pos= avio_tell(bc);
- uint64_t tmp= nut->next_startcode;
- nut->next_startcode=0;
+ for (;;) {
+ int64_t pos = avio_tell(bc);
+ uint64_t tmp = nut->next_startcode;
+ nut->next_startcode = 0;
- if(tmp){
- pos-=8;
- }else{
+ if (tmp) {
+ pos -= 8;
+ } else {
frame_code = avio_r8(bc);
- if(url_feof(bc))
+ if (url_feof(bc))
return -1;
- if(frame_code == 'N'){
- tmp= frame_code;
- for(i=1; i<8; i++)
- tmp = (tmp<<8) + avio_r8(bc);
+ if (frame_code == 'N') {
+ tmp = frame_code;
+ for (i = 1; i < 8; i++)
+ tmp = (tmp << 8) + avio_r8(bc);
}
}
- switch(tmp){
+ switch (tmp) {
case MAIN_STARTCODE:
case STREAM_STARTCODE:
case INDEX_STARTCODE:
- skip= get_packetheader(nut, bc, 0, tmp);
+ skip = get_packetheader(nut, bc, 0, tmp);
avio_skip(bc, skip);
break;
case INFO_STARTCODE:
- if(decode_info_header(nut)<0)
+ if (decode_info_header(nut) < 0)
goto resync;
break;
case SYNCPOINT_STARTCODE:
- if(decode_syncpoint(nut, &ts, &back_ptr)<0)
+ if (decode_syncpoint(nut, &ts, &back_ptr) < 0)
goto resync;
frame_code = avio_r8(bc);
case 0:
- ret= decode_frame(nut, pkt, frame_code);
- if(ret==0)
+ ret = decode_frame(nut, pkt, frame_code);
+ if (ret == 0)
return 0;
- else if(ret==1) //ok but discard packet
+ else if (ret == 1) // OK but discard packet
break;
default:
resync:
-av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", pos);
- tmp= find_any_startcode(bc, nut->last_syncpoint_pos+1);
- if(tmp==0)
+ av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", pos);
+ tmp = find_any_startcode(bc, nut->last_syncpoint_pos + 1);
+ if (tmp == 0)
return AVERROR_INVALIDDATA;
-av_log(s, AV_LOG_DEBUG, "sync\n");
- nut->next_startcode= tmp;
+ av_log(s, AV_LOG_DEBUG, "sync\n");
+ nut->next_startcode = tmp;
}
}
}
-static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos_arg, int64_t pos_limit){
+static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index,
+ int64_t *pos_arg, int64_t pos_limit)
+{
NUTContext *nut = s->priv_data;
AVIOContext *bc = s->pb;
int64_t pos, pts, back_ptr;
-av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", stream_index, *pos_arg, pos_limit);
+ av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n",
+ stream_index, *pos_arg, pos_limit);
- pos= *pos_arg;
- do{
- pos= find_startcode(bc, SYNCPOINT_STARTCODE, pos)+1;
- if(pos < 1){
+ pos = *pos_arg;
+ do {
+ pos = find_startcode(bc, SYNCPOINT_STARTCODE, pos) + 1;
+ if (pos < 1) {
assert(nut->next_startcode == 0);
av_log(s, AV_LOG_ERROR, "read_timestamp failed.\n");
return AV_NOPTS_VALUE;
}
- }while(decode_syncpoint(nut, &pts, &back_ptr) < 0);
- *pos_arg = pos-1;
+ } while (decode_syncpoint(nut, &pts, &back_ptr) < 0);
+ *pos_arg = pos - 1;
assert(nut->last_syncpoint_pos == *pos_arg);
- av_log(s, AV_LOG_DEBUG, "return %"PRId64" %"PRId64"\n", pts,back_ptr );
- if (stream_index == -1) return pts;
- else if(stream_index == -2) return back_ptr;
+ av_log(s, AV_LOG_DEBUG, "return %"PRId64" %"PRId64"\n", pts, back_ptr);
+ if (stream_index == -1)
+ return pts;
+ else if (stream_index == -2)
+ return back_ptr;
-assert(0);
+ assert(0);
}
-static int read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags){
- NUTContext *nut = s->priv_data;
- AVStream *st= s->streams[stream_index];
- Syncpoint dummy={.ts= pts*av_q2d(st->time_base)*AV_TIME_BASE};
- Syncpoint nopts_sp= {.ts= AV_NOPTS_VALUE, .back_ptr= AV_NOPTS_VALUE};
- Syncpoint *sp, *next_node[2]= {&nopts_sp, &nopts_sp};
+static int read_seek(AVFormatContext *s, int stream_index,
+ int64_t pts, int flags)
+{
+ NUTContext *nut = s->priv_data;
+ AVStream *st = s->streams[stream_index];
+ Syncpoint dummy = { .ts = pts * av_q2d(st->time_base) * AV_TIME_BASE };
+ Syncpoint nopts_sp = { .ts = AV_NOPTS_VALUE, .back_ptr = AV_NOPTS_VALUE };
+ Syncpoint *sp, *next_node[2] = { &nopts_sp, &nopts_sp };
int64_t pos, pos2, ts;
int i;
- if(st->index_entries){
- int index= av_index_search_timestamp(st, pts, flags);
- if(index<0)
+ if (st->index_entries) {
+ int index = av_index_search_timestamp(st, pts, flags);
+ if (index < 0)
return -1;
- pos2= st->index_entries[index].pos;
- ts = st->index_entries[index].timestamp;
- }else{
+ pos2 = st->index_entries[index].pos;
+ ts = st->index_entries[index].timestamp;
+ } else {
av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pts_cmp,
(void **) next_node);
- av_log(s, AV_LOG_DEBUG, "%"PRIu64"-%"PRIu64" %"PRId64"-%"PRId64"\n", next_node[0]->pos, next_node[1]->pos,
- next_node[0]->ts , next_node[1]->ts);
- pos = ff_gen_search(s, -1, dummy.ts, next_node[0]->pos, next_node[1]->pos, next_node[1]->pos,
- next_node[0]->ts , next_node[1]->ts, AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp);
-
- if(!(flags & AVSEEK_FLAG_BACKWARD)){
- dummy.pos= pos+16;
- next_node[1]= &nopts_sp;
+ av_log(s, AV_LOG_DEBUG, "%"PRIu64"-%"PRIu64" %"PRId64"-%"PRId64"\n",
+ next_node[0]->pos, next_node[1]->pos, next_node[0]->ts,
+ next_node[1]->ts);
+ pos = ff_gen_search(s, -1, dummy.ts, next_node[0]->pos,
+ next_node[1]->pos, next_node[1]->pos,
+ next_node[0]->ts, next_node[1]->ts,
+ AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp);
+
+ if (!(flags & AVSEEK_FLAG_BACKWARD)) {
+ dummy.pos = pos + 16;
+ next_node[1] = &nopts_sp;
av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
(void **) next_node);
- pos2 = ff_gen_search(s, -2, dummy.pos, next_node[0]->pos , next_node[1]->pos, next_node[1]->pos,
- next_node[0]->back_ptr, next_node[1]->back_ptr, flags, &ts, nut_read_timestamp);
- if(pos2>=0)
- pos= pos2;
- //FIXME dir but I think it does not matter
+ pos2 = ff_gen_search(s, -2, dummy.pos, next_node[0]->pos,
+ next_node[1]->pos, next_node[1]->pos,
+ next_node[0]->back_ptr, next_node[1]->back_ptr,
+ flags, &ts, nut_read_timestamp);
+ if (pos2 >= 0)
+ pos = pos2;
+ // FIXME dir but I think it does not matter
}
- dummy.pos= pos;
- sp= av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
- NULL);
+ dummy.pos = pos;
+ sp = av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
+ NULL);
assert(sp);
- pos2= sp->back_ptr - 15;
+ pos2 = sp->back_ptr - 15;
}
av_log(NULL, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos2);
- pos= find_startcode(s->pb, SYNCPOINT_STARTCODE, pos2);
+ pos = find_startcode(s->pb, SYNCPOINT_STARTCODE, pos2);
avio_seek(s->pb, pos, SEEK_SET);
av_log(NULL, AV_LOG_DEBUG, "SP: %"PRId64"\n", pos);
- if(pos2 > pos || pos2 + 15 < pos){
+ if (pos2 > pos || pos2 + 15 < pos)
av_log(NULL, AV_LOG_ERROR, "no syncpoint at backptr pos\n");
- }
- for(i=0; i<s->nb_streams; i++)
- nut->stream[i].skip_until_key_frame=1;
+ for (i = 0; i < s->nb_streams; i++)
+ nut->stream[i].skip_until_key_frame = 1;
return 0;
}
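read_seek() works in two stages: it bisects syncpoints by timestamp (using the decoded index if there is one, otherwise ff_gen_search() over the cached syncpoint tree), then rewinds to the chosen syncpoint's back_ptr so that, roughly, a keyframe for every stream is seen before the target and skip_until_key_frame can drop the leftovers. A deliberately simplified, self-contained toy of that idea over an in-memory syncpoint list; the structure name and the sample data are invented and much simpler than the real index/Syncpoint machinery:

#include <inttypes.h>
#include <stdio.h>

/* Toy seek: pick the last syncpoint with ts <= target (backward-seek
 * semantics), then resume reading at its back_ptr. Data is made up. */
typedef struct { int64_t pos, back_ptr, ts; } ToySync;

static const ToySync *find_syncpoint(const ToySync *sp, int n, int64_t ts)
{
    int lo = 0, hi = n - 1;

    while (lo < hi) {
        int mid = (lo + hi + 1) / 2;
        if (sp[mid].ts <= ts)
            lo = mid;
        else
            hi = mid - 1;
    }
    return &sp[lo];
}

int main(void)
{
    const ToySync index[] = {
        {   16,   16,    0 },
        { 4096,   16,  900 },
        { 9000, 4096, 1800 },
    };
    const ToySync *sp = find_syncpoint(index, 3, 1000);

    printf("seek to syncpoint at %"PRId64", resume reading at back_ptr %"PRId64"\n",
           sp->pos, sp->back_ptr);
    return 0;
}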
@@ -915,7 +979,7 @@ static int nut_read_close(AVFormatContext *s)
av_freep(&nut->time_base);
av_freep(&nut->stream);
ff_nut_free_sp(nut);
- for(i = 1; i < nut->header_count; i++)
+ for (i = 1; i < nut->header_count; i++)
av_freep(&nut->header[i]);
return 0;
diff --git a/tests/ref/acodec/pcm_f32le b/tests/ref/acodec/pcm_f32le
index 38e5c0b719..eb6ea93687 100644
--- a/tests/ref/acodec/pcm_f32le
+++ b/tests/ref/acodec/pcm_f32le
@@ -1,4 +1,4 @@
-46f44f86a18984a832206ab9e29a79f2 *./tests/data/acodec/pcm_f32le.wav
+653d82a64b7bd96ac193e105e9f92d4c *./tests/data/acodec/pcm_f32le.wav
2116880 ./tests/data/acodec/pcm_f32le.wav
64151e4bcc2b717aa5a8454d424d6a1f *./tests/data/pcm_f32le.acodec.out.wav
stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 1058400/ 1058400
diff --git a/tests/ref/acodec/pcm_f64le b/tests/ref/acodec/pcm_f64le
index 42875a8d2f..2f0576bf91 100644
--- a/tests/ref/acodec/pcm_f64le
+++ b/tests/ref/acodec/pcm_f64le
@@ -1,4 +1,4 @@
-ba17c6d1a270e1333e981f239bf7eb45 *./tests/data/acodec/pcm_f64le.wav
+48b4cd378f47a50dc902aa03cc8280ed *./tests/data/acodec/pcm_f64le.wav
4233680 ./tests/data/acodec/pcm_f64le.wav
64151e4bcc2b717aa5a8454d424d6a1f *./tests/data/pcm_f64le.acodec.out.wav
stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 1058400/ 1058400
diff --git a/tests/ref/acodec/pcm_s24daud b/tests/ref/acodec/pcm_s24daud
index 62c4421908..e1f22964fc 100644
--- a/tests/ref/acodec/pcm_s24daud
+++ b/tests/ref/acodec/pcm_s24daud
@@ -1,4 +1,4 @@
1b75d5198ae789ab3c48f7024e08f4a9 *./tests/data/acodec/pcm_s24daud.302
10368730 ./tests/data/acodec/pcm_s24daud.302
-4708f86529c594e29404603c64bb208c *./tests/data/pcm_s24daud.acodec.out.wav
+70ec0ba6bc151ddc7509c09804d95d3b *./tests/data/pcm_s24daud.acodec.out.wav
stddev: 8967.92 PSNR: 17.28 MAXDIFF:42548 bytes: 6911796/ 1058400
diff --git a/tests/ref/acodec/pcm_s24le b/tests/ref/acodec/pcm_s24le
index a724e8c189..0d86d1e7f7 100644
--- a/tests/ref/acodec/pcm_s24le
+++ b/tests/ref/acodec/pcm_s24le
@@ -1,4 +1,4 @@
-a85380fb79b0d4fff38e24ac1e34bb94 *./tests/data/acodec/pcm_s24le.wav
+18ea73985dbdf59e23f5aba66145e6fe *./tests/data/acodec/pcm_s24le.wav
1587668 ./tests/data/acodec/pcm_s24le.wav
64151e4bcc2b717aa5a8454d424d6a1f *./tests/data/pcm_s24le.acodec.out.wav
stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 1058400/ 1058400
diff --git a/tests/ref/acodec/pcm_s32le b/tests/ref/acodec/pcm_s32le
index 86777505f5..2b81c29e6a 100644
--- a/tests/ref/acodec/pcm_s32le
+++ b/tests/ref/acodec/pcm_s32le
@@ -1,4 +1,4 @@
-da6ed80f4f40f0082577dea80827e014 *./tests/data/acodec/pcm_s32le.wav
+8d8849fa5c5d91b9cb74f5c74e937faf *./tests/data/acodec/pcm_s32le.wav
2116868 ./tests/data/acodec/pcm_s32le.wav
64151e4bcc2b717aa5a8454d424d6a1f *./tests/data/pcm_s32le.acodec.out.wav
stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 1058400/ 1058400
diff --git a/tests/ref/lavf/caf b/tests/ref/lavf/caf
index 2e38f864f4..972b27cc19 100644
--- a/tests/ref/lavf/caf
+++ b/tests/ref/lavf/caf
@@ -1,3 +1,3 @@
-df9ebf2812784a653d3337cf12c0c687 *./tests/data/lavf/lavf.caf
-90180 ./tests/data/lavf/lavf.caf
+71e1abdfc59613fe05fca2939f02e02d *./tests/data/lavf/lavf.caf
+90204 ./tests/data/lavf/lavf.caf
./tests/data/lavf/lavf.caf CRC=0xf1ae5536