author | Diego Biurrun <diego@biurrun.de> | 2012-08-24 13:31:50 +0200
---|---|---
committer | Diego Biurrun <diego@biurrun.de> | 2012-10-12 20:56:54 +0200
commit | c1ef30a6ba2cdc15147c4e80766e9bda629ffc1d (patch) |
tree | dacd008cb6db7e5d2794770f2469acf6fd5dc4af |
parent | d5c62122a7b26704bf867a1262df358623bf5edf (diff) |
download | ffmpeg-c1ef30a6ba2cdc15147c4e80766e9bda629ffc1d.tar.gz |
De-doxygenize some top-level files
-rw-r--r-- | avconv.c | 15
-rw-r--r-- | avconv.h | 14
-rw-r--r-- | avconv_filter.c | 6
-rw-r--r-- | avconv_opt.c | 8
-rw-r--r-- | avplay.c | 30
5 files changed, 30 insertions, 43 deletions
diff --git a/avconv.c b/avconv.c
@@ -246,7 +246,7 @@ static void assert_codec_experimental(AVCodecContext *c, int encoder)
     }
 }
 
-/**
+/*
  * Update the requested input sample format based on the output sample format.
  * This is currently only used to request float output from decoders which
  * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
@@ -660,7 +660,7 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
     }
 }
 
-/**
+/*
  * Read one frame for lavfi output for ost and encode it.
  */
 static int poll_filter(OutputStream *ost)
@@ -723,7 +723,7 @@ static int poll_filter(OutputStream *ost)
     return 0;
 }
 
-/**
+/*
  * Read as many frames from possible from lavfi and encode them.
  *
  * Always read from the active stream with the lowest timestamp. If no frames
@@ -1941,10 +1941,7 @@ static int transcode_init(void)
     return 0;
 }
 
-/**
- * @return 1 if there are still streams where more output is wanted,
- * 0 otherwise
- */
+/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
 static int need_output(void)
 {
     int i;
@@ -2128,13 +2125,13 @@ static void reset_eagain(void)
         input_files[i]->eagain = 0;
 }
 
-/**
+/*
  * Read one packet from an input file and send it for
  * - decoding -> lavfi (audio/video)
  * - decoding -> encoding -> muxing (subtitles)
  * - muxing (streamcopy)
  *
- * @return
+ * Return
  * - 0 -- one packet was read and processed
  * - AVERROR(EAGAIN) -- no packets were available for selected file,
  *   this function should be called again
diff --git a/avconv.h b/avconv.h
@@ -51,21 +51,19 @@
 
 /* select an input stream for an output stream */
 typedef struct StreamMap {
-    int disabled;           /** 1 is this mapping is disabled by a negative map */
+    int disabled;           /* 1 is this mapping is disabled by a negative map */
     int file_index;
     int stream_index;
     int sync_file_index;
    int sync_stream_index;
-    char *linklabel;        /** name of an output link, for mapping lavfi outputs */
+    char *linklabel;        /* name of an output link, for mapping lavfi outputs */
 } StreamMap;
 
-/**
- * select an input file for an output file
- */
+/* select an input file for an output file */
 typedef struct MetadataMap {
-    int  file;      ///< file index
-    char type;      ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
-    int  index;     ///< stream/chapter/program number
+    int  file;      // file index
+    char type;      // type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
+    int  index;     // stream/chapter/program number
 } MetadataMap;
 
 typedef struct OptionsContext {
diff --git a/avconv_filter.c b/avconv_filter.c
index aa158478f3..8f3f912582 100644
--- a/avconv_filter.c
+++ b/avconv_filter.c
@@ -29,10 +29,8 @@
 #include "libavutil/pixfmt.h"
 #include "libavutil/samplefmt.h"
 
-/**
- * Define a function for building a string containing a list of
- * allowed formats,
- */
+/* Define a function for building a string containing a list of
+ * allowed formats. */
 #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)\
 static char *choose_ ## var ## s(OutputStream *ost)                            \
 {                                                                              \
diff --git a/avconv_opt.c b/avconv_opt.c
index fd3d5735fe..058d5a3779 100644
--- a/avconv_opt.c
+++ b/avconv_opt.c
@@ -275,7 +275,7 @@ static int opt_attach(void *optctx, const char *opt, const char *arg)
 }
 
 /**
- * Parse a metadata specifier in arg.
+ * Parse a metadata specifier passed as 'arg' parameter.
  * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
  * @param index for type c/p, chapter/program index is written here
  * @param stream_spec for type s, the stream specifier is written here
@@ -423,10 +423,8 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
     return avcodec_find_decoder(st->codec->codec_id);
 }
 
-/**
- * Add all the streams from the given input file to the global
- * list of input streams.
- */
+/* Add all the streams from the given input file to the global
+ * list of input streams. */
 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
 {
     int i;
diff --git a/avplay.c b/avplay.c
@@ -99,9 +99,9 @@ typedef struct PacketQueue {
 #define SUBPICTURE_QUEUE_SIZE 4
 
 typedef struct VideoPicture {
-    double pts;          ///< presentation time stamp for this picture
-    double target_clock; ///< av_gettime() time at which this should be displayed ideally
-    int64_t pos;         ///< byte position in file
+    double pts;          // presentation timestamp for this picture
+    double target_clock; // av_gettime() time at which this should be displayed ideally
+    int64_t pos;         // byte position in file
     SDL_Overlay *bmp;
     int width, height; /* source height & width */
     int allocated;
@@ -191,13 +191,13 @@ typedef struct VideoState {
     double frame_timer;
     double frame_last_pts;
     double frame_last_delay;
-    double video_clock;             ///< pts of last decoded frame / predicted pts of next decoded frame
+    double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
     int video_stream;
     AVStream *video_st;
     PacketQueue videoq;
-    double video_current_pts;       ///< current displayed pts (different from video_clock if frame fifos are used)
-    double video_current_pts_drift; ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
-    int64_t video_current_pos;      ///< current displayed file pos
+    double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
+    double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
+    int64_t video_current_pos;      // current displayed file pos
     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
     int pictq_size, pictq_rindex, pictq_windex;
     SDL_mutex *pictq_mutex;
@@ -213,8 +213,8 @@ typedef struct VideoState {
     PtsCorrectionContext pts_ctx;
 
 #if CONFIG_AVFILTER
-    AVFilterContext *in_video_filter;  ///< the first filter in the video chain
-    AVFilterContext *out_video_filter; ///< the last filter in the video chain
+    AVFilterContext *in_video_filter;  // the first filter in the video chain
+    AVFilterContext *out_video_filter; // the last filter in the video chain
     int use_dr1;
     FrameBuffer *buffer_pool;
 #endif
@@ -1306,10 +1306,8 @@ static void alloc_picture(void *opaque)
     SDL_UnlockMutex(is->pictq_mutex);
 }
 
-/**
- *
- * @param pts the dts of the pkt / pts of the frame and guessed if not known
- */
+/* The 'pts' parameter is the dts of the packet / pts of the frame and
+ * guessed if not known. */
 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
 {
     VideoPicture *vp;
@@ -1427,10 +1425,8 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t
     return 0;
 }
 
-/**
- * compute the exact PTS for the picture if it is omitted in the stream
- * @param pts1 the dts of the pkt / pts of the frame
- */
+/* Compute the exact PTS for the picture if it is omitted in the stream.
+ * The 'pts1' parameter is the dts of the packet / pts of the frame. */
 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
 {
     double frame_delay, pts;
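
The pattern across all five files is the same: Doxygen markers (`/** ... */` blocks, `@param`/`@return` tags, `///<` member comments) become plain C comments, so the documentation generator no longer indexes these internal notes. As a minimal, self-contained sketch of that conversion (using a hypothetical `valid_index` helper and `ExampleMap` structs that are not part of avconv or avplay), the before/after comment styles look like this:

```c
// comment_style.c -- hypothetical illustration only; none of these names
// come from the commit above.

#include <stdio.h>

/**
 * Doxygen style (before): parsed by the documentation generator.
 * @param file file index to check
 * @return 1 if the index is valid, 0 otherwise
 */
static int valid_index_doxygen(int file)
{
    return file >= 0;
}

typedef struct ExampleMapDoxygen {
    int file;   ///< file index (Doxygen member comment)
} ExampleMapDoxygen;

/* Plain style (after): ignored by Doxygen, read only in the source.
 * Return 1 if the index is valid, 0 otherwise. */
static int valid_index_plain(int file)
{
    return file >= 0;
}

typedef struct ExampleMapPlain {
    int file;   // file index (plain comment)
} ExampleMapPlain;

int main(void)
{
    ExampleMapDoxygen a = { 0 };
    ExampleMapPlain   b = { 1 };

    printf("%d %d\n", valid_index_doxygen(a.file), valid_index_plain(b.file));
    return 0;
}
```

Nothing changes functionally; the edit only decides whether Doxygen treats these comments as part of the generated API documentation.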