author     Fabrice Bellard <fabrice@bellard.org>   2001-07-22 14:37:44 +0000
committer  Fabrice Bellard <fabrice@bellard.org>   2001-07-22 14:37:44 +0000
commit     85f07f223de9fbeb2b9d66db11f89091ac717926 (patch)
tree       07c79a0ac6c7829c2ce248f2747bdfcd547262a9
parent     de6d9b6404bfd1c589799142da5a95428f146edd (diff)
merge
Originally committed as revision 6 to svn://svn.ffmpeg.org/ffmpeg/trunk
-rw-r--r--  Makefile              42
-rw-r--r--  README                73
-rw-r--r--  doc/README.tech       46
-rw-r--r--  doc/TODO              63
-rw-r--r--  doc/ffmpeg.txt       181
-rw-r--r--  doc/ffserver.conf    261
-rw-r--r--  ffmpeg.c            2079
-rw-r--r--  ffserver.c          1577
-rw-r--r--  libav/Makefile        22
9 files changed, 4344 insertions(+), 0 deletions(-)
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000..1650c89612
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,42 @@
+# Main ffmpeg Makefile
+# (c) 2000, 2001 Gerard Lantau
+#
+include config.mk
+
+CFLAGS= -O2 -Wall -g -I./libavcodec -I./libav
+LDFLAGS= -g
+ifdef CONFIG_GPROF
+CFLAGS+=-p
+LDFLAGS+=-p
+endif
+
+PROG= ffmpeg ffserver
+
+all: lib $(PROG)
+
+lib:
+ make -C libavcodec all
+ make -C libav all
+
+ffmpeg: ffmpeg.o libav/libav.a libavcodec/libavcodec.a
+ gcc $(LDFLAGS) -o $@ $^ -lm
+
+ffserver: ffserver.o libav/libav.a libavcodec/libavcodec.a
+ gcc $(LDFLAGS) -o $@ $^ -lm
+
+%.o: %.c
+ gcc $(CFLAGS) -c -o $@ $<
+
+install: all
+ install -s -m 755 $(PROG) $(PREFIX)/bin
+
+clean:
+ make -C libavcodec clean
+ make -C libav clean
+ rm -f *.o *~ gmon.out TAGS $(PROG)
+
+distclean: clean
+ rm -f Rules.mk config.h
+
+TAGS:
+ etags *.[ch] libav/*.[ch] libavcodec/*.[ch]
diff --git a/README b/README
new file mode 100644
index 0000000000..4ae288ece9
--- /dev/null
+++ b/README
@@ -0,0 +1,73 @@
+FFmpeg - (c) 2000,2001 Gerard Lantau.
+
+1) Introduction
+---------------
+
+ffmpeg is a hyper fast realtime audio/video encoder, a streaming
+server and a generic audio and video file converter.
+
+It can grab from a standard Video4Linux video source and convert it
+into several file formats based on DCT/motion compensation
+encoding. Sound is compressed in MPEG audio layer 2 or using an AC3
+compatible stream.
+
+What makes ffmpeg interesting ?
+
+- Innovative streaming technology : multiformat, real time encoding,
+ simple configuration.
+
+- Simple and efficient video encoder: outputs MPEG1, H263, Real
+ Video(tm), MPEG4, DIVX and MJPEG compatible bitstreams using the
+ same encoder core.
+
+- Real time encoding (25 fps in 352x288 on a K6 500) using the
+ video4linux API.
+
+- Generates I and P frames, which means it is far better than an MJPEG
+ encoder.
+
+- Hyper fast MPEG audio layer 2 compression (50 times faster than
+ realtime on a K6 500).
+
+- Hyper fast AC3 compatible encoder.
+
+- Simple and very small portable C source code, easy to understand and
+  to modify. It may be the smallest decent MPEG encoder :-)
+
+- optional non real time higher quality encoding (different motion
+ estimators available).
+
+- Audio and Video decoders are in development.
+
+ffmpeg is made of two programs:
+
+* ffmpeg: a soft VCR which encodes in real time to several formats. It
+  can also encode from any supported input file format to any supported
+  output format.
+
+* ffserver: high performance live broadcast streaming server based on
+ the ffmpeg core encoders.
+
+2) Documentation
+----------------
+
+* Read doc/ffmpeg.txt and doc/ffserver.txt to learn the basic features.
+
+* Read doc/TODO to learn about the known bugs and missing features.
+
+* Read doc/README.dev if you want to contribute or use the codec or
+ format libraries.
+
+3) Licensing:
+------------
+
+* See the file COPYING. ffmpeg and the associated library are licensed
+ under the GNU General Public License. I may change the license of
+  libavcodec and libav to the LGPL if many people ask for it (and if they
+ submit good patches!).
+
+* This code should be patent free since it is very simple. I took care
+ to use the same video encoder/decoder core for all formats to show
+  that they really ARE THE SAME except for the Huffman codes used for
+  encoding.
+
+Gerard Lantau (glantau@yahoo.fr).
diff --git a/doc/README.tech b/doc/README.tech
new file mode 100644
index 0000000000..e110b3a960
--- /dev/null
+++ b/doc/README.tech
@@ -0,0 +1,46 @@
+Technical notes:
+---------------
+
+Video:
+-----
+
+- The intra/predicted macroblock decision uses the algorithm suggested
+  by the MPEG-1 specification.
+
+- Only Huffman-based H263 is supported, mainly because of patent
+ issues.
+
+- MPEG4 is supported, as an extension of the H263 encoder. MPEG4 DC
+  prediction is used, but not AC prediction. Specific VLCs are used for
+  intra pictures. The output format is compatible with Open DIVX
+ version 47.
+
+- MJPEG is supported, but in the current version the Huffman tables
+ are not optimized. It could be interesting to add this feature for
+ the flash format.
+
+- To increase speed, only the (0,0) motion vector is tested for real
+  time compression. NEW: motion estimation can now be done with several
+  methods: none, full, log, and phods. The code is mmx/sse
+  optimized. (A minimal full-search sketch is given at the end of
+  these video notes.)
+
+- In high quality mode, full search is used for motion
+ vectors. Currently, only fcode = 1 is used for both H263/MPEG1. Half
+ pel vectors are used.
+
+I also plan to improve the bitrate control which is too simplistic.
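+
+As an illustration only (this is a sketch, not the actual libavcodec
+code), a full search over a +/- 'range' window with a 16x16 sum of
+absolute differences criterion can be written as:
+
+    #include <stdlib.h>   /* abs() */
+
+    /* sum of absolute differences over a 16x16 block */
+    static int sad16(const unsigned char *cur, const unsigned char *ref,
+                     int stride)
+    {
+        int x, y, sum = 0;
+        for (y = 0; y < 16; y++) {
+            for (x = 0; x < 16; x++)
+                sum += abs(cur[x] - ref[x]);
+            cur += stride;
+            ref += stride;
+        }
+        return sum;
+    }
+
+    /* exhaustive search around (0,0); the caller must make sure that
+       ref +/- range stays inside the reference picture */
+    static void full_search(const unsigned char *cur, const unsigned char *ref,
+                            int stride, int range, int *mx, int *my)
+    {
+        int dx, dy, best = sad16(cur, ref, stride);
+        *mx = 0;
+        *my = 0;
+        for (dy = -range; dy <= range; dy++) {
+            for (dx = -range; dx <= range; dx++) {
+                int score = sad16(cur, ref + dy * stride + dx, stride);
+                if (score < best) {
+                    best = score;
+                    *mx = dx;
+                    *my = dy;
+                }
+            }
+        }
+    }
+
+The 'log' and 'phods' methods try to approach the same result while
+testing far fewer positions.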
+
+Audio:
+-----
+
+- I rewrote the mpeg audio layer 2 compatible encoder from scratch. It
+  is one of the simplest encoders you can imagine (800 lines of C
+  code!). It is also one of the fastest because of its simplicity.
+  There are still some overflow problems. A minimal psychoacoustic
+  model could be added. Currently, stereo is supported, but not joint
+  stereo.
+
+- I rewrote the AC3 audio encoder from scratch. It is fairly naive,
+  but the results are quite interesting at 64 kbit/s. It includes
+  extensions for the low sampling rates used in some Internet
+  formats. Differential and coupled stereo are not handled. Stereo
+  channels are simply handled as two mono channels.
diff --git a/doc/TODO b/doc/TODO
new file mode 100644
index 0000000000..09c5f2b6c2
--- /dev/null
+++ b/doc/TODO
@@ -0,0 +1,63 @@
+ffmpeg TODO list:
+----------------
+
+(in approximate decreasing priority order)
+
+Planned in next release:
+
+(DONE) - apply header fixes
+(DONE) - mpeg audio decoder.
+(DONE) - fix decode/encode codec string.
+(DONE) - fix EINTR error if VIDIOCSYNC.
+(DONE) - add CONFIG system.
+(DONE) - merge mplayer mmx accel.
+(DONE) - fix emms bug.
+(DONE) - add I263 handling
+(DONE) - add RV10 decoding.
+(DONE) - add true pgm support.
+(DONE) - msmpeg4 0x18 fix.
+- add qscale out.
+- add format autodetect with content (for example to distinguish
+ mpegvideo/mpegmux).
+- add external alloc for libavcodec (avifile request).
+- fix -sameq in grabbing
+- find a solution to clear feed1.ffm if format change.
+- new grab architecture : use avformat instead of audio: and video:
+ protocol.
+- correct PTS handling to sync audio and video.
+- fix 0 size picture in AVIs = skip picture
+
+BUGS:
+
+- fix audio/video synchro (including real player synchro bugs)
+
+- Improve the bit rate control for video codecs.
+
+- see ov511.o YUV problem (420 instead of 420P).
+
+- fix file caching problem in windows (add correct headers)
+
+- add low pass filter to suppress noise coming from cheap TV cards.
+
+- test/debug audio in flash format
+
+- sort out ASF streaming problems.
+
+- Improve psycho acoustic model for AC3 & mpeg audio.
+
+FEATURES:
+
+- add MPEG4 in mpegmux support.
+
+- add RTP / multicast layer.
+
+- demux streams for CCTV : N streams in one stream. Add option to
+ generate multiple streams.
+
+- add disconnect user option in stat.html.
+
+- deny & allow + password in ffserver.
+
+- graphical user interface.
+
+- animated gif as output format
diff --git a/doc/ffmpeg.txt b/doc/ffmpeg.txt
new file mode 100644
index 0000000000..7352a662e2
--- /dev/null
+++ b/doc/ffmpeg.txt
@@ -0,0 +1,181 @@
+*************** FFMPEG soft VCR documentation *****************
+
+0) Introduction
+---------------
+
+ FFmpeg is a very fast video and audio encoder. It can read from
+ files or grab from a live audio/video source.
+
+ The command line interface is designed to be intuitive, in the sense
+ that ffmpeg tries to figure out all the parameters when
+ possible. You usually only have to give the target bitrate you want.
+
+ FFmpeg can also convert from any sample rate to any other, and
+ resize video on the fly with a high quality polyphase filter.
+
+1) Video and Audio grabbing
+---------------------------
+
+* ffmpeg can use a video4linux compatible video source and any Open
+ Sound System audio source:
+
+ ffmpeg /tmp/out.mpg
+
+ Note that you must activate the right video source and channel
+ before launching ffmpeg. You can use any TV viewer such as xawtv by
+ Gerd Knorr, which I find very good. You must also set the audio
+ recording levels correctly with a standard mixer.
+
+2) Video and Audio file format conversion
+-----------------------------------------
+
+* ffmpeg can use any supported file format and protocol as input :
+
+examples:
+
+ ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
+
+It will use the files:
+/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
+/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
+
+The Y files use twice the resolution of the U and V files. They are
+raw files, without header. They can be generated by all decent video
+decoders. You must specify the size of the image with the '-s' option
+if ffmpeg cannot guess it.
+
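+As an illustration only (this is not part of ffmpeg itself), one such
+frame triple for a width x height picture in this 4:2:0 layout could be
+written by:
+
+    #include <stdio.h>
+
+    static void write_plane(const char *name, const unsigned char *buf,
+                            int size)
+    {
+        FILE *f = fopen(name, "wb");
+        if (f) {
+            fwrite(buf, 1, size, f);
+            fclose(f);
+        }
+    }
+
+    /* y is width*height bytes; u and v are (width/2)*(height/2) bytes */
+    void write_yuv_frame(int n, int width, int height,
+                         const unsigned char *y,
+                         const unsigned char *u,
+                         const unsigned char *v)
+    {
+        char name[64];
+        snprintf(name, sizeof(name), "/tmp/test%d.Y", n);
+        write_plane(name, y, width * height);
+        snprintf(name, sizeof(name), "/tmp/test%d.U", n);
+        write_plane(name, u, (width / 2) * (height / 2));
+        snprintf(name, sizeof(name), "/tmp/test%d.V", n);
+        write_plane(name, v, (width / 2) * (height / 2));
+    }
+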
+* You can set several input files and output files:
+
+ ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
+
+Convert the audio file a.wav and the raw yuv video file a.yuv to mpeg file a.mpg
+
+* You can also do audio and video conversions at the same time:
+
+ ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
+
+Convert the sample rate of a.wav to 22050 Hz and encode it to MPEG audio.
+
+* You can encode to several formats at the same time and define a
+  mapping from input streams to output streams:
+
+ ffmpeg -i /tmp/a.wav -ab 64 /tmp/a.mp2 -ab 128 /tmp/b.mp2 -map 0:0 -map 0:0
+
+ This converts a.wav to a.mp2 at 64 kbit/s and b.mp2 at 128 kbit/s.
+ '-map file:index' specifies which input stream is used for each output
+ stream, in the order of the definition of the output streams.
+
+NOTE: to see the supported input formats, use 'ffmpeg -formats'.
+
+3) Invocation
+-------------
+
+* The generic syntax is :
+
+ ffmpeg [[options][-i input_file]]... {[options] output_file}...
+
+ If no input file is given, audio/video grabbing is done.
+
+ As a general rule, options are applied to the next specified
+ file. For example, if you give the '-b 64' option, it sets the video
+ bitrate of the next file. Format option may be needed for raw input
+ files.
+
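+ For instance, a command such as
+
+ ffmpeg -s 352x288 -i /tmp/a.yuv -b 1000 /tmp/b.mpg -b 500 /tmp/c.mpg
+
+ should encode b.mpg at 1000 kbit/s and c.mpg at 500 kbit/s, since each
+ '-b' option applies to the output file which follows it.
+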
+ By default, ffmpeg tries to convert as losslessly as possible: it
+ uses the same audio and video parameters for the outputs as the ones
+ specified for the inputs.
+
+* Main options are:
+
+-h show help
+-formats show available formats, codecs and protocols
+-L print the LICENSE
+-i filename input file name
+-y overwrite output files
+-t duration set recording time in seconds
+-f format set encoding format [guessed]
+-title string set the title
+-author string set the author
+-copyright string set the copyright
+-comment string set the comment
+
+* Video Options are:
+
+-s size set frame size [160x128]
+-r fps set frame rate [25]
+-b bitrate set the video bitrate in kbit/s [200]
+-vn disable video recording [no]
+
+* Audio Options are:
+
+-ar freq set the audio sampling freq [44100]
+-ab bitrate set the audio bitrate in kbit/s [64]
+-ac channels set the number of audio channels [1]
+-an disable audio recording [no]
+
+* Advanced options are:
+
+-map file:stream set input stream mapping
+-g gop_size set the group of picture size [12]
+-intra use only intra frames [no]
+-qscale q use fixed video quantiser scale (VBR)
+-c comment set the comment string
+-vd device set video4linux device name [/dev/video]
+-vcodec codec force video codec
+-me method set motion estimation method
+-ad device set audio device name [/dev/dsp]
+-acodec codec force audio codec
+
+The output file can be "-" to output to a pipe. This is only possible
+with mpeg1 and h263 formats.
+
+4) Protocols
+------------
+
+ ffmpeg also handles many protocols specified with the URL syntax.
+
+ Use 'ffmpeg -formats' to have a list of the supported protocols.
+
+ The protocol 'http:' is currently used only to communicate with
+ ffserver (see the ffserver documentation). When ffmpeg becomes a
+ video player, it will also be used for streaming :-)
+
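+ For example, the grabbed audio/video can be sent to a local ffserver
+ feed with a command like:
+
+ ffmpeg http://localhost:8090/feed1.ffm
+
+ (see the example ffserver.conf for the feed definition).
+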
+5) File formats and codecs
+--------------------------
+
+ Use 'ffmpeg -formats' to have a list of the supported output
+ formats. Only some formats are handled as input, but it will improve
+ in the next versions.
+
+6) Tips
+-------
+
+- For very low bitrate streaming applications, use a low frame rate
+  and a small gop size. This is especially true for RealVideo, where
+  the Linux player does not seem to be very fast, so it can miss
+  frames. An example is:
+
+ ffmpeg -g 3 -r 3 -t 10 -b 50 -s qcif -f rv10 /tmp/b.rm
+
+- The parameter 'q' which is displayed while encoding is the current
+  quantizer. A value of 1 indicates very good quality, while a value
+  of 31 indicates the worst quality. If q=31 appears too often, it
+  means that the encoder cannot compress enough to meet your bit
+  rate. You must either increase the bit rate, decrease the frame rate
+  or decrease the frame size.
+
+- If your computer is not fast enough, you can speed up the
+ compression at the expense of the compression ratio. You can use
+  '-me zero' to speed up motion estimation, and '-intra' to completely
+  disable motion estimation (you get only I frames, which means the
+  result is about as good as JPEG compression).
+
+- To get very low audio bitrates, reduce the sampling frequency
+  (down to 22050 Hz for mpeg audio, 22050 or 11025 Hz for ac3).
+
+- To have a constant quality (but a variable bitrate), use the option
+  '-qscale n' where 'n' is between 1 (excellent quality) and 31 (worst
+ quality).
+
+- When converting video files, you can use the '-sameq' option, which
+  uses the same quality factor in the encoder as in the decoder. It
+  allows almost lossless encoding.
diff --git a/doc/ffserver.conf b/doc/ffserver.conf
new file mode 100644
index 0000000000..311f48b817
--- /dev/null
+++ b/doc/ffserver.conf
@@ -0,0 +1,261 @@
+# Port on which the server is listening. You must select a different
+# port from your standard http web server if it is running on the same
+# computer.
+
+Port 8090
+
+# Address on which the server is bound. Only useful if you have
+# several network interfaces.
+
+BindAddress 0.0.0.0
+
+# Number of simultaneous requests that can be handled. Since FFServer
+# is very fast, this limit is determined mainly by your Internet
+# connection speed.
+
+MaxClients 1000
+
+# Access Log file (uses standard Apache log file format)
+# '-' is the standard output
+
+CustomLog -
+
+##################################################################
+# Definition of the live feeds. Each live feed contains one video
+# and/or audio sequence coming from an ffmpeg encoder or another
+# ffserver. This sequence may be encoded simultaneously with several
+# codecs at several resolutions.
+
+<Feed feed1.ffm>
+
+# You must use 'ffmpeg' to send a live feed to ffserver. In this
+# example, you can type:
+#
+# ffmpeg http://localhost:8090/feed1.ffm
+
+# ffserver can also do time shifting. It means that it can stream any
+# previously recorded live stream. The request should contain:
+# "http://xxxx?date=[YYYY-MM-DDT][[HH:]MM:]SS[.m...]".You must specify
+# a path where the feed is stored on disk. You also specify the
+# maximum size of the feed (100M bytes here). Default:
+# File=/tmp/feed_name.ffm FileMaxSize=5M
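+#
+# As an illustration only, a time shifted request for the 'test1.mpg'
+# stream defined below could look like:
+#   http://localhost:8090/test1.mpg?date=2001-07-22T14:00:00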
+
+File /tmp/feed1.ffm
+FileMaxSize 50M
+
+</Feed>
+
+##################################################################
+# Now you can define each stream which will be generated from the
+# original audio and video stream. Each format has a filename (here
+# 'test128.mpg'). FFServer will send this stream when answering a
+# request containing this filename.
+
+<Stream test1.mpg>
+
+# coming from live feed 'feed1'
+Feed feed1.ffm
+
+# Format of the stream : you can choose among:
+# mpeg : MPEG1 multiplexed video and audio
+# mpegvideo : only MPEG1 video
+# mp2 : MPEG audio layer 2
+# mp3 : MPEG audio layer 3 (currently sent as layer 2)
+# rm : Real Networks compatible stream. Multiplexed audio and video.
+# ra : Real Networks compatible stream. Audio only.
+# mpjpeg : Multipart JPEG (works with Netscape without any plugin)
+# jpeg : Generate a single JPEG image.
+# asf : ASF compatible stream (Windows Media Player format). Not finished yet.
+# swf : Macromedia flash(tm) compatible stream
+# avi : AVI format (open divx video, mpeg audio sound)
+# master : special ffmpeg stream used to duplicate a server
+
+Format mpeg
+
+# Bitrate for the audio stream. Codecs usually support only a few different bitrates.
+
+AudioBitRate 32
+
+# Number of audio channels : 1 = mono, 2 = stereo
+
+AudioChannels 1
+
+# Sampling frequency for audio. When using low bitrates, you should
+# lower this frequency to 22050 or 11025. The supported frequencies
+# depend on the selected audio codec.
+
+AudioSampleRate 44100
+
+# Bitrate for the video stream.
+VideoBitRate 64
+
+# Number of frames per second
+VideoFrameRate 3
+
+# Size of the video frame : WxH (default: 160x128)
+# W : width, H : height
+# The following abbreviations are defined: sqcif, qcif, cif, 4cif
+VideoSize 160x128
+
+# transmit only intra frames (useful for low bitrates)
+VideoIntraOnly
+
+# If not intra only, an intra frame is transmitted every VideoGopSize
+# frames. Video synchronization can only begin at an I frame.
+#VideoGopSize 12
+
+# Suppress audio
+#NoAudio
+
+# Suppress video
+#NoVideo
+
+</Stream>
+
+# second mpeg stream with high frame rate
+
+<Stream test2.mpg>
+Feed feed1.ffm
+Format mpegvideo
+VideoBitRate 128
+VideoFrameRate 25
+#VideoSize 352x240
+VideoGopSize 25
+</Stream>
+
+##################################################################
+# A stream coming from a file: you only need to set the input
+# filename and optionally a new format. Supported conversions:
+# avi -> asf
+#
+
+<Stream file.asf>
+
+#File "/tmp/file.avi"
+File "tmp/file.avi"
+# avi must be converted to asf to be streamed
+Format asf
+
+</Stream>
+
+# another file streaming
+<Stream file.mp3>
+
+File "tmp/file.mp3"
+
+</Stream>
+
+##################################################################
+# Another stream : Real with audio only at 32 kbits
+
+<Stream test.ra>
+
+Feed feed1.ffm
+Format rm
+AudioBitRate 32
+NoVideo
+
+</Stream>
+
+##################################################################
+# Another stream : Real with audio and video at 64 kbits
+
+<Stream test.rm>
+
+Feed feed1.ffm
+Format rm
+
+AudioBitRate 32
+VideoBitRate 20
+VideoFrameRate 2
+VideoIntraOnly
+
+</Stream>
+
+##################################################################
+# Another stream : Mpeg audio layer 2 at 64 kbits.
+
+<Stream test.mp2>
+
+Feed feed1.ffm
+Format mp2
+AudioBitRate 64
+AudioSampleRate 44100
+
+</Stream>
+
+<Stream test1.mp2>
+
+Feed feed1.ffm
+Format mp2
+AudioBitRate 32
+AudioSampleRate 16000
+
+</Stream>
+
+##################################################################
+# Another stream : Multipart JPEG
+
+<Stream test.mjpg>
+
+Feed feed1.ffm
+Format mpjpeg
+
+VideoFrameRate 2
+VideoIntraOnly
+
+</Stream>
+
+##################################################################
+# Another stream : Multipart JPEG
+
+<Stream test.jpg>
+
+Feed feed1.ffm
+Format jpeg
+
+# the parameters are chosen here to give the same output as the
+# Multipart JPEG one.
+VideoFrameRate 2
+VideoIntraOnly
+#VideoSize 352x240
+
+</Stream>
+
+##################################################################
+# Another stream : Flash
+
+<Stream test.swf>
+
+Feed feed1.ffm
+Format swf
+
+VideoFrameRate 2
+VideoIntraOnly
+
+</Stream>
+
+
+##################################################################
+# Another stream : ASF compatible
+
+<Stream test.asf>
+
+Feed feed1.ffm
+Format asf
+
+AudioBitRate 64
+AudioSampleRate 44100
+VideoFrameRate 2
+VideoIntraOnly
+
+</Stream>
+
+##################################################################
+# Special stream : server status
+
+<Stream stat.html>
+
+Format status
+
+</Stream>
diff --git a/ffmpeg.c b/ffmpeg.c
new file mode 100644
index 0000000000..d5bc917a42
--- /dev/null
+++ b/ffmpeg.c
@@ -0,0 +1,2079 @@
+/*
+ * FFmpeg main
+ * Copyright (c) 2000,2001 Gerard Lantau
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <errno.h>
+#include <sys/time.h>
+#include <string.h>
+#include <sys/poll.h>
+#include <termios.h>
+#include <ctype.h>
+
+#include "avformat.h"
+
+typedef struct {
+ const char *name;
+ int flags;
+#define HAS_ARG 0x0001
+#define OPT_BOOL 0x0002
+#define OPT_EXPERT 0x0004
+#define OPT_STRING 0x0008
+ union {
+ void (*func_arg)();
+ int *int_arg;
+ char **str_arg;
+ } u;
+ const char *help;
+ const char *argname;
+} OptionDef;
+
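+/* Illustrative (hypothetical) example of an entry of the 'options[]'
+ table declared below: an option taking an argument could look like
+ { "b", HAS_ARG, {opt_video_bitrate}, "set video bitrate (in kbit/s)", "bitrate" }
+ while a boolean option would presumably set OPT_BOOL and point
+ u.int_arg at the int flag to toggle. */
+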
+/* select an input stream for an output stream */
+typedef struct AVStreamMap {
+ int file_index;
+ int stream_index;
+} AVStreamMap;
+
+extern const OptionDef options[];
+
+void show_help(void);
+
+#define MAX_FILES 20
+
+static AVFormatContext *input_files[MAX_FILES];
+static int nb_input_files = 0;
+
+static AVFormatContext *output_files[MAX_FILES];
+static int nb_output_files = 0;
+
+static AVStreamMap stream_maps[MAX_FILES];
+static int nb_stream_maps;
+
+static AVFormat *file_format;
+static int frame_width = 160;
+static int frame_height = 128;
+static int frame_rate = 25 * FRAME_RATE_BASE;
+static int video_bit_rate = 200000;
+static int video_qscale = 0;
+static int video_disable = 0;
+static int video_codec_id = CODEC_ID_NONE;
+static int same_quality = 0;
+
+static int gop_size = 12;
+static int intra_only = 0;
+static int audio_sample_rate = 44100;
+static int audio_bit_rate = 64000;
+static int audio_disable = 0;
+static int audio_channels = 1;
+static int audio_codec_id = CODEC_ID_NONE;
+
+static INT64 recording_time = 0;
+static int file_overwrite = 0;
+static char *str_title = NULL;
+static char *str_author = NULL;
+static char *str_copyright = NULL;
+static char *str_comment = NULL;
+
+typedef struct AVOutputStream {
+ int file_index; /* file index */
+ int index; /* stream index in the output file */
+ int source_index; /* AVInputStream index */
+ AVStream *st; /* stream in the output file */
+ int encoding_needed; /* true if encoding needed for this stream */
+
+ int fifo_packet_rptr; /* read index in the corresponding
+ avinputstream packet fifo */
+ /* video only */
+ AVPicture pict_tmp; /* temporary image for resizing */
+ int video_resample;
+ ImgReSampleContext *img_resample_ctx; /* for image resampling */
+
+ /* audio only */
+ int audio_resample;
+ ReSampleContext *resample; /* for audio resampling */
+ FifoBuffer fifo; /* for compression: one audio fifo per codec */
+} AVOutputStream;
+
+typedef struct AVInputStream {
+ int file_index;
+ int index;
+ AVStream *st;
+ int discard; /* true if stream data should be discarded */
+ int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
+ INT64 pts; /* current pts */
+ int frame_number; /* current frame */
+ INT64 sample_index; /* current sample */
+} AVInputStream;
+
+typedef struct AVInputFile {
+ int eof_reached; /* true if eof reached */
+ int ist_index; /* index of first stream in ist_table */
+ int buffer_size; /* current total buffer size */
+ int buffer_size_max; /* buffer size at which we consider we can stop
+ buffering */
+} AVInputFile;
+
+/* init terminal so that we can grab keys */
+static struct termios oldtty;
+
+static void term_exit(void)
+{
+ tcsetattr (0, TCSANOW, &oldtty);
+}
+
+static void term_init(void)
+{
+ struct termios tty;
+
+ tcgetattr (0, &tty);
+ oldtty = tty;
+
+ tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
+ |INLCR|IGNCR|ICRNL|IXON);
+ tty.c_oflag |= OPOST;
+ tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
+ tty.c_cflag &= ~(CSIZE|PARENB);
+ tty.c_cflag |= CS8;
+ tty.c_cc[VMIN] = 1;
+ tty.c_cc[VTIME] = 0;
+
+ tcsetattr (0, TCSANOW, &tty);
+
+ atexit(term_exit);
+}
+
+/* read a key without blocking */
+static int read_key(void)
+{
+ struct timeval tv;
+ int n;
+ unsigned char ch;
+ fd_set rfds;
+
+ FD_ZERO(&rfds);
+ FD_SET(0, &rfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ n = select(1, &rfds, NULL, NULL, &tv);
+ if (n > 0) {
+ if (read(0, &ch, 1) == 1)
+ return ch;
+ }
+ return -1;
+}
+
+#define AUDIO_FIFO_SIZE 8192
+
+/* main loop for grabbing */
+int av_grab(AVFormatContext *s)
+{
+ UINT8 audio_buf[AUDIO_FIFO_SIZE/2];
+ UINT8 audio_buf1[AUDIO_FIFO_SIZE/2];
+ UINT8 audio_out[AUDIO_FIFO_SIZE/2];
+ UINT8 video_buffer[128*1024];
+ char buf[256];
+ short *samples;
+ URLContext *audio_handle = NULL, *video_handle = NULL;
+ int ret;
+ AVCodecContext *enc, *first_video_enc = NULL;
+ int frame_size, frame_bytes;
+ int use_audio, use_video;
+ int frame_rate, sample_rate, channels;
+ int width, height, frame_number, i, pix_fmt = 0;
+ AVOutputStream *ost_table[s->nb_streams], *ost;
+ UINT8 *picture_in_buf = NULL, *picture_420p = NULL;
+ int audio_fifo_size = 0, picture_size = 0;
+ INT64 time_start;
+
+ /* init output stream info */
+ for(i=0;i<s->nb_streams;i++)
+ ost_table[i] = NULL;
+
+ /* output stream init */
+ for(i=0;i<s->nb_streams;i++) {
+ ost = av_mallocz(sizeof(AVOutputStream));
+ if (!ost)
+ goto fail;
+ ost->index = i;
+ ost->st = s->streams[i];
+ ost_table[i] = ost;
+ }
+
+ use_audio = 0;
+ use_video = 0;
+ frame_rate = 0;
+ sample_rate = 0;
+ frame_size = 0;
+ channels = 1;
+ width = 0;
+ height = 0;
+ frame_number = 0;
+
+ for(i=0;i<s->nb_streams;i++) {
+ AVCodec *codec;
+
+ ost = ost_table[i];
+ enc = &ost->st->codec;
+ codec = avcodec_find_encoder(enc->codec_id);
+ if (!codec) {
+ fprintf(stderr, "Unknown codec\n");
+ return -1;
+ }
+ if (avcodec_open(enc, codec) < 0) {
+ fprintf(stderr, "Incorrect encode parameters\n");
+ return -1;
+ }
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ use_audio = 1;
+ if (enc->sample_rate > sample_rate)
+ sample_rate = enc->sample_rate;
+ if (enc->frame_size > frame_size)
+ frame_size = enc->frame_size;
+ if (enc->channels > channels)
+ channels = enc->channels;
+ break;
+ case CODEC_TYPE_VIDEO:
+ if (!first_video_enc)
+ first_video_enc = enc;
+ use_video = 1;
+ if (enc->frame_rate > frame_rate)
+ frame_rate = enc->frame_rate;
+ if (enc->width > width)
+ width = enc->width;
+ if (enc->height > height)
+ height = enc->height;
+ break;
+ }
+ }
+
+ /* audio */
+ samples = NULL;
+ if (use_audio) {
+ snprintf(buf, sizeof(buf), "audio:%d,%d", sample_rate, channels);
+ ret = url_open(&audio_handle, buf, URL_RDONLY);
+ if (ret < 0) {
+ fprintf(stderr, "Could not open audio device: disabling audio capture\n");
+ use_audio = 0;
+ } else {
+ URLFormat f;
+ /* read back exact grab parameters */
+ if (url_getformat(audio_handle, &f) < 0) {
+ fprintf(stderr, "could not read back audio grab parameters\n");
+ goto fail;
+ }
+ sample_rate = f.sample_rate;
+ channels = f.channels;
+ audio_fifo_size = ((AUDIO_FIFO_SIZE / 2) / audio_handle->packet_size) *
+ audio_handle->packet_size;
+ fprintf(stderr, "Audio sampling: %d Hz, %s\n",
+ sample_rate, channels == 2 ? "stereo" : "mono");
+ }
+ }
+
+ /* video */
+ if (use_video) {
+ snprintf(buf, sizeof(buf), "video:%d,%d,%f",
+ width, height, (float)frame_rate / FRAME_RATE_BASE);
+
+ ret = url_open(&video_handle, buf, URL_RDONLY);
+ if (ret < 0) {
+ fprintf(stderr,"Could not init video4linux capture: disabling video capture\n");
+ use_video = 0;
+ } else {
+ URLFormat f;
+ const char *pix_fmt_str;
+ /* read back exact grab parameters */
+ if (url_getformat(video_handle, &f) < 0) {
+ fprintf(stderr, "could not read back video grab parameters\n");
+ goto fail;
+ }
+ width = f.width;
+ height = f.height;
+ pix_fmt = f.pix_fmt;
+ switch(pix_fmt) {
+ case PIX_FMT_YUV420P:
+ pix_fmt_str = "420P";
+ break;
+ case PIX_FMT_YUV422:
+ pix_fmt_str = "422";
+ break;
+ case PIX_FMT_RGB24:
+ pix_fmt_str = "RGB24";
+ break;
+ case PIX_FMT_BGR24:
+ pix_fmt_str = "BGR24";
+ break;
+ default:
+ pix_fmt_str = "???";
+ break;
+ }
+ picture_size = video_handle->packet_size;
+ picture_in_buf = malloc(picture_size);
+ if (!picture_in_buf)
+ goto fail;
+ /* allocate a temporary picture if not grabbing in 420P format */
+ if (pix_fmt != PIX_FMT_YUV420P) {
+ picture_420p = malloc((width * height * 3) / 2);
+ if (!picture_420p)
+ goto fail;
+ }
+ fprintf(stderr, "Video sampling: %dx%d, %s format, %0.2f fps\n",
+ width, height, pix_fmt_str, (float)frame_rate / FRAME_RATE_BASE);
+ }
+ }
+
+ if (!use_video && !use_audio) {
+ fprintf(stderr,"Could not open grab devices : exiting\n");
+ exit(1);
+ }
+
+ /* init built in conversion functions */
+ for(i=0;i<s->nb_streams;i++) {
+ ost = ost_table[i];
+ enc = &ost->st->codec;
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ ost->audio_resample = 0;
+ if ((enc->channels != channels ||
+ enc->sample_rate != sample_rate)) {
+ ost->audio_resample = 1;
+ ost->resample = audio_resample_init(enc->channels, channels,
+ enc->sample_rate, sample_rate);
+ }
+ if (fifo_init(&ost->fifo, (2 * audio_fifo_size * enc->sample_rate) /
+ sample_rate))
+ goto fail;
+ break;
+ case CODEC_TYPE_VIDEO:
+ ost->video_resample = 0;
+ if (enc->width != width ||
+ enc->height != height) {
+ UINT8 *buf;
+ ost->video_resample = 1;
+ buf = malloc((enc->width * enc->height * 3) / 2);
+ if (!buf)
+ goto fail;
+ ost->pict_tmp.data[0] = buf;
+ ost->pict_tmp.data[1] = buf + enc->width * enc->height;
+ ost->pict_tmp.data[2] = ost->pict_tmp.data[1] + (enc->width * enc->height) / 4;
+ ost->pict_tmp.linesize[0] = enc->width;
+ ost->pict_tmp.linesize[1] = enc->width / 2;
+ ost->pict_tmp.linesize[2] = enc->width / 2;
+ ost->img_resample_ctx = img_resample_init(
+ ost->st->codec.width, ost->st->codec.height,
+ width, height);
+ }
+ }
+ }
+
+ fprintf(stderr, "Press [q] to stop encoding\n");
+
+ s->format->write_header(s);
+ time_start = gettime();
+ term_init();
+
+ for(;;) {
+ /* if 'q' pressed, exits */
+ if (read_key() == 'q')
+ break;
+
+ /* read & compress audio frames */
+ if (use_audio) {
+ int ret, nb_samples, nb_samples_out;
+ UINT8 *buftmp;
+
+ for(;;) {
+ ret = url_read(audio_handle, audio_buf, audio_fifo_size);
+ if (ret <= 0)
+ break;
+ /* fill each codec fifo by doing the right sample
+ rate conversion. This is not optimal because we
+ do too much work, but it is easy to do */
+ nb_samples = ret / (channels * 2);
+ for(i=0;i<s->nb_streams;i++) {
+ ost = ost_table[i];
+ enc = &ost->st->codec;
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ /* rate & stereo conversion */
+ if (!ost->audio_resample) {
+ buftmp = audio_buf;
+ nb_samples_out = nb_samples;
+ } else {
+ buftmp = audio_buf1;
+ nb_samples_out = audio_resample(ost->resample,
+ (short *)buftmp, (short *)audio_buf,
+ nb_samples);
+ }
+ fifo_write(&ost->fifo, buftmp, nb_samples_out * enc->channels * 2,
+ &ost->fifo.wptr);
+ }
+ }
+
+ /* compress as many frames as possible with each audio codec */
+ for(i=0;i<s->nb_streams;i++) {
+ ost = ost_table[i];
+ enc = &ost->st->codec;
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ frame_bytes = enc->frame_size * 2 * enc->channels;
+
+ while (fifo_read(&ost->fifo, audio_buf,
+ frame_bytes, &ost->fifo.rptr) == 0) {
+ ret = avcodec_encode_audio(enc,
+ audio_out, sizeof(audio_out),
+ (short *)audio_buf);
+ s->format->write_packet(s, ost->index, audio_out, ret);
+ }
+ }
+ }
+ }
+ }
+
+ if (use_video) {
+ AVPicture *picture1;
+ AVPicture picture;
+ UINT8 *pict_buffer;
+
+ ret = url_read(video_handle, picture_in_buf, picture_size);
+ if (ret < 0)
+ break;
+ if (pix_fmt != PIX_FMT_YUV420P) {
+ pict_buffer = picture_420p;
+ img_convert_to_yuv420(pict_buffer, picture_in_buf, pix_fmt, width, height);
+ } else {
+ pict_buffer = picture_in_buf;
+ }
+ /* build a picture storage */
+ picture.data[0] = pict_buffer;
+ picture.data[1] = picture.data[0] + width * height;
+ picture.data[2] = picture.data[1] + (width * height) / 4;
+ picture.linesize[0] = width;
+ picture.linesize[1] = width / 2;
+ picture.linesize[2] = width / 2;
+
+ for(i=0;i<s->nb_streams;i++) {
+ ost = ost_table[i];
+ enc = &ost->st->codec;
+ if (enc->codec_type == CODEC_TYPE_VIDEO) {
+ int n1, n2, nb;
+
+ /* feed each codec with its requested frame rate */
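+ /* n1 and n2 are the numbers of frames this codec should have
+ received before and after this grabbed picture at its own
+ frame rate: if nb > 0 the picture is encoded for it, otherwise
+ it is skipped */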
+ n1 = ((INT64)frame_number * enc->frame_rate) / frame_rate;
+ n2 = (((INT64)frame_number + 1) * enc->frame_rate) / frame_rate;
+ nb = n2 - n1;
+ if (nb > 0) {
+ /* resize the picture if needed */
+ if (ost->video_resample) {
+ picture1 = &ost->pict_tmp;
+ img_resample(ost->img_resample_ctx,
+ picture1, &picture);
+ } else {
+ picture1 = &picture;
+ }
+ ret = avcodec_encode_video(enc, video_buffer,
+ sizeof(video_buffer),
+ picture1);
+ s->format->write_packet(s, ost->index, video_buffer, ret);
+ }
+ }
+ }
+ frame_number++;
+ }
+
+ /* write report */
+ {
+ char buf[1024];
+ INT64 total_size;
+ float ti, bitrate;
+ static float last_ti;
+ INT64 ti1;
+
+ total_size = url_ftell(&s->pb);
+ ti1 = gettime() - time_start;
+ /* check elapsed time */
+ if (recording_time && ti1 >= recording_time)
+ break;
+
+ ti = ti1 / 1000000.0;
+ if (ti < 0.1)
+ ti = 0.1;
+ /* display twice per second */
+ if ((ti - last_ti) >= 0.5) {
+ last_ti = ti;
+ bitrate = (int)((total_size * 8) / ti / 1000.0);
+
+ buf[0] = '\0';
+ if (use_video) {
+ sprintf(buf + strlen(buf), "frame=%5d fps=%4.1f q=%2d ",
+ frame_number, (float)frame_number / ti, first_video_enc->quality);
+ }
+
+ sprintf(buf + strlen(buf), "size=%8LdkB time=%0.1f bitrate=%6.1fkbits/s",
+ total_size / 1024, ti, bitrate);
+ fprintf(stderr, "%s \r", buf);
+ fflush(stderr);
+ }
+ }
+ }
+ term_exit();
+
+ for(i=0;i<s->nb_streams;i++) {
+ ost = ost_table[i];
+ enc = &ost->st->codec;
+ avcodec_close(enc);
+ }
+ s->format->write_trailer(s);
+
+ if (audio_handle)
+ url_close(audio_handle);
+
+ if (video_handle)
+ url_close(video_handle);
+
+ /* write report */
+ {
+ float ti, bitrate;
+ INT64 total_size;
+
+ total_size = url_ftell(&s->pb);
+
+ ti = (gettime() - time_start) / 1000000.0;
+ if (ti < 0.1)
+ ti = 0.1;
+ bitrate = (int)((total_size * 8) / ti / 1000.0);
+
+ fprintf(stderr, "\033[K\nTotal time = %0.1f s, %Ld KBytes, %0.1f kbits/s\n",
+ ti, total_size / 1024, bitrate);
+ if (use_video) {
+ fprintf(stderr, "Total frames = %d\n", frame_number);
+ }
+ }
+
+ ret = 0;
+ fail1:
+ if (picture_in_buf)
+ free(picture_in_buf);
+ if (picture_420p)
+ free(picture_420p);
+ for(i=0;i<s->nb_streams;i++) {
+ ost = ost_table[i];
+ if (ost) {
+ if (ost->fifo.buffer)
+ fifo_free(&ost->fifo);
+ if (ost->pict_tmp.data[0])
+ free(ost->pict_tmp.data[0]);
+ if (ost->video_resample)
+ img_resample_close(ost->img_resample_ctx);
+ if (ost->audio_resample)
+ audio_resample_close(ost->resample);
+ free(ost);
+ }
+ }
+ return ret;
+ fail:
+ ret = -ENOMEM;
+ goto fail1;
+}
+
+int read_ffserver_streams(AVFormatContext *s, const char *filename)
+{
+ int i;
+ AVFormatContext *ic;
+
+ ic = av_open_input_file(filename, FFM_PACKET_SIZE);
+ if (!ic)
+ return -EIO;
+ /* copy stream format */
+ s->nb_streams = ic->nb_streams;
+ for(i=0;i<ic->nb_streams;i++) {
+ AVStream *st;
+ st = av_mallocz(sizeof(AVStream));
+ memcpy(st, ic->streams[i], sizeof(AVStream));
+ s->streams[i] = st;
+ }
+
+ av_close_input_file(ic);
+ return 0;
+}
+
+#define MAX_AUDIO_PACKET_SIZE 16384
+
+static void do_audio_out(AVFormatContext *s,
+ AVOutputStream *ost,
+ AVInputStream *ist,
+ unsigned char *buf, int size)
+{
+ UINT8 *buftmp;
+ UINT8 audio_buf[2*MAX_AUDIO_PACKET_SIZE]; /* XXX: allocate it */
+ UINT8 audio_out[MAX_AUDIO_PACKET_SIZE]; /* XXX: allocate it */
+ int size_out, frame_bytes, ret;
+ AVCodecContext *enc;
+
+ enc = &ost->st->codec;
+
+ if (ost->audio_resample) {
+ buftmp = audio_buf;
+ size_out = audio_resample(ost->resample,
+ (short *)buftmp, (short *)buf,
+ size / (ist->st->codec.channels * 2));
+ size_out = size_out * enc->channels * 2;
+ } else {
+ buftmp = buf;
+ size_out = size;
+ }
+
+ /* now encode as many frames as possible */
+ if (enc->codec_id != CODEC_ID_PCM) {
+ /* output resampled raw samples */
+ fifo_write(&ost->fifo, buftmp, size_out,
+ &ost->fifo.wptr);
+
+ frame_bytes = enc->frame_size * 2 * enc->channels;
+
+ while (fifo_read(&ost->fifo, audio_buf, frame_bytes,
+ &ost->fifo.rptr) == 0) {
+ ret = avcodec_encode_audio(enc,
+ audio_out, sizeof(audio_out), (short *)audio_buf);
+ s->format->write_packet(s, ost->index, audio_out, ret);
+ }
+ } else {
+ /* XXX: handle endianness */
+ s->format->write_packet(s, ost->index, buftmp, size_out);
+ }
+}
+
+/* write a picture to a raw mux */
+static void write_picture(AVFormatContext *s, int index, AVPicture *picture, int w, int h)
+{
+ UINT8 *buf, *src, *dest;
+ int size, j, i;
+ /* XXX: not efficient, should add test if we can take
+ directly the AVPicture */
+ size = (w * h) * 3 / 2;
+ buf = malloc(size);
+ dest = buf;
+ for(i=0;i<3;i++) {
+ if (i == 1) {
+ w >>= 1;
+ h >>= 1;
+ }
+ src = picture->data[i];
+ for(j=0;j<h;j++) {
+ memcpy(dest, src, w);
+ dest += w;
+ src += picture->linesize[i];
+ }
+ }
+ s->format->write_packet(s, index, buf, size);
+ free(buf);
+}
+
+
+static void do_video_out(AVFormatContext *s,
+ AVOutputStream *ost,
+ AVInputStream *ist,
+ AVPicture *pict)
+{
+ int n1, n2, nb, i, ret, frame_number;
+ AVPicture *picture;
+ UINT8 video_buffer[128*1024];
+ AVCodecContext *enc;
+
+ enc = &ost->st->codec;
+
+ frame_number = ist->frame_number;
+ /* first drop frame if needed */
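+ /* n1 and n2 count how many output frames should exist before and
+ after this input frame at the output frame rate, so nb is the
+ number of times this picture must be encoded (0 = drop it,
+ more than 1 = duplicate it) */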
+ n1 = ((INT64)frame_number * enc->frame_rate) / ist->st->codec.frame_rate;
+ n2 = (((INT64)frame_number + 1) * enc->frame_rate) / ist->st->codec.frame_rate;
+ nb = n2 - n1;
+ if (nb <= 0)
+ return;
+
+ if (ost->video_resample) {
+ picture = &ost->pict_tmp;
+ img_resample(ost->img_resample_ctx, picture, pict);
+ } else {
+ picture = pict;
+ }
+
+ /* duplicates frame if needed */
+ /* XXX: problem because there is no interleaving */
+ for(i=0;i<nb;i++) {
+ if (enc->codec_id != CODEC_ID_RAWVIDEO) {
+ /* handles sameq here. This is not correct because it may
+ not be a global option */
+ if (same_quality) {
+ ost->st->codec.quality = ist->st->codec.quality;
+ }
+ ret = avcodec_encode_video(&ost->st->codec,
+ video_buffer, sizeof(video_buffer),
+ picture);
+ s->format->write_packet(s, ost->index, video_buffer, ret);
+ } else {
+ write_picture(s, ost->index, picture, enc->width, enc->height);
+ }
+ }
+}
+
+//#define HEX_DUMP
+
+#ifdef HEX_DUMP
+static void hex_dump(UINT8 *buf, int size)
+{
+ int len, i, j, c;
+
+ for(i=0;i<size;i+=16) {
+ len = size - i;
+ if (len > 16)
+ len = 16;
+ printf("%08x ", i);
+ for(j=0;j<16;j++) {
+ if (j < len)
+ printf(" %02x", buf[i+j]);
+ else
+ printf(" ");
+ }
+ printf(" ");
+ for(j=0;j<len;j++) {
+ c = buf[i+j];
+ if (c < ' ' || c > '~')
+ c = '.';
+ printf("%c", c);
+ }
+ printf("\n");
+ }
+}
+#endif
+
+/*
+ * The following code is the main loop of the file converter
+ */
+static int av_encode(AVFormatContext **output_files,
+ int nb_output_files,
+ AVFormatContext **input_files,
+ int nb_input_files,
+ AVStreamMap *stream_maps, int nb_stream_maps)
+{
+ int ret, i, j, k, n, nb_istreams, nb_ostreams = 0;
+ AVFormatContext *is, *os;
+ AVCodecContext *codec, *icodec;
+ AVOutputStream *ost, **ost_table = NULL;
+ AVInputStream *ist, **ist_table = NULL;
+ INT64 min_pts, start_time;
+ AVInputFile file_table[nb_input_files];
+
+ memset(file_table, 0, sizeof(file_table));
+
+ /* input stream init */
+ j = 0;
+ for(i=0;i<nb_input_files;i++) {
+ is = input_files[i];
+ file_table[i].ist_index = j;
+ j += is->nb_streams;
+ }
+ nb_istreams = j;
+
+ ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
+ if (!ist_table)
+ return -ENOMEM;
+
+ for(i=0;i<nb_istreams;i++) {
+ ist = av_mallocz(sizeof(AVInputStream));
+ if (!ist)
+ goto fail;
+ ist_table[i] = ist;
+ }
+ j = 0;
+ for(i=0;i<nb_input_files;i++) {
+ is = input_files[i];
+ for(k=0;k<is->nb_streams;k++) {
+ ist = ist_table[j++];
+ ist->st = is->streams[k];
+ ist->file_index = i;
+ ist->index = k;
+ ist->discard = 1; /* the stream is discarded by default
+ (changed later) */
+ }
+ }
+
+ /* output stream init */
+ nb_ostreams = 0;
+ for(i=0;i<nb_output_files;i++) {
+ os = output_files[i];
+ nb_ostreams += os->nb_streams;
+ }
+ if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
+ fprintf(stderr, "Number of stream maps must match number of output streams\n");
+ exit(1);
+ }
+
+ ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
+ if (!ost_table)
+ goto fail;
+ for(i=0;i<nb_ostreams;i++) {
+ ost = av_mallocz(sizeof(AVOutputStream));
+ if (!ost)
+ goto fail;
+ ost_table[i] = ost;
+ }
+
+ n = 0;
+ for(k=0;k<nb_output_files;k++) {
+ os = output_files[k];
+ for(i=0;i<os->nb_streams;i++) {
+ int found;
+ ost = ost_table[n++];
+ ost->file_index = k;
+ ost->index = i;
+ ost->st = os->streams[i];
+ if (nb_stream_maps > 0) {
+ ost->source_index = file_table[stream_maps[n-1].file_index].ist_index +
+ stream_maps[n-1].stream_index;
+ } else {
+ /* get corresponding input stream index : we select the first one with the right type */
+ found = 0;
+ for(j=0;j<nb_istreams;j++) {
+ ist = ist_table[j];
+ if (ist->discard &&
+ ist->st->codec.codec_type == ost->st->codec.codec_type) {
+ ost->source_index = j;
+ found = 1;
+ }
+ }
+
+ if (!found) {
+ /* try again and reuse existing stream */
+ for(j=0;j<nb_istreams;j++) {
+ ist = ist_table[j];
+ if (ist->st->codec.codec_type == ost->st->codec.codec_type) {
+ ost->source_index = j;
+ found = 1;
+ }
+ }
+ if (!found) {
+ fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
+ ost->file_index, ost->index);
+ exit(1);
+ }
+ }
+ }
+ ist = ist_table[ost->source_index];
+ ist->discard = 0;
+ }
+ }
+
+ /* dump the stream mapping */
+ fprintf(stderr, "Stream mapping:\n");
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ fprintf(stderr, " Stream #%d.%d -> #%d.%d\n",
+ ist_table[ost->source_index]->file_index,
+ ist_table[ost->source_index]->index,
+ ost->file_index,
+ ost->index);
+ }
+
+ /* for each output stream, we compute the right encoding parameters */
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ ist = ist_table[ost->source_index];
+
+ codec = &ost->st->codec;
+ icodec = &ist->st->codec;
+
+ switch(codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ /* check if same codec with same parameters. If so, no
+ reencoding is needed */
+ if (codec->codec_id == icodec->codec_id &&
+ codec->bit_rate == icodec->bit_rate &&
+ codec->sample_rate == icodec->sample_rate &&
+ codec->channels == icodec->channels) {
+ /* no reencoding */
+ } else {
+ if (fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE))
+ goto fail;
+
+ if (codec->channels == icodec->channels &&
+ codec->sample_rate == icodec->sample_rate) {
+ ost->audio_resample = 0;
+ } else {
+ ost->audio_resample = 1;
+ ost->resample = audio_resample_init(codec->channels, icodec->channels,
+ codec->sample_rate,
+ icodec->sample_rate);
+ }
+ ist->decoding_needed = 1;
+ ost->encoding_needed = 1;
+ }
+ break;
+ case CODEC_TYPE_VIDEO:
+ /* check if same codec with same parameters. If so, no
+ reencoding is needed */
+ if (codec->codec_id == icodec->codec_id &&
+ codec->bit_rate == icodec->bit_rate &&
+ codec->frame_rate == icodec->frame_rate &&
+ codec->width == icodec->width &&
+ codec->height == icodec->height) {
+ /* no reencoding */
+ } else {
+ if (codec->width == icodec->width &&
+ codec->height == icodec->height) {
+ ost->video_resample = 0;
+ } else {
+ UINT8 *buf;
+ ost->video_resample = 1;
+ buf = malloc((codec->width * codec->height * 3) / 2);
+ if (!buf)
+ goto fail;
+ ost->pict_tmp.data[0] = buf;
+ ost->pict_tmp.data[1] = ost->pict_tmp.data[0] + (codec->width * codec->height);
+ ost->pict_tmp.data[2] = ost->pict_tmp.data[1] + (codec->width * codec->height) / 4;
+ ost->pict_tmp.linesize[0] = codec->width;
+ ost->pict_tmp.linesize[1] = codec->width / 2;
+ ost->pict_tmp.linesize[2] = codec->width / 2;
+
+ ost->img_resample_ctx = img_resample_init(
+ ost->st->codec.width, ost->st->codec.height,
+ ist->st->codec.width, ist->st->codec.height);
+ }
+ ost->encoding_needed = 1;
+ ist->decoding_needed = 1;
+ }
+ break;
+ }
+ }
+
+ /* open each encoder */
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ if (ost->encoding_needed) {
+ AVCodec *codec;
+ codec = avcodec_find_encoder(ost->st->codec.codec_id);
+ if (!codec) {
+ fprintf(stderr, "Unsupported codec for output stream #%d.%d\n",
+ ost->file_index, ost->index);
+ exit(1);
+ }
+ if (avcodec_open(&ost->st->codec, codec) < 0) {
+ fprintf(stderr, "Error while opening codec for stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height\n",
+ ost->file_index, ost->index);
+ exit(1);
+ }
+ }
+ }
+
+ /* open each decoder */
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ if (ist->decoding_needed) {
+ AVCodec *codec;
+ codec = avcodec_find_decoder(ist->st->codec.codec_id);
+ if (!codec) {
+ fprintf(stderr, "Unsupported codec for input stream #%d.%d\n",
+ ist->file_index, ist->index);
+ exit(1);
+ }
+ if (avcodec_open(&ist->st->codec, codec) < 0) {
+ fprintf(stderr, "Error while opening codec for input stream #%d.%d\n",
+ ist->file_index, ist->index);
+ exit(1);
+ }
+ }
+ }
+
+ /* init pts */
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ ist->pts = 0;
+ ist->frame_number = 0;
+ }
+
+ /* compute buffer size max (should use a complete heuristic) */
+ for(i=0;i<nb_input_files;i++) {
+ file_table[i].buffer_size_max = 2048;
+ }
+
+ /* open files and write file headers */
+ for(i=0;i<nb_output_files;i++) {
+ os = output_files[i];
+ os->format->write_header(os);
+ }
+
+ start_time = gettime();
+ min_pts = 0;
+ for(;;) {
+ int file_index, ist_index;
+ AVPacket pkt;
+ UINT8 *ptr;
+ int len;
+ UINT8 *data_buf;
+ int data_size, got_picture;
+ AVPicture picture;
+ short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
+
+ /* select the input file with the smallest pts */
+ redo:
+ file_index = -1;
+ min_pts = (1ULL << 63) - 1;
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ if (!ist->discard && !file_table[ist->file_index].eof_reached && ist->pts < min_pts) {
+ min_pts = ist->pts;
+ file_index = ist->file_index;
+ }
+ }
+ /* if none, it is finished */
+ if (file_index < 0)
+ break;
+ /* finish if recording time exhausted */
+ if (recording_time > 0 && min_pts >= recording_time)
+ break;
+ /* read a packet from it and output it in the fifo */
+
+ is = input_files[file_index];
+ if (av_read_packet(is, &pkt) < 0) {
+ file_table[file_index].eof_reached = 1;
+ continue;
+ }
+ ist_index = file_table[file_index].ist_index + pkt.stream_index;
+ ist = ist_table[ist_index];
+ if (ist->discard) {
+ continue;
+ }
+
+#ifdef HEX_DUMP
+ printf("stream #%d, size=%d:\n", pkt.stream_index, pkt.size);
+ hex_dump(pkt.data, pkt.size);
+#endif
+
+ // printf("read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
+
+ len = pkt.size;
+ ptr = pkt.data;
+ while (len > 0) {
+
+ /* decode the packet if needed */
+ data_buf = NULL; /* fail safe */
+ data_size = 0;
+ if (ist->decoding_needed) {
+ switch(ist->st->codec.codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (ist->st->codec.codec_id == CODEC_ID_PCM) {
+ /* no need to call a codec */
+ data_buf = ptr;
+ data_size = len;
+ ret = len;
+ } else {
+ ret = avcodec_decode_audio(&ist->st->codec, samples, &data_size,
+ ptr, len);
+ if (ret < 0)
+ goto fail_decode;
+ if (data_size == 0) {
+ /* no audio frame */
+ ptr += ret;
+ len -= ret;
+ continue;
+ }
+ data_buf = (UINT8 *)samples;
+ }
+ break;
+ case CODEC_TYPE_VIDEO:
+ if (ist->st->codec.codec_id == CODEC_ID_RAWVIDEO) {
+ int size;
+ size = (ist->st->codec.width * ist->st->codec.height);
+
+ picture.data[0] = ptr;
+ picture.data[1] = picture.data[0] + size;
+ picture.data[2] = picture.data[1] + size / 4;
+ picture.linesize[0] = ist->st->codec.width;
+ picture.linesize[1] = ist->st->codec.width / 2;
+ picture.linesize[2] = ist->st->codec.width / 2;
+ ret = len;
+ } else {
+ data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
+ ret = avcodec_decode_video(&ist->st->codec,
+ &picture, &got_picture, ptr, len);
+ if (ret < 0) {
+ fail_decode:
+ fprintf(stderr, "Error while decoding stream #%d.%d\n",
+ ist->file_index, ist->index);
+ av_free_packet(&pkt);
+ goto redo;
+ }
+ if (!got_picture) {
+ /* no picture yet */
+ ptr += ret;
+ len -= ret;
+ continue;
+ }
+ }
+ break;
+ default:
+ goto fail_decode;
+ }
+ } else {
+ data_buf = ptr;
+ data_size = len;
+ ret = len;
+ }
+ /* update pts */
+ switch(ist->st->codec.codec_type) {
+ case CODEC_TYPE_AUDIO:
+ ist->pts = (INT64)1000000 * ist->sample_index / ist->st->codec.sample_rate;
+ ist->sample_index += data_size / (2 * ist->st->codec.channels);
+ break;
+ case CODEC_TYPE_VIDEO:
+ ist->frame_number++;
+ ist->pts = ((INT64)ist->frame_number * 1000000 * FRAME_RATE_BASE) /
+ ist->st->codec.frame_rate;
+ break;
+ }
+ ptr += ret;
+ len -= ret;
+
+ /* transcode raw format, encode packets and output them */
+
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ if (ost->source_index == ist_index) {
+ os = output_files[ost->file_index];
+
+ if (ost->encoding_needed) {
+ switch(ost->st->codec.codec_type) {
+ case CODEC_TYPE_AUDIO:
+ do_audio_out(os, ost, ist, data_buf, data_size);
+ break;
+ case CODEC_TYPE_VIDEO:
+ do_video_out(os, ost, ist, &picture);
+ break;
+ }
+ } else {
+ /* no reencoding needed : output the packet directly */
+ os->format->write_packet(os, ost->index, data_buf, data_size);
+ }
+ }
+ }
+ }
+ av_free_packet(&pkt);
+
+ /* dump report by using the first video and audio streams */
+ {
+ char buf[1024];
+ AVFormatContext *oc;
+ INT64 total_size, ti;
+ AVCodecContext *enc;
+ int frame_number, vid;
+ double bitrate, ti1;
+ static INT64 last_time;
+
+ if ((min_pts - last_time) >= 500000) {
+ last_time = min_pts;
+
+ oc = output_files[0];
+
+ total_size = url_ftell(&oc->pb);
+
+ buf[0] = '\0';
+ ti = (1ULL << 63) - 1;
+ vid = 0;
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ enc = &ost->st->codec;
+ ist = ist_table[ost->source_index];
+ if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
+ frame_number = ist->frame_number;
+ sprintf(buf + strlen(buf), "frame=%5d q=%2d ",
+ frame_number, enc->quality);
+ vid = 1;
+ }
+ /* compute min pts value */
+ if (!ist->discard && ist->pts < ti) {
+ ti = ist->pts;
+ }
+ }
+
+ ti1 = ti / 1000000.0;
+ if (ti1 < 0.1)
+ ti1 = 0.1;
+ bitrate = (double)(total_size * 8) / ti1 / 1000.0;
+
+ sprintf(buf + strlen(buf), "size=%8LdkB time=%0.1f bitrate=%6.1fkbits/s",
+ total_size / 1024, ti1, bitrate);
+
+ fprintf(stderr, "%s \r", buf);
+ fflush(stderr);
+ }
+ }
+ }
+
+ /* dump report by using the first video and audio streams */
+ {
+ char buf[1024];
+ AVFormatContext *oc;
+ INT64 total_size, ti;
+ AVCodecContext *enc;
+ int frame_number, vid;
+ double bitrate, ti1;
+
+ oc = output_files[0];
+
+ total_size = url_ftell(&oc->pb);
+
+ buf[0] = '\0';
+ ti = (1ULL << 63) - 1;
+ vid = 0;
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ enc = &ost->st->codec;
+ ist = ist_table[ost->source_index];
+ if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
+ frame_number = ist->frame_number;
+ sprintf(buf + strlen(buf), "frame=%5d q=%2d ",
+ frame_number, enc->quality);
+ vid = 1;
+ }
+ /* compute min pts value */
+ if (!ist->discard && ist->pts < ti) {
+ ti = ist->pts;
+ }
+ }
+
+ ti1 = ti / 1000000.0;
+ if (ti1 < 0.1)
+ ti1 = 0.1;
+ bitrate = (double)(total_size * 8) / ti1 / 1000.0;
+
+ sprintf(buf + strlen(buf), "size=%8LdkB time=%0.1f bitrate=%6.1fkbits/s",
+ total_size / 1024, ti1, bitrate);
+
+ fprintf(stderr, "%s \n", buf);
+ }
+ /* close each encoder */
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ if (ost->encoding_needed) {
+ avcodec_close(&ost->st->codec);
+ }
+ }
+
+ /* close each decoder */
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ if (ist->decoding_needed) {
+ avcodec_close(&ist->st->codec);
+ }
+ }
+
+
+ /* write the trailer if needed and close file */
+ for(i=0;i<nb_output_files;i++) {
+ os = output_files[i];
+ os->format->write_trailer(os);
+ }
+ /* finished ! */
+
+ ret = 0;
+ fail1:
+ if (ist_table) {
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ if (ist) {
+ free(ist);
+ }
+ }
+ free(ist_table);
+ }
+ if (ost_table) {
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ if (ost) {
+ if (ost->pict_tmp.data[0])
+ free(ost->pict_tmp.data[0]);
+ if (ost->video_resample)
+ img_resample_close(ost->img_resample_ctx);
+ if (ost->audio_resample)
+ audio_resample_close(ost->resample);
+ free(ost);
+ }
+ }
+ free(ost_table);
+ }
+ return ret;
+ fail:
+ ret = -ENOMEM;
+ goto fail1;
+}
+
+#if 0
+int file_read(const char *filename)
+{
+ URLContext *h;
+ unsigned char buffer[1024];
+ int len, i;
+
+ if (url_open(&h, filename, O_RDONLY) < 0) {
+ printf("could not open '%s'\n", filename);
+ return -1;
+ }
+ for(;;) {
+ len = url_read(h, buffer, sizeof(buffer));
+ if (len <= 0)
+ break;
+ for(i=0;i<len;i++) putchar(buffer[i]);
+ }
+ url_close(h);
+ return 0;
+}
+#endif
+
+void show_licence(void)
+{
+ printf(
+ "ffmpeg version " FFMPEG_VERSION "\n"
+ "Copyright (c) 2000,2001 Gerard Lantau\n"
+ "This program is free software; you can redistribute it and/or modify\n"
+ "it under the terms of the GNU General Public License as published by\n"
+ "the Free Software Foundation; either version 2 of the License, or\n"
+ "(at your option) any later version.\n"
+ "\n"
+ "This program is distributed in the hope that it will be useful,\n"
+ "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
+ "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
+ "GNU General Public License for more details.\n"
+ "\n"
+ "You should have received a copy of the GNU General Public License\n"
+ "along with this program; if not, write to the Free Software\n"
+ "Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n"
+ );
+ exit(1);
+}
+
+void opt_format(const char *arg)
+{
+ AVFormat *f;
+ f = first_format;
+ while (f != NULL && strcmp(f->name, arg) != 0) f = f->next;
+ if (f == NULL) {
+ fprintf(stderr, "Invalid format: %s\n", arg);
+ exit(1);
+ }
+ file_format = f;
+}
+
+void opt_video_bitrate(const char *arg)
+{
+ video_bit_rate = atoi(arg) * 1000;
+}
+
+void opt_frame_rate(const char *arg)
+{
+ frame_rate = (int)(strtod(arg, 0) * FRAME_RATE_BASE);
+}
+
+void opt_frame_size(const char *arg)
+{
+ parse_image_size(&frame_width, &frame_height, arg);
+ if (frame_width <= 0 || frame_height <= 0) {
+ fprintf(stderr, "Incorrect frame size\n");
+ exit(1);
+ }
+ if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
+ fprintf(stderr, "Frame size must be a multiple of 2\n");
+ exit(1);
+ }
+}
+
+void opt_gop_size(const char *arg)
+{
+ gop_size = atoi(arg);
+}
+
+void opt_qscale(const char *arg)
+{
+ video_qscale = atoi(arg);
+ if (video_qscale < 0 ||
+ video_qscale > 31) {
+ fprintf(stderr, "qscale must be >= 1 and <= 31\n");
+ exit(1);
+ }
+}
+
+
+void opt_audio_bitrate(const char *arg)
+{
+ audio_bit_rate = atoi(arg) * 1000;
+}
+
+void opt_audio_rate(const char *arg)
+{
+ audio_sample_rate = atoi(arg);
+}
+
+void opt_audio_channels(const char *arg)
+{
+ audio_channels = atoi(arg);
+}
+
+void opt_video_device(const char *arg)
+{
+ v4l_device = strdup(arg);
+}
+
+void opt_audio_device(const char *arg)
+{
+ audio_device = strdup(arg);
+}
+
+void opt_audio_codec(const char *arg)
+{
+ AVCodec *p;
+
+ p = first_avcodec;
+ while (p) {
+ if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_AUDIO)
+ break;
+ p = p->next;
+ }
+ if (p == NULL) {
+ fprintf(stderr, "Unknown audio codec '%s'\n", arg);
+ exit(1);
+ } else {
+ audio_codec_id = p->id;
+ }
+}
+
+const char *motion_str[] = {
+ "zero",
+ "full",
+ "log",
+ "phods",
+ NULL,
+};
+
+void opt_motion_estimation(const char *arg)
+{
+ const char **p;
+ p = motion_str;
+ for(;;) {
+ if (!*p) {
+ fprintf(stderr, "Unknown motion estimation method '%s'\n", arg);
+ exit(1);
+ }
+ if (!strcmp(*p, arg))
+ break;
+ p++;
+ }
+ motion_estimation_method = p - motion_str;
+}
+
+void opt_video_codec(const char *arg)
+{
+ AVCodec *p;
+
+ p = first_avcodec;
+ while (p) {
+ if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_VIDEO)
+ break;
+ p = p->next;
+ }
+ if (p == NULL) {
+ fprintf(stderr, "Unknown video codec '%s'\n", arg);
+ exit(1);
+ } else {
+ video_codec_id = p->id;
+ }
+}
+
+void opt_map(const char *arg)
+{
+ AVStreamMap *m;
+ const char *p;
+
+ p = arg;
+ m = &stream_maps[nb_stream_maps++];
+
+ m->file_index = strtol(arg, (char **)&p, 0);
+ if (*p)
+ p++;
+ m->stream_index = strtol(p, (char **)&p, 0);
+}
+
+void opt_recording_time(const char *arg)
+{
+ recording_time = parse_date(arg, 1);
+}
+
+/* return the number of packets read to find the codec parameters */
+int find_codec_parameters(AVFormatContext *ic)
+{
+ int val, i, count, ret, got_picture, size;
+ AVCodec *codec;
+ AVCodecContext *enc;
+ AVStream *st;
+ AVPacket *pkt;
+ AVPicture picture;
+ AVPacketList *pktl, **ppktl;
+ short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
+ UINT8 *ptr;
+
+ count = 0;
+ ppktl = &ic->packet_buffer;
+ for(;;) {
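+ /* check if all streams already have their parameters set */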
+ for(i=0;i<ic->nb_streams;i++) {
+ enc = &ic->streams[i]->codec;
+
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ val = enc->sample_rate;
+ break;
+ case CODEC_TYPE_VIDEO:
+ val = enc->width;
+ break;
+ default:
+ val = 1;
+ break;
+ }
+ /* if no parameters are supplied, then we should read them from
+ the stream */
+ if (val == 0)
+ break;
+ }
+ if (i == ic->nb_streams) {
+ ret = count;
+ break;
+ }
+
+ if (count == 0) {
+ /* open each codec */
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ codec = avcodec_find_decoder(st->codec.codec_id);
+ if (codec == NULL) {
+ ret = -1;
+ goto the_end;
+ }
+ avcodec_open(&st->codec, codec);
+ }
+ }
+ pktl = av_mallocz(sizeof(AVPacketList));
+ if (!pktl) {
+ ret = -1;
+ break;
+ }
+
+ /* add the packet in the buffered packet list */
+ *ppktl = pktl;
+ ppktl = &pktl->next;
+
+ pkt = &pktl->pkt;
+ if (ic->format->read_packet(ic, pkt) < 0) {
+ ret = -1;
+ break;
+ }
+ st = ic->streams[pkt->stream_index];
+
+ /* decode the data and update codec parameters */
+ ptr = pkt->data;
+ size = pkt->size;
+ while (size > 0) {
+ switch(st->codec.codec_type) {
+ case CODEC_TYPE_VIDEO:
+ ret = avcodec_decode_video(&st->codec, &picture, &got_picture, ptr, size);
+ break;
+ case CODEC_TYPE_AUDIO:
+ ret = avcodec_decode_audio(&st->codec, samples, &got_picture, ptr, size);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ if (ret < 0) {
+ ret = -1;
+ goto the_end;
+ }
+ if (got_picture)
+ break;
+ ptr += ret;
+ size -= ret;
+ }
+
+ count++;
+ }
+ the_end:
+ if (count > 0) {
+ /* close each codec */
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ avcodec_close(&st->codec);
+ }
+ }
+ return ret;
+}
+
+
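+/* open an input file: probe its format, read the header and, if needed,
+ decode a few frames to get the codec parameters */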
+void opt_input_file(const char *filename)
+{
+ AVFormatContext *ic;
+ AVFormatParameters params, *ap = &params;
+ URLFormat url_format;
+ AVFormat *fmt;
+ int err, i, ret;
+
+ ic = av_mallocz(sizeof(AVFormatContext));
+ strcpy(ic->filename, filename);
+ /* first format guess to know if we must open file */
+ fmt = file_format;
+ if (!fmt)
+ fmt = guess_format(NULL, filename, NULL);
+
+ if (fmt == NULL || !(fmt->flags & AVFMT_NOFILE)) {
+ /* open file */
+ if (url_fopen(&ic->pb, filename, URL_RDONLY) < 0) {
+ fprintf(stderr, "Could not open '%s'\n", filename);
+ exit(1);
+ }
+
+ /* find format and set default parameters */
+ fmt = file_format;
+ err = url_getformat(url_fileno(&ic->pb), &url_format);
+ if (err >= 0) {
+ if (!fmt)
+ fmt = guess_format(url_format.format_name, NULL, NULL);
+ ap->sample_rate = url_format.sample_rate;
+ ap->frame_rate = url_format.frame_rate;
+ ap->channels = url_format.channels;
+ ap->width = url_format.width;
+ ap->height = url_format.height;
+ ap->pix_fmt = url_format.pix_fmt;
+ } else {
+ if (!fmt)
+ fmt = guess_format(NULL, filename, NULL);
+ memset(ap, 0, sizeof(*ap));
+ }
+ } else {
+ memset(ap, 0, sizeof(*ap));
+ }
+
+ if (!fmt || !fmt->read_header) {
+ fprintf(stderr, "%s: Unknown file format\n", filename);
+ exit(1);
+ }
+ ic->format = fmt;
+
+ /* get default parameters from command line */
+ if (!ap->sample_rate)
+ ap->sample_rate = audio_sample_rate;
+ if (!ap->channels)
+ ap->channels = audio_channels;
+
+ if (!ap->frame_rate)
+ ap->frame_rate = frame_rate;
+ if (!ap->width)
+ ap->width = frame_width;
+ if (!ap->height)
+ ap->height = frame_height;
+
+ err = ic->format->read_header(ic, ap);
+ if (err < 0) {
+ fprintf(stderr, "%s: Error while parsing header\n", filename);
+ exit(1);
+ }
+
+ /* If there is not enough info for the codecs, we decode the first
+ frames to get it (used in the MPEG case, for example) */
+ ret = find_codec_parameters(ic);
+ if (ret < 0) {
+ fprintf(stderr, "%s: could not find codec parameters\n", filename);
+ exit(1);
+ }
+
+ /* update the current parameters so that they match those of the input stream */
+ for(i=0;i<ic->nb_streams;i++) {
+ AVCodecContext *enc = &ic->streams[i]->codec;
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ audio_channels = enc->channels;
+ audio_sample_rate = enc->sample_rate;
+ break;
+ case CODEC_TYPE_VIDEO:
+ frame_height = enc->height;
+ frame_width = enc->width;
+ frame_rate = enc->frame_rate;
+ break;
+ }
+ }
+
+ input_files[nb_input_files] = ic;
+ /* dump the input format information */
+ dump_format(ic, nb_input_files, filename, 0);
+ nb_input_files++;
+ file_format = NULL;
+}
+
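+/* create an output file: build its streams from the current option values
+ (or fetch them from ffserver for ffm over http) and open the file */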
+void opt_output_file(const char *filename)
+{
+ AVStream *st;
+ AVFormatContext *oc;
+ int use_video, use_audio, nb_streams;
+ int codec_id;
+
+ if (!strcmp(filename, "-"))
+ filename = "pipe:";
+
+ oc = av_mallocz(sizeof(AVFormatContext));
+
+ if (!file_format) {
+ file_format = guess_format(NULL, filename, NULL);
+ if (!file_format)
+ file_format = &mpeg_mux_format;
+ }
+
+ oc->format = file_format;
+
+ if (!strcmp(file_format->name, "ffm") &&
+ strstart(filename, "http:", NULL)) {
+ /* special case for files sent to ffserver: we get the stream
+ parameters from ffserver */
+ if (read_ffserver_streams(oc, filename) < 0) {
+ fprintf(stderr, "Could not read stream parameters from '%s'\n", filename);
+ exit(1);
+ }
+ } else {
+ use_video = file_format->video_codec != CODEC_ID_NONE;
+ use_audio = file_format->audio_codec != CODEC_ID_NONE;
+
+ if (audio_disable) {
+ use_audio = 0;
+ }
+ if (video_disable) {
+ use_video = 0;
+ }
+
+ nb_streams = 0;
+ if (use_video) {
+ AVCodecContext *video_enc;
+
+ st = av_mallocz(sizeof(AVStream));
+ if (!st) {
+ fprintf(stderr, "Could not alloc stream\n");
+ exit(1);
+ }
+ video_enc = &st->codec;
+
+ codec_id = file_format->video_codec;
+ if (video_codec_id != CODEC_ID_NONE)
+ codec_id = video_codec_id;
+
+ video_enc->codec_id = codec_id;
+ video_enc->codec_type = CODEC_TYPE_VIDEO;
+
+ video_enc->bit_rate = video_bit_rate;
+ video_enc->frame_rate = frame_rate;
+
+ video_enc->width = frame_width;
+ video_enc->height = frame_height;
+ if (!intra_only)
+ video_enc->gop_size = gop_size;
+ else
+ video_enc->gop_size = 0;
+ if (video_qscale || same_quality) {
+ video_enc->flags |= CODEC_FLAG_QSCALE;
+ video_enc->quality = video_qscale;
+ }
+
+ oc->streams[nb_streams] = st;
+ nb_streams++;
+ }
+
+ if (use_audio) {
+ AVCodecContext *audio_enc;
+
+ st = av_mallocz(sizeof(AVStream));
+ if (!st) {
+ fprintf(stderr, "Could not alloc stream\n");
+ exit(1);
+ }
+ audio_enc = &st->codec;
+ codec_id = file_format->audio_codec;
+ if (audio_codec_id != CODEC_ID_NONE)
+ codec_id = audio_codec_id;
+ audio_enc->codec_id = codec_id;
+ audio_enc->codec_type = CODEC_TYPE_AUDIO;
+
+ audio_enc->bit_rate = audio_bit_rate;
+ audio_enc->sample_rate = audio_sample_rate;
+ audio_enc->channels = audio_channels;
+ oc->streams[nb_streams] = st;
+ nb_streams++;
+ }
+
+ oc->nb_streams = nb_streams;
+
+ if (!nb_streams) {
+ fprintf(stderr, "No audio or video selected\n");
+ exit(1);
+ }
+
+ if (str_title)
+ nstrcpy(oc->title, sizeof(oc->title), str_title);
+ if (str_author)
+ nstrcpy(oc->author, sizeof(oc->author), str_author);
+ if (str_copyright)
+ nstrcpy(oc->copyright, sizeof(oc->copyright), str_copyright);
+ if (str_comment)
+ nstrcpy(oc->comment, sizeof(oc->comment), str_comment);
+ }
+
+ output_files[nb_output_files] = oc;
+ /* dump the file content */
+ dump_format(oc, nb_output_files, filename, 1);
+ nb_output_files++;
+
+ strcpy(oc->filename, filename);
+ if (!(oc->format->flags & AVFMT_NOFILE)) {
+ /* test if it already exists to avoid losing precious files */
+ if (!file_overwrite &&
+ (strchr(filename, ':') == NULL ||
+ strstart(filename, "file:", NULL))) {
+ if (url_exist(filename)) {
+ int c;
+
+ printf("File '%s' already exists. Overwrite ? [y/N] ", filename);
+ fflush(stdout);
+ c = getchar();
+ if (toupper(c) != 'Y') {
+ fprintf(stderr, "Not overwriting - exiting\n");
+ exit(1);
+ }
+ }
+ }
+
+ /* open the file */
+ if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
+ fprintf(stderr, "Could not open '%s'\n", filename);
+ exit(1);
+ }
+ }
+
+ /* reset some options */
+ file_format = NULL;
+ audio_disable = 0;
+ video_disable = 0;
+ audio_codec_id = CODEC_ID_NONE;
+ video_codec_id = CODEC_ID_NONE;
+}
+
+void show_formats(void)
+{
+ AVFormat *f;
+ URLProtocol *up;
+ AVCodec *p;
+ const char **pp;
+
+ printf("File formats:\n");
+ printf(" Encoding:");
+ for(f = first_format; f != NULL; f = f->next) {
+ if (f->write_header)
+ printf(" %s", f->name);
+ }
+ printf("\n");
+ printf(" Decoding:");
+ for(f = first_format; f != NULL; f = f->next) {
+ if (f->read_header)
+ printf(" %s", f->name);
+ }
+ printf("\n");
+
+ printf("Codecs:\n");
+ printf(" Encoders:");
+ for(p = first_avcodec; p != NULL; p = p->next) {
+ if (p->encode)
+ printf(" %s", p->name);
+ }
+ printf("\n");
+
+ printf(" Decoders:");
+ for(p = first_avcodec; p != NULL; p = p->next) {
+ if (p->decode)
+ printf(" %s", p->name);
+ }
+ printf("\n");
+
+ printf("Supported file protocols:");
+ for(up = first_protocol; up != NULL; up = up->next)
+ printf(" %s:", up->name);
+ printf("\n");
+
+ printf("Frame size abbreviations: sqcif qcif cif 4cif\n");
+ printf("Motion estimation methods:");
+ pp = motion_str;
+ while (*pp) {
+ printf(" %s", *pp);
+ if ((pp - motion_str) == ME_ZERO)
+ printf("(fastest)");
+ else if ((pp - motion_str) == ME_FULL)
+ printf("(slowest)");
+ else if ((pp - motion_str) == ME_LOG)
+ printf("(default)");
+ pp++;
+ }
+ printf("\n");
+ exit(1);
+}
+
+void show_help(void)
+{
+ const OptionDef *po;
+ int i, expert;
+
+ printf("ffmpeg version " FFMPEG_VERSION ", Copyright (c) 2000,2001 Gerard Lantau\n"
+ "usage: ffmpeg [[options] -i input_file]... {[options] outfile}...\n"
+ "Hyper fast MPEG1/MPEG4/H263/RV and AC3/MPEG audio encoder\n"
+ "\n"
+ "Main options are:\n");
+ for(i=0;i<2;i++) {
+ if (i == 1)
+ printf("\nAdvanced options are:\n");
+ for(po = options; po->name != NULL; po++) {
+ char buf[64];
+ expert = (po->flags & OPT_EXPERT) != 0;
+ if (expert == i) {
+ strcpy(buf, po->name);
+ if (po->flags & HAS_ARG) {
+ strcat(buf, " ");
+ strcat(buf, po->argname);
+ }
+ printf("-%-17s %s\n", buf, po->help);
+ }
+ }
+ }
+
+ exit(1);
+}
+
+const OptionDef options[] = {
+ { "L", 0, {show_licence}, "show license" },
+ { "h", 0, {show_help}, "show help" },
+ { "formats", 0, {show_formats}, "show available formats, codecs, protocols, ..." },
+ { "f", HAS_ARG, {opt_format}, "force format", "fmt" },
+ { "i", HAS_ARG, {opt_input_file}, "input file name", "filename" },
+ { "y", OPT_BOOL, {int_arg:&file_overwrite}, "overwrite output files" },
+ { "map", HAS_ARG | OPT_EXPERT, {opt_map}, "set input stream mapping", "file:stream" },
+ { "t", HAS_ARG, {opt_recording_time}, "set the recording time", "duration" },
+ { "title", HAS_ARG | OPT_STRING, {str_arg: &str_title}, "set the title", "string" },
+ { "author", HAS_ARG | OPT_STRING, {str_arg: &str_author}, "set the author", "string" },
+ { "copyright", HAS_ARG | OPT_STRING, {str_arg: &str_copyright}, "set the copyright", "string" },
+ { "comment", HAS_ARG | OPT_STRING, {str_arg: &str_comment}, "set the comment", "string" },
+ /* video options */
+ { "b", HAS_ARG, {opt_video_bitrate}, "set video bitrate (in kbit/s)", "bitrate" },
+ { "r", HAS_ARG, {opt_frame_rate}, "set frame rate (in Hz)", "rate" },
+ { "s", HAS_ARG, {opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
+ { "g", HAS_ARG | OPT_EXPERT, {opt_gop_size}, "set the group of picture size", "gop_size" },
+ { "intra", OPT_BOOL | OPT_EXPERT, {int_arg: &intra_only}, "use only intra frames"},
+ { "vn", OPT_BOOL, {int_arg: &video_disable}, "disable video" },
+ { "qscale", HAS_ARG | OPT_EXPERT, {opt_qscale}, "use fixed video quantiser scale (VBR)", "q" },
+ { "vd", HAS_ARG | OPT_EXPERT, {opt_video_device}, "set video device", "device" },
+ { "vcodec", HAS_ARG | OPT_EXPERT, {opt_video_codec}, "force video codec", "codec" },
+ { "me", HAS_ARG | OPT_EXPERT, {opt_motion_estimation}, "set motion estimation method",
+ "method" },
+ { "sameq", OPT_BOOL, {int_arg: &same_quality},
+ "use same video quality as source (implies VBR)" },
+ /* audio options */
+ { "ab", HAS_ARG, {opt_audio_bitrate}, "set audio bitrate (in kbit/s)", "bitrate", },
+ { "ar", HAS_ARG, {opt_audio_rate}, "set audio sampling rate (in Hz)", "rate" },
+ { "ac", HAS_ARG, {opt_audio_channels}, "set number of audio channels", "channels" },
+ { "an", OPT_BOOL, {int_arg: &audio_disable}, "disable audio" },
+ { "ad", HAS_ARG | OPT_EXPERT, {opt_audio_device}, "set audio device", "device" },
+ { "acodec", HAS_ARG | OPT_EXPERT, {opt_audio_codec}, "force audio codec", "codec" },
+
+ { NULL, },
+};
+
+int main(int argc, char **argv)
+{
+ int optindex, i;
+ const char *opt, *arg;
+ const OptionDef *po;
+
+ register_all();
+
+ if (argc <= 1)
+ show_help();
+
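+ /* parse the command line: options apply to the next '-i' input or to the following output file */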
+ optindex = 1;
+ while (optindex < argc) {
+ opt = argv[optindex++];
+
+ if (opt[0] == '-' && opt[1] != '\0') {
+ po = options;
+ while (po->name != NULL) {
+ if (!strcmp(opt + 1, po->name))
+ break;
+ po++;
+ }
+ if (!po->name) {
+ fprintf(stderr, "%s: unrecognized option '%s'\n", argv[0], opt);
+ exit(1);
+ }
+ arg = NULL;
+ if (po->flags & HAS_ARG)
+ arg = argv[optindex++];
+ if (po->flags & OPT_STRING) {
+ char *str;
+ str = strdup(arg);
+ *po->u.str_arg = str;
+ } else if (po->flags & OPT_BOOL) {
+ *po->u.int_arg = 1;
+ } else {
+ po->u.func_arg(arg);
+ }
+ } else {
+ opt_output_file(opt);
+ }
+ }
+
+
+ if (nb_input_files == 0) {
+ if (nb_output_files != 1) {
+ fprintf(stderr, "Only one output file supported when grabbing\n");
+ exit(1);
+ }
+ av_grab(output_files[0]);
+ } else {
+ if (nb_output_files <= 0) {
+ fprintf(stderr, "Must supply at least one output file\n");
+ exit(1);
+ }
+ av_encode(output_files, nb_output_files, input_files, nb_input_files,
+ stream_maps, nb_stream_maps);
+ }
+
+ /* close files */
+ for(i=0;i<nb_output_files;i++) {
+ if (!(output_files[i]->format->flags & AVFMT_NOFILE))
+ url_fclose(&output_files[i]->pb);
+ }
+ for(i=0;i<nb_input_files;i++) {
+ if (!(input_files[i]->format->flags & AVFMT_NOFILE))
+ url_fclose(&input_files[i]->pb);
+ }
+
+ return 0;
+}
diff --git a/ffserver.c b/ffserver.c
new file mode 100644
index 0000000000..cfabe30823
--- /dev/null
+++ b/ffserver.c
@@ -0,0 +1,1577 @@
+/*
+ * Multiple format streaming server
+ * Copyright (c) 2000,2001 Gerard Lantau.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <netinet/in.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/poll.h>
+#include <errno.h>
+#include <sys/time.h>
+#include <time.h>
+#include <getopt.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <ctype.h>
+#include <signal.h>
+
+#include "avformat.h"
+
+/* maximum number of simultaneous HTTP connections */
+#define HTTP_MAX_CONNECTIONS 2000
+
+enum HTTPState {
+ HTTPSTATE_WAIT_REQUEST,
+ HTTPSTATE_SEND_HEADER,
+ HTTPSTATE_SEND_DATA_HEADER,
+ HTTPSTATE_SEND_DATA,
+ HTTPSTATE_SEND_DATA_TRAILER,
+ HTTPSTATE_RECEIVE_DATA,
+ HTTPSTATE_WAIT_FEED,
+};
+
+const char *http_state[] = {
+ "WAIT_REQUEST",
+ "SEND_HEADER",
+ "SEND_DATA_HEADER",
+ "SEND_DATA",
+ "SEND_DATA_TRAILER",
+ "RECEIVE_DATA",
+ "WAIT_FEED",
+};
+
+#define IOBUFFER_MAX_SIZE 16384
+
+/* coef for exponential mean for bitrate estimation in statistics */
+#define AVG_COEF 0.9
+
+/* timeouts are in ms */
+#define REQUEST_TIMEOUT (15 * 1000)
+#define SYNC_TIMEOUT (10 * 1000)
+
+/* context associated with one connection */
+typedef struct HTTPContext {
+ enum HTTPState state;
+ int fd; /* socket file descriptor */
+ struct sockaddr_in from_addr; /* origin */
+ struct pollfd *poll_entry; /* used when polling */
+ long timeout;
+ UINT8 buffer[IOBUFFER_MAX_SIZE];
+ UINT8 *buffer_ptr, *buffer_end;
+ int http_error;
+ struct HTTPContext *next;
+ int got_key_frame[MAX_STREAMS]; /* one entry per stream */
+ INT64 data_count;
+ /* feed input */
+ int feed_fd;
+ /* input format handling */
+ AVFormatContext *fmt_in;
+ /* output format handling */
+ struct FFStream *stream;
+ AVFormatContext fmt_ctx;
+ int last_packet_sent; /* true if last data packet was sent */
+} HTTPContext;
+
+/* each generated stream is described here */
+enum StreamType {
+ STREAM_TYPE_LIVE,
+ STREAM_TYPE_STATUS,
+};
+
+/* description of each stream of the ffserver.conf file */
+typedef struct FFStream {
+ enum StreamType stream_type;
+ char filename[1024]; /* stream filename */
+ struct FFStream *feed;
+ AVFormat *fmt;
+ int nb_streams;
+ AVStream *streams[MAX_STREAMS];
+ int feed_streams[MAX_STREAMS]; /* index of streams in the feed */
+ char feed_filename[1024]; /* file name of the feed storage, or
+ input file name for a stream */
+ struct FFStream *next;
+ /* feed specific */
+ int feed_opened; /* true if someone is writing to the feed */
+ int is_feed; /* true if it is a feed */
+ INT64 feed_max_size; /* maximum storage size */
+ INT64 feed_write_index; /* current write position in feed (it wraps round) */
+ INT64 feed_size; /* current size of feed */
+ struct FFStream *next_feed;
+} FFStream;
+
+typedef struct FeedData {
+ long long data_count;
+ float avg_frame_size; /* frame size averaged over last frames with exponential mean */
+} FeedData;
+
+struct sockaddr_in my_addr;
+char logfilename[1024];
+HTTPContext *first_http_ctx;
+FFStream *first_feed; /* contains only feeds */
+FFStream *first_stream; /* contains all streams, including feeds */
+
+static int handle_http(HTTPContext *c, long cur_time);
+static int http_parse_request(HTTPContext *c);
+static int http_send_data(HTTPContext *c);
+static void compute_stats(HTTPContext *c);
+static int open_input_stream(HTTPContext *c, const char *info);
+static int http_start_receive_data(HTTPContext *c);
+static int http_receive_data(HTTPContext *c);
+
+int nb_max_connections;
+int nb_connections;
+
+static long gettime_ms(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv,NULL);
+ return (long long)tv.tv_sec * 1000 + (tv.tv_usec / 1000);
+}
+
+static FILE *logfile = NULL;
+
+static void http_log(char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+
+ if (logfile)
+ vfprintf(logfile, fmt, ap);
+ va_end(ap);
+}
+
+/* main loop of the http server */
+static int http_server(struct sockaddr_in my_addr)
+{
+ int server_fd, tmp, ret;
+ struct sockaddr_in from_addr;
+ struct pollfd poll_table[HTTP_MAX_CONNECTIONS + 1], *poll_entry;
+ HTTPContext *c, **cp;
+ long cur_time;
+
+ server_fd = socket(AF_INET,SOCK_STREAM,0);
+ if (server_fd < 0) {
+ perror ("socket");
+ return -1;
+ }
+
+ tmp = 1;
+ setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp));
+
+ if (bind (server_fd, (struct sockaddr *) &my_addr, sizeof (my_addr)) < 0) {
+ perror ("bind");
+ close(server_fd);
+ return -1;
+ }
+
+ if (listen (server_fd, 5) < 0) {
+ perror ("listen");
+ close(server_fd);
+ return -1;
+ }
+
+ http_log("ffserver started.\n");
+
+ fcntl(server_fd, F_SETFL, O_NONBLOCK);
+ first_http_ctx = NULL;
+ nb_connections = 0;
+ for(;;) {
+ poll_entry = poll_table;
+ poll_entry->fd = server_fd;
+ poll_entry->events = POLLIN;
+ poll_entry++;
+
+ /* wait for events on each HTTP handle */
+ c = first_http_ctx;
+ while (c != NULL) {
+ int fd;
+ fd = c->fd;
+ switch(c->state) {
+ case HTTPSTATE_WAIT_REQUEST:
+ c->poll_entry = poll_entry;
+ poll_entry->fd = fd;
+ poll_entry->events = POLLIN;
+ poll_entry++;
+ break;
+ case HTTPSTATE_SEND_HEADER:
+ case HTTPSTATE_SEND_DATA_HEADER:
+ case HTTPSTATE_SEND_DATA:
+ case HTTPSTATE_SEND_DATA_TRAILER:
+ c->poll_entry = poll_entry;
+ poll_entry->fd = fd;
+ poll_entry->events = POLLOUT;
+ poll_entry++;
+ break;
+ case HTTPSTATE_RECEIVE_DATA:
+ c->poll_entry = poll_entry;
+ poll_entry->fd = fd;
+ poll_entry->events = POLLIN;
+ poll_entry++;
+ break;
+ case HTTPSTATE_WAIT_FEED:
+ /* need to catch errors */
+ c->poll_entry = poll_entry;
+ poll_entry->fd = fd;
+ poll_entry->events = 0;
+ poll_entry++;
+ break;
+ default:
+ c->poll_entry = NULL;
+ break;
+ }
+ c = c->next;
+ }
+
+ /* wait for an event on one connection. We poll at least every
+ second to handle timeouts */
+ do {
+ ret = poll(poll_table, poll_entry - poll_table, 1000);
+ } while (ret == -1);
+
+ cur_time = gettime_ms();
+
+ /* now handle the events */
+
+ cp = &first_http_ctx;
+ while ((*cp) != NULL) {
+ c = *cp;
+ if (handle_http (c, cur_time) < 0) {
+ /* close and free the connection */
+ close(c->fd);
+ if (c->fmt_in)
+ av_close_input_file(c->fmt_in);
+ *cp = c->next;
+ free(c);
+ nb_connections--;
+ } else {
+ cp = &c->next;
+ }
+ }
+
+ /* new connection request ? */
+ poll_entry = poll_table;
+ if (poll_entry->revents & POLLIN) {
+ int fd, len;
+
+ len = sizeof(from_addr);
+ fd = accept(server_fd, (struct sockaddr *)&from_addr,
+ &len);
+ if (fd >= 0) {
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+ /* XXX: should output a warning page when coming
+ close to the connection limit */
+ if (nb_connections >= nb_max_connections) {
+ close(fd);
+ } else {
+ /* add a new connection */
+ c = av_mallocz(sizeof(HTTPContext));
+ c->next = first_http_ctx;
+ first_http_ctx = c;
+ c->fd = fd;
+ c->poll_entry = NULL;
+ c->from_addr = from_addr;
+ c->state = HTTPSTATE_WAIT_REQUEST;
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = c->buffer + IOBUFFER_MAX_SIZE;
+ c->timeout = cur_time + REQUEST_TIMEOUT;
+ nb_connections++;
+ }
+ }
+ }
+ poll_entry++;
+ }
+}
+
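+/* handle an event on one connection; a negative return value means the
+ connection must be closed */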
+static int handle_http(HTTPContext *c, long cur_time)
+{
+ int len;
+
+ switch(c->state) {
+ case HTTPSTATE_WAIT_REQUEST:
+ /* timeout ? */
+ if ((c->timeout - cur_time) < 0)
+ return -1;
+ if (c->poll_entry->revents & (POLLERR | POLLHUP))
+ return -1;
+
+ /* no need to read if no events */
+ if (!(c->poll_entry->revents & POLLIN))
+ return 0;
+ /* read the data */
+ len = read(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR)
+ return -1;
+ } else if (len == 0) {
+ return -1;
+ } else {
+ /* search for end of request. XXX: not fully correct since garbage could come after the end */
+ UINT8 *ptr;
+ c->buffer_ptr += len;
+ ptr = c->buffer_ptr;
+ if ((ptr >= c->buffer + 2 && !memcmp(ptr-2, "\n\n", 2)) ||
+ (ptr >= c->buffer + 4 && !memcmp(ptr-4, "\r\n\r\n", 4))) {
+ /* request found : parse it and reply */
+ if (http_parse_request(c) < 0)
+ return -1;
+ } else if (ptr >= c->buffer_end) {
+ /* request too long: cannot do anything */
+ return -1;
+ }
+ }
+ break;
+
+ case HTTPSTATE_SEND_HEADER:
+ if (c->poll_entry->revents & (POLLERR | POLLHUP))
+ return -1;
+
+ /* no need to read if no events */
+ if (!(c->poll_entry->revents & POLLOUT))
+ return 0;
+ len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ return -1;
+ }
+ } else {
+ c->buffer_ptr += len;
+ if (c->buffer_ptr >= c->buffer_end) {
+ /* if error, exit */
+ if (c->http_error)
+ return -1;
+ /* the whole buffer was sent: synchronize to the incoming stream */
+ c->state = HTTPSTATE_SEND_DATA_HEADER;
+ c->buffer_ptr = c->buffer_end = c->buffer;
+ }
+ }
+ break;
+
+ case HTTPSTATE_SEND_DATA:
+ case HTTPSTATE_SEND_DATA_HEADER:
+ case HTTPSTATE_SEND_DATA_TRAILER:
+ /* no need to read if no events */
+ if (c->poll_entry->revents & (POLLERR | POLLHUP))
+ return -1;
+
+ if (!(c->poll_entry->revents & POLLOUT))
+ return 0;
+ if (http_send_data(c) < 0)
+ return -1;
+ break;
+ case HTTPSTATE_RECEIVE_DATA:
+ /* no need to read if no events */
+ if (c->poll_entry->revents & (POLLERR | POLLHUP))
+ return -1;
+ if (!(c->poll_entry->revents & POLLIN))
+ return 0;
+ if (http_receive_data(c) < 0)
+ return -1;
+ break;
+ case HTTPSTATE_WAIT_FEED:
+ /* no need to read if no events */
+ if (c->poll_entry->revents & (POLLERR | POLLHUP))
+ return -1;
+
+ /* nothing to do, we'll be woken up by incoming feed packets */
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+/* parse http request and prepare header */
+static int http_parse_request(HTTPContext *c)
+{
+ char *p;
+ int post;
+ char cmd[32];
+ char info[1024], *filename;
+ char url[1024], *q;
+ char protocol[32];
+ char msg[1024];
+ const char *mime_type;
+ FFStream *stream;
+
+ p = c->buffer;
+ q = cmd;
+ while (!isspace(*p) && *p != '\0') {
+ if ((q - cmd) < sizeof(cmd) - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+ if (!strcmp(cmd, "GET"))
+ post = 0;
+ else if (!strcmp(cmd, "POST"))
+ post = 1;
+ else
+ return -1;
+
+ while (isspace(*p)) p++;
+ q = url;
+ while (!isspace(*p) && *p != '\0') {
+ if ((q - url) < sizeof(url) - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+
+ while (isspace(*p)) p++;
+ q = protocol;
+ while (!isspace(*p) && *p != '\0') {
+ if ((q - protocol) < sizeof(protocol) - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+ if (strcmp(protocol, "HTTP/1.0") && strcmp(protocol, "HTTP/1.1"))
+ return -1;
+
+ /* find the filename and the optional info string in the request */
+ p = url;
+ if (*p == '/')
+ p++;
+ filename = p;
+ p = strchr(p, '?');
+ if (p) {
+ strcpy(info, p);
+ *p = '\0';
+ } else {
+ info[0] = '\0';
+ }
+
+ stream = first_stream;
+ while (stream != NULL) {
+ if (!strcmp(stream->filename, filename))
+ break;
+ stream = stream->next;
+ }
+ if (stream == NULL) {
+ sprintf(msg, "File '%s' not found", url);
+ goto send_error;
+ }
+ c->stream = stream;
+
+ /* log the request (XXX: should be done after the reply so that the real size can be logged) */
+ {
+ char buf1[32], buf2[32], *p;
+ time_t ti;
+ /* XXX: reentrant function ? */
+ p = inet_ntoa(c->from_addr.sin_addr);
+ strcpy(buf1, p);
+ ti = time(NULL);
+ p = ctime(&ti);
+ strcpy(buf2, p);
+ p = buf2 + strlen(p) - 1;
+ if (*p == '\n')
+ *p = '\0';
+ http_log("%s - - [%s] \"%s %s %s\" %d %d\n",
+ buf1, buf2, cmd, url, protocol, 200, 1024);
+ }
+
+ /* XXX: add authentication and IP matching here */
+
+ if (post) {
+ /* if post, it means a feed is being sent */
+ if (!stream->is_feed) {
+ sprintf(msg, "POST command not handled");
+ goto send_error;
+ }
+ if (http_start_receive_data(c) < 0) {
+ sprintf(msg, "could not open feed");
+ goto send_error;
+ }
+ c->http_error = 0;
+ c->state = HTTPSTATE_RECEIVE_DATA;
+ return 0;
+ }
+
+ if (c->stream->stream_type == STREAM_TYPE_STATUS)
+ goto send_stats;
+
+ /* open input stream */
+ if (open_input_stream(c, info) < 0) {
+ sprintf(msg, "Input stream corresponding to '%s' not found", url);
+ goto send_error;
+ }
+
+ /* prepare http header */
+ q = c->buffer;
+ q += sprintf(q, "HTTP/1.0 200 OK\r\n");
+ mime_type = c->stream->fmt->mime_type;
+ if (!mime_type)
+ mime_type = "application/x-octet-stream";
+ q += sprintf(q, "Content-type: %s\r\n", mime_type);
+ q += sprintf(q, "Pragma: no-cache\r\n");
+
+ /* for asf, we need extra headers */
+ if (!strcmp(c->stream->fmt->name,"asf")) {
+ q += sprintf(q, "Pragma: features=broadcast\r\n");
+ }
+ q += sprintf(q, "\r\n");
+
+ /* prepare output buffer */
+ c->http_error = 0;
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = q;
+ c->state = HTTPSTATE_SEND_HEADER;
+ return 0;
+ send_error:
+ c->http_error = 404;
+ q = c->buffer;
+ q += sprintf(q, "HTTP/1.0 404 Not Found\r\n");
+ q += sprintf(q, "Content-type: %s\r\n", "text/html");
+ q += sprintf(q, "\r\n");
+ q += sprintf(q, "<HTML>\n");
+ q += sprintf(q, "<HEAD><TITLE>404 Not Found</TITLE></HEAD>\n");
+ q += sprintf(q, "<BODY>%s</BODY>\n", msg);
+ q += sprintf(q, "</HTML>\n");
+
+ /* prepare output buffer */
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = q;
+ c->state = HTTPSTATE_SEND_HEADER;
+ return 0;
+ send_stats:
+ compute_stats(c);
+ c->http_error = 200; /* horrible : we use this value to avoid
+ going to the send data state */
+ c->state = HTTPSTATE_SEND_HEADER;
+ return 0;
+}
+
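+/* build the HTML status page served for STREAM_TYPE_STATUS streams */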
+static void compute_stats(HTTPContext *c)
+{
+ HTTPContext *c1;
+ FFStream *stream;
+ char *q, *p;
+ time_t ti;
+ int i;
+
+ q = c->buffer;
+ q += sprintf(q, "HTTP/1.0 200 OK\r\n");
+ q += sprintf(q, "Content-type: %s\r\n", "text/html");
+ q += sprintf(q, "Pragma: no-cache\r\n");
+ q += sprintf(q, "\r\n");
+
+ q += sprintf(q, "<HEAD><TITLE>FFServer Status</TITLE></HEAD>\n<BODY>");
+ q += sprintf(q, "<H1>FFServer Status</H1>\n");
+ /* format status */
+ q += sprintf(q, "<H1>Available Streams</H1>\n");
+ q += sprintf(q, "<TABLE>\n");
+ q += sprintf(q, "<TR><TD>Path<TD>Format<TD>Bit rate (kbits/s)<TD>Video<TD>Audio<TD>Feed\n");
+ stream = first_stream;
+ while (stream != NULL) {
+ q += sprintf(q, "<TR><TD><A HREF=\"/%s\">%s</A> ",
+ stream->filename, stream->filename);
+ switch(stream->stream_type) {
+ case STREAM_TYPE_LIVE:
+ {
+ int audio_bit_rate = 0;
+ int video_bit_rate = 0;
+
+ for(i=0;i<stream->nb_streams;i++) {
+ AVStream *st = stream->streams[i];
+ switch(st->codec.codec_type) {
+ case CODEC_TYPE_AUDIO:
+ audio_bit_rate += st->codec.bit_rate;
+ break;
+ case CODEC_TYPE_VIDEO:
+ video_bit_rate += st->codec.bit_rate;
+ break;
+ }
+ }
+ q += sprintf(q, "<TD> %s <TD> %d <TD> %d <TD> %d",
+ stream->fmt->name,
+ (audio_bit_rate + video_bit_rate) / 1000,
+ video_bit_rate / 1000, audio_bit_rate / 1000);
+ if (stream->feed) {
+ q += sprintf(q, "<TD>%s", stream->feed->filename);
+ } else {
+ q += sprintf(q, "<TD>%s", stream->feed_filename);
+ }
+ q += sprintf(q, "\n");
+ }
+ break;
+ default:
+ q += sprintf(q, "<TD> - <TD> - <TD> - <TD> -\n");
+ break;
+ }
+ stream = stream->next;
+ }
+ q += sprintf(q, "</TABLE>\n");
+
+#if 0
+ {
+ float avg;
+ AVCodecContext *enc;
+ char buf[1024];
+
+ /* feed status */
+ stream = first_feed;
+ while (stream != NULL) {
+ q += sprintf(q, "<H1>Feed '%s'</H1>\n", stream->filename);
+ q += sprintf(q, "<TABLE>\n");
+ q += sprintf(q, "<TR><TD>Parameters<TD>Frame count<TD>Size<TD>Avg bitrate (kbits/s)\n");
+ for(i=0;i<stream->nb_streams;i++) {
+ AVStream *st = stream->streams[i];
+ FeedData *fdata = st->priv_data;
+ enc = &st->codec;
+
+ avcodec_string(buf, sizeof(buf), enc);
+ avg = fdata->avg_frame_size * (float)enc->rate * 8.0;
+ if (enc->codec->type == CODEC_TYPE_AUDIO && enc->frame_size > 0)
+ avg /= enc->frame_size;
+ q += sprintf(q, "<TR><TD>%s <TD> %d <TD> %Ld <TD> %0.1f\n",
+ buf, enc->frame_number, fdata->data_count, avg / 1000.0);
+ }
+ q += sprintf(q, "</TABLE>\n");
+ stream = stream->next_feed;
+ }
+ }
+#endif
+
+ /* connection status */
+ q += sprintf(q, "<H1>Connection Status</H1>\n");
+
+ q += sprintf(q, "Number of connections: %d / %d<BR>\n",
+ nb_connections, nb_max_connections);
+
+ q += sprintf(q, "<TABLE>\n");
+ q += sprintf(q, "<TR><TD>#<TD>File<TD>IP<TD>State<TD>Size\n");
+ c1 = first_http_ctx;
+ i = 0;
+ while (c1 != NULL) {
+ i++;
+ p = inet_ntoa(c1->from_addr.sin_addr);
+ q += sprintf(q, "<TR><TD><B>%d</B><TD>%s%s <TD> %s <TD> %s <TD> %Ld\n",
+ i, c1->stream->filename,
+ c1->state == HTTPSTATE_RECEIVE_DATA ? "(input)" : "",
+ p,
+ http_state[c1->state],
+ c1->data_count);
+ c1 = c1->next;
+ }
+ q += sprintf(q, "</TABLE>\n");
+
+ /* date */
+ ti = time(NULL);
+ p = ctime(&ti);
+ q += sprintf(q, "<HR>Generated at %s", p);
+ q += sprintf(q, "</BODY>\n</HTML>\n");
+
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = q;
+}
+
+
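+/* write callback of the output format: copy the muxed data into the
+ connection buffer so that http_send_data() can send it */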
+static void http_write_packet(void *opaque,
+ unsigned char *buf, int size)
+{
+ HTTPContext *c = opaque;
+ if (size > IOBUFFER_MAX_SIZE)
+ abort();
+ memcpy(c->buffer, buf, size);
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = c->buffer + size;
+}
+
+static int open_input_stream(HTTPContext *c, const char *info)
+{
+ char buf[128];
+ char input_filename[1024];
+ AVFormatContext *s;
+ int buf_size;
+ INT64 stream_pos;
+
+ /* find file name */
+ if (c->stream->feed) {
+ strcpy(input_filename, c->stream->feed->feed_filename);
+ buf_size = FFM_PACKET_SIZE;
+ /* compute position (absolute time) */
+ if (find_info_tag(buf, sizeof(buf), "date", info)) {
+ stream_pos = parse_date(buf, 0);
+ } else {
+ stream_pos = gettime();
+ }
+ } else {
+ strcpy(input_filename, c->stream->feed_filename);
+ buf_size = 0;
+ /* compute position (relative time) */
+ if (find_info_tag(buf, sizeof(buf), "date", info)) {
+ stream_pos = parse_date(buf, 1);
+ } else {
+ stream_pos = 0;
+ }
+ }
+ if (input_filename[0] == '\0')
+ return -1;
+
+ /* open stream */
+ s = av_open_input_file(input_filename, buf_size);
+ if (!s)
+ return -1;
+ c->fmt_in = s;
+
+ if (c->fmt_in->format->read_seek) {
+ c->fmt_in->format->read_seek(c->fmt_in, stream_pos);
+ }
+
+ // printf("stream %s opened pos=%0.6f\n", input_filename, stream_pos / 1000000.0);
+ return 0;
+}
+
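+/* fill the connection buffer with the next data to send (header, packet or
+ trailer); returns 1 if the state changed, -1 if the connection must be
+ closed and 0 when data is ready */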
+static int http_prepare_data(HTTPContext *c)
+{
+ int i;
+
+ switch(c->state) {
+ case HTTPSTATE_SEND_DATA_HEADER:
+ memset(&c->fmt_ctx, 0, sizeof(c->fmt_ctx));
+ if (c->stream->feed) {
+ /* open output stream by using specified codecs */
+ c->fmt_ctx.format = c->stream->fmt;
+ c->fmt_ctx.nb_streams = c->stream->nb_streams;
+ for(i=0;i<c->fmt_ctx.nb_streams;i++) {
+ AVStream *st;
+ st = av_mallocz(sizeof(AVStream));
+ c->fmt_ctx.streams[i] = st;
+ memcpy(st, c->stream->streams[i], sizeof(AVStream));
+ st->codec.frame_number = 0; /* XXX: should be done in
+ AVStream, not in codec */
+ c->got_key_frame[i] = 0;
+ }
+ } else {
+ /* open output stream by using codecs in specified file */
+ c->fmt_ctx.format = c->stream->fmt;
+ c->fmt_ctx.nb_streams = c->fmt_in->nb_streams;
+ for(i=0;i<c->fmt_ctx.nb_streams;i++) {
+ AVStream *st;
+ st = av_mallocz(sizeof(AVStream));
+ c->fmt_ctx.streams[i] = st;
+ memcpy(st, c->fmt_in->streams[i], sizeof(AVStream));
+ st->codec.frame_number = 0; /* XXX: should be done in
+ AVStream, not in codec */
+ c->got_key_frame[i] = 0;
+ }
+ }
+ init_put_byte(&c->fmt_ctx.pb, c->buffer, IOBUFFER_MAX_SIZE,
+ 1, c, NULL, http_write_packet, NULL);
+ c->fmt_ctx.pb.is_streamed = 1;
+ /* prepare header */
+ c->fmt_ctx.format->write_header(&c->fmt_ctx);
+ c->state = HTTPSTATE_SEND_DATA;
+ c->last_packet_sent = 0;
+ break;
+ case HTTPSTATE_SEND_DATA:
+ /* find a new packet */
+#if 0
+ fifo_total_size = http_fifo_write_count - c->last_http_fifo_write_count;
+ if (fifo_total_size >= ((3 * FIFO_MAX_SIZE) / 4)) {
+ /* overflow : resync. We suppose that wptr is at this
+ point a pointer to a valid packet */
+ c->rptr = http_fifo.wptr;
+ for(i=0;i<c->fmt_ctx.nb_streams;i++) {
+ c->got_key_frame[i] = 0;
+ }
+ }
+
+ start_rptr = c->rptr;
+ if (fifo_read(&http_fifo, (UINT8 *)&hdr, sizeof(hdr), &c->rptr) < 0)
+ return 0;
+ payload_size = ntohs(hdr.payload_size);
+ payload = malloc(payload_size);
+ if (fifo_read(&http_fifo, payload, payload_size, &c->rptr) < 0) {
+ /* cannot read all the payload */
+ free(payload);
+ c->rptr = start_rptr;
+ return 0;
+ }
+
+ c->last_http_fifo_write_count = http_fifo_write_count -
+ fifo_size(&http_fifo, c->rptr);
+
+ if (c->stream->stream_type != STREAM_TYPE_MASTER) {
+ /* test if the packet can be handled by this format */
+ ret = 0;
+ for(i=0;i<c->fmt_ctx.nb_streams;i++) {
+ AVStream *st = c->fmt_ctx.streams[i];
+ if (test_header(&hdr, &st->codec)) {
+ /* only begin sending when got a key frame */
+ if (st->codec.key_frame)
+ c->got_key_frame[i] = 1;
+ if (c->got_key_frame[i]) {
+ ret = c->fmt_ctx.format->write_packet(&c->fmt_ctx, i,
+ payload, payload_size);
+ }
+ break;
+ }
+ }
+ if (ret) {
+ /* must send trailer now */
+ c->state = HTTPSTATE_SEND_DATA_TRAILER;
+ }
+ } else {
+ /* master case : send everything */
+ char *q;
+ q = c->buffer;
+ memcpy(q, &hdr, sizeof(hdr));
+ q += sizeof(hdr);
+ memcpy(q, payload, payload_size);
+ q += payload_size;
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = q;
+ }
+ free(payload);
+#endif
+ {
+ AVPacket pkt;
+
+ /* read a packet from the input stream */
+ if (c->stream->feed) {
+ ffm_set_write_index(c->fmt_in,
+ c->stream->feed->feed_write_index,
+ c->stream->feed->feed_size);
+ }
+ if (av_read_packet(c->fmt_in, &pkt) < 0) {
+ if (c->stream->feed && c->stream->feed->feed_opened) {
+ /* if coming from a feed, it means we reached the end of the
+ ffm file, so we must wait for more data */
+ c->state = HTTPSTATE_WAIT_FEED;
+ return 1; /* state changed */
+ } else {
+ /* must send trailer now because eof or error */
+ c->state = HTTPSTATE_SEND_DATA_TRAILER;
+ }
+ } else {
+ /* send it to the appropriate stream */
+ if (c->stream->feed) {
+ /* if coming from a feed, select the right stream */
+ for(i=0;i<c->stream->nb_streams;i++) {
+ if (c->stream->feed_streams[i] == pkt.stream_index) {
+ pkt.stream_index = i;
+ goto send_it;
+ }
+ }
+ } else {
+ send_it:
+ av_write_packet(&c->fmt_ctx, &pkt);
+ }
+
+ av_free_packet(&pkt);
+ }
+ }
+ break;
+ default:
+ case HTTPSTATE_SEND_DATA_TRAILER:
+ /* last packet test ? */
+ if (c->last_packet_sent)
+ return -1;
+ /* prepare header */
+ c->fmt_ctx.format->write_trailer(&c->fmt_ctx);
+ c->last_packet_sent = 1;
+ break;
+ }
+ return 0;
+}
+
+/* should convert the format at the same time */
+static int http_send_data(HTTPContext *c)
+{
+ int len, ret;
+
+ while (c->buffer_ptr >= c->buffer_end) {
+ ret = http_prepare_data(c);
+ if (ret < 0)
+ return -1;
+ else if (ret == 0) {
+ break;
+ } else {
+ /* state change requested */
+ return 0;
+ }
+ }
+
+ len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ return -1;
+ }
+ } else {
+ c->buffer_ptr += len;
+ c->data_count += len;
+ }
+ return 0;
+}
+
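+/* a feeder starts a POST: open the feed file and get its current write
+ index and size */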
+static int http_start_receive_data(HTTPContext *c)
+{
+ int fd;
+
+ if (c->stream->feed_opened)
+ return -1;
+
+ /* open feed */
+ fd = open(c->stream->feed_filename, O_RDWR);
+ if (fd < 0)
+ return -1;
+ c->feed_fd = fd;
+
+ c->stream->feed_write_index = ffm_read_write_index(fd);
+ c->stream->feed_size = lseek(fd, 0, SEEK_END);
+ lseek(fd, 0, SEEK_SET);
+
+ /* init buffer input */
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = c->buffer + FFM_PACKET_SIZE;
+ c->stream->feed_opened = 1;
+ return 0;
+}
+
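+/* read data sent by the feeder; each complete FFM packet is written to the
+ feed file and the waiting connections are woken up */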
+static int http_receive_data(HTTPContext *c)
+{
+ int len;
+ HTTPContext *c1;
+
+ if (c->buffer_ptr >= c->buffer_end) {
+ /* a packet has been received: write it in the store, except
+ if it is the header */
+ if (c->data_count > FFM_PACKET_SIZE) {
+ FFStream *feed = c->stream;
+
+ // printf("writing pos=0x%Lx size=0x%Lx\n", feed->feed_write_index, feed->feed_size);
+ /* XXX: use llseek or url_seek */
+ lseek(c->feed_fd, feed->feed_write_index, SEEK_SET);
+ write(c->feed_fd, c->buffer, FFM_PACKET_SIZE);
+
+ feed->feed_write_index += FFM_PACKET_SIZE;
+ /* update file size */
+ if (feed->feed_write_index > c->stream->feed_size)
+ feed->feed_size = feed->feed_write_index;
+
+ /* handle wrap around if max file size reached */
+ if (feed->feed_write_index >= c->stream->feed_max_size)
+ feed->feed_write_index = FFM_PACKET_SIZE;
+
+ /* write index */
+ ffm_write_write_index(c->feed_fd, feed->feed_write_index);
+
+ /* wake up any waiting connections */
+ for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
+ if (c1->state == HTTPSTATE_WAIT_FEED &&
+ c1->stream->feed == c->stream->feed) {
+ c1->state = HTTPSTATE_SEND_DATA;
+ }
+ }
+ }
+ c->buffer_ptr = c->buffer;
+ }
+
+ len = read(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ goto fail;
+ }
+ } else if (len == 0) {
+ /* end of connection : close it */
+ goto fail;
+ } else {
+ c->buffer_ptr += len;
+ c->data_count += len;
+ }
+ return 0;
+ fail:
+ c->stream->feed_opened = 0;
+ close(c->feed_fd);
+ return -1;
+}
+
+/* return the stream number in the feed */
+int add_av_stream(FFStream *feed,
+ AVStream *st)
+{
+ AVStream *fst;
+ AVCodecContext *av, *av1;
+ int i;
+
+ av = &st->codec;
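+ /* look for an already defined stream with the same codec parameters */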
+ for(i=0;i<feed->nb_streams;i++) {
+ st = feed->streams[i];
+ av1 = &st->codec;
+ if (av1->codec == av->codec &&
+ av1->bit_rate == av->bit_rate) {
+
+ switch(av->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (av1->channels == av->channels &&
+ av1->sample_rate == av->sample_rate)
+ goto found;
+ break;
+ case CODEC_TYPE_VIDEO:
+ if (av1->width == av->width &&
+ av1->height == av->height &&
+ av1->frame_rate == av->frame_rate &&
+ av1->gop_size == av->gop_size)
+ goto found;
+ break;
+ }
+ }
+ }
+
+ fst = av_mallocz(sizeof(AVStream));
+ if (!fst)
+ return -1;
+ fst->priv_data = av_mallocz(sizeof(FeedData));
+ memcpy(&fst->codec, av, sizeof(AVCodecContext));
+ feed->streams[feed->nb_streams++] = fst;
+ return feed->nb_streams - 1;
+ found:
+ return i;
+}
+
+/* compute the needed AVStream for each feed */
+void build_feed_streams(void)
+{
+ FFStream *stream, *feed;
+ int i;
+
+ /* gather all streams */
+ for(stream = first_stream; stream != NULL; stream = stream->next) {
+ feed = stream->feed;
+ if (feed) {
+ if (!stream->is_feed) {
+ for(i=0;i<stream->nb_streams;i++) {
+ stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]);
+ }
+ } else {
+ for(i=0;i<stream->nb_streams;i++) {
+ stream->feed_streams[i] = i;
+ }
+ }
+ }
+ }
+
+ /* create feed files if needed */
+ for(feed = first_feed; feed != NULL; feed = feed->next_feed) {
+ int fd;
+
+ if (!url_exist(feed->feed_filename)) {
+ AVFormatContext s1, *s = &s1;
+
+ /* only write the header of the ffm file */
+ if (url_fopen(&s->pb, feed->feed_filename, URL_WRONLY) < 0) {
+ fprintf(stderr, "Could not open output feed file '%s'\n",
+ feed->feed_filename);
+ exit(1);
+ }
+ s->format = feed->fmt;
+ s->nb_streams = feed->nb_streams;
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st;
+ st = feed->streams[i];
+ s->streams[i] = st;
+ }
+ s->format->write_header(s);
+
+ url_fclose(&s->pb);
+ }
+ /* get feed size and write index */
+ fd = open(feed->feed_filename, O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "Could not open output feed file '%s'\n",
+ feed->feed_filename);
+ exit(1);
+ }
+
+ feed->feed_write_index = ffm_read_write_index(fd);
+ feed->feed_size = lseek(fd, 0, SEEK_END);
+ /* ensure that we do not wrap before the end of file */
+ if (feed->feed_max_size < feed->feed_size)
+ feed->feed_max_size = feed->feed_size;
+
+ close(fd);
+ }
+}
+
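+/* extract the next (possibly quoted) argument from a configuration line */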
+static void get_arg(char *buf, int buf_size, const char **pp)
+{
+ const char *p;
+ char *q;
+ int quote;
+
+ p = *pp;
+ while (isspace(*p)) p++;
+ q = buf;
+ quote = 0;
+ if (*p == '\"' || *p == '\'')
+ quote = *p++;
+ for(;;) {
+ if (quote) {
+ if (*p == quote)
+ break;
+ } else {
+ if (isspace(*p))
+ break;
+ }
+ if (*p == '\0')
+ break;
+ if ((q - buf) < buf_size - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+ if (quote && *p == quote)
+ p++;
+ *pp = p;
+}
+
+/* add a codec and set the default parameters */
+void add_codec(FFStream *stream, AVCodecContext *av)
+{
+ AVStream *st;
+
+ /* compute default parameters */
+ switch(av->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (av->bit_rate == 0)
+ av->bit_rate = 64000;
+ if (av->sample_rate == 0)
+ av->sample_rate = 22050;
+ if (av->channels == 0)
+ av->channels = 1;
+ break;
+ case CODEC_TYPE_VIDEO:
+ if (av->bit_rate == 0)
+ av->bit_rate = 64000;
+ if (av->frame_rate == 0)
+ av->frame_rate = 5 * FRAME_RATE_BASE;
+ if (av->width == 0 || av->height == 0) {
+ av->width = 160;
+ av->height = 128;
+ }
+ break;
+ }
+
+ st = av_mallocz(sizeof(AVStream));
+ if (!st)
+ return;
+ stream->streams[stream->nb_streams++] = st;
+ memcpy(&st->codec, av, sizeof(AVCodecContext));
+}
+
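+/* parse the ffserver configuration file and build the feed and stream lists */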
+int parse_ffconfig(const char *filename)
+{
+ FILE *f;
+ char line[1024];
+ char cmd[64];
+ char arg[1024];
+ const char *p;
+ int val, errors, line_num;
+ FFStream **last_stream, *stream;
+ FFStream **last_feed, *feed;
+ AVCodecContext audio_enc, video_enc;
+ int audio_id, video_id;
+
+ f = fopen(filename, "r");
+ if (!f) {
+ perror(filename);
+ return -1;
+ }
+
+ errors = 0;
+ line_num = 0;
+ first_stream = NULL;
+ last_stream = &first_stream;
+ first_feed = NULL;
+ last_feed = &first_feed;
+ stream = NULL;
+ feed = NULL;
+ audio_id = CODEC_ID_NONE;
+ video_id = CODEC_ID_NONE;
+ for(;;) {
+ if (fgets(line, sizeof(line), f) == NULL)
+ break;
+ line_num++;
+ p = line;
+ while (isspace(*p))
+ p++;
+ if (*p == '\0' || *p == '#')
+ continue;
+
+ get_arg(cmd, sizeof(cmd), &p);
+
+ if (!strcasecmp(cmd, "Port")) {
+ get_arg(arg, sizeof(arg), &p);
+ my_addr.sin_port = htons (atoi(arg));
+ } else if (!strcasecmp(cmd, "BindAddress")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (!inet_aton(arg, &my_addr.sin_addr)) {
+ fprintf(stderr, "%s:%d: Invalid IP address: %s\n",
+ filename, line_num, arg);
+ errors++;
+ }
+ } else if (!strcasecmp(cmd, "MaxClients")) {
+ get_arg(arg, sizeof(arg), &p);
+ val = atoi(arg);
+ if (val < 1 || val > HTTP_MAX_CONNECTIONS) {
+ fprintf(stderr, "%s:%d: Invalid MaxClients: %s\n",
+ filename, line_num, arg);
+ errors++;
+ } else {
+ nb_max_connections = val;
+ }
+ } else if (!strcasecmp(cmd, "CustomLog")) {
+ get_arg(logfilename, sizeof(logfilename), &p);
+ } else if (!strcasecmp(cmd, "<Feed")) {
+ /*********************************************/
+ /* Feed related options */
+ char *q;
+ if (stream || feed) {
+ fprintf(stderr, "%s:%d: Already in a tag\n",
+ filename, line_num);
+ } else {
+ feed = av_mallocz(sizeof(FFStream));
+ /* add in stream list */
+ *last_stream = feed;
+ last_stream = &feed->next;
+ /* add in feed list */
+ *last_feed = feed;
+ last_feed = &feed->next_feed;
+
+ get_arg(feed->filename, sizeof(feed->filename), &p);
+ q = strrchr(feed->filename, '>');
+ if (q)
+ *q = '\0';
+ feed->fmt = guess_format("ffm", NULL, NULL);
+ /* default feed file */
+ snprintf(feed->feed_filename, sizeof(feed->feed_filename),
+ "/tmp/%s.ffm", feed->filename);
+ feed->feed_max_size = 5 * 1024 * 1024;
+ feed->is_feed = 1;
+ feed->feed = feed; /* self feeding :-) */
+ }
+ } else if (!strcasecmp(cmd, "File")) {
+ if (feed) {
+ get_arg(feed->feed_filename, sizeof(feed->feed_filename), &p);
+ } else if (stream) {
+ get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
+ }
+ } else if (!strcasecmp(cmd, "FileMaxSize")) {
+ if (feed) {
+ const char *p1;
+ double fsize;
+
+ get_arg(arg, sizeof(arg), &p);
+ p1 = arg;
+ fsize = strtod(p1, (char **)&p1);
+ switch(toupper(*p1)) {
+ case 'K':
+ fsize *= 1024;
+ break;
+ case 'M':
+ fsize *= 1024 * 1024;
+ break;
+ case 'G':
+ fsize *= 1024 * 1024 * 1024;
+ break;
+ }
+ feed->feed_max_size = (INT64)fsize;
+ }
+ } else if (!strcasecmp(cmd, "</Feed>")) {
+ if (!feed) {
+ fprintf(stderr, "%s:%d: No corresponding <Feed> for </Feed>\n",
+ filename, line_num);
+ errors++;
+ }
+ feed = NULL;
+ } else if (!strcasecmp(cmd, "<Stream")) {
+ /*********************************************/
+ /* Stream related options */
+ char *q;
+ if (stream || feed) {
+ fprintf(stderr, "%s:%d: Already in a tag\n",
+ filename, line_num);
+ } else {
+ stream = av_mallocz(sizeof(FFStream));
+ *last_stream = stream;
+ last_stream = &stream->next;
+
+ get_arg(stream->filename, sizeof(stream->filename), &p);
+ q = strrchr(stream->filename, '>');
+ if (q)
+ *q = '\0';
+ stream->fmt = guess_format(NULL, stream->filename, NULL);
+ memset(&audio_enc, 0, sizeof(AVCodecContext));
+ memset(&video_enc, 0, sizeof(AVCodecContext));
+ audio_id = CODEC_ID_NONE;
+ video_id = CODEC_ID_NONE;
+ if (stream->fmt) {
+ audio_id = stream->fmt->audio_codec;
+ video_id = stream->fmt->video_codec;
+ }
+ }
+ } else if (!strcasecmp(cmd, "Feed")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ FFStream *sfeed;
+
+ sfeed = first_feed;
+ while (sfeed != NULL) {
+ if (!strcmp(sfeed->filename, arg))
+ break;
+ sfeed = sfeed->next_feed;
+ }
+ if (!sfeed) {
+ fprintf(stderr, "%s:%d: feed '%s' not defined\n",
+ filename, line_num, arg);
+ } else {
+ stream->feed = sfeed;
+ }
+ }
+ } else if (!strcasecmp(cmd, "Format")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ if (!strcmp(arg, "status")) {
+ stream->stream_type = STREAM_TYPE_STATUS;
+ stream->fmt = NULL;
+ } else {
+ stream->stream_type = STREAM_TYPE_LIVE;
+ stream->fmt = guess_format(arg, NULL, NULL);
+ if (!stream->fmt) {
+ fprintf(stderr, "%s:%d: Unknown Format: %s\n",
+ filename, line_num, arg);
+ errors++;
+ }
+ }
+ if (stream->fmt) {
+ audio_id = stream->fmt->audio_codec;
+ video_id = stream->fmt->video_codec;
+ }
+ }
+ } else if (!strcasecmp(cmd, "AudioBitRate")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ audio_enc.bit_rate = atoi(arg) * 1000;
+ }
+ } else if (!strcasecmp(cmd, "AudioChannels")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ audio_enc.channels = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "AudioSampleRate")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ audio_enc.sample_rate = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "VideoBitRate")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.bit_rate = atoi(arg) * 1000;
+ }
+ } else if (!strcasecmp(cmd, "VideoSize")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ parse_image_size(&video_enc.width, &video_enc.height, arg);
+ if ((video_enc.width % 16) != 0 ||
+ (video_enc.height % 16) != 0) {
+ fprintf(stderr, "%s:%d: Image size must be a multiple of 16\n",
+ filename, line_num);
+ errors++;
+ }
+ }
+ } else if (!strcasecmp(cmd, "VideoFrameRate")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.frame_rate = (int)(strtod(arg, NULL) * FRAME_RATE_BASE);
+ }
+ } else if (!strcasecmp(cmd, "VideoGopSize")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.gop_size = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "VideoIntraOnly")) {
+ if (stream) {
+ video_enc.gop_size = 1;
+ }
+ } else if (!strcasecmp(cmd, "NoVideo")) {
+ video_id = CODEC_ID_NONE;
+ } else if (!strcasecmp(cmd, "NoAudio")) {
+ audio_id = CODEC_ID_NONE;
+ } else if (!strcasecmp(cmd, "</Stream>")) {
+ if (!stream) {
+ fprintf(stderr, "%s:%d: No corresponding <Stream> for </Stream>\n",
+ filename, line_num);
+ errors++;
+ } else if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) {
+ if (audio_id != CODEC_ID_NONE) {
+ audio_enc.codec_type = CODEC_TYPE_AUDIO;
+ audio_enc.codec_id = audio_id;
+ add_codec(stream, &audio_enc);
+ }
+ if (video_id != CODEC_ID_NONE) {
+ video_enc.codec_type = CODEC_TYPE_VIDEO;
+ video_enc.codec_id = video_id;
+ add_codec(stream, &video_enc);
+ }
+ }
+ stream = NULL;
+ } else {
+ fprintf(stderr, "%s:%d: Incorrect keyword: '%s'\n",
+ filename, line_num, cmd);
+ errors++;
+ }
+ }
+
+ fclose(f);
+ if (errors)
+ return -1;
+ else
+ return 0;
+}
+
+
+void *http_server_thread(void *arg)
+{
+ http_server(my_addr);
+ return NULL;
+}
+
+#if 0
+static void write_packet(FFCodec *ffenc,
+ UINT8 *buf, int size)
+{
+ PacketHeader hdr;
+ AVCodecContext *enc = &ffenc->enc;
+ UINT8 *wptr;
+ mk_header(&hdr, enc, size);
+ wptr = http_fifo.wptr;
+ fifo_write(&http_fifo, (UINT8 *)&hdr, sizeof(hdr), &wptr);
+ fifo_write(&http_fifo, buf, size, &wptr);
+ /* atomic modification of wptr */
+ http_fifo.wptr = wptr;
+ ffenc->data_count += size;
+ ffenc->avg_frame_size = ffenc->avg_frame_size * AVG_COEF + size * (1.0 - AVG_COEF);
+}
+#endif
+
+void help(void)
+{
+ printf("ffserver version " FFMPEG_VERSION ", Copyright (c) 2000,2001 Gerard Lantau\n"
+ "usage: ffserver [-L] [-h] [-f configfile]\n"
+ "Hyper fast multi format Audio/Video streaming server\n"
+ "\n"
+ "-L : print the LICENCE\n"
+ "-h : this help\n"
+ "-f configfile : use configfile instead of /etc/ffserver.conf\n"
+ );
+}
+
+void licence(void)
+{
+ printf(
+ "ffserver version " FFMPEG_VERSION "\n"
+ "Copyright (c) 2000,2001 Gerard Lantau\n"
+ "This program is free software; you can redistribute it and/or modify\n"
+ "it under the terms of the GNU General Public License as published by\n"
+ "the Free Software Foundation; either version 2 of the License, or\n"
+ "(at your option) any later version.\n"
+ "\n"
+ "This program is distributed in the hope that it will be useful,\n"
+ "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
+ "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
+ "GNU General Public License for more details.\n"
+ "\n"
+ "You should have received a copy of the GNU General Public License\n"
+ "along with this program; if not, write to the Free Software\n"
+ "Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n"
+ );
+}
+
+int main(int argc, char **argv)
+{
+ const char *config_filename;
+ int c;
+
+ register_all();
+
+ config_filename = "/etc/ffserver.conf";
+
+ for(;;) {
+ c = getopt_long_only(argc, argv, "Lh?f:", NULL, NULL);
+ if (c == -1)
+ break;
+ switch(c) {
+ case 'L':
+ licence();
+ exit(1);
+ case '?':
+ case 'h':
+ help();
+ exit(1);
+ case 'f':
+ config_filename = optarg;
+ break;
+ default:
+ exit(2);
+ }
+ }
+
+ /* address on which the server will handle connections */
+ my_addr.sin_family = AF_INET;
+ my_addr.sin_port = htons (8080);
+ my_addr.sin_addr.s_addr = htonl (INADDR_ANY);
+ nb_max_connections = 5;
+ first_stream = NULL;
+ logfilename[0] = '\0';
+
+ if (parse_ffconfig(config_filename) < 0) {
+ fprintf(stderr, "Incorrect config file - exiting.\n");
+ exit(1);
+ }
+
+ build_feed_streams();
+
+ /* signal init */
+ signal(SIGPIPE, SIG_IGN);
+
+ /* open log file if needed */
+ if (logfilename[0] != '\0') {
+ if (!strcmp(logfilename, "-"))
+ logfile = stdout;
+ else
+ logfile = fopen(logfilename, "w");
+ }
+
+ if (http_server(my_addr) < 0) {
+ fprintf(stderr, "Could not start http server\n");
+ exit(1);
+ }
+
+ return 0;
+}
diff --git a/libav/Makefile b/libav/Makefile
new file mode 100644
index 0000000000..08d507a7f2
--- /dev/null
+++ b/libav/Makefile
@@ -0,0 +1,22 @@
+include ../config.mk
+CFLAGS= -O2 -Wall -g -I../libavcodec
+
+OBJS= rm.o mpeg.o asf.o avienc.o jpegenc.o swf.o wav.o raw.o \
+ avidec.o ffm.o \
+ avio.o aviobuf.o utils.o \
+ udp.o http.o file.o grab.o audio.o img.o
+
+LIB= libav.a
+
+all: $(LIB)
+
+$(LIB): $(OBJS)
+ rm -f $@
+ $(AR) rcs $@ $(OBJS)
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c -o $@ $<
+
+clean:
+ rm -f *.o *~ *.a
+