author     Michael Niedermayer <michaelni@gmx.at>    2012-01-28 04:23:26 +0100
committer  Michael Niedermayer <michaelni@gmx.at>    2012-01-28 07:53:34 +0100
commit     e37f161e66e042d6c2c7470c4d9881df9427fc4a (patch)
tree       6400fd6453f0525a65724937532d5baa33deead3 /libavdevice
parent     f21b6159cf3110a5f018d6addf7382840d427199 (diff)
parent     e771e6dd63e837220aa5d959486546d2be972e83 (diff)
download   ffmpeg-e37f161e66e042d6c2c7470c4d9881df9427fc4a.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master: (71 commits)
  movenc: Allow writing to a non-seekable output if using empty moov
  movenc: Support adding isml (smooth streaming live) metadata
  libavcodec: Don't crash in avcodec_encode_audio if time_base isn't set
  sunrast: Document the different Sun Raster file format types.
  sunrast: Add a check for experimental type.
  libspeexenc: use AVSampleFormat instead of deprecated/removed SampleFormat
  lavf: remove disabled FF_API_SET_PTS_INFO cruft
  lavf: remove disabled FF_API_OLD_INTERRUPT_CB cruft
  lavf: remove disabled FF_API_REORDER_PRIVATE cruft
  lavf: remove disabled FF_API_SEEK_PUBLIC cruft
  lavf: remove disabled FF_API_STREAM_COPY cruft
  lavf: remove disabled FF_API_PRELOAD cruft
  lavf: remove disabled FF_API_NEW_STREAM cruft
  lavf: remove disabled FF_API_RTSP_URL_OPTIONS cruft
  lavf: remove disabled FF_API_MUXRATE cruft
  lavf: remove disabled FF_API_FILESIZE cruft
  lavf: remove disabled FF_API_TIMESTAMP cruft
  lavf: remove disabled FF_API_LOOP_OUTPUT cruft
  lavf: remove disabled FF_API_LOOP_INPUT cruft
  lavf: remove disabled FF_API_AVSTREAM_QUALITY cruft
  ...

Conflicts:
	doc/APIchanges
	libavcodec/8bps.c
	libavcodec/avcodec.h
	libavcodec/libx264.c
	libavcodec/mjpegbdec.c
	libavcodec/options.c
	libavcodec/sunrast.c
	libavcodec/utils.c
	libavcodec/version.h
	libavcodec/x86/h264_deblock.asm
	libavdevice/libdc1394.c
	libavdevice/v4l2.c
	libavformat/avformat.h
	libavformat/avio.c
	libavformat/avio.h
	libavformat/aviobuf.c
	libavformat/dv.c
	libavformat/mov.c
	libavformat/utils.c
	libavformat/version.h
	libavformat/wtv.c
	libavutil/Makefile
	libavutil/file.c
	libswscale/x86/input.asm
	libswscale/x86/swscale_mmx.c
	libswscale/x86/swscale_template.c
	tests/ref/lavf/ffm

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavdevice')
-rw-r--r--  libavdevice/alsa-audio-dec.c |   3
-rw-r--r--  libavdevice/bktr.c           |   2
-rw-r--r--  libavdevice/dshow.c          |   6
-rw-r--r--  libavdevice/dv1394.c         |   2
-rw-r--r--  libavdevice/fbdev.c          |   3
-rw-r--r--  libavdevice/jack_audio.c     |   2
-rw-r--r--  libavdevice/lavfi.c          |   4
-rw-r--r--  libavdevice/libcdio.c        |   2
-rw-r--r--  libavdevice/libdc1394.c      | 414
-rw-r--r--  libavdevice/openal-dec.c     |   2
-rw-r--r--  libavdevice/oss_audio.c      |   2
-rw-r--r--  libavdevice/pulse.c          |   3
-rw-r--r--  libavdevice/sndio_dec.c      |   3
-rw-r--r--  libavdevice/v4l2.c           |   6
-rw-r--r--  libavdevice/vfwcap.c         |   2
-rw-r--r--  libavdevice/x11grab.c        |   2
16 files changed, 252 insertions, 206 deletions
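
The bulk of the changes below are mechanical: every libavdevice demuxer drops the AVFormatParameters argument from its read_header callback, so per-device settings (frame size, frame rate, pixel format) now travel through demuxer-private AVOptions. A minimal caller-side sketch of the replacement pattern, assuming the lavf/lavu APIs of this era; the option values and the empty filename are illustrative, not taken from this commit:

    #include <libavdevice/avdevice.h>
    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    static int open_grab_device(AVFormatContext **fmt_ctx)
    {
        AVDictionary *opts = NULL;
        AVInputFormat *ifmt;
        int ret;

        av_register_all();
        avdevice_register_all();

        /* What used to be passed via AVFormatParameters (ap->width/height,
         * ap->time_base, ap->pix_fmt) is now set as private options. */
        av_dict_set(&opts, "video_size",   "640x480", 0);
        av_dict_set(&opts, "framerate",    "30",      0);
        av_dict_set(&opts, "pixel_format", "uyvy422", 0);

        ifmt = av_find_input_format("libdc1394");   /* or "v4l2", "fbdev", ... */
        ret  = avformat_open_input(fmt_ctx, "", ifmt, &opts);

        av_dict_free(&opts);  /* frees any options the demuxer did not consume */
        return ret;
    }
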
diff --git a/libavdevice/alsa-audio-dec.c b/libavdevice/alsa-audio-dec.c
index bb9d233a4e..f2dd21655b 100644
--- a/libavdevice/alsa-audio-dec.c
+++ b/libavdevice/alsa-audio-dec.c
@@ -53,8 +53,7 @@
#include "avdevice.h"
#include "alsa-audio.h"
-static av_cold int audio_read_header(AVFormatContext *s1,
- AVFormatParameters *ap)
+static av_cold int audio_read_header(AVFormatContext *s1)
{
AlsaData *s = s1->priv_data;
AVStream *st;
diff --git a/libavdevice/bktr.c b/libavdevice/bktr.c
index 63f7ee3f4a..9c97dafa84 100644
--- a/libavdevice/bktr.c
+++ b/libavdevice/bktr.c
@@ -243,7 +243,7 @@ static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
return video_buf_size;
}
-static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+static int grab_read_header(AVFormatContext *s1)
{
VideoData *s = s1->priv_data;
AVStream *st;
diff --git a/libavdevice/dshow.c b/libavdevice/dshow.c
index 61f0d08a2f..dd1b29b2ef 100644
--- a/libavdevice/dshow.c
+++ b/libavdevice/dshow.c
@@ -649,7 +649,7 @@ static enum CodecID waveform_codec_id(enum AVSampleFormat sample_fmt)
}
}
-static enum SampleFormat sample_fmt_bits_per_sample(int bits)
+static enum AVSampleFormat sample_fmt_bits_per_sample(int bits)
{
switch (bits) {
case 8: return AV_SAMPLE_FMT_U8;
@@ -660,7 +660,7 @@ static enum SampleFormat sample_fmt_bits_per_sample(int bits)
}
static int
-dshow_add_device(AVFormatContext *avctx, AVFormatParameters *ap,
+dshow_add_device(AVFormatContext *avctx,
enum dshowDeviceType devtype)
{
struct dshow_ctx *ctx = avctx->priv_data;
@@ -784,7 +784,7 @@ static int parse_device_name(AVFormatContext *avctx)
return ret;
}
-static int dshow_read_header(AVFormatContext *avctx, AVFormatParameters *ap)
+static int dshow_read_header(AVFormatContext *avctx)
{
struct dshow_ctx *ctx = avctx->priv_data;
IGraphBuilder *graph = NULL;
diff --git a/libavdevice/dv1394.c b/libavdevice/dv1394.c
index 30bbe6b482..13c0dbb4c7 100644
--- a/libavdevice/dv1394.c
+++ b/libavdevice/dv1394.c
@@ -81,7 +81,7 @@ static int dv1394_start(struct dv1394_data *dv)
return 0;
}
-static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap)
+static int dv1394_read_header(AVFormatContext * context)
{
struct dv1394_data *dv = context->priv_data;
diff --git a/libavdevice/fbdev.c b/libavdevice/fbdev.c
index ee5ecee89d..92afaefc32 100644
--- a/libavdevice/fbdev.c
+++ b/libavdevice/fbdev.c
@@ -95,8 +95,7 @@ typedef struct {
uint8_t *data; ///< framebuffer data
} FBDevContext;
-av_cold static int fbdev_read_header(AVFormatContext *avctx,
- AVFormatParameters *ap)
+av_cold static int fbdev_read_header(AVFormatContext *avctx)
{
FBDevContext *fbdev = avctx->priv_data;
AVStream *st = NULL;
diff --git a/libavdevice/jack_audio.c b/libavdevice/jack_audio.c
index d0a09ee218..257a291758 100644
--- a/libavdevice/jack_audio.c
+++ b/libavdevice/jack_audio.c
@@ -222,7 +222,7 @@ static void stop_jack(JackData *self)
ff_timefilter_destroy(self->timefilter);
}
-static int audio_read_header(AVFormatContext *context, AVFormatParameters *params)
+static int audio_read_header(AVFormatContext *context)
{
JackData *self = context->priv_data;
AVStream *stream;
diff --git a/libavdevice/lavfi.c b/libavdevice/lavfi.c
index c4a0654c17..30c08e0d95 100644
--- a/libavdevice/lavfi.c
+++ b/libavdevice/lavfi.c
@@ -32,6 +32,7 @@
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/audioconvert.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
@@ -78,8 +79,7 @@ av_cold static int lavfi_read_close(AVFormatContext *avctx)
return 0;
}
-av_cold static int lavfi_read_header(AVFormatContext *avctx,
- AVFormatParameters *ap)
+av_cold static int lavfi_read_header(AVFormatContext *avctx)
{
LavfiContext *lavfi = avctx->priv_data;
AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
diff --git a/libavdevice/libcdio.c b/libavdevice/libcdio.c
index a84918fe92..c590995561 100644
--- a/libavdevice/libcdio.c
+++ b/libavdevice/libcdio.c
@@ -47,7 +47,7 @@ typedef struct CDIOContext {
int paranoia_mode;
} CDIOContext;
-static av_cold int read_header(AVFormatContext *ctx, AVFormatParameters *ap)
+static av_cold int read_header(AVFormatContext *ctx)
{
CDIOContext *s = ctx->priv_data;
AVStream *st;
diff --git a/libavdevice/libdc1394.c b/libavdevice/libdc1394.c
index a8d41f9c16..f4c37cc214 100644
--- a/libavdevice/libdc1394.c
+++ b/libavdevice/libdc1394.c
@@ -2,7 +2,6 @@
* IIDC1394 grab interface (uses libdc1394 and libraw1394)
* Copyright (c) 2004 Roman Shaposhnik
* Copyright (c) 2008 Alessandro Sappia
- * Copyright (c) 2011 Martin Lambers
*
* This file is part of FFmpeg.
*
@@ -22,26 +21,46 @@
*/
#include "config.h"
+#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
-#include "avdevice.h"
-
-#include <stdlib.h>
-#include <string.h>
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
+#if HAVE_LIBDC1394_2
#include <dc1394/dc1394.h>
+#elif HAVE_LIBDC1394_1
+#include <libraw1394/raw1394.h>
+#include <libdc1394/dc1394_control.h>
+
+#define DC1394_VIDEO_MODE_320x240_YUV422 MODE_320x240_YUV422
+#define DC1394_VIDEO_MODE_640x480_YUV411 MODE_640x480_YUV411
+#define DC1394_VIDEO_MODE_640x480_YUV422 MODE_640x480_YUV422
+#define DC1394_FRAMERATE_1_875 FRAMERATE_1_875
+#define DC1394_FRAMERATE_3_75 FRAMERATE_3_75
+#define DC1394_FRAMERATE_7_5 FRAMERATE_7_5
+#define DC1394_FRAMERATE_15 FRAMERATE_15
+#define DC1394_FRAMERATE_30 FRAMERATE_30
+#define DC1394_FRAMERATE_60 FRAMERATE_60
+#define DC1394_FRAMERATE_120 FRAMERATE_120
+#define DC1394_FRAMERATE_240 FRAMERATE_240
+#endif
#undef free
typedef struct dc1394_data {
AVClass *class;
+#if HAVE_LIBDC1394_1
+ raw1394handle_t handle;
+ dc1394_cameracapture camera;
+ int channel;
+#elif HAVE_LIBDC1394_2
dc1394_t *d;
dc1394camera_t *camera;
dc1394video_frame_t *frame;
+#endif
int current_frame;
int frame_rate; /**< frames per 1000 seconds (fps * 1000) */
char *video_size; /**< String describing video size, set by a private option. */
@@ -51,21 +70,16 @@ typedef struct dc1394_data {
AVPacket packet;
} dc1394_data;
-/* The list of color codings that we support.
- * We assume big endian for the dc1394 16bit modes: libdc1394 never sets the
- * flag little_endian in dc1394video_frame_t. */
-struct dc1394_color_coding {
- int pix_fmt;
- int score;
- uint32_t coding;
-} dc1394_color_codings[] = {
- { PIX_FMT_GRAY16BE, 1000, DC1394_COLOR_CODING_MONO16 },
- { PIX_FMT_RGB48BE, 1100, DC1394_COLOR_CODING_RGB16 },
- { PIX_FMT_GRAY8, 1200, DC1394_COLOR_CODING_MONO8 },
- { PIX_FMT_RGB24, 1300, DC1394_COLOR_CODING_RGB8 },
- { PIX_FMT_UYYVYY411, 1400, DC1394_COLOR_CODING_YUV411 },
- { PIX_FMT_UYVY422, 1500, DC1394_COLOR_CODING_YUV422 },
- { PIX_FMT_NONE, 0, 0 } /* gotta be the last one */
+struct dc1394_frame_format {
+ int width;
+ int height;
+ enum PixelFormat pix_fmt;
+ int frame_size_id;
+} dc1394_frame_formats[] = {
+ { 320, 240, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_320x240_YUV422 },
+ { 640, 480, PIX_FMT_UYYVYY411, DC1394_VIDEO_MODE_640x480_YUV411 },
+ { 640, 480, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_640x480_YUV422 },
+ { 0, 0, 0, 0 } /* gotta be the last one */
};
struct dc1394_frame_rate {
@@ -86,6 +100,9 @@ struct dc1394_frame_rate {
#define OFFSET(x) offsetof(dc1394_data, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
+#if HAVE_LIBDC1394_1
+ { "channel", "", offsetof(dc1394_data, channel), AV_OPT_TYPE_INT, {.dbl = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
+#endif
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "qvga"}, 0, 0, DEC },
{ "pixel_format", "", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = "uyvy422"}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
@@ -99,183 +116,202 @@ static const AVClass libdc1394_class = {
.version = LIBAVUTIL_VERSION_INT,
};
-static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
+
+static inline int dc1394_read_common(AVFormatContext *c,
+ struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps)
{
dc1394_data* dc1394 = c->priv_data;
- AVStream *vst;
- const struct dc1394_color_coding *cc;
- const struct dc1394_frame_rate *fr;
- dc1394camera_list_t *list;
- dc1394video_modes_t video_modes;
- dc1394video_mode_t video_mode;
- dc1394framerates_t frame_rates;
- dc1394framerate_t frame_rate;
- uint32_t dc1394_width, dc1394_height, dc1394_color_coding;
- int rate, best_rate;
- int score, max_score;
- int final_width, final_height, final_pix_fmt, final_frame_rate;
- int res, i, j;
- int ret=-1;
-
- /* Now let us prep the hardware. */
- dc1394->d = dc1394_new();
- dc1394_camera_enumerate (dc1394->d, &list);
- if ( !list || list->num == 0) {
- av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera\n\n");
+ AVStream* vst;
+ struct dc1394_frame_format *fmt;
+ struct dc1394_frame_rate *fps;
+ enum PixelFormat pix_fmt;
+ int width, height;
+ AVRational framerate;
+ int ret = 0;
+
+ if ((pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == PIX_FMT_NONE) {
+ av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format);
+ ret = AVERROR(EINVAL);
goto out;
}
- /* FIXME: To select a specific camera I need to search in list its guid */
- dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid);
- if (list->num > 1) {
- av_log(c, AV_LOG_INFO, "Working with the first camera found\n");
+ if ((ret = av_parse_video_size(&width, &height, dc1394->video_size)) < 0) {
+ av_log(c, AV_LOG_ERROR, "Could not parse video size '%s'.\n", dc1394->video_size);
+ goto out;
}
-
- /* Freeing list of cameras */
- dc1394_camera_free_list (list);
-
- /* Get the list of video modes supported by the camera. */
- res = dc1394_video_get_supported_modes (dc1394->camera, &video_modes);
- if (res != DC1394_SUCCESS) {
- av_log(c, AV_LOG_ERROR, "Could not get video formats.\n");
- goto out_camera;
+ if ((ret = av_parse_video_rate(&framerate, dc1394->framerate)) < 0) {
+ av_log(c, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", dc1394->framerate);
+ goto out;
}
+ dc1394->frame_rate = av_rescale(1000, framerate.num, framerate.den);
- if (dc1394->pixel_format) {
- if ((ap->pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == PIX_FMT_NONE) {
- av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format);
- ret = AVERROR(EINVAL);
- goto out;
- }
- }
+ for (fmt = dc1394_frame_formats; fmt->width; fmt++)
+ if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height)
+ break;
- if (dc1394->video_size) {
- if ((ret = av_parse_video_size(&ap->width, &ap->height, dc1394->video_size)) < 0) {
- av_log(c, AV_LOG_ERROR, "Couldn't parse video size.\n");
- goto out;
- }
- }
+ for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
+ if (fps->frame_rate == dc1394->frame_rate)
+ break;
- /* Choose the best mode. */
- rate = (ap->time_base.num ? av_rescale(1000, ap->time_base.den, ap->time_base.num) : -1);
- max_score = -1;
- for (i = 0; i < video_modes.num; i++) {
- if (video_modes.modes[i] == DC1394_VIDEO_MODE_EXIF
- || (video_modes.modes[i] >= DC1394_VIDEO_MODE_FORMAT7_MIN
- && video_modes.modes[i] <= DC1394_VIDEO_MODE_FORMAT7_MAX)) {
- /* These modes are currently not supported as they would require
- * much more work. For the remaining modes, the functions
- * dc1394_get_image_size_from_video_mode and
- * dc1394_get_color_coding_from_video_mode do not need to query the
- * camera, and thus cannot fail. */
- continue;
- }
- dc1394_get_color_coding_from_video_mode (NULL, video_modes.modes[i],
- &dc1394_color_coding);
- for (cc = dc1394_color_codings; cc->pix_fmt != PIX_FMT_NONE; cc++)
- if (cc->coding == dc1394_color_coding)
- break;
- if (cc->pix_fmt == PIX_FMT_NONE) {
- /* We currently cannot handle this color coding. */
- continue;
- }
- /* Here we know that the mode is supported. Get its frame size and the list
- * of frame rates supported by the camera for this mode. This list is sorted
- * in ascending order according to libdc1394 example programs. */
- dc1394_get_image_size_from_video_mode (NULL, video_modes.modes[i],
- &dc1394_width, &dc1394_height);
- res = dc1394_video_get_supported_framerates (dc1394->camera, video_modes.modes[i],
- &frame_rates);
- if (res != DC1394_SUCCESS || frame_rates.num == 0) {
- av_log(c, AV_LOG_ERROR, "Cannot get frame rates for video mode.\n");
- goto out_camera;
- }
- /* Choose the best frame rate. */
- best_rate = -1;
- for (j = 0; j < frame_rates.num; j++) {
- for (fr = dc1394_frame_rates; fr->frame_rate; fr++) {
- if (fr->frame_rate_id == frame_rates.framerates[j]) {
- break;
- }
- }
- if (!fr->frame_rate) {
- /* This frame rate is not supported. */
- continue;
- }
- best_rate = fr->frame_rate;
- frame_rate = fr->frame_rate_id;
- if (ap->time_base.num && rate == fr->frame_rate) {
- /* This is the requested frame rate. */
- break;
- }
- }
- if (best_rate == -1) {
- /* No supported rate found. */
- continue;
- }
- /* Here we know that both the mode and the rate are supported. Compute score. */
- if (ap->width && ap->height
- && (dc1394_width == ap->width && dc1394_height == ap->height)) {
- score = 110000;
- } else {
- score = dc1394_width * 10; // 1600 - 16000
- }
- if (ap->pix_fmt == cc->pix_fmt) {
- score += 90000;
- } else {
- score += cc->score; // 1000 - 1500
- }
- if (ap->time_base.num && rate == best_rate) {
- score += 70000;
- } else {
- score += best_rate / 1000; // 1 - 240
- }
- if (score > max_score) {
- video_mode = video_modes.modes[i];
- final_width = dc1394_width;
- final_height = dc1394_height;
- final_pix_fmt = cc->pix_fmt;
- final_frame_rate = best_rate;
- max_score = score;
- }
- }
- if (max_score == -1) {
- av_log(c, AV_LOG_ERROR, "No suitable video mode / frame rate available.\n");
- goto out_camera;
- }
- if (ap->width && ap->height && !(ap->width == final_width && ap->height == final_height)) {
- av_log(c, AV_LOG_WARNING, "Requested frame size is not available, using fallback.\n");
- }
- if (ap->pix_fmt != PIX_FMT_NONE && ap->pix_fmt != final_pix_fmt) {
- av_log(c, AV_LOG_WARNING, "Requested pixel format is not supported, using fallback.\n");
- }
- if (ap->time_base.num && rate != final_frame_rate) {
- av_log(c, AV_LOG_WARNING, "Requested frame rate is not available, using fallback.\n");
+ if (!fps->frame_rate || !fmt->width) {
+ av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", av_get_pix_fmt_name(pix_fmt),
+ width, height, dc1394->frame_rate);
+ ret = AVERROR(EINVAL);
+ goto out;
}
/* create a video stream */
vst = avformat_new_stream(c, NULL);
- if (!vst)
- goto out_camera;
+ if (!vst) {
+ ret = AVERROR(ENOMEM);
+ goto out;
+ }
avpriv_set_pts_info(vst, 64, 1, 1000);
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->codec_id = CODEC_ID_RAWVIDEO;
- vst->codec->time_base.den = final_frame_rate;
- vst->codec->time_base.num = 1000;
- vst->codec->width = final_width;
- vst->codec->height = final_height;
- vst->codec->pix_fmt = final_pix_fmt;
+ vst->codec->time_base.den = framerate.num;
+ vst->codec->time_base.num = framerate.den;
+ vst->codec->width = fmt->width;
+ vst->codec->height = fmt->height;
+ vst->codec->pix_fmt = fmt->pix_fmt;
/* packet init */
av_init_packet(&dc1394->packet);
- dc1394->packet.size = avpicture_get_size(final_pix_fmt, final_width, final_height);
+ dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
dc1394->packet.stream_index = vst->index;
dc1394->packet.flags |= AV_PKT_FLAG_KEY;
dc1394->current_frame = 0;
- dc1394->frame_rate = final_frame_rate;
- vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, final_frame_rate, 1000);
+ vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
+ *select_fps = fps;
+ *select_fmt = fmt;
+out:
+ return ret;
+}
+
+#if HAVE_LIBDC1394_1
+static int dc1394_v1_read_header(AVFormatContext *c)
+{
+ dc1394_data* dc1394 = c->priv_data;
+ AVStream* vst;
+ nodeid_t* camera_nodes;
+ int res;
+ struct dc1394_frame_format *fmt = NULL;
+ struct dc1394_frame_rate *fps = NULL;
+
+ if (dc1394_read_common(c, &fmt, &fps) != 0)
+ return -1;
+
+ /* Now let us prep the hardware. */
+ dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
+ if (!dc1394->handle) {
+ av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
+ goto out;
+ }
+ camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
+ if (!camera_nodes || camera_nodes[dc1394->channel] == DC1394_NO_CAMERA) {
+ av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", dc1394->channel);
+ goto out_handle;
+ }
+ res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[dc1394->channel],
+ 0,
+ FORMAT_VGA_NONCOMPRESSED,
+ fmt->frame_size_id,
+ SPEED_400,
+ fps->frame_rate_id, 8, 1,
+ c->filename,
+ &dc1394->camera);
+ dc1394_free_camera_nodes(camera_nodes);
+ if (res != DC1394_SUCCESS) {
+ av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n");
+ goto out_handle;
+ }
+
+ res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node);
+ if (res != DC1394_SUCCESS) {
+ av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
+ goto out_handle_dma;
+ }
+
+ return 0;
+
+out_handle_dma:
+ dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
+ dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
+out_handle:
+ dc1394_destroy_handle(dc1394->handle);
+out:
+ return -1;
+}
+
+static int dc1394_v1_read_packet(AVFormatContext *c, AVPacket *pkt)
+{
+ struct dc1394_data *dc1394 = c->priv_data;
+ int res;
+
+ /* discard stale frame */
+ if (dc1394->current_frame++) {
+ if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
+ av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
+ }
+
+ res = dc1394_dma_single_capture(&dc1394->camera);
+
+ if (res == DC1394_SUCCESS) {
+ dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
+ dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->frame_rate;
+ res = dc1394->packet.size;
+ } else {
+ av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
+ dc1394->packet.data = NULL;
+ res = -1;
+ }
+
+ *pkt = dc1394->packet;
+ return res;
+}
+
+static int dc1394_v1_close(AVFormatContext * context)
+{
+ struct dc1394_data *dc1394 = context->priv_data;
+
+ dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node);
+ dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
+ dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
+ dc1394_destroy_handle(dc1394->handle);
+
+ return 0;
+}
+
+#elif HAVE_LIBDC1394_2
+static int dc1394_v2_read_header(AVFormatContext *c)
+{
+ dc1394_data* dc1394 = c->priv_data;
+ dc1394camera_list_t *list;
+ int res, i;
+ struct dc1394_frame_format *fmt = NULL;
+ struct dc1394_frame_rate *fps = NULL;
+
+ if (dc1394_read_common(c, &fmt, &fps) != 0)
+ return -1;
+
+ /* Now let us prep the hardware. */
+ dc1394->d = dc1394_new();
+ dc1394_camera_enumerate (dc1394->d, &list);
+ if ( !list || list->num == 0) {
+ av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera\n\n");
+ goto out;
+ }
+
+ /* FIXME: To select a specific camera I need to search in list its guid */
+ dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid);
+ if (list->num > 1) {
+ av_log(c, AV_LOG_INFO, "Working with the first camera found\n");
+ }
+
+ /* Freeing list of cameras */
+ dc1394_camera_free_list (list);
/* Select MAX Speed possible from the cam */
if (dc1394->camera->bmode_capable>0) {
@@ -293,13 +329,13 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
goto out_camera;
}
- if (dc1394_video_set_mode(dc1394->camera, video_mode) != DC1394_SUCCESS) {
+ if (dc1394_video_set_mode(dc1394->camera, fmt->frame_size_id) != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Couldn't set video format\n");
goto out_camera;
}
- if (dc1394_video_set_framerate(dc1394->camera, frame_rate) != DC1394_SUCCESS) {
- av_log(c, AV_LOG_ERROR, "Could not set framerate %d.\n", final_frame_rate);
+ if (dc1394_video_set_framerate(dc1394->camera,fps->frame_rate_id) != DC1394_SUCCESS) {
+ av_log(c, AV_LOG_ERROR, "Couldn't set framerate %d \n",fps->frame_rate);
goto out_camera;
}
if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT)!=DC1394_SUCCESS) {
@@ -319,10 +355,10 @@ out_camera:
dc1394_camera_free (dc1394->camera);
out:
dc1394_free(dc1394->d);
- return ret;
+ return -1;
}
-static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
+static int dc1394_v2_read_packet(AVFormatContext *c, AVPacket *pkt)
{
struct dc1394_data *dc1394 = c->priv_data;
int res;
@@ -348,7 +384,7 @@ static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
return res;
}
-static int dc1394_close(AVFormatContext * context)
+static int dc1394_v2_close(AVFormatContext * context)
{
struct dc1394_data *dc1394 = context->priv_data;
@@ -362,11 +398,25 @@ static int dc1394_close(AVFormatContext * context)
AVInputFormat ff_libdc1394_demuxer = {
.name = "libdc1394",
- .long_name = NULL_IF_CONFIG_SMALL("dc1394 A/V grab"),
+ .long_name = NULL_IF_CONFIG_SMALL("dc1394 v.2 A/V grab"),
+ .priv_data_size = sizeof(struct dc1394_data),
+ .read_header = dc1394_v2_read_header,
+ .read_packet = dc1394_v2_read_packet,
+ .read_close = dc1394_v2_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &libdc1394_class,
+};
+
+#endif
+#if HAVE_LIBDC1394_1
+AVInputFormat ff_libdc1394_demuxer = {
+ .name = "libdc1394",
+ .long_name = NULL_IF_CONFIG_SMALL("dc1394 v.1 A/V grab"),
.priv_data_size = sizeof(struct dc1394_data),
- .read_header = dc1394_read_header,
- .read_packet = dc1394_read_packet,
- .read_close = dc1394_close,
+ .read_header = dc1394_v1_read_header,
+ .read_packet = dc1394_v1_read_packet,
+ .read_close = dc1394_v1_close,
.flags = AVFMT_NOFILE,
.priv_class = &libdc1394_class,
};
+#endif
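
In the restructured libdc1394.c, dc1394_read_common() resolves the video_size/pixel_format/framerate options against sentinel-terminated lookup tables (dc1394_frame_formats, dc1394_frame_rates) and hands the matching entries to whichever version-specific read_header was compiled in (HAVE_LIBDC1394_1 vs. HAVE_LIBDC1394_2). A stand-alone sketch of that table-scan idiom, with hypothetical table contents standing in for the real DC1394 mode IDs:

    #include <stdio.h>

    struct frame_format { int width, height, mode_id; };

    /* Sentinel-terminated, like dc1394_frame_formats in the patch. */
    static const struct frame_format formats[] = {
        { 320, 240, 1 },
        { 640, 480, 2 },
        {   0,   0, 0 },   /* gotta be the last one */
    };

    static const struct frame_format *find_format(int w, int h)
    {
        const struct frame_format *f;
        for (f = formats; f->width; f++)
            if (f->width == w && f->height == h)
                return f;
        return NULL;   /* caller reports "Can't find matching camera format" */
    }

    int main(void)
    {
        const struct frame_format *f = find_format(640, 480);
        if (f)
            printf("mode id %d\n", f->mode_id);
        else
            printf("no matching camera format\n");
        return 0;
    }
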
diff --git a/libavdevice/openal-dec.c b/libavdevice/openal-dec.c
index 17256ca066..2227d6350b 100644
--- a/libavdevice/openal-dec.c
+++ b/libavdevice/openal-dec.c
@@ -117,7 +117,7 @@ static inline void print_al_capture_devices(void *log_ctx)
av_log(log_ctx, AV_LOG_INFO, " %s\n", devices);
}
-static int read_header(AVFormatContext *ctx, AVFormatParameters *ap)
+static int read_header(AVFormatContext *ctx)
{
al_data *ad = ctx->priv_data;
static const ALCenum sample_formats[2][2] = {
diff --git a/libavdevice/oss_audio.c b/libavdevice/oss_audio.c
index e3ab926704..c86d2b4302 100644
--- a/libavdevice/oss_audio.c
+++ b/libavdevice/oss_audio.c
@@ -204,7 +204,7 @@ static int audio_write_trailer(AVFormatContext *s1)
/* grab support */
-static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+static int audio_read_header(AVFormatContext *s1)
{
AudioData *s = s1->priv_data;
AVStream *st;
diff --git a/libavdevice/pulse.c b/libavdevice/pulse.c
index 4c75faabf9..044ec3178d 100644
--- a/libavdevice/pulse.c
+++ b/libavdevice/pulse.c
@@ -66,8 +66,7 @@ static pa_sample_format_t codec_id_to_pulse_format(int codec_id) {
}
}
-static av_cold int pulse_read_header(AVFormatContext *s,
- AVFormatParameters *ap)
+static av_cold int pulse_read_header(AVFormatContext *s)
{
PulseData *pd = s->priv_data;
AVStream *st;
diff --git a/libavdevice/sndio_dec.c b/libavdevice/sndio_dec.c
index ff3d5c160c..48adf08618 100644
--- a/libavdevice/sndio_dec.c
+++ b/libavdevice/sndio_dec.c
@@ -28,8 +28,7 @@
#include "sndio_common.h"
-static av_cold int audio_read_header(AVFormatContext *s1,
- AVFormatParameters *ap)
+static av_cold int audio_read_header(AVFormatContext *s1)
{
SndioData *s = s1->priv_data;
AVStream *st;
diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c
index be461c2877..2780a448ef 100644
--- a/libavdevice/v4l2.c
+++ b/libavdevice/v4l2.c
@@ -555,7 +555,7 @@ static void mmap_close(struct video_data *s)
av_free(s->buf_len);
}
-static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap)
+static int v4l2_set_parameters(AVFormatContext *s1)
{
struct video_data *s = s1->priv_data;
struct v4l2_input input = { 0 };
@@ -683,7 +683,7 @@ static uint32_t device_try_init(AVFormatContext *s1,
return desired_format;
}
-static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+static int v4l2_read_header(AVFormatContext *s1)
{
struct video_data *s = s1->priv_data;
AVStream *st;
@@ -770,7 +770,7 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
s->frame_format = desired_format;
- if ((res = v4l2_set_parameters(s1, ap)) < 0)
+ if ((res = v4l2_set_parameters(s1)) < 0)
goto out;
st->codec->pix_fmt = fmt_v4l2ff(desired_format, codec_id);
diff --git a/libavdevice/vfwcap.c b/libavdevice/vfwcap.c
index 9a525ea27a..d8e32cec41 100644
--- a/libavdevice/vfwcap.c
+++ b/libavdevice/vfwcap.c
@@ -236,7 +236,7 @@ static int vfw_read_close(AVFormatContext *s)
return 0;
}
-static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
+static int vfw_read_header(AVFormatContext *s)
{
struct vfw_ctx *ctx = s->priv_data;
AVCodecContext *codec;
diff --git a/libavdevice/x11grab.c b/libavdevice/x11grab.c
index dccbb278a3..722f00ff0d 100644
--- a/libavdevice/x11grab.c
+++ b/libavdevice/x11grab.c
@@ -154,7 +154,7 @@ x11grab_region_win_init(struct x11_grab *s)
* </ul>
*/
static int
-x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+x11grab_read_header(AVFormatContext *s1)
{
struct x11_grab *x11grab = s1->priv_data;
Display *dpy;