author     Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2021-06-08 01:03:45 +0200
committer  Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2021-07-22 07:32:09 +0200
commit     10f8f06a56ef9ea27595269a8a747026433b0673 (patch)
tree       71fc91595d8f009f3bde47bea76d4e4d7d4cc035
parent     0bc7ddc460511c82392677c83bc320db26a4a06e (diff)
download   ffmpeg-10f8f06a56ef9ea27595269a8a747026433b0673.tar.gz
avcodec/adpcmenc: Use smaller scope for some variables
This is to avoid unused-variable warnings if the code for disabled encoders is #if'ed away, which will happen in a subsequent commit. In the case of buf it also avoids shadowing.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
-rw-r--r--  libavcodec/adpcmenc.c | 105
1 file changed, 54 insertions(+), 51 deletions(-)
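The change is mechanical: counters like i, ch and n, and the scratch pointer buf, move from function scope into the innermost block or for statement that uses them, so a case branch that is later compiled out takes its locals with it; it also removes the shadowing between the function-scope uint8_t *buf and the local uint8_t buf[64] in the IMA_QT path. A minimal standalone sketch of the pattern follows (not FFmpeg code; CONFIG_FOO_ENCODER is a hypothetical stand-in for the per-codec configuration macros):

    /* Sketch only: CONFIG_FOO_ENCODER is a made-up build switch used to
     * illustrate why block-scoped declarations avoid unused-variable
     * warnings when a whole branch is compiled out. */
    #include <stdint.h>
    #include <stdio.h>

    #define CONFIG_FOO_ENCODER 1   /* flip to 0: the branch and its locals disappear together */

    static void encode(int codec_id, const int16_t *samples, int nb_samples)
    {
        switch (codec_id) {
    #if CONFIG_FOO_ENCODER
        case 1:
        {
            /* buf and the loop counter exist only inside this block. */
            uint8_t buf[8];
            int n = nb_samples < 8 ? nb_samples : 8;

            for (int i = 0; i < n; i++)
                buf[i] = (samples[i] >> 8) & 0xFF;   /* keep the high byte of each sample */
            fwrite(buf, 1, n, stdout);
            break;
        }
    #endif
        default:
            break;
        }
    }

    int main(void)
    {
        static const int16_t samples[8] = { 0, 4096, 8192, 12288, 16384, 20480, 24576, 28672 };
        encode(1, samples, 8);
        return 0;
    }

With the declarations left at function scope, building with the branch #if'ed away would leave the locals unused and would typically trigger -Wunused-variable under the usual warning flags.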
diff --git a/libavcodec/adpcmenc.c b/libavcodec/adpcmenc.c
index 2b5a606699..b95a9822e5 100644
--- a/libavcodec/adpcmenc.c
+++ b/libavcodec/adpcmenc.c
@@ -67,8 +67,6 @@ typedef struct ADPCMEncodeContext {
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
ADPCMEncodeContext *s = avctx->priv_data;
- uint8_t *extradata;
- int i;
if (avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
@@ -132,6 +130,8 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
avctx->block_align = 34 * avctx->channels;
break;
case AV_CODEC_ID_ADPCM_MS:
+ {
+ uint8_t *extradata;
/* each 16 bits sample gives one nibble
and we have 7 bytes per channel overhead */
avctx->frame_size = (s->block_size - 7 * avctx->channels) * 2 / avctx->channels + 2;
@@ -143,11 +143,12 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
extradata = avctx->extradata;
bytestream_put_le16(&extradata, avctx->frame_size);
bytestream_put_le16(&extradata, 7); /* wNumCoef */
- for (i = 0; i < 7; i++) {
+ for (int i = 0; i < 7; i++) {
bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
}
break;
+ }
case AV_CODEC_ID_ADPCM_YAMAHA:
avctx->frame_size = s->block_size * 2 / avctx->channels;
avctx->block_align = s->block_size;
@@ -588,12 +589,11 @@ static int64_t adpcm_argo_compress_block(ADPCMChannelStatus *cs, PutBitContext *
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr)
{
- int n, i, ch, st, pkt_size, ret;
+ int st, pkt_size, ret;
const int16_t *samples;
int16_t **samples_p;
uint8_t *dst;
ADPCMEncodeContext *c = avctx->priv_data;
- uint8_t *buf;
samples = (const int16_t *)frame->data[0];
samples_p = (int16_t **)frame->extended_data;
@@ -613,11 +613,9 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
switch(avctx->codec->id) {
case AV_CODEC_ID_ADPCM_IMA_WAV:
{
- int blocks, j;
-
- blocks = (frame->nb_samples - 1) / 8;
+ int blocks = (frame->nb_samples - 1) / 8;

- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
ADPCMChannelStatus *status = &c->status[ch];
status->prev_sample = samples_p[ch][0];
/* status->step_index = 0;
@@ -629,27 +627,28 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
/* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
if (avctx->trellis > 0) {
+ uint8_t *buf;
if (!FF_ALLOC_TYPED_ARRAY(buf, avctx->channels * blocks * 8))
return AVERROR(ENOMEM);
- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
adpcm_compress_trellis(avctx, &samples_p[ch][1],
buf + ch * blocks * 8, &c->status[ch],
blocks * 8, 1);
}
- for (i = 0; i < blocks; i++) {
- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int i = 0; i < blocks; i++) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
- for (j = 0; j < 8; j += 2)
+ for (int j = 0; j < 8; j += 2)
*dst++ = buf1[j] | (buf1[j + 1] << 4);
}
}
av_free(buf);
} else {
- for (i = 0; i < blocks; i++) {
- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int i = 0; i < blocks; i++) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
ADPCMChannelStatus *status = &c->status[ch];
const int16_t *smp = &samples_p[ch][1 + i * 8];
- for (j = 0; j < 8; j += 2) {
+ for (int j = 0; j < 8; j += 2) {
uint8_t v = adpcm_ima_compress_sample(status, smp[j ]);
v |= adpcm_ima_compress_sample(status, smp[j + 1]) << 4;
*dst++ = v;
@@ -664,7 +663,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
PutBitContext pb;
init_put_bits(&pb, dst, pkt_size);
- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
ADPCMChannelStatus *status = &c->status[ch];
put_bits(&pb, 9, (status->prev_sample & 0xFFFF) >> 7);
put_bits(&pb, 7, status->step_index);
@@ -672,11 +671,11 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
uint8_t buf[64];
adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
64, 1);
- for (i = 0; i < 64; i++)
+ for (int i = 0; i < 64; i++)
put_bits(&pb, 4, buf[i ^ 1]);
status->prev_sample = status->predictor;
} else {
- for (i = 0; i < 64; i += 2) {
+ for (int i = 0; i < 64; i += 2) {
int t1, t2;
t1 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i ]);
t2 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i + 1]);
@@ -696,8 +695,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
av_assert0(avctx->trellis == 0);
- for (i = 0; i < frame->nb_samples; i++) {
- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int i = 0; i < frame->nb_samples; i++) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
}
}
@@ -712,8 +711,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
av_assert0(avctx->trellis == 0);
- for (n = frame->nb_samples / 2; n > 0; n--) {
- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int n = frame->nb_samples / 2; n > 0; n--) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
put_bits(&pb, 4, adpcm_ima_alp_compress_sample(c->status + ch, *samples++));
put_bits(&pb, 4, adpcm_ima_alp_compress_sample(c->status + ch, samples[st]));
}
@@ -725,11 +724,10 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
case AV_CODEC_ID_ADPCM_SWF:
{
+ const int n = frame->nb_samples - 1;
PutBitContext pb;
init_put_bits(&pb, dst, pkt_size);
- n = frame->nb_samples - 1;
-
/* NB: This is safe as we don't have AV_CODEC_CAP_SMALL_LAST_FRAME. */
av_assert0(n == 4095);
@@ -737,7 +735,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
put_bits(&pb, 2, 2); // set 4-bit flash adpcm format
// init the encoder state
- for (i = 0; i < avctx->channels; i++) {
+ for (int i = 0; i < avctx->channels; i++) {
// clip step so it fits 6 bits
c->status[i].step_index = av_clip_uintp2(c->status[i].step_index, 6);
put_sbits(&pb, 16, samples[i]);
@@ -753,13 +751,13 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
buf + n, &c->status[1], n,
avctx->channels);
- for (i = 0; i < n; i++) {
+ for (int i = 0; i < n; i++) {
put_bits(&pb, 4, buf[i]);
if (avctx->channels == 2)
put_bits(&pb, 4, buf[n + i]);
}
} else {
- for (i = 1; i < frame->nb_samples; i++) {
+ for (int i = 1; i < frame->nb_samples; i++) {
put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
samples[avctx->channels * i]));
if (avctx->channels == 2)
@@ -771,46 +769,47 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
break;
}
case AV_CODEC_ID_ADPCM_MS:
- for (i = 0; i < avctx->channels; i++) {
+ for (int i = 0; i < avctx->channels; i++) {
int predictor = 0;
*dst++ = predictor;
c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
}
- for (i = 0; i < avctx->channels; i++) {
+ for (int i = 0; i < avctx->channels; i++) {
if (c->status[i].idelta < 16)
c->status[i].idelta = 16;
bytestream_put_le16(&dst, c->status[i].idelta);
}
- for (i = 0; i < avctx->channels; i++)
+ for (int i = 0; i < avctx->channels; i++)
c->status[i].sample2= *samples++;
- for (i = 0; i < avctx->channels; i++) {
+ for (int i = 0; i < avctx->channels; i++) {
c->status[i].sample1 = *samples++;
bytestream_put_le16(&dst, c->status[i].sample1);
}
- for (i = 0; i < avctx->channels; i++)
+ for (int i = 0; i < avctx->channels; i++)
bytestream_put_le16(&dst, c->status[i].sample2);
if (avctx->trellis > 0) {
- n = avctx->block_align - 7 * avctx->channels;
- if (!(buf = av_malloc(2 * n)))
+ const int n = avctx->block_align - 7 * avctx->channels;
+ uint8_t *buf = av_malloc(2 * n);
+ if (!buf)
return AVERROR(ENOMEM);
if (avctx->channels == 1) {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
avctx->channels);
- for (i = 0; i < n; i += 2)
+ for (int i = 0; i < n; i += 2)
*dst++ = (buf[i] << 4) | buf[i + 1];
} else {
adpcm_compress_trellis(avctx, samples, buf,
&c->status[0], n, avctx->channels);
adpcm_compress_trellis(avctx, samples + 1, buf + n,
&c->status[1], n, avctx->channels);
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
*dst++ = (buf[i] << 4) | buf[n + i];
}
av_free(buf);
} else {
- for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
+ for (int i = 7 * avctx->channels; i < avctx->block_align; i++) {
int nibble;
nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
@@ -819,22 +818,24 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
break;
case AV_CODEC_ID_ADPCM_YAMAHA:
- n = frame->nb_samples / 2;
+ {
+ int n = frame->nb_samples / 2;
if (avctx->trellis > 0) {
- if (!(buf = av_malloc(2 * n * 2)))
+ uint8_t *buf = av_malloc(2 * n * 2);
+ if (!buf)
return AVERROR(ENOMEM);
n *= 2;
if (avctx->channels == 1) {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
avctx->channels);
- for (i = 0; i < n; i += 2)
+ for (int i = 0; i < n; i += 2)
*dst++ = buf[i] | (buf[i + 1] << 4);
} else {
adpcm_compress_trellis(avctx, samples, buf,
&c->status[0], n, avctx->channels);
adpcm_compress_trellis(avctx, samples + 1, buf + n,
&c->status[1], n, avctx->channels);
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
*dst++ = buf[i] | (buf[n + i] << 4);
}
av_free(buf);
@@ -846,6 +847,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
*dst++ = nibble;
}
break;
+ }
case AV_CODEC_ID_ADPCM_IMA_APM:
{
PutBitContext pb;
@@ -853,8 +855,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
av_assert0(avctx->trellis == 0);
- for (n = frame->nb_samples / 2; n > 0; n--) {
- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int n = frame->nb_samples / 2; n > 0; n--) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, samples[st]));
}
@@ -875,18 +877,19 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
bytestream_put_le32(&dst, avctx->frame_size);
if (avctx->trellis > 0) {
- n = frame->nb_samples >> 1;
+ const int n = frame->nb_samples >> 1;
+ uint8_t *buf = av_malloc(2 * n);

- if (!(buf = av_malloc(2 * n)))
+ if (!buf)
return AVERROR(ENOMEM);
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], 2 * n, avctx->channels);
- for (i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
bytestream_put_byte(&dst, (buf[2 * i] << 4) | buf[2 * i + 1]);
samples += 2 * n;
av_free(buf);
- } else for (n = frame->nb_samples >> 1; n > 0; n--) {
+ } else for (int n = frame->nb_samples >> 1; n > 0; n--) {
int nibble;
nibble = adpcm_ima_compress_sample(&c->status[0], *samples++) << 4;
nibble |= adpcm_ima_compress_sample(&c->status[0], *samples++) & 0x0F;
@@ -906,7 +909,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
av_assert0(frame->nb_samples == 32);
- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
int64_t error = INT64_MAX, tmperr = INT64_MAX;
int shift = 2, flag = 0;
int saved1 = c->status[ch].sample1;
@@ -943,9 +946,9 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
init_put_bits(&pb, dst, pkt_size);
av_assert0(avctx->trellis == 0);
- for (n = frame->nb_samples / 2; n > 0; n--) {
+ for (int n = frame->nb_samples / 2; n > 0; n--) {
/* stereo: 1 byte (2 samples) for left, 1 byte for right */
- for (ch = 0; ch < avctx->channels; ch++) {
+ for (int ch = 0; ch < avctx->channels; ch++) {
int t1, t2;
t1 = adpcm_ima_compress_sample(&c->status[ch], *samples++);
t2 = adpcm_ima_compress_sample(&c->status[ch], samples[st]);