#include "libavutil/channel_layout.h"

#define MUL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS)

#define SAMPLES_BUF_SIZE 4096
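/*
 * Illustrative sketch, not part of the encoder: MUL() multiplies two
 * Q(FRAC_BITS) fixed-point values through a 64-bit intermediate so the
 * product cannot overflow before the shift.  FRAC_BITS comes from the
 * mpegaudio headers; the value 15 below is only assumed for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_FRAC_BITS 15                                            /* assumed for the demo */
#define EX_MUL(a, b) (((int64_t)(a) * (int64_t)(b)) >> EX_FRAC_BITS)
#define EX_FIX(x)    ((int)((x) * (double)(1 << EX_FRAC_BITS)))    /* float -> fixed point */

static void mul_demo(void)
{
    int a = EX_FIX(0.75);    /*  0.75  in Q15 */
    int b = EX_FIX(-0.5);    /* -0.50  in Q15 */
    int c = EX_MUL(a, b);    /* -0.375 in Q15; the 64-bit cast keeps the product exact */
    printf("%f\n", c / (double)(1 << EX_FRAC_BITS));
}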
/* excerpts from MPA_encode_init() */
if (channels <= 0 || channels > 2) {
    av_log(avctx, AV_LOG_ERROR,
           "encoding %d channel(s) is not allowed in mp2\n", channels);
    return AVERROR(EINVAL);
}
bitrate = bitrate / 1000;
/* ... */
avctx->delay = 512 - 32 + 1;
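/*
 * Usage sketch, not from this file: how a caller might configure the MP2
 * encoder that MPA_encode_init() above validates.  Function and field names
 * follow the libavcodec API of this era (avcodec_find_encoder(),
 * avcodec_alloc_context3(), avcodec_open2(); needs libavcodec/avcodec.h);
 * error handling is kept minimal.
 */
static AVCodecContext *open_mp2_encoder_example(void)
{
    AVCodec *codec      = avcodec_find_encoder(AV_CODEC_ID_MP2);
    AVCodecContext *enc = avcodec_alloc_context3(codec);

    enc->sample_rate    = 44100;               /* must appear in supported_samplerates */
    enc->channels       = 2;                   /* MPA_encode_init() allows only 1 or 2 */
    enc->channel_layout = AV_CH_LAYOUT_STEREO;
    enc->sample_fmt     = AV_SAMPLE_FMT_S16;   /* interleaved 16-bit input samples */
    enc->bit_rate       = 192000;              /* in bit/s; the init code works in kbit/s */

    if (avcodec_open2(enc, codec, NULL) < 0)
        return NULL;                           /* parameters rejected by the encoder */
    return enc;
}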
av_dlog(avctx, "%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\n",
        /* ... */);
v = (int)(exp2((3 - i) / 3.0) * (1 << 20));
/* ... */
scale_factor_inv_table[i] = exp2(-(3 - i) / 3.0) / (float)(1 << 20);
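/*
 * Worked example, not part of the encoder: the two tables above follow
 * gain(i) = 2^((3 - i) / 3), so each scale factor index step changes the
 * gain by 2^(1/3), roughly 2 dB.  scale_factor_table[] keeps the gain in
 * Q20 fixed point, scale_factor_inv_table[] its floating-point reciprocal.
 */
#include <math.h>
#include <stdio.h>

static void scale_factor_demo(void)
{
    int i;
    for (i = 0; i < 8; i++) {
        double gain = exp2((3 - i) / 3.0);
        printf("index %d: gain %.4f (%+.2f dB)\n", i, gain, 20.0 * log10(gain));
        /* index 0: 2.0000 (+6.02 dB), index 3: 1.0000 (+0.00 dB), ... */
    }
}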
/* excerpts from idct32(): fixed-point butterfly passes over tab[32] */
for (j = 31; j >= 3; j -= 2)
    tab[j] += tab[j - 2];
/* ... */
x1 = MUL((t[8] - x2), xp[0]);
x2 = MUL((t[8] + x2), xp[1]);
/* ... */
xr = MUL(t[28], xp[0]);
/* ... */
xr    = MUL(t[4], xp[1]);
t[ 4] = (t[24] - xr);
t[24] = (t[24] + xr);
/* ... */
xr = MUL(t[20], xp[2]);
/* ... */
xr    = MUL(t[12], xp[3]);
t[12] = (t[16] - xr);
t[16] = (t[16] + xr);
/* ... */
for (i = 0; i < 4; i++) {
    xr            = MUL(tab[30 - i*4], xp[0]);
    tab[30 - i*4] = (tab[ i*4] - xr);
    tab[ i*4]     = (tab[ i*4] + xr);

    xr            = MUL(tab[ 2 + i*4], xp[1]);
    tab[ 2 + i*4] = (tab[28 - i*4] - xr);
    tab[28 - i*4] = (tab[28 - i*4] + xr);

    xr            = MUL(tab[31 - i*4], xp[0]);
    tab[31 - i*4] = (tab[ 1 + i*4] - xr);
    tab[ 1 + i*4] = (tab[ 1 + i*4] + xr);

    xr            = MUL(tab[ 3 + i*4], xp[1]);
    tab[ 3 + i*4] = (tab[29 - i*4] - xr);
    tab[29 - i*4] = (tab[29 - i*4] + xr);
    /* ... */
}
/* ... */
xr = MUL(t1[0], *xp);
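/*
 * Reference sketch, not part of the encoder: idct32() is a fast fixed-point
 * 32-point cosine transform built from butterflies like the ones above and
 * the costab32[] constants.  Up to the fixed-point scaling it corresponds to
 * the slow transform below; the exact sign and scaling convention here is an
 * assumption taken from the MPEG-1 matrixing, not from this file.
 */
#include <math.h>

static void idct32_reference(double out[32], const double in[32])
{
    int i, j;
    for (i = 0; i < 32; i++) {
        double s = 0.0;
        for (j = 0; j < 32; j++)
            s += in[j] * cos((2 * i + 1) * j * M_PI / 64.0);
        out[i] = s;
    }
}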
#define WSHIFT (WFRAC_BITS + 15 - FRAC_BITS)

/* excerpts from filter(): new input samples are stored, reversed, into the per-channel FIFO */
s->samples_buf[ch][offset + (31 - i)] = samples[0];
/* ... */
/* windowed dot product: 8 taps, 64 samples apart, of the FIFO against the analysis window */
sum  = p[0*64] * q[0*64];
sum += p[1*64] * q[1*64];
sum += p[2*64] * q[2*64];
sum += p[3*64] * q[3*64];
sum += p[4*64] * q[4*64];
sum += p[5*64] * q[5*64];
sum += p[6*64] * q[6*64];
sum += p[7*64] * q[7*64];
/* ... */
/* fold the 64 windowed sums down to the 32 inputs of the cosine transform */
tmp1[0] = tmp[16] >> WSHIFT;
for (i = 1;  i <= 16; i++) tmp1[i] = (tmp[i+16] + tmp[16-i]) >> WSHIFT;
for (i = 17; i <= 31; i++) tmp1[i] = (tmp[i+16] - tmp[80-i]) >> WSHIFT;
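/*
 * Reference sketch, not from this file: filter() implements the standard
 * MPEG-1 analysis filterbank in fixed point.  The naive floating-point
 * version below follows the ISO 11172-3 flow chart; 'window' stands in for
 * the 512-tap analysis window (ff_mpa_enwindow / filter_bank are its
 * fixed-point form) and the scaling is an assumption for the sketch.
 */
#include <math.h>
#include <string.h>

static void analysis_filterbank_ref(double fifo[512],        /* fifo[0] holds the newest sample */
                                    const double window[512],
                                    const double in[32],      /* 32 new input samples */
                                    double sb[32])            /* 32 subband samples out */
{
    double y[64];
    int i, j, k;

    /* shift the FIFO by 32 and store the new block, newest sample first */
    memmove(fifo + 32, fifo, 480 * sizeof(*fifo));
    for (i = 0; i < 32; i++)
        fifo[31 - i] = in[i];

    /* window and reduce 512 values to 64 partial sums:
       the eight p[j*64] * q[j*64] products summed in filter() */
    for (k = 0; k < 64; k++) {
        y[k] = 0.0;
        for (j = 0; j < 8; j++)
            y[k] += fifo[k + 64 * j] * window[k + 64 * j];
    }

    /* cosine matrixing; filter() gets the same result from the tmp1[]
       folding above followed by idct32() */
    for (i = 0; i < 32; i++) {
        sb[i] = 0.0;
        for (k = 0; k < 64; k++)
            sb[i] += cos((2 * i + 1) * (k - 16) * M_PI / 64.0) * y[k];
    }
}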
static void compute_scale_factors(unsigned char scale_code[SBLIMIT],
                                  unsigned char scale_factors[SBLIMIT][3],
                                  int sb_samples[3][12][SBLIMIT],
                                  int sblimit)
{
    int *p, vmax, v, n, i, j, k, code;
    /* ... */
    unsigned char *sf = &scale_factors[0][0];

    for (j = 0; j < sblimit; j++) {
        /* ... */
        p = &sb_samples[i][0][j];
        /* ... */
        /* turn the bit position of the granule's peak value into a scale factor index */
        index = (21 - n) * 3 - 3;
        /* ... */
        av_dlog(NULL, "%2d:%d in=%x %x %d\n", /* ... */);
        /* ... */
        /* decide which of the three scale factors are actually transmitted */
        switch (d1 * 5 + d2) {
        /* ... */
            sf[1] = sf[2] = sf[0];
        /* ... */
            sf[0] = sf[1] = sf[2];
        /* ... */
            sf[0] = sf[2] = sf[1];
        /* ... */
            sf[1] = sf[2] = sf[0];
        /* ... */
        }
        av_dlog(NULL, "%d: %2d %2d %2d %d %d -> %d\n", j,
                sf[0], sf[1], sf[2], d1, d2, code);
        scale_code[j] = code;
        /* ... */
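/*
 * Simplified sketch, not the encoder's code: what the excerpt above does per
 * subband.  For each of the three 12-sample granules, take the peak absolute
 * value and pick the smallest scale factor gain that still covers it (the
 * real code derives the index from the position of the peak's most
 * significant bit instead of scanning); the switch on d1 * 5 + d2 then
 * selects one of the layer II patterns that transmit one, two or all three
 * scale factors per subband.
 */
static int pick_scale_factor(const int *samples, int stride, int n,
                             const int *table)   /* scale_factor_table[64], decreasing */
{
    int i, v, vmax = 0, index;

    for (i = 0; i < n; i++) {
        v = samples[i * stride];
        if (v < 0)
            v = -v;
        if (v > vmax)
            vmax = v;
    }
    /* stop before the first gain that would be too small for the peak */
    for (index = 0; index < 63; index++)
        if (table[index + 1] < vmax)
            break;
    return index;
}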
#define SB_NOTALLOCATED  0
#define SB_ALLOCATED     1
#define SB_NOMORE        2
static void compute_bit_allocation(MpegAudioContext *s,
                                   short smr1[MPA_MAX_CHANNELS][SBLIMIT],
                                   unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT],
                                   int *padding)
{
    int i, ch, b, max_smr, max_ch, max_sb, current_frame_size, max_frame_size;
    /* ... */
    const unsigned char *alloc;

    memcpy(smr, smr1, s->nb_channels * sizeof(short) * SBLIMIT);
    /* ... */
    current_frame_size = 32;            /* the 32-bit frame header is always spent */
    /* ... */
    /* find the (channel, subband) with the highest remaining signal-to-mask ratio */
    if (smr[ch][i] > max_smr && subband_status[ch][i] != SB_NOMORE) {
        max_smr = smr[ch][i];
        /* ... */
    }
    /* ... */
    av_dlog(NULL, "current=%d max=%d max_sb=%d max_ch=%d alloc=%d\n",
            current_frame_size, max_frame_size, max_sb, max_ch, /* ... */);
    /* ... */
    /* walk the allocation table to the record for that subband */
    for (i = 0; i < max_sb; i++) {
        alloc += 1 << alloc[0];
    }
    /* ... */
    if (current_frame_size + incr <= max_frame_size) {
        /* ... */
        current_frame_size += incr;
        /* ... */
        smr[max_ch][max_sb] = smr1[max_ch][max_sb] - quant_snr[alloc[b]];
        /* ... */
        if (b == ((1 << alloc[0]) - 1))
            subband_status[max_ch][max_sb] = SB_NOMORE;
    } else {
        subband_status[max_ch][max_sb] = SB_NOMORE;
    }
    /* ... */
    *padding = max_frame_size - current_frame_size;
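/*
 * Condensed sketch of the greedy strategy above, not the encoder's code:
 * single channel, and hypothetical cost[] / gain[] arrays (cost[b] is the
 * extra bits needed to move a subband from quantizer b to b + 1, gain[b] the
 * SNR of quantizer b, for b = 1..max_steps) instead of the real alloc_table
 * and quant_snr tables.  bit_alloc[] is assumed zeroed on entry and smr[]
 * is consumed.
 */
static void greedy_bit_alloc_sketch(int smr[SBLIMIT], int bit_alloc[SBLIMIT],
                                    const int *cost, const int *gain,
                                    int max_steps, int frame_bits, int *padding)
{
    int used = 32;                   /* the 32-bit frame header is always paid for */
    int sb, best;

    for (;;) {
        /* pick the subband that still needs bits the most */
        best = -1;
        for (sb = 0; sb < SBLIMIT; sb++)
            if (bit_alloc[sb] < max_steps && (best < 0 || smr[sb] > smr[best]))
                best = sb;
        if (best < 0)
            break;                                   /* every subband is finished */

        if (used + cost[bit_alloc[best]] > frame_bits) {
            bit_alloc[best] = max_steps;             /* cannot afford more: freeze it */
            continue;
        }
        used += cost[bit_alloc[best]];
        bit_alloc[best]++;
        smr[best] -= gain[bit_alloc[best]];          /* its masking need is partly met */
    }
    *padding = frame_bits - used;                    /* leftover bits become padding */
}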
/* quantization and bitstream writing (excerpts) */
int i, j, k, l, bit_alloc_bits, b, ch;
/* ... */
j += 1 << bit_alloc_bits;
/* ... */
/* floating-point path: undo the scale factor, then quantize linearly to 'steps' levels */
a    = (float)sample * scale_factor_inv_table[s->scale_factors[ch][i][k]];
q[m] = (int)((a + 1.0) * steps * 0.5);
/* ... */
/* fixed-point path: the scale factor is undone with a shift and a multiply */
if (shift < 0)
    q1 = sample << (-shift);
else
    q1 = sample >> shift;
q1   = (q1 * mult) >> P;
q[m] = ((q1 + (1 << P)) * steps) >> (P + 1);
/* ... */
/* 3-, 5- and 9-step quantizers: group three values into one code */
put_bits(&s->pb, /* ... */,
         q[0] + steps * (q[1] + steps * q[2]));
/* ... */
j += 1 << bit_alloc_bits;
/* ... */
/* pad the frame up to the computed size */
for (i = 0; i < padding; i++)
    put_bits(&s->pb, 1, 0);
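/*
 * Worked example, not part of the encoder: "grouping" for the 3-, 5- and
 * 9-step quantizers.  Three quantized samples are packed into one base-steps
 * number, as in the put_bits() call above, and written with 5, 7 or 10 bits
 * (3^3 = 27 < 2^5, 5^3 = 125 < 2^7, 9^3 = 729 < 2^10), saving one or two
 * bits per triplet compared with coding the samples separately.
 */
static unsigned group3(unsigned q0, unsigned q1, unsigned q2, unsigned steps)
{
    return q0 + steps * (q1 + steps * q2);           /* same expression as above */
}

static void ungroup3(unsigned v, unsigned steps, unsigned q[3])
{
    q[0] = v % steps; v /= steps;                    /* the decoder undoes the packing */
    q[1] = v % steps; v /= steps;
    q[2] = v;
}
/* e.g. steps = 5, q = {3, 1, 4}: group3() returns 3 + 5 * (1 + 5 * 4) = 108, 7 bits */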
const int16_t *samples = (const int16_t *)frame->data[0];
.supported_samplerates = (const int[]){
    44100, 48000, 32000, 22050, 24000, 16000, 0
},
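/*
 * Usage sketch, not from this file: supported_samplerates lists the three
 * MPEG-1 rates (32/44.1/48 kHz) and the three MPEG-2 LSF rates
 * (16/22.05/24 kHz) and ends with 0, the usual libavcodec terminator for
 * capability arrays, so a caller can probe it like this.
 */
static int mp2_rate_supported(const AVCodec *codec, int rate)
{
    const int *p = codec->supported_samplerates;
    if (!p)
        return 1;                 /* no list published means no declared restriction */
    for (; *p; p++)
        if (*p == rate)
            return 1;
    return 0;
}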