00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021 #include "avcodec.h"
00022 #include "bitstream.h"
00023 #include "bytestream.h"
00024
#define BLKSIZE 1024 /* encoder output block size in bytes (used as block_align) */
00056
00057
00058
/* IMA ADPCM step-index adjustment, indexed by the 4-bit code.
 * The upper half duplicates the lower half because bit 3 is the sign
 * bit and does not affect the index adaptation. */
static const int index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

/* IMA ADPCM quantizer step sizes, indexed by step_index (0..88).
 * Note that many programs use slight deviations from this table,
 * but such deviations are negligible. */
static const int step_table[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};
00079
00080
00081
/* Microsoft ADPCM: per-nibble scale factor (fixed point, /256) applied
 * to idelta after every sample; see adpcm_ms_expand_nibble(). */
static const int AdaptationTable[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

/* Microsoft ADPCM predictor coefficients (fixed point, /256),
 * indexed by the block's predictor byte. */
static const int AdaptCoeff1[] = {
    256, 512, 0, 192, 240, 460, 392
};

static const int AdaptCoeff2[] = {
    0, -256, 0, 64, 0, -208, -232
};
00094
00095
/* CD-ROM XA ADPCM prediction filter pairs {f0, f1}, indexed by the
 * filter number taken from the sound-group header; see xa_decode(). */
static const int xa_adpcm_table[5][2] = {
    { 0, 0 },
    { 60, 0 },
    { 115, -52 },
    { 98, -55 },
    { 122, -60 }
};

/* Electronic Arts ADPCM coefficient/shift lookup (groups of 4 entries
 * selected by the per-subblock nibbles in the EA decoder). */
static const int ea_adpcm_table[] = {
    0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
    3, 4, 7, 8, 10, 11, 0, -1, -3, -4
};

/* Creative ADPCM step multipliers (fixed point, /256), indexed by the
 * 3-bit magnitude of the code; see adpcm_ct_expand_nibble(). */
static const int ct_adpcm_table[8] = {
    0x00E6, 0x00E6, 0x00E6, 0x00E6,
    0x0133, 0x0199, 0x0200, 0x0266
};

/* SWF (Flash) ADPCM step-index adjustment tables for 2-, 3-, 4- and
 * 5-bit codes respectively; row n holds 2^(n+1) entries. */
static const int swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};

/* Yamaha ADPCM: per-nibble step scale factor (fixed point, /256). */
static const int yamaha_indexscale[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    230, 230, 230, 230, 307, 409, 512, 614
};

/* Yamaha ADPCM: signed difference multiplier per nibble
 * (odd magnitudes 1..15, negative for codes with bit 3 set). */
static const int yamaha_difflookup[] = {
    1, 3, 5, 7, 9, 11, 13, 15,
    -1, -3, -5, -7, -9, -11, -13, -15
};
00131
00132
00133
/* Per-channel predictor state shared by all ADPCM variants; each codec
 * uses only the subset of fields relevant to it. */
typedef struct ADPCMChannelStatus {
    int predictor;          /* current predicted sample value */
    short int step_index;   /* index into step_table (IMA variants) */
    int step;               /* current step size (CT/Yamaha/SBPro) */
    /* for encoding */
    int prev_sample;        /* last reconstructed sample (IMA encoder) */

    /* MS version */
    short sample1;          /* most recent output sample */
    short sample2;          /* second most recent output sample */
    int coeff1;             /* prediction coefficient 1 (/256 fixed point) */
    int coeff2;             /* prediction coefficient 2 (/256 fixed point) */
    int idelta;             /* current quantizer scale */
} ADPCMChannelStatus;

typedef struct ADPCMContext {
    int channel;            /* toggling channel index for the QT decoder */
    ADPCMChannelStatus status[6];  /* one entry per channel (up to 6 for EA R1-R3) */
} ADPCMContext;
00153
00154
00155
00156 #ifdef CONFIG_ENCODERS
00157 static int adpcm_encode_init(AVCodecContext *avctx)
00158 {
00159 if (avctx->channels > 2)
00160 return -1;
00161 switch(avctx->codec->id) {
00162 case CODEC_ID_ADPCM_IMA_WAV:
00163 avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1;
00164
00165 avctx->block_align = BLKSIZE;
00166
00167 break;
00168 case CODEC_ID_ADPCM_MS:
00169 avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
00170
00171 avctx->block_align = BLKSIZE;
00172 break;
00173 case CODEC_ID_ADPCM_YAMAHA:
00174 avctx->frame_size = BLKSIZE * avctx->channels;
00175 avctx->block_align = BLKSIZE;
00176 break;
00177 case CODEC_ID_ADPCM_SWF:
00178 if (avctx->sample_rate != 11025 &&
00179 avctx->sample_rate != 22050 &&
00180 avctx->sample_rate != 44100) {
00181 av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
00182 return -1;
00183 }
00184 avctx->frame_size = 512 * (avctx->sample_rate / 11025);
00185 break;
00186 default:
00187 return -1;
00188 break;
00189 }
00190
00191 avctx->coded_frame= avcodec_alloc_frame();
00192 avctx->coded_frame->key_frame= 1;
00193
00194 return 0;
00195 }
00196
/* Release the coded_frame allocated in adpcm_encode_init(). */
static int adpcm_encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}
00203
00204
00205 static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
00206 {
00207 int delta = sample - c->prev_sample;
00208 int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
00209 c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
00210 c->prev_sample = av_clip_int16(c->prev_sample);
00211 c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
00212 return nibble;
00213 }
00214
00215 static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
00216 {
00217 int predictor, nibble, bias;
00218
00219 predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 256;
00220
00221 nibble= sample - predictor;
00222 if(nibble>=0) bias= c->idelta/2;
00223 else bias=-c->idelta/2;
00224
00225 nibble= (nibble + bias) / c->idelta;
00226 nibble= av_clip(nibble, -8, 7)&0x0F;
00227
00228 predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
00229
00230 c->sample2 = c->sample1;
00231 c->sample1 = av_clip_int16(predictor);
00232
00233 c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
00234 if (c->idelta < 16) c->idelta = 16;
00235
00236 return nibble;
00237 }
00238
00239 static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
00240 {
00241 int nibble, delta;
00242
00243 if(!c->step) {
00244 c->predictor = 0;
00245 c->step = 127;
00246 }
00247
00248 delta = sample - c->predictor;
00249
00250 nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;
00251
00252 c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
00253 c->predictor = av_clip_int16(c->predictor);
00254 c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
00255 c->step = av_clip(c->step, 127, 24567);
00256
00257 return nibble;
00258 }
00259
/* One step of a candidate encoding path: the nibble emitted at this
 * position and the index of the previous step in the paths[] array. */
typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

/* One surviving trellis state: accumulated squared error, the tail of
 * its path, the decoder state it implies (last two samples and step). */
typedef struct TrellisNode {
    uint32_t ssd;   /* sum of squared differences along this path */
    int path;       /* index into paths[] of the latest step */
    int sample1;    /* last reconstructed sample */
    int sample2;    /* second-to-last reconstructed sample (MS only) */
    int step;       /* step index / idelta / step, depending on codec */
} TrellisNode;
00272
/**
 * Trellis (Viterbi-style) search over quantizer decisions: at each input
 * sample it keeps the 'frontier' best decoder states, tries the nibbles
 * nearest the ideal quantization for each, and finally writes one nibble
 * per sample (unpacked, one per byte) into dst.
 *
 * @param avctx   codec context (trellis depth, channel count, codec id)
 * @param samples input PCM, read with a stride of avctx->channels
 * @param dst     output buffer receiving n unpacked nibbles
 * @param c       channel state, seeded on entry and updated on return
 * @param n       number of samples/nibbles to encode
 */
static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
{
#define FREEZE_INTERVAL 128
    const int frontier = 1 << avctx->trellis;
    const int stride = avctx->channels;
    const int version = avctx->codec->id;
    const int max_paths = frontier*FREEZE_INTERVAL;
    TrellisPath paths[max_paths], *p;
    TrellisNode node_buf[2][frontier];
    TrellisNode *nodep_buf[2][frontier];
    TrellisNode **nodes = nodep_buf[0];         /* nodes[0..k] is a sorted list of live states */
    TrellisNode **nodes_next = nodep_buf[1];
    int pathn = 0, froze = -1, i, j, k;

    assert(!(max_paths&(max_paths-1)));

    memset(nodep_buf, 0, sizeof(nodep_buf));
    /* seed the single initial state from the channel's current state;
     * it lives in node_buf[1] because iteration i==0 scribbles over node_buf[0] */
    nodes[0] = &node_buf[1][0];
    nodes[0]->ssd = 0;
    nodes[0]->path = 0;
    nodes[0]->step = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_SWF))
        nodes[0]->sample1 = c->prev_sample;
    if(version == CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if(version == CODEC_ID_ADPCM_YAMAHA) {
        if(c->step == 0) {
            nodes[0]->step = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for(i=0; i<n; i++) {
        TrellisNode *t = node_buf[i&1];   /* fresh storage for this iteration's nodes */
        TrellisNode **u;
        int sample = samples[i*stride];
        memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
        for(j=0; j<frontier && nodes[j]; j++) {
            /* higher-ranked (lower-error) states try a wider nibble range */
            const int range = (j < frontier/2) ? 1 : 0;
            const int step = nodes[j]->step;
            int nidx;
            if(version == CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 256;
                const int div = (sample - predictor) / step;
                const int nmin = av_clip(div-range, -8, 6);
                const int nmax = av_clip(div+range, -7, 7);
                for(nidx=nmin; nidx<=nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample = predictor + nidx * step;
/* Insert the candidate (nibble, dec_sample, new step STEP_INDEX) into the \
 * sorted nodes_next list, reusing the worst slot when the list is full. */
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    if(nodes_next[frontier-1] && ssd >= nodes_next[frontier-1]->ssd)\
                        continue;\
                    /* Collapse any two states with the same previous sample value; \
                     * one could also distinguish states by step, but the effect of \
                     * that is negligible. */\
                    for(k=0; k<frontier && nodes_next[k]; k++) {\
                        if(dec_sample == nodes_next[k]->sample1) {\
                            assert(ssd >= nodes_next[k]->ssd);\
                            goto next_##NAME;\
                        }\
                    }\
                    for(k=0; k<frontier; k++) {\
                        if(!nodes_next[k] || ssd < nodes_next[k]->ssd) {\
                            TrellisNode *u = nodes_next[frontier-1];\
                            if(!u) {\
                                assert(pathn < max_paths);\
                                u = t++;\
                                u->path = pathn++;\
                            }\
                            u->ssd = ssd;\
                            u->step = STEP_INDEX;\
                            u->sample2 = nodes[j]->sample1;\
                            u->sample1 = dec_sample;\
                            paths[u->path].nibble = nibble;\
                            paths[u->path].prev = nodes[j]->path;\
                            memmove(&nodes_next[k+1], &nodes_next[k], (frontier-k-1)*sizeof(TrellisNode*));\
                            nodes_next[k] = u;\
                            break;\
                        }\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
                }
            } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_SWF)) {
/* Same store loop, but for the IMA/Yamaha reconstruction formula. */
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div-range, -7, 6);\
                int nmax = av_clip(div+range, -6, 7);\
                if(nmin<=0) nmin--; /* skip 0: the sign bit gives "negative zero" its own code */\
                if(nmax<0) nmax--;\
                for(nidx=nmin; nidx<=nmax; nidx++) {\
                    const int nibble = nidx<0 ? 7-nidx : nidx;\
                    int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
            } else {
                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u = nodes;
        nodes = nodes_next;
        nodes_next = u;

        /* prevent the accumulated SSD from overflowing uint32_t */
        if(nodes[0]->ssd > (1<<28)) {
            for(j=1; j<frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        /* periodically commit the best path so paths[] can be recycled */
        if(i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for(k=i; k>froze; k--) {
                dst[k] = p->nibble;
                p = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            /* other nodes might reference recycled path entries after the
             * freeze, so drop everything except the committed best state */
            memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
        }
    }

    /* walk the winning path backwards to emit the remaining nibbles */
    p = &paths[nodes[0]->path];
    for(i=n-1; i>froze; i--) {
        dst[i] = p->nibble;
        p = &paths[p->prev];
    }

    /* write the winning decoder state back; only the fields relevant to
     * the active codec are meaningful */
    c->predictor = nodes[0]->sample1;
    c->sample1 = nodes[0]->sample1;
    c->sample2 = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step = nodes[0]->step;
    c->idelta = nodes[0]->step;
}
00430
/**
 * Encode one frame of interleaved 16-bit PCM into the codec-specific
 * ADPCM block layout.
 *
 * @param avctx    codec context (codec id, channels, frame_size, trellis)
 * @param frame    output buffer
 * @param buf_size output buffer size in bytes
 * @param data     input samples (short *, interleaved by channel)
 * @return number of bytes written, or -1 for an unsupported codec
 */
static int adpcm_encode_frame(AVCodecContext *avctx,
                              unsigned char *frame, int buf_size, void *data)
{
    int n, i, st;
    short *samples;
    unsigned char *dst;
    ADPCMContext *c = avctx->priv_data;

    dst = frame;
    samples = (short *)data;
    st= avctx->channels == 2;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        n = avctx->frame_size / 8;
        c->status[0].prev_sample = (signed short)samples[0];
        /* per-channel block header: first sample (verbatim), step index,
         * and a reserved zero byte */
        bytestream_put_le16(&dst, c->status[0].prev_sample);
        *dst++ = (unsigned char)c->status[0].step_index;
        *dst++ = 0; /* unknown/reserved */
        samples++;
        if (avctx->channels == 2) {
            c->status[1].prev_sample = (signed short)samples[0];
            bytestream_put_le16(&dst, c->status[1].prev_sample);
            *dst++ = (unsigned char)c->status[1].step_index;
            *dst++ = 0;
            samples++;
        }

        /* data layout: 4 bytes (8 nibbles) per channel, channels interleaved */
        if(avctx->trellis > 0) {
            uint8_t buf[2][n*8];
            adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n*8);
            if(avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n*8);
            for(i=0; i<n; i++) {
                /* pack two nibbles per byte, low nibble first */
                *dst++ = buf[0][8*i+0] | (buf[0][8*i+1] << 4);
                *dst++ = buf[0][8*i+2] | (buf[0][8*i+3] << 4);
                *dst++ = buf[0][8*i+4] | (buf[0][8*i+5] << 4);
                *dst++ = buf[0][8*i+6] | (buf[0][8*i+7] << 4);
                if (avctx->channels == 2) {
                    *dst++ = buf[1][8*i+0] | (buf[1][8*i+1] << 4);
                    *dst++ = buf[1][8*i+2] | (buf[1][8*i+3] << 4);
                    *dst++ = buf[1][8*i+4] | (buf[1][8*i+5] << 4);
                    *dst++ = buf[1][8*i+6] | (buf[1][8*i+7] << 4);
                }
            }
        } else
            for (; n>0; n--) {
                /* 8 left-channel samples -> 4 bytes */
                *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
                *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
                *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
                *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
                *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
                dst++;
                /* then 8 right-channel samples -> 4 bytes */
                if (avctx->channels == 2) {
                    *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
                    *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
                    dst++;
                    *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
                    *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
                    dst++;
                    *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
                    *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
                    dst++;
                    *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
                    *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
                    dst++;
                }
                samples += 8 * avctx->channels;
            }
        break;
    case CODEC_ID_ADPCM_SWF:
    {
        int i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        n = avctx->frame_size-1;

        /* code-size selector: value 2 picks the 4-bit code tables
         * (cf. swf_index_tables) */
        put_bits(&pb, 2, 2);

        /* per-channel header: initial sample and 6-bit step index */
        for(i=0; i<avctx->channels; i++){
            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
            put_bits(&pb, 16, samples[i] & 0xFFFF);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = (signed short)samples[i];
        }

        if(avctx->trellis > 0) {
            uint8_t buf[2][n];
            /* NOTE(review): samples+2 skips the header sample(s) assuming
             * stereo interleave; confirm the mono offset is intended */
            adpcm_compress_trellis(avctx, samples+2, buf[0], &c->status[0], n);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+3, buf[1], &c->status[1], n);
            for(i=0; i<n; i++) {
                put_bits(&pb, 4, buf[0][i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[1][i]);
            }
        } else {
            for (i=1; i<avctx->frame_size; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
            }
        }
        flush_put_bits(&pb);
        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_MS:
        /* block header: predictor index per channel (always 0 here, so
         * coefficients come from AdaptCoeff*[0]) ... */
        for(i=0; i<avctx->channels; i++){
            int predictor=0;
            *dst++ = predictor;
            c->status[i].coeff1 = AdaptCoeff1[predictor];
            c->status[i].coeff2 = AdaptCoeff2[predictor];
        }
        /* ... then idelta per channel ... */
        for(i=0; i<avctx->channels; i++){
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;
            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        /* ... then the two priming samples per channel, stored verbatim */
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample1= *samples++;
            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample2= *samples++;
            bytestream_put_le16(&dst, c->status[i].sample2);
        }

        if(avctx->trellis > 0) {
            int n = avctx->block_align - 7*avctx->channels;
            uint8_t buf[2][n];
            if(avctx->channels == 1) {
                n *= 2;
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = (buf[0][i] << 4) | buf[0][i+1];
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = (buf[0][i] << 4) | buf[1][i];
            }
        } else
            for(i=7*avctx->channels; i<avctx->block_align; i++) {
                /* high nibble first; in stereo the low nibble is the
                 * right channel (st selects status[1]) */
                int nibble;
                nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
                nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
                *dst++ = nibble;
            }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        n = avctx->frame_size / 2;
        if(avctx->trellis > 0) {
            uint8_t buf[2][n*2];
            n *= 2;
            if(avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = buf[0][i] | (buf[0][i+1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = buf[0][i] | (buf[1][i] << 4);
            }
        } else
            for (; n>0; n--) {
                /* low nibble is the earlier sample of each pair */
                for(i = 0; i < avctx->channels; i++) {
                    int nibble;
                    nibble  = adpcm_yamaha_compress_sample(&c->status[i], samples[i]);
                    nibble |= adpcm_yamaha_compress_sample(&c->status[i], samples[i+avctx->channels]) << 4;
                    *dst++ = nibble;
                }
                samples += 2 * avctx->channels;
            }
        break;
    default:
        return -1;
    }
    return dst - frame;
}
00630 #endif //CONFIG_ENCODERS
00631
00632 static int adpcm_decode_init(AVCodecContext * avctx)
00633 {
00634 ADPCMContext *c = avctx->priv_data;
00635 unsigned int max_channels = 2;
00636
00637 switch(avctx->codec->id) {
00638 case CODEC_ID_ADPCM_EA_R1:
00639 case CODEC_ID_ADPCM_EA_R2:
00640 case CODEC_ID_ADPCM_EA_R3:
00641 max_channels = 6;
00642 break;
00643 }
00644 if(avctx->channels > max_channels){
00645 return -1;
00646 }
00647
00648 switch(avctx->codec->id) {
00649 case CODEC_ID_ADPCM_CT:
00650 c->status[0].step = c->status[1].step = 511;
00651 break;
00652 case CODEC_ID_ADPCM_IMA_WS:
00653 if (avctx->extradata && avctx->extradata_size == 2 * 4) {
00654 c->status[0].predictor = AV_RL32(avctx->extradata);
00655 c->status[1].predictor = AV_RL32(avctx->extradata + 4);
00656 }
00657 break;
00658 default:
00659 break;
00660 }
00661 return 0;
00662 }
00663
00664 static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
00665 {
00666 int step_index;
00667 int predictor;
00668 int sign, delta, diff, step;
00669
00670 step = step_table[c->step_index];
00671 step_index = c->step_index + index_table[(unsigned)nibble];
00672 if (step_index < 0) step_index = 0;
00673 else if (step_index > 88) step_index = 88;
00674
00675 sign = nibble & 8;
00676 delta = nibble & 7;
00677
00678
00679
00680 diff = ((2 * delta + 1) * step) >> shift;
00681 predictor = c->predictor;
00682 if (sign) predictor -= diff;
00683 else predictor += diff;
00684
00685 c->predictor = av_clip_int16(predictor);
00686 c->step_index = step_index;
00687
00688 return (short)c->predictor;
00689 }
00690
00691 static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
00692 {
00693 int predictor;
00694
00695 predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 256;
00696 predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
00697
00698 c->sample2 = c->sample1;
00699 c->sample1 = av_clip_int16(predictor);
00700 c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
00701 if (c->idelta < 16) c->idelta = 16;
00702
00703 return c->sample1;
00704 }
00705
00706 static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
00707 {
00708 int sign, delta, diff;
00709 int new_step;
00710
00711 sign = nibble & 8;
00712 delta = nibble & 7;
00713
00714
00715
00716 diff = ((2 * delta + 1) * c->step) >> 3;
00717
00718 c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
00719 c->predictor = av_clip_int16(c->predictor);
00720
00721 new_step = (ct_adpcm_table[nibble & 7] * c->step) >> 8;
00722 c->step = av_clip(new_step, 511, 32767);
00723
00724 return (short)c->predictor;
00725 }
00726
00727 static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
00728 {
00729 int sign, delta, diff;
00730
00731 sign = nibble & (1<<(size-1));
00732 delta = nibble & ((1<<(size-1))-1);
00733 diff = delta << (7 + c->step + shift);
00734
00735
00736 c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
00737
00738
00739 if (delta >= (2*size - 3) && c->step < 3)
00740 c->step++;
00741 else if (delta == 0 && c->step > 0)
00742 c->step--;
00743
00744 return (short) c->predictor;
00745 }
00746
00747 static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
00748 {
00749 if(!c->step) {
00750 c->predictor = 0;
00751 c->step = 127;
00752 }
00753
00754 c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
00755 c->predictor = av_clip_int16(c->predictor);
00756 c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
00757 c->step = av_clip(c->step, 127, 24567);
00758 return c->predictor;
00759 }
00760
/**
 * Decode one 128-byte CD-ROM XA ADPCM sound group into 16-bit PCM.
 * Each group holds 8 interleaved 28-sample units; the even units use
 * the low nibbles and 'left' state, the odd units the high nibbles and
 * (in stereo, inc==2) the 'right' state.
 *
 * @param out   output PCM, written with stride 'inc'
 * @param in    128-byte input sound group
 * @param left  state for the left (or only) channel
 * @param right state for the right channel (used only when inc==2)
 * @param inc   output stride: 1 for mono, 2 for stereo
 */
static void xa_decode(short *out, const unsigned char *in,
                      ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    for(i=0;i<4;i++) {
        /* even unit header: shift in the low nibble, filter in the high */
        shift = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            /* sign-extend the low nibble, then predict:
             * s = (t << shift) + (s_1*f0 + s_2*f1 + 32) >> 6 */
            t = (signed char)(d<<4)>>4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo: stash left state, switch to the right channel */
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
            out = out + 1 - 28*2;  /* rewind to interleave the right samples */
        }

        /* odd unit header */
        shift = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            /* sign-extend the high nibble */
            t = (signed char)d >> 4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo: save right state, realign for the next unit */
            right->sample1 = s_1;
            right->sample2 = s_2;
            out -= 1;
        } else {      /* mono: both units advanced the left channel */
            left->sample1 = s_1;
            left->sample2 = s_2;
        }
    }
}
00825
00826
00827
/* Fetch the next 4-bit code for the DK3 decoder into 'nibble'.
 * Codes are consumed low nibble first from each byte; fetching a new
 * byte past the end of the input executes 'break' in the enclosing
 * decode loop, so this macro may only be used inside one. */
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) \
    { \
        nibble = (last_byte >> 4) & 0x0F; \
        decode_top_nibble_next = 0; \
    } \
    else \
    { \
        last_byte = *src++; \
        if (src >= buf + buf_size) break; \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }
00841
00842 static int adpcm_decode_frame(AVCodecContext *avctx,
00843 void *data, int *data_size,
00844 const uint8_t *buf, int buf_size)
00845 {
00846 ADPCMContext *c = avctx->priv_data;
00847 ADPCMChannelStatus *cs;
00848 int n, m, channel, i;
00849 int block_predictor[2];
00850 short *samples;
00851 short *samples_end;
00852 const uint8_t *src;
00853 int st;
00854
00855
00856 unsigned char last_byte = 0;
00857 unsigned char nibble;
00858 int decode_top_nibble_next = 0;
00859 int diff_channel;
00860
00861
00862 uint32_t samples_in_chunk;
00863 int32_t previous_left_sample, previous_right_sample;
00864 int32_t current_left_sample, current_right_sample;
00865 int32_t next_left_sample, next_right_sample;
00866 int32_t coeff1l, coeff2l, coeff1r, coeff2r;
00867 uint8_t shift_left, shift_right;
00868 int count1, count2;
00869
00870 if (!buf_size)
00871 return 0;
00872
00873
00874
00875
00876 if(*data_size/4 < buf_size + 8)
00877 return -1;
00878
00879 samples = data;
00880 samples_end= samples + *data_size/2;
00881 *data_size= 0;
00882 src = buf;
00883
00884 st = avctx->channels == 2 ? 1 : 0;
00885
00886 switch(avctx->codec->id) {
00887 case CODEC_ID_ADPCM_IMA_QT:
00888 n = (buf_size - 2);
00889 channel = c->channel;
00890 cs = &(c->status[channel]);
00891
00892
00893
00894 cs->predictor = (*src++) << 8;
00895 cs->predictor |= (*src & 0x80);
00896 cs->predictor &= 0xFF80;
00897
00898
00899 if(cs->predictor & 0x8000)
00900 cs->predictor -= 0x10000;
00901
00902 cs->predictor = av_clip_int16(cs->predictor);
00903
00904 cs->step_index = (*src++) & 0x7F;
00905
00906 if (cs->step_index > 88){
00907 av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
00908 cs->step_index = 88;
00909 }
00910
00911 cs->step = step_table[cs->step_index];
00912
00913 if (st && channel)
00914 samples++;
00915
00916 for(m=32; n>0 && m>0; n--, m--) {
00917 *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
00918 samples += avctx->channels;
00919 *samples = adpcm_ima_expand_nibble(cs, (src[0] >> 4) & 0x0F, 3);
00920 samples += avctx->channels;
00921 src ++;
00922 }
00923
00924 if(st) {
00925 c->channel = (channel + 1) % 2;
00926 if(channel == 1) {
00927 return src - buf;
00928 }
00929 }
00930 break;
00931 case CODEC_ID_ADPCM_IMA_WAV:
00932 if (avctx->block_align != 0 && buf_size > avctx->block_align)
00933 buf_size = avctx->block_align;
00934
00935
00936
00937 for(i=0; i<avctx->channels; i++){
00938 cs = &(c->status[i]);
00939 cs->predictor = *samples++ = (int16_t)(src[0] + (src[1]<<8));
00940 src+=2;
00941
00942 cs->step_index = *src++;
00943 if (cs->step_index > 88){
00944 av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
00945 cs->step_index = 88;
00946 }
00947 if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]);
00948 }
00949
00950 while(src < buf + buf_size){
00951 for(m=0; m<4; m++){
00952 for(i=0; i<=st; i++)
00953 *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
00954 for(i=0; i<=st; i++)
00955 *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3);
00956 src++;
00957 }
00958 src += 4*st;
00959 }
00960 break;
00961 case CODEC_ID_ADPCM_4XM:
00962 cs = &(c->status[0]);
00963 c->status[0].predictor= (int16_t)(src[0] + (src[1]<<8)); src+=2;
00964 if(st){
00965 c->status[1].predictor= (int16_t)(src[0] + (src[1]<<8)); src+=2;
00966 }
00967 c->status[0].step_index= (int16_t)(src[0] + (src[1]<<8)); src+=2;
00968 if(st){
00969 c->status[1].step_index= (int16_t)(src[0] + (src[1]<<8)); src+=2;
00970 }
00971 if (cs->step_index < 0) cs->step_index = 0;
00972 if (cs->step_index > 88) cs->step_index = 88;
00973
00974 m= (buf_size - (src - buf))>>st;
00975 for(i=0; i<m; i++) {
00976 *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
00977 if (st)
00978 *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
00979 *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
00980 if (st)
00981 *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
00982 }
00983
00984 src += m<<st;
00985
00986 break;
00987 case CODEC_ID_ADPCM_MS:
00988 if (avctx->block_align != 0 && buf_size > avctx->block_align)
00989 buf_size = avctx->block_align;
00990 n = buf_size - 7 * avctx->channels;
00991 if (n < 0)
00992 return -1;
00993 block_predictor[0] = av_clip(*src++, 0, 7);
00994 block_predictor[1] = 0;
00995 if (st)
00996 block_predictor[1] = av_clip(*src++, 0, 7);
00997 c->status[0].idelta = (int16_t)((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
00998 src+=2;
00999 if (st){
01000 c->status[1].idelta = (int16_t)((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
01001 src+=2;
01002 }
01003 c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
01004 c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
01005 c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
01006 c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];
01007
01008 c->status[0].sample1 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
01009 src+=2;
01010 if (st) c->status[1].sample1 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
01011 if (st) src+=2;
01012 c->status[0].sample2 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
01013 src+=2;
01014 if (st) c->status[1].sample2 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
01015 if (st) src+=2;
01016
01017 *samples++ = c->status[0].sample1;
01018 if (st) *samples++ = c->status[1].sample1;
01019 *samples++ = c->status[0].sample2;
01020 if (st) *samples++ = c->status[1].sample2;
01021 for(;n>0;n--) {
01022 *samples++ = adpcm_ms_expand_nibble(&c->status[0], (src[0] >> 4) & 0x0F);
01023 *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
01024 src ++;
01025 }
01026 break;
01027 case CODEC_ID_ADPCM_IMA_DK4:
01028 if (avctx->block_align != 0 && buf_size > avctx->block_align)
01029 buf_size = avctx->block_align;
01030
01031 c->status[0].predictor = (int16_t)(src[0] | (src[1] << 8));
01032 c->status[0].step_index = src[2];
01033 src += 4;
01034 *samples++ = c->status[0].predictor;
01035 if (st) {
01036 c->status[1].predictor = (int16_t)(src[0] | (src[1] << 8));
01037 c->status[1].step_index = src[2];
01038 src += 4;
01039 *samples++ = c->status[1].predictor;
01040 }
01041 while (src < buf + buf_size) {
01042
01043
01044 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01045 (src[0] >> 4) & 0x0F, 3);
01046
01047
01048
01049 if (st)
01050 *samples++ = adpcm_ima_expand_nibble(&c->status[1],
01051 src[0] & 0x0F, 3);
01052 else
01053 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01054 src[0] & 0x0F, 3);
01055
01056 src++;
01057 }
01058 break;
01059 case CODEC_ID_ADPCM_IMA_DK3:
01060 if (avctx->block_align != 0 && buf_size > avctx->block_align)
01061 buf_size = avctx->block_align;
01062
01063 if(buf_size + 16 > (samples_end - samples)*3/8)
01064 return -1;
01065
01066 c->status[0].predictor = (int16_t)(src[10] | (src[11] << 8));
01067 c->status[1].predictor = (int16_t)(src[12] | (src[13] << 8));
01068 c->status[0].step_index = src[14];
01069 c->status[1].step_index = src[15];
01070
01071 src += 16;
01072 diff_channel = c->status[1].predictor;
01073
01074
01075
01076 while (1) {
01077
01078
01079
01080
01081
01082 DK3_GET_NEXT_NIBBLE();
01083 adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
01084
01085
01086 DK3_GET_NEXT_NIBBLE();
01087 adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
01088
01089
01090 diff_channel = (diff_channel + c->status[1].predictor) / 2;
01091 *samples++ = c->status[0].predictor + c->status[1].predictor;
01092 *samples++ = c->status[0].predictor - c->status[1].predictor;
01093
01094
01095 DK3_GET_NEXT_NIBBLE();
01096 adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
01097
01098
01099 diff_channel = (diff_channel + c->status[1].predictor) / 2;
01100 *samples++ = c->status[0].predictor + c->status[1].predictor;
01101 *samples++ = c->status[0].predictor - c->status[1].predictor;
01102 }
01103 break;
01104 case CODEC_ID_ADPCM_IMA_WS:
01105
01106 while (src < buf + buf_size) {
01107
01108 if (st) {
01109 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01110 (src[0] >> 4) & 0x0F, 3);
01111 *samples++ = adpcm_ima_expand_nibble(&c->status[1],
01112 src[0] & 0x0F, 3);
01113 } else {
01114 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01115 (src[0] >> 4) & 0x0F, 3);
01116 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01117 src[0] & 0x0F, 3);
01118 }
01119
01120 src++;
01121 }
01122 break;
01123 case CODEC_ID_ADPCM_XA:
01124 while (buf_size >= 128) {
01125 xa_decode(samples, src, &c->status[0], &c->status[1],
01126 avctx->channels);
01127 src += 128;
01128 samples += 28 * 8;
01129 buf_size -= 128;
01130 }
01131 break;
01132 case CODEC_ID_ADPCM_IMA_EA_EACS:
01133 samples_in_chunk = bytestream_get_le32(&src) >> (1-st);
01134
01135 if (samples_in_chunk > buf_size-4-(8<<st)) {
01136 src += buf_size - 4;
01137 break;
01138 }
01139
01140 for (i=0; i<=st; i++)
01141 c->status[i].step_index = bytestream_get_le32(&src);
01142 for (i=0; i<=st; i++)
01143 c->status[i].predictor = bytestream_get_le32(&src);
01144
01145 for (; samples_in_chunk; samples_in_chunk--, src++) {
01146 *samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3);
01147 *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
01148 }
01149 break;
01150 case CODEC_ID_ADPCM_IMA_EA_SEAD:
01151 for (; src < buf+buf_size; src++) {
01152 *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
01153 *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
01154 }
01155 break;
01156 case CODEC_ID_ADPCM_EA:
01157 samples_in_chunk = AV_RL32(src);
01158 if (samples_in_chunk >= ((buf_size - 12) * 2)) {
01159 src += buf_size;
01160 break;
01161 }
01162 src += 4;
01163 current_left_sample = (int16_t)AV_RL16(src);
01164 src += 2;
01165 previous_left_sample = (int16_t)AV_RL16(src);
01166 src += 2;
01167 current_right_sample = (int16_t)AV_RL16(src);
01168 src += 2;
01169 previous_right_sample = (int16_t)AV_RL16(src);
01170 src += 2;
01171
01172 for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
01173 coeff1l = ea_adpcm_table[(*src >> 4) & 0x0F];
01174 coeff2l = ea_adpcm_table[((*src >> 4) & 0x0F) + 4];
01175 coeff1r = ea_adpcm_table[*src & 0x0F];
01176 coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
01177 src++;
01178
01179 shift_left = ((*src >> 4) & 0x0F) + 8;
01180 shift_right = (*src & 0x0F) + 8;
01181 src++;
01182
01183 for (count2 = 0; count2 < 28; count2++) {
01184 next_left_sample = (((*src & 0xF0) << 24) >> shift_left);
01185 next_right_sample = (((*src & 0x0F) << 28) >> shift_right);
01186 src++;
01187
01188 next_left_sample = (next_left_sample +
01189 (current_left_sample * coeff1l) +
01190 (previous_left_sample * coeff2l) + 0x80) >> 8;
01191 next_right_sample = (next_right_sample +
01192 (current_right_sample * coeff1r) +
01193 (previous_right_sample * coeff2r) + 0x80) >> 8;
01194
01195 previous_left_sample = current_left_sample;
01196 current_left_sample = av_clip_int16(next_left_sample);
01197 previous_right_sample = current_right_sample;
01198 current_right_sample = av_clip_int16(next_right_sample);
01199 *samples++ = (unsigned short)current_left_sample;
01200 *samples++ = (unsigned short)current_right_sample;
01201 }
01202 }
01203 break;
01204 case CODEC_ID_ADPCM_EA_R1:
01205 case CODEC_ID_ADPCM_EA_R2:
01206 case CODEC_ID_ADPCM_EA_R3: {
01207
01208
01209
01210
01211 const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
01212 int32_t previous_sample, current_sample, next_sample;
01213 int32_t coeff1, coeff2;
01214 uint8_t shift;
01215 unsigned int channel;
01216 uint16_t *samplesC;
01217 const uint8_t *srcC;
01218
01219 samples_in_chunk = (big_endian ? bytestream_get_be32(&src)
01220 : bytestream_get_le32(&src)) / 28;
01221 if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) ||
01222 28*samples_in_chunk*avctx->channels > samples_end-samples) {
01223 src += buf_size - 4;
01224 break;
01225 }
01226
01227 for (channel=0; channel<avctx->channels; channel++) {
01228 srcC = src + (big_endian ? bytestream_get_be32(&src)
01229 : bytestream_get_le32(&src))
01230 + (avctx->channels-channel-1) * 4;
01231 samplesC = samples + channel;
01232
01233 if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
01234 current_sample = (int16_t)bytestream_get_le16(&srcC);
01235 previous_sample = (int16_t)bytestream_get_le16(&srcC);
01236 } else {
01237 current_sample = c->status[channel].predictor;
01238 previous_sample = c->status[channel].prev_sample;
01239 }
01240
01241 for (count1=0; count1<samples_in_chunk; count1++) {
01242 if (*srcC == 0xEE) {
01243 srcC++;
01244 current_sample = (int16_t)bytestream_get_be16(&srcC);
01245 previous_sample = (int16_t)bytestream_get_be16(&srcC);
01246
01247 for (count2=0; count2<28; count2++) {
01248 *samplesC = (int16_t)bytestream_get_be16(&srcC);
01249 samplesC += avctx->channels;
01250 }
01251 } else {
01252 coeff1 = ea_adpcm_table[ (*srcC>>4) & 0x0F ];
01253 coeff2 = ea_adpcm_table[((*srcC>>4) & 0x0F) + 4];
01254 shift = (*srcC++ & 0x0F) + 8;
01255
01256 for (count2=0; count2<28; count2++) {
01257 if (count2 & 1)
01258 next_sample = ((*srcC++ & 0x0F) << 28) >> shift;
01259 else
01260 next_sample = ((*srcC & 0xF0) << 24) >> shift;
01261
01262 next_sample += (current_sample * coeff1) +
01263 (previous_sample * coeff2);
01264 next_sample = av_clip_int16(next_sample >> 8);
01265
01266 previous_sample = current_sample;
01267 current_sample = next_sample;
01268 *samplesC = current_sample;
01269 samplesC += avctx->channels;
01270 }
01271 }
01272 }
01273
01274 if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
01275 c->status[channel].predictor = current_sample;
01276 c->status[channel].prev_sample = previous_sample;
01277 }
01278 }
01279
01280 src = src + buf_size - (4 + 4*avctx->channels);
01281 samples += 28 * samples_in_chunk * avctx->channels;
01282 break;
01283 }
01284 case CODEC_ID_ADPCM_EA_XAS:
01285 if (samples_end-samples < 32*4*avctx->channels
01286 || buf_size < (4+15)*4*avctx->channels) {
01287 src += buf_size;
01288 break;
01289 }
01290 for (channel=0; channel<avctx->channels; channel++) {
01291 int coeff[2][4], shift[4];
01292 short *s2, *s = &samples[channel];
01293 for (n=0; n<4; n++, s+=32*avctx->channels) {
01294 for (i=0; i<2; i++)
01295 coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
01296 shift[n] = (src[2]&0x0F) + 8;
01297 for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
01298 s2[0] = (src[0]&0xF0) + (src[1]<<8);
01299 }
01300
01301 for (m=2; m<32; m+=2) {
01302 s = &samples[m*avctx->channels + channel];
01303 for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
01304 for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
01305 int level = ((*src & (0xF0>>i)) << (24+i)) >> shift[n];
01306 int pred = s2[-1*avctx->channels] * coeff[0][n]
01307 + s2[-2*avctx->channels] * coeff[1][n];
01308 s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
01309 }
01310 }
01311 }
01312 }
01313 samples += 32*4*avctx->channels;
01314 break;
01315 case CODEC_ID_ADPCM_IMA_AMV:
01316 case CODEC_ID_ADPCM_IMA_SMJPEG:
01317 c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
01318 c->status[0].step_index = bytestream_get_le16(&src);
01319
01320 if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
01321 src+=4;
01322
01323 while (src < buf + buf_size) {
01324 char hi, lo;
01325 lo = *src & 0x0F;
01326 hi = (*src >> 4) & 0x0F;
01327
01328 if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
01329 FFSWAP(char, hi, lo);
01330
01331 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01332 lo, 3);
01333 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01334 hi, 3);
01335 src++;
01336 }
01337 break;
01338 case CODEC_ID_ADPCM_CT:
01339 while (src < buf + buf_size) {
01340 if (st) {
01341 *samples++ = adpcm_ct_expand_nibble(&c->status[0],
01342 (src[0] >> 4) & 0x0F);
01343 *samples++ = adpcm_ct_expand_nibble(&c->status[1],
01344 src[0] & 0x0F);
01345 } else {
01346 *samples++ = adpcm_ct_expand_nibble(&c->status[0],
01347 (src[0] >> 4) & 0x0F);
01348 *samples++ = adpcm_ct_expand_nibble(&c->status[0],
01349 src[0] & 0x0F);
01350 }
01351 src++;
01352 }
01353 break;
01354 case CODEC_ID_ADPCM_SBPRO_4:
01355 case CODEC_ID_ADPCM_SBPRO_3:
01356 case CODEC_ID_ADPCM_SBPRO_2:
01357 if (!c->status[0].step_index) {
01358
01359 *samples++ = 128 * (*src++ - 0x80);
01360 if (st)
01361 *samples++ = 128 * (*src++ - 0x80);
01362 c->status[0].step_index = 1;
01363 }
01364 if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
01365 while (src < buf + buf_size) {
01366 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01367 (src[0] >> 4) & 0x0F, 4, 0);
01368 *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
01369 src[0] & 0x0F, 4, 0);
01370 src++;
01371 }
01372 } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
01373 while (src < buf + buf_size && samples + 2 < samples_end) {
01374 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01375 (src[0] >> 5) & 0x07, 3, 0);
01376 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01377 (src[0] >> 2) & 0x07, 3, 0);
01378 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01379 src[0] & 0x03, 2, 0);
01380 src++;
01381 }
01382 } else {
01383 while (src < buf + buf_size && samples + 3 < samples_end) {
01384 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01385 (src[0] >> 6) & 0x03, 2, 2);
01386 *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
01387 (src[0] >> 4) & 0x03, 2, 2);
01388 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01389 (src[0] >> 2) & 0x03, 2, 2);
01390 *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
01391 src[0] & 0x03, 2, 2);
01392 src++;
01393 }
01394 }
01395 break;
01396 case CODEC_ID_ADPCM_SWF:
01397 {
01398 GetBitContext gb;
01399 const int *table;
01400 int k0, signmask, nb_bits, count;
01401 int size = buf_size*8;
01402
01403 init_get_bits(&gb, buf, size);
01404
01405
01406 nb_bits = get_bits(&gb, 2)+2;
01407
01408 table = swf_index_tables[nb_bits-2];
01409 k0 = 1 << (nb_bits-2);
01410 signmask = 1 << (nb_bits-1);
01411
01412 while (get_bits_count(&gb) <= size - 22*avctx->channels) {
01413 for (i = 0; i < avctx->channels; i++) {
01414 *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
01415 c->status[i].step_index = get_bits(&gb, 6);
01416 }
01417
01418 for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
01419 int i;
01420
01421 for (i = 0; i < avctx->channels; i++) {
01422
01423 int delta = get_bits(&gb, nb_bits);
01424 int step = step_table[c->status[i].step_index];
01425 long vpdiff = 0;
01426 int k = k0;
01427
01428 do {
01429 if (delta & k)
01430 vpdiff += step;
01431 step >>= 1;
01432 k >>= 1;
01433 } while(k);
01434 vpdiff += step;
01435
01436 if (delta & signmask)
01437 c->status[i].predictor -= vpdiff;
01438 else
01439 c->status[i].predictor += vpdiff;
01440
01441 c->status[i].step_index += table[delta & (~signmask)];
01442
01443 c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
01444 c->status[i].predictor = av_clip_int16(c->status[i].predictor);
01445
01446 *samples++ = c->status[i].predictor;
01447 if (samples >= samples_end) {
01448 av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
01449 return -1;
01450 }
01451 }
01452 }
01453 }
01454 src += buf_size;
01455 break;
01456 }
01457 case CODEC_ID_ADPCM_YAMAHA:
01458 while (src < buf + buf_size) {
01459 if (st) {
01460 *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
01461 src[0] & 0x0F);
01462 *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
01463 (src[0] >> 4) & 0x0F);
01464 } else {
01465 *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
01466 src[0] & 0x0F);
01467 *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
01468 (src[0] >> 4) & 0x0F);
01469 }
01470 src++;
01471 }
01472 break;
01473 case CODEC_ID_ADPCM_THP:
01474 {
01475 int table[2][16];
01476 unsigned int samplecnt;
01477 int prev[2][2];
01478 int ch;
01479
01480 if (buf_size < 80) {
01481 av_log(avctx, AV_LOG_ERROR, "frame too small\n");
01482 return -1;
01483 }
01484
01485 src+=4;
01486 samplecnt = bytestream_get_be32(&src);
01487
01488 for (i = 0; i < 32; i++)
01489 table[0][i] = (int16_t)bytestream_get_be16(&src);
01490
01491
01492 for (i = 0; i < 4; i++)
01493 prev[0][i] = (int16_t)bytestream_get_be16(&src);
01494
01495 if (samplecnt >= (samples_end - samples) / (st + 1)) {
01496 av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
01497 return -1;
01498 }
01499
01500 for (ch = 0; ch <= st; ch++) {
01501 samples = (unsigned short *) data + ch;
01502
01503
01504 for (i = 0; i < samplecnt / 14; i++) {
01505 int index = (*src >> 4) & 7;
01506 unsigned int exp = 28 - (*src++ & 15);
01507 int factor1 = table[ch][index * 2];
01508 int factor2 = table[ch][index * 2 + 1];
01509
01510
01511 for (n = 0; n < 14; n++) {
01512 int32_t sampledat;
01513 if(n&1) sampledat= *src++ <<28;
01514 else sampledat= (*src&0xF0)<<24;
01515
01516 sampledat = ((prev[ch][0]*factor1
01517 + prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
01518 *samples = av_clip_int16(sampledat);
01519 prev[ch][1] = prev[ch][0];
01520 prev[ch][0] = *samples++;
01521
01522
01523
01524 samples += st;
01525 }
01526 }
01527 }
01528
01529
01530
01531 samples -= st;
01532 break;
01533 }
01534
01535 default:
01536 return -1;
01537 }
01538 *data_size = (uint8_t *)samples - (uint8_t *)data;
01539 return src - buf;
01540 }
01541
01542
01543
#ifdef CONFIG_ENCODERS
/*
 * Declare an ADPCM encoder AVCodec named "<name>_encoder".
 * The struct is initialized positionally (pre-designated-initializer
 * libavcodec style): name, type, id, priv_data_size, init, encode,
 * close, decode.  The decode slot is NULL since this is encode-only.
 * #name stringizes the macro argument into the codec's public name.
 */
#define ADPCM_ENCODER(id,name) \
AVCodec name ## _encoder = { \
    #name, /* codec name string (stringized macro argument) */ \
    CODEC_TYPE_AUDIO, \
    id, \
    sizeof(ADPCMContext), /* private context allocated per instance */ \
    adpcm_encode_init, \
    adpcm_encode_frame, \
    adpcm_encode_close, \
    NULL, /* no decode callback in the encoder struct */ \
};
#else
/* Encoders compiled out: expand to nothing so ADPCM_CODEC still works. */
#define ADPCM_ENCODER(id,name)
#endif
01559
#ifdef CONFIG_DECODERS
/*
 * Declare an ADPCM decoder AVCodec named "<name>_decoder".
 * Positional initialization mirrors ADPCM_ENCODER: name, type, id,
 * priv_data_size, init, encode, close, decode.  The encode and close
 * slots are NULL; all variants share adpcm_decode_init/adpcm_decode_frame,
 * which dispatch on avctx->codec->id.
 */
#define ADPCM_DECODER(id,name) \
AVCodec name ## _decoder = { \
    #name, /* codec name string (stringized macro argument) */ \
    CODEC_TYPE_AUDIO, \
    id, \
    sizeof(ADPCMContext), \
    adpcm_decode_init, \
    NULL, /* no encode callback */ \
    NULL, /* no close callback needed by the decoder */ \
    adpcm_decode_frame, \
};
#else
/* Decoders compiled out: expand to nothing so ADPCM_CODEC still works. */
#define ADPCM_DECODER(id,name)
#endif
01575
/* Declare both directions for a format that has an encoder and a decoder.
 * Expands to ADPCM_ENCODER and/or ADPCM_DECODER depending on which of
 * CONFIG_ENCODERS/CONFIG_DECODERS are enabled at build time. */
#define ADPCM_CODEC(id, name) \
    ADPCM_ENCODER(id,name) ADPCM_DECODER(id,name)

/* Registration table: one AVCodec object per supported ADPCM variant.
 * ADPCM_CODEC entries (IMA WAV, MS, SWF, Yamaha) have encoders as well;
 * all others are decode-only. */
ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm);
ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct);
ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea);
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1);
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2);
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3);
ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas);
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv);
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3);
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4);
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs);
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead);
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt);
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg);
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav);
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws);
ADPCM_CODEC  (CODEC_ID_ADPCM_MS, adpcm_ms);
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4);
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3);
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2);
ADPCM_CODEC  (CODEC_ID_ADPCM_SWF, adpcm_swf);
ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp);
ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa);
ADPCM_CODEC  (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha);