/* Replace the current MB with a flat, DC-only version. */
static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int mb_x, int mb_y)
{
    int dc, dcu, dcv, y, i;
    for (i = 0; i < 4; i++) {
        dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
        for (y = 0; y < 8; y++) {
            for (x = 0; x < 8; x++)
                dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            dest_cb[x + y * linesize[1]] = dcu / 8;
            dest_cr[x + y * linesize[2]] = dcv / 8;
        }
    }
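
/* filter181(): simple horizontal/vertical filtering of the per-block DC values.
 * Each DC becomes (8 * cur - prev - next) / 6; the multiply by 10923 and the
 * shift by 16 approximate the division by 6. */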
static void filter181(int16_t *data, int width, int height, int stride)
{
    /* horizontal pass */
    for (y = 1; y < height - 1; y++) {
        int prev_dc = data[0 + y * stride];

        for (x = 1; x < width - 1; x++) {
            int dc;

            dc = -prev_dc +
                 data[x + y * stride] * 8 -
                 data[x + 1 + y * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
    /* vertical pass */
    for (x = 1; x < width - 1; x++) {
        int prev_dc = data[x];

        for (y = 1; y < height - 1; y++) {
            int dc;

            dc = -prev_dc +
                 data[x + y * stride] * 8 -
                 data[x + (y + 1) * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
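
/* guess_dc(): guess the DC of blocks which do not have an undamaged DC.
 * The nearest undamaged DC is searched in each of the four directions and the
 * candidates are blended with weights inversely proportional to their distance. */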
static void guess_dc(ERContext *s, int16_t *dc, int w,
                     int h, int stride, int is_luma)
{
    int16_t  (*col)[4]  = av_malloc(stride * h * sizeof(int16_t)  * 4);
    uint32_t (*dist)[4] = av_malloc(stride * h * sizeof(uint32_t) * 4);
    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int mb_index_j = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            color = dc[b_x + b_y * stride];
            dist[b_x + b_y * stride][1] = distance >= 0 ? b_x - distance : 9999;
        for (b_x = w - 1; b_x >= 0; b_x--) {
            int mb_index_j = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            color = dc[b_x + b_y * stride];
            dist[b_x + b_y * stride][0] = distance >= 0 ? distance - b_x : 9999;
    for (b_x = 0; b_x < w; b_x++) {
        for (b_y = 0; b_y < h; b_y++) {
            int mb_index_j = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            color = dc[b_x + b_y * stride];
            dist[b_x + b_y * stride][3] = distance >= 0 ? b_y - distance : 9999;
        for (b_y = h - 1; b_y >= 0; b_y--) {
            int mb_index_j = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            color = dc[b_x + b_y * stride];
            dist[b_x + b_y * stride][2] = distance >= 0 ? distance - b_y : 9999;
    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int mb_index, error, j;
            int64_t guess, weight_sum;

            mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;

            for (j = 0; j < 4; j++) {
                int64_t weight = 256 * 256 * 256 * 16 /
                                 FFMAX(dist[b_x + b_y * stride][j], 1);
                guess      += weight * (int64_t)col[b_x + b_y * stride][j];
                weight_sum += weight;
            }
            guess = (guess + weight_sum / 2) / weight_sum;
            dc[b_x + b_y * stride] = guess;
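
/* h_block_filter(): simple horizontal deblocking filter used for error
 * resilience; it softens the vertical edge between two horizontally adjacent
 * 8x8 blocks when at least one of them is damaged. */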
static void h_block_filter(ERContext *s, uint8_t *dst, int w,
                           int h, int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;

    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;
    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w - 1; b_x++) {
            int offset = b_x * 8 + b_y * stride * 8;
            int16_t *left_mv  = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
            int16_t *right_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
            if (!(left_damage || right_damage))
                continue; // both sides undamaged, nothing to conceal here
            if ((!left_intra) && (!right_intra) &&
                FFABS(left_mv[0] - right_mv[0]) +
                FFABS(left_mv[1] + right_mv[1]) < 2)
                continue; // motion on both sides roughly agrees, leave the edge alone
            for (y = 0; y < 8; y++) {
                a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
                b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
                c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];
                if (!(left_damage && right_damage))
                    d = d * 16 / 9; // only one side damaged: apply a stronger correction

                if (left_damage) {
                    dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
                    dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
                    dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
                    dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
                }
                if (right_damage) {
                    dst[offset +  8 + y * stride] = cm[dst[offset +  8 + y * stride] - ((d * 7) >> 4)];
                    dst[offset +  9 + y * stride] = cm[dst[offset +  9 + y * stride] - ((d * 5) >> 4)];
                    dst[offset + 10 + y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
                    dst[offset + 11 + y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
                }
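
/* v_block_filter(): simple vertical deblocking filter used for error
 * resilience; the same operation as h_block_filter, applied to the horizontal
 * edge between two vertically adjacent 8x8 blocks. */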
static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
                           int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;

    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;
    for (b_y = 0; b_y < h - 1; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int offset = b_x * 8 + b_y * stride * 8;
            int16_t *top_mv    = s->cur_pic->motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
            int16_t *bottom_mv = s->cur_pic->motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
            if (!(top_damage || bottom_damage))
                continue; // both sides undamaged, nothing to conceal here
            if ((!top_intra) && (!bottom_intra) &&
                FFABS(top_mv[0] - bottom_mv[0]) +
                FFABS(top_mv[1] + bottom_mv[1]) < 2)
                continue; // motion above and below roughly agrees, leave the edge alone
            for (x = 0; x < 8; x++) {
                a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
                b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
                c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];
                if (!(top_damage && bottom_damage))
                    d = d * 16 / 9; // only one side damaged: apply a stronger correction

                if (top_damage) {
                    dst[offset + x +  7 * stride] = cm[dst[offset + x +  7 * stride] + ((d * 7) >> 4)];
                    dst[offset + x +  6 * stride] = cm[dst[offset + x +  6 * stride] + ((d * 5) >> 4)];
                    dst[offset + x +  5 * stride] = cm[dst[offset + x +  5 * stride] + ((d * 3) >> 4)];
                    dst[offset + x +  4 * stride] = cm[dst[offset + x +  4 * stride] + ((d * 1) >> 4)];
                }
                if (bottom_damage) {
                    dst[offset + x +  8 * stride] = cm[dst[offset + x +  8 * stride] - ((d * 7) >> 4)];
                    dst[offset + x +  9 * stride] = cm[dst[offset + x +  9 * stride] - ((d * 5) >> 4)];
                    dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
                    dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
                }
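
/* guess_mv(): guess motion vectors for macroblocks whose MVs were damaged.
 * Damaged MBs are repaired over several passes: candidate vectors are collected
 * from already-fixed neighbours (plus their mean and median), each candidate is
 * rendered, and the one whose 16x16 block best matches the surrounding
 * reconstructed pixels along its borders is kept. */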
#define MV_FROZEN    3
#define MV_CHANGED   2
#define MV_UNCHANGED 1

static void guess_mv(ERContext *s)
{
    int i, depth, num_avail;
    int mb_x, mb_y, mot_step, mot_stride;

    for (i = 0; i < s->mb_num; i++) {
        const int mot_index = (mb_x + mb_y * mot_stride) * mot_step;
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;
    for (depth = 0; ; depth++) {
        int changed, pass, none_left;

        for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
            for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    const int mb_xy = mb_x + mb_y * s->mb_stride;
                    int mv_predictor[8][2] = { { 0 } };
                    int best_score = 256 * 256 * 256 * 64;
                    const int mot_index = (mb_x + mb_y * mot_stride) * mot_step;
                    int prev_x, prev_y, prev_ref;

                    if ((mb_x ^ mb_y ^ pass) & 1)
                        continue; // visit the MBs in a checkerboard pattern that alternates each pass
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_FROZEN)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN)
                        j = 1;
                    if (mb_x > 0             && fixed[mb_xy - 1        ] == MV_CHANGED)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1        ] == MV_CHANGED)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_CHANGED)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED)
                        j = 1;
                    if (j == 0 && pass > 1)
                        continue;
                    /* collect candidate vectors from the already-fixed neighbours
                       (each accepted candidate also records its reference index
                       and increments pred_count) */
                    if (mb_x > 0 && fixed[mb_xy - 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index - mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index - mot_step][1];
                    }
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index + mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index + mot_step][1];
                    }
                    if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][1];
                    }
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][1];
                    }
                    if (pred_count > 1) {
                        int sum_x = 0, sum_y = 0, sum_r = 0;
                        int max_x, max_y, min_x, min_y, max_r, min_r;

                        for (j = 0; j < pred_count; j++) {
                            sum_x += mv_predictor[j][0];
                            sum_y += mv_predictor[j][1];
                            sum_r += ref[j];
                            if (j && ref[j] != ref[j - 1])
                                goto skip_mean_and_median;
                        }

                        /* mean */
                        mv_predictor[pred_count][0] = sum_x / j;
                        mv_predictor[pred_count][1] = sum_y / j;
                        ref[pred_count]             = sum_r / j;
                        /* median */
                        if (pred_count >= 3) {
                            min_y = min_x = min_r =  99999;
                            max_y = max_x = max_r = -99999;
                        } else {
                            min_x = min_y = max_x = max_y = min_r = max_r = 0;
                        }
                        for (j = 0; j < pred_count; j++) {
                            max_x = FFMAX(max_x, mv_predictor[j][0]);
                            max_y = FFMAX(max_y, mv_predictor[j][1]);
                            max_r = FFMAX(max_r, ref[j]);
                            min_x = FFMIN(min_x, mv_predictor[j][0]);
                            min_y = FFMIN(min_y, mv_predictor[j][1]);
                            min_r = FFMIN(min_r, ref[j]);
                        }
                        mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
                        mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
                        ref[pred_count + 1]             = sum_r - max_r - min_r;

                        if (pred_count == 4) {
                            mv_predictor[pred_count + 1][0] /= 2;
                            mv_predictor[pred_count + 1][1] /= 2;
                            ref[pred_count + 1]             /= 2;
                        }
skip_mean_and_median:
                    if (!fixed[mb_xy] && 0) {

                    /* the MV the MB currently has (prev_x/prev_y) is tried as a candidate too */
                    mv_predictor[pred_count][0] = prev_x;
                    mv_predictor[pred_count][1] = prev_y;
                    ref[pred_count]             = prev_ref;
                    for (j = 0; j < pred_count; j++) {
                        uint8_t *src = s->cur_pic->f.data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];

                        s->mv[0][0][0] = mv_predictor[j][0];
                        s->mv[0][0][1] = mv_predictor[j][1];
                        /* measure the discontinuity along the borders shared with already-fixed neighbours */
                        if (mb_x > 0 && fixed[mb_xy - 1]) {
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] - 1] -
                                               src[k * linesize[0]]);
                        }
                        if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] + 15] -
                                               src[k * linesize[0] + 16]);
                        }
                        if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k - linesize[0]] - src[k]);
                        }
                        if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k + linesize[0] * 15] -
                                               src[k + linesize[0] * 16]);
                        }
                        if (score <= best_score) { // <= favours the most recently tried candidate on ties
                            best_score = score;
                            best_pred  = j;
                        }
                    }
                    score_sum += best_score;
                    s->mv[0][0][0] = mv_predictor[best_pred][0];
                    s->mv[0][0][1] = mv_predictor[best_pred][1];

                    for (i = 0; i < mot_step; i++)
                        for (j = 0; j < mot_step; j++) {
                            s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
                            s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                        }

                    if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
                        fixed[mb_xy] = MV_CHANGED;
                        changed++;
                    } else
                        fixed[mb_xy] = MV_UNCHANGED;
        for (i = 0; i < s->mb_num; i++) {
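
/* is_intra_more_likely(): decide whether the damaged macroblocks of this
 * picture are better concealed as intra (spatial) or inter (temporal), by
 * sampling undamaged MBs and comparing SADs against the last picture. */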
static int is_intra_more_likely(ERContext *s)
{
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

    /* count the undamaged macroblocks */
    for (i = 0; i < s->mb_num; i++) {

    if (undamaged_count < 5)
        return 0; // almost everything is damaged, so temporal (inter) concealment is used

    skip_amount = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
    for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            if ((j % skip_amount) != 0)
                continue; // skip a few MBs to speed this up

            uint8_t *mb_ptr      = s->cur_pic->f.data[0] +
                                   mb_x * 16 + mb_y * 16 * linesize[0];
            uint8_t *last_mb_ptr = s->last_pic->f.data[0] +
                                   mb_x * 16 + mb_y * 16 * linesize[0];
            is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
                                              linesize[0], 16);
            is_intra_likely -= s->dsp->sad[0](NULL, last_mb_ptr,
                                              last_mb_ptr + linesize[0] * 16,
                                              linesize[0], 16);

    return is_intra_likely > 0;
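
/* ff_er_add_slice(): add a decoded slice, marking its macroblock range in the
 * error status table with the supplied status. */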
void ff_er_add_slice(ERContext *s, int startx, int starty,
                     int endx, int endy, int status)
{
    const int start_i = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
    const int end_i   = av_clip(endx   + endy   * s->mb_width, 0, s->mb_num);
    if (start_i > end_i || start_xy > end_xy) {
        av_log(s->avctx, AV_LOG_ERROR,
               "internal error, slice end before start\n");
        return;
    }

    memset(&s->error_status_table[start_xy], 0,
           (end_xy - start_xy) * sizeof(uint8_t));

    for (i = start_xy; i < end_xy; i++)
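
/* ff_er_frame_end(): called after a frame has been decoded; if any slice was
 * damaged or missing, run the actual concealment: spread the error status,
 * decide between intra and inter concealment, guess MVs and DCs, and filter
 * the concealed block edges. */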
void ff_er_frame_end(ERContext *s)
{
    int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
    int threshold_part[4] = { 100, 100, 100 };

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {

    for (i = 0; i < 2; i++) {

    for (i = 0; i < 2; i++) {

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
    for (error_type = 1; error_type <= 3; error_type++) {
        for (i = s->mb_num - 1; i >= 0; i--) {
            if (error & (1 << error_type))
            if (error & (8 << error_type))

    for (i = s->mb_num - 1; i >= 0; i--) {
    for (error_type = 1; error_type <= 3; error_type++) {
        for (i = s->mb_num - 1; i >= 0; i--) {
            if (error & (1 << error_type))

            if (distance < threshold_part[error_type - 1])
            if (distance < threshold)
    for (i = 0; i < s->mb_num; i++) {

    for (i = 0; i < s->mb_num; i++) {
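
    /* count how many macroblocks have damaged DC, AC and MV data */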
    dc_error = ac_error = mv_error = 0;
    for (i = 0; i < s->mb_num; i++) {

    /* set the type of completely damaged MBs to the most likely one */
    for (i = 0; i < s->mb_num; i++) {
        if (is_intra_likely)

    for (i = 0; i < s->mb_num; i++) {
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
            for (j = 0; j < 4; j++) {

            s->decode_mb(s->opaque, 0, mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0);
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
            const int mb_xy = mb_x + mb_y * s->mb_stride;
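
    /* fill the dc_val tables: the DC (scaled by 8) of every 8x8 luma block and
       of each chroma block is computed from the reconstructed picture */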
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int dc, dcu, dcv, y, n;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            dest_y  = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic->f.data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic->f.data[2] + mb_x *  8 + mb_y *  8 * linesize[2];
            for (n = 0; n < 4; n++) {
                dc = 0;
                for (y = 0; y < 8; y++) {
                    for (x = 0; x < 8; x++)
                        dc += dest_y[x + (n & 1) * 8 +
                                     (y + (n >> 1) * 8) * linesize[0]];
                }
                dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
            dcu = dcv = 0;
            for (y = 0; y < 8; y++) {
                for (x = 0; x < 8; x++) {
                    dcu += dest_cb[x + y * linesize[1]];
                    dcv += dest_cr[x + y * linesize[2]];
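
    /* render a flat, DC-only version of every damaged intra MB, using the
       (possibly guessed) DC values collected above */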
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            dest_y  = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic->f.data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic->f.data[2] + mb_x *  8 + mb_y *  8 * linesize[2];

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
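
    /* finally, reset the per-MB bookkeeping tables so this frame's errors do
       not leak into the next one */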
    for (i = 0; i < s->mb_num; i++) {