FFmpeg  2.1.1
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "avcodec.h"
34 #include "dsputil.h"
35 #include "h264chroma.h"
36 #include "internal.h"
37 #include "mathops.h"
38 #include "mpegvideo.h"
39 #include "mjpegenc.h"
40 #include "msmpeg4.h"
41 #include "xvmc_internal.h"
42 #include "thread.h"
43 #include <limits.h>
44 
46  int16_t *block, int n, int qscale);
48  int16_t *block, int n, int qscale);
50  int16_t *block, int n, int qscale);
52  int16_t *block, int n, int qscale);
54  int16_t *block, int n, int qscale);
56  int16_t *block, int n, int qscale);
58  int16_t *block, int n, int qscale);
59 
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
64 };
65 
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 };
77 
/* Per-qscale DC coefficient scale values (128 entries, all 4).
 * One of the candidate tables gathered in ff_mpeg2_dc_scale_table[];
 * presumably selected by intra_dc_precision — confirm against the
 * MPEG-2 decoder that indexes that array. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//   0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
89 
/* Per-qscale DC coefficient scale values (128 entries, all 2).
 * Companion of mpeg2_dc_scale_table1/3; presumably selected by
 * intra_dc_precision via ff_mpeg2_dc_scale_table[] — confirm against
 * the decoder. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//   0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
101 
/* Per-qscale DC coefficient scale values (128 entries, all 1).
 * Companion of mpeg2_dc_scale_table1/2; presumably selected by
 * intra_dc_precision via ff_mpeg2_dc_scale_table[] — confirm against
 * the decoder. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//   0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
113 
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
119 };
120 
124 };
125 
/**
 * Decode one macroblock for error concealment.
 * Installed as a callback (via the opaque pointer, an MpegEncContext) so
 * the shared error-resilience code can reconstruct damaged macroblocks.
 * NOTE(review): a few interior source lines are missing from this extract
 * (original lines 139-141); do not assume this listing is complete.
 */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
127  int (*mv)[2][4][2],
128  int mb_x, int mb_y, int mb_intra, int mb_skipped)
129 {
130  MpegEncContext *s = opaque;
131 
    /* Load the macroblock parameters supplied by the ER code into the
     * decoding context so ff_MPV_decode_mb() below can use them. */
132  s->mv_dir = mv_dir;
133  s->mv_type = mv_type;
134  s->mb_intra = mb_intra;
135  s->mb_skipped = mb_skipped;
136  s->mb_x = mb_x;
137  s->mb_y = mb_y;
138  memcpy(s->mv, mv, sizeof(*mv));
139 
142 
143  s->dsp.clear_blocks(s->block[0]);
144 
    /* Compute the luma (16x16) and chroma destination pointers for this
     * macroblock; the chroma block size is scaled by the chroma shifts. */
145  s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146  s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147  s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
148 
149  if (ref)
150  av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
151  ff_MPV_decode_mb(s, s->block);
152 }
153 
154 /* init common dct for both encoder and decoder */
156 {
157  ff_dsputil_init(&s->dsp, s->avctx);
158  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
159  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
161 
167  if (s->flags & CODEC_FLAG_BITEXACT)
170 
171  if (ARCH_ALPHA)
173  if (ARCH_ARM)
175  if (ARCH_BFIN)
177  if (ARCH_PPC)
179  if (ARCH_X86)
181 
182  /* load & permutate scantables
183  * note: only wmv uses different ones
184  */
185  if (s->alternate_scan) {
188  } else {
191  }
194 
195  return 0;
196 }
197 
199 {
200  int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
201 
202  // edge emu needs blocksize + filter length - 1
203  // (= 17x17 for halfpel / 21x21 for h264)
204  // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
205  // at uvlinesize. It supports only YUV420 so 24x24 is enough
206  // linesize * interlaced * MBsize
207  FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
208  fail);
209 
210  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
211  fail)
212  s->me.temp = s->me.scratchpad;
213  s->rd_scratchpad = s->me.scratchpad;
214  s->b_scratchpad = s->me.scratchpad;
215  s->obmc_scratchpad = s->me.scratchpad + 16;
216 
217  return 0;
218 fail:
220  return AVERROR(ENOMEM);
221 }
222 
223 /**
224  * Allocate a frame buffer
225  */
227 {
228  int r, ret;
229 
230  pic->tf.f = &pic->f;
231  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
234  r = ff_thread_get_buffer(s->avctx, &pic->tf,
235  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
236  else {
237  pic->f.width = s->avctx->width;
238  pic->f.height = s->avctx->height;
239  pic->f.format = s->avctx->pix_fmt;
240  r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
241  }
242 
243  if (r < 0 || !pic->f.data[0]) {
244  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
245  r, pic->f.data[0]);
246  return -1;
247  }
248 
249  if (s->avctx->hwaccel) {
250  assert(!pic->hwaccel_picture_private);
251  if (s->avctx->hwaccel->priv_data_size) {
253  if (!pic->hwaccel_priv_buf) {
254  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
255  return -1;
256  }
258  }
259  }
260 
261  if (s->linesize && (s->linesize != pic->f.linesize[0] ||
262  s->uvlinesize != pic->f.linesize[1])) {
264  "get_buffer() failed (stride changed)\n");
265  ff_mpeg_unref_picture(s, pic);
266  return -1;
267  }
268 
269  if (pic->f.linesize[1] != pic->f.linesize[2]) {
271  "get_buffer() failed (uv stride mismatch)\n");
272  ff_mpeg_unref_picture(s, pic);
273  return -1;
274  }
275 
276  if (!s->edge_emu_buffer &&
277  (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
279  "get_buffer() failed to allocate context scratch buffers.\n");
280  ff_mpeg_unref_picture(s, pic);
281  return ret;
282  }
283 
284  return 0;
285 }
286 
/**
 * Free the per-picture side-data buffers (the AVBufferRef-backed tables)
 * and reset the recorded allocation dimensions so a later ff_alloc_picture()
 * call will reallocate them.
 * NOTE(review): several interior source lines are missing from this extract
 * (original lines 293-300 and 302), including the unrefs of the non-indexed
 * table buffers — do not assume this listing is complete.
 */
287 static void free_picture_tables(Picture *pic)
288 {
289  int i;
290 
    /* Dimensions of 0 mark the tables as not allocated. */
291  pic->alloc_mb_width =
292  pic->alloc_mb_height = 0;
293 
300 
301  for (i = 0; i < 2; i++) {
303  av_buffer_unref(&pic->ref_index_buf[i]);
304  }
305 }
306 
308 {
309  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
310  const int mb_array_size = s->mb_stride * s->mb_height;
311  const int b8_array_size = s->b8_stride * s->mb_height * 2;
312  int i;
313 
314 
315  pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
316  pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
317  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
318  sizeof(uint32_t));
319  if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
320  return AVERROR(ENOMEM);
321 
322  if (s->encoding) {
323  pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
324  pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325  pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
326  if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
327  return AVERROR(ENOMEM);
328  }
329 
330  if (s->out_format == FMT_H263 || s->encoding ||
331  (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
332  int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
333  int ref_index_size = 4 * mb_array_size;
334 
335  for (i = 0; mv_size && i < 2; i++) {
336  pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337  pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
338  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339  return AVERROR(ENOMEM);
340  }
341  }
342 
343  pic->alloc_mb_width = s->mb_width;
344  pic->alloc_mb_height = s->mb_height;
345 
346  return 0;
347 }
348 
350 {
351  int ret, i;
352 #define MAKE_WRITABLE(table) \
353 do {\
354  if (pic->table &&\
355  (ret = av_buffer_make_writable(&pic->table)) < 0)\
356  return ret;\
357 } while (0)
358 
359  MAKE_WRITABLE(mb_var_buf);
360  MAKE_WRITABLE(mc_mb_var_buf);
361  MAKE_WRITABLE(mb_mean_buf);
362  MAKE_WRITABLE(mbskip_table_buf);
363  MAKE_WRITABLE(qscale_table_buf);
364  MAKE_WRITABLE(mb_type_buf);
365 
366  for (i = 0; i < 2; i++) {
367  MAKE_WRITABLE(motion_val_buf[i]);
368  MAKE_WRITABLE(ref_index_buf[i]);
369  }
370 
371  return 0;
372 }
373 
374 /**
375  * Allocate a Picture.
376  * The pixels are allocated/set by calling get_buffer() if shared = 0
377  */
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
379 {
380  int i, ret;
381 
382  if (pic->qscale_table_buf)
383  if ( pic->alloc_mb_width != s->mb_width
384  || pic->alloc_mb_height != s->mb_height)
385  free_picture_tables(pic);
386 
387  if (shared) {
388  av_assert0(pic->f.data[0]);
389  pic->shared = 1;
390  } else {
391  av_assert0(!pic->f.data[0]);
392 
393  if (alloc_frame_buffer(s, pic) < 0)
394  return -1;
395 
396  s->linesize = pic->f.linesize[0];
397  s->uvlinesize = pic->f.linesize[1];
398  }
399 
400  if (!pic->qscale_table_buf)
401  ret = alloc_picture_tables(s, pic);
402  else
403  ret = make_tables_writable(pic);
404  if (ret < 0)
405  goto fail;
406 
407  if (s->encoding) {
408  pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409  pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410  pic->mb_mean = pic->mb_mean_buf->data;
411  }
412 
413  pic->mbskip_table = pic->mbskip_table_buf->data;
414  pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
416 
417  if (pic->motion_val_buf[0]) {
418  for (i = 0; i < 2; i++) {
419  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420  pic->ref_index[i] = pic->ref_index_buf[i]->data;
421  }
422  }
423 
424  return 0;
425 fail:
426  av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427  ff_mpeg_unref_picture(s, pic);
428  free_picture_tables(pic);
429  return AVERROR(ENOMEM);
430 }
431 
432 /**
433  * Deallocate a picture.
434  */
436 {
437  int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
438 
439  pic->tf.f = &pic->f;
440  /* WM Image / Screen codecs allocate internal buffers with different
441  * dimensions / colorspaces; ignore user-defined callbacks for these. */
442  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
445  ff_thread_release_buffer(s->avctx, &pic->tf);
446  else
447  av_frame_unref(&pic->f);
448 
450 
451  if (pic->needs_realloc)
452  free_picture_tables(pic);
453 
454  memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
455 }
456 
458 {
459  int i;
460 
461 #define UPDATE_TABLE(table)\
462 do {\
463  if (src->table &&\
464  (!dst->table || dst->table->buffer != src->table->buffer)) {\
465  av_buffer_unref(&dst->table);\
466  dst->table = av_buffer_ref(src->table);\
467  if (!dst->table) {\
468  free_picture_tables(dst);\
469  return AVERROR(ENOMEM);\
470  }\
471  }\
472 } while (0)
473 
474  UPDATE_TABLE(mb_var_buf);
475  UPDATE_TABLE(mc_mb_var_buf);
476  UPDATE_TABLE(mb_mean_buf);
477  UPDATE_TABLE(mbskip_table_buf);
478  UPDATE_TABLE(qscale_table_buf);
479  UPDATE_TABLE(mb_type_buf);
480  for (i = 0; i < 2; i++) {
481  UPDATE_TABLE(motion_val_buf[i]);
482  UPDATE_TABLE(ref_index_buf[i]);
483  }
484 
485  dst->mb_var = src->mb_var;
486  dst->mc_mb_var = src->mc_mb_var;
487  dst->mb_mean = src->mb_mean;
488  dst->mbskip_table = src->mbskip_table;
489  dst->qscale_table = src->qscale_table;
490  dst->mb_type = src->mb_type;
491  for (i = 0; i < 2; i++) {
492  dst->motion_val[i] = src->motion_val[i];
493  dst->ref_index[i] = src->ref_index[i];
494  }
495 
496  dst->alloc_mb_width = src->alloc_mb_width;
497  dst->alloc_mb_height = src->alloc_mb_height;
498 
499  return 0;
500 }
501 
503 {
504  int ret;
505 
506  av_assert0(!dst->f.buf[0]);
507  av_assert0(src->f.buf[0]);
508 
509  src->tf.f = &src->f;
510  dst->tf.f = &dst->f;
511  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
512  if (ret < 0)
513  goto fail;
514 
515  ret = update_picture_tables(dst, src);
516  if (ret < 0)
517  goto fail;
518 
519  if (src->hwaccel_picture_private) {
521  if (!dst->hwaccel_priv_buf)
522  goto fail;
524  }
525 
526  dst->field_picture = src->field_picture;
527  dst->mb_var_sum = src->mb_var_sum;
528  dst->mc_mb_var_sum = src->mc_mb_var_sum;
529  dst->b_frame_score = src->b_frame_score;
530  dst->needs_realloc = src->needs_realloc;
531  dst->reference = src->reference;
532  dst->shared = src->shared;
533 
534  return 0;
535 fail:
536  ff_mpeg_unref_picture(s, dst);
537  return ret;
538 }
539 
541 {
542  int16_t (*tmp)[64];
543 
544  tmp = s->pblocks[4];
545  s->pblocks[4] = s->pblocks[5];
546  s->pblocks[5] = tmp;
547 }
548 
550 {
551  int y_size = s->b8_stride * (2 * s->mb_height + 1);
552  int c_size = s->mb_stride * (s->mb_height + 1);
553  int yc_size = y_size + 2 * c_size;
554  int i;
555 
556  s->edge_emu_buffer =
557  s->me.scratchpad =
558  s->me.temp =
559  s->rd_scratchpad =
560  s->b_scratchpad =
561  s->obmc_scratchpad = NULL;
562 
563  if (s->encoding) {
564  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
565  ME_MAP_SIZE * sizeof(uint32_t), fail)
567  ME_MAP_SIZE * sizeof(uint32_t), fail)
568  if (s->avctx->noise_reduction) {
570  2 * 64 * sizeof(int), fail)
571  }
572  }
573  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
574  s->block = s->blocks[0];
575 
576  for (i = 0; i < 12; i++) {
577  s->pblocks[i] = &s->block[i];
578  }
579  if (s->avctx->codec_tag == AV_RL32("VCR2"))
580  exchange_uv(s);
581 
582  if (s->out_format == FMT_H263) {
583  /* ac values */
585  yc_size * sizeof(int16_t) * 16, fail);
586  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
587  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
588  s->ac_val[2] = s->ac_val[1] + c_size;
589  }
590 
591  return 0;
592 fail:
593  return -1; // free() through ff_MPV_common_end()
594 }
595 
597 {
598  if (s == NULL)
599  return;
600 
602  av_freep(&s->me.scratchpad);
603  s->me.temp =
604  s->rd_scratchpad =
605  s->b_scratchpad =
606  s->obmc_scratchpad = NULL;
607 
608  av_freep(&s->dct_error_sum);
609  av_freep(&s->me.map);
610  av_freep(&s->me.score_map);
611  av_freep(&s->blocks);
612  av_freep(&s->ac_val_base);
613  s->block = NULL;
614 }
615 
617 {
618 #define COPY(a) bak->a = src->a
619  COPY(edge_emu_buffer);
620  COPY(me.scratchpad);
621  COPY(me.temp);
622  COPY(rd_scratchpad);
623  COPY(b_scratchpad);
624  COPY(obmc_scratchpad);
625  COPY(me.map);
626  COPY(me.score_map);
627  COPY(blocks);
628  COPY(block);
629  COPY(start_mb_y);
630  COPY(end_mb_y);
631  COPY(me.map_generation);
632  COPY(pb);
633  COPY(dct_error_sum);
634  COPY(dct_count[0]);
635  COPY(dct_count[1]);
636  COPY(ac_val_base);
637  COPY(ac_val[0]);
638  COPY(ac_val[1]);
639  COPY(ac_val[2]);
640 #undef COPY
641 }
642 
644 {
645  MpegEncContext bak;
646  int i, ret;
647  // FIXME copy only needed parts
648  // START_TIMER
649  backup_duplicate_context(&bak, dst);
650  memcpy(dst, src, sizeof(MpegEncContext));
651  backup_duplicate_context(dst, &bak);
652  for (i = 0; i < 12; i++) {
653  dst->pblocks[i] = &dst->block[i];
654  }
655  if (dst->avctx->codec_tag == AV_RL32("VCR2"))
656  exchange_uv(dst);
657  if (!dst->edge_emu_buffer &&
658  (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
659  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
660  "scratch buffers.\n");
661  return ret;
662  }
663  // STOP_TIMER("update_duplicate_context")
664  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
665  return 0;
666 }
667 
669  const AVCodecContext *src)
670 {
671  int i, ret;
672  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
673 
674  if (dst == src)
675  return 0;
676 
677  av_assert0(s != s1);
678 
679  // FIXME can parameters change on I-frames?
680  // in that case dst may need a reinit
681  if (!s->context_initialized) {
682  memcpy(s, s1, sizeof(MpegEncContext));
683 
684  s->avctx = dst;
685  s->bitstream_buffer = NULL;
687 
688  if (s1->context_initialized){
689 // s->picture_range_start += MAX_PICTURE_COUNT;
690 // s->picture_range_end += MAX_PICTURE_COUNT;
691  if((ret = ff_MPV_common_init(s)) < 0){
692  memset(s, 0, sizeof(MpegEncContext));
693  s->avctx = dst;
694  return ret;
695  }
696  }
697  }
698 
699  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
700  s->context_reinit = 0;
701  s->height = s1->height;
702  s->width = s1->width;
703  if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
704  return ret;
705  }
706 
707  s->avctx->coded_height = s1->avctx->coded_height;
708  s->avctx->coded_width = s1->avctx->coded_width;
709  s->avctx->width = s1->avctx->width;
710  s->avctx->height = s1->avctx->height;
711 
712  s->coded_picture_number = s1->coded_picture_number;
713  s->picture_number = s1->picture_number;
714  s->input_picture_number = s1->input_picture_number;
715 
716  av_assert0(!s->picture || s->picture != s1->picture);
717  if(s->picture)
718  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
719  ff_mpeg_unref_picture(s, &s->picture[i]);
720  if (s1->picture[i].f.data[0] &&
721  (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
722  return ret;
723  }
724 
725 #define UPDATE_PICTURE(pic)\
726 do {\
727  ff_mpeg_unref_picture(s, &s->pic);\
728  if (s1->pic.f.data[0])\
729  ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
730  else\
731  ret = update_picture_tables(&s->pic, &s1->pic);\
732  if (ret < 0)\
733  return ret;\
734 } while (0)
735 
736  UPDATE_PICTURE(current_picture);
737  UPDATE_PICTURE(last_picture);
738  UPDATE_PICTURE(next_picture);
739 
740  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
741  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
742  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
743 
744  // Error/bug resilience
745  s->next_p_frame_damaged = s1->next_p_frame_damaged;
746  s->workaround_bugs = s1->workaround_bugs;
747  s->padding_bug_score = s1->padding_bug_score;
748 
749  // MPEG4 timing info
750  memcpy(&s->time_increment_bits, &s1->time_increment_bits,
751  (char *) &s1->shape - (char *) &s1->time_increment_bits);
752 
753  // B-frame info
754  s->max_b_frames = s1->max_b_frames;
755  s->low_delay = s1->low_delay;
756  s->droppable = s1->droppable;
757 
758  // DivX handling (doesn't work)
759  s->divx_packed = s1->divx_packed;
760 
761  if (s1->bitstream_buffer) {
762  if (s1->bitstream_buffer_size +
766  s1->allocated_bitstream_buffer_size);
767  s->bitstream_buffer_size = s1->bitstream_buffer_size;
768  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
769  s1->bitstream_buffer_size);
770  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
772  }
773 
774  // linesize dependend scratch buffer allocation
775  if (!s->edge_emu_buffer)
776  if (s1->linesize) {
777  if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
778  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
779  "scratch buffers.\n");
780  return AVERROR(ENOMEM);
781  }
782  } else {
783  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
784  "be allocated due to unknown size.\n");
785  }
786 
787  // MPEG2/interlacing info
788  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
789  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
790 
791  if (!s1->first_field) {
792  s->last_pict_type = s1->pict_type;
793  if (s1->current_picture_ptr)
794  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
795 
796  if (s1->pict_type != AV_PICTURE_TYPE_B) {
797  s->last_non_b_pict_type = s1->pict_type;
798  }
799  }
800 
801  return 0;
802 }
803 
804 /**
805  * Set the given MpegEncContext to common defaults
806  * (same for encoding and decoding).
807  * The changed fields will not depend upon the
808  * prior state of the MpegEncContext.
809  */
811 {
812  s->y_dc_scale_table =
815  s->progressive_frame = 1;
816  s->progressive_sequence = 1;
818 
819  s->coded_picture_number = 0;
820  s->picture_number = 0;
821  s->input_picture_number = 0;
822 
823  s->picture_in_gop_number = 0;
824 
825  s->f_code = 1;
826  s->b_code = 1;
827 
828  s->slice_context_count = 1;
829 }
830 
831 /**
832  * Set the given MpegEncContext to defaults for decoding.
833  * the changed fields will not depend upon
834  * the prior state of the MpegEncContext.
835  */
837 {
839 }
840 
842 {
843  ERContext *er = &s->er;
844  int mb_array_size = s->mb_height * s->mb_stride;
845  int i;
846 
847  er->avctx = s->avctx;
848  er->dsp = &s->dsp;
849 
850  er->mb_index2xy = s->mb_index2xy;
851  er->mb_num = s->mb_num;
852  er->mb_width = s->mb_width;
853  er->mb_height = s->mb_height;
854  er->mb_stride = s->mb_stride;
855  er->b8_stride = s->b8_stride;
856 
858  er->error_status_table = av_mallocz(mb_array_size);
859  if (!er->er_temp_buffer || !er->error_status_table)
860  goto fail;
861 
862  er->mbskip_table = s->mbskip_table;
863  er->mbintra_table = s->mbintra_table;
864 
865  for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
866  er->dc_val[i] = s->dc_val[i];
867 
869  er->opaque = s;
870 
871  return 0;
872 fail:
873  av_freep(&er->er_temp_buffer);
875  return AVERROR(ENOMEM);
876 }
877 
878 /**
879  * Initialize and allocates MpegEncContext fields dependent on the resolution.
880  */
882 {
883  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
884 
885  s->mb_width = (s->width + 15) / 16;
886  s->mb_stride = s->mb_width + 1;
887  s->b8_stride = s->mb_width * 2 + 1;
888  s->b4_stride = s->mb_width * 4 + 1;
889  mb_array_size = s->mb_height * s->mb_stride;
890  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
891 
892  /* set default edge pos, will be overriden
893  * in decode_header if needed */
894  s->h_edge_pos = s->mb_width * 16;
895  s->v_edge_pos = s->mb_height * 16;
896 
897  s->mb_num = s->mb_width * s->mb_height;
898 
899  s->block_wrap[0] =
900  s->block_wrap[1] =
901  s->block_wrap[2] =
902  s->block_wrap[3] = s->b8_stride;
903  s->block_wrap[4] =
904  s->block_wrap[5] = s->mb_stride;
905 
906  y_size = s->b8_stride * (2 * s->mb_height + 1);
907  c_size = s->mb_stride * (s->mb_height + 1);
908  yc_size = y_size + 2 * c_size;
909 
910  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
911  for (y = 0; y < s->mb_height; y++)
912  for (x = 0; x < s->mb_width; x++)
913  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
914 
915  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
916 
917  if (s->encoding) {
918  /* Allocate MV tables */
919  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
920  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
921  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
922  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
923  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
924  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
925  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
931 
932  /* Allocate MB type table */
933  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
934 
935  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
936 
938  mb_array_size * sizeof(float), fail);
940  mb_array_size * sizeof(float), fail);
941 
942  }
943 
944  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
946  /* interlaced direct mode decoding tables */
947  for (i = 0; i < 2; i++) {
948  int j, k;
949  for (j = 0; j < 2; j++) {
950  for (k = 0; k < 2; k++) {
952  s->b_field_mv_table_base[i][j][k],
953  mv_table_size * 2 * sizeof(int16_t),
954  fail);
955  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
956  s->mb_stride + 1;
957  }
958  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
959  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
960  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
961  }
962  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
963  }
964  }
965  if (s->out_format == FMT_H263) {
966  /* cbp values */
967  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
968  s->coded_block = s->coded_block_base + s->b8_stride + 1;
969 
970  /* cbp, ac_pred, pred_dir */
971  FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
972  FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
973  }
974 
975  if (s->h263_pred || s->h263_plus || !s->encoding) {
976  /* dc values */
977  // MN: we need these for error resilience of intra-frames
978  FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
979  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
980  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
981  s->dc_val[2] = s->dc_val[1] + c_size;
982  for (i = 0; i < yc_size; i++)
983  s->dc_val_base[i] = 1024;
984  }
985 
986  /* which mb is a intra block */
987  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
988  memset(s->mbintra_table, 1, mb_array_size);
989 
990  /* init macroblock skip table */
991  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
992  // Note the + 1 is for a quicker mpeg4 slice_end detection
993 
994  return init_er(s);
995 fail:
996  return AVERROR(ENOMEM);
997 }
998 
999 /**
1000  * init common structure for both encoder and decoder.
1001  * this assumes that some variables like width/height are already set
1002  */
1004 {
1005  int i;
1006  int nb_slices = (HAVE_THREADS &&
1008  s->avctx->thread_count : 1;
1009 
1010  if (s->encoding && s->avctx->slices)
1011  nb_slices = s->avctx->slices;
1012 
1014  s->mb_height = (s->height + 31) / 32 * 2;
1015  else
1016  s->mb_height = (s->height + 15) / 16;
1017 
1018  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1020  "decoding to AV_PIX_FMT_NONE is not supported.\n");
1021  return -1;
1022  }
1023 
1024  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1025  int max_slices;
1026  if (s->mb_height)
1027  max_slices = FFMIN(MAX_THREADS, s->mb_height);
1028  else
1029  max_slices = MAX_THREADS;
1030  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1031  " reducing to %d\n", nb_slices, max_slices);
1032  nb_slices = max_slices;
1033  }
1034 
1035  if ((s->width || s->height) &&
1036  av_image_check_size(s->width, s->height, 0, s->avctx))
1037  return -1;
1038 
1039  ff_dct_common_init(s);
1040 
1041  s->flags = s->avctx->flags;
1042  s->flags2 = s->avctx->flags2;
1043 
1044  /* set chroma shifts */
1046  &s->chroma_x_shift,
1047  &s->chroma_y_shift);
1048 
1049  /* convert fourcc to upper case */
1052 
1053  s->avctx->coded_frame = &s->current_picture.f;
1054 
1055  if (s->encoding) {
1056  if (s->msmpeg4_version) {
1058  2 * 2 * (MAX_LEVEL + 1) *
1059  (MAX_RUN + 1) * 2 * sizeof(int), fail);
1060  }
1061  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1062 
1063  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1064  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1065  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1066  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1067  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1068  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1071 
1072  if (s->avctx->noise_reduction) {
1073  FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
1074  }
1075  }
1076 
1078  MAX_PICTURE_COUNT * sizeof(Picture), fail);
1079  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1081  }
1082  memset(&s->next_picture, 0, sizeof(s->next_picture));
1083  memset(&s->last_picture, 0, sizeof(s->last_picture));
1084  memset(&s->current_picture, 0, sizeof(s->current_picture));
1088 
1089  if (init_context_frame(s))
1090  goto fail;
1091 
1092  s->parse_context.state = -1;
1093 
1094  s->context_initialized = 1;
1095  s->thread_context[0] = s;
1096 
1097 // if (s->width && s->height) {
1098  if (nb_slices > 1) {
1099  for (i = 1; i < nb_slices; i++) {
1100  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1101  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1102  }
1103 
1104  for (i = 0; i < nb_slices; i++) {
1105  if (init_duplicate_context(s->thread_context[i]) < 0)
1106  goto fail;
1107  s->thread_context[i]->start_mb_y =
1108  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1109  s->thread_context[i]->end_mb_y =
1110  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1111  }
1112  } else {
1113  if (init_duplicate_context(s) < 0)
1114  goto fail;
1115  s->start_mb_y = 0;
1116  s->end_mb_y = s->mb_height;
1117  }
1118  s->slice_context_count = nb_slices;
1119 // }
1120 
1121  return 0;
1122  fail:
1123  ff_MPV_common_end(s);
1124  return -1;
1125 }
1126 
1127 /**
1128  * Frees and resets MpegEncContext fields depending on the resolution.
1129  * Is used during resolution changes to avoid a full reinitialization of the
1130  * codec.
1131  */
1133 {
1134  int i, j, k;
1135 
1136  av_freep(&s->mb_type);
 /* NOTE(review): this listing lost some av_freep() lines here (extraction
  * gaps); the *_base allocations backing the aliases below are freed in the
  * missing lines — code kept byte-identical. */
 /* clear the MV table aliases; they point into the *_base allocations */
1143  s->p_mv_table = NULL;
1144  s->b_forw_mv_table = NULL;
1145  s->b_back_mv_table = NULL;
1146  s->b_bidir_forw_mv_table = NULL;
1147  s->b_bidir_back_mv_table = NULL;
1148  s->b_direct_mv_table = NULL;
 /* free per-field MV and field-select tables for both directions/parities */
1149  for (i = 0; i < 2; i++) {
1150  for (j = 0; j < 2; j++) {
1151  for (k = 0; k < 2; k++) {
1152  av_freep(&s->b_field_mv_table_base[i][j][k]);
1153  s->b_field_mv_table[i][j][k] = NULL;
1154  }
1155  av_freep(&s->b_field_select_table[i][j]);
1156  av_freep(&s->p_field_mv_table_base[i][j]);
1157  s->p_field_mv_table[i][j] = NULL;
1158  }
1160  }
1161 
1162  av_freep(&s->dc_val_base);
1164  av_freep(&s->mbintra_table);
1165  av_freep(&s->cbp_table);
1166  av_freep(&s->pred_dir_table);
1167 
1168  av_freep(&s->mbskip_table);
1169 
1171  av_freep(&s->er.er_temp_buffer);
1172  av_freep(&s->mb_index2xy);
1173  av_freep(&s->lambda_table);
1174 
1175  av_freep(&s->cplx_tab);
1176  av_freep(&s->bits_tab);
1177 
 /* mark line sizes unset so the next init recomputes them */
1178  s->linesize = s->uvlinesize = 0;
1179 
1180  return 0;
1181 }
1182 
 /* Reinitialize the context for a new frame geometry, reusing as much state
  * as possible instead of a full codec reinit. Returns 0 on success or a
  * negative error code; on failure the whole context is torn down. */
1184 {
1185  int i, err = 0;
1186 
 /* tear down per-slice thread contexts (slot 0 is the main context) */
1187  if (s->slice_context_count > 1) {
1188  for (i = 0; i < s->slice_context_count; i++) {
1190  }
1191  for (i = 1; i < s->slice_context_count; i++) {
1192  av_freep(&s->thread_context[i]);
1193  }
1194  } else
1196 
1197  if ((err = free_context_frame(s)) < 0)
1198  return err;
1199 
 /* force every picture slot to be reallocated at the new size */
1200  if (s->picture)
1201  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1202  s->picture[i].needs_realloc = 1;
1203  }
1204 
1205  s->last_picture_ptr =
1206  s->next_picture_ptr =
1207  s->current_picture_ptr = NULL;
1208 
1209  // init
 /* NOTE(review): the conditional selecting the interlaced (32-line) branch
  * was lost in extraction; presumably it tests progressive_sequence/
  * codec id — verify against the full source. */
1211  s->mb_height = (s->height + 31) / 32 * 2;
1212  else
1213  s->mb_height = (s->height + 15) / 16;
1214 
1215  if ((s->width || s->height) &&
1216  av_image_check_size(s->width, s->height, 0, s->avctx))
1217  return AVERROR_INVALIDDATA;
1218 
1219  if ((err = init_context_frame(s)))
1220  goto fail;
1221 
1222  s->thread_context[0] = s;
1223 
1224  if (s->width && s->height) {
1225  int nb_slices = s->slice_context_count;
1226  if (nb_slices > 1) {
 /* recreate the per-slice contexts as copies of the main one */
1227  for (i = 1; i < nb_slices; i++) {
1228  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1229  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1230  }
1231 
1232  for (i = 0; i < nb_slices; i++) {
1233  if (init_duplicate_context(s->thread_context[i]) < 0)
1234  goto fail;
 /* distribute macroblock rows evenly over the slice contexts */
1235  s->thread_context[i]->start_mb_y =
1236  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1237  s->thread_context[i]->end_mb_y =
1238  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1239  }
1240  } else {
1241  err = init_duplicate_context(s);
1242  if (err < 0)
1243  goto fail;
1244  s->start_mb_y = 0;
1245  s->end_mb_y = s->mb_height;
1246  }
1247  s->slice_context_count = nb_slices;
1248  }
1249 
1250  return 0;
1251  fail:
1252  ff_MPV_common_end(s);
1253  return err;
1254 }
1255 
1256 /* free the common structure for both encoder and decoder
   (counterpart of the common init; the old comment said "init", which
   contradicted the body — every statement here releases state) */
1258 {
1259  int i;
1260 
 /* free per-slice thread contexts (slot 0 is the main context itself) */
1261  if (s->slice_context_count > 1) {
1262  for (i = 0; i < s->slice_context_count; i++) {
1264  }
1265  for (i = 1; i < s->slice_context_count; i++) {
1266  av_freep(&s->thread_context[i]);
1267  }
1268  s->slice_context_count = 1;
1269  } else free_duplicate_context(s);
1270 
1272  s->parse_context.buffer_size = 0;
1273 
1276 
1277  av_freep(&s->avctx->stats_out);
1278  av_freep(&s->ac_stats);
1279 
 /* chroma matrices may alias the luma ones, so only the pointers are
  * cleared; the owned matrices are freed below */
1282  s->q_chroma_intra_matrix= NULL;
1283  s->q_chroma_intra_matrix16= NULL;
1284  av_freep(&s->q_intra_matrix);
1285  av_freep(&s->q_inter_matrix);
1288  av_freep(&s->input_picture);
1290  av_freep(&s->dct_offset);
1291 
 /* unreference and free every picture slot before the array itself */
1292  if (s->picture) {
1293  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1294  free_picture_tables(&s->picture[i]);
1295  ff_mpeg_unref_picture(s, &s->picture[i]);
1296  }
1297  }
1298  av_freep(&s->picture);
1307 
1308  free_context_frame(s);
1309 
1310  s->context_initialized = 0;
1311  s->last_picture_ptr =
1312  s->next_picture_ptr =
1313  s->current_picture_ptr = NULL;
1314  s->linesize = s->uvlinesize = 0;
1315 }
1316 
 /* Initialize the derived RL tables (max_level[], max_run[], index_run[])
  * from the raw run/level tables. When static_store is non-NULL the results
  * live in that caller-provided buffer (and re-initialization is skipped);
  * otherwise they are av_malloc()ed. */
1318  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1319 {
1320  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1321  uint8_t index_run[MAX_RUN + 1];
1322  int last, run, level, start, end, i;
1323 
1324  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1325  if (static_store && rl->max_level[0])
1326  return;
1327 
1328  /* compute max_level[], max_run[] and index_run[] */
 /* last == 0: codes before rl->last (not-last coefficients);
  * last == 1: codes from rl->last to rl->n (last coefficients) */
1329  for (last = 0; last < 2; last++) {
1330  if (last == 0) {
1331  start = 0;
1332  end = rl->last;
1333  } else {
1334  start = rl->last;
1335  end = rl->n;
1336  }
1337 
 /* index_run is filled with rl->n as the "unset" marker */
1338  memset(max_level, 0, MAX_RUN + 1);
1339  memset(max_run, 0, MAX_LEVEL + 1);
1340  memset(index_run, rl->n, MAX_RUN + 1);
1341  for (i = start; i < end; i++) {
1342  run = rl->table_run[i];
1343  level = rl->table_level[i];
1344  if (index_run[run] == rl->n)
1345  index_run[run] = i;
1346  if (level > max_level[run])
1347  max_level[run] = level;
1348  if (run > max_run[level])
1349  max_run[level] = run;
1350  }
 /* the three tables are packed back-to-back inside static_store[last]:
  * [0, MAX_RUN] max_level, then max_run, then index_run */
1351  if (static_store)
1352  rl->max_level[last] = static_store[last];
1353  else
1354  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1355  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1356  if (static_store)
1357  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1358  else
1359  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1360  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1361  if (static_store)
1362  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1363  else
1364  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1365  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1366  }
1367 }
1368 
 /* Pre-expand the RL VLC table into per-qscale (0..31) decode tables so the
  * decoder can read run/level/len with a single lookup. */
1370 {
1371  int i, q;
1372 
1373  for (q = 0; q < 32; q++) {
 /* dequantization factors for this qscale; q == 0 means "no dequant" */
1374  int qmul = q * 2;
1375  int qadd = (q - 1) | 1;
1376 
1377  if (q == 0) {
1378  qmul = 1;
1379  qadd = 0;
1380  }
1381  for (i = 0; i < rl->vlc.table_size; i++) {
1382  int code = rl->vlc.table[i][0];
1383  int len = rl->vlc.table[i][1];
1384  int level, run;
1385 
1386  if (len == 0) { // illegal code
1387  run = 66;
1388  level = MAX_LEVEL;
1389  } else if (len < 0) { // more bits needed
1390  run = 0;
1391  level = code;
1392  } else {
1393  if (code == rl->n) { // esc
1394  run = 66;
1395  level = 0;
1396  } else {
1397  run = rl->table_run[code] + 1;
1398  level = rl->table_level[code] * qmul + qadd;
 /* codes at/after rl->last mark the last coefficient: flagged
  * by offsetting the run by 192 */
1399  if (code >= rl->last) run += 192;
1400  }
1401  }
1402  rl->rl_vlc[q][i].len = len;
1403  rl->rl_vlc[q][i].level = level;
1404  rl->rl_vlc[q][i].run = run;
1405  }
1406  }
1407 }
1408 
 /* Unreference every non-reference picture slot; when remove_current is 0
  * the current picture is kept even if it is not a reference. */
1410 {
1411  int i;
1412 
1413  /* release non reference frames */
1414  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1415  if (!s->picture[i].reference &&
1416  (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1417  ff_mpeg_unref_picture(s, &s->picture[i]);
1418  }
1419  }
1420 }
1421 
1422 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1423 {
1424  if (pic == s->last_picture_ptr)
1425  return 0;
1426  if (pic->f.data[0] == NULL)
1427  return 1;
1428  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1429  return 1;
1430  return 0;
1431 }
1432 
 /* Find a free slot in s->picture[]. For shared frames only a completely
  * empty slot qualifies; otherwise pic_is_unused() decides. Returns the
  * slot index; aborts if none is free (see rationale below). */
1433 static int find_unused_picture(MpegEncContext *s, int shared)
1434 {
1435  int i;
1436 
1437  if (shared) {
1438  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1439  if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1440  return i;
1441  }
1442  } else {
1443  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1444  if (pic_is_unused(s, &s->picture[i]))
1445  return i;
1446  }
1447  }
1448 
1450  "Internal error, picture buffer overflow\n");
1451  /* We could return -1, but the codec would crash trying to draw into a
1452  * non-existing frame anyway. This is safer than waiting for a random crash.
1453  * Also the return of this is never useful, an encoder must only allocate
1454  * as much as allowed in the specification. This has no relationship to how
1455  * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1456  * enough for such valid streams).
1457  * Plus, a decoder has to check stream validity and remove frames if too
1458  * many reference frames are around. Waiting for "OOM" is not correct at
1459  * all. Similarly, missing reference frames have to be replaced by
1460  * interpolated/MC frames, anything else is a bug in the codec ...
1461  */
1462  abort();
1463  return -1;
1464 }
1465 
 /* Public wrapper around find_unused_picture(): if the chosen slot is
  * flagged needs_realloc, its old tables and frame data are released so
  * the caller gets a clean slot. Returns the slot index. */
1467 {
1468  int ret = find_unused_picture(s, shared);
1469 
1470  if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1471  if (s->picture[ret].needs_realloc) {
1472  s->picture[ret].needs_realloc = 0;
1473  free_picture_tables(&s->picture[ret]);
1474  ff_mpeg_unref_picture(s, &s->picture[ret]);
1476  }
1477  }
1478  return ret;
1479 }
1480 
 /* Recompute the per-coefficient noise-reduction DCT offsets from the
  * accumulated error sums, separately for intra and inter blocks. */
1482 {
1483  int intra, i;
1484 
1485  for (intra = 0; intra < 2; intra++) {
 /* halve the accumulators periodically so recent frames dominate and
  * the sums cannot overflow */
1486  if (s->dct_count[intra] > (1 << 16)) {
1487  for (i = 0; i < 64; i++) {
1488  s->dct_error_sum[intra][i] >>= 1;
1489  }
1490  s->dct_count[intra] >>= 1;
1491  }
1492 
 /* offset ~ noise_reduction * count / error_sum, rounded */
1493  for (i = 0; i < 64; i++) {
1494  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1495  s->dct_count[intra] +
1496  s->dct_error_sum[intra][i] / 2) /
1497  (s->dct_error_sum[intra][i] + 1);
1498  }
1499  }
1500 }
1501 
1502 /**
1503  * generic function for encode/decode called after coding/decoding
1504  * the header and before a frame is coded/decoded.
 * Selects/allocates the current picture, allocates dummy last/next
 * pictures when references are missing, and sets up dequantizers.
 * Returns 0 on success, a negative value on failure.
 * NOTE(review): several call lines of this listing were lost in
 * extraction; the remaining code is kept byte-identical.
1505  */
1507 {
1508  int i, ret;
1509  Picture *pic;
1510  s->mb_skipped = 0;
1511 
1512  if (!ff_thread_can_start_frame(avctx)) {
1513  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1514  return -1;
1515  }
1516 
1517  /* mark & release old frames */
1518  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1520  s->last_picture_ptr->f.data[0]) {
1522  }
1523 
1524  /* release forgotten pictures */
1525  /* if (mpeg124/h263) */
1526  if (!s->encoding) {
1527  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1528  if (&s->picture[i] != s->last_picture_ptr &&
1529  &s->picture[i] != s->next_picture_ptr &&
1530  s->picture[i].reference && !s->picture[i].needs_realloc) {
1531  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1532  av_log(avctx, AV_LOG_ERROR,
1533  "releasing zombie picture\n");
1534  ff_mpeg_unref_picture(s, &s->picture[i]);
1535  }
1536  }
1537  }
1538 
1540 
1541  if (!s->encoding) {
1543 
1544  if (s->current_picture_ptr &&
1545  s->current_picture_ptr->f.data[0] == NULL) {
1546  // we already have a unused image
1547  // (maybe it was set before reading the header)
1548  pic = s->current_picture_ptr;
1549  } else {
1550  i = ff_find_unused_picture(s, 0);
1551  if (i < 0) {
1552  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1553  return i;
1554  }
1555  pic = &s->picture[i];
1556  }
1557 
 /* reference == 3 marks a frame referenced by both fields */
1558  pic->reference = 0;
1559  if (!s->droppable) {
1560  if (s->pict_type != AV_PICTURE_TYPE_B)
1561  pic->reference = 3;
1562  }
1563 
1565 
1566  if (ff_alloc_picture(s, pic, 0) < 0)
1567  return -1;
1568 
1569  s->current_picture_ptr = pic;
1570  // FIXME use only the vars from current_pic
1572  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1574  if (s->picture_structure != PICT_FRAME)
1577  }
1581  }
1582 
1584  // if (s->flags && CODEC_FLAG_QSCALE)
1585  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1587 
1588  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1589  s->current_picture_ptr)) < 0)
1590  return ret;
1591 
 /* non-B frames rotate the reference pictures */
1592  if (s->pict_type != AV_PICTURE_TYPE_B) {
1594  if (!s->droppable)
1596  }
1597  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1599  s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1600  s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1601  s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1602  s->pict_type, s->droppable);
1603 
 /* no usable last picture: fabricate a gray dummy so motion
  * compensation has something to read from */
1604  if ((s->last_picture_ptr == NULL ||
1605  s->last_picture_ptr->f.data[0] == NULL) &&
1606  (s->pict_type != AV_PICTURE_TYPE_I ||
1607  s->picture_structure != PICT_FRAME)) {
1608  int h_chroma_shift, v_chroma_shift;
1610  &h_chroma_shift, &v_chroma_shift);
1612  av_log(avctx, AV_LOG_DEBUG,
1613  "allocating dummy last picture for B frame\n");
1614  else if (s->pict_type != AV_PICTURE_TYPE_I)
1615  av_log(avctx, AV_LOG_ERROR,
1616  "warning: first frame is no keyframe\n");
1617  else if (s->picture_structure != PICT_FRAME)
1618  av_log(avctx, AV_LOG_DEBUG,
1619  "allocate dummy last picture for field based first keyframe\n");
1620 
1621  /* Allocate a dummy frame */
1622  i = ff_find_unused_picture(s, 0);
1623  if (i < 0) {
1624  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1625  return i;
1626  }
1627  s->last_picture_ptr = &s->picture[i];
1628  s->last_picture_ptr->f.key_frame = 0;
1629  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1630  s->last_picture_ptr = NULL;
1631  return -1;
1632  }
1633 
 /* fill the dummy with mid-gray (0x80) */
1634  memset(s->last_picture_ptr->f.data[0], 0x80,
1635  avctx->height * s->last_picture_ptr->f.linesize[0]);
1636  memset(s->last_picture_ptr->f.data[1], 0x80,
1637  (avctx->height >> v_chroma_shift) *
1638  s->last_picture_ptr->f.linesize[1]);
1639  memset(s->last_picture_ptr->f.data[2], 0x80,
1640  (avctx->height >> v_chroma_shift) *
1641  s->last_picture_ptr->f.linesize[2]);
1642 
1644  for(i=0; i<avctx->height; i++)
1645  memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1646  }
1647 
1648  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1649  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1650  }
 /* same for a missing next picture needed by B frames */
1651  if ((s->next_picture_ptr == NULL ||
1652  s->next_picture_ptr->f.data[0] == NULL) &&
1653  s->pict_type == AV_PICTURE_TYPE_B) {
1654  /* Allocate a dummy frame */
1655  i = ff_find_unused_picture(s, 0);
1656  if (i < 0) {
1657  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1658  return i;
1659  }
1660  s->next_picture_ptr = &s->picture[i];
1661  s->next_picture_ptr->f.key_frame = 0;
1662  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1663  s->next_picture_ptr = NULL;
1664  return -1;
1665  }
1666  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1667  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1668  }
1669 
1670 #if 0 // BUFREF-FIXME
1671  memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1672  memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
1673 #endif
1674  if (s->last_picture_ptr) {
1676  if (s->last_picture_ptr->f.data[0] &&
1677  (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1678  s->last_picture_ptr)) < 0)
1679  return ret;
1680  }
1681  if (s->next_picture_ptr) {
1683  if (s->next_picture_ptr->f.data[0] &&
1684  (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1685  s->next_picture_ptr)) < 0)
1686  return ret;
1687  }
1688 
1690  s->last_picture_ptr->f.data[0]));
1691 
 /* field pictures: interleave by doubling the line size and offsetting
  * the bottom field by one line */
1692  if (s->picture_structure!= PICT_FRAME) {
1693  int i;
1694  for (i = 0; i < 4; i++) {
1696  s->current_picture.f.data[i] +=
1697  s->current_picture.f.linesize[i];
1698  }
1699  s->current_picture.f.linesize[i] *= 2;
1700  s->last_picture.f.linesize[i] *= 2;
1701  s->next_picture.f.linesize[i] *= 2;
1702  }
1703  }
1704 
1705  s->err_recognition = avctx->err_recognition;
1706 
1707  /* set dequantizer, we can't do it during init as
1708  * it might change for mpeg4 and we can't do it in the header
1709  * decode as init is not called for mpeg4 there yet */
1710  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1713  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1716  } else {
1719  }
1720 
1721  if (s->dct_error_sum) {
1724  }
1725 
1727  return ff_xvmc_field_start(s, avctx);
1728 
1729  return 0;
1730 }
1731 
1732 /* generic function for encode/decode called after a
1733  * frame has been coded/decoded.
 * Draws the padding edges when needed, flushes the FPU/SIMD state and
 * records the picture type for the next frame. */
1735 {
1736  /* redraw edges for the frame if decoding didn't complete */
1737  // just to make sure that all data is rendered.
1739  ff_xvmc_field_end(s);
 /* edge drawing is skipped for hwaccel, EMU_EDGE and lowres paths */
1740  } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1741  !s->avctx->hwaccel &&
1743  s->unrestricted_mv &&
1745  !s->intra_only &&
1746  !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1747  !s->avctx->lowres
1748  ) {
1750  int hshift = desc->log2_chroma_w;
1751  int vshift = desc->log2_chroma_h;
 /* pad luma, then both chroma planes (scaled by the chroma shifts) */
1753  s->h_edge_pos, s->v_edge_pos,
1755  EDGE_TOP | EDGE_BOTTOM);
1757  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1758  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1759  EDGE_TOP | EDGE_BOTTOM);
1761  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1762  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1763  EDGE_TOP | EDGE_BOTTOM);
1764  }
1765 
1766  emms_c();
1767 
1768  s->last_pict_type = s->pict_type;
1770  if (s->pict_type!= AV_PICTURE_TYPE_B) {
1772  }
1773 #if 0
1774  /* copy back current_picture variables */
1775  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1776  if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1777  s->picture[i] = s->current_picture;
1778  break;
1779  }
1780  }
1781  av_assert0(i < MAX_PICTURE_COUNT);
1782 #endif
1783 
1784  // clear copies, to avoid confusion
1785 #if 0
1786  memset(&s->last_picture, 0, sizeof(Picture));
1787  memset(&s->next_picture, 0, sizeof(Picture));
1788  memset(&s->current_picture, 0, sizeof(Picture));
1789 #endif
1791 
1792  if (s->current_picture.reference)
1794 }
1795 
1796 /**
1797  * Draw a line from (ex, ey) -> (sx, sy).
1798  * @param w width of the image
1799  * @param h height of the image
1800  * @param stride stride/linesize of the image
1801  * @param color color of the arrow
1802  */
1803 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1804  int w, int h, int stride, int color)
1805 {
1806  int x, y, fr, f;
1807 
1808  sx = av_clip(sx, 0, w - 1);
1809  sy = av_clip(sy, 0, h - 1);
1810  ex = av_clip(ex, 0, w - 1);
1811  ey = av_clip(ey, 0, h - 1);
1812 
1813  buf[sy * stride + sx] += color;
1814 
1815  if (FFABS(ex - sx) > FFABS(ey - sy)) {
1816  if (sx > ex) {
1817  FFSWAP(int, sx, ex);
1818  FFSWAP(int, sy, ey);
1819  }
1820  buf += sx + sy * stride;
1821  ex -= sx;
1822  f = ((ey - sy) << 16) / ex;
1823  for (x = 0; x <= ex; x++) {
1824  y = (x * f) >> 16;
1825  fr = (x * f) & 0xFFFF;
1826  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1827  if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1828  }
1829  } else {
1830  if (sy > ey) {
1831  FFSWAP(int, sx, ex);
1832  FFSWAP(int, sy, ey);
1833  }
1834  buf += sx + sy * stride;
1835  ey -= sy;
1836  if (ey)
1837  f = ((ex - sx) << 16) / ey;
1838  else
1839  f = 0;
1840  for(y= 0; y <= ey; y++){
1841  x = (y*f) >> 16;
1842  fr = (y*f) & 0xFFFF;
1843  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1844  if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1845  }
1846  }
1847 }
1848 
/**
 * Draw an arrow from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int vx, vy;

    /* keep the endpoints within a small margin around the image; exact
     * clipping happens inside draw_line() */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    vx = ex - sx;
    vy = ey - sy;

    if (vx * vx + vy * vy > 3 * 3) {
        /* long enough to warrant a head: add two strokes rotated by
         * +/- 45 degrees around the start point */
        int rx     = vx + vy;
        int ry     = -vx + vy;
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        // FIXME subpixel accuracy
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    /* the shaft itself */
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1883 
1884 /**
1885  * Print debugging info for the given picture.
 * Depending on avctx->debug / avctx->debug_mv this logs per-MB skip/QP/type
 * maps and/or draws motion vectors, QP and MB-type visualizations directly
 * into the output frame.
 * NOTE(review): a few continuation lines of this listing were lost in
 * extraction (the early-return condition and one av_log argument); the
 * remaining code is kept byte-identical.
1886  */
1887 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1888  int *low_delay,
1889  int mb_width, int mb_height, int mb_stride, int quarter_sample)
1890 {
1891  if (avctx->hwaccel || !p || !p->mb_type
1893  return;
1894 
1895 
 /* textual per-macroblock dump: skip count, QP, type and segmentation */
1896  if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1897  int x,y;
1898 
1899  av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1901  for (y = 0; y < mb_height; y++) {
1902  for (x = 0; x < mb_width; x++) {
1903  if (avctx->debug & FF_DEBUG_SKIP) {
1904  int count = mbskip_table[x + y * mb_stride];
1905  if (count > 9)
1906  count = 9;
1907  av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1908  }
1909  if (avctx->debug & FF_DEBUG_QP) {
1910  av_log(avctx, AV_LOG_DEBUG, "%2d",
1911  p->qscale_table[x + y * mb_stride]);
1912  }
1913  if (avctx->debug & FF_DEBUG_MB_TYPE) {
1914  int mb_type = p->mb_type[x + y * mb_stride];
1915  // Type & MV direction
1916  if (IS_PCM(mb_type))
1917  av_log(avctx, AV_LOG_DEBUG, "P");
1918  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1919  av_log(avctx, AV_LOG_DEBUG, "A");
1920  else if (IS_INTRA4x4(mb_type))
1921  av_log(avctx, AV_LOG_DEBUG, "i");
1922  else if (IS_INTRA16x16(mb_type))
1923  av_log(avctx, AV_LOG_DEBUG, "I");
1924  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1925  av_log(avctx, AV_LOG_DEBUG, "d");
1926  else if (IS_DIRECT(mb_type))
1927  av_log(avctx, AV_LOG_DEBUG, "D");
1928  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1929  av_log(avctx, AV_LOG_DEBUG, "g");
1930  else if (IS_GMC(mb_type))
1931  av_log(avctx, AV_LOG_DEBUG, "G");
1932  else if (IS_SKIP(mb_type))
1933  av_log(avctx, AV_LOG_DEBUG, "S");
1934  else if (!USES_LIST(mb_type, 1))
1935  av_log(avctx, AV_LOG_DEBUG, ">");
1936  else if (!USES_LIST(mb_type, 0))
1937  av_log(avctx, AV_LOG_DEBUG, "<");
1938  else {
1939  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1940  av_log(avctx, AV_LOG_DEBUG, "X");
1941  }
1942 
1943  // segmentation
1944  if (IS_8X8(mb_type))
1945  av_log(avctx, AV_LOG_DEBUG, "+");
1946  else if (IS_16X8(mb_type))
1947  av_log(avctx, AV_LOG_DEBUG, "-");
1948  else if (IS_8X16(mb_type))
1949  av_log(avctx, AV_LOG_DEBUG, "|");
1950  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1951  av_log(avctx, AV_LOG_DEBUG, " ");
1952  else
1953  av_log(avctx, AV_LOG_DEBUG, "?");
1954 
1955 
1956  if (IS_INTERLACED(mb_type))
1957  av_log(avctx, AV_LOG_DEBUG, "=");
1958  else
1959  av_log(avctx, AV_LOG_DEBUG, " ");
1960  }
1961  }
1962  av_log(avctx, AV_LOG_DEBUG, "\n");
1963  }
1964  }
1965 
 /* visual overlays: draw into the frame itself */
1966  if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1967  (avctx->debug_mv)) {
1968  const int shift = 1 + quarter_sample;
1969  int mb_y;
1970  uint8_t *ptr;
1971  int i;
1972  int h_chroma_shift, v_chroma_shift, block_height;
1973  const int width = avctx->width;
1974  const int height = avctx->height;
1975  const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1976  const int mv_stride = (mb_width << mv_sample_log2) +
1977  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1978 
1979  *low_delay = 0; // needed to see the vectors without trashing the buffers
1980 
1981  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1982 
1983  av_frame_make_writable(pict);
1984 
1985  pict->opaque = NULL;
1986  ptr = pict->data[0];
1987  block_height = 16 >> v_chroma_shift;
1988 
1989  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1990  int mb_x;
1991  for (mb_x = 0; mb_x < mb_width; mb_x++) {
1992  const int mb_index = mb_x + mb_y * mb_stride;
 /* motion-vector arrows: P forward, B forward, B backward */
1993  if ((avctx->debug_mv) && p->motion_val[0]) {
1994  int type;
1995  for (type = 0; type < 3; type++) {
1996  int direction = 0;
1997  switch (type) {
1998  case 0:
1999  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2000  (pict->pict_type!= AV_PICTURE_TYPE_P))
2001  continue;
2002  direction = 0;
2003  break;
2004  case 1:
2005  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2006  (pict->pict_type!= AV_PICTURE_TYPE_B))
2007  continue;
2008  direction = 0;
2009  break;
2010  case 2:
2011  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2012  (pict->pict_type!= AV_PICTURE_TYPE_B))
2013  continue;
2014  direction = 1;
2015  break;
2016  }
2017  if (!USES_LIST(p->mb_type[mb_index], direction))
2018  continue;
2019 
2020  if (IS_8X8(p->mb_type[mb_index])) {
2021  int i;
2022  for (i = 0; i < 4; i++) {
2023  int sx = mb_x * 16 + 4 + 8 * (i & 1);
2024  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2025  int xy = (mb_x * 2 + (i & 1) +
2026  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2027  int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2028  int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2029  draw_arrow(ptr, sx, sy, mx, my, width,
2030  height, pict->linesize[0], 100);
2031  }
2032  } else if (IS_16X8(p->mb_type[mb_index])) {
2033  int i;
2034  for (i = 0; i < 2; i++) {
2035  int sx = mb_x * 16 + 8;
2036  int sy = mb_y * 16 + 4 + 8 * i;
2037  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2038  int mx = (p->motion_val[direction][xy][0] >> shift);
2039  int my = (p->motion_val[direction][xy][1] >> shift);
2040 
2041  if (IS_INTERLACED(p->mb_type[mb_index]))
2042  my *= 2;
2043 
2044  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2045  height, pict->linesize[0], 100);
2046  }
2047  } else if (IS_8X16(p->mb_type[mb_index])) {
2048  int i;
2049  for (i = 0; i < 2; i++) {
2050  int sx = mb_x * 16 + 4 + 8 * i;
2051  int sy = mb_y * 16 + 8;
2052  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2053  int mx = p->motion_val[direction][xy][0] >> shift;
2054  int my = p->motion_val[direction][xy][1] >> shift;
2055 
2056  if (IS_INTERLACED(p->mb_type[mb_index]))
2057  my *= 2;
2058 
2059  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2060  height, pict->linesize[0], 100);
2061  }
2062  } else {
2063  int sx= mb_x * 16 + 8;
2064  int sy= mb_y * 16 + 8;
2065  int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2066  int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2067  int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2068  draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
2069  }
2070  }
2071  }
 /* QP visualization: paint both chroma planes with a gray level
  * proportional to the macroblock QP */
2072  if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2073  uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2074  0x0101010101010101ULL;
2075  int y;
2076  for (y = 0; y < block_height; y++) {
2077  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2078  (block_height * mb_y + y) *
2079  pict->linesize[1]) = c;
2080  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2081  (block_height * mb_y + y) *
2082  pict->linesize[2]) = c;
2083  }
2084  }
 /* MB-type visualization: map each type to a hue on the U/V planes */
2085  if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2086  p->motion_val[0]) {
2087  int mb_type = p->mb_type[mb_index];
2088  uint64_t u,v;
2089  int y;
2090 #define COLOR(theta, r) \
2091  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2092  v = (int)(128 + r * sin(theta * 3.141592 / 180));
2093 
2094 
2095  u = v = 128;
2096  if (IS_PCM(mb_type)) {
2097  COLOR(120, 48)
2098  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2099  IS_INTRA16x16(mb_type)) {
2100  COLOR(30, 48)
2101  } else if (IS_INTRA4x4(mb_type)) {
2102  COLOR(90, 48)
2103  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2104  // COLOR(120, 48)
2105  } else if (IS_DIRECT(mb_type)) {
2106  COLOR(150, 48)
2107  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2108  COLOR(170, 48)
2109  } else if (IS_GMC(mb_type)) {
2110  COLOR(190, 48)
2111  } else if (IS_SKIP(mb_type)) {
2112  // COLOR(180, 48)
2113  } else if (!USES_LIST(mb_type, 1)) {
2114  COLOR(240, 48)
2115  } else if (!USES_LIST(mb_type, 0)) {
2116  COLOR(0, 48)
2117  } else {
2118  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2119  COLOR(300,48)
2120  }
2121 
2122  u *= 0x0101010101010101ULL;
2123  v *= 0x0101010101010101ULL;
2124  for (y = 0; y < block_height; y++) {
2125  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2126  (block_height * mb_y + y) * pict->linesize[1]) = u;
2127  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2128  (block_height * mb_y + y) * pict->linesize[2]) = v;
2129  }
2130 
2131  // segmentation
2132  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2133  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2134  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2135  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2136  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2137  }
2138  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2139  for (y = 0; y < 16; y++)
2140  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2141  pict->linesize[0]] ^= 0x80;
2142  }
2143  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2144  int dm = 1 << (mv_sample_log2 - 2);
2145  for (i = 0; i < 4; i++) {
2146  int sx = mb_x * 16 + 8 * (i & 1);
2147  int sy = mb_y * 16 + 8 * (i >> 1);
2148  int xy = (mb_x * 2 + (i & 1) +
2149  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2150  // FIXME bidir
2151  int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2152  if (mv[0] != mv[dm] ||
2153  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2154  for (y = 0; y < 8; y++)
2155  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2156  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2157  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2158  pict->linesize[0]) ^= 0x8080808080808080ULL;
2159  }
2160  }
2161 
2162  if (IS_INTERLACED(mb_type) &&
2163  avctx->codec->id == AV_CODEC_ID_H264) {
2164  // hmm
2165  }
2166  }
 /* reset skip counter for this macroblock */
2167  mbskip_table[mb_index] = 0;
2168  }
2169  }
2170  }
2171 }
2172 
 /* Convenience wrapper: forward the MpegEncContext geometry fields to
  * ff_print_debug_info2(). */
2174 {
2175  ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2176  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2177 }
2178 
 /* Attach the per-MB qscale table to the output frame. The buffer reference
  * is advanced past the 2-row + 1 guard border so the exported table starts
  * at the first real macroblock. Returns 0 or a negative AVERROR. */
2180 {
2182  int offset = 2*s->mb_stride + 1;
2183  if(!ref)
2184  return AVERROR(ENOMEM);
2185  av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2186  ref->size -= offset;
2187  ref->data += offset;
2188  return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2189 }
2190 
 /* Half-pel motion compensation for one block in lowres mode: computes the
  * sub-pel phase, falls back to the edge-emulation buffer when the motion
  * vector reaches outside the picture, and applies the chroma MC function.
  * Returns 1 if edge emulation was used, 0 otherwise. */
2192  uint8_t *dest, uint8_t *src,
2193  int field_based, int field_select,
2194  int src_x, int src_y,
2195  int width, int height, ptrdiff_t stride,
2196  int h_edge_pos, int v_edge_pos,
2197  int w, int h, h264_chroma_mc_func *pix_op,
2198  int motion_x, int motion_y)
2199 {
2200  const int lowres = s->avctx->lowres;
2201  const int op_index = FFMIN(lowres, 3);
 /* s_mask extracts the sub-pel fraction at this lowres level */
2202  const int s_mask = (2 << lowres) - 1;
2203  int emu = 0;
2204  int sx, sy;
2205 
2206  if (s->quarter_sample) {
2207  motion_x /= 2;
2208  motion_y /= 2;
2209  }
2210 
2211  sx = motion_x & s_mask;
2212  sy = motion_y & s_mask;
 /* note: ">> lowres + 1" parses as ">> (lowres + 1)" */
2213  src_x += motion_x >> lowres + 1;
2214  src_y += motion_y >> lowres + 1;
2215 
2216  src += src_y * stride + src_x;
2217 
 /* source block touches outside the picture: use the emulated edge */
2218  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2219  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2221  src, s->linesize, w + 1,
2222  (h + 1) << field_based, src_x,
2223  src_y << field_based,
2224  h_edge_pos,
2225  v_edge_pos);
2226  src = s->edge_emu_buffer;
2227  emu = 1;
2228  }
2229 
 /* rescale the sub-pel phase to the 1/8-pel domain of pix_op */
2230  sx = (sx << 2) >> lowres;
2231  sy = (sy << 2) >> lowres;
2232  if (field_select)
2233  src += s->linesize;
2234  pix_op[op_index](dest, src, stride, h, sx, sy);
2235  return emu;
2236 }
2237 
2238 /* apply one mpeg motion vector to the three components */
2240  uint8_t *dest_y,
2241  uint8_t *dest_cb,
2242  uint8_t *dest_cr,
2243  int field_based,
2244  int bottom_field,
2245  int field_select,
2246  uint8_t **ref_picture,
2247  h264_chroma_mc_func *pix_op,
2248  int motion_x, int motion_y,
2249  int h, int mb_y)
2250 {
2251  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2252  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2253  ptrdiff_t uvlinesize, linesize;
2254  const int lowres = s->avctx->lowres;
2255  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2256  const int block_s = 8>>lowres;
2257  const int s_mask = (2 << lowres) - 1;
2258  const int h_edge_pos = s->h_edge_pos >> lowres;
2259  const int v_edge_pos = s->v_edge_pos >> lowres;
2260  linesize = s->current_picture.f.linesize[0] << field_based;
2261  uvlinesize = s->current_picture.f.linesize[1] << field_based;
2262 
2263  // FIXME obviously not perfect but qpel will not work in lowres anyway
2264  if (s->quarter_sample) {
2265  motion_x /= 2;
2266  motion_y /= 2;
2267  }
2268 
2269  if(field_based){
2270  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2271  }
2272 
2273  sx = motion_x & s_mask;
2274  sy = motion_y & s_mask;
2275  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2276  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2277 
2278  if (s->out_format == FMT_H263) {
2279  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2280  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2281  uvsrc_x = src_x >> 1;
2282  uvsrc_y = src_y >> 1;
2283  } else if (s->out_format == FMT_H261) {
2284  // even chroma mv's are full pel in H261
2285  mx = motion_x / 4;
2286  my = motion_y / 4;
2287  uvsx = (2 * mx) & s_mask;
2288  uvsy = (2 * my) & s_mask;
2289  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2290  uvsrc_y = mb_y * block_s + (my >> lowres);
2291  } else {
2292  if(s->chroma_y_shift){
2293  mx = motion_x / 2;
2294  my = motion_y / 2;
2295  uvsx = mx & s_mask;
2296  uvsy = my & s_mask;
2297  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2298  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2299  } else {
2300  if(s->chroma_x_shift){
2301  //Chroma422
2302  mx = motion_x / 2;
2303  uvsx = mx & s_mask;
2304  uvsy = motion_y & s_mask;
2305  uvsrc_y = src_y;
2306  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2307  } else {
2308  //Chroma444
2309  uvsx = motion_x & s_mask;
2310  uvsy = motion_y & s_mask;
2311  uvsrc_x = src_x;
2312  uvsrc_y = src_y;
2313  }
2314  }
2315  }
2316 
2317  ptr_y = ref_picture[0] + src_y * linesize + src_x;
2318  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2319  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2320 
2321  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2322  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2323  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, linesize >> field_based, ptr_y,
2324  linesize >> field_based, 17, 17 + field_based,
2325  src_x, src_y << field_based, h_edge_pos,
2326  v_edge_pos);
2327  ptr_y = s->edge_emu_buffer;
2328  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2329  uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2330  s->vdsp.emulated_edge_mc(uvbuf, uvlinesize >> field_based,
2331  ptr_cb, uvlinesize >> field_based, 9,
2332  9 + field_based,
2333  uvsrc_x, uvsrc_y << field_based,
2334  h_edge_pos >> 1, v_edge_pos >> 1);
2335  s->vdsp.emulated_edge_mc(uvbuf + 16, uvlinesize >> field_based,
2336  ptr_cr, uvlinesize >> field_based, 9,
2337  9 + field_based,
2338  uvsrc_x, uvsrc_y << field_based,
2339  h_edge_pos >> 1, v_edge_pos >> 1);
2340  ptr_cb = uvbuf;
2341  ptr_cr = uvbuf + 16;
2342  }
2343  }
2344 
2345  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
2346  if (bottom_field) {
2347  dest_y += s->linesize;
2348  dest_cb += s->uvlinesize;
2349  dest_cr += s->uvlinesize;
2350  }
2351 
2352  if (field_select) {
2353  ptr_y += s->linesize;
2354  ptr_cb += s->uvlinesize;
2355  ptr_cr += s->uvlinesize;
2356  }
2357 
2358  sx = (sx << 2) >> lowres;
2359  sy = (sy << 2) >> lowres;
2360  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2361 
2362  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2363  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2364  uvsx = (uvsx << 2) >> lowres;
2365  uvsy = (uvsy << 2) >> lowres;
2366  if (hc) {
2367  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2368  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2369  }
2370  }
2371  // FIXME h261 lowres loop filter
2372 }
2373 
2375  uint8_t *dest_cb, uint8_t *dest_cr,
2376  uint8_t **ref_picture,
2377  h264_chroma_mc_func * pix_op,
2378  int mx, int my)
2379 {
2380  const int lowres = s->avctx->lowres;
2381  const int op_index = FFMIN(lowres, 3);
2382  const int block_s = 8 >> lowres;
2383  const int s_mask = (2 << lowres) - 1;
2384  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2385  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2386  int emu = 0, src_x, src_y, sx, sy;
2387  ptrdiff_t offset;
2388  uint8_t *ptr;
2389 
2390  if (s->quarter_sample) {
2391  mx /= 2;
2392  my /= 2;
2393  }
2394 
2395  /* In case of 8X8, we construct a single chroma motion vector
2396  with a special rounding */
2397  mx = ff_h263_round_chroma(mx);
2398  my = ff_h263_round_chroma(my);
2399 
2400  sx = mx & s_mask;
2401  sy = my & s_mask;
2402  src_x = s->mb_x * block_s + (mx >> lowres + 1);
2403  src_y = s->mb_y * block_s + (my >> lowres + 1);
2404 
2405  offset = src_y * s->uvlinesize + src_x;
2406  ptr = ref_picture[1] + offset;
2407  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2408  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2410  9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2411  ptr = s->edge_emu_buffer;
2412  emu = 1;
2413  }
2414  sx = (sx << 2) >> lowres;
2415  sy = (sy << 2) >> lowres;
2416  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2417 
2418  ptr = ref_picture[2] + offset;
2419  if (emu) {
2421  ptr, s->uvlinesize, 9, 9,
2422  src_x, src_y, h_edge_pos, v_edge_pos);
2423  ptr = s->edge_emu_buffer;
2424  }
2425  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2426 }
2427 
2428 /**
2429  * motion compensation of a single macroblock
2430  * @param s context
2431  * @param dest_y luma destination pointer
2432  * @param dest_cb chroma cb/u destination pointer
2433  * @param dest_cr chroma cr/v destination pointer
2434  * @param dir direction (0->forward, 1->backward)
2435  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2436  * @param pix_op halfpel motion compensation function (average or put normally)
2437  * the motion vectors are taken from s->mv and the MV type from s->mv_type
2438  */
2439 static inline void MPV_motion_lowres(MpegEncContext *s,
2440  uint8_t *dest_y, uint8_t *dest_cb,
2441  uint8_t *dest_cr,
2442  int dir, uint8_t **ref_picture,
2443  h264_chroma_mc_func *pix_op)
2444 {
2445  int mx, my;
2446  int mb_x, mb_y, i;
2447  const int lowres = s->avctx->lowres;
2448  const int block_s = 8 >>lowres;
2449 
2450  mb_x = s->mb_x;
2451  mb_y = s->mb_y;
2452 
2453  switch (s->mv_type) {
2454  case MV_TYPE_16X16:
2455  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2456  0, 0, 0,
2457  ref_picture, pix_op,
2458  s->mv[dir][0][0], s->mv[dir][0][1],
2459  2 * block_s, mb_y);
2460  break;
2461  case MV_TYPE_8X8:
2462  mx = 0;
2463  my = 0;
2464  for (i = 0; i < 4; i++) {
2465  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2466  s->linesize) * block_s,
2467  ref_picture[0], 0, 0,
2468  (2 * mb_x + (i & 1)) * block_s,
2469  (2 * mb_y + (i >> 1)) * block_s,
2470  s->width, s->height, s->linesize,
2471  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2472  block_s, block_s, pix_op,
2473  s->mv[dir][i][0], s->mv[dir][i][1]);
2474 
2475  mx += s->mv[dir][i][0];
2476  my += s->mv[dir][i][1];
2477  }
2478 
2479  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2480  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2481  pix_op, mx, my);
2482  break;
2483  case MV_TYPE_FIELD:
2484  if (s->picture_structure == PICT_FRAME) {
2485  /* top field */
2486  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2487  1, 0, s->field_select[dir][0],
2488  ref_picture, pix_op,
2489  s->mv[dir][0][0], s->mv[dir][0][1],
2490  block_s, mb_y);
2491  /* bottom field */
2492  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2493  1, 1, s->field_select[dir][1],
2494  ref_picture, pix_op,
2495  s->mv[dir][1][0], s->mv[dir][1][1],
2496  block_s, mb_y);
2497  } else {
2498  if (s->picture_structure != s->field_select[dir][0] + 1 &&
2499  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2500  ref_picture = s->current_picture_ptr->f.data;
2501 
2502  }
2503  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2504  0, 0, s->field_select[dir][0],
2505  ref_picture, pix_op,
2506  s->mv[dir][0][0],
2507  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2508  }
2509  break;
2510  case MV_TYPE_16X8:
2511  for (i = 0; i < 2; i++) {
2512  uint8_t **ref2picture;
2513 
2514  if (s->picture_structure == s->field_select[dir][i] + 1 ||
2515  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2516  ref2picture = ref_picture;
2517  } else {
2518  ref2picture = s->current_picture_ptr->f.data;
2519  }
2520 
2521  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2522  0, 0, s->field_select[dir][i],
2523  ref2picture, pix_op,
2524  s->mv[dir][i][0], s->mv[dir][i][1] +
2525  2 * block_s * i, block_s, mb_y >> 1);
2526 
2527  dest_y += 2 * block_s * s->linesize;
2528  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2529  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2530  }
2531  break;
2532  case MV_TYPE_DMV:
2533  if (s->picture_structure == PICT_FRAME) {
2534  for (i = 0; i < 2; i++) {
2535  int j;
2536  for (j = 0; j < 2; j++) {
2537  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2538  1, j, j ^ i,
2539  ref_picture, pix_op,
2540  s->mv[dir][2 * i + j][0],
2541  s->mv[dir][2 * i + j][1],
2542  block_s, mb_y);
2543  }
2545  }
2546  } else {
2547  for (i = 0; i < 2; i++) {
2548  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2549  0, 0, s->picture_structure != i + 1,
2550  ref_picture, pix_op,
2551  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2552  2 * block_s, mb_y >> 1);
2553 
2554  // after put we make avg of the same block
2556 
2557  // opposite parity is always in the same
2558  // frame if this is second field
2559  if (!s->first_field) {
2560  ref_picture = s->current_picture_ptr->f.data;
2561  }
2562  }
2563  }
2564  break;
2565  default:
2566  av_assert2(0);
2567  }
2568 }
2569 
2570 /**
2571  * find the lowest MB row referenced in the MVs
2572  */
2574 {
2575  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2576  int my, off, i, mvs;
2577 
2578  if (s->picture_structure != PICT_FRAME || s->mcsel)
2579  goto unhandled;
2580 
2581  switch (s->mv_type) {
2582  case MV_TYPE_16X16:
2583  mvs = 1;
2584  break;
2585  case MV_TYPE_16X8:
2586  mvs = 2;
2587  break;
2588  case MV_TYPE_8X8:
2589  mvs = 4;
2590  break;
2591  default:
2592  goto unhandled;
2593  }
2594 
2595  for (i = 0; i < mvs; i++) {
2596  my = s->mv[dir][i][1]<<qpel_shift;
2597  my_max = FFMAX(my_max, my);
2598  my_min = FFMIN(my_min, my);
2599  }
2600 
2601  off = (FFMAX(-my_min, my_max) + 63) >> 6;
2602 
2603  return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2604 unhandled:
2605  return s->mb_height-1;
2606 }
2607 
/* put block[] to dest[] */
/* Dequantize an intra block in place, then write the IDCT result over dest
 * (overwrite, no add). n selects the per-component DC scale inside the
 * unquantizer; qscale is the quantizer for this macroblock. */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
2615 
2616 /* add block[] to dest[] */
2617 static inline void add_dct(MpegEncContext *s,
2618  int16_t *block, int i, uint8_t *dest, int line_size)
2619 {
2620  if (s->block_last_index[i] >= 0) {
2621  s->dsp.idct_add (dest, line_size, block);
2622  }
2623 }
2624 
2625 static inline void add_dequant_dct(MpegEncContext *s,
2626  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2627 {
2628  if (s->block_last_index[i] >= 0) {
2629  s->dct_unquantize_inter(s, block, i, qscale);
2630 
2631  s->dsp.idct_add (dest, line_size, block);
2632  }
2633 }
2634 
2635 /**
2636  * Clean dc, ac, coded_block for the current non-intra MB.
2637  */
2639 {
2640  int wrap = s->b8_stride;
2641  int xy = s->block_index[0];
2642 
2643  s->dc_val[0][xy ] =
2644  s->dc_val[0][xy + 1 ] =
2645  s->dc_val[0][xy + wrap] =
2646  s->dc_val[0][xy + 1 + wrap] = 1024;
2647  /* ac pred */
2648  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2649  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2650  if (s->msmpeg4_version>=3) {
2651  s->coded_block[xy ] =
2652  s->coded_block[xy + 1 ] =
2653  s->coded_block[xy + wrap] =
2654  s->coded_block[xy + 1 + wrap] = 0;
2655  }
2656  /* chroma */
2657  wrap = s->mb_stride;
2658  xy = s->mb_x + s->mb_y * wrap;
2659  s->dc_val[1][xy] =
2660  s->dc_val[2][xy] = 1024;
2661  /* ac pred */
2662  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2663  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2664 
2665  s->mbintra_table[xy]= 0;
2666 }
2667 
2668 /* generic function called after a macroblock has been parsed by the
2669  decoder or after it has been encoded by the encoder.
2670 
2671  Important variables used:
2672  s->mb_intra : true if intra macroblock
2673  s->mv_dir : motion vector direction
2674  s->mv_type : motion vector type
2675  s->mv : motion vector
2676  s->interlaced_dct : true if interlaced dct used (mpeg2)
2677  */
2678 static av_always_inline
2680  int lowres_flag, int is_mpeg12)
2681 {
2682  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2684  ff_xvmc_decode_mb(s);//xvmc uses pblocks
2685  return;
2686  }
2687 
2688  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2689  /* print DCT coefficients */
2690  int i,j;
2691  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2692  for(i=0; i<6; i++){
2693  for(j=0; j<64; j++){
2694  av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2695  }
2696  av_log(s->avctx, AV_LOG_DEBUG, "\n");
2697  }
2698  }
2699 
2700  s->current_picture.qscale_table[mb_xy] = s->qscale;
2701 
2702  /* update DC predictors for P macroblocks */
2703  if (!s->mb_intra) {
2704  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2705  if(s->mbintra_table[mb_xy])
2707  } else {
2708  s->last_dc[0] =
2709  s->last_dc[1] =
2710  s->last_dc[2] = 128 << s->intra_dc_precision;
2711  }
2712  }
2713  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2714  s->mbintra_table[mb_xy]=1;
2715 
2716  if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2717  uint8_t *dest_y, *dest_cb, *dest_cr;
2718  int dct_linesize, dct_offset;
2719  op_pixels_func (*op_pix)[4];
2720  qpel_mc_func (*op_qpix)[16];
2721  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2722  const int uvlinesize = s->current_picture.f.linesize[1];
2723  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2724  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2725 
2726  /* avoid copy if macroblock skipped in last frame too */
2727  /* skip only during decoding as we might trash the buffers during encoding a bit */
2728  if(!s->encoding){
2729  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2730 
2731  if (s->mb_skipped) {
2732  s->mb_skipped= 0;
2734  *mbskip_ptr = 1;
2735  } else if(!s->current_picture.reference) {
2736  *mbskip_ptr = 1;
2737  } else{
2738  *mbskip_ptr = 0; /* not skipped */
2739  }
2740  }
2741 
2742  dct_linesize = linesize << s->interlaced_dct;
2743  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2744 
2745  if(readable){
2746  dest_y= s->dest[0];
2747  dest_cb= s->dest[1];
2748  dest_cr= s->dest[2];
2749  }else{
2750  dest_y = s->b_scratchpad;
2751  dest_cb= s->b_scratchpad+16*linesize;
2752  dest_cr= s->b_scratchpad+32*linesize;
2753  }
2754 
2755  if (!s->mb_intra) {
2756  /* motion handling */
2757  /* decoding or more than one mb_type (MC was already done otherwise) */
2758  if(!s->encoding){
2759 
2761  if (s->mv_dir & MV_DIR_FORWARD) {
2764  0);
2765  }
2766  if (s->mv_dir & MV_DIR_BACKWARD) {
2769  0);
2770  }
2771  }
2772 
2773  if(lowres_flag){
2775 
2776  if (s->mv_dir & MV_DIR_FORWARD) {
2777  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2779  }
2780  if (s->mv_dir & MV_DIR_BACKWARD) {
2781  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2782  }
2783  }else{
2784  op_qpix= s->me.qpel_put;
2785  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2786  op_pix = s->hdsp.put_pixels_tab;
2787  }else{
2788  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2789  }
2790  if (s->mv_dir & MV_DIR_FORWARD) {
2791  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2792  op_pix = s->hdsp.avg_pixels_tab;
2793  op_qpix= s->me.qpel_avg;
2794  }
2795  if (s->mv_dir & MV_DIR_BACKWARD) {
2796  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2797  }
2798  }
2799  }
2800 
2801  /* skip dequant / idct if we are really late ;) */
2802  if(s->avctx->skip_idct){
2805  || s->avctx->skip_idct >= AVDISCARD_ALL)
2806  goto skip_idct;
2807  }
2808 
2809  /* add dct residue */
2811  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2812  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2813  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2814  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2815  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2816 
2817  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2818  if (s->chroma_y_shift){
2819  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2820  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2821  }else{
2822  dct_linesize >>= 1;
2823  dct_offset >>=1;
2824  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2825  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2826  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2827  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2828  }
2829  }
2830  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2831  add_dct(s, block[0], 0, dest_y , dct_linesize);
2832  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2833  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2834  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2835 
2836  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2837  if(s->chroma_y_shift){//Chroma420
2838  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2839  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2840  }else{
2841  //chroma422
2842  dct_linesize = uvlinesize << s->interlaced_dct;
2843  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2844 
2845  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2846  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2847  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2848  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2849  if(!s->chroma_x_shift){//Chroma444
2850  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2851  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2852  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2853  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2854  }
2855  }
2856  }//fi gray
2857  }
2859  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2860  }
2861  } else {
2862  /* dct only in intra block */
2864  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2865  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2866  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2867  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2868 
2869  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2870  if(s->chroma_y_shift){
2871  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2872  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2873  }else{
2874  dct_offset >>=1;
2875  dct_linesize >>=1;
2876  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2877  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2878  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2879  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2880  }
2881  }
2882  }else{
2883  s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2884  s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2885  s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2886  s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2887 
2888  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2889  if(s->chroma_y_shift){
2890  s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2891  s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2892  }else{
2893 
2894  dct_linesize = uvlinesize << s->interlaced_dct;
2895  dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2896 
2897  s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2898  s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2899  s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2900  s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2901  if(!s->chroma_x_shift){//Chroma444
2902  s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2903  s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2904  s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2905  s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2906  }
2907  }
2908  }//gray
2909  }
2910  }
2911 skip_idct:
2912  if(!readable){
2913  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2914  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2915  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2916  }
2917  }
2918 }
2919 
2920 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2921 #if !CONFIG_SMALL
2922  if(s->out_format == FMT_MPEG1) {
2923  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2924  else MPV_decode_mb_internal(s, block, 0, 1);
2925  } else
2926 #endif
2927  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2928  else MPV_decode_mb_internal(s, block, 0, 0);
2929 }
2930 
2931 /**
2932  * @param h is the normal height, this will be reduced automatically if needed for the last row
2933  */
2935  Picture *last, int y, int h, int picture_structure,
2936  int first_field, int draw_edges, int low_delay,
2937  int v_edge_pos, int h_edge_pos)
2938 {
2939  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2940  int hshift = desc->log2_chroma_w;
2941  int vshift = desc->log2_chroma_h;
2942  const int field_pic = picture_structure != PICT_FRAME;
2943  if(field_pic){
2944  h <<= 1;
2945  y <<= 1;
2946  }
2947 
2948  if (!avctx->hwaccel &&
2950  draw_edges &&
2951  cur->reference &&
2952  !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2953  int *linesize = cur->f.linesize;
2954  int sides = 0, edge_h;
2955  if (y==0) sides |= EDGE_TOP;
2956  if (y + h >= v_edge_pos)
2957  sides |= EDGE_BOTTOM;
2958 
2959  edge_h= FFMIN(h, v_edge_pos - y);
2960 
2961  dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2962  linesize[0], h_edge_pos, edge_h,
2963  EDGE_WIDTH, EDGE_WIDTH, sides);
2964  dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2965  linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2966  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2967  dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2968  linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2969  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2970  }
2971 
2972  h = FFMIN(h, avctx->height - y);
2973 
2974  if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2975 
2976  if (avctx->draw_horiz_band) {
2977  AVFrame *src;
2979  int i;
2980 
2981  if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2983  src = &cur->f;
2984  else if (last)
2985  src = &last->f;
2986  else
2987  return;
2988 
2989  if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2990  picture_structure == PICT_FRAME &&
2991  avctx->codec_id != AV_CODEC_ID_SVQ3) {
2992  for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2993  offset[i] = 0;
2994  }else{
2995  offset[0]= y * src->linesize[0];
2996  offset[1]=
2997  offset[2]= (y >> vshift) * src->linesize[1];
2998  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2999  offset[i] = 0;
3000  }
3001 
3002  emms_c();
3003 
3004  avctx->draw_horiz_band(avctx, src, offset,
3005  y, picture_structure, h);
3006  }
3007 }
3008 
3010 {
3011  int draw_edges = s->unrestricted_mv && !s->intra_only;
3013  s->last_picture_ptr, y, h, s->picture_structure,
3014  s->first_field, draw_edges, s->low_delay,
3015  s->v_edge_pos, s->h_edge_pos);
3016 }
3017 
3018 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3019  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3020  const int uvlinesize = s->current_picture.f.linesize[1];
3021  const int mb_size= 4 - s->avctx->lowres;
3022 
3023  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3024  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3025  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3026  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3027  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3028  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3029  //block_index is not used by mpeg2, so it is not affected by chroma_format
3030 
3031  s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3032  s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3033  s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3034 
3036  {
3037  if(s->picture_structure==PICT_FRAME){
3038  s->dest[0] += s->mb_y * linesize << mb_size;
3039  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3040  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3041  }else{
3042  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3043  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3044  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3046  }
3047  }
3048 }
3049 
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t stash[64];
    int idx;

    /* nothing beyond (possibly) the DC coefficient: no reordering needed */
    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* Pass 1: lift every coded coefficient out of the block and clear its
     * old slot, so pass 2 can never read a value that was already moved. */
    for (idx = 0; idx <= last; idx++) {
        const int raster = scantable[idx];
        stash[raster] = block[raster];
        block[raster] = 0;
    }

    /* Pass 2: drop each stashed coefficient into its permuted position. */
    for (idx = 0; idx <= last; idx++) {
        const int raster = scantable[idx];
        block[permutation[raster]] = stash[raster];
    }
}
3078 
3080  int i;
3081  MpegEncContext *s = avctx->priv_data;
3082 
3083  if(s==NULL || s->picture==NULL)
3084  return;
3085 
3086  for (i = 0; i < MAX_PICTURE_COUNT; i++)
3087  ff_mpeg_unref_picture(s, &s->picture[i]);
3089 
3093 
3094  s->mb_x= s->mb_y= 0;
3095  s->closed_gop= 0;
3096 
3097  s->parse_context.state= -1;
3099  s->parse_context.overread= 0;
3101  s->parse_context.index= 0;
3102  s->parse_context.last_index= 0;
3103  s->bitstream_buffer_size=0;
3104  s->pp_time=0;
3105 }
3106 
3108  int16_t *block, int n, int qscale)
3109 {
3110  int i, level, nCoeffs;
3111  const uint16_t *quant_matrix;
3112 
3113  nCoeffs= s->block_last_index[n];
3114 
3115  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3116  /* XXX: only mpeg1 */
3117  quant_matrix = s->intra_matrix;
3118  for(i=1;i<=nCoeffs;i++) {
3119  int j= s->intra_scantable.permutated[i];
3120  level = block[j];
3121  if (level) {
3122  if (level < 0) {
3123  level = -level;
3124  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3125  level = (level - 1) | 1;
3126  level = -level;
3127  } else {
3128  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3129  level = (level - 1) | 1;
3130  }
3131  block[j] = level;
3132  }
3133  }
3134 }
3135 
3137  int16_t *block, int n, int qscale)
3138 {
3139  int i, level, nCoeffs;
3140  const uint16_t *quant_matrix;
3141 
3142  nCoeffs= s->block_last_index[n];
3143 
3144  quant_matrix = s->inter_matrix;
3145  for(i=0; i<=nCoeffs; i++) {
3146  int j= s->intra_scantable.permutated[i];
3147  level = block[j];
3148  if (level) {
3149  if (level < 0) {
3150  level = -level;
3151  level = (((level << 1) + 1) * qscale *
3152  ((int) (quant_matrix[j]))) >> 4;
3153  level = (level - 1) | 1;
3154  level = -level;
3155  } else {
3156  level = (((level << 1) + 1) * qscale *
3157  ((int) (quant_matrix[j]))) >> 4;
3158  level = (level - 1) | 1;
3159  }
3160  block[j] = level;
3161  }
3162  }
3163 }
3164 
3166  int16_t *block, int n, int qscale)
3167 {
3168  int i, level, nCoeffs;
3169  const uint16_t *quant_matrix;
3170 
3171  if(s->alternate_scan) nCoeffs= 63;
3172  else nCoeffs= s->block_last_index[n];
3173 
3174  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3175  quant_matrix = s->intra_matrix;
3176  for(i=1;i<=nCoeffs;i++) {
3177  int j= s->intra_scantable.permutated[i];
3178  level = block[j];
3179  if (level) {
3180  if (level < 0) {
3181  level = -level;
3182  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3183  level = -level;
3184  } else {
3185  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3186  }
3187  block[j] = level;
3188  }
3189  }
3190 }
3191 
3193  int16_t *block, int n, int qscale)
3194 {
3195  int i, level, nCoeffs;
3196  const uint16_t *quant_matrix;
3197  int sum=-1;
3198 
3199  if(s->alternate_scan) nCoeffs= 63;
3200  else nCoeffs= s->block_last_index[n];
3201 
3202  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3203  sum += block[0];
3204  quant_matrix = s->intra_matrix;
3205  for(i=1;i<=nCoeffs;i++) {
3206  int j= s->intra_scantable.permutated[i];
3207  level = block[j];
3208  if (level) {
3209  if (level < 0) {
3210  level = -level;
3211  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3212  level = -level;
3213  } else {
3214  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3215  }
3216  block[j] = level;
3217  sum+=level;
3218  }
3219  }
3220  block[63]^=sum&1;
3221 }
3222 
3224  int16_t *block, int n, int qscale)
3225 {
3226  int i, level, nCoeffs;
3227  const uint16_t *quant_matrix;
3228  int sum=-1;
3229 
3230  if(s->alternate_scan) nCoeffs= 63;
3231  else nCoeffs= s->block_last_index[n];
3232 
3233  quant_matrix = s->inter_matrix;
3234  for(i=0; i<=nCoeffs; i++) {
3235  int j= s->intra_scantable.permutated[i];
3236  level = block[j];
3237  if (level) {
3238  if (level < 0) {
3239  level = -level;
3240  level = (((level << 1) + 1) * qscale *
3241  ((int) (quant_matrix[j]))) >> 4;
3242  level = -level;
3243  } else {
3244  level = (((level << 1) + 1) * qscale *
3245  ((int) (quant_matrix[j]))) >> 4;
3246  }
3247  block[j] = level;
3248  sum+=level;
3249  }
3250  }
3251  block[63]^=sum&1;
3252 }
3253 
3255  int16_t *block, int n, int qscale)
3256 {
3257  int i, level, qmul, qadd;
3258  int nCoeffs;
3259 
3260  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3261 
3262  qmul = qscale << 1;
3263 
3264  if (!s->h263_aic) {
3265  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3266  qadd = (qscale - 1) | 1;
3267  }else{
3268  qadd = 0;
3269  }
3270  if(s->ac_pred)
3271  nCoeffs=63;
3272  else
3273  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3274 
3275  for(i=1; i<=nCoeffs; i++) {
3276  level = block[i];
3277  if (level) {
3278  if (level < 0) {
3279  level = level * qmul - qadd;
3280  } else {
3281  level = level * qmul + qadd;
3282  }
3283  block[i] = level;
3284  }
3285  }
3286 }
3287 
3289  int16_t *block, int n, int qscale)
3290 {
3291  int i, level, qmul, qadd;
3292  int nCoeffs;
3293 
3294  av_assert2(s->block_last_index[n]>=0);
3295 
3296  qadd = (qscale - 1) | 1;
3297  qmul = qscale << 1;
3298 
3299  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3300 
3301  for(i=0; i<=nCoeffs; i++) {
3302  level = block[i];
3303  if (level) {
3304  if (level < 0) {
3305  level = level * qmul - qadd;
3306  } else {
3307  level = level * qmul + qadd;
3308  }
3309  block[i] = level;
3310  }
3311  }
3312 }
3313 
3314 /**
3315  * set qscale and update qscale dependent variables.
3316  */
3317 void ff_set_qscale(MpegEncContext * s, int qscale)
3318 {
3319  if (qscale < 1)
3320  qscale = 1;
3321  else if (qscale > 31)
3322  qscale = 31;
3323 
3324  s->qscale = qscale;
3325  s->chroma_qscale= s->chroma_qscale_table[qscale];
3326 
3327  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3329 }
3330 
3332 {
3335 }
3336 
3337 #if CONFIG_ERROR_RESILIENCE
3339 {
3340  ERContext *er = &s->er;
3341 
3342  er->cur_pic = s->current_picture_ptr;
3343  er->last_pic = s->last_picture_ptr;
3344  er->next_pic = s->next_picture_ptr;
3345 
3346  er->pp_time = s->pp_time;
3347  er->pb_time = s->pb_time;
3348  er->quarter_sample = s->quarter_sample;
3350 
3351  ff_er_frame_start(er);
3352 }
3353 #endif /* CONFIG_ERROR_RESILIENCE */
int bitstream_buffer_size
Definition: mpegvideo.h:616
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free
Definition: mpegvideo.h:194
#define PICT_BOTTOM_FIELD
Definition: mpegvideo.h:668
int last
number of values for last = 0
Definition: rl.h:40
int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
Definition: mpegvideo.c:502
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:549
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread.c:920
int16_t(* b_bidir_back_mv_table_base)[2]
Definition: mpegvideo.h:405
int table_size
Definition: get_bits.h:66
#define PICT_TOP_FIELD
Definition: mpegvideo.h:667
discard all frames except keyframes
Definition: avcodec.h:617
int8_t * ref_index[2]
Definition: mpegvideo.h:114
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:3018
unsigned int stream_codec_tag
fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + ...
Definition: avcodec.h:1180
float v
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2678
int picture_number
Definition: mpegvideo.h:279
const char * s
Definition: avisynth_c.h:668
#define MAX_PICTURE_COUNT
Definition: mpegvideo.h:66
#define CONFIG_MPEG_XVMC_DECODER
Definition: config.h:553
ScanTable intra_v_scantable
Definition: mpegvideo.h:302
AVBufferRef * mb_var_buf
Definition: mpegvideo.h:116
static int shift(int a, int b)
Definition: sonic.c:78
#define CONFIG_WMV2_ENCODER
Definition: config.h:1124
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:731
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:105
void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
Definition: mpegvideo.c:1409
int time_increment_bits
number of bits to represent the fractional part of time
Definition: mpegvideo.h:560
This structure describes decoded (raw) audio or video data.
Definition: frame.h:96
#define IS_SKIP(a)
Definition: mpegvideo.h:140
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Allocate a Picture.
Definition: mpegvideo.c:378
int16_t(* p_mv_table)[2]
MV table (1MV per MB) p-frame encoding.
Definition: mpegvideo.h:409
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegvideo.h:369
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:320
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:428
#define MAKE_WRITABLE(table)
const uint8_t * y_dc_scale_table
qscale -&gt; y_dc_scale table
Definition: mpegvideo.h:355
void(* emulated_edge_mc)(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src, ptrdiff_t src_stride, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:61
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegvideo.h:126
#define av_always_inline
Definition: attributes.h:41
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1324
av_cold int ff_dct_common_init(MpegEncContext *s)
Definition: mpegvideo.c:155
#define ARCH_PPC
Definition: config.h:26
av_cold int ff_MPV_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:1003
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: avcodec.h:4153
void ff_MPV_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo.c:3331
AVFrame * f
Definition: thread.h:36
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo.c:2374
uint8_t * coded_block_base
Definition: mpegvideo.h:358
else temp
Definition: vf_mcdeint.c:258
#define FF_DEBUG_SKIP
Definition: avcodec.h:2450
static int update_picture_tables(Picture *dst, Picture *src)
Definition: mpegvideo.c:457
#define EDGE_TOP
Definition: dsputil.h:262
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y &lt;= row &lt; end_mb_y) ...
Definition: mpegvideo.h:321
uint16_t * mb_var
Table for MB variances.
Definition: mpegvideo.h:117
#define ARCH_BFIN
Definition: config.h:20
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:501
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block.
Definition: mpegvideo.c:3058
void ff_MPV_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:361
MJPEG encoder.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:285
h264_chroma_mc_func put_h264_chroma_pixels_tab[4]
Definition: h264chroma.h:27
#define me
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:644
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
Definition: dsputil.h:260
static const uint8_t mpeg2_dc_scale_table3[128]
Definition: mpegvideo.c:102
void ff_xvmc_field_end(MpegEncContext *s)
Complete frame/field rendering by passing any remaining blocks.
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
Definition: mpegvideo.h:176
uint8_t * bitstream_buffer
Definition: mpegvideo.h:615
enum AVCodecID codec_id
Definition: mpegvideo.h:261
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: diracdec.c:73
void ff_MPV_common_init_ppc(MpegEncContext *s)
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type)
Definition: frame.c:49
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegvideo.h:169
void av_log(void *avcl, int level, const char *fmt,...) av_printf_format(3
Send the specified message to the log if the level is less than or equal to the current av_log_level...
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1342
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced p-frame encoding.
Definition: mpegvideo.h:415
int16_t(* p_mv_table_base)[2]
Definition: mpegvideo.h:401
static int make_tables_writable(Picture *pic)
Definition: mpegvideo.c:349
uint8_t raster_end[64]
Definition: dsputil.h:114
#define wrap(func)
Definition: w64xmmtest.h:70
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
uint32_t * score_map
map to store the scores
Definition: mpegvideo.h:200
mpegvideo header.
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
discard all
Definition: avcodec.h:618
uint8_t permutated[64]
Definition: dsputil.h:113
#define IS_INTRA4x4(a)
Definition: mpegvideo.h:135
const int8_t * table_level
Definition: rl.h:43
uint8_t run
Definition: svq3.c:145
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:596
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2570
int padding_bug_score
used to detect the VERY common padding bug in MPEG4
Definition: mpegvideo.h:606
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:286
Pixel format.
Definition: avcodec.h:4533
int stride
Definition: mace.c:144
int frame_start_found
Definition: parser.h:34
#define av_cold
Definition: avcodec.h:653
int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
Find and store the surfaces that are used as reference frames.
int qscale
QP.
Definition: mpegvideo.h:373
RLTable.
Definition: rl.h:38
void(* idct_add)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
Definition: dsputil.h:232
int h263_aic
Advanded INTRA Coding (AIC)
Definition: mpegvideo.h:296
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode b-frame encoding.
Definition: mpegvideo.h:411
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:68
int chroma_x_shift
Definition: mpegvideo.h:684
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:263
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:721
int field_select[2][2]
Definition: mpegvideo.h:436
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:729
av_cold void ff_MPV_common_init_axp(MpegEncContext *s)
int block_wrap[6]
Definition: mpegvideo.h:470
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3136
int16_t(* b_back_mv_table_base)[2]
Definition: mpegvideo.h:403
uint16_t pp_time
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:616
#define USES_LIST(a, list)
does this mb use listX, note does not work if subMBs
Definition: mpegvideo.h:156
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:2638
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:723
Predicted.
Definition: avcodec.h:2305
#define COLOR(theta, r)
void av_freep(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:234
if((e=av_dict_get(options,"", NULL, AV_DICT_IGNORE_SUFFIX)))
Definition: avfilter.c:965
int b_frame_score
Definition: mpegvideo.h:175
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1668
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegvideo.h:122
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:707
int picture_in_gop_number
0-&gt; first pic in gop, ...
Definition: mpegvideo.h:280
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: utils.c:153
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:3009
int8_t * max_run[2]
encoding &amp; decoding
Definition: rl.h:46
int context_reinit
Definition: mpegvideo.h:745
const uint8_t ff_alternate_vertical_scan[64]
Definition: dsputil.c:86
static av_always_inline void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo.c:2679
int16_t * dc_val_base
Definition: mpegvideo.h:353
void(* idct_put)(uint8_t *dest, int line_size, int16_t *block)
block -&gt; idct -&gt; clip to unsigned 8 bit -&gt; dest.
Definition: dsputil.h:226
int ff_MPV_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:1183
uint8_t
#define IS_8X16(a)
Definition: mpegvideo.h:147
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:290
#define PICT_FRAME
Definition: mpegvideo.h:669
enum OutputFormat out_format
output format
Definition: mpegvideo.h:253
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:83
static int first_field(const struct video_data *s, int fd)
Definition: v4l2.c:228
static const uint8_t offset[511][2]
Definition: vf_uspp.c:58
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:507
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Definition: mpegvideo.h:367
Multithreading support functions.
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
Definition: avcodec.h:779
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:232
#define emms_c()
Definition: internal.h:49
#define IS_GMC(a)
Definition: mpegvideo.h:144
int no_rounding
apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0...
Definition: mpegvideo.h:443
int interlaced_dct
Definition: mpegvideo.h:689
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:497
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:347
int intra_dc_precision
Definition: mpegvideo.h:671
static int pic_is_unused(MpegEncContext *s, Picture *pic)
Definition: mpegvideo.c:1422
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
Definition: mpegvideo.c:307
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:412
float * cplx_tab
Definition: mpegvideo.h:741
#define FF_DEBUG_VIS_MV_B_BACK
Definition: avcodec.h:2469
int8_t * max_level[2]
encoding &amp; decoding
Definition: rl.h:45
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:3273
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:82
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:565
uint8_t idct_permutation[64]
idct input permutation.
Definition: dsputil.h:246
AVBufferRef * mb_type_buf
Definition: mpegvideo.h:107
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
Definition: mpegvideo.h:371
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: mpegvideo.c:126
int flags2
AVCodecContext.flags2.
Definition: mpegvideo.h:265
#define SLICE_FLAG_CODED_ORDER
draw_horiz_band() is called in coded order instead of display
Definition: avcodec.h:1649
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:293
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:714
int mb_height
number of MBs horizontally &amp; vertically
Definition: mpegvideo.h:281
#define AV_NUM_DATA_POINTERS
Definition: frame.h:97
int lowres
low resolution decoding, 1-&gt; 1/2 size, 2-&gt;1/4 size
Definition: avcodec.h:2580
enum AVPixelFormat ff_pixfmt_list_420[]
Definition: mpegvideo.c:121
void ff_MPV_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1734
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:271
#define FFSWAP(type, a, b)
Definition: avcodec.h:928
int16_t(*[2][2] p_field_mv_table_base)[2]
Definition: mpegvideo.h:407
static int free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
Definition: mpegvideo.c:1132
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo.c:1481
#define MAX_LEVEL
Definition: rl.h:35
#define IS_INTERLACED(a)
Definition: mpegvideo.h:142
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:866
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:3317
AVBufferRef * mb_mean_buf
Definition: mpegvideo.h:125
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:498
void ff_xvmc_decode_mb(MpegEncContext *s)
Synthesize the data needed by XvMC to render one macroblock of data.
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:496
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:251
ThreadFrame tf
Definition: mpegvideo.h:99
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:354
enum AVCodecID id
Definition: avcodec.h:2936
int h263_plus
h263 plus headers
Definition: mpegvideo.h:258
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:323
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
Definition: pthread.c:684
int last_non_b_pict_type
used for mpeg4 gmc b-frames &amp; ratecontrol
Definition: mpegvideo.h:384
unsigned int buffer_size
Definition: parser.h:32
int width
width and height of the video frame
Definition: frame.h:145
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: avcodec.h:4147
uint8_t * mbskip_table
Definition: mpegvideo.h:111
int stream_codec_tag
internal stream_codec_tag upper case converted from avctx stream_codec_tag
Definition: mpegvideo.h:272
int last_dc[3]
last DC values for MPEG1
Definition: mpegvideo.h:352
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:77
void * opaque
for some private data of the user
Definition: frame.h:272
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:2617
static int ff_h263_round_chroma(int x)
Definition: mpegvideo.h:889
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:718
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:362
#define ARCH_X86
Definition: config.h:35
int chroma_y_shift
Definition: mpegvideo.h:685
static int find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1433
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:594
av_cold void ff_init_vlc_rl(RLTable *rl)
Definition: mpegvideo.c:1369
const uint8_t ff_alternate_horizontal_scan[64]
Definition: dsputil.c:75
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:392
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2608
ERContext er
Definition: mpegvideo.h:747
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2615
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:388
int reference
Definition: mpegvideo.h:178
const char * r
Definition: vf_curves.c:103
int capabilities
Codec capabilities.
Definition: avcodec.h:2941
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: avcodec.h:4168
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:368
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3192
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:719
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
struct AVCodec * codec
Definition: avcodec.h:1155
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:713
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1234
struct Picture * next_pic
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:2609
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3107
int quarter_sample
1-&gt;qpel, 0-&gt;half pel ME/MC
Definition: mpegvideo.h:584
uint16_t * mb_type
Table for candidate MB types for encoding.
Definition: mpegvideo.h:450
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color)
Draw a line from (ex, ey) -&gt; (sx, sy).
Definition: mpegvideo.c:1803
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:597
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
Definition: pthread.c:1029
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
Deallocate a picture.
Definition: mpegvideo.c:435
VLC vlc
decoding only deprecated FIXME remove
Definition: rl.h:47
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:418
Libavcodec external API header.
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
int8_t len
Definition: get_bits.h:72
uint8_t * mbintra_table
int * mb_index2xy
int priv_data_size
Size of HW accelerator private data.
Definition: avcodec.h:3105
int off
Definition: dsputil_bfin.c:29
static const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideo.c:60
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:3079
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:37
int coded_picture_number
used to set pic-&gt;coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:278
int * lambda_table
Definition: mpegvideo.h:377
uint8_t * error_status_table
AVBufferRef * hwaccel_priv_buf
Definition: mpegvideo.h:128
goto fail
Definition: avfilter.c:963
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:580
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:399
#define FF_ARRAY_ELEMS(a)
Definition: avcodec.h:929
#define MAX_THREADS
int n
number of entries of table_vlc minus 1
Definition: rl.h:39
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define IS_8X8(a)
Definition: mpegvideo.h:148
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:231
int err_recognition
Definition: mpegvideo.h:536
AVBufferRef * motion_val_buf[2]
Definition: mpegvideo.h:104
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, &#39;draw_horiz_band&#39; is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1376
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
Definition: mpegvideo.c:2179
int progressive_frame
Definition: mpegvideo.h:687
void ff_mpeg_er_frame_start(MpegEncContext *s)
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:167
struct Picture * last_pic
#define UPDATE_PICTURE(pic)
int top_field_first
Definition: mpegvideo.h:673
uint8_t * data
The data buffer.
Definition: buffer.h:89
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2476
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:351
#define CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:736
uint8_t * er_temp_buffer
int overread
the number of bytes which where irreversibly read from the next frame
Definition: parser.h:35
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:502
int last_index
Definition: parser.h:31
float y
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
Definition: mpegvideo.h:535
#define ARCH_ARM
Definition: config.h:16
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread.c:1021
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3223
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:341
ret
Definition: avfilter.c:961
int width
picture width / height.
Definition: avcodec.h:1314
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
Definition: mpegvideo.h:363
void(* clear_blocks)(int16_t *blocks)
Definition: dsputil.h:143
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:105
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:351
Picture.
Definition: mpegvideo.h:97
void * av_malloc(size_t size) av_malloc_attrib 1(1)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
int alternate_scan
Definition: mpegvideo.h:677
unsigned int allocated_bitstream_buffer_size
Definition: mpegvideo.h:617
void * hwaccel_picture_private
hardware accelerator private data
Definition: mpegvideo.h:132
uint16_t pb_time
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2449
int16_t(* ac_val_base)[16]
Definition: mpegvideo.h:360
int32_t
DSPContext * dsp
#define FFMIN(a, b)
Definition: avcodec.h:925
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX &amp; these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:500
const int8_t * table_run
Definition: rl.h:42
int16_t(*[2][2][2] b_field_mv_table_base)[2]
Definition: mpegvideo.h:408
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:208
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:649
int16_t(* b_forw_mv_table_base)[2]
Definition: mpegvideo.h:402
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:703
#define CONFIG_GRAY
Definition: config.h:395
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:295
MotionEstContext me
Definition: mpegvideo.h:441
float u
int n
Definition: avisynth_c.h:588
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
Definition: mpegvideo.c:198
#define EDGE_BOTTOM
Definition: dsputil.h:263
int mb_decision
macroblock decision mode
Definition: avcodec.h:1665
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2376
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:365
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: imgconvert.c:65
#define FF_DEBUG_VIS_MV_P_FOR
Definition: avcodec.h:2467
#define ME_MAP_SIZE
Definition: mpegvideo.h:68
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
Definition: mpegvideo.c:1506
void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur, Picture *last, int y, int h, int picture_structure, int first_field, int draw_edges, int low_delay, int v_edge_pos, int h_edge_pos)
Definition: mpegvideo.c:2934
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:668
RL_VLC_ELEM * rl_vlc[32]
decoding only
Definition: rl.h:48
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
#define FF_DEBUG_VIS_MB_TYPE
Definition: avcodec.h:2457
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2596
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:469
#define IS_16X8(a)
Definition: mpegvideo.h:146
int xvmc_acceleration
XVideo Motion Acceleration.
Definition: avcodec.h:1658
int * mb_index2xy
mb_index -&gt; mb_x + mb_y*mb_stride
Definition: mpegvideo.h:473
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1938
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:691
static const int8_t mv[256][2]
Definition: 4xm.c:73
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:157
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:425
planar YUV 4:2:0, 12bpp, (1 Cr &amp; Cb sample per 2x2 Y samples)
Definition: avcodec.h:4534
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2.c:76
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegvideo.h:120
AVBufferRef * qscale_table_buf
Definition: mpegvideo.h:101
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:422
struct Picture * cur_pic
int16_t(* b_bidir_forw_mv_table_base)[2]
Definition: mpegvideo.h:404
void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table, int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegvideo.c:1887
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideo.c:114
static int width
Definition: utils.c:158
int coded_picture_number
picture number in bitstream order
Definition: frame.h:199
uint16_t inter_matrix[64]
Definition: mpegvideo.h:478
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegvideo.h:123
uint8_t * buffer
Definition: parser.h:29
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:322
AVS_Value src
Definition: avisynth_c.h:523
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:693
#define FFMAX(a, b)
Definition: avcodec.h:923
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:287
enum AVCodecID codec_id
Definition: avcodec.h:1157
static av_const unsigned int ff_sqrt(unsigned int a)
Definition: mathops.h:207
enum AVDiscard skip_idct
Skip IDCT/dequantization for selected frames.
Definition: avcodec.h:2771
void ff_MPV_common_init_x86(MpegEncContext *s)
Definition: mpegvideo.c:558
int debug
debug
Definition: avcodec.h:2442
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:57
main external API structure.
Definition: avcodec.h:1146
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2607
ScanTable intra_scantable
Definition: mpegvideo.h:300
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
Definition: dsputil.h:81
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:359
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:249
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1172
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideo.c:66
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3165
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: dsputil.c:111
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)&gt;&gt;1.
Definition: hpeldsp.h:56
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:427
#define FF_DEBUG_QP
Definition: avcodec.h:2447
void * buf
Definition: avisynth_c.h:594
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
Definition: mpegvideo.c:2173
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
Picture * picture
main picture buffer
Definition: mpegvideo.h:289
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:366
int progressive_sequence
Definition: mpegvideo.h:663
BYTE int const BYTE int int int height
Definition: avisynth_c.h:713
int slice_flags
slice flags
Definition: avcodec.h:1648
void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2920
void avcodec_get_frame_defaults(AVFrame *frame)
Set the fields of the given AVFrame to default values.
Definition: utils.c:1046
int coded_height
Definition: avcodec.h:1324
Bi-dir predicted.
Definition: avcodec.h:2306
ScanTable intra_h_scantable
Definition: mpegvideo.h:301
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)&gt;&gt;1.
Definition: hpeldsp.h:80
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:81
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced b-frame encoding.
Definition: mpegvideo.h:416
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
Definition: mpegvideo.h:366
int closed_gop
MPEG1/2 GOP is closed.
Definition: mpegvideo.h:380
#define UPDATE_TABLE(table)
unsigned int avpriv_toupper4(unsigned int x)
Definition: utils.c:3265
AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:2588
uint8_t * index_run[2]
encoding only
Definition: rl.h:44
int context_initialized
Definition: mpegvideo.h:276
int input_picture_number
used to set pic-&gt;display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:277
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:288
DSPContext dsp
pointers for accelerated dsp functions
Definition: mpegvideo.h:395
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:124
#define s1
Definition: regdef.h:38
int f_code
forward MV resolution
Definition: mpegvideo.h:399
#define COPY(a)
#define type
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread.c:666
AVCodecContext * avctx
#define MV_DIR_FORWARD
Definition: mpegvideo.h:421
int max_b_frames
max number of b-frames for encoding
Definition: mpegvideo.h:266
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:381
#define ROUNDED_DIV(a, b)
Definition: avcodec.h:914
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int size
Size of data in bytes.
Definition: buffer.h:93
#define FFABS(a)
Definition: avcodec.h:920
int h263_pred
use mpeg4/h263 ac/dc predictions
Definition: mpegvideo.h:254
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:413
void * priv_data
Definition: avcodec.h:1182
static int init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:881
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:417
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode b-frame encoding.
Definition: mpegvideo.h:414
const uint8_t * c_dc_scale_table
qscale -&gt; c_dc_scale table
Definition: mpegvideo.h:356
uint8_t level
Definition: svq3.c:146
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:136
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:233
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second &quot; : depend...
Definition: mpegvideo.h:435
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode b-frame encoding.
Definition: mpegvideo.h:410
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:283
int noise_reduction
noise reduction strength
Definition: avcodec.h:1697
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
Definition: h264chroma.h:24
#define FF_DEBUG_VIS_MV_B_FOR
Definition: avcodec.h:2468
#define IS_ACPRED(a)
Definition: mpegvideo.h:153
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3254
MpegEncContext.
Definition: mpegvideo.h:245
uint8_t run
Definition: get_bits.h:73
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:350
int8_t * qscale_table
Definition: mpegvideo.h:102
#define MAX_RUN
Definition: rl.h:34
struct AVCodecContext * avctx
Definition: mpegvideo.h:247
A reference to a data buffer.
Definition: buffer.h:81
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color)
Draw an arrow from (ex, ey) -&gt; (sx, sy).
Definition: mpegvideo.c:1856
discard all non reference
Definition: avcodec.h:615
av_cold void ff_MPV_common_init_bfin(MpegEncContext *s)
#define CODEC_FLAG_EMU_EDGE
Don&#39;t draw edges.
Definition: avcodec.h:706
static void exchange_uv(MpegEncContext *s)
Definition: mpegvideo.c:540
int(* dct_error_sum)[64]
Definition: mpegvideo.h:505
int partitioned_frame
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:1967
#define FF_DEBUG_MV
Definition: avcodec.h:2448
common internal api header.
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:115
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left &amp; top MBs without sig11 ...
Definition: mpegvideo.h:282
AVBufferRef * mbskip_table_buf
Definition: mpegvideo.h:110
void ff_MPV_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:810
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:705
#define IS_PCM(a)
Definition: mpegvideo.h:137
#define ARCH_ALPHA
Definition: config.h:15
uint8_t * dest[3]
Definition: mpegvideo.h:471
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:118
int shared
Definition: mpegvideo.h:179
static double c[64]
int last_pict_type
Definition: mpegvideo.h:383
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3288
int b4_stride
4*mb_width+1 used for some 4x4 block arrays to allow simple addressing
Definition: mpegvideo.h:284
int16_t * dc_val[3]
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:329
uint8_t * obmc_scratchpad
Definition: mpegvideo.h:370
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG2 field pics)
Definition: avcodec.h:1650
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
Allocate a frame buffer.
Definition: mpegvideo.c:226
#define FFALIGN(x, a)
Definition: avcodec.h:930
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:349
int index
Definition: parser.h:30
const uint8_t * chroma_qscale_table
qscale -&gt; chroma_qscale (h263)
Definition: mpegvideo.h:357
uint32_t * map
map to avoid duplicate evaluations
Definition: mpegvideo.h:199
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:643
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:715
DSP utils.
static int lowres
Definition: ffplay.c:310
H264ChromaContext h264chroma
Definition: mpegvideo.h:396
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:706
#define IS_INTRA(x, y)
h264_chroma_mc_func avg_h264_chroma_pixels_tab[4]
Definition: h264chroma.h:28
int slices
Number of slices.
Definition: avcodec.h:1864
int picture_structure
Definition: mpegvideo.h:665
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:91
#define AVERROR_INVALIDDATA
VideoDSPContext vdsp
Definition: mpegvideo.h:398
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:298
#define IS_DIRECT(a)
Definition: mpegvideo.h:143
#define AV_RL32(x)
Definition: intreadwrite.h:275
int len
void ff_MPV_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1257
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:429
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2501
av_cold void ff_init_rl(RLTable *rl, uint8_t static_store[2][2 *MAX_RUN+MAX_LEVEL+3])
Definition: mpegvideo.c:1317
int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:2573
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:705
ParseContext parse_context
Definition: mpegvideo.h:538
VLC_TYPE(* table)[2]
code, bits
Definition: get_bits.h:65
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:2625
#define FF_DEBUG_MB_TYPE
Definition: avcodec.h:2446
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:335
#define EDGE_WIDTH
Definition: dsputil.h:261
AVBufferRef * mc_mb_var_buf
Definition: mpegvideo.h:119
int key_frame
1 -&gt; keyframe, 0-&gt; not
Definition: frame.h:162
static const uint8_t mpeg2_dc_scale_table1[128]
Definition: mpegvideo.c:78
#define CONFIG_WMV2_DECODER
Definition: config.h:653
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo.c:2239
#define AVERROR(e)
int16_t level
Definition: get_bits.h:71
#define IS_16X16(a)
Definition: mpegvideo.h:145
Picture ** reordered_input_picture
pointer to the next pictures in codedorder for encoding
Definition: mpegvideo.h:291
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1241
static void free_picture_tables(Picture *pic)
Definition: mpegvideo.c:287
#define HAVE_THREADS
Definition: config.h:286
static int init_er(MpegEncContext *s)
Definition: mpegvideo.c:841
static const uint8_t mpeg2_dc_scale_table2[128]
Definition: mpegvideo.c:90
int chroma_qscale
chroma QP
Definition: mpegvideo.h:374
struct AVFrame f
Definition: mpegvideo.h:98
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:717
int mb_var_sum
sum of MB variance for current frame
Definition: mpegvideo.h:172
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264.c:1645
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo.c:2191
void ff_er_frame_start(ERContext *s)
int height
Definition: frame.h:145
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:264
int mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideo.h:173
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:476
uint32_t * mb_type
Definition: mpegvideo.h:108
void INT64 INT64 count
Definition: avisynth_c.h:594
void INT64 start
Definition: avisynth_c.h:594
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:270
ScanTable inter_scantable
if inter == intra then intra should be used to reduce tha cache usage
Definition: mpegvideo.h:299
uint8_t * temp
Definition: mpegvideo.h:197
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: avcodec.h:4141
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const char int length
Definition: avisynth_c.h:668
static int ref_picture(H264Context *h, Picture *dst, Picture *src)
Definition: h264.c:313
av_cold void ff_MPV_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:41
int debug_mv
debug
Definition: avcodec.h:2466
int ff_find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1466
#define MV_TYPE_8X8
4 vectors (h263, mpeg4 4MV)
Definition: mpegvideo.h:426
int16_t(* b_direct_mv_table_base)[2]
Definition: mpegvideo.h:406
int b_code
backward MV resolution for B Frames (mpeg4)
Definition: mpegvideo.h:400
float * bits_tab
Definition: mpegvideo.h:741
int dct_count[2]
Definition: mpegvideo.h:506
uint8_t * mbskip_table
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:910
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:107
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo.c:2439
for(j=16;j >0;--j)
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:127
#define FF_DEBUG_VIS_QP
Definition: avcodec.h:2456
void * av_mallocz(size_t size) av_malloc_attrib 1(1)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:241
DSPContext.
Definition: dsputil.h:124
void ff_MPV_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
Definition: mpegvideo.c:836
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:566
AVBufferRef * ref_index_buf[2]
Definition: mpegvideo.h:113
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
HpelDSPContext hdsp
Definition: mpegvideo.h:397
static int16_t block[64]
Definition: dct-test.c:198