FFmpeg  2.1.1
svq3.c
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
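/*
 * Illustrative sketch (not part of the original source): one way a caller
 * might wire this up, assuming 'image_desc' points at the ImageDescription
 * payload (first characters 'S','V','Q','3', i.e. without the 4-byte atom
 * length), 'image_desc_size' is its size, and 'codec' came from
 * avcodec_find_decoder(AV_CODEC_ID_SVQ3):
 *
 *     AVCodecContext *c = avcodec_alloc_context3(codec);
 *     c->extradata = av_mallocz(image_desc_size + FF_INPUT_BUFFER_PADDING_SIZE);
 *     if (!c->extradata)
 *         return AVERROR(ENOMEM);
 *     memcpy(c->extradata, image_desc, image_desc_size);
 *     c->extradata_size = image_desc_size;
 *     if (avcodec_open2(c, codec, NULL) < 0)
 *         return AVERROR_INVALIDDATA;
 */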
42 
43 #include "libavutil/attributes.h"
44 #include "internal.h"
45 #include "avcodec.h"
46 #include "mpegvideo.h"
47 #include "h264.h"
48 
49 #include "h264data.h" // FIXME FIXME FIXME
50 
51 #include "h264_mvpred.h"
52 #include "golomb.h"
53 #include "hpeldsp.h"
54 #include "rectangle.h"
55 #include "vdpau_internal.h"
56 
57 #if CONFIG_ZLIB
58 #include <zlib.h>
59 #endif
60 
61 #include "svq1.h"
62 #include "svq3.h"
63 
64 /**
65  * @file
66  * svq3 decoder.
67  */
68 
69 typedef struct {
70  H264Context h;
71  HpelDSPContext hdsp;
72  Picture *cur_pic;
73  Picture *next_pic;
74  Picture *last_pic;
75  int halfpel_flag;
76  int thirdpel_flag;
77  int unknown_flag;
78  int next_slice_index;
79  uint32_t watermark_key;
80  uint8_t *buf;
81  int buf_size;
82  int adaptive_quant;
83  int next_p_frame_damaged;
84  int h_edge_pos;
85  int v_edge_pos;
86  int last_frame_output;
87 } SVQ3Context;
88 
89 #define FULLPEL_MODE 1
90 #define HALFPEL_MODE 2
91 #define THIRDPEL_MODE 3
92 #define PREDICT_MODE 4
93 
94 /* dual scan (from some older h264 draft)
95  *  o-->o-->o   o
96  *         |  /|
97  *  o   o  o / o
98  *  | / |  |/  |
99  *  o   o  o   o
100  *    /
101  *  o-->o-->o-->o
102  */
103 static const uint8_t svq3_scan[16] = {
104  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
105  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
106  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
107  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
108 };
109 
110 static const uint8_t luma_dc_zigzag_scan[16] = {
111  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
112  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
113  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
114  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
115 };
116 
117 static const uint8_t svq3_pred_0[25][2] = {
118  { 0, 0 },
119  { 1, 0 }, { 0, 1 },
120  { 0, 2 }, { 1, 1 }, { 2, 0 },
121  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
122  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
123  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
124  { 2, 4 }, { 3, 3 }, { 4, 2 },
125  { 4, 3 }, { 3, 4 },
126  { 4, 4 }
127 };
128 
129 static const int8_t svq3_pred_1[6][6][5] = {
130  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
131  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
132  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
133  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
134  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
135  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
136  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
137  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
138  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
139  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
140  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
141  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
142 };
143 
144 static const struct {
145  uint8_t run;
146  uint8_t level;
147 } svq3_dct_tables[2][16] = {
148  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
149  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
150  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
151  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
152 };
153 
154 static const uint32_t svq3_dequant_coeff[32] = {
155  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
156  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
157  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
158  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
159 };
160 
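/* Dequantize the 16 luma DC coefficients and apply the SVQ3 variant of the
 * 4x4 DC inverse transform (13/7/17 integer butterflies), scattering each
 * result to the DC position of its 4x4 sub-block in the macroblock
 * coefficient buffer (fixed stride of 16). */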
161 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
162 {
163  const int qmul = svq3_dequant_coeff[qp];
164 #define stride 16
165  int i;
166  int temp[16];
167  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
168 
169  for (i = 0; i < 4; i++) {
170  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
171  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
172  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
173  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
174 
175  temp[4 * i + 0] = z0 + z3;
176  temp[4 * i + 1] = z1 + z2;
177  temp[4 * i + 2] = z1 - z2;
178  temp[4 * i + 3] = z0 - z3;
179  }
180 
181  for (i = 0; i < 4; i++) {
182  const int offset = x_offset[i];
183  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
184  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
185  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
186  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
187 
188  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
189  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
190  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
191  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
192  }
193 }
194 #undef stride
195 
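/* Inverse-transform and dequantize a 4x4 block of residuals and add the
 * result to dst. When dc is nonzero the DC coefficient is rescaled here
 * (dc == 1 selects the fixed intra DC scale) instead of going through the
 * regular dequantization path; the coefficient block is cleared on return. */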
196 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
197  int stride, int qp, int dc)
198 {
199  const int qmul = svq3_dequant_coeff[qp];
200  int i;
201 
202  if (dc) {
203  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
204  : qmul * (block[0] >> 3) / 2);
205  block[0] = 0;
206  }
207 
208  for (i = 0; i < 4; i++) {
209  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
210  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
211  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
212  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
213 
214  block[0 + 4 * i] = z0 + z3;
215  block[1 + 4 * i] = z1 + z2;
216  block[2 + 4 * i] = z1 - z2;
217  block[3 + 4 * i] = z0 - z3;
218  }
219 
220  for (i = 0; i < 4; i++) {
221  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
222  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
223  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
224  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
225  const int rr = (dc + 0x80000);
226 
227  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
228  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
229  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
230  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
231  }
232 
233  memset(block, 0, 16 * sizeof(int16_t));
234 }
235 
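/* Parse run/level-coded residual coefficients for one block, using the scan
 * pattern selected by 'type' and the VLC tables selected by the derived
 * 'intra' flag. Returns 0 on success, -1 on an invalid code or on a run that
 * overflows the block. */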
236 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
237  int index, const int type)
238 {
239  static const uint8_t *const scan_patterns[4] =
240  { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
241 
242  int run, level, sign, limit;
243  unsigned vlc;
244  const int intra = 3 * type >> 2;
245  const uint8_t *const scan = scan_patterns[type];
246 
247  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
248  for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
249  if ((int32_t)vlc < 0)
250  return -1;
251 
252  sign = (vlc & 1) ? 0 : -1;
253  vlc = vlc + 1 >> 1;
254 
255  if (type == 3) {
256  if (vlc < 3) {
257  run = 0;
258  level = vlc;
259  } else if (vlc < 4) {
260  run = 1;
261  level = 1;
262  } else {
263  run = vlc & 0x3;
264  level = (vlc + 9 >> 2) - run;
265  }
266  } else {
267  if (vlc < 16U) {
268  run = svq3_dct_tables[intra][vlc].run;
269  level = svq3_dct_tables[intra][vlc].level;
270  } else if (intra) {
271  run = vlc & 0x7;
272  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
273  } else {
274  run = vlc & 0xF;
275  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
276  }
277  }
278 
279 
280  if ((index += run) >= limit)
281  return -1;
282 
283  block[scan[index]] = (level ^ sign) - sign;
284  }
285 
286  if (type != 2) {
287  break;
288  }
289  }
290 
291  return 0;
292 }
293 
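/* Motion-compensate one partition of the current macroblock for one
 * prediction direction: copy (or average) the luma and, unless
 * CODEC_FLAG_GRAY is set, the chroma prediction from the reference picture,
 * using halfpel or thirdpel interpolation and emulated edges when the
 * reference block lies outside the frame. */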
294 static inline void svq3_mc_dir_part(SVQ3Context *s,
295  int x, int y, int width, int height,
296  int mx, int my, int dxy,
297  int thirdpel, int dir, int avg)
298 {
299  H264Context *h = &s->h;
300  const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
301  uint8_t *src, *dest;
302  int i, emu = 0;
303  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
304 
305  mx += x;
306  my += y;
307 
308  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
309  my < 0 || my >= s->v_edge_pos - height - 1) {
310  emu = 1;
311  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
312  my = av_clip(my, -16, s->v_edge_pos - height + 15);
313  }
314 
315  /* form component predictions */
316  dest = h->cur_pic.f.data[0] + x + y * h->linesize;
317  src = pic->f.data[0] + mx + my * h->linesize;
318 
319  if (emu) {
320  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, h->linesize,
321  src, h->linesize,
322  width + 1, height + 1,
323  mx, my, s->h_edge_pos, s->v_edge_pos);
324  src = h->edge_emu_buffer;
325  }
326  if (thirdpel)
327  (avg ? h->dsp.avg_tpel_pixels_tab
328  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
329  width, height);
330  else
331  (avg ? s->hdsp.avg_pixels_tab
332  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
333  height);
334 
335  if (!(h->flags & CODEC_FLAG_GRAY)) {
336  mx = mx + (mx < (int) x) >> 1;
337  my = my + (my < (int) y) >> 1;
338  width = width >> 1;
339  height = height >> 1;
340  blocksize++;
341 
342  for (i = 1; i < 3; i++) {
343  dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
344  src = pic->f.data[i] + mx + my * h->uvlinesize;
345 
346  if (emu) {
347  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, h->uvlinesize,
348  src, h->uvlinesize,
349  width + 1, height + 1,
350  mx, my, (s->h_edge_pos >> 1),
351  s->v_edge_pos >> 1);
352  src = h->edge_emu_buffer;
353  }
354  if (thirdpel)
355  (avg ? h->dsp.avg_tpel_pixels_tab
356  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
357  h->uvlinesize,
358  width, height);
359  else
360  (avg ? s->hdsp.avg_pixels_tab
361  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
362  h->uvlinesize,
363  height);
364  }
365  }
366 }
367 
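/* For every partition of the current macroblock: predict the motion vector
 * (or derive it from the co-located block in PREDICT_MODE), add the coded
 * differential, clip it to the frame, run svq3_mc_dir_part(), and store the
 * result in mv_cache and in the picture's motion_val array. */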
368 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
369  int dir, int avg)
370 {
371  int i, j, k, mx, my, dx, dy, x, y;
372  H264Context *h = &s->h;
373  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
374  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
375  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
376  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
377  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
378 
379  for (i = 0; i < 16; i += part_height)
380  for (j = 0; j < 16; j += part_width) {
381  const int b_xy = (4 * h->mb_x + (j >> 2)) +
382  (4 * h->mb_y + (i >> 2)) * h->b_stride;
383  int dxy;
384  x = 16 * h->mb_x + j;
385  y = 16 * h->mb_y + i;
386  k = (j >> 2 & 1) + (i >> 1 & 2) +
387  (j >> 1 & 4) + (i & 8);
388 
389  if (mode != PREDICT_MODE) {
390  pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
391  } else {
392  mx = s->next_pic->motion_val[0][b_xy][0] << 1;
393  my = s->next_pic->motion_val[0][b_xy][1] << 1;
394 
395  if (dir == 0) {
396  mx = mx * h->frame_num_offset /
397  h->prev_frame_num_offset + 1 >> 1;
398  my = my * h->frame_num_offset /
399  h->prev_frame_num_offset + 1 >> 1;
400  } else {
401  mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
402  h->prev_frame_num_offset + 1 >> 1;
403  my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
404  h->prev_frame_num_offset + 1 >> 1;
405  }
406  }
407 
408  /* clip motion vector prediction to frame border */
409  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
410  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
411 
412  /* get (optional) motion vector differential */
413  if (mode == PREDICT_MODE) {
414  dx = dy = 0;
415  } else {
416  dy = svq3_get_se_golomb(&h->gb);
417  dx = svq3_get_se_golomb(&h->gb);
418 
419  if (dx == INVALID_VLC || dy == INVALID_VLC) {
420  av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
421  return -1;
422  }
423  }
424 
425  /* compute motion vector */
426  if (mode == THIRDPEL_MODE) {
427  int fx, fy;
428  mx = (mx + 1 >> 1) + dx;
429  my = (my + 1 >> 1) + dy;
430  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
431  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
432  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
433 
434  svq3_mc_dir_part(s, x, y, part_width, part_height,
435  fx, fy, dxy, 1, dir, avg);
436  mx += mx;
437  my += my;
438  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
439  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
440  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
441  dxy = (mx & 1) + 2 * (my & 1);
442 
443  svq3_mc_dir_part(s, x, y, part_width, part_height,
444  mx >> 1, my >> 1, dxy, 0, dir, avg);
445  mx *= 3;
446  my *= 3;
447  } else {
448  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
449  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
450 
451  svq3_mc_dir_part(s, x, y, part_width, part_height,
452  mx, my, 0, 0, dir, avg);
453  mx *= 6;
454  my *= 6;
455  }
456 
457  /* update mv_cache */
458  if (mode != PREDICT_MODE) {
459  int32_t mv = pack16to32(mx, my);
460 
461  if (part_height == 8 && i < 8) {
462  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
463 
464  if (part_width == 8 && j < 8)
465  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
466  }
467  if (part_width == 8 && j < 8)
468  AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
469  if (part_width == 4 || part_height == 4)
470  AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
471  }
472 
473  /* write back motion vectors */
474  fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
475  part_width >> 2, part_height >> 2, h->b_stride,
476  pack16to32(mx, my), 4);
477  }
478 
479  return 0;
480 }
481 
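/* Decode a single macroblock: handle SKIP, INTER, INTRA4x4 and INTRA16x16
 * types, fill the prediction caches, read the coded block pattern and an
 * optional qscale delta, and decode the residual blocks. */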
482 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
483 {
484  H264Context *h = &s->h;
485  int i, j, k, m, dir, mode;
486  int cbp = 0;
487  uint32_t vlc;
488  int8_t *top, *left;
489  const int mb_xy = h->mb_xy;
490  const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
491 
492  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
493  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
494  h->topright_samples_available = 0xFFFF;
495 
496  if (mb_type == 0) { /* SKIP */
497  if (h->pict_type == AV_PICTURE_TYPE_P ||
498  s->next_pic->mb_type[mb_xy] == -1) {
499  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
500  0, 0, 0, 0, 0, 0);
501 
502  if (h->pict_type == AV_PICTURE_TYPE_B)
503  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
504  0, 0, 0, 0, 1, 1);
505 
506  mb_type = MB_TYPE_SKIP;
507  } else {
508  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
509  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
510  return -1;
511  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
512  return -1;
513 
514  mb_type = MB_TYPE_16x16;
515  }
516  } else if (mb_type < 8) { /* INTER */
517  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
518  mode = THIRDPEL_MODE;
519  else if (s->halfpel_flag &&
520  s->thirdpel_flag == !get_bits1(&h->gb))
521  mode = HALFPEL_MODE;
522  else
523  mode = FULLPEL_MODE;
524 
525  /* fill caches */
526  /* note ref_cache should contain here:
527  * ????????
528  * ???11111
529  * N??11111
530  * N??11111
531  * N??11111
532  */
533 
534  for (m = 0; m < 2; m++) {
535  if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
536  for (i = 0; i < 4; i++)
537  AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
538  h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
539  } else {
540  for (i = 0; i < 4; i++)
541  AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
542  }
543  if (h->mb_y > 0) {
544  memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
545  h->cur_pic.motion_val[m][b_xy - h->b_stride],
546  4 * 2 * sizeof(int16_t));
547  memset(&h->ref_cache[m][scan8[0] - 1 * 8],
548  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
549 
550  if (h->mb_x < h->mb_width - 1) {
551  AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
552  h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
553  h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
554  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
555  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
556  } else
557  h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
558  if (h->mb_x > 0) {
559  AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
560  h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
561  h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
562  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
563  } else
564  h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
565  } else
566  memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
567  PART_NOT_AVAILABLE, 8);
568 
569  if (h->pict_type != AV_PICTURE_TYPE_B)
570  break;
571  }
572 
573  /* decode motion vector(s) and form prediction(s) */
574  if (h->pict_type == AV_PICTURE_TYPE_P) {
575  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
576  return -1;
577  } else { /* AV_PICTURE_TYPE_B */
578  if (mb_type != 2) {
579  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
580  return -1;
581  } else {
582  for (i = 0; i < 4; i++)
583  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
584  0, 4 * 2 * sizeof(int16_t));
585  }
586  if (mb_type != 1) {
587  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
588  return -1;
589  } else {
590  for (i = 0; i < 4; i++)
591  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
592  0, 4 * 2 * sizeof(int16_t));
593  }
594  }
595 
596  mb_type = MB_TYPE_16x16;
597  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
598  memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
599 
600  if (mb_type == 8) {
601  if (h->mb_x > 0) {
602  for (i = 0; i < 4; i++)
603  h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
604  if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
605  h->left_samples_available = 0x5F5F;
606  }
607  if (h->mb_y > 0) {
608  h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
609  h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
610  h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
611  h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
612 
613  if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
614  h->top_samples_available = 0x33FF;
615  }
616 
617  /* decode prediction codes for luma blocks */
618  for (i = 0; i < 16; i += 2) {
619  vlc = svq3_get_ue_golomb(&h->gb);
620 
621  if (vlc >= 25U) {
622  av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
623  return -1;
624  }
625 
626  left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
627  top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
628 
629  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
630  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
631 
632  if (left[1] == -1 || left[2] == -1) {
633  av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
634  return -1;
635  }
636  }
637  } else { /* mb_type == 33, DC_128_PRED block type */
638  for (i = 0; i < 4; i++)
639  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
640  }
641 
642  write_back_intra_pred_mode(h);
643 
644  if (mb_type == 8) {
645  ff_h264_check_intra4x4_pred_mode(h);
646 
647  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
648  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
649  } else {
650  for (i = 0; i < 4; i++)
651  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
652 
653  h->top_samples_available = 0x33FF;
654  h->left_samples_available = 0x5F5F;
655  }
656 
657  mb_type = MB_TYPE_INTRA4x4;
658  } else { /* INTRA16x16 */
659  dir = i_mb_type_info[mb_type - 8].pred_mode;
660  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
661 
662  if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
663  av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
664  return h->intra16x16_pred_mode;
665  }
666 
667  cbp = i_mb_type_info[mb_type - 8].cbp;
668  mb_type = MB_TYPE_INTRA16x16;
669  }
670 
671  if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
672  for (i = 0; i < 4; i++)
673  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
674  0, 4 * 2 * sizeof(int16_t));
675  if (h->pict_type == AV_PICTURE_TYPE_B) {
676  for (i = 0; i < 4; i++)
677  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
678  0, 4 * 2 * sizeof(int16_t));
679  }
680  }
681  if (!IS_INTRA4x4(mb_type)) {
682  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
683  }
684  if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
685  memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
686  }
687 
688  if (!IS_INTRA16x16(mb_type) &&
689  (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
690  if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
691  av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
692  return -1;
693  }
694 
695  cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
696  : golomb_to_inter_cbp[vlc];
697  }
698  if (IS_INTRA16x16(mb_type) ||
699  (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
700  h->qscale += svq3_get_se_golomb(&h->gb);
701 
702  if (h->qscale > 31u) {
703  av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
704  return -1;
705  }
706  }
707  if (IS_INTRA16x16(mb_type)) {
708  AV_ZERO128(h->mb_luma_dc[0] + 0);
709  AV_ZERO128(h->mb_luma_dc[0] + 8);
710  if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
711  av_log(h->avctx, AV_LOG_ERROR,
712  "error while decoding intra luma dc\n");
713  return -1;
714  }
715  }
716 
717  if (cbp) {
718  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
719  const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
720 
721  for (i = 0; i < 4; i++)
722  if ((cbp & (1 << i))) {
723  for (j = 0; j < 4; j++) {
724  k = index ? (1 * (j & 1) + 2 * (i & 1) +
725  2 * (j & 2) + 4 * (i & 2))
726  : (4 * i + j);
727  h->non_zero_count_cache[scan8[k]] = 1;
728 
729  if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
730  av_log(h->avctx, AV_LOG_ERROR,
731  "error while decoding block\n");
732  return -1;
733  }
734  }
735  }
736 
737  if ((cbp & 0x30)) {
738  for (i = 1; i < 3; ++i)
739  if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
740  av_log(h->avctx, AV_LOG_ERROR,
741  "error while decoding chroma dc block\n");
742  return -1;
743  }
744 
745  if ((cbp & 0x20)) {
746  for (i = 1; i < 3; i++) {
747  for (j = 0; j < 4; j++) {
748  k = 16 * i + j;
749  h->non_zero_count_cache[scan8[k]] = 1;
750 
751  if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
752  av_log(h->avctx, AV_LOG_ERROR,
753  "error while decoding chroma ac block\n");
754  return -1;
755  }
756  }
757  }
758  }
759  }
760  }
761 
762  h->cbp = cbp;
763  h->cur_pic.mb_type[mb_xy] = mb_type;
764 
765  if (IS_INTRA(mb_type))
766  h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
767 
768  return 0;
769 }
770 
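/* Parse an SVQ3 slice header: locate the start of the next slice, undo the
 * optional watermark scrambling, read the slice type, qscale and various
 * flags, and reset the intra prediction and motion vector state along the
 * slice boundary. */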
771 static int svq3_decode_slice_header(AVCodecContext *avctx)
772 {
773  SVQ3Context *s = avctx->priv_data;
774  H264Context *h = &s->h;
775  const int mb_xy = h->mb_xy;
776  int i, header;
777  unsigned slice_id;
778 
779  header = get_bits(&h->gb, 8);
780 
781  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
782  /* TODO: what? */
783  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
784  return -1;
785  } else {
786  int length = header >> 5 & 3;
787 
788  s->next_slice_index = get_bits_count(&h->gb) +
789  8 * show_bits(&h->gb, 8 * length) +
790  8 * length;
791 
792  if (s->next_slice_index > h->gb.size_in_bits) {
793  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
794  return -1;
795  }
796 
797  h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
798  skip_bits(&h->gb, 8);
799 
800  if (s->watermark_key) {
801  uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
802  AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
803  header ^ s->watermark_key);
804  }
805  if (length > 0) {
806  memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
807  &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
808  }
809  skip_bits_long(&h->gb, 0);
810  }
811 
812  if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
813  av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
814  return -1;
815  }
816 
817  h->slice_type = golomb_to_pict_type[slice_id];
818 
819  if ((header & 0x9F) == 2) {
820  i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
821  h->mb_skip_run = get_bits(&h->gb, i) -
822  (h->mb_y * h->mb_width + h->mb_x);
823  } else {
824  skip_bits1(&h->gb);
825  h->mb_skip_run = 0;
826  }
827 
828  h->slice_num = get_bits(&h->gb, 8);
829  h->qscale = get_bits(&h->gb, 5);
830  s->adaptive_quant = get_bits1(&h->gb);
831 
832  /* unknown fields */
833  skip_bits1(&h->gb);
834 
835  if (s->unknown_flag)
836  skip_bits1(&h->gb);
837 
838  skip_bits1(&h->gb);
839  skip_bits(&h->gb, 2);
840 
841  while (get_bits1(&h->gb))
842  skip_bits(&h->gb, 8);
843 
844  /* reset intra predictors and invalidate motion vector references */
845  if (h->mb_x > 0) {
846  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
847  -1, 4 * sizeof(int8_t));
848  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
849  -1, 8 * sizeof(int8_t) * h->mb_x);
850  }
851  if (h->mb_y > 0) {
852  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
853  -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
854 
855  if (h->mb_x > 0)
856  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
857  }
858 
859  return 0;
860 }
861 
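/* Decoder init: set up the underlying H.264 context, locate the "SEQH"
 * marker in the extradata, parse the frame size code and the halfpel /
 * thirdpel / low_delay flags and, when the file is watermarked, derive the
 * descrambling key from the zlib-compressed watermark logo. */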
862 static av_cold int svq3_decode_init(AVCodecContext *avctx)
863 {
864  SVQ3Context *s = avctx->priv_data;
865  H264Context *h = &s->h;
866  int m;
867  unsigned char *extradata;
868  unsigned char *extradata_end;
869  unsigned int size;
870  int marker_found = 0;
871 
872  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
873  s->last_pic = av_mallocz(sizeof(*s->last_pic));
874  s->next_pic = av_mallocz(sizeof(*s->next_pic));
875  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
876  av_freep(&s->cur_pic);
877  av_freep(&s->last_pic);
878  av_freep(&s->next_pic);
879  return AVERROR(ENOMEM);
880  }
881 
882  if (ff_h264_decode_init(avctx) < 0)
883  return -1;
884 
885  ff_hpeldsp_init(&s->hdsp, avctx->flags);
886  h->flags = avctx->flags;
887  h->is_complex = 1;
888  h->sps.chroma_format_idc = 1;
889  h->picture_structure = PICT_FRAME;
890  avctx->pix_fmt = avctx->codec->pix_fmts[0];
891 
892  h->chroma_qp[0] = h->chroma_qp[1] = 4;
893  h->chroma_x_shift = h->chroma_y_shift = 1;
894 
895  s->halfpel_flag = 1;
896  s->thirdpel_flag = 1;
897  s->unknown_flag = 0;
898 
899  /* prowl for the "SEQH" marker in the extradata */
900  extradata = (unsigned char *)avctx->extradata;
901  extradata_end = avctx->extradata + avctx->extradata_size;
902  if (extradata) {
903  for (m = 0; m + 8 < avctx->extradata_size; m++) {
904  if (!memcmp(extradata, "SEQH", 4)) {
905  marker_found = 1;
906  break;
907  }
908  extradata++;
909  }
910  }
911 
912  /* if a match was found, parse the extra data */
913  if (marker_found) {
914  GetBitContext gb;
915  int frame_size_code;
916 
917  size = AV_RB32(&extradata[4]);
918  if (size > extradata_end - extradata - 8)
919  return AVERROR_INVALIDDATA;
920  init_get_bits(&gb, extradata + 8, size * 8);
921 
922  /* 'frame size code' and optional 'width, height' */
923  frame_size_code = get_bits(&gb, 3);
924  switch (frame_size_code) {
925  case 0:
926  avctx->width = 160;
927  avctx->height = 120;
928  break;
929  case 1:
930  avctx->width = 128;
931  avctx->height = 96;
932  break;
933  case 2:
934  avctx->width = 176;
935  avctx->height = 144;
936  break;
937  case 3:
938  avctx->width = 352;
939  avctx->height = 288;
940  break;
941  case 4:
942  avctx->width = 704;
943  avctx->height = 576;
944  break;
945  case 5:
946  avctx->width = 240;
947  avctx->height = 180;
948  break;
949  case 6:
950  avctx->width = 320;
951  avctx->height = 240;
952  break;
953  case 7:
954  avctx->width = get_bits(&gb, 12);
955  avctx->height = get_bits(&gb, 12);
956  break;
957  }
958 
959  s->halfpel_flag = get_bits1(&gb);
960  s->thirdpel_flag = get_bits1(&gb);
961 
962  /* unknown fields */
963  skip_bits1(&gb);
964  skip_bits1(&gb);
965  skip_bits1(&gb);
966  skip_bits1(&gb);
967 
968  h->low_delay = get_bits1(&gb);
969 
970  /* unknown field */
971  skip_bits1(&gb);
972 
973  while (get_bits1(&gb))
974  skip_bits(&gb, 8);
975 
976  s->unknown_flag = get_bits1(&gb);
977  avctx->has_b_frames = !h->low_delay;
978  if (s->unknown_flag) {
979 #if CONFIG_ZLIB
980  unsigned watermark_width = svq3_get_ue_golomb(&gb);
981  unsigned watermark_height = svq3_get_ue_golomb(&gb);
982  int u1 = svq3_get_ue_golomb(&gb);
983  int u2 = get_bits(&gb, 8);
984  int u3 = get_bits(&gb, 2);
985  int u4 = svq3_get_ue_golomb(&gb);
986  unsigned long buf_len = watermark_width *
987  watermark_height * 4;
988  int offset = get_bits_count(&gb) + 7 >> 3;
989  uint8_t *buf;
990 
991  if (watermark_height <= 0 ||
992  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
993  return -1;
994 
995  buf = av_malloc(buf_len);
996  av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
997  watermark_width, watermark_height);
998  av_log(avctx, AV_LOG_DEBUG,
999  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1000  u1, u2, u3, u4, offset);
1001  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1002  size - offset) != Z_OK) {
1003  av_log(avctx, AV_LOG_ERROR,
1004  "could not uncompress watermark logo\n");
1005  av_free(buf);
1006  return -1;
1007  }
1008  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1009  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1010  av_log(avctx, AV_LOG_DEBUG,
1011  "watermark key %#x\n", s->watermark_key);
1012  av_free(buf);
1013 #else
1014  av_log(avctx, AV_LOG_ERROR,
1015  "this SVQ3 file contains a watermark which needs zlib support compiled in\n");
1016  return -1;
1017 #endif
1018  }
1019  }
1020 
1021  h->width = avctx->width;
1022  h->height = avctx->height;
1023  h->mb_width = (h->width + 15) / 16;
1024  h->mb_height = (h->height + 15) / 16;
1025  h->mb_stride = h->mb_width + 1;
1026  h->mb_num = h->mb_width * h->mb_height;
1027  h->b_stride = 4 * h->mb_width;
1028  s->h_edge_pos = h->mb_width * 16;
1029  s->v_edge_pos = h->mb_height * 16;
1030 
1031  if (ff_h264_alloc_tables(h) < 0) {
1032  av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1033  return AVERROR(ENOMEM);
1034  }
1035 
1036  return 0;
1037 }
1038 
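/* Release the per-picture side data buffers (motion vectors, reference
 * indices, macroblock types) and unreference the frame itself. */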
1039 static void free_picture(AVCodecContext *avctx, Picture *pic)
1040 {
1041  int i;
1042  for (i = 0; i < 2; i++) {
1043  av_buffer_unref(&pic->motion_val_buf[i]);
1044  av_buffer_unref(&pic->ref_index_buf[i]);
1045  }
1046  av_buffer_unref(&pic->mb_type_buf);
1047 
1048  av_frame_unref(&pic->f);
1049 }
1050 
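/* Allocate the per-picture metadata buffers on first use, request a frame
 * buffer from the caller (and, lazily, the edge emulation buffer), and
 * record the resulting line sizes. */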
1051 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1052 {
1053  SVQ3Context *s = avctx->priv_data;
1054  H264Context *h = &s->h;
1055  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1056  const int mb_array_size = h->mb_stride * h->mb_height;
1057  const int b4_stride = h->mb_width * 4 + 1;
1058  const int b4_array_size = b4_stride * h->mb_height * 4;
1059  int ret;
1060 
1061  if (!pic->motion_val_buf[0]) {
1062  int i;
1063 
1064  pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1065  if (!pic->mb_type_buf)
1066  return AVERROR(ENOMEM);
1067  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1068 
1069  for (i = 0; i < 2; i++) {
1070  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1071  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1072  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1073  ret = AVERROR(ENOMEM);
1074  goto fail;
1075  }
1076 
1077  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1078  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1079  }
1080  }
1081  pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1082 
1083  ret = ff_get_buffer(avctx, &pic->f,
1084  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1085  if (ret < 0)
1086  goto fail;
1087 
1088  if (!h->edge_emu_buffer) {
1089  h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1090  if (!h->edge_emu_buffer)
1091  return AVERROR(ENOMEM);
1092  }
1093 
1094  h->linesize = pic->f.linesize[0];
1095  h->uvlinesize = pic->f.linesize[1];
1096 
1097  return 0;
1098 fail:
1099  free_picture(avctx, pic);
1100  return ret;
1101 }
1102 
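/* Top-level frame decoding: flush the delayed picture on an empty packet,
 * optionally descramble the payload with the watermark key, parse slice
 * headers, decode every macroblock, and output either the current or the
 * previous picture depending on picture type and low_delay. */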
1103 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1104  int *got_frame, AVPacket *avpkt)
1105 {
1106  SVQ3Context *s = avctx->priv_data;
1107  H264Context *h = &s->h;
1108  int buf_size = avpkt->size;
1109  int left;
1110  uint8_t *buf;
1111  int ret, m, i;
1112 
1113  /* special case for last picture */
1114  if (buf_size == 0) {
1115  if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1116  ret = av_frame_ref(data, &s->next_pic->f);
1117  if (ret < 0)
1118  return ret;
1119  s->last_frame_output = 1;
1120  *got_frame = 1;
1121  }
1122  return 0;
1123  }
1124 
1125  h->mb_x = h->mb_y = h->mb_xy = 0;
1126 
1127  if (s->watermark_key) {
1128  av_fast_malloc(&s->buf, &s->buf_size,
1129  buf_size+FF_INPUT_BUFFER_PADDING_SIZE);
1130  if (!s->buf)
1131  return AVERROR(ENOMEM);
1132  memcpy(s->buf, avpkt->data, buf_size);
1133  buf = s->buf;
1134  } else {
1135  buf = avpkt->data;
1136  }
1137 
1138  init_get_bits(&h->gb, buf, 8 * buf_size);
1139 
1140  if (svq3_decode_slice_header(avctx))
1141  return -1;
1142 
1143  h->pict_type = h->slice_type;
1144 
1145  if (h->pict_type != AV_PICTURE_TYPE_B)
1146  FFSWAP(Picture*, s->next_pic, s->last_pic);
1147 
1148  av_frame_unref(&s->cur_pic->f);
1149 
1150  /* for skipping the frame */
1151  s->cur_pic->f.pict_type = h->pict_type;
1152  s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1153 
1154  ret = get_buffer(avctx, s->cur_pic);
1155  if (ret < 0)
1156  return ret;
1157 
1158  h->cur_pic_ptr = s->cur_pic;
1159  av_frame_unref(&h->cur_pic.f);
1160  h->cur_pic = *s->cur_pic;
1161  ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1162  if (ret < 0)
1163  return ret;
1164 
1165  for (i = 0; i < 16; i++) {
1166  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1167  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1168  }
1169  for (i = 0; i < 16; i++) {
1170  h->block_offset[16 + i] =
1171  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1172  h->block_offset[48 + 16 + i] =
1173  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1174  }
1175 
1176  if (h->pict_type != AV_PICTURE_TYPE_I) {
1177  if (!s->last_pic->f.data[0]) {
1178  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1179  ret = get_buffer(avctx, s->last_pic);
1180  if (ret < 0)
1181  return ret;
1182  memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1183  memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1184  s->last_pic->f.linesize[1]);
1185  memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1186  s->last_pic->f.linesize[2]);
1187  }
1188 
1189  if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1190  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1191  ret = get_buffer(avctx, s->next_pic);
1192  if (ret < 0)
1193  return ret;
1194  memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1195  memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1196  s->next_pic->f.linesize[1]);
1197  memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1198  s->next_pic->f.linesize[2]);
1199  }
1200  }
1201 
1202  if (avctx->debug & FF_DEBUG_PICT_INFO)
1203  av_log(h->avctx, AV_LOG_DEBUG,
1204  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1205  av_get_picture_type_char(h->pict_type),
1206  s->halfpel_flag, s->thirdpel_flag,
1207  s->adaptive_quant, h->qscale, h->slice_num);
1208 
1209  if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1210  avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1211  avctx->skip_frame >= AVDISCARD_ALL)
1212  return 0;
1213 
1214  if (s->next_p_frame_damaged) {
1215  if (h->pict_type == AV_PICTURE_TYPE_B)
1216  return 0;
1217  else
1218  s->next_p_frame_damaged = 0;
1219  }
1220 
1221  if (h->pict_type == AV_PICTURE_TYPE_B) {
1222  h->frame_num_offset = h->slice_num - h->prev_frame_num;
1223 
1224  if (h->frame_num_offset < 0)
1225  h->frame_num_offset += 256;
1226  if (h->frame_num_offset == 0 ||
1227  h->frame_num_offset >= h->prev_frame_num_offset) {
1228  av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1229  return -1;
1230  }
1231  } else {
1232  h->prev_frame_num = h->frame_num;
1233  h->frame_num = h->slice_num;
1234  h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1235 
1236  if (h->prev_frame_num_offset < 0)
1237  h->prev_frame_num_offset += 256;
1238  }
1239 
1240  for (m = 0; m < 2; m++) {
1241  int i;
1242  for (i = 0; i < 4; i++) {
1243  int j;
1244  for (j = -1; j < 4; j++)
1245  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1246  if (i < 3)
1247  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1248  }
1249  }
1250 
1251  for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1252  for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1253  unsigned mb_type;
1254  h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1255 
1256  if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1257  ((get_bits_count(&h->gb) & 7) == 0 ||
1258  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1259  skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1260  h->gb.size_in_bits = 8 * buf_size;
1261 
1262  if (svq3_decode_slice_header(avctx))
1263  return -1;
1264 
1265  /* TODO: support s->mb_skip_run */
1266  }
1267 
1268  mb_type = svq3_get_ue_golomb(&h->gb);
1269 
1270  if (h->pict_type == AV_PICTURE_TYPE_I)
1271  mb_type += 8;
1272  else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1273  mb_type += 4;
1274  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1275  av_log(h->avctx, AV_LOG_ERROR,
1276  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1277  return -1;
1278  }
1279 
1280  if (mb_type != 0 || h->cbp)
1281  ff_h264_hl_decode_mb(h);
1282 
1283  if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1284  h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1285  (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1286  }
1287 
1288  ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1289  16 * h->mb_y, 16, h->picture_structure, 0, 0,
1290  h->low_delay, h->mb_height * 16, h->mb_width * 16);
1291  }
1292 
1293  left = buf_size*8 - get_bits_count(&h->gb);
1294 
1295  if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1296  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1297  //av_hex_dump(stderr, buf+buf_size-8, 8);
1298  }
1299 
1300  if (left < 0) {
1301  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1302  return -1;
1303  }
1304 
1305  if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1306  ret = av_frame_ref(data, &s->cur_pic->f);
1307  else if (s->last_pic->f.data[0])
1308  ret = av_frame_ref(data, &s->last_pic->f);
1309  if (ret < 0)
1310  return ret;
1311 
1312  /* Do not output the last pic after seeking. */
1313  if (s->last_pic->f.data[0] || h->low_delay)
1314  *got_frame = 1;
1315 
1316  if (h->pict_type != AV_PICTURE_TYPE_B) {
1317  FFSWAP(Picture*, s->cur_pic, s->next_pic);
1318  } else {
1319  av_frame_unref(&s->cur_pic->f);
1320  }
1321 
1322  return buf_size;
1323 }
1324 
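/* Free the three picture slots, the underlying H.264 context state and the
 * watermark descrambling buffer. */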
1325 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1326 {
1327  SVQ3Context *s = avctx->priv_data;
1328  H264Context *h = &s->h;
1329 
1330  free_picture(avctx, s->cur_pic);
1331  free_picture(avctx, s->next_pic);
1332  free_picture(avctx, s->last_pic);
1333  av_freep(&s->cur_pic);
1334  av_freep(&s->next_pic);
1335  av_freep(&s->last_pic);
1336 
1337  av_frame_unref(&h->cur_pic.f);
1338 
1339  ff_h264_free_context(h);
1340 
1341  av_freep(&s->buf);
1342  s->buf_size = 0;
1344 
1345  return 0;
1346 }
1347 
1348 AVCodec ff_svq3_decoder = {
1349  .name = "svq3",
1350  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1351  .type = AVMEDIA_TYPE_VIDEO,
1352  .id = AV_CODEC_ID_SVQ3,
1353  .priv_data_size = sizeof(SVQ3Context),
1354  .init = svq3_decode_init,
1355  .close = svq3_decode_end,
1356  .decode = svq3_decode_frame,
1357  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1358  CODEC_CAP_DR1 |
1359  CODEC_CAP_DELAY,
1360  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1361  AV_PIX_FMT_NONE},
1362 };