FFmpeg  2.1.1
vf_edgedetect.c
/*
 * Copyright (c) 2012 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Edge detection filter
 *
 * @see https://en.wikipedia.org/wiki/Canny_edge_detector
 */
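
/*
 * Example invocation (illustrative only; the file names are placeholders and
 * 0.1/0.4 are sample thresholds, not the defaults):
 *
 *   ffmpeg -i input.mp4 -vf edgedetect=low=0.1:high=0.4 output.mp4
 */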

#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

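/* Per-instance state: scratch buffers for the intermediate Canny stages plus
 * the two hysteresis thresholds, kept both as the user-visible [0,1] doubles
 * and as the precomputed 8-bit values used while filtering. */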
typedef struct {
    const AVClass *class;
    uint8_t  *tmpbuf;
    uint16_t *gradients;
    char     *directions;
    double   low, high;
    uint8_t  low_u8, high_u8;
} EdgeDetectContext;

#define OFFSET(x) offsetof(EdgeDetectContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption edgedetect_options[] = {
    { "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
    { "low",  "set low threshold",  OFFSET(low),  AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
    { NULL }
};
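
/* The defaults 20/255. and 50/255. correspond to 8-bit thresholds of exactly
 * 20 and 50 once init() below rescales them with x * 255. + .5. */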

AVFILTER_DEFINE_CLASS(edgedetect);

static av_cold int init(AVFilterContext *ctx)
{
    EdgeDetectContext *edgedetect = ctx->priv;

    edgedetect->low_u8  = edgedetect->low  * 255. + .5;
    edgedetect->high_u8 = edgedetect->high * 255. + .5;
    return 0;
}

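/* Only 8-bit grayscale input is accepted; for any other pixel format,
 * libavfilter's format negotiation will normally insert a conversion
 * upstream of this filter. */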
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}

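/* All three scratch buffers hold w*h elements and are addressed with a
 * stride of w; the frame's own linesize is only used for data[0]. */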
static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    EdgeDetectContext *edgedetect = ctx->priv;

    edgedetect->tmpbuf     = av_malloc(inlink->w * inlink->h);
    edgedetect->gradients  = av_calloc(inlink->w * inlink->h, sizeof(*edgedetect->gradients));
    edgedetect->directions = av_malloc(inlink->w * inlink->h);
    if (!edgedetect->tmpbuf || !edgedetect->gradients || !edgedetect->directions)
        return AVERROR(ENOMEM);
    return 0;
}

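/* The 25 integer weights of the 5x5 kernel below sum to 159, so the final
 * division by 159 keeps the blurred plane at the same overall brightness. */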
static void gaussian_blur(AVFilterContext *ctx, int w, int h,
                                uint8_t *dst, int dst_linesize,
                          const uint8_t *src, int src_linesize)
{
    int i, j;

    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    for (j = 2; j < h - 2; j++) {
        dst[0] = src[0];
        dst[1] = src[1];
        for (i = 2; i < w - 2; i++) {
            /* Gaussian mask of size 5x5 with sigma = 1.4 */
            dst[i] = ((src[-2*src_linesize + i-2] + src[2*src_linesize + i-2]) * 2
                    + (src[-2*src_linesize + i-1] + src[2*src_linesize + i-1]) * 4
                    + (src[-2*src_linesize + i  ] + src[2*src_linesize + i  ]) * 5
                    + (src[-2*src_linesize + i+1] + src[2*src_linesize + i+1]) * 4
                    + (src[-2*src_linesize + i+2] + src[2*src_linesize + i+2]) * 2

                    + (src[  -src_linesize + i-2] + src[  src_linesize + i-2]) *  4
                    + (src[  -src_linesize + i-1] + src[  src_linesize + i-1]) *  9
                    + (src[  -src_linesize + i  ] + src[  src_linesize + i  ]) * 12
                    + (src[  -src_linesize + i+1] + src[  src_linesize + i+1]) *  9
                    + (src[  -src_linesize + i+2] + src[  src_linesize + i+2]) *  4

                    + src[i-2] *  5
                    + src[i-1] * 12
                    + src[i  ] * 15
                    + src[i+1] * 12
                    + src[i+2] *  5) / 159;
        }
        dst[i    ] = src[i    ];
        dst[i + 1] = src[i + 1];

        dst += dst_linesize;
        src += src_linesize;
    }
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w);
}

enum {
    DIRECTION_45UP,
    DIRECTION_45DOWN,
    DIRECTION_HORIZONTAL,
    DIRECTION_VERTICAL,
};

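/* Worked example for get_rounded_direction() below (illustrative values):
 * gx = 100, gy = 50 gives gy<<16 = 3276800, tanpi8gx = 2714600 and
 * tan3pi8gx = 15821800; since 2714600 < 3276800 < 15821800, i.e.
 * 22.5 deg < atan(0.5) ~= 26.6 deg < 67.5 deg, the gradient is rounded
 * to DIRECTION_45DOWN. */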
static int get_rounded_direction(int gx, int gy)
{
    /* reference angles:
     *   tan( pi/8) = sqrt(2)-1
     *   tan(3pi/8) = sqrt(2)+1
     * Gy/Gx is the tangent of the angle (theta), so Gy/Gx is compared against
     * <ref-angle>, or more simply Gy against <ref-angle>*Gx
     *
     * Gx and Gy bounds = [-1020;1020]; scaling by 1<<16 (16-bit fixed point):
     *   round((sqrt(2)-1) * (1<<16)) =  27146
     *   round((sqrt(2)+1) * (1<<16)) = 158218
     */
    if (gx) {
        int tanpi8gx, tan3pi8gx;

        if (gx < 0)
            gx = -gx, gy = -gy;
        gy <<= 16;
        tanpi8gx  =  27146 * gx;
        tan3pi8gx = 158218 * gx;
        if (gy > -tan3pi8gx && gy < -tanpi8gx)  return DIRECTION_45UP;
        if (gy > -tanpi8gx  && gy <  tanpi8gx)  return DIRECTION_HORIZONTAL;
        if (gy >  tanpi8gx  && gy <  tan3pi8gx) return DIRECTION_45DOWN;
    }
    return DIRECTION_VERTICAL;
}

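/* The Sobel pass stores |gx| + |gy|, the cheap L1 approximation of the
 * gradient magnitude; with 8-bit input each component lies in [-1020,1020],
 * so the sum always fits in the uint16_t gradient plane. */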
static void sobel(AVFilterContext *ctx, int w, int h,
                        uint16_t *dst, int dst_linesize,
                  const uint8_t  *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;

    for (j = 1; j < h - 1; j++) {
        dst += dst_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            const int gx =
                -1*src[-src_linesize + i-1] + 1*src[-src_linesize + i+1]
                -2*src[                i-1] + 2*src[                i+1]
                -1*src[ src_linesize + i-1] + 1*src[ src_linesize + i+1];
            const int gy =
                -1*src[-src_linesize + i-1] + 1*src[ src_linesize + i-1]
                -2*src[-src_linesize + i  ] + 2*src[ src_linesize + i  ]
                -1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1];

            dst[i] = FFABS(gx) + FFABS(gy);
            edgedetect->directions[j*w + i] = get_rounded_direction(gx, gy);
        }
    }
}

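/* Non-maximum suppression: a pixel is kept only if its gradient magnitude is
 * strictly larger than both of its neighbours along the rounded gradient
 * direction; everything else stays at 0 (the caller zeroes dst beforehand). */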
static void non_maximum_suppression(AVFilterContext *ctx, int w, int h,
                                          uint8_t  *dst, int dst_linesize,
                                    const uint16_t *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;

#define COPY_MAXIMA(ay, ax, by, bx) do {                \
    if (src[i] > src[(ay)*src_linesize + i+(ax)] &&     \
        src[i] > src[(by)*src_linesize + i+(bx)])       \
        dst[i] = av_clip_uint8(src[i]);                 \
} while (0)

    for (j = 1; j < h - 1; j++) {
        dst += dst_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            switch (edgedetect->directions[j*w + i]) {
            case DIRECTION_45UP:       COPY_MAXIMA( 1, -1, -1,  1); break;
            case DIRECTION_45DOWN:     COPY_MAXIMA(-1, -1,  1,  1); break;
            case DIRECTION_HORIZONTAL: COPY_MAXIMA( 0, -1,  0,  1); break;
            case DIRECTION_VERTICAL:   COPY_MAXIMA(-1,  0,  1,  0); break;
            }
        }
    }
}

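/* Hysteresis thresholding: pixels above the high threshold are kept outright;
 * non-border pixels above the low threshold are kept only when one of their
 * 8 neighbours exceeds the high threshold; everything else is zeroed. */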
static void double_threshold(AVFilterContext *ctx, int w, int h,
                                   uint8_t *dst, int dst_linesize,
                             const uint8_t *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;
    const int low  = edgedetect->low_u8;
    const int high = edgedetect->high_u8;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            if (src[i] > high) {
                dst[i] = src[i];
                continue;
            }

            if (!(!i || i == w - 1 || !j || j == h - 1) &&
                src[i] > low &&
                (src[-src_linesize + i-1] > high ||
                 src[-src_linesize + i  ] > high ||
                 src[-src_linesize + i+1] > high ||
                 src[                i-1] > high ||
                 src[                i+1] > high ||
                 src[ src_linesize + i-1] > high ||
                 src[ src_linesize + i  ] > high ||
                 src[ src_linesize + i+1] > high))
                dst[i] = src[i];
            else
                dst[i] = 0;
        }
        dst += dst_linesize;
        src += src_linesize;
    }
}

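/* Per-frame driver: run the four Canny stages in sequence. When the input
 * frame is writable it is reused as the output frame, avoiding an extra
 * allocation and copy. */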
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    EdgeDetectContext *edgedetect = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    uint8_t  *tmpbuf    = edgedetect->tmpbuf;
    uint16_t *gradients = edgedetect->gradients;
    int direct = 0;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    /* gaussian filter to reduce noise */
    gaussian_blur(ctx, inlink->w, inlink->h,
                  tmpbuf,      inlink->w,
                  in->data[0], in->linesize[0]);

    /* compute the 16-bit gradients and directions for the next step */
    sobel(ctx, inlink->w, inlink->h,
          gradients, inlink->w,
          tmpbuf,    inlink->w);

    /* non_maximum_suppression() will actually keep & clip what's necessary and
     * ignore the rest, so we need a clean output buffer */
    memset(tmpbuf, 0, inlink->w * inlink->h);
    non_maximum_suppression(ctx, inlink->w, inlink->h,
                            tmpbuf,    inlink->w,
                            gradients, inlink->w);

    /* keep high values, or low values surrounded by high values */
    double_threshold(ctx, inlink->w, inlink->h,
                     out->data[0], out->linesize[0],
                     tmpbuf,       inlink->w);

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    EdgeDetectContext *edgedetect = ctx->priv;
    av_freep(&edgedetect->tmpbuf);
    av_freep(&edgedetect->gradients);
    av_freep(&edgedetect->directions);
}

static const AVFilterPad edgedetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad edgedetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_edgedetect = {
    .name          = "edgedetect",
    .description   = NULL_IF_CONFIG_SMALL("Detect and draw edge."),
    .priv_size     = sizeof(EdgeDetectContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = edgedetect_inputs,
    .outputs       = edgedetect_outputs,
    .priv_class    = &edgedetect_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};