for (i = 0; i < n / 2; i++) {
    ((uint16_t *)dst)[i] = AV_RB16(src + 2 * i);
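This is the body of samplecpy(), which copies 16-bit samples through AV_RB16() so the big-endian values stored in the file end up as native-endian uint16_t samples. A minimal stand-alone equivalent of that read, assuming plain byte input (the helper name read_be16 is mine, not the codebase's):

#include <stdint.h>

/* Read one big-endian 16-bit value from two bytes, as AV_RB16() does. */
static uint16_t read_be16(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}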
/* pnm_decode_frame(): locals for decoding one picture from avpkt */
int buf_size = avpkt->size;
int i, j, n, linesize, h, upgrade = 0, is_mono = 0;
int components, sample_len, ret;
/* n = packed input bytes per line; it depends on the negotiated pixel format */
n = avctx->width * 2;
...
n = avctx->width * 2;
...
/* monochrome: 8 pixels per byte, rounded up (e.g. width 10 -> 2 bytes) */
n = (avctx->width + 7) >> 3;
if (s->type < 4 || (is_mono && s->type == 7)) {
    /* types 1..3 are the plain (ASCII) PNM variants: values are parsed as text */
    for (i = 0; i < avctx->height; i++) {
        ...
        for (j = 0; j < avctx->width * components; j++) {
            ...
            if (sample_len == 16) {
                ((uint16_t *)ptr)[j] = (((1 << sample_len) - 1) * v + (s->maxval >> 1)) / s->maxval;
            ...
            if (sample_len != 16)
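With sample_len == 16 the factor (1 << sample_len) - 1 is 65535, so the expression maps a parsed value v in 0..maxval onto the full 16-bit range with rounding. A stand-alone check of the same arithmetic (maxval and the test values here are only illustrative):

#include <assert.h>

int main(void)
{
    unsigned maxval = 1023;   /* e.g. a 10-bit plain PGM */

    /* the brightest input reaches 65535 exactly, and black stays at 0 */
    assert((65535u * 1023 + (maxval >> 1)) / maxval == 65535);
    assert((65535u * 0    + (maxval >> 1)) / maxval == 0);
    return 0;
}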
/* binary (raw) data: copy or rescale one row at a time */
for (i = 0; i < avctx->height; i++) {
    ...
    else if (upgrade == 1) {
        /* 8-bit samples with maxval < 255: widen to the full 0..255 range */
        unsigned int j, f = (255 * 128 + s->maxval / 2) / s->maxval;
        for (j = 0; j < n; j++)
            ...
    } else if (upgrade == 2) {
        /* 16-bit samples with maxval < 65535: widen to the full 0..65535 range */
        unsigned int j, v, f = (65535 * 32768 + s->maxval / 2) / s->maxval;
        for (j = 0; j < n / 2; j++) {
            ...
            ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15;
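Both branches precompute a fixed-point scale factor f so the per-sample work is a multiply, an add and a shift instead of a division. A stand-alone sketch of the 16-bit (Q15) case, with an illustrative maxval:

#include <assert.h>

int main(void)
{
    unsigned maxval = 4095;                                   /* e.g. 12-bit data */
    unsigned f      = (65535u * 32768 + maxval / 2) / maxval; /* Q15 scale factor */

    /* maxval maps to 65535 and 0 maps to 0, with correct rounding */
    assert(((4095u * f + 16384) >> 15) == 65535);
    assert(((0u    * f + 16384) >> 15) == 0);
    return 0;
}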
/* YUV420 ("PGMYUV"): the luma plane is followed by the two chroma planes */
unsigned char *ptr1, *ptr2;
...
for (i = 0; i < avctx->height; i++) {
    ...
for (i = 0; i < h; i++) {
/* 16-bit YUV420: the same Q15 rescale is applied to the Y, Cb and Cr planes */
uint16_t *ptr1, *ptr2;
const int f = (65535 * 32768 + s->maxval / 2) / s->maxval;
...
n = avctx->width * 2;
...
for (i = 0; i < avctx->height; i++) {
    for (j = 0; j < n / 2; j++) {
        ...
        ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15;
...
ptr1 = (uint16_t *)p->data[1];
ptr2 = (uint16_t *)p->data[2];
...
for (i = 0; i < h; i++) {
    for (j = 0; j < n / 2; j++) {
        ...
        ptr1[j] = (v * f + 16384) >> 15;
    ...
    for (j = 0; j < n / 2; j++) {
        ...
        ptr2[j] = (v * f + 16384) >> 15;
/* Each guard wraps the AVCodec definition registering one decoder of the PNM family. */
#if CONFIG_PGM_DECODER
...
#if CONFIG_PGMYUV_DECODER
...
#if CONFIG_PPM_DECODER
...
#if CONFIG_PBM_DECODER
...
#if CONFIG_PAM_DECODER
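Inside each guard sits the AVCodec table entry for that decoder, all of them using the shared pnm_decode_frame(). A hedged sketch of the general shape, using the PGM case as an example (the field values are representative, not copied from the file):

#if CONFIG_PGM_DECODER
AVCodec ff_pgm_decoder = {
    .name           = "pgm",
    .long_name      = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PGM,
    .priv_data_size = sizeof(PNMContext),
    .decode         = pnm_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
};
#endif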