It seems many people are visiting this blog for the ffmpeg and OpenCV code, even though it is largely outdated. I have updated parts of it to account for the FFmpeg API changes, and the updated code is attached with this post. I hope this helps you get started.

I don't yet know how to attach files, so I am just copy-pasting the code below. It targets the FFmpeg API of the avcodec_decode_video2/swscale era; in particular, the removed img_convert() call has been replaced with sws_scale().

ffread.h

#ifndef FFREAD_H
#define FFREAD_H

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}

#include "opencv/cv.h"
// Return a frame every FSKIP decoded frames (1 = return every frame)
#define FSKIP 1

class FFread
{
public:
    AVFormatContext *pFormatCtx;
    int              i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVPicture        pict;      // RGB destination for sws_scale()
    AVPacket         packet;
    AVRational       fr;        // stream frame rate
    int              frameFinished;
    int              numBytes;
    uint8_t         *buffer;    // backing store for pict
    int              fskip, m, n, p;
    IplImage        *dum;       // scratch 32F image filled by getframe()
    unsigned char   *ptr1;
    float           *ptr2;
    float            fps;       // frames per second, computed in init()
    int              nCols;     // frame width
    int              nRows;     // frame height
    int              nBands;    // colour channels (always 3)
    int              nframes;   // frames returned so far
    int              movie_end; // set to 1 once the stream is exhausted
    struct SwsContext *img_convert_ctx;

    int  init(char *file);            // open the file; 0 on success, -1 on error
    void getframe(IplImage **frame);  // decode the next frame into *frame
    void closeit();                   // release all resources
};

#endif

ffread.cpp

#include "ffread.h"

int FFread::init(char * file)
{
    fskip = 1;
    m = 0;
    n = 0;
    p = 0;
    nframes = 0;
    movie_end = 0;

    // Register all formats and codecs
    av_register_all();

    // Open the video file
    if(av_open_input_file(&pFormatCtx, file, NULL, 0, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx) < 0)
        return -1; // Couldn't find stream information

    // Dump information about the file onto standard error
    dump_format(pFormatCtx, 0, file, 0);

    // Find the first video stream and remember its frame rate
    videoStream = -1;
    for(i = 0; i < pFormatCtx->nb_streams; i++)
    {
        if(pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
        {
            videoStream = i;
            fr = pFormatCtx->streams[videoStream]->r_frame_rate;
            break;
        }
    }
    if(videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open the codec
    if(avcodec_open(pCodecCtx, pCodec) < 0)
        return -1; // Could not open codec

    // Allocate the video frame the decoder writes into
    pFrame = avcodec_alloc_frame();

    // Determine the required buffer size and allocate the RGB buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    // Assign appropriate parts of the buffer to the image planes in pict
    // (pict plays the role the tutorial's pFrameRGB used to play)
    avpicture_fill(&pict, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    // swscale context for the native-format-to-RGB conversion;
    // this replaces the removed img_convert()
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                     pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height,
                                     PIX_FMT_RGB24, SWS_BICUBIC,
                                     NULL, NULL, NULL);
    if(img_convert_ctx == NULL)
    {
        fprintf(stderr, "Cannot initialize the conversion context!\n");
        return -1;
    }

    // Scratch image used to hand frames back as 32F OpenCV images
    dum = cvCreateImage(cvSize(pCodecCtx->width, pCodecCtx->height),
                        IPL_DEPTH_32F, 3);

    // Compute the frame rate
    fps = (float)fr.num / (float)fr.den;

    nCols = pCodecCtx->width;
    nRows = pCodecCtx->height;
    nBands = 3;

    return 0;
}

void FFread::getframe(IplImage ** frame)
{
    // Read packets until one full video frame has been decoded
    while(fskip <= FSKIP)
    {
        if(av_read_frame(pFormatCtx, &packet) >= 0)
        {
            // Is this a packet from the video stream?
            if(packet.stream_index == videoStream)
            {
                // Decode the video frame
                avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                // Did we get a complete video frame?
                if(frameFinished)
                {
                    // Convert the image from its native format to RGB;
                    // sws_scale() replaces the removed img_convert() call
                    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
                              0, pCodecCtx->height, pict.data, pict.linesize);

                    // Copy the packed RGB data into a 32F colour IplImage
                    if(fskip == FSKIP)
                    {
                        for(m = 0; m < pCodecCtx->height; m++)
                        {
                            ptr1 = pict.data[0] + m*pict.linesize[0];
                            ptr2 = (float*)(dum->imageData + m*dum->widthStep);

                            for(n = 0; n < 3*pCodecCtx->width; n++)
                                ptr2[n] = (float)(ptr1[n]);
                        }

                        cvConvertScale(dum, *frame, 1.0, 0.0);
                        fskip = 0;

                        // Free the packet before leaving, otherwise it leaks
                        av_free_packet(&packet);
                        break;
                    }

                    fskip++;
                }
            }

            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }
        else
        {
            movie_end = 1;
            break;
        }
    }

    nframes++;
}

void FFread::closeit()
{
    // Free the RGB buffer; pict only wraps this buffer and is a plain
    // class member, so it must not itself be passed to av_free()
    av_free(buffer);

    // Free the scratch image and the swscale context
    cvReleaseImage(&dum);
    sws_freeContext(img_convert_ctx);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);
}
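
Since the FFmpeg API keeps moving, here is my best understanding of how the calls used above map onto newer releases. Treat this as a rough guide and check the headers of whichever version you have:

// Approximate old-to-new FFmpeg mapping (verify against your version):
//   av_open_input_file(&ctx, file, NULL, 0, NULL) -> avformat_open_input(&ctx, file, NULL, NULL)
//   av_find_stream_info(ctx)                      -> avformat_find_stream_info(ctx, NULL)
//   dump_format(ctx, 0, file, 0)                  -> av_dump_format(ctx, 0, file, 0)
//   CODEC_TYPE_VIDEO                              -> AVMEDIA_TYPE_VIDEO
//   avcodec_open(cctx, codec)                     -> avcodec_open2(cctx, codec, NULL)
//   avcodec_alloc_frame()                         -> av_frame_alloc()
//   PIX_FMT_RGB24                                 -> AV_PIX_FMT_RGB24
//   av_free_packet(&pkt)                          -> av_packet_unref(&pkt)
//   av_close_input_file(ctx)                      -> avformat_close_input(&ctx)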

test_main.cpp

#include "ffread.h"
#include "opencv/highgui.h"

int main()
{
    cvNamedWindow("test", 1);

    IplImage *img;
    FFread file_cap;
    int i = 0;

    if(file_cap.init("/home/phani/movies/anbesivam/anbe1.avi") != 0)
        return -1;

    img = cvCreateImage(cvSize(file_cap.nCols, file_cap.nRows), IPL_DEPTH_8U, 3);

    for(i = 0; i < 10000; i++)
    {
        file_cap.getframe(&img);
        if(file_cap.movie_end)
            break;
        cvShowImage("test", img);
        cvWaitKey(10);
    }

    file_cap.closeit();
    cvReleaseImage(&img);
    return 0;
}
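
To build the three files I use something along these lines. This is just a sketch, assuming the pkg-config files for FFmpeg and the old C-API OpenCV are installed on your machine; adjust the module names and paths to your distribution:

g++ ffread.cpp test_main.cpp -o ffread_test \
    `pkg-config --cflags --libs libavformat libavcodec libswscale libavutil opencv`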