#include <cuvi.h>
static const int width = 640; //Width of video frame in pixels
static const int height = 480; //Height of video frame in pixels
//Parameters for feature selection
static const int requestedFeatures = 150; //Maximum number of features to look for
static const float featureQuality = 0.006f; //Minimum accepted quality of a feature
static const int featureMinDistance = 15; //Minimum distance (pixels) between 2 selected features
static const int blockSize = 3; //Block size for computing the Eigen matrix
static const float k = -2.0f; //k for Harris corner detector — NOTE(review): Harris k is conventionally a small positive value (~0.04-0.06); confirm -2.0f against CUVI docs
//Parameters for feature tracking
static const int pyramidLevels = 3; //Number of pyramid (scaling) levels for the tracker
static const CuviSize trackingWindow = cuviSize(30,30); //Size of the tracking search window
static const float residue = 20.0f; //Max absolute difference between original-location window & tracked-location window
static const int iterations = 10; //Maximum number of iterations before a feature is declared found
//Pre-processing parameters
static const bool smoothBeforeSelecting = false; //Smooth image before feature selection & tracking
static const bool adjustImage = false; //Adjust image lighting before feature selection
//Post-processing parameters
static const float movementThreshold = 0.33f; //Mark as motion if a feature moves more than 0.33 pixels
//Checks whether a tracked feature has drifted from its original location.
//Useful for intrusion detection; sensitivity is tuned via the 'threshold' parameter.
bool featureHasMoved(CuviPointValue2D point1, CuviPointValue2D point2, float threshold){
    //NOTE(review): a non-zero 'val' on the tracked point is treated as an invalid/lost
    //track and reported as "not moved" — confirm this status convention against CUVI docs
    if(point2.val != 0.0f) return false;
    const float dx = fabsf(point1.x - point2.x);
    const float dy = fabsf(point1.y - point2.y);
    return (dx > threshold) || (dy > threshold);
}
//Creating a smoothing filter kernel used for optional pre-processing of both frames
CuviFilter* Gauss = Cuvi_Builtin_Filters::Gaussian(3,0.7f); //3x3 Gaussian filter with standard deviation 0.7
void main()
{
//Buffer Images on GPU
CuviImage* gFrame = new CuviImage(width,height,pitch,8,3);
CuviImage* gimg1 = new CuviImage(Width,Height,GetOpenCVPitch(Width,Height,8,1),8,1);
CuviImage* gimg2 = new CuviImage(Width,Height,GetOpenCVPitch(Width,Height,8,1),8,1);
//Region of Interest in the video frame
CuviROI roi = cuviROI(0,0,width,height);
CuviPointValue2D *features1, *features2;
int feature_count = 0;
do
{
//Read a Video Frame and populate GPU image with it
gFrame->upload(frame->imageData);
//Converting to Gray Image for computations
cuvi::colorOperations::RGB2Gray(gFrame,gimg1);
//Do the same with next, adjacent frame
gFrame->upload(frame->imageData);
cuvi::colorConvert(gFrame,gimg2);
feature_count = RequestedFeatures; //Reset feature count to original
//Use this option if the adjacent frames are lightening sensitive
if(AdjustImage){
cuvi::colorOperations::adjust(gimg1);
cuvi::colorOperations::adjust(gimg2);
}
//Use this option if the images contain fair amount of noise
if(SmoothBeforeSelecting){
//Apply Gaussian Smoothing Filter On Both The Images
cuvi::imageFiltering::imageFilter(gimg1,roi,Gauss);
cuvi::imageFiltering::imageFilter(gimg2,roi,Gauss);
}
//Defining feature selection criteria from parameters
CuviFeaturesCriteria feature_criteria = cuviFeaturesCriteria(CUVI_FEATURES_HARRIS, featureQuality, featureMinDistance, blockSize, k);
//Call any Feature Detector on first Frame( KLT | HARRIS | PETER )
cuvi::computerVision::goodFeaturesToTrack(gimg1,roi,features1,&feature_count,feature_criteria);
//Defining tracking criteria from tracking parameters
CuviTrackingCriteria tracking_criteria = cuviTrackingCriteria(pyramidLevels, trackingWindow, iterations, residue);
//Track Features Using of Frame#1 onto Frame#2 using KLT Tracker
cuvi::trackFeatures(gimg1,gimg2,features1,features2,feature_count,tracking_criteria );
//At this point you can indetify whether the selected features in frame one moved in frame two or not
for(int i=0; i<feature_count; i++){
//True only if the feature has moved from its location
if(FeatureHasMoved(features1[i],features2[i],MovementThreshold))
//You can also plot the tracked features on the screen
}
}while(video_Frames)
//Freeing GPU Memory
gFrame->release();
gimg1->release();
gimg2->release();
}