天天看點

前景檢測算法_1(codebook和平均背景法)

      前景分割中一個非常重要的研究方向就是背景減圖法,因為背景減圖的方法簡單,原理容易被想到,且在智能視訊監控領域中,錄影機很多情況下是固定的,且背景也是基本不變或者是緩慢變換的,在這種場合背景減圖法的應用驅使了其不少科研人員去研究它。

      但是背景減圖獲得前景圖像的方法缺點也很多:比如說光照因素,遮擋因素,動态周期背景,且背景非周期背景,且一般情況下我們考慮的是每個像素點之間獨立,這對實際應用留下了很大的隐患。

      這一小講主要是講簡單背景減圖法和codebook法。

一、簡單背景減圖法的工作原理。

      在視訊對背景進行模組化的過程中,每2幀圖像之間對應像素點灰階值算出一個誤差值,在背景模組化時間内算出該像素點的平均值,誤差平均值,然後在平均值的基礎上+-誤差平均值的常數(這個系數需要手動調整)倍作為背景圖像的門檻值範圍,是以當進行前景檢測時,當相應點位置來了一個像素時,如果來的這個像素的每個通道的灰階值都在這個門檻值範圍内,則認為是背景用0表示,否則認為是前景用255表示。

      下面的一個工程是learning opencv一書中作者提供的源代碼,關于簡單背景減圖的代碼和注釋如下:

     avg_background.h檔案:

1 ///
 2 // Accumulate average and ~std (really absolute difference) image and use this to detect background and foreground
 3 //
 4 // Typical way of using this is to:
 5 //     AllocateImages();
 6 ////loop for N images to accumulate background differences
 7 //    accumulateBackground();
 8 ////When done, turn this into our avg and std model with high and low bounds
 9 //    createModelsfromStats();
10 ////Then use the function to return background in a mask (255 == foreground, 0 == background)
11 //    backgroundDiff(IplImage *I,IplImage *Imask, int num);
12 ////Then tune the high and low difference from average image background acceptance thresholds
13 //    float scalehigh,scalelow; //Set these, defaults are 7 and 6. Note: scalelow is how many average differences below average
14 //    scaleHigh(scalehigh);
15 //    scaleLow(scalelow);
16 ////That is, change the scale high and low bounds for what should be background to make it work.
17 ////Then continue detecting foreground in the mask image
18 //    backgroundDiff(IplImage *I,IplImage *Imask, int num);
19 //
20 //NOTES: num is camera number which varies from 0 ... NUM_CAMERAS - 1.  Typically you only have one camera, but this routine allows
21 //          you to index many.
22 //
23 #ifndef AVGSEG_
24 #define AVGSEG_
25 
26 
27 #include "cv.h"                // define all of the opencv classes etc.
28 #include "highgui.h"
29 #include "cxcore.h"
30 
31 //IMPORTANT DEFINES:
32 #define NUM_CAMERAS   1              //This function can handle an array of cameras
33 #define HIGH_SCALE_NUM 7.0            //How many average differences from average image on the high side == background
34 #define LOW_SCALE_NUM 6.0        //How many average differences from average image on the low side == background
35 
36 void AllocateImages(IplImage *I);
37 void DeallocateImages();
38 void accumulateBackground(IplImage *I, int number=0);
39 void scaleHigh(float scale = HIGH_SCALE_NUM, int num = 0);
40 void scaleLow(float scale = LOW_SCALE_NUM, int num = 0);
41 void createModelsfromStats();
42 void backgroundDiff(IplImage *I,IplImage *Imask, int num = 0);
43 
44 #endif      

     avg_background.cpp檔案:

1 // avg_background.cpp : 定義控制台應用程式的入口點。
  2 //
  3 
  4 #include "stdafx.h"
  5 #include "avg_background.h"
  6 
  7 
  8 //GLOBALS
  9 
 10 IplImage *IavgF[NUM_CAMERAS],*IdiffF[NUM_CAMERAS], *IprevF[NUM_CAMERAS], *IhiF[NUM_CAMERAS], *IlowF[NUM_CAMERAS];
 11 IplImage *Iscratch,*Iscratch2,*Igray1,*Igray2,*Igray3,*Imaskt;
 12 IplImage *Ilow1[NUM_CAMERAS],*Ilow2[NUM_CAMERAS],*Ilow3[NUM_CAMERAS],*Ihi1[NUM_CAMERAS],*Ihi2[NUM_CAMERAS],*Ihi3[NUM_CAMERAS];
 13 
 14 float Icount[NUM_CAMERAS];
 15 
 16 void AllocateImages(IplImage *I)  //I is just a sample for allocation purposes
 17 {
 18     for(int i = 0; i<NUM_CAMERAS; i++){
 19         IavgF[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
 20         IdiffF[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
 21         IprevF[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
 22         IhiF[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
 23         IlowF[i] = cvCreateImage(cvGetSize(I), IPL_DEPTH_32F, 3 );
 24         Ilow1[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
 25         Ilow2[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
 26         Ilow3[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
 27         Ihi1[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
 28         Ihi2[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
 29         Ihi3[i] = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
 30         cvZero(IavgF[i]  );
 31         cvZero(IdiffF[i]  );
 32         cvZero(IprevF[i]  );
 33         cvZero(IhiF[i] );
 34         cvZero(IlowF[i]  );        
 35         Icount[i] = 0.00001; //Protect against divide by zero
 36     }
 37     Iscratch = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
 38     Iscratch2 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
 39     Igray1 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
 40     Igray2 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
 41     Igray3 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
 42     Imaskt = cvCreateImage( cvGetSize(I), IPL_DEPTH_8U, 1 );
 43 
 44     cvZero(Iscratch);
 45     cvZero(Iscratch2 );
 46 }
 47 
 48 void DeallocateImages()
 49 {
 50     for(int i=0; i<NUM_CAMERAS; i++){
 51         cvReleaseImage(&IavgF[i]);
 52         cvReleaseImage(&IdiffF[i] );
 53         cvReleaseImage(&IprevF[i] );
 54         cvReleaseImage(&IhiF[i] );
 55         cvReleaseImage(&IlowF[i] );
 56         cvReleaseImage(&Ilow1[i]  );
 57         cvReleaseImage(&Ilow2[i]  );
 58         cvReleaseImage(&Ilow3[i]  );
 59         cvReleaseImage(&Ihi1[i]   );
 60         cvReleaseImage(&Ihi2[i]   );
 61         cvReleaseImage(&Ihi3[i]  );
 62     }
 63     cvReleaseImage(&Iscratch);
 64     cvReleaseImage(&Iscratch2);
 65 
 66     cvReleaseImage(&Igray1  );
 67     cvReleaseImage(&Igray2 );
 68     cvReleaseImage(&Igray3 );
 69 
 70     cvReleaseImage(&Imaskt);
 71 }
 72 
 73 // Accumulate the background statistics for one more frame
 74 // We accumulate the images, the image differences and the count of images for the 
 75 //    the routine createModelsfromStats() to work on after we're done accumulating N frames.
 76 // I        Background image, 3 channel, 8u
 77 // number    Camera number
 78 void accumulateBackground(IplImage *I, int number)
 79 {
 80     static int first = 1;
 81     cvCvtScale(I,Iscratch,1,0); //To float;#define cvCvtScale cvConvertScale #define cvScale cvConvertScale
 82     if (!first){
 83         cvAcc(Iscratch,IavgF[number]);//将2幅圖像相加:IavgF[number]=IavgF[number]+Iscratch,IavgF[]裡面裝的是時間序列圖檔的累加
 84         cvAbsDiff(Iscratch,IprevF[number],Iscratch2);//将2幅圖像相減:Iscratch2=abs(Iscratch-IprevF[number]);
 85         cvAcc(Iscratch2,IdiffF[number]);//IdiffF[]裡面裝的是圖像差的累積和
 86         Icount[number] += 1.0;//累積的圖檔幀數計數
 87     }
 88     first = 0;
 89     cvCopy(Iscratch,IprevF[number]);//執行完該函數後,将目前幀資料儲存為前一幀資料
 90 }
 91 
 92 // Scale the average difference from the average image high acceptance threshold
 93 void scaleHigh(float scale, int num)//設定背景模組化時的高門檻值函數
 94 {
 95     cvConvertScale(IdiffF[num],Iscratch,scale); //Converts with rounding and saturation
 96     cvAdd(Iscratch,IavgF[num],IhiF[num]);//将平均累積圖像與誤差累積圖像縮放scale倍然後再相加
 97     cvCvtPixToPlane( IhiF[num], Ihi1[num],Ihi2[num],Ihi3[num], 0 );//#define cvCvtPixToPlane cvSplit,且cvSplit是将一個多通道矩陣轉換為幾個單通道矩陣
 98 }
 99 
100 // Scale the average difference from the average image low acceptance threshold
101 void scaleLow(float scale, int num)//設定背景模組化時的低門檻值函數
102 {
103     cvConvertScale(IdiffF[num],Iscratch,scale); //Converts with rounding and saturation
104     cvSub(IavgF[num],Iscratch,IlowF[num]);//将平均累積圖像與誤差累積圖像縮放scale倍然後再相減
105     cvCvtPixToPlane( IlowF[num], Ilow1[num],Ilow2[num],Ilow3[num], 0 );
106 }
107 
108 //Once you've learned the background long enough, turn it into a background model
109 void createModelsfromStats()
110 {
111     for(int i=0; i<NUM_CAMERAS; i++)
112     {
113         cvConvertScale(IavgF[i],IavgF[i],(double)(1.0/Icount[i]));//此處為求出累積求和圖像的平均值
114         cvConvertScale(IdiffF[i],IdiffF[i],(double)(1.0/Icount[i]));//此處為求出累計誤差圖像的平均值
115         cvAddS(IdiffF[i],cvScalar(1.0,1.0,1.0),IdiffF[i]);  //Make sure diff is always something,cvAddS是用于一個數值和一個标量相加
116         scaleHigh(HIGH_SCALE_NUM,i);//HIGH_SCALE_NUM初始定義為7,其實就是一個倍數
117         scaleLow(LOW_SCALE_NUM,i);//LOW_SCALE_NUM初始定義為6
118     }
119 }
120 
121 // Create a binary: 0,255 mask where 255 means forground pixel
122 // I        Input image, 3 channel, 8u
123 // Imask    mask image to be created, 1 channel 8u
124 // num        camera number.
125 //
126 void backgroundDiff(IplImage *I,IplImage *Imask, int num)  //Mask should be grayscale
127 {
128     cvCvtScale(I,Iscratch,1,0); //To float;
129 //Channel 1
130     cvCvtPixToPlane( Iscratch, Igray1,Igray2,Igray3, 0 );
131     cvInRange(Igray1,Ilow1[num],Ihi1[num],Imask);//Igray1[]中相應的點在Ilow1[]和Ihi1[]之間時,Imask中相應的點為255(背景符合)
132 //Channel 2
133     cvInRange(Igray2,Ilow2[num],Ihi2[num],Imaskt);//也就是說對于每一幅圖像的絕對值差小于絕對值差平均值的6倍或者大于絕對值差平均值的7倍被認為是前景圖像
134     cvOr(Imask,Imaskt,Imask);
135     //Channel 3
136     cvInRange(Igray3,Ilow3[num],Ihi3[num],Imaskt);//這裡的固定門檻值6和7太不合理了,還好工程後面可以根據實際情況手動調整!
137     cvOr(Imask,Imaskt,Imask);
138     //Finally, invert the results
139     cvSubRS( Imask, cvScalar(255), Imask);//前景用255表示了,背景是用0表示
140 }      

 二、codebook算法工作原理

     考慮到簡單背景減圖法無法對動态的背景模組化,有學者就提出了codebook算法。

     該算法為圖像中每一個像素點建立一個碼本,每個碼本可以包括多個碼元,每個碼元有它的學習時最大最小門檻值,檢測時的最大最小門檻值等成員。在背景模組化期間,每當來了一幅新圖檔,對每個像素點進行碼本比對,也就是說如果該像素值在碼本中某個碼元的學習門檻值内,則認為它離過去該對應點出現過的曆史情況偏離不大,通過一定的像素值比較,如果滿足條件,此時還可以更新對應點的學習門檻值和檢測門檻值。如果新來的像素值對碼本中每個碼元都不比對,則有可能是由于背景是動态的,是以我們需要為其建立一個新的碼元,并且設定相應的碼元成員變量。是以,在背景學習的過程中,每個像素點可以對應多個碼元,這樣就可以學到複雜的動态背景。

     關于codebook算法的代碼和注釋如下:

     cv_yuv_codebook.h檔案:

1 ///
 2 // Accumulate average and ~std (really absolute difference) image and use this to detect background and foreground
 3 //
 4 // Typical way of using this is to:
 5 //     AllocateImages();
 6 ////loop for N images to accumulate background differences
 7 //    accumulateBackground();
 8 ////When done, turn this into our avg and std model with high and low bounds
 9 //    createModelsfromStats();
10 ////Then use the function to return background in a mask (255 == foreground, 0 == background)
11 //    backgroundDiff(IplImage *I,IplImage *Imask, int num);
12 ////Then tune the high and low difference from average image background acceptance thresholds
13 //    float scalehigh,scalelow; //Set these, defaults are 7 and 6. Note: scalelow is how many average differences below average
14 //    scaleHigh(scalehigh);
15 //    scaleLow(scalelow);
16 ////That is, change the scale high and low bounds for what should be background to make it work.
17 ////Then continue detecting foreground in the mask image
18 //    backgroundDiff(IplImage *I,IplImage *Imask, int num);
19 //
20 //NOTES: num is camera number which varies from 0 ... NUM_CAMERAS - 1.  Typically you only have one camera, but this routine allows
21 //          you to index many.
22 //
23 #ifndef AVGSEG_
24 #define AVGSEG_
25 
26 
27 #include "cv.h"                // define all of the opencv classes etc.
28 #include "highgui.h"
29 #include "cxcore.h"
30 
31 //IMPORTANT DEFINES:
32 #define NUM_CAMERAS   1              //This function can handle an array of cameras
33 #define HIGH_SCALE_NUM 7.0            //How many average differences from average image on the high side == background
34 #define LOW_SCALE_NUM 6.0        //How many average differences from average image on the low side == background
35 
36 void AllocateImages(IplImage *I);
37 void DeallocateImages();
38 void accumulateBackground(IplImage *I, int number=0);
39 void scaleHigh(float scale = HIGH_SCALE_NUM, int num = 0);
40 void scaleLow(float scale = LOW_SCALE_NUM, int num = 0);
41 void createModelsfromStats();
42 void backgroundDiff(IplImage *I,IplImage *Imask, int num = 0);
43 
44 #endif      

     cv_yuv_codebook.cpp檔案:

1 ////YUV CODEBOOK
  2 // Gary Bradski, July 14, 2005
  3 
  4 
  5 #include "stdafx.h"
  6 #include "cv_yuv_codebook.h"
  7 
  8 //GLOBALS FOR ALL CAMERA MODELS
  9 
 10 //For connected components:
 11 int CVCONTOUR_APPROX_LEVEL = 2;   // Approx.threshold - the bigger it is, the simpler is the boundary
 12 int CVCLOSE_ITR = 1;                // How many iterations of erosion and/or dialation there should be
 13 //#define CVPERIMSCALE 4            // image (width+height)/PERIMSCALE.  If contour lenght < this, delete that contour
 14 
 15 //For learning background
 16 
 17 //Just some convienience macros
 18 #define CV_CVX_WHITE    CV_RGB(0xff,0xff,0xff)
 19 #define CV_CVX_BLACK    CV_RGB(0x00,0x00,0x00)
 20 
 21 
 22 ///
 23 // int updateCodeBook(uchar *p, codeBook &c, unsigned cbBounds)
 24 // Updates the codebook entry with a new data point
 25 //
 26 // p            Pointer to a YUV pixel
 27 // c            Codebook for this pixel
 28 // cbBounds        Learning bounds for codebook (Rule of thumb: 10)
 29 // numChannels    Number of color channels we're learning
 30 //
 31 // NOTES:
 32 //        cvBounds must be of size cvBounds[numChannels]
 33 //
 34 // RETURN
 35 //    codebook index
 36 int cvupdateCodeBook(uchar *p, codeBook &c, unsigned *cbBounds, int numChannels)
 37 {
 38 
 39     if(c.numEntries == 0) c.t = 0;//說明每個像素如果周遊了的話至少對應一個碼元
 40     c.t += 1;        //Record learning event,周遊該像素點的次數加1
 41 //SET HIGH AND LOW BOUNDS
 42     int n;
 43     unsigned int high[3],low[3];
 44     for(n=0; n<numChannels; n++)//為該像素點的每個通道設定最大門檻值和最小門檻值,後面用來更新學習的高低門檻值時有用
 45     {
 46         high[n] = *(p+n)+*(cbBounds+n);
 47         if(high[n] > 255) high[n] = 255;
 48         low[n] = *(p+n)-*(cbBounds+n);
 49         if(low[n] < 0) low[n] = 0;
 50     }
 51     int matchChannel;
 52     //SEE IF THIS FITS AN EXISTING CODEWORD
 53     int i;
 54     for(i=0; i<c.numEntries; i++)//需要對所有的碼元進行掃描
 55     {
 56         matchChannel = 0;
 57         for(n=0; n<numChannels; n++)
 58         {
 59             //這個地方要非常小心,if條件不是下面表達的
 60 //if((c.cb[i]->min[n]-c.cb[i]->learnLow[n] <= *(p+n)) && (*(p+n) <= c.cb[i]->max[n]+c.cb[i]->learnHigh[n]))
 61 //原因是因為在每次建立一個新碼元的時候,learnHigh[n]和learnLow[n]的範圍就在max[n]和min[n]上擴充了cbBounds[n],是以說
 62 //learnHigh[n]和learnLow[n]的變化範圍實際上比max[n]和min[n]的大
 63             if((c.cb[i]->learnLow[n] <= *(p+n)) && (*(p+n) <= c.cb[i]->learnHigh[n])) //Found an entry for this channel
 64             {
 65                 matchChannel++;
 66             }
 67         }
 68         if(matchChannel == numChannels) //If an entry was found over all channels,找到了該元素此刻對應的碼元
 69         {
 70             c.cb[i]->t_last_update = c.t;
 71             //adjust this codeword for the first channel
 72 //更新每個碼元的最大最小門檻值,因為這2個門檻值在後面的前景分離過程要用到
 73             for(n=0; n<numChannels; n++)
 74             {
 75                 if(c.cb[i]->max[n] < *(p+n))//用該點的像素值更新該碼元的最大值,是以max[n]儲存的是實際上曆史出現過的最大像素值
 76                 {
 77                     c.cb[i]->max[n] = *(p+n);//因為這個for語句是在比對成功了的條件門檻值下的,是以一般來說改變後的max[n]和min[n]
 78 //也不會過學習的高低門檻值,并且學習的高低門檻值也一直在緩慢變化  
 79                 }
 80                 else if(c.cb[i]->min[n] > *(p+n))//用該點的像素值更新該碼元的最小值,是以min[n]儲存的是實際上曆史出現過的最小像素值
 81                 {
 82                     c.cb[i]->min[n] = *(p+n);
 83                 }
 84             }
 85             break;//一旦找到了該像素的一個碼元後就不用繼續往後找了,加快算法速度。因為最多隻有一個碼元與之對應
 86         }
 87     }
 88 
 89     //OVERHEAD TO TRACK POTENTIAL STALE ENTRIES
 90     for(int s=0; s<c.numEntries; s++)
 91     {
 92         //This garbage is to track which codebook entries are going stale
 93         int negRun = c.t - c.cb[s]->t_last_update;//negRun表示碼元沒有更新的時間間隔
 94         if(c.cb[s]->stale < negRun) c.cb[s]->stale = negRun;//更新每個碼元的statle
 95     }
 96 
 97 
 98     //ENTER A NEW CODE WORD IF NEEDED
 99     if(i == c.numEntries)  //No existing code word found, make a new one,隻有當該像素碼本中的所有碼元都不符合要求時才滿足if條件
100     {
101         code_element **foo = new code_element* [c.numEntries+1];//建立一個新的碼元序列
102         for(int ii=0; ii<c.numEntries; ii++)
103         {
104             foo[ii] = c.cb[ii];//将碼本前面所有的碼元位址賦給foo
105         }
106         foo[c.numEntries] = new code_element;//建立一個新碼元并賦給foo指針的下一個空位
107         if(c.numEntries) delete [] c.cb;//?
108         c.cb = foo;
109         for(n=0; n<numChannels; n++)//給建立立的碼元結構體元素指派
110         {
111             c.cb[c.numEntries]->learnHigh[n] = high[n];//當建立一個新碼元時,用目前值附近cbBounds範圍作為碼元box的學習門檻值
112             c.cb[c.numEntries]->learnLow[n] = low[n];
113             c.cb[c.numEntries]->max[n] = *(p+n);//當建立一個新碼元時,用目前值作為碼元box的最大最小邊界值
114             c.cb[c.numEntries]->min[n] = *(p+n);
115         }
116         c.cb[c.numEntries]->t_last_update = c.t;
117         c.cb[c.numEntries]->stale = 0;//因為剛建立,所有為0
118         c.numEntries += 1;//碼元的個數加1
119     }
120 
121     //SLOWLY ADJUST LEARNING BOUNDS
122     for(n=0; n<numChannels; n++)//每次周遊該像素點就将每個碼元的學習最大門檻值變大,最小門檻值變小,但是都是緩慢變化的
123     {                           //如果是建立立的碼元,則if條件肯定不滿足
124         if(c.cb[i]->learnHigh[n] < high[n]) c.cb[i]->learnHigh[n] += 1;                
125         if(c.cb[i]->learnLow[n] > low[n]) c.cb[i]->learnLow[n] -= 1;
126     }
127 
128     return(i);//傳回所找到碼本中碼元的索引
129 }
130 
131 ///
132 // uchar cvbackgroundDiff(uchar *p, codeBook &c, int minMod, int maxMod)
133 // Given a pixel and a code book, determine if the pixel is covered by the codebook
134 //
135 // p        pixel pointer (YUV interleaved)
136 // c        codebook reference
137 // numChannels  Number of channels we are testing
138 // maxMod    Add this (possibly negative) number onto max level when code_element determining if new pixel is foreground
139 // minMod    Subract this (possible negative) number from min level code_element when determining if pixel is foreground
140 //
141 // NOTES:
142 // minMod and maxMod must have length numChannels, e.g. 3 channels => minMod[3], maxMod[3].
143 //
144 // Return
145 // 0 => background, 255 => foreground
146 uchar cvbackgroundDiff(uchar *p, codeBook &c, int numChannels, int *minMod, int *maxMod)
147 {
148     int matchChannel;
149     //SEE IF THIS FITS AN EXISTING CODEWORD
150     int i;
151     for(i=0; i<c.numEntries; i++)
152     {
153         matchChannel = 0;
154         for(int n=0; n<numChannels; n++)
155         {
156             if((c.cb[i]->min[n] - minMod[n] <= *(p+n)) && (*(p+n) <= c.cb[i]->max[n] + maxMod[n]))
157             {
158                 matchChannel++; //Found an entry for this channel
159             }
160             else
161             {
162                 break;//加快速度,當一個通道不滿足時提前結束
163             }
164         }
165         if(matchChannel == numChannels)
166         {
167             break; //Found an entry that matched all channels,加快速度,當一個碼元找到時,提前結束
168         }
169     }
170     if(i >= c.numEntries) return(255);//255代表前景,因為所有的碼元都不滿足條件
171     return(0);//0代表背景,因為至少有一個碼元滿足條件
172 }
173 
174 
175 //UTILITES/
176 /
177 //int clearStaleEntries(codeBook &c)
178 // After you've learned for some period of time, periodically call this to clear out stale codebook entries
179 //
180 //c        Codebook to clean up
181 //
182 // Return
183 // number of entries cleared
184 int cvclearStaleEntries(codeBook &c)//對每一個碼本進行檢查
185 {
186     int staleThresh = c.t>>1;//門檻值設定為通路該碼元的次數的一半,經驗值
187     int *keep = new int [c.numEntries];
188     int keepCnt = 0;
189     //SEE WHICH CODEBOOK ENTRIES ARE TOO STALE
190     for(int i=0; i<c.numEntries; i++)
191     {
192         if(c.cb[i]->stale > staleThresh)//當在背景模組化期間有一半的時間内,codebook的碼元條目沒有被通路,則該條目将被删除
193             keep[i] = 0; //Mark for destruction
194         else
195         {
196             keep[i] = 1; //Mark to keep,為1時,該碼本的條目将被保留
197             keepCnt += 1;//keepCnt記錄了要保持的codebook的數目
198         }
199     }
200     //KEEP ONLY THE GOOD
201     c.t = 0;                        //Full reset on stale tracking
202     code_element **foo = new code_element* [keepCnt];//重建立立一個碼本的雙指針
203     int k=0;
204     for(int ii=0; ii<c.numEntries; ii++)
205     {
206         if(keep[ii])
207         {
208             foo[k] = c.cb[ii];//要保持該碼元的話就要把碼元結構體複制到fook
209             foo[k]->stale = 0;        //We have to refresh these entries for next clearStale,不被通路的累加器stale重新指派0
210             foo[k]->t_last_update = 0;//
211             k++;
212         }
213     }
214     //CLEAN UP
215     delete [] keep;
216     delete [] c.cb;
217     c.cb = foo;
218     int numCleared = c.numEntries - keepCnt;//numCleared中儲存的是被删除碼元的個數
219     c.numEntries = keepCnt;//最後新的碼元數為儲存下來碼元的個數
220     return(numCleared);//傳回被删除的碼元個數
221 }
222 
223 /
224 //int countSegmentation(codeBook *c, IplImage *I)
225 //
226 //Count how many pixels are detected as foreground
227 // c    Codebook
228 // I    Image (yuv, 24 bits)
229 // numChannels  Number of channels we are testing
230 // maxMod    Add this (possibly negative) number onto max level when code_element determining if new pixel is foreground
231 // minMod    Subract this (possible negative) number from min level code_element when determining if pixel is foreground
232 //
233 // NOTES:
234 // minMod and maxMod must have length numChannels, e.g. 3 channels => minMod[3], maxMod[3].
235 //
236 //Return
237 // Count of fg pixels
238 //
239 int cvcountSegmentation(codeBook *c, IplImage *I, int numChannels, int *minMod, int *maxMod)
240 {
241     int count = 0,i;
242     uchar *pColor;
243     int imageLen = I->width * I->height;
244 
245     //GET BASELINE NUMBER OF FG PIXELS FOR Iraw
246     pColor = (uchar *)((I)->imageData);
247     for(i=0; i<imageLen; i++)
248     {
249         if(cvbackgroundDiff(pColor, c[i], numChannels, minMod, maxMod))//對每一個像素點都要檢測其是否為前景,如果是的話,計數器count就加1
250             count++;
251         pColor += 3;
252     }
253     return(count);//傳回圖像I的前景像素點的個數
254 }
255 
256 
257 ///
258 //void cvconnectedComponents(IplImage *mask, int poly1_hull0, float perimScale, int *num, CvRect *bbs, CvPoint *centers)
259 // This cleans up the forground segmentation mask derived from calls to cvbackgroundDiff
260 //
261 // mask            Is a grayscale (8 bit depth) "raw" mask image which will be cleaned up
262 //
263 // OPTIONAL PARAMETERS:
264 // poly1_hull0    If set, approximate connected component by (DEFAULT) polygon, or else convex hull (0)
265 // perimScale     Len = image (width+height)/perimScale.  If contour len < this, delete that contour (DEFAULT: 4)
266 // num            Maximum number of rectangles and/or centers to return, on return, will contain number filled (DEFAULT: NULL)
267 // bbs            Pointer to bounding box rectangle vector of length num.  (DEFAULT SETTING: NULL)
268 // centers        Pointer to contour centers vectore of length num (DEFULT: NULL)
269 //
270 void cvconnectedComponents(IplImage *mask, int poly1_hull0, float perimScale, int *num, CvRect *bbs, CvPoint *centers)
271 {
272 static CvMemStorage*    mem_storage    = NULL;
273 static CvSeq*            contours    = NULL;
274 //CLEAN UP RAW MASK
275 //開運算作用:平滑輪廓,去掉細節,斷開缺口
276     cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_OPEN, CVCLOSE_ITR );//對輸入mask進行開操作,CVCLOSE_ITR為開操作的次數,輸出為mask圖像
277 //閉運算作用:平滑輪廓,連接配接缺口
278     cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_CLOSE, CVCLOSE_ITR );//對輸入mask進行閉操作,CVCLOSE_ITR為閉操作的次數,輸出為mask圖像
279 
280 //FIND CONTOURS AROUND ONLY BIGGER REGIONS
281     if( mem_storage==NULL ) mem_storage = cvCreateMemStorage(0);
282     else cvClearMemStorage(mem_storage);
283 
284     //CV_RETR_EXTERNAL=0是在types_c.h中定義的,CV_CHAIN_APPROX_SIMPLE=2也是在該檔案中定義的
285     CvContourScanner scanner = cvStartFindContours(mask,mem_storage,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);
286     CvSeq* c;
287     int numCont = 0;
288     while( (c = cvFindNextContour( scanner )) != NULL )
289     {
290         double len = cvContourPerimeter( c );
291         double q = (mask->height + mask->width) /perimScale;   //calculate perimeter len threshold
292         if( len < q ) //Get rid of blob if it's perimeter is too small
293         {
294             cvSubstituteContour( scanner, NULL );
295         }
296         else //Smooth it's edges if it's large enough
297         {
298             CvSeq* c_new;
299             if(poly1_hull0) //Polygonal approximation of the segmentation
300                 c_new = cvApproxPoly(c,sizeof(CvContour),mem_storage,CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL,0);
301             else //Convex Hull of the segmentation
302                 c_new = cvConvexHull2(c,mem_storage,CV_CLOCKWISE,1);
303             cvSubstituteContour( scanner, c_new );
304             numCont++;
305         }
306     }
307     contours = cvEndFindContours( &scanner );
308 
309 // PAINT THE FOUND REGIONS BACK INTO THE IMAGE
310     cvZero( mask );
311     IplImage *maskTemp;
312     //CALC CENTER OF MASS AND OR BOUNDING RECTANGLES
313     if(num != NULL)
314     {
315         int N = *num, numFilled = 0, i=0;
316         CvMoments moments;
317         double M00, M01, M10;
318         maskTemp = cvCloneImage(mask);
319         for(i=0, c=contours; c != NULL; c = c->h_next,i++ )
320         {
321             if(i < N) //Only process up to *num of them
322             {
323                 cvDrawContours(maskTemp,c,CV_CVX_WHITE, CV_CVX_WHITE,-1,CV_FILLED,8);
324                 //Find the center of each contour
325                 if(centers != NULL)
326                 {
327                     cvMoments(maskTemp,&moments,1);
328                     M00 = cvGetSpatialMoment(&moments,0,0);
329                     M10 = cvGetSpatialMoment(&moments,1,0);
330                     M01 = cvGetSpatialMoment(&moments,0,1);
331                     centers[i].x = (int)(M10/M00);
332                     centers[i].y = (int)(M01/M00);
333                 }
334                 //Bounding rectangles around blobs
335                 if(bbs != NULL)
336                 {
337                     bbs[i] = cvBoundingRect(c);
338                 }
339                 cvZero(maskTemp);
340                 numFilled++;
341             }
342             //Draw filled contours into mask
343             cvDrawContours(mask,c,CV_CVX_WHITE,CV_CVX_WHITE,-1,CV_FILLED,8); //draw to central mask
344         } //end looping over contours
345         *num = numFilled;
346         cvReleaseImage( &maskTemp);
347     }
348     //ELSE JUST DRAW PROCESSED CONTOURS INTO THE MASK
349     else
350     {
351         for( c=contours; c != NULL; c = c->h_next )
352         {
353             cvDrawContours(mask,c,CV_CVX_WHITE, CV_CVX_BLACK,-1,CV_FILLED,8);
354         }
355     }
356 }      

三、2種算法進行對比。

     Learning Opencv的作者将這兩種算法做了下對比,用的視訊是有風吹動樹枝的動态背景,一段時間過後的前景是視訊中移動的手。

     當然在這個工程中,作者除了展現上述簡單背景差法和codebook算法的一些原理外,還引入了很多細節來優化前景分割效果。比如說誤差計算時的方差和協方差計算加速方法,消除像素點内長時間沒有被通路過的碼元,對檢測到的粗糙原始前景圖用連通域分析法清除噪聲,其中引入了形态學中的幾種操作,使用多邊形拟合前景輪廓等細節處理。

     在看作者代碼前,最好先看下下面幾個變量的實體含義。

     maxMod[n]:用訓練好的背景模型進行前景檢測時用到,判斷點是否小于max[n] + maxMod[n])。

     minMod[n]:用訓練好的背景模型進行前景檢測時用到,判斷點是否大于min[n] - minMod[n]。

     cbBounds*:訓練背景模型時用到,可以手動輸入該參數,這個數主要是配合high[n]和low[n]來用的。

     learnHigh[n]:背景學習過程中當一個新像素來時用來判斷是否在已有的碼元中,是門檻值的上界部分。

     learnLow[n]:背景學習過程中當一個新像素來時用來判斷是否在已有的碼元中,是門檻值的下界部分。

     max[n]: 背景學習過程中每個碼元學習到的最大值,在前景分割時配合maxMod[n]用的。

     min[n]: 背景學習過程中每個碼元學習到的最小值,在前景分割時配合minMod[n]用的。

     high[n]:背景學習過程中用來調整learnHigh[n]的,如果learnHigh[n]<high[n],則learnHigh[n]緩慢加1

     low[n]: 背景學習過程中用來調整learnLow[n]的,如果learnLow[n]>low[n],則learnLow[n]緩慢減1

     該工程帶主函數部分代碼和注釋如下:

#include "stdafx.h"

#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include "avg_background.h"
#include "cv_yuv_codebook.h"

//VARIABLES for CODEBOOK METHOD:
// One codebook per pixel: a linear (row-major) vector of length
// height*width, allocated once the first frame's size is known.
codeBook *cB;   //This will be our linear model of the image, a vector 
//of length = height*width
// Per-channel detection slack, added to each entry's max when testing
// whether a new pixel is foreground (may be negative).
int maxMod[CHANNELS];    //Add these (possibly negative) number onto max 
// level when code_element determining if new pixel is foreground
// Per-channel detection slack, subtracted from each entry's min when
// testing whether a pixel is foreground (may be negative).
int minMod[CHANNELS];     //Subtract these (possibly negative) number from min 
//level code_element when determining if pixel is foreground
unsigned cbBounds[CHANNELS]; //Code Book bounds for learning
bool ch[CHANNELS];        //This sets what channels should be adjusted for background bounds
int nChannels = CHANNELS;
int imageLen = 0;         // pixels per frame (width*height), set on first frame
uchar *pColor; //YUV pointer

// Print usage and the interactive key bindings to stdout.
void help() {
    printf("\nLearn background and find foreground using simple average and average difference learning method:\n"
"\nUSAGE:\n  ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]\n"
"If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V\n\n"
"***Keep the focus on the video windows, NOT the consol***\n\n"
"INTERACTIVE PARAMETERS:\n"
"\tESC,q,Q  - quit the program\n"
"\th    - print this help\n"
"\tp    - pause toggle\n"
"\ts    - single step\n"
"\tr    - run mode (single step off)\n"
"=== AVG PARAMS ===\n"
"\t-    - bump high threshold UP by 0.25\n"
"\t=    - bump high threshold DOWN by 0.25\n"
"\t[    - bump low threshold UP by 0.25\n"
"\t]    - bump low threshold DOWN by 0.25\n"
"=== CODEBOOK PARAMS ===\n"
"\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
"\ta    - adjust all 3 channels at once\n"
"\tb    - adjust both 2 and 3 at once\n"
"\ti,o    - bump upper threshold up,down by 1\n"
"\tk,l    - bump lower threshold up,down by 1\n"
        );
}

//
//USAGE:  ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
int main(int argc, char** argv)
{
     IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
    IplImage *ImaskAVG = 0,*ImaskAVGCC = 0;
    IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
    CvCapture* capture = 0;

int startcapture = 1;
int endcapture = 30;
int c,n;

    maxMod[0] = 3;  //Set color thresholds to default values
    minMod[0] = 10;
    maxMod[1] = 1;
    minMod[1] = 1;
    maxMod[2] = 1;
    minMod[2] = 1;
float scalehigh = HIGH_SCALE_NUM;//預設值為6
    float scalelow = LOW_SCALE_NUM;//預設值為7
    
if(argc < 3) {//隻有1個參數或者沒有參數時,輸出錯誤,并提示help資訊,因為該程式本身就算進去了一個參數
        printf("ERROR: Too few parameters\n");
        help();
    }else{//至少有2個參數才算正确
        if(argc == 3){//輸入為2個參數的情形是從攝像頭輸入資料
            printf("Capture from Camera\n");
            capture = cvCaptureFromCAM( 0 );
        }
else {//輸入大于2個參數時是從檔案中讀入視訊資料
            printf("Capture from file %s\n",argv[3]);//第三個參數是讀入視訊檔案的檔案名
//        capture = cvCaptureFromFile( argv[3] );
            capture = cvCreateFileCapture( argv[3] );
if(!capture) { printf("Couldn't open %s\n",argv[3]); return -1;}//讀入視訊檔案失敗
        }
if(isdigit(argv[1][0])) { //Start from of background capture
            startcapture = atoi(argv[1]);//第一個參數表示視訊開始的背景訓練時的幀,預設是1
            printf("startcapture = %d\n",startcapture);
        }
if(isdigit(argv[2][0])) { //End frame of background capture
            endcapture = atoi(argv[2]);//第二個參數表示的結束背景訓練時的,預設為30
            printf("endcapture = %d\n"); 
        }
if(argc > 4){ //See if parameters are set from command line,輸入多于4個參數表示後面的算法中用到的參數在這裡直接輸入
//FOR AVG MODEL
            if(argc >= 5){
if(isdigit(argv[4][0])){
                    scalehigh = (float)atoi(argv[4]);
                }
            }
if(argc >= 6){
if(isdigit(argv[5][0])){
                    scalelow = (float)atoi(argv[5]);
                }
            }
//FOR CODEBOOK MODEL, CHANNEL 0
            if(argc >= 7){
if(isdigit(argv[6][0])){
                    maxMod[0] = atoi(argv[6]);
                }
            }
if(argc >= 8){
if(isdigit(argv[7][0])){
                    minMod[0] = atoi(argv[7]);
                }
            }
//Channel 1
            if(argc >= 9){
if(isdigit(argv[8][0])){
                    maxMod[1] = atoi(argv[8]);
                }
            }
if(argc >= 10){
if(isdigit(argv[9][0])){
                    minMod[1] = atoi(argv[9]);
                }
            }
//Channel 2
            if(argc >= 11){
if(isdigit(argv[10][0])){
                    maxMod[2] = atoi(argv[10]);
                }
            }
if(argc >= 12){
if(isdigit(argv[11][0])){
                    minMod[2] = atoi(argv[11]);
                }
            }
        }
    }

//MAIN PROCESSING LOOP:
    bool pause = false;
bool singlestep = false;

if( capture )
    {
      cvNamedWindow( "Raw", 1 );//原始視訊圖像
        cvNamedWindow( "AVG_ConnectComp",1);//平均法連通區域分析後的圖像
        cvNamedWindow( "ForegroundCodeBook",1);//codebook法後圖像
        cvNamedWindow( "CodeBook_ConnectComp",1);//codebook法連通區域分析後的圖像
         cvNamedWindow( "ForegroundAVG",1);//平均法後圖像
        int i = -1;

for(;;)
        {
if(!pause){
//                if( !cvGrabFrame( capture ))
//                    break;
//                rawImage = cvRetrieveFrame( capture );
                rawImage = cvQueryFrame( capture );
                ++i;//count it
//                printf("%d\n",i);
                if(!rawImage) 
break;
//REMOVE THIS FOR GENERAL OPERATION, JUST A CONVIENIENCE WHEN RUNNING WITH THE SMALL tree.avi file
                if(i == 56){//程式開始運作幾十幀後自動暫停,以便後面好手動調整參數
                    pause = 1;
                    printf("\n\nVideo paused for your convienience at frame 50 to work with demo\n"
"You may adjust parameters, single step or continue running\n\n");
                    help();
                }
            }
if(singlestep){
                pause = true;
            }
//First time:
            if(0 == i) {
                printf("\n . . . wait for it . . .\n"); //Just in case you wonder why the image is white at first
//AVG METHOD ALLOCATION
                AllocateImages(rawImage);//為算法的使用配置設定記憶體
                scaleHigh(scalehigh);//設定背景模組化時的高門檻值函數
                scaleLow(scalelow);//設定背景模組化時的低門檻值函數
                ImaskAVG = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                ImaskAVGCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                cvSet(ImaskAVG,cvScalar(255));
//CODEBOOK METHOD ALLOCATION:
                yuvImage = cvCloneImage(rawImage);
                ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );//用來裝前景背景圖的,當然隻要一個通道的圖像即可
                ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                cvSet(ImaskCodeBook,cvScalar(255));
                imageLen = rawImage->width*rawImage->height;
                cB = new codeBook [imageLen];//建立一個碼本cB數組,每個像素對應一個碼本
                for(int f = 0; f<imageLen; f++)
                {
                     cB[f].numEntries = 0;//每個碼本的初始碼元個數指派為0
                }
for(int nc=0; nc<nChannels;nc++)
                {
                    cbBounds[nc] = 10; //Learning bounds factor,初始值為10
                }
                ch[0] = true; //Allow threshold setting simultaneously for all channels
                ch[1] = true;
                ch[2] = true;
            }
//If we've got an rawImage and are good to go:                
            if( rawImage )
            {
                cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
//This is where we build our background model
                if( !pause && i >= startcapture && i < endcapture  ){
//LEARNING THE AVERAGE AND AVG DIFF BACKGROUND
                    accumulateBackground(rawImage);//平均法累加過程
//LEARNING THE CODEBOOK BACKGROUND
                    pColor = (uchar *)((yuvImage)->imageData);//yuvImage矩陣的首位置
                    for(int c=0; c<imageLen; c++)
                    {
                        cvupdateCodeBook(pColor, cB[c], cbBounds, nChannels);//codebook算法模組化過程
                        pColor += 3;
                    }
                }
//When done, create the background model
                if(i == endcapture){
                    createModelsfromStats();//平均法模組化過程
                }
//Find the foreground if any
                if(i >= endcapture) {//endcapture幀後開始檢測前景
//FIND FOREGROUND BY AVG METHOD:
                    backgroundDiff(rawImage,ImaskAVG);
                    cvCopy(ImaskAVG,ImaskAVGCC);
                    cvconnectedComponents(ImaskAVGCC);//平均法中的前景清除
//FIND FOREGROUND BY CODEBOOK METHOD
                    uchar maskPixelCodeBook;
                    pColor = (uchar *)((yuvImage)->imageData); //3 channel yuv image
                    uchar *pMask = (uchar *)((ImaskCodeBook)->imageData); //1 channel image
                    for(int c=0; c<imageLen; c++)
                    {
                         maskPixelCodeBook = cvbackgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);//前景傳回255,背景傳回0
                        *pMask++ = maskPixelCodeBook;//将前景檢測的結果傳回到ImaskCodeBook中
                        pColor += 3;
                    }
//This part just to visualize bounding boxes and centers if desired
                    cvCopy(ImaskCodeBook,ImaskCodeBookCC);    
                    cvconnectedComponents(ImaskCodeBookCC);//codebook算法中的前景清除
                }
//Display
                   cvShowImage( "Raw", rawImage );//除了這張是彩色圖外,另外4張都是黑白圖
                cvShowImage( "AVG_ConnectComp",ImaskAVGCC);
                   cvShowImage( "ForegroundAVG",ImaskAVG);
                 cvShowImage( "ForegroundCodeBook",ImaskCodeBook);
                 cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);

//USER INPUT:
                 c = cvWaitKey(10)&0xFF;
//End processing on ESC, q or Q
                if(c == 27 || c == 'q' | c == 'Q')
break;
//Else check for user input
                switch(c)
                {
case 'h':
                        help();
break;
case 'p':
                        pause ^= 1;
break;
case 's':
                        singlestep = 1;
                        pause = false;
break;
case 'r':
                        pause = false;
                        singlestep = false;
break;
//AVG BACKROUND PARAMS
                    case '-'://調整scalehigh的參數,scalehigh的實體意義是誤差累加的影響因子,其倒數為縮放倍數,加0.25實際上是減小其影響力
                        if(i > endcapture){
                            scalehigh += 0.25;
                            printf("AVG scalehigh=%f\n",scalehigh);
                            scaleHigh(scalehigh);
                        }
break;
case '='://scalehigh減少2.5是增加其影響力
                        if(i > endcapture){
                            scalehigh -= 0.25;
                            printf("AVG scalehigh=%f\n",scalehigh);
                            scaleHigh(scalehigh);
                        }
break;
case '[':
if(i > endcapture){//設定設定背景模組化時的低門檻值函數,同上
                            scalelow += 0.25;
                            printf("AVG scalelow=%f\n",scalelow);
                            scaleLow(scalelow);
                        }
break;
case ']':
if(i > endcapture){
                            scalelow -= 0.25;
                            printf("AVG scalelow=%f\n",scalelow);
                            scaleLow(scalelow);
                        }
break;
//CODEBOOK PARAMS
                case 'y':
case '0'://激活y通道
                        ch[0] = 1;
                        ch[1] = 0;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
break;
case 'u':
case '1'://激活u通道
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
break;
case 'v':
case '2'://激活v通道
                        ch[0] = 0;
                        ch[1] = 0;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
break;
case 'a': //All
                case '3'://激活所有通道
                        ch[0] = 1;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
break;
case 'b':  //both u and v together
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
break;
case 'i': //modify max classification bounds (max bound goes higher)
                    for(n=0; n<nChannels; n++){//maxMod和minMod是最大值和最小值跳動的門檻值
                        if(ch[n])
                            maxMod[n] += 1;
                        printf("%.4d,",maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
break;
case 'o': //modify max classification bounds (max bound goes lower)
                    for(n=0; n<nChannels; n++){
if(ch[n])
                            maxMod[n] -= 1;
                        printf("%.4d,",maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
break;
case 'k': //modify min classification bounds (min bound goes lower)
                    for(n=0; n<nChannels; n++){
if(ch[n])
                            minMod[n] += 1;
                        printf("%.4d,",minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
break;
case 'l': //modify min classification bounds (min bound goes higher)
                    for(n=0; n<nChannels; n++){
if(ch[n])
                            minMod[n] -= 1;
                        printf("%.4d,",minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
break;
                }

            }
        }        
      cvReleaseCapture( &capture );
      cvDestroyWindow( "Raw" );
        cvDestroyWindow( "ForegroundAVG" );
        cvDestroyWindow( "AVG_ConnectComp");
        cvDestroyWindow( "ForegroundCodeBook");
        cvDestroyWindow( "CodeBook_ConnectComp");
        DeallocateImages();//釋放平均法背景模組化過程中用到的記憶體
        if(yuvImage) cvReleaseImage(&yuvImage);
if(ImaskAVG) cvReleaseImage(&ImaskAVG);
if(ImaskAVGCC) cvReleaseImage(&ImaskAVGCC);
if(ImaskCodeBook) cvReleaseImage(&ImaskCodeBook);
if(ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);
        delete [] cB;
    }
else{ printf("\n\nDarn, Something wrong with the parameters\n\n"); help();
    }
return 0;
}      

     運作結果截圖如下:

     訓練過程視訊原圖截圖:

前景檢測算法_1(codebook和平均背景法)

     測試過程視訊原圖截圖:

前景檢測算法_1(codebook和平均背景法)

     前景檢測過程截圖:

前景檢測算法_1(codebook和平均背景法)

     對比兩種算法的前景檢測結果截圖可以看到,codebook算法的前景掩膜明顯比簡單平均背景減圖法乾淨,檢測出的手型輪廓也更清晰完整。

 四、參考文獻

      Bradski, G. and A. Kaehler (2008). Learning OpenCV: Computer vision with the OpenCV library, O'Reilly Media.

繼續閱讀