You may want to start from the OpenCV open-source code and simply change the parameters to what you need. It is simple and practical: copy the files below into a new project and they are ready to use.
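The files that follow closely track OpenCV 2.4's built-in implementation of the Zivkovic mixture-of-Gaussians algorithm (BackgroundSubtractorMOG2 in bgfg_gaussmix2.cpp), with the class renamed to BackgroundSubtractorMOG3 so its constants can be edited freely. If the stock defaults are good enough, the same result can be had without copying anything; a minimal sketch, assuming OpenCV 2.4.x and a placeholder video path:

    #include <opencv2/opencv.hpp>

    int main()
    {
        cv::VideoCapture cap("c:\\...\\1.avi");            // placeholder path, same idea as the demo below
        cv::BackgroundSubtractorMOG2 mog2(500, 16, true);  // history, varThreshold, shadow detection
        cv::Mat frame, fgmask, background;
        while (cap.read(frame))
        {
            mog2(frame, fgmask, 0.005);                    // third argument: learning rate
            mog2.getBackgroundImage(background);           // current background estimate
            cv::imshow("foreground", fgmask);
            cv::imshow("background", background);
            if (cv::waitKey(30) == 27) break;              // ESC quits
        }
        return 0;
    }

Copying the files below is mainly worthwhile when you want to edit constants directly in the source, such as defaultNMixtures3 or defaultVarInit3.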
MOG_BGS3.hpp file:

#include "opencv2/core/core.hpp"
#include <list>
#include "cv.h"

namespace OurMogBgs {

using namespace cv;

class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
{
public:
    virtual ~BackgroundSubtractor();

    CV_WRAP_AS(apply) virtual void operator()(InputArray image, OutputArray fgmask,
                                              double learningRate = 0);

    virtual void getBackgroundImage(OutputArray backgroundImage) const;
};

class CV_EXPORTS_W BackgroundSubtractorMOG3 : public BackgroundSubtractor
{
public:
    CV_WRAP BackgroundSubtractorMOG3();
    CV_WRAP BackgroundSubtractorMOG3(int history, float varThreshold, bool bShadowDetection = true);
    virtual ~BackgroundSubtractorMOG3();

    virtual void operator()(InputArray image, OutputArray fgmask, double learningRate = -1);
    virtual void getBackgroundImage(OutputArray backgroundImage) const;
    virtual void initialize(Size frameSize, int frameType);

protected:
    Size frameSize;
    int frameType;
    Mat bgmodel;                     // the Gaussian mixture model, one set of modes per pixel
    Mat bgmodelUsedModes;            // number of modes currently used per pixel
    int nframes;
    int history;
    int nmixtures;
    double varThreshold;             // squared Mahalanobis distance threshold for the background test
    float backgroundRatio;           // weight-sum threshold deciding which modes form the background
    float varThresholdGen;           // threshold for matching a sample to an existing mode
    float fVarInit;                  // initial variance of newly created modes
    float fVarMin;
    float fVarMax;
    float fCT;                       // complexity reduction prior
    bool bShadowDetection;           // whether to mark shadows in the output mask
    unsigned char nShadowDetection;  // value written into the mask for shadow pixels (127 by default)
    float fTau;                      // shadow threshold
};

}  // namespace OurMogBgs

MOG_BGS3.cpp file:
#include "stdafx.h"
#include "MOG_BGS3.hpp"
#include <list>

namespace OurMogBgs {
/*
 Interface of Gaussian mixture algorithm from:

 "Improved adaptive Gaussian mixture model for background subtraction"
 Z. Zivkovic
 International Conference Pattern Recognition, UK, August, 2004

 Advantages:
 - fast - the number of Gaussian components is constantly adapted per pixel.
 - also performs shadow detection (see the bgfg_segm_test.cpp example)
*/

BackgroundSubtractor::~BackgroundSubtractor() {}

void BackgroundSubtractor::operator()(InputArray _image, OutputArray _fgmask, double learningRate)
{
}

void BackgroundSubtractor::getBackgroundImage(OutputArray backgroundImage) const
{
}
// default parameters of the Gaussian background detection algorithm
static const int defaultHistory3 = 500;                 // learning rate: alpha = 1/defaultHistory3
static const float defaultVarThreshold3 = 4.0f*4.0f;    // threshold on the squared Mahalanobis distance used to decide whether a pixel is background
static const int defaultNMixtures3 = 3;                 // maximal number of Gaussians in the mixture
static const float defaultBackgroundRatio3 = 0.9f;      // threshold on the sum of weights for the background test
static const float defaultVarThresholdGen3 = 2.5f*2.5f; // threshold used when deciding whether a sample matches an existing component
static const float defaultVarInit3 = 30.0f;             // initial variance for new components
static const float defaultVarMax3 = 5*defaultVarInit3;
static const float defaultVarMin3 = 4.0f;

// additional parameters
static const float defaultfCT3 = 0.05f;  // complexity reduction prior constant; 0 - no reduction of the number of components
static const unsigned char defaultnShadowDetection3 = (unsigned char)127; // value written into the segmentation mask for shadows; set to 0 to disable shadow detection
static const float defaultfTau = 0.5f;   // Tau - shadow threshold, see the paper for an explanation
struct GaussBGStatModel3Params
{
    //image info
    int nND;//number of data dimensions (image channels)

    bool bPostFiltering;//default 1 - do postfiltering - will make shadow detection results also give value 255
    double minArea; // for postfiltering

    bool bInit;//default 1, faster updates at start

    /////////////////////////
    //very important parameters - things you will change
    ////////////////////////
    float fAlphaT;
    //alpha - speed of update - if the time interval you want to average over is T
    //set alpha=1/T. It is also useful at start to make T slowly increase
    //from 1 until the desired T
    float fTb;
    //Tb - threshold on the squared Mahalanobis distance to decide if a sample is well described
    //by the background model or not. Related to Cthr from the paper.
    //This does not influence the update of the background. A typical value could be 4 sigma,
    //and that is Tb=4*4=16.

    /////////////////////////
    //less important parameters - things you might change but be careful
    ////////////////////////
    float fTg;
    //Tg - threshold on the squared Mahalanobis distance to decide
    //when a sample is close to the existing components. If it is not close
    //to any, a new component will be generated. I use 3 sigma => Tg=3*3=9.
    //Smaller Tg leads to more generated components; a higher Tg may
    //lead to a small number of components but they can grow too large.
    float fTB;//1-cf from the paper
    //TB - threshold when the component becomes significant enough to be included into
    //the background model. It is the TB=1-cf from the paper, so I use cf=0.1 => TB=0.9.
    //For alpha=0.001 it means that the mode should exist for approximately 105 frames before
    //it is considered foreground.
    float fVarInit;
    float fVarMax;
    float fVarMin;
    //initial standard deviation for the newly generated components.
    //It will influence the speed of adaptation. A good guess should be made.
    //A simple way is to estimate the typical standard deviation from the images.
    //I used here 10 as a reasonable value.
    float fCT;//CT - complexity reduction prior
    //this is related to the number of samples needed to accept that a component
    //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
    //the standard Stauffer&Grimson algorithm (maybe not exact but very similar).

    //even less important parameters
    int nM;//max number of modes - const - 4 is usually enough

    //shadow detection parameters
    bool bShadowDetection;//default 1 - do shadow detection
    unsigned char nShadowDetection;//insert this value into the mask as the shadow detection result
    float fTau;
    //Tau - shadow threshold. A shadow is detected if the pixel is a darker
    //version of the background. Tau is a threshold on how much darker the shadow can be.
    //Tau = 0.5 means that if a pixel is more than 2 times darker then it is not shadow.
    //See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
};
struct GMM
{
    float weight;
    float variance;
};

// shadow detection performed per pixel
// should work for rgb data, could be useful for gray scale and depth data as well
// See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
static CV_INLINE bool
detectShadowGMM(const float* data, int nchannels, int nmodes,
                const GMM* gmm, const float* mean,
                float Tb, float TB, float tau)
{
    float tWeight = 0;

    // check all the components marked as background:
    for( int mode = 0; mode < nmodes; mode++, mean += nchannels )
    {
        GMM g = gmm[mode];

        float numerator = 0.0f;
        float denominator = 0.0f;
        for( int c = 0; c < nchannels; c++ )
        {
            numerator   += data[c] * mean[c];
            denominator += mean[c] * mean[c];
        }

        // no division by zero allowed
        if( denominator == 0 )
            return false;

        // if tau < a < 1 then also check the color distortion
        if( numerator <= denominator && numerator >= tau*denominator )
        {
            float a = numerator / denominator;
            float dist2a = 0.0f;
            for( int c = 0; c < nchannels; c++ )
            {
                float dD = a*mean[c] - data[c];
                dist2a += dD*dD;
            }
            if (dist2a < Tb*g.variance*a*a)
                return true;
        }

        tWeight += g.weight;
        if( tWeight > TB )
            return false;
    }
    return false;
}
//update GMM - the base update function performed per pixel
//
//"Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction"
//Z. Zivkovic, F. van der Heijden
//Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006.
//
//The algorithm is similar to the standard Stauffer&Grimson algorithm with
//additional selection of the number of Gaussian components based on:
//
//"Recursive unsupervised learning of finite mixture models"
//Z. Zivkovic, F. van der Heijden
//IEEE Trans. on Pattern Analysis and Machine Intelligence, vol. 26, no. 5, pages 651-656, 2004
//http://www.zoranz.net/Publications/zivkovic2004PAMI.pdf
struct MOG3Invoker : ParallelLoopBody
{
    MOG3Invoker(const Mat& _src, Mat& _dst,
                GMM* _gmm, float* _mean,
                uchar* _modesUsed,
                int _nmixtures, float _alphaT,
                float _Tb, float _TB, float _Tg,
                float _varInit, float _varMin, float _varMax,
                float _prune, float _tau, bool _detectShadows,
                uchar _shadowVal)
    {
        src = &_src;
        dst = &_dst;
        gmm0 = _gmm;
        mean0 = _mean;
        modesUsed0 = _modesUsed;
        nmixtures = _nmixtures;
        alphaT = _alphaT;
        Tb = _Tb;
        TB = _TB;
        Tg = _Tg;
        varInit = _varInit;
        varMin = MIN(_varMin, _varMax);
        varMax = MAX(_varMin, _varMax);
        prune = _prune;
        tau = _tau;
        detectShadows = _detectShadows;
        shadowVal = _shadowVal;

        // conversion function to CV_32F when the input is not already float
        cvtfunc = src->depth() != CV_32F ? getConvertFunc(src->depth(), CV_32F) : 0;
    }
    void operator()(const Range& range) const
    {
        int y0 = range.start, y1 = range.end;
        int ncols = src->cols, nchannels = src->channels();
        AutoBuffer<float> buf(src->cols*nchannels);
        float alpha1 = 1.f - alphaT;
        float dData[CV_CN_MAX];

        for( int y = y0; y < y1; y++ )
        {
            const float* data = buf;
            if( cvtfunc )
                cvtfunc( src->ptr(y), src->step, 0, 0, (uchar*)data, 0, Size(ncols*nchannels, 1), 0);
            else
                data = src->ptr<float>(y);

            float* mean = mean0 + ncols*nmixtures*nchannels*y;
            GMM* gmm = gmm0 + ncols*nmixtures*y;
            uchar* modesUsed = modesUsed0 + ncols*y;
            uchar* mask = dst->ptr(y);

            for( int x = 0; x < ncols; x++, data += nchannels, gmm += nmixtures, mean += nmixtures*nchannels )
            {
                //calculate distances to the modes (+ sort)
                //here we need to go in descending order!!!
                bool background = false;//return value -> true - the pixel classified as background

                //internal:
                bool fitsPDF = false;//if it remains false a new GMM mode will be added
                int nmodes = modesUsed[x], nNewModes = nmodes;//current number of modes in GMM
                float totalWeight = 0.f;

                float* mean_m = mean;

                //go through all modes
                for( int mode = 0; mode < nmodes; mode++, mean_m += nchannels )
                {
                    float weight = alpha1*gmm[mode].weight + prune;//need only weight if fit is found
                    int swap_count = 0;

                    //fit not found yet
                    if( !fitsPDF )
                    {
                        //check if it belongs to some of the remaining modes
                        float var = gmm[mode].variance; //variance of this Gaussian mode

                        //calculate difference and distance
                        float dist2;
                        if( nchannels == 3 )
                        {
                            dData[0] = mean_m[0] - data[0];
                            dData[1] = mean_m[1] - data[1];
                            dData[2] = mean_m[2] - data[2];
                            dist2 = dData[0]*dData[0] + dData[1]*dData[1] + dData[2]*dData[2];
                        }
                        else
                        {
                            dist2 = 0.f;
                            for( int c = 0; c < nchannels; c++ )
                            {
                                dData[c] = mean_m[c] - data[c];
                                dist2 += dData[c]*dData[c];
                            }
                        }

                        //background? - Tb - usually larger than Tg
                        if( totalWeight < TB && dist2 < Tb*var )
                            background = true;

                        //check fit
                        if( dist2 < Tg*var )
                        {
                            //belongs to the mode
                            fitsPDF = true;

                            //update distribution

                            //update weight
                            weight += alphaT;
                            float k = alphaT/weight;

                            //update mean
                            for( int c = 0; c < nchannels; c++ )
                                mean_m[c] -= k*dData[c];

                            //update variance
                            float varnew = var + k*(dist2-var);
                            //limit the variance
                            varnew = MAX(varnew, varMin);
                            varnew = MIN(varnew, varMax);
                            gmm[mode].variance = varnew;

                            //sort:
                            //all other weights are at the same place and
                            //only the matched mode is higher -> just find the new place for it
                            for( int i = mode; i > 0; i-- )
                            {
                                //check one up
                                if( weight < gmm[i-1].weight )
                                    break;

                                swap_count++;
                                //swap one up
                                std::swap(gmm[i], gmm[i-1]);
                                for( int c = 0; c < nchannels; c++ )
                                    std::swap(mean[i*nchannels + c], mean[(i-1)*nchannels + c]);
                            }
                            //belongs to the mode - fitsPDF becomes true
                        }
                    }//!fitsPDF

                    //check prune
                    if( weight < -prune )
                    {
                        weight = 0.0f;
                        nNewModes--;
                    }

                    gmm[mode-swap_count].weight = weight;//update weight by the calculated value
                    totalWeight += weight;
                }
                //go through all modes

                //renormalize weights
                totalWeight = 1.f/totalWeight;
                for( int mode = 0; mode < nmodes; mode++ )
                    gmm[mode].weight *= totalWeight;

                nmodes = nNewModes;

                //make a new mode if needed and exit
                if( !fitsPDF )
                {
                    // replace the weakest or add a new one
                    int mode = nmodes == nmixtures ? nmixtures-1 : nmodes++;

                    if (nmodes==1)
                        gmm[mode].weight = 1.f;
                    else
                    {
                        gmm[mode].weight = alphaT;

                        // renormalize all other weights
                        for( int i = 0; i < nmodes-1; i++ )
                            gmm[i].weight *= alpha1;
                    }

                    // init the new mode with the current sample
                    for( int c = 0; c < nchannels; c++ )
                        mean[mode*nchannels + c] = data[c];

                    gmm[mode].variance = varInit;

                    //sort: find the new place for it
                    for( int i = nmodes - 1; i > 0; i-- )
                    {
                        // check one up
                        if( alphaT < gmm[i-1].weight )
                            break;

                        // swap one up
                        std::swap(gmm[i], gmm[i-1]);
                        for( int c = 0; c < nchannels; c++ )
                            std::swap(mean[i*nchannels + c], mean[(i-1)*nchannels + c]);
                    }
                }

                //set the number of modes
                modesUsed[x] = uchar(nmodes);

                mask[x] = background ? 0 :
                    detectShadows && detectShadowGMM(data, nchannels, nmodes, gmm, mean, Tb, TB, tau) ?
                    shadowVal : 255;
            }
        }
    }
    const Mat* src;
    Mat* dst;
    GMM* gmm0;
    float* mean0;
    uchar* modesUsed0;

    int nmixtures;
    float alphaT, Tb, TB, Tg;
    float varInit, varMin, varMax, prune, tau;

    bool detectShadows;
    uchar shadowVal;

    BinaryFunc cvtfunc;
};
BackgroundSubtractorMOG3::BackgroundSubtractorMOG3()
{
    frameSize = Size(0,0);
    frameType = 0;

    nframes = 0;
    history = defaultHistory3;
    varThreshold = defaultVarThreshold3;
    bShadowDetection = 1;

    nmixtures = defaultNMixtures3;
    backgroundRatio = defaultBackgroundRatio3;
    fVarInit = defaultVarInit3;
    fVarMax = defaultVarMax3;
    fVarMin = defaultVarMin3;

    varThresholdGen = defaultVarThresholdGen3;
    fCT = defaultfCT3;
    nShadowDetection = defaultnShadowDetection3;
    fTau = defaultfTau;
}
BackgroundSubtractorMOG3::BackgroundSubtractorMOG3(int _history, float _varThreshold, bool _bShadowDetection)
{
    frameSize = Size(0,0);
    frameType = 0;

    nframes = 0;
    history = _history > 0 ? _history : defaultHistory3;
    varThreshold = (_varThreshold > 0) ? _varThreshold : defaultVarThreshold3;
    bShadowDetection = _bShadowDetection;

    nmixtures = defaultNMixtures3;
    backgroundRatio = defaultBackgroundRatio3;
    fVarInit = defaultVarInit3;
    fVarMax = defaultVarMax3;
    fVarMin = defaultVarMin3;

    varThresholdGen = defaultVarThresholdGen3;
    fCT = defaultfCT3;
    nShadowDetection = defaultnShadowDetection3;
    fTau = defaultfTau;
}
BackgroundSubtractorMOG3::~BackgroundSubtractorMOG3()
{
}

void BackgroundSubtractorMOG3::initialize(Size _frameSize, int _frameType)
{
    frameSize = _frameSize;
    frameType = _frameType;
    nframes = 0;

    int nchannels = CV_MAT_CN(frameType);
    CV_Assert( nchannels <= CV_CN_MAX );

    // for each gaussian mixture of each pixel bg model we store ...
    // the mixture weight (w),
    // the mean (nchannels values) and
    // the covariance
    bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + nchannels), CV_32F );

    // make the array for keeping track of the used modes per pixel - all zeros at start
    bgmodelUsedModes.create(frameSize, CV_8U);
    bgmodelUsedModes = Scalar::all(0);
}
void BackgroundSubtractorMOG3::operator()(InputArray _image, OutputArray _fgmask, double learningRate)
{
    Mat image = _image.getMat();
    bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;

    if( needToInitialize )
        initialize(image.size(), image.type());

    _fgmask.create( image.size(), CV_8U );
    Mat fgmask = _fgmask.getMat();

    ++nframes;
    learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./min( 2*nframes, history );
    CV_Assert(learningRate >= 0);

    parallel_for_(Range(0, image.rows),
                  MOG3Invoker(image, fgmask,
                              (GMM*)bgmodel.data,
                              (float*)(bgmodel.data + sizeof(GMM)*nmixtures*image.rows*image.cols),
                              bgmodelUsedModes.data, nmixtures, (float)learningRate,
                              (float)varThreshold,
                              backgroundRatio, varThresholdGen,
                              fVarInit, fVarMin, fVarMax, float(-learningRate*fCT), fTau,
                              bShadowDetection, nShadowDetection));
}
void BackgroundSubtractorMOG3::getBackgroundImage(OutputArray backgroundImage) const
{
    int nchannels = CV_MAT_CN(frameType);
    CV_Assert( nchannels == 3 );
    Mat meanBackground(frameSize, CV_8UC3, Scalar::all(0));

    int firstGaussianIdx = 0;
    const GMM* gmm = (GMM*)bgmodel.data;
    const Vec3f* mean = reinterpret_cast<const Vec3f*>(gmm + frameSize.width*frameSize.height*nmixtures);
    for(int row = 0; row < meanBackground.rows; row++)
    {
        for(int col = 0; col < meanBackground.cols; col++)
        {
            int nmodes = bgmodelUsedModes.at<uchar>(row, col);
            Vec3f meanVal;
            float totalWeight = 0.f;
            for(int gaussianIdx = firstGaussianIdx; gaussianIdx < firstGaussianIdx + nmodes; gaussianIdx++)
            {
                GMM gaussian = gmm[gaussianIdx];
                meanVal += gaussian.weight * mean[gaussianIdx];
                totalWeight += gaussian.weight;
                if(totalWeight > backgroundRatio)
                    break;
            }
            meanVal *= (1.f / totalWeight);
            meanBackground.at<Vec3b>(row, col) = Vec3b(meanVal);
            firstGaussianIdx += nmixtures;
        }
    }

    switch(CV_MAT_CN(frameType))
    {
    case 1:
    {
        vector<Mat> channels;
        split(meanBackground, channels);
        channels[0].copyTo(backgroundImage);
        break;
    }
    case 3:
    {
        meanBackground.copyTo(backgroundImage);
        break;
    }
    default:
        CV_Error(CV_StsUnsupportedFormat, "");
    }
}

}  // namespace OurMogBgs
Main function file (give it any name you like):

#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include <cv.h>
#include "opencv2/core/core.hpp"
#include <opencv2/highgui/highgui.hpp>
#include "MOG_BGS3.hpp"

using namespace cv;
using namespace std;
using namespace OurMogBgs;

int main()
{
    VideoCapture capture("c:\\...\\1.avi");
    if( !capture.isOpened() )
    {
        cout << "Failed to open the video" << endl;
        return -1;
    }

    // total number of frames
    long totalframenumber = capture.get(CV_CAP_PROP_FRAME_COUNT);
    cout << "The video has " << totalframenumber << " frames in total" << endl;

    // set the start frame
    long frametostart = 1;
    capture.set(CV_CAP_PROP_POS_FRAMES, frametostart);  // seek to the start frame
    cout << "Reading from frame " << frametostart << endl;

    // set the end frame
    int frametostop = 100;
    if(frametostop < frametostart)
    {
        cout << "The end frame is smaller than the start frame; exiting!" << endl;
        return -1;
    }
    cout << "The end frame is frame " << frametostop << endl;

    double rate = capture.get(CV_CAP_PROP_FPS);
    int delay = (int)(100 / rate);

    Mat frame;       // current frame
    Mat foreground;  // foreground mask
    Mat background;  // background image
    BackgroundSubtractorMOG3 mog(20, 16, true);

    bool stop(false);
    long currentframe = frametostart;
    while( !stop )
    {
        if( !capture.read(frame) )
        {
            cout << "Failed to read a frame or reached the end of the video" << endl;
            return -2;
        }
        imshow("input video", frame);

        // arguments: input image, output mask, learning rate
        mog(frame, foreground, 0.005);
        mog.getBackgroundImage(background);  // returns the current background image

        imshow("foreground", foreground);
        imshow("background", background);

        // press ESC to exit; any other key pauses at the current frame
        int c = waitKey(delay);
        if( (char)c == 27 || currentframe >= frametostop )
            stop = true;
        else if( c >= 0 )
            waitKey(0);

        currentframe++;
        if (currentframe == frametostop)
        {
            imwrite("c:\\...\\...", background);  // fill in a real output path with an image extension
            waitKey(0);
        }
    }
    return 0;
}
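When shadow detection is enabled (the third constructor argument), shadow pixels are written into the foreground mask with the value nShadowDetection (127 by default), while foreground is 255 and background is 0. If a strictly binary mask is needed, the shadow label can be thresholded away right after the call to mog(frame, foreground, 0.005) in the loop above; a minimal sketch, assuming the imgproc functions threshold and medianBlur pulled in by cv.h, with an example window name:

    Mat binaryFg;
    threshold(foreground, binaryFg, 200, 255, THRESH_BINARY);  // keep only 255-valued (non-shadow) foreground pixels
    medianBlur(binaryFg, binaryFg, 5);                         // optional: suppress isolated speckle noise
    imshow("foreground without shadows", binaryFg);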