Learning OpenCV, Chapter 6, Exercise 13

Time: 2023-03-09 18:03:46

Speeding up convolution with the Fourier transform. Straight to the code; the Mat version is copied from someone else's work.
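The identity both versions rely on is the convolution theorem (a standard result, stated here for reference, not taken from the original exercise): convolving two arrays in the spatial domain is the same as multiplying their spectra element-wise in the frequency domain,

    A * B = \mathcal{F}^{-1}\big(\mathcal{F}(A)\cdot\mathcal{F}(B)\big)

provided A and B are zero-padded to at least (rows_A + rows_B - 1) x (cols_A + cols_B - 1) so that the DFT's circular convolution matches the linear one. That is exactly the forward DFT, cvMulSpectrums/mulSpectrums, inverse DFT sequence in the code below.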

CvMat version

 #include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include <iostream> using namespace cv;
using namespace std; void speedy_convolution(const CvMat* A,const CvMat* B,CvMat* C); int main()
{
IplImage* img=cvLoadImage("C:/Users/shark/Desktop/fruits.jpg",);
CvMat* src=cvCreateMat(img->height,img->width,CV_32FC1);
/*int data;
for(int i=0;i<img->height;i++)
{
for(int j=0;j<img->width;j++)
{
data=img->imageData[i*img->widthStep+j];
cvmSet(src,i,j,data);
}
}*/
//必须归一化矩阵的值为0-1之间(缩放比例在1/255.0附近效果最好,太小最后会全黑,接近1或大于1几乎是全白;
//(还未深入了解函数cvConvertScale的机理),缩放比例不能为1,打出目标图像的像素有正有负
cvConvertScale(img,src,/255.0,); CvMat* kernel=cvCreateMat(,,CV_32FC1);
cvSetReal2D(kernel,,,1.0/); cvSetReal2D(kernel,,,2.0/); cvSetReal2D(kernel,,,1.0/); //注意设置值时必须加个.0否则1/16的值0
cvSetReal2D(kernel,,,2.0/); cvSetReal2D(kernel,,,4.0/); cvSetReal2D(kernel,,,2.0/);
cvSetReal2D(kernel,,,1.0/); cvSetReal2D(kernel,,,2.0/); cvSetReal2D(kernel,,,1.0/);
CvMat* C=cvCreateMat((src->rows+kernel->rows-),(src->cols+kernel->cols-),src->type);
speedy_convolution(src,kernel,C); IplImage* img_src=cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,);
cvGetImage(src,img_src);
IplImage* img_dst=cvCreateImage(cvGetSize(C),IPL_DEPTH_32F,);
cvGetImage(C,img_dst); cvNamedWindow("img_src");
cvShowImage("img_src",img_src);
cvNamedWindow("img");
cvShowImage("img",img);
cvNamedWindow("dst");
cvShowImage("dst",img_dst);
cvWaitKey();
return ;
} void speedy_convolution(
const CvMat* A,
const CvMat* B,
CvMat* C
){
int dft_M=cvGetOptimalDFTSize(A->rows+B->rows-);
int dft_N=cvGetOptimalDFTSize(A->cols+B->cols-); CvMat *dft_A=cvCreateMat(dft_M,dft_N,A->type);
CvMat *dft_B=cvCreateMat(dft_M,dft_N,B->type);
CvMat tmp;
cvGetSubRect(dft_A,&tmp,cvRect(,,A->cols,A->rows));
cvCopy(A,&tmp);
cvGetSubRect(dft_A,&tmp,cvRect(A->cols,,dft_A->cols-A->cols,A->rows));
cvZero(&tmp);
cvDFT(dft_A,dft_A,CV_DXT_FORWARD,A->rows); cvGetSubRect(dft_B,&tmp,cvRect(,,B->cols,B->rows));
cvCopy(B,&tmp);
cvGetSubRect(dft_B,&tmp,cvRect(B->cols,,dft_B->cols-B->cols,B->rows));
cvZero(&tmp);
cvDFT(dft_B,dft_B,CV_DXT_FORWARD,B->rows); cvMulSpectrums(dft_A,dft_B,dft_A,); cvDFT(dft_A,dft_A,CV_DXT_INV_SCALE,C->rows);
cvGetSubRect(dft_A,&tmp,cvRect(,,C->cols,C->rows));
cvCopy(&tmp,C);
cvReleaseMat(&dft_A);
cvReleaseMat(&dft_B);
}
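Not part of the original exercise, but if you would rather display the smoothed image at the original size instead of the enlarged "full" result, a minimal sketch is to take the central src-sized view of C before converting it for display (kernel, src and C are the names from the listing above):

/* Sketch: crop the full convolution back to the original image size,
   dropping the kernel-radius border that the zero-padding introduced. */
CvMat same;
cvGetSubRect(C, &same, cvRect(kernel->cols/2, kernel->rows/2, src->cols, src->rows));
/* "same" is now a view of the region of C that lines up with the original image. */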

Mat version

 #include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream> using namespace cv;
using namespace std; //http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#dft[2]
void convolveDFT(Mat A, Mat B, Mat& C)
{
// reallocate the output array if needed
C.create(abs(A.rows - B.rows)+, abs(A.cols - B.cols)+, A.type());
Size dftSize;
// calculate the size of DFT transform
dftSize.width = getOptimalDFTSize(A.cols + B.cols - );
dftSize.height = getOptimalDFTSize(A.rows + B.rows - ); // allocate temporary buffers and initialize them with 0's
Mat tempA(dftSize, A.type(), Scalar::all());
Mat tempB(dftSize, B.type(), Scalar::all()); // copy A and B to the top-left corners of tempA and tempB, respectively
Mat roiA(tempA, Rect(,,A.cols,A.rows));
A.copyTo(roiA);
Mat roiB(tempB, Rect(,,B.cols,B.rows));
B.copyTo(roiB); // now transform the padded A & B in-place;
// use "nonzeroRows" hint for faster processing
dft(tempA, tempA, , A.rows);
dft(tempB, tempB, , B.rows); // multiply the spectrums;
// the function handles packed spectrum representations well
mulSpectrums(tempA, tempB, tempA, DFT_COMPLEX_OUTPUT);
//mulSpectrums(tempA, tempB, tempA, DFT_REAL_OUTPUT); // transform the product back from the frequency domain.
// Even though all the result rows will be non-zero,
// you need only the first C.rows of them, and thus you
// pass nonzeroRows == C.rows
dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows); // now copy the result back to C.
tempA(Rect(, , C.cols, C.rows)).copyTo(C); // all the temporary buffers will be deallocated automatically
} int main(int argc, char* argv[])
{
const char* filename = argc >= ? argv[] : "Lenna.png"; Mat I = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
if( I.empty())
return -; Mat kernel = (Mat_<float>(,) << , , , , , , , , );
cout << kernel; Mat floatI = Mat_<float>(I);// change image type into float
Mat filteredI;
convolveDFT(floatI, kernel, filteredI); normalize(filteredI, filteredI, , , CV_MINMAX); // Transform the matrix with float values into a
// viewable image form (float between values 0 and 1).
imshow("image", I);
imshow("filtered", filteredI);
waitKey(); } //一是输出Mat C应声明为引用;二是其中的mulSpectrums函数的第四个参数flag值没有指定,应指定为DFT_COMPLEX_OUTPUT或是DFT_REAL_OUTPUT. //main函数中首先按灰度图读入图像,然后创造一个平滑核kernel,将输入图像转换成float类型(注意这步是必须的,因为dft只能处理浮点数),在调用convolveDFT求出卷积结果后,将卷积结果归一化方便显示观看。 //需要注意的是,一般求法中,利用核游走整个图像进行卷积运算,实际上进行的是相关运算,真正意义上的卷积,应该首先把核翻转180度,再在整个图像上进行游走。OpenCV中的filter2D实际上做的也只是相关,而非卷积。
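To make that last remark concrete, here is a minimal sketch (my addition, not from the original post) of a true spatial-domain convolution done with filter2D by flipping the kernel first; floatI and kernel are the names from main above:

// filter2D computes correlation, so flip the kernel 180 degrees
// (flip around both axes) to get a genuine convolution for comparison.
Mat flippedKernel;
flip(kernel, flippedKernel, -1);
Mat convolved;
filter2D(floatI, convolved, -1, flippedKernel, Point(-1, -1), 0, BORDER_CONSTANT);

For the symmetric Gaussian kernel used here the flip changes nothing, but for an asymmetric kernel it is exactly the difference between correlation and convolution.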