[OpenCV] Samples 03: cout_mat

Date: 2023-03-09 19:14:01

Note the meaning of the Mat arguments passed to kmeans.

Extension: clustering high-dimensional vectors.
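
A minimal sketch of that extension (the sample count N, dimension D, and cluster count K below are made-up values): cv::kmeans accepts any single-channel CV_32F matrix whose rows are samples and whose columns are feature dimensions, so a set of high-dimensional vectors can be clustered directly:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // N samples of D-dimensional feature vectors; random values, for illustration only.
    const int N = 100, D = 16, K = 3;
    cv::Mat samples(N, D, CV_32F);
    cv::randu(samples, cv::Scalar::all(0), cv::Scalar::all(1));

    cv::Mat labels, centers;
    cv::kmeans(samples, K, labels,
               cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
               3, cv::KMEANS_PP_CENTERS, centers);

    // labels is N x 1 CV_32S, centers is K x D CV_32F (one center per row).
    std::cout << labels.size() << " " << centers.size() << std::endl;
    return 0;
}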

1. Pixel clustering


#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream> using namespace cv;
using namespace std; // static void help()
// {
// cout << "\nThis program demonstrates kmeans clustering.\n"
// "It generates an image with random points, then assigns a random number of cluster\n"
// "centers and uses kmeans to move those cluster centers to their representitive location\n"
// "Call\n"
// "./kmeans\n" << endl;
// } int main( int /*argc*/, char** /*argv*/ )
{
const int MAX_CLUSTERS = 5;
Scalar colorTab[] =
{
Scalar(0, 0, 255),
Scalar(0,255,0),
Scalar(255,100,100),
Scalar(255,0,255),
Scalar(0,255,255)
}; Mat img(500, 500, CV_8UC3);
RNG rng(12345); for(;;)
{
//Jeff --> The second parameter is non-inclusive boundary.
int k, clusterCount = rng.uniform(2, MAX_CLUSTERS+1);
int i, sampleCount = rng.uniform(2, 1001);
// int i, sampleCount = 10; Mat points(sampleCount, 1, CV_32FC2), labels;

     //一般来说,没有必要。sampleCount都远大于ClusterCount。
// clusterCount = MIN(clusterCount, sampleCount);
Mat centers; /* Jeff --> generate random sample from multigaussian distribution 以某一个中心点,二维高斯分布分配点;主要是一个数学技巧。*/
for( k = 0; k < clusterCount; k++ )
{
Point center;
center.x = rng.uniform(0, img.cols);
center.y = rng.uniform(0, img.rows); Mat pointChunk = points.rowRange(k*sampleCount/clusterCount,
k == clusterCount - 1 ? sampleCount :
(k+1)*sampleCount/clusterCount);
rng.fill(pointChunk, RNG::NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05));
cout << pointChunk << endl;
}

//洗牌
randShuffle(points, 1, &rng); std::cout << points << std::endl; //Jeff --> Mat is vector here, including a list of points.
// labels: index of cluster for each points.
kmeans(points, clusterCount, labels,
TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 10, 1.0),
3, KMEANS_PP_CENTERS, centers); //Jeff --> Draw point (tiny circle) with its color on black background.
img = Scalar::all(0); // Step One: show sample points.
for( i = 0; i < sampleCount; i++ )
{
int clusterIdx = labels.at<int>(i);
Point ipt = points.at<Point2f>(i);
circle( img, ipt, 2, colorTab[clusterIdx], FILLED, LINE_AA );
} // Step Two: show central points.
for( i = 0; i < clusterCount; i++ )
{
std::cout << centers.at<Point2f>(i) << std::endl;
} imshow("clusters", img); char key = (char)waitKey();
if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
break;
} return 0;
}
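
A note on the Mat arguments in the sample above: points is a sampleCount x 1 two-channel CV_32FC2 matrix, which kmeans interprets as sampleCount two-dimensional samples; labels comes back as sampleCount x 1 CV_32S, and centers as a clusterCount x 2 single-channel CV_32F matrix with one center per row (which is why centers.at<Point2f>(i) reads out row i as a point). A few illustrative checks that could be dropped in right after the kmeans() call:

// Illustrative sanity checks on the shapes used above (not part of the original sample).
CV_Assert(points.type()  == CV_32FC2 && points.rows  == sampleCount  && points.cols == 1);
CV_Assert(labels.type()  == CV_32SC1 && labels.rows  == sampleCount  && labels.cols == 1);
CV_Assert(centers.type() == CV_32FC1 && centers.rows == clusterCount && centers.cols == 2);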

2. K-means color reduction (quantization) of an image

g++ -std=c++11 -pthread  -fpermissive  main.cpp -o output `pkg-config --cflags --libs opencv` -ldl

From: http://seiya-kumada.blogspot.com/2013/03/k-means-clustering.html (very good)

#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <cassert>

using namespace cv;
using namespace std;

void show_result(const cv::Mat& labels, const cv::Mat& centers, int height, int width)
{
    std::cout << "===\n";
    std::cout << "labels: " << labels.rows << " " << labels.cols << std::endl;
    std::cout << "centers: " << centers.rows << " " << centers.cols << std::endl;
    assert(labels.type() == CV_32SC1);
    assert(centers.type() == CV_32FC1);

    cv::Mat rgb_image(height, width, CV_8UC3);
    cv::MatIterator_<cv::Vec3b> rgb_first = rgb_image.begin<cv::Vec3b>();
    cv::MatIterator_<cv::Vec3b> rgb_last = rgb_image.end<cv::Vec3b>();
    cv::MatConstIterator_<int> label_first = labels.begin<int>();

    // Scale the centers back to 8-bit and view them as 3-channel colors (K x 1, CV_8UC3).
    cv::Mat centers_u8;
    centers.convertTo(centers_u8, CV_8UC1, 255.0);
    cv::Mat centers_u8c3 = centers_u8.reshape(3);

    // Paint every pixel with the color of its cluster center.
    while ( rgb_first != rgb_last ) {
        const cv::Vec3b& rgb = centers_u8c3.ptr<cv::Vec3b>(*label_first)[0];
        *rgb_first = rgb;
        ++rgb_first;
        ++label_first;
    }
    cv::imshow("tmp", rgb_image);
    cv::imwrite("./result.jpg", rgb_image);
    cv::waitKey();
}

int main(int argc, const char * argv[])
{
    cv::Mat image = cv::imread("./d1.jpg");
    if ( image.empty() ) {
        std::cout << "unable to load an input image\n";
        return -1;
    }

    std::cout << "image: " << image.rows << ", " << image.cols << std::endl;
    assert(image.type() == CV_8UC3);
    cv::imshow("image", image);

    // One row per pixel, one column per channel: (H*W) x 3, CV_8UC1.
    cv::Mat reshaped_image = image.reshape(1, image.cols * image.rows);
    std::cout << "reshaped image: " << reshaped_image.rows << ", " << reshaped_image.cols << std::endl;
    assert(reshaped_image.type() == CV_8UC1);
    //check0(image, reshaped_image);

    cv::Mat reshaped_image32f;
    reshaped_image.convertTo(reshaped_image32f, CV_32FC1, 1.0 / 255.0);
    std::cout << "reshaped image 32f: " << reshaped_image32f.rows << ", " << reshaped_image32f.cols << std::endl;
    assert(reshaped_image32f.type() == CV_32FC1);

    cv::Mat labels;
    int cluster_number = 10;   // number of colors to keep (value chosen here for illustration)
    cv::TermCriteria criteria {cv::TermCriteria::COUNT, 100, 1};
    cv::Mat centers;
    cv::kmeans(reshaped_image32f, cluster_number, labels, criteria, 1, cv::KMEANS_RANDOM_CENTERS, centers);

    show_result(labels, centers, image.rows, image.cols);
    return 0;
}
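
The key trick in the code above is the pair of reshape() calls: an H x W CV_8UC3 image is viewed as (H*W) rows of 3 single-channel columns, so every pixel becomes one BGR sample for kmeans, and reshape(3) later turns the K x 3 matrix of centers back into 3-channel colors. A minimal sketch of just that round trip (the 4 x 4 size is arbitrary):

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat image(4, 4, CV_8UC3, cv::Scalar(10, 20, 30));   // tiny dummy image

    // One row per pixel, one column per channel: (H*W) x 3, single channel.
    cv::Mat samples = image.reshape(1, image.rows * image.cols);
    std::cout << samples.rows << " x " << samples.cols
              << ", channels = " << samples.channels() << std::endl;   // 16 x 3, channels = 1

    // Fold the 3 columns back into one 3-channel column: (H*W) x 1, CV_8UC3.
    cv::Mat back = samples.reshape(3);
    std::cout << back.rows << " x " << back.cols
              << ", channels = " << back.channels() << std::endl;      // 16 x 1, channels = 3
    return 0;
}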

3. kmeans support for an ROI

Source: https://blog.****.net/fengbingchun/article/details/79395298

double kmeans( InputArray data, int K, InputOutputArray bestLabels,
               TermCriteria criteria,
               int attempts, int flags, OutputArray centers = noArray() );

The interface is declared in include/opencv2/core.hpp and implemented in modules/core/src/kmeans.cpp.

(1) data: a cv::Mat in which each row is one sample (one feature vector), i.e. mat.cols = feature length and mat.rows = number of samples; only float data is supported;

(2) K: the number of clusters to partition the data into;

(3) bestLabels: a cv::Mat of size (number of samples) x 1, i.e. mat.cols = 1 and mat.rows = number of samples; it is the output of the K-Means algorithm and stores, for each sample, the label of the cluster it was assigned to;

(4) criteria: a TermCriteria object giving the termination criteria of the iteration; you can specify a maximum number of iterations, a desired accuracy, or both;

(5) attempts: how many times the K-Means algorithm is run; each run produces a different result, and the best one is returned;

(6) flags: the method used to initialize the cluster centers; three are currently supported: KMEANS_RANDOM_CENTERS, KMEANS_PP_CENTERS and KMEANS_USE_INITIAL_LABELS;

(7) centers: a cv::Mat holding the final cluster centers, with mat.cols = feature length and mat.rows = K.
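
A minimal, self-contained sketch that maps onto the seven parameters described above (the data values, K and attempts are arbitrary):

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // (1) data: one sample per row, one feature per column, float only (8 samples, 2 features).
    float raw[8][2] = { {1,1}, {1,2}, {2,1}, {2,2}, {8,8}, {8,9}, {9,8}, {9,9} };
    cv::Mat data(8, 2, CV_32F, raw);

    cv::Mat bestLabels, centers;
    double compactness = cv::kmeans(
        data,
        2,                                  // (2) K: number of clusters
        bestLabels,                         // (3) output: 8 x 1 CV_32S, one cluster index per sample
        cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),  // (4) criteria
        3,                                  // (5) attempts: the best of 3 runs is returned
        cv::KMEANS_PP_CENTERS,              // (6) center initialization method
        centers);                           // (7) output: 2 x 2 CV_32F, one center per row

    std::cout << "compactness = " << compactness << "\n"
              << "labels:\n" << bestLabels << "\n"
              << "centers:\n" << centers << std::endl;
    return 0;
}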

// Color dimension reduction
Mat processTagByKmean(Mat3b const tag, Option option)
{
    int K = option.knnClusterNum;

    // 0. Prepare arguments for kmeans: one row per pixel, one column per channel.
    cv::Mat reshaped_tag = tag.reshape(1, tag.cols * tag.rows);
    cv::Mat reshaped_tag32f, labels, centers;
    reshaped_tag.convertTo(reshaped_tag32f, CV_32FC1, 1.0 / 255.0);

    // ------------------------------------------------------------------
    // 1. do kmeans (iteration count and attempts chosen here for illustration)
    cv::kmeans(reshaped_tag32f, K, labels,
               TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),
               3, KMEANS_PP_CENTERS, centers);

    // ------------------------------------------------------------------
    // 2. convert to rgb mat: paint every pixel with its cluster-center color.
    cv::Mat rgb_tag(tag.rows, tag.cols, CV_8UC3);
    cv::MatIterator_<cv::Vec3b> rgb_first = rgb_tag.begin<cv::Vec3b>();
    cv::MatIterator_<cv::Vec3b> rgb_last = rgb_tag.end<cv::Vec3b>();
    cv::MatConstIterator_<int> label_first = labels.begin<int>();

    cv::Mat centers_u8;
    centers.convertTo(centers_u8, CV_8UC1, 255.0);
    cv::Mat centers_u8c3 = centers_u8.reshape(3);

    while (rgb_first != rgb_last)
    {
        const cv::Vec3b &rgb = centers_u8c3.ptr<cv::Vec3b>(*label_first)[0];
        *rgb_first = rgb;
        ++rgb_first;
        ++label_first;
    }

    return rgb_tag;
}

Source: https://blog.****.net/qq_22764813/article/details/52135686

If a Mat's depth or channel count does not meet the requirements above (i.e. what imwrite() accepts), use convertTo() and cvtColor() to convert it first.
convertTo() converts between Mats of different data types, e.g. a float Mat can be converted into a type that imwrite() accepts.
cvtColor() converts between Mats with different channel counts: its fourth parameter can set the number of channels of the destination Mat (it is usually left at its default, since the function is normally used for color-space conversion).
Alternatively, you can skip imwrite() entirely and store the data in XML or YAML through the generic XML I/O interface.
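
For example (file names are illustrative), a float Mat can be scaled back to 8-bit with convertTo() before calling imwrite(), or written out directly as YAML/XML via cv::FileStorage:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat m32f(2, 3, CV_32FC3);
    cv::randu(m32f, cv::Scalar::all(0), cv::Scalar::all(1));   // float values in [0, 1]

    // convertTo(): change the depth (float -> 8-bit) so that imwrite() accepts it.
    cv::Mat m8u;
    m32f.convertTo(m8u, CV_8UC3, 255.0);
    cv::imwrite("converted.png", m8u);

    // Or skip imwrite() and store the raw float data with the generic XML/YAML I/O interface.
    cv::FileStorage fs("data.yml", cv::FileStorage::WRITE);
    fs << "mat32f" << m32f;
    fs.release();
    return 0;
}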