栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Python

KMeans算法

Python 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

KMeans算法

原理参考:
K-means聚类算法原理及python实现
Sklearn之KMeans算法

python实现:

import random
import pandas as pd
import numpy as np

class KMeans:
    """Plain k-means clustering over a 2-D numpy array of samples.

    dataSet: (n_samples, n_features) numpy array.
    k: number of clusters.
    """

    def __init__(self, dataSet, k):
        self.dataSet = dataSet
        self.k = k

    # Euclidean distance from every sample to every centroid.
    def calcDis(self, centroids):
        rows = []
        for sample in self.dataSet:
            # np.tile(sample, (k, 1)) stacks the sample k times so it can be
            # subtracted from all k centroids at once.
            delta = np.tile(sample, (self.k, 1)) - centroids
            rows.append(np.sum(delta ** 2, axis=1) ** 0.5)
        # Shape: (len(dataSet), k) — one distance row per sample.
        return np.array(rows)

    # One k-means step: assign samples to their nearest centroid and
    # recompute each centroid as the mean of its members.
    def classify(self, centroids):
        dists = self.calcDis(centroids)
        nearest = np.argmin(dists, axis=1)  # nearest-centroid index per sample
        # Group rows of the data by assigned centroid and average each group.
        newCentroids = pd.DataFrame(self.dataSet).groupby(nearest).mean().values
        # Returns (movement of each centroid, updated centroids).
        return newCentroids - centroids, newCentroids

    # Run k-means until no centroid moves; return centroids and clusters.
    def predict(self):
        # Initial centroids: k distinct samples chosen at random.
        picks = np.random.choice(self.dataSet.shape[0], size=self.k, replace=False)
        centroids = self.dataSet[picks, :]

        changed, newCentroids = self.classify(centroids)
        while np.any(changed != 0):
            changed, newCentroids = self.classify(newCentroids)
        centroids = newCentroids.tolist()

        # Final assignment: put every sample into its nearest cluster.
        labels = np.argmin(self.calcDis(centroids), axis=1)
        cluster = [[] for _ in range(self.k)]
        for idx, label in enumerate(labels):
            cluster[label].append(self.dataSet[idx])
        return centroids, cluster
 

if __name__ == '__main__':
    # Demo: cluster six 2-D points into two groups and print the result.
    data = np.array([[1, 1], [1, 2], [2, 1], [6, 4], [6, 3], [5, 4]])
    model = KMeans(data, 2)
    centers, groups = model.predict()
    print('质心为:%s' % centers)
    print('集群为:%s' % groups)

python调包:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn import metrics
import matplotlib.pyplot as plt

# Same toy data, clustered with scikit-learn's KMeans instead of the
# hand-written version above.
x = np.array([[1, 1], [1, 2], [2, 1], [6, 4], [6, 3], [5, 4]])
model = KMeans(n_clusters=2)
model.fit(x)

# Predicted cluster label per sample, used for colouring and scoring.
y_predict = model.predict(x)
plt.scatter(x[:, 0], x[:, 1], c=y_predict)
plt.show()
print(model.predict(x[:, :]))                    # labels for every sample
print(model.cluster_centers_)                    # fitted centroids
print(model.inertia_)                            # within-cluster sum of squares
print(metrics.silhouette_score(x, y_predict))    # clustering quality in [-1, 1]

C++实现:

#include <algorithm>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <vector>

// Print a 2-D float matrix to stdout, one row per line, followed by a
// blank line. The template arguments were stripped when this page was
// scraped; restored as std::vector<std::vector<float>> to match main().
// Pass by const reference to avoid copying the whole matrix.
void printMat(const std::vector<std::vector<float>>& mat)
{
	for (const auto& row : mat)
	{
		for (float v : row)
		{
			std::cout << v << " ";
		}
		std::cout << std::endl;
	}
	std::cout << std::endl;
}

// Return true iff every element of mat is exactly zero (used as the
// k-means convergence test: "no centroid moved").
// Restored element type lost in scraping; const ref avoids a copy, and
// we return early on the first non-zero instead of scanning the rest.
bool checkZeros(const std::vector<std::vector<float>>& mat)
{
	for (const auto& row : mat)
	{
		for (float v : row)
		{
			if (v != 0) return false;
		}
	}
	return true;
}

// For each row of clalist (distances from one sample to every centroid),
// return the column index of the smallest entry, i.e. the nearest centroid.
// std::min_element keeps the first minimum on ties, matching np.argmin in
// the Python version; it also drops the original float/INT_MAX sentinel,
// which would misbehave for distances above INT_MAX.
std::vector<int> getminDistIndices(const std::vector<std::vector<float>>& clalist)
{
	std::vector<int> minDistIndices(clalist.size());
	for (size_t i = 0; i < clalist.size(); i++)
	{
		const auto& row = clalist[i];
		minDistIndices[i] = static_cast<int>(
			std::min_element(row.begin(), row.end()) - row.begin());
	}
	return minDistIndices;
}

class KMeans
{
public:
	KMeans(std::vector> dataSet, int k) :m_dataSet(dataSet), m_k(k) {};

	std::vector> calcDis(std::vector> centroids)
	{
		std::vector> clalist;
		for (auto data : m_dataSet)
		{
			std::vector> diff(m_k);
			for (size_t i = 0; i < diff.size(); i++)
			{
				diff[i] = data;
			}
			for (size_t i = 0; i < diff.size(); i++)
			{
				for (size_t j = 0; j < diff[0].size(); j++)
				{
					diff[i][j] -= centroids[i][j];
					diff[i][j] = pow(diff[i][j], 2);
				}
			}
			std::vector squaredDist(diff.size());
			for (size_t i = 0; i < diff.size(); i++)
			{
				for (size_t j = 0; j < diff[0].size(); j++)
				{
					squaredDist[i] += diff[i][j];
				}
				squaredDist[i] = sqrt(squaredDist[i]);
			}
			clalist.push_back(squaredDist);
		}

		//printMat(clalist);
		return clalist;
	}

	void classify(std::vector> centroids, std::vector>& newCentroids, std::vector>& changed)
	{
		std::vector> clalist = calcDis(centroids);
		std::vector minDistIndices = getminDistIndices(clalist);

		newCentroids.resize(m_k, std::vector(m_dataSet[0].size()));
		for (size_t i = 0; i < m_dataSet[0].size(); i++)
		{
			std::vector sum(m_k);
			std::vector num(m_k, 0);
			for (size_t j = 0; j < m_dataSet.size(); j++)
			{
				sum[minDistIndices[j]] += m_dataSet[j][i];
				++num[minDistIndices[j]];
			}

			for (size_t j = 0; j < m_k; j++)
			{
				//std::cout << sum[j] <<" "<(m_dataSet[0].size()));
		for (size_t i = 0; i < changed.size(); i++)
		{
			for (size_t j = 0; j < changed[0].size(); j++)
			{
				changed[i][j] = newCentroids[i][j] - centroids[i][j];
			}
		}
	}

	void predict(std::vector>& centroids, std::vector>>& cluster)
	{
		srand((unsigned)time(NULL));
		std::vector random_indices;
		while (random_indices.size() < m_k)
		{
			int random_index = rand() % m_dataSet.size();
			if(find(random_indices.begin(), random_indices.end(), random_index)== random_indices.end())
				random_indices.push_back(random_index);
		}

		centroids.resize(m_k, std::vector(m_dataSet[0].size()));
		for (size_t i = 0; i < m_k; i++)
		{
			centroids[i] = m_dataSet[random_indices[i]];
		}

		std::vector> newCentroids;
		std::vector> changed;
		classify(centroids, newCentroids, changed);
		//printMat(centroids); printMat(newCentroids);

		while (!checkZeros(changed))
		{
			std::vector> copyCentroids = newCentroids;
			classify(copyCentroids, newCentroids, changed);
			//printMat(changed);
		}
		centroids = newCentroids;

		std::vector> clalist = calcDis(newCentroids);
		std::vector minDistIndices = getminDistIndices(clalist);
		//for (auto i : minDistIndices)	std::cout << i << std::endl;

		cluster.resize(m_k);
		for (size_t i = 0; i < minDistIndices.size(); i++)
		{
			cluster[minDistIndices[i]].push_back(m_dataSet[i]);
		}
	}

private:
	std::vector> m_dataSet;
	int m_k;
};


// Demo: cluster six 2-D points into two groups and print the centroids
// and the members of each cluster. (Template arguments restored — they
// were stripped when this page was scraped.)
int main(int argc, char* argv[])
{
	std::vector<std::vector<float>> dataSet = { {1, 1}, {1, 2}, {2, 1}, {6, 4}, {6, 3}, {5, 4} };
	int k = 2;
	KMeans kmeans(dataSet, k);

	std::vector<std::vector<float>> centroids;
	std::vector<std::vector<std::vector<float>>> cluster;
	kmeans.predict(centroids, cluster);
	printMat(centroids);
	printMat(cluster[0]);
	printMat(cluster[1]);

	system("pause");  // Windows-only "press any key"; harmless elsewhere
	return EXIT_SUCCESS;
}
转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/861477.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号