# PCA via gradient ascent: find the first principal component of a 2-D dataset.
import numpy as np
import matplotlib.pyplot as plt
# Build a 100x2 sample matrix (rows = samples, columns = the two features).
X = np.empty((100,2)) # uninitialized 100x2 array; both columns filled below
X[:,0] = np.random.uniform(0.,100.,size =100) # feature 0: 100 uniform draws in [0, 100)
X[:,1] = 0.75*X[:,0] + 3.+ np.random.normal(0,10., size =100) # feature 1: linear in feature 0 plus Gaussian noise (mean 0, std 10)
# print(X)
# print(X[:,0])
# print(X[:,1])
plt.scatter(X[:,0],X[:,1]) # visualize the raw (correlated) samples
plt.show()
##去中心化
#demean
def demean(X):
    """Center the data: subtract each column's mean so features have zero mean."""
    column_means = X.mean(axis=0)
    return X - column_means
# Center the data (PCA requires zero-mean features) and re-plot it;
# the cloud keeps its shape but is now centered on the origin.
X_demean = demean(X)
plt.scatter(X_demean[:,0],X_demean[:,1])
plt.show()
#梯度上升法
def f(w,X):
    """Objective to maximize: mean squared projection of the samples onto w.

    Equals (1/n) * sum_i (x_i . w)^2 — the variance captured by direction w
    (for zero-mean X and unit-length w).
    """
    projections = X.dot(w)
    return (projections ** 2).sum() / X.shape[0]
#dot函数:运算时直接进行乘积
# 二维数组(矩阵)之间的运算,则得到的是矩阵积(mastrix product)
def df_math(w,X):
    """Analytic gradient of f with respect to w: (2/n) * X^T (X w)."""
    n_samples = X.shape[0]
    grad = X.T.dot(X.dot(w))
    return grad * (2.0 / n_samples)
def df_debug(w,X, epsilon = 0.0001):
    """Numerical gradient of f via central differences — for validating df_math.

    Each component i is approximated by (f(w + eps*e_i) - f(w - eps*e_i)) / (2*eps).
    Slow (one pair of f evaluations per dimension); debugging use only.
    """
    grad = np.empty(len(w))
    for i, _ in enumerate(w):
        w_plus = w.copy()
        w_minus = w.copy()
        w_plus[i] += epsilon
        w_minus[i] -= epsilon
        grad[i] = (f(w_plus, X) - f(w_minus, X)) / (2 * epsilon)
    return grad
def direction(w):
    """Normalize w to a unit vector (only the direction matters for PCA)."""
    magnitude = np.linalg.norm(w)  # Euclidean (L2) norm
    return w / magnitude
#np.linalg.norm:求范数,默认情况下,是求整体的矩阵元素平方和,再开根号
def gradient_ascent(df,X,initial_w,eta,n_iters = 1e4,epsilon = 1e-8):
w = direction(initial_w)
cur_iter = 0
while cur_iter



