# 损失函数(y-y^hat)**2对标量b和向量W求偏导得它们的梯度
b_gradient (2 / cnt) * ((np.dot(W_current.T, X)[0, 0] b_current) - y)
W_gradient (2 / cnt) * ((np.dot(W_current.T, X)[0, 0] b_current)-y) * X
# 梯度下降发更新参数
new_W W_current - (learning_rate * W_gradient)
new_b b_current - (learning_rate * b_gradient)
return new_b, new_W
def lr(points, starting_b, starting_W, learning_rate, num_iterations):
    """Train a linear regression model by iterative gradient descent.

    :param points: training samples; converted to np.array before each
        gradient step (rows consumed by ``gradient`` / ``compute_error``)
    :param starting_b: initial bias, a scalar
    :param starting_W: initial weight vector; shape is (4, 1) here
    :param learning_rate: gradient-descent step size
    :param num_iterations: number of update iterations to run
    :return: tuple ``(b, W)`` — the final bias and weight vector
    """
    b = starting_b
    W = starting_W
    # update for several times
    for i in range(num_iterations):
        # one gradient-descent step; np.array(points) presumably yields the
        # (features, labels) layout gradient() expects — confirm with caller
        b, W = gradient(b, W, np.array(points), learning_rate)
        # progress log: iteration number (1-based) and current loss
        print('第{}次 损失 {}'.format(i + 1, compute_error(b, W, points)))
    return b, W