# NOTE: The article in the reference link contained an error, which is
# corrected here (参考链接中的文章有错误, 已更正).
# The original also required a dataset file; it has been replaced with
# arrays assigned directly in code (数据集文件已替换为直接赋值的数组).
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
class SimpleDataReader(object):
    """Minimal training-data provider for the linear-regression demo.

    The original version loaded an ``.npz`` file; the samples are now
    hard-coded, so ``data_file`` is retained only for interface
    compatibility.
    """

    def __init__(self, data_file):
        # Kept for compatibility with the file-based original; unused by ReadData.
        self.train_file_name = data_file
        self.num_train = 0      # number of training samples
        self.XTrain = None      # 1-D array of inputs
        self.YTrain = None      # 1-D array of targets

    def ReadData(self):
        """Populate XTrain/YTrain with the hard-coded sample arrays."""
        # Original file-based loading, kept for reference:
        #   data = np.load(self.train_file_name)
        #   self.XTrain, self.YTrain = data["data"], data["label"]
        xs = (0.95, 3, 4, 5.07, 6.03, 8.21, 8.85, 12.02, 15)
        ys = (5.1, 8.7, 11.5, 13, 15.3, 18, 21, 26.87, 32.5)
        self.XTrain = np.array(xs, dtype=float)
        self.YTrain = np.array(ys, dtype=float)
        self.num_train = len(self.XTrain)

    def GetSingleTrainSample(self, iteration):
        """Return the (x, y) pair at index ``iteration``."""
        return self.XTrain[iteration], self.YTrain[iteration]

    def GetWholeTrainSamples(self):
        """Return the full (XTrain, YTrain) arrays."""
        return self.XTrain, self.YTrain
class NeuralNet(object):
    """Single-neuron linear model z = w*x + b trained by SGD on squared loss.

    Parameters
    ----------
    eta : float
        Learning rate for the gradient-descent updates.
    """

    def __init__(self, eta):
        self.eta = eta
        self.w = 0  # weight, updated during training
        self.b = 0  # bias, updated during training

    def __forward(self, x):
        """Compute the prediction z = x*w + b (broadcasts over arrays)."""
        return x * self.w + self.b

    def __backward(self, x, y, z):
        """Gradients of the squared loss L = (z - y)^2 / 2.

        dL/dz = z - y; dL/dw = (z - y) * x; dL/db = z - y.
        BUGFIX: the previous code set dw = dz, dropping the chain-rule
        factor x (dz/dw = x), which gives a wrong weight gradient.
        """
        dz = z - y
        dw = dz * x
        db = dz
        return dw, db

    def __update(self, dw, db):
        """One gradient-descent step on w and b."""
        self.w = self.w - self.eta * dw
        self.b = self.b - self.eta * db

    def train(self, dataReader):
        """One pass of per-sample SGD over all samples in dataReader."""
        for i in range(dataReader.num_train):
            x, y = dataReader.GetSingleTrainSample(i)
            z = self.__forward(x)
            dw, db = self.__backward(x, y, z)
            self.__update(dw, db)

    def inference(self, x):
        """Public prediction API: returns x*w + b."""
        return self.__forward(x)
if __name__ == '__main__':
    # Load the training samples (hard-coded inside ReadData; the file
    # name argument is kept only for interface compatibility).
    sdr = SimpleDataReader('ch04.npz')
    sdr.ReadData()

    # Train a one-neuron linear model with per-sample gradient descent.
    eta = 0.1
    net = NeuralNet(eta)
    net.train(sdr)

    # Report the learned parameters.
    print("w=%f,b=%f" % (net.w, net.b))

    # Plot the training points and the fitted regression line.
    trainX, trainY = sdr.GetWholeTrainSamples()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(trainX, trainY)
    x = np.arange(0, 15, 0.01)
    # BUGFIX: the original wrapped inference in
    # np.vectorize(net.inference, excluded=['x']); `excluded` by name only
    # affects keyword arguments, so it was a no-op for the positional call
    # f(x). inference computes x*w + b, which already broadcasts over
    # numpy arrays, so it is called directly.
    plt.plot(x, net.inference(x), color='red')
    plt.show()
# Ref:
# - 通过神经网络实现线性回归的拟合
#   (fitting linear regression with a neural network)