# Define the input and label placeholders. For this autoencoder the label y_
# is the input x itself: the network is trained to reconstruct its input.
x = tf.compat.v1.placeholder(tf.float32, [None, 784])
y_ = tf.compat.v1.placeholder(tf.float32, [None, 784])
# Forward pass: reconstruction produced by the current network parameters.
y = inference(x)
# Reconstruction loss: mean of the summed squared error between output and target.
loss_mean = tf.reduce_mean(tf.reduce_sum(tf.square(y - y_)))
# Add the reconstruction loss to the 'losses' collection.
# NOTE(review): regularization terms are presumably added to the same
# collection inside inference() — confirm.
tf.compat.v1.add_to_collection('losses', loss_mean)
# Total loss: sum of everything accumulated in the 'losses' collection.
loss = tf.add_n(tf.compat.v1.get_collection('losses'))
# Exponentially decaying learning rate: starts at 0.9999 and is multiplied
# by 0.9 every 5000 steps (staircase decay).
learning_rate = tf.compat.v1.train.exponential_decay(
    0.9999, global_step, 5000, 0.9, staircase=True)
# Plain gradient descent on the total loss; minimize() increments global_step
# once per training step, which drives the learning-rate decay above.
train_step = tf.compat.v1.train.GradientDescentOptimizer(
    learning_rate).minimize(loss, global_step=global_step)
# Load the sample data.
data_feed = data_init()
# Variable-initializer op, run at the start of the training session below.
init_var = tf.compat.v1.global_variables_initializer()
# Run the training session, then visualize one reconstruction.
with tf.compat.v1.Session() as sess:
    sess.run(init_var)
    for i in range(TRAINING_STEPS):
        train_batch_x = data_batch_set(data_feed, feed_name='x_train')
        # The label is the sample itself (autoencoder target).
        sess.run(train_step, feed_dict={x: train_batch_x, y_: train_batch_x})
        if i % 500 == 0:
            # Periodically report the current total loss on the training batch.
            loss_val = sess.run(
                loss, feed_dict={x: train_batch_x, y_: train_batch_x})
            print('After %d training step(s), loss %f' % (i, loss_val))

    # Test: reconstruct one held-out image and show it beside the original.
    # NOTE(review): placed after the training loop, inside the session —
    # the original indentation was lost; confirm against the intended layout.
    test_batch_x = data_batch_set(data_feed, feed_name='x_test')
    test_x = test_batch_x[0]
    # Original image: rescale pixels back to [0, 255] and reshape to 28x28.
    # (assumes inputs are normalized to [0, 1] — TODO confirm in data_init)
    test_x_img = np.reshape(test_x * 255, (28, 28))
    # Reconstructed image: squash the raw network output into (0, 1) with a
    # sigmoid before rescaling, mirroring the input normalization.
    test_y = tf.nn.sigmoid(y)
    test_y = sess.run(test_y, feed_dict={x: [test_x]})
    test_y_img = np.reshape(test_y * 255, (28, 28))
    # Side-by-side comparison: original on the left, reconstruction right.
    plt.subplot(1, 2, 1)
    plt.title('origin')
    plt.imshow(test_x_img)
    plt.subplot(1, 2, 2)
    plt.title('forecast')
    plt.imshow(test_y_img)
    plt.show()