栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Python

神经网络预测PM2.5

Python 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

神经网络预测PM2.5

import tensorflow as tf
import numpy as np



"""定义数据形式"""


x_data = np.asarray([[16,4,1020,1.79,0,0],
[15,4,1020,2.68,0,0],
[11,5,1021,3.57,0,0],
[7,5,1022,5.36,1,0],
[7,5,1022,6.25,2,0],
[7,6,1022,7.14,3,0],
[7,6,1023,8.93,4,0],
[7,5,1024,10.72,0,0],
[8,6,1024,12.51,0,0],
[7,5,1025,14.3,0,0],
[7,5,1026,17.43,1,0],
[8,5,1026,20.56,0,0],
[8,5,1026,23.69,0,0],
[8,5,1025,27.71,0,0],
[9,5,1025,31.73,0,0],
[9,5,1025,35.75,0,0],
[9,5,1026,37.54,0,0],
[8,5,1027,39.33,0,0],
[8,5,1027,42.46,0,0],
[8,5,1028,44.25,0,0]])
x_data1 = np.asarray([[7,5,1028,46.04,0,0],
[7,5,1027,49.17,1,0],
 [8,6,1028,52.3,2,0],
[8,6,1027,55.43,3,0],
[7,6,1027,58.56,4,0],
[8,6,1026,61.69,5,0],
 [8,7,1026,65.71,6,0],
  [8,6,1026,68.84,7,0],
  [8,7,1025,72.86,8,0],                   
[9,8,1024,76.88,9,0]])
X= np.asarray([[16,4,1020,1.79,0,0],
[15,4,1020,2.68,0,0],
[11,5,1021,3.57,0,0],
[7,5,1022,5.36,1,0],
[7,5,1022,6.25,2,0],
[7,6,1022,7.14,3,0],
[7,6,1023,8.93,4,0],
[7,5,1024,10.72,0,0],
[8,6,1024,12.51,0,0],
[7,5,1025,14.3,0,0],
[7,5,1026,17.43,1,0],
[8,5,1026,20.56,0,0],
[8,5,1026,23.69,0,0],
[8,5,1025,27.71,0,0],
[9,5,1025,31.73,0,0],
[9,5,1025,35.75,0,0],
[9,5,1026,37.54,0,0],
[8,5,1027,39.33,0,0],
[8,5,1027,42.46,0,0],
[8,5,1028,44.25,0,0],
 [7,5,1028,46.04,0.0],
[7,5,1027,49.17,1.0],
 [8,6,1028,52.3,2,0],
[8,6,1027,55.43,3,0],
[7,6,1027,58.56,4,0],
[8,6,1026,61.69,5,0],
 [8,7,1026,65.71,6,0],
  [8,6,1026,68.84,7,0],
  [8,7,1025,72.86,8,0],                   
[9,8,1024,76.88,9,0]])              
y_data = np.asarray([129,148,159,181,138,109,105,124,120,132,140,152,148,164,158,154,159,164,170,149])  
y_data1 = np.asarray([154,164,156,126,90,63,65,55,65,83])
# 引入数据
Max= np.max(X,axis = 0)
Min= np.min(X,axis = 0)
# 求各列最大值、最小值


w1 = tf.Variable(tf.random_normal([6,3],stddev=1.0,seed=1))
b1 = tf.Variable(tf.random_normal([3],stddev=1.0,seed=1))
w2 = tf.Variable(tf.random_normal([3,1],stddev=1.0,seed=1))
b2 = tf.Variable(tf.random_normal([1],stddev=1.0,seed=1))
xs=tf.placeholder(tf.float32,[None,6])
ys=tf.placeholder(tf.float32,[None,1])
keep_prob=tf.placeholder(tf.float32)
# 定义变量
"""建立网络"""
#定义隐藏层,输入6个节点,输出3个节点
l1= tf.nn.sigmoid(tf.matmul(xs,w1) + b1)   
# dropout
l1_drop = tf.nn.dropout(l1,keep_prob=keep_prob)
# 定义输出层,输入3个节点,输出1个节点
prediction= tf.matmul(l1_drop,w2) + b2

"""反向反馈"""
#损失函数,算出的是每个例子的平方,要求和(reduction_indices=[1],按行求和),再求均值
loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))

"""训练"""
#优化算法,minimize(loss)以0.2的学习率对loss进行减小
train_step= tf.train.GradientDescentOptimizer(0.05).minimize(loss)


with tf.Session() as sess:
    # Initialize all graph variables before training.
    init = tf.global_variables_initializer()
    sess.run(init)

    # 501 passes over the training set, one gradient step per sample
    # (batch size 1). Fitted values are logged on the first and last pass.
    for i in range(501):
        for (x, y) in zip(x_data, y_data):
            # Reshape 1-D sample/target into the (batch, features) /
            # (batch, 1) shapes the placeholders expect.
            x = x.reshape([-1, 6])
            y = y.reshape([-1, 1])

            sess.run(train_step, feed_dict={xs: x, ys: y, keep_prob: 0.6})
            if i % 500 == 0:
                print("Flood type impact factor")
                print(x)
                print("flood type")
                print(y)
                print("flood type forecast")
                # keep_prob=1.0 here: dropout must be disabled when
                # evaluating, otherwise hidden units are randomly zeroed
                # and the logged prediction/loss are noisy. (The original
                # used 0.6 for these evaluations and also ran an extra,
                # accidental train_step per logged sample; both fixed.)
                print(sess.run(prediction, feed_dict={xs: x, keep_prob: 1.0}))
                print("the loss")
                print(sess.run(loss, feed_dict={xs: x, ys: y, keep_prob: 1.0}))

    print('training is done!')   # Training finished.

    # Predict on the held-out samples, dropout disabled (keep_prob=1.0).
    for (x1, y1) in zip(x_data1, y_data1):
        x1 = x1.reshape([-1, 6])
        y1 = y1.reshape([-1, 1])
        print("Flood type impact factor")
        print(x1)
        print("flood type")
        print(y1)
        print("flood type forecast")
        print(sess.run(prediction, feed_dict={xs: x1, keep_prob: 1.0}))  # predicted value
        print("the loss")
        print(sess.run(loss, feed_dict={xs: x1, ys: y1, keep_prob: 1.0}))

转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/275172.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号