# -*- coding:utf-8 -*-
# &Author AnFany
# Supports multi-dimensional output
import numpy as np
import tensorflow as tf

'''Training function built on TensorFlow'''
# Activation helper: affine transform followed by the chosen activation function
def activate(input_layer, weights, biases, actfunc):
    layer = tf.add(tf.matmul(input_layer, weights), biases)
    if actfunc == 'relu':
        return tf.nn.relu(layer)
    elif actfunc == 'tanh':
        return tf.nn.tanh(layer)
    elif actfunc == 'sigmoid':
        return tf.nn.sigmoid(layer)

# The weight initialization scheme depends strongly on the activation function used:
# sigmoid: Xavier, tanh: Xavier, relu: He
# Build the training function
def Ten_train(xdata, ydata, prexdata, preydata, hiddenlayers=3, hiddennodes=100,
              learn_rate=0.05, itertimes=100000, batch_size=200, activate_func='sigmoid', break_error=0.0043):
    # Assemble the neural network
    Input_Dimen = len(xdata[0])
    Unit_Layers = [Input_Dimen] + [hiddennodes] * hiddenlayers + [len(ydata[0])]  # input dim, hidden layer sizes, output dim
    # Placeholders
    x_data = tf.placeholder(shape=[None, Input_Dimen], dtype=tf.float32, name='x_data')
    y_target = tf.placeholder(shape=[None, len(ydata[0])], dtype=tf.float32)
    # Dynamically named variables
    VAR_NAME = locals()
    for jj in range(hiddenlayers + 1):
        VAR_NAME['weight%s' % jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]), dtype=tf.float32,
                                                name='weight%s' % jj) / np.sqrt(Unit_Layers[jj])  # Xavier-style scaling for sigmoid/tanh
        # VAR_NAME['weight%s' % jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]), dtype=tf.float32,
        #                                         name='weight%s' % jj) / np.sqrt(Unit_Layers[jj] / 2)  # He-style scaling for relu
        VAR_NAME['bias%s' % jj] = tf.Variable(tf.random_normal([Unit_Layers[jj + 1]], stddev=10, name='bias%s' % jj),
                                              dtype=tf.float32)
        if jj == 0:
            VAR_NAME['ooutda%s' % jj] = activate(x_data, eval('weight%s' % jj), eval('bias%s' % jj), actfunc=activate_func)
        else:
            VAR_NAME['ooutda%s' % jj] = activate(eval('ooutda%s' % (jj - 1)), eval('weight%s' % jj),
                                                 eval('bias%s' % jj), actfunc=activate_func)
    # Mean squared error
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(y_target - eval('ooutda%s' % hiddenlayers)), reduction_indices=[1]))
    # Optimizer
    my_opt = tf.train.AdamOptimizer(learn_rate)
    train_step = my_opt.minimize(loss)
    # Variable initialization
    init = tf.global_variables_initializer()
    # Dictionary storing the errors of every iteration
    accudict = {}
    loss_vec = []  # training error
    loss_pre = []  # validation error
    accunum = np.inf
    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(init)
        for i in range(itertimes):
            # Sample a random mini-batch without replacement
            rand_index = np.random.choice(len(xdata), size=batch_size, replace=False)
            rand_x = xdata[rand_index]
            rand_y = ydata[rand_index]
            sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
            temp_loss = sess.run(loss, feed_dict={x_data: xdata, y_target: ydata})
            temp_losspre = sess.run(loss, feed_dict={x_data: prexdata, y_target: preydata})
            loss_vec.append(temp_loss)
            loss_pre.append(temp_losspre)
            accudict[i] = [temp_loss, temp_losspre]
            # Report progress so training can be monitored
            if (i + 1) % 20 == 0:
                print('Generation: ' + str(i + 1) + '. Normalized training loss = ' + str(temp_loss) +
                      '. Normalized validation loss = ' + str(temp_losspre))
            # Early stopping: threshold chosen from experience; with stochastic descent the loss may fluctuate early on
            if temp_loss < break_error:
                break
            # Among all iterations, keep the parameters of the one with the lowest combined error
            zongheaccu = 0.01 * temp_loss + 0.99 * temp_losspre
            if zongheaccu < accunum:
                accunum = zongheaccu
                # Save the model
                saver.save(sess, './pm25', global_step=i)  # mind the save path
        sign = min(accudict.items(), key=lambda d: 0.01 * d[1][0] + 0.99 * d[1][1])[0]
    return loss_vec, loss_pre, sign, hiddenlayers
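

# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the original file). It assumes
# hypothetical, randomly generated training/validation arrays purely for
# illustration; in practice xdata/ydata and prexdata/preydata would be the
# normalized training and validation sets.
if __name__ == '__main__':
    # Hypothetical data: 500 training and 100 validation samples,
    # 8 input features, 2 output targets, values scaled to [0, 1].
    train_x = np.random.rand(500, 8)
    train_y = np.random.rand(500, 2)
    valid_x = np.random.rand(100, 8)
    valid_y = np.random.rand(100, 2)
    # Deliberately small settings so the sketch runs quickly.
    tr_loss, va_loss, best_step, layers = Ten_train(train_x, train_y, valid_x, valid_y,
                                                    hiddenlayers=2, hiddennodes=20,
                                                    itertimes=200, batch_size=50)
    print('Best checkpoint saved at step %d (%d hidden layers).' % (best_step, layers))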