# -*- coding:utf-8 -*-
# &Author AnFany
'''Part 1: Libraries'''
import tensorflow as tf
import numpy as np

'''Functions'''
# Convert the network's output into one-hot class predictions (argmax per row)
def judge(ydata):
    maxnum = np.max(ydata, axis=1)
    lastdata = []
    for ii in range(len(ydata)):
        maxindex = list(ydata[ii]).index(maxnum[ii])
        fu = [0] * len(ydata[0])
        fu[maxindex] = 1
        lastdata.append(fu)
    return np.array(lastdata)

# Compare predictions with the true one-hot labels and return the classification accuracy
def outvsreal(outdata, realdata):
    subdata = outdata - realdata
    sundata = np.sum(np.abs(subdata), axis=1)
    correct = list(sundata).count(0)
    return correct / len(outdata)
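
# A minimal illustration of the two helpers above (toy values assumed for illustration,
# not part of the original script): judge() keeps only the largest entry per row as a
# one-hot vector, and outvsreal() counts the rows where prediction and label agree exactly.
#   judge(np.array([[0.2, 0.7, 0.1], [0.6, 0.3, 0.1]]))        ->  [[0, 1, 0], [1, 0, 0]]
#   outvsreal(judge(...), np.array([[0, 1, 0], [1, 0, 0]]))    ->  1.0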

'''Build the training function with TensorFlow'''

# Activation helper: affine transform followed by the chosen activation
def activate(input_layer, weights, biases, actfunc):
    layer = tf.add(tf.matmul(input_layer, weights), biases)
    if actfunc == 'relu':
        return tf.nn.relu(layer)
    elif actfunc == 'tanh':
        return tf.nn.tanh(layer)
    elif actfunc == 'sigmoid':
        return tf.nn.sigmoid(layer)
    elif actfunc == 'linear':
        return layer

# The weight-initialization scheme depends heavily on the activation function:
# sigmoid: Xavier   tanh: Xavier   relu: He

# Build the training function
def Ten_train(xdata, ydata, addxdata, addydata, hiddenlayers=3, hiddennodes=100,
              learn_rate=0.02, itertimes=20, batch_size=200, activate_func='tanh'):
    # Start assembling the neural network
    Input_Dimen = len(xdata[0])
    Unit_Layers = [Input_Dimen] + [hiddennodes] * hiddenlayers + [len(ydata[0])]  # input dimension, hidden-layer sizes, output dimension
    # Create placeholders
    x_data = tf.placeholder(shape=[None, Input_Dimen], dtype=tf.float32, name='x_data')
    y_target = tf.placeholder(shape=[None, len(ydata[0])], dtype=tf.float32)
    # Dynamically named variables: per-layer weights, biases and outputs are stored in
    # locals() and retrieved below with eval('weight%s' % jj) etc.
    VAR_NAME = locals()
    for jj in range(hiddenlayers + 1):
        # Xavier-style initialization (suitable for sigmoid / tanh)
        VAR_NAME['weight%s' % jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]) / np.sqrt(Unit_Layers[jj]),
                                                dtype=tf.float32, name='Weight%s' % jj)
        # He-style initialization (suitable for relu):
        # VAR_NAME['weight%s' % jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]) / np.sqrt(Unit_Layers[jj] / 2),
        #                                         dtype=tf.float32, name='weight%s' % jj)
        VAR_NAME['bias%s' % jj] = tf.Variable(tf.random_normal([Unit_Layers[jj + 1]], stddev=10), dtype=tf.float32, name='Bias%s' % jj)
        if jj == 0:
            VAR_NAME['ooutda%s' % jj] = activate(x_data, eval('weight%s' % jj), eval('bias%s' % jj),
                                                 actfunc=activate_func)
        elif jj == hiddenlayers:
            VAR_NAME['ooutda%s' % jj] = activate(eval('ooutda%s' % (jj - 1)), eval('weight%s' % jj),
                                                 eval('bias%s' % jj), actfunc='linear')  # the output layer uses a linear activation
        else:
            VAR_NAME['ooutda%s' % jj] = activate(eval('ooutda%s' % (jj - 1)), eval('weight%s' % jj),
                                                 eval('bias%s' % jj), actfunc=activate_func)
    # Softmax of the network output (class probabilities; not used further, since the loss below works on the raw logits)
    uuu = tf.nn.softmax(eval('ooutda%s' % hiddenlayers))
    # Cross-entropy loss on the logits
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_target, logits=eval('ooutda%s' % hiddenlayers)))
    # Output tensor used for computing accuracy
    accu = eval('ooutda%s' % hiddenlayers)
    # Optimizer
    # my_opt = tf.train.GradientDescentOptimizer(learn_rate)
    my_opt = tf.train.AdamOptimizer(learn_rate)
    train_step = my_opt.minimize(loss)
    # Initialization
    init = tf.global_variables_initializer()
    loss_vec = []      # training loss
    loss_vec_add = []  # validation loss
    acc_vec = []       # training accuracy
    acc_vec_add = []   # validation accuracy
    # Weights and biases to be saved
    graph = tf.get_default_graph()
    saver = tf.train.Saver(max_to_keep=1)
    sess = tf.Session()
    # Dictionary storing the accuracies of every epoch
    accudict = {}
    accunum = 0
    sess.run(init)
    for i in range(itertimes):  # over all epochs, keep the one with the best combined (training + validation) accuracy
        for jj in range(int(len(xdata) / batch_size)):
            rand_index = np.random.choice(len(xdata), size=batch_size, replace=False)
            rand_x = xdata[rand_index]
            rand_y = ydata[rand_index]
            # Train on the mini-batch
            sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
        # Training loss
        temp_loss = sess.run(loss, feed_dict={x_data: xdata, y_target: ydata})
        # Store the training loss
        loss_vec.append(temp_loss)
        # Validation loss
        temp_loss_add = sess.run(loss, feed_dict={x_data: addxdata, y_target: addydata})
        # Store the validation loss
        loss_vec_add.append(temp_loss_add)
        # Training accuracy
        acc_ru = sess.run(accu, feed_dict={x_data: xdata})
        acc_rughy_train = outvsreal(judge(acc_ru), ydata)
        # Store it
        acc_vec.append(acc_rughy_train)
        # Validation accuracy
        acu = sess.run(accu, feed_dict={x_data: addxdata})
        acc_rughy = outvsreal(judge(acu), addydata)
        # Store it
        acc_vec_add.append(acc_rughy)
        print('Epoch %s loss: [train: %.4f, validation: %.4f], accuracy: [train: %.4f, validation: %.4f]'
              % (i, temp_loss, temp_loss_add, acc_rughy_train, acc_rughy))
        accudict[i] = [acc_rughy_train, acc_rughy]
        # Among all epochs, save the parameters of the one with the highest combined accuracy
        zongheaccu = 0.1 * acc_rughy_train + 0.9 * acc_rughy
        if zongheaccu > accunum:
            accunum = zongheaccu
            # Save the model
            saver.save(sess, r'E:\tensorflow_Learn\Stacking\adult\model', global_step=i)  # note: hard-coded path
    sign = max(accudict.items(), key=lambda d: 0.1 * d[1][0] + 0.9 * d[1][1])[0]
    print('Fold finished and model saved; the best epoch is %s' % sign)
    return loss_vec[: sign + 1], loss_vec_add[: sign + 1], acc_vec[: sign + 1], acc_vec_add[: sign + 1], sign, hiddenlayers
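

# A minimal usage sketch (not part of the original script; shapes and hyper-parameters are
# assumptions for illustration). It builds synthetic data with 4 features and 3 one-hot
# classes and calls Ten_train. The script targets TensorFlow 1.x (tf.placeholder /
# tf.Session), and the checkpoint path above is hard-coded, so that directory must exist
# (or be edited) before this would run.
if __name__ == '__main__':
    np.random.seed(0)
    train_x = np.random.rand(1000, 4)                    # training features
    train_y = np.eye(3)[np.random.randint(0, 3, 1000)]   # one-hot training labels
    valid_x = np.random.rand(200, 4)                     # validation features
    valid_y = np.eye(3)[np.random.randint(0, 3, 200)]    # one-hot validation labels
    out = Ten_train(train_x, train_y, valid_x, valid_y,
                    hiddenlayers=2, hiddennodes=32,
                    learn_rate=0.01, itertimes=5, batch_size=100)
    train_loss, valid_loss, train_acc, valid_acc, best_epoch, layers = out
    print('best epoch:', best_epoch)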