import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
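# Note: the tensorflow.examples.tutorials module only ships with TensorFlow 1.x
# and was removed in TensorFlow 2; if it is unavailable, the same arrays can be
# loaded with tf.keras.datasets.mnist.load_data(), followed by flattening each
# image to 784 features, scaling to [0, 1], and one-hot encoding the labels.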
feature_count = 784
labels_count = 10
batch_size = 128  # 128 samples per batch
epochs = 10
learning_rate = 0.5
features = tf.placeholder(tf.float32, [None, feature_count])
labels = tf.placeholder(tf.float32, [None, labels_count])
weights = tf.Variable(tf.truncated_normal((feature_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count), name="biases")

# linear function: WX + b
logits = tf.add(tf.matmul(features, weights), biases)
prediction = tf.nn.softmax(logits)
# cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), axis=1)
# training loss
loss = tf.reduce_mean(cross_entropy)
# gradient-descent optimizer (built once as part of the graph, not per batch)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# initialize all variables
init = tf.global_variables_initializer()
# determine whether the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# calculate prediction accuracy
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
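# Note: tf.argmax(..., 1) picks the most probable class per row, so
# is_correct_prediction is a boolean vector over the batch; casting it to
# float32 and averaging yields the fraction of examples classified correctly.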
# train the model
with tf.Session() as sess:
    sess.run(init)
    total_batch = int(len(mnist.train.labels) / batch_size)
    for epoch in range(epochs):
        avg_cost = 0
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size)
            _, c = sess.run([optimizer, loss],
                            feed_dict={features: batch_x, labels: batch_y})
            avg_cost += c / total_batch
        print("epoch:", epoch + 1, "cost=", "{:.3f}".format(avg_cost))
    print(sess.run(accuracy,
                   feed_dict={features: mnist.test.images, labels: mnist.test.labels}))
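One caveat with the hand-rolled loss above: tf.log(prediction) returns -inf whenever the softmax output underflows to zero, which can turn the cost into NaN. A minimal alternative sketch, assuming TensorFlow 1.x, swaps in the built-in fused op tf.nn.softmax_cross_entropy_with_logits, which combines the softmax and the log in one numerically stable step; it would replace the cross_entropy and loss lines with:

# fused softmax + cross entropy; avoids an explicit tf.log(prediction)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
loss = tf.reduce_mean(cross_entropy)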
Source: https://www.cnblogs.com/hapyygril/p/11335838.html