[Deep Learning] Reading TFRecords and Training on MNIST (Training Set + Validation Set)

Reading TFRecords and Training on MNIST (Training Set + Validation Set)

Code
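The code below is a Jupyter notebook export targeting TensorFlow 1.x and its queue-based input pipeline. It builds two TFRecord readers (one for the training file, one for the validation file), switches between their shuffled batches with tf.cond driven by an in_training placeholder, trains a small two-convolution CNN on MNIST, and writes separate TensorBoard summaries to ./logs/train and ./logs/test.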

#!/usr/bin/env python
# coding: utf-8

# In[1]:


import numpy as np
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
os.system("rm -r logs")
import tensorflow as tf
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt 
from PIL import Image
import multiprocessing


# In[2]:



TrainPath = '/home/winsoul/disk/MNIST/data/tfrecord/train.tfrecords'
TestPath = '/home/winsoul/disk/MNIST/data/tfrecord/test.tfrecords'
# BatchSize = 64
epoch = 10
DisplayStep = 20
SaveModelStep = 1000


# In[3]:


def read_tfrecord(TFRecordPath):
    # This function only adds decode ops to the graph, so no Session is needed here;
    # the Session created in Network() runs everything.
    feature = {
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64)
    }
#     filename_queue = tf.train.string_input_producer([TFRecordPath], num_epochs = 1)
    filename_queue = tf.train.string_input_producer([TFRecordPath])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example, features = feature)
    image = tf.decode_raw(features['image'], tf.float32)
    image = tf.reshape(image, [28, 28, 1])
    label = tf.cast(features['label'], tf.int32)
    return image, label


# In[4]:


def conv_layer(X, k, s, channels_in, channels_out, name = 'CONV'):
    with tf.name_scope(name):
        W = tf.Variable(tf.truncated_normal([k, k, channels_in, channels_out], stddev = 0.1))
        b = tf.Variable(tf.constant(0.1, shape = [channels_out]))
        conv = tf.nn.conv2d(X, W, strides = [1, s, s, 1], padding = 'SAME')
        result = tf.nn.relu(conv + b)
        tf.summary.histogram('weights', W)
        tf.summary.histogram('biases', b)
        tf.summary.histogram('activations', result)
        return result


# In[5]:


def pool_layer(X, k, s, strr = 'SAME', pool_type = 'MAX'):
    if pool_type == 'MAX':
        result = tf.nn.max_pool(X,
                              ksize = [1, k, k, 1],
                              strides = [1, s, s, 1],
                              padding = strr)
    else:
        result = tf.nn.avg_pool(X,
                              ksize = [1, k, k, 1],
                              strides = [1, s, s, 1],
                              padding = strr)
    return result


# In[6]:


def fc_layer(X, neurons_in, neurons_out, last = False, name = 'FC'):
    with tf.name_scope(name):
        W = tf.Variable(tf.truncated_normal([neurons_in, neurons_out], stddev = 0.1))
        b = tf.Variable(tf.constant(0.1, shape = [neurons_out]))
        tf.summary.histogram('weights', W)
        tf.summary.histogram('biases', b)
        if last == False:
            result = tf.nn.relu(tf.matmul(X, W) + b)
        else:
            result =  tf.nn.softmax(tf.matmul(X, W) + b)
        tf.summary.histogram('activations', result)
        return result


# In[7]:


def Network(BatchSize, learning_rate):
    with tf.Session() as sess:
        in_training = tf.placeholder(dtype = tf.bool, shape=())
        keep_prob = tf.placeholder('float32', name = 'keep_prob')
        
        judge = tf.Print(in_training, ['in_training:', in_training])
        
        image_train, label_train = read_tfrecord(TrainPath) 
        image_val, label_val = read_tfrecord(TestPath) 
#         image, label = read_tfrecord(TrainPath) if tf.equal(use_train_data, use_train_data_judge) else read_tfrecord(TestPath)
#         image, label = tf.cond(use_train_data, lambda: read_tfrecord(TrainPath), lambda: read_tfrecord(TestPath))     



        image_train_Batch, label_train_Batch = tf.train.shuffle_batch([image_train, label_train], 
                                                     batch_size = BatchSize, 
                                                     capacity = BatchSize*3 + 200,
                                                     min_after_dequeue = BatchSize)
        image_val_Batch, label_val_Batch = tf.train.shuffle_batch([image_val, label_val], 
                                                     batch_size = BatchSize, 
                                                     capacity = BatchSize*3 + 200,
                                                     min_after_dequeue = BatchSize)
        
        image_Batch = tf.cond(in_training, lambda: image_train_Batch, lambda: image_val_Batch)
        label_Batch = tf.cond(in_training, lambda: label_train_Batch, lambda: label_val_Batch)
        
        label_Batch = tf.one_hot(label_Batch, depth = 10)
        


        X = tf.identity(image_Batch)
        y = tf.identity(label_Batch)
        
        
        with tf.name_scope('input_reshape'):
            tf.summary.image('input', X, 32)
    
        conv1 = conv_layer(X, 5, 1, 1, 32, "conv1")
        pool1 = pool_layer(conv1, 2, 2, "SAME", "MAX")

        conv2 = conv_layer(pool1, 5, 1, 32, 64, 'conv2')
        pool2 = pool_layer(conv2, 2, 2, "SAME", "MAX")
        print(pool2.shape)

        drop1 = tf.nn.dropout(pool2, keep_prob)
        fc1 = fc_layer(tf.reshape(drop1, [-1, 7 * 7 * 64]), 7 * 7 * 64, 1024)

        drop2 = tf.nn.dropout(fc1, keep_prob)
        y_result = fc_layer(drop2, 1024, 10, True)
        
        
        with tf.name_scope('summaries'):
            cross_entropy = -tf.reduce_mean(y * tf.log(tf.clip_by_value(y_result, 1e-3,1.0)))
            train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
            #train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_result, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float', name = 'accuracy'))
            tf.summary.scalar("loss", cross_entropy)
            tf.summary.scalar("accuracy", accuracy)
            
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord = coord)
        
        merge_summary = tf.summary.merge_all()
        summary_train_writer = tf.summary.FileWriter("./logs/train", sess.graph)
        summary_val_writer = tf.summary.FileWriter("./logs/test")
        
        try:
            batch_index = 0
            while not coord.should_stop():
                sess.run([train_step], feed_dict = {keep_prob: 0.5, in_training: True})
                if batch_index % 10 == 0:
                    # evaluate on the training queue (note: this run also applies one extra train_step)
                    summary_train, _, acc_train, loss_train = sess.run([merge_summary, train_step, accuracy, cross_entropy], feed_dict = {keep_prob: 1.0, in_training: True})
                    summary_train_writer.add_summary(summary_train, batch_index)
                    # evaluate on the validation queue
                    summary_val, acc_val, loss_val = sess.run([merge_summary, accuracy, cross_entropy], feed_dict = {keep_prob: 1.0, in_training: False})
                    summary_val_writer.add_summary(summary_val, batch_index)
                    print(str(batch_index) + ' train: ' + str(acc_train) + ' ' + str(loss_train))
                    print(str(batch_index) + ' val:   ' + str(acc_val) + ' ' + str(loss_val))
                batch_index += 1
                    
        except tf.errors.OutOfRangeError:
            print("OutofRangeError!")
        finally:
            print("Finish")
    
        coord.request_stop()
        coord.join(threads)
        sess.close()


# In[8]:


def main():
    Network(512, 0.0001)


# In[ ]:


if __name__ == '__main__':
    main()
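
The reader above expects each record to store the flattened image as raw float32 bytes under the key 'image' and the digit label as an int64 under 'label'. The post does not show how train.tfrecords / test.tfrecords were produced; the following is a minimal writer sketch under those assumptions (the helper name write_tfrecord and the image normalization are hypothetical, not from the original post):

import numpy as np
import tensorflow as tf

def write_tfrecord(images, labels, path):
    # Hypothetical helper: emits records compatible with read_tfrecord above.
    # images: iterable of 28x28 float32 arrays, labels: iterable of ints.
    writer = tf.python_io.TFRecordWriter(path)
    for img, lab in zip(images, labels):
        img_bytes = img.astype(np.float32).tobytes()  # raw bytes, matching tf.decode_raw(..., tf.float32)
        example = tf.train.Example(features = tf.train.Features(feature = {
            'image': tf.train.Feature(bytes_list = tf.train.BytesList(value = [img_bytes])),
            'label': tf.train.Feature(int64_list = tf.train.Int64List(value = [int(lab)]))
        }))
        writer.write(example.SerializeToString())
    writer.close()

Once training is running, the two FileWriter directories can be compared in a single TensorBoard instance with: tensorboard --logdir ./logs (the train and test runs appear as separate curves).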
