# -*- coding: utf-8 -*-
"""
Created on Thu Mar 9 19:20:51 2017

@author: Jarvis
"""
'''
Typical steps for machine learning with TensorFlow:
1. Define the model (the forward computation).
2. Define the loss function, choose an optimizer, and have it minimize the loss.
3. Train on the data iteratively.
4. Evaluate accuracy on a test or validation set.
'''
import tensorflow as tf
import pandas as pd
import random
# Helper: randomly draw a batch of rows for training
def next_batch(mnist, num):
    size = len(mnist)
    selected_n = set()
    while len(selected_n) < num:
        t = random.choice(range(size))
        selected_n.add(t)
    l = list(selected_n)
    batch_xs = mnist.iloc[l, list(range(2, 54))]   # columns 2..53: the 52 input features
    batch_ys = mnist.iloc[l, list(range(54, 62))]  # columns 54..61: the 8 one-hot labels
    return batch_xs, batch_ys
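
# Example usage (sketch, once `mnist` has been loaded below):
#   batch_xs, batch_ys = next_batch(mnist, 100)
#   batch_xs.shape -> (100, 52), batch_ys.shape -> (100, 8)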

# Read the data
org_mnist = pd.read_csv("NDVI_NDWI.csv", header=None, encoding='gbk')
mnist = pd.get_dummies(org_mnist)
# Create the session
#input_data.read_data_sets("MNIST_data/",one_hot = True)
sess = tf.InteractiveSession()

# Define the model, i.e. the structure of the neural network
in_units = 52  # each instance has 52 input features
h1_units = 30
h2_units = 20
h3_units = 10
h4_units = 5

# tf.truncated_normal samples from a truncated normal distribution and is used here to initialize weight matrices
W1 = tf.Variable(tf.truncated_normal([in_units,h1_units],stddev = 0.1))
b1 = tf.Variable(tf.zeros([h1_units]))
W2 = tf.Variable(tf.zeros([h1_units, h2_units]))
b2 = tf.Variable(tf.zeros([h2_units]))
W3 = tf.Variable(tf.zeros([h2_units,h3_units]))
b3 = tf.Variable(tf.zeros([h3_units]))
W4 = tf.Variable(tf.zeros([h3_units,8]))
b4 = tf.Variable(tf.zeros([8]))

# Alternative: a deeper variant with a fourth hidden layer, kept for reference
'''
W4 = tf.Variable(tf.zeros([h3_units,h4_units]))
b4 = tf.Variable(tf.zeros([h4_units]))
W5 = tf.Variable(tf.zeros([h4_units,8]))
b5 = tf.Variable(tf.zeros([8]))
'''
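
# Note: only W1 is initialized with tf.truncated_normal; W2-W4 start from
# zeros, so the units inside each of those layers stay identical during
# training (the usual symmetry problem). A sketch of random initialization
# for every layer (an alternative, not what this script uses):
#   W2 = tf.Variable(tf.truncated_normal([h1_units, h2_units], stddev=0.1))
#   W3 = tf.Variable(tf.truncated_normal([h2_units, h3_units], stddev=0.1))
#   W4 = tf.Variable(tf.truncated_normal([h3_units, 8], stddev=0.1))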
x = tf.placeholder(tf.float32,[None, in_units])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
hidden1 = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
hidden1_drop = tf.nn.dropout(hidden1,keep_prob)
hidden2 = tf.nn.sigmoid(tf.matmul(hidden1_drop,W2)+b2)
hidden2_drop = tf.nn.dropout(hidden2,keep_prob)
hidden3 = tf.nn.sigmoid(tf.matmul(hidden2_drop,W3)+b3)
hidden3_drop = tf.nn.dropout(hidden3,keep_prob)
#hidden4 = tf.nn.sigmoid(tf.matmul(hidden3_drop,W4)+b4)
#hidden4_drop = tf.nn.dropout(hidden4, keep_prob)
y = tf.nn.softmax(tf.matmul(hidden3_drop, W4) + b4)
y_ = tf.placeholder(tf.float32, [None, 8])  # one-hot labels, 8 classes
# Define the loss and the optimizer
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y),reduction_indices=[1]))
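# Note (alternative, not used here): taking log() of a softmax output can
# produce NaN when a probability underflows to 0. A numerically safer sketch
# computes the loss from the pre-softmax logits:
#   logits = tf.matmul(hidden3_drop, W4) + b4
#   cross_entropy = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))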
train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)

tf.global_variables_initializer().run()

# Train iteratively
for i in range(2010):
    batch_xs, batch_ys = next_batch(mnist, 1000)
    train_step.run({x: batch_xs, y_: batch_ys, keep_prob: 1.0})

# Evaluate accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
batch_xs, batch_ys = next_batch(mnist,10000)
print(accuracy.eval({x:batch_xs,y_:batch_ys,keep_prob:1.0}))
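
# Note: the accuracy above is measured on rows re-sampled from the same
# DataFrame the network was trained on, so it overstates generalization.
# A sketch of a held-out split (assuming an 80/20 split is acceptable here):
#   train_df = mnist.sample(frac=0.8, random_state=0)
#   test_df = mnist.drop(train_df.index)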