# -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 15:37:00 2017
@author: xuanlei
"""
import pandas as pd
import tensorflow as tf
import numpy as np
#==============================================================================
# Batch Normalization
#==============================================================================
def batch_norm_layer(x, train_phase, scope_bn):
    """Batch normalization that uses batch statistics during training and an
    exponential moving average of those statistics during inference."""
    with tf.variable_scope(scope_bn):
        # Learnable shift (beta) and scale (gamma), one per feature channel.
        beta = tf.Variable(tf.constant(0.0, shape=[int(x.shape[-1])]), name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[int(x.shape[-1])]), name='gamma', trainable=True)
        # Compute moments over every axis except the last (feature) axis.
        axes = list(range(len(x.shape) - 1))
        batch_mean, batch_var = tf.nn.moments(x, axes, name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.5)

        def mean_var_with_update():
            # Update the moving averages, then return the batch statistics.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Training: batch statistics (with EMA update); inference: EMA values.
        mean, var = tf.cond(train_phase, mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed
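
#==============================================================================
# Minimal usage sketch for batch_norm_layer (not part of the original script;
# the shapes and names below are illustrative assumptions).
#==============================================================================
def _batch_norm_demo():
    x = tf.placeholder(tf.float32, [None, 8], name='demo_x')       # (batch, features)
    train_phase = tf.placeholder(tf.bool, name='demo_train_phase')
    normed = batch_norm_layer(x, train_phase, scope_bn='bn_demo')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch = np.random.rand(4, 8).astype(np.float32)
        # Training pass: tf.cond's true branch updates the moving averages.
        print(sess.run(normed, feed_dict={x: batch, train_phase: True}))
        # Inference pass: reads the moving averages instead of batch statistics.
        print(sess.run(normed, feed_dict={x: batch, train_phase: False}))
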
#==============================================================================
# RNN Structure
#==============================================================================
class LSTMRNN():
    # Initial setting: the whole graph is built at construction time.
    def __init__(self, n_steps, input_size, output_size, cell_size,
                 h1_size, h2_size, h3_size, LR, num_size, batch_size):
        self.n_steps = n_steps
        self.input_size = input_size
        self.output_size = output_size
        self.cell_size = cell_size
        self.num_size = num_size
        self.h1_size = h1_size
        self.h2_size = h2_size
        self.h3_size = h3_size
        self.batch_size = batch_size
        self.LR = LR
        with tf.name_scope('inputs'):
            self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size], name='xs')
            self.ys = tf.placeholder(tf.float32, [None, output_size], name='ys')
            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
            self.train_phase = tf.placeholder(tf.bool, name='train_phase')
        with tf.name_scope('in_hidden'):
            self.add_input_layer()
        with tf.name_scope('LSTM_Cell'):
            self.add_cell_layer()
        with tf.name_scope('hidden_1'):
            self.add_h1_layer()
        with tf.name_scope('hidden_2'):
            self.add_h2_layer()
        with tf.name_scope('hidden_3'):
            self.add_h3_layer()
        with tf.name_scope('out_hidden'):
            self.add_output_layer()
        with tf.name_scope('cost'):
            self.compute_cost()
        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(learning_rate=self.LR).minimize(self.cost)

    def add_input_layer(self):
        # Project each time step from input_size up to cell_size.
        with tf.name_scope('input_layer'):
            l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='x_input')
            Ws_in = tf.Variable(tf.truncated_normal([self.input_size, self.cell_size], mean=1, stddev=0.5))
            bs_in = tf.Variable(tf.zeros([self.cell_size, ]) + 0.01)
            l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in
            self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size], name='cell_input')

    def add_cell_layer(self):
        with tf.name_scope('LSTM_layer'):
            # Build num_size distinct cells: reusing one cell object across
            # layers raises an error in TensorFlow >= 1.2.
            cells = [tf.contrib.rnn.DropoutWrapper(
                         tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True),
                         output_keep_prob=self.keep_prob)
                     for _ in range(self.num_size)]
            lstm_cells = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
            self.cells_init_state = lstm_cells.zero_state(self.batch_size, dtype=tf.float32)
            # cells_outputs: (batch_size, n_steps, cell_size)
            self.cells_outputs, self.cells_final_state = tf.nn.dynamic_rnn(
                lstm_cells, self.l_in_y, initial_state=self.cells_init_state, time_major=False)

    def add_h1_layer(self):
        with tf.name_scope('h1_layer'):
            h1_x = tf.reshape(self.cells_outputs, [-1, self.cell_size])
            Ws_h1 = tf.Variable(tf.truncated_normal([self.cell_size, self.h1_size], mean=3, stddev=1))
            bs_h1 = tf.Variable(tf.zeros([self.h1_size, ]) + 0.01)
            non_bn_h1 = tf.matmul(h1_x, Ws_h1) + bs_h1
            self.h1_y = batch_norm_layer(non_bn_h1, train_phase=self.train_phase, scope_bn='bn_h1')

    def add_h2_layer(self):
        with tf.name_scope('h2_layer'):
            h2_x = tf.reshape(self.h1_y, [-1, self.h1_size])
            Ws_h2 = tf.Variable(tf.truncated_normal([self.h1_size, self.h2_size], mean=3, stddev=2))
            bs_h2 = tf.Variable(tf.zeros([self.h2_size, ]) + 0.01)
            non_bn_h2 = tf.matmul(h2_x, Ws_h2) + bs_h2
            self.h2_y = batch_norm_layer(non_bn_h2, train_phase=self.train_phase, scope_bn='bn_h2')

    def add_h3_layer(self):
        with tf.name_scope('h3_layer'):
            h3_x = tf.reshape(self.h2_y, [-1, self.h2_size])
            Ws_h3 = tf.Variable(tf.truncated_normal([self.h2_size, self.h3_size], mean=3, stddev=2))
            bs_h3 = tf.Variable(tf.zeros([self.h3_size, ]) + 0.01)
            non_bn_h3 = tf.matmul(h3_x, Ws_h3) + bs_h3
            self.h3_y = batch_norm_layer(non_bn_h3, train_phase=self.train_phase, scope_bn='bn_h3')

    def add_output_layer(self):
        with tf.name_scope('output_layer'):
            l_out_x = tf.reshape(self.h3_y, [-1, self.h3_size], name='y_input')
            Ws_out = tf.Variable(tf.truncated_normal([self.h3_size, self.output_size], mean=3, stddev=1))
            bs_out = tf.Variable(tf.zeros([self.output_size, ]))
            # ReLU keeps the predicted travel times non-negative.
            self.pred = tf.nn.relu(tf.matmul(l_out_x, Ws_out) + bs_out)

    def compute_cost(self):
        # Earlier cross-entropy loss, kept for reference:
        # self.cost = -tf.reduce_sum(self.ys*tf.log(self.pred+0.001)+0.3*(1-self.ys)*tf.log(1-self.pred+0.001))
        # Redefined regression loss: root-mean-square error (RMSE).
        self.cost = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.pred, self.ys))))
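
#==============================================================================
# Training sketch (the original training loop is not included in this file;
# every hyperparameter and the random arrays below are placeholder
# assumptions, not values from the Tianchi solution).
#==============================================================================
if __name__ == '__main__':
    N_STEPS, BATCH_SIZE = 12, 32
    model = LSTMRNN(n_steps=N_STEPS, input_size=1, output_size=1,
                    cell_size=64, h1_size=32, h2_size=16, h3_size=8,
                    LR=0.001, num_size=2, batch_size=BATCH_SIZE)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(100):
            # Replace the random arrays with real travel-time sequences.
            # pred keeps one row per time step, so ys needs
            # batch_size * n_steps rows to match its shape.
            x_batch = np.random.rand(BATCH_SIZE, N_STEPS, 1).astype(np.float32)
            y_batch = np.random.rand(BATCH_SIZE * N_STEPS, 1).astype(np.float32)
            _, rmse = sess.run([model.train_op, model.cost],
                               feed_dict={model.xs: x_batch,
                                          model.ys: y_batch,
                                          model.keep_prob: 0.8,
                                          model.train_phase: True})
            if step % 20 == 0:
                print('step %d, RMSE %.4f' % (step, rmse))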