Skip to content

Commit 564281c

Browse files
author
yitong91
authored
Add files via upload
1 parent 7aca595 commit 564281c

File tree

4 files changed

+347
-0
lines changed

4 files changed

+347
-0
lines changed

SyncNetModel.py

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
import tensorflow as tf
import utils
import numpy as np
5+
class SyncNetModel(object):
    """TensorFlow 1.x graph for SyncNet.

    SyncNet learns parametrized oscillatory convolution filters
    (amplitude-modulated cosines under a Gaussian decay envelope) over
    multi-channel time series, followed by temporal max-pooling and a
    linear softmax classifier.

    options keys used: 'num_labels', 'sample_shape', 'batch_size',
    'dropout_rate', 'C' (channels), 'K' (filter count), 'Nt' (filter
    length), 'pool_size', 'cl_Wy' (flattened feature width).
    """

    def __init__(self, options):
        # Placeholders fed at run time by the training script.
        self.l = tf.placeholder(tf.float32, [])   # not consumed in this class; fed by callers that schedule it
        self.lr = tf.placeholder(tf.float32, [])  # learning rate, fed each step
        self.sample_type = tf.float32
        self.num_labels = options['num_labels']
        self.sample_shape = options['sample_shape']
        self.batch_size = options['batch_size']
        self.dropout_rate = options['dropout_rate']
        self.X = tf.placeholder(tf.as_dtype(self.sample_type), [None] + list(self.sample_shape), name="input_X")
        self.y = tf.placeholder(tf.float32, [None, self.num_labels], name="input_labels")
        self.train = tf.placeholder(tf.bool, [], name='train')
        self._build_model(options)
        self._setup_train_ops()

    def SyncNetFilters(self, options):
        """Construct the SyncNet filter bank W of shape (1, Nt, C, K).

        W[0, t, c, k] = b[c, k] * cos(t * omega[k] + phi[c, k]) * exp(-beta[k] * t^2),
        with phi fixed at 0 for the first channel (the zero_pad slice).
        Side effect: registers self.beta_op, which clips beta to be
        non-negative (run after each optimizer step).
        """
        b = tf.Variable(tf.random_uniform([1, 1, options['C'], options['K']], minval=-0.05, maxval=0.05, dtype=tf.float32), name='b')
        omega = tf.Variable(tf.random_uniform([1, 1, 1, options['K']], minval=0., maxval=1.), name='omega')
        zero_pad = tf.zeros((1, 1, 1, options['K']), dtype=tf.float32, name='zero_pad')
        phi_ini = tf.Variable(tf.random_normal([1, 1, options['C'] - 1, options['K']], mean=0.0, stddev=0.05, dtype=tf.float32), name='phi')
        phi = tf.concat([zero_pad, phi_ini], axis=2)
        beta = tf.Variable(tf.random_uniform([1, 1, 1, options['K']], minval=0., maxval=0.05), dtype=tf.float32, name='beta')
        #t=np.reshape(np.linspace(-options['Nt']/2.,options['Nt']/2.,options['Nt']),[1,options['Nt'],1,1])
        # BUG FIX: range(-Nt/2, Nt/2) raises on Python 3 because '/' yields
        # floats; '//' reproduces the original Python 2 floor-division grid.
        t = np.reshape(np.arange(-options['Nt'] // 2, options['Nt'] // 2), [1, options['Nt'], 1, 1])
        tc = tf.constant(np.single(t), name='t')
        W_osc = tf.multiply(b, tf.cos(tc * omega + phi))
        W_decay = tf.exp(-tf.pow(tc, 2) * beta)
        W = tf.multiply(W_osc, W_decay)
        # BUG FIX: np.infty was removed in NumPy 2.0; np.inf is the canonical name.
        self.beta_op = tf.assign(beta, tf.clip_by_value(beta, 0, np.inf))
        return W

    def feature_extractor(self, X, options):
        """Channel dropout -> SyncNet convolution -> ReLU -> temporal
        max-pooling; returns features flattened to (batch, cl_Wy)."""
        self.dropout_x = utils.channel_dropout(X, self.dropout_rate)
        # Insert a singleton height axis so conv2d sees (N, 1, T, C).
        X = tf.expand_dims(self.dropout_x, axis=1, name='reshaped_input')
        # NOTE(review): reuse=True on first use normally raises in TF1 with
        # tf.get_variable; it is inert here because the filters are created
        # with tf.Variable — confirm before refactoring to get_variable.
        with tf.variable_scope('syncnet_conv', reuse=True):
            W = self.SyncNetFilters(options)
            bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[options['K']]), name='bias')
            h_conv1 = tf.nn.relu(tf.nn.conv2d(X, W, strides=[1, 1, 1, 1], padding='SAME') + bias)
            h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 1, options['pool_size'], 1], strides=[1, 1, options['pool_size'], 1], padding='SAME')
            self.h_pool1 = h_pool1
        features = tf.reshape(h_pool1, [-1, options['cl_Wy']])
        return features

    def label_predictor(self, features):
        """Linear layer producing unnormalized class logits."""
        with tf.variable_scope('label_predictor_logits'):
            logits = utils.fully_connected_layer(features, self.num_labels)
        return logits

    def _build_model(self, options):
        """Wire features -> logits -> softmax prediction, loss, accuracy."""
        self.features = self.feature_extractor(self.X, options)
        logits = self.label_predictor(self.features)
        self.y_pred = tf.nn.softmax(logits)
        self.y_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.y))
        self.y_acc = utils.predictor_accuracy(self.y_pred, self.y)

    def _setup_train_ops(self):
        """Adam step minimizing the classification loss at the fed lr."""
        self.train_ops = tf.train.AdamOptimizer(self.lr).minimize(self.y_loss)

plot_d_b.m

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
% Plot the learned SyncNet filter parameters: amplitude b and phase phi,
% visualized as the complex numbers -b .* exp(j*phi) in the complex plane
% (one point per channel/filter). Expects result.mat saved by syncnet.py.
params = load('./result.mat');
params.b = squeeze(params.b);
params.phi = squeeze(params.phi);
figure('Color',[1,1,1]);
%plot(shat,'.-'); hold on;
[C,K] = size(params.b);
b = params.b;
% The first channel's phase is fixed to zero in the model; prepend it here.
phi = [zeros(K,1);params.phi];
plot(-b.*exp(j*phi),'.-'); hold on;

syncnet.py

Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,80 @@
import tensorflow as tf
import numpy as np
from SyncNetModel import SyncNetModel
import scipy.io
import os
import utils

# Train SyncNet on the toy dataset and save the learned parameters.

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)

batch_size = 10
num_steps = 2000
valid_steps = 1000

# toy.mat holds (N, C, T) arrays; transposed below to (N, T, C).
mat = scipy.io.loadmat('toy.mat')
train = mat['train'].astype('float32')
val = mat['val'].astype('float32')
test = mat['test'].astype('float32')
labtrain = utils.to_one_hot(mat['labtrain'])
labval = utils.to_one_hot(mat['labval'])
labtest = utils.to_one_hot(mat['labtest'])
train = train.transpose(0, 2, 1)
val = val.transpose(0, 2, 1)
test = test.transpose(0, 2, 1)

options = {}
options['sample_shape'] = (train.shape[1], train.shape[2])
options['num_labels'] = labtrain.shape[1]
options['batch_size'] = batch_size
options['C'] = train.shape[2]   # number of channels
options['T'] = train.shape[1]   # number of time steps
options['K'] = 1                # number of SyncNet filters
options['Nt'] = 40              # filter length
options['pool_size'] = 40
options['dropout_rate'] = 0.0
# Flattened feature width after pooling: ceil(T / pool_size) * K.
options['cl_Wy'] = int(np.ceil(float(options['T']) / float(options['pool_size'])) * options['K'])

tf.reset_default_graph()
graph = tf.get_default_graph()
# NOTE: a duplicate `gpu_options = tf.GPUOptions(...)` assignment that
# shadowed the one above was removed.

model = SyncNetModel(options)
sess = tf.Session(graph=graph, config=tf.ConfigProto(gpu_options=gpu_options))
tf.global_variables_initializer().run(session=sess)
gen_batches = utils.batch_generator([train, labtrain], options['batch_size'])

print('Training...')
for i in range(1, num_steps + 1):
    p = float(i) / num_steps  # training progress in (0, 1]; kept for lr schedules
    #lr = 0.002 #/ (1. + 10 * p)**0.75
    lr = 0.002
    # BUG FIX: generator.next() is Python 2 only; use the builtin next().
    X, y = next(gen_batches)
    _, batch_loss, y_acc = \
        sess.run([model.train_ops, model.y_loss, model.y_acc],
                 feed_dict={model.X: X, model.y: y, model.lr: lr})
    # Project beta back onto [0, inf) after the unconstrained Adam step.
    _ = sess.run(model.beta_op, feed_dict={})
    if i % 100 == 0:
        # BUG FIX: Python 2 print statements converted to print() calls.
        print('iter %d loss: %f p_acc: %f lr: %f' %
              (i, batch_loss, y_acc, lr))

    if i % valid_steps == 0:
        train_pred, train_acc = sess.run([model.y_pred, model.y_acc], feed_dict={model.X: train, model.y: labtrain})
        val_pred, val_acc = sess.run([model.y_pred, model.y_acc], feed_dict={model.X: val, model.y: labval})
        test_pred, test_acc = sess.run([model.y_pred, model.y_acc], feed_dict={model.X: test, model.y: labtest})
        print('train: %.4f valid: %.4f test: %.4f ' %
              (train_acc, val_acc, test_acc))

params = utils.get_params(sess)
np.save('./result.npy', params)
#result = utils.get_params(sess)
#result_for_save = {}
#keys = result.keys()
#for i in range(len(keys)):
#    temp = keys[i].split('/')
#    temp = temp[1]
#    temp = temp[:-2]
#    result_for_save[temp] = result[keys[i]]
#scipy.io.savemat('./result.mat',{'b':result_for_save['b'], 'phi':result_for_save['phi']})
#

utils.py

Lines changed: 193 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,193 @@
import tensorflow as tf
import numpy as np
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import scipy

# Model construction utilities below adapted from
# https://www.tensorflow.org/versions/r0.8/tutorials/mnist/pros/index.html#deep-mnist-for-experts
10+
def get_params(sess):
    """Return {variable_name: numpy value} for all trainable variables.

    Evaluates each tf.trainable_variables() entry in the given session.
    """
    params = {}
    for v in tf.trainable_variables():
        params[v.name] = sess.run(v)
    return params
17+
18+
19+
def to_one_hot(x, N=-1):
    """One-hot encode an integer label array.

    x is flattened; when its minimum is non-zero and N is not given, labels
    are shifted so the smallest value becomes class 0 (handles 1-based
    labels). N = -1 means infer the class count as max(x) + 1.

    Returns a float32 array of shape (x.size, N).
    """
    x = x.astype('int32')
    if np.min(x) != 0 and N == -1:
        x = x - np.min(x)  # shift e.g. 1-based labels to 0-based
    x = x.reshape(-1)
    if N == -1:
        N = np.max(x) + 1
    label = np.zeros((x.shape[0], N))
    label[np.arange(x.shape[0]), x] = 1
    return label.astype('float32')
30+
31+
def image_mean(x):
    """Mean over the first three axes (e.g. N, H, W): one value per channel."""
    x_mean = x.mean((0, 1, 2))
    return x_mean
34+
35+
def shape(tensor):
    """Return the static shape of a TF1 tensor as a tuple of ints/None.

    This is a compile-time operation: it runs while building the graph, so
    any dimension determined only via feed_dict comes back as None.
    Relies on the TF1 `Dimension.value` attribute.
    """
    return tuple(d.value for d in tensor.get_shape())
43+
44+
45+
def fully_connected_layer(in_tensor, out_units):
    """Dense layer computing in_tensor @ W + b (no activation).

    Creates variables "weights" (truncated normal, stddev 0.1) and "biases"
    (constant 0.1) via tf.get_variable, so it should be called inside a
    fresh variable scope to avoid name collisions.
    """
    _, num_features = shape(in_tensor)
    weights = tf.get_variable(name="weights", shape=[num_features, out_units], initializer=tf.truncated_normal_initializer(stddev=0.1))
    biases = tf.get_variable(name="biases", shape=[out_units], initializer=tf.constant_initializer(0.1))
    return tf.matmul(in_tensor, weights) + biases
55+
56+
57+
def conv2d(in_tensor, filter_shape, out_channels):
    """SAME-padded, stride-1 2-D convolution plus bias.

    in_tensor is a 4-D image batch [N, H, W, C]; filter_shape is
    [height, width]; out_channels filters are created. Note: no activation
    is applied here (the original comment mentioned ReLU, but the code
    returns conv + biases directly).
    """
    _, _, _, channels = shape(in_tensor)
    W_shape = filter_shape + [channels, out_channels]

    # Create variables via get_variable so reuse follows the active scope.
    weights = tf.get_variable(name="weights", shape=W_shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    biases = tf.get_variable(name="biases", shape=[out_channels], initializer=tf.constant_initializer(0.1))
    conv = tf.nn.conv2d(in_tensor, weights, strides=[1, 1, 1, 1], padding='SAME')
    h_conv = conv + biases
    return h_conv
73+
74+
75+
#def conv1d(in_tensor, filter_shape, out_channels):
76+
# _, _, channels = shape(in_tensor)
77+
# W_shape = [filter_shape, channels, out_channels]
78+
#
79+
# W = tf.truncated_normal(W_shape, dtype = tf.float32, stddev = 0.1)
80+
# weights = tf.Variable(W, name = "weights")
81+
# b = tf.truncated_normal([out_channels], dtype = tf.float32, stddev = 0.1)
82+
# biases = tf.Variable(b, name = "biases")
83+
# conv = tf.nn.conv1d(in_tensor, weights, stride=1, padding='SAME')
84+
# h_conv = conv + biases
85+
# return h_conv
86+
87+
def vars_from_scopes(scopes):
    """Return all GLOBAL_VARIABLES collected from the listed scopes.

    Operates relative to the current variable scope: inside "scope1",
    passing ["weights", "biases"] finds variables under "scope1/weights"
    and "scope1/biases".
    """
    current_scope = tf.get_variable_scope().name
    if current_scope != '':
        scopes = [current_scope + '/' + scope for scope in scopes]
    var = []
    for scope in scopes:
        for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope):
            var.append(v)
    return var
102+
103+
def tfvar2str(tf_vars):
    """Return the .name string of each variable in tf_vars, in order."""
    return [v.name for v in tf_vars]
108+
109+
110+
def shuffle_aligned_list(data):
    """Shuffle arrays in a list by shuffling each array identically.

    A single random permutation of the first array's leading dimension is
    applied to every array, so corresponding rows stay aligned.
    """
    num = data[0].shape[0]
    p = np.random.permutation(num)
    return [d[p] for d in data]
115+
116+
117+
def batch_generator(data, batch_size, shuffle=True):
    """Yield aligned mini-batches from a list of array-likes, forever.

    Every array is sliced with the same indices, so rows stay aligned
    across the list. When the next batch would reach past the end, the
    epoch restarts (reshuffling if `shuffle`). Note the wrap condition
    uses >=, so a trailing partial batch is never yielded.
    """
    if shuffle:
        data = shuffle_aligned_list(data)

    batch_count = 0
    while True:
        if batch_count * batch_size + batch_size >= len(data[0]):
            batch_count = 0
            if shuffle:
                data = shuffle_aligned_list(data)

        start = batch_count * batch_size
        end = start + batch_size
        batch_count += 1
        yield [d[start:end] for d in data]
139+
140+
141+
142+
143+
def predictor_accuracy(predictions, labels):
    """
    Returns a scalar in [0, 1]: the fraction of rows where the argmax of
    `predictions` matches the argmax of `labels`.
    """
    return tf.reduce_mean(tf.cast(tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1)), tf.float32))
149+
150+
def dic2list(sources, targets):
    """Invert two {name: index} dicts into a list of names ordered by index.

    Assumes the combined indices are exactly 0..len-1; on duplicate indices
    the `targets` entry wins (it is written second).
    """
    names_dic = {}
    for key in sources:
        names_dic[sources[key]] = key
    for key in targets:
        names_dic[targets[key]] = key
    return [names_dic[i] for i in range(len(names_dic))]
160+
161+
def softmax(x):
    """Compute softmax values for each sets of scores in x.

    Shifts by max(x) for numerical stability; normalizes along axis 0.
    """
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)
165+
166+
def norm_matrix(X, l):
    """Return a copy of X with each row divided by its l-norm.

    l is any order accepted by np.linalg.norm (e.g. 1, 2, np.inf).
    Rows with zero norm produce a division-by-zero warning, as before.
    """
    Y = np.zeros(X.shape)
    for i in range(X.shape[0]):
        Y[i] = X[i] / np.linalg.norm(X[i], l)
    return Y
171+
172+
173+
def description(sources, targets):
    """Build a run label 'src1_src2_..._srcN-tgt1' from up to 4 source
    names (dict insertion order) and the first target name."""
    # BUG FIX: dict.keys() is a view on Python 3 and is not indexable;
    # materialize to lists before subscripting.
    source_names = list(sources.keys())
    target_names = list(targets.keys())
    N = min(len(source_names), 4)
    description = source_names[0]
    for i in range(1, N):
        description = description + '_' + source_names[i]
    description = description + '-' + target_names[0]
    return description
182+
183+
def channel_dropout(X, p):
    """Randomly zero entire channels of X with probability p.

    X is assumed (batch, time, channel) — the mask is drawn per (batch,
    channel) and broadcast over time — TODO confirm against callers.
    Surviving channels are rescaled by 1/(1-p) (inverted dropout).
    p == 0 is an identity no-op.
    """
    if p == 0:
        return X
    # Each mask entry is 1 with probability (1 - p): floor(U[0,1) + 1 - p).
    mask = tf.random_uniform(shape=[tf.shape(X)[0], tf.shape(X)[2]])
    mask = mask + 1 - p
    mask = tf.floor(mask)
    dropout = tf.expand_dims(mask, axis=1) * X / (1 - p)
    return dropout
191+
192+
def sigmoid(x):
    """Elementwise logistic sigmoid 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-x))

0 commit comments

Comments
 (0)