import tensorflow as tf
import numpy as np
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import scipy
# Model construction utilities below adapted from
# https://www.tensorflow.org/versions/r0.8/tutorials/mnist/pros/index.html#deep-mnist-for-experts
9+
def get_params(sess):
    """Return a dict mapping each trainable variable's name to its current value.

    Runs `sess.run` once per variable, so values reflect the session state
    at call time.
    """
    return {v.name: sess.run(v) for v in tf.trainable_variables()}
17+
18+
def to_one_hot(x, N=-1):
    """Convert an array of integer labels into a one-hot float32 matrix.

    When N == -1 (the default) the number of classes is inferred as
    max(x) + 1, after first shifting labels so the smallest becomes 0.
    When N is given explicitly, labels are used as-is (no shifting).
    """
    x = x.astype('int32')
    if N == -1:
        # Shift so labels start at 0 before inferring the class count.
        if np.min(x) != 0:
            x = x - np.min(x)
        x = x.reshape(-1)
        N = np.max(x) + 1
    else:
        x = x.reshape(-1)
    one_hot = np.zeros((x.shape[0], N), dtype='float32')
    one_hot[np.arange(x.shape[0]), x] = 1
    return one_hot
30+
def image_mean(x):
    """Mean over the first three axes of `x` (batch, height, width for an
    [N, H, W, C] image stack), giving one value per channel."""
    return x.mean(axis=(0, 1, 2))
34+
def shape(tensor):
    """
    Return the static shape of `tensor` as a tuple of ints.

    This is a graph-construction-time operation: dimensions that are only
    determined at run time (e.g. via feed_dict) are not known here.
    """
    return tuple(dim.value for dim in tensor.get_shape())
43+
44+
def fully_connected_layer(in_tensor, out_units):
    """
    Add a fully connected layer to the default graph, taking `in_tensor`
    as input and producing `out_units` outputs. Creates variables
    "weights" and "biases" in the current variable scope and returns the
    linear activation in_tensor @ W + b (no nonlinearity applied).
    """
    num_features = shape(in_tensor)[1]
    W = tf.get_variable(
        "weights",
        shape=[num_features, out_units],
        initializer=tf.truncated_normal_initializer(stddev=0.1),
    )
    b = tf.get_variable(
        "biases",
        shape=[out_units],
        initializer=tf.constant_initializer(0.1),
    )
    return tf.matmul(in_tensor, W) + b
55+
56+
def conv2d(in_tensor, filter_shape, out_channels):
    """
    Create a conv2d layer. The input image (which should already be shaped
    like an image, a 4D tensor [N, H, W, C]) is convolved with `out_channels`
    filters, each with spatial shape `filter_shape` ([height, width]),
    stride 1 and SAME padding.

    NOTE: despite what the original docstring claimed, no activation
    function is applied here — the caller is responsible for any
    nonlinearity. The returned value is the linear output conv + bias.
    """
    in_channels = shape(in_tensor)[3]
    W_shape = filter_shape + [in_channels, out_channels]

    # Create variables in the current variable scope.
    weights = tf.get_variable(
        "weights",
        shape=W_shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1),
    )
    biases = tf.get_variable(
        "biases",
        shape=[out_channels],
        initializer=tf.constant_initializer(0.1),
    )
    conv = tf.nn.conv2d(in_tensor, weights, strides=[1, 1, 1, 1], padding='SAME')
    return conv + biases
73+
74+
75+ #def conv1d(in_tensor, filter_shape, out_channels):
76+ # _, _, channels = shape(in_tensor)
77+ # W_shape = [filter_shape, channels, out_channels]
78+ #
79+ # W = tf.truncated_normal(W_shape, dtype = tf.float32, stddev = 0.1)
80+ # weights = tf.Variable(W, name = "weights")
81+ # b = tf.truncated_normal([out_channels], dtype = tf.float32, stddev = 0.1)
82+ # biases = tf.Variable(b, name = "biases")
83+ # conv = tf.nn.conv1d(in_tensor, weights, stride=1, padding='SAME')
84+ # h_conv = conv + biases
85+ # return h_conv
86+
def vars_from_scopes(scopes):
    """
    Return a list of all global variables from all listed scopes.

    Operates relative to the current variable scope: inside scope
    "scope1", passing ["weights", "biases"] finds the variables in
    "scope1/weights" and "scope1/biases".
    """
    prefix = tf.get_variable_scope().name
    if prefix:
        scopes = [prefix + '/' + s for s in scopes]
    variables = []
    for s in scopes:
        variables.extend(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=s))
    return variables
102+
def tfvar2str(tf_vars):
    """Return the `.name` of each variable in `tf_vars`, preserving order."""
    return [v.name for v in tf_vars]
108+
109+
def shuffle_aligned_list(data):
    """Shuffle every array in `data` with the same permutation, keeping rows aligned."""
    perm = np.random.permutation(data[0].shape[0])
    return [arr[perm] for arr in data]
115+
116+
def batch_generator(data, batch_size, shuffle=True):
    """Generate batches of data indefinitely.

    Given a list of array-like objects, yield lists of array-like objects
    corresponding to the same `batch_size`-sized slice of each input.
    When the remaining data cannot fill a complete batch, the pass restarts
    from the beginning (reshuffling first if `shuffle` is True), so every
    yielded batch is complete.

    Bug fix: the wrap-around test previously used `>=`, which dropped the
    final batch even when it fit exactly (e.g. 10 items with batch_size 5
    never yielded items 5..9). The condition is now `>`.
    """
    if shuffle:
        data = shuffle_aligned_list(data)

    batch_count = 0
    while True:
        # Wrap around only when the next batch would overrun the data.
        if batch_count * batch_size + batch_size > len(data[0]):
            batch_count = 0
            if shuffle:
                data = shuffle_aligned_list(data)

        start = batch_count * batch_size
        end = start + batch_size
        batch_count += 1
        yield [d[start:end] for d in data]
139+
140+
141+
142+
def predictor_accuracy(predictions, labels):
    """
    Return a scalar in [0, 1]: the fraction of rows where the argmax of
    `predictions` matches the argmax of `labels` (i.e., the max-logit class
    is the correct one).
    """
    hits = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))
    return tf.reduce_mean(tf.cast(hits, tf.float32))
149+
def dic2list(sources, targets):
    """Merge two name->index dicts and return the names ordered by index.

    Assumes the indices across `sources` and `targets` together cover
    0 .. len-1 with no gaps.
    """
    index_to_name = {}
    for name, idx in sources.items():
        index_to_name[idx] = name
    for name, idx in targets.items():
        index_to_name[idx] = name
    return [index_to_name[i] for i in range(len(index_to_name))]
160+
def softmax(x):
    """Numerically stable softmax along axis 0 (scores are shifted by the
    global max before exponentiation)."""
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum(axis=0)
165+
def norm_matrix(X, l):
    """Return a copy of `X` with each row scaled to unit l-norm."""
    normalized = np.zeros(X.shape)
    for row_idx, row in enumerate(X):
        normalized[row_idx] = row / np.linalg.norm(row, l)
    return normalized
171+
172+
def description(sources, targets):
    """Build a run label "<src1>_<src2>..._<srcN>-<tgt1>" from the domain dicts.

    Uses at most 4 source names (in dict insertion order) joined by '_',
    followed by '-' and the first target name.

    Bug fix: in Python 3, dict.keys() returns a view that does not support
    indexing, so the original `source_names[0]` raised TypeError; the keys
    are now materialized into lists first.
    """
    source_names = list(sources.keys())
    target_names = list(targets.keys())
    N = min(len(source_names), 4)
    label = source_names[0]
    for i in range(1, N):
        label = label + '_' + source_names[i]
    label = label + '-' + target_names[0]
    return label
182+
def channel_dropout(X, p):
    """
    Drop entire channels (last axis of the 3D tensor X) independently with
    probability `p`, rescaling kept channels by 1/(1-p) so the expected
    value is preserved. Returns X unchanged when p == 0.
    """
    if p == 0:
        return X
    # floor(uniform[0,1) + 1 - p) is 1 with probability (1 - p), else 0.
    u = tf.random_uniform(shape=[tf.shape(X)[0], tf.shape(X)[2]])
    keep_mask = tf.floor(u + 1 - p)
    # Broadcast the [batch, channels] mask across the middle (time) axis.
    return tf.expand_dims(keep_mask, axis=1) * X / (1 - p)
191+
def sigmoid(x):
    """Logistic function 1 / (1 + e^(-x)); elementwise on numpy arrays."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)