
Source Code for Module dimer.nnet.autoencoder_tests

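"""Unit tests for dimer.nnet.autoencoder.

Covers AELayer (weight initialization ranges, input corruption, sigmoid
encoder/decoder activations), AutoEncoder (momentum parameter updates), and
AEStack (per-layer costs, updates, activations, and model I/O).
"""
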
#import unittest
from operator import concat

import numpy as np
rng = np.random.RandomState()

import theano
import theano.tensor as T

import base_test_classes
from autoencoder import AELayer, AutoEncoder, AEStack


#@unittest.skip("AE ON HOLD")
class TestAEl(base_test_classes.CNNLayerTester, base_test_classes.NpyTester):
    def setUp(self):
        super(TestAEl, self).setUp()

        self.thrng = T.shared_randomstreams.RandomStreams(self.rng.randint(100))
        self.bs = rng.randint(5, 10)
        self.nin = 20
        self.nout = 10

    def get_input(self):
        return np.asarray(rng.rand(self.bs, self.nin), dtype=np.float64)

    def get_layer(self, cl=0):
        return AELayer(T.matrix("X"), self.nin, self.nout,
                       self.rng, self.thrng, self.get_input().dtype, cl)

    def test_aelinit(self):
        "weights are all init'ed to [-thr, thr]; the remaining params to 0"

        self._weights_inrange(np.sqrt(6. / (self.nin + self.nout)), 0)
        self._weights_inrange(0.0, 1)
        self._weights_inrange(0.0, 2)

    def test_init(self):
        self._test_init_()

    def test_norms(self):
        self._test_norms_()

    def test_corruption(self):
        X = T.matrix("X")
        x = self.get_input()
        layer = self.get_layer()

        f = theano.function(inputs=[X], outputs=layer.corrupt(X, 1))
        self.assertZeroArray(f(x))

        f = theano.function(inputs=[X], outputs=layer.corrupt(X, 0))
        self.assertEqualArray(f(x), x)

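    # The endpoints checked above: corrupt(X, 1) zeroes every entry and
    # corrupt(X, 0) is the identity. A minimal NumPy sketch of a masking-noise
    # operator consistent with both (an assumption; the real operator lives in
    # autoencoder.AELayer):
    #
    #   def corrupt(x, level, rng=np.random):
    #       # keep each entry with probability 1 - level, zero the rest
    #       return x * rng.binomial(1, 1. - level, size=x.shape)
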
    def test_activation(self):
        layer = self.get_layer()
        enc_f = theano.function(inputs=[layer.input], outputs=layer.encoder)
        i = self.get_input()

        self.assertEqualArray(theano.function(inputs=[layer.input], outputs=layer.tilde_input)(i), i)

        # W = [0, 1, 1, ...]: all ones with the first column zeroed
        wv = np.ones(self.zero_weights()[0].shape, self.zero_weights()[0].dtype)
        wv[:, 0] = 0.0
        layer._weights_[0].set_value(wv)

        o = enc_f(i)
        eo = 1. / (1 + np.exp(-np.dot(i, wv)))

        print i.shape, wv.shape
        print o
        print eo
        print eo - o

        self.assertAlmostEqual(np.max(np.abs(eo - o)), 0)

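    # Expected value computed by hand above: with the non-weight parameters
    # init'ed to zero (as test_aelinit checks), the encoder should reduce to
    # the logistic activation sigmoid(X . W) = 1 / (1 + exp(-X . W)), which is
    # what `eo` recomputes in NumPy and compares against the compiled graph.
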
    def test_iact(self):
        # identity weights: the layer should act as an elementwise sigmoid
        _identity_size = 5
        ilayer = AELayer(T.matrix("X"), _identity_size, _identity_size,
                         self.rng, self.thrng, self.get_input().dtype, 0)
        wv = np.zeros((_identity_size, _identity_size), self.zero_weights()[0].dtype)
        for _i in range(_identity_size):
            wv[_i, _i] = 1
        ilayer._weights_[0].set_value(wv)

        enc_f = theano.function(inputs=[ilayer.input], outputs=ilayer.encoder)
        dec_f = theano.function(inputs=[ilayer.input], outputs=ilayer.decoder)
        i = np.asarray(rng.rand(self.bs, _identity_size), dtype=self.zero_weights()[0].dtype)
        i -= i  # zero the input, so sigmoid(i) == 0.5 everywhere
        print i.shape, wv.shape
        print wv
        print i
        print
        print

        o = enc_f(i)
        eo = 1. / (1 + np.exp(-i))
        print o
        print eo
        print eo - o
        print
        self.assertAlmostEqual(np.max(np.abs(o - eo)), 0)

        o = dec_f(i)
        eo = 1. / (1 + np.exp(-eo))  # decoder applies a second sigmoid on top
        print o
        print eo
        print eo - o
        self.assertAlmostEqual(np.max(np.abs(o - eo)), 0)

    def speed_update(self):
        pass


#@unittest.skip("not n")
class TestAEModel(base_test_classes.ModelTester, base_test_classes.NpyTester):

    def setUp(self):
        super(TestAEModel, self).setUp()

        self.thrng = T.shared_randomstreams.RandomStreams(self.rng.randint(100))
        self.bs = rng.randint(4, 13)
        self.nin = rng.randint(50, 100)
        self.nout = rng.randint(10, 40)
        self.cl = rng.randint(0, 100) / 100.

    def get_model(self):
        return AutoEncoder(self.nin, self.nout, self.rng, self.thrng,
                           self.get_input().dtype, 0)

    def get_input(self):
        return np.asarray(rng.rand(self.bs, self.nin), dtype=np.float64)

    def get_output(self):
        return np.asarray(rng.rand(self.nout,), dtype=self.get_input().dtype)

    def get_gradient_f(self, l1, l2):
        model = self.get_model()
        idx = T.iscalar("batch_idx")
        X = theano.shared(value=self.get_input(), name="X")

        params = reduce(concat, model.get_params())
        return theano.function(inputs=[idx],
                               outputs=T.grad(model.cost(l1, l2), wrt=params),
                               givens={model[0].input: X[self.bs * idx:self.bs * (idx + 1)]})

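    # `givens` substitutes a slice of the shared dataset for the symbolic
    # input, so the compiled gradient function takes only a batch index:
    # batch b sees rows [bs*b, bs*(b+1)). Note that the gradient is taken on a
    # fresh model from get_model(), not on the caller's instance, so repeated
    # calls return the same values regardless of later updates.
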
    def test_update(self):
        model = self.get_model()
        layer = model[0]
        grad_f = self.get_gradient_f(0, 0)
        rho = rng.randint(0, 10) / 10.
        mu = rng.randint(0, 10) / 10.

        weights = layer.get_weights()
        self.assertFalse(all(map(lambda s: np.all(s == 0), layer.get_weights())))
        self.assertTrue(all(map(lambda s: np.all(s == 0), layer.get_speeds())))

        grad_v = grad_f(0)

        model.update_params([0], grad_f, mu, rho)

        for i in range(len(grad_v)):
            print i, rho, mu
            self.assertAlmostEqualArray(layer.get_speeds()[i], -rho * grad_v[i])
            self.assertAlmostEqualArray(layer.get_weights()[i] - layer.get_speeds()[i], weights[i])

        weights = layer.get_weights()
        speeds = layer.get_speeds()
        model.update_params([0], grad_f, mu, rho)
        for i in range(len(grad_v)):
            print i, rho, mu
            print (layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i]
            print np.max(np.abs((layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i]))

            self.assertTrue(np.all(layer.get_speeds()[i] == -rho * grad_v[i] + mu * speeds[i]))
            self.assertAlmostEqual(np.max(np.abs((layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i])), 0)
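
    # The two passes above pin down update_params to a classic momentum rule.
    # A minimal NumPy-style sketch of the per-parameter step they assert
    # (illustrative names; the real implementation is in autoencoder.py):
    #
    #   speed_new  = mu * speed - rho * grad    # speed starts at zero
    #   weight_new = weight + speed_new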


#@unittest.skip("AE ON HOLD")
class TestAEStackModel(base_test_classes.ModelTester, base_test_classes.NpyTester):

    def setUp(self):
        super(TestAEStackModel, self).setUp()

        self.thrng = T.shared_randomstreams.RandomStreams(self.rng.randint(100))
        self.bs = rng.randint(2, 3)
        self.nin = 100
        self.nhid = (rng.randint(50, 80),
                     rng.randint(10, 40),
                     rng.randint(2, 4))
        # debugging override: shrink the stack to a fixed 10 -> 5 -> 2 shape
        self.nin, self.nhid = 10, (5, 2)
        self.cl = rng.randint(0, 100) / 100.

    def get_model(self):
        return AEStack(self.nin, self.nhid, self.rng, self.thrng,
                       self.get_input().dtype, 0)

    def get_input(self):
        return np.asarray(rng.rand(self.bs, self.nin), dtype=np.float64)

    def get_output(self):
        return np.asarray(rng.rand(self.nhid[-1],), dtype=self.get_input().dtype)

    def test_cost(self):
        # note the floats: integer division here would make l1 = l2 = 0 and
        # the regularization check below vacuous
        l1 = rng.randint(0, 10) / 10.
        l2 = rng.randint(0, 10) / 10.
        M = self.get_model()
        for layer in M:
            cost = layer.cost

            cf = theano.function(inputs=[M[0].input], outputs=cost(l1, l2))
            cf_00 = theano.function(inputs=[M[0].input], outputs=cost(0, 0))
            w_summ = l1 * layer.weight_norm("l1")
            w_summ_sq = l2 * layer.weight_norm("l2")

            x = self.get_input()
            self.assertEqual(cf(x) - cf_00(x), w_summ + w_summ_sq)

        M = self.zero_model()
        for layer in M:
            cost = layer.cost

            cf = theano.function(inputs=[M[0].input], outputs=cost(l1, l2))
            cf_00 = theano.function(inputs=[M[0].input], outputs=cost(0, 0))
            x = self.get_input()
            self.assertEqual(cf(x), cf_00(x))

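    # What the two loops establish: for every layer, cost(l1, l2) - cost(0, 0)
    # must equal the additive penalty l1 * weight_norm("l1") +
    # l2 * weight_norm("l2"), and the penalty must vanish on a zeroed model.
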
    def get_gradient_f(self, l1, l2, lidx):
        model = self.get_model()
        print model.get_params()[lidx]
        grad = T.grad(model[lidx].cost(l1, l2),
                      wrt=model.get_params()[lidx])
        idx = T.iscalar("batch_idx")
        X = theano.shared(value=self.get_input(), name="X")
        givens = {model[0].input: X[self.bs * idx:self.bs * (idx + 1)]}

        return theano.function(inputs=[idx], outputs=grad, givens=givens)

    def _test_layer_update(self, model, lidx):
        layer = model[lidx]
        grad_f = self.get_gradient_f(0, 0, lidx)
        rho = rng.randint(0, 10) / 10.
        mu = rng.randint(0, 10) / 10.

        weights = layer.get_weights()
        self.assertFalse(all(map(lambda s: np.all(s == 0),
                                 layer.get_weights())))
        self.assertTrue(all(map(lambda s: np.all(s == 0),
                                layer.get_speeds())))

        grad_v = grad_f(0)

        model.update_params([0], grad_f, mu, rho, lidx)

        for i in range(len(grad_v)):
            print i, rho, mu
            self.assertAlmostEqualArray(layer.get_speeds()[i], -rho * grad_v[i])
            self.assertAlmostEqualArray(layer.get_weights()[i] - layer.get_speeds()[i], weights[i])

        weights = layer.get_weights()
        speeds = layer.get_speeds()
        model.update_params([0], grad_f, mu, rho, lidx)
        for i in range(len(grad_v)):
            print i, rho, mu
            print (layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i]
            print np.max(np.abs((layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i]))

            self.assertTrue(np.all(layer.get_speeds()[i] == -rho * grad_v[i] + mu * speeds[i]))
            self.assertAlmostEqual(np.max(np.abs((layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i])), 0)

    def test_model_update(self):
        model = self.get_model()
        for i in range(len(model)):
            self._test_layer_update(model, i)

    def test_activation(self):
        model = self.get_model()

        x = self.get_input()
        out_f = []
        grad_f = []
        outvals = []
        for i in range(len(model)):
            out_f.append(theano.function(inputs=[model[0].input],
                                         outputs=model[i].activation()))
            grad_f.append(self.get_gradient_f(0, 0, i))
            outvals.append(out_f[-1](x))

        self.assertEqual(map(lambda _: _.shape[0], outvals),
                         [self.bs] * len(model))
        self.assertEqual(map(lambda _: _.shape[1], outvals), list(self.nhid))

        o0 = out_f[0](x)
        o1 = out_f[1](x)
        # snapshot the full weight list (not just its first array), as the
        # per-index comparisons below require
        w = model[1].get_weights()
        print w
        model.update_params([0], grad_f[1], 0, 0.1, 1)
        print grad_f[1](0)
        print w

        self.assertDifferentArray(w[0], model[1].get_weights()[0])
        self.assertDifferentArray(w[1], model[1].get_weights()[1])
        self.assertEqualArray(o0, out_f[0](x))
        self.assertDifferentArray(o1, out_f[1](x))

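    # Updating only layer 1 (lidx=1) must leave layer 0's activations
    # unchanged while altering both of layer 1's parameter arrays and its
    # output: updates are local to the addressed layer of the stack.
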
    def test_io(self):
        self._test_io()
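

# A note on running this module: with the `import unittest` at the top
# restored, the standard runner applies, e.g.
#
#   if __name__ == "__main__":
#       unittest.main()
#
# or, from the package root:  python -m unittest dimer.nnet.autoencoder_tests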