
Source Code for Module dimer.nnet.nccn_tests

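"""Unit tests for the dimer.nnet.nccn building blocks -- LogisticReg,
HiddenLayer, ConvPoolLayer and the assembled CnnModel: initialization,
weight norms, activations checked against hand-rolled NumPy references,
cost, and parameter updates.
"""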
import unittest
from operator import concat
import numpy as np

## module-level RNG, unseeded: the layer sizes and batch sizes drawn
## below differ from run to run
rng = np.random.RandomState()

import theano
import theano.tensor as T

from nccn import LogisticReg, HiddenLayer, ConvPoolLayer, CnnModel

import base_test_classes

#@unittest.skip("TEMP")
class TestLRLayer( base_test_classes.CNNLayerTester ):
    def setUp(self):
        super(TestLRLayer, self).setUp()

        self.nin = rng.randint(5, 10)
        self.nout = rng.randint(2, 4)
        self.bs = rng.randint(2, 8)

    def get_input(self):
        return np.asarray( self.rng.uniform(low=-1, high=1,
                                            size=(self.bs, self.nin)), dtype=np.float64 )

    def get_layer(self):
        l = LogisticReg(T.matrix("X"), self.nin, self.nout,
                        self.rng, self.get_input().dtype)
        ## all parameters are initialized to 0 -- revert that so the layer
        ## has a non-trivial activation
        l._weights_[0].set_value( np.asarray( rng.rand(self.nin, self.nout),
                                              dtype=l.get_weights()[0].dtype ) )
        return l

    def test_rlinit(self):
        "lr weights and biases are all init'ed to 0"

        l = LogisticReg(T.matrix("X"), self.nin, self.nout,
                        self.rng, self.get_input().dtype)
        ## LogisticReg zero-initializes both parameters (see get_layer above)
        self.assertLessEqual( np.max( np.abs(l.get_weights()[0]) ), 0 )
        self.assertLessEqual( np.max( np.abs(l.get_weights()[1]) ), 0 )

    def test_init(self):
        self._test_init_()

    def test_norms(self):
        self._test_norms_()

    def test_activation(self):
        layer = self.get_layer()
        out_f = theano.function(inputs=[layer.input], outputs=layer.activation() )
        i = self.get_input()

        ## random weights: output must not be the uniform distribution
        self.assertFalse( np.all( out_f( i ) == 1. / self.nout ) )

        ## zero weights: softmax of equal logits is uniform, i.e. 1/nout
        layer.set_weights( self.zero_weights() )
        self.assertTrue( np.all( out_f( self.get_input() ) == 1. / self.nout ) )

        ## all-ones weights: logits are equal within each row, still uniform
        layer._weights_[0].set_value( np.ones( self.zero_weights()[0].shape,
                                               self.zero_weights()[0].dtype ) )
        print out_f(i) - 1. / self.nout
        self.assertAlmostEqual( np.max( np.abs( out_f(i) - 1. / self.nout ) ), 0 )

        ## zero out the first column and compare against a hand-rolled softmax
        wv = np.ones( self.zero_weights()[0].shape, self.zero_weights()[0].dtype )
        wv[:,0] = 0.0
        layer._weights_[0].set_value( wv )
        o = out_f( i )
        eo = np.exp( np.dot( i, wv ) )
        d = np.sum( eo, axis=1 )
        for idx in range(eo.shape[0]):
            ## row-wise normalization of the exponentiated logits
            eo[idx,:] /= d[idx]

        print i.shape, wv.shape
        print o
        print eo
        print eo - o

        self.assertAlmostEqual( np.max( np.abs( eo - o ) ), 0 )
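
## A vectorized equivalent of the softmax reference that test_activation
## computes by hand above.  Not used by the tests; the helper name is
## illustrative only.
def _softmax_reference(x, w):
    "row-wise softmax of x.dot(w); the zero bias is implicit, as in the test"
    e = np.exp(np.dot(x, w))
    return e / e.sum(axis=1)[:, np.newaxis]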


#@unittest.skip("TEMP")
class TestHLayer( base_test_classes.CNNLayerTester ):
    def setUp(self):
        super(TestHLayer, self).setUp()

        self.nin = rng.randint(5, 10)
        self.nout = rng.randint(2, 4)
        self.bs = rng.randint(2, 10)

    def get_input(self):
        return np.asarray( self.rng.uniform(low=-1, high=1,
                                            size=(self.bs, self.nin)), dtype=np.float64 )

    def get_layer(self):
        return HiddenLayer(T.matrix("X"), self.nin, self.nout,
                           self.rng, self.get_input().dtype)

    def test_hlinit(self):
        "hl weights are all init'ed within [-thr, thr], biases to 0"

        ## thr is the Glorot/Bengio uniform-init bound sqrt(6 / (nin + nout))
        self._weights_inrange( np.sqrt( 6. / (self.nin + self.nout) ), 0 )
        self._weights_inrange(0.0, 1)

    def test_init(self): self._test_init_()

    def test_norms(self): self._test_norms_()

    def test_activation(self):
        layer = self.get_layer()
        out_f = theano.function(inputs=[layer.input], outputs=layer.activation() )
        i = self.get_input()

        ## W columns: [0, 1, 1, ...] -- output unit 0 gets zero pre-activation
        wv = np.ones( self.zero_weights()[0].shape, self.zero_weights()[0].dtype )
        wv[:,0] = 0.0
        layer._weights_[0].set_value( wv )

        o = out_f( i )
        ## hand-rolled sigmoid reference, zero bias
        eo = 1. / (1 + np.exp( -np.dot( i, wv ) ))

        print i.shape, wv.shape
        print o
        print eo
        print eo - o

        self.assertAlmostEqual( np.max( np.abs( eo - o ) ), 0 )
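
## Companion to _softmax_reference above: the sigmoid reference that
## TestHLayer.test_activation checks against.  Illustrative only, not used
## by the tests.
def _sigmoid_reference(x, w):
    "element-wise logistic sigmoid of x.dot(w), zero bias"
    return 1. / (1 + np.exp(-np.dot(x, w)))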

#@unittest.skip("TEMP")
class TestCPLayer( base_test_classes.CNNLayerTester ):
    def setUp(self):
        super(TestCPLayer, self).setUp()

        ## image shape (batch, channels, height, width), filter shape, pool shape
        self.imsh = (4, 1, 11, 11)
        self.fsh = (2, 1, 4, 4)
        self.psh = (2, 2)

    def get_layer(self):
        return ConvPoolLayer(T.dtensor4("X"), self.fsh, self.imsh,
                             self.rng, (2,2), self.get_input().dtype)

    def get_input(self):
        return np.asarray( self.rng.uniform(low=-1, high=1, size=self.imsh),
                           dtype=np.float64 )

    def test_init(self): self._test_init_()

    def test_norms(self): self._test_norms_()

    def test_activation(self):
        layer = self.get_layer()
        out_f = theano.function(inputs=[layer.input], outputs=layer.activation() )
        i = self.get_input()

        ## set a 0 and a 1 receptive field
        wv = np.ones( self.zero_weights()[0].shape, self.zero_weights()[0].dtype )
        wv[0,0] = 0.0
        wv[1,0] = 1.0
        layer._weights_[0].set_value( wv )

        o = out_f( i )
        ## the zeroed filter (feature map 0) must respond with sigmoid(0) == 0.5
        ## at every position
        eo = np.ones( (4, 2, 4, 4), dtype=self.get_input().dtype) / 2

        print i.shape, wv.shape, o.shape
        print o[:,0]
        print eo[:,0]
        print eo[:,0] - o[:,0]

        self.assertAlmostEqual( np.max( np.abs( eo[:,0] - o[:,0] ) ), 0 )
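
## Shape bookkeeping for the expectation above (derived from setUp): a valid
## convolution of an 11x11 image with a 4x4 filter gives 8x8 maps, and (2,2)
## max-pooling halves that to 4x4 -- hence the (4, 2, 4, 4) expected output.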

#@unittest.skip("TEMP")
class TestCnnModel( base_test_classes.ModelTester, base_test_classes.NpyTester ):
    def setUp(self):
        super(TestCnnModel, self).setUp()

        self.bs = rng.randint(5, 10)
        ## arch: (feature maps per conv layer,), (filter shapes,), (pool shapes,)
        self.arch = [(2,), ((4,4),), ((2,2),)]
        self.imsh = (self.bs, 1, 11, 11)
        self.lrs = 12
        self.G = 2

        ## expected parameter shapes of the conv, hidden and logistic layers
        self.CW_sh = (self.arch[0][0], 1, self.arch[1][0][0], self.arch[1][0][1])
        self.Cb_sh = (self.arch[0][0], )
        self.HW_sh = (32, 12)  ## 32 == 2 maps * 4 * 4 activations after conv+pool
        self.Hb_sh = (12, )
        self.RW_sh = (12, 2)
        self.Rb_sh = (2, )

    def get_model(self):
        return CnnModel(self.arch, self.lrs, self.imsh,
                        self.G, rng, self.get_input().dtype, "int32")

    def get_input(self):
        return np.asarray( rng.rand(*self.imsh), dtype=np.float64 )

    def get_output(self):
        return np.asarray( np.zeros((self.imsh[0],)), dtype=np.int32 )

    def test_topcpidx(self):
        model = self.get_model()
        ## the conv-pool stack is followed by exactly one hidden and one
        ## logistic-regression layer, so the top conv-pool layer sits at len-3
        self.assertEqual( model.top_cp_idx, len(self.arch[0]) - 1 )
        self.assertEqual( model.top_cp_idx, len(model) - 3 )

    #@unittest.skip("TEMP")
    def test_cost(self):
        M = self.get_model()
        ## strictly positive penalties, so the norm term below is non-zero
        l1 = rng.randint(1, 10) / 10.
        l2 = rng.randint(1, 10) / 10.
        y = self.get_output()
        x = self.get_input()

        ## check dependence on the weight norms
        cf = theano.function(inputs=[M.X, M.Y], outputs=M.cost(l1, l2))
        cf_00 = theano.function(inputs=[M.X, M.Y], outputs=M.cost(0, 0))
        w_summ = l1 * M.weight_norm("l1")
        w_summ_sq = l2 * M.weight_norm("l2")
        self.assertNotEqual( 0, w_summ + w_summ_sq )
        self.assertAlmostEqual( cf(x,y) - cf_00(x,y), w_summ + w_summ_sq )

        ## check that the unpenalized cost is close to zero on right answers:
        ## bias the top layer strongly toward class 0, then score the model
        ## on its own argmax predictions
        nw = np.copy( M[-1]._weights_[0].get_value() )
        nw[:,0] = 1
        M[-1]._weights_[0].set_value( nw )
        out_f = theano.function(inputs=[M.X], outputs=M[-1].p_y_given_x)
        y = np.asarray(np.argmax(out_f(x), axis=1), dtype=y.dtype)
        print cf(x, y)
        print cf_00(x, y)
        print map(lambda l: l.weight_norm("l1"), M)
        print map(lambda l: l.weight_norm("l2"), M)
        print M[-1].get_weights()
        self.assertTrue( np.all(cf_00(x,y) < 0.1) )
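
    ## Spelled out, the identity the first half of test_cost verifies:
    ##   cost(l1, l2) == cost(0, 0) + l1 * weight_norm("l1") + l2 * weight_norm("l2")
    ## i.e. the penalties enter the cost linearly, on top of the data term.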

    #@unittest.skip("TEMP")
    def test_activation_vs_i(self):
        "test activation function, sensitive to input"

        model = self.get_model()
        out_f = theano.function( inputs=[model[0].input],
                                 outputs=model[0].activation() )
        x = self.get_input()
        ## x - x is the all-zero input
        self.assertDifferentArray( out_f(x), out_f(x - x) )


    #@unittest.skip("TEMP")
    def test_activation_vs_w(self):
        "test activation function, sensitive to weights"

        model = self.get_model()
        out_f = theano.function( inputs=[model[0].input],
                                 outputs=model[0].activation() )
        i = self.get_input()

        print i
        o1 = out_f(i)
        ## zero all the weights; the first layer's activation must change
        for l in range(len(model)):
            model[l].set_weights( self.zero_model()[l].get_weights() )
        self.assertFalse( np.all( o1 == out_f( i ) ) )

    #@unittest.skip("TEMP")
    def test_init(self):
        model = self.get_model()
        self.assertNotEqual( 0, model.weight_norm("l1") )
        self.assertNotEqual( 0, model.weight_norm("l2") )

        ## per layer: non-zero weights, zero update speeds
        layer = model[0]
        self.assertFalse( all( map(lambda s: np.all(s==0), layer.get_weights()) ) )
        self.assertNotEqual( 0, layer.weight_norm("l1") )
        self.assertNotEqual( 0, layer.weight_norm("l2") )
        self.assertTrue( all( map(lambda s: np.all(s==0), layer.get_speeds()) ) )

        layer = model[1]
        self.assertFalse( all( map(lambda s: np.all(s==0), layer.get_weights()) ) )
        self.assertNotEqual( 0, layer.weight_norm("l1") )
        self.assertNotEqual( 0, layer.weight_norm("l2") )
        self.assertTrue( all( map(lambda s: np.all(s==0), layer.get_speeds()) ) )

        layer = model[2]
        self.assertFalse( all( map(lambda s: np.all(s==0), layer.get_weights()) ) )
        self.assertTrue( all( map(lambda s: np.all(s==0), layer.get_speeds()) ) )


    #@unittest.skip("TEMP")
    def test_update(self):
        model = self.get_model()

        x = theano.shared( value=self.get_input(), name="X" )
        y = theano.shared( value=self.get_output(), name="Y" )
        idx = T.iscalar("batch_idx")
        params = reduce(concat, map(lambda l: l.get_params(), model))
        ## givens select mini-batch number idx
        grad_f = theano.function(inputs=[idx],
                                 outputs=T.grad(model.cost(0, 0), wrt=params),
                                 givens={model.X : x[idx * self.bs : self.bs * (idx+1)],
                                         model.Y : y[idx * self.bs : self.bs * (idx+1)]})

        rho = rng.randint(0, 10) / 10.
        mu = rng.randint(0, 10) / 10.

        ## first step: speeds start at 0, so speed == -rho * grad and
        ## w_new == w_old + speed
        grad_v = grad_f(0)
        all_weights = reduce(concat, model.get_weights())
        all_speeds = reduce(concat, model.get_speeds())
        model.update_params([0], grad_f, mu, rho)
        for layer in model:
            grads = map(lambda i: grad_v.pop(0), range(len(layer.get_params())))
            weights = map(lambda i: all_weights.pop(0), range(len(layer.get_params())))
            speeds = map(lambda i: all_speeds.pop(0), range(len(layer.get_params())))
            print grads[0].shape
            print weights[0].shape
            print speeds[0].shape
            for i in range(len(grads)):
                print i, rho, mu
                print np.max( np.abs((layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i]) )
                print np.max( np.abs( layer.get_speeds()[i] + rho * grads[i] ) )
                self.assertTrue( np.all(layer.get_speeds()[i] == -rho * grads[i]) )
                self.assertAlmostEqual( np.max( np.abs((layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i]) ), 0 )

        print
        print

        ## second step: the momentum term kicks in,
        ## speed == -rho * grad + mu * old_speed
        grad_v = grad_f(0)
        all_weights = reduce(concat, model.get_weights())
        all_speeds = reduce(concat, model.get_speeds())
        model.update_params([0], grad_f, mu, rho)
        for layer in model:
            grads = map(lambda i: grad_v.pop(0), range(len(layer.get_params())))
            weights = map(lambda i: all_weights.pop(0), range(len(layer.get_params())))
            speeds = map(lambda i: all_speeds.pop(0), range(len(layer.get_params())))
            print grads[0].shape
            print weights[0].shape
            print speeds[0].shape
            print
            for i in range(len(grads)):
                print i, rho, mu
                print np.max( np.abs((layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i]) )
                print np.max( np.abs( layer.get_speeds()[i] + rho * grads[i] ) )
                self.assertTrue( np.all(layer.get_speeds()[i] == -rho * grads[i] + mu * speeds[i]) )
                self.assertAlmostEqual( np.max( np.abs((layer.get_weights()[i] - layer.get_speeds()[i]) - weights[i]) ), 0 )
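
    ## The update rule test_update verifies, written out (classical momentum):
    ##   speed_t  = mu * speed_{t-1} - rho * grad_t      (with speed_0 == 0)
    ##   weight_t = weight_{t-1} + speed_t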

    def test_io(self):
        self._test_io()
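
## Standard unittest entry point, assuming the suite is meant to be runnable
## directly (no external runner appears in this module).
if __name__ == "__main__":
    unittest.main()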
343