
Source Code for Module dimer.nnet.base_test_classes

  1  """abstract tester methods and common test operations""" 
  2   
  3  import unittest 
  4  import tempfile 
  5  import numpy as np 
  6  rng = np.random.RandomState() 
  7  import theano 
  8  import theano.tensor as T 
  9  from .. import archive 
 10   
class NpyTester( object ):
    """mixin providing numpy-array assertions for unittest.TestCase subclasses"""

    def assertZeroArray(self, x):
        self.assertEqual( np.max( np.abs(x) ), 0 )

    def assertAlmostZeroArray(self, x):
        self.assertAlmostEqual( np.max( np.abs(x) ), 0 )

    def assertEqualArray(self, x, y):
        self.assertZeroArray( x - y )

    def assertAlmostEqualArray(self, x, y):
        self.assertAlmostZeroArray( x - y )

    def assertDifferentArray(self, x, y):
        self.assertGreater( np.max( np.abs(x - y) ), 0 )

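# Illustrative use of the NpyTester mixin (example only, not part of the original
# module): combine it with unittest.TestCase, as ModelTester does below, and the
# array assertions become available on the test case.
class _NpyTesterExample( unittest.TestCase, NpyTester ):
    def test_array_assertions(self):
        a = rng.rand(2, 3)
        self.assertEqualArray( a, a.copy() )          # exact element-wise equality
        self.assertAlmostEqualArray( a, a + 1e-9 )    # equal up to assertAlmostEqual's tolerance
        self.assertDifferentArray( a, a + 1.0 )       # at least one element differs
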
class ModelTester( unittest.TestCase, NpyTester ):
    """abstract model tester; subclasses must override get_model, get_output and get_input"""

    def setUp(self):
        self.rng = rng
        self.__rw = None

    def get_model(self):
        raise NotImplementedError("should be overridden")

    def get_output(self):
        raise NotImplementedError("should be overridden")

    def get_input(self):
        raise NotImplementedError("should be overridden")

    def rnd_weights(self):
        "save copies of the weights of a model"

        if self.__rw is None:
            m = self.get_model()
            self.__rw = []
            for i in range( len(m) ):
                self.__rw.append( map(np.copy, m[i].get_weights()) )
            self.__rw = tuple( self.__rw )
        return self.__rw

    def zero_model(self):
        m = self.get_model()

        for l in m:
            zw = map(lambda w: w - w, l.get_weights())
            l.set_weights( tuple(zw) )
        return m

    def _test_s_cost_(self):
        """dependence of cost on weight L1/L2 norms"""

        M = self.get_model()
        l1 = rng.randint(0, 10) / 10.0   # float division, so the penalty coefficients are not truncated to 0
        l2 = rng.randint(0, 10) / 10.0
        Y = T.lvector("Y")
        y = self.get_output()

        x = self.get_input()

        cf = theano.function(inputs=[M[0].input], outputs=M[-1].p_y_given_x)
        print cf(x)
        print y

        cf = theano.function(inputs=[M[0].input, Y], outputs=M.cost(Y, l1, l2))
        cf_00 = theano.function(inputs=[M[0].input, Y], outputs=M.cost(Y, 0, 0))
        w_summ = l1 * M.weight_norm("l1")
        w_summ_sq = l2 * M.weight_norm("l2")

        self.assertEqual( cf(x, y) - cf_00(x, y), w_summ + w_summ_sq )

        M = self.zero_model()
        cf = theano.function(inputs=[M[0].input, Y], outputs=M.cost(Y, l1, l2))
        cf_00 = theano.function(inputs=[M[0].input, Y], outputs=M.cost(Y, 0, 0))
        x = self.get_input()
        self.assertEqual( cf(x, y), cf_00(x, y) )

    def _test_io(self):
        """model can save and load"""
        modela = self.get_model()
        modelb = self.get_model()

        are_eq = []
        for i in range(len(modela)):
            rw = modela[i].get_weights()
            zw = modelb[i].get_weights()
            for (r, z) in zip(rw, zw):
                are_eq.append( np.all(r == z) )
                if are_eq[-1]:
                    print r, z
                    print r - z
                    print
        self.assertFalse( all(are_eq) )

        with tempfile.NamedTemporaryFile(delete=False, suffix="." + archive.__HDF_SUFFIX__) as fd:
            path = fd.name + ":model_weights"
            modela.save( path )
            modelb.load( path )

        are_eq = []
        for i in range(len(modela)):
            rw = modela[i].get_weights()
            zw = modelb[i].get_weights()
            for (r, z) in zip(rw, zw):
                are_eq.append( np.all(r == z) )
        self.assertTrue( all(are_eq) )

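# Usage sketch (example only): a concrete tester overrides the three abstract
# getters and exposes the _test_* helpers as real test methods.  "SomeNet" and
# the shapes below are hypothetical placeholders, not actual dimer.nnet classes,
# so the sketch is left commented out.
#
# class SomeNetTester( ModelTester ):
#     def get_model(self):
#         return SomeNet(...)   # any model exposing cost(), weight_norm(), save() and load()
#
#     def get_input(self):
#         return rng.rand(20, 10).astype(theano.config.floatX)
#
#     def get_output(self):
#         return rng.randint(0, 2, 20)   # integer labels matching the T.lvector target
#
#     def test_cost(self):
#         self._test_s_cost_()
#
#     def test_io(self):
#         self._test_io()
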
class CNNLayerTester( unittest.TestCase ):
    """test class for CPLayer, HiddenLayer and LogisticRegression layers

    Initializes a random number generator. Needs the get_layer method and self.x (input vector)
    to be defined and from that defines:

    - zero_weights,
    - rnd_weights,
    - zero_layer and rnd_layer"""

    def setUp(self):
        self.rng = rng
        self.__zw = None
        self.__rl = None

    def get_layer(self):
        raise NotImplementedError("should be overridden")

    def get_input(self):
        raise NotImplementedError("should be overridden")

    def zero_input(self):
        "an all-zero array with the same shape and dtype as the input"
        i = self.get_input()
        return np.zeros( i.shape, dtype=i.dtype )

    def zero_weights(self):
        "cached zero weights"

        if self.__zw is None:
            self.__zw = map(np.copy, self.rnd_weights())
            for i in range(len(self.__zw)):
                self.__zw[i] -= self.__zw[i]
            self.__zw = tuple( self.__zw )
        return self.__zw

    def rnd_weights(self):
        "cached random weights"

        return self.rnd_layer().get_weights()

    def zero_layer(self):
        "zero layer"
        l = self.get_layer()
        l.set_weights( self.zero_weights() )
        return l

    def rnd_layer(self):
        if self.__rl is None:
            self.__rl = self.get_layer()
        return self.__rl

    def _weights_inrange(self, thr, widx):
        l = self.get_layer()
        self.assertLessEqual( np.max( np.abs(l.get_weights()[widx]) ), thr )

    def _test_init_(self):
        def _weights_speeds_eqto(l, wlst):
            for z, w, s in zip(wlst, l.get_weights(), l.get_speeds()):
                self.assertTrue( np.all(w == z) )
                self.assertTrue( np.all(s == z - z) )

        _weights_speeds_eqto( self.zero_layer(), self.zero_weights() )
        _weights_speeds_eqto( self.rnd_layer(), self.rnd_weights() )

        # biases are all zero
        self.assertTrue( np.all(self.get_layer().get_weights()[1] == 0) )

    def _test_norms_(self):
        l = self.zero_layer()
        self.assertEqual( l.weight_norm("l1"), 0.0 )
        self.assertEqual( l.weight_norm("l2"), 0.0 )

        l = self.rnd_layer()
        wlst = map(np.abs, self.rnd_weights())

        self.assertEqual( l.weight_norm("l1"), sum(map(lambda _: _.sum(), wlst)) )
        self.assertEqual( l.weight_norm("l2"), sum(map(lambda _: (_ ** 2).sum(), wlst)) )

    def _test_activation(self):
        pass

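# Usage sketch (example only): a layer tester overrides get_layer and get_input
# and reuses the cached helpers.  "HiddenLayer" stands for any of the layer
# classes named in the CNNLayerTester docstring; the constructor arguments and
# sizes are made up, so the sketch is left commented out.
#
# class HiddenLayerTester( CNNLayerTester ):
#     def get_layer(self):
#         return HiddenLayer(...)   # layer exposing get_weights(), set_weights(), get_speeds(), weight_norm()
#
#     def get_input(self):
#         return rng.rand(5, 30).astype(theano.config.floatX)
#
#     def test_init(self):
#         self._test_init_()
#
#     def test_norms(self):
#         self._test_norms_()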