1"""
2These routines are not well-tested. They are also old.
3OB says that it is not important to test them well because Scalar Ops
4are rarely used by themselves, instead they are the basis for Tensor Ops
5(which should be checked thoroughly). Moreover, Scalar will be changed
6to use numpy's scalar routines.
7If you do want to rewrite these tests, bear in mind:
8  * You don't need to use Composite.
9  * FunctionGraph and DualLinker are old, use compile.function instead.
10"""
from __future__ import absolute_import, print_function, division

import unittest
import numpy as np

import theano
from theano.gof import FunctionGraph
from theano import gof
from theano.tests import unittest_tools as utt

from theano.scalar.basic import (floats, float16, float32, float64,
                                 ints, int8, int32, complex64, uint8,
                                 ComplexError, IntDiv, TrueDiv,
                                 Composite, add, div_proxy,
                                 and_, eq, neq, invert, mul, Scalar, InRange,
                                 cast, constant, switch)
from theano.scalar.basic import (
    true_div, inv, log, log2, log10, log1p, exp, exp2, expm1, sqrt, deg2rad,
    rad2deg, cos, arccos, sin, arcsin, tan, arctan, arctan2, cosh, arccosh,
    sinh, arcsinh, tanh, arctanh)


def inputs():
    return floats('xyz')


class test_ScalarOps(unittest.TestCase):

    def test_straightforward(self):
        x, y, z = inputs()
        e = mul(add(x, y), div_proxy(x, y))
        g = FunctionGraph([x, y], [e])
        fn = gof.DualLinker().accept(g).make_function()
        assert fn(1.0, 2.0) == 1.5
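
    def test_straightforward_compile_function(self):
        # A minimal sketch of the rewrite suggested in the module
        # docstring: build the same graph, but compile it with
        # theano.function instead of FunctionGraph + DualLinker.
        # (theano.function is applied to scalar variables in the same way
        # in test_upgrade_to_float below.)
        x, y, z = inputs()
        e = mul(add(x, y), div_proxy(x, y))
        fn = theano.function([x, y], e)
        assert fn(1.0, 2.0) == 1.5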

    # This test was moved to theano.tensor.tests.test_basic.py:test_mod.
    # It was moved there because, on Ubuntu, the c_extract code of
    # theano.scalar calls PyInt_Check, which fails on some OSes (it works
    # on others). Since we normally use theano.tensor.scalar rather than
    # theano.scalar directly, this is not important. The failure also
    # happens when the Theano function is called, so it is not a silent
    # bug.
    # --> This is why it is purposely named 'tes_mod' instead of 'test_mod'.
    def tes_mod(self):
        # We add this test because not all languages and C implementations
        # give the same sign to the result of %. This checks that the
        # c_code of `Mod` behaves like Python's %, which is what we want.

        x, y = ints('xy')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x % y])).make_function()
        for a, b in ((0, 1), (1, 1), (0, -1), (1, -1), (-1, -1),
                     (1, 2), (-1, 2), (1, -2), (-1, -2),
                     (5, 3), (-5, 3), (5, -3), (-5, -3)
                     ):
            self.assertTrue(fn(a, b) == a % b, (a,))


def has_f16(comp):
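    # Return True if any variable in the Composite's graph is float16.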
    return any(v.type == float16 for v in comp.fgraph.variables)


class test_composite(unittest.TestCase):
    def test_composite_clone_float32(self):
        w = int8()
        x = float16()
        y = float32()
        cz = Composite([x, y], [tanh(x + cast(y, 'float16'))])
        c = Composite([w, x, y], [cz(x, y) - cz(x, y)**2 +
                                  cast(x, 'int16') + cast(x, 'float32') +
                                  cast(w, 'float16') -
                                  constant(np.float16(1.0))])
        assert has_f16(c)
        nc = c.clone_float32()
        assert not has_f16(nc)

        v = uint8()
        w = float16()
        x = float16()
        y = float16()
        z = float16()

        c = Composite([v, w, x, y, z], [switch(v, mul(w, x, y), z)])

        assert has_f16(c)
        nc = c.clone_float32()
        assert not has_f16(nc)

    def test_straightforward(self):
        x, y, z = inputs()
        e = mul(add(x, y), div_proxy(x, y))
        C = Composite([x, y], [e])
        c = C.make_node(x, y)
        # print c.c_code(['x', 'y'], ['z'], dict(id = 0))
        g = FunctionGraph([x, y], [c.out])
        fn = gof.DualLinker().accept(g).make_function()
        assert fn(1.0, 2.0) == 1.5

    def test_flatten(self):
        # Test that we flatten multiple Composite.
        x, y, z = inputs()
        C = Composite([x, y], [x + y])
        CC = Composite([x, y], [C(x * y, y)])
        assert not isinstance(CC.outputs[0].owner.op, Composite)

        # Test with multiple outputs
        CC = Composite([x, y, z], [C(x * y, y), C(x * z, y)])
        # We don't flatten that case.
        assert isinstance(CC.outputs[0].owner.op, Composite)

    def test_with_constants(self):
        x, y, z = inputs()
        e = mul(add(70.0, y), div_proxy(x, y))
        C = Composite([x, y], [e])
        c = C.make_node(x, y)
        assert "70.0" in c.op.c_code(c, 'dummy', ['x', 'y'], ['z'], dict(id=0))
        # print c.c_code(['x', 'y'], ['z'], dict(id = 0))
        g = FunctionGraph([x, y], [c.out])
        fn = gof.DualLinker().accept(g).make_function()
        assert fn(1.0, 2.0) == 36.0

    def test_many_outputs(self):
        x, y, z = inputs()
        e0 = x + y + z
        e1 = x + y * z
        e2 = x / y
        C = Composite([x, y, z], [e0, e1, e2])
        c = C.make_node(x, y, z)
        # print c.c_code(['x', 'y', 'z'], ['out0', 'out1', 'out2'], dict(id = 0))
        g = FunctionGraph([x, y, z], c.outputs)
        fn = gof.DualLinker().accept(g).make_function()
        assert fn(1.0, 2.0, 3.0) == [6.0, 7.0, 0.5]

    def test_composite_printing(self):
        x, y, z = floats('xyz')
        e0 = x + y + z
        e1 = x + y * z
        e2 = x / y
        e3 = x // 5
        e4 = -x
        e5 = x - y
        e6 = x ** y + (-z)
        e7 = x % 3
        C = Composite([x, y, z], [e0, e1, e2, e3, e4, e5, e6, e7])
        c = C.make_node(x, y, z)
        g = FunctionGraph([x, y, z], c.outputs)
        gof.DualLinker().accept(g).make_function()

        assert str(g) == ('[*1 -> Composite{((i0 + i1) + i2),'
                          ' (i0 + (i1 * i2)), (i0 / i1), '
                          '(i0 // Constant{5}), '
                          '(-i0), (i0 - i1), ((i0 ** i1) + (-i2)),'
                          ' (i0 % Constant{3})}(x, y, z), '
                          '*1::1, *1::2, *1::3, *1::4, *1::5, *1::6, *1::7]')

    def test_make_node_continue_graph(self):
        # This is a test for a bug (now fixed) that disabled the
        # local_gpu_elemwise_0 optimization and printed an
        # optimization warning on the terminal.

        # We test that Composite.make_node accepts as inputs Variables
        # that represent existing computation.

        si0 = theano.scalar.int8()
        si1 = theano.scalar.int8()
        si2 = theano.scalar.float32()
        sout = (si0 * si1) / si2
        sop = theano.scalar.Composite([si0, si1, si2],
                                      [sout])
        si0 = theano.scalar.int8()
        si1 = theano.scalar.int8()
        si2 = theano.scalar.float32()
        si3 = theano.scalar.float32()
        sop.make_node(si0 * si3, si1, si2)


class test_logical(unittest.TestCase):
    def test_gt(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x > y])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            self.assertTrue(fn(a, b) == (a > b))

    def test_lt(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x < y])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            self.assertTrue(fn(a, b) == (a < b))

    def test_le(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x <= y])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            self.assertTrue(fn(a, b) == (a <= b))

    def test_ge(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x >= y])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            self.assertTrue(fn(a, b) == (a >= b))

    def test_eq(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [eq(x, y)])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            self.assertTrue(fn(a, b) == (a == b))

    def test_neq(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [neq(x, y)])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            self.assertTrue(fn(a, b) == (a != b))

    def test_or(self):
        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x | y])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            self.assertTrue(fn(a, b) == (a | b), (a, b))

    def test_xor(self):
        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x ^ y])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            self.assertTrue(fn(a, b) == (a ^ b), (a, b))

    def test_and(self):
        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [and_(x, y)])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            self.assertTrue(fn(a, b) == (a & b), (a, b))

        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x & y])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            self.assertTrue(fn(a, b) == (a & b), (a, b))

    def test_not(self):
        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [invert(x)])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            self.assertTrue(fn(a, b) == ~a, (a,))

        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [~x])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            self.assertTrue(fn(a, b) == ~a, (a,))


# This class does not inherit from unittest.TestCase, because it would
# interfere with the "yield" mechanism that automatically generates tests, see
# http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
# Therefore, it needs to be named "test_..." or "Test_...", so nose can pick
# it up by name; otherwise, the tests would not be executed.
class test_upgrade_to_float(object):
    # Test for Ops whose output has to be floating point, even when all
    # inputs are ints.
    # In particular, when the inputs are int8, the output should be
    # at least float32, not float16.
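    #
    # A rough illustration of the expected upcast (a sketch based on the
    # comment above, not an exhaustive spec):
    #   exp(int8()).type == float32      # int inputs are upgraded to float
    #   exp(float64()).type == float64   # float inputs keep their dtype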

    unary_ops_vals = [
        (inv, list(range(-127, 0)) + list(range(1, 127))),
        (sqrt, list(range(0, 128))),
        (log, list(range(1, 128))),
        (log2, list(range(1, 128))),
        (log10, list(range(1, 128))),
        (log1p, list(range(0, 128))),
        (exp, list(range(-127, 89))),
        (exp2, list(range(-127, 89))),
        (expm1, list(range(-127, 89))),
        (deg2rad, list(range(-127, 128))),
        (rad2deg, list(range(-127, 128))),
        (cos, list(range(-127, 128))),
        (arccos, list(range(-1, 2))),
        (cosh, list(range(-89, 90))),
        (arccosh, list(range(1, 128))),
        (sin, list(range(-127, 128))),
        (arcsin, list(range(-1, 2))),
        (sinh, list(range(-89, 90))),
        (arcsinh, list(range(-127, 128))),
        (tan, list(range(-3, 4))),
        (arctan, list(range(-127, 128))),
        (tanh, list(range(-127, 128))),
        (arctanh, [0])]

    binary_ops_vals = [
        (arctan2, list(range(-127, 128)), list(range(-127, 128)))]

    @staticmethod
    def _test_unary(unary_op, x_range):
        xi = int8('xi')
        xf = float32('xf')

        ei = unary_op(xi)
        fi = theano.function([xi], ei)

        ef = unary_op(xf)
        ff = theano.function([xf], ef)

        for x_val in x_range:
            outi = fi(x_val)
            outf = ff(x_val)

            assert outi.dtype == outf.dtype, 'incorrect dtype'
            assert np.allclose(outi, outf), 'insufficient precision'

    @staticmethod
    def _test_binary(binary_op, x_range, y_range):
        xi = int8('xi')
        yi = int8('yi')
        xf = float32('xf')
        yf = float32('yf')

        ei = binary_op(xi, yi)
        fi = theano.function([xi, yi], ei)

        ef = binary_op(xf, yf)
        ff = theano.function([xf, yf], ef)

        for x_val in x_range:
            for y_val in y_range:
                outi = fi(x_val, y_val)
                outf = ff(x_val, y_val)

                assert outi.dtype == outf.dtype, 'incorrect dtype'
                assert np.allclose(outi, outf), 'insufficient precision'

    def test_true_div(self):
        # true_div's upcast policy is not exactly "upgrade_to_float",
        # so the test is a little bit different
        x_range = list(range(-127, 128))
        y_range = list(range(-127, 0)) + list(range(1, 127))
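        # (y_range skips 0 so the loop below never divides by zero.)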

        xi = int8('xi')
        yi = int8('yi')
        xf = Scalar(theano.config.floatX)('xf')
        yf = Scalar(theano.config.floatX)('yf')

        ei = true_div(xi, yi)
        fi = theano.function([xi, yi], ei)

        ef = true_div(xf, yf)
        ff = theano.function([xf, yf], ef)

        for x_val in x_range:
            for y_val in y_range:
                outi = fi(x_val, y_val)
                outf = ff(x_val, y_val)

                assert outi.dtype == outf.dtype, 'incorrect dtype'
                assert np.allclose(outi, outf), 'insufficient precision'

    def test_unary(self):
        # Automatically define all individual unary tests
        for unary_op, x_range in self.unary_ops_vals:
            test_name = 'test_%s' % unary_op.name

            def test():
                self._test_unary(unary_op, x_range)
            test.description = test_name
            yield test

    def test_binary(self):
        # Automatically define all individual binary tests
        for binary_op, x_range, y_range in self.binary_ops_vals:
            test_name = 'test_%s' % binary_op.name

            def test():
                self._test_binary(binary_op, x_range, y_range)
            test.description = test_name
            yield test


class test_complex_mod(unittest.TestCase):
    # Make sure % fails on complex numbers.

    def test_fail(self):
        x = complex64()
        y = int32()
        try:
            x % y
            assert False
        except ComplexError:
            pass


class test_div(unittest.TestCase):
    def test_0(self):
        a = int8()
        b = int32()
        c = complex64()
        d = float64()
        f = float32()

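        # Integer // integer should give IntDiv; a division involving a
        # float or complex argument should give TrueDiv.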
        assert isinstance((a // b).owner.op, IntDiv)
        assert isinstance((b // a).owner.op, IntDiv)
        assert isinstance((b / d).owner.op, TrueDiv)
        assert isinstance((b / f).owner.op, TrueDiv)
        assert isinstance((f / a).owner.op, TrueDiv)
        assert isinstance((d / b).owner.op, TrueDiv)
        assert isinstance((d / f).owner.op, TrueDiv)
        assert isinstance((f / c).owner.op, TrueDiv)
        assert isinstance((a / c).owner.op, TrueDiv)


def test_grad_gt():
    x = float32(name='x')
    y = float32(name='y')
    z = x > y
    g = theano.gradient.grad(z, y)
    assert g.eval({y: 1.}) == 0.


def test_grad_switch():

    # This is a code snippet from the mailing list.
    # It caused an assert to be raised due to the
    # switch op's grad method not handling integer
    # inputs correctly.

    x = theano.tensor.matrix()
    c = theano.tensor.matrix()

    s = theano.tensor.switch(c, x, 0)
    l = s.sum()

    theano.gradient.grad(l, x)


def test_grad_identity():
    # Check that the grad method of Identity correctly handles int dtypes.
    x = theano.tensor.imatrix('x')
    # tensor_copy is Elemwise{Identity}
    y = theano.tensor.tensor_copy(x)
    l = y.sum(dtype=theano.config.floatX)
    theano.gradient.grad(l, x)


def test_grad_inrange():
    for bound_definition in [(True, True), (False, False)]:
        # Instantiate op, and then take the gradient
        op = InRange(*bound_definition)
        x = theano.tensor.fscalar('x')
        low = theano.tensor.fscalar('low')
        high = theano.tensor.fscalar('high')
        out = op(x, low, high)
        gx, glow, ghigh = theano.tensor.grad(out, [x, low, high])

        # We check that the gradients are zero when x is below the lower
        # bound, equal to the lower bound, between the bounds, equal to
        # the upper bound, and above the upper bound.
        # Mathematically, the gradient is infinite when x equals one of
        # the bounds, but in that case Theano defines the gradient to be
        # zero for stability.
        f = theano.function([x, low, high], [gx, glow, ghigh])
        utt.assert_allclose(f(0, 1, 5), [0, 0, 0])
        utt.assert_allclose(f(1, 1, 5), [0, 0, 0])
        utt.assert_allclose(f(2, 1, 5), [0, 0, 0])
        utt.assert_allclose(f(5, 1, 5), [0, 0, 0])
        utt.assert_allclose(f(7, 1, 5), [0, 0, 0])


def test_grad_abs():
    a = theano.tensor.fscalar("a")
    b = theano.tensor.nnet.relu(a)
    c = theano.grad(b, a)
    f = theano.function([a], c, mode=theano.Mode(optimizer=None))
    # Currently Theano returns 0.5 at 0, but there is no guarantee that
    # this will not change in the future.
    ret = f(0.)
    assert ret == 0.5, ret

# Testing of Composite is done in tensor/tests/test_opt.py
# in test_fusion, TestCompositeCodegen


def test_constant():
    c = constant(2, name='a')
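    # With no dtype argument, constant() infers a dtype from the value;
    # this test relies on 2 being stored as int8.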
    assert c.name == 'a'
    assert c.dtype == 'int8'
    c = constant(2, dtype='float32')
    assert c.name is None
    assert c.dtype == 'float32'


if __name__ == '__main__':
    unittest.main()