1# Example program that uses 'setup' and 'cleanup' functions to
2# initialize/de-initialize global variables on each node before
3# computations are executed. Computations (jobs) on a node update
4# shared variable 'shvar' using multiprocessing's locks (so no two
5# jobs update the variable simultaneously).
6
7# Under Windows global variables must be serializable, so modules
8# can't be global variables: See
9# https://docs.python.org/2/library/multiprocessing.html#windows for
# details. This example is implemented to load the 'random' module in
# the computation (for each job). When executing on POSIX systems
# (Linux, OS X and other Unix variants), 'random' can instead be
# declared a global variable, loaded in 'setup' and deleted in 'cleanup'.
14
def setup():
    """Per-node initialization run by dispy before any job executes.

    Creates a process-shared integer counter 'shvar' (initial value 1),
    guarded by a multiprocessing lock, as a module-level global so that
    every job process on this node can access it.

    Returns:
        0 to indicate successful setup (dispy convention).
    """
    # One import per line (PEP 8); kept inside the function because dispy
    # executes setup on the remote node, where module-level imports from
    # the client script are not available.
    import multiprocessing
    import multiprocessing.sharedctypes
    global shvar
    lock = multiprocessing.Lock()
    # 'i' = C int; the lock serializes access from concurrent job processes.
    shvar = multiprocessing.sharedctypes.Value('i', 1, lock=lock)
    return 0
21
def cleanup():
    """Per-node teardown run by dispy after all jobs finish.

    Removes the shared counter created by setup() so the node holds no
    stale state between computation sessions.
    """
    global shvar
    del shvar
25
def compute():
    """Job body: add a random amount to the node-wide shared counter.

    Returns:
        The counter's value immediately after this job's increment.
    """
    # Imported here because jobs run on remote nodes where the client
    # script's module-level imports are not available (and, on Windows,
    # modules cannot be serialized as globals -- see header comment).
    import random
    r = random.randint(1, 10)
    global shvar
    # 'shvar.value += r' alone is NOT atomic: the Value's internal lock
    # only guards individual get/set, not the read-modify-write, and the
    # original follow-up read of shvar.value could observe another job's
    # update. Hold the lock across both the increment and the read.
    with shvar.get_lock():
        shvar.value += r
        return shvar.value
32
if __name__ == '__main__':
    import dispy
    # setup/cleanup run once per node; compute runs once per job.
    cluster = dispy.JobCluster(compute, setup=setup, cleanup=cleanup)
    jobs = []
    for n in range(10):
        job = cluster.submit()
        # Tag the job so the status messages below can identify it;
        # without this, job.id is None and every line prints 'None'.
        job.id = n
        jobs.append(job)

    for job in jobs:
        job()  # block until this job finishes
        if job.status != dispy.DispyJob.Finished:
            print('job %s failed: %s' % (job.id, job.exception))
        else:
            print('%s: %s' % (job.id, job.result))
    cluster.print_status()
    cluster.close()
49