1# Copyright (c) 2016, the GPyOpt Authors
2# Licensed under the BSD 3-clause license (see LICENSE.txt)
3
4import time
5import numpy as np
6from ...util.general import spawn
7from ...util.general import get_d_moments
8import GPy
9import GPyOpt
10
class Objective(object):
    """
    Abstract base class wrapping an objective function for the optimizer.

    Subclasses are expected to override :meth:`evaluate`.
    """

    def evaluate(self, x):
        """Evaluate the objective at the locations ``x``. Must be overridden by subclasses."""
        raise NotImplementedError()
18
19
class SingleObjective(Objective):
    """
    Class to handle problems with one single objective function.

    param func: objective function.
    param num_cores: number of cores to use in the process of evaluating the objective (default, 1).
    param objective_name: name of the objective function.
    param batch_type: Type of batch used. Only 'synchronous' evaluations are possible at the moment.
    param space: Not in use.

    .. Note:: the objective function should take 2-dimensional numpy arrays as input and outputs. Each row should
    contain a location (in the case of the inputs) or a function evaluation (in the case of the outputs).
    """


    def __init__(self, func, num_cores = 1, objective_name = 'no_name', batch_type = 'synchronous', space = None):
        self.func  = func
        self.n_procs = num_cores
        self.num_evaluations = 0
        self.space = space
        self.objective_name = objective_name


    def evaluate(self, x):
        """
        Performs the evaluation of the objective at x.

        Evaluates sequentially when a single core is requested; otherwise tries a
        synchronous parallel evaluation and falls back to the sequential path if
        the parallel machinery fails.

        :param x: 2-dimensional numpy array, one location per row.
        :return: tuple (f_evals, cost_evals) with the objective values and the
                 evaluation costs (wall-clock times, or ones for parallel batches).
        """

        if self.n_procs == 1:
            f_evals, cost_evals = self._eval_func(x)
        else:
            try:
                f_evals, cost_evals = self._syncronous_batch_evaluation(x)
            except Exception:
                # Warn only the first time parallel evaluation fails, then set
                # the flag so subsequent failures fall back silently.
                # (The original code had this inverted: the flag assignment sat in
                # an unreachable else-branch, so the warning printed every time.)
                if not hasattr(self, 'parallel_error'):
                    print('Error in parallel computation. Fall back to single process!')
                    self.parallel_error = True
                f_evals, cost_evals = self._eval_func(x)

        return f_evals, cost_evals


    def _eval_func(self, x):
        """
        Performs sequential evaluations of the function at x (single location or batch). The computing time of each
        evaluation is also provided.

        :param x: 2-dimensional numpy array, one location per row.
        :return: tuple (f_evals, cost_evals); f_evals is a column vector stacking
                 the per-row results, cost_evals the per-row wall-clock times.
        """
        cost_evals = []
        f_evals     = np.empty(shape=[0, 1])

        for i in range(x.shape[0]):
            st_time    = time.time()
            # func expects a 2-d array even for a single location.
            rlt = self.func(np.atleast_2d(x[i]))
            f_evals     = np.vstack([f_evals,rlt])
            cost_evals += [time.time()-st_time]
        return f_evals, cost_evals


    def _syncronous_batch_evaluation(self,x):
        """
        Evaluates the function a x, where x can be a single location or a batch. The evaluation is performed in parallel
        according to the number of accessible cores.

        :param x: 2-dimensional numpy array, one location per row.
        :return: tuple (f_evals, cost_evals); costs are set to a constant 1 per row.
        """
        from multiprocessing import Process, Pipe

        # --- parallel evaluation of the function: row i goes to worker i % n_procs
        divided_samples = [x[i::self.n_procs] for i in range(self.n_procs)]
        pipe = [Pipe() for i in range(self.n_procs)]
        proc = [Process(target=spawn(self._eval_func),args=(c,k)) for k,(p,c) in zip(divided_samples,pipe)]
        [p.start() for p in proc]
        [p.join() for p in proc]

        # --- time of evaluation is set to constant (=1). This is one of the hypothesis of synchronous batch methods.
        f_evals = np.zeros((x.shape[0],1))
        cost_evals = np.ones((x.shape[0],1))
        i = 0
        for (p,c) in pipe:
            # scatter the worker's results back to the rows it was assigned
            f_evals[i::self.n_procs] = p.recv()[0] # throw away costs
            i += 1
        return f_evals, cost_evals

    def _asyncronous_batch_evaluation(self,x):

        """
        Performs the evaluation of the function at x while other evaluations are pending.
        """
        ### --- TODO
        pass
110