1#  ___________________________________________________________________________
2#
3#  Pyomo: Python Optimization Modeling Objects
4#  Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5#  Under the terms of Contract DE-NA0003525 with National Technology and
6#  Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7#  rights in this software.
8#  This software is distributed under the 3-clause BSD License.
9#  ___________________________________________________________________________
10
from __future__ import division

import logging
from math import copysign, fabs, isinf, isnan

import pyomo.environ as pyo
from pyomo.common.dependencies import attempt_import
from pyomo.contrib.gdpopt.util import copy_var_list_values, identify_variables, get_main_elapsed_time, time_code
from pyomo.contrib.mcpp.pyomo_mcpp import McCormick as mc, MCPP_Error
from pyomo.contrib.mindtpy.cut_generation import add_oa_cuts, add_no_good_cuts
from pyomo.contrib.mindtpy.mip_solve import handle_main_optimal, solve_main, handle_regularization_main_tc
from pyomo.contrib.mindtpy.nlp_solve import solve_subproblem, solve_feasibility_subproblem, handle_nlp_subproblem_tc
from pyomo.contrib.mindtpy.util import get_integer_solution
from pyomo.core import Constraint, minimize, value, maximize
from pyomo.core.expr import current as EXPR
from pyomo.opt import TerminationCondition as tc
from pyomo.opt.results import ProblemSense
from pyomo.repn import generate_standard_repn
from pyomo.solvers.plugins.solvers.gurobi_direct import gurobipy

cplex, cplex_available = attempt_import('cplex')
30
31logger = logging.getLogger('pyomo.contrib.mindtpy')
32
33
34class LazyOACallback_cplex(cplex.callbacks.LazyConstraintCallback if cplex_available else object):
35    """Inherent class in Cplex to call Lazy callback."""
36
37    def copy_lazy_var_list_values(self, opt, from_list, to_list, config,
38                                  skip_stale=False, skip_fixed=True,
39                                  ignore_integrality=False):
40        """This function copies variable values from one list to another.
41        Rounds to Binary/Integer if neccessary
42        Sets to zero for NonNegativeReals if neccessary
43
44        Parameters
45        ----------
46        opt: SolverFactory
47            the mip solver
48        from_list: variable list
49            contains variables and their values
50        to_list: variable list
51            contains the variables that need to set value
52        config: ConfigBlock
53            contains the specific configurations for the algorithm
54        """
55        for v_from, v_to in zip(from_list, to_list):
56            if skip_stale and v_from.stale:
57                continue  # Skip stale variable values.
58            if skip_fixed and v_to.is_fixed():
59                continue  # Skip fixed variables.
60            v_val = self.get_values(
61                opt._pyomo_var_to_solver_var_map[v_from])
62            try:
63                v_to.set_value(v_val)
64                if skip_stale:
65                    v_to.stale = False
66            except ValueError:
67                # Snap the value to the bounds
68                if v_to.has_lb() and v_val < v_to.lb and v_to.lb - v_val <= config.variable_tolerance:
69                    v_to.set_value(v_to.lb)
70                elif v_to.has_ub() and v_val > v_to.ub and v_val - v_to.ub <= config.variable_tolerance:
71                    v_to.set_value(v_to.ub)
72                # ... or the nearest integer
73                elif v_to.is_integer():
74                    rounded_val = int(round(v_val))
75                    if (ignore_integrality or fabs(v_val - rounded_val) <= config.integer_tolerance) \
76                            and rounded_val in v_to.domain:
77                        v_to.set_value(rounded_val)
78                else:
79                    raise
80
    def add_lazy_oa_cuts(self, target_model, dual_values, solve_data, config, opt,
                         linearize_active=True,
                         linearize_violated=True):
        """
        Linearizes nonlinear constraints; add the OA cuts through Cplex inherent function self.add()
        For nonconvex problems, turn on 'config.add_slack'. Slack variables will
        always be used for nonlinear equality constraints.
        Parameters
        ----------
        target_model:
            this is the MIP/MILP model for the OA algorithm; we want to add the OA cuts to 'target_model'
        dual_values:
            contains the value of the duals for each constraint
        solve_data: MindtPy Data Container
            data container that holds solve-instance data
        config: ConfigBlock
            contains the specific configurations for the algorithm
        opt: SolverFactory
            the mip solver
        linearize_active: bool, optional
            this parameter acts as a Boolean flag that signals whether the linearized constraint is active
        linearize_violated: bool, optional
            this parameter acts as a Boolean flag that signals whether the nonlinear constraint represented by the
            linearized constraint has been violated
        """

        config.logger.info('Adding OA cuts')
        with time_code(solve_data.timing, 'OA cut generation'):
            for index, constr in enumerate(target_model.MindtPy_utils.constraint_list):
                # Linear (or constant) constraints need no linearization.
                if constr.body.polynomial_degree() in {0, 1}:
                    continue

                constr_vars = list(identify_variables(constr.body))
                # Pre-computed Jacobian expressions; evaluated below at the
                # current variable values.
                jacs = solve_data.jacobians

                # Equality constraint (makes the problem nonconvex)
                if constr.has_ub() and constr.has_lb() and value(constr.lower) == value(constr.upper):
                    # The dual sign determines which side of the equality is
                    # linearized; sign_adjust flips with the objective sense.
                    sign_adjust = -1 if solve_data.objective_sense == minimize else 1
                    rhs = constr.lower

                    # since the cplex requires the lazy cuts in cplex type, we need to transform the pyomo expression into cplex expression
                    pyomo_expr = copysign(1, sign_adjust * dual_values[index]) * (sum(value(jacs[constr][var]) * (
                        var - value(var)) for var in EXPR.identify_variables(constr.body)) + value(constr.body) - rhs)
                    cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr)
                    # The constant term of the linearization is moved to the
                    # right-hand side of the CPLEX constraint.
                    cplex_rhs = -generate_standard_repn(pyomo_expr).constant
                    self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients),
                             sense='L',
                             rhs=cplex_rhs)
                else:  # Inequality constraint (possibly two-sided)
                    # NOTE(review): ``and`` binds tighter than ``or``, so
                    # ``constr.has_ub()`` only guards the linearize_active
                    # term, not the violated/inactive terms — confirm this
                    # precedence is intended.
                    if (constr.has_ub()
                        and (linearize_active and abs(constr.uslack()) < config.zero_tolerance)
                            or (linearize_violated and constr.uslack() < 0)
                            or (config.linearize_inactive and constr.uslack() > 0)) or (constr.name == 'MindtPy_utils.objective_constr' and constr.has_ub()):

                        pyomo_expr = sum(
                            value(jacs[constr][var])*(var - var.value) for var in constr_vars) + value(constr.body)
                        cplex_rhs = - \
                            generate_standard_repn(pyomo_expr).constant
                        cplex_expr, _ = opt._get_expr_from_pyomo_expr(
                            pyomo_expr)
                        self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients),
                                 sense='L',
                                 rhs=value(constr.upper) + cplex_rhs)
                    if (constr.has_lb()
                        and (linearize_active and abs(constr.lslack()) < config.zero_tolerance)
                            or (linearize_violated and constr.lslack() < 0)
                            or (config.linearize_inactive and constr.lslack() > 0)) or (constr.name == 'MindtPy_utils.objective_constr' and constr.has_lb()):
                        # NOTE(review): this branch reads values from the
                        # CPLEX callback (self.get_values) while the
                        # upper-bound branch above uses var.value — confirm
                        # this asymmetry is intentional.
                        pyomo_expr = sum(value(jacs[constr][var]) * (var - self.get_values(
                            opt._pyomo_var_to_solver_var_map[var])) for var in constr_vars) + value(constr.body)
                        cplex_rhs = - \
                            generate_standard_repn(pyomo_expr).constant
                        cplex_expr, _ = opt._get_expr_from_pyomo_expr(
                            pyomo_expr)
                        self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients),
                                 sense='G',
                                 rhs=value(constr.lower) + cplex_rhs)
157
158    def add_lazy_affine_cuts(self, solve_data, config, opt):
159        """
160        Adds affine cuts using MCPP; add affine cuts through Cplex inherent function self.add()
161
162        Parameters
163        ----------
164        solve_data: MindtPy Data Container
165            data container that holds solve-instance data
166        config: ConfigBlock
167            contains the specific configurations for the algorithm
168        opt: SolverFactory
169            the mip solver
170        """
171        with time_code(solve_data.timing, 'Affine cut generation'):
172            m = solve_data.mip
173            config.logger.info('Adding affine cuts')
174            counter = 0
175
176            for constr in m.MindtPy_utils.nonlinear_constraint_list:
177
178                vars_in_constr = list(
179                    identify_variables(constr.body))
180                if any(var.value is None for var in vars_in_constr):
181                    continue  # a variable has no values
182
183                # mcpp stuff
184                try:
185                    mc_eqn = mc(constr.body)
186                except MCPP_Error as e:
187                    config.logger.debug(
188                        'Skipping constraint %s due to MCPP error %s' % (constr.name, str(e)))
189                    continue  # skip to the next constraint
190                # TODO: check if the value of ccSlope and cvSlope is not Nan or inf. If so, we skip this.
191                ccSlope = mc_eqn.subcc()
192                cvSlope = mc_eqn.subcv()
193                ccStart = mc_eqn.concave()
194                cvStart = mc_eqn.convex()
195
196                concave_cut_valid = True
197                convex_cut_valid = True
198                for var in vars_in_constr:
199                    if not var.fixed:
200                        if ccSlope[var] == float('nan') or ccSlope[var] == float('inf'):
201                            concave_cut_valid = False
202                        if cvSlope[var] == float('nan') or cvSlope[var] == float('inf'):
203                            convex_cut_valid = False
204                if ccStart == float('nan') or ccStart == float('inf'):
205                    concave_cut_valid = False
206                if cvStart == float('nan') or cvStart == float('inf'):
207                    convex_cut_valid = False
208                # check if the value of ccSlope and cvSlope all equals zero. if so, we skip this.
209                if not any(ccSlope.values()):
210                    concave_cut_valid = False
211                if not any(cvSlope.values()):
212                    convex_cut_valid = False
213                if not (concave_cut_valid or convex_cut_valid):
214                    continue
215
216                ub_int = min(value(constr.upper), mc_eqn.upper()
217                             ) if constr.has_ub() else mc_eqn.upper()
218                lb_int = max(value(constr.lower), mc_eqn.lower()
219                             ) if constr.has_lb() else mc_eqn.lower()
220
221                if concave_cut_valid:
222                    pyomo_concave_cut = sum(ccSlope[var] * (var - var.value)
223                                            for var in vars_in_constr
224                                            if not var.fixed) + ccStart
225                    cplex_concave_rhs = generate_standard_repn(
226                        pyomo_concave_cut).constant
227                    cplex_concave_cut, _ = opt._get_expr_from_pyomo_expr(
228                        pyomo_concave_cut)
229                    self.add(constraint=cplex.SparsePair(ind=cplex_concave_cut.variables, val=cplex_concave_cut.coefficients),
230                             sense='G',
231                             rhs=lb_int - cplex_concave_rhs)
232                    counter += 1
233                if convex_cut_valid:
234                    pyomo_convex_cut = sum(cvSlope[var] * (var - var.value)
235                                           for var in vars_in_constr
236                                           if not var.fixed) + cvStart
237                    cplex_convex_rhs = generate_standard_repn(
238                        pyomo_convex_cut).constant
239                    cplex_convex_cut, _ = opt._get_expr_from_pyomo_expr(
240                        pyomo_convex_cut)
241                    self.add(constraint=cplex.SparsePair(ind=cplex_convex_cut.variables, val=cplex_convex_cut.coefficients),
242                             sense='L',
243                             rhs=ub_int - cplex_convex_rhs)
244                    counter += 1
245
246            config.logger.info('Added %s affine cuts' % counter)
247
248    def add_lazy_no_good_cuts(self, var_values, solve_data, config, opt, feasible=False):
249        """
250        Adds no-good cuts; add the no-good cuts through Cplex inherent function self.add()
251
252        Parameters
253        ----------
254        var_values: list
255            values of the current variables, used to generate the cut
256        solve_data: MindtPy Data Container
257            data container that holds solve-instance data
258        config: ConfigBlock
259            contains the specific configurations for the algorithm
260        feasible: bool, optional
261            boolean indicating if integer combination yields a feasible or infeasible NLP
262        opt: SolverFactory
263            the mip solver
264        """
265        if not config.add_no_good_cuts:
266            return
267
268        config.logger.info('Adding no-good cuts')
269        with time_code(solve_data.timing, 'No-good cut generation'):
270            m = solve_data.mip
271            MindtPy = m.MindtPy_utils
272            int_tol = config.integer_tolerance
273
274            binary_vars = [v for v in MindtPy.variable_list if v.is_binary()]
275
276            # copy variable values over
277            for var, val in zip(MindtPy.variable_list, var_values):
278                if not var.is_binary():
279                    continue
280                var.value = val
281
282            # check to make sure that binary variables are all 0 or 1
283            for v in binary_vars:
284                if value(abs(v - 1)) > int_tol and value(abs(v)) > int_tol:
285                    raise ValueError('Binary {} = {} is not 0 or 1'.format(
286                        v.name, value(v)))
287
288            if not binary_vars:  # if no binary variables, skip
289                return
290
291            pyomo_no_good_cut = sum(1 - v for v in binary_vars if value(abs(v - 1))
292                                    <= int_tol) + sum(v for v in binary_vars if value(abs(v)) <= int_tol)
293            cplex_no_good_rhs = generate_standard_repn(
294                pyomo_no_good_cut).constant
295            cplex_no_good_cut, _ = opt._get_expr_from_pyomo_expr(
296                pyomo_no_good_cut)
297
298            self.add(constraint=cplex.SparsePair(ind=cplex_no_good_cut.variables, val=cplex_no_good_cut.coefficients),
299                     sense='G',
300                     rhs=1 - cplex_no_good_rhs)
301
302    def handle_lazy_main_feasible_solution(self, main_mip, solve_data, config, opt):
303        """ This function is called during the branch and bound of main mip, more exactly when a feasible solution is found and LazyCallback is activated.
304        Copy the result to working model and update upper or lower bound.
305        In LP-NLP, upper or lower bound are updated during solving the main problem
306
307        Parameters
308        ----------
309        main_mip: Pyomo model
310            the MIP main problem
311        solve_data: MindtPy Data Container
312            data container that holds solve-instance data
313        config: ConfigBlock
314            contains the specific configurations for the algorithm
315        opt: SolverFactory
316            the mip solver
317        """
318        # proceed. Just need integer values
319
320        # this value copy is useful since we need to fix subproblem based on the solution of the main problem
321        self.copy_lazy_var_list_values(opt,
322                                       main_mip.MindtPy_utils.variable_list,
323                                       solve_data.working_model.MindtPy_utils.variable_list,
324                                       config)
325        if solve_data.objective_sense == minimize:
326            solve_data.LB = max(
327                self.get_best_objective_value(), solve_data.LB)
328            solve_data.bound_improved = solve_data.LB > solve_data.LB_progress[-1]
329            solve_data.LB_progress.append(solve_data.LB)
330        else:
331            solve_data.UB = min(
332                self.get_best_objective_value(), solve_data.UB)
333            solve_data.bound_improved = solve_data.UB < solve_data.UB_progress[-1]
334            solve_data.UB_progress.append(solve_data.UB)
335        config.logger.info(
336            'MIP %s: OBJ (at current node): %s  Bound: %s  LB: %s  UB: %s  TIME: %s'
337            % (solve_data.mip_iter, self.get_objective_value(), self.get_best_objective_value(),
338                solve_data.LB, solve_data.UB, round(get_main_elapsed_time(
339                    solve_data.timing), 2)))
340
    def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config, opt):
        """
        This function copies result to mip (explanation see below), updates bound, adds OA and no-good cuts,
        stores best solution if new one is best

        Parameters
        ----------
        fixed_nlp: Pyomo model
            Fixed-NLP from the model
        solve_data: MindtPy Data Container
            data container that holds solve-instance data
        config: ConfigBlock
            contains the specific configurations for the algorithm
        opt: SolverFactory
            the mip solver
        """
        if config.calculate_dual:
            # Fill in any missing duals with the stashed values so every
            # constraint in constraint_list has a dual for cut generation.
            for c in fixed_nlp.tmp_duals:
                if fixed_nlp.dual.get(c, None) is None:
                    fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
            dual_values = list(fixed_nlp.dual[c]
                               for c in fixed_nlp.MindtPy_utils.constraint_list)
        else:
            dual_values = None
        main_objective = fixed_nlp.MindtPy_utils.objective_list[-1]
        # Update the primal bound; the improvement flag is computed against
        # the previous entry BEFORE appending the new bound.
        if solve_data.objective_sense == minimize:
            solve_data.UB = min(value(main_objective.expr), solve_data.UB)
            solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[-1]
            solve_data.UB_progress.append(solve_data.UB)
        else:
            solve_data.LB = max(value(main_objective.expr), solve_data.LB)
            solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[-1]
            solve_data.LB_progress.append(solve_data.LB)

        config.logger.info(
            'Fixed-NLP {}: OBJ: {}  LB: {}  UB: {}  TIME: {}'
            .format(solve_data.nlp_iter, value(main_objective.expr), solve_data.LB, solve_data.UB, round(get_main_elapsed_time(solve_data.timing), 2)))

        if solve_data.solution_improved:
            # Keep a clone as the incumbent solution.
            solve_data.best_solution_found = fixed_nlp.clone()
            solve_data.best_solution_found_time = get_main_elapsed_time(
                solve_data.timing)
            if config.add_no_good_cuts or config.use_tabu_list:
                # Record the dual bound at the time of this primal bound so
                # no-good/tabu handling can restore it later.
                if solve_data.results.problem.sense == ProblemSense.minimize:
                    solve_data.stored_bound.update(
                        {solve_data.UB: solve_data.LB})
                else:
                    solve_data.stored_bound.update(
                        {solve_data.LB: solve_data.UB})

        # In OA algorithm, OA cuts are generated based on the solution of the subproblem
        # We need to first copy the value of variables from the subproblem and then add cuts
        # since value(constr.body), value(jacs[constr][var]), value(var) are used in self.add_lazy_oa_cuts()
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        if config.strategy == 'OA':
            self.add_lazy_oa_cuts(
                solve_data.mip, dual_values, solve_data, config, opt)
            # With regularization active, also add the cuts to the Pyomo model
            # used for the regularization problem.
            if config.add_regularization is not None:
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
        elif config.strategy == 'GOA':
            self.add_lazy_affine_cuts(solve_data, config, opt)
        if config.add_no_good_cuts:
            var_values = list(
                v.value for v in fixed_nlp.MindtPy_utils.variable_list)
            self.add_lazy_no_good_cuts(var_values, solve_data, config, opt)
408
409    def handle_lazy_subproblem_infeasible(self, fixed_nlp, solve_data, config, opt):
410        """
411        Solves feasibility problem and adds cut according to the specified strategy
412
413        Parameters
414        ----------
415        solve_data: MindtPy Data Container
416            data container that holds solve-instance data
417        config: ConfigBlock
418            contains the specific configurations for the algorithm
419        opt: SolverFactory
420            the mip solver
421        """
422        # TODO try something else? Reinitialize with different initial
423        # value?
424        config.logger.info('NLP subproblem was locally infeasible.')
425        solve_data.nlp_infeasible_counter += 1
426        if config.calculate_dual:
427            for c in fixed_nlp.component_data_objects(ctype=Constraint):
428                rhs = ((0 if c.upper is None else c.upper)
429                       + (0 if c.lower is None else c.lower))
430                sign_adjust = 1 if c.upper is None else -1
431                fixed_nlp.dual[c] = (sign_adjust
432                                     * max(0, sign_adjust * (rhs - value(c.body))))
433            dual_values = list(fixed_nlp.dual[c]
434                               for c in fixed_nlp.MindtPy_utils.constraint_list)
435        else:
436            dual_values = None
437
438        config.logger.info('Solving feasibility problem')
439        feas_subproblem, feas_subproblem_results = solve_feasibility_subproblem(
440            solve_data, config)
441        # In OA algorithm, OA cuts are generated based on the solution of the subproblem
442        # We need to first copy the value of variables from the subproblem and then add cuts
443        copy_var_list_values(feas_subproblem.MindtPy_utils.variable_list,
444                             solve_data.mip.MindtPy_utils.variable_list,
445                             config)
446        if config.strategy == 'OA':
447            self.add_lazy_oa_cuts(
448                solve_data.mip, dual_values, solve_data, config, opt)
449            if config.add_regularization is not None:
450                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
451        elif config.strategy == 'GOA':
452            self.add_lazy_affine_cuts(solve_data, config, opt)
453        if config.add_no_good_cuts:
454            var_values = list(
455                v.value for v in fixed_nlp.MindtPy_utils.variable_list)
456            self.add_lazy_no_good_cuts(var_values, solve_data, config, opt)
457
458    def handle_lazy_subproblem_other_termination(self, fixed_nlp, termination_condition,
459                                                 solve_data, config):
460        """
461        Handles the result of the latest iteration of solving the NLP subproblem given a solution that is neither optimal
462        nor infeasible.
463
464        Parameters
465        ----------
466        termination_condition: Pyomo TerminationCondition
467            the termination condition of the NLP subproblem
468        solve_data: MindtPy Data Container
469            data container that holds solve-instance data
470        config: ConfigBlock
471            contains the specific configurations for the algorithm
472        """
473        if termination_condition is tc.maxIterations:
474            # TODO try something else? Reinitialize with different initial value?
475            config.logger.info(
476                'NLP subproblem failed to converge within iteration limit.')
477            var_values = list(
478                v.value for v in fixed_nlp.MindtPy_utils.variable_list)
479        else:
480            raise ValueError(
481                'MindtPy unable to handle NLP subproblem termination '
482                'condition of {}'.format(termination_condition))
483
484    def handle_lazy_regularization_problem(self, main_mip, main_mip_results, solve_data, config):
485        if main_mip_results.solver.termination_condition in {tc.optimal, tc.feasible}:
486            handle_main_optimal(
487                main_mip, solve_data, config, update_bound=False)
488        elif main_mip_results.solver.termination_condition in {tc.infeasible, tc.infeasibleOrUnbounded}:
489            config.logger.info('regularization problem infeasible.')
490            if config.reduce_level_coef:
491                config.level_coef = config.level_coef / 2
492                main_mip, main_mip_results = solve_main(
493                    solve_data, config, regularization_problem=True)
494                if main_mip_results.solver.termination_condition in {tc.optimal, tc.feasible}:
495                    handle_main_optimal(
496                        main_mip, solve_data, config, update_bound=False)
497                elif main_mip_results.solver.termination_condition is tc.infeasible:
498                    config.logger.info('regularization problem still infeasible with reduced level_coef. '
499                                       'NLP subproblem is generated based on the incumbent solution of the main problem.')
500                elif main_mip_results.solver.termination_condition is tc.maxTimeLimit:
501                    config.logger.info(
502                        'Regularization problem failed to converge within the time limit.')
503                    solve_data.results.solver.termination_condition = tc.maxTimeLimit
504                elif main_mip_results.solver.termination_condition is tc.unbounded:
505                    config.logger.info(
506                        'Regularization problem ubounded.'
507                        'Sometimes solving MIQP using cplex, unbounded means infeasible.')
508                elif main_mip_results.solver.termination_condition is tc.unknown:
509                    config.logger.info(
510                        'Termination condition of the regularization problem is unknown.')
511                    if main_mip_results.problem.lower_bound != float('-inf'):
512                        config.logger.info('Solution limit has been reached.')
513                        handle_main_optimal(
514                            main_mip, solve_data, config, update_bound=False)
515                    else:
516                        config.logger.info('No solution obtained from the regularization subproblem.'
517                                           'Please set mip_solver_tee to True for more informations.'
518                                           'The solution of the OA main problem will be adopted.')
519                else:
520                    raise ValueError(
521                        'MindtPy unable to handle regularization problem termination condition '
522                        'of %s. Solver message: %s' %
523                        (main_mip_results.solver.termination_condition, main_mip_results.solver.message))
524            elif config.use_bb_tree_incumbent:
525                config.logger.info(
526                    'Fixed subproblem will be generated based on the incumbent solution of the main problem.')
527        elif main_mip_results.solver.termination_condition is tc.maxTimeLimit:
528            config.logger.info(
529                'Regularization problem failed to converge within the time limit.')
530            solve_data.results.solver.termination_condition = tc.maxTimeLimit
531        elif main_mip_results.solver.termination_condition is tc.unbounded:
532            config.logger.info(
533                'Regularization problem ubounded.'
534                'Sometimes solving MIQP using cplex, unbounded means infeasible.')
535        elif main_mip_results.solver.termination_condition is tc.unknown:
536            config.logger.info(
537                'Termination condition of the regularization problem is unknown.')
538            if main_mip_results.problem.lower_bound != float('-inf'):
539                config.logger.info('Solution limit has been reached.')
540                handle_main_optimal(main_mip, solve_data,
541                                    config, update_bound=False)
542        else:
543            raise ValueError(
544                'MindtPy unable to handle regularization problem termination condition '
545                'of %s. Solver message: %s' %
546                (main_mip_results.solver.termination_condition, main_mip_results.solver.message))
547
548    def __call__(self):
549        """
550        This is an inherent function in LazyConstraintCallback in cplex.
551        This function is called whenever an integer solution is found during the branch and bound process
552        """
553        solve_data = self.solve_data
554        config = self.config
555        opt = self.opt
556        main_mip = self.main_mip
557
558        if solve_data.should_terminate:
559            self.abort()
560            return
561
562        self.handle_lazy_main_feasible_solution(
563            main_mip, solve_data, config, opt)
564
565        if config.add_cuts_at_incumbent:
566            self.copy_lazy_var_list_values(opt,
567                                           main_mip.MindtPy_utils.variable_list,
568                                           solve_data.mip.MindtPy_utils.variable_list,
569                                           config)
570            if config.strategy == 'OA':
571                self.add_lazy_oa_cuts(
572                    solve_data.mip, None, solve_data, config, opt)
573
574        # regularization is activated after the first feasible solution is found.
575        if config.add_regularization is not None and solve_data.best_solution_found is not None:
576            # the main problem might be unbounded, regularization is activated only when a valid bound is provided.
577            if not solve_data.bound_improved and not solve_data.solution_improved:
578                config.logger.info('the bound and the best found solution have neither been improved.'
579                                   'We will skip solving the regularization problem and the Fixed-NLP subproblem')
580                solve_data.solution_improved = False
581                return
582            if ((solve_data.objective_sense == minimize and solve_data.LB != float('-inf'))
583                    or (solve_data.objective_sense == maximize and solve_data.UB != float('inf'))):
584                main_mip, main_mip_results = solve_main(
585                    solve_data, config, regularization_problem=True)
586                self.handle_lazy_regularization_problem(
587                    main_mip, main_mip_results, solve_data, config)
588
589        if solve_data.LB + config.bound_tolerance >= solve_data.UB:
590            config.logger.info(
591                'MindtPy exiting on bound convergence. '
592                'LB: {} + (tol {}) >= UB: {}\n'.format(
593                    solve_data.LB, config.bound_tolerance, solve_data.UB))
594            solve_data.results.solver.termination_condition = tc.optimal
595            self.abort()
596            return
597
598        # check if the same integer combination is obtained.
599        solve_data.curr_int_sol = get_integer_solution(
600            solve_data.working_model, string_zero=True)
601
602        if solve_data.curr_int_sol in set(solve_data.integer_list):
603            config.logger.info('This integer combination has been explored. '
604                               'We will skip solving the Fixed-NLP subproblem.')
605            solve_data.solution_improved = False
606            if config.strategy == 'GOA':
607                if config.add_no_good_cuts:
608                    var_values = list(
609                        v.value for v in solve_data.working_model.MindtPy_utils.variable_list)
610                    self.add_lazy_no_good_cuts(
611                        var_values, solve_data, config, opt)
612                return
613            elif config.strategy == 'OA':
614                return
615        else:
616            solve_data.integer_list.append(solve_data.curr_int_sol)
617
618        # solve subproblem
619        # The constraint linearization happens in the handlers
620        fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)
621
622        # add oa cuts
623        if fixed_nlp_result.solver.termination_condition in {tc.optimal, tc.locallyOptimal, tc.feasible}:
624            self.handle_lazy_subproblem_optimal(
625                fixed_nlp, solve_data, config, opt)
626            if solve_data.LB + config.bound_tolerance >= solve_data.UB:
627                config.logger.info(
628                    'MindtPy exiting on bound convergence. '
629                    'LB: {} + (tol {}) >= UB: {}\n'.format(
630                        solve_data.LB, config.bound_tolerance, solve_data.UB))
631                solve_data.results.solver.termination_condition = tc.optimal
632                return
633        elif fixed_nlp_result.solver.termination_condition in {tc.infeasible, tc.noSolution}:
634            self.handle_lazy_subproblem_infeasible(
635                fixed_nlp, solve_data, config, opt)
636        else:
637            self.handle_lazy_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition,
638                                                          solve_data, config)
639
640
641# Gurobi
642
643
def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config):
    """GUROBI callback function for the LP/NLP-based B&B algorithm.

    Registered with gurobi_persistent; Gurobi calls it at many points, but
    only GRB.Callback.MIPSOL (a new integer-feasible incumbent) is handled.

    Parameters
    ----------
    cb_m: Pyomo model
        the MIP main problem
    cb_opt: SolverFactory
        the gurobi_persistent solver
    cb_where: int
        an enum member of gurobipy.GRB.Callback
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    if cb_where == gurobipy.GRB.Callback.MIPSOL:
        # gurobipy.GRB.Callback.MIPSOL means that an integer solution is found during the branch and bound process
        if cb_where == gurobipy.GRB.Callback.MIPSOL and solve_data.should_terminate:
            cb_opt._solver_model.terminate()
            return
        cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list)
        handle_lazy_main_feasible_solution_gurobi(
            cb_m, cb_opt, solve_data, config)

        if config.add_cuts_at_incumbent:
            if config.strategy == 'OA':
                add_oa_cuts(solve_data.mip, None, solve_data, config, cb_opt)

        # regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None:
            # the main problem might be unbounded, regularization is activated only when a valid bound is provided.
            if not solve_data.bound_improved and not solve_data.solution_improved:
                config.logger.info('the bound and the best found solution have neither been improved. '
                                   'We will skip solving the regularization problem and the Fixed-NLP subproblem')
                solve_data.solution_improved = False
                return
            if ((solve_data.objective_sense == minimize and solve_data.LB != float('-inf'))
                    or (solve_data.objective_sense == maximize and solve_data.UB != float('inf'))):
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)

        # Bound convergence: the gap is closed, declare optimality and stop.
        if solve_data.LB + config.bound_tolerance >= solve_data.UB:
            config.logger.info(
                'MindtPy exiting on bound convergence. '
                'LB: {} + (tol {}) >= UB: {}\n'.format(
                    solve_data.LB, config.bound_tolerance, solve_data.UB))
            solve_data.results.solver.termination_condition = tc.optimal
            cb_opt._solver_model.terminate()
            return

        # check if the same integer combination is obtained.
        solve_data.curr_int_sol = get_integer_solution(
            solve_data.working_model, string_zero=True)

        # Membership test directly on the list: wrapping it in set() for a
        # single lookup is an O(n) conversion plus an allocation with no gain.
        if solve_data.curr_int_sol in solve_data.integer_list:
            config.logger.info('This integer combination has been explored. '
                               'We will skip solving the Fixed-NLP subproblem.')
            solve_data.solution_improved = False
            if config.strategy == 'GOA':
                if config.add_no_good_cuts:
                    var_values = list(
                        v.value for v in solve_data.working_model.MindtPy_utils.variable_list)
                    add_no_good_cuts(var_values, solve_data, config)
                return
            elif config.strategy == 'OA':
                return
        else:
            solve_data.integer_list.append(solve_data.curr_int_sol)

        # solve subproblem
        # The constraint linearization happens in the handlers
        fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)

        handle_nlp_subproblem_tc(
            fixed_nlp, fixed_nlp_result, solve_data, config, cb_opt)
722
723
def handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config):
    """ This function is called during the branch and bound of main mip, more exactly when a feasible solution is found and LazyCallback is activated.
    Copy the result to working model and update upper or lower bound.
    In LP-NLP, upper or lower bound are updated during solving the main problem

    Parameters
    ----------
    cb_m: Pyomo model
        the MIP main problem
    cb_opt: SolverFactory
        the gurobi_persistent solver
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    # Pull the incumbent variable values out of the Gurobi callback context.
    cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list)
    # Mirror the incumbent onto the working model so that the Fixed-NLP
    # subproblem can later be fixed at this integer solution.
    copy_var_list_values(cb_m.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
    # Best objective bound known to Gurobi at this node; it tightens the dual
    # bound (LB when minimizing, UB when maximizing).
    node_bound = cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJBND)
    if solve_data.objective_sense == minimize:
        solve_data.LB = max(node_bound, solve_data.LB)
        solve_data.bound_improved = solve_data.LB > solve_data.LB_progress[-1]
        solve_data.LB_progress.append(solve_data.LB)
    else:
        solve_data.UB = min(node_bound, solve_data.UB)
        solve_data.bound_improved = solve_data.UB < solve_data.UB_progress[-1]
        solve_data.UB_progress.append(solve_data.UB)
    config.logger.info(
        'MIP %s: OBJ (at current node): %s  Bound: %s  LB: %s  UB: %s  TIME: %s'
        % (solve_data.mip_iter, cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJ), node_bound,
            solve_data.LB, solve_data.UB, round(get_main_elapsed_time(
                solve_data.timing), 2)))
761