// Copyright 2020, 2021 PaGMO development team
//
// This file is part of the pygmo library.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include <string>

#include "docstrings.hpp"

namespace pygmo
{
15
// Docstring for the pygmo.population class.
std::string population_docstring()
{
    return R"(The population class.

This class represents a population of individuals, i.e., potential candidate solutions to a given problem. In pygmo an
individual is determined:

* by a unique ID used to track it across generations and migrations,
* by a chromosome (a decision vector),
* by the fitness of the chromosome as evaluated by a :class:`~pygmo.problem` and thus including objectives,
  equality constraints and inequality constraints if present.

A special mechanism is implemented to track the best individual that has ever been part of the population. Such an individual
is called *champion* and its decision vector and fitness vector are automatically kept updated. The *champion* is not necessarily
an individual currently in the population. The *champion* is only defined and accessible via the population interface if the
:class:`~pygmo.problem` currently contained in the :class:`~pygmo.population` is single objective.

See also the docs of the C++ class :cpp:class:`pagmo::population`.

)";
}
37
// Docstring for pygmo.population.push_back(). Documents both parameters,
// including the optional fitness *f* which the original Args section omitted.
std::string population_push_back_docstring()
{
    return R"(push_back(x, f = None)

Adds one decision vector (chromosome) to the population.

This method will append a new chromosome *x* to the population, creating a new unique identifier for the newly born individual
and, if *f* is not provided, evaluating its fitness. If *f* is provided, the fitness of the new individual will be set to *f*.
It is the user's responsibility to ensure that *f* actually corresponds to the fitness of *x*.

In case of exceptions, the population will not be altered.

Args:
    x (array-like object): decision vector to be added to the population
    f (array-like object): fitness vector corresponding to *x* (if not provided, the fitness will be computed)

Raises:
    ValueError: if the dimensions of *x* or *f* (if provided) are incompatible with the population's problem
    unspecified: any exception thrown by :func:`pygmo.problem.fitness()` or by failures at the intersection between C++ and
      Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
60
// Docstring for pygmo.population.random_decision_vector().
std::string population_random_decision_vector_docstring()
{
    return R"(random_decision_vector()

This method will create a random decision vector within the problem's bounds.

Returns:
    :class:`numpy.ndarray`: a random decision vector within the problem's bounds

Raises:
    unspecified: any exception thrown by :func:`pygmo.random_decision_vector()`

)";
}
75
// Docstring for pygmo.population.best_idx(). Fixes the "each constraints" typo.
std::string population_best_idx_docstring()
{
    return R"(best_idx(tol = self.problem.c_tol)

Index of the best individual.

If the problem is single-objective and unconstrained, the best is simply the individual with the smallest fitness. If the problem
is, instead, single objective, but with constraints, the best will be defined using the criteria specified in :func:`pygmo.sort_population_con()`.
If the problem is multi-objective one single best is not well defined. In this case the user can still obtain a strict ordering of the population
individuals by calling the :func:`pygmo.sort_population_mo()` function.

Args:
    tol (:class:`float` or array-like object): scalar tolerance or vector of tolerances to be applied to each constraint. By default, the
      :attr:`~pygmo.problem.c_tol` attribute from the population's problem is used.

Returns:
    :class:`int`: the index of the best individual

Raises:
    ValueError: if the problem is multiobjective and thus a best individual is not well defined, or if the population is empty
    unspecified: any exception thrown by :cpp:func:`pagmo::sort_population_con()`

)";
}
100
// Docstring for pygmo.population.worst_idx(). Fixes the "each constraints" typo.
std::string population_worst_idx_docstring()
{
    return R"(worst_idx(tol = 0.)

Index of the worst individual.

If the problem is single-objective and unconstrained, the worst is simply the individual with the largest fitness. If the problem
is, instead, single objective, but with constraints, the worst will be defined using the criteria specified in :func:`pygmo.sort_population_con()`.
If the problem is multi-objective one single worst is not well defined. In this case the user can still obtain a strict ordering of the population
individuals by calling the :func:`pygmo.sort_population_mo()` function.

Args:
    tol (:class:`float` or array-like object): scalar tolerance or vector of tolerances to be applied to each constraint

Returns:
    :class:`int`: the index of the worst individual

Raises:
    ValueError: if the problem is multiobjective and thus a worst individual is not well defined, or if the population is empty
    unspecified: any exception thrown by :func:`pygmo.sort_population_con()`

)";
}
124
// Docstring for the read-only pygmo.population.champion_x property.
std::string population_champion_x_docstring()
{
    return R"(Champion's decision vector.

This read-only property contains an array of :class:`float` representing the decision vector of the population's champion.

.. note::

   If the problem is stochastic the champion is the individual that had the lowest fitness for
   some lucky seed, not on average across seeds. Re-evaluating its decision vector may then result in a different
   fitness.

Returns:
    1D NumPy float array: the champion's decision vector

Raises:
    ValueError: if the current problem is not single objective
    unspecified: any exception thrown by failures at the intersection between C++ and
      Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
147
// Docstring for the read-only pygmo.population.champion_f property.
std::string population_champion_f_docstring()
{
    return R"(Champion's fitness vector.

This read-only property contains an array of :class:`float` representing the fitness vector of the population's champion.

.. note::

   If the problem is stochastic, the champion is the individual that had the lowest fitness for
   some lucky seed, not on average across seeds. Re-evaluating its decision vector may then result in a different
   fitness.

Returns:
    1D NumPy float array: the champion's fitness vector

Raises:
    ValueError: if the current problem is not single objective
    unspecified: any exception thrown by failures at the intersection between C++ and
      Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
170
// Docstring for pygmo.population.set_xf().
std::string population_set_xf_docstring()
{
    return R"(set_xf(i, x, f)

Sets the :math:`i`-th individual decision vector, and fitness.

Sets simultaneously the :math:`i`-th individual decision vector and fitness thus avoiding to trigger a fitness function evaluation.

.. note::

   The user must make sure that the input fitness *f* makes sense as pygmo will only check its dimension.

Args:
    i (:class:`int`): individual's index in the population
    x (array-like object): a decision vector (chromosome)
    f (array-like object): a fitness vector

Raises:
    ValueError: if *i* is invalid, or if *x* or *f* have the wrong dimensions (i.e., their dimensions are
      inconsistent with the problem's properties)
    unspecified: any exception thrown by failures at the intersection between C++ and
      Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
196
// Docstring for pygmo.population.set_x().
std::string population_set_x_docstring()
{
    return R"(set_x(i, x)

Sets the :math:`i`-th individual decision vector.

Sets the chromosome of the :math:`i`-th individual to the value *x* and changes its fitness accordingly. The
individual's ID remains the same.

.. note::

   A call to this method triggers one fitness function evaluation.

Args:
    i (:class:`int`): individual's index in the population
    x (array-like object): a decision vector (chromosome)

Raises:
    ValueError: if *i* is invalid, or if *x* has the wrong dimensions (i.e., the dimension is
      inconsistent with the problem's properties)
    unspecified: any exception thrown by failures at the intersection between C++ and
      Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
222
// Docstring for the read-only pygmo.population.problem property.
std::string population_problem_docstring()
{
    return R"(Population's problem.

This read-only property gives direct access to the :class:`~pygmo.problem` stored within the population.

Returns:
    :class:`~pygmo.problem`: a reference to the internal problem

)";
}
234
// Docstring for pygmo.population.get_f().
std::string population_get_f_docstring()
{
    return R"(get_f()

This method will return the fitness vectors of the individuals as a 2D NumPy array.

Each row of the returned array represents the fitness vector of the individual at the corresponding position in the
population.

Returns:
    2D NumPy float array: a deep copy of the fitness vectors of the individuals

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and
      Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
253
// Docstring for pygmo.population.get_x().
std::string population_get_x_docstring()
{
    return R"(get_x()

This method will return the chromosomes of the individuals as a 2D NumPy array.

Each row of the returned array represents the chromosome of the individual at the corresponding position in the
population.

Returns:
    2D NumPy float array: a deep copy of the chromosomes of the individuals

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and
      Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
272
// Docstring for pygmo.population.get_ID().
std::string population_get_ID_docstring()
{
    return R"(get_ID()

This method will return the IDs of the individuals as a 1D NumPy array.

Each element of the returned array represents the ID of the individual at the corresponding position in the
population.

Returns:
    1D NumPy int array: a deep copy of the IDs of the individuals

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and
      Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
291
// Docstring for pygmo.population.get_seed().
std::string population_get_seed_docstring()
{
    return R"(get_seed()

This method will return the random seed of the population.

Returns:
    :class:`int`: the random seed of the population

)";
}
303
// Docstring for the pygmo.problem class. Fixes the "optimizaztion" typo and
// restores the Sphinx math/code-block indentation lost in the listing export.
std::string problem_docstring()
{
    return R"(__init__(udp = null_problem())

Problem class.

This class represents a generic *mathematical programming* or *evolutionary optimization* problem in the form:

.. math::
   \begin{array}{rl}
   \mbox{find:} & \mathbf {lb} \le \mathbf x \le \mathbf{ub}\\
   \mbox{to minimize: } & \mathbf f(\mathbf x, s) \in \mathbb R^{n_{obj}}\\
   \mbox{subject to:} & \mathbf {c}_e(\mathbf x, s) = 0 \\
   & \mathbf {c}_i(\mathbf x, s) \le 0
   \end{array}

where :math:`\mathbf x \in \mathbb R^{n_{cx}} \times \mathbb Z^{n_{ix}}` is called *decision vector* or
*chromosome*, and is made of :math:`n_{cx}` real numbers and :math:`n_{ix}` integers (all represented as doubles). The
total problem dimension is then indicated with :math:`n_x = n_{cx} + n_{ix}`. :math:`\mathbf{lb}, \mathbf{ub} \in
\mathbb R^{n_{cx}} \times \mathbb Z^{n_{ix}}` are the *box-bounds*, :math:`\mathbf f: \mathbb R^{n_{cx}} \times
\mathbb Z^{n_{ix}} \rightarrow \mathbb R^{n_{obj}}` define the *objectives*, :math:`\mathbf c_e: \mathbb R^{n_{cx}}
\times \mathbb Z^{n_{ix}} \rightarrow \mathbb R^{n_{ec}}` are non linear *equality constraints*, and :math:`\mathbf
c_i: \mathbb R^{n_{cx}} \times \mathbb Z^{n_{ix}} \rightarrow \mathbb R^{n_{ic}}` are non linear *inequality
constraints*. Note that the objectives and constraints may also depend from an added value :math:`s` seeding the
values of any number of stochastic variables. This allows also for stochastic programming tasks to be represented by
this class. A tolerance is also considered for all constraints and set, by default, to zero. It can be modified
via the :attr:`~pygmo.problem.c_tol` attribute.

In order to define an optimization problem in pygmo, the user must first define a class
whose methods describe the properties of the problem and allow to compute
the objective function, the gradient, the constraints, etc. In pygmo, we refer to such
a class as a **user-defined problem**, or UDP for short. Once defined and instantiated,
a UDP can then be used to construct an instance of this class, :class:`~pygmo.problem`, which
provides a generic interface to optimization problems.

Every UDP must implement at least the following two methods:

.. code-block::

   def fitness(self, dv):
       ...
   def get_bounds(self):
       ...

The ``fitness()`` method is expected to return the fitness of the input decision vector (concatenating
the objectives, the equality and the inequality constraints), while
``get_bounds()`` is expected to return the box bounds of the problem,
:math:`(\mathbf{lb}, \mathbf{ub})`, which also implicitly define the dimension of the problem.
The ``fitness()`` and ``get_bounds()`` methods of the UDP are accessible from the corresponding
:func:`pygmo.problem.fitness()` and :func:`pygmo.problem.get_bounds()`
methods (see their documentation for information on how the two methods should be implemented
in the UDP and other details).

The two mandatory methods above allow to define a single objective, deterministic, derivative-free, unconstrained
optimization problem. In order to consider more complex cases, the UDP may implement one or more of the following
methods:

.. code-block::

   def get_nobj(self):
       ...
   def get_nec(self):
       ...
   def get_nic(self):
       ...
   def get_nix(self):
       ...
   def batch_fitness(self, dvs):
       ...
   def has_batch_fitness(self):
       ...
   def has_gradient(self):
       ...
   def gradient(self, dv):
       ...
   def has_gradient_sparsity(self):
       ...
   def gradient_sparsity(self):
       ...
   def has_hessians(self):
       ...
   def hessians(self, dv):
       ...
   def has_hessians_sparsity(self):
       ...
   def hessians_sparsity(self):
       ...
   def has_set_seed(self):
       ...
   def set_seed(self, s):
       ...
   def get_name(self):
       ...
   def get_extra_info(self):
       ...

See the documentation of the corresponding methods in this class for details on how the optional
methods in the UDP should be implemented and on how they are used by :class:`~pygmo.problem`.
Note that the exposed C++ problems can also be used as UDPs, even if they do not expose any of the
mandatory or optional methods listed above (see :ref:`here <problems>` for the
full list of UDPs already coded in pygmo).

This class is the Python counterpart of the C++ class :cpp:class:`pagmo::problem`.

Args:
    udp: a user-defined problem, either C++ or Python

Raises:
    NotImplementedError: if *udp* does not implement the mandatory methods detailed above
    ValueError: if the number of objectives of the UDP is zero, the number of objectives,
      equality or inequality constraints is larger than an implementation-defined value,
      the problem bounds are invalid (e.g., they contain NaNs, the dimensionality of the lower bounds is
      different from the dimensionality of the upper bounds, etc. - note that infinite bounds are allowed),
      or if the ``gradient_sparsity()`` and ``hessians_sparsity()`` methods of the UDP fail basic sanity checks
      (e.g., they return vectors with repeated indices, they contain indices exceeding the problem's dimensions, etc.)
    unspecified: any exception thrown by methods of the UDP invoked during construction,
      the deep copy of the UDP, the constructor of the underlying C++ class,
      failures at the intersection between C++ and Python (e.g., type conversion errors, mismatched function
      signatures, etc.)

)";
}
426
// Docstring for pygmo.problem.fitness().
std::string problem_fitness_docstring()
{
    return R"(fitness(dv)

Fitness.

This method will invoke the ``fitness()`` method of the UDP to compute the fitness of the
input decision vector *dv*. The return value of the ``fitness()`` method of the UDP is expected to have a
dimension of :math:`n_{f} = n_{obj} + n_{ec} + n_{ic}` and to contain the concatenated values of
:math:`\mathbf f, \mathbf c_e` and :math:`\mathbf c_i` (in this order).
Equality constraints are all assumed in the form :math:`c_{e_i}(\mathbf x) = 0` while inequalities are assumed in
the form :math:`c_{i_i}(\mathbf x) <= 0` so that negative values are associated to satisfied inequalities.

In addition to invoking the ``fitness()`` method of the UDP, this method will perform sanity checks on
*dv* and on the returned fitness vector. A successful call of this method will increase the internal fitness
evaluation counter (see :func:`~pygmo.problem.get_fevals()`).

The ``fitness()`` method of the UDP must be able to take as input the decision vector as a 1D NumPy array, and it must
return the fitness vector as an iterable Python object (e.g., 1D NumPy array, list, tuple, etc.).

Args:
    dv (array-like object): the decision vector (chromosome) to be evaluated

Returns:
    1D NumPy float array: the fitness of *dv*

Raises:
    ValueError: if either the length of *dv* differs from the value returned by :func:`~pygmo.problem.get_nx()`, or
      the length of the returned fitness vector differs from the value returned by :func:`~pygmo.problem.get_nf()`
    unspecified: any exception thrown by the ``fitness()`` method of the UDP, or by failures at the intersection
      between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
461
// Docstring for pygmo.problem.get_bounds().
std::string problem_get_bounds_docstring()
{
    return R"(get_bounds()

Box-bounds.

This method will return the box-bounds :math:`(\mathbf{lb}, \mathbf{ub})` of the problem,
as returned by the ``get_bounds()`` method of the UDP. Infinities in the bounds are allowed.

The ``get_bounds()`` method of the UDP must return the box-bounds as a tuple of 2 elements,
the lower bounds vector and the upper bounds vector, which must be represented as iterable Python objects (e.g.,
1D NumPy arrays, lists, tuples, etc.). The box-bounds returned by the UDP are checked upon the construction
of a :class:`~pygmo.problem`.

Returns:
    :class:`tuple`: a tuple of two 1D NumPy float arrays representing the lower and upper box-bounds of the problem

Raises:
    unspecified: any exception thrown by the invoked method of the underlying C++ class, or failures at the
      intersection between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
485
// Docstring for pygmo.problem.get_lb().
std::string problem_get_lb_docstring()
{
    return R"(get_lb()

Lower box-bounds.

This method will return the lower box-bounds for this problem. See :func:`~pygmo.problem.get_bounds()`
for a detailed explanation of how the bounds are determined.

Returns:
    1D NumPy float array: an array representing the lower box-bounds of this problem

Raises:
    unspecified: any exception thrown by the invoked method of the underlying C++ class, or failures at the
      intersection between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
504
// Docstring for pygmo.problem.get_ub().
std::string problem_get_ub_docstring()
{
    return R"(get_ub()

Upper box-bounds.

This method will return the upper box-bounds for this problem. See :func:`~pygmo.problem.get_bounds()`
for a detailed explanation of how the bounds are determined.

Returns:
    1D NumPy float array: an array representing the upper box-bounds of this problem

Raises:
    unspecified: any exception thrown by the invoked method of the underlying C++ class, or failures at the
      intersection between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
523
// Docstring for pygmo.problem.batch_fitness().
std::string problem_batch_fitness_docstring()
{
    return R"(batch_fitness(dvs)

This method implements the evaluation of multiple decision vectors in batch mode
by invoking the ``batch_fitness()`` method of the UDP. The ``batch_fitness()``
method of the UDP accepts in input a batch of decision vectors, *dvs*, stored contiguously:
for a problem with dimension :math:`n`, the first decision vector in *dvs* occupies
the index range :math:`\left[0, n\right)`, the second decision vector occupies the range
:math:`\left[n, 2n\right)`, and so on. The return value is the batch of fitness vectors *fvs*
resulting from computing the fitness of the input decision vectors.
*fvs* is also stored contiguously: for a problem with fitness dimension :math:`f`, the first fitness
vector will occupy the index range :math:`\left[0, f\right)`, the second fitness vector
will occupy the range :math:`\left[f, 2f\right)`, and so on.

If the UDP provides a ``batch_fitness()`` method, this method will forward ``dvs``
to the ``batch_fitness()`` method of the UDP after sanity checks. The output of the ``batch_fitness()``
method of the UDP will also be checked before being returned. If the UDP does not provide a
``batch_fitness()`` method, an error will be raised.

A successful call of this method will increase the internal fitness evaluation counter
(see :func:`~pygmo.problem.get_fevals()`).

The ``batch_fitness()`` method of the UDP must be able to take as input the decision vectors as a 1D NumPy array,
and it must return the fitness vectors as an iterable Python object (e.g., 1D NumPy array, list, tuple, etc.).

Args:
    dvs (array-like object): the decision vectors (chromosomes) to be evaluated in batch mode

Returns:
    1D NumPy float array: the fitness vectors of *dvs*

Raises:
    ValueError: if *dvs* and/or the return value are not compatible with the problem's properties
    unspecified: any exception thrown by the ``batch_fitness()`` method of the UDP, or by failures at the intersection
      between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
563
// Docstring for pygmo.problem.has_batch_fitness().
std::string problem_has_batch_fitness_docstring()
{
    return R"(has_batch_fitness()

Check if the ``batch_fitness()`` method is available in the UDP.

This method will return ``True`` if the ``batch_fitness()`` method is available in the UDP, ``False`` otherwise.

The availability of the ``batch_fitness()`` method is determined as follows:

* if the UDP does not provide a ``batch_fitness()`` method, then this method will always return ``False``;
* if the UDP provides a ``batch_fitness()`` method but it does not provide a ``has_batch_fitness()`` method,
  then this method will always return ``True``;
* if the UDP provides both a ``batch_fitness()`` and a ``has_batch_fitness()`` method, then this method will return
  the output of the ``has_batch_fitness()`` method of the UDP.

The optional ``has_batch_fitness()`` method of the UDP must return a ``bool``. For information on how to
implement the ``batch_fitness()`` method of the UDP, see :func:`~pygmo.problem.batch_fitness()`.

Returns:
    ``bool``: a flag signalling the availability of the ``batch_fitness()`` method in the UDP

)";
}
588
// Docstring for pygmo.problem.get_nobj(). Fixes the "optimizaztion" typo.
std::string problem_get_nobj_docstring()
{
    return R"(get_nobj()

Number of objectives.

This method will return :math:`n_{obj}`, the number of objectives of the problem.

The optional ``get_nobj()`` method of the UDP must return the number of objectives as an :class:`int`.
If the UDP does not implement the ``get_nobj()`` method, a single-objective optimization problem
will be assumed. The number of objectives returned by the UDP is checked upon the construction
of a :class:`~pygmo.problem`.

Returns:
    :class:`int`: the number of objectives of the problem

)";
}
607
// Docstring for pygmo.problem.get_nx().
std::string problem_get_nx_docstring()
{
    return R"(get_nx()

Dimension of the problem.

This method will return :math:`n_{x}`, the dimension of the problem as established by the length of
the bounds returned by :func:`~pygmo.problem.get_bounds()`.

Returns:
    :class:`int`: the dimension of the problem

)";
}
622
// Docstring for pygmo.problem.get_nix().
std::string problem_get_nix_docstring()
{
    return R"(get_nix()

Integer dimension of the problem.

This method will return :math:`n_{ix}`, the integer dimension of the problem.

The optional ``get_nix()`` method of the UDP must return the problem's integer dimension as an :class:`int`.
If the UDP does not implement the ``get_nix()`` method, a zero integer dimension will be assumed.
The integer dimension returned by the UDP is checked upon the construction
of a :class:`~pygmo.problem`.

Returns:
    :class:`int`: the integer dimension of the problem

)";
}
641
// Docstring for pygmo.problem.get_ncx().
std::string problem_get_ncx_docstring()
{
    return R"(get_ncx()

Continuous dimension of the problem.

This method will return :math:`n_{cx}`, the continuous dimension of the problem.

Returns:
    :class:`int`: the continuous dimension of the problem

)";
}
655
// Docstring for pygmo.problem.get_nf().
std::string problem_get_nf_docstring()
{
    return R"(get_nf()

Dimension of the fitness.

This method will return :math:`n_{f}`, the dimension of the fitness, which is the sum of
:math:`n_{obj}`, :math:`n_{ec}` and :math:`n_{ic}`.

Returns:
    :class:`int`: the dimension of the fitness

)";
}
670
// Docstring for pygmo.problem.get_nec().
std::string problem_get_nec_docstring()
{
    return R"(get_nec()

Number of equality constraints.

This method will return :math:`n_{ec}`, the number of equality constraints of the problem.

The optional ``get_nec()`` method of the UDP must return the number of equality constraints as an :class:`int`.
If the UDP does not implement the ``get_nec()`` method, zero equality constraints will be assumed.
The number of equality constraints returned by the UDP is checked upon the construction
of a :class:`~pygmo.problem`.

Returns:
    :class:`int`: the number of equality constraints of the problem

)";
}
689
// Docstring for pygmo.problem.get_nic().
std::string problem_get_nic_docstring()
{
    return R"(get_nic()

Number of inequality constraints.

This method will return :math:`n_{ic}`, the number of inequality constraints of the problem.

The optional ``get_nic()`` method of the UDP must return the number of inequality constraints as an :class:`int`.
If the UDP does not implement the ``get_nic()`` method, zero inequality constraints will be assumed.
The number of inequality constraints returned by the UDP is checked upon the construction
of a :class:`~pygmo.problem`.

Returns:
    :class:`int`: the number of inequality constraints of the problem

)";
}
708
// Docstring for pygmo.problem.get_nc().
std::string problem_get_nc_docstring()
{
    return R"(get_nc()

Total number of constraints.

This method will return the sum of the output of :func:`~pygmo.problem.get_nic()` and
:func:`~pygmo.problem.get_nec()` (i.e., the total number of constraints).

Returns:
    :class:`int`: the total number of constraints of the problem

)";
}
723
// Docstring for the pygmo.problem.c_tol property (the doctest-style example is
// executed by Sphinx, so its content is preserved exactly).
std::string problem_c_tol_docstring()
{
    return R"(Constraints tolerance.

This property contains an array of :class:`float` that are used when checking for constraint feasibility.
The dimension of the array is :math:`n_{ec} + n_{ic}` (i.e., the total number of constraints), and
the array is zero-filled on problem construction.

This property can also be set via a scalar, instead of an array. In such case, all the tolerances
will be set to the provided scalar value.

Returns:
    1D NumPy float array: the constraints' tolerances

Raises:
    ValueError: if, when setting this property, the size of the input array differs from the number
      of constraints of the problem or if any element of the array is negative or NaN
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

Examples:
    >>> from pygmo import problem, hock_schittkowsky_71 as hs71
    >>> prob = problem(hs71())
    >>> prob.c_tol
    array([0., 0.])
    >>> prob.c_tol = [1, 2]
    >>> prob.c_tol
    array([1., 2.])
    >>> prob.c_tol = .5
    >>> prob.c_tol
    array([0.5, 0.5])

)";
}
758
// Docstring for pygmo.problem.get_fevals().
std::string problem_get_fevals_docstring()
{
    return R"(get_fevals()

Number of fitness evaluations.

Each time a call to :func:`~pygmo.problem.fitness()` successfully completes, an internal counter
is increased by one. The counter is initialised to zero upon problem construction and it is never
reset. Copy operations copy the counter as well.

Returns:
    :class:`int` : the number of times :func:`~pygmo.problem.fitness()` was successfully called

)";
}
774
// Docstring for pygmo.problem.increment_fevals() (added in pygmo 2.13).
std::string problem_increment_fevals_docstring()
{
    return R"(increment_fevals(n)

Increment the number of fitness evaluations.

.. versionadded:: 2.13

This method will increase the internal counter of fitness evaluations by *n*.

Args:
    n (:class:`int`): the amount by which the internal counter of fitness evaluations will be increased

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
794
// Docstring for pygmo.problem.get_gevals().
std::string problem_get_gevals_docstring()
{
    return R"(get_gevals()

Number of gradient evaluations.

Each time a call to :func:`~pygmo.problem.gradient()` successfully completes, an internal counter
is increased by one. The counter is initialised to zero upon problem construction and it is never
reset. Copy operations copy the counter as well.

Returns:
    :class:`int` : the number of times :func:`~pygmo.problem.gradient()` was successfully called

)";
}
810
// Docstring (reST) for pygmo.problem.get_hevals().
std::string problem_get_hevals_docstring()
{
    return R"(get_hevals()

Number of hessians evaluations.

Each time a call to :func:`~pygmo.problem.hessians()` successfully completes, an internal counter
is increased by one. The counter is initialised to zero upon problem construction and it is never
reset. Copy operations copy the counter as well.

Returns:
    :class:`int` : the number of times :func:`~pygmo.problem.hessians()` was successfully called

)";
}
826
// Docstring (reST) for pygmo.problem.has_gradient().
std::string problem_has_gradient_docstring()
{
    return R"(has_gradient()

Check if the gradient is available in the UDP.

This method will return ``True`` if the gradient is available in the UDP, ``False`` otherwise.

The availability of the gradient is determined as follows:

* if the UDP does not provide a ``gradient()`` method, then this method will always return ``False``;
* if the UDP provides a ``gradient()`` method but it does not provide a ``has_gradient()`` method,
  then this method will always return ``True``;
* if the UDP provides both a ``gradient()`` and a ``has_gradient()`` method, then this method will return
  the output of the ``has_gradient()`` method of the UDP.

The optional ``has_gradient()`` method of the UDP must return a ``bool``. For information on how to
implement the ``gradient()`` method of the UDP, see :func:`~pygmo.problem.gradient()`.

Returns:
    ``bool``: a flag signalling the availability of the gradient in the UDP

)";
}
851
// Docstring (reST) for pygmo.problem.gradient().
std::string problem_gradient_docstring()
{
    return R"(gradient(dv)

Gradient.

This method will compute the gradient of the input decision vector *dv* by invoking
the ``gradient()`` method of the UDP. The ``gradient()`` method of the UDP must return
a sparse representation of the gradient: the :math:`k`-th term of the gradient vector
is expected to contain :math:`\frac{\partial f_i}{\partial x_j}`, where the pair :math:`(i,j)`
is the :math:`k`-th element of the sparsity pattern (collection of index pairs), as returned by
:func:`~pygmo.problem.gradient_sparsity()`.

If the UDP provides a ``gradient()`` method, this method will forward *dv* to the ``gradient()``
method of the UDP after sanity checks. The output of the ``gradient()`` method of the UDP will
also be checked before being returned. If the UDP does not provide a ``gradient()`` method, an
error will be raised. A successful call of this method will increase the internal gradient
evaluation counter (see :func:`~pygmo.problem.get_gevals()`).

The ``gradient()`` method of the UDP must be able to take as input the decision vector as a 1D NumPy
array, and it must return the gradient vector as an iterable Python object (e.g., 1D NumPy array,
list, tuple, etc.).

Args:
    dv (array-like object): the decision vector whose gradient will be computed

Returns:
    1D NumPy float array: the gradient of *dv*

Raises:
    ValueError: if either the length of *dv* differs from the value returned by :func:`~pygmo.problem.get_nx()`, or
      the returned gradient vector does not have the same size as the vector returned by
      :func:`~pygmo.problem.gradient_sparsity()`
    NotImplementedError: if the UDP does not provide a ``gradient()`` method
    unspecified: any exception thrown by the ``gradient()`` method of the UDP, or by failures at the intersection
      between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
891
// Docstring (reST) for pygmo.problem.has_gradient_sparsity().
std::string problem_has_gradient_sparsity_docstring()
{
    return R"(has_gradient_sparsity()

Check if the gradient sparsity is available in the UDP.

This method will return ``True`` if the gradient sparsity is available in the UDP, ``False`` otherwise.

The availability of the gradient sparsity is determined as follows:

* if the UDP does not provide a ``gradient_sparsity()`` method, then this method will always return ``False``;
* if the UDP provides a ``gradient_sparsity()`` method but it does not provide a ``has_gradient_sparsity()``
  method, then this method will always return ``True``;
* if the UDP provides both a ``gradient_sparsity()`` method and a ``has_gradient_sparsity()`` method,
  then this method will return the output of the ``has_gradient_sparsity()`` method of the UDP.

The optional ``has_gradient_sparsity()`` method of the UDP must return a ``bool``. For information on how to
implement the ``gradient_sparsity()`` method of the UDP, see :func:`~pygmo.problem.gradient_sparsity()`.

.. note::

   Regardless of what this method returns, the :func:`~pygmo.problem.gradient_sparsity()` method will always
   return a sparsity pattern: if the UDP does not provide the gradient sparsity, pygmo will assume that the sparsity
   pattern of the gradient is dense. See :func:`~pygmo.problem.gradient_sparsity()` for more details.

Returns:
    ``bool``: a flag signalling the availability of the gradient sparsity in the UDP

)";
}
922
// Docstring (reST) for pygmo.problem.gradient_sparsity().
// Fix: doubled article "a a dense pattern" -> "a dense pattern".
std::string problem_gradient_sparsity_docstring()
{
    return R"(gradient_sparsity()

Gradient sparsity pattern.

This method will return the gradient sparsity pattern of the problem. The gradient sparsity pattern is a lexicographically sorted
collection of the indices :math:`(i,j)` of the non-zero elements of :math:`g_{ij} = \frac{\partial f_i}{\partial x_j}`.

If :func:`~pygmo.problem.has_gradient_sparsity()` returns ``True``, then the ``gradient_sparsity()`` method of the
UDP will be invoked, and its result returned (after sanity checks). Otherwise, a dense pattern is assumed and the
returned vector will be :math:`((0,0),(0,1), ... (0,n_x-1), ...(n_f-1,n_x-1))`.

The ``gradient_sparsity()`` method of the UDP must return either a 2D NumPy array of integers, or an iterable
Python object of any kind. Specifically:

* if the returned value is a NumPy array, its shape must be :math:`(n,2)` (with :math:`n \geq 0`),
* if the returned value is an iterable Python object, then its elements must in turn be iterable Python objects
  containing each exactly 2 elements representing the indices :math:`(i,j)`.

Returns:
    2D Numpy int array: the gradient sparsity pattern

Raises:
    ValueError: if the NumPy array returned by the UDP does not satisfy the requirements described above (e.g., invalid
      shape, dimensions, etc.),
      at least one element of the returned iterable Python object does not consist of a collection of exactly
      2 elements, or the sparsity pattern returned by the UDP is invalid
      (specifically, if it is not strictly sorted lexicographically,
      or if the indices in the pattern are incompatible with the properties of the problem, or if the size of the
      returned pattern is different from the size recorded upon construction)
    OverflowError: if the NumPy array returned by the UDP contains integer values which are negative or outside an
      implementation-defined range
    unspecified: any exception thrown by the underlying C++ function,
      the ``PyArray_FROM_OTF()`` function from the NumPy C API, or
      failures at the intersection between C++ and Python (e.g., type conversion errors, mismatched function
      signatures, etc.)

)";
}
963
// Docstring (reST) for pygmo.problem.has_hessians().
std::string problem_has_hessians_docstring()
{
    return R"(has_hessians()

Check if the hessians are available in the UDP.

This method will return ``True`` if the hessians are available in the UDP, ``False`` otherwise.

The availability of the hessians is determined as follows:

* if the UDP does not provide a ``hessians()`` method, then this method will always return ``False``;
* if the UDP provides a ``hessians()`` method but it does not provide a ``has_hessians()`` method,
  then this method will always return ``True``;
* if the UDP provides both a ``hessians()`` and a ``has_hessians()`` method, then this method will return
  the output of the ``has_hessians()`` method of the UDP.

The optional ``has_hessians()`` method of the UDP must return a ``bool``. For information on how to
implement the ``hessians()`` method of the UDP, see :func:`~pygmo.problem.hessians()`.

Returns:
    ``bool``: a flag signalling the availability of the hessians in the UDP

)";
}
988
// Docstring (reST) for pygmo.problem.hessians().
// Fix: the Hessian entry is a *second* derivative; the LaTeX read
// \frac{\partial f^2_l}{...} (derivative of f squared) instead of \frac{\partial^2 f_l}{...}.
std::string problem_hessians_docstring()
{
    return R"(hessians(dv)

Hessians.

This method will compute the hessians of the input decision vector *dv* by invoking
the ``hessians()`` method of the UDP. The ``hessians()`` method of the UDP must return
a sparse representation of the hessians: the element :math:`l` of the returned vector contains
:math:`h^l_{ij} = \frac{\partial^2 f_l}{\partial x_i\partial x_j}` in the order specified by the
:math:`l`-th element of the hessians sparsity pattern (a vector of index pairs :math:`(i,j)`)
as returned by :func:`~pygmo.problem.hessians_sparsity()`. Since
the hessians are symmetric, their sparse representation contains only lower triangular elements.

If the UDP provides a ``hessians()`` method, this method will forward *dv* to the ``hessians()``
method of the UDP after sanity checks. The output of the ``hessians()`` method of the UDP will
also be checked before being returned. If the UDP does not provide a ``hessians()`` method, an
error will be raised. A successful call of this method will increase the internal hessians
evaluation counter (see :func:`~pygmo.problem.get_hevals()`).

The ``hessians()`` method of the UDP must be able to take as input the decision vector as a 1D NumPy
array, and it must return the hessians vector as an iterable Python object (e.g., list, tuple, etc.).

Args:
    dv (array-like object): the decision vector whose hessians will be computed

Returns:
    :class:`list` of 1D NumPy float array: the hessians of *dv*

Raises:
    ValueError: if the length of *dv* differs from the value returned by :func:`~pygmo.problem.get_nx()`, or
      the length of returned hessians does not match the corresponding hessians sparsity pattern dimensions, or
      the size of the return value is not equal to the fitness dimension
    NotImplementedError: if the UDP does not provide a ``hessians()`` method
    unspecified: any exception thrown by the ``hessians()`` method of the UDP, or by failures at the intersection
      between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
1028
// Docstring (reST) for pygmo.problem.has_hessians_sparsity().
std::string problem_has_hessians_sparsity_docstring()
{
    return R"(has_hessians_sparsity()

Check if the hessians sparsity is available in the UDP.

This method will return ``True`` if the hessians sparsity is available in the UDP, ``False`` otherwise.

The availability of the hessians sparsity is determined as follows:

* if the UDP does not provide a ``hessians_sparsity()`` method, then this method will always return ``False``;
* if the UDP provides a ``hessians_sparsity()`` method but it does not provide a ``has_hessians_sparsity()``
  method, then this method will always return ``True``;
* if the UDP provides both a ``hessians_sparsity()`` method and a ``has_hessians_sparsity()`` method,
  then this method will return the output of the ``has_hessians_sparsity()`` method of the UDP.

The optional ``has_hessians_sparsity()`` method of the UDP must return a ``bool``. For information on how to
implement the ``hessians_sparsity()`` method of the UDP, see :func:`~pygmo.problem.hessians_sparsity()`.

.. note::

   Regardless of what this method returns, the :func:`~pygmo.problem.hessians_sparsity()` method will always
   return a sparsity pattern: if the UDP does not provide the hessians sparsity, pygmo will assume that the sparsity
   pattern of the hessians is dense. See :func:`~pygmo.problem.hessians_sparsity()` for more details.

Returns:
    ``bool``: a flag signalling the availability of the hessians sparsity in the UDP

)";
}
1059
// Docstring (reST) for pygmo.problem.hessians_sparsity().
// Fix: the LaTeX \frac{\partial f^l}{...} dropped the second-derivative exponent;
// corrected to \frac{\partial^2 f_l}{...}, consistent with the hessians() docstring.
std::string problem_hessians_sparsity_docstring()
{
    return R"(hessians_sparsity()

Hessians sparsity pattern.

This method will return the hessians sparsity pattern of the problem. Each component :math:`l` of the hessians
sparsity pattern is a lexicographically sorted collection of the indices :math:`(i,j)` of the non-zero elements of
:math:`h^l_{ij} = \frac{\partial^2 f_l}{\partial x_i\partial x_j}`. Since the Hessian matrix is symmetric, only
lower triangular elements are allowed.

If :func:`~pygmo.problem.has_hessians_sparsity()` returns ``True``, then the ``hessians_sparsity()`` method of the
UDP will be invoked, and its result returned (after sanity checks). Otherwise, a dense pattern is assumed and
:math:`n_f` sparsity patterns containing :math:`((0,0),(1,0), (1,1), (2,0) ... (n_x-1,n_x-1))` will be returned.

The ``hessians_sparsity()`` method of the UDP must return an iterable Python object of any kind. Each element of the
returned object will then be interpreted as a sparsity pattern in the same way as described in
:func:`~pygmo.problem.gradient_sparsity()`. Specifically:

* if the element is a NumPy array, its shape must be :math:`(n,2)` (with :math:`n \geq 0`),
* if the element is itself an iterable Python object, then its elements must in turn be iterable Python objects
  containing each exactly 2 elements representing the indices :math:`(i,j)`.

Returns:
    :class:`list` of 2D Numpy int array: the hessians sparsity patterns

Raises:
    ValueError: if the NumPy arrays returned by the UDP do not satisfy the requirements described above (e.g., invalid
      shape, dimensions, etc.),
      at least one element of a returned iterable Python object does not consist of a collection of exactly
      2 elements, or if a sparsity pattern returned by the UDP is invalid (specifically, if it is not strictly sorted lexicographically,
      if the indices in the pattern are incompatible with the properties of the problem or if the size of the pattern
      differs from the size recorded upon construction)
    OverflowError: if the NumPy arrays returned by the UDP contain integer values which are negative or outside an
      implementation-defined range
    unspecified: any exception thrown by the underlying C++ function,
      the ``PyArray_FROM_OTF()`` function from the NumPy C API, or
      failures at the intersection between C++ and Python (e.g., type conversion errors, mismatched function
      signatures, etc.)

)";
}
1102
// Docstring (reST) for pygmo.problem.set_seed().
std::string problem_set_seed_docstring()
{
    return R"(set_seed(seed)

Set the seed for the stochastic variables.

This method will set the seed to be used in the fitness function to instantiate
all stochastic variables. If the UDP provides a ``set_seed()`` method, then
its ``set_seed()`` method will be invoked. Otherwise, an error will be raised.
The *seed* parameter must be non-negative.

The ``set_seed()`` method of the UDP must be able to take an :class:`int` as input parameter.

Args:
    seed (:class:`int`): the desired seed value

Raises:
    NotImplementedError: if the UDP does not provide a ``set_seed()`` method
    OverflowError: if *seed* is negative
    unspecified: any exception raised by the ``set_seed()`` method of the UDP or failures at the intersection
      between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
1127
// Docstring (reST) for pygmo.problem.has_set_seed().
std::string problem_has_set_seed_docstring()
{
    return R"(has_set_seed()

Check if the ``set_seed()`` method is available in the UDP.

This method will return ``True`` if the ``set_seed()`` method is available in the UDP, ``False`` otherwise.

The availability of the ``set_seed()`` method is determined as follows:

* if the UDP does not provide a ``set_seed()`` method, then this method will always return ``False``;
* if the UDP provides a ``set_seed()`` method but it does not provide a ``has_set_seed()`` method,
  then this method will always return ``True``;
* if the UDP provides both a ``set_seed()`` and a ``has_set_seed()`` method, then this method will return
  the output of the ``has_set_seed()`` method of the UDP.

The optional ``has_set_seed()`` method of the UDP must return a ``bool``. For information on how to
implement the ``set_seed()`` method of the UDP, see :func:`~pygmo.problem.set_seed()`.

Returns:
    ``bool``: a flag signalling the availability of the ``set_seed()`` method in the UDP

)";
}
1152
// Docstring (reST) for pygmo.problem.feasibility_f().
std::string problem_feasibility_f_docstring()
{
    return R"(feasibility_f(f)

This method will check the feasibility of a fitness vector *f* against the tolerances returned by
:attr:`~pygmo.problem.c_tol`.

Args:
    f (array-like object): a fitness vector

Returns:
    ``bool``: ``True`` if the fitness vector is feasible, ``False`` otherwise

Raises:
    ValueError: if the size of *f* is not the same as the output of
      :func:`~pygmo.problem.get_nf()`

)";
}
1172
// Docstring (reST) for pygmo.problem.feasibility_x().
// Fixes: the Sphinx directive was ".. note:" (single colon, silently rendered as a
// comment instead of an admonition), and the Args section documented "dv" while the
// signature's parameter is "x".
std::string problem_feasibility_x_docstring()
{
    return R"(feasibility_x(x)

This method will check the feasibility of the fitness corresponding to a decision vector *x* against
the tolerances returned by :attr:`~pygmo.problem.c_tol`.

.. note::

   This will cause one fitness evaluation.

Args:
    x (array-like object): a decision vector

Returns:
    ``bool``: ``True`` if *x* results in a feasible fitness, ``False`` otherwise

Raises:
    unspecified: any exception thrown by :func:`~pygmo.problem.feasibility_f()` or
      :func:`~pygmo.problem.fitness()`

)";
}
1196
// Docstring (reST) for pygmo.problem.get_name().
std::string problem_get_name_docstring()
{
    return R"(get_name()

Problem's name.

If the UDP provides a ``get_name()`` method, then this method will return the output of its ``get_name()`` method.
Otherwise, an implementation-defined name based on the type of the UDP will be returned.

Returns:
    :class:`str`: the problem's name

)";
}
1211
// Docstring (reST) for pygmo.problem.get_extra_info().
std::string problem_get_extra_info_docstring()
{
    return R"(get_extra_info()

Problem's extra info.

If the UDP provides a ``get_extra_info()`` method, then this method will return the output of its ``get_extra_info()``
method. Otherwise, an empty string will be returned.

Returns:
    :class:`str`: extra info about the UDP

Raises:
    unspecified: any exception thrown by the ``get_extra_info()`` method of the UDP

)";
}
1229
// Docstring (reST) for pygmo.problem.get_thread_safety().
std::string problem_get_thread_safety_docstring()
{
    return R"(get_thread_safety()

Problem's thread safety level.

This method will return a value of the enum :class:`pygmo.thread_safety` which indicates the thread safety level
of the UDP. Unlike in C++, in Python it is not possible to re-implement this method in the UDP. That is, for C++
UDPs, the returned value will be the value returned by the ``get_thread_safety()`` method of the UDP. For Python
UDPs, the returned value will be unconditionally ``none``.

Returns:
    a value of :class:`pygmo.thread_safety`: the thread safety level of the UDP

)";
}
1246
// Docstring (reST) for the best_known() method of a C++ UDP.
// The problem's display name is spliced into the text so one template serves
// every exposed problem.
std::string problem_get_best_docstring(const std::string &name)
{
    return R"(best_known()

The best known solution for the )"
           + name + R"( problem.

Returns:
    1D NumPy float array: the best known solution for the )"
           + name + R"( problem

)";
}
1260
// Docstring (reST) for the pygmo.translate meta-problem class.
std::string translate_docstring()
{
    return R"(The translate meta-problem.

This meta-problem translates the whole search space of an input :class:`pygmo.problem` or
user-defined problem (UDP) by a fixed translation vector. :class:`~pygmo.translate` objects
are user-defined problems that can be used in the construction of a :class:`pygmo.problem`.
)";
}
1270
// Docstring (reST) for the pygmo.translate.translation property.
std::string translate_translation_docstring()
{
    return R"(Translation vector.

This read-only property contains an array of :class:`float` representing the translation vector used in the
construction of this problem.

Returns:
    1D NumPy float array: the translation vector

)";
}
1283
// Docstring (reST) for the pygmo.algorithm class.
// Fix: typo "optimizaztion" -> "optimization".
std::string algorithm_docstring()
{
    return R"(__init__(uda = null_algorithm())

Algorithm class.

This class represents an optimization algorithm. An algorithm can be
stochastic, deterministic, population based, derivative-free, using hessians,
using gradients, a meta-heuristic, evolutionary, etc.. Via this class pygmo offers
a common interface to all types of algorithms that can be applied to find solution
to a generic mathematical programming problem as represented by the
:class:`~pygmo.problem` class.

In order to define an optimization algorithm in pygmo, the user must first define a class
whose methods describe the properties of the algorithm and implement its logic.
In pygmo, we refer to such a class as a **user-defined algorithm**, or UDA for short. Once
defined and instantiated, a UDA can then be used to construct an instance of this class,
:class:`~pygmo.algorithm`, which provides a generic interface to optimization algorithms.

Every UDA must implement at least the following method:

.. code-block::

   def evolve(self, pop):
       ...

The ``evolve()`` method takes as input a :class:`~pygmo.population`, and it is expected to return
a new population generated by the *evolution* (or *optimisation*) of the original population.

Additional optional methods can be implemented in a UDA:

.. code-block::

   def has_set_seed(self):
       ...
   def set_seed(self, s):
       ...
   def has_set_verbosity(self):
       ...
   def set_verbosity(self, l):
       ...
   def get_name(self):
       ...
   def get_extra_info(self):
       ...

See the documentation of the corresponding methods in this class for details on how the optional
methods in the UDA should be implemented and on how they are used by :class:`~pygmo.algorithm`.
Note that the exposed C++ algorithms can also be used as UDAs, even if they do not expose any of the
mandatory or optional methods listed above (see :ref:`here <available_algorithms>` for the
full list of UDAs already coded in pygmo).

This class is the Python counterpart of the C++ class :cpp:class:`pagmo::algorithm`.

Args:
    uda: a user-defined algorithm, either C++ or Python

Raises:
    NotImplementedError: if *uda* does not implement the mandatory method detailed above
    unspecified: any exception thrown by methods of the UDA invoked during construction,
      the deep copy of the UDA, the constructor of the underlying C++ class, or
      failures at the intersection between C++ and Python (e.g., type conversion errors, mismatched function
      signatures, etc.)

)";
}
1350
// Docstring (reST) for pygmo.algorithm.evolve().
std::string algorithm_evolve_docstring()
{
    return R"(evolve(pop)

This method will invoke the ``evolve()`` method of the UDA. This is where the core of the optimization
(*evolution*) is made.

Args:
    pop (:class:`~pygmo.population`): starting population

Returns:
    :class:`~pygmo.population`: evolved population

Raises:
    unspecified: any exception thrown by the ``evolve()`` method of the UDA or by failures at the
      intersection between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
1370
// Docstring (reST) for pygmo.algorithm.set_seed().
std::string algorithm_set_seed_docstring()
{
    return R"(set_seed(seed)

Set the seed for the stochastic evolution.

This method will set the seed to be used in the ``evolve()`` method of the UDA for all stochastic variables. If the UDA
provides a ``set_seed()`` method, then its ``set_seed()`` method will be invoked. Otherwise, an error will be
raised. The *seed* parameter must be non-negative.

The ``set_seed()`` method of the UDA must be able to take an :class:`int` as input parameter.

Args:
    seed (:class:`int`): the random seed

Raises:
    NotImplementedError: if the UDA does not provide a ``set_seed()`` method
    unspecified: any exception raised by the ``set_seed()`` method of the UDA or failures at the intersection
      between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
1393
// Docstring (reST) for pygmo.algorithm.has_set_seed().
std::string algorithm_has_set_seed_docstring()
{
    return R"(has_set_seed()

Check if the ``set_seed()`` method is available in the UDA.

This method will return ``True`` if the ``set_seed()`` method is available in the UDA, ``False`` otherwise.

The availability of the ``set_seed()`` method is determined as follows:

* if the UDA does not provide a ``set_seed()`` method, then this method will always return ``False``;
* if the UDA provides a ``set_seed()`` method but it does not provide a ``has_set_seed()`` method,
  then this method will always return ``True``;
* if the UDA provides both a ``set_seed()`` and a ``has_set_seed()`` method, then this method will return
  the output of the ``has_set_seed()`` method of the UDA.

The optional ``has_set_seed()`` method of the UDA must return a ``bool``. For information on how to
implement the ``set_seed()`` method of the UDA, see :func:`~pygmo.algorithm.set_seed()`.

Returns:
    ``bool``: a flag signalling the availability of the ``set_seed()`` method in the UDA

)";
}
1418
// Docstring (reST) for pygmo.algorithm.set_verbosity().
std::string algorithm_set_verbosity_docstring()
{
    return R"(set_verbosity(level)

Set the verbosity of logs and screen output.

This method will set the level of verbosity for the algorithm. If the UDA provides a ``set_verbosity()`` method,
then its ``set_verbosity()`` method will be invoked. Otherwise, an error will be raised.

The exact meaning of the input parameter *level* is dependent on the UDA.

The ``set_verbosity()`` method of the UDA must be able to take an :class:`int` as input parameter.

Args:
    level (:class:`int`): the desired verbosity level

Raises:
    NotImplementedError: if the UDA does not provide a ``set_verbosity()`` method
    unspecified: any exception raised by the ``set_verbosity()`` method of the UDA or failures at the intersection
      between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
1442
// Docstring (reST) for pygmo.algorithm.has_set_verbosity().
std::string algorithm_has_set_verbosity_docstring()
{
    return R"(has_set_verbosity()

Check if the ``set_verbosity()`` method is available in the UDA.

This method will return ``True`` if the ``set_verbosity()`` method is available in the UDA, ``False`` otherwise.

The availability of the ``set_verbosity()`` method is determined as follows:

* if the UDA does not provide a ``set_verbosity()`` method, then this method will always return ``False``;
* if the UDA provides a ``set_verbosity()`` method but it does not provide a ``has_set_verbosity()`` method,
  then this method will always return ``True``;
* if the UDA provides both a ``set_verbosity()`` and a ``has_set_verbosity()`` method, then this method will return
  the output of the ``has_set_verbosity()`` method of the UDA.

The optional ``has_set_verbosity()`` method of the UDA must return a ``bool``. For information on how to
implement the ``set_verbosity()`` method of the UDA, see :func:`~pygmo.algorithm.set_verbosity()`.

Returns:
    ``bool``: a flag signalling the availability of the ``set_verbosity()`` method in the UDA

)";
}
1467
// Docstring (reST) for pygmo.algorithm.get_name().
std::string algorithm_get_name_docstring()
{
    return R"(get_name()

Algorithm's name.

If the UDA provides a ``get_name()`` method, then this method will return the output of its ``get_name()`` method.
Otherwise, an implementation-defined name based on the type of the UDA will be returned.

Returns:
    :class:`str`: the algorithm's name

)";
}
1482
// Docstring (reST) for pygmo.algorithm.get_extra_info().
std::string algorithm_get_extra_info_docstring()
{
    return R"(get_extra_info()

Algorithm's extra info.

If the UDA provides a ``get_extra_info()`` method, then this method will return the output of its ``get_extra_info()``
method. Otherwise, an empty string will be returned.

Returns:
    :class:`str`: extra info about the UDA

Raises:
    unspecified: any exception thrown by the ``get_extra_info()`` method of the UDA

)";
}
1500
// Docstring (reST) for pygmo.algorithm.get_thread_safety().
std::string algorithm_get_thread_safety_docstring()
{
    return R"(get_thread_safety()

Algorithm's thread safety level.

This method will return a value of the enum :class:`pygmo.thread_safety` which indicates the thread safety level
of the UDA. Unlike in C++, in Python it is not possible to re-implement this method in the UDA. That is, for C++
UDAs, the returned value will be the value returned by the ``get_thread_safety()`` method of the UDA. For Python
UDAs, the returned value will be unconditionally ``none``.

Returns:
    a value of :class:`pygmo.thread_safety`: the thread safety level of the UDA

)";
}
1517
// Docstring (reST) for the inner_algorithm property shared by meta-algorithms.
std::string generic_uda_inner_algorithm_docstring()
{

    return R"(Inner algorithm of the meta-algorithm.

This read-only property gives direct access to the :class:`~pygmo.algorithm` stored within this meta-algorithm.

Returns:
    :class:`~pygmo.algorithm`: a reference to the inner algorithm

)";
}
1530
// Docstring (reST) for the inner_problem property shared by meta-problems.
std::string generic_udp_inner_problem_docstring()
{

    return R"(Inner problem of the meta-problem.

This read-only property gives direct access to the :class:`~pygmo.problem` stored within this meta-problem.

Returns:
    :class:`~pygmo.problem`: a reference to the inner problem

)";
}
1543
// Docstring (reST) for the pygmo.mbh meta-algorithm class.
// Fix: the pseudo code's if/else bodies were swapped with respect to mbh's stop
// criterion -- `stop_criteria` counts *consecutive non-improving* inner runs, so an
// improvement must reset the counter and a failure must increment it (the previous
// text would have stopped after `stop_criteria` improvements instead).
std::string mbh_docstring()
{
    return R"(Monotonic Basin Hopping (generalized).

Monotonic basin hopping, or simply, basin hopping, is an algorithm rooted in the idea of mapping
the objective function :math:`f(\mathbf x_0)` into the local minima found starting from :math:`\mathbf x_0`.
This simple idea allows a substantial increase of efficiency in solving problems, such as the Lennard-Jones
cluster or the MGA-1DSM interplanetary trajectory problem that are conjectured to have a so-called
funnel structure.

In pygmo we provide an original generalization of this concept resulting in a meta-algorithm that operates
on any :class:`pygmo.population` using any suitable user-defined algorithm (UDA). When a population containing a single
individual is used and coupled with a local optimizer, the original method is recovered.
The pseudo code of our generalized version is:

.. code-block:: none

   > Select a pygmo population
   > Select a UDA
   > Store best individual
   > while i < stop_criteria
   > > Perturb the population in a selected neighbourhood
   > > Evolve the population using the algorithm
   > > if the best individual is improved
   > > > i = 0
   > > > update best individual
   > > else
   > > > increment i

:class:`pygmo.mbh` is a user-defined algorithm (UDA) that can be used to construct :class:`pygmo.algorithm` objects.

See: https://arxiv.org/pdf/cond-mat/9803344.pdf for the paper introducing the basin hopping idea for a Lennard-Jones
cluster optimization.

See also the docs of the C++ class :cpp:class:`pagmo::mbh`.

)";
}
1582
// Returns the Python docstring for pygmo.mbh.get_seed().
std::string mbh_get_seed_docstring()
{
    return R"(get_seed()

Get the seed value that was used for the construction of this :class:`~pygmo.mbh`.

Returns:
    :class:`int`: the seed value

)";
}
1594
// Returns the Python docstring for pygmo.mbh.get_verbosity().
std::string mbh_get_verbosity_docstring()
{
    return R"(get_verbosity()

Get the verbosity level value that was used for the construction of this :class:`~pygmo.mbh`.

Returns:
    :class:`int`: the verbosity level

)";
}
1606
// Returns the Python docstring for pygmo.mbh.set_perturb().
// Fixed the duplicated word in the parameter description ("perturb the perturbation").
std::string mbh_set_perturb_docstring()
{
    return R"(set_perturb(perturb)

Set the perturbation vector.

Args:
    perturb (array-like object): the perturbation to be applied to each component

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., type conversion errors,
      mismatched function signatures, etc.)

)";
}
1622
// Returns the Python docstring for pygmo.mbh.get_log(), including a doctest-skipped example run.
std::string mbh_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()``. The log frequency depends on the verbosity parameter
(by default nothing is logged) which can be set calling :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm` constructed
with an :class:`~pygmo.mbh`. A verbosity level ``N > 0`` will log one line at the end of each call to the inner algorithm.

Returns:
    :class:`list` of :class:`tuple`: at each call of the inner algorithm, the values ``Fevals``, ``Best``, ``Violated``, ``Viol. Norm`` and ``Trial``, where:

    * ``Fevals`` (:class:`int`), the number of fitness evaluations made
    * ``Best`` (:class:`float`), the objective function of the best fitness currently in the population
    * ``Violated`` (:class:`int`), the number of constraints currently violated by the best solution
    * ``Viol. Norm`` (:class:`float`), the norm of the violation (discounted already by the constraints tolerance)
    * ``Trial`` (:class:`int`), the trial number (which will determine the algorithm stop)

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(mbh(algorithm(de(gen = 10))))
    >>> algo.set_verbosity(3)
    >>> prob = problem(cec2013(prob_id = 1, dim = 20))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Fevals:          Best:      Violated:    Viol. Norm:         Trial:
        440        25162.3              0              0              0
        880          14318              0              0              0
       1320        11178.2              0              0              0
       1760        6613.71              0              0              0
       2200        6613.71              0              0              1
       2640        6124.62              0              0              0
       3080        6124.62              0              0              1

See also the docs of the relevant C++ method :cpp:func:`pagmo::mbh::get_log()`.

)";
}
1660
// Returns the Python docstring for pygmo.mbh.get_perturb().
std::string mbh_get_perturb_docstring()
{
    return R"(get_perturb()

Get the perturbation vector.

Returns:
    1D NumPy float array: the perturbation vector

)";
}
1672
// Returns the Python docstring for the pygmo.cstrs_self_adaptive UDA (class-level documentation).
// Fixed the misspelling "Faramani" (the block's own citation spells the author "Farmani")
// and the grammar slip "Several modification".
std::string cstrs_self_adaptive_docstring()
{
    return R"(This meta-algorithm implements a constraint handling technique that allows the use of any user-defined algorithm
(UDA) able to deal with single-objective unconstrained problems, on single-objective constrained problems. The
technique self-adapts its parameters during each successive call to the inner UDA basing its decisions on the entire
underlying population. The resulting approach is an alternative to using the meta-problem :class:`~pygmo.unconstrain`
to transform the constrained fitness into an unconstrained fitness.

The self-adaptive constraints handling meta-algorithm is largely based on the ideas of Farmani and Wright but it
extends their use to any-algorithm, in particular to non generational, population based, evolutionary approaches where
a steady-state reinsertion is used (i.e. as soon as an individual is found fit it is immediately reinserted into the
population and will influence the next offspring genetic material).

Each decision vector is assigned an infeasibility measure :math:`\iota` which accounts for the normalized violation of
all the constraints (discounted by the constraints tolerance as returned by :attr:`pygmo.problem.c_tol`). The
normalization factor used :math:`c_{j_{max}}` is the maximum violation of the :math:`j` constraint.

As in the original paper, three individuals in the evolving population are then used to penalize the single
objective.

.. math::
   \begin{array}{rl}
   \check X & \mbox{: the best decision vector} \\
   \hat X & \mbox{: the worst decision vector} \\
   \breve X & \mbox{: the decision vector with the highest objective}
   \end{array}

The best and worst decision vectors are defined accounting for their infeasibilities and for the value of the
objective function. Using the above definitions the overall pseudo code can be summarized as follows:

.. code-block:: none

   > Select a pygmo.population (related to a single-objective constrained problem)
   > Select a UDA (able to solve single-objective unconstrained problems)
   > while i < iter
   > > Compute the normalization factors (will depend on the current population)
   > > Compute the best, worst, highest (will depend on the current population)
   > > Evolve the population using the UDA and a penalized objective
   > > Reinsert the best decision vector from the previous evolution

:class:`pygmo.cstrs_self_adaptive` is a user-defined algorithm (UDA) that can be used to construct :class:`pygmo.algorithm` objects.

.. note::

   Self-adaptive constraints handling implements an internal cache to avoid the re-evaluation of the fitness
   for decision vectors already evaluated. This makes the final counter of fitness evaluations somewhat unpredictable.
   The number of function evaluation will be bounded to *iters* times the fevals made by one call to the inner UDA. The
   internal cache is reset at each iteration, but its size will grow unlimited during each call to
   the inner UDA evolve method.

.. note::

   Several modifications were made to the original Farmani and Wright ideas to allow their approach to work on
   corner cases and with any UDAs. Most notably, a violation to the :math:`j`-th constraint is ignored if all
   the decision vectors in the population satisfy that particular constraint (i.e. if :math:`c_{j_{max}} = 0`).

.. note::

   The performances of :class:`~pygmo.cstrs_self_adaptive` are highly dependent on the particular inner
   algorithm employed and in particular to its parameters (generations / iterations).

.. seealso::

   Farmani, Raziyeh, and Jonathan A. Wright. "Self-adaptive fitness formulation for constrained optimization." IEEE
   Transactions on Evolutionary Computation 7.5 (2003): 445-455.

See also the docs of the C++ class :cpp:class:`pagmo::cstrs_self_adaptive`.

)";
}
1743
// Returns the Python docstring for pygmo.cstrs_self_adaptive.get_log().
// Fixed the example, which extracted the wrong UDA: ``algo.extract(moead)`` would return
// None on an algorithm constructed with cstrs_self_adaptive.
std::string cstrs_self_adaptive_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()``. The log frequency depends on the verbosity parameter
(by default nothing is logged) which can be set calling :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm` constructed
with an :class:`~pygmo.cstrs_self_adaptive`. A verbosity level of ``N > 0`` will log one line each ``N`` ``iters``.

Returns:
    :class:`list` of :class:`tuple`: at each call of the inner algorithm, the values ``Iters``, ``Fevals``, ``Best``, ``Infeasibility``,
    ``Violated``, ``Viol. Norm`` and ``N. Feasible``, where:

    * ``Iters`` (:class:`int`), the number of iterations made (i.e. calls to the evolve method of the inner algorithm)
    * ``Fevals`` (:class:`int`), the number of fitness evaluations made
    * ``Best`` (:class:`float`), the objective function of the best fitness currently in the population
    * ``Infeasibility`` (:class:`float`), the aggregated (and normalized) infeasibility value of ``Best``
    * ``Violated`` (:class:`int`), the number of constraints currently violated by the best solution
    * ``Viol. Norm`` (:class:`float`), the norm of the violation (discounted already by the constraints tolerance)
    * ``N. Feasible`` (:class:`int`), the number of feasible individuals currently in the population.

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(cstrs_self_adaptive(iters = 20, algo = de(10)))
    >>> algo.set_verbosity(3)
    >>> prob = problem(cec2006(prob_id = 1))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Iter:        Fevals:          Best: Infeasibility:      Violated:    Viol. Norm:   N. Feasible:
        1              0       -96.5435        0.34607              4        177.705              0 i
        4            600       -96.5435       0.360913              4        177.705              0 i
        7           1200       -96.5435        0.36434              4        177.705              0 i
       10           1800       -96.5435       0.362307              4        177.705              0 i
       13           2400       -23.2502       0.098049              4        37.1092              0 i
       16           3000       -23.2502       0.071571              4        37.1092              0 i
       19           3600       -23.2502       0.257604              4        37.1092              0 i
    >>> uda = algo.extract(cstrs_self_adaptive)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 0, -96.54346700540063, 0.34606950943401493, 4, 177.70482046341274, 0), (4, 600, ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::cstrs_self_adaptive::get_log()`.

)";
}
1787
// Returns the Python docstring for the pygmo.null_algorithm UDA.
std::string null_algorithm_docstring()
{
    return R"(__init__()

The null algorithm.

An algorithm used in the default-initialization of :class:`pygmo.algorithm` and of the meta-algorithms.

)";
}
1798
// Returns the Python docstring for the pygmo.null_problem UDP.
// Fixed the garbled word "constraintsctives" in the *nic* parameter description.
std::string null_problem_docstring()
{
    return R"(__init__(nobj = 1, nec = 0, nic = 0)

The null problem.

A problem used in the default-initialization of :class:`pygmo.problem` and of the meta-problems.

Args:
    nobj (:class:`int`): the number of objectives
    nec (:class:`int`): the number of equality constraints
    nic (:class:`int`): the number of inequality constraints

Raises:
    ValueError: if *nobj*, *nec*, *nic* are negative or greater than an implementation-defined value or if *nobj* is zero
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
1819
// Returns the Python docstring for the pygmo.rosenbrock UDP.
std::string rosenbrock_docstring()
{
    return R"(__init__(dim = 2)

The Rosenbrock problem.

Args:
    dim (:class:`int`): problem dimension

Raises:
    OverflowError: if *dim* is negative or greater than an implementation-defined value
    ValueError: if *dim* is less than 2

See also the docs of the C++ class :cpp:class:`pagmo::rosenbrock`.

)";
}
1837
// Returns the Python docstring for the pygmo.minlp_rastrigin UDP.
std::string minlp_rastrigin_docstring()
{
    return R"(__init__(dim_c = 1, dim_i = 1)

The scalable MINLP Rastrigin problem.

Args:
    dim_c (:class:`int`): MINLP continuous dimension
    dim_i (:class:`int`): MINLP integer dimension

Raises:
    OverflowError: if *dim_c* / *dim_i* is negative or greater than an implementation-defined value
    ValueError: if *dim_c* + *dim_i* is less than 1

See also the docs of the C++ class :cpp:class:`pagmo::minlp_rastrigin`.

)";
}
1856
// Returns the Python docstring for pygmo.zdt.p_distance().
// Fixed a copy-paste error from the dtlz version: this docstring described the metric as
// measuring the distance from the Pareto front of "any DTLZ problem" — it is the ZDT suite here.
// Also corrected "C++ class" -> "C++ method" for the p_distance() reference.
std::string zdt_p_distance_docstring()
{
    return R"(p_distance(point)

p_distance(pop)

Convergence metric for decision vectors (0 = on the optimal front)

Introduced by Martens and Izzo, this metric is able to measure "a distance" of any point from
the pareto front of any ZDT problem analytically.

Args:
    point (array-like object): decision vector for which the p distance is requested
    pop (:class:`~pygmo.population`): population for which the average p distance is requested

Returns:
    :class:`float`: the distance (or average distance) from the Pareto front

See also the docs of the C++ method :func:`~pygmo.zdt.p_distance()`

)";
}
1879
// Returns the Python docstring for pygmo.dtlz.p_distance().
std::string dtlz_p_distance_docstring()
{
    return R"(p_distance(point)

p_distance(pop)

Convergence metric for decision vectors (0 = on the optimal front)

Introduced by Martens and Izzo, this metric is able to measure "a distance" of any point from
the pareto front of any DTLZ problem analytically.

Args:
    point (array-like object): decision vector for which the p distance is requested
    pop (:class:`~pygmo.population`): population for which the average p distance is requested

Returns:
    :class:`float`: the distance (or average distance) from the Pareto front

See also the docs of the C++ class :func:`~pygmo.dtlz.p_distance()`

)";
}
1902
// Returns the Python docstring for the pygmo.dtlz UDP.
std::string dtlz_docstring()
{
    return R"(__init__(prob_id = 1, dim = 5, fdim = 3, alpha = 100)

The DTLZ problem suite problem.

Args:
    prob_id (:class:`int`): DTLZ problem id
    dim (:class:`int`): problem dimension
    fdim (:class:`int`): number of objectives
    alpha (:class:`int`): controls density of solutions (used only by DTLZ4)

Raises:
    OverflowError: if *prob_id*, *dim*, *fdim* or *alpha* are negative or greater than an implementation-defined value
    ValueError: if *prob_id* is not in [1..7], *fdim* is smaller than 2, *dim* is smaller or equal to *fdim*.

See also the docs of the C++ class :cpp:class:`pagmo::dtlz`.

)";
}
1923
// Returns the Python docstring for the pygmo.wfg UDP.
std::string wfg_docstring()
{
    return R"(__init__(prob_id = 1, dim_dvs = 5, dim_obj = 3, dim_k = 4)

The WFG problem suite.

Args:
    prob_id (:class:`int`): WFG problem id
    dim_dvs (:class:`int`): decision vector size
    dim_obj (:class:`int`): number of objectives
    dim_k (:class:`int`): position parameter

Raises:
    OverflowError: if *prob_id*, *dim_dvs*, *dim_obj* or *dim_k* are negative or greater than an implementation-defined value
    ValueError: if *prob_id* is not in [1, ..., 9], *dim_dvs* is smaller than 1, *dim_obj* is smaller than 2, *dim_k* is
      smaller than 1 or bigger or equal to *dim_dvs* or if *dim_k*mod(*dim_obj*-1) is different than zero. Also, when *prob_id* equals
      to 2 or 3, if (*dim_dvs*-*dim_k*)mod(2) is different than zero.

See also the docs of the C++ class :cpp:class:`pagmo::wfg`.

)";
}
1946
// Returns the Python docstring for the pygmo.cec2014 UDP.
// Fixed an internal inconsistency: the Args section states the valid problem ids are [1..30]
// (CEC2014 defines 30 problems), but the Raises section said "[1..28]" — a leftover from the
// cec2013 docstring.
std::string cec2014_docstring()
{
    return R"(__init__(prob_id = 1, dim = 2)

.. versionadded:: 2.8

The CEC 2014 problem suite (continuous, box-bounded, single-objective problems)

Args:
    prob_id (:class:`int`): problem id (one of [1..30])
    dim (:class:`int`): number of dimensions (one of [2, 10, 20, 30, 50, 100])

Raises:
    OverflowError: if *dim* or *prob_id* are negative or greater than an implementation-defined value
    ValueError: if *prob_id* is not in [1..30] or if *dim* is not in [2, 10, 20, 30, 50, 100] or if *dim* is 2 and *prob_id* is in [17,18,19,20,21,22,29,30]

See also the docs of the C++ class :cpp:class:`pagmo::cec2014`.

)";
}
1967
// Returns the Python docstring for the pygmo.cec2013 UDP.
std::string cec2013_docstring()
{
    return R"(__init__(prob_id = 1, dim = 2)

The CEC 2013 problem suite (continuous, box-bounded, single-objective problems)

Args:
    prob_id (:class:`int`): problem id (one of [1..28])
    dim (:class:`int`): number of dimensions (one of [2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])

Raises:
    OverflowError: if *dim* or *prob_id* are negative or greater than an implementation-defined value
    ValueError: if *prob_id* is not in [1..28] or if *dim* is not in [2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]

See also the docs of the C++ class :cpp:class:`pagmo::cec2013`.

)";
}
1986
// Returns the Python docstring for the pygmo.cec2009 UDP.
// Fixed C++ syntax leaking into the Python signature (``false`` -> ``False``, ``30u`` -> ``30``)
// and the suite description: the CEC2009 UF/CF benchmark is the "Performance Assessment of
// Constrained / Bound Constrained Multi-Objective Optimization Algorithms" competition, i.e. the
// problems are multi-objective, not single-objective.
std::string cec2009_docstring()
{
    return R"(__init__(prob_id = 1, is_constrained = False, dim = 30)

The CEC 2009 problem suite (continuous, constrained, multi-objective problems)

Args:
    prob_id (:class:`int`): problem id (one of [1..10])
    is_constrained (:class:`bool`): selects the constrained version of the problems
    dim (:class:`int`): problem dimension

Raises:
    OverflowError: if *prob_id* or *dim* are negative or greater than an implementation-defined value
    ValueError: if *prob_id* is not in [1..10] or if *dim* is zero

See also the docs of the C++ class :cpp:class:`pagmo::cec2009`.

)";
}
2006
// Returns the Python docstring for the pygmo.cec2006 UDP.
std::string cec2006_docstring()
{
    return R"(__init__(prob_id = 1)

The CEC 2006 problem suite (continuous, constrained, single-objective problems)

Args:
    prob_id (:class:`int`): problem id (one of [1..24])

Raises:
    OverflowError: if *prob_id* is negative or greater than an implementation-defined value
    ValueError: if *prob_id* is not in [1..24]

See also the docs of the C++ class :cpp:class:`pagmo::cec2006`.

)";
}
2024
// Returns the Python docstring for the pygmo.luksan_vlcek1 UDP (Chained Rosenbrock with
// trigonometric-exponential equality constraints).
std::string luksan_vlcek1_docstring()
{
    return R"(__init__(dim = 3)

Implementation of Example 5.1 in the report from Luksan and Vlcek.

The problem is also known as the Chained Rosenbrock function with trigonometric-exponential constraints.

Its formulation in pygmo is written as:

.. math::
   \begin{array}{rl}
   \mbox{find:} & -5 \le x_i \le 5, \forall i=1..n \\
   \mbox{to minimize: } & \sum_{i=1}^{n-1}\left[100\left(x_i^2-x_{i+1}\right)^2 + \left(x_i-1\right)^2\right] \\
   \mbox{subject to:} &
   3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2})\sin(x_{k+1}+x_{k+2}) + \\
   & +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 = 0, \forall k=1..n-2
   \end{array}

See: Luksan, L., and Jan Vlcek. "Sparse and partially separable test problems for unconstrained and equality
constrained optimization." (1999). http://hdl.handle.net/11104/0123965

Args:
    dim (:class:`int`): problem dimension

Raises:
    OverflowError: if *dim* is negative or greater than an implementation-defined value

See also the docs of the C++ class :cpp:class:`pagmo::luksan_vlcek1`.

)";
}
2057
// Shared docstring for the get_seed() method exposed by all stochastic UDAs.
// Fixed the Returns description: the value is the random seed used by the UDA,
// not "of the population" (a copy-paste slip from the population docs).
std::string generic_uda_get_seed_docstring()
{
    return R"(get_seed()

This method will return the random seed used internally by this uda.

Returns:
    :class:`int`: the random seed of the UDA
)";
}
2068
// Returns the Python docstring for the pygmo.bee_colony UDA.
std::string bee_colony_docstring()
{
    return R"(__init__(gen = 1, limit = 1, seed = random)

Artificial Bee Colony.

Args:
    gen (:class:`int`): number of generations
    limit (:class:`int`): maximum number of trials for abandoning a source
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen*, *limit* or *seed* is negative or greater than an implementation-defined value
    ValueError: if *limit* is not greater than 0

See also the docs of the C++ class :cpp:class:`pagmo::bee_colony`.

)";
}
2088
// Returns the Python docstring for pygmo.bee_colony.get_log(), including a doctest-skipped example run.
std::string bee_colony_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()``. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.bee_colony`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``Current best``, ``Best``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``Current best`` (:class:`float`), the best fitness currently in the population
    * ``Best`` (:class:`float`), the best fitness found

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(bee_colony(gen = 500, limit = 20))
    >>> algo.set_verbosity(100)
    >>> prob = problem(rosenbrock(10))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:          Best: Current Best:
       1             40         261363        261363
     101           4040        112.237       267.969
     201           8040        20.8885       265.122
     301          12040        20.6076       20.6076
     401          16040         18.252       140.079
    >>> uda = algo.extract(bee_colony)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 40, 183727.83934515435, 183727.83934515435), ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::bee_colony::get_log()`.

)";
}
2126
// Returns the Python docstring for the pygmo.de UDA.
// Fixed the repeated "dafault" typo in the parameter descriptions.
std::string de_docstring()
{
    return R"(__init__(gen = 1, F = 0.8, CR = 0.9, variant = 2, ftol = 1e-6, xtol = 1e-6, seed = random)

Differential Evolution

Args:
    gen (:class:`int`): number of generations
    F (:class:`float`): weight coefficient (default value is 0.8)
    CR (:class:`float`): crossover probability (default value is 0.9)
    variant (:class:`int`): mutation variant (default variant is 2: /rand/1/exp)
    ftol (:class:`float`): stopping criteria on the f tolerance (default is 1e-6)
    xtol (:class:`float`): stopping criteria on the x tolerance (default is 1e-6)
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen*, *variant* or *seed* is negative or greater than an implementation-defined value
    ValueError: if *F*, *CR* are not in [0,1] or *variant* is not in [1, 10]

The following variants (mutation variants) are available to create a new candidate individual:

+-------------------------+-------------------------+
| 1 - best/1/exp          | 2 - rand/1/exp          |
+-------------------------+-------------------------+
| 3 - rand-to-best/1/exp  | 4 - best/2/exp          |
+-------------------------+-------------------------+
| 5 - rand/2/exp          | 6 - best/1/bin          |
+-------------------------+-------------------------+
| 7 - rand/1/bin          | 8 - rand-to-best/1/bin  |
+-------------------------+-------------------------+
| 9 - best/2/bin          | 10 - rand/2/bin         |
+-------------------------+-------------------------+

See also the docs of the C++ class :cpp:class:`pagmo::de`.

)";
}
2164
// Returns the Python docstring for pygmo.de.get_log(), including a doctest-skipped example run.
std::string de_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()``. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.de`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``Best``, ``dx``, ``df``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``Best`` (:class:`float`), the best fitness function currently in the population
    * ``dx`` (:class:`float`), the norm of the distance to the population mean of the mutant vectors
    * ``df`` (:class:`float`), the population flatness evaluated as the distance between the fitness of the best and of the worst individual

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(de(gen = 500))
    >>> algo.set_verbosity(100)
    >>> prob = problem(rosenbrock(10))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:          Best:            dx:            df:
       1            20         162446        65.2891    1.78686e+06
     101          2020        198.402         8.4454        572.161
     201          4020        21.1155        2.60629        24.5152
     301          6020        6.67069        0.51811        1.99744
     401          8020        3.60022       0.583444       0.554511
    Exit condition -- generations = 500
    >>> uda = algo.extract(de)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 20, 162446.0185265718, 65.28911664703388, 1786857.8926660626), ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::de::get_log()`.

)";
}
2204
// Returns the Python docstring for the pygmo.compass_search UDA.
// Fixed the repeated "dafault" typo in the parameter descriptions.
std::string compass_search_docstring()
{
    return R"(__init__(max_fevals = 1, start_range = .1, stop_range = .01, reduction_coeff = .5)

Compass Search

Args:
    max_fevals (:class:`int`): maximum number of function evaluation
    start_range (:class:`float`): start range (default value is .1)
    stop_range (:class:`float`): stop range (default value is .01)
    reduction_coeff (:class:`float`): range reduction coefficient (default value is .5)

Raises:
    OverflowError: if *max_fevals* is negative or greater than an implementation-defined value
    ValueError: if *start_range* is not in (0, 1], if *stop_range* is not in (*start_range*, 1] or if *reduction_coeff* is not in (0,1)

See also the docs of the C++ class :cpp:class:`pagmo::compass_search`.

)";
}
2225
// Returns the Python docstring for pygmo.compass_search.get_log(), including a doctest-skipped example run.
// Fixed the "improvment" typo and the missing space in "the values``Fevals``".
std::string compass_search_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.compass_search`. A verbosity larger than 0 implies one log line at each improvement of the fitness or
change in the search range.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Fevals``, ``Best``, ``Range``, where:

    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``Best`` (:class:`float`), the best fitness function currently in the population
    * ``Range`` (:class:`float`), the range used to vary the chromosome (relative to the box bounds width)

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(compass_search(max_fevals = 500))
    >>> algo.set_verbosity(1)
    >>> prob = problem(hock_schittkowsky_71())
    >>> pop = population(prob, 1)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Fevals:          Best:      Violated:    Viol. Norm:         Range:
          4        110.785              1        2.40583            0.5
         12        110.785              1        2.40583           0.25
         20        110.785              1        2.40583          0.125
         22        91.0454              1        1.01855          0.125
         25        96.2795              1       0.229446          0.125
         33        96.2795              1       0.229446         0.0625
         41        96.2795              1       0.229446        0.03125
         45         94.971              1       0.127929        0.03125
         53         94.971              1       0.127929       0.015625
         56        95.6252              1      0.0458521       0.015625
         64        95.6252              1      0.0458521      0.0078125
         68        95.2981              1      0.0410151      0.0078125
         76        95.2981              1      0.0410151     0.00390625
         79        95.4617              1     0.00117433     0.00390625
         87        95.4617              1     0.00117433     0.00195312
         95        95.4617              1     0.00117433    0.000976562
        103        95.4617              1     0.00117433    0.000488281
        111        95.4617              1     0.00117433    0.000244141
        115        95.4515              0              0    0.000244141
        123        95.4515              0              0     0.00012207
        131        95.4515              0              0    6.10352e-05
        139        95.4515              0              0    3.05176e-05
        143        95.4502              0              0    3.05176e-05
        151        95.4502              0              0    1.52588e-05
        159        95.4502              0              0    7.62939e-06
    Exit condition -- range: 7.62939e-06 <= 1e-05
    >>> uda = algo.extract(compass_search)
    >>> uda.get_log() # doctest: +SKIP
    [(4, 110.785345345, 1, 2.405833534534, 0.5), (12, 110.785345345, 1, 2.405833534534, 0.25) ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::compass_search::get_log()`.

)";
}
2284
// Returns the Python docstring for the pygmo.sade UDA.
// Fixes: the *ftol*/*xtol* descriptions were swapped (ftol is the tolerance on f, xtol on x —
// compare the de docstring in this same file); "dafault" and "anf" typos; the *variant_adptv*
// range in Raises said "[0,1]" while the Args section states the valid schemes are 1..2.
std::string sade_docstring()
{
    return R"(__init__(gen = 1, variant = 2, variant_adptv = 1, ftol = 1e-6, xtol = 1e-6, memory = False, seed = random)

Self-adaptive Differential Evolution.

Args:
    gen (:class:`int`): number of generations
    variant (:class:`int`): mutation variant (default variant is 2: /rand/1/exp)
    variant_adptv (:class:`int`): F and CR parameter adaptation scheme to be used (one of 1..2)
    ftol (:class:`float`): stopping criteria on the f tolerance (default is 1e-6)
    xtol (:class:`float`): stopping criteria on the x tolerance (default is 1e-6)
    memory (:class:`bool`): when true the adapted parameters CR and F are not reset between successive calls to the evolve method
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen*, *variant*, *variant_adptv* or *seed* is negative or greater than an implementation-defined value
    ValueError: if *variant* is not in [1,18] or *variant_adptv* is not in [1,2]

The following variants (mutation variants) are available to create a new candidate individual:

+--------------------------------------+--------------------------------------+
| 1 - best/1/exp                       | 2 - rand/1/exp                       |
+--------------------------------------+--------------------------------------+
| 3 - rand-to-best/1/exp               | 4 - best/2/exp                       |
+--------------------------------------+--------------------------------------+
| 5 - rand/2/exp                       | 6 - best/1/bin                       |
+--------------------------------------+--------------------------------------+
| 7 - rand/1/bin                       | 8 - rand-to-best/1/bin               |
+--------------------------------------+--------------------------------------+
| 9 - best/2/bin                       | 10 - rand/2/bin                      |
+--------------------------------------+--------------------------------------+
| 11 - rand/3/exp                      | 12 - rand/3/bin                      |
+--------------------------------------+--------------------------------------+
| 13 - best/3/exp                      | 14 - best/3/bin                      |
+--------------------------------------+--------------------------------------+
| 15 - rand-to-current/2/exp           | 16 - rand-to-current/2/bin           |
+--------------------------------------+--------------------------------------+
| 17 - rand-to-best-and-current/2/exp  | 18 - rand-to-best-and-current/2/bin  |
+--------------------------------------+--------------------------------------+

The following adaptation schemes are available:

+--------------------------------------+--------------------------------------+
| 1 - jDE                              | 2 - iDE                              |
+--------------------------------------+--------------------------------------+

See also the docs of the C++ class :cpp:class:`pagmo::sade`.

)";
}
2336
// Sphinx/reST docstring for pygmo.sade.get_log().
// Fixes the "paramter" typos in the F and CR bullet entries.
std::string sade_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()``. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.sade`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``Best``, ``F``, ``CR``, ``dx``, ``df``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``Best`` (:class:`float`), the best fitness function currently in the population
    * ``F`` (:class:`float`), the value of the adapted parameter F used to create the best so far
    * ``CR`` (:class:`float`), the value of the adapted parameter CR used to create the best so far
    * ``dx`` (:class:`float`), the norm of the distance to the population mean of the mutant vectors
    * ``df`` (:class:`float`), the population flatness evaluated as the distance between the fitness of the best and of the worst individual

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(sade(gen = 500))
    >>> algo.set_verbosity(100)
    >>> prob = problems.rosenbrock(10)
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:          Best:             F:            CR:            dx:            df:
       1             20         297060       0.690031       0.294769        44.1494    2.30584e+06
     101           2020        97.4258        0.58354       0.591527        13.3115        441.545
     201           4020        8.79247         0.6678        0.53148        17.8822        121.676
     301           6020        6.84774       0.494549        0.98105        12.2781        40.9626
     401           8020         4.7861       0.428741       0.743813        12.2938        39.7791
    Exit condition -- generations = 500
    >>> uda = algo.extract(sade)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 20, 297059.6296130389, 0.690031071850855, 0.29476914701127666, 44.14940516578547, 2305836.7422693395), ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::sade::get_log()`.

)";
}
2378
// Sphinx/reST docstring for pygmo.nsga2.set_bfe(); the raw string below is
// rendered verbatim by Sphinx in the pygmo documentation.
std::string nsga2_set_bfe_docstring()
{
    return R"(set_bfe(b)

Set the batch function evaluation scheme.

This method will set the batch function evaluation scheme to be used for :class:`~pygmo.nsga2`.

Args:
    b (:class:`~pygmo.bfe`): the batch function evaluation object

Raises:
    unspecified: any exception thrown by the underlying C++ method

)";
}
2395
// Sphinx/reST constructor docstring for the pygmo.nsga2 user-defined
// algorithm (NSGA-II); rendered by Sphinx in the pygmo documentation.
std::string nsga2_docstring()
{
    return R"(__init__(gen = 1, cr = 0.95, eta_c = 10., m = 0.01, eta_m = 50., seed = random)

Non dominated Sorting Genetic Algorithm (NSGA-II).

Args:
    gen (:class:`int`): number of generations
    cr (:class:`float`): crossover probability
    eta_c (:class:`float`): distribution index for crossover
    m (:class:`float`): mutation probability
    eta_m (:class:`float`): distribution index for mutation
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen* or *seed* are negative or greater than an implementation-defined value
    ValueError: if either *cr* is not in [0,1[, *eta_c* is not in [0,100[, *m* is not in [0,1], or
      *eta_m* is not in [0,100[

See also the docs of the C++ class :cpp:class:`pagmo::nsga2`.

)";
}
2419
// Sphinx/reST docstring for pygmo.nsga2.get_log(); rendered by Sphinx in
// the pygmo documentation.
std::string nsga2_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.nsga2`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``ideal_point``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``ideal_point`` (1D numpy array), the ideal point of the current population (cropped to max 5 dimensions only in the screen output)

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(nsga2(gen=100))
    >>> algo.set_verbosity(20)
    >>> pop = population(zdt(1), 40)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:        ideal1:        ideal2:
       1              0      0.0033062        2.44966
      21            800    0.000275601       0.893137
      41           1600    3.15834e-05        0.44117
      61           2400     2.3664e-05       0.206365
      81           3200     2.3664e-05       0.133305
    >>> uda = algo.extract(nsga2)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 0, array([ 0.0033062 ,  2.44965599])), (21, 800, array([  2.75601086e-04 ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::nsga2::get_log`.

)";
}
2455
// Sphinx/reST docstring for pygmo.gaco.set_bfe(); rendered by Sphinx in
// the pygmo documentation.
std::string gaco_set_bfe_docstring()
{
    return R"(set_bfe(b)

Set the batch function evaluation scheme.

This method will set the batch function evaluation scheme to be used for :class:`~pygmo.gaco`.

Args:
    b (:class:`~pygmo.bfe`): the batch function evaluation object

Raises:
    unspecified: any exception thrown by the underlying C++ method

)";
}
2472
// Sphinx/reST constructor docstring for the pygmo.gaco user-defined algorithm.
// Fixes: C++ literal suffixes (1u, 7u, 100000u) and lowercase `false` leaked
// into the Python signature, "using the a multi-kernel", and "is not in >=1".
std::string gaco_docstring()
{
    return R"(__init__(gen = 1, ker = 63, q = 1.0, oracle = 0., acc = 0.01, threshold = 1, n_gen_mark = 7, impstop = 100000, evalstop = 100000, focus = 0., memory = False, seed = random)

Extended Ant Colony Optimization algorithm (gaco).

Ant colony optimization is a class of optimization algorithms modeled on the actions
of an ant colony. Artificial 'ants' (e.g. simulation agents) locate optimal solutions by
moving through a parameter space representing all possible solutions. Real ants lay down
pheromones directing each other to resources while exploring their environment.
The simulated 'ants' similarly record their positions and the quality of their solutions,
so that in later simulation iterations more ants locate better solutions.

In pygmo we propose a version of this algorithm called extended ACO and originally described
by Schlueter et al.
Extended ACO generates future generations of ants by using a multi-kernel gaussian distribution
based on three parameters (i.e., pheromone values) which are computed depending on the quality
of each previous solution. The solutions are ranked through an oracle penalty method.

This algorithm can be applied to box-bounded single-objective, constrained and unconstrained
optimization, with both continuous and integer variables.

.. note::

   The ACO version implemented in PaGMO is an extension of Schlueter's originally proposed extended ACO algorithm.
   The main difference between the implemented version  and the original one lies in
   how two of the three pheromone values are computed (in particular, the weights and the standard deviations).

.. seealso::

   M. Schlueter, et al. (2009). Extended ant colony optimization for non-convex mixed integer non-linear programming. Computers & Operations Research.

Args:
    gen (:class:`int`): number of generations
    ker (:class:`int`): kernel size
    q (:class:`float`): convergence speed parameter
    oracle (:class:`float`): oracle parameter
    acc (:class:`float`): accuracy parameter
    threshold (:class:`int`): threshold parameter
    n_gen_mark (:class:`int`): std convergence speed parameter
    impstop (:class:`int`): improvement stopping criterion
    evalstop (:class:`int`): evaluation stopping criterion
    focus (:class:`float`): focus parameter
    memory (:class:`bool`): memory parameter
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen* or *seed* are negative or greater than an implementation-defined value
    ValueError: if either *acc* is not >=0, *focus* is not >=0 or *q* is not >=0,
      *threshold* is not in [1,gen] when gen!=0 and memory==false, or
      *threshold* is not >=1 when gen!=0 and memory==true

See also the docs of the C++ class :cpp:class:`pagmo::gaco`.

)";
}
2529
// Sphinx/reST docstring for pygmo.gaco.get_log().
// Fixes the example get_log() tuple so its Fevals matches the screen log
// above it (generation 2 has Fevals 13, not 15).
std::string gaco_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.gaco`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``Best``, ``Kernel``, ``Oracle``, ``dx``, ``dp``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``Best`` (:class:`float`), best fitness function value
    * ``Kernel`` (:class:`int`), kernel size
    * ``Oracle`` (:class:`float`), oracle parameter
    * ``dx`` (:class:`float`), sum of the absolute value of the difference between the variables' values of the best and worst solutions
    * ``dp`` (:class:`float`), absolute value of the difference between the worst and best solutions' penalty values

Examples:
    >>> import pygmo as pg
    >>> prob = pg.problem(pg.rosenbrock(dim = 2))
    >>> pop = pg.population(prob, size=13, seed=23)
    >>> algo = pg.algorithm(pg.gaco(10, 13, 1.0, 1e9, 0.0, 1, 7, 100000, 100000, 0.0, False, 23))
    >>> algo.set_verbosity(1)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:          Best:        Kernel:        Oracle:            dx:            dp:
       1              0        179.464             13          1e+09        13.1007         649155
       2             13        166.317             13        179.464        5.11695        15654.1
       3             26        3.81781             13        166.317        5.40633        2299.95
       4             39        3.81781             13        3.81781        2.11767        385.781
       5             52        2.32543             13        3.81781        1.30415        174.982
       6             65        2.32543             13        2.32543        4.58441         43.808
       7             78        1.17205             13        2.32543        1.18585        21.6315
       8             91        1.17205             13        1.17205       0.806727        12.0702
       9            104        1.17205             13        1.17205       0.806727        12.0702
      10            130       0.586187             13       0.586187       0.806727        12.0702
    >>> uda = algo.extract(pg.gaco)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 0, 179.464, 13, 1e+09, 13.1007, 649155), (2, 13, 166.317, 13, 179.464, ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::gaco::get_log`.

)";
}
2575
// Sphinx/reST docstring for pygmo.maco.set_bfe().
// Fixes a copy-paste error: the description referred to pygmo.nsga2
// instead of pygmo.maco.
std::string maco_set_bfe_docstring()
{
    return R"(set_bfe(b)

Set the batch function evaluation scheme.

This method will set the batch function evaluation scheme to be used for :class:`~pygmo.maco`.

Args:
    b (:class:`~pygmo.bfe`): the batch function evaluation object

Raises:
    unspecified: any exception thrown by the underlying C++ method

)";
}
2592
// Sphinx/reST constructor docstring for the pygmo.maco user-defined algorithm.
// Fixes the threshold range in Raises: threshold is 1-based ([1,gen]), in
// line with the sibling gaco docstring and the ">=1" clause that follows.
std::string maco_docstring()
{
    return R"(__init__(gen = 1, ker = 63, q = 1.0, threshold = 1, n_gen_mark = 7, evalstop = 100000, focus = 0., memory = False, seed = random)

Multi-objective Ant Colony Optimizer (MACO).

Args:
    gen (:class:`int`): number of generations
    ker (:class:`int`): kernel size
    q (:class:`float`): convergence speed parameter
    threshold (:class:`int`): threshold parameter
    n_gen_mark (:class:`int`): std convergence speed parameter
    evalstop (:class:`int`): evaluation stopping criterion
    focus (:class:`float`): focus parameter
    memory (:class:`bool`): memory parameter
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen* or *seed* are negative or greater than an implementation-defined value
    ValueError: if either *focus* is < 0, *threshold* is not in [1,*gen*] when *gen* is > 0 and *memory* is *False*, or if *threshold* is not >=1 when *gen* is > 0 and *memory* is *True*

See also the docs of the C++ class :cpp:class:`pagmo::maco`.

)";
}
2618
// Sphinx/reST docstring for pygmo.maco.get_log(); rendered by Sphinx in
// the pygmo documentation.
std::string maco_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.maco`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``ideal_point``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``ideal_point`` (1D numpy array), the ideal point of the current population (cropped to max 5 dimensions only in the screen output)

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(maco(gen=100))
    >>> algo.set_verbosity(20)
    >>> pop = population(zdt(1), 63)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:        ideal1:        ideal2:
       1              0      0.0422249        2.72416
      21           1260    0.000622664        1.27304
      41           2520    0.000100557       0.542994
      61           3780    8.06766e-06       0.290677
      81           5040    8.06766e-06       0.290677
    >>> uda = algo.extract(maco)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 0, array([0.04222492, 2.72415949])), (21, 1260, array([6.22663991e-04, ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::maco::get_log`.

)";
}
2654
// Sphinx/reST constructor docstring for the pygmo.gwo user-defined algorithm.
// Fixes spelling throughout (greywolves, millenia, optimzation, unnecessray,
// shokingly, particulary, perfromance) and the author names, which had the
// paper's affiliation superscripts fused in ("Mirjalilia", "Mirjalilib").
std::string gwo_docstring()
{
    return R"(__init__(gen = 1, seed = random)

Grey Wolf Optimizer (gwo).

Grey Wolf Optimizer is an optimization algorithm based on the leadership hierarchy and hunting mechanism of
grey wolves, proposed by Seyedali Mirjalili, Seyed Mohammad Mirjalili, Andrew Lewis in 2014.

This algorithm is a classic example of a highly criticizable line of search that led in the first decades of
our millennium to the development of an entire zoo of metaphors inspiring optimization heuristics. In our opinion they,
as is the case for the grey wolf optimizer, are often but small variations of already existing heuristics rebranded with unnecessary and convoluted
biological metaphors. In the case of GWO this is particularly evident as the position update rule is shockingly
trivial and can also be easily seen as a product of an evolutionary metaphor or a particle swarm one. Such an update rule
is also not particularly effective and results in a rather poor performance most of times. Reading the original
peer-reviewed paper, where the poor algorithmic performance is hidden by the methodological flaws of the benchmark presented,
one is left with a bitter opinion of the whole peer-review system.

This algorithm can be applied to box-bounded single-objective, constrained and unconstrained
optimization, with continuous values.

Args:
    gen (:class:`int`): number of generations
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen* or *seed* are negative or greater than an implementation-defined value
    ValueError: if *gen* is not >=3

See also the docs of the C++ class :cpp:class:`pagmo::gwo`.

)";
}
2688
// Sphinx/reST docstring for pygmo.gwo.get_log().
// Fixes the Returns header, which listed ``Gen``, ``Fevals``, ``ideal_point``
// (copy-pasted from a multi-objective log) instead of the actual logged
// values Gen, alpha, beta, delta shown in the bullets and example below.
std::string gwo_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.gwo`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``alpha``, ``beta``, ``delta``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``alpha`` (:class:`float`), fitness function value of alpha
    * ``beta`` (:class:`float`), fitness function value of beta
    * ``delta`` (:class:`float`), fitness function value of delta

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(gwo(gen=10))
    >>> algo.set_verbosity(2)
    >>> prob = problem(rosenbrock(dim=2))
    >>> pop = population(prob, size=13, seed=23)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:         Alpha:          Beta:         Delta:
       1        179.464        3502.82        3964.75
       3        6.82024        30.2149        61.1906
       5       0.321879        2.39373        3.46188
       7       0.134441       0.342357       0.439651
       9       0.100281       0.211849       0.297448
    >>> uda = algo.extract(gwo)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 179.46420983829944, 3502.8158822203472, 3964.7542658046486), ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::gwo::get_log`.

)";
}
2726
// Sphinx/reST constructor docstring for the pygmo.moead user-defined algorithm.
// Fixes: Python signature used C++-style lowercase `true` for the
// preserve_diversity default, and a C++ member name (m_preserve_diversity)
// had leaked into the `limit` description.
std::string moead_docstring()
{
    return R"(__init__(gen = 1, weight_generation = "grid", decomposition = "tchebycheff", neighbours = 20, CR = 1, F = 0.5, eta_m = 20, realb = 0.9, limit = 2, preserve_diversity = True, seed = random)

Multi Objective Evolutionary Algorithms by Decomposition (the DE variant)

Args:
    gen (:class:`int`): number of generations
    weight_generation (:class:`str`): method used to generate the weights, one of "grid", "low discrepancy" or "random"
    decomposition (:class:`str`): method used to decompose the objectives, one of "tchebycheff", "weighted" or "bi"
    neighbours (:class:`int`): size of the weight's neighborhood
    CR (:class:`float`): crossover parameter in the Differential Evolution operator
    F (:class:`float`): parameter for the Differential Evolution operator
    eta_m (:class:`float`): distribution index used by the polynomial mutation
    realb (:class:`float`): chance that the neighbourhood is considered at each generation, rather than the whole population (only if preserve_diversity is true)
    limit (:class:`int`): maximum number of copies reinserted in the population (only if preserve_diversity is true)
    preserve_diversity (:class:`bool`): when true activates diversity preservation mechanisms
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen*, *neighbours*, *seed* or *limit* are negative or greater than an implementation-defined value
    ValueError: if either *decomposition* is not one of 'tchebycheff', 'weighted' or 'bi',
      *weight_generation* is not one of 'random', 'low discrepancy' or 'grid',
      *CR* or *F* or *realb* are not in [0.,1.] or *eta_m* is negative, if *neighbours* is not >=2

See also the docs of the C++ class :cpp:class:`pagmo::moead`.

)";
}
2756
// Sphinx/reST docstring for pygmo.moead.get_log().
// Fixes the Returns header: it said ``ADR`` while the bullet list and the
// logged tuple define the value as ``ADF`` (Average Decomposed Fitness).
std::string moead_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()``. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.moead`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``ADF``, ``ideal_point``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``ADF`` (:class:`float`), Average Decomposed Fitness, that is the average across all decomposed problem of the single objective decomposed fitness along the corresponding direction
    * ``ideal_point`` (``array``), the ideal point of the current population (cropped to max 5 dimensions only in the screen output)

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(moead(gen=500))
    >>> algo.set_verbosity(100)
    >>> prob = problem(zdt())
    >>> pop = population(prob, 40)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:           ADF:        ideal1:        ideal2:
       1              0        32.5747     0.00190532        2.65685
     101           4000        5.67751    2.56736e-09       0.468789
     201           8000        5.38297    2.56736e-09      0.0855025
     301          12000        5.05509    9.76581e-10      0.0574796
     401          16000        5.13126    9.76581e-10      0.0242256
    >>> uda = algo.extract(moead)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 0, 32.574745630075874, array([  1.90532430e-03,   2.65684834e+00])), ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::moead::get_log()`.

)";
}
2794
// Sphinx/reST constructor docstring for the pygmo.cmaes user-defined algorithm.
// Fixes the swapped ftol/xtol descriptions (ftol is the tolerance on the
// fitness f, xtol the tolerance on the decision vector x).
std::string cmaes_docstring()
{
    return R"(__init__(gen = 1, cc = -1, cs = -1, c1 = -1, cmu = -1, sigma0 = 0.5, ftol = 1e-6, xtol = 1e-6, memory = False, force_bounds = False, seed = random)

Covariance Matrix Evolutionary Strategy (CMA-ES).

Args:
    gen (:class:`int`): number of generations
    cc (:class:`float`): backward time horizon for the evolution path (by default is automatically assigned)
    cs (:class:`float`): makes partly up for the small variance loss in case the indicator is zero (by default is automatically assigned)
    c1 (:class:`float`): learning rate for the rank-one update of the covariance matrix (by default is automatically assigned)
    cmu (:class:`float`): learning rate for the rank-mu update of the covariance matrix (by default is automatically assigned)
    sigma0 (:class:`float`): initial step-size
    ftol (:class:`float`): stopping criteria on the f tolerance
    xtol (:class:`float`): stopping criteria on the x tolerance
    memory (:class:`bool`): when true the adapted parameters are not reset between successive calls to the evolve method
    force_bounds (:class:`bool`): when true the box bounds are enforced. The fitness will never be called outside the bounds but the covariance matrix adaptation mechanism will worsen
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen* is negative or greater than an implementation-defined value
    ValueError: if *cc*, *cs*, *c1*, *cmu* are not in [0,1] or -1

See also the docs of the C++ class :cpp:class:`pagmo::cmaes`.

)";
}
2822
// Sphinx/reST docstring for pygmo.cmaes.get_log(); rendered by Sphinx in
// the pygmo documentation.
std::string cmaes_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()``. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.cmaes`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``Best``, ``dx``, ``df``, ``sigma``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``Best`` (:class:`float`), the best fitness function currently in the population
    * ``dx`` (:class:`float`), the norm of the distance to the population mean of the mutant vectors
    * ``df`` (:class:`float`), the population flatness evaluated as the distance between the fitness of the best and of the worst individual
    * ``sigma`` (:class:`float`), the current step-size

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(cmaes(gen = 500))
    >>> algo.set_verbosity(100)
    >>> prob = problem(rosenbrock(10))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:          Best:            dx:            df:         sigma:
       1              0         173924        33.6872    3.06519e+06            0.5
     101           2000        92.9612       0.583942        156.921      0.0382078
     201           4000        8.79819       0.117574          5.101      0.0228353
     301           6000        4.81377      0.0698366        1.34637      0.0297664
     401           8000        1.04445      0.0568541       0.514459      0.0649836
    Exit condition -- generations = 500
    >>> uda = algo.extract(cmaes)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 0, 173924.2840042722, 33.68717961390855, 3065192.3843070837, 0.5), ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::cmaes::get_log()`.

)";
}
2863
// Sphinx/reST constructor docstring for the pygmo.xnes user-defined algorithm.
// Fixes the swapped ftol/xtol descriptions (ftol is the tolerance on the
// fitness f, xtol the tolerance on the decision vector x).
std::string xnes_docstring()
{
    return R"(__init__(gen = 1, eta_mu = -1, eta_sigma = -1, eta_b = -1, sigma0 = -1, ftol = 1e-6, xtol = 1e-6, memory = False, force_bounds = False, seed = random)

Exponential Evolution Strategies.

Args:
    gen (:class:`int`): number of generations
    eta_mu (:class:`float`): learning rate for mean update (if -1 will be automatically selected to be 1)
    eta_sigma (:class:`float`): learning rate for step-size update (if -1 will be automatically selected)
    eta_b (:class:`float`): learning rate for the covariance matrix update (if -1 will be automatically selected)
    sigma0 (:class:`float`): the initial search width will be sigma0 * (ub - lb) (if -1 will be automatically selected to be 1)
    ftol (:class:`float`): stopping criteria on the f tolerance
    xtol (:class:`float`): stopping criteria on the x tolerance
    memory (:class:`bool`): when true the adapted parameters are not reset between successive calls to the evolve method
    force_bounds (:class:`bool`): when true the box bounds are enforced. The fitness will never be called outside the bounds but the covariance matrix adaptation mechanism will worsen
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen* is negative or greater than an implementation-defined value
    ValueError: if *eta_mu*, *eta_sigma*, *eta_b*, *sigma0* are not in ]0,1] or -1

See also the docs of the C++ class :cpp:class:`pagmo::xnes`.

)";
}
2890
// Sphinx/reST docstring for pygmo.xnes.get_log(); rendered by Sphinx in
// the pygmo documentation.
std::string xnes_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()``. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.xnes`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``Best``, ``dx``, ``df``, ``sigma``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``Best`` (:class:`float`), the best fitness function currently in the population
    * ``dx`` (:class:`float`), the norm of the distance to the population mean of the mutant vectors
    * ``df`` (:class:`float`), the population flatness evaluated as the distance between the fitness of the best and of the worst individual
    * ``sigma`` (:class:`float`), the current step-size

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(xnes(gen = 500))
    >>> algo.set_verbosity(100)
    >>> prob = problem(rosenbrock(10))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:          Best:            dx:            df:         sigma:
       1              0         173924        33.6872    3.06519e+06            0.5
     101           2000        92.9612       0.583942        156.921      0.0382078
     201           4000        8.79819       0.117574          5.101      0.0228353
     301           6000        4.81377      0.0698366        1.34637      0.0297664
     401           8000        1.04445      0.0568541       0.514459      0.0649836
    Exit condition -- generations = 500
    >>> uda = algo.extract(xnes)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 0, 173924.2840042722, 33.68717961390855, 3065192.3843070837, 0.5), ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::xnes::get_log()`.

)";
}
2931
// Sphinx/reST constructor docstring for the pygmo.de1220 user-defined algorithm.
// Fixes the Raises section: there is no *variant* parameter (the [1,18] check
// applies to each id in *allowed_variants*), and the adaptation-scheme range
// is [1,2] (jDE/iDE), matching the "one of 1..2" argument description.
std::string de1220_docstring()
{
    return R"(__init__(gen = 1, allowed_variants = [2,3,7,10,13,14,15,16], variant_adptv = 1, ftol = 1e-6, xtol = 1e-6, memory = False, seed = random)

Self-adaptive Differential Evolution, pygmo flavour (pDE).
The adaptation of the mutation variant is added to :class:`~pygmo.sade`

Args:
    gen (:class:`int`): number of generations
    allowed_variants (array-like object): allowed mutation variants, each one being a number in [1, 18]
    variant_adptv (:class:`int`): *F* and *CR* parameter adaptation scheme to be used (one of 1..2)
    ftol (:class:`float`): stopping criteria on the x tolerance (default is 1e-6)
    xtol (:class:`float`): stopping criteria on the f tolerance (default is 1e-6)
    memory (:class:`bool`): when true the adapted parameters *CR* anf *F* are not reset between successive calls to the evolve method
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen*, *variant_adptv* or *seed* is negative or greater than an implementation-defined value
    ValueError: if each id in *allowed_variants* is not in [1,18] or *variant_adptv* is not in [1,2]

The following variants (mutation variants) can be put into *allowed_variants*:

+--------------------------------------+--------------------------------------+
| 1 - best/1/exp                       | 2 - rand/1/exp                       |
+--------------------------------------+--------------------------------------+
| 3 - rand-to-best/1/exp               | 4 - best/2/exp                       |
+--------------------------------------+--------------------------------------+
| 5 - rand/2/exp                       | 6 - best/1/bin                       |
+--------------------------------------+--------------------------------------+
| 7 - rand/1/bin                       | 8 - rand-to-best/1/bin               |
+--------------------------------------+--------------------------------------+
| 9 - best/2/bin                       | 10 - rand/2/bin                      |
+--------------------------------------+--------------------------------------+
| 11 - rand/3/exp                      | 12 - rand/3/bin                      |
+--------------------------------------+--------------------------------------+
| 13 - best/3/exp                      | 14 - best/3/bin                      |
+--------------------------------------+--------------------------------------+
| 15 - rand-to-current/2/exp           | 16 - rand-to-current/2/bin           |
+--------------------------------------+--------------------------------------+
| 17 - rand-to-best-and-current/2/exp  | 18 - rand-to-best-and-current/2/bin  |
+--------------------------------------+--------------------------------------+

The following adaptation schemes for the parameters *F* and *CR* are available:

+--------------------------------------+--------------------------------------+
| 1 - jDE                              | 2 - iDE                              |
+--------------------------------------+--------------------------------------+

See also the docs of the C++ class :cpp:class:`pagmo::de1220`.

)";
}
2984
// Docstring for pygmo.de1220.get_log().
std::string de1220_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()``. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.de1220`. A verbosity of N implies a log line each N generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``Best``, ``F``, ``CR``, ``Variant``, ``dx``, ``df``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``Best`` (:class:`float`), the best fitness function currently in the population
    * ``F`` (:class:`float`), the value of the adapted parameter F used to create the best so far
    * ``CR`` (:class:`float`), the value of the adapted parameter CR used to create the best so far
    * ``Variant`` (:class:`int`), the mutation variant used to create the best so far
    * ``dx`` (:class:`float`), the norm of the distance to the population mean of the mutant vectors
    * ``df`` (:class:`float`), the population flatness evaluated as the distance between the fitness of the best and of the worst individual

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(de1220(gen = 500))
    >>> algo.set_verbosity(100)
    >>> prob = problem(rosenbrock(10))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen: Fevals: Best: F: CR: Variant: dx: df:
    1 20 285653 0.55135 0.441551 16 43.9719 2.02379e+06
    101 2020 12.2721 0.127285 0.0792493 14 3.22986 106.764
    201 4020 5.72927 0.148337 0.777806 14 2.72177 4.10793
    301 6020 4.85084 0.12193 0.996191 3 2.95555 3.85027
    401 8020 4.20638 0.235997 0.996259 3 3.60338 4.49432
    Exit condition -- generations = 500
    >>> uda = algo.extract(de1220)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 20, 285652.7928977573, 0.551350234239449, 0.4415510963067054, 16, 43.97185788345982, 2023791.5123259544), ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::de1220::get_log()`.

)";
}
3027
// Docstring for the pygmo.pso (Particle Swarm Optimization) constructor.
std::string pso_docstring()
{
    return R"(__init__(gen = 1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, max_vel = 0.5, variant = 5, neighb_type = 2, neighb_param = 4, memory = False, seed = random)

Particle Swarm Optimization

Args:
    gen (:class:`int`): number of generations
    omega (:class:`float`): inertia weight (or constriction factor)
    eta1 (:class:`float`): social component
    eta2 (:class:`float`): cognitive component
    max_vel (:class:`float`): maximum allowed particle velocities (normalized with respect to the bounds width)
    variant (:class:`int`): algorithmic variant
    neighb_type (:class:`int`): swarm topology (defining each particle's neighbours)
    neighb_param (:class:`int`): topology parameter (defines how many neighbours to consider)
    memory (:class:`bool`): when true the velocities are not reset between successive calls to the evolve method
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *gen* or *seed* is negative or greater than an implementation-defined value
    ValueError: if *omega* is not in the [0,1] interval, if *eta1*, *eta2* are not in the [0,4] interval, if *max_vel* is not in ]0,1]
    ValueError: *variant* is not one of 1 .. 6, if *neighb_type* is not one of 1 .. 4 or if *neighb_param* is zero

The following variants can be selected via the *variant* parameter:

+-----------------------------------------+-----------------------------------------+
| 1 - Canonical (with inertia weight)     | 2 - Same social and cognitive rand.     |
+-----------------------------------------+-----------------------------------------+
| 3 - Same rand. for all components       | 4 - Only one rand.                      |
+-----------------------------------------+-----------------------------------------+
| 5 - Canonical (with constriction fact.) | 6 - Fully Informed (FIPS)               |
+-----------------------------------------+-----------------------------------------+


The following topologies are selected by *neighb_type*:

+--------------------------------------+--------------------------------------+
| 1 - gbest                            | 2 - lbest                            |
+--------------------------------------+--------------------------------------+
| 3 - Von Neumann                      | 4 - Adaptive random                  |
+--------------------------------------+--------------------------------------+

The topology determines (together with the topology parameter) which particles need to be considered
when computing the social component of the velocity update.

See also the docs of the C++ class :cpp:class:`pagmo::pso`.

)";
}
3075
// Docstring for pygmo.pso.get_log().
std::string pso_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.pso`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``gbest``, ``Mean Vel.``, ``Mean lbest``, ``Avg. Dist.``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``gbest`` (:class:`float`), the best fitness function found so far by the swarm
    * ``Mean Vel.`` (:class:`float`), the average particle velocity (normalized)
    * ``Mean lbest`` (:class:`float`), the average fitness of the current particle locations
    * ``Avg. Dist.`` (:class:`float`), the average distance between particles (normalized)

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(pso(gen = 500))
    >>> algo.set_verbosity(50)
    >>> prob = problem(rosenbrock(10))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen: Fevals: gbest: Mean Vel.: Mean lbest: Avg. Dist.:
    1 40 72473.3 0.173892 677427 0.281744
    51 1040 135.867 0.0183806 748.001 0.065826
    101 2040 12.6726 0.00291046 84.9531 0.0339452
    151 3040 8.4405 0.000852588 33.5161 0.0191379
    201 4040 7.56943 0.000778264 28.213 0.00789202
    251 5040 6.8089 0.00435521 22.7988 0.00107112
    301 6040 6.3692 0.000289725 17.3763 0.00325571
    351 7040 6.09414 0.000187343 16.8875 0.00172307
    401 8040 5.78415 0.000524536 16.5073 0.00234197
    451 9040 5.4662 0.00018305 16.2339 0.000958182
    >>> uda = algo.extract(pso)
    >>> uda.get_log() # doctest: +SKIP
    [(1,40,72473.32713790605,0.1738915144248373,677427.3504996448,0.2817443174278134), (51,1040,...

See also the docs of the relevant C++ method :cpp:func:`pagmo::pso::get_log()`.

)";
}
3120
3121 //----------
// Docstring for pygmo.pso_gen.set_bfe(). Section spacing matches the
// equivalent nspso_set_bfe_docstring() so Sphinx renders both identically.
std::string pso_gen_set_bfe_docstring()
{
    return R"(set_bfe(b)

Set the batch function evaluation scheme.

This method will set the batch function evaluation scheme to be used for :class:`~pygmo.pso_gen`.

Args:
    b (:class:`~pygmo.bfe`): the batch function evaluation object

Raises:
    unspecified: any exception thrown by the underlying C++ method

)";
}
pso_gen_docstring()3133 std::string pso_gen_docstring()
3134 {
3135 return R"(__init__(gen = 1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, max_vel = 0.5, variant = 5, neighb_type = 2, neighb_param = 4, memory = False, seed = random)
3136
3137 Particle Swarm Optimization (generational) is identical to :class:`~pygmo.pso`, but does update the velocities of each particle before new particle positions are computed (taking
3138 into consideration all updated particle velocities). Each particle is thus evaluated on the same seed within a generation as opposed to the standard PSO which evaluates single particle
3139 at a time. Consequently, the generational PSO algorithm is suited for stochastic optimization problems.
3140
3141
3142 Args:
3143 gen (:class:`int`): number of generations
3144 omega (:class:`float`): inertia weight (or constriction factor)
3145 eta1 (:class:`float`): social component
3146 eta2 (:class:`float`): cognitive component
3147 max_vel (:class:`float`): maximum allowed particle velocities (normalized with respect to the bounds width)
3148 variant (:class:`int`): algorithmic variant
3149 neighb_type (:class:`int`): swarm topology (defining each particle's neighbours)
3150 neighb_param (:class:`int`): topology parameter (defines how many neighbours to consider)
3151 memory (:class:`bool`): when true the velocities are not reset between successive calls to the evolve method
3152 seed (:class:`int`): seed used by the internal random number generator (default is random)
3153
3154 Raises:
3155 OverflowError: if *gen* or *seed* is negative or greater than an implementation-defined value
3156 ValueError: if *omega* is not in the [0,1] interval, if *eta1*, *eta2* are not in the [0,1] interval, if *max_vel* is not in ]0,1]
3157 ValueError: *variant* is not one of 1 .. 6, if *neighb_type* is not one of 1 .. 4 or if *neighb_param* is zero
3158
3159 The following variants can be selected via the *variant* parameter:
3160
3161 +-----------------------------------------+-----------------------------------------+
3162 | 1 - Canonical (with inertia weight) | 2 - Same social and cognitive rand. |
3163 +-----------------------------------------+-----------------------------------------+
3164 | 3 - Same rand. for all components | 4 - Only one rand. |
3165 +-----------------------------------------+-----------------------------------------+
3166 | 5 - Canonical (with constriction fact.) | 6 - Fully Informed (FIPS) |
3167 +-----------------------------------------+-----------------------------------------+
3168
3169
3170 The following topologies are selected by *neighb_type*:
3171
3172 +--------------------------------------+--------------------------------------+
3173 | 1 - gbest | 2 - lbest |
3174 +--------------------------------------+--------------------------------------+
3175 | 3 - Von Neumann | 4 - Adaptive random |
3176 +--------------------------------------+--------------------------------------+
3177
3178 The topology determines (together with the topology parameter) which particles need to be considered
3179 when computing the social component of the velocity update.
3180
3181 )";
3182 }
3183
// Docstring for pygmo.pso_gen.get_log(). All references point at pso_gen
// (the previous revision mistakenly referenced pso).
std::string pso_gen_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.pso_gen`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``gbest``, ``Mean Vel.``, ``Mean lbest``, ``Avg. Dist.``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``gbest`` (:class:`float`), the best fitness function found so far by the swarm
    * ``Mean Vel.`` (:class:`float`), the average particle velocity (normalized)
    * ``Mean lbest`` (:class:`float`), the average fitness of the current particle locations
    * ``Avg. Dist.`` (:class:`float`), the average distance between particles (normalized)

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(pso_gen(gen = 500))
    >>> algo.set_verbosity(50)
    >>> prob = problem(rosenbrock(10))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen: Fevals: gbest: Mean Vel.: Mean lbest: Avg. Dist.:
    1 40 72473.3 0.173892 677427 0.281744
    51 1040 135.867 0.0183806 748.001 0.065826
    101 2040 12.6726 0.00291046 84.9531 0.0339452
    151 3040 8.4405 0.000852588 33.5161 0.0191379
    201 4040 7.56943 0.000778264 28.213 0.00789202
    251 5040 6.8089 0.00435521 22.7988 0.00107112
    301 6040 6.3692 0.000289725 17.3763 0.00325571
    351 7040 6.09414 0.000187343 16.8875 0.00172307
    401 8040 5.78415 0.000524536 16.5073 0.00234197
    451 9040 5.4662 0.00018305 16.2339 0.000958182
    >>> uda = algo.extract(pso_gen)
    >>> uda.get_log() # doctest: +SKIP
    [(1,40,72473.32713790605,0.1738915144248373,677427.3504996448,0.2817443174278134), (51,1040,...

See also the docs of the relevant C++ method :cpp:func:`pagmo::pso_gen::get_log()`.

)";
}
3228
// Docstring for the pygmo.simulated_annealing constructor (Corana's variant).
std::string simulated_annealing_docstring()
{
    // Build the docstring as a named local; NRVO makes this equivalent
    // to returning the literal directly.
    std::string doc = R"(__init__(Ts = 10., Tf = .1, n_T_adj = 10, n_range_adj = 10, bin_size = 10, start_range = 1., seed = random)

Simulated Annealing (Corana's version)

Args:
    Ts (:class:`float`): starting temperature
    Tf (:class:`float`): final temperature
    n_T_adj (:class:`int`): number of temperature adjustments in the annealing schedule
    n_range_adj (:class:`int`): number of adjustments of the search range performed at a constant temperature
    bin_size (:class:`int`): number of mutations that are used to compute the acceptance rate
    start_range (:class:`float`): starting range for mutating the decision vector
    seed (:class:`int`): seed used by the internal random number generator (default is random)

Raises:
    OverflowError: if *n_T_adj*, *n_range_adj* or *bin_size* are negative or greater than an implementation-defined value
    ValueError: if *Ts* is not in (0, inf), if *Tf* is not in (0, inf), if *Tf* > *Ts* or if *start_range* is not in (0,1]
    ValueError: if *n_T_adj* is not strictly positive or if *n_range_adj* is not strictly positive

See also the docs of the C++ class :cpp:class:`pagmo::simulated_annealing`.
)";
    return doc;
}
3252
// Docstring for pygmo.simulated_annealing.get_log().
std::string simulated_annealing_get_log_docstring()
{
    // Named local + NRVO; behaviour identical to returning the literal.
    std::string doc = R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen.
The log frequency depends on the verbosity parameter (by default nothing is logged) which can be set calling
the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm` constructed with a
:class:`~pygmo.simulated_annealing`. A verbosity larger than 0 will produce a log with one entry
each verbosity fitness evaluations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Fevals``, ``Best``, ``Current``, ``Mean range``, ``Temperature``, where:

    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``Best`` (:class:`float`), the best fitness function found so far
    * ``Current`` (:class:`float`), last fitness sampled
    * ``Mean range`` (:class:`float`), the mean search range across the decision vector components (relative to the box bounds width)
    * ``Temperature`` (:class:`float`), the current temperature

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(simulated_annealing(Ts=10., Tf=1e-5, n_T_adj = 100))
    >>> algo.set_verbosity(5000)
    >>> prob = problem(rosenbrock(dim = 10))
    >>> pop = population(prob, 1)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Fevals: Best: Current: Mean range: Temperature:
    57 5937 5937 0.48 10
    10033 9.50937 28.6775 0.0325519 2.51189
    15033 7.87389 14.3951 0.0131132 1.25893
    20033 7.87389 8.68616 0.0120491 0.630957
    25033 2.90084 4.43344 0.00676893 0.316228
    30033 0.963616 1.36471 0.00355931 0.158489
    35033 0.265868 0.63457 0.00202753 0.0794328
    40033 0.13894 0.383283 0.00172611 0.0398107
    45033 0.108051 0.169876 0.000870499 0.0199526
    50033 0.0391731 0.0895308 0.00084195 0.01
    55033 0.0217027 0.0303561 0.000596116 0.00501187
    60033 0.00670073 0.00914824 0.000342754 0.00251189
    65033 0.0012298 0.00791511 0.000275182 0.00125893
    70033 0.00112816 0.00396297 0.000192117 0.000630957
    75033 0.000183055 0.00139717 0.000135137 0.000316228
    80033 0.000174868 0.00192479 0.000109781 0.000158489
    85033 7.83e-05 0.000494225 8.20723e-05 7.94328e-05
    90033 5.35153e-05 0.000120148 5.76009e-05 3.98107e-05
    95033 5.35153e-05 9.10958e-05 3.18624e-05 1.99526e-05
    99933 2.34849e-05 8.72206e-05 2.59215e-05 1.14815e-05
    >>> uda = algo.extract(simulated_annealing)
    >>> uda.get_log() # doctest: +SKIP
    [(57, 5936.999957947842, 5936.999957947842, 0.47999999999999987, 10.0), (10033, ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::simulated_annealing::get_log()`.

)";
    return doc;
}
3308
// Docstring for pygmo.nspso.set_bfe().
std::string nspso_set_bfe_docstring()
{
    // Named local + NRVO; behaviour identical to returning the literal.
    std::string doc = R"(set_bfe(b)

Set the batch function evaluation scheme.

This method will set the batch function evaluation scheme to be used for :class:`~pygmo.nspso`.

Args:
    b (:class:`~pygmo.bfe`): the batch function evaluation object

Raises:
    unspecified: any exception thrown by the underlying C++ method

)";
    return doc;
}
3325
// Docstring for the pygmo.nspso constructor. The previous revision described
// diversity_mechanism as "leader selection range" (copy-paste error) and
// omitted the seed argument that appears in the signature.
std::string nspso_docstring()
{
    return R"(__init__(gen = 1, omega = 0.6, c1 = 0.01, c2 = 0.5, chi = 0.5, v_coeff = 0.5, leader_selection_range = 2, diversity_mechanism = "crowding distance", memory = false, seed = random)

Non dominated Sorting Particle Swarm Optimization (NSPSO).

Args:
    gen (:class:`int`): number of generations to evolve
    omega (:class:`float`): particles' inertia weight
    c1 (:class:`float`): magnitude of the force, applied to the particle's velocity, in the direction of its previous best position.
    c2 (:class:`float`): magnitude of the force, applied to the particle's velocity, in the direction of its global best position.
    chi (:class:`float`): velocity scaling factor.
    v_coeff (:class:`float`): velocity coefficient.
    leader_selection_range (:class:`int`): leader selection range.
    diversity_mechanism (:class:`str`): the diversity mechanism used to maintain diversity on the Pareto front.
    memory (:class:`bool`): memory parameter.
    seed (:class:`int`): seed used by the internal random number generator (default is random)


Raises:
    OverflowError: if *gen* or *seed* are negative or greater than an implementation-defined value
    ValueError: if either *omega* < 0 or *c1* <= 0 or *c2* <= 0 or *chi* <= 0, if *omega* > 1,
    if *v_coeff* <= 0 or *v_coeff* > 1, if *leader_selection_range* > 100, if *diversity_mechanism* != "crowding distance", or != "niche count", or != "max min"

See also the docs of the C++ class :cpp:class:`pagmo::nspso`.

)";
}
3353
// Docstring for pygmo.nspso.get_log().
std::string nspso_get_log_docstring()
{
    // Named local + NRVO; behaviour identical to returning the literal.
    std::string doc = R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen. The log frequency depends on the verbosity
parameter (by default nothing is logged) which can be set calling the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm`
constructed with a :class:`~pygmo.nspso`. A verbosity of ``N`` implies a log line each ``N`` generations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``ideal_point``, where:

    * ``Gen`` (:class:`int`), generation number
    * ``Fevals`` (:class:`int`), number of functions evaluation made
    * ``ideal_point`` (1D numpy array), the ideal point of the current population (cropped to max 5 dimensions only in the screen output)

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(nspso(gen=100))
    >>> algo.set_verbosity(20)
    >>> pop = population(zdt(1), 40)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen: Fevals: ideal1: ideal2:
    1 40 0.019376 2.75209
    21 840 0 1.97882
    41 1640 0 1.88428
    61 2440 0 1.88428
    81 3240 0 1.88428
    >>> uda = algo.extract(nspso)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 40, array([0.04843319, 2.98129814])), (21, 840, array([0., 1.68331679])) ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::nspso::get_log`.

)";
    return doc;
}
3389
// Docstring for the pygmo.random_decision_vector() utility.
std::string random_decision_vector_docstring()
{
    // Named local + NRVO; behaviour identical to returning the literal.
    std::string doc = R"(random_decision_vector(prob)

This function will generate a decision vector whose values are randomly chosen with uniform probability within
the lower and upper bounds :math:`lb` and :math:`ub` of the input :class:`~pygmo.problem` *prob*.

For the continuous part of the decision vector, the :math:`i`-th component of the randomly generated decision
vector will be such that :math:`lb_i \le x_i < ub_i`.

For the discrete part of the decision vector, the :math:`i`-th component of the randomly generated decision vector
is guaranteed to be an integral value such that :math:`lb_i \le x_i \le ub_i`.

For both the continuous and discrete parts of the decision vector, if :math:`lb_i == ub_i` then :math:`lb_i` is returned.

Args:
    prob (:class:`~pygmo.problem`): the input problem

Returns:
    :class:`numpy.ndarray`: a random decision vector within the problem's bounds

Raises:
    ValueError: if the problem's bounds are not finite or larger than an implementation-defined limit
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
    type conversion errors, mismatched function signatures, etc.)

)";
    return doc;
}
3418
// Docstring for the pygmo.batch_random_decision_vector() utility.
std::string batch_random_decision_vector_docstring()
{
    // Named local + NRVO; behaviour identical to returning the literal.
    std::string doc = R"(batch_random_decision_vector(prob, n)

This function will generate a batch of *n* decision vectors whose values are randomly chosen with uniform probability within
the lower and upper bounds :math:`lb` and :math:`ub` of the input :class:`~pygmo.problem` *prob*.
The decision vectors are laid out contiguously in the return value: for a problem with dimension :math:`x`,
the first decision vector in the return value occupies the index range :math:`\left[0, x\right)`, the second decision vector
occupies the range :math:`\left[x, 2x\right)`, and so on.

For the continuous parts of the decision vectors, the :math:`i`-th components of the randomly generated decision
vectors will be such that :math:`lb_i \le x_i < ub_i`.

For the discrete parts of the decision vectors, the :math:`i`-th components of the randomly generated decision vectors
are guaranteed to be integral values such that :math:`lb_i \le x_i \le ub_i`.

For both the continuous and discrete parts of the decision vectors, if :math:`lb_i == ub_i` then :math:`lb_i` is returned.

Args:
    prob (:class:`~pygmo.problem`): the input problem
    n (:class:`int`): the number of decision vectors that will be generated

Returns:
    :class:`numpy.ndarray`: a batch of random decision vectors within the problem's bounds, laid out contiguously in a 1D array

Raises:
    OverflowError: in case of (unlikely) overflows
    ValueError: if the problem's bounds are not finite or larger than an implementation-defined limit
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
    type conversion errors, mismatched function signatures, etc.)

)";
    return doc;
}
3452
// Docstring for the pygmo.sbx_crossover() utility.
std::string sbx_crossover_docstring()
{
    return R"(sbx_crossover(parent1, parent2, bounds, nix, p_cr, eta_c, seed)

This function will perform a binary crossover on the continuous parts of the two chromosomes
*parent1* and *parent2* and a two-point crossover on their integer parts. The crossover will
only happen with a probability *p_cr*. If that is the case, each continuous component of the chromosomes
will be crossovered with a probability of 0.5.

Args:
    parent1 (array-like object): a first chromosome
    parent2 (array-like object): a second chromosome
    bounds (2-D array-like object): problem bounds
    nix (:class:`int`): the integer dimension of the chromosome
    p_cr (:class:`float`): crossover probability
    eta_c (:class:`float`): crossover distribution index
    seed (:class:`int`): seed used by the internal random number generator

Returns:
    :class:`tuple`: of :class:`numpy.ndarray`: containing the two crossovered chromosomes

Raises:
    ValueError: if *bounds*, *parent1* and *parent2* are not of equal length, if lower bounds are not less
    or equal to the upper bounds, if the *nix* is larger than the parent size or if infinite values are
    detected in *bounds*, *p_cr* or *eta_c*
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
    type conversion errors, mismatched function signatures, etc.)

See also the docs of the C++ class :cpp:class:`pagmo::sbx_crossover`.

)";
}
3485
// Docstring for the pygmo.polynomial_mutation() utility. The Returns section
// was previously copy-pasted from sbx_crossover ("two crossovered
// chromosomes"); polynomial mutation returns a single mutated chromosome.
std::string polynomial_mutation_docstring()
{
    return R"(polynomial_mutation(dv, bounds, nix, p_m, eta_m, seed)

This function will perform a polynomial mutation over the continuous part of the chromosome *dv*
and a uniform mutation on the remaining integer part.

Args:
    dv (array-like object): the chromosome
    bounds (2-D array-like object): problem bounds
    nix (:class:`int`): the integer dimension of the chromosome
    p_m (:class:`float`): mutation probability
    eta_m (:class:`float`): mutation distribution index
    seed (:class:`int`): seed used by the internal random number generator

Returns:
    :class:`numpy.ndarray`: the mutated chromosome

Raises:
    ValueError: if *bounds* and *dv* are not of equal length, if lower bounds are not less
    or equal to the upper bounds, if the *nix* is larger than the parent size or if infinite values are
    detected in *bounds*, *p_m* or *eta_m*
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
    type conversion errors, mismatched function signatures, etc.)

See also the docs of the C++ class :cpp:class:`pagmo::polynomial_mutation`.

)";
}
3515
// Docstring for the pygmo.decompose meta-problem. The note directives use the
// two-colon RST form (".. note::"); with a single colon they are parsed as
// RST comments and silently dropped from the rendered docs.
std::string decompose_docstring()
{
    return R"(The decompose meta-problem.

This meta-problem *decomposes* a multi-objective input user-defined problem,
resulting in a single-objective user-defined problem with a fitness function combining the
original fitness functions. In particular, three different *decomposition methods* are here
made available:

* weighted decomposition,
* Tchebycheff decomposition,
* boundary interception method (with penalty constraint).

In the case of :math:`n` objectives, we indicate with: :math:`\mathbf f(\mathbf x) = [f_1(\mathbf x), \ldots, f_n(\mathbf
x)]` the vector containing the original multiple objectives, with: :math:`\boldsymbol \lambda = (\lambda_1, \ldots,
\lambda_n)` an :math:`n`-dimensional weight vector and with: :math:`\mathbf z^* = (z^*_1, \ldots, z^*_n)`
an :math:`n`-dimensional reference point. We also assume :math:`\lambda_i > 0, \forall i=1..n` and :math:`\sum_i \lambda_i =
1`.

The decomposed problem is thus a single objective optimization problem having the following single objective,
according to the decomposition method chosen:

* weighted decomposition: :math:`f_d(\mathbf x) = \boldsymbol \lambda \cdot \mathbf f`,
* Tchebycheff decomposition: :math:`f_d(\mathbf x) = \max_{1 \leq i \leq m} \lambda_i \vert f_i(\mathbf x) - z^*_i \vert`,
* boundary interception method (with penalty constraint): :math:`f_d(\mathbf x) = d_1 + \theta d_2`,


where :math:`d_1 = (\mathbf f - \mathbf z^*) \cdot \hat {\mathbf i}_{\lambda}`,
:math:`d_2 = \vert (\mathbf f - \mathbf z^*) - d_1 \hat {\mathbf i}_{\lambda})\vert` and
:math:`\hat {\mathbf i}_{\lambda} = \frac{\boldsymbol \lambda}{\vert \boldsymbol \lambda \vert}`.

.. note::

   The reference point :math:`z^*` is often taken as the ideal point and as such
   it may be allowed to change during the course of the optimization / evolution. The argument adapt_ideal activates
   this behaviour so that whenever a new ideal point is found :math:`z^*` is adapted accordingly.

.. note::

   The use of :class:`~pygmo.decompose` discards gradients and hessians so that if the original user defined problem
   implements them, they will not be available in the decomposed problem. The reason for this behaviour is that
   the Tchebycheff decomposition is not differentiable. Also, the use of this class was originally intended for
   derivative-free optimization.

See: "Q. Zhang -- MOEA/D: A Multiobjective Evolutionary Algorithm Based on Decomposition"

See: https://en.wikipedia.org/wiki/Multi-objective_optimization#Scalarizing
)";
}
3565
// Docstring for pygmo.decompose.original_fitness().
std::string decompose_original_fitness_docstring()
{
    // Named local + NRVO; behaviour identical to returning the literal.
    std::string doc = R"(original_fitness(x)

Fitness of the original problem.

Returns the fitness of the original multi-objective problem used to construct the decomposed problem.

Args:
    x (array-like object): input decision vector

Returns:
    1D NumPy float array: the fitness of the original multi-objective problem

Raises:
    unspecified: any exception thrown by the original fitness computation, or by failures at the
    intersection between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
    return doc;
}
3586
// Docstring for the pygmo.decompose.z read-only property. Uses the two-colon
// RST note directive (".. note::"); the single-colon form is an RST comment
// and would be dropped from the rendered docs.
std::string decompose_z_docstring()
{
    return R"(Current reference point.

This read-only property contains the reference point to be used for the decomposition. This is only
used for Tchebycheff and boundary interception decomposition methods.

.. note::

   The reference point is adapted at each call of the fitness.

Returns:
    1D NumPy float array: the reference point

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
    type conversion errors, mismatched function signatures, etc.)

)";
}
3607
// Returns the Python docstring for the unconstrain meta-problem class.
// Fix: the reST note directive requires a double colon (".. note::");
// with a single colon the block is parsed as a reST comment and never rendered.
std::string unconstrain_docstring()
{
    return R"(The unconstrain meta-problem.

This meta-problem transforms a constrained problem into an unconstrained problem applying one of the following methods:

* Death penalty: simply penalizes all objectives by the same high value if the fitness vector is infeasible.
* Kuri's death penalty: defined by Angel Kuri Morales et al., penalizes all objectives according to the rate of satisfied constraints.
* Weighted violations penalty: penalizes all objectives by the weighted sum of the constraint violations.
* Ignore the constraints: simply ignores the constraints.
* Ignore the objectives: ignores the objectives and defines as a new single objective the overall constraints violation (i.e. the sum of the L2 norms of the equalities and inequalities violations)

.. note::

   The use of :class:`~pygmo.unconstrain` discards gradients and hessians so that if the original user defined problem
   implements them, they will not be available in the unconstrained problem. The reason for this behaviour is that,
   in general, the methods implemented may not be differentiable. Also, the use of this class was originally intended for
   derivative-free optimization.

See: Coello Coello, C. A. (2002). Theoretical and numerical constraint-handling techniques used with evolutionary algorithms:
a survey of the state of the art. Computer methods in applied mechanics and engineering, 191(11), 1245-1287.

See: Kuri Morales, A. and Quezada, C.C. A Universal eclectic genetic algorithm for constrained optimization,
Proceedings 6th European Congress on Intelligent Techniques & Soft Computing, EUFIT'98, 518-522, 1998.

)";
}
3635
// Returns the Python docstring for pygmo.fast_non_dominated_sorting().
std::string fast_non_dominated_sorting_docstring()
{
    const std::string doc = R"(fast_non_dominated_sorting(points)

Runs the fast non dominated sorting algorithm on the input *points*

Args:
    points (2d-array-like object): the input points

Raises:
    ValueError: if *points* is malformed
    TypeError: if *points* cannot be converted to a vector of vector floats

Returns:
    :class:`tuple`: (*ndf*, *dl*, *dc*, *ndr*), where:

    * *ndf* (:class:`list` of 1D NumPy int array): the non dominated fronts
    * *dl* (:class:`list` of 1D NumPy int array): the domination list
    * *dc* (1D NumPy int array): the domination count
    * *ndr* (1D NumPy int array): the non domination ranks

Examples:
    >>> import pygmo as pg
    >>> ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(points = [[0,1],[-1,3],[2.3,-0.2],[1.1,-0.12],[1.1, 2.12],[-1.1,-1.1]])
)";
    return doc;
}
3662
// Returns the Python docstring for pygmo.pareto_dominance().
// Fix: *obj1* and *obj2* are one-dimensional objective vectors, so the TypeError
// clause should read "vector of floats" (the "vector of vector floats" wording was
// copy-pasted from the 2-D utilities).
std::string pareto_dominance_docstring()
{
    return R"(pareto_dominance(obj1, obj2)

Returns ``True`` if *obj1* Pareto dominates *obj2*, ``False`` otherwise. Minimization
is assumed.

Each pair of corresponding elements in *obj1* and *obj2* is compared: if all
elements in *obj1* are less or equal to the corresponding element in *obj2*,
but at least one is different, ``True`` will be returned. Otherwise, ``False`` will be returned.

Args:
    obj1 (array-like object): the first list of objectives
    obj2 (array-like object): the second list of objectives

Raises:
    ValueError: if the dimensions of *obj1* and *obj2* are different
    TypeError: if *obj1* or *obj2* cannot be converted to a vector of floats

Returns:
    bool: :data:`True` if *obj1* is dominating *obj2*, :data:`False` otherwise.

Examples:
    >>> import pygmo as pg
    >>> pg.pareto_dominance(obj1 = [1,2], obj2 = [2,2])
    True

)";
}
3692
// Returns the Python docstring for pygmo.non_dominated_front_2d().
// Fix: the function computes a single non dominated front, so the Returns
// clause should be singular ("front", not "fronts").
std::string non_dominated_front_2d_docstring()
{
    return R"(non_dominated_front_2d(points)

Finds the non dominated front of a set of two dimensional objectives. Complexity is :math:`\mathcal{O}(N \log N)`
and is thus lower than the complexity of calling :func:`~pygmo.fast_non_dominated_sorting()`

See: Jensen, Mikkel T. "Reducing the run-time complexity of multiobjective EAs: The NSGA-II and other algorithms."
IEEE Transactions on Evolutionary Computation 7.5 (2003): 503-515.

Args:
    points (2d-array-like object): the input points

Raises:
    ValueError: if *points* contain anything else than 2 dimensional objectives
    TypeError: if *points* cannot be converted to a vector of vector floats

Returns:
    1D NumPy int array: the non dominated front

Examples:
    >>> import pygmo as pg
    >>> pg.non_dominated_front_2d(points = [[0,5],[1,4],[2,3],[3,2],[4,1],[2,2]])
    array([0, 1, 5, 4], dtype=uint64)
)";
}
3719
// Returns the Python docstring for pygmo.crowding_distance().
// Fix: typo "Failiure" -> "Failure".
std::string crowding_distance_docstring()
{
    return R"(crowding_distance(points)

An implementation of the crowding distance. Complexity is :math:`O(M N \log N)` where :math:`M` is the number of
objectives and :math:`N` is the number of individuals. The function assumes *points* contain a non-dominated front.
Failure to meet this condition will result in undefined behaviour.

See: Deb, Kalyanmoy, et al. "A fast elitist non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II." Parallel problem solving from nature PPSN VI. Springer Berlin Heidelberg, 2000.

Args:
    points (2d-array-like object): the input points

Raises:
    ValueError: if *points* does not contain at least two points, or is malformed
    TypeError: if *points* cannot be converted to a vector of vector floats

Returns:
    1D NumPy float array: the crowding distances

Examples:
    >>> import pygmo as pg
    >>> pg.crowding_distance(points = [[0,5],[1,4],[2,3],[3,2],[4,1]])
    array([inf, 1., 1., 1., inf])
)";
}
3747
// Returns the Python docstring for pygmo.sort_population_mo().
std::string sort_population_mo_docstring()
{
    const std::string doc = R"(sort_population_mo(points)

Sorts a multi-objective, unconstrained, population (intended here as a 2D array-like
containing objective vectors) with respect to the following strict ordering:

* :math:`f_1 \prec f_2` if the non domination ranks are such that :math:`i_1 < i_2`. In case :math:`i_1 = i_2`,
  then :math:`f_1 \prec f_2` if the crowding distances are such that :math:`d_1 > d_2`.

Complexity is :math:`\mathcal{O}(M N^2)` where :math:`M` is the size of the objective vector and :math:`N` is the number of individuals.

.. note::

   This function will also work for single objective optimization, i.e. with objective vector
   of size 1, in which case, though, it is more efficient to sort using python built-in sorting methods.

Args:
    points (2d-array-like object): the input objective vectors

Raises:
    unspecified: all exceptions thrown by :func:`pygmo.fast_non_dominated_sorting()` and :func:`pygmo.crowding_distance()`
    TypeError: if *points* cannot be converted to a vector of vector floats

Returns:
    1D NumPy int array: the indexes of the sorted objectives vectors.

Examples:
    >>> import pygmo as pg
    >>> pop = pg.population(prob = pg.dtlz(prob_id = 3, dim=10, fdim=4), size = 20)
    >>> pg.sort_population_mo(points = pop.get_f()) # doctest: +SKIP
    array([ 4,  7, 14, 15, 16, 18,  9, 13,  5,  3,  6,  2, 12,  0,  1, 19, 17,  8, 10, 11])
)";
    return doc;
}
3782
// Returns the Python docstring for pygmo.select_best_N_mo().
// Fix: typo "crowidng" -> "crowding".
std::string select_best_N_mo_docstring()
{
    return R"(select_best_N_mo(points, N)

Returns (unordered) the best N individuals out of a multi-objective, unconstrained population, (intended here
as a 2D array-like containing objective vectors). The strict ordering used is the same as that defined
in :func:`~pygmo.sort_population_mo()`

Complexity is :math:`\mathcal{O}(M N^2)` where :math:`M` is the number of objectives and :math:`N` is the number of individuals.

While the complexity is the same as that of :func:`~pygmo.sort_population_mo()`, this function is to be preferred when
possible in that it avoids to compute the crowding distance for all individuals and only computes it for the last
non-dominated front containing individuals included in the best N.

If N is zero, an empty array will be returned.

Args:
    points (2d-array-like object): the input objective vectors
    N (:class:`int`): The size of the returned list of bests.

Raises:
    unspecified: all exceptions thrown by :func:`pygmo.fast_non_dominated_sorting()` and :func:`pygmo.crowding_distance()`
    TypeError: if *points* cannot be converted to a vector of vector floats

Returns:
    1D NumPy int array: the indexes of the *N* best objectives vectors.

Examples:
    >>> import pygmo as pg
    >>> pop = pg.population(prob = pg.dtlz(prob_id = 3, dim=10, fdim=4), size = 20)
    >>> pg.select_best_N_mo(points = pop.get_f(), N = 13) # doctest: +SKIP
    array([ 2,  3,  4,  5,  6,  7,  9, 12, 13, 14, 15, 16, 18])
)";
}
3817
// Returns the Python docstring for pygmo.decompose_objectives().
// Fix: typo "ussume" -> "assume".
std::string decompose_objectives_docstring()
{
    return R"(decompose_objectives(objs, weights, ref_point, method)

Decomposes a vector of objectives.

A vector of objectives is reduced to one only objective using a decomposition technique.

Three different possibilities for *method* are here made available:

- weighted decomposition,
- Tchebycheff decomposition,
- boundary interception method (with penalty constraint).

In the case of :math:`n` objectives, we indicate with: :math:`\mathbf f(\mathbf x) = [f_1(\mathbf x), \ldots,
f_n(\mathbf x)]` the vector containing the original multiple objectives, with: :math:`\boldsymbol \lambda =
(\lambda_1, \ldots, \lambda_n)` an :math:`n`-dimensional weight vector and with: :math:`\mathbf z^* = (z^*_1, \ldots,
z^*_n)` an :math:`n`-dimensional reference point. We also assume :math:`\lambda_i > 0, \forall i=1..n` and :math:`\sum_i
\lambda_i = 1`.

The resulting single objective is thus defined as:

- weighted decomposition: :math:`f_d(\mathbf x) = \boldsymbol \lambda \cdot \mathbf f`
- Tchebycheff decomposition: :math:`f_d(\mathbf x) = \max_{1 \leq i \leq m} \lambda_i \vert f_i(\mathbf x) - z^*_i \vert`
- boundary interception method (with penalty constraint): :math:`f_d(\mathbf x) = d_1 + \theta d_2`

where :math:`d_1 = (\mathbf f - \mathbf z^*) \cdot \hat {\mathbf i}_{\lambda}` ,
:math:`d_2 = \vert (\mathbf f - \mathbf z^*) - d_1 \hat {\mathbf i}_{\lambda})\vert` , and
:math:`\hat {\mathbf i}_{\lambda} = \frac{\boldsymbol \lambda}{\vert \boldsymbol \lambda \vert}`

Note that while `ref_point` is required, it does not impact the calculation for the `weighted` method as shown above.

Args:
    objs (array-like object): the objective vectors
    weights (array-like object): the weights :math:`\boldsymbol \lambda`
    ref_point (array-like object): the reference point :math:`\mathbf z^*` . It is not used if *method* is ``"weighted"``
    method (:class:`str`): the decomposition method: one of ``"weighted"``, ``"tchebycheff"`` or ``"bi"``

Raises:
    ValueError: if *objs*, *weight* and *ref_point* have different sizes or if *method* is not one of ``"weighted"``, ``"tchebycheff"`` or ``"bi"``.
    TypeError: if *weights* or *ref_point* or *objs* cannot be converted to a vector of floats.

Returns:
    1D NumPy float array: a one dimensional array containing the decomposed objective.

Examples:
    >>> import pygmo as pg
    >>> pg.decompose_objectives(objs = [1,2,3], weights = [0.1,0.1,0.8], ref_point=[5,5,5], method = "weighted") # doctest: +SKIP
    array([ 2.7])
    >>> pg.decompose_objectives(objs = [1,2,3], weights = [0.1,0.1,0.8], ref_point=[0,0,0], method = "weighted") # doctest: +SKIP
    array([ 2.7])
    >>> pg.decompose_objectives(objs = [1,2,3], weights = [0.1,0.1,0.8], ref_point=[5,5,5], method = "tchebycheff") # doctest: +SKIP
    array([ 1.6])
)";
}
3873
// Returns the Python docstring for pygmo.decomposition_weights().
// Fixes: typo "genrated" -> "generated"; "an uniform" -> "a uniform".
std::string decomposition_weights_docstring()
{
    return R"(decomposition_weights(n_f, n_w, method, seed)

Generates the requested number of weight vectors to be used to decompose a multi-objective problem. Three methods are available:

- ``"grid"`` generates weights on a uniform grid. This method may only be used when the number of requested weights to be generated is such that a uniform grid is indeed possible.
  In two dimensions this is always the case, but in larger dimensions uniform grids are possible only in special cases
- ``"random"`` generates weights randomly distributing them uniformly on the simplex (weights are such that :math:`\sum_i \lambda_i = 1`)
- ``"low discrepancy"`` generates weights using a low-discrepancy sequence to, eventually, obtain a better coverage of the Pareto front. Halton sequence is used since
  low dimensionalities are expected in the number of objectives (i.e. less than 20), hence Halton sequence is deemed as appropriate.

.. note::
   All methods are guaranteed to generate weights on the simplex (:math:`\sum_i \lambda_i = 1`). All weight generation methods are guaranteed
   to generate the canonical weights [1,0,0,...], [0,1,0,..], ... first.

Args:
    n_f (:class:`int`): number of the objective vectors
    n_w (:class:`int`): number of the weights :math:`\boldsymbol \lambda`
    method (:class:`str`): the weight generation method: one of ``"grid"``, ``"random"``, or ``"low discrepancy"``
    seed (:class:`int`): seed used by the internal random number generator

Raises:
    OverflowError: if *n_f*, *n_w* or *seed* are negative or greater than an implementation-defined value
    ValueError: if *n_f* and *n_w* are not compatible with the selected weight generation method or if *method* is not
      one of ``"grid"``, ``"random"`` or ``"low discrepancy"``


Returns:
    2D NumPy float array: the generated weights

Examples:
    >>> import pygmo as pg
    >>> pg.decomposition_weights(n_f = 2, n_w = 6, method = "low discrepancy", seed = 33) # doctest: +SKIP
    array([[ 1.   ,  0.   ],
           [ 0.   ,  1.   ],
           [ 0.25 ,  0.75 ],
           [ 0.75 ,  0.25 ],
           [ 0.125,  0.875],
           [ 0.625,  0.375]])
)";
}
3916
// Returns the Python docstring for pygmo.nadir().
std::string nadir_docstring()
{
    const std::string doc = R"(nadir(points)

Computes the nadir point of a set of points, i.e objective vectors. The nadir is that point that has the maximum
value of the objective function in the points of the non-dominated front.

Complexity is :math:`\mathcal{O}(MN^2)` where :math:`M` is the number of objectives and :math:`N` is the number of points.

Args:
    points (2d-array-like object): the input points

Raises:
    ValueError: if *points* is malformed
    TypeError: if *points* cannot be converted to a vector of vector floats

Returns:
    1D NumPy float array: the nadir point

)";
    return doc;
}
3938
// Returns the Python docstring for pygmo.ideal().
std::string ideal_docstring()
{
    const std::string doc = R"(ideal(points)

Computes the ideal point of a set of points, i.e objective vectors. The ideal point is that point that has, in each
component, the minimum value of the objective functions of the input points.

Complexity is :math:`\mathcal{O}(MN)` where :math:`M` is the number of objectives and :math:`N` is the number of points.

Args:
    points (2d-array-like object): the input points

Raises:
    ValueError: if *points* is malformed
    TypeError: if *points* cannot be converted to a vector of vector floats

Returns:
    1D NumPy float array: the ideal point

)";
    return doc;
}
3960
// Returns the Python docstring for pygmo.compare_fc().
// Fixes: garbled sentence ":math:`f_1` is they are both infeasible" -> "they are both
// infeasible"; typos "objectve" -> "objective" and "consraints" -> "constraints".
std::string compare_fc_docstring()
{
    return R"(compare_fc(f1, f2, nec, tol)

Compares two fitness vectors in a single-objective, constrained, case.

The following strict ordering is used:

- :math:`f_1 \prec f_2` if :math:`f_1` is feasible and :math:`f_2` is not.
- :math:`f_1 \prec f_2` if they are both infeasible, but :math:`f_1`
  violates fewer constraints than :math:`f_2`, or in case they both violate the same
  number of constraints, if the :math:`L_2` norm of the overall constraint violation
  is smaller.
- :math:`f_1 \prec f_2` if both fitness vectors are feasible and the objective value
  in :math:`f_1` is smaller than the objective value in :math:`f_2`

.. note::
   The fitness vectors are assumed to contain exactly one objective, *nec* equality constraints
   and the rest (if any) inequality constraints.

Args:
    f1 (array-like object): the first fitness vector
    f2 (array-like object): the second fitness vector
    nec (:class:`int`): the number of equality constraints in the fitness vectors
    tol (array-like object): tolerances to be accounted for in the constraints

Raises:
    OverflowError: if *nec* is negative or greater than an implementation-defined value
    ValueError: if *f1* and *f2* do not have equal size :math:`n`, if *f1* does not have at least size 1,
      if *nec* is larger than :math:`n-1` (too many constraints) or if the size of *tol* is not :math:`n - 1`
    TypeError: if *f1*, *f2* or *tol* cannot be converted to a vector of floats

Returns:
    ``bool``: ``True`` if *f1* is better than *f2*, ``False`` otherwise.

Examples:
    >>> import pygmo as pg
    >>> pg.compare_fc(f1 = [1,1,1], f2 = [1,2.1,-1.2], nec = 1, tol = [0]*2)
    False

)";
}
4003
// Returns the Python docstring for pygmo.sort_population_con().
// Fixes: garbled sentence ":math:`f_1` is they are both infeasible" -> "they are both
// infeasible"; typo "objectve" -> "objective".
std::string sort_population_con_docstring()
{
    return R"(sort_population_con(input_f, nec, tol)

Sorts a population (intended here as a 2D array-like
containing fitness vectors) assuming a single-objective, constrained case.

The following strict ordering is used (same as the one used in :func:`pygmo.compare_fc()`):

- :math:`f_1 \prec f_2` if :math:`f_1` is feasible and :math:`f_2` is not.
- :math:`f_1 \prec f_2` if they are both infeasible, but :math:`f_1`
  violates fewer constraints than :math:`f_2`, or in case they both violate the same
  number of constraints, if the :math:`L_2` norm of the overall constraint violation
  is smaller.
- :math:`f_1 \prec f_2` if both fitness vectors are feasible and the objective value
  in :math:`f_1` is smaller than the objective value in :math:`f_2`

.. note::
   The fitness vectors are assumed to contain exactly one objective, *nec* equality constraints
   and the rest (if any) inequality constraints.

Args:
    input_f (2-D array-like object): the fitness vectors
    nec (:class:`int`): the number of equality constraints in the fitness vectors
    tol (array-like object): tolerances to be accounted for in the constraints

Raises:
    OverflowError: if *nec* is negative or greater than an implementation-defined value
    ValueError: if the input fitness vectors do not have all the same size :math:`n >=1`, or if *nec* is larger than :math:`n-1` (too many constraints)
      or if the size of *tol* is not equal to :math:`n-1`
    TypeError: if *input_f* cannot be converted to a vector of vector of floats or *tol* cannot be converted to a vector of floats.

Returns:
    :class:`list` of 1D NumPy int array: the indexes of the sorted fitnesses vectors.

Examples:
    >>> import pygmo as pg
    >>> idxs = pg.sort_population_con(input_f = [[1.2,0.1,-1],[0.2,1.1,1.1],[2,-0.5,-2]], nec = 1, tol = [1e-8]*2)
    >>> print(idxs)
    [0 2 1]

)";
}
4047
// Returns the Python docstring for pygmo.estimate_sparsity().
// Fixes: "of same callable" -> "of a callable"; typo "sparisty" -> "sparsity";
// "its not possible" -> "it's not possible".
std::string estimate_sparsity_docstring()
{
    return R"(estimate_sparsity(callable, x, dx = 1e-8)

Performs a numerical estimation of the sparsity pattern of a callable object by numerically
computing it around the input point *x* and detecting the components that are changed.

The *callable* must accept an iterable as input and return an array-like object

Note that estimate_sparsity may fail to detect the real sparsity as it only considers one variation around the input
point. It is of use, though, in tests or cases where it's not possible to write the sparsity or where the user is
confident the estimate will be correct.

Args:
    callable (a callable object): The function we want to estimate sparsity (typically a fitness).
    x (array-like object): decision vector to use when testing for sparsity.
    dx (:class:`float`): To detect the sparsity each component of *x* will be changed by :math:`\max(|x_i|,1) dx`.

Raises:
    unspecified: any exception thrown by the *callable* object when called on *x*.
    TypeError: if *x* cannot be converted to a vector of floats or *callable* is not callable.

Returns:
    2D NumPy float array: the sparsity_pattern of *callable* detected around *x*

Examples:
    >>> import pygmo as pg
    >>> def my_fun(x):
    ...     return [x[0]+x[3], x[2], x[1]]
    >>> pg.estimate_sparsity(callable = my_fun, x = [0.1,0.1,0.1,0.1], dx = 1e-8)
    array([[0, 0],
           [0, 3],
           [1, 2],
           [2, 1]], dtype=uint64)

)";
}
4085
// Returns the Python docstring for pygmo.estimate_gradient().
// Fixes: the opening paragraph and the Args section were copy-pasted from
// estimate_sparsity and described a "sparsity pattern" instead of the gradient;
// the cost of a central difference is 2 evaluations per component, i.e. 2n, not n;
// typo "sparisty" -> "gradient" wording.
std::string estimate_gradient_docstring()
{
    return R"(estimate_gradient(callable, x, dx = 1e-8)

Performs a numerical estimation of the gradient of a callable object by numerically
computing it around the input point *x*.

The *callable* must accept an iterable as input and return an array-like object. The gradient returned will be dense
and contain, in the lexicographic order requested by :func:`~pygmo.problem.gradient()`, :math:`\frac{df_i}{dx_j}`.

The numerical approximation of each derivative is made by central difference, according to the formula:

.. math::
   \frac{df}{dx} \approx \frac{f(x+dx) - f(x-dx)}{2dx} + O(dx^2)

The overall cost, in terms of calls to *callable* will thus be :math:`2n` where :math:`n` is the size of *x*.

Args:
    callable (a callable object): The function whose gradient we want to estimate (typically a fitness).
    x (array-like object): decision vector around which the gradient is estimated.
    dx (:class:`float`): To compute the numerical derivative each component of *x* will be changed by :math:`\max(|x_i|,1) dx`.

Raises:
    unspecified: any exception thrown by the *callable* object when called on *x*.
    TypeError: if *x* cannot be converted to a vector of floats or *callable* is not callable.

Returns:
    2D NumPy float array: the dense gradient of *callable* detected around *x*

Examples:
    >>> import pygmo as pg
    >>> def my_fun(x):
    ...     return [x[0]+x[3], x[2], x[1]]
    >>> pg.estimate_gradient(callable = my_fun, x = [0]*4, dx = 1e-8) # doctest: +NORMALIZE_WHITESPACE
    array([1., 0., 0., 1., 0., 0., 1., 0., 0., 1., 0., 0.])
)";
}
4123
// Returns the Python docstring for pygmo.estimate_gradient_h().
// Fixes: the opening paragraph and the Args section were copy-pasted from
// estimate_sparsity and described a "sparsity pattern" instead of the gradient;
// malformed inline math "6:math:`n`" -> ":math:`6n`"; typo "sparisty".
std::string estimate_gradient_h_docstring()
{
    return R"(estimate_gradient_h(callable, x, dx = 1e-8)

Performs a numerical estimation of the gradient of a callable object by numerically
computing it around the input point *x*.

The *callable* must accept an iterable as input and return an array-like object. The gradient returned will be dense
and contain, in the lexicographic order requested by :func:`~pygmo.problem.gradient`, :math:`\frac{df_i}{dx_j}`.

The numerical approximation of each derivative is made by central difference, according to the formula:

.. math::
   \frac{df}{dx} \approx \frac 32 m_1 - \frac 35 m_2 +\frac 1{10} m_3 + O(dx^6)

where:

.. math::
   m_i = \frac{f(x + i dx) - f(x-i dx)}{2i dx}

The overall cost, in terms of calls to *callable* will thus be :math:`6n` where :math:`n` is the size of *x*.

Args:
    callable (a callable object): The function whose gradient we want to estimate (typically a fitness).
    x (array-like object): decision vector around which the gradient is estimated.
    dx (:class:`float`): To compute the numerical derivative each component of *x* will be changed by :math:`\max(|x_i|,1) dx`.

Raises:
    unspecified: any exception thrown by the *callable* object when called on *x*.
    TypeError: if *x* cannot be converted to a vector of floats or *callable* is not callable.

Returns:
    2D NumPy float array: the dense gradient of *callable* detected around *x*

Examples:
    >>> import pygmo as pg
    >>> def my_fun(x):
    ...     return [x[0]+x[3], x[2], x[1]]
    >>> pg.estimate_gradient_h(callable = my_fun, x = [0]*4, dx = 1e-2) # doctest: +NORMALIZE_WHITESPACE
    array([1., 0., 0., 1., 0., 0., 1., 0., 0., 1., 0., 0.])
)";
}
4166
// Returns the Python docstring for pygmo.set_global_rng_seed().
std::string set_global_rng_seed_docstring()
{
    const std::string doc = R"(set_global_rng_seed(seed)

In pygmo it is, in general, possible to control the seed of all random generators by a dedicated *seed* kwarg passed on via various
constructors. If no *seed* is passed pygmo randomly creates a seed for you using its global random number generator.

This function allows to be able to reset the seed of such a global random number generator. This can be useful to create a deterministic behaviour of pygmo easily.

Args:
    seed (:class:`int`): the new global seed for random number generation

.. note::
   In complex parallel evolutions obtaining a deterministic behaviour is not possible even setting the global seed as
   pygmo implements an asynchronous model for parallel execution and the exact interplay between threads and processes cannot
   be reproduced deterministically.

Examples:
    >>> import pygmo as pg
    >>> pg.set_global_rng_seed(seed = 32)
    >>> pop = pg.population(prob = pg.ackley(5), size = 20)
    >>> print(pop.champion_f) # doctest: +SKIP
    [17.26891503]
    >>> pg.set_global_rng_seed(seed = 32)
    >>> pop = pg.population(prob = pg.ackley(5), size = 20)
    >>> print(pop.champion_f) # doctest: +SKIP
    [17.26891503]
)";
    return doc;
}
4196
// Returns the Python docstring for the pygmo.hvwfg hypervolume algorithm.
// Fix: *stop_dimension* was described as "the input population" (copy-paste from
// another docstring); it is the dimension at which the WFG recursion stops.
std::string hvwfg_docstring()
{
    return R"(__init__(stop_dimension = 2)

The hypervolume algorithm from the Walking Fish Group (2011 version).

This object can be passed as parameter to the various methods of the
class :class:`~pygmo.hypervolume` as it derives from the hidden base
class :class:`~pygmo._hv_algorithm`

Args:
    stop_dimension (:class:`int`): the dimension at which the WFG recursion stops and the remaining
      problem is computed with a lower-dimensional exact method

Raises:
    OverflowError: if *stop_dimension* is negative or greater than an implementation-defined value

Examples:
    >>> import pygmo as pg
    >>> hv_algo = pg.hvwfg(stop_dimension = 2)

See also the docs of the C++ class :cpp:class:`pagmo::hvwfg`.

)";
}
4221
// Returns the Python docstring for the pygmo.hv2d hypervolume algorithm.
std::string hv2d_docstring()
{
    const std::string doc = R"(__init__()

Exact hypervolume algorithm for two dimensional points.

This object can be passed as parameter to the various methods of the
class :class:`~pygmo.hypervolume` as it derives from the hidden base
class :class:`~pygmo._hv_algorithm`

Examples:
    >>> import pygmo as pg
    >>> hv_algo = pg.hv2d()

See also the docs of the C++ class :cpp:class:`pagmo::hv2d`.

)";
    return doc;
}
4240
// Returns the Python docstring for the pygmo.hv3d hypervolume algorithm.
std::string hv3d_docstring()
{
    const std::string doc = R"(__init__()

Exact hypervolume algorithm for three dimensional points.

This object can be passed as parameter to the various methods of the
class :class:`~pygmo.hypervolume` as it derives from the hidden base
class :class:`~pygmo._hv_algorithm`

Examples:
    >>> import pygmo as pg
    >>> hv_algo = pg.hv3d()

See also the docs of the C++ class :cpp:class:`pagmo::hv3d`.

)";
    return doc;
}
4259
// Returns the Python docstring for the pygmo.bf_approx hypervolume algorithm.
std::string bf_approx_docstring()
{
    const std::string doc = R"(__init__()

Bringmann-Friedrich approximation method. Implementation of the Bringmann-Friedrich approximation scheme (FPRAS),
reduced to the special case of approximating the least contributor.

This object can be passed as parameter to the various methods of the
class :class:`~pygmo.hypervolume` as it derives from the hidden base
class :class:`~pygmo._hv_algorithm`

Examples:
    >>> import pygmo as pg
    >>> hv_algo = pg.bf_approx()

See also the docs of the C++ class :cpp:class:`pagmo::bf_approx`.

)";
    return doc;
}
4279
// Returns the Python docstring for the pygmo.bf_fpras hypervolume algorithm.
std::string bf_fpras_docstring()
{
    const std::string doc = R"(__init__(eps = 1e-2, delta = 1e-2, seed = random)

Bringmann-Friedrich approximation method. Implementation of the Bringmann-Friedrich approximation scheme (FPRAS),
reduced to the special case of approximating the hypervolume indicator.

This object can be passed as parameter to the various methods of the
class :class:`~pygmo.hypervolume` as it derives from the hidden base
class :class:`~pygmo._hv_algorithm`

Examples:
    >>> import pygmo as pg
    >>> hv_algo = pg.bf_fpras(eps = 1e-2, delta = 1e-2)

See also the docs of the C++ class :cpp:class:`pagmo::bf_fpras`.

)";
    return doc;
}
4299
// Returns the Python docstring for hypervolume's constructor from a population.
std::string hv_init1_docstring()
{
    const std::string doc = R"(__init__(pop)

Constructor from population

Args:
    pop (:class:`~pygmo.population`): the input population

Raises:
    ValueError: if *pop* contains a single-objective or a constrained problem

Examples:
    >>> from pygmo import *
    >>> pop = population(prob = zdt(prob_id = 1), size = 20)
    >>> hv = hypervolume(pop = pop)

See also the docs of the C++ class :cpp:class:`pagmo::hypervolume`.

)";
    return doc;
}
4321
// Returns the Python docstring for hypervolume's constructor from a set of points.
std::string hv_init2_docstring()
{
    const std::string doc = R"(__init__(points)

Constructor from points

Args:
    points (2d array-like object): the points

Raises:
    ValueError: if *points* is inconsistent

Examples:
    >>> from pygmo import *
    >>> points = [[1,2],[0.5, 3],[0.1,3.1]]
    >>> hv = hypervolume(points = points)

See also the docs of the C++ class :cpp:class:`pagmo::hypervolume`.

)";
    return doc;
}
4343
// Returns the Python docstring for hypervolume.compute().
// Fix: *ref_point* was described as "(2d array-like object): the points" — it is the
// one-dimensional reference point (same wording as hypervolume.exclusive()).
std::string hv_compute_docstring()
{
    return R"(hypervolume.compute(ref_point, hv_algo = auto)

Computes the hypervolume with the supplied algorithm. If no algorithm
is supplied, then an exact hypervolume algorithm is automatically selected
specific for the point dimension.

Args:
    ref_point (array-like object): the reference point
    hv_algo (deriving from :class:`~pygmo._hv_algorithm`): hypervolume algorithm to be used

Returns:
    :class:`float`: the computed hypervolume assuming *ref_point* as reference point

Raises:
    ValueError: if *ref_point* is not dominated by the nadir point

See also the docs of the C++ class :cpp:func:`pagmo::hypervolume::compute`.

)";
}
4366
// Docstring for pygmo.hypervolume.contributions().
// Fix: the *ref_point* argument was described as "2d array-like object: the
// points" (copy-pasted from the points constructor); it is a single reference
// point, consistent with the wording used in hypervolume.exclusive().
std::string hv_contributions_docstring()
{
    return R"(hypervolume.contributions(ref_point, hv_algo = auto)

This method returns the exclusive contribution to the hypervolume of every point.
According to *hv_algo* this computation can be implemented optimally (as opposed to calling
for :func:`~pygmo.hypervolume.exclusive` in a loop).

Args:
ref_point (array-like object): the reference point
hv_algo (deriving from :class:`~pygmo._hv_algorithm`): hypervolume algorithm to be used

Returns:
1D NumPy float array: the contribution of all points to the hypervolume

Raises:
ValueError: if *ref_point* is not suitable

See also the docs of the C++ class :cpp:func:`pagmo::hypervolume::contributions`.

)";
}
4389
// Docstring for pygmo.hypervolume.exclusive().
// Fix: the Returns section claimed "1D NumPy float array: the contribution of
// all points" (copy-pasted from contributions()); exclusive() computes the
// contribution of the single point at index *idx* and returns a float
// (pagmo::hypervolume::exclusive returns a double). Also drops a stray
// doubled blank line after the Args section.
std::string hv_exclusive_docstring()
{
    return R"(hypervolume.exclusive(idx, ref_point, hv_algo = auto)

Computes the exclusive contribution to the hypervolume of a particular point.

Args:
idx (:class:`int`): index of the point
ref_point (array-like object): the reference point
hv_algo (deriving from :class:`~pygmo._hv_algorithm`): hypervolume algorithm to be used

Returns:
:class:`float`: the exclusive contribution to the hypervolume of the point at index *idx*

Raises:
ValueError: if *ref_point* is not suitable or if *idx* is out of bounds
OverflowError: if *idx* is negative or greater than an implementation-defined value

See also the docs of the C++ class :cpp:func:`pagmo::hypervolume::exclusive`.

)";
}
4413
// Docstring for pygmo.hypervolume.greatest_contributor().
// Fix: the docstring lacked a Returns section; the method returns the index
// of the point contributing the most (pagmo::hypervolume::greatest_contributor
// returns an unsigned index type).
std::string hv_greatest_contributor_docstring()
{
    return R"(hypervolume.greatest_contributor(ref_point, hv_algo = auto)

Computes the point contributing the most to the total hypervolume.

Args:
ref_point (array-like object): the reference point
hv_algo (deriving from :class:`~pygmo._hv_algorithm`): hypervolume algorithm to be used

Returns:
:class:`int`: the index of the point contributing the most to the hypervolume

Raises:
ValueError: if *ref_point* is not suitable

See also the docs of the C++ class :cpp:func:`pagmo::hypervolume::greatest_contributor`.

)";
}
4431
// Docstring for pygmo.hypervolume.least_contributor().
// Fix: the docstring lacked a Returns section; the method returns the index
// of the point contributing the least (pagmo::hypervolume::least_contributor
// returns an unsigned index type).
std::string hv_least_contributor_docstring()
{
    return R"(hypervolume.least_contributor(ref_point, hv_algo = auto)

Computes the point contributing the least to the total hypervolume.

Args:
ref_point (array-like object): the reference point
hv_algo (deriving from :class:`~pygmo._hv_algorithm`): hypervolume algorithm to be used

Returns:
:class:`int`: the index of the point contributing the least to the hypervolume

Raises:
ValueError: if *ref_point* is not suitable

See also the docs of the C++ class :cpp:func:`pagmo::hypervolume::least_contributor`.

)";
}
4449
// Docstring for pygmo.hypervolume.refpoint().
// Fixes: ".. note:" (single colon) is not a valid Sphinx directive and would
// render as plain text — it must be ".. note::"; the *offset* argument was
// mis-described as "the reference point" — it is a value added to each
// component of the computed point.
std::string hv_refpoint_docstring()
{
    return R"(hypervolume.refpoint(offset = 0)

Calculates a mock refpoint by taking the maximum in each dimension over all points saved in the hypervolume object.
The result is a point that is necessarily dominated by all other points, and thus can be used for hypervolume computations.

.. note::

This point is different from the one computed by :func:`~pygmo.nadir()` as only the non dominated front is considered
in that method (also its complexity is thus higher)

Args:
offset (:class:`float`): the offset to be added to each component of the computed point

Returns:
1D NumPy float array: the reference point

See also the docs of the C++ class :cpp:func:`pagmo::hypervolume::refpoint`.

)";
}
4472
// Docstring for the pygmo.island class (UDI wrapper, asynchronous evolution).
std::string island_docstring()
{
    return R"(Island class.

In the pygmo jargon, an island is a class that encapsulates the following entities:

* a user-defined island (**UDI**),
* an :class:`~pygmo.algorithm`,
* a :class:`~pygmo.population`,
* a replacement policy (of type :class:`~pygmo.r_policy`),
* a selection policy (of type :class:`~pygmo.s_policy`).

Through the UDI, the island class manages the asynchronous evolution (or optimisation)
of its :class:`~pygmo.population` via the algorithm's :func:`~pygmo.algorithm.evolve()`
method. Depending on the UDI, the evolution might take place in a separate thread (e.g., if the UDI is a
:class:`~pygmo.thread_island`), in a separate process (e.g., if the UDI is a
:class:`~pygmo.mp_island`) or even in a separate machine (e.g., if the UDI is a
:class:`~pygmo.ipyparallel_island`). The evolution is always asynchronous (i.e., running in the
"background") and it is initiated by a call to the :func:`~pygmo.island.evolve()` method. At any
time the user can query the state of the island and fetch its internal data members. The user can explicitly
wait for pending evolutions to conclude by calling the :func:`~pygmo.island.wait()` and
:func:`~pygmo.island.wait_check()` methods. The status of ongoing evolutions in the island can be queried via
the :attr:`~pygmo.island.status` attribute.

The replacement and selection policies are used when the island is part of an :class:`~pygmo.archipelago`.
They establish how individuals are selected and replaced from the island when migration across islands occurs within
the :class:`~pygmo.archipelago`. If the island is not part of an :class:`~pygmo.archipelago`,
the replacement and selection policies play no role.

Typically, pygmo users will employ an already-available UDI in conjunction with this class (see :ref:`here <available_islands>`
for a full list), but advanced users can implement their own UDI types. A user-defined island must implement
the following method:

.. code-block::

def run_evolve(self, algo, pop):
...

The ``run_evolve()`` method of the UDI will use the input :class:`~pygmo.algorithm`'s
:func:`~pygmo.algorithm.evolve()` method to evolve the input :class:`~pygmo.population` and, once the evolution
is finished, it will return the algorithm used for the evolution and the evolved :class:`~pygmo.population`.

In addition to the mandatory ``run_evolve()`` method, a UDI may implement the following optional methods:

.. code-block::

def get_name(self):
...
def get_extra_info(self):
...

See the documentation of the corresponding methods in this class for details on how the optional
methods in the UDI are used by :class:`~pygmo.island`.

Note that, due to the asynchronous nature of :class:`~pygmo.island`, a UDI has certain requirements regarding
thread safety. Specifically, ``run_evolve()`` is always called in a separate thread of execution, and consequently:

* multiple UDI objects may be calling their own ``run_evolve()`` method concurrently,
* in a specific UDI object, any method from the public API of the UDI may be called while ``run_evolve()`` is
running concurrently in another thread. Thus, UDI writers must ensure that actions such as copying
the UDI, calling the optional methods (such as ``get_name()``), etc. can be safely performed while the island
is evolving.

An island can be initialised in a variety of ways using keyword arguments:

* if the arguments list is empty, a default :class:`~pygmo.island` is constructed, containing a
:class:`~pygmo.thread_island` UDI, a :class:`~pygmo.null_algorithm` algorithm, an empty
population with problem type :class:`~pygmo.null_problem`, and default-constructed
:class:`~pygmo.r_policy` and :class:`~pygmo.s_policy`;
* if the arguments list contains *algo*, *pop* and, optionally, *udi*, *r_pol* and *s_pol*, then the constructor will
initialise an :class:`~pygmo.island` containing the specified algorithm, population, UDI and replacement/selection
policies. If *r_pol* and/or *s_pol* are not supplied, the replacement/selection policies will be default-constructed.
If the *udi* parameter is not supplied, the UDI type is chosen according to a heuristic which depends
on the platform, the Python version and the supplied *algo* and *pop* parameters:

* if *algo* and *pop*'s problem provide at least the ``basic`` :class:`~pygmo.thread_safety` guarantee,
then :class:`~pygmo.thread_island` will be selected as UDI type;
* otherwise, if the current platform is Windows or the Python version is at least 3.4, then :class:`~pygmo.mp_island`
will be selected as UDI type, else :class:`~pygmo.ipyparallel_island` will be chosen;
* if the arguments list contains *algo*, *prob*, *size* and, optionally, *udi*, *b*, *seed*, *r_pol* and *s_pol*,
then a :class:`~pygmo.population` will be constructed from *prob*, *size*, *b* and *seed*, and the construction will
then proceed in the same way detailed above (i.e., *algo* and the newly-created population are used to initialise the
island's algorithm and population, the UDI, if not specified, will be chosen according to the heuristic detailed above,
and the replacement/selection policies are given by *r_pol* and *s_pol*).

If the keyword arguments list is invalid, a :exc:`KeyError` exception will be raised.

This class is the Python counterpart of the C++ class :cpp:class:`pagmo::island`.

)";
}
4564
// Docstring for pygmo.island.evolve().
// Fix: missing sentence break in the migration bullet — "…from other islands
// If so, …" lacked the terminating period after "islands".
std::string island_evolve_docstring()
{
    return R"(evolve(n = 1)

Launch evolution.

This method will evolve the island's :class:`~pygmo.population` using the island's :class:`~pygmo.algorithm`.
The evolution happens asynchronously: a call to :func:`~pygmo.island.evolve()` will create an evolution task that
will be pushed to a queue, and then return immediately. The tasks in the queue are consumed by a separate thread of execution
managed by the :class:`~pygmo.island` object. Each task will invoke the ``run_evolve()`` method of the UDI *n*
times consecutively to perform the actual evolution. The island's algorithm and population will be updated at the
end of each ``run_evolve()`` invocation. Exceptions raised inside the tasks are stored within the island object,
and can be re-raised by calling :func:`~pygmo.island.wait_check()`.

If the island is part of an :class:`~pygmo.archipelago`, then migration of individuals to/from other
islands might occur. The migration algorithm consists of the following steps:

* before invoking ``run_evolve()`` on the UDI, the island will ask the
archipelago if there are candidate incoming individuals from other islands.
If so, the replacement policy is invoked and the current population of the island is updated with the migrants;
* ``run_evolve()`` is then invoked and the current population is evolved;
* after ``run_evolve()`` has concluded, individuals are selected in the
evolved population and copied into the migration database of the archipelago
for future migrations.

It is possible to call this method multiple times to enqueue multiple evolution tasks, which will be consumed in a FIFO (first-in
first-out) fashion. The user may call :func:`~pygmo.island.wait()` or :func:`~pygmo.island.wait_check()` to block until all
tasks have been completed, and to fetch exceptions raised during the execution of the tasks. The :attr:`~pygmo.island.status`
attribute can be used to query the status of the asynchronous operations in the island.

Args:
n (:class:`int`): the number of times the ``run_evolve()`` method of the UDI will be called within the evolution task
(this corresponds also to the number of times migration can happen, if the island belongs to an archipelago)

Raises:
IndexError: if the island is part of an archipelago and during migration an invalid island index is used (this can
happen if the archipelago's topology is malformed)
OverflowError: if *n* is negative or larger than an implementation-defined value
unspecified: any exception thrown by the public interface of :class:`~pygmo.archipelago`, the public interface of
the replacement/selection policies, the underlying C++ method, or by failures at the intersection between C++ and
Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
4609
// Docstring for pygmo.island.wait_check().
std::string island_wait_check_docstring()
{
    return R"(wait_check()

Block until evolution ends and re-raise the first stored exception.

If one task enqueued after the last call to :func:`~pygmo.island.wait_check()` threw an exception, the exception will be re-thrown
by this method. If more than one task enqueued after the last call to :func:`~pygmo.island.wait_check()` threw an exception,
this method will re-throw the exception raised by the first enqueued task that threw, and the exceptions
from all the other tasks that threw will be ignored.

Note that :func:`~pygmo.island.wait_check()` resets the status of the island: after a call to :func:`~pygmo.island.wait_check()`,
:attr:`~pygmo.island.status` will always return the ``idle`` :class:`~pygmo.evolve_status`.

Raises:
unspecified: any exception thrown by evolution tasks or by the underlying C++ method

)";
}
4629
// Docstring for pygmo.island.wait().
std::string island_wait_docstring()
{
    return R"(wait()

This method will block until all the evolution tasks enqueued via :func:`~pygmo.island.evolve()` have been completed.
Exceptions thrown by the enqueued tasks can be re-raised via :func:`~pygmo.island.wait_check()`: they will **not** be
re-thrown by this method. Also, contrary to :func:`~pygmo.island.wait_check()`, this method will **not** reset the
status of the island: after a call to :func:`~pygmo.island.wait()`, :attr:`~pygmo.island.status` will always return
either the ``idle`` or ``idle_error`` :class:`~pygmo.evolve_status`.

)";
}
4642
// Docstring for the read-only pygmo.island.status property.
std::string island_status_docstring()
{
    return R"(Status of the island.

This read-only property will return an :class:`~pygmo.evolve_status` flag indicating the current status of
asynchronous operations in the island. The flag will be:

* ``idle`` if the island is currently not evolving and no exceptions
were thrown by evolution tasks since the last call to :func:`~pygmo.island.wait_check()`;
* ``busy`` if the island is evolving and no exceptions
have (yet) been thrown by evolution tasks since the last call to :func:`~pygmo.island.wait_check()`;
* ``idle_error`` if the island is currently not evolving and at least one
evolution task threw an exception since the last call to :func:`~pygmo.island.wait_check()`;
* ``busy_error`` if the island is currently evolving and at least one
evolution task has already thrown an exception since the last call to :func:`~pygmo.island.wait_check()`.

Note that after a call to :func:`~pygmo.island.wait_check()`, :attr:`~pygmo.island.status` will always return
``idle``, and after a call to :func:`~pygmo.island.wait()`, :attr:`~pygmo.island.status`
will always return either ``idle`` or ``idle_error``.

Returns:
:class:`~pygmo.evolve_status`: a flag indicating the current status of asynchronous operations in the island

)";
}
4668
// Docstring for pygmo.island.get_algorithm().
std::string island_get_algorithm_docstring()
{
    return R"(get_algorithm()

Get the algorithm.

It is safe to call this method while the island is evolving.

Returns:
:class:`~pygmo.algorithm`: a copy of the island's algorithm

Raises:
unspecified: any exception thrown by the underlying C++ method

)";
}
4685
// Docstring for pygmo.island.set_algorithm().
std::string island_set_algorithm_docstring()
{
    return R"(set_algorithm(algo)

Set the algorithm.

It is safe to call this method while the island is evolving.

Args:
algo (:class:`~pygmo.algorithm`): the algorithm that will be copied into the island

Raises:
unspecified: any exception thrown by the underlying C++ method

)";
}
4702
// Docstring for pygmo.island.get_population().
std::string island_get_population_docstring()
{
    return R"(get_population()

Get the population.

It is safe to call this method while the island is evolving.

Returns:
:class:`~pygmo.population`: a copy of the island's population

Raises:
unspecified: any exception thrown by the underlying C++ method

)";
}
4719
// Docstring for pygmo.island.set_population().
std::string island_set_population_docstring()
{
    return R"(set_population(pop)

Set the population.

It is safe to call this method while the island is evolving.

Args:
pop (:class:`~pygmo.population`): the population that will be copied into the island

Raises:
unspecified: any exception thrown by the underlying C++ method

)";
}
4736
// Docstring for pygmo.island.get_name().
std::string island_get_name_docstring()
{
    return R"(get_name()

Island's name.

If the UDI provides a ``get_name()`` method, then this method will return the output of its ``get_name()`` method.
Otherwise, an implementation-defined name based on the type of the UDI will be returned.

It is safe to call this method while the island is evolving.

Returns:
str: the name of the UDI

Raises:
unspecified: any exception thrown by the ``get_name()`` method of the UDI

)";
}
4756
// Docstring for pygmo.island.get_extra_info().
std::string island_get_extra_info_docstring()
{
    return R"(get_extra_info()

Island's extra info.

If the UDI provides a ``get_extra_info()`` method, then this method will return the output of its ``get_extra_info()``
method. Otherwise, an empty string will be returned.

It is safe to call this method while the island is evolving.

Returns:
str: extra info about the UDI

Raises:
unspecified: any exception thrown by the ``get_extra_info()`` method of the UDI

)";
}
4776
// Docstring for pygmo.island.get_r_policy().
std::string island_get_r_policy_docstring()
{
    return R"(get_r_policy()

Get the replacement policy.

Returns:
:class:`~pygmo.r_policy`: a copy of the current replacement policy

)";
}
4788
// Docstring for pygmo.island.get_s_policy().
std::string island_get_s_policy_docstring()
{
    return R"(get_s_policy()

Get the selection policy.

Returns:
:class:`~pygmo.s_policy`: a copy of the current selection policy

)";
}
4800
// Docstring for the pygmo.thread_island UDI (separate-thread evolution).
std::string thread_island_docstring()
{
    return R"(__init__(use_pool=True)

Thread island.

This class is a user-defined island (UDI) that will run evolutions in
a separate thread of execution. Evolution tasks running on this
UDI must involve :class:`~pygmo.algorithm` and :class:`~pygmo.problem` instances
that provide at least the ``basic`` :class:`~pygmo.thread_safety` guarantee, otherwise
errors will be raised during the evolution.

Note that algorithms and problems implemented in Python are never considered thread safe, and thus
this UDI can be used only with algorithms and problems implemented in C++.

The *use_pool* flag signals whether or not this island should use a common thread pool
shared by all islands.

Using a thread pool is more computationally-efficient, for at least
two reasons:

* it avoids runtime overhead when
the number of islands evolving simultaneously is larger than the CPU
count (e.g., in a large :class:`~pygmo.archipelago`);
* because the implementation uses the Intel TBB libraries, it integrates
better with other pagmo facilities built on top of TBB (e.g., the
:class:`~pygmo.thread_bfe` batch fitness evaluator).

A thread pool however also introduces a serializing behaviour because the number
of evolutions actually running at the same time is limited by the CPU
count (whereas without the thread pool an unlimited number of evolutions
can be active at the same time, albeit with a performance penalty).

See also the documentation of the corresponding C++ class :cpp:class:`pagmo::thread_island`.

.. versionadded:: 2.16.0

The *use_pool* flag.

Args:
use_pool (:class:`bool`): a boolean flag signalling whether or not a thread pool should be used by the island

)";
}
4845
// Docstring for the pygmo.archipelago class.
// Fix: ``status`` is a read-only property, not a method — it is referenced as
// :attr:`~pygmo.archipelago.status` everywhere else in this file (e.g. in the
// evolve() and status docstrings), so the :func:`...status()` cross-reference
// here was wrong and would not resolve.
std::string archipelago_docstring()
{
    return R"(Archipelago.

An archipelago is a collection of :class:`~pygmo.island` objects connected by a
:class:`~pygmo.topology`. The islands in the archipelago can exchange individuals
(i.e., candidate solutions) via a process called *migration*. The individuals migrate
across the routes described by the topology, and the islands' replacement
and selection policies (see :class:`~pygmo.r_policy` and :class:`~pygmo.s_policy`)
establish how individuals are replaced in and selected from the islands' populations.

The interface of :class:`~pygmo.archipelago` mirrors partially the interface
of :class:`~pygmo.island`: the evolution is initiated by a call to :func:`~pygmo.archipelago.evolve()`,
and at any time the user can query the
state of the archipelago and access its island members. The user can explicitly wait for pending evolutions
to conclude by calling the :func:`~pygmo.archipelago.wait()` and :func:`~pygmo.archipelago.wait_check()`
methods. The status of ongoing evolutions in the archipelago can be queried via
the :attr:`~pygmo.archipelago.status` attribute.

)";
}
4867
// Docstring for pygmo.archipelago.evolve().
std::string archipelago_evolve_docstring()
{
    return R"(evolve(n = 1)

Evolve archipelago.

This method will call :func:`pygmo.island.evolve()` on all the islands of the archipelago.
The input parameter *n* will be passed to the invocations of :func:`pygmo.island.evolve()` for each island.
The :attr:`~pygmo.archipelago.status` attribute can be used to query the status of the asynchronous operations in the
archipelago.

Args:
n (:class:`int`): the parameter that will be passed to :func:`pygmo.island.evolve()`

Raises:
unspecified: any exception thrown by :func:`pygmo.island.evolve()`

)";
}
4887
// Docstring for the read-only pygmo.archipelago.status property.
std::string archipelago_status_docstring()
{
    return R"(Status of the archipelago.

This read-only property will return an :class:`~pygmo.evolve_status` flag indicating the current status of
asynchronous operations in the archipelago. The flag will be:

* ``idle`` if, for all the islands in the archipelago, :attr:`pygmo.island.status`
returns ``idle``;
* ``busy`` if, for at least one island in the archipelago, :attr:`pygmo.island.status`
returns ``busy``, and for no island :attr:`pygmo.island.status` returns an error status;
* ``idle_error`` if no island in the archipelago is busy and for at least one island
:attr:`pygmo.island.status` returns ``idle_error``;
* ``busy_error`` if, for at least one island in the archipelago, :attr:`pygmo.island.status`
returns an error status and at least one island is busy.

Note that after a call to :func:`~pygmo.archipelago.wait_check()`, :attr:`pygmo.archipelago.status` will always return
``idle``, and after a call to :func:`~pygmo.archipelago.wait()`, :attr:`pygmo.archipelago.status`
will always return either ``idle`` or ``idle_error``.

Returns:
:class:`~pygmo.evolve_status`: a flag indicating the current status of asynchronous operations in the archipelago

)";
}
4913
// Docstring for pygmo.archipelago.wait().
std::string archipelago_wait_docstring()
{
    return R"(wait()

Block until all evolutions have finished.

This method will call :func:`pygmo.island.wait()` on all the islands of the archipelago. Exceptions thrown by island
evolutions can be re-raised via :func:`~pygmo.archipelago.wait_check()`: they will **not** be re-thrown by this method.
Also, contrary to :func:`~pygmo.archipelago.wait_check()`, this method will **not** reset the status of the archipelago:
after a call to :func:`~pygmo.archipelago.wait()`, the :attr:`~pygmo.archipelago.status` attribute will
always return either ``idle`` or ``idle_error``.

)";
}
4928
// Docstring for pygmo.archipelago.wait_check().
std::string archipelago_wait_check_docstring()
{
    return R"(wait_check()

Block until all evolutions have finished and raise the first exception that was encountered.

This method will call :func:`pygmo.island.wait_check()` on all the islands of the archipelago (following
the order in which the islands were inserted into the archipelago).
The first exception raised by :func:`pygmo.island.wait_check()` will be re-raised by this method,
and all the exceptions thrown by the other calls to :func:`pygmo.island.wait_check()` will be ignored.

Note that :func:`~pygmo.archipelago.wait_check()` resets the status of the archipelago: after a call to
:func:`~pygmo.archipelago.wait_check()`, the :attr:`~pygmo.archipelago.status` attribute will
always return ``idle``.

Raises:
unspecified: any exception thrown by any evolution task queued in the archipelago's
islands

)";
}
4950
// Docstring for pygmo.archipelago.__getitem__().
std::string archipelago_getitem_docstring()
{
    return R"(__getitem__(i)

This subscript operator can be used to access the *i*-th island of the archipelago (that is, the *i*-th island that was
inserted via :func:`~pygmo.archipelago.push_back()`).

Raises:
IndexError: if *i* is greater than the size of the archipelago

)";
}
4963
// Docstring for pygmo.archipelago.get_champions_f().
std::string archipelago_get_champions_f_docstring()
{
    return R"(get_champions_f()

Get the fitness vectors of the islands' champions.

Returns:
:class:`list` of 1D NumPy float arrays: the fitness vectors of the islands' champions

Raises:
unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., type conversion errors,
mismatched function signatures, etc.)

)";
}
4979
// Docstring for pygmo.archipelago.get_champions_x().
std::string archipelago_get_champions_x_docstring()
{
    return R"(get_champions_x()

Get the decision vectors of the islands' champions.

Returns:
:class:`list` of 1D NumPy float arrays: the decision vectors of the islands' champions

Raises:
unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., type conversion errors,
mismatched function signatures, etc.)

)";
}
4995
// Docstring for pygmo.archipelago.get_migrants_db().
std::string archipelago_get_migrants_db_docstring()
{
    return R"(get_migrants_db()

During the evolution of an archipelago, islands will periodically
store the individuals selected for migration in a *migrant database*.
This is a :class:`list` of :class:`tuple` objects whose
size is equal to the number of islands in the archipelago, and which
contains the current candidate outgoing migrants for each island.

The migrants tuples consist of 3 values each:

* a 1D NumPy array of individual IDs (represented as 64-bit unsigned integrals),
* a 2D NumPy array of decision vectors (i.e., the decision vectors of each individual,
stored in row-major order),
* a 2D NumPy array of fitness vectors (i.e., the fitness vectors of each individual,
stored in row-major order).

Returns:
:class:`list`: a copy of the database of migrants

Raises:
unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., type conversion errors,
mismatched function signatures, etc.)

)";
}
5023
// Docstring for pygmo.archipelago.set_migrants_db().
std::string archipelago_set_migrants_db_docstring()
{
    return R"(set_migrants_db(mig)

During the evolution of an archipelago, islands will periodically
store the individuals selected for migration in a *migrant database*.
This is a :class:`list` of :class:`tuple` objects whose
size is equal to the number of islands in the archipelago, and which
contains the current candidate outgoing migrants for each island.

The migrants tuples consist of 3 values each:

* a 1D NumPy array of individual IDs (represented as 64-bit unsigned integrals),
* a 2D NumPy array of decision vectors (i.e., the decision vectors of each individual,
stored in row-major order),
* a 2D NumPy array of fitness vectors (i.e., the fitness vectors of each individual,
stored in row-major order).

This setter allows to replace the current database of migrants with a new one.

Note that this setter will accept in input a malformed database of migrants without complaining.
An invalid database of migrants will however result in exceptions being raised when migration occurs.

Args:
mig (:class:`list`): the new database of migrants

Raises:
unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., type conversion errors,
mismatched function signatures, etc.)

)";
}
5056
// Docstring for pygmo.archipelago.get_migration_log().
std::string archipelago_get_migration_log_docstring()
{
    return R"(get_migration_log()

Each time an individual migrates from an island (the source) to another
(the destination), an entry will be added to the migration log.
The entry is a :class:`tuple` of 6 elements containing:

* a timestamp of the migration,
* the ID of the individual that migrated,
* the decision and fitness vectors of the individual that migrated,
* the indices of the source and destination islands.

The migration log is a :class:`list` of migration entries.

Returns:
list: a copy of the migration log

Raises:
unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., type conversion errors,
mismatched function signatures, etc.)

)";
}
5081
// Docstring for pygmo.archipelago.get_topology().
// Fix: typo in the cross-reference — ":class:`~pygmo.tyopology`" would not
// resolve; the correct target is :class:`~pygmo.topology` (cf. the
// archipelago class docstring in this file).
std::string archipelago_get_topology_docstring()
{
    return R"(get_topology()

Returns:
:class:`~pygmo.topology`: a copy of the current topology

)";
}
5091
// Docstring for pygmo.archipelago.get_migration_type().
std::string archipelago_get_migration_type_docstring()
{
    return R"(get_migration_type()

Returns:
:class:`~pygmo.migration_type`: the current migration type for this archipelago

)";
}
5101
// Docstring for pygmo.archipelago.set_migration_type().
std::string archipelago_set_migration_type_docstring()
{
    return R"(set_migration_type(mt)

Set a new migration type for this archipelago.

Args:
mt (:class:`~pygmo.migration_type`): the desired migration type for this archipelago

)";
}
5113
// Docstring for archipelago.get_migrant_handling() in the Python bindings.
std::string archipelago_get_migrant_handling_docstring()
{
    return R"(get_migrant_handling()

Returns:
    :class:`~pygmo.migrant_handling`: the current migrant handling policy for this archipelago

)";
}
5123
// Docstring for archipelago.set_migrant_handling() in the Python bindings.
std::string archipelago_set_migrant_handling_docstring()
{
    return R"(set_migrant_handling(mh)

Set a new migrant handling policy for this archipelago.

Args:
    mh (:class:`~pygmo.migrant_handling`): the desired migrant handling policy for this archipelago

)";
}
5135
// Combined class/constructor docstring for the nlopt UDA wrapper.
// The text includes an RST simple table (solver name -> NLopt enum) and a
// +SKIP'd doctest; both are rendered by Sphinx, so formatting is significant.
std::string nlopt_docstring()
{
    return R"(__init__(solver = "cobyla")

NLopt algorithms.

This user-defined algorithm wraps a selection of solvers from the
`NLopt <https://nlopt.readthedocs.io/en/latest/>`__ library, focusing on
local optimisation (both gradient-based and derivative-free). The complete list of supported
NLopt algorithms is:

* COBYLA,
* BOBYQA,
* NEWUOA + bound constraints,
* PRAXIS,
* Nelder-Mead simplex,
* sbplx,
* MMA (Method of Moving Asymptotes),
* CCSA,
* SLSQP,
* low-storage BFGS,
* preconditioned truncated Newton,
* shifted limited-memory variable-metric,
* augmented Lagrangian algorithm.

The desired NLopt solver is selected upon construction of an :class:`~pygmo.nlopt` algorithm. Various properties
of the solver (e.g., the stopping criteria) can be configured via class attributes. Multiple
stopping criteria can be active at the same time: the optimisation will stop as soon as at least one stopping criterion
is satisfied. By default, only the ``xtol_rel`` stopping criterion is active (see :attr:`~pygmo.nlopt.xtol_rel`).

All NLopt solvers support only single-objective optimisation, and, as usual in pygmo, minimisation
is always assumed. The gradient-based algorithms require the optimisation problem to provide a gradient.
Some solvers support equality and/or inequality constraints. The constraints' tolerances will
be set to those specified in the :class:`~pygmo.problem` being optimised (see :attr:`pygmo.problem.c_tol`).

In order to support pygmo's population-based optimisation model, the ``evolve()`` method will select
a single individual from the input :class:`~pygmo.population` to be optimised by the NLopt solver.
If the optimisation produces a better individual (as established by :func:`~pygmo.compare_fc()`),
the optimised individual will be inserted back into the population.
The selection and replacement strategies can be configured via the :attr:`~pygmo.nlopt.selection`
and :attr:`~pygmo.nlopt.replacement` attributes.

.. note::

   This user-defined algorithm is available only if pygmo was compiled with the ``PAGMO_WITH_NLOPT`` option
   enabled (see the :ref:`installation instructions <install>`).

.. seealso::

   The `NLopt website <https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/>`__ contains a detailed description
   of each supported solver.

This constructor will initialise an :class:`~pygmo.nlopt` object which will use the NLopt algorithm specified by
the input string *solver*, the ``"best"`` individual selection strategy and the ``"best"`` individual
replacement strategy. *solver* is translated to an NLopt algorithm type according to the following
translation table:

================================  ====================================
*solver* string                   NLopt algorithm
================================  ====================================
``"cobyla"``                      ``NLOPT_LN_COBYLA``
``"bobyqa"``                      ``NLOPT_LN_BOBYQA``
``"newuoa"``                      ``NLOPT_LN_NEWUOA``
``"newuoa_bound"``                ``NLOPT_LN_NEWUOA_BOUND``
``"praxis"``                      ``NLOPT_LN_PRAXIS``
``"neldermead"``                  ``NLOPT_LN_NELDERMEAD``
``"sbplx"``                       ``NLOPT_LN_SBPLX``
``"mma"``                         ``NLOPT_LD_MMA``
``"ccsaq"``                       ``NLOPT_LD_CCSAQ``
``"slsqp"``                       ``NLOPT_LD_SLSQP``
``"lbfgs"``                       ``NLOPT_LD_LBFGS``
``"tnewton_precond_restart"``     ``NLOPT_LD_TNEWTON_PRECOND_RESTART``
``"tnewton_precond"``             ``NLOPT_LD_TNEWTON_PRECOND``
``"tnewton_restart"``             ``NLOPT_LD_TNEWTON_RESTART``
``"tnewton"``                     ``NLOPT_LD_TNEWTON``
``"var2"``                        ``NLOPT_LD_VAR2``
``"var1"``                        ``NLOPT_LD_VAR1``
``"auglag"``                      ``NLOPT_AUGLAG``
``"auglag_eq"``                   ``NLOPT_AUGLAG_EQ``
================================  ====================================

The parameters of the selected solver can be configured via the attributes of this class.

See also the docs of the C++ class :cpp:class:`pagmo::nlopt`.

.. seealso::

   The `NLopt website <https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/>`__ contains a detailed
   description of each supported solver.

Args:
    solver (:class:`str`): the name of the NLopt algorithm that will be used by this :class:`~pygmo.nlopt` object

Raises:
    RuntimeError: if the NLopt version is not at least 2
    ValueError: if *solver* is not one of the allowed algorithm names
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

Examples:
    >>> from pygmo import *
    >>> nl = nlopt('slsqp')
    >>> nl.xtol_rel = 1E-6 # Change the default value of the xtol_rel stopping criterion
    >>> nl.xtol_rel # doctest: +SKIP
    1E-6
    >>> algo = algorithm(nl)
    >>> algo.set_verbosity(1)
    >>> prob = problem(luksan_vlcek1(20))
    >>> prob.c_tol = [1E-6] * 18 # Set constraints tolerance to 1E-6
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
     objevals:       objval:      violated:    viol. norm:
             1       95959.4             18        538.227 i
             2       89282.7             18        5177.42 i
             3         75580             18        464.206 i
             4         75580             18        464.206 i
             5       77737.6             18        1095.94 i
             6         41162             18        350.446 i
             7         41162             18        350.446 i
             8         67881             18        362.454 i
             9       30502.2             18        249.762 i
            10       30502.2             18        249.762 i
            11       7266.73             18        95.5946 i
            12        4510.3             18        42.2385 i
            13       2400.66             18        35.2507 i
            14       34051.9             18        749.355 i
            15       1657.41             18        32.1575 i
            16       1657.41             18        32.1575 i
            17       1564.44             18        12.5042 i
            18       275.987             14        6.22676 i
            19       232.765             12         12.442 i
            20       161.892             15        4.00744 i
            21       161.892             15        4.00744 i
            22       17.6821             11        1.78909 i
            23       7.71103              5       0.130386 i
            24       6.24758              4     0.00736759 i
            25       6.23325              1    5.12547e-05 i
            26        6.2325              0              0
            27       6.23246              0              0
            28       6.23246              0              0
            29       6.23246              0              0
            30       6.23246              0              0
    <BLANKLINE>
    Optimisation return status: NLOPT_XTOL_REACHED (value = 4, Optimization stopped because xtol_rel or xtol_abs was reached)
    <BLANKLINE>

)";
}
5284
// Docstring for the nlopt.stopval property.
std::string nlopt_stopval_docstring()
{
    return R"(``stopval`` stopping criterion.

The ``stopval`` stopping criterion instructs the solver to stop when an objective value less than
or equal to ``stopval`` is found. Defaults to the C constant ``-HUGE_VAL`` (that is, this stopping criterion
is disabled by default).

Returns:
    :class:`float`: the value of the ``stopval`` stopping criterion

Raises:
    ValueError: if, when setting this property, a ``NaN`` is passed
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5303
// Docstring for the nlopt.ftol_rel property.
std::string nlopt_ftol_rel_docstring()
{
    return R"(``ftol_rel`` stopping criterion.

The ``ftol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the
optimum) changes the objective function value by less than ``ftol_rel`` multiplied by the absolute value of the
function value. Defaults to 0 (that is, this stopping criterion is disabled by default).

Returns:
    :class:`float`: the value of the ``ftol_rel`` stopping criterion

Raises:
    ValueError: if, when setting this property, a ``NaN`` is passed
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5322
// Docstring for the nlopt.ftol_abs property.
std::string nlopt_ftol_abs_docstring()
{
    return R"(``ftol_abs`` stopping criterion.

The ``ftol_abs`` stopping criterion instructs the solver to stop when an optimization step
(or an estimate of the optimum) changes the function value by less than ``ftol_abs``.
Defaults to 0 (that is, this stopping criterion is disabled by default).

Returns:
    :class:`float`: the value of the ``ftol_abs`` stopping criterion

Raises:
    ValueError: if, when setting this property, a ``NaN`` is passed
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5341
// Docstring for the nlopt.xtol_rel property (the only criterion active by default).
std::string nlopt_xtol_rel_docstring()
{
    return R"(``xtol_rel`` stopping criterion.

The ``xtol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the
optimum) changes every parameter by less than ``xtol_rel`` multiplied by the absolute value of the parameter.
Defaults to 1E-8.

Returns:
    :class:`float`: the value of the ``xtol_rel`` stopping criterion

Raises:
    ValueError: if, when setting this property, a ``NaN`` is passed
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5360
// Docstring for the nlopt.xtol_abs property.
std::string nlopt_xtol_abs_docstring()
{
    return R"(``xtol_abs`` stopping criterion.

The ``xtol_abs`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the
optimum) changes every parameter by less than ``xtol_abs``. Defaults to 0 (that is, this stopping criterion is disabled
by default).

Returns:
    :class:`float`: the value of the ``xtol_abs`` stopping criterion

Raises:
    ValueError: if, when setting this property, a ``NaN`` is passed
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5379
// Docstring for the nlopt.maxeval property.
std::string nlopt_maxeval_docstring()
{
    return R"(``maxeval`` stopping criterion.

The ``maxeval`` stopping criterion instructs the solver to stop when the number of function evaluations exceeds
``maxeval``. Defaults to 0 (that is, this stopping criterion is disabled by default).

Returns:
    :class:`int`: the value of the ``maxeval`` stopping criterion

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5396
// Docstring for the nlopt.maxtime property.
std::string nlopt_maxtime_docstring()
{
    return R"(``maxtime`` stopping criterion.

The ``maxtime`` stopping criterion instructs the solver to stop when the optimization time (in seconds) exceeds
``maxtime``. Defaults to 0 (that is, this stopping criterion is disabled by default).

Returns:
    :class:`float`: the value of the ``maxtime`` stopping criterion

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5413
// Docstring for nlopt.get_log().
std::string nlopt_get_log_docstring()
{
    return R"(get_log()

Optimisation log.

The optimisation log is a collection of log data lines. A log data line is a tuple consisting of:

* the number of objective function evaluations made so far,
* the objective function value for the current decision vector,
* the number of constraints violated by the current decision vector,
* the constraints violation norm for the current decision vector,
* a boolean flag signalling the feasibility of the current decision vector.

Returns:
    :class:`list`: the optimisation log

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5437
// Docstring for nlopt.get_last_opt_result().
std::string nlopt_get_last_opt_result_docstring()
{
    return R"(get_last_opt_result()

Get the result of the last optimisation.

Returns:
    :class:`int`: the NLopt return code for the last optimisation run, or ``NLOPT_SUCCESS`` if no optimisations have been run yet

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5453
// Docstring for nlopt.get_solver_name().
std::string nlopt_get_solver_name_docstring()
{
    return R"(get_solver_name()

Get the name of the NLopt solver used during construction.

Returns:
    :class:`str`: the name of the NLopt solver used during construction

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
5469
// Docstring for the nlopt.local_optimizer property.
std::string nlopt_local_optimizer_docstring()
{
    // NOTE: fixed a missing space in the original text
    // ("local optimizer.The verbosity" -> "local optimizer. The verbosity").
    return R"(Local optimizer.

Some NLopt algorithms rely on other NLopt algorithms as local/subsidiary optimizers.
This property, of type :class:`~pygmo.nlopt`, allows to set such local optimizer.
By default, no local optimizer is specified, and the property is set to ``None``.

.. note::

   At the present time, only the ``"auglag"`` and ``"auglag_eq"`` solvers make use
   of a local optimizer. Setting a local optimizer on any other solver will have no effect.

.. note::

   The objective function, bounds, and nonlinear-constraint parameters of the local
   optimizer are ignored (as they are provided by the parent optimizer). Conversely, the stopping
   criteria should be specified in the local optimizer. The verbosity of
   the local optimizer is also forcibly set to zero during the optimisation.

Returns:
    :class:`~pygmo.nlopt`: the local optimizer, or ``None`` if not set

Raises:
    unspecified: any exception thrown by failures at the intersection between C++ and Python
      (e.g., type conversion errors, mismatched function signatures, etc.), when setting the property

)";
}
5499
// Constructor docstring for the sea ((N+1)-ES) UDA.
std::string sea_docstring()
{
    return R"(__init__(gen = 1, seed = random)

(N+1)-ES simple evolutionary algorithm.

Args:
    gen (:class:`int`): number of generations to consider (each generation will compute the objective function once)
    seed (:class:`int`): seed used by the internal random number generator

Raises:
    OverflowError: if *gen* or *seed* are negative or greater than an implementation-defined value
    unspecified: any exception thrown by failures at the intersection between C++ and Python
      (e.g., type conversion errors, mismatched function signatures, etc.)

See also the docs of the C++ class :cpp:class:`pagmo::sea`.

)";
}
5519
// Docstring for sea.get_log(); the example output is +SKIP'd (stochastic).
std::string sea_get_log_docstring()
{
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen.
The log frequency depends on the verbosity parameter (by default nothing is logged) which can be set calling
the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm` constructed with a
:class:`~pygmo.sea`.
A verbosity larger than 1 will produce a log with one entry each verbosity fitness evaluations.
A verbosity equal to 1 will produce a log with one entry at each improvement of the fitness.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``Best``, ``Improvement``, ``Mutations``

    * ``Gen`` (:class:`int`), generation.
    * ``Fevals`` (:class:`int`), number of functions evaluation made.
    * ``Best`` (:class:`float`), the best fitness function found so far.
    * ``Improvement`` (:class:`float`), improvement made by the last mutation.
    * ``Mutations`` (:class:`float`), number of mutated components for the decision vector.

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(sea(500))
    >>> algo.set_verbosity(50)
    >>> prob = problem(schwefel(dim = 20))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:          Best:   Improvement:     Mutations:
       1              1        6363.44        2890.49              2
    1001           1001        1039.92       -562.407              3
    2001           2001        358.966         -632.6              2
    3001           3001         106.08       -995.927              3
    4001           4001         83.391         -266.8              1
    5001           5001        62.4994       -1018.38              3
    6001           6001        39.2851       -732.695              2
    7001           7001        37.2185       -518.847              1
    8001           8001        20.9452        -450.75              1
    9001           9001        17.9193       -270.679              1
    >>> uda = algo.extract(sea)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 1, 6363.442036625835, 2890.4854414320716, 2), (1001, 1001, ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::sea::get_log()`.
)";
}
5565
// Constructor docstring for the ihs (improved harmony search) UDA.
std::string ihs_docstring()
{
    // NOTE: fixed typos in the original text ("mimick" -> "mimic",
    // "seem as unapplicable" -> "seem inapplicable").
    return R"(__init__(gen = 1, phmcr = 0.85, ppar_min = 0.35, ppar_max=0.99, bw_min=1e-5, bw_max=1., seed = random)

Harmony search (HS) is a metaheuristic algorithm said to mimic the improvisation process of musicians.
In the metaphor, each musician (i.e., each variable) plays (i.e., generates) a note (i.e., a value)
for finding a best harmony (i.e., the global optimum) all together.

This pygmo UDA implements the so-called improved harmony search algorithm (IHS), in which the probability
of picking the variables from the decision vector and the amount of mutation to which they are subject
vary (respectively linearly and exponentially) at each call of the ``evolve()`` method.

In this algorithm the number of fitness function evaluations is equal to the number of iterations.
All the individuals in the input population participate in the evolution. A new individual is generated
at every iteration, substituting the current worst individual of the population if better.

.. warning::

   The HS algorithm can and has been criticized, not for its performances,
   but for the use of a metaphor that does not add anything to existing ones. The HS
   algorithm essentially applies mutation and crossover operators to a background population and as such
   should have been developed in the context of Evolutionary Strategies or Genetic Algorithms and studied
   in that context. The use of the musicians metaphor only obscures its internal functioning
   making theoretical results from ES and GA erroneously seem inapplicable to HS.

.. note::

   The original IHS algorithm was designed to solve unconstrained, deterministic single objective problems.
   In pygmo, the algorithm was modified to tackle also multi-objective, constrained (box and non linearly).
   Such extension is original with pygmo.

Args:
    gen (:class:`int`): number of generations to consider (each generation will compute the objective function once)
    phmcr (:class:`float`): probability of choosing from memory (similar to a crossover probability)
    ppar_min (:class:`float`): minimum pitch adjustment rate. (similar to a mutation rate)
    ppar_max (:class:`float`): maximum pitch adjustment rate. (similar to a mutation rate)
    bw_min (:class:`float`): minimum distance bandwidth. (similar to a mutation width)
    bw_max (:class:`float`): maximum distance bandwidth. (similar to a mutation width)
    seed (:class:`int`): seed used by the internal random number generator

Raises:
    OverflowError: if *gen* or *seed* are negative or greater than an implementation-defined value
    ValueError: if *phmcr* is not in the ]0,1[ interval, *ppar_min* or *ppar_max* are not in the ]0,1[
      interval, min/max quantities are less than/greater than max/min quantities, *bw_min* is negative.
    unspecified: any exception thrown by failures at the intersection between C++ and Python
      (e.g., type conversion errors, mismatched function signatures, etc.)

See also the docs of the C++ class :cpp:class:`pagmo::ihs`.

)";
}
5617
// Docstring for ihs.get_log(); the example output is +SKIP'd (stochastic).
std::string ihs_get_log_docstring()
{
    // NOTE: fixed a missing space in the returned-values list
    // ("``Viol. Norm``,``ideal``" -> "``Viol. Norm``, ``ideal``").
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen.
The log frequency depends on the verbosity parameter (by default nothing is logged) which can be set calling
the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm` constructed with a
:class:`~pygmo.ihs`.
A verbosity larger than 1 will produce a log with one entry each verbosity fitness evaluations.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Fevals``, ``ppar``, ``bw``, ``dx``, ``df``, ``Violated``, ``Viol. Norm``, ``ideal``

    * ``Fevals`` (:class:`int`), number of functions evaluation made.
    * ``ppar`` (:class:`float`), the pitch adjustment rate.
    * ``bw`` (:class:`float`), the distance bandwidth.
    * ``dx`` (:class:`float`), the population flatness evaluated as the distance between the decisions vector of the best and of the worst individual (or -1 in a multiobjective case).
    * ``df`` (:class:`float`), the population flatness evaluated as the distance between the fitness of the best and of the worst individual (or -1 in a multiobjective case).
    * ``Violated`` (:class:`int`), the number of constraints violated by the current decision vector.
    * ``Viol. Norm`` (:class:`float`), the constraints violation norm for the current decision vector.
    * ``ideal_point`` (1D numpy array), the ideal point of the current population (cropped to max 5 dimensions only in the screen output)

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(ihs(20000))
    >>> algo.set_verbosity(2000)
    >>> prob = problem(hock_schittkowsky_71())
    >>> prob.c_tol = [1e-1]*2
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Fevals:          ppar:            bw:            dx:            df:      Violated:    Viol. Norm:        ideal1:
          1       0.350032       0.999425        4.88642        14.0397              0              0        43.2982
       2001       0.414032       0.316046        5.56101        25.7009              0              0        33.4251
       4001       0.478032      0.0999425          5.036        26.9657              0              0        19.0052
       6001       0.542032      0.0316046        3.77292        23.9992              0              0        19.0052
       8001       0.606032     0.00999425        3.97937        16.0803              0              0        18.1803
      10001       0.670032     0.00316046        1.15023        1.57947              0              0        17.8626
      12001       0.734032    0.000999425       0.017882      0.0185438              0              0        17.5894
      14001       0.798032    0.000316046     0.00531358      0.0074745              0              0        17.5795
      16001       0.862032    9.99425e-05     0.00270865     0.00155563              0              0        17.5766
      18001       0.926032    3.16046e-05     0.00186637     0.00167523              0              0        17.5748
    >>> uda = algo.extract(ihs)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 0.35003234534534, 0.9994245193792801, 4.886415773459253, 14.0397487316794, ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::ihs::get_log()`.
)";
}
5666
// Constructor docstring for the sga (simple genetic algorithm) UDA.
std::string sga_docstring()
{
    // NOTE: fixes to the original text:
    // - typos "startegy" -> "strategy", "muattion" -> "mutation", "input_f" -> "if";
    // - ".. note:" -> ".. note::" (the single colon was not a valid RST directive);
    // - the Raises section listed "roulette" among the selection values, but the
    //   valid selection strategies are ``tournament`` and ``truncated`` (the text
    //   itself explains that roulette wheel selection is deliberately not provided);
    // - in the last Raises clause, the [1,100] range check applies to *param_m*
    //   (the polynomial distribution index), not to *mutation*;
    // - quoting of ``truncated`` made consistent with the other option names.
    return R"(__init__(gen = 1, cr = .90, eta_c = 1., m = 0.02, param_m = 1., param_s = 2, crossover = "exponential", mutation = "polynomial", selection = "tournament", seed = random)

A Simple Genetic Algorithm

.. versionadded:: 2.2

Approximately during the same decades as Evolutionary Strategies (see :class:`~pygmo.sea`) were studied,
a different group led by John Holland, and later by his student David Goldberg, introduced and
studied an algorithmic framework called "genetic algorithms" that were, essentially, leveraging on
the same idea but introducing also crossover as a genetic operator. This led to a few decades of
confusion and discussions on what was an evolutionary strategy and what a genetic algorithm and on
whether the crossover was a useful operator or mutation only algorithms were to be preferred.

In pygmo we provide a rather classical implementation of a genetic algorithm, letting the user choose between
selected crossover types, selection schemes and mutation types.

The pseudo code of our version is:

.. code-block:: none

   > Start from a population (pop) of dimension N
   > while i < gen
   > > Selection: create a new population (pop2) with N individuals selected from pop (with repetition allowed)
   > > Crossover: create a new population (pop3) with N individuals obtained applying crossover to pop2
   > > Mutation: create a new population (pop4) with N individuals obtained applying mutation to pop3
   > > Evaluate all new chromosomes in pop4
   > > Reinsertion: set pop to contain the best N individuals taken from pop and pop4

The various blocks of pygmo genetic algorithm are listed below:

*Selection*: two selection methods are provided: ``tournament`` and ``truncated``. ``Tournament`` selection works by
selecting each offspring as the one having the minimal fitness in a random group of size *param_s*. The ``truncated``
selection, instead, works selecting the best *param_s* chromosomes in the entire population over and over.
We have deliberately not implemented the popular roulette wheel selection as we are of the opinion that such
a system does not generalize much being highly sensitive to the fitness scaling.

*Crossover*: four different crossover schemes are provided: ``single``, ``exponential``, ``binomial``, ``sbx``. The
``single`` point crossover, works selecting a random point in the parent chromosome and, with probability *cr*, inserting the
partner chromosome thereafter. The ``exponential`` crossover is taken from the algorithm differential evolution,
implemented, in pygmo, as :class:`~pygmo.de`. It essentially selects a random point in the parent chromosome and inserts,
in each successive gene, the partner values with probability *cr* up to when it stops. The binomial crossover
inserts each gene from the partner with probability *cr*. The simulated binary crossover (called ``sbx``), is taken
from the NSGA-II algorithm, implemented in pygmo as :class:`~pygmo.nsga2`, and makes use of an additional parameter called
distribution index *eta_c*.

*Mutation*: three different mutations schemes are provided: ``uniform``, ``gaussian`` and ``polynomial``. Uniform mutation
simply randomly samples from the bounds. Gaussian mutation samples around each gene using a normal distribution
with standard deviation proportional to the *param_m* and the bounds width. The last scheme is the ``polynomial``
mutation from Deb.

*Reinsertion*: the only reinsertion strategy provided is what we call pure elitism. After each generation
all parents and children are put in the same pool and only the best are passed to the next generation.

.. note::

   This algorithm will work only for box bounded problems.

Args:
    gen (:class:`int`): number of generations.
    cr (:class:`float`): crossover probability.
    eta_c (:class:`float`): distribution index for ``sbx`` crossover. This parameter is inactive if other types of crossover are selected.
    m (:class:`float`): mutation probability.
    param_m (:class:`float`): distribution index (``polynomial`` mutation), gaussian width (``gaussian`` mutation) or inactive (``uniform`` mutation)
    param_s (:class:`float`): the number of best individuals to use in ``truncated`` selection or the size of the tournament in ``tournament`` selection.
    crossover (:class:`str`): the crossover strategy. One of ``exponential``, ``binomial``, ``single`` or ``sbx``
    mutation (:class:`str`): the mutation strategy. One of ``gaussian``, ``polynomial`` or ``uniform``.
    selection (:class:`str`): the selection strategy. One of ``tournament``, ``truncated``.
    seed (:class:`int`): seed used by the internal random number generator

Raises:
    OverflowError: if *gen* or *seed* are negative or greater than an implementation-defined value
    ValueError: if *cr* is not in [0,1], if *eta_c* is not in [1,100], if *m* is not in [0,1], if *mutation*
      is not one of ``gaussian``, ``uniform`` or ``polynomial``, if *selection* not one of ``tournament``,
      ``truncated`` or *crossover* is not one of ``exponential``, ``binomial``, ``sbx``, ``single``, if *param_m* is
      not in [0,1] and *mutation* is not ``polynomial``, if *param_m* is not in [1,100] and *mutation* is ``polynomial``
    unspecified: any exception thrown by failures at the intersection between C++ and Python
      (e.g., type conversion errors, mismatched function signatures, etc.)

See also the docs of the C++ class :cpp:class:`pagmo::sga`.
)";
}
5750
// Docstring for sga.get_log(); the example output is +SKIP'd (stochastic).
std::string sga_get_log_docstring()
{
    // NOTE: the original example was copy-pasted from sea: it extracted the
    // wrong UDA (``algo.extract(sea)``) and showed a "Mutations" column that the
    // documented sga log tuple does not contain. Both are fixed here, and the
    // returned values are formatted as a bullet list for consistency with the
    // other get_log() docstrings in this file.
    return R"(get_log()

Returns a log containing relevant parameters recorded during the last call to ``evolve()`` and printed to screen.
The log frequency depends on the verbosity parameter (by default nothing is logged) which can be set calling
the method :func:`~pygmo.algorithm.set_verbosity()` on an :class:`~pygmo.algorithm` constructed with a
:class:`~pygmo.sga`.
A verbosity larger than 1 will produce a log with one entry each verbosity fitness evaluations.
A verbosity equal to 1 will produce a log with one entry at each improvement of the fitness.

Returns:
    :class:`list` of :class:`tuple`: at each logged epoch, the values ``Gen``, ``Fevals``, ``Best``, ``Improvement``

    * ``Gen`` (:class:`int`), generation.
    * ``Fevals`` (:class:`int`), number of functions evaluation made.
    * ``Best`` (:class:`float`), the best fitness function found so far.
    * ``Improvement`` (:class:`float`), improvement made by the last generation.

Examples:
    >>> from pygmo import *
    >>> algo = algorithm(sga(gen = 500))
    >>> algo.set_verbosity(50)
    >>> prob = problem(schwefel(dim = 20))
    >>> pop = population(prob, 20)
    >>> pop = algo.evolve(pop) # doctest: +SKIP
    Gen:        Fevals:          Best:   Improvement:
       1              1        6363.44        2890.49
      51           1001        1039.92       -562.407
     101           2001        358.966         -632.6
     151           3001         106.08       -995.927
     201           4001         83.391         -266.8
     251           5001        62.4994       -1018.38
     301           6001        39.2851       -732.695
     351           7001        37.2185       -518.847
     401           8001        20.9452        -450.75
     451           9001        17.9193       -270.679
    >>> uda = algo.extract(sga)
    >>> uda.get_log() # doctest: +SKIP
    [(1, 1, 6363.442036625835, 2890.4854414320716), (51, 1001, ...

See also the docs of the relevant C++ method :cpp:func:`pagmo::sga::get_log()`.
)";
}
5795
ipopt_docstring()5796 std::string ipopt_docstring()
5797 {
5798 return R"(__init__()
5799
5800 Ipopt.
5801
5802 .. versionadded:: 2.2
5803
5804 This class is a user-defined algorithm (UDA) that wraps the Ipopt (Interior Point OPTimizer) solver,
5805 a software package for large-scale nonlinear optimization. Ipopt is a powerful solver that
5806 is able to handle robustly and efficiently constrained nonlinear opimization problems at high dimensionalities.
5807
5808 Ipopt supports only single-objective minimisation, and it requires the availability of the gradient in the
5809 optimisation problem. If possible, for best results the Hessians should be provided as well (but Ipopt
5810 can estimate numerically the Hessians if needed).
5811
5812 In order to support pygmo's population-based optimisation model, the ``evolve()`` method will select
5813 a single individual from the input :class:`~pygmo.population` to be optimised.
5814 If the optimisation produces a better individual (as established by :func:`~pygmo.compare_fc()`),
5815 the optimised individual will be inserted back into the population. The selection and replacement strategies
5816 can be configured via the :attr:`~pygmo.ipopt.selection` and :attr:`~pygmo.ipopt.replacement` attributes.
5817
5818 Ipopt supports a large amount of options for the configuration of the optimisation run. The options
5819 are divided into three categories:
5820
5821 * *string* options (i.e., the type of the option is :class:`str`),
5822 * *integer* options (i.e., the type of the option is :class:`int`),
5823 * *numeric* options (i.e., the type of the option is :class:`float`).
5824
5825 The full list of options is available on the `Ipopt website <https://coin-or.github.io/Ipopt/OPTIONS.html>`__.
5826 :class:`pygmo.ipopt` allows to configure any Ipopt option via methods such as :func:`~pygmo.ipopt.set_string_options()`,
5827 :func:`~pygmo.ipopt.set_string_option()`, :func:`~pygmo.ipopt.set_integer_options()`, etc., which need to be used before
5828 invoking the ``evolve()`` method.
5829
5830 If the user does not set any option, :class:`pygmo.ipopt` use Ipopt's default values for the options (see the
5831 `documentation <https://coin-or.github.io/Ipopt/OPTIONS.html>`__), with the following
5832 modifications:
5833
5834 * if the ``"print_level"`` integer option is **not** set by the user, it will be set to 0 by :class:`pygmo.ipopt` (this will
5835 suppress most screen output produced by the solver - note that we support an alternative form of logging via
5836 the :func:`pygmo.algorithm.set_verbosity()` machinery);
5837 * if the ``"hessian_approximation"`` string option is **not** set by the user and the optimisation problem does
5838 **not** provide the Hessians, then the option will be set to ``"limited-memory"`` by :class:`pygmo.ipopt`. This makes it
5839 possible to optimise problems without Hessians out-of-the-box (i.e., Ipopt will approximate numerically the
5840 Hessians for you);
5841 * if the ``"constr_viol_tol"`` numeric option is **not** set by the user and the optimisation problem is constrained,
5842 then :class:`pygmo.ipopt` will compute the minimum value ``min_tol`` in the vector returned by :attr:`pygmo.problem.c_tol`
5843 for the optimisation problem at hand. If ``min_tol`` is nonzero, then the ``"constr_viol_tol"`` Ipopt option will
5844 be set to ``min_tol``, otherwise the default Ipopt value (1E-4) will be used for the option. This ensures that,
5845 if the constraint tolerance is not explicitly set by the user, a solution deemed feasible by Ipopt is also
5846 deemed feasible by pygmo (but the opposite is not necessarily true).
5847
5848 .. note::
5849
5850 This user-defined algorithm is available only if pygmo was compiled with the ``PAGMO_WITH_IPOPT`` option
5851 enabled (see the :ref:`installation instructions <install>`).
5852
5853 .. note::
5854
5855 Ipopt is not thread-safe, and thus it cannot be used in a :class:`pygmo.thread_island`.
5856
5857 .. seealso::
5858
5859 https://projects.coin-or.org/Ipopt.
5860
5861 See also the docs of the C++ class :cpp:class:`pagmo::ipopt`.
5862
5863 Examples:
5864 >>> from pygmo import *
5865 >>> ip = ipopt()
5866 >>> ip.set_numeric_option("tol",1E-9) # Change the relative convergence tolerance
5867 >>> ip.get_numeric_options() # doctest: +SKIP
5868 {'tol': 1e-09}
5869 >>> algo = algorithm(ip)
5870 >>> algo.set_verbosity(1)
5871 >>> prob = problem(luksan_vlcek1(20))
5872 >>> prob.c_tol = [1E-6] * 18 # Set constraints tolerance to 1E-6
5873 >>> pop = population(prob, 20)
5874 >>> pop = algo.evolve(pop) # doctest: +SKIP
5875 <BLANKLINE>
5876 ******************************************************************************
5877 This program contains Ipopt, a library for large-scale nonlinear optimization.
5878 Ipopt is released as open source code under the Eclipse Public License (EPL).
5879 For more information visit http://projects.coin-or.org/Ipopt
5880 ******************************************************************************
5881 <BLANKLINE>
5882 <BLANKLINE>
5883 objevals: objval: violated: viol. norm:
5884 1 201174 18 1075.3 i
5885 2 209320 18 691.814 i
5886 3 36222.3 18 341.639 i
5887 4 11158.1 18 121.097 i
5888 5 4270.38 18 46.4742 i
5889 6 2054.03 18 20.7306 i
5890 7 705.959 18 5.43118 i
5891 8 37.8304 18 1.52099 i
5892 9 2.89066 12 0.128862 i
5893 10 0.300807 3 0.0165902 i
5894 11 0.00430279 3 0.000496496 i
5895 12 7.54121e-06 2 9.70735e-06 i
5896 13 4.34249e-08 0 0
5897 14 3.71925e-10 0 0
5898 15 3.54406e-13 0 0
5899 16 2.37071e-18 0 0
5900 <BLANKLINE>
5901 Optimisation return status: Solve_Succeeded (value = 0)
5902 <BLANKLINE>
5903 )";
5904 }
5905
ipopt_get_log_docstring()5906 std::string ipopt_get_log_docstring()
5907 {
5908 return R"(get_log()
5909
5910 Optimisation log.
5911
5912 The optimisation log is a collection of log data lines. A log data line is a tuple consisting of:
5913
5914 * the number of objective function evaluations made so far,
5915 * the objective function value for the current decision vector,
5916 * the number of constraints violated by the current decision vector,
5917 * the constraints violation norm for the current decision vector,
5918 * a boolean flag signalling the feasibility of the current decision vector.
5919
5920 Returns:
5921 :class:`list`: the optimisation log
5922
5923 Raises:
5924 unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
5925 type conversion errors, mismatched function signatures, etc.)
5926
5927 .. warning::
5928
5929 The number of constraints violated, the constraints violation norm and the feasibility flag stored in the log
5930 are all determined via the facilities and the tolerances specified within :class:`pygmo.problem`. That
5931 is, they might not necessarily be consistent with Ipopt's notion of feasibility. See the explanation
5932 of how the ``"constr_viol_tol"`` numeric option is handled in :class:`pygmo.ipopt`.
5933
5934 .. note::
5935
5936 Ipopt supports its own logging format and protocol, including the ability to print to screen and write to file.
5937 Ipopt's screen logging is disabled by default (i.e., the Ipopt verbosity setting is set to 0 - see
5938 :class:`pygmo.ipopt`). On-screen logging can be enabled via the ``"print_level"`` string option.
5939
5940 )";
5941 }
5942
ipopt_get_last_opt_result_docstring()5943 std::string ipopt_get_last_opt_result_docstring()
5944 {
5945 return R"(get_last_opt_result()
5946
5947 Get the result of the last optimisation.
5948
5949 Returns:
5950 :class:`int`: the Ipopt return code for the last optimisation run, or ``Ipopt::Solve_Succeeded`` if no optimisations have been run yet
5951
5952 Raises:
5953 unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
5954 type conversion errors, mismatched function signatures, etc.)
5955
5956 Examples:
5957 >>> from pygmo import *
5958 >>> ip = ipopt()
5959 >>> ip.get_last_opt_result()
5960 0
5961
5962 )";
5963 }
5964
ipopt_set_string_option_docstring()5965 std::string ipopt_set_string_option_docstring()
5966 {
5967 return R"(set_string_option(name, value)
5968
5969 Set string option.
5970
5971 This method will set the optimisation string option *name* to *value*.
5972 The optimisation options are passed to the Ipopt API when calling the ``evolve()`` method.
5973
5974 Args:
5975 name (:class:`str`): the name of the option
5976 value (:class:`str`): the value of the option
5977
5978 Raises:
5979 unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
5980 type conversion errors, mismatched function signatures, etc.)
5981
5982 Examples:
5983 >>> from pygmo import *
5984 >>> ip = ipopt()
5985 >>> ip.set_string_option("hessian_approximation","limited-memory")
5986 >>> algorithm(ip) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
5987 Algorithm name: Ipopt: Interior Point Optimization [deterministic]
5988 C++ class name: ...
5989 <BLANKLINE>
5990 Thread safety: none
5991 <BLANKLINE>
5992 Extra info:
5993 Last optimisation return code: Solve_Succeeded (value = 0)
5994 Verbosity: 0
5995 Individual selection policy: best
5996 Individual replacement policy: best
5997 String options: {hessian_approximation : limited-memory}
5998 <BLANKLINE>
5999 )";
6000 }
6001
ipopt_set_string_options_docstring()6002 std::string ipopt_set_string_options_docstring()
6003 {
6004 return R"(set_string_options(opts)
6005
6006 Set string options.
6007
6008 This method will set the optimisation string options contained in *opts*.
6009 It is equivalent to calling :func:`~pygmo.ipopt.set_string_option()` passing all the name-value pairs in *opts*
6010 as arguments.
6011
6012 Args:
6013 opts (:class:`dict` of :class:`str`-:class:`str` pairs): the name-value map that will be used to set the options
6014
6015 Raises:
6016 unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
6017 type conversion errors, mismatched function signatures, etc.)
6018
6019 Examples:
6020 >>> from pygmo import *
6021 >>> ip = ipopt()
6022 >>> ip.set_string_options({"hessian_approximation":"limited-memory", "limited_memory_initialization":"scalar1"})
6023 >>> algorithm(ip) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
6024 Algorithm name: Ipopt: Interior Point Optimization [deterministic]
6025 C++ class name: ...
6026 <BLANKLINE>
6027 Thread safety: none
6028 <BLANKLINE>
6029 Extra info:
6030 Last optimisation return code: Solve_Succeeded (value = 0)
6031 Verbosity: 0
6032 Individual selection policy: best
6033 Individual replacement policy: best
6034 String options: {hessian_approximation : limited-memory, limited_memory_initialization : scalar1}
6035
6036 )";
6037 }
6038
ipopt_get_string_options_docstring()6039 std::string ipopt_get_string_options_docstring()
6040 {
6041 return R"(get_string_options()
6042
6043 Get string options.
6044
6045 Returns:
6046 :class:`dict` of :class:`str`-:class:`str` pairs: a name-value dictionary of optimisation string options
6047
6048 Examples:
6049 >>> from pygmo import *
6050 >>> ip = ipopt()
6051 >>> ip.set_string_option("hessian_approximation","limited-memory")
6052 >>> ip.get_string_options()
6053 {'hessian_approximation': 'limited-memory'}
6054
6055 )";
6056 }
6057
ipopt_reset_string_options_docstring()6058 std::string ipopt_reset_string_options_docstring()
6059 {
6060 return R"(reset_string_options()
6061
6062 Clear all string options.
6063
6064 Examples:
6065 >>> from pygmo import *
6066 >>> ip = ipopt()
6067 >>> ip.set_string_option("hessian_approximation","limited-memory")
6068 >>> ip.get_string_options()
6069 {'hessian_approximation': 'limited-memory'}
6070 >>> ip.reset_string_options()
6071 >>> ip.get_string_options()
6072 {}
6073
6074 )";
6075 }
6076
ipopt_set_integer_option_docstring()6077 std::string ipopt_set_integer_option_docstring()
6078 {
6079 return R"(set_integer_option(name, value)
6080
6081 Set integer option.
6082
6083 This method will set the optimisation integer option *name* to *value*.
6084 The optimisation options are passed to the Ipopt API when calling the ``evolve()`` method.
6085
6086 Args:
6087 name (:class:`str`): the name of the option
6088 value (:class:`int`): the value of the option
6089
6090 Raises:
6091 unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
6092 type conversion errors, mismatched function signatures, etc.)
6093
6094 Examples:
6095 >>> from pygmo import *
6096 >>> ip = ipopt()
6097 >>> ip.set_integer_option("print_level",3)
6098 >>> algorithm(ip) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
6099 Algorithm name: Ipopt: Interior Point Optimization [deterministic]
6100 C++ class name: ...
6101 <BLANKLINE>
6102 Thread safety: none
6103 <BLANKLINE>
6104 Extra info:
6105 Last optimisation return code: Solve_Succeeded (value = 0)
6106 Verbosity: 0
6107 Individual selection policy: best
6108 Individual replacement policy: best
6109 Integer options: {print_level : 3}
6110
6111 )";
6112 }
6113
ipopt_set_integer_options_docstring()6114 std::string ipopt_set_integer_options_docstring()
6115 {
6116 return R"(set_integer_options(opts)
6117
6118 Set integer options.
6119
6120 This method will set the optimisation integer options contained in *opts*.
6121 It is equivalent to calling :func:`~pygmo.ipopt.set_integer_option()` passing all the name-value pairs in *opts*
6122 as arguments.
6123
6124 Args:
6125 opts (:class:`dict` of :class:`str`-:class:`int` pairs): the name-value map that will be used to set the options
6126
6127 Raises:
6128 unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
6129 type conversion errors, mismatched function signatures, etc.)
6130
6131 Examples:
6132 >>> from pygmo import *
6133 >>> ip = ipopt()
6134 >>> ip.set_integer_options({"filter_reset_trigger":4, "print_level":3})
6135 >>> algorithm(ip) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
6136 Algorithm name: Ipopt: Interior Point Optimization [deterministic]
6137 C++ class name: ...
6138 <BLANKLINE>
6139 Thread safety: none
6140 <BLANKLINE>
6141 Extra info:
6142 Last optimisation return code: Solve_Succeeded (value = 0)
6143 Verbosity: 0
6144 Individual selection policy: best
6145 Individual replacement policy: best
6146 Integer options: {filter_reset_trigger : 4, print_level : 3}
6147
6148 )";
6149 }
6150
ipopt_get_integer_options_docstring()6151 std::string ipopt_get_integer_options_docstring()
6152 {
6153 return R"(get_integer_options()
6154
6155 Get integer options.
6156
6157 Returns:
6158 :class:`dict` of :class:`str`-:class:`int` pairs: a name-value dictionary of optimisation integer options
6159
6160 Examples:
6161 >>> from pygmo import *
6162 >>> ip = ipopt()
6163 >>> ip.set_integer_option("print_level",3)
6164 >>> ip.get_integer_options()
6165 {'print_level': 3}
6166
6167 )";
6168 }
6169
ipopt_reset_integer_options_docstring()6170 std::string ipopt_reset_integer_options_docstring()
6171 {
6172 return R"(reset_integer_options()
6173
6174 Clear all integer options.
6175
6176 Examples:
6177 >>> from pygmo import *
6178 >>> ip = ipopt()
6179 >>> ip.set_integer_option("print_level",3)
6180 >>> ip.get_integer_options()
6181 {'print_level': 3}
6182 >>> ip.reset_integer_options()
6183 >>> ip.get_integer_options()
6184 {}
6185
6186 )";
6187 }
6188
ipopt_set_numeric_option_docstring()6189 std::string ipopt_set_numeric_option_docstring()
6190 {
6191 return R"(set_numeric_option(name, value)
6192
6193 Set numeric option.
6194
6195 This method will set the optimisation numeric option *name* to *value*.
6196 The optimisation options are passed to the Ipopt API when calling the ``evolve()`` method.
6197
6198 Args:
6199 name (:class:`str`): the name of the option
6200 value (:class:`float`): the value of the option
6201
6202 Raises:
6203 unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
6204 type conversion errors, mismatched function signatures, etc.)
6205
6206 Examples:
6207 >>> from pygmo import *
6208 >>> ip = ipopt()
6209 >>> ip.set_numeric_option("tol",1E-6)
6210 >>> algorithm(ip) # doctest: +SKIP +ELLIPSIS
6211 Algorithm name: Ipopt: Interior Point Optimization [deterministic]
6212 C++ class name: ...
6213 <BLANKLINE>
6214 Thread safety: none
6215 <BLANKLINE>
6216 Extra info:
6217 Last optimisation return code: Solve_Succeeded (value = 0)
6218 Verbosity: 0
6219 Individual selection policy: best
6220 Individual replacement policy: best
6221 Numeric options: {tol : 1E-6}
6222
6223 )";
6224 }
6225
ipopt_set_numeric_options_docstring()6226 std::string ipopt_set_numeric_options_docstring()
6227 {
6228 return R"(set_numeric_options(opts)
6229
6230 Set numeric options.
6231
6232 This method will set the optimisation numeric options contained in *opts*.
6233 It is equivalent to calling :func:`~pygmo.ipopt.set_numeric_option()` passing all the name-value pairs in *opts*
6234 as arguments.
6235
6236 Args:
6237 opts (:class:`dict` of :class:`str`-:class:`float` pairs): the name-value map that will be used to set the options
6238
6239 Raises:
6240 unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
6241 type conversion errors, mismatched function signatures, etc.)
6242
6243 Examples:
6244 >>> from pygmo import *
6245 >>> ip = ipopt()
6246 >>> ip.set_numeric_options({"tol":1E-4, "constr_viol_tol":1E-3})
6247 >>> algorithm(ip) # doctest: +SKIP +ELLIPSIS
6248 Algorithm name: Ipopt: Interior Point Optimization [deterministic]
6249 C++ class name: ...
6250 <BLANKLINE>
6251 Thread safety: none
6252 <BLANKLINE>
6253 Extra info:
6254 Last optimisation return code: Solve_Succeeded (value = 0)
6255 Verbosity: 0
6256 Individual selection policy: best
6257 Individual replacement policy: best
6258 Numeric options: {constr_viol_tol : 1E-3, tol : 1E-4}
6259
6260 )";
6261 }
6262
ipopt_get_numeric_options_docstring()6263 std::string ipopt_get_numeric_options_docstring()
6264 {
6265 return R"(get_numeric_options()
6266
6267 Get numeric options.
6268
6269 Returns:
6270 :class:`dict` of :class:`str`-:class:`float` pairs: a name-value dictionary of optimisation numeric options
6271
6272 Examples:
6273 >>> from pygmo import *
6274 >>> ip = ipopt()
6275 >>> ip.set_numeric_option("tol",1E-4)
6276 >>> ip.get_numeric_options() # doctest: +SKIP
6277 {'tol': 1E-4}
6278
6279 )";
6280 }
6281
ipopt_reset_numeric_options_docstring()6282 std::string ipopt_reset_numeric_options_docstring()
6283 {
6284 return R"(reset_numeric_options()
6285
6286 Clear all numeric options.
6287
6288 Examples:
6289 >>> from pygmo import *
6290 >>> ip = ipopt()
6291 >>> ip.set_numeric_option("tol",1E-4)
6292 >>> ip.get_numeric_options() # doctest: +SKIP
6293 {'tol': 1E-4}
6294 >>> ip.reset_numeric_options()
6295 >>> ip.get_numeric_options()
6296 {}
6297 )";
6298 }
6299
bfe_docstring()6300 std::string bfe_docstring()
6301 {
6302 return R"(__init__(udbfe = default_bfe())
6303
6304 Batch fitness evaluator.
6305
6306 This class implements the evaluation of decision vectors in batch mode. That is,
6307 whereas a :class:`pygmo.problem` provides the means to evaluate a single decision
6308 vector via the :func:`pygmo.problem.fitness()` method, a
6309 :class:`~pygmo.bfe` (short for *batch fitness evaluator*) enables a :class:`~pygmo.problem`
6310 to evaluate the fitnesses of a group (or a *batch*) of decision vectors, possibly
6311 in a parallel/vectorised fashion.
6312
6313 Together with the :func:`pygmo.problem.batch_fitness()` method,
6314 :class:`~pygmo.bfe` is one of the mechanisms provided
6315 by pagmo to enable a form of parallelism on a finer level than the
6316 :class:`~pygmo.archipelago` and :class:`~pygmo.island` classes.
6317 However, while the :func:`pygmo.problem.batch_fitness()` method must be
6318 implemented on a UDP-by-UDP basis, a :class:`~pygmo.bfe`
6319 provides generic batch fitness evaluation capabilities for any :class:`~pygmo.problem`,
6320 and it can thus be used also with UDPs which do not implement the
6321 :func:`pygmo.problem.batch_fitness()` method.
6322
6323 Like :class:`~pygmo.problem`, :class:`~pygmo.algorithm`, and many other
6324 pagmo classes, :class:`~pygmo.bfe` is a generic container
6325 which stores internally
6326 a user-defined batch fitness evaluator (UDBFE for short) which actually
6327 implements the fitness evaluation in batch mode. Users are free to either
6328 use one of the evaluators provided with pagmo, or to write their own UDBFE.
6329
6330 Every UDBFE must be a callable (i.e., a function or a class with a call
6331 operator) with a signature equivalent to
6332
6333 .. code-block::
6334
6335 def __call__(self, prob, dvs):
6336 ...
6337
6338 UDBFEs receive in input a :class:`~pygmo.problem` and a batch of decision vectors
6339 stored contiguously in an array-like object, and they return
6340 a NumPy array containing the fitness vectors
6341 corresponding to the input batch of decision vectors (as evaluated by the input problem and
6342 stored contiguously).
6343
6344 UDBFEs can also implement the following (optional) methods:
6345
6346 .. code-block::
6347
6348 def get_name(self):
6349 ...
6350 def get_extra_info(self):
6351 ...
6352
6353 See the documentation of the corresponding methods in this class for details on how the optional
6354 methods in the UDBFE are used by :class:`~pygmo.bfe`.
6355
6356 This class is the Python counterpart of the C++ class :cpp:class:`pagmo::bfe`.
6357
6358 Args:
6359 udbfe: a user-defined batch fitness evaluator, either C++ or Python
6360
6361 Raises:
6362 NotImplementedError: if *udbfe* does not implement the mandatory methods detailed above
6363 unspecified: any exception thrown by methods of the UDBFE invoked during construction,
6364 the deep copy of the UDBFE, the constructor of the underlying C++ class, or
6365 failures at the intersection between C++ and Python (e.g., type conversion errors, mismatched function
6366 signatures, etc.)
6367
6368 )";
6369 }
6370
bfe_get_name_docstring()6371 std::string bfe_get_name_docstring()
6372 {
6373 return R"(get_name()
6374
6375 Bfe's name.
6376
6377 If the UDBFE provides a ``get_name()`` method, then this method will return the output of its ``get_name()`` method.
6378 Otherwise, an implementation-defined name based on the type of the UDBFE will be returned.
6379
6380 Returns:
6381 str: the bfe's name
6382
6383 )";
6384 }
6385
bfe_get_extra_info_docstring()6386 std::string bfe_get_extra_info_docstring()
6387 {
6388 return R"(get_extra_info()
6389
6390 Bfe's extra info.
6391
6392 If the UDBFE provides a ``get_extra_info()`` method, then this method will return the output of its ``get_extra_info()``
6393 method. Otherwise, an empty string will be returned.
6394
6395 Returns:
6396 str: extra info about the UDBFE
6397
6398 Raises:
6399 unspecified: any exception thrown by the ``get_extra_info()`` method of the UDBFE
6400
6401 )";
6402 }
6403
bfe_get_thread_safety_docstring()6404 std::string bfe_get_thread_safety_docstring()
6405 {
6406 return R"(get_thread_safety()
6407
6408 Bfe's thread safety level.
6409
6410 This method will return a value of the enum :class:`pygmo.thread_safety` which indicates the thread safety level
6411 of the UDBFE. Unlike in C++, in Python it is not possible to re-implement this method in the UDBFE. That is, for C++
6412 UDBFEs, the returned value will be the value returned by the ``get_thread_safety()`` method of the UDBFE. For Python
6413 UDBFEs, the returned value will be unconditionally ``none``.
6414
6415 Returns:
6416 a value of :class:`pygmo.thread_safety`: the thread safety level of the UDBFE
6417
6418 )";
6419 }
6420
default_bfe_docstring()6421 std::string default_bfe_docstring()
6422 {
6423 return R"(__init__()
6424
6425 Default UDBFE.
6426
6427 This class is a user-defined batch fitness evaluator (UDBFE) that can be used to
6428 construct a :class:`~pygmo.bfe`.
6429
6430 :class:`~pygmo.default_bfe` is the default UDBFE used by :class:`~pygmo.bfe`, and,
6431 depending on the properties of the input :class:`~pygmo.problem`, it will delegate the implementation
6432 of its call operator to another UDBFE. Specifically:
6433
6434 * if the input problem provides a batch fitness member function (as established by
6435 :func:`pygmo.problem.has_batch_fitness()`), then a :class:`~pygmo.member_bfe` will
6436 be constructed and invoked to produce the return value; otherwise,
6437 * if the input problem provides at least the ``basic`` :class:`~pygmo.thread_safety`
6438 guarantee (as established by :func:`pygmo.problem.get_thread_safety()`), then a
6439 :class:`pygmo.thread_bfe` will be constructed and invoked to produce the return value;
6440 otherwise,
6441 * a :class:`pygmo.mp_bfe` will be constructed and invoked to produce the return value.
6442
6443 See also the docs of the C++ class :cpp:class:`pagmo::default_bfe`.
6444
6445 )";
6446 }
6447
thread_bfe_docstring()6448 std::string thread_bfe_docstring()
6449 {
6450 return R"(__init__()
6451
6452 Threaded UDBFE.
6453
6454 This class is a user-defined batch fitness evaluator (UDBFE) that can be used to
6455 construct a :class:`~pygmo.bfe`.
6456
6457 :class:`~pygmo.thread_bfe` will use multiple threads of execution to parallelise
6458 the evaluation of the fitnesses of a batch of input decision vectors.
6459
6460 See also the docs of the C++ class :cpp:class:`pagmo::thread_bfe`.
6461
6462 )";
6463 }
6464
member_bfe_docstring()6465 std::string member_bfe_docstring()
6466 {
6467 return R"(__init__()
6468
6469 Member UDBFE.
6470
6471 This class is a user-defined batch fitness evaluator (UDBFE) that can be used to
6472 construct a :class:`~pygmo.bfe`.
6473
6474 :class:`~pygmo.member_bfe` is a simple wrapper which delegates batch fitness evaluations
6475 to the input problem's :func:`pygmo.problem.batch_fitness()` method.
6476
6477 See also the docs of the C++ class :cpp:class:`pagmo::member_bfe`.
6478
6479 )";
6480 }
6481
topology_docstring()6482 std::string topology_docstring()
6483 {
6484 return R"(__init__(udt = unconnected())
6485
6486 Topology.
6487
6488 In the jargon of pagmo, a topology is an object that represents connections among
6489 :class:`islands <pygmo.island>` in an :class:`~pygmo.archipelago`.
6490 In essence, a topology is a *weighted directed graph* in which
6491
6492 * the *vertices* (or *nodes*) are islands,
6493 * the *edges* (or *arcs*) are directed connections between islands across which information flows during the
6494 optimisation process (via the migration of individuals),
6495 * the *weights* of the edges (whose numerical values are the :math:`[0.,1.]` range) represent the migration
6496 probability.
6497
6498 Following the same schema adopted for :class:`~pygmo.problem`, :class:`~pygmo.algorithm`, etc.,
6499 :class:`~pygmo.topology` exposes a generic interface to *user-defined topologies* (or UDT for short).
6500 UDTs are classes providing a certain set
6501 of methods that describe the properties of (and allow to interact with) a topology. Once
6502 defined and instantiated, a UDT can then be used to construct an instance of this class,
6503 :class:`~pygmo.topology`, which provides a generic interface to topologies for use by
6504 :class:`~pygmo.archipelago`.
6505
6506 In a :class:`~pygmo.topology`, vertices in the graph are identified by a zero-based unique
6507 integral index. This integral index corresponds to the index of an
6508 :class:`~pygmo.island` in an :class:`~pygmo.archipelago`.
6509
6510 Every UDT must implement at least the following methods:
6511
6512 .. code-block::
6513
6514 def get_connections(self, n):
6515 ...
6516 def push_back(self):
6517 ...
6518
6519 The ``get_connections()`` method takes as input a vertex index ``n``, and it is expected to return
6520 a pair of array-like values containing respectively:
6521
6522 * the indices of the vertices which are connecting to ``n`` (that is, the list of vertices for which a directed edge
6523 towards ``n`` exists),
6524 * the weights (i.e., the migration probabilities) of the edges linking the connecting vertices to ``n``.
6525
6526 The ``push_back()`` method is expected to add a new vertex to the topology, assigning it the next
6527 available index and establishing connections to other vertices. The ``push_back()`` method is invoked
6528 by :func:`pygmo.archipelago.push_back()` upon the insertion of a new island into an archipelago,
6529 and it is meant to allow the incremental construction of a topology. That is, after ``N`` calls to ``push_back()``
6530 on an initially-empty topology, the topology should contain ``N`` vertices and any number of edges (depending
6531 on the specifics of the topology).
6532
6533 Additional optional methods can be implemented in a UDT:
6534
6535 .. code-block::
6536
6537 def get_name(self):
6538 ...
6539 def get_extra_info(self):
6540 ...
6541 def to_networkx(self):
6542 ...
6543
6544 See the documentation of the corresponding methods in this class for details on how the optional
6545 methods in the UDT are used by :class:`~pygmo.topology`.
6546
6547 Topologies are used in asynchronous operations involving migration in archipelagos,
6548 and thus they need to provide a certain degree of thread safety. Specifically, the
6549 ``get_connections()`` method of the UDT might be invoked concurrently with
6550 any other method of the UDT interface. It is up to the
6551 authors of user-defined topologies to ensure that this safety requirement is satisfied.
6552
6553 This class is the Python counterpart of the C++ class :cpp:class:`pagmo::topology`.
6554
6555 Args:
6556 udt: a user-defined topology, either C++ or Python
6557
6558 Raises:
6559 NotImplementedError: if *udt* does not implement the mandatory methods detailed above
6560 unspecified: any exception thrown by methods of the UDT invoked during construction,
6561 the deep copy of the UDT, the constructor of the underlying C++ class, or
6562 failures at the intersection between C++ and Python (e.g., type conversion errors, mismatched function
6563 signatures, etc.)
6564
6565 )";
6566 }
6567
topology_get_connections_docstring()6568 std::string topology_get_connections_docstring()
6569 {
6570 return R"(get_connections(n)
6571
6572 Get the connections to a vertex.
6573
6574 This method will invoke the ``get_connections()`` method of the UDT, which is expected to return
6575 a pair of array-like objects containing respectively:
6576
6577 * the indices of the vertices which are connecting to *n* (that is, the list of vertices for which a directed
6578 edge towards *n* exists),
6579 * the weights (i.e., the migration probabilities) of the edges linking the connecting vertices to *n*.
6580
6581 This method will also run sanity checks on the output of the ``get_connections()`` method of the UDT.
6582
6583 Args:
6584 n (:class:`int`): the index of the vertex whose incoming connections' details will be returned
6585
6586 Returns:
6587 Pair of 1D NumPy arrays: a pair of arrays describing *n*'s incoming connections
6588
6589 Raises:
6590 RuntimeError: if the object returned by a pythonic UDT is not iterable, or it is an iterable
6591 whose number of elements is not exactly 2, or if the invocation of the ``get_connections()``
6592 method of the UDT raises an exception
6593 ValueError: if the sizes of the returned arrays differ, or if any element of the second
6594 array is not in the :math:`[0.,1.]` range
6595 unspecified: any exception raised by failures at the intersection
6596 between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)
6597
6598 )";
6599 }
6600
// Docstring for pygmo.topology.push_back().
std::string topology_push_back_docstring()
{
    // Bind the literal to a named local (NRVO elides the copy) so the text
    // is easy to inspect in a debugger.
    std::string doc = R"(push_back(n=1)

Add vertices.

This method will invoke the ``push_back()`` method of the UDT *n* times. The ``push_back()`` method
of the UDT is expected to add a new vertex to the
topology, assigning it the next available index and establishing connections to other vertices.

Args:
    n (:class:`int`): the number of times the ``push_back()`` method of the UDT will be invoked

Raises:
    OverflowError: if *n* is negative or too large
    unspecified: any exception thrown by the ``push_back()`` method of the UDT

)";
    return doc;
}
6620
// Docstring for pygmo.topology.get_name().
std::string topology_get_name_docstring()
{
    std::string doc = R"(get_name()

Topology's name.

If the UDT provides a ``get_name()`` method, then this method will return the output of its ``get_name()`` method.
Otherwise, an implementation-defined name based on the type of the UDT will be returned.

Returns:
    str: the topology's name

)";
    return doc;
}
6635
// Docstring for pygmo.topology.get_extra_info().
std::string topology_get_extra_info_docstring()
{
    std::string doc = R"(get_extra_info()

Topology's extra info.

If the UDT provides a ``get_extra_info()`` method, then this method will return the output of its ``get_extra_info()``
method. Otherwise, an empty string will be returned.

Returns:
    str: extra info about the UDT

Raises:
    unspecified: any exception thrown by the ``get_extra_info()`` method of the UDT

)";
    return doc;
}
6653
// Docstring for pygmo.topology.to_networkx() (added in pygmo 2.15, per the
// versionadded directive below). Keep the reST markup intact.
std::string topology_to_networkx_docstring()
{
    return R"(to_networkx()

.. versionadded:: 2.15

Conversion to NetworkX.

If the UDT provides a ``to_networkx()`` method, then this method will invoke it and return
its output. Otherwise, an error will be raised.

This method is meant to export a representation of the current state of the topology
as a NetworkX graph object. The returned object must be a :class:`networkx.DiGraph`
in which the edges have a ``weight`` attribute represented as a floating-point value.

Note that this method will strip away all node attributes and edge attributes other
than ``weight`` from the graph returned by the UDT. It will also redefine the nodes
to be numbered sequentially (that is, if the NetworkX graph returned by the UDT
has three nodes numbered 0, 1 and 5, the graph returned by this method will have
nodes numbered 0, 1 and 2).

Returns:
    networkx.DiGraph: a graph representation of the UDT

Raises:
    NotImplementedError: if the UDT does not provide a ``to_networkx()`` method
    TypeError: if the object returned by the UDT is not a :class:`networkx.DiGraph`
    ValueError: if the edges of the returned graph do not all have a ``weight`` attribute
    unspecified: any exception thrown by the ``to_networkx()`` method of the UDT

)";
}
6686
// Class-level docstring for the pygmo.unconnected UDT.
std::string unconnected_docstring()
{
    std::string doc = R"(__init__()

Unconnected topology.

This user-defined topology (UDT) represents an unconnected graph. This is the default
UDT used by :class:`pygmo.topology`.

See also the docs of the C++ class :cpp:class:`pagmo::unconnected`.

)";
    return doc;
}
6700
// Class-level docstring for the pygmo.ring UDT.
std::string ring_docstring()
{
    std::string doc = R"(__init__(n=0, w=1.)

Ring topology.

This user-defined topology (UDT) represents a bidirectional ring (that is, a ring in
which each node connects to both the previous and the following nodes).

See also the docs of the C++ class :cpp:class:`pagmo::ring`.

Args:
    n (:class:`int`): the desired number of vertices
    w (:class:`float`): the weight of the edges

Raises:
    TypeError: if *n* is negative or too large
    ValueError: if *w* is not in the :math:`\left[0, 1\right]` range

)";
    return doc;
}
6722
// Docstring for pygmo.ring.get_weight() (shared with fully_connected).
std::string ring_get_weight_docstring()
{
    std::string doc = R"(get_weight()

Returns:
    float: the weight *w* used in the construction of this topology

)";
    return doc;
}
6732
// Docstring for num_vertices(), shared by the BGL-based topologies.
std::string base_bgl_num_vertices_docstring()
{
    std::string doc = R"(num_vertices()

Returns:
    int: the number of vertices in the topology

)";
    return doc;
}
6742
// Docstring for are_adjacent(), shared by the BGL-based topologies.
std::string base_bgl_are_adjacent_docstring()
{
    std::string doc = R"(are_adjacent(i, j)

Check if two vertices are adjacent.

Two vertices *i* and *j* are adjacent if there is a directed edge connecting *i* to *j*.

Args:
    i (:class:`int`): the first vertex index
    j (:class:`int`): the second vertex index

Returns:
    bool: :data:`True` if *i* and *j* are adjacent, :data:`False` otherwise

Raises:
    TypeError: if *i* or *j* are negative or too large
    ValueError: if *i* or *j* are not smaller than the number of vertices

)";
    return doc;
}
6764
// Docstring for add_vertex(), shared by the BGL-based topologies.
std::string base_bgl_add_vertex_docstring()
{
    std::string doc = R"(add_vertex()

Add a vertex.

This method will add a new vertex to the topology.

The newly-added vertex will be disjoint from any other vertex in the topology (i.e., there are no connections to/from the new vertex).

)";
    return doc;
}
6777
// Docstring for add_edge(), shared by the BGL-based topologies.
std::string base_bgl_add_edge_docstring()
{
    std::string doc = R"(add_edge(i, j, w=1.)

Add a new edge.

This method will add a new edge of weight *w* connecting *i* to *j*.

Args:
    i (:class:`int`): the first vertex index
    j (:class:`int`): the second vertex index
    w (:class:`float`): the edge's weight

Raises:
    TypeError: if *i* or *j* are negative or too large
    ValueError: if *i* or *j* are not smaller than the number of vertices, *i* and *j* are already adjacent, or
      if *w* is not in the :math:`\left[0, 1\right]` range

)";
    return doc;
}
6798
// Docstring for remove_edge(), shared by the BGL-based topologies.
std::string base_bgl_remove_edge_docstring()
{
    std::string doc = R"(remove_edge(i, j)

Remove an existing edge.

This method will remove the edge connecting *i* to *j*.

Args:
    i (:class:`int`): the first vertex index
    j (:class:`int`): the second vertex index

Raises:
    TypeError: if *i* or *j* are negative or too large
    ValueError: if *i* or *j* are not smaller than the number of vertices, or *i* and *j* are not adjacent

)";
    return doc;
}
6817
// Docstring for set_weight(), shared by the BGL-based topologies.
std::string base_bgl_set_weight_docstring()
{
    std::string doc = R"(set_weight(i, j, w)

Set the weight of an edge.

This method will set to *w* the weight of the edge connecting *i* to *j*.

Args:
    i (:class:`int`): the first vertex index
    j (:class:`int`): the second vertex index
    w (:class:`float`): the desired weight

Raises:
    TypeError: if *i* or *j* are negative or too large
    ValueError: if *i* or *j* are not smaller than the number of vertices, *i* and *j* are not adjacent, or
      if *w* is not in the :math:`\left[0, 1\right]` range

)";
    return doc;
}
6838
// Docstring for set_all_weights(), shared by the BGL-based topologies.
std::string base_bgl_set_all_weights_docstring()
{
    std::string doc = R"(set_all_weights(w)

This method will set the weights of all edges in the topology to *w*.

Args:
    w (:class:`float`): the edges' weight

Raises:
    ValueError: if *w* is not in the :math:`\left[0, 1\right]` range

)";
    return doc;
}
6853
// Docstring for get_edge_weight() (added in pygmo 2.15), shared by the
// BGL-based topologies.
std::string base_bgl_get_edge_weight_docstring()
{
    std::string doc = R"(get_edge_weight(i, j)

.. versionadded:: 2.15

Fetch the weight of the edge connecting *i* to *j*.

Args:
    i (:class:`int`): the source vertex index
    j (:class:`int`): the destination vertex index

Returns:
    float: the weight of the edge connecting *i* to *j*

Raises:
    TypeError: if *i* or *j* are negative or too large
    ValueError: if either *i* or *j* are not smaller than the number of vertices, or
      *i* and *j* are not adjacent

)";
    return doc;
}
6876
// Class-level docstring for the pygmo.fully_connected UDT.
std::string fully_connected_docstring()
{
    std::string doc = R"(__init__(n=0, w=1.)

Fully connected topology.

This user-defined topology (UDT) represents a *complete graph* (that is, a topology
in which all vertices connect to all other vertices). The edge weight is configurable
at construction, and it will be the same for all the edges in the topology.

See also the docs of the C++ class :cpp:class:`pagmo::fully_connected`.

Args:
    n (:class:`int`): the desired number of vertices
    w (:class:`float`): the weight of the edges

Raises:
    TypeError: if *n* is negative or too large
    ValueError: if *w* is not in the :math:`\left[0, 1\right]` range

)";
    return doc;
}
6899
fully_connected_get_weight_docstring()6900 std::string fully_connected_get_weight_docstring()
6901 {
6902 return ring_get_weight_docstring();
6903 }
6904
fully_connected_num_vertices_docstring()6905 std::string fully_connected_num_vertices_docstring()
6906 {
6907 return base_bgl_num_vertices_docstring();
6908 }
6909
// Class-level docstring for the pygmo.free_form UDT. Note that, unlike the
// other UDT docstrings, this one has no __init__ signature line (the class
// has several constructor overloads, described in the text).
std::string free_form_docstring()
{
    return R"(Free-form topology.

This user-defined topology (UDT) represents a graph in which
vertices and edges can be manipulated freely. Instances
of this class can be constructed from either:

* a :class:`~pygmo.topology`,
* another UDT,
* a :class:`networkx.DiGraph`,
* :data:`None`.

Construction from :data:`None` will initialise a topology
without vertices or edges.

Construction from a :class:`networkx.DiGraph` will initialise
a topology whose vertices and edges are described by the
input graph. All the edges of the input graph must have
a :class:`float` attribute called ``weight`` whose value
is in the :math:`\left[0 , 1\right]` range.

When *t* is a :class:`~pygmo.topology` or a UDT,
the constructor will attempt to fetch the NetworkX
representation of the input object via the
:func:`pygmo.topology.to_networkx()` method, and will then
proceed in the same manner explained in the previous
paragraph.

See also the docs of the C++ class :cpp:class:`pagmo::free_form`.

)";
}
6943
// Class-level docstring for pygmo.r_policy (the type-erased wrapper around
// user-defined replacement policies). Keep the reST markup intact.
std::string r_policy_docstring()
{
    return R"(__init__(udrp = fair_replace())

Replacement policy.

A replacement policy establishes
how, during migration within an :class:`~pygmo.archipelago`,
a group of migrants replaces individuals in an existing
:class:`~pygmo.population`. In other words, a replacement
policy is tasked with producing a new set of individuals from
an original set of individuals and a set of candidate migrants.

Following the same schema adopted for :class:`~pygmo.problem`, :class:`~pygmo.algorithm`, etc.,
:class:`~pygmo.r_policy` exposes a generic
interface to *user-defined replacement policies* (or UDRP for short).
UDRPs are classes providing a certain set
of methods that implement the logic of the replacement policy. Once
defined and instantiated, a UDRP can then be used to construct an instance of this class,
:class:`~pygmo.r_policy`, which
provides a generic interface to replacement policies for use by :class:`~pygmo.island`.

Every UDRP must implement at least the following method:

.. code-block::

   def replace(self, inds, nx, nix, nobj, nec, nic, tol, mig):
     ...

The ``replace()`` method takes in input the following parameters:

* a group of individuals *inds*,
* a set of arguments describing the properties of the :class:`~pygmo.problem` the individuals refer to:

  * the total dimension *nx*,
  * the integral dimension *nix*,
  * the number of objectives *nobj*,
  * the number of equality constraints *nec*,
  * the number of inequality constraints *nic*,
  * the problem's constraint tolerances *tol*,

* a set of migrants *mig*,

and it produces in output another set of individuals resulting from replacing individuals in *inds* with
individuals from *mig* (following some logic established by the UDRP). The sets of individuals *inds* and
*mig*, and the return value of the ``replace()`` method are represented as tuples of 3 elements containing:

* a 1D NumPy array of individual IDs (represented as 64-bit unsigned integrals),
* a 2D NumPy array of decision vectors (i.e., the decision vectors of each individual,
  stored in row-major order),
* a 2D NumPy array of fitness vectors (i.e., the fitness vectors of each individual,
  stored in row-major order).

Additional optional methods can be implemented in a UDRP:

.. code-block::

   def get_name(self):
     ...
   def get_extra_info(self):
     ...

See the documentation of the corresponding methods in this class for details on how the optional
methods in the UDRP are used by :class:`~pygmo.r_policy`.

Replacement policies are used in asynchronous operations involving migration in archipelagos,
and thus they need to provide a certain degree of thread safety. Specifically, the
``replace()`` method of the UDRP might be invoked concurrently with
any other method of the UDRP interface. It is up to the
authors of user-defined replacement policies to ensure that this safety requirement is satisfied.

This class is the Python counterpart of the C++ class :cpp:class:`pagmo::r_policy`.

Args:
    udrp: a user-defined replacement policy, either C++ or Python

Raises:
    NotImplementedError: if *udrp* does not implement the mandatory methods detailed above
    unspecified: any exception thrown by methods of the UDRP invoked during construction,
      the deep copy of the UDRP, the constructor of the underlying C++ class, or
      failures at the intersection between C++ and Python (e.g., type conversion errors, mismatched function
      signatures, etc.)

)";
}
7029
// Docstring for pygmo.r_policy.replace().
std::string r_policy_replace_docstring()
{
    return R"(replace(inds, nx, nix, nobj, nec, nic, tol, mig)

Replace individuals in a group with migrants from another group.

This method will invoke the ``replace()`` method of the UDRP.
Given a set of individuals, *inds*, and a set of migrants, *mig*, the ``replace()`` method of the UDRP
is expected to replace individuals in *inds*
with individuals from *mig*, and return the new set of individuals resulting from the replacement.
The other arguments of this method describe the properties of the :class:`~pygmo.problem`
that the individuals in *inds* and *mig* refer to.

The sets of individuals *inds* and *mig*, and the return value of this method are
represented as tuples of 3 elements containing:

* a 1D NumPy array of individual IDs (represented as 64-bit unsigned integrals),
* a 2D NumPy array of decision vectors (i.e., the decision vectors of each individual,
  stored in row-major order),
* a 2D NumPy array of fitness vectors (i.e., the fitness vectors of each individual,
  stored in row-major order).

In addition to invoking the ``replace()`` method of the UDRP, this method will also
perform a variety of sanity checks on both the input arguments and on the output produced by the
UDRP.

Args:
    inds (tuple): the original group of individuals
    nx (:class:`int`): the dimension of the problem *inds* and *mig* refer to
    nix (:class:`int`): the integral dimension of the problem *inds* and *mig* refer to
    nobj (:class:`int`): the number of objectives of the problem *inds* and *mig* refer to
    nec (:class:`int`): the number of equality constraints of the problem *inds* and *mig* refer to
    nic (:class:`int`): the number of inequality constraints of the problem *inds* and *mig* refer to
    tol (array-like object): the vector of constraints tolerances of the problem *inds* and *mig* refer to
    mig (tuple): the group of migrants

Returns:
    tuple: a new set of individuals resulting from replacing individuals in *inds* with individuals from *mig*

Raises:
    RuntimeError: if the object returned by a pythonic UDRP is not iterable, or it is an iterable
      whose number of elements is not exactly 3, or if the invocation of the ``replace()``
      method of the UDRP raises an exception
    ValueError: if *inds*, *mig* or the return value are not consistent with the problem properties,
      or the ID, decision and fitness vectors in *inds*, *mig* or the return value have inconsistent sizes,
      or the problem properties are invalid (e.g., *nobj* is zero, *nix* > *nx*, etc.)
    unspecified: any exception raised by failures at the intersection
      between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
7081
// Docstring for pygmo.r_policy.get_name().
std::string r_policy_get_name_docstring()
{
    std::string doc = R"(get_name()

Name of the replacement policy.

If the UDRP provides a ``get_name()`` method, then this method will return the output of its ``get_name()`` method.
Otherwise, an implementation-defined name based on the type of the UDRP will be returned.

Returns:
    str: the name of the replacement policy

)";
    return doc;
}
7096
// Docstring for pygmo.r_policy.get_extra_info().
std::string r_policy_get_extra_info_docstring()
{
    std::string doc = R"(get_extra_info()

Replacement policy's extra info.

If the UDRP provides a ``get_extra_info()`` method, then this method will return the output of its ``get_extra_info()``
method. Otherwise, an empty string will be returned.

Returns:
    str: extra info about the UDRP

Raises:
    unspecified: any exception thrown by the ``get_extra_info()`` method of the UDRP

)";
    return doc;
}
7114
// Class-level docstring for the pygmo.fair_replace UDRP (the default
// replacement policy of pygmo.r_policy).
std::string fair_replace_docstring()
{
    return R"(__init__(rate=1)

Fair replacement policy.

This user-defined replacement policy (UDRP) will replace individuals in
a group only if the candidate replacement individuals are *better* than
the original individuals.

In this context, *better* means the following:

* in single-objective unconstrained problems, an individual is better
  than another one if its fitness is lower,
* in single-objective constrained problems, individuals are ranked
  via :func:`~pygmo.sort_population_con()`,
* in multi-objective unconstrained problems, individuals are ranked
  via :func:`~pygmo.sort_population_mo()`.

Note that this user-defined replacement policy currently does *not* support
multi-objective constrained problems.

A fair replacement policy is constructed from a *rate* argument, which
can be either an integral or a floating-point value.

If *rate* is a floating point value in the :math:`\left[0,1\right]` range,
then it represents a *fractional* migration rate. That is, it indicates,
the fraction of individuals that may be replaced in the input population:
a value of 0 means that no individuals will be replaced, a value of 1 means that
all individuals may be replaced.

If *rate* is an integral value, then it represents an *absolute* migration rate, that is,
the exact number of individuals that may be replaced in the input population.

See also the docs of the C++ class :cpp:class:`pagmo::fair_replace`.

Args:
    rate (int, float): the desired migration rate

Raises:
    ValueError: if the supplied fractional migration rate is not finite
      or not in the :math:`\left[0,1\right]` range
    TypeError: if *rate* is not an instance of :class:`int` or :class:`float`
    unspecified: any exception raised by the invoked C++ constructor

)";
}
7162
// Class-level docstring for pygmo.s_policy (the type-erased wrapper around
// user-defined selection policies). Keep the reST markup intact.
std::string s_policy_docstring()
{
    return R"(__init__(udsp = select_best())

Selection policy.

A selection policy establishes
how, during migration within an :class:`~pygmo.archipelago`,
candidate migrants are selected from an :class:`~pygmo.island`.

Following the same schema adopted for :class:`~pygmo.problem`, :class:`~pygmo.algorithm`, etc.,
:class:`~pygmo.s_policy` exposes a generic
interface to *user-defined selection policies* (or UDSP for short).
UDSPs are classes providing a certain set of methods that implement the logic of the selection policy. Once
defined and instantiated, a UDSP can then be used to construct an instance of this class,
:class:`~pygmo.s_policy`, which
provides a generic interface to selection policies for use by :class:`~pygmo.island`.

Every UDSP must implement at least the following method:

.. code-block::

   def select(self, inds, nx, nix, nobj, nec, nic, tol):
     ...

The ``select()`` method takes in input the following parameters:

* a group of individuals *inds*,
* a set of arguments describing the properties of the :class:`~pygmo.problem` the individuals refer to:

  * the total dimension *nx*,
  * the integral dimension *nix*,
  * the number of objectives *nobj*,
  * the number of equality constraints *nec*,
  * the number of inequality constraints *nic*,
  * the problem's constraint tolerances *tol*,

and it produces in output another set of individuals resulting from selecting individuals in *inds*
(following some logic established by the UDSP). The sets of individuals *inds*
and the return value of the ``select()`` method are represented as tuples of 3 elements containing:

* a 1D NumPy array of individual IDs (represented as 64-bit unsigned integrals),
* a 2D NumPy array of decision vectors (i.e., the decision vectors of each individual,
  stored in row-major order),
* a 2D NumPy array of fitness vectors (i.e., the fitness vectors of each individual,
  stored in row-major order).

Additional optional methods can be implemented in a UDSP:

.. code-block::

   def get_name(self):
     ...
   def get_extra_info(self):
     ...

See the documentation of the corresponding methods in this class for details on how the optional
methods in the UDSP are used by :class:`~pygmo.s_policy`.

Selection policies are used in asynchronous operations involving migration in archipelagos,
and thus they need to provide a certain degree of thread safety. Specifically, the
``select()`` method of the UDSP might be invoked concurrently with
any other method of the UDSP interface. It is up to the
authors of user-defined selection policies to ensure that this safety requirement is satisfied.

This class is the Python counterpart of the C++ class :cpp:class:`pagmo::s_policy`.

Args:
    udsp: a user-defined selection policy, either C++ or Python

Raises:
    NotImplementedError: if *udsp* does not implement the mandatory methods detailed above
    unspecified: any exception thrown by methods of the UDSP invoked during construction,
      the deep copy of the UDSP, the constructor of the underlying C++ class, or
      failures at the intersection between C++ and Python (e.g., type conversion errors, mismatched function
      signatures, etc.)

)";
}
7242
// Docstring for pygmo.s_policy.select().
std::string s_policy_select_docstring()
{
    return R"(select(inds, nx, nix, nobj, nec, nic, tol)

Select individuals from a group.

This method will invoke the ``select()`` method of the UDSP.
Given a set of individuals, *inds*, the ``select()`` method of the UDSP
is expected to return a new set of individuals selected from *inds*.
The other arguments of this method describe the properties of the :class:`~pygmo.problem`
that the individuals in *inds* refer to.

The set of individuals *inds* and the return value of this method are
represented as tuples of 3 elements containing:

* a 1D NumPy array of individual IDs (represented as 64-bit unsigned integrals),
* a 2D NumPy array of decision vectors (i.e., the decision vectors of each individual,
  stored in row-major order),
* a 2D NumPy array of fitness vectors (i.e., the fitness vectors of each individual,
  stored in row-major order).

In addition to invoking the ``select()`` method of the UDSP, this function will also
perform a variety of sanity checks on both the input arguments and on the output produced by the
UDSP.

Args:
    inds (tuple): the original group of individuals
    nx (:class:`int`): the dimension of the problem *inds* refers to
    nix (:class:`int`): the integral dimension of the problem *inds* refers to
    nobj (:class:`int`): the number of objectives of the problem *inds* refers to
    nec (:class:`int`): the number of equality constraints of the problem *inds* refers to
    nic (:class:`int`): the number of inequality constraints of the problem *inds* refers to
    tol (array-like object): the vector of constraints tolerances of the problem *inds* refers to

Returns:
    tuple: a new set of individuals resulting from selecting individuals in *inds*.

Raises:
    RuntimeError: if the object returned by a pythonic UDSP is not iterable, or it is an iterable
      whose number of elements is not exactly 3, or if the invocation of the ``select()``
      method of the UDSP raises an exception
    ValueError: if *inds* or the return value are not consistent with the problem properties,
      or the ID, decision and fitness vectors in *inds* or the return value have inconsistent sizes,
      or the problem properties are invalid (e.g., *nobj* is zero, *nix* > *nx*, etc.)
    unspecified: any exception raised by failures at the intersection
      between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.)

)";
}
7292
// Docstring for pygmo.s_policy.get_name().
std::string s_policy_get_name_docstring()
{
    std::string doc = R"(get_name()

Name of the selection policy.

If the UDSP provides a ``get_name()`` method, then this method will return the output of its ``get_name()`` method.
Otherwise, an implementation-defined name based on the type of the UDSP will be returned.

Returns:
    str: the name of the selection policy

)";
    return doc;
}
7307
// Docstring for pygmo.s_policy.get_extra_info().
std::string s_policy_get_extra_info_docstring()
{
    std::string doc = R"(get_extra_info()

Selection policy's extra info.

If the UDSP provides a ``get_extra_info()`` method, then this method will return the output of its ``get_extra_info()``
method. Otherwise, an empty string will be returned.

Returns:
    str: extra info about the UDSP

Raises:
    unspecified: any exception thrown by the ``get_extra_info()`` method of the UDSP

)";
    return doc;
}
7325
// Class-level docstring for the pygmo.select_best UDSP (the default
// selection policy of pygmo.s_policy).
std::string select_best_docstring()
{
    return R"(__init__(rate=1)

Select best selection policy.

This user-defined selection policy (UDSP) will select the *best*
individuals from a group.

In this context, *best* means the following:

* in single-objective unconstrained problems, individuals are ranked
  according to their fitness function,
* in single-objective constrained problems, individuals are ranked
  via :func:`~pygmo.sort_population_con()`,
* in multi-objective unconstrained problems, individuals are ranked
  via :func:`~pygmo.sort_population_mo()`.

Note that this user-defined selection policy currently does *not* support
multi-objective constrained problems.

A select best policy is constructed from a *rate* argument, which
can be either an integral or a floating-point value.

If *rate* is a floating point value in the :math:`\left[0,1\right]` range,
then it represents a *fractional* migration rate. That is, it indicates,
the fraction of individuals that will be selected from the input population:
a value of 0 means that no individuals will be selected, a value of 1 means that
all individuals will be selected.

If *rate* is an integral value, then it represents an *absolute* migration rate, that is,
the exact number of individuals that will be selected from the input population.

See also the docs of the C++ class :cpp:class:`pagmo::select_best`.

Args:
    rate (int, float): the desired migration rate

Raises:
    ValueError: if the supplied fractional migration rate is not finite
      or not in the :math:`\left[0,1\right]` range
    TypeError: if *rate* is not an instance of :class:`int` or :class:`float`
    unspecified: any exception raised by the invoked C++ constructor

)";
}
7372
7373 // Utilities for implementing the exposition of algorithms
7374 // which inherit from not_population_based.
// Docstring for the ``selection`` attribute of algorithms inheriting from
// not_population_based. *algo* is the Python-side algorithm name, spliced
// into the cross-reference so it points at the right class.
std::string bls_selection_docstring(const std::string &algo)
{
    // Assemble the docstring piecewise around the algo-specific reference.
    std::string doc = R"(Individual selection policy.

This attribute represents the policy that is used in the ``evolve()`` method to select the individual
that will be optimised. The attribute can be either a string or an integral.

If the attribute is a string, it must be one of ``"best"``, ``"worst"`` and ``"random"``:

* ``"best"`` will select the best individual in the population,
* ``"worst"`` will select the worst individual in the population,
* ``"random"`` will randomly choose one individual in the population.

:func:`~pygmo.)";
    doc += algo;
    doc += R"(.set_random_sr_seed()` can be used to seed the random number generator
used by the ``"random"`` policy.

If the attribute is an integer, it represents the index (in the population) of the individual that is selected
for optimisation.

Returns:
    :class:`int` or :class:`str`: the individual selection policy or index

Raises:
    OverflowError: if the attribute is set to an integer which is negative or too large
    ValueError: if the attribute is set to an invalid string
    TypeError: if the attribute is set to a value of an invalid type
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
    return doc;
}
7407
// Docstring for the ``replacement`` attribute of algorithms inheriting from
// not_population_based. *algo* is the Python-side algorithm name, spliced
// into the cross-reference so it points at the right class.
std::string bls_replacement_docstring(const std::string &algo)
{
    // Assemble the docstring piecewise around the algo-specific reference.
    std::string doc = R"(Individual replacement policy.

This attribute represents the policy that is used in the ``evolve()`` method to select the individual
that will be replaced by the optimised individual. The attribute can be either a string or an integral.

If the attribute is a string, it must be one of ``"best"``, ``"worst"`` and ``"random"``:

* ``"best"`` will select the best individual in the population,
* ``"worst"`` will select the worst individual in the population,
* ``"random"`` will randomly choose one individual in the population.

:func:`~pygmo.)";
    doc += algo;
    doc += R"(.set_random_sr_seed()` can be used to seed the random number generator
used by the ``"random"`` policy.

If the attribute is an integer, it represents the index (in the population) of the individual that will be
replaced by the optimised individual.

Returns:
    :class:`int` or :class:`str`: the individual replacement policy or index

Raises:
    OverflowError: if the attribute is set to an integer which is negative or too large
    ValueError: if the attribute is set to an invalid string
    TypeError: if the attribute is set to a value of an invalid type
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
    return doc;
}
7440
// Build the docstring for the set_random_sr_seed() method of a meta-algorithm.
// The *algo* argument is the pygmo class name spliced into the :attr:
// cross-references. Fixes a typo in the docstring text:
// "election/replacement" -> "selection/replacement".
std::string bls_set_random_sr_seed_docstring(const std::string &algo)
{
    return R"(set_random_sr_seed(seed)

Set the seed for the ``"random"`` selection/replacement policies.

Args:
    seed (:class:`int`): the value that will be used to seed the random number generator used by the ``"random"``
      selection/replacement policies (see :attr:`~pygmo.)"
           + algo + R"(.selection` and
      :attr:`~pygmo.)"
           + algo + R"(.replacement`)

Raises:
    OverflowError: if the attribute is set to an integer which is negative or too large
    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
      type conversion errors, mismatched function signatures, etc.)

)";
}
7461
7462 // evolve_status enum.
// Docstring text for the evolve_status enum.
std::string evolve_status_docstring()
{
    // Assembled from ordinary string literals (compile-time concatenation)
    // rather than a single raw string literal; the resulting text is the same.
    return "Evolution status.\n"
           "\n"
           "This enumeration contains status flags used to represent the current status of asynchronous evolution/optimisation\n"
           "in :class:`pygmo.island` and :class:`pygmo.archipelago`.\n"
           "\n"
           ".. seealso::\n"
           "\n"
           "   :attr:`pygmo.island.status` and :attr:`pygmo.archipelago.status`.\n"
           "\n";
}
7476
// Docstring text for the evolve_status.idle enumerator.
std::string evolve_status_idle_docstring()
{
    return "no asynchronous operations are ongoing, and no error was generated\n"
           "by an asynchronous operation in the past (value = 0)\n";
}
7483
// Docstring text for the evolve_status.busy enumerator.
std::string evolve_status_busy_docstring()
{
    return "asynchronous operations are ongoing, and no error was generated\n"
           "by an asynchronous operation in the past (value = 1)\n";
}
7490
// Docstring text for the evolve_status.idle_error enumerator.
std::string evolve_status_idle_error_docstring()
{
    return "no asynchronous operations are ongoing, but an error was generated\n"
           "by an asynchronous operation in the past (value = 2)\n";
}
7497
// Docstring text for the evolve_status.busy_error enumerator.
std::string evolve_status_busy_error_docstring()
{
    return "asynchronous operations are ongoing, and an error was generated\n"
           "by an asynchronous operation in the past (value = 3)\n";
}
7504
// Docstring text for the thread_safety enum.
std::string thread_safety_docstring()
{
    // Assembled from ordinary string literals (compile-time concatenation)
    // rather than a single raw string literal; the resulting text is the same.
    return "Thread safety level.\n"
           "\n"
           "This enumeration defines a set of values that can be used to specify the thread safety of problems, algorithms, etc.\n"
           "\n"
           ".. note::\n"
           "\n"
           "   For safety reasons, pygmo currently does not allow to set a thread safety level higher than ``none``\n"
           "   for any user-defined object implemented in Python. That is, only problems, algorithms, etc.\n"
           "   implemented in C++ can have some degree of thread safety.\n"
           "\n";
}
7519
// Docstring text for the thread_safety.none enumerator.
std::string thread_safety_none_docstring()
{
    return "no thread safety - concurrent operations on distinct objects are unsafe (value = 0)\n\n";
}
7526
// Docstring text for the thread_safety.basic enumerator.
std::string thread_safety_basic_docstring()
{
    return "basic thread safety - concurrent operations on distinct objects are safe (value = 1)\n\n";
}
7533
// Docstring text for the thread_safety.constant enumerator.
std::string thread_safety_constant_docstring()
{
    return "constant thread safety - constant (i.e., read-only) concurrent operations "
           "on the same object are safe (value = 2)\n\n";
}
7540
// Docstring text for the migration_type enum.
std::string migration_type_docstring()
{
    // Assembled from ordinary string literals (compile-time concatenation)
    // rather than a single raw string literal; the resulting text is the same.
    return "Migration type.\n"
           "\n"
           "This enumeration represents the available migration policies in an :class:`~pygmo.archipelago`:\n"
           "\n"
           "* with the point-to-point migration policy, during migration an island will\n"
           "  consider individuals from only one of the connecting islands;\n"
           "* with the broadcast migration policy, during migration an island will consider\n"
           "  individuals from *all* the connecting islands.\n"
           "\n";
}
7554
// Docstring text for the migration_type.p2p enumerator.
std::string migration_type_p2p_docstring()
{
    return "point-to-point migration (value = 0)\n\n";
}
7561
// Docstring text for the migration_type.broadcast enumerator.
std::string migration_type_broadcast_docstring()
{
    return "broadcast migration (value = 1)\n\n";
}
7568
// Docstring text for the migrant_handling enum.
std::string migrant_handling_docstring()
{
    // Assembled from ordinary string literals (compile-time concatenation)
    // rather than a single raw string literal; the resulting text is the same.
    return "Migrant handling policy.\n"
           "\n"
           "This enumeration represents the available migrant handling\n"
           "policies in an :class:`~pygmo.archipelago`.\n"
           "\n"
           "During migration,\n"
           "individuals are selected from the islands and copied into a migration\n"
           "database, from which they can be fetched by other islands.\n"
           "This policy establishes what happens to the migrants in the database\n"
           "after they have been fetched by a destination island:\n"
           "\n"
           "* with the preserve policy, a copy of the candidate migrants\n"
           "  remains in the database;\n"
           "* with the evict policy, the candidate migrants are\n"
           "  removed from the database.\n"
           "\n";
}
7589
// Docstring text for the migrant_handling.preserve enumerator.
// Fixes a typo in the user-visible text: "perserve" -> "preserve".
std::string migrant_handling_preserve_docstring()
{
    return R"(preserve migrants in the database (value = 0)

)";
}
7596
// Docstring text for the migrant_handling.evict enumerator.
std::string migrant_handling_evict_docstring()
{
    return "evict migrants from the database (value = 1)\n\n";
}
7603
7604 } // namespace pygmo
7605