1 // -*- C++ -*-
2 /**
3 * @brief This file provides functions to ease Python wrapping
4 *
5 * Copyright 2005-2021 Airbus-EDF-IMACS-ONERA-Phimeca
6 *
7 * This library is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published by
9 * the Free Software Foundation, either version 3 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library. If not, see <http://www.gnu.org/licenses/>.
19 *
20 */
21
22 #ifndef OPENTURNS_PYTHONWRAPPINGFUNCTIONS_HXX
23 #define OPENTURNS_PYTHONWRAPPINGFUNCTIONS_HXX
24
25 #include <Python.h>
26 #include "openturns/OT.hxx"
27
28 BEGIN_NAMESPACE_OPENTURNS
29
30
31 /** Scoped PyObject pointer: automatically triggers garbage collection */
32 class ScopedPyObjectPointer
33 {
34 public:
  explicit ScopedPyObjectPointer(PyObject * pyObj = 0)
36 : pyObj_(pyObj)
37 {
38 }
39
  ~ScopedPyObjectPointer()
41 {
42 Py_XDECREF(pyObj_);
43 }
44
  ScopedPyObjectPointer & operator=(PyObject * pyObj)
46 {
47 Py_XDECREF(pyObj_);
48 pyObj_ = pyObj;
49 return *this;
50 }
51
  PyObject & operator*() const
53 {
54 return *pyObj_;
55 }
56
  PyObject * get() const
58 {
59 return pyObj_;
60 }
61
  bool isNull()
63 {
64 return !pyObj_;
65 }
66
67 private:
68 PyObject* pyObj_;
69 };
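
/* Usage sketch (illustrative only, assuming an initialized interpreter):
 *
 *   ScopedPyObjectPointer value(PyLong_FromLong(42));   // takes ownership of the new reference
 *   if (!value.isNull())
 *   {
 *     const long n = PyLong_AsLong(value.get());        // borrows the raw pointer, no ownership transfer
 *   }
 *   // Py_XDECREF is called automatically when 'value' goes out of scope
 */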
70
71
72 /** These templates are just declared, not defined. Only specializations are. */
73 template <class CPP_Type> struct traitsPythonType;
74 template <class PYTHON_Type> static inline int isAPython(PyObject * pyObj);
75 template <class PYTHON_Type> static inline const char * namePython();
76 template <class PYTHON_Type, class CPP_Type> static inline CPP_Type convert(PyObject * pyObj);
77 template <class CPP_Type, class PYTHON_Type> static inline PyObject * convert(CPP_Type);
78 template <class PYTHON_Type, class CPP_Type> static inline bool canConvert(PyObject * pyObj);
79 template <class PYTHON_Type> static inline void check(PyObject * pyObj);
80 template <class PYTHON_Type, class CPP_Type> static inline CPP_Type checkAndConvert(PyObject * pyObj);
81 template <class T> static inline T * buildObjectFromPySequence(PyObject * pyObj);
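
/* How these templates fit together (a minimal sketch, not part of the API contract):
 *
 *   // C++ -> Python: returns a new reference, so it is usually held by a ScopedPyObjectPointer
 *   ScopedPyObjectPointer py(convert< Scalar, _PyFloat_ >(3.14));
 *
 *   // Python -> C++: check<> throws InvalidArgumentException on a type mismatch,
 *   // checkAndConvert<> combines the check and the conversion
 *   const Scalar x = checkAndConvert< _PyFloat_, Scalar >(py.get());
 */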
82
83
84
85 /** Specializations */
86
87
88 /* PyObject */
89 struct _PyObject_ {};
90
91 template <>
92 inline
93 int
isAPython<_PyObject_>(PyObject *)
95 {
96 return 1;
97 }
98
99 template <>
100 inline
101 const char *
namePython<_PyObject_>()
103 {
104 return "object";
105 }
106
107
108
109
110 /* PyBool */
111 struct _PyBool_ {};
112
113 template <>
114 inline
115 int
isAPython<_PyBool_>(PyObject * pyObj)
117 {
118 return PyBool_Check(pyObj);
119 }
120
121 template <>
122 inline
123 const char *
namePython<_PyBool_>()
125 {
126 return "bool";
127 }
128
129 template <>
130 struct traitsPythonType<Bool>
131 {
132 typedef _PyBool_ Type;
133 };
134
135 template <>
136 inline
137 Bool
convert< _PyBool_, Bool >(PyObject * pyObj)
139 {
140 return pyObj == Py_True;
141 }
142
143 template <>
144 inline
145 PyObject *
convert< Bool, _PyBool_ >(Bool inB)
147 {
148 return PyBool_FromLong(inB ? 1 : 0);
149 }
150
151
152 /* PyInt */
153 struct _PyInt_ {};
154
155 template <>
156 inline
157 int
isAPython< _PyInt_ >(PyObject * pyObj)
159 {
160 // PyInt type is deprecated
161 #if PY_MAJOR_VERSION >= 3
162 return PyLong_Check(pyObj);
163 #else
164 return PyInt_Check(pyObj) || PyLong_Check(pyObj);
165 #endif
166 }
167
168 template <>
169 inline
170 const char *
namePython< _PyInt_ >()
172 {
173 return "integer";
174 }
175
// In C++11, it is no longer possible to define a static member that is a const char*.
// We could use constexpr instead, if we were sure of adequate compiler support.
// As only numerical types are concerned, it is unlikely that new types will be added,
// so let us use an array instead and store an index into this array.
180 static const char* const pyBuf_formats [] =
181 {
182 "l",
183 "d",
184 "Zd"
185 };
186
187 template <>
188 struct traitsPythonType< UnsignedInteger >
189 {
190 typedef _PyInt_ Type;
191 static const int buf_itemsize = sizeof(UnsignedInteger);
192 static const int buf_format_idx = 0; // "l"
193 };
194
195 template <>
196 inline
197 bool
canConvert< _PyInt_, UnsignedInteger >(PyObject *)
199 {
200 return true;
201 }
202
203 template <>
204 inline
205 UnsignedInteger
convert< _PyInt_, UnsignedInteger >(PyObject * pyObj)
207 {
208 return PyLong_AsUnsignedLong(pyObj);
209 }
210
211 template <>
212 inline
213 PyObject *
convert< UnsignedInteger, _PyInt_ >(UnsignedInteger n)
215 {
216 return PyLong_FromUnsignedLong(n);
217 }
218
219
220
221
222 /* PyFloat */
223 struct _PyFloat_ {};
224
225 template <>
226 inline
227 int
isAPython< _PyFloat_ >(PyObject * pyObj)
229 {
// also check against the sequence protocol, as numpy arrays comply with several protocols
231 return PyNumber_Check(pyObj) && !PyComplex_Check(pyObj) && !PySequence_Check(pyObj);
232 }
233
234 template <>
235 inline
236 const char *
namePython<_PyFloat_>()
238 {
239 return "double";
240 }
241
242 template <>
243 struct traitsPythonType< Scalar >
244 {
245 typedef _PyFloat_ Type;
246 static const int buf_itemsize = sizeof(Scalar);
247 static const int buf_format_idx = 1; // "d"
248 };
249
250 template <>
251 inline
252 Scalar
convert< _PyFloat_, Scalar >(PyObject * pyObj)
254 {
255 return PyFloat_AsDouble(pyObj);
256 }
257
258 template <>
259 inline
260 PyObject *
convert< Scalar, _PyFloat_ >(Scalar x)
262 {
263 return PyFloat_FromDouble(x);
264 }
265
266
267 /* PyComplex */
268 struct _PyComplex_ {};
269
270 template <>
271 inline
272 int
isAPython<_PyComplex_>(PyObject * pyObj)
274 {
275 return PyNumber_Check(pyObj);
276 }
277
278 template <>
279 inline
280 const char *
namePython<_PyComplex_>()
282 {
283 return "complex";
284 }
285
286 template <>
287 struct traitsPythonType< Complex >
288 {
289 typedef _PyComplex_ Type;
290 static const int buf_itemsize = sizeof(Complex);
291 static const int buf_format_idx = 2; // "Zd"
292 };
293
294 template <>
295 inline
296 Complex
convert< _PyComplex_, Complex >(PyObject * pyObj)
298 {
299 return Complex(PyComplex_RealAsDouble(pyObj), PyComplex_ImagAsDouble(pyObj));
300 }
301
302 template <>
303 inline
304 PyObject *
convert< Complex, _PyComplex_ >(Complex x)
306 {
307 return PyComplex_FromDoubles(x.real(), x.imag());
308 }
309
310
311 /* PyBytes */
312 struct _PyBytes_ {};
313
314 template <>
315 inline
316 int
isAPython< _PyBytes_ >(PyObject * pyObj)
318 {
319 #if PY_MAJOR_VERSION >= 3
320 return PyBytes_Check(pyObj);
321 #else
322 return PyString_Check(pyObj);
323 #endif
324 }
325
326 template <>
327 inline
328 const char *
namePython< _PyBytes_ >()
330 {
331 return "bytes";
332 }
333
334 template <>
335 inline
336 String
convert< _PyBytes_, String >(PyObject * pyObj)
338 {
339 #if PY_MAJOR_VERSION >= 3
340 return PyBytes_AsString(pyObj);
341 #else
342 return PyString_AsString(pyObj);
343 #endif
344 }
345
346 template <>
347 inline
348 PyObject *
convert< String, _PyBytes_ >(String s)
350 {
351 #if PY_MAJOR_VERSION >= 3
352 return PyBytes_FromString(s.data());
353 #else
354 return PyString_FromString(s.data());
355 #endif
356 }
357
358
359 /* PyUnicode */
360 struct _PyUnicode_ {};
361
362 template <>
363 inline
364 int
isAPython< _PyUnicode_ >(PyObject * pyObj)
366 {
367 return PyUnicode_Check(pyObj);
368 }
369
370 template <>
371 inline
372 const char *
namePython< _PyUnicode_ >()
374 {
375 return "unicode";
376 }
377
378 template <>
379 inline
380 String
convert< _PyUnicode_, String >(PyObject * pyObj)
382 {
383 ScopedPyObjectPointer encodedBytes(PyUnicode_AsUTF8String(pyObj));
384 assert(encodedBytes.get());
385 return convert<_PyBytes_, String>(encodedBytes.get());
386 }
387
388 template <>
389 inline
390 PyObject *
convert< String, _PyUnicode_ >(String s)
392 {
393 return PyUnicode_FromString(s.data());
394 }
395
396
397 /* PyString */
398 struct _PyString_ {};
399
400 template <>
401 inline
402 int
isAPython< _PyString_ >(PyObject * pyObj)
404 {
405 #if PY_MAJOR_VERSION >= 3
406 return PyUnicode_Check(pyObj);
407 #else
408 return PyString_Check(pyObj) || PyUnicode_Check(pyObj);
409 #endif
410 }
411
412 template <>
413 inline
414 const char *
namePython<_PyString_>()
416 {
417 return "string";
418 }
419
420 template <>
421 struct traitsPythonType< String >
422 {
423 typedef _PyString_ Type;
424 };
425
426 template <>
427 inline
428 String
convert< _PyString_, String >(PyObject * pyObj)
430 {
431 String result;
432 #if PY_MAJOR_VERSION >= 3
433 result = convert< _PyUnicode_, String >(pyObj);
434 #else
435 if(isAPython<_PyBytes_>(pyObj))
436 {
437 result = convert<_PyBytes_, String>(pyObj);
438 }
439 else if (isAPython<_PyUnicode_>(pyObj))
440 {
441 result = convert<_PyUnicode_, String>(pyObj);
442 }
443 #endif
444 return result;
445 }
446
447 template <>
448 inline
449 PyObject *
convert< String, _PyString_ >(String s)
451 {
452 #if PY_MAJOR_VERSION >= 3
453 return convert<String, _PyUnicode_>(s);
454 #else
455 return convert<String, _PyBytes_>(s);
456 #endif
457 }
458
459
460 /* PySequence */
461 struct _PySequence_ {};
462
463 template <>
464 inline
465 int
isAPython< _PySequence_ >(PyObject * pyObj)
467 {
468 return PySequence_Check(pyObj);
469 }
470
471 template <>
472 inline
473 const char *
namePython<_PySequence_>()
475 {
476 return "sequence object";
477 }
478
479
480
481 /* Buffer protocol PEP 3118 */
482 template <class CPP_Type, int ndim>
483 static inline
484 bool
isAPythonBufferOf(PyObject * pyObj)
486 {
487 if (PyObject_CheckBuffer(pyObj))
488 {
489 Py_buffer view;
490 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
491 {
492 bool result = (view.ndim == ndim &&
493 view.itemsize == traitsPythonType<CPP_Type>::buf_itemsize &&
494 view.format != NULL &&
495 strcmp(view.format, pyBuf_formats[traitsPythonType<CPP_Type>::buf_format_idx]) == 0);
496 PyBuffer_Release(&view);
497 return result;
498 }
499 else
500 {
// When PyObject_GetBuffer fails, it sets a Python exception (typically BufferError),
// so PyErr_Clear must be called.
503 PyErr_Clear();
504 }
505 }
506 return false;
507 }
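
/* Example (illustrative): a C-contiguous 2-d numpy array of dtype float64 typically
 * matches isAPythonBufferOf<Scalar, 2>, since its buffer format is "d" and its itemsize
 * equals sizeof(Scalar):
 *
 *   if (isAPythonBufferOf<Scalar, 2>(pyObj)) { ... take the fast buffer path ... }
 */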
508
509 template <class PYTHON_Type>
510 static inline
511 int
isAPythonSequenceOf(PyObject * pyObj)
513 {
514 int ok = isAPython<_PySequence_>(pyObj) && (! isAPython< _PyString_ >(pyObj));
515
516 if (ok)
517 {
518 const UnsignedInteger size = PySequence_Size(pyObj);
519 for(UnsignedInteger i = 0; ok && (i < size); ++i)
520 {
521 ScopedPyObjectPointer elt(PySequence_ITEM(pyObj, i));
522 int elt_ok = elt.get() && isAPython<PYTHON_Type>(elt.get());
523 ok *= elt_ok;
524 }
525 }
526
527 return ok;
528 }
529
530
531 template <class PYTHON_Type>
532 static inline
533 void
check(PyObject * pyObj)
535 {
536 if (! isAPython<PYTHON_Type>(pyObj))
537 {
538 throw InvalidArgumentException(HERE) << "Object passed as argument is not a " << namePython<PYTHON_Type>();
539 }
540 }
541
542
543 template <class PYTHON_Type, class CPP_Type>
544 static inline
545 CPP_Type
checkAndConvert(PyObject * pyObj)
547 {
548 check<PYTHON_Type>(pyObj);
549 return convert< PYTHON_Type, CPP_Type >(pyObj);
550 }
551
552
553
554
555
556 template <class T>
557 static inline
558 bool
canConvertCollectionObjectFromPySequence(PyObject * pyObj)
560 {
561 try
562 {
563 check<_PySequence_>(pyObj);
564 }
565 catch (InvalidArgumentException &)
566 {
567 return false;
568 }
569
570 ScopedPyObjectPointer newPyObj(PySequence_Fast(pyObj, ""));
571
572 const UnsignedInteger size = PySequence_Fast_GET_SIZE(newPyObj.get());
573 for(UnsignedInteger i = 0; i < size; ++i)
574 {
575 PyObject * elt = PySequence_Fast_GET_ITEM(newPyObj.get(), i);
576 if (!canConvert< typename traitsPythonType< T >::Type, T >(elt))
577 {
578 return false;
579 }
580 }
581
582 return true;
583 }
584
585
586
587
588 template <class T>
589 static inline
590 Collection<T> *
buildCollectionFromPySequence(PyObject * pyObj, int sz = 0)
592 {
593 check<_PySequence_>(pyObj);
594 ScopedPyObjectPointer newPyObj(PySequence_Fast(pyObj, ""));
595 if (!newPyObj.get()) throw InvalidArgumentException(HERE) << "Not a sequence object";
596 const UnsignedInteger size = PySequence_Fast_GET_SIZE(newPyObj.get());
597 if ((sz != 0) && (sz != (int)size))
598 {
599 throw InvalidArgumentException(HERE) << "Sequence object has incorrect size " << size << ". Must be " << sz << ".";
600 }
601 Collection<T> * p_coll = new Collection< T >(size);
602
603 for(UnsignedInteger i = 0; i < size; ++i)
604 {
605 PyObject * elt = PySequence_Fast_GET_ITEM(newPyObj.get(), i);
606 try
607 {
608 check<typename traitsPythonType< T >::Type>(elt);
609 }
610 catch (InvalidArgumentException &)
611 {
612 delete p_coll;
613 throw;
614 }
615 (*p_coll)[i] = convert< typename traitsPythonType< T >::Type, T >(elt);
616 }
617
618 return p_coll;
619 }
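
/* Usage sketch (illustrative): the returned collection is heap-allocated and owned by the
 * caller, hence the systematic use of Pointer<> in the converters below:
 *
 *   Pointer< Collection<Scalar> > ptr = buildCollectionFromPySequence<Scalar>(pyObj);
 *   Point point(*ptr);   // throws InvalidArgumentException if pyObj is not a sequence of floats
 */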
620
621
622
623
624
625
626 template <>
627 struct traitsPythonType< Point >
628 {
629 typedef _PySequence_ Type;
630 };
631
632 template <>
633 inline
634 Point
convert< _PySequence_, Point >(PyObject * pyObj)
636 {
637 // Check whether pyObj follows the buffer protocol
638 if (PyObject_CheckBuffer(pyObj))
639 {
640 Py_buffer view;
641 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
642 {
643 if (view.ndim == 1 &&
644 view.itemsize == traitsPythonType<Scalar>::buf_itemsize &&
645 view.format != NULL &&
646 strcmp(view.format, pyBuf_formats[traitsPythonType<Scalar>::buf_format_idx]) == 0)
647 {
648 // 1-d contiguous array, we can directly copy memory chunk
649 const Scalar* data = static_cast<const Scalar*>(view.buf);
650 const UnsignedInteger size = view.shape[0];
651 Point point(size);
652 std::copy(data, data + size, (size > 0) ? &point[0] : 0);
653 PyBuffer_Release(&view);
654 return point;
655 }
656 PyBuffer_Release(&view);
657 }
658 else
659 PyErr_Clear();
660 }
661
662 Pointer<Collection<Scalar> > ptr = buildCollectionFromPySequence<Scalar>(pyObj);
663 return Point(*ptr);
664 }
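
/* Example (illustrative): both a Python list such as [1.0, 2.0, 3.0] and a 1-d contiguous
 * numpy array of float64 convert to a Point of dimension 3; the latter goes through the
 * buffer protocol fast path above:
 *
 *   Point p(convert< _PySequence_, Point >(pyObj));
 */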
665
666 template <>
667 inline
668 PyObject *
convert< Point, _PySequence_ >(Point inP)
670 {
671 UnsignedInteger dimension = inP.getDimension();
672 PyObject * point = PyTuple_New(dimension);
673 for (UnsignedInteger i = 0; i < dimension; ++ i)
674 {
675 PyTuple_SetItem(point, i, convert< Scalar, _PyFloat_ >(inP[i]));
676 }
677 return point;
678 }
679
680 template <>
681 inline
682 PyObject *
convert< Description, _PySequence_ >(Description inP)
684 {
685 UnsignedInteger dimension = inP.getSize();
686 PyObject * point = PyTuple_New(dimension);
687 for (UnsignedInteger i = 0; i < dimension; ++ i)
688 {
689 PyTuple_SetItem(point, i, convert< String, _PyString_ >(inP[i]));
690 }
691 return point;
692 }
693
694 template <>
695 struct traitsPythonType< Collection < Complex> >
696 {
697 typedef _PySequence_ Type;
698 };
699
700
701 template <>
702 inline
703 Collection<Complex>
convert<_PySequence_, Collection<Complex> >(PyObject * pyObj)
705 {
706 // Check whether pyObj follows the buffer protocol
707 if (PyObject_CheckBuffer(pyObj))
708 {
709 Py_buffer view;
710 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
711 {
712 if (view.ndim == 1 &&
713 view.itemsize == traitsPythonType<Complex>::buf_itemsize &&
714 view.format != NULL &&
715 strcmp(view.format, pyBuf_formats[traitsPythonType<Complex>::buf_format_idx]) == 0)
716 {
717 // 1-d contiguous array, we can directly copy memory chunk
718 const Complex* data = static_cast<const Complex*>(view.buf);
719 const UnsignedInteger size = view.shape[0];
720 Collection<Complex> result(size);
721 std::copy(data, data + size, (size > 0) ? &result[0] : 0);
722 PyBuffer_Release(&view);
723 return result;
724 }
725 PyBuffer_Release(&view);
726 }
727 else
728 PyErr_Clear();
729 }
730
731 Pointer<Collection<Complex> > ptr = buildCollectionFromPySequence<Complex>(pyObj);
732 return Collection<Complex>(*ptr);
733 }
734
735 inline
void handleException()
737 {
738 PyObject * exception = PyErr_Occurred();
739
740 if (exception)
741 {
742 PyObject *type = NULL, *value = NULL, *traceback = NULL;
743 PyErr_Fetch(&type, &value, &traceback);
744
745 String exceptionMessage("Python exception");
746
747 // get the name of the exception
748 if (type)
749 {
750 ScopedPyObjectPointer nameObj(PyObject_GetAttrString(type, "__name__"));
751 if (nameObj.get())
752 {
753 String typeString = checkAndConvert< _PyString_, String >(nameObj.get());
754 exceptionMessage += ": " + typeString;
755 }
756 }
757
// try to get the error message; value and traceback can be NULL
759 if (value)
760 {
761 ScopedPyObjectPointer valueObj(PyObject_Str(value));
762 if (valueObj.get())
763 {
764 String valueString = checkAndConvert< _PyString_, String >(valueObj.get());
765 exceptionMessage += ": " + valueString;
766 }
767 }
768
769 PyErr_Restore(type, value, traceback);
770 PyErr_Print();
771 throw InternalException(HERE) << exceptionMessage;
772 }
773 }
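
/* Typical usage (a sketch, assuming 'callable' and 'args' are valid Python objects):
 *
 *   ScopedPyObjectPointer result(PyObject_CallObject(callable, args));
 *   handleException();   // rethrows any pending Python error as an InternalException
 */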
774
775
776
777 template <>
778 struct traitsPythonType< Sample >
779 {
780 typedef _PySequence_ Type;
781 };
782
783 template <>
784 inline
785 Sample
convert< _PySequence_, Sample >(PyObject * pyObj)
787 {
788 // Check whether pyObj follows the buffer protocol
789 if (PyObject_CheckBuffer(pyObj))
790 {
791 Py_buffer view;
792 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
793 {
794 if (view.ndim == 2 &&
795 view.itemsize == traitsPythonType<Scalar>::buf_itemsize &&
796 view.format != NULL &&
797 strcmp(view.format, pyBuf_formats[traitsPythonType<Scalar>::buf_format_idx]) == 0)
798 {
799 const Scalar* data = static_cast<const Scalar*>(view.buf);
800 const UnsignedInteger size = view.shape[0];
801 const UnsignedInteger dimension = view.shape[1];
802 Sample sample(size, dimension);
803 if (PyBuffer_IsContiguous(&view, 'C'))
804 {
805 // 2-d contiguous array in C notation, we can directly copy memory chunk
806 std::copy(data, data + size * dimension, (Scalar *)sample.data());
807 }
808 else
809 {
810 for (UnsignedInteger j = 0; j < dimension; ++j)
811 for(UnsignedInteger i = 0; i < size; ++i, ++data)
812 sample(i, j) = *data;
813 }
814 PyBuffer_Release(&view);
815 return sample;
816 }
817 PyBuffer_Release(&view);
818 }
819 else
820 PyErr_Clear();
821 }
822
823 // use the same conversion function for numpy array/matrix, knowing numpy matrix is not a sequence
824 if (PyObject_HasAttrString(pyObj, const_cast<char *>("shape")))
825 {
826 ScopedPyObjectPointer shapeObj(PyObject_GetAttrString(pyObj, "shape"));
if (!shapeObj.get()) throw InvalidArgumentException(HERE) << "Cannot get the shape attribute";
828
829 Indices shape(checkAndConvert< _PySequence_, Indices >(shapeObj.get()));
830 if (shape.getSize() == 2)
831 {
832 UnsignedInteger size = shape[0];
833 UnsignedInteger dimension = shape[1];
834 ScopedPyObjectPointer askObj(PyTuple_New(2));
835 ScopedPyObjectPointer methodObj(convert< String, _PyString_ >("__getitem__"));
836 Sample sample(size, dimension);
837 for (UnsignedInteger i = 0; i < size; ++ i)
838 {
839 PyTuple_SetItem(askObj.get(), 0, convert< UnsignedInteger, _PyInt_ >(i));
840 for (UnsignedInteger j = 0; j < dimension; ++ j)
841 {
842 PyTuple_SetItem(askObj.get(), 1, convert< UnsignedInteger, _PyInt_ >(j));
843 ScopedPyObjectPointer elt(PyObject_CallMethodObjArgs(pyObj, methodObj.get(), askObj.get(), NULL));
844 if (elt.get())
845 {
846 sample(i, j) = checkAndConvert<_PyFloat_, Scalar>(elt.get());
847 }
848 }
849 }
850 return sample;
851 }
852 else if (shape.getSize() == 1)
853 throw InvalidArgumentException(HERE) << "Invalid array dimension 1 is ambiguous, please set the dimension explicitly";
854 else
855 throw InvalidArgumentException(HERE) << "Invalid array dimension: " << shape.getSize() << " is greater than 2";
856 }
857 check<_PySequence_>(pyObj);
858 ScopedPyObjectPointer newPyObj(PySequence_Fast(pyObj, ""));
859 if (!newPyObj.get()) throw InvalidArgumentException(HERE) << "Not a sequence object";
860 const UnsignedInteger size = PySequence_Fast_GET_SIZE(newPyObj.get());
861 if (size == 0) return Sample();
862
863 // Get dimension of first point
864 PyObject * firstPoint = PySequence_Fast_GET_ITEM(newPyObj.get(), 0);
865 check<_PySequence_>(firstPoint);
866 ScopedPyObjectPointer newPyFirstObj(PySequence_Fast(firstPoint, ""));
867 const UnsignedInteger dimension = PySequence_Fast_GET_SIZE(newPyFirstObj.get());
868 // Allocate result Sample
869 Sample sample(size, dimension);
870 for(UnsignedInteger i = 0; i < size; ++i)
871 {
872 PyObject * pointObj = PySequence_Fast_GET_ITEM(newPyObj.get(), i);
873 ScopedPyObjectPointer newPyPointObj(PySequence_Fast(pointObj, ""));
874 if (i > 0)
875 {
876 // Check that object is a sequence, and has the right size
877 check<_PySequence_>(pointObj);
878 const UnsignedInteger subDim = static_cast<UnsignedInteger>(PySequence_Fast_GET_SIZE(newPyPointObj.get()));
879 if (subDim != dimension)
880 throw InvalidArgumentException(HERE) << "Inner sequences must have the same dimension";
881 }
882 for(UnsignedInteger j = 0; j < dimension; ++j)
883 {
884 PyObject * value = PySequence_Fast_GET_ITEM(newPyPointObj.get(), j);
885 sample(i, j) = checkAndConvert<_PyFloat_, Scalar>(value);
886 }
887 }
888 return sample;
889 }
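
/* Example (illustrative): a nested list such as [[1.0, 2.0], [3.0, 4.0]] converts to a
 * Sample of size 2 and dimension 2; a 2-d numpy array of float64 takes the buffer
 * protocol fast path above instead of the per-item fallback:
 *
 *   Sample s(convert< _PySequence_, Sample >(pyObj));
 */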
890
891
892 template <>
893 inline
894 PyObject *
convert< Sample, _PySequence_ >(Sample sample)
896 {
897 const UnsignedInteger size = sample.getSize();
898 PyObject * pyObj = PyTuple_New(size);
899 for (UnsignedInteger i = 0; i < size; ++ i)
900 PyTuple_SetItem(pyObj, i, convert< Point, _PySequence_ >(sample[i]));
901 return pyObj;
902 }
903
904
905 template <>
906 struct traitsPythonType< Collection< UnsignedInteger > >
907 {
908 typedef _PySequence_ Type;
909 };
910
911 template <>
912 inline
913 Collection< UnsignedInteger >
convert< _PySequence_, Collection<UnsignedInteger> >(PyObject * pyObj)
915 {
916 // Check whether pyObj follows the buffer protocol
917 if (PyObject_CheckBuffer(pyObj))
918 {
919 Py_buffer view;
920 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
921 {
922 if (view.ndim == 1 &&
923 view.itemsize == traitsPythonType<UnsignedInteger>::buf_itemsize &&
924 view.format != NULL &&
925 strcmp(view.format, pyBuf_formats[traitsPythonType<UnsignedInteger>::buf_format_idx]) == 0)
926 {
927 // 1-d contiguous array, we can directly copy memory chunk
928 const UnsignedInteger* data = static_cast<const UnsignedInteger*>(view.buf);
929 const UnsignedInteger size = view.shape[0];
930 Collection<UnsignedInteger> result(size);
931 std::copy(data, data + size, &result[0]);
932 PyBuffer_Release(&view);
933 return result;
934 }
935 PyBuffer_Release(&view);
936 }
937 else
938 PyErr_Clear();
939 }
940
941 Pointer<Collection<UnsignedInteger> > ptr = buildCollectionFromPySequence<UnsignedInteger>(pyObj);
942 return Collection<UnsignedInteger>(ptr->begin(), ptr->end());
943 }
944
945
946 template <>
947 struct traitsPythonType< Indices >
948 {
949 typedef _PySequence_ Type;
950 };
951
952 template <>
953 inline
954 Indices
convert< _PySequence_, Indices >(PyObject * pyObj)
956 {
957 Pointer<Collection<UnsignedInteger> > ptr = buildCollectionFromPySequence<UnsignedInteger>(pyObj);
958 return Indices(ptr->begin(), ptr->end());
959 }
960
961 template <>
962 inline
963 PyObject *
convert< Indices, _PySequence_ >(Indices inP)
965 {
966 UnsignedInteger dimension = inP.getSize();
967 PyObject * point = PyTuple_New(dimension);
968 for (UnsignedInteger i = 0; i < dimension; ++ i)
969 {
970 PyTuple_SetItem(point, i, convert< UnsignedInteger, _PyInt_ >(inP[i]));
971 }
972 return point;
973 }
974
975 template <>
976 struct traitsPythonType< IndicesCollection >
977 {
978 typedef _PySequence_ Type;
979 };
980
981 template <>
982 inline
983 IndicesCollection
convert< _PySequence_, IndicesCollection >(PyObject * pyObj)
985 {
986 // Check whether pyObj follows the buffer protocol
987 if (PyObject_CheckBuffer(pyObj))
988 {
989 Py_buffer view;
990 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
991 {
992 if (view.ndim == 2 &&
993 view.itemsize == traitsPythonType<UnsignedInteger>::buf_itemsize &&
994 view.format != NULL &&
995 strcmp(view.format, pyBuf_formats[traitsPythonType<UnsignedInteger>::buf_format_idx]) == 0)
996 {
997 const UnsignedInteger* data = static_cast<const UnsignedInteger*>(view.buf);
998 const UnsignedInteger size = view.shape[0];
999 const UnsignedInteger dimension = view.shape[1];
1000 IndicesCollection indices(size, dimension);
1001 if (PyBuffer_IsContiguous(&view, 'C'))
1002 {
1003 // 2-d contiguous array in C notation, we can directly copy memory chunk
1004 std::copy(data, data + size * dimension, &indices(0, 0));
1005 }
1006 else
1007 {
1008 for (UnsignedInteger j = 0; j < dimension; ++j)
1009 for(UnsignedInteger i = 0; i < size; ++i, ++data)
1010 indices(i, j) = *data;
1011 }
1012 PyBuffer_Release(&view);
1013 return indices;
1014 }
1015 PyBuffer_Release(&view);
1016 }
1017 else
1018 PyErr_Clear();
1019 }
1020
1021 // use the same conversion function for numpy array/matrix, knowing numpy matrix is not a sequence
1022 if (PyObject_HasAttrString(pyObj, const_cast<char *>("shape")))
1023 {
1024 ScopedPyObjectPointer shapeObj(PyObject_GetAttrString(pyObj, "shape"));
1025 if (shapeObj.get())
1026 {
1027 Indices shape(checkAndConvert< _PySequence_, Indices >(shapeObj.get()));
1028 if (shape.getSize() == 2)
1029 {
1030 UnsignedInteger size = shape[0];
1031 UnsignedInteger dimension = shape[1];
1032 ScopedPyObjectPointer askObj(PyTuple_New(2));
1033 ScopedPyObjectPointer methodObj(convert< String, _PyString_ >("__getitem__"));
1034 IndicesCollection indices(size, dimension);
1035 for (UnsignedInteger i = 0; i < size; ++ i)
1036 {
1037 PyTuple_SetItem(askObj.get(), 0, convert< UnsignedInteger, _PyInt_ >(i));
1038 for (UnsignedInteger j = 0; j < dimension; ++ j)
1039 {
1040 PyTuple_SetItem(askObj.get(), 1, convert< UnsignedInteger, _PyInt_ >(j));
1041 ScopedPyObjectPointer elt(PyObject_CallMethodObjArgs(pyObj, methodObj.get(), askObj.get(), NULL));
1042 if (elt.get())
1043 {
1044 indices(i, j) = checkAndConvert<_PyInt_, UnsignedInteger>(elt.get());
1045 }
1046 }
1047 }
1048 return indices;
1049 }
1050 else
1051 throw InvalidArgumentException(HERE) << "Invalid array dimension: " << shape.getSize();
1052 }
1053 }
1054 // This object is a sequence; unlike Matrix and Sample, dimension is not constant.
1055 check<_PySequence_>(pyObj);
1056 ScopedPyObjectPointer newPyObj(PySequence_Fast(pyObj, ""));
1057 if (!newPyObj.get()) throw InvalidArgumentException(HERE) << "Not a sequence object";
1058 const UnsignedInteger size = PySequence_Fast_GET_SIZE(newPyObj.get());
1059 if (size == 0) return IndicesCollection();
1060 // Allocate a Collection of Indices
1061 Collection<Indices> coll(size);
1062 for(UnsignedInteger i = 0; i < size; ++i)
1063 {
1064 PyObject * indicesObj = PySequence_Fast_GET_ITEM(newPyObj.get(), i);
1065 ScopedPyObjectPointer newPyIndicesObj(PySequence_Fast(indicesObj, ""));
1066 // Check that object is a sequence
1067 check<_PySequence_>(indicesObj);
1068 const UnsignedInteger dimension = PySequence_Fast_GET_SIZE(newPyIndicesObj.get());
1069 Indices newIndices(dimension);
1070 for(UnsignedInteger j = 0; j < dimension; ++j)
1071 {
1072 PyObject * value = PySequence_Fast_GET_ITEM(newPyIndicesObj.get(), j);
1073 newIndices[j] = checkAndConvert<_PyInt_, UnsignedInteger>(value);
1074 }
1075 coll[i] = newIndices;
1076 }
1077 return IndicesCollection(coll);
1078 }
1079
1080
1081 template <>
1082 struct traitsPythonType< Description >
1083 {
1084 typedef _PySequence_ Type;
1085 };
1086
1087 template <>
1088 inline
1089 Description
convert<_PySequence_, Description>(PyObject * pyObj)
1091 {
1092 Pointer<Collection<String> > ptr = buildCollectionFromPySequence<String>(pyObj);
1093 return Description(*ptr);
1094 }
1095
1096
1097
1098 template <>
1099 struct traitsPythonType< Collection< Scalar > >
1100 {
1101 typedef _PySequence_ Type;
1102 };
1103
1104 template <>
1105 inline
1106 Collection<Scalar>
convert< _PySequence_, Collection<Scalar> >(PyObject * pyObj)
1108 {
1109 // Check whether pyObj follows the buffer protocol
1110 if (PyObject_CheckBuffer(pyObj))
1111 {
1112 Py_buffer view;
1113 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
1114 {
1115 if (view.ndim == 1 &&
1116 view.itemsize == traitsPythonType<Scalar>::buf_itemsize &&
1117 view.format != NULL &&
1118 strcmp(view.format, pyBuf_formats[traitsPythonType<Scalar>::buf_format_idx]) == 0)
1119 {
1120 // 1-d contiguous array, we can directly copy memory chunk
1121 const Scalar* data = static_cast<const Scalar*>(view.buf);
1122 const UnsignedInteger size = view.shape[0];
1123 Collection<Scalar> result(size);
1124 std::copy(data, data + size, &result[0]);
1125 PyBuffer_Release(&view);
1126 return result;
1127 }
1128 PyBuffer_Release(&view);
1129 }
1130 else
1131 PyErr_Clear();
1132 }
1133 Pointer<Collection<Scalar> > ptr = buildCollectionFromPySequence<Scalar>(pyObj);
1134 return Collection<Scalar>(*ptr);
1135 }
1136
1137
1138
1139 template <>
1140 struct traitsPythonType< MatrixImplementation >
1141 {
1142 typedef _PySequence_ Type;
1143 };
1144
1145
1146
1147 template <>
1148 inline
1149 MatrixImplementation*
convert< _PySequence_, MatrixImplementation* >(PyObject * pyObj)
1151 {
1152 MatrixImplementation *p_implementation = 0;
1153 // Check whether pyObj follows the buffer protocol
1154 if (PyObject_CheckBuffer(pyObj))
1155 {
1156 Py_buffer view;
1157 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
1158 {
1159 if (view.ndim == 2 &&
1160 view.itemsize == traitsPythonType<Scalar>::buf_itemsize &&
1161 view.format != NULL &&
1162 strcmp(view.format, pyBuf_formats[traitsPythonType<Scalar>::buf_format_idx]) == 0)
1163 {
1164 const Scalar* data = static_cast<const Scalar*>(view.buf);
1165 const UnsignedInteger nbRows = view.shape[0];
1166 const UnsignedInteger nbColumns = view.shape[1];
1167 p_implementation = new MatrixImplementation(nbRows, nbColumns);
1168 if (PyBuffer_IsContiguous(&view, 'F'))
1169 {
1170 // 2-d contiguous array in Fortran notation, we can directly copy memory chunk
1171 std::copy(data, data + nbRows * nbColumns, &p_implementation->operator()(0, 0));
1172 }
1173 else
1174 {
1175 for(UnsignedInteger i = 0; i < nbRows; ++i)
1176 for (UnsignedInteger j = 0; j < nbColumns; ++j, ++data)
1177 p_implementation->operator()(i, j) = *data;
1178 }
1179 PyBuffer_Release(&view);
1180 return p_implementation;
1181 }
1182 PyBuffer_Release(&view);
1183 }
1184 else
1185 PyErr_Clear();
1186 }
1187
1188 // use the same conversion function for numpy array/matrix, knowing numpy matrix is not a sequence
1189 if (PyObject_HasAttrString(pyObj, const_cast<char *>("shape")))
1190 {
1191 ScopedPyObjectPointer shapeObj(PyObject_GetAttrString(pyObj, "shape"));
1192 if (shapeObj.get())
1193 {
1194 Indices shape(checkAndConvert< _PySequence_, Indices >(shapeObj.get()));
1195 if (shape.getSize() == 2)
1196 {
1197 UnsignedInteger nbRows = shape[0];
1198 UnsignedInteger nbColumns = shape[1];
1199 ScopedPyObjectPointer askObj(PyTuple_New(2));
1200 ScopedPyObjectPointer methodObj(convert< String, _PyString_ >("__getitem__"));
1201 p_implementation = new MatrixImplementation(nbRows, nbColumns);
1202 for (UnsignedInteger i = 0; i < nbRows; ++ i)
1203 {
1204 PyTuple_SetItem(askObj.get(), 0, convert< UnsignedInteger, _PyInt_ >(i));
1205 for (UnsignedInteger j = 0; j < nbColumns; ++ j)
1206 {
1207 PyTuple_SetItem(askObj.get(), 1, convert< UnsignedInteger, _PyInt_ >(j));
1208 ScopedPyObjectPointer elt(PyObject_CallMethodObjArgs(pyObj, methodObj.get(), askObj.get(), NULL));
1209 if (elt.get())
1210 {
1211 try
1212 {
1213 p_implementation->operator()(i, j) = checkAndConvert<_PyFloat_, Scalar>(elt.get());
1214 }
1215 catch (InvalidArgumentException &)
1216 {
1217 delete p_implementation;
1218 throw;
1219 }
1220 }
1221 }
1222 }
1223 }
1224 else
1225 throw InvalidArgumentException(HERE) << "Invalid array dimension: " << shape.getSize();
1226 }
1227 }
1228 else if (PyObject_HasAttrString(pyObj, const_cast<char *>("getNbColumns")))
1229 {
// case of conversion from XMatrix to YMatrix
// X could be Square, Triangular, Identity...
// YMatrix might be Matrix or one of its derived classes
ScopedPyObjectPointer columnsObj(PyObject_CallMethod (pyObj,
1234 const_cast<char *>("getNbColumns"),
1235 const_cast<char *>("()")));
1236 ScopedPyObjectPointer rowsObj(PyObject_CallMethod (pyObj,
1237 const_cast<char *>("getNbRows"),
1238 const_cast<char *>("()")));
1239 ScopedPyObjectPointer implObj(PyObject_CallMethod (pyObj,
1240 const_cast<char *>("getImplementation"),
1241 const_cast<char *>("()")));
1242 Pointer< Collection< Scalar > > ptr = buildCollectionFromPySequence< Scalar >(implObj.get());
UnsignedInteger nbColumns = checkAndConvert< _PyInt_, UnsignedInteger >(columnsObj.get());
1244 UnsignedInteger nbRows = checkAndConvert< _PyInt_, UnsignedInteger >(rowsObj.get());
1245 p_implementation = new MatrixImplementation(nbRows, nbColumns, *ptr);
1246 }
1247 else
1248 {
1249 // try to convert from a sequence of sequences
1250 Pointer< Collection< Point > > ptr = buildCollectionFromPySequence< Point >(pyObj);
1251 Sample sample(*ptr);
1252 UnsignedInteger nbRows = sample.getSize();
1253 UnsignedInteger nbColumns = sample.getDimension();
1254 p_implementation = new MatrixImplementation(nbRows, nbColumns);
1255 for (UnsignedInteger i = 0; i < nbRows; ++ i)
1256 for (UnsignedInteger j = 0; j < nbColumns; ++ j)
1257 p_implementation->operator()(i, j) = sample(i, j);
1258 }
1259 return p_implementation;
1260 }
1261
1262
1263
1264 template <>
1265 inline
1266 Matrix
convert< _PySequence_, Matrix >(PyObject * pyObj)
1268 {
1269 MatrixImplementation *p_implementation = convert< _PySequence_, MatrixImplementation* >(pyObj);
1270 return Matrix(p_implementation);
1271 }
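
/* Example (illustrative): a sequence of rows such as [[1.0, 2.0], [3.0, 4.0]] converts to a
 * 2x2 Matrix; Fortran-contiguous 2-d float64 buffers are copied directly in the code above:
 *
 *   Matrix m(convert< _PySequence_, Matrix >(pyObj));
 */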
1272
1273
1274
1275 template <>
1276 inline
1277 SquareMatrix
convert< _PySequence_, SquareMatrix >(PyObject * pyObj)
1279 {
1280 MatrixImplementation *p_implementation = convert< _PySequence_, MatrixImplementation* >(pyObj);
1281 if (p_implementation->getNbRows() != p_implementation->getNbColumns())
1282 throw InvalidArgumentException(HERE) << "The matrix is not square";
1283 return SquareMatrix(p_implementation);
1284 }
1285
1286
1287
1288 template <>
1289 inline
1290 TriangularMatrix
convert< _PySequence_, TriangularMatrix >(PyObject * pyObj)
1292 {
1293 MatrixImplementation *p_implementation = convert< _PySequence_, MatrixImplementation* >(pyObj);
1294 if (!(p_implementation->isTriangular(true) || p_implementation->isTriangular(false)))
1295 throw InvalidArgumentException(HERE) << "The matrix is not triangular";
1296 return TriangularMatrix(p_implementation, p_implementation->isTriangular(true));
1297 }
1298
1299
1300
1301 template <>
1302 inline
1303 SymmetricMatrix
convert< _PySequence_, SymmetricMatrix >(PyObject * pyObj)
1305 {
1306 MatrixImplementation *p_implementation = convert< _PySequence_, MatrixImplementation* >(pyObj);
1307 if (!p_implementation->isSymmetric())
1308 throw InvalidArgumentException(HERE) << "The matrix is not symmetric";
1309 return SymmetricMatrix(p_implementation);
1310 }
1311
1312
1313
1314 template <>
1315 inline
1316 CovarianceMatrix
convert< _PySequence_, CovarianceMatrix >(PyObject * pyObj)
1318 {
1319 MatrixImplementation *p_implementation = convert< _PySequence_, MatrixImplementation* >(pyObj);
1320 if (!p_implementation->isSymmetric())
1321 throw InvalidArgumentException(HERE) << "The matrix is not symmetric";
1322 // SPD check is too expensive
1323 return CovarianceMatrix(p_implementation);
1324 }
1325
1326
1327
1328 template <>
1329 inline
1330 CorrelationMatrix
convert< _PySequence_, CorrelationMatrix >(PyObject * pyObj)
1332 {
1333 MatrixImplementation *p_implementation = convert< _PySequence_, MatrixImplementation* >(pyObj);
1334 if (!p_implementation->isSymmetric())
1335 throw InvalidArgumentException(HERE) << "The matrix is not symmetric";
1336 // SPD check is too expensive
1337 if (!p_implementation->hasUnitRange())
1338 throw InvalidArgumentException(HERE) << "The matrix range is not (-1;1)";
1339 return CorrelationMatrix(p_implementation);
1340 }
1341
1342
1343
1344 template <>
1345 struct traitsPythonType< TensorImplementation >
1346 {
1347 typedef _PySequence_ Type;
1348 };
1349
1350
1351
1352 template <>
1353 inline
1354 TensorImplementation*
convert< _PySequence_, TensorImplementation* >(PyObject * pyObj)
1356 {
1357 // Check whether pyObj follows the buffer protocol
1358 if (PyObject_CheckBuffer(pyObj))
1359 {
1360 Py_buffer view;
1361 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
1362 {
1363 if (view.ndim == 3 &&
1364 view.itemsize == traitsPythonType<Scalar>::buf_itemsize &&
1365 view.format != NULL &&
1366 strcmp(view.format, pyBuf_formats[traitsPythonType<Scalar>::buf_format_idx]) == 0)
1367 {
1368 const Scalar* data = static_cast<const Scalar*>(view.buf);
1369 const UnsignedInteger nbRows = view.shape[0];
1370 const UnsignedInteger nbColumns = view.shape[1];
1371 const UnsignedInteger nbSheets = view.shape[2];
1372 TensorImplementation *p_implementation = new TensorImplementation(nbRows, nbColumns, nbSheets);
1373 if (PyBuffer_IsContiguous(&view, 'F'))
1374 {
1375 // 3-d contiguous array in Fortran notation, we can directly copy memory chunk
1376 std::copy(data, data + nbRows * nbColumns * nbSheets, &p_implementation->operator()(0, 0, 0));
1377 }
1378 else
1379 {
1380 for(UnsignedInteger i = 0; i < nbRows; ++i)
1381 for (UnsignedInteger j = 0; j < nbColumns; ++j)
1382 for (UnsignedInteger k = 0; k < nbSheets; ++k, ++data)
1383 p_implementation->operator()(i, j, k) = *data;
1384 }
1385 PyBuffer_Release(&view);
1386 return p_implementation;
1387 }
1388 PyBuffer_Release(&view);
1389 }
1390 else
1391 PyErr_Clear();
1392 }
1393
1394 Pointer< Collection< Sample > > ptr = buildCollectionFromPySequence< Sample >(pyObj);
1395 UnsignedInteger nbRows = ptr->getSize();
1396 UnsignedInteger nbColumns = ptr->getSize() > 0 ? (*ptr)[0].getSize() : 0;
1397 UnsignedInteger nbSheets = ptr->getSize() > 0 ? (*ptr)[0].getDimension() : 0;
1398 TensorImplementation *p_implementation = new TensorImplementation(nbRows, nbColumns, nbSheets);
1399 for (UnsignedInteger i = 0; i < nbRows; ++ i)
1400 for (UnsignedInteger j = 0; j < nbColumns; ++ j)
1401 for (UnsignedInteger k = 0; k < nbSheets; ++ k)
1402 p_implementation->operator()(i, j, k) = (*ptr)[i](j, k);
1403 return p_implementation;
1404 }
1405
1406
1407
1408 template <>
1409 inline
1410 Tensor
convert< _PySequence_, Tensor >(PyObject * pyObj)
1412 {
1413 TensorImplementation *p_implementation = convert<_PySequence_, TensorImplementation*>(pyObj);
1414 return Tensor(p_implementation);
1415 }
1416
1417
1418
1419 template <>
1420 inline
1421 SymmetricTensor
convert< _PySequence_, SymmetricTensor >(PyObject * pyObj)
1423 {
1424 TensorImplementation *p_implementation = convert< _PySequence_, TensorImplementation* >(pyObj);
1425 if (!p_implementation->isSymmetric())
1426 throw InvalidArgumentException(HERE) << "The tensor is not symmetric";
1427 return SymmetricTensor(p_implementation);
1428 }
1429
1430
1431
1432 template <>
1433 struct traitsPythonType< ComplexMatrixImplementation* >
1434 {
1435 typedef _PySequence_ Type;
1436 };
1437
1438
1439
1440 template <>
1441 inline
1442 ComplexMatrixImplementation*
convert< _PySequence_, ComplexMatrixImplementation* >(PyObject * pyObj)
1444 {
1445 // Check whether pyObj follows the buffer protocol
1446 if (PyObject_CheckBuffer(pyObj))
1447 {
1448 Py_buffer view;
1449 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
1450 {
1451 if (view.ndim == 2 &&
1452 view.itemsize == traitsPythonType<Complex>::buf_itemsize &&
1453 view.format != NULL &&
1454 strcmp(view.format, pyBuf_formats[traitsPythonType<Complex>::buf_format_idx]) == 0)
1455 {
1456 const Complex* data = static_cast<const Complex*>(view.buf);
1457 const UnsignedInteger nbRows = view.shape[0];
1458 const UnsignedInteger nbColumns = view.shape[1];
1459 ComplexMatrixImplementation *p_implementation = new ComplexMatrixImplementation(nbRows, nbColumns);
1460 if (PyBuffer_IsContiguous(&view, 'F'))
1461 {
1462 // 2-d contiguous array in Fortran notation, we can directly copy memory chunk
1463 std::copy(data, data + nbRows * nbColumns, &p_implementation->operator()(0, 0));
1464 }
1465 else
1466 {
1467 for(UnsignedInteger i = 0; i < nbRows; ++i)
1468 for (UnsignedInteger j = 0; j < nbColumns; ++j, ++data)
1469 p_implementation->operator()(i, j) = *data;
1470 }
1471 PyBuffer_Release(&view);
1472 return p_implementation;
1473 }
1474 PyBuffer_Release(&view);
1475 }
1476 else
1477 PyErr_Clear();
1478 }
1479
1480 // use the same conversion function for numpy array/matrix, knowing numpy matrix is not a sequence
1481 if (PyObject_HasAttrString(pyObj, const_cast<char *>("shape")))
1482 {
1483 ScopedPyObjectPointer shapeObj(PyObject_GetAttrString(pyObj, "shape"));
1484 if (shapeObj.get())
1485 {
1486 Indices shape(checkAndConvert< _PySequence_, Indices >(shapeObj.get()));
1487 if (shape.getSize() == 2)
1488 {
1489 UnsignedInteger nbRows = shape[0];
1490 UnsignedInteger nbColumns = shape[1];
1491 ScopedPyObjectPointer askObj(PyTuple_New(2));
1492 ScopedPyObjectPointer methodObj(convert< String, _PyString_ >("__getitem__"));
1493 ComplexMatrixImplementation *p_implementation = new ComplexMatrixImplementation(nbRows, nbColumns);
1494 for (UnsignedInteger i = 0; i < nbRows; ++ i)
1495 {
1496 PyTuple_SetItem(askObj.get(), 0, convert< UnsignedInteger, _PyInt_ >(i));
1497 for (UnsignedInteger j = 0; j < nbColumns; ++ j)
1498 {
1499 PyTuple_SetItem(askObj.get(), 1, convert< UnsignedInteger, _PyInt_ >(j));
1500 ScopedPyObjectPointer elt(PyObject_CallMethodObjArgs(pyObj, methodObj.get(), askObj.get(), NULL));
1501 if (elt.get())
1502 {
1503 try
1504 {
1505 p_implementation->operator()(i, j) = checkAndConvert<_PyComplex_, Complex>(elt.get());
1506 }
1507 catch (InvalidArgumentException &)
1508 {
1509 delete p_implementation;
1510 throw;
1511 }
1512 }
1513 }
1514 }
1515 return p_implementation;
1516 }
1517 else
1518 throw InvalidArgumentException(HERE) << "Invalid array dimension: " << shape.getSize();
1519 }
1520 }
1521
// case of conversion from XMatrix to YMatrix
// X could be Square, Triangular, Identity...
// YMatrix might be Matrix or one of its derived classes
1525 if (PyObject_HasAttrString(pyObj, const_cast<char *>("getNbColumns")))
1526 {
ScopedPyObjectPointer columnsObj(PyObject_CallMethod (pyObj,
1528 const_cast<char *>("getNbColumns"),
1529 const_cast<char *>("()")));
1530 ScopedPyObjectPointer rowsObj(PyObject_CallMethod (pyObj,
1531 const_cast<char *>("getNbRows"),
1532 const_cast<char *>("()")));
1533 ScopedPyObjectPointer implObj(PyObject_CallMethod (pyObj,
1534 const_cast<char *>("getImplementation"),
1535 const_cast<char *>("()")));
1536 Pointer< Collection< Complex > > ptr = buildCollectionFromPySequence< Complex >(implObj.get());
UnsignedInteger nbColumns = checkAndConvert< _PyInt_, UnsignedInteger >(columnsObj.get());
1538 UnsignedInteger nbRows = checkAndConvert< _PyInt_, UnsignedInteger >(rowsObj.get());
1539 ComplexMatrixImplementation *p_implementation = new ComplexMatrixImplementation(nbRows, nbColumns, *ptr);
1540 return p_implementation;
1541 }
1542
1543 // else try to convert from a sequence of sequences
1544 Pointer< Collection< Collection< Complex > > > ptr = buildCollectionFromPySequence< Collection< Complex > >(pyObj);
1545 UnsignedInteger nbRows = ptr->getSize();
1546 UnsignedInteger nbColumns = ptr->getSize() > 0 ? (*ptr)[0].getSize() : 0;
1547 ComplexMatrixImplementation *p_implementation = new ComplexMatrixImplementation(nbRows, nbColumns);
1548 for (UnsignedInteger i = 0; i < nbRows; ++ i)
1549 for (UnsignedInteger j = 0; j < nbColumns; ++ j)
1550 p_implementation->operator()(i, j) = (*ptr)[i][j];
1551 return p_implementation;
1552 }
1553
1554
1555
1556 template <>
1557 inline
1558 ComplexMatrix
convert< _PySequence_, ComplexMatrix >(PyObject * pyObj)
1560 {
1561 ComplexMatrixImplementation *p_implementation = convert< _PySequence_, ComplexMatrixImplementation* >(pyObj);
1562 return ComplexMatrix(p_implementation);
1563 }
1564
1565
1566
1567 template <>
1568 inline
1569 SquareComplexMatrix
convert< _PySequence_, SquareComplexMatrix >(PyObject * pyObj)
1571 {
1572 ComplexMatrixImplementation *p_implementation = convert< _PySequence_, ComplexMatrixImplementation* >(pyObj);
1573 if (p_implementation->getNbRows() != p_implementation->getNbColumns())
1574 throw InvalidArgumentException(HERE) << "The matrix is not square";
1575 return SquareComplexMatrix(p_implementation);
1576 }
1577
1578
1579
1580 template <>
1581 inline
1582 TriangularComplexMatrix
convert< _PySequence_, TriangularComplexMatrix >(PyObject * pyObj)
1584 {
1585 ComplexMatrixImplementation *p_implementation = convert< _PySequence_, ComplexMatrixImplementation* >(pyObj);
1586 Bool lower = p_implementation->isTriangular(true);
1587 if (!lower && !p_implementation->isTriangular(false))
1588 throw InvalidArgumentException(HERE) << "The matrix is not triangular";
1589 return TriangularComplexMatrix(p_implementation, lower);
1590 }
1591
1592
1593 template <>
1594 struct traitsPythonType< HermitianMatrix >
1595 {
1596 typedef _PySequence_ Type;
1597 };
1598
1599 template <>
1600 inline
1601 HermitianMatrix
convert< _PySequence_, HermitianMatrix >(PyObject * pyObj)
1603 {
1604 ComplexMatrixImplementation *p_implementation = convert< _PySequence_, ComplexMatrixImplementation* >(pyObj);
1605 if (!p_implementation->isHermitian())
1606 throw InvalidArgumentException(HERE) << "The matrix is not hermitian";
1607 return HermitianMatrix(p_implementation);
1608 }
1609
1610 template <>
1611 struct traitsPythonType< ComplexTensorImplementation >
1612 {
1613 typedef _PySequence_ Type;
1614 };
1615
1616
1617
1618 template <>
1619 inline
1620 ComplexTensorImplementation*
convert< _PySequence_, ComplexTensorImplementation* >(PyObject * pyObj)
1622 {
1623 ComplexTensorImplementation *p_implementation = 0;
1624
1625 // Check whether pyObj follows the buffer protocol
1626 if (PyObject_CheckBuffer(pyObj))
1627 {
1628 Py_buffer view;
1629 if (PyObject_GetBuffer(pyObj, &view, PyBUF_FORMAT | PyBUF_ND | PyBUF_ANY_CONTIGUOUS) >= 0)
1630 {
1631 if (view.ndim == 3 &&
1632 view.itemsize == traitsPythonType<Complex>::buf_itemsize &&
1633 view.format != NULL &&
1634 strcmp(view.format, pyBuf_formats[traitsPythonType<Complex>::buf_format_idx]) == 0)
1635 {
1636 const Complex* data = static_cast<const Complex*>(view.buf);
1637 const UnsignedInteger nbRows = view.shape[0];
1638 const UnsignedInteger nbColumns = view.shape[1];
1639 const UnsignedInteger nbSheets = view.shape[2];
1640 p_implementation = new ComplexTensorImplementation(nbRows, nbColumns, nbSheets);
1641 if (PyBuffer_IsContiguous(&view, 'F'))
1642 {
1643 // 3-d contiguous array in Fortran notation, we can directly copy memory chunk
1644 std::copy(data, data + nbRows * nbColumns * nbSheets, &p_implementation->operator()(0, 0, 0));
1645 }
1646 else
1647 {
1648 for(UnsignedInteger i = 0; i < nbRows; ++i)
1649 for (UnsignedInteger j = 0; j < nbColumns; ++j)
1650 for (UnsignedInteger k = 0; k < nbSheets; ++k, ++data)
1651 p_implementation->operator()(i, j, k) = *data;
1652 }
1653 PyBuffer_Release(&view);
1654 return p_implementation;
1655 }
1656 PyBuffer_Release(&view);
1657 }
1658 else
1659 PyErr_Clear();
1660 }
1661 // use the same conversion function for numpy array/matrix, knowing numpy matrix is not a sequence
1662 if (PyObject_HasAttrString(pyObj, const_cast<char *>("shape")))
1663 {
1664 ScopedPyObjectPointer shapeObj(PyObject_GetAttrString(pyObj, "shape"));
1665 if (shapeObj.get())
1666 {
1667 Indices shape(checkAndConvert< _PySequence_, Indices >(shapeObj.get()));
1668 if (shape.getSize() == 3)
1669 {
1670 UnsignedInteger nbRows = shape[0];
1671 UnsignedInteger nbColumns = shape[1];
1672 UnsignedInteger nbSheets = shape[2];
1673 ScopedPyObjectPointer askObj(PyTuple_New(3));
1674 ScopedPyObjectPointer methodObj(convert< String, _PyString_ >("__getitem__"));
1675 p_implementation = new ComplexTensorImplementation(nbRows, nbColumns, nbSheets);
1676 for (UnsignedInteger i = 0; i < nbRows; ++ i)
1677 {
1678 PyTuple_SetItem(askObj.get(), 0, convert< UnsignedInteger, _PyInt_ >(i));
1679 for (UnsignedInteger j = 0; j < nbColumns; ++ j)
1680 {
1681 PyTuple_SetItem(askObj.get(), 1, convert< UnsignedInteger, _PyInt_ >(j));
1682 for (UnsignedInteger k = 0; k < nbSheets; ++ k)
1683 {
1684 PyTuple_SetItem(askObj.get(), 2, convert< UnsignedInteger, _PyInt_ >(k));
1685 ScopedPyObjectPointer elt(PyObject_CallMethodObjArgs(pyObj, methodObj.get(), askObj.get(), NULL));
1686 if (elt.get())
1687 {
1688 try
1689 {
1690 p_implementation->operator()(i, j, k) = checkAndConvert<_PyComplex_, Complex>(elt.get());
1691 }
1692 catch (InvalidArgumentException &)
1693 {
1694 delete p_implementation;
1695 throw;
1696 }
1697 }
1698 }
1699 }
1700 }
1701 }
1702 else
1703 throw InvalidArgumentException(HERE) << "Invalid array dimension: " << shape.getSize();
1704 }
1705 }
1706 else if (PyObject_HasAttrString(pyObj, const_cast<char *>("getNbSheets")))
1707 {
1708 // case of conversion from XTensor to YTensor
ScopedPyObjectPointer columnsObj(PyObject_CallMethod (pyObj,
1710 const_cast<char *>("getNbColumns"),
1711 const_cast<char *>("()")));
1712 ScopedPyObjectPointer rowsObj(PyObject_CallMethod (pyObj,
1713 const_cast<char *>("getNbRows"),
1714 const_cast<char *>("()")));
1715 ScopedPyObjectPointer sheetsObj(PyObject_CallMethod (pyObj,
1716 const_cast<char *>("getNbSheets"),
1717 const_cast<char *>("()")));
1718 ScopedPyObjectPointer implObj(PyObject_CallMethod (pyObj,
1719 const_cast<char *>("getImplementation"),
1720 const_cast<char *>("()")));
1721 Pointer< Collection< Complex > > ptr = buildCollectionFromPySequence< Complex >(implObj.get());
UnsignedInteger nbColumns = checkAndConvert< _PyInt_, UnsignedInteger >(columnsObj.get());
1723 UnsignedInteger nbRows = checkAndConvert< _PyInt_, UnsignedInteger >(rowsObj.get());
1724 UnsignedInteger nbSheets = checkAndConvert< _PyInt_, UnsignedInteger >(sheetsObj.get());
1725 p_implementation = new ComplexTensorImplementation(nbRows, nbColumns, nbSheets, *ptr);
1726 }
1727 return p_implementation;
1728 }
1729
1730
1731
1732 template <>
1733 inline
1734 ComplexTensor
convert< _PySequence_, ComplexTensor >(PyObject * pyObj)
1736 {
1737 ComplexTensorImplementation *p_implementation = convert<_PySequence_, ComplexTensorImplementation*>(pyObj);
1738 return ComplexTensor(p_implementation);
1739 }
1740
1741 template <>
1742 struct traitsPythonType< WhittleFactoryState >
1743 {
1744 typedef _PySequence_ Type;
1745 };
1746
1747 template <>
1748 inline
1749 WhittleFactoryState
convert< _PySequence_, WhittleFactoryState >(PyObject *)
1751 {
1752 return WhittleFactoryState();
1753 }
1754
1755
1756 // PySliceObject type is deprecated
1757 #if PY_VERSION_HEX >= 0x03020000
inline PyObject* SliceCast(PyObject* pyObj)
1759 {
1760 return pyObj;
1761 }
1762 #else
inline PySliceObject* SliceCast(PyObject* pyObj)
1764 {
1765 return (PySliceObject*)pyObj;
1766 }
1767 #endif
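
/* SliceCast smooths over the API change of the slice functions, which took a PySliceObject*
 * before Python 3.2 and a plain PyObject* afterwards. A sketch of the intended call site
 * (assuming 'pySlice' is a slice object and 'length' the size of the indexed container):
 *
 *   Py_ssize_t start = 0, stop = 0, step = 0, sliceLength = 0;
 *   PySlice_GetIndicesEx(SliceCast(pySlice), length, &start, &stop, &step, &sliceLength);
 */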
1768
1769
1770 inline
void pickleSave(Advocate & adv, PyObject * pyObj, const String attributName = "pyInstance_")
1772 {
1773 // try to use dill
1774 ScopedPyObjectPointer pickleModule(PyImport_ImportModule("dill")); // new reference
1775 if (pickleModule.get() == NULL)
1776 {
1777 // fallback to pickle
1778 PyErr_Clear();
1779 pickleModule = PyImport_ImportModule("pickle"); // new reference
1780 }
1781 assert(pickleModule.get());
1782
1783 PyObject * pickleDict = PyModule_GetDict(pickleModule.get());
1784 assert(pickleDict);
1785
1786 PyObject * dumpsMethod = PyDict_GetItemString(pickleDict, "dumps");
1787 assert(dumpsMethod);
1788 if (! PyCallable_Check(dumpsMethod))
1789 throw InternalException(HERE) << "Python 'pickle' module has no 'dumps' method";
1790
1791 assert(pyObj);
1792 ScopedPyObjectPointer rawDump(PyObject_CallFunctionObjArgs(dumpsMethod, pyObj, NULL)); // new reference
1793
1794 handleException();
1795 assert(rawDump.get());
1796
1797 ScopedPyObjectPointer base64Module(PyImport_ImportModule("base64")); // new reference
1798 assert(base64Module.get());
1799
1800 PyObject * base64Dict = PyModule_GetDict(base64Module.get());
1801 assert(base64Dict);
1802
1803 PyObject * b64encodeMethod = PyDict_GetItemString(base64Dict, "standard_b64encode");
1804 assert(b64encodeMethod);
1805 if (! PyCallable_Check(b64encodeMethod))
1806 throw InternalException(HERE) << "Python 'base64' module has no 'standard_b64encode' method";
1807
1808 ScopedPyObjectPointer base64Dump(PyObject_CallFunctionObjArgs(b64encodeMethod, rawDump.get(), NULL)); // new reference
1809 handleException();
1810 assert(base64Dump.get());
1811
1812 String pyInstanceSt(convert< _PyBytes_, String >(base64Dump.get()));
1813 adv.saveAttribute(attributName, pyInstanceSt);
1814 }
1815
1816
1817 inline
void pickleLoad(Advocate & adv, PyObject * & pyObj, const String attributName = "pyInstance_")
1819 {
1820 String pyInstanceSt;
1821 adv.loadAttribute(attributName, pyInstanceSt);
1822
1823 ScopedPyObjectPointer base64Dump(convert< String, _PyBytes_ >(pyInstanceSt)); // new reference
1824 assert(base64Dump.get());
1825
1826 ScopedPyObjectPointer base64Module(PyImport_ImportModule("base64")); // new reference
1827 assert(base64Module.get());
1828
1829 PyObject * base64Dict = PyModule_GetDict(base64Module.get());
1830 assert(base64Dict);
1831
1832 PyObject * b64decodeMethod = PyDict_GetItemString(base64Dict, "standard_b64decode");
1833 assert(b64decodeMethod);
1834 if (! PyCallable_Check(b64decodeMethod))
1835 throw InternalException(HERE) << "Python 'base64' module has no 'standard_b64decode' method";
1836
1837 ScopedPyObjectPointer rawDump(PyObject_CallFunctionObjArgs(b64decodeMethod, base64Dump.get(), NULL)); // new reference
1838 handleException();
1839 assert(rawDump.get());
1840
1841 // try to use dill
1842 ScopedPyObjectPointer pickleModule(PyImport_ImportModule("dill")); // new reference
1843 if (pickleModule.get() == NULL)
1844 {
1845 // fallback to pickle
1846 PyErr_Clear();
1847 pickleModule = PyImport_ImportModule("pickle"); // new reference
1848 }
1849 assert(pickleModule.get());
1850
1851 PyObject * pickleDict = PyModule_GetDict(pickleModule.get());
1852 assert(pickleDict);
1853
1854 PyObject * loadsMethod = PyDict_GetItemString(pickleDict, "loads");
1855 assert(loadsMethod);
1856 if (! PyCallable_Check(loadsMethod))
1857 throw InternalException(HERE) << "Python 'pickle' module has no 'loads' method";
1858
1859 Py_XDECREF(pyObj);
1860 pyObj = PyObject_CallFunctionObjArgs(loadsMethod, rawDump.get(), NULL); // new reference
1861 handleException();
1862 assert(pyObj);
1863 }
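
/* Usage sketch (illustrative; 'PythonWrapper' is a hypothetical persistent class holding a
 * PyObject* member pyInstance_): the Python object is pickled, base64-encoded and stored as
 * a String attribute, then restored symmetrically:
 *
 *   void PythonWrapper::save(Advocate & adv) const { pickleSave(adv, pyInstance_); }
 *   void PythonWrapper::load(Advocate & adv)       { pickleLoad(adv, pyInstance_); }
 */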
1864
1865
1866
1867 inline
ScopedPyObjectPointer deepCopy(PyObject * pyObj)
1869 {
1870 ScopedPyObjectPointer copyModule(PyImport_ImportModule("copy"));
1871 assert(copyModule.get());
1872
1873 PyObject * copyDict = PyModule_GetDict(copyModule.get());
1874 assert(copyDict);
1875
1876 PyObject * deepCopyMethod = PyDict_GetItemString(copyDict, "deepcopy");
assert(deepCopyMethod);
1878
1879 if (!PyCallable_Check(deepCopyMethod))
1880 throw InternalException(HERE) << "Python 'copy' module has no 'deepcopy' method";
1881
1882 ScopedPyObjectPointer pyObjDeepCopy(PyObject_CallFunctionObjArgs(deepCopyMethod, pyObj, NULL));
1883 handleException();
1884 return pyObjDeepCopy;
1885 }
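
/* Example (illustrative): cloning a stored Python callable so that two wrappers do not share
 * mutable state; the returned ScopedPyObjectPointer owns the new reference:
 *
 *   ScopedPyObjectPointer clone(deepCopy(pyCallable));
 */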
1886
1887 END_NAMESPACE_OPENTURNS
1888
1889 #endif /* OPENTURNS_PYTHONWRAPPINGFUNCTIONS_HXX */
1890