1 /*******************************************************************************
2  * Copyright (c) 2008-2016 The Khronos Group Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and/or associated documentation files (the
6  * "Materials"), to deal in the Materials without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sublicense, and/or sell copies of the Materials, and to
9  * permit persons to whom the Materials are furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included
13  * in all copies or substantial portions of the Materials.
14  *
15  * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS
16  * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS
17  * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT
18  *    https://www.khronos.org/registry/
19  *
20  * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
24  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
27  ******************************************************************************/
28 
29 /*! \file
30  *
31  *   \brief C++ bindings for OpenCL 1.0 (rev 48), OpenCL 1.1 (rev 33),
32  *       OpenCL 1.2 (rev 15), OpenCL 2.0 (rev 29), OpenCL 2.1 (rev 17),
33  *       and OpenCL 2.2 (V2.2-11).
34  *   \author Lee Howes and Bruce Merry
35  *
36  *   Derived from the OpenCL 1.x C++ bindings written by
37  *   Benedict R. Gaster, Laurent Morichetti and Lee Howes
38  *   With additions and fixes from:
39  *       Brian Cole, March 3rd 2010 and April 2012
40  *       Matt Gruenke, April 2012.
41  *       Bruce Merry, February 2013.
42  *       Tom Deakin and Simon McIntosh-Smith, July 2013
43  *       James Price, 2015-
44  *   \version 2.2.0
45  *   \date 2019-09-18
46  *
47  *   Optional extension support
48  *
49  *         cl_ext_device_fission
50  *         #define CL_HPP_USE_CL_DEVICE_FISSION
51  *         cl_khr_d3d10_sharing
52  *         #define CL_HPP_USE_DX_INTEROP
53  *         cl_khr_sub_groups
54  *         #define CL_HPP_USE_CL_SUB_GROUPS_KHR
55  *         cl_khr_image2d_from_buffer
56  *         #define CL_HPP_USE_CL_IMAGE2D_FROM_BUFFER_KHR
57  *
58  *   Doxygen documentation for this header is available here:
59  *
60  *       http://khronosgroup.github.io/OpenCL-CLHPP/
61  *
62  *   The latest version of this header can be found on the GitHub releases page:
63  *
64  *       https://github.com/KhronosGroup/OpenCL-CLHPP/releases
65  *
66  *   Bugs and patches can be submitted to the GitHub repository:
67  *
68  *       https://github.com/KhronosGroup/OpenCL-CLHPP
69  */
70 
71 /*! \mainpage
72  * \section intro Introduction
73  * For many large applications C++ is the language of choice and so it seems
74  * reasonable to define C++ bindings for OpenCL.
75  *
76  * The interface is contained with a single C++ header file \em cl2.hpp and all
77  * definitions are contained within the namespace \em cl. There is no additional
78  * requirement to include \em cl.h and to use either the C++ or original C
79  * bindings; it is enough to simply include \em cl2.hpp.
80  *
81  * The bindings themselves are lightweight and correspond closely to the
82  * underlying C API. Using the C++ bindings introduces no additional execution
83  * overhead.
84  *
85  * There are numerous compatibility, portability and memory management
86  * fixes in the new header as well as additional OpenCL 2.0 features.
87  * As a result the header is not directly backward compatible and for this
88  * reason we release it as cl2.hpp rather than a new version of cl.hpp.
89  *
90  *
91  * \section compatibility Compatibility
92  * Due to the evolution of the underlying OpenCL API the 2.0 C++ bindings
93  * include an updated approach to defining supported feature versions
94  * and the range of valid underlying OpenCL runtime versions supported.
95  *
96  * The combination of preprocessor macros CL_HPP_TARGET_OPENCL_VERSION and
97  * CL_HPP_MINIMUM_OPENCL_VERSION control this range. These are three digit
98  * decimal values representing OpenCL runime versions. The default for
99  * the target is 200, representing OpenCL 2.0 and the minimum is also
100  * defined as 200. These settings would use 2.0 API calls only.
101  * If backward compatibility with a 1.2 runtime is required, the minimum
102  * version may be set to 120.
103  *
104  * Note that this is a compile-time setting, and so affects linking against
105  * a particular SDK version rather than the versioning of the loaded runtime.
106  *
107  * The earlier versions of the header included basic vector and string
108  * classes based loosely on STL versions. These were difficult to
109  * maintain and very rarely used. For the 2.0 header we now assume
110  * the presence of the standard library unless requested otherwise.
111  * We use std::array, std::vector, std::shared_ptr and std::string
112  * throughout to safely manage memory and reduce the chance of a
113  * recurrance of earlier memory management bugs.
114  *
115  * These classes are used through typedefs in the cl namespace:
116  * cl::array, cl::vector, cl::pointer and cl::string.
117  * In addition cl::allocate_pointer forwards to std::allocate_shared
118  * by default.
119  * In all cases these standard library classes can be replaced with
120  * custom interface-compatible versions using the CL_HPP_NO_STD_ARRAY,
121  * CL_HPP_NO_STD_VECTOR, CL_HPP_NO_STD_UNIQUE_PTR and
122  * CL_HPP_NO_STD_STRING macros.
123  *
124  * The OpenCL 1.x versions of the C++ bindings included a size_t wrapper
125  * class to interface with kernel enqueue. This caused unpleasant interactions
126  * with the standard size_t declaration and led to namespacing bugs.
127  * In the 2.0 version we have replaced this with a std::array-based interface.
128  * However, the old behaviour can be regained for backward compatibility
129  * using the CL_HPP_ENABLE_SIZE_T_COMPATIBILITY macro.
130  *
131  * Finally, the program construction interface used a clumsy vector-of-pairs
132  * design in the earlier versions. We have replaced that with a cleaner
133  * vector-of-vectors and vector-of-strings design. However, for backward
134  * compatibility old behaviour can be regained with the
135  * CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY macro.
136  *
137  * In OpenCL 2.0 OpenCL C is not entirely backward compatibility with
138  * earlier versions. As a result a flag must be passed to the OpenCL C
139  * compiled to request OpenCL 2.0 compilation of kernels with 1.2 as
140  * the default in the absence of the flag.
141  * In some cases the C++ bindings automatically compile code for ease.
142  * For those cases the compilation defaults to OpenCL C 2.0.
143  * If this is not wanted, the CL_HPP_CL_1_2_DEFAULT_BUILD macro may
144  * be specified to assume 1.2 compilation.
145  * If more fine-grained decisions on a per-kernel bases are required
146  * then explicit build operations that take the flag should be used.
147  *
148  *
149  * \section parameterization Parameters
150  * This header may be parameterized by a set of preprocessor macros.
151  *
152  * - CL_HPP_TARGET_OPENCL_VERSION
153  *
154  *   Defines the target OpenCL runtime version to build the header
155  *   against. Defaults to 200, representing OpenCL 2.0.
156  *
157  * - CL_HPP_NO_STD_STRING
158  *
159  *   Do not use the standard library string class. cl::string is not
160  *   defined and may be defined by the user before cl2.hpp is
161  *   included.
162  *
163  * - CL_HPP_NO_STD_VECTOR
164  *
165  *   Do not use the standard library vector class. cl::vector is not
166  *   defined and may be defined by the user before cl2.hpp is
167  *   included.
168  *
169  * - CL_HPP_NO_STD_ARRAY
170  *
171  *   Do not use the standard library array class. cl::array is not
172  *   defined and may be defined by the user before cl2.hpp is
173  *   included.
174  *
175  * - CL_HPP_NO_STD_UNIQUE_PTR
176  *
177  *   Do not use the standard library unique_ptr class. cl::pointer and
178  *   the cl::allocate_pointer functions are not defined and may be
179  *   defined by the user before cl2.hpp is included.
180  *
181  * - CL_HPP_ENABLE_DEVICE_FISSION
182  *
183  *   Enables device fission for OpenCL 1.2 platforms.
184  *
185  * - CL_HPP_ENABLE_EXCEPTIONS
186  *
187  *   Enable exceptions for use in the C++ bindings header. This is the
188  *   preferred error handling mechanism but is not required.
189  *
190  * - CL_HPP_ENABLE_SIZE_T_COMPATIBILITY
191  *
192  *   Backward compatibility option to support cl.hpp-style size_t
193  *   class.  Replaces the updated std::array derived version and
194  *   removal of size_t from the namespace. Note that in this case the
195  *   new size_t class is placed in the cl::compatibility namespace and
196  *   thus requires an additional using declaration for direct backward
197  *   compatibility.
198  *
199  * - CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY
200  *
201  *   Enable older vector of pairs interface for construction of
202  *   programs.
203  *
204  * - CL_HPP_CL_1_2_DEFAULT_BUILD
205  *
206  *   Default to OpenCL C 1.2 compilation rather than OpenCL C 2.0
207  *   applies to use of cl::Program construction and other program
208  *   build variants.
209  *
210  * - CL_HPP_USE_CL_SUB_GROUPS_KHR
211  *
212  *   Enable the cl_khr_subgroups extension.
213  *
214  * - CL_HPP_USE_IL_KHR
215  *
216  *   Enable the cl_khr_il_program extension.
217  *
218  *
219  * \section example Example
220  *
221  * The following example shows a general use case for the C++
222  * bindings, including support for the optional exception feature and
223  * also the supplied vector and string classes, see following sections for
224  * decriptions of these features.
225  *
226  * \code
227     #define CL_HPP_ENABLE_EXCEPTIONS
228     #define CL_HPP_TARGET_OPENCL_VERSION 200
229 
230     #include <CL/cl2.hpp>
231     #include <iostream>
232     #include <vector>
233     #include <memory>
234     #include <algorithm>
235 
236     const int numElements = 32;
237 
238     int main(void)
239     {
240         // Filter for a 2.0 platform and set it as the default
241         std::vector<cl::Platform> platforms;
242         cl::Platform::get(&platforms);
243         cl::Platform plat;
244         for (auto &p : platforms) {
245             std::string platver = p.getInfo<CL_PLATFORM_VERSION>();
246             if (platver.find("OpenCL 2.") != std::string::npos) {
247                 plat = p;
248             }
249         }
250         if (plat() == 0)  {
251             std::cout << "No OpenCL 2.0 platform found.";
252             return -1;
253         }
254 
255         cl::Platform newP = cl::Platform::setDefault(plat);
256         if (newP != plat) {
257             std::cout << "Error setting default platform.";
258             return -1;
259         }
260 
261         // Use C++11 raw string literals for kernel source code
262         std::string kernel1{R"CLC(
263             global int globalA;
264             kernel void updateGlobal()
265             {
266               globalA = 75;
267             }
268         )CLC"};
269         std::string kernel2{R"CLC(
270             typedef struct { global int *bar; } Foo;
271             kernel void vectorAdd(global const Foo* aNum, global const int *inputA, global const int *inputB,
272                                   global int *output, int val, write_only pipe int outPipe, queue_t childQueue)
273             {
274               output[get_global_id(0)] = inputA[get_global_id(0)] + inputB[get_global_id(0)] + val + *(aNum->bar);
275               write_pipe(outPipe, &val);
276               queue_t default_queue = get_default_queue();
277               ndrange_t ndrange = ndrange_1D(get_global_size(0)/2, get_global_size(0)/2);
278 
279               // Have a child kernel write into third quarter of output
280               enqueue_kernel(default_queue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange,
281                 ^{
282                     output[get_global_size(0)*2 + get_global_id(0)] =
283                       inputA[get_global_size(0)*2 + get_global_id(0)] + inputB[get_global_size(0)*2 + get_global_id(0)] + globalA;
284                 });
285 
286               // Have a child kernel write into last quarter of output
287               enqueue_kernel(childQueue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange,
288                 ^{
289                     output[get_global_size(0)*3 + get_global_id(0)] =
290                       inputA[get_global_size(0)*3 + get_global_id(0)] + inputB[get_global_size(0)*3 + get_global_id(0)] + globalA + 2;
291                 });
292             }
293         )CLC"};
294 
295         // New simpler string interface style
296         std::vector<std::string> programStrings {kernel1, kernel2};
297 
298         cl::Program vectorAddProgram(programStrings);
299         try {
300             vectorAddProgram.build("-cl-std=CL2.0");
301         }
302         catch (...) {
303             // Print build info for all devices
304             cl_int buildErr = CL_SUCCESS;
305             auto buildInfo = vectorAddProgram.getBuildInfo<CL_PROGRAM_BUILD_LOG>(&buildErr);
306             for (auto &pair : buildInfo) {
307                 std::cerr << pair.second << std::endl << std::endl;
308             }
309 
310             return 1;
311         }
312 
313         typedef struct { int *bar; } Foo;
314 
315         // Get and run kernel that initializes the program-scope global
316         // A test for kernels that take no arguments
317         auto program2Kernel =
318             cl::KernelFunctor<>(vectorAddProgram, "updateGlobal");
319         program2Kernel(
320             cl::EnqueueArgs(
321             cl::NDRange(1)));
322 
323         //////////////////
324         // SVM allocations
325 
326         auto anSVMInt = cl::allocate_svm<int, cl::SVMTraitCoarse<>>();
327         *anSVMInt = 5;
328         cl::SVMAllocator<Foo, cl::SVMTraitCoarse<cl::SVMTraitReadOnly<>>> svmAllocReadOnly;
329         auto fooPointer = cl::allocate_pointer<Foo>(svmAllocReadOnly);
330         fooPointer->bar = anSVMInt.get();
331         cl::SVMAllocator<int, cl::SVMTraitCoarse<>> svmAlloc;
332         std::vector<int, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>> inputA(numElements, 1, svmAlloc);
333         cl::coarse_svm_vector<int> inputB(numElements, 2, svmAlloc);
334 
335         //
336         //////////////
337 
338         // Traditional cl_mem allocations
339         std::vector<int> output(numElements, 0xdeadbeef);
340         cl::Buffer outputBuffer(begin(output), end(output), false);
341         cl::Pipe aPipe(sizeof(cl_int), numElements / 2);
342 
343         // Default command queue, also passed in as a parameter
344         cl::DeviceCommandQueue defaultDeviceQueue = cl::DeviceCommandQueue::makeDefault(
345             cl::Context::getDefault(), cl::Device::getDefault());
346 
347         auto vectorAddKernel =
348             cl::KernelFunctor<
349                 decltype(fooPointer)&,
350                 int*,
351                 cl::coarse_svm_vector<int>&,
352                 cl::Buffer,
353                 int,
354                 cl::Pipe&,
355                 cl::DeviceCommandQueue
356                 >(vectorAddProgram, "vectorAdd");
357 
358         // Ensure that the additional SVM pointer is available to the kernel
359         // This one was not passed as a parameter
360         vectorAddKernel.setSVMPointers(anSVMInt);
361 
362         // Hand control of coarse allocations to runtime
363         cl::enqueueUnmapSVM(anSVMInt);
364         cl::enqueueUnmapSVM(fooPointer);
365         cl::unmapSVM(inputB);
366         cl::unmapSVM(output2);
367 
368 	    cl_int error;
369 	    vectorAddKernel(
370             cl::EnqueueArgs(
371                 cl::NDRange(numElements/2),
372                 cl::NDRange(numElements/2)),
373             fooPointer,
374             inputA.data(),
375             inputB,
376             outputBuffer,
377             3,
378             aPipe,
379             defaultDeviceQueue,
380 		    error
381             );
382 
383         cl::copy(outputBuffer, begin(output), end(output));
384         // Grab the SVM output vector using a map
385         cl::mapSVM(output2);
386 
387         cl::Device d = cl::Device::getDefault();
388 
389         std::cout << "Output:\n";
390         for (int i = 1; i < numElements; ++i) {
391             std::cout << "\t" << output[i] << "\n";
392         }
393         std::cout << "\n\n";
394 
395         return 0;
396     }
397  *
398  * \endcode
399  *
400  */
401 #ifndef CL_HPP_
402 #define CL_HPP_
403 
404 /* Handle deprecated preprocessor definitions. In each case, we only check for
405  * the old name if the new name is not defined, so that user code can define
406  * both and hence work with either version of the bindings.
407  */
408 #if !defined(CL_HPP_USE_DX_INTEROP) && defined(USE_DX_INTEROP)
409 # pragma message("cl2.hpp: USE_DX_INTEROP is deprecated. Define CL_HPP_USE_DX_INTEROP instead")
410 # define CL_HPP_USE_DX_INTEROP
411 #endif
412 #if !defined(CL_HPP_USE_CL_DEVICE_FISSION) && defined(USE_CL_DEVICE_FISSION)
413 # pragma message("cl2.hpp: USE_CL_DEVICE_FISSION is deprecated. Define CL_HPP_USE_CL_DEVICE_FISSION instead")
414 # define CL_HPP_USE_CL_DEVICE_FISSION
415 #endif
416 #if !defined(CL_HPP_ENABLE_EXCEPTIONS) && defined(__CL_ENABLE_EXCEPTIONS)
417 # pragma message("cl2.hpp: __CL_ENABLE_EXCEPTIONS is deprecated. Define CL_HPP_ENABLE_EXCEPTIONS instead")
418 # define CL_HPP_ENABLE_EXCEPTIONS
419 #endif
420 #if !defined(CL_HPP_NO_STD_VECTOR) && defined(__NO_STD_VECTOR)
421 # pragma message("cl2.hpp: __NO_STD_VECTOR is deprecated. Define CL_HPP_NO_STD_VECTOR instead")
422 # define CL_HPP_NO_STD_VECTOR
423 #endif
424 #if !defined(CL_HPP_NO_STD_STRING) && defined(__NO_STD_STRING)
425 # pragma message("cl2.hpp: __NO_STD_STRING is deprecated. Define CL_HPP_NO_STD_STRING instead")
426 # define CL_HPP_NO_STD_STRING
427 #endif
428 #if defined(VECTOR_CLASS)
429 # pragma message("cl2.hpp: VECTOR_CLASS is deprecated. Alias cl::vector instead")
430 #endif
431 #if defined(STRING_CLASS)
432 # pragma message("cl2.hpp: STRING_CLASS is deprecated. Alias cl::string instead.")
433 #endif
434 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS) && defined(__CL_USER_OVERRIDE_ERROR_STRINGS)
435 # pragma message("cl2.hpp: __CL_USER_OVERRIDE_ERROR_STRINGS is deprecated. Define CL_HPP_USER_OVERRIDE_ERROR_STRINGS instead")
436 # define CL_HPP_USER_OVERRIDE_ERROR_STRINGS
437 #endif
438 
439 /* Warn about features that are no longer supported
440  */
441 #if defined(__USE_DEV_VECTOR)
442 # pragma message("cl2.hpp: __USE_DEV_VECTOR is no longer supported. Expect compilation errors")
443 #endif
444 #if defined(__USE_DEV_STRING)
445 # pragma message("cl2.hpp: __USE_DEV_STRING is no longer supported. Expect compilation errors")
446 #endif
447 
448 /* Detect which version to target */
449 #if !defined(CL_HPP_TARGET_OPENCL_VERSION)
450 # pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not defined. It will default to 220 (OpenCL 2.2)")
451 # define CL_HPP_TARGET_OPENCL_VERSION 220
452 #endif
453 #if CL_HPP_TARGET_OPENCL_VERSION != 100 && \
454     CL_HPP_TARGET_OPENCL_VERSION != 110 && \
455     CL_HPP_TARGET_OPENCL_VERSION != 120 && \
456     CL_HPP_TARGET_OPENCL_VERSION != 200 && \
457     CL_HPP_TARGET_OPENCL_VERSION != 210 && \
458     CL_HPP_TARGET_OPENCL_VERSION != 220
459 # pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120, 200, 210 or 220). It will be set to 220")
460 # undef CL_HPP_TARGET_OPENCL_VERSION
461 # define CL_HPP_TARGET_OPENCL_VERSION 220
462 #endif
463 
464 /* Forward target OpenCL version to C headers if necessary */
465 #if defined(CL_TARGET_OPENCL_VERSION)
466 /* Warn if prior definition of CL_TARGET_OPENCL_VERSION is lower than
467  * requested C++ bindings version */
468 #if CL_TARGET_OPENCL_VERSION < CL_HPP_TARGET_OPENCL_VERSION
469 # pragma message("CL_TARGET_OPENCL_VERSION is already defined as is lower than CL_HPP_TARGET_OPENCL_VERSION")
470 #endif
471 #else
472 # define CL_TARGET_OPENCL_VERSION CL_HPP_TARGET_OPENCL_VERSION
473 #endif
474 
475 #if !defined(CL_HPP_MINIMUM_OPENCL_VERSION)
476 # define CL_HPP_MINIMUM_OPENCL_VERSION 200
477 #endif
478 #if CL_HPP_MINIMUM_OPENCL_VERSION != 100 && \
479     CL_HPP_MINIMUM_OPENCL_VERSION != 110 && \
480     CL_HPP_MINIMUM_OPENCL_VERSION != 120 && \
481     CL_HPP_MINIMUM_OPENCL_VERSION != 200 && \
482     CL_HPP_MINIMUM_OPENCL_VERSION != 210 && \
483     CL_HPP_MINIMUM_OPENCL_VERSION != 220
484 # pragma message("cl2.hpp: CL_HPP_MINIMUM_OPENCL_VERSION is not a valid value (100, 110, 120, 200, 210 or 220). It will be set to 100")
485 # undef CL_HPP_MINIMUM_OPENCL_VERSION
486 # define CL_HPP_MINIMUM_OPENCL_VERSION 100
487 #endif
488 #if CL_HPP_MINIMUM_OPENCL_VERSION > CL_HPP_TARGET_OPENCL_VERSION
489 # error "CL_HPP_MINIMUM_OPENCL_VERSION must not be greater than CL_HPP_TARGET_OPENCL_VERSION"
490 #endif
491 
492 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
493 # define CL_USE_DEPRECATED_OPENCL_1_0_APIS
494 #endif
495 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
496 # define CL_USE_DEPRECATED_OPENCL_1_1_APIS
497 #endif
498 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
499 # define CL_USE_DEPRECATED_OPENCL_1_2_APIS
500 #endif
501 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
502 # define CL_USE_DEPRECATED_OPENCL_2_0_APIS
503 #endif
504 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 210 && !defined(CL_USE_DEPRECATED_OPENCL_2_1_APIS)
505 # define CL_USE_DEPRECATED_OPENCL_2_1_APIS
506 #endif
507 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 220 && !defined(CL_USE_DEPRECATED_OPENCL_2_2_APIS)
508 # define CL_USE_DEPRECATED_OPENCL_2_2_APIS
509 #endif
510 
511 #ifdef _WIN32
512 
513 #include <malloc.h>
514 
515 #if defined(CL_HPP_USE_DX_INTEROP)
516 #include <CL/cl_d3d10.h>
517 #include <CL/cl_dx9_media_sharing.h>
518 #endif
519 #endif // _WIN32
520 
521 #if defined(_MSC_VER)
522 #include <intrin.h>
523 #endif // _MSC_VER
524 
525  // Check for a valid C++ version
526 
527 // Need to do both tests here because for some reason __cplusplus is not
528 // updated in visual studio
529 #if (!defined(_MSC_VER) && __cplusplus < 201103L) || (defined(_MSC_VER) && _MSC_VER < 1700)
530 #error Visual studio 2013 or another C++11-supporting compiler required
531 #endif
532 
533 //
534 #if defined(CL_HPP_USE_CL_DEVICE_FISSION) || defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
535 #include <CL/cl_ext.h>
536 #endif
537 
538 #if defined(__APPLE__) || defined(__MACOSX)
539 #include <OpenCL/opencl.h>
540 #else
541 #include <CL/opencl.h>
542 #endif // !__APPLE__
543 
544 #if (__cplusplus >= 201103L)
545 #define CL_HPP_NOEXCEPT_ noexcept
546 #else
547 #define CL_HPP_NOEXCEPT_
548 #endif
549 
550 #if defined(_MSC_VER)
551 # define CL_HPP_DEFINE_STATIC_MEMBER_ __declspec(selectany)
552 #elif defined(__MINGW32__)
553 # define CL_HPP_DEFINE_STATIC_MEMBER_ __attribute__((selectany))
554 #else
555 # define CL_HPP_DEFINE_STATIC_MEMBER_ __attribute__((weak))
556 #endif // !_MSC_VER
557 
558 // Define deprecated prefixes and suffixes to ensure compilation
559 // in case they are not pre-defined
560 #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
561 #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
562 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
563 #if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
564 #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
565 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
566 
567 #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
568 #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED
569 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
570 #if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
571 #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED
572 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
573 
574 #if !defined(CL_CALLBACK)
575 #define CL_CALLBACK
576 #endif //CL_CALLBACK
577 
578 #include <utility>
579 #include <limits>
580 #include <iterator>
581 #include <mutex>
582 #include <cstring>
583 #include <functional>
584 
585 
586 // Define a size_type to represent a correctly resolved size_t
587 #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
588 namespace cl {
589     using size_type = ::size_t;
590 } // namespace cl
591 #else // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
592 namespace cl {
593     using size_type = size_t;
594 } // namespace cl
595 #endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
596 
597 
598 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
599 #include <exception>
600 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
601 
602 #if !defined(CL_HPP_NO_STD_VECTOR)
603 #include <vector>
604 namespace cl {
605     template < class T, class Alloc = std::allocator<T> >
606     using vector = std::vector<T, Alloc>;
607 } // namespace cl
608 #endif // #if !defined(CL_HPP_NO_STD_VECTOR)
609 
610 #if !defined(CL_HPP_NO_STD_STRING)
611 #include <string>
612 namespace cl {
613     using string = std::string;
614 } // namespace cl
615 #endif // #if !defined(CL_HPP_NO_STD_STRING)
616 
617 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
618 
619 #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
620 #include <memory>
621 namespace cl {
622     // Replace unique_ptr and allocate_pointer for internal use
623     // to allow user to replace them
624     template<class T, class D>
625     using pointer = std::unique_ptr<T, D>;
626 } // namespace cl
627 #endif
628 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
629 #if !defined(CL_HPP_NO_STD_ARRAY)
630 #include <array>
631 namespace cl {
632     template < class T, size_type N >
633     using array = std::array<T, N>;
634 } // namespace cl
635 #endif // #if !defined(CL_HPP_NO_STD_ARRAY)
636 
637 // Define size_type appropriately to allow backward-compatibility
638 // use of the old size_t interface class
639 #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
640 namespace cl {
641     namespace compatibility {
642         /*! \brief class used to interface between C++ and
643         *  OpenCL C calls that require arrays of size_t values, whose
644         *  size is known statically.
645         */
646         template <int N>
647         class size_t
648         {
649         private:
650             size_type data_[N];
651 
652         public:
653             //! \brief Initialize size_t to all 0s
size_t()654             size_t()
655             {
656                 for (int i = 0; i < N; ++i) {
657                     data_[i] = 0;
658                 }
659             }
660 
size_t(const array<size_type,N> & rhs)661             size_t(const array<size_type, N> &rhs)
662             {
663                 for (int i = 0; i < N; ++i) {
664                     data_[i] = rhs[i];
665                 }
666             }
667 
operator [](int index)668             size_type& operator[](int index)
669             {
670                 return data_[index];
671             }
672 
operator [](int index) const673             const size_type& operator[](int index) const
674             {
675                 return data_[index];
676             }
677 
678             //! \brief Conversion operator to T*.
operator size_type*()679             operator size_type* ()             { return data_; }
680 
681             //! \brief Conversion operator to const T*.
operator const size_type*() const682             operator const size_type* () const { return data_; }
683 
operator array<size_type,N>() const684             operator array<size_type, N>() const
685             {
686                 array<size_type, N> ret;
687 
688                 for (int i = 0; i < N; ++i) {
689                     ret[i] = data_[i];
690                 }
691                 return ret;
692             }
693         };
694     } // namespace compatibility
695 
696     template<int N>
697     using size_t = compatibility::size_t<N>;
698 } // namespace cl
699 #endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
700 
701 // Helper alias to avoid confusing the macros
702 namespace cl {
703     namespace detail {
704         using size_t_array = array<size_type, 3>;
705     } // namespace detail
706 } // namespace cl
707 
708 
709 /*! \namespace cl
710  *
711  * \brief The OpenCL C++ bindings are defined within this namespace.
712  *
713  */
714 namespace cl {
715     class Memory;
716 
717 #define CL_HPP_INIT_CL_EXT_FCN_PTR_(name) \
718     if (!pfn_##name) {    \
719     pfn_##name = (PFN_##name) \
720     clGetExtensionFunctionAddress(#name); \
721     if (!pfn_##name) {    \
722     } \
723     }
724 
725 #define CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, name) \
726     if (!pfn_##name) {    \
727     pfn_##name = (PFN_##name) \
728     clGetExtensionFunctionAddressForPlatform(platform, #name); \
729     if (!pfn_##name) {    \
730     } \
731     }
732 
733     class Program;
734     class Device;
735     class Context;
736     class CommandQueue;
737     class DeviceCommandQueue;
738     class Memory;
739     class Buffer;
740     class Pipe;
741 
742 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
743     /*! \brief Exception class
744      *
745      *  This may be thrown by API functions when CL_HPP_ENABLE_EXCEPTIONS is defined.
746      */
747     class Error : public std::exception
748     {
749     private:
750         cl_int err_;
751         const char * errStr_;
752     public:
753         /*! \brief Create a new CL error exception for a given error code
754          *  and corresponding message.
755          *
756          *  \param err error code value.
757          *
758          *  \param errStr a descriptive string that must remain in scope until
759          *                handling of the exception has concluded.  If set, it
760          *                will be returned by what().
761          */
Error(cl_int err,const char * errStr=NULL)762         Error(cl_int err, const char * errStr = NULL) : err_(err), errStr_(errStr)
763         {}
764 
~Error()765         ~Error() throw() {}
766 
767         /*! \brief Get error string associated with exception
768          *
769          * \return A memory pointer to the error message string.
770          */
what() const771         virtual const char * what() const throw ()
772         {
773             if (errStr_ == NULL) {
774                 return "empty";
775             }
776             else {
777                 return errStr_;
778             }
779         }
780 
781         /*! \brief Get error code associated with exception
782          *
783          *  \return The error code.
784          */
err(void) const785         cl_int err(void) const { return err_; }
786     };
787 #define CL_HPP_ERR_STR_(x) #x
788 #else
789 #define CL_HPP_ERR_STR_(x) NULL
790 #endif // CL_HPP_ENABLE_EXCEPTIONS
791 
792 
793 namespace detail
794 {
795 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
errHandler(cl_int err,const char * errStr=NULL)796 static inline cl_int errHandler (
797     cl_int err,
798     const char * errStr = NULL)
799 {
800     if (err != CL_SUCCESS) {
801         throw Error(err, errStr);
802     }
803     return err;
804 }
805 #else
806 static inline cl_int errHandler (cl_int err, const char * errStr = NULL)
807 {
808     (void) errStr; // suppress unused variable warning
809     return err;
810 }
811 #endif // CL_HPP_ENABLE_EXCEPTIONS
812 }
813 
814 
815 
816 //! \cond DOXYGEN_DETAIL
817 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
818 #define __GET_DEVICE_INFO_ERR               CL_HPP_ERR_STR_(clGetDeviceInfo)
819 #define __GET_PLATFORM_INFO_ERR             CL_HPP_ERR_STR_(clGetPlatformInfo)
820 #define __GET_DEVICE_IDS_ERR                CL_HPP_ERR_STR_(clGetDeviceIDs)
821 #define __GET_PLATFORM_IDS_ERR              CL_HPP_ERR_STR_(clGetPlatformIDs)
822 #define __GET_CONTEXT_INFO_ERR              CL_HPP_ERR_STR_(clGetContextInfo)
823 #define __GET_EVENT_INFO_ERR                CL_HPP_ERR_STR_(clGetEventInfo)
824 #define __GET_EVENT_PROFILE_INFO_ERR        CL_HPP_ERR_STR_(clGetEventProfileInfo)
825 #define __GET_MEM_OBJECT_INFO_ERR           CL_HPP_ERR_STR_(clGetMemObjectInfo)
826 #define __GET_IMAGE_INFO_ERR                CL_HPP_ERR_STR_(clGetImageInfo)
827 #define __GET_SAMPLER_INFO_ERR              CL_HPP_ERR_STR_(clGetSamplerInfo)
828 #define __GET_KERNEL_INFO_ERR               CL_HPP_ERR_STR_(clGetKernelInfo)
829 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
830 #define __GET_KERNEL_ARG_INFO_ERR           CL_HPP_ERR_STR_(clGetKernelArgInfo)
831 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
832 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
833 #define __GET_KERNEL_SUB_GROUP_INFO_ERR     CL_HPP_ERR_STR_(clGetKernelSubGroupInfo)
834 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
835 #define __GET_KERNEL_WORK_GROUP_INFO_ERR    CL_HPP_ERR_STR_(clGetKernelWorkGroupInfo)
836 #define __GET_PROGRAM_INFO_ERR              CL_HPP_ERR_STR_(clGetProgramInfo)
837 #define __GET_PROGRAM_BUILD_INFO_ERR        CL_HPP_ERR_STR_(clGetProgramBuildInfo)
838 #define __GET_COMMAND_QUEUE_INFO_ERR        CL_HPP_ERR_STR_(clGetCommandQueueInfo)
839 
840 #define __CREATE_CONTEXT_ERR                CL_HPP_ERR_STR_(clCreateContext)
841 #define __CREATE_CONTEXT_FROM_TYPE_ERR      CL_HPP_ERR_STR_(clCreateContextFromType)
842 #define __GET_SUPPORTED_IMAGE_FORMATS_ERR   CL_HPP_ERR_STR_(clGetSupportedImageFormats)
843 
844 #define __CREATE_BUFFER_ERR                 CL_HPP_ERR_STR_(clCreateBuffer)
845 #define __COPY_ERR                          CL_HPP_ERR_STR_(cl::copy)
846 #define __CREATE_SUBBUFFER_ERR              CL_HPP_ERR_STR_(clCreateSubBuffer)
847 #define __CREATE_GL_BUFFER_ERR              CL_HPP_ERR_STR_(clCreateFromGLBuffer)
848 #define __CREATE_GL_RENDER_BUFFER_ERR       CL_HPP_ERR_STR_(clCreateFromGLBuffer)
849 #define __GET_GL_OBJECT_INFO_ERR            CL_HPP_ERR_STR_(clGetGLObjectInfo)
850 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
851 #define __CREATE_IMAGE_ERR                  CL_HPP_ERR_STR_(clCreateImage)
852 #define __CREATE_GL_TEXTURE_ERR             CL_HPP_ERR_STR_(clCreateFromGLTexture)
853 #define __IMAGE_DIMENSION_ERR               CL_HPP_ERR_STR_(Incorrect image dimensions)
854 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
855 #define __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR CL_HPP_ERR_STR_(clSetMemObjectDestructorCallback)
856 
857 #define __CREATE_USER_EVENT_ERR             CL_HPP_ERR_STR_(clCreateUserEvent)
858 #define __SET_USER_EVENT_STATUS_ERR         CL_HPP_ERR_STR_(clSetUserEventStatus)
859 #define __SET_EVENT_CALLBACK_ERR            CL_HPP_ERR_STR_(clSetEventCallback)
860 #define __WAIT_FOR_EVENTS_ERR               CL_HPP_ERR_STR_(clWaitForEvents)
861 
862 #define __CREATE_KERNEL_ERR                 CL_HPP_ERR_STR_(clCreateKernel)
863 #define __SET_KERNEL_ARGS_ERR               CL_HPP_ERR_STR_(clSetKernelArg)
864 #define __CREATE_PROGRAM_WITH_SOURCE_ERR    CL_HPP_ERR_STR_(clCreateProgramWithSource)
865 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
866 #define __CREATE_PROGRAM_WITH_IL_ERR        CL_HPP_ERR_STR_(clCreateProgramWithIL)
867 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
868 #define __CREATE_PROGRAM_WITH_BINARY_ERR    CL_HPP_ERR_STR_(clCreateProgramWithBinary)
869 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
870 #define __CREATE_PROGRAM_WITH_IL_ERR        CL_HPP_ERR_STR_(clCreateProgramWithIL)
871 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 210
872 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
873 #define __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR    CL_HPP_ERR_STR_(clCreateProgramWithBuiltInKernels)
874 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
875 #define __BUILD_PROGRAM_ERR                 CL_HPP_ERR_STR_(clBuildProgram)
876 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
877 #define __COMPILE_PROGRAM_ERR               CL_HPP_ERR_STR_(clCompileProgram)
878 #define __LINK_PROGRAM_ERR                  CL_HPP_ERR_STR_(clLinkProgram)
879 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
880 #define __CREATE_KERNELS_IN_PROGRAM_ERR     CL_HPP_ERR_STR_(clCreateKernelsInProgram)
881 
882 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
883 #define __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR          CL_HPP_ERR_STR_(clCreateCommandQueueWithProperties)
884 #define __CREATE_SAMPLER_WITH_PROPERTIES_ERR                CL_HPP_ERR_STR_(clCreateSamplerWithProperties)
885 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
886 #define __SET_COMMAND_QUEUE_PROPERTY_ERR    CL_HPP_ERR_STR_(clSetCommandQueueProperty)
887 #define __ENQUEUE_READ_BUFFER_ERR           CL_HPP_ERR_STR_(clEnqueueReadBuffer)
888 #define __ENQUEUE_READ_BUFFER_RECT_ERR      CL_HPP_ERR_STR_(clEnqueueReadBufferRect)
889 #define __ENQUEUE_WRITE_BUFFER_ERR          CL_HPP_ERR_STR_(clEnqueueWriteBuffer)
890 #define __ENQUEUE_WRITE_BUFFER_RECT_ERR     CL_HPP_ERR_STR_(clEnqueueWriteBufferRect)
891 #define __ENQEUE_COPY_BUFFER_ERR            CL_HPP_ERR_STR_(clEnqueueCopyBuffer)
892 #define __ENQEUE_COPY_BUFFER_RECT_ERR       CL_HPP_ERR_STR_(clEnqueueCopyBufferRect)
893 #define __ENQUEUE_FILL_BUFFER_ERR           CL_HPP_ERR_STR_(clEnqueueFillBuffer)
894 #define __ENQUEUE_READ_IMAGE_ERR            CL_HPP_ERR_STR_(clEnqueueReadImage)
895 #define __ENQUEUE_WRITE_IMAGE_ERR           CL_HPP_ERR_STR_(clEnqueueWriteImage)
896 #define __ENQUEUE_COPY_IMAGE_ERR            CL_HPP_ERR_STR_(clEnqueueCopyImage)
897 #define __ENQUEUE_FILL_IMAGE_ERR            CL_HPP_ERR_STR_(clEnqueueFillImage)
898 #define __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR  CL_HPP_ERR_STR_(clEnqueueCopyImageToBuffer)
899 #define __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR  CL_HPP_ERR_STR_(clEnqueueCopyBufferToImage)
900 #define __ENQUEUE_MAP_BUFFER_ERR            CL_HPP_ERR_STR_(clEnqueueMapBuffer)
901 #define __ENQUEUE_MAP_IMAGE_ERR             CL_HPP_ERR_STR_(clEnqueueMapImage)
902 #define __ENQUEUE_UNMAP_MEM_OBJECT_ERR      CL_HPP_ERR_STR_(clEnqueueUnMapMemObject)
903 #define __ENQUEUE_NDRANGE_KERNEL_ERR        CL_HPP_ERR_STR_(clEnqueueNDRangeKernel)
904 #define __ENQUEUE_NATIVE_KERNEL             CL_HPP_ERR_STR_(clEnqueueNativeKernel)
905 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
906 #define __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR   CL_HPP_ERR_STR_(clEnqueueMigrateMemObjects)
907 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
908 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
909 #define __ENQUEUE_MIGRATE_SVM_ERR   CL_HPP_ERR_STR_(clEnqueueSVMMigrateMem)
910 #define __SET_DEFAULT_DEVICE_COMMAND_QUEUE_ERR   CL_HPP_ERR_STR_(clSetDefaultDeviceCommandQueue)
911 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 210
912 
913 
914 #define __ENQUEUE_ACQUIRE_GL_ERR            CL_HPP_ERR_STR_(clEnqueueAcquireGLObjects)
915 #define __ENQUEUE_RELEASE_GL_ERR            CL_HPP_ERR_STR_(clEnqueueReleaseGLObjects)
916 
917 #define __CREATE_PIPE_ERR             CL_HPP_ERR_STR_(clCreatePipe)
918 #define __GET_PIPE_INFO_ERR           CL_HPP_ERR_STR_(clGetPipeInfo)
919 
920 
921 #define __RETAIN_ERR                        CL_HPP_ERR_STR_(Retain Object)
922 #define __RELEASE_ERR                       CL_HPP_ERR_STR_(Release Object)
923 #define __FLUSH_ERR                         CL_HPP_ERR_STR_(clFlush)
924 #define __FINISH_ERR                        CL_HPP_ERR_STR_(clFinish)
925 #define __VECTOR_CAPACITY_ERR               CL_HPP_ERR_STR_(Vector capacity error)
926 
927 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
928 #define __GET_HOST_TIMER_ERR           CL_HPP_ERR_STR_(clGetHostTimer)
929 #define __GET_DEVICE_AND_HOST_TIMER_ERR           CL_HPP_ERR_STR_(clGetDeviceAndHostTimer)
930 #endif
931 #if CL_HPP_TARGET_OPENCL_VERSION >= 220
932 #define __SET_PROGRAM_RELEASE_CALLBACK_ERR          CL_HPP_ERR_STR_(clSetProgramReleaseCallback)
933 #define __SET_PROGRAM_SPECIALIZATION_CONSTANT_ERR   CL_HPP_ERR_STR_(clSetProgramSpecializationConstant)
934 #endif
935 
936 
937 /**
938  * CL 1.2 version that uses device fission.
939  */
940 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
941 #define __CREATE_SUB_DEVICES_ERR            CL_HPP_ERR_STR_(clCreateSubDevices)
942 #else
943 #define __CREATE_SUB_DEVICES_ERR            CL_HPP_ERR_STR_(clCreateSubDevicesEXT)
944 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
945 
946 /**
947  * Deprecated APIs for 1.2
948  */
949 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
950 #define __ENQUEUE_MARKER_ERR                CL_HPP_ERR_STR_(clEnqueueMarker)
951 #define __ENQUEUE_WAIT_FOR_EVENTS_ERR       CL_HPP_ERR_STR_(clEnqueueWaitForEvents)
952 #define __ENQUEUE_BARRIER_ERR               CL_HPP_ERR_STR_(clEnqueueBarrier)
953 #define __UNLOAD_COMPILER_ERR               CL_HPP_ERR_STR_(clUnloadCompiler)
954 #define __CREATE_GL_TEXTURE_2D_ERR          CL_HPP_ERR_STR_(clCreateFromGLTexture2D)
955 #define __CREATE_GL_TEXTURE_3D_ERR          CL_HPP_ERR_STR_(clCreateFromGLTexture3D)
956 #define __CREATE_IMAGE2D_ERR                CL_HPP_ERR_STR_(clCreateImage2D)
957 #define __CREATE_IMAGE3D_ERR                CL_HPP_ERR_STR_(clCreateImage3D)
958 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
959 
960 /**
961  * Deprecated APIs for 2.0
962  */
963 #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
964 #define __CREATE_COMMAND_QUEUE_ERR          CL_HPP_ERR_STR_(clCreateCommandQueue)
965 #define __ENQUEUE_TASK_ERR                  CL_HPP_ERR_STR_(clEnqueueTask)
966 #define __CREATE_SAMPLER_ERR                CL_HPP_ERR_STR_(clCreateSampler)
967 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
968 
969 /**
970  * CL 1.2 marker and barrier commands
971  */
972 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
973 #define __ENQUEUE_MARKER_WAIT_LIST_ERR                CL_HPP_ERR_STR_(clEnqueueMarkerWithWaitList)
974 #define __ENQUEUE_BARRIER_WAIT_LIST_ERR               CL_HPP_ERR_STR_(clEnqueueBarrierWithWaitList)
975 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
976 
977 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
978 #define __CLONE_KERNEL_ERR     CL_HPP_ERR_STR_(clCloneKernel)
979 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 210
980 
981 #endif // CL_HPP_USER_OVERRIDE_ERROR_STRINGS
982 //! \endcond
983 
984 
985 namespace detail {
986 
987 // Generic getInfoHelper. The final parameter is used to guide overload
988 // resolution: the actual parameter passed is an int, which makes this
989 // a worse conversion sequence than a specialization that declares the
990 // parameter as an int.
991 template<typename Functor, typename T>
getInfoHelper(Functor f,cl_uint name,T * param,long)992 inline cl_int getInfoHelper(Functor f, cl_uint name, T* param, long)
993 {
994     return f(name, sizeof(T), param, NULL);
995 }
996 
997 // Specialized for getInfo<CL_PROGRAM_BINARIES>
998 // Assumes that the output vector was correctly resized on the way in
999 template <typename Func>
getInfoHelper(Func f,cl_uint name,vector<vector<unsigned char>> * param,int)1000 inline cl_int getInfoHelper(Func f, cl_uint name, vector<vector<unsigned char>>* param, int)
1001 {
1002     if (name != CL_PROGRAM_BINARIES) {
1003         return CL_INVALID_VALUE;
1004     }
1005     if (param) {
1006         // Create array of pointers, calculate total size and pass pointer array in
1007         size_type numBinaries = param->size();
1008         vector<unsigned char*> binariesPointers(numBinaries);
1009 
1010         for (size_type i = 0; i < numBinaries; ++i)
1011         {
1012             binariesPointers[i] = (*param)[i].data();
1013         }
1014 
1015         cl_int err = f(name, numBinaries * sizeof(unsigned char*), binariesPointers.data(), NULL);
1016 
1017         if (err != CL_SUCCESS) {
1018             return err;
1019         }
1020     }
1021 
1022 
1023     return CL_SUCCESS;
1024 }
1025 
1026 // Specialized getInfoHelper for vector params
1027 template <typename Func, typename T>
getInfoHelper(Func f,cl_uint name,vector<T> * param,long)1028 inline cl_int getInfoHelper(Func f, cl_uint name, vector<T>* param, long)
1029 {
1030     size_type required;
1031     cl_int err = f(name, 0, NULL, &required);
1032     if (err != CL_SUCCESS) {
1033         return err;
1034     }
1035     const size_type elements = required / sizeof(T);
1036 
1037     // Temporary to avoid changing param on an error
1038     vector<T> localData(elements);
1039     err = f(name, required, localData.data(), NULL);
1040     if (err != CL_SUCCESS) {
1041         return err;
1042     }
1043     if (param) {
1044         *param = std::move(localData);
1045     }
1046 
1047     return CL_SUCCESS;
1048 }
1049 
1050 /* Specialization for reference-counted types. This depends on the
1051  * existence of Wrapper<T>::cl_type, and none of the other types having the
1052  * cl_type member. Note that simplify specifying the parameter as Wrapper<T>
1053  * does not work, because when using a derived type (e.g. Context) the generic
1054  * template will provide a better match.
1055  */
1056 template <typename Func, typename T>
getInfoHelper(Func f,cl_uint name,vector<T> * param,int,typename T::cl_type=0)1057 inline cl_int getInfoHelper(
1058     Func f, cl_uint name, vector<T>* param, int, typename T::cl_type = 0)
1059 {
1060     size_type required;
1061     cl_int err = f(name, 0, NULL, &required);
1062     if (err != CL_SUCCESS) {
1063         return err;
1064     }
1065 
1066     const size_type elements = required / sizeof(typename T::cl_type);
1067 
1068     vector<typename T::cl_type> value(elements);
1069     err = f(name, required, value.data(), NULL);
1070     if (err != CL_SUCCESS) {
1071         return err;
1072     }
1073 
1074     if (param) {
1075         // Assign to convert CL type to T for each element
1076         param->resize(elements);
1077 
1078         // Assign to param, constructing with retain behaviour
1079         // to correctly capture each underlying CL object
1080         for (size_type i = 0; i < elements; i++) {
1081             (*param)[i] = T(value[i], true);
1082         }
1083     }
1084     return CL_SUCCESS;
1085 }
1086 
1087 // Specialized GetInfoHelper for string params
1088 template <typename Func>
getInfoHelper(Func f,cl_uint name,string * param,long)1089 inline cl_int getInfoHelper(Func f, cl_uint name, string* param, long)
1090 {
1091     size_type required;
1092     cl_int err = f(name, 0, NULL, &required);
1093     if (err != CL_SUCCESS) {
1094         return err;
1095     }
1096 
1097     // std::string has a constant data member
1098     // a char vector does not
1099     if (required > 0) {
1100         vector<char> value(required);
1101         err = f(name, required, value.data(), NULL);
1102         if (err != CL_SUCCESS) {
1103             return err;
1104         }
1105         if (param) {
1106             param->assign(begin(value), prev(end(value)));
1107         }
1108     }
1109     else if (param) {
1110         param->assign("");
1111     }
1112     return CL_SUCCESS;
1113 }
1114 
1115 // Specialized GetInfoHelper for clsize_t params
1116 template <typename Func, size_type N>
getInfoHelper(Func f,cl_uint name,array<size_type,N> * param,long)1117 inline cl_int getInfoHelper(Func f, cl_uint name, array<size_type, N>* param, long)
1118 {
1119     size_type required;
1120     cl_int err = f(name, 0, NULL, &required);
1121     if (err != CL_SUCCESS) {
1122         return err;
1123     }
1124 
1125     size_type elements = required / sizeof(size_type);
1126     vector<size_type> value(elements, 0);
1127 
1128     err = f(name, required, value.data(), NULL);
1129     if (err != CL_SUCCESS) {
1130         return err;
1131     }
1132 
1133     // Bound the copy with N to prevent overruns
1134     // if passed N > than the amount copied
1135     if (elements > N) {
1136         elements = N;
1137     }
1138     for (size_type i = 0; i < elements; ++i) {
1139         (*param)[i] = value[i];
1140     }
1141 
1142     return CL_SUCCESS;
1143 }
1144 
1145 template<typename T> struct ReferenceHandler;
1146 
1147 /* Specialization for reference-counted types. This depends on the
1148  * existence of Wrapper<T>::cl_type, and none of the other types having the
1149  * cl_type member. Note that simplify specifying the parameter as Wrapper<T>
1150  * does not work, because when using a derived type (e.g. Context) the generic
1151  * template will provide a better match.
1152  */
1153 template<typename Func, typename T>
getInfoHelper(Func f,cl_uint name,T * param,int,typename T::cl_type=0)1154 inline cl_int getInfoHelper(Func f, cl_uint name, T* param, int, typename T::cl_type = 0)
1155 {
1156     typename T::cl_type value;
1157     cl_int err = f(name, sizeof(value), &value, NULL);
1158     if (err != CL_SUCCESS) {
1159         return err;
1160     }
1161     *param = value;
1162     if (value != NULL)
1163     {
1164         err = param->retain();
1165         if (err != CL_SUCCESS) {
1166             return err;
1167         }
1168     }
1169     return CL_SUCCESS;
1170 }
1171 
1172 #define CL_HPP_PARAM_NAME_INFO_1_0_(F) \
1173     F(cl_platform_info, CL_PLATFORM_PROFILE, string) \
1174     F(cl_platform_info, CL_PLATFORM_VERSION, string) \
1175     F(cl_platform_info, CL_PLATFORM_NAME, string) \
1176     F(cl_platform_info, CL_PLATFORM_VENDOR, string) \
1177     F(cl_platform_info, CL_PLATFORM_EXTENSIONS, string) \
1178     \
1179     F(cl_device_info, CL_DEVICE_TYPE, cl_device_type) \
1180     F(cl_device_info, CL_DEVICE_VENDOR_ID, cl_uint) \
1181     F(cl_device_info, CL_DEVICE_MAX_COMPUTE_UNITS, cl_uint) \
1182     F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, cl_uint) \
1183     F(cl_device_info, CL_DEVICE_MAX_WORK_GROUP_SIZE, size_type) \
1184     F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_SIZES, cl::vector<size_type>) \
1185     F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR, cl_uint) \
1186     F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT, cl_uint) \
1187     F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, cl_uint) \
1188     F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, cl_uint) \
1189     F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT, cl_uint) \
1190     F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE, cl_uint) \
1191     F(cl_device_info, CL_DEVICE_MAX_CLOCK_FREQUENCY, cl_uint) \
1192     F(cl_device_info, CL_DEVICE_ADDRESS_BITS, cl_uint) \
1193     F(cl_device_info, CL_DEVICE_MAX_READ_IMAGE_ARGS, cl_uint) \
1194     F(cl_device_info, CL_DEVICE_MAX_WRITE_IMAGE_ARGS, cl_uint) \
1195     F(cl_device_info, CL_DEVICE_MAX_MEM_ALLOC_SIZE, cl_ulong) \
1196     F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_WIDTH, size_type) \
1197     F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_HEIGHT, size_type) \
1198     F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_WIDTH, size_type) \
1199     F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_HEIGHT, size_type) \
1200     F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_DEPTH, size_type) \
1201     F(cl_device_info, CL_DEVICE_IMAGE_SUPPORT, cl_bool) \
1202     F(cl_device_info, CL_DEVICE_MAX_PARAMETER_SIZE, size_type) \
1203     F(cl_device_info, CL_DEVICE_MAX_SAMPLERS, cl_uint) \
1204     F(cl_device_info, CL_DEVICE_MEM_BASE_ADDR_ALIGN, cl_uint) \
1205     F(cl_device_info, CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE, cl_uint) \
1206     F(cl_device_info, CL_DEVICE_SINGLE_FP_CONFIG, cl_device_fp_config) \
1207     F(cl_device_info, CL_DEVICE_DOUBLE_FP_CONFIG, cl_device_fp_config) \
1208     F(cl_device_info, CL_DEVICE_HALF_FP_CONFIG, cl_device_fp_config) \
1209     F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, cl_device_mem_cache_type) \
1210     F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, cl_uint)\
1211     F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_SIZE, cl_ulong) \
1212     F(cl_device_info, CL_DEVICE_GLOBAL_MEM_SIZE, cl_ulong) \
1213     F(cl_device_info, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, cl_ulong) \
1214     F(cl_device_info, CL_DEVICE_MAX_CONSTANT_ARGS, cl_uint) \
1215     F(cl_device_info, CL_DEVICE_LOCAL_MEM_TYPE, cl_device_local_mem_type) \
1216     F(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE, cl_ulong) \
1217     F(cl_device_info, CL_DEVICE_ERROR_CORRECTION_SUPPORT, cl_bool) \
1218     F(cl_device_info, CL_DEVICE_PROFILING_TIMER_RESOLUTION, size_type) \
1219     F(cl_device_info, CL_DEVICE_ENDIAN_LITTLE, cl_bool) \
1220     F(cl_device_info, CL_DEVICE_AVAILABLE, cl_bool) \
1221     F(cl_device_info, CL_DEVICE_COMPILER_AVAILABLE, cl_bool) \
1222     F(cl_device_info, CL_DEVICE_EXECUTION_CAPABILITIES, cl_device_exec_capabilities) \
1223     F(cl_device_info, CL_DEVICE_PLATFORM, cl_platform_id) \
1224     F(cl_device_info, CL_DEVICE_NAME, string) \
1225     F(cl_device_info, CL_DEVICE_VENDOR, string) \
1226     F(cl_device_info, CL_DRIVER_VERSION, string) \
1227     F(cl_device_info, CL_DEVICE_PROFILE, string) \
1228     F(cl_device_info, CL_DEVICE_VERSION, string) \
1229     F(cl_device_info, CL_DEVICE_EXTENSIONS, string) \
1230     \
1231     F(cl_context_info, CL_CONTEXT_REFERENCE_COUNT, cl_uint) \
1232     F(cl_context_info, CL_CONTEXT_DEVICES, cl::vector<Device>) \
1233     F(cl_context_info, CL_CONTEXT_PROPERTIES, cl::vector<cl_context_properties>) \
1234     \
1235     F(cl_event_info, CL_EVENT_COMMAND_QUEUE, cl::CommandQueue) \
1236     F(cl_event_info, CL_EVENT_COMMAND_TYPE, cl_command_type) \
1237     F(cl_event_info, CL_EVENT_REFERENCE_COUNT, cl_uint) \
1238     F(cl_event_info, CL_EVENT_COMMAND_EXECUTION_STATUS, cl_int) \
1239     \
1240     F(cl_profiling_info, CL_PROFILING_COMMAND_QUEUED, cl_ulong) \
1241     F(cl_profiling_info, CL_PROFILING_COMMAND_SUBMIT, cl_ulong) \
1242     F(cl_profiling_info, CL_PROFILING_COMMAND_START, cl_ulong) \
1243     F(cl_profiling_info, CL_PROFILING_COMMAND_END, cl_ulong) \
1244     \
1245     F(cl_mem_info, CL_MEM_TYPE, cl_mem_object_type) \
1246     F(cl_mem_info, CL_MEM_FLAGS, cl_mem_flags) \
1247     F(cl_mem_info, CL_MEM_SIZE, size_type) \
1248     F(cl_mem_info, CL_MEM_HOST_PTR, void*) \
1249     F(cl_mem_info, CL_MEM_MAP_COUNT, cl_uint) \
1250     F(cl_mem_info, CL_MEM_REFERENCE_COUNT, cl_uint) \
1251     F(cl_mem_info, CL_MEM_CONTEXT, cl::Context) \
1252     \
1253     F(cl_image_info, CL_IMAGE_FORMAT, cl_image_format) \
1254     F(cl_image_info, CL_IMAGE_ELEMENT_SIZE, size_type) \
1255     F(cl_image_info, CL_IMAGE_ROW_PITCH, size_type) \
1256     F(cl_image_info, CL_IMAGE_SLICE_PITCH, size_type) \
1257     F(cl_image_info, CL_IMAGE_WIDTH, size_type) \
1258     F(cl_image_info, CL_IMAGE_HEIGHT, size_type) \
1259     F(cl_image_info, CL_IMAGE_DEPTH, size_type) \
1260     \
1261     F(cl_sampler_info, CL_SAMPLER_REFERENCE_COUNT, cl_uint) \
1262     F(cl_sampler_info, CL_SAMPLER_CONTEXT, cl::Context) \
1263     F(cl_sampler_info, CL_SAMPLER_NORMALIZED_COORDS, cl_bool) \
1264     F(cl_sampler_info, CL_SAMPLER_ADDRESSING_MODE, cl_addressing_mode) \
1265     F(cl_sampler_info, CL_SAMPLER_FILTER_MODE, cl_filter_mode) \
1266     \
1267     F(cl_program_info, CL_PROGRAM_REFERENCE_COUNT, cl_uint) \
1268     F(cl_program_info, CL_PROGRAM_CONTEXT, cl::Context) \
1269     F(cl_program_info, CL_PROGRAM_NUM_DEVICES, cl_uint) \
1270     F(cl_program_info, CL_PROGRAM_DEVICES, cl::vector<Device>) \
1271     F(cl_program_info, CL_PROGRAM_SOURCE, string) \
1272     F(cl_program_info, CL_PROGRAM_BINARY_SIZES, cl::vector<size_type>) \
1273     F(cl_program_info, CL_PROGRAM_BINARIES, cl::vector<cl::vector<unsigned char>>) \
1274     \
1275     F(cl_program_build_info, CL_PROGRAM_BUILD_STATUS, cl_build_status) \
1276     F(cl_program_build_info, CL_PROGRAM_BUILD_OPTIONS, string) \
1277     F(cl_program_build_info, CL_PROGRAM_BUILD_LOG, string) \
1278     \
1279     F(cl_kernel_info, CL_KERNEL_FUNCTION_NAME, string) \
1280     F(cl_kernel_info, CL_KERNEL_NUM_ARGS, cl_uint) \
1281     F(cl_kernel_info, CL_KERNEL_REFERENCE_COUNT, cl_uint) \
1282     F(cl_kernel_info, CL_KERNEL_CONTEXT, cl::Context) \
1283     F(cl_kernel_info, CL_KERNEL_PROGRAM, cl::Program) \
1284     \
1285     F(cl_kernel_work_group_info, CL_KERNEL_WORK_GROUP_SIZE, size_type) \
1286     F(cl_kernel_work_group_info, CL_KERNEL_COMPILE_WORK_GROUP_SIZE, cl::detail::size_t_array) \
1287     F(cl_kernel_work_group_info, CL_KERNEL_LOCAL_MEM_SIZE, cl_ulong) \
1288     \
1289     F(cl_command_queue_info, CL_QUEUE_CONTEXT, cl::Context) \
1290     F(cl_command_queue_info, CL_QUEUE_DEVICE, cl::Device) \
1291     F(cl_command_queue_info, CL_QUEUE_REFERENCE_COUNT, cl_uint) \
1292     F(cl_command_queue_info, CL_QUEUE_PROPERTIES, cl_command_queue_properties)
1293 
1294 
1295 #define CL_HPP_PARAM_NAME_INFO_1_1_(F) \
1296     F(cl_context_info, CL_CONTEXT_NUM_DEVICES, cl_uint)\
1297     F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF, cl_uint) \
1298     F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, cl_uint) \
1299     F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, cl_uint) \
1300     F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, cl_uint) \
1301     F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, cl_uint) \
1302     F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, cl_uint) \
1303     F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, cl_uint) \
1304     F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, cl_uint) \
1305     F(cl_device_info, CL_DEVICE_OPENCL_C_VERSION, string) \
1306     \
1307     F(cl_mem_info, CL_MEM_ASSOCIATED_MEMOBJECT, cl::Memory) \
1308     F(cl_mem_info, CL_MEM_OFFSET, size_type) \
1309     \
1310     F(cl_kernel_work_group_info, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, size_type) \
1311     F(cl_kernel_work_group_info, CL_KERNEL_PRIVATE_MEM_SIZE, cl_ulong) \
1312     \
1313     F(cl_event_info, CL_EVENT_CONTEXT, cl::Context)
1314 
1315 #define CL_HPP_PARAM_NAME_INFO_1_2_(F) \
1316     F(cl_program_info, CL_PROGRAM_NUM_KERNELS, size_type) \
1317     F(cl_program_info, CL_PROGRAM_KERNEL_NAMES, string) \
1318     \
1319     F(cl_program_build_info, CL_PROGRAM_BINARY_TYPE, cl_program_binary_type) \
1320     \
1321     F(cl_kernel_info, CL_KERNEL_ATTRIBUTES, string) \
1322     \
1323     F(cl_kernel_arg_info, CL_KERNEL_ARG_ADDRESS_QUALIFIER, cl_kernel_arg_address_qualifier) \
1324     F(cl_kernel_arg_info, CL_KERNEL_ARG_ACCESS_QUALIFIER, cl_kernel_arg_access_qualifier) \
1325     F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_NAME, string) \
1326     F(cl_kernel_arg_info, CL_KERNEL_ARG_NAME, string) \
1327     F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_QUALIFIER, cl_kernel_arg_type_qualifier) \
1328     \
1329     F(cl_device_info, CL_DEVICE_PARENT_DEVICE, cl::Device) \
1330     F(cl_device_info, CL_DEVICE_PARTITION_PROPERTIES, cl::vector<cl_device_partition_property>) \
1331     F(cl_device_info, CL_DEVICE_PARTITION_TYPE, cl::vector<cl_device_partition_property>)  \
1332     F(cl_device_info, CL_DEVICE_REFERENCE_COUNT, cl_uint) \
1333     F(cl_device_info, CL_DEVICE_PREFERRED_INTEROP_USER_SYNC, size_type) \
1334     F(cl_device_info, CL_DEVICE_PARTITION_AFFINITY_DOMAIN, cl_device_affinity_domain) \
1335     F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS, string) \
1336     \
1337     F(cl_image_info, CL_IMAGE_ARRAY_SIZE, size_type) \
1338     F(cl_image_info, CL_IMAGE_NUM_MIP_LEVELS, cl_uint) \
1339     F(cl_image_info, CL_IMAGE_NUM_SAMPLES, cl_uint)
1340 
1341 #define CL_HPP_PARAM_NAME_INFO_2_0_(F) \
1342     F(cl_device_info, CL_DEVICE_QUEUE_ON_HOST_PROPERTIES, cl_command_queue_properties) \
1343     F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES, cl_command_queue_properties) \
1344     F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE, cl_uint) \
1345     F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE, cl_uint) \
1346     F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_QUEUES, cl_uint) \
1347     F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_EVENTS, cl_uint) \
1348     F(cl_device_info, CL_DEVICE_MAX_PIPE_ARGS, cl_uint) \
1349     F(cl_device_info, CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS, cl_uint) \
1350     F(cl_device_info, CL_DEVICE_PIPE_MAX_PACKET_SIZE, cl_uint) \
1351     F(cl_device_info, CL_DEVICE_SVM_CAPABILITIES, cl_device_svm_capabilities) \
1352     F(cl_device_info, CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT, cl_uint) \
1353     F(cl_device_info, CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT, cl_uint) \
1354     F(cl_device_info, CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT, cl_uint) \
1355     F(cl_command_queue_info, CL_QUEUE_SIZE, cl_uint) \
1356     F(cl_mem_info, CL_MEM_USES_SVM_POINTER, cl_bool) \
1357     F(cl_program_build_info, CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE, size_type) \
1358     F(cl_pipe_info, CL_PIPE_PACKET_SIZE, cl_uint) \
1359     F(cl_pipe_info, CL_PIPE_MAX_PACKETS, cl_uint)
1360 
1361 #define CL_HPP_PARAM_NAME_INFO_SUBGROUP_KHR_(F) \
1362     F(cl_kernel_sub_group_info, CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE_KHR, size_type) \
1363     F(cl_kernel_sub_group_info, CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE_KHR, size_type)
1364 
1365 #define CL_HPP_PARAM_NAME_INFO_IL_KHR_(F) \
1366     F(cl_device_info, CL_DEVICE_IL_VERSION_KHR, string) \
1367     F(cl_program_info, CL_PROGRAM_IL_KHR, cl::vector<unsigned char>)
1368 
1369 #define CL_HPP_PARAM_NAME_INFO_2_1_(F) \
1370     F(cl_platform_info, CL_PLATFORM_HOST_TIMER_RESOLUTION, size_type) \
1371     F(cl_program_info, CL_PROGRAM_IL, cl::vector<unsigned char>) \
1372     F(cl_kernel_info, CL_KERNEL_MAX_NUM_SUB_GROUPS, size_type) \
1373     F(cl_kernel_info, CL_KERNEL_COMPILE_NUM_SUB_GROUPS, size_type) \
1374     F(cl_device_info, CL_DEVICE_MAX_NUM_SUB_GROUPS, cl_uint) \
1375     F(cl_device_info, CL_DEVICE_IL_VERSION, string) \
1376     F(cl_device_info, CL_DEVICE_SUB_GROUP_INDEPENDENT_FORWARD_PROGRESS, cl_bool) \
1377     F(cl_command_queue_info, CL_QUEUE_DEVICE_DEFAULT, cl::DeviceCommandQueue) \
1378     F(cl_kernel_sub_group_info, CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE, size_type) \
1379     F(cl_kernel_sub_group_info, CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE, size_type) \
1380     F(cl_kernel_sub_group_info, CL_KERNEL_LOCAL_SIZE_FOR_SUB_GROUP_COUNT, cl::detail::size_t_array)
1381 
1382 #define CL_HPP_PARAM_NAME_INFO_2_2_(F) \
1383     F(cl_program_info, CL_PROGRAM_SCOPE_GLOBAL_CTORS_PRESENT, cl_bool) \
1384     F(cl_program_info, CL_PROGRAM_SCOPE_GLOBAL_DTORS_PRESENT, cl_bool)
1385 
1386 #define CL_HPP_PARAM_NAME_DEVICE_FISSION_(F) \
1387     F(cl_device_info, CL_DEVICE_PARENT_DEVICE_EXT, cl_device_id) \
1388     F(cl_device_info, CL_DEVICE_PARTITION_TYPES_EXT, cl::vector<cl_device_partition_property_ext>) \
1389     F(cl_device_info, CL_DEVICE_AFFINITY_DOMAINS_EXT, cl::vector<cl_device_partition_property_ext>) \
1390     F(cl_device_info, CL_DEVICE_REFERENCE_COUNT_EXT , cl_uint) \
1391     F(cl_device_info, CL_DEVICE_PARTITION_STYLE_EXT, cl::vector<cl_device_partition_property_ext>)
1392 
1393 template <typename enum_type, cl_int Name>
1394 struct param_traits {};
1395 
1396 #define CL_HPP_DECLARE_PARAM_TRAITS_(token, param_name, T) \
1397 struct token;                                        \
1398 template<>                                           \
1399 struct param_traits<detail:: token,param_name>       \
1400 {                                                    \
1401     enum { value = param_name };                     \
1402     typedef T param_type;                            \
1403 };
1404 
1405 CL_HPP_PARAM_NAME_INFO_1_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1406 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
1407 CL_HPP_PARAM_NAME_INFO_1_1_(CL_HPP_DECLARE_PARAM_TRAITS_)
1408 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
1409 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1410 CL_HPP_PARAM_NAME_INFO_1_2_(CL_HPP_DECLARE_PARAM_TRAITS_)
1411 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
1412 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
1413 CL_HPP_PARAM_NAME_INFO_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1414 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
1415 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
1416 CL_HPP_PARAM_NAME_INFO_2_1_(CL_HPP_DECLARE_PARAM_TRAITS_)
1417 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 210
1418 #if CL_HPP_TARGET_OPENCL_VERSION >= 220
1419 CL_HPP_PARAM_NAME_INFO_2_2_(CL_HPP_DECLARE_PARAM_TRAITS_)
1420 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 220
1421 
1422 #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR) && CL_HPP_TARGET_OPENCL_VERSION < 210
1423 CL_HPP_PARAM_NAME_INFO_SUBGROUP_KHR_(CL_HPP_DECLARE_PARAM_TRAITS_)
1424 #endif // #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR) && CL_HPP_TARGET_OPENCL_VERSION < 210
1425 
1426 #if defined(CL_HPP_USE_IL_KHR)
1427 CL_HPP_PARAM_NAME_INFO_IL_KHR_(CL_HPP_DECLARE_PARAM_TRAITS_)
1428 #endif // #if defined(CL_HPP_USE_IL_KHR)
1429 
1430 
1431 // Flags deprecated in OpenCL 2.0
1432 #define CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(F) \
1433     F(cl_device_info, CL_DEVICE_QUEUE_PROPERTIES, cl_command_queue_properties)
1434 
1435 #define CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(F) \
1436     F(cl_device_info, CL_DEVICE_HOST_UNIFIED_MEMORY, cl_bool)
1437 
1438 #define CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(F) \
1439     F(cl_image_info, CL_IMAGE_BUFFER, cl::Buffer)
1440 
1441 // Include deprecated query flags based on versions
1442 // Only include deprecated 1.0 flags if 2.0 not active as there is an enum clash
1443 #if CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
1444 CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1445 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 110
1446 #if CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
1447 CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1448 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1449 #if CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
1450 CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1451 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
1452 
1453 #if defined(CL_HPP_USE_CL_DEVICE_FISSION)
1454 CL_HPP_PARAM_NAME_DEVICE_FISSION_(CL_HPP_DECLARE_PARAM_TRAITS_);
1455 #endif // CL_HPP_USE_CL_DEVICE_FISSION
1456 
1457 #ifdef CL_PLATFORM_ICD_SUFFIX_KHR
CL_HPP_DECLARE_PARAM_TRAITS_(cl_platform_info,CL_PLATFORM_ICD_SUFFIX_KHR,string)1458 CL_HPP_DECLARE_PARAM_TRAITS_(cl_platform_info, CL_PLATFORM_ICD_SUFFIX_KHR, string)
1459 #endif
1460 
1461 #ifdef CL_DEVICE_PROFILING_TIMER_OFFSET_AMD
1462 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_PROFILING_TIMER_OFFSET_AMD, cl_ulong)
1463 #endif
1464 
1465 #ifdef CL_DEVICE_GLOBAL_FREE_MEMORY_AMD
1466 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_FREE_MEMORY_AMD, vector<size_type>)
1467 #endif
1468 #ifdef CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD
1469 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD, cl_uint)
1470 #endif
1471 #ifdef CL_DEVICE_SIMD_WIDTH_AMD
1472 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_WIDTH_AMD, cl_uint)
1473 #endif
1474 #ifdef CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD
1475 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD, cl_uint)
1476 #endif
1477 #ifdef CL_DEVICE_WAVEFRONT_WIDTH_AMD
1478 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WAVEFRONT_WIDTH_AMD, cl_uint)
1479 #endif
1480 #ifdef CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD
1481 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD, cl_uint)
1482 #endif
1483 #ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD
1484 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD, cl_uint)
1485 #endif
1486 #ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD
1487 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD, cl_uint)
1488 #endif
1489 #ifdef CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD
1490 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD, cl_uint)
1491 #endif
1492 #ifdef CL_DEVICE_LOCAL_MEM_BANKS_AMD
1493 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_BANKS_AMD, cl_uint)
1494 #endif
1495 
1496 #ifdef CL_DEVICE_COMPUTE_UNITS_BITFIELD_ARM
1497 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_UNITS_BITFIELD_ARM, cl_ulong)
1498 #endif
1499 #ifdef CL_DEVICE_JOB_SLOTS_ARM
1500 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_JOB_SLOTS_ARM, cl_uint)
1501 #endif
1502 
1503 #ifdef CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV
1504 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, cl_uint)
1505 #endif
1506 #ifdef CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV
1507 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, cl_uint)
1508 #endif
1509 #ifdef CL_DEVICE_REGISTERS_PER_BLOCK_NV
1510 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_REGISTERS_PER_BLOCK_NV, cl_uint)
1511 #endif
1512 #ifdef CL_DEVICE_WARP_SIZE_NV
1513 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WARP_SIZE_NV, cl_uint)
1514 #endif
1515 #ifdef CL_DEVICE_GPU_OVERLAP_NV
1516 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GPU_OVERLAP_NV, cl_bool)
1517 #endif
1518 #ifdef CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV
1519 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, cl_bool)
1520 #endif
1521 #ifdef CL_DEVICE_INTEGRATED_MEMORY_NV
1522 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGRATED_MEMORY_NV, cl_bool)
1523 #endif
1524 
1525 // Convenience functions
1526 
1527 template <typename Func, typename T>
1528 inline cl_int
1529 getInfo(Func f, cl_uint name, T* param)
1530 {
1531     return getInfoHelper(f, name, param, 0);
1532 }
1533 
1534 template <typename Func, typename Arg0>
1535 struct GetInfoFunctor0
1536 {
1537     Func f_; const Arg0& arg0_;
operator ()cl::detail::GetInfoFunctor01538     cl_int operator ()(
1539         cl_uint param, size_type size, void* value, size_type* size_ret)
1540     { return f_(arg0_, param, size, value, size_ret); }
1541 };
1542 
1543 template <typename Func, typename Arg0, typename Arg1>
1544 struct GetInfoFunctor1
1545 {
1546     Func f_; const Arg0& arg0_; const Arg1& arg1_;
operator ()cl::detail::GetInfoFunctor11547     cl_int operator ()(
1548         cl_uint param, size_type size, void* value, size_type* size_ret)
1549     { return f_(arg0_, arg1_, param, size, value, size_ret); }
1550 };
1551 
1552 template <typename Func, typename Arg0, typename T>
1553 inline cl_int
getInfo(Func f,const Arg0 & arg0,cl_uint name,T * param)1554 getInfo(Func f, const Arg0& arg0, cl_uint name, T* param)
1555 {
1556     GetInfoFunctor0<Func, Arg0> f0 = { f, arg0 };
1557     return getInfoHelper(f0, name, param, 0);
1558 }
1559 
1560 template <typename Func, typename Arg0, typename Arg1, typename T>
1561 inline cl_int
getInfo(Func f,const Arg0 & arg0,const Arg1 & arg1,cl_uint name,T * param)1562 getInfo(Func f, const Arg0& arg0, const Arg1& arg1, cl_uint name, T* param)
1563 {
1564     GetInfoFunctor1<Func, Arg0, Arg1> f0 = { f, arg0, arg1 };
1565     return getInfoHelper(f0, name, param, 0);
1566 }
1567 
1568 
1569 template<typename T>
1570 struct ReferenceHandler
1571 { };
1572 
1573 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1574 /**
1575  * OpenCL 1.2 devices do have retain/release.
1576  */
1577 template <>
1578 struct ReferenceHandler<cl_device_id>
1579 {
1580     /**
1581      * Retain the device.
1582      * \param device A valid device created using createSubDevices
1583      * \return
1584      *   CL_SUCCESS if the function executed successfully.
1585      *   CL_INVALID_DEVICE if device was not a valid subdevice
1586      *   CL_OUT_OF_RESOURCES
1587      *   CL_OUT_OF_HOST_MEMORY
1588      */
retaincl::detail::ReferenceHandler1589     static cl_int retain(cl_device_id device)
1590     { return ::clRetainDevice(device); }
1591     /**
1592      * Retain the device.
1593      * \param device A valid device created using createSubDevices
1594      * \return
1595      *   CL_SUCCESS if the function executed successfully.
1596      *   CL_INVALID_DEVICE if device was not a valid subdevice
1597      *   CL_OUT_OF_RESOURCES
1598      *   CL_OUT_OF_HOST_MEMORY
1599      */
releasecl::detail::ReferenceHandler1600     static cl_int release(cl_device_id device)
1601     { return ::clReleaseDevice(device); }
1602 };
1603 #else // CL_HPP_TARGET_OPENCL_VERSION >= 120
1604 /**
1605  * OpenCL 1.1 devices do not have retain/release.
1606  */
1607 template <>
1608 struct ReferenceHandler<cl_device_id>
1609 {
1610     // cl_device_id does not have retain().
retaincl::detail::ReferenceHandler1611     static cl_int retain(cl_device_id)
1612     { return CL_SUCCESS; }
1613     // cl_device_id does not have release().
releasecl::detail::ReferenceHandler1614     static cl_int release(cl_device_id)
1615     { return CL_SUCCESS; }
1616 };
1617 #endif // ! (CL_HPP_TARGET_OPENCL_VERSION >= 120)
1618 
1619 template <>
1620 struct ReferenceHandler<cl_platform_id>
1621 {
1622     // cl_platform_id does not have retain().
retaincl::detail::ReferenceHandler1623     static cl_int retain(cl_platform_id)
1624     { return CL_SUCCESS; }
1625     // cl_platform_id does not have release().
releasecl::detail::ReferenceHandler1626     static cl_int release(cl_platform_id)
1627     { return CL_SUCCESS; }
1628 };
1629 
1630 template <>
1631 struct ReferenceHandler<cl_context>
1632 {
retaincl::detail::ReferenceHandler1633     static cl_int retain(cl_context context)
1634     { return ::clRetainContext(context); }
releasecl::detail::ReferenceHandler1635     static cl_int release(cl_context context)
1636     { return ::clReleaseContext(context); }
1637 };
1638 
1639 template <>
1640 struct ReferenceHandler<cl_command_queue>
1641 {
retaincl::detail::ReferenceHandler1642     static cl_int retain(cl_command_queue queue)
1643     { return ::clRetainCommandQueue(queue); }
releasecl::detail::ReferenceHandler1644     static cl_int release(cl_command_queue queue)
1645     { return ::clReleaseCommandQueue(queue); }
1646 };
1647 
1648 template <>
1649 struct ReferenceHandler<cl_mem>
1650 {
retaincl::detail::ReferenceHandler1651     static cl_int retain(cl_mem memory)
1652     { return ::clRetainMemObject(memory); }
releasecl::detail::ReferenceHandler1653     static cl_int release(cl_mem memory)
1654     { return ::clReleaseMemObject(memory); }
1655 };
1656 
1657 template <>
1658 struct ReferenceHandler<cl_sampler>
1659 {
retaincl::detail::ReferenceHandler1660     static cl_int retain(cl_sampler sampler)
1661     { return ::clRetainSampler(sampler); }
releasecl::detail::ReferenceHandler1662     static cl_int release(cl_sampler sampler)
1663     { return ::clReleaseSampler(sampler); }
1664 };
1665 
1666 template <>
1667 struct ReferenceHandler<cl_program>
1668 {
retaincl::detail::ReferenceHandler1669     static cl_int retain(cl_program program)
1670     { return ::clRetainProgram(program); }
releasecl::detail::ReferenceHandler1671     static cl_int release(cl_program program)
1672     { return ::clReleaseProgram(program); }
1673 };
1674 
1675 template <>
1676 struct ReferenceHandler<cl_kernel>
1677 {
retaincl::detail::ReferenceHandler1678     static cl_int retain(cl_kernel kernel)
1679     { return ::clRetainKernel(kernel); }
releasecl::detail::ReferenceHandler1680     static cl_int release(cl_kernel kernel)
1681     { return ::clReleaseKernel(kernel); }
1682 };
1683 
1684 template <>
1685 struct ReferenceHandler<cl_event>
1686 {
retaincl::detail::ReferenceHandler1687     static cl_int retain(cl_event event)
1688     { return ::clRetainEvent(event); }
releasecl::detail::ReferenceHandler1689     static cl_int release(cl_event event)
1690     { return ::clReleaseEvent(event); }
1691 };
1692 
1693 
1694 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
1695 // Extracts version number with major in the upper 16 bits, minor in the lower 16
getVersion(const vector<char> & versionInfo)1696 static cl_uint getVersion(const vector<char> &versionInfo)
1697 {
1698     int highVersion = 0;
1699     int lowVersion = 0;
1700     int index = 7;
1701     while(versionInfo[index] != '.' ) {
1702         highVersion *= 10;
1703         highVersion += versionInfo[index]-'0';
1704         ++index;
1705     }
1706     ++index;
1707     while(versionInfo[index] != ' ' &&  versionInfo[index] != '\0') {
1708         lowVersion *= 10;
1709         lowVersion += versionInfo[index]-'0';
1710         ++index;
1711     }
1712     return (highVersion << 16) | lowVersion;
1713 }
1714 
getPlatformVersion(cl_platform_id platform)1715 static cl_uint getPlatformVersion(cl_platform_id platform)
1716 {
1717     size_type size = 0;
1718     clGetPlatformInfo(platform, CL_PLATFORM_VERSION, 0, NULL, &size);
1719 
1720     vector<char> versionInfo(size);
1721     clGetPlatformInfo(platform, CL_PLATFORM_VERSION, size, versionInfo.data(), &size);
1722     return getVersion(versionInfo);
1723 }
1724 
getDevicePlatformVersion(cl_device_id device)1725 static cl_uint getDevicePlatformVersion(cl_device_id device)
1726 {
1727     cl_platform_id platform;
1728     clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform), &platform, NULL);
1729     return getPlatformVersion(platform);
1730 }
1731 
getContextPlatformVersion(cl_context context)1732 static cl_uint getContextPlatformVersion(cl_context context)
1733 {
1734     // The platform cannot be queried directly, so we first have to grab a
1735     // device and obtain its context
1736     size_type size = 0;
1737     clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &size);
1738     if (size == 0)
1739         return 0;
1740     vector<cl_device_id> devices(size/sizeof(cl_device_id));
1741     clGetContextInfo(context, CL_CONTEXT_DEVICES, size, devices.data(), NULL);
1742     return getDevicePlatformVersion(devices[0]);
1743 }
1744 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
1745 
1746 template <typename T>
1747 class Wrapper
1748 {
1749 public:
1750     typedef T cl_type;
1751 
1752 protected:
1753     cl_type object_;
1754 
1755 public:
Wrapper()1756     Wrapper() : object_(NULL) { }
1757 
Wrapper(const cl_type & obj,bool retainObject)1758     Wrapper(const cl_type &obj, bool retainObject) : object_(obj)
1759     {
1760         if (retainObject) {
1761             detail::errHandler(retain(), __RETAIN_ERR);
1762         }
1763     }
1764 
~Wrapper()1765     ~Wrapper()
1766     {
1767         if (object_ != NULL) { release(); }
1768     }
1769 
Wrapper(const Wrapper<cl_type> & rhs)1770     Wrapper(const Wrapper<cl_type>& rhs)
1771     {
1772         object_ = rhs.object_;
1773         detail::errHandler(retain(), __RETAIN_ERR);
1774     }
1775 
Wrapper(Wrapper<cl_type> && rhs)1776     Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
1777     {
1778         object_ = rhs.object_;
1779         rhs.object_ = NULL;
1780     }
1781 
operator =(const Wrapper<cl_type> & rhs)1782     Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
1783     {
1784         if (this != &rhs) {
1785             detail::errHandler(release(), __RELEASE_ERR);
1786             object_ = rhs.object_;
1787             detail::errHandler(retain(), __RETAIN_ERR);
1788         }
1789         return *this;
1790     }
1791 
operator =(Wrapper<cl_type> && rhs)1792     Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
1793     {
1794         if (this != &rhs) {
1795             detail::errHandler(release(), __RELEASE_ERR);
1796             object_ = rhs.object_;
1797             rhs.object_ = NULL;
1798         }
1799         return *this;
1800     }
1801 
operator =(const cl_type & rhs)1802     Wrapper<cl_type>& operator = (const cl_type &rhs)
1803     {
1804         detail::errHandler(release(), __RELEASE_ERR);
1805         object_ = rhs;
1806         return *this;
1807     }
1808 
operator ()() const1809     const cl_type& operator ()() const { return object_; }
1810 
operator ()()1811     cl_type& operator ()() { return object_; }
1812 
get() const1813     cl_type get() const { return object_; }
1814 
1815 protected:
1816     template<typename Func, typename U>
1817     friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
1818 
retain() const1819     cl_int retain() const
1820     {
1821         if (object_ != nullptr) {
1822             return ReferenceHandler<cl_type>::retain(object_);
1823         }
1824         else {
1825             return CL_SUCCESS;
1826         }
1827     }
1828 
release() const1829     cl_int release() const
1830     {
1831         if (object_ != nullptr) {
1832             return ReferenceHandler<cl_type>::release(object_);
1833         }
1834         else {
1835             return CL_SUCCESS;
1836         }
1837     }
1838 };
1839 
1840 template <>
1841 class Wrapper<cl_device_id>
1842 {
1843 public:
1844     typedef cl_device_id cl_type;
1845 
1846 protected:
1847     cl_type object_;
1848     bool referenceCountable_;
1849 
isReferenceCountable(cl_device_id device)1850     static bool isReferenceCountable(cl_device_id device)
1851     {
1852         bool retVal = false;
1853 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1854 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
1855         if (device != NULL) {
1856             int version = getDevicePlatformVersion(device);
1857             if(version > ((1 << 16) + 1)) {
1858                 retVal = true;
1859             }
1860         }
1861 #else // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1862         retVal = true;
1863 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1864 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
1865         return retVal;
1866     }
1867 
1868 public:
Wrapper()1869     Wrapper() : object_(NULL), referenceCountable_(false)
1870     {
1871     }
1872 
Wrapper(const cl_type & obj,bool retainObject)1873     Wrapper(const cl_type &obj, bool retainObject) :
1874         object_(obj),
1875         referenceCountable_(false)
1876     {
1877         referenceCountable_ = isReferenceCountable(obj);
1878 
1879         if (retainObject) {
1880             detail::errHandler(retain(), __RETAIN_ERR);
1881         }
1882     }
1883 
~Wrapper()1884     ~Wrapper()
1885     {
1886         release();
1887     }
1888 
Wrapper(const Wrapper<cl_type> & rhs)1889     Wrapper(const Wrapper<cl_type>& rhs)
1890     {
1891         object_ = rhs.object_;
1892         referenceCountable_ = isReferenceCountable(object_);
1893         detail::errHandler(retain(), __RETAIN_ERR);
1894     }
1895 
Wrapper(Wrapper<cl_type> && rhs)1896     Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
1897     {
1898         object_ = rhs.object_;
1899         referenceCountable_ = rhs.referenceCountable_;
1900         rhs.object_ = NULL;
1901         rhs.referenceCountable_ = false;
1902     }
1903 
operator =(const Wrapper<cl_type> & rhs)1904     Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
1905     {
1906         if (this != &rhs) {
1907             detail::errHandler(release(), __RELEASE_ERR);
1908             object_ = rhs.object_;
1909             referenceCountable_ = rhs.referenceCountable_;
1910             detail::errHandler(retain(), __RETAIN_ERR);
1911         }
1912         return *this;
1913     }
1914 
operator =(Wrapper<cl_type> && rhs)1915     Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
1916     {
1917         if (this != &rhs) {
1918             detail::errHandler(release(), __RELEASE_ERR);
1919             object_ = rhs.object_;
1920             referenceCountable_ = rhs.referenceCountable_;
1921             rhs.object_ = NULL;
1922             rhs.referenceCountable_ = false;
1923         }
1924         return *this;
1925     }
1926 
operator =(const cl_type & rhs)1927     Wrapper<cl_type>& operator = (const cl_type &rhs)
1928     {
1929         detail::errHandler(release(), __RELEASE_ERR);
1930         object_ = rhs;
1931         referenceCountable_ = isReferenceCountable(object_);
1932         return *this;
1933     }
1934 
operator ()() const1935     const cl_type& operator ()() const { return object_; }
1936 
operator ()()1937     cl_type& operator ()() { return object_; }
1938 
get() const1939     cl_type get() const { return object_; }
1940 
1941 protected:
1942     template<typename Func, typename U>
1943     friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
1944 
1945     template<typename Func, typename U>
1946     friend inline cl_int getInfoHelper(Func, cl_uint, vector<U>*, int, typename U::cl_type);
1947 
retain() const1948     cl_int retain() const
1949     {
1950         if( object_ != nullptr && referenceCountable_ ) {
1951             return ReferenceHandler<cl_type>::retain(object_);
1952         }
1953         else {
1954             return CL_SUCCESS;
1955         }
1956     }
1957 
release() const1958     cl_int release() const
1959     {
1960         if (object_ != nullptr && referenceCountable_) {
1961             return ReferenceHandler<cl_type>::release(object_);
1962         }
1963         else {
1964             return CL_SUCCESS;
1965         }
1966     }
1967 };
1968 
1969 template <typename T>
operator ==(const Wrapper<T> & lhs,const Wrapper<T> & rhs)1970 inline bool operator==(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
1971 {
1972     return lhs() == rhs();
1973 }
1974 
1975 template <typename T>
operator !=(const Wrapper<T> & lhs,const Wrapper<T> & rhs)1976 inline bool operator!=(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
1977 {
1978     return !operator==(lhs, rhs);
1979 }
1980 
1981 } // namespace detail
1982 //! \endcond
1983 
1984 
1985 using BuildLogType = vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, CL_PROGRAM_BUILD_LOG>::param_type>>;
1986 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
1987 /**
1988 * Exception class for build errors to carry build info
1989 */
1990 class BuildError : public Error
1991 {
1992 private:
1993     BuildLogType buildLogs;
1994 public:
BuildError(cl_int err,const char * errStr,const BuildLogType & vec)1995     BuildError(cl_int err, const char * errStr, const BuildLogType &vec) : Error(err, errStr), buildLogs(vec)
1996     {
1997     }
1998 
getBuildLog() const1999     BuildLogType getBuildLog() const
2000     {
2001         return buildLogs;
2002     }
2003 };
2004 namespace detail {
buildErrHandler(cl_int err,const char * errStr,const BuildLogType & buildLogs)2005     static inline cl_int buildErrHandler(
2006         cl_int err,
2007         const char * errStr,
2008         const BuildLogType &buildLogs)
2009     {
2010         if (err != CL_SUCCESS) {
2011             throw BuildError(err, errStr, buildLogs);
2012         }
2013         return err;
2014     }
2015 } // namespace detail
2016 
2017 #else
2018 namespace detail {
buildErrHandler(cl_int err,const char * errStr,const BuildLogType & buildLogs)2019     static inline cl_int buildErrHandler(
2020         cl_int err,
2021         const char * errStr,
2022         const BuildLogType &buildLogs)
2023     {
2024         (void)buildLogs; // suppress unused variable warning
2025         (void)errStr;
2026         return err;
2027     }
2028 } // namespace detail
2029 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2030 
2031 
2032 /*! \stuct ImageFormat
2033  *  \brief Adds constructors and member functions for cl_image_format.
2034  *
2035  *  \see cl_image_format
2036  */
2037 struct ImageFormat : public cl_image_format
2038 {
2039     //! \brief Default constructor - performs no initialization.
ImageFormatcl::ImageFormat2040     ImageFormat(){}
2041 
2042     //! \brief Initializing constructor.
ImageFormatcl::ImageFormat2043     ImageFormat(cl_channel_order order, cl_channel_type type)
2044     {
2045         image_channel_order = order;
2046         image_channel_data_type = type;
2047     }
2048 
2049     //! \brief Assignment operator.
operator =cl::ImageFormat2050     ImageFormat& operator = (const ImageFormat& rhs)
2051     {
2052         if (this != &rhs) {
2053             this->image_channel_data_type = rhs.image_channel_data_type;
2054             this->image_channel_order     = rhs.image_channel_order;
2055         }
2056         return *this;
2057     }
2058 };
2059 
2060 /*! \brief Class interface for cl_device_id.
2061  *
2062  *  \note Copies of these objects are inexpensive, since they don't 'own'
2063  *        any underlying resources or data structures.
2064  *
2065  *  \see cl_device_id
2066  */
2067 class Device : public detail::Wrapper<cl_device_id>
2068 {
2069 private:
2070     static std::once_flag default_initialized_;
2071     static Device default_;
2072     static cl_int default_error_;
2073 
2074     /*! \brief Create the default context.
2075     *
2076     * This sets @c default_ and @c default_error_. It does not throw
2077     * @c cl::Error.
2078     */
2079     static void makeDefault();
2080 
2081     /*! \brief Create the default platform from a provided platform.
2082     *
2083     * This sets @c default_. It does not throw
2084     * @c cl::Error.
2085     */
makeDefaultProvided(const Device & p)2086     static void makeDefaultProvided(const Device &p) {
2087         default_ = p;
2088     }
2089 
2090 public:
2091 #ifdef CL_HPP_UNIT_TEST_ENABLE
2092     /*! \brief Reset the default.
2093     *
2094     * This sets @c default_ to an empty value to support cleanup in
2095     * the unit test framework.
2096     * This function is not thread safe.
2097     */
unitTestClearDefault()2098     static void unitTestClearDefault() {
2099         default_ = Device();
2100     }
2101 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2102 
2103     //! \brief Default constructor - initializes to NULL.
Device()2104     Device() : detail::Wrapper<cl_type>() { }
2105 
2106     /*! \brief Constructor from cl_device_id.
2107      *
2108      *  This simply copies the device ID value, which is an inexpensive operation.
2109      */
Device(const cl_device_id & device,bool retainObject=false)2110     explicit Device(const cl_device_id &device, bool retainObject = false) :
2111         detail::Wrapper<cl_type>(device, retainObject) { }
2112 
2113     /*! \brief Returns the first device on the default context.
2114      *
2115      *  \see Context::getDefault()
2116      */
getDefault(cl_int * errResult=NULL)2117     static Device getDefault(
2118         cl_int *errResult = NULL)
2119     {
2120         std::call_once(default_initialized_, makeDefault);
2121         detail::errHandler(default_error_);
2122         if (errResult != NULL) {
2123             *errResult = default_error_;
2124         }
2125         return default_;
2126     }
2127 
2128     /**
2129     * Modify the default device to be used by
2130     * subsequent operations.
2131     * Will only set the default if no default was previously created.
2132     * @return updated default device.
2133     *         Should be compared to the passed value to ensure that it was updated.
2134     */
setDefault(const Device & default_device)2135     static Device setDefault(const Device &default_device)
2136     {
2137         std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_device));
2138         detail::errHandler(default_error_);
2139         return default_;
2140     }
2141 
2142     /*! \brief Assignment operator from cl_device_id.
2143      *
2144      *  This simply copies the device ID value, which is an inexpensive operation.
2145      */
operator =(const cl_device_id & rhs)2146     Device& operator = (const cl_device_id& rhs)
2147     {
2148         detail::Wrapper<cl_type>::operator=(rhs);
2149         return *this;
2150     }
2151 
2152     /*! \brief Copy constructor to forward copy to the superclass correctly.
2153     * Required for MSVC.
2154     */
Device(const Device & dev)2155     Device(const Device& dev) : detail::Wrapper<cl_type>(dev) {}
2156 
2157     /*! \brief Copy assignment to forward copy to the superclass correctly.
2158     * Required for MSVC.
2159     */
operator =(const Device & dev)2160     Device& operator = (const Device &dev)
2161     {
2162         detail::Wrapper<cl_type>::operator=(dev);
2163         return *this;
2164     }
2165 
2166     /*! \brief Move constructor to forward move to the superclass correctly.
2167     * Required for MSVC.
2168     */
Device(Device && dev)2169     Device(Device&& dev) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(dev)) {}
2170 
2171     /*! \brief Move assignment to forward move to the superclass correctly.
2172     * Required for MSVC.
2173     */
operator =(Device && dev)2174     Device& operator = (Device &&dev)
2175     {
2176         detail::Wrapper<cl_type>::operator=(std::move(dev));
2177         return *this;
2178     }
2179 
2180     //! \brief Wrapper for clGetDeviceInfo().
2181     template <typename T>
getInfo(cl_device_info name,T * param) const2182     cl_int getInfo(cl_device_info name, T* param) const
2183     {
2184         return detail::errHandler(
2185             detail::getInfo(&::clGetDeviceInfo, object_, name, param),
2186             __GET_DEVICE_INFO_ERR);
2187     }
2188 
2189     //! \brief Wrapper for clGetDeviceInfo() that returns by value.
2190     template <cl_int name> typename
2191     detail::param_traits<detail::cl_device_info, name>::param_type
getInfo(cl_int * err=NULL) const2192     getInfo(cl_int* err = NULL) const
2193     {
2194         typename detail::param_traits<
2195             detail::cl_device_info, name>::param_type param;
2196         cl_int result = getInfo(name, &param);
2197         if (err != NULL) {
2198             *err = result;
2199         }
2200         return param;
2201     }
2202 
2203 
2204 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
2205     /**
2206      * Return the current value of the host clock as seen by the device.
2207      * The resolution of the device timer may be queried with the
2208      * CL_DEVICE_PROFILING_TIMER_RESOLUTION query.
2209      * @return The host timer value.
2210      */
getHostTimer(cl_int * error=nullptr)2211     cl_ulong getHostTimer(cl_int *error = nullptr)
2212     {
2213         cl_ulong retVal = 0;
2214         cl_int err =
2215             clGetHostTimer(this->get(), &retVal);
2216         detail::errHandler(
2217             err,
2218             __GET_HOST_TIMER_ERR);
2219         if (error) {
2220             *error = err;
2221         }
2222         return retVal;
2223     }
2224 
2225     /**
2226      * Return a synchronized pair of host and device timestamps as seen by device.
2227      * Use to correlate the clocks and get the host timer only using getHostTimer
2228      * as a lower cost mechanism in between calls.
2229      * The resolution of the host timer may be queried with the
2230      * CL_PLATFORM_HOST_TIMER_RESOLUTION query.
2231      * The resolution of the device timer may be queried with the
2232      * CL_DEVICE_PROFILING_TIMER_RESOLUTION query.
2233      * @return A pair of (device timer, host timer) timer values.
2234      */
getDeviceAndHostTimer(cl_int * error=nullptr)2235     std::pair<cl_ulong, cl_ulong> getDeviceAndHostTimer(cl_int *error = nullptr)
2236     {
2237         std::pair<cl_ulong, cl_ulong> retVal;
2238         cl_int err =
2239             clGetDeviceAndHostTimer(this->get(), &(retVal.first), &(retVal.second));
2240         detail::errHandler(
2241             err,
2242             __GET_DEVICE_AND_HOST_TIMER_ERR);
2243         if (error) {
2244             *error = err;
2245         }
2246         return retVal;
2247     }
2248 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
2249 
2250     /**
2251      * CL 1.2 version
2252      */
2253 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
2254     //! \brief Wrapper for clCreateSubDevices().
createSubDevices(const cl_device_partition_property * properties,vector<Device> * devices)2255     cl_int createSubDevices(
2256         const cl_device_partition_property * properties,
2257         vector<Device>* devices)
2258     {
2259         cl_uint n = 0;
2260         cl_int err = clCreateSubDevices(object_, properties, 0, NULL, &n);
2261         if (err != CL_SUCCESS) {
2262             return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2263         }
2264 
2265         vector<cl_device_id> ids(n);
2266         err = clCreateSubDevices(object_, properties, n, ids.data(), NULL);
2267         if (err != CL_SUCCESS) {
2268             return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2269         }
2270 
2271         // Cannot trivially assign because we need to capture intermediates
2272         // with safe construction
2273         if (devices) {
2274             devices->resize(ids.size());
2275 
2276             // Assign to param, constructing with retain behaviour
2277             // to correctly capture each underlying CL object
2278             for (size_type i = 0; i < ids.size(); i++) {
2279                 // We do not need to retain because this device is being created
2280                 // by the runtime
2281                 (*devices)[i] = Device(ids[i], false);
2282             }
2283         }
2284 
2285         return CL_SUCCESS;
2286     }
2287 #elif defined(CL_HPP_USE_CL_DEVICE_FISSION)
2288 
2289 /**
2290  * CL 1.1 version that uses device fission extension.
2291  */
createSubDevices(const cl_device_partition_property_ext * properties,vector<Device> * devices)2292     cl_int createSubDevices(
2293         const cl_device_partition_property_ext * properties,
2294         vector<Device>* devices)
2295     {
2296         typedef CL_API_ENTRY cl_int
2297             ( CL_API_CALL * PFN_clCreateSubDevicesEXT)(
2298                 cl_device_id /*in_device*/,
2299                 const cl_device_partition_property_ext * /* properties */,
2300                 cl_uint /*num_entries*/,
2301                 cl_device_id * /*out_devices*/,
2302                 cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
2303 
2304         static PFN_clCreateSubDevicesEXT pfn_clCreateSubDevicesEXT = NULL;
2305         CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateSubDevicesEXT);
2306 
2307         cl_uint n = 0;
2308         cl_int err = pfn_clCreateSubDevicesEXT(object_, properties, 0, NULL, &n);
2309         if (err != CL_SUCCESS) {
2310             return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2311         }
2312 
2313         vector<cl_device_id> ids(n);
2314         err = pfn_clCreateSubDevicesEXT(object_, properties, n, ids.data(), NULL);
2315         if (err != CL_SUCCESS) {
2316             return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2317         }
2318         // Cannot trivially assign because we need to capture intermediates
2319         // with safe construction
2320         if (devices) {
2321             devices->resize(ids.size());
2322 
2323             // Assign to param, constructing with retain behaviour
2324             // to correctly capture each underlying CL object
2325             for (size_type i = 0; i < ids.size(); i++) {
2326                 // We do not need to retain because this device is being created
2327                 // by the runtime
2328                 (*devices)[i] = Device(ids[i], false);
2329             }
2330         }
2331         return CL_SUCCESS;
2332     }
2333 #endif // defined(CL_HPP_USE_CL_DEVICE_FISSION)
2334 };
2335 
2336 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Device::default_initialized_;
2337 CL_HPP_DEFINE_STATIC_MEMBER_ Device Device::default_;
2338 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Device::default_error_ = CL_SUCCESS;
2339 
2340 /*! \brief Class interface for cl_platform_id.
2341  *
2342  *  \note Copies of these objects are inexpensive, since they don't 'own'
2343  *        any underlying resources or data structures.
2344  *
2345  *  \see cl_platform_id
2346  */
2347 class Platform : public detail::Wrapper<cl_platform_id>
2348 {
2349 private:
2350     static std::once_flag default_initialized_;
2351     static Platform default_;
2352     static cl_int default_error_;
2353 
2354     /*! \brief Create the default context.
2355     *
2356     * This sets @c default_ and @c default_error_. It does not throw
2357     * @c cl::Error.
2358     */
makeDefault()2359     static void makeDefault() {
2360         /* Throwing an exception from a call_once invocation does not do
2361         * what we wish, so we catch it and save the error.
2362         */
2363 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2364         try
2365 #endif
2366         {
2367             // If default wasn't passed ,generate one
2368             // Otherwise set it
2369             cl_uint n = 0;
2370 
2371             cl_int err = ::clGetPlatformIDs(0, NULL, &n);
2372             if (err != CL_SUCCESS) {
2373                 default_error_ = err;
2374                 return;
2375             }
2376             if (n == 0) {
2377                 default_error_ = CL_INVALID_PLATFORM;
2378                 return;
2379             }
2380 
2381             vector<cl_platform_id> ids(n);
2382             err = ::clGetPlatformIDs(n, ids.data(), NULL);
2383             if (err != CL_SUCCESS) {
2384                 default_error_ = err;
2385                 return;
2386             }
2387 
2388             default_ = Platform(ids[0]);
2389         }
2390 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2391         catch (cl::Error &e) {
2392             default_error_ = e.err();
2393         }
2394 #endif
2395     }
2396 
2397     /*! \brief Create the default platform from a provided platform.
2398      *
2399      * This sets @c default_. It does not throw
2400      * @c cl::Error.
2401      */
makeDefaultProvided(const Platform & p)2402     static void makeDefaultProvided(const Platform &p) {
2403        default_ = p;
2404     }
2405 
2406 public:
2407 #ifdef CL_HPP_UNIT_TEST_ENABLE
2408     /*! \brief Reset the default.
2409     *
2410     * This sets @c default_ to an empty value to support cleanup in
2411     * the unit test framework.
2412     * This function is not thread safe.
2413     */
unitTestClearDefault()2414     static void unitTestClearDefault() {
2415         default_ = Platform();
2416     }
2417 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2418 
2419     //! \brief Default constructor - initializes to NULL.
Platform()2420     Platform() : detail::Wrapper<cl_type>()  { }
2421 
2422     /*! \brief Constructor from cl_platform_id.
2423      *
2424      * \param retainObject will cause the constructor to retain its cl object.
2425      *                     Defaults to false to maintain compatibility with
2426      *                     earlier versions.
2427      *  This simply copies the platform ID value, which is an inexpensive operation.
2428      */
Platform(const cl_platform_id & platform,bool retainObject=false)2429     explicit Platform(const cl_platform_id &platform, bool retainObject = false) :
2430         detail::Wrapper<cl_type>(platform, retainObject) { }
2431 
2432     /*! \brief Assignment operator from cl_platform_id.
2433      *
2434      *  This simply copies the platform ID value, which is an inexpensive operation.
2435      */
operator =(const cl_platform_id & rhs)2436     Platform& operator = (const cl_platform_id& rhs)
2437     {
2438         detail::Wrapper<cl_type>::operator=(rhs);
2439         return *this;
2440     }
2441 
getDefault(cl_int * errResult=NULL)2442     static Platform getDefault(
2443         cl_int *errResult = NULL)
2444     {
2445         std::call_once(default_initialized_, makeDefault);
2446         detail::errHandler(default_error_);
2447         if (errResult != NULL) {
2448             *errResult = default_error_;
2449         }
2450         return default_;
2451     }
2452 
2453     /**
2454      * Modify the default platform to be used by
2455      * subsequent operations.
2456      * Will only set the default if no default was previously created.
2457      * @return updated default platform.
2458      *         Should be compared to the passed value to ensure that it was updated.
2459      */
setDefault(const Platform & default_platform)2460     static Platform setDefault(const Platform &default_platform)
2461     {
2462         std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_platform));
2463         detail::errHandler(default_error_);
2464         return default_;
2465     }
2466 
2467     //! \brief Wrapper for clGetPlatformInfo().
getInfo(cl_platform_info name,string * param) const2468     cl_int getInfo(cl_platform_info name, string* param) const
2469     {
2470         return detail::errHandler(
2471             detail::getInfo(&::clGetPlatformInfo, object_, name, param),
2472             __GET_PLATFORM_INFO_ERR);
2473     }
2474 
2475     //! \brief Wrapper for clGetPlatformInfo() that returns by value.
2476     template <cl_int name> typename
2477     detail::param_traits<detail::cl_platform_info, name>::param_type
getInfo(cl_int * err=NULL) const2478     getInfo(cl_int* err = NULL) const
2479     {
2480         typename detail::param_traits<
2481             detail::cl_platform_info, name>::param_type param;
2482         cl_int result = getInfo(name, &param);
2483         if (err != NULL) {
2484             *err = result;
2485         }
2486         return param;
2487     }
2488 
2489     /*! \brief Gets a list of devices for this platform.
2490      *
2491      *  Wraps clGetDeviceIDs().
2492      */
getDevices(cl_device_type type,vector<Device> * devices) const2493     cl_int getDevices(
2494         cl_device_type type,
2495         vector<Device>* devices) const
2496     {
2497         cl_uint n = 0;
2498         if( devices == NULL ) {
2499             return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
2500         }
2501         cl_int err = ::clGetDeviceIDs(object_, type, 0, NULL, &n);
2502         if (err != CL_SUCCESS) {
2503             return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2504         }
2505 
2506         vector<cl_device_id> ids(n);
2507         err = ::clGetDeviceIDs(object_, type, n, ids.data(), NULL);
2508         if (err != CL_SUCCESS) {
2509             return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2510         }
2511 
2512         // Cannot trivially assign because we need to capture intermediates
2513         // with safe construction
2514         // We must retain things we obtain from the API to avoid releasing
2515         // API-owned objects.
2516         if (devices) {
2517             devices->resize(ids.size());
2518 
2519             // Assign to param, constructing with retain behaviour
2520             // to correctly capture each underlying CL object
2521             for (size_type i = 0; i < ids.size(); i++) {
2522                 (*devices)[i] = Device(ids[i], true);
2523             }
2524         }
2525         return CL_SUCCESS;
2526     }
2527 
2528 #if defined(CL_HPP_USE_DX_INTEROP)
2529    /*! \brief Get the list of available D3D10 devices.
2530      *
2531      *  \param d3d_device_source.
2532      *
2533      *  \param d3d_object.
2534      *
2535      *  \param d3d_device_set.
2536      *
2537      *  \param devices returns a vector of OpenCL D3D10 devices found. The cl::Device
2538      *  values returned in devices can be used to identify a specific OpenCL
2539      *  device. If \a devices argument is NULL, this argument is ignored.
2540      *
2541      *  \return One of the following values:
2542      *    - CL_SUCCESS if the function is executed successfully.
2543      *
2544      *  The application can query specific capabilities of the OpenCL device(s)
2545      *  returned by cl::getDevices. This can be used by the application to
2546      *  determine which device(s) to use.
2547      *
2548      * \note In the case that exceptions are enabled and a return value
2549      * other than CL_SUCCESS is generated, then cl::Error exception is
2550      * generated.
2551      */
getDevices(cl_d3d10_device_source_khr d3d_device_source,void * d3d_object,cl_d3d10_device_set_khr d3d_device_set,vector<Device> * devices) const2552     cl_int getDevices(
2553         cl_d3d10_device_source_khr d3d_device_source,
2554         void *                     d3d_object,
2555         cl_d3d10_device_set_khr    d3d_device_set,
2556         vector<Device>* devices) const
2557     {
2558         typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clGetDeviceIDsFromD3D10KHR)(
2559             cl_platform_id platform,
2560             cl_d3d10_device_source_khr d3d_device_source,
2561             void * d3d_object,
2562             cl_d3d10_device_set_khr d3d_device_set,
2563             cl_uint num_entries,
2564             cl_device_id * devices,
2565             cl_uint* num_devices);
2566 
2567         if( devices == NULL ) {
2568             return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
2569         }
2570 
2571         static PFN_clGetDeviceIDsFromD3D10KHR pfn_clGetDeviceIDsFromD3D10KHR = NULL;
2572         CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(object_, clGetDeviceIDsFromD3D10KHR);
2573 
2574         cl_uint n = 0;
2575         cl_int err = pfn_clGetDeviceIDsFromD3D10KHR(
2576             object_,
2577             d3d_device_source,
2578             d3d_object,
2579             d3d_device_set,
2580             0,
2581             NULL,
2582             &n);
2583         if (err != CL_SUCCESS) {
2584             return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2585         }
2586 
2587         vector<cl_device_id> ids(n);
2588         err = pfn_clGetDeviceIDsFromD3D10KHR(
2589             object_,
2590             d3d_device_source,
2591             d3d_object,
2592             d3d_device_set,
2593             n,
2594             ids.data(),
2595             NULL);
2596         if (err != CL_SUCCESS) {
2597             return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2598         }
2599 
2600         // Cannot trivially assign because we need to capture intermediates
2601         // with safe construction
2602         // We must retain things we obtain from the API to avoid releasing
2603         // API-owned objects.
2604         if (devices) {
2605             devices->resize(ids.size());
2606 
2607             // Assign to param, constructing with retain behaviour
2608             // to correctly capture each underlying CL object
2609             for (size_type i = 0; i < ids.size(); i++) {
2610                 (*devices)[i] = Device(ids[i], true);
2611             }
2612         }
2613         return CL_SUCCESS;
2614     }
2615 #endif
2616 
2617     /*! \brief Gets a list of available platforms.
2618      *
2619      *  Wraps clGetPlatformIDs().
2620      */
get(vector<Platform> * platforms)2621     static cl_int get(
2622         vector<Platform>* platforms)
2623     {
2624         cl_uint n = 0;
2625 
2626         if( platforms == NULL ) {
2627             return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_PLATFORM_IDS_ERR);
2628         }
2629 
2630         cl_int err = ::clGetPlatformIDs(0, NULL, &n);
2631         if (err != CL_SUCCESS) {
2632             return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
2633         }
2634 
2635         vector<cl_platform_id> ids(n);
2636         err = ::clGetPlatformIDs(n, ids.data(), NULL);
2637         if (err != CL_SUCCESS) {
2638             return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
2639         }
2640 
2641         if (platforms) {
2642             platforms->resize(ids.size());
2643 
2644             // Platforms don't reference count
2645             for (size_type i = 0; i < ids.size(); i++) {
2646                 (*platforms)[i] = Platform(ids[i]);
2647             }
2648         }
2649         return CL_SUCCESS;
2650     }
2651 
2652     /*! \brief Gets the first available platform.
2653      *
2654      *  Wraps clGetPlatformIDs(), returning the first result.
2655      */
get(Platform * platform)2656     static cl_int get(
2657         Platform * platform)
2658     {
2659         cl_int err;
2660         Platform default_platform = Platform::getDefault(&err);
2661         if (platform) {
2662             *platform = default_platform;
2663         }
2664         return err;
2665     }
2666 
2667     /*! \brief Gets the first available platform, returning it by value.
2668      *
2669      * \return Returns a valid platform if one is available.
2670      *         If no platform is available will return a null platform.
2671      * Throws an exception if no platforms are available
2672      * or an error condition occurs.
2673      * Wraps clGetPlatformIDs(), returning the first result.
2674      */
get(cl_int * errResult=NULL)2675     static Platform get(
2676         cl_int * errResult = NULL)
2677     {
2678         cl_int err;
2679         Platform default_platform = Platform::getDefault(&err);
2680         if (errResult) {
2681             *errResult = err;
2682         }
2683         return default_platform;
2684     }
2685 
2686 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
2687     //! \brief Wrapper for clUnloadCompiler().
2688     cl_int
unloadCompiler()2689     unloadCompiler()
2690     {
2691         return ::clUnloadPlatformCompiler(object_);
2692     }
2693 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
2694 }; // class Platform
2695 
2696 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Platform::default_initialized_;
2697 CL_HPP_DEFINE_STATIC_MEMBER_ Platform Platform::default_;
2698 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Platform::default_error_ = CL_SUCCESS;
2699 
2700 
2701 /**
2702  * Deprecated APIs for 1.2
2703  */
2704 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
2705 /**
2706  * Unload the OpenCL compiler.
2707  * \note Deprecated for OpenCL 1.2. Use Platform::unloadCompiler instead.
2708  */
2709 inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int
2710 UnloadCompiler() CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
2711 inline cl_int
UnloadCompiler()2712 UnloadCompiler()
2713 {
2714     return ::clUnloadCompiler();
2715 }
2716 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
2717 
2718 /*! \brief Class interface for cl_context.
2719  *
2720  *  \note Copies of these objects are shallow, meaning that the copy will refer
2721  *        to the same underlying cl_context as the original.  For details, see
2722  *        clRetainContext() and clReleaseContext().
2723  *
2724  *  \see cl_context
2725  */
2726 class Context
2727     : public detail::Wrapper<cl_context>
2728 {
2729 private:
2730     static std::once_flag default_initialized_;
2731     static Context default_;
2732     static cl_int default_error_;
2733 
2734     /*! \brief Create the default context from the default device type in the default platform.
2735      *
2736      * This sets @c default_ and @c default_error_. It does not throw
2737      * @c cl::Error.
2738      */
makeDefault()2739     static void makeDefault() {
2740         /* Throwing an exception from a call_once invocation does not do
2741          * what we wish, so we catch it and save the error.
2742          */
2743 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2744         try
2745 #endif
2746         {
2747 #if !defined(__APPLE__) && !defined(__MACOS)
2748             const Platform &p = Platform::getDefault();
2749             cl_platform_id defaultPlatform = p();
2750             cl_context_properties properties[3] = {
2751                 CL_CONTEXT_PLATFORM, (cl_context_properties)defaultPlatform, 0
2752             };
2753 #else // #if !defined(__APPLE__) && !defined(__MACOS)
2754             cl_context_properties *properties = nullptr;
2755 #endif // #if !defined(__APPLE__) && !defined(__MACOS)
2756 
2757             default_ = Context(
2758                 CL_DEVICE_TYPE_DEFAULT,
2759                 properties,
2760                 NULL,
2761                 NULL,
2762                 &default_error_);
2763         }
2764 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2765         catch (cl::Error &e) {
2766             default_error_ = e.err();
2767         }
2768 #endif
2769     }
2770 
2771 
2772     /*! \brief Create the default context from a provided Context.
2773      *
2774      * This sets @c default_. It does not throw
2775      * @c cl::Error.
2776      */
makeDefaultProvided(const Context & c)2777     static void makeDefaultProvided(const Context &c) {
2778         default_ = c;
2779     }
2780 
2781 public:
2782 #ifdef CL_HPP_UNIT_TEST_ENABLE
2783     /*! \brief Reset the default.
2784     *
2785     * This sets @c default_ to an empty value to support cleanup in
2786     * the unit test framework.
2787     * This function is not thread safe.
2788     */
unitTestClearDefault()2789     static void unitTestClearDefault() {
2790         default_ = Context();
2791     }
2792 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2793 
2794     /*! \brief Constructs a context including a list of specified devices.
2795      *
2796      *  Wraps clCreateContext().
2797      */
Context(const vector<Device> & devices,cl_context_properties * properties=NULL,void (CL_CALLBACK * notifyFptr)(const char *,const void *,size_type,void *)=NULL,void * data=NULL,cl_int * err=NULL)2798     Context(
2799         const vector<Device>& devices,
2800         cl_context_properties* properties = NULL,
2801         void (CL_CALLBACK * notifyFptr)(
2802             const char *,
2803             const void *,
2804             size_type,
2805             void *) = NULL,
2806         void* data = NULL,
2807         cl_int* err = NULL)
2808     {
2809         cl_int error;
2810 
2811         size_type numDevices = devices.size();
2812         vector<cl_device_id> deviceIDs(numDevices);
2813 
2814         for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
2815             deviceIDs[deviceIndex] = (devices[deviceIndex])();
2816         }
2817 
2818         object_ = ::clCreateContext(
2819             properties, (cl_uint) numDevices,
2820             deviceIDs.data(),
2821             notifyFptr, data, &error);
2822 
2823         detail::errHandler(error, __CREATE_CONTEXT_ERR);
2824         if (err != NULL) {
2825             *err = error;
2826         }
2827     }
2828 
Context(const Device & device,cl_context_properties * properties=NULL,void (CL_CALLBACK * notifyFptr)(const char *,const void *,size_type,void *)=NULL,void * data=NULL,cl_int * err=NULL)2829     Context(
2830         const Device& device,
2831         cl_context_properties* properties = NULL,
2832         void (CL_CALLBACK * notifyFptr)(
2833             const char *,
2834             const void *,
2835             size_type,
2836             void *) = NULL,
2837         void* data = NULL,
2838         cl_int* err = NULL)
2839     {
2840         cl_int error;
2841 
2842         cl_device_id deviceID = device();
2843 
2844         object_ = ::clCreateContext(
2845             properties, 1,
2846             &deviceID,
2847             notifyFptr, data, &error);
2848 
2849         detail::errHandler(error, __CREATE_CONTEXT_ERR);
2850         if (err != NULL) {
2851             *err = error;
2852         }
2853     }
2854 
2855     /*! \brief Constructs a context including all or a subset of devices of a specified type.
2856      *
2857      *  Wraps clCreateContextFromType().
2858      */
Context(cl_device_type type,cl_context_properties * properties=NULL,void (CL_CALLBACK * notifyFptr)(const char *,const void *,size_type,void *)=NULL,void * data=NULL,cl_int * err=NULL)2859     Context(
2860         cl_device_type type,
2861         cl_context_properties* properties = NULL,
2862         void (CL_CALLBACK * notifyFptr)(
2863             const char *,
2864             const void *,
2865             size_type,
2866             void *) = NULL,
2867         void* data = NULL,
2868         cl_int* err = NULL)
2869     {
2870         cl_int error;
2871 
2872 #if !defined(__APPLE__) && !defined(__MACOS)
2873         cl_context_properties prop[4] = {CL_CONTEXT_PLATFORM, 0, 0, 0 };
2874 
2875         if (properties == NULL) {
2876             // Get a valid platform ID as we cannot send in a blank one
2877             vector<Platform> platforms;
2878             error = Platform::get(&platforms);
2879             if (error != CL_SUCCESS) {
2880                 detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2881                 if (err != NULL) {
2882                     *err = error;
2883                 }
2884                 return;
2885             }
2886 
2887             // Check the platforms we found for a device of our specified type
2888             cl_context_properties platform_id = 0;
2889             for (unsigned int i = 0; i < platforms.size(); i++) {
2890 
2891                 vector<Device> devices;
2892 
2893 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2894                 try {
2895 #endif
2896 
2897                     error = platforms[i].getDevices(type, &devices);
2898 
2899 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2900                 } catch (cl::Error& e) {
2901                     error = e.err();
2902                 }
2903     // Catch if exceptions are enabled as we don't want to exit if first platform has no devices of type
2904     // We do error checking next anyway, and can throw there if needed
2905 #endif
2906 
2907                 // Only squash CL_SUCCESS and CL_DEVICE_NOT_FOUND
2908                 if (error != CL_SUCCESS && error != CL_DEVICE_NOT_FOUND) {
2909                     detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2910                     if (err != NULL) {
2911                         *err = error;
2912                     }
2913                 }
2914 
2915                 if (devices.size() > 0) {
2916                     platform_id = (cl_context_properties)platforms[i]();
2917                     break;
2918                 }
2919             }
2920 
2921             if (platform_id == 0) {
2922                 detail::errHandler(CL_DEVICE_NOT_FOUND, __CREATE_CONTEXT_FROM_TYPE_ERR);
2923                 if (err != NULL) {
2924                     *err = CL_DEVICE_NOT_FOUND;
2925                 }
2926                 return;
2927             }
2928 
2929             prop[1] = platform_id;
2930             properties = &prop[0];
2931         }
2932 #endif
2933         object_ = ::clCreateContextFromType(
2934             properties, type, notifyFptr, data, &error);
2935 
2936         detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2937         if (err != NULL) {
2938             *err = error;
2939         }
2940     }
2941 
2942     /*! \brief Copy constructor to forward copy to the superclass correctly.
2943      * Required for MSVC.
2944      */
Context(const Context & ctx)2945     Context(const Context& ctx) : detail::Wrapper<cl_type>(ctx) {}
2946 
2947     /*! \brief Copy assignment to forward copy to the superclass correctly.
2948      * Required for MSVC.
2949      */
operator =(const Context & ctx)2950     Context& operator = (const Context &ctx)
2951     {
2952         detail::Wrapper<cl_type>::operator=(ctx);
2953         return *this;
2954     }
2955 
2956     /*! \brief Move constructor to forward move to the superclass correctly.
2957      * Required for MSVC.
2958      */
Context(Context && ctx)2959     Context(Context&& ctx) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(ctx)) {}
2960 
2961     /*! \brief Move assignment to forward move to the superclass correctly.
2962      * Required for MSVC.
2963      */
operator =(Context && ctx)2964     Context& operator = (Context &&ctx)
2965     {
2966         detail::Wrapper<cl_type>::operator=(std::move(ctx));
2967         return *this;
2968     }
2969 
2970 
2971     /*! \brief Returns a singleton context including all devices of CL_DEVICE_TYPE_DEFAULT.
2972      *
2973      *  \note All calls to this function return the same cl_context as the first.
2974      */
getDefault(cl_int * err=NULL)2975     static Context getDefault(cl_int * err = NULL)
2976     {
2977         std::call_once(default_initialized_, makeDefault);
2978         detail::errHandler(default_error_);
2979         if (err != NULL) {
2980             *err = default_error_;
2981         }
2982         return default_;
2983     }
2984 
2985     /**
2986      * Modify the default context to be used by
2987      * subsequent operations.
2988      * Will only set the default if no default was previously created.
2989      * @return updated default context.
2990      *         Should be compared to the passed value to ensure that it was updated.
2991      */
setDefault(const Context & default_context)2992     static Context setDefault(const Context &default_context)
2993     {
2994         std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_context));
2995         detail::errHandler(default_error_);
2996         return default_;
2997     }
2998 
2999     //! \brief Default constructor - initializes to NULL.
Context()3000     Context() : detail::Wrapper<cl_type>() { }
3001 
3002     /*! \brief Constructor from cl_context - takes ownership.
3003      *
3004      *  This effectively transfers ownership of a refcount on the cl_context
3005      *  into the new Context object.
3006      */
Context(const cl_context & context,bool retainObject=false)3007     explicit Context(const cl_context& context, bool retainObject = false) :
3008         detail::Wrapper<cl_type>(context, retainObject) { }
3009 
3010     /*! \brief Assignment operator from cl_context - takes ownership.
3011      *
3012      *  This effectively transfers ownership of a refcount on the rhs and calls
3013      *  clReleaseContext() on the value previously held by this instance.
3014      */
operator =(const cl_context & rhs)3015     Context& operator = (const cl_context& rhs)
3016     {
3017         detail::Wrapper<cl_type>::operator=(rhs);
3018         return *this;
3019     }
3020 
3021     //! \brief Wrapper for clGetContextInfo().
3022     template <typename T>
getInfo(cl_context_info name,T * param) const3023     cl_int getInfo(cl_context_info name, T* param) const
3024     {
3025         return detail::errHandler(
3026             detail::getInfo(&::clGetContextInfo, object_, name, param),
3027             __GET_CONTEXT_INFO_ERR);
3028     }
3029 
3030     //! \brief Wrapper for clGetContextInfo() that returns by value.
3031     template <cl_int name> typename
3032     detail::param_traits<detail::cl_context_info, name>::param_type
getInfo(cl_int * err=NULL) const3033     getInfo(cl_int* err = NULL) const
3034     {
3035         typename detail::param_traits<
3036             detail::cl_context_info, name>::param_type param;
3037         cl_int result = getInfo(name, &param);
3038         if (err != NULL) {
3039             *err = result;
3040         }
3041         return param;
3042     }
3043 
3044     /*! \brief Gets a list of supported image formats.
3045      *
3046      *  Wraps clGetSupportedImageFormats().
3047      */
getSupportedImageFormats(cl_mem_flags flags,cl_mem_object_type type,vector<ImageFormat> * formats) const3048     cl_int getSupportedImageFormats(
3049         cl_mem_flags flags,
3050         cl_mem_object_type type,
3051         vector<ImageFormat>* formats) const
3052     {
3053         cl_uint numEntries;
3054 
3055         if (!formats) {
3056             return CL_SUCCESS;
3057         }
3058 
3059         cl_int err = ::clGetSupportedImageFormats(
3060            object_,
3061            flags,
3062            type,
3063            0,
3064            NULL,
3065            &numEntries);
3066         if (err != CL_SUCCESS) {
3067             return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
3068         }
3069 
3070         if (numEntries > 0) {
3071             vector<ImageFormat> value(numEntries);
3072             err = ::clGetSupportedImageFormats(
3073                 object_,
3074                 flags,
3075                 type,
3076                 numEntries,
3077                 (cl_image_format*)value.data(),
3078                 NULL);
3079             if (err != CL_SUCCESS) {
3080                 return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
3081             }
3082 
3083             formats->assign(begin(value), end(value));
3084         }
3085         else {
3086             // If no values are being returned, ensure an empty vector comes back
3087             formats->clear();
3088         }
3089 
3090         return CL_SUCCESS;
3091     }
3092 };
3093 
makeDefault()3094 inline void Device::makeDefault()
3095 {
3096     /* Throwing an exception from a call_once invocation does not do
3097     * what we wish, so we catch it and save the error.
3098     */
3099 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3100     try
3101 #endif
3102     {
3103         cl_int error = 0;
3104 
3105         Context context = Context::getDefault(&error);
3106         detail::errHandler(error, __CREATE_CONTEXT_ERR);
3107 
3108         if (error != CL_SUCCESS) {
3109             default_error_ = error;
3110         }
3111         else {
3112             default_ = context.getInfo<CL_CONTEXT_DEVICES>()[0];
3113             default_error_ = CL_SUCCESS;
3114         }
3115     }
3116 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3117     catch (cl::Error &e) {
3118         default_error_ = e.err();
3119     }
3120 #endif
3121 }
3122 
3123 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Context::default_initialized_;
3124 CL_HPP_DEFINE_STATIC_MEMBER_ Context Context::default_;
3125 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Context::default_error_ = CL_SUCCESS;
3126 
3127 /*! \brief Class interface for cl_event.
3128  *
3129  *  \note Copies of these objects are shallow, meaning that the copy will refer
3130  *        to the same underlying cl_event as the original.  For details, see
3131  *        clRetainEvent() and clReleaseEvent().
3132  *
3133  *  \see cl_event
3134  */
3135 class Event : public detail::Wrapper<cl_event>
3136 {
3137 public:
3138     //! \brief Default constructor - initializes to NULL.
Event()3139     Event() : detail::Wrapper<cl_type>() { }
3140 
3141     /*! \brief Constructor from cl_event - takes ownership.
3142      *
3143      * \param retainObject will cause the constructor to retain its cl object.
3144      *                     Defaults to false to maintain compatibility with
3145      *                     earlier versions.
3146      *  This effectively transfers ownership of a refcount on the cl_event
3147      *  into the new Event object.
3148      */
Event(const cl_event & event,bool retainObject=false)3149     explicit Event(const cl_event& event, bool retainObject = false) :
3150         detail::Wrapper<cl_type>(event, retainObject) { }
3151 
3152     /*! \brief Assignment operator from cl_event - takes ownership.
3153      *
3154      *  This effectively transfers ownership of a refcount on the rhs and calls
3155      *  clReleaseEvent() on the value previously held by this instance.
3156      */
operator =(const cl_event & rhs)3157     Event& operator = (const cl_event& rhs)
3158     {
3159         detail::Wrapper<cl_type>::operator=(rhs);
3160         return *this;
3161     }
3162 
3163     //! \brief Wrapper for clGetEventInfo().
3164     template <typename T>
getInfo(cl_event_info name,T * param) const3165     cl_int getInfo(cl_event_info name, T* param) const
3166     {
3167         return detail::errHandler(
3168             detail::getInfo(&::clGetEventInfo, object_, name, param),
3169             __GET_EVENT_INFO_ERR);
3170     }
3171 
3172     //! \brief Wrapper for clGetEventInfo() that returns by value.
3173     template <cl_int name> typename
3174     detail::param_traits<detail::cl_event_info, name>::param_type
getInfo(cl_int * err=NULL) const3175     getInfo(cl_int* err = NULL) const
3176     {
3177         typename detail::param_traits<
3178             detail::cl_event_info, name>::param_type param;
3179         cl_int result = getInfo(name, &param);
3180         if (err != NULL) {
3181             *err = result;
3182         }
3183         return param;
3184     }
3185 
3186     //! \brief Wrapper for clGetEventProfilingInfo().
3187     template <typename T>
getProfilingInfo(cl_profiling_info name,T * param) const3188     cl_int getProfilingInfo(cl_profiling_info name, T* param) const
3189     {
3190         return detail::errHandler(detail::getInfo(
3191             &::clGetEventProfilingInfo, object_, name, param),
3192             __GET_EVENT_PROFILE_INFO_ERR);
3193     }
3194 
3195     //! \brief Wrapper for clGetEventProfilingInfo() that returns by value.
3196     template <cl_int name> typename
3197     detail::param_traits<detail::cl_profiling_info, name>::param_type
getProfilingInfo(cl_int * err=NULL) const3198     getProfilingInfo(cl_int* err = NULL) const
3199     {
3200         typename detail::param_traits<
3201             detail::cl_profiling_info, name>::param_type param;
3202         cl_int result = getProfilingInfo(name, &param);
3203         if (err != NULL) {
3204             *err = result;
3205         }
3206         return param;
3207     }
3208 
3209     /*! \brief Blocks the calling thread until this event completes.
3210      *
3211      *  Wraps clWaitForEvents().
3212      */
wait() const3213     cl_int wait() const
3214     {
3215         return detail::errHandler(
3216             ::clWaitForEvents(1, &object_),
3217             __WAIT_FOR_EVENTS_ERR);
3218     }
3219 
3220 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3221     /*! \brief Registers a user callback function for a specific command execution status.
3222      *
3223      *  Wraps clSetEventCallback().
3224      */
setCallback(cl_int type,void (CL_CALLBACK * pfn_notify)(cl_event,cl_int,void *),void * user_data=NULL)3225     cl_int setCallback(
3226         cl_int type,
3227         void (CL_CALLBACK * pfn_notify)(cl_event, cl_int, void *),
3228         void * user_data = NULL)
3229     {
3230         return detail::errHandler(
3231             ::clSetEventCallback(
3232                 object_,
3233                 type,
3234                 pfn_notify,
3235                 user_data),
3236             __SET_EVENT_CALLBACK_ERR);
3237     }
3238 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3239 
3240     /*! \brief Blocks the calling thread until every event specified is complete.
3241      *
3242      *  Wraps clWaitForEvents().
3243      */
3244     static cl_int
waitForEvents(const vector<Event> & events)3245     waitForEvents(const vector<Event>& events)
3246     {
3247         return detail::errHandler(
3248             ::clWaitForEvents(
3249                 (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
3250             __WAIT_FOR_EVENTS_ERR);
3251     }
3252 };
3253 
3254 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3255 /*! \brief Class interface for user events (a subset of cl_event's).
3256  *
3257  *  See Event for details about copy semantics, etc.
3258  */
3259 class UserEvent : public Event
3260 {
3261 public:
3262     /*! \brief Constructs a user event on a given context.
3263      *
3264      *  Wraps clCreateUserEvent().
3265      */
UserEvent(const Context & context,cl_int * err=NULL)3266     UserEvent(
3267         const Context& context,
3268         cl_int * err = NULL)
3269     {
3270         cl_int error;
3271         object_ = ::clCreateUserEvent(
3272             context(),
3273             &error);
3274 
3275         detail::errHandler(error, __CREATE_USER_EVENT_ERR);
3276         if (err != NULL) {
3277             *err = error;
3278         }
3279     }
3280 
3281     //! \brief Default constructor - initializes to NULL.
UserEvent()3282     UserEvent() : Event() { }
3283 
3284     /*! \brief Sets the execution status of a user event object.
3285      *
3286      *  Wraps clSetUserEventStatus().
3287      */
setStatus(cl_int status)3288     cl_int setStatus(cl_int status)
3289     {
3290         return detail::errHandler(
3291             ::clSetUserEventStatus(object_,status),
3292             __SET_USER_EVENT_STATUS_ERR);
3293     }
3294 };
3295 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3296 
3297 /*! \brief Blocks the calling thread until every event specified is complete.
3298  *
3299  *  Wraps clWaitForEvents().
3300  */
3301 inline static cl_int
WaitForEvents(const vector<Event> & events)3302 WaitForEvents(const vector<Event>& events)
3303 {
3304     return detail::errHandler(
3305         ::clWaitForEvents(
3306             (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
3307         __WAIT_FOR_EVENTS_ERR);
3308 }
3309 
3310 /*! \brief Class interface for cl_mem.
3311  *
3312  *  \note Copies of these objects are shallow, meaning that the copy will refer
3313  *        to the same underlying cl_mem as the original.  For details, see
3314  *        clRetainMemObject() and clReleaseMemObject().
3315  *
3316  *  \see cl_mem
3317  */
3318 class Memory : public detail::Wrapper<cl_mem>
3319 {
3320 public:
3321     //! \brief Default constructor - initializes to NULL.
Memory()3322     Memory() : detail::Wrapper<cl_type>() { }
3323 
3324     /*! \brief Constructor from cl_mem - takes ownership.
3325      *
3326      *  Optionally transfer ownership of a refcount on the cl_mem
3327      *  into the new Memory object.
3328      *
3329      * \param retainObject will cause the constructor to retain its cl object.
3330      *                     Defaults to false to maintain compatibility with
3331      *                     earlier versions.
3332      *
3333      *  See Memory for further details.
3334      */
Memory(const cl_mem & memory,bool retainObject)3335     explicit Memory(const cl_mem& memory, bool retainObject) :
3336         detail::Wrapper<cl_type>(memory, retainObject) { }
3337 
3338     /*! \brief Assignment operator from cl_mem - takes ownership.
3339      *
3340      *  This effectively transfers ownership of a refcount on the rhs and calls
3341      *  clReleaseMemObject() on the value previously held by this instance.
3342      */
operator =(const cl_mem & rhs)3343     Memory& operator = (const cl_mem& rhs)
3344     {
3345         detail::Wrapper<cl_type>::operator=(rhs);
3346         return *this;
3347     }
3348 
3349     /*! \brief Copy constructor to forward copy to the superclass correctly.
3350      * Required for MSVC.
3351      */
Memory(const Memory & mem)3352     Memory(const Memory& mem) : detail::Wrapper<cl_type>(mem) {}
3353 
3354     /*! \brief Copy assignment to forward copy to the superclass correctly.
3355      * Required for MSVC.
3356      */
operator =(const Memory & mem)3357     Memory& operator = (const Memory &mem)
3358     {
3359         detail::Wrapper<cl_type>::operator=(mem);
3360         return *this;
3361     }
3362 
3363     /*! \brief Move constructor to forward move to the superclass correctly.
3364      * Required for MSVC.
3365      */
Memory(Memory && mem)3366     Memory(Memory&& mem) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(mem)) {}
3367 
3368     /*! \brief Move assignment to forward move to the superclass correctly.
3369      * Required for MSVC.
3370      */
operator =(Memory && mem)3371     Memory& operator = (Memory &&mem)
3372     {
3373         detail::Wrapper<cl_type>::operator=(std::move(mem));
3374         return *this;
3375     }
3376 
3377 
3378     //! \brief Wrapper for clGetMemObjectInfo().
3379     template <typename T>
getInfo(cl_mem_info name,T * param) const3380     cl_int getInfo(cl_mem_info name, T* param) const
3381     {
3382         return detail::errHandler(
3383             detail::getInfo(&::clGetMemObjectInfo, object_, name, param),
3384             __GET_MEM_OBJECT_INFO_ERR);
3385     }
3386 
3387     //! \brief Wrapper for clGetMemObjectInfo() that returns by value.
3388     template <cl_int name> typename
3389     detail::param_traits<detail::cl_mem_info, name>::param_type
getInfo(cl_int * err=NULL) const3390     getInfo(cl_int* err = NULL) const
3391     {
3392         typename detail::param_traits<
3393             detail::cl_mem_info, name>::param_type param;
3394         cl_int result = getInfo(name, &param);
3395         if (err != NULL) {
3396             *err = result;
3397         }
3398         return param;
3399     }
3400 
3401 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3402     /*! \brief Registers a callback function to be called when the memory object
3403      *         is no longer needed.
3404      *
3405      *  Wraps clSetMemObjectDestructorCallback().
3406      *
3407      *  Repeated calls to this function, for a given cl_mem value, will append
3408      *  to the list of functions called (in reverse order) when memory object's
3409      *  resources are freed and the memory object is deleted.
3410      *
3411      *  \note
3412      *  The registered callbacks are associated with the underlying cl_mem
3413      *  value - not the Memory class instance.
3414      */
setDestructorCallback(void (CL_CALLBACK * pfn_notify)(cl_mem,void *),void * user_data=NULL)3415     cl_int setDestructorCallback(
3416         void (CL_CALLBACK * pfn_notify)(cl_mem, void *),
3417         void * user_data = NULL)
3418     {
3419         return detail::errHandler(
3420             ::clSetMemObjectDestructorCallback(
3421                 object_,
3422                 pfn_notify,
3423                 user_data),
3424             __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR);
3425     }
3426 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3427 
3428 };
3429 
3430 // Pre-declare copy functions
3431 class Buffer;
3432 template< typename IteratorType >
3433 cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
3434 template< typename IteratorType >
3435 cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
3436 template< typename IteratorType >
3437 cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
3438 template< typename IteratorType >
3439 cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
3440 
3441 
3442 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
3443 namespace detail
3444 {
3445     class SVMTraitNull
3446     {
3447     public:
getSVMMemFlags()3448         static cl_svm_mem_flags getSVMMemFlags()
3449         {
3450             return 0;
3451         }
3452     };
3453 } // namespace detail
3454 
3455 template<class Trait = detail::SVMTraitNull>
3456 class SVMTraitReadWrite
3457 {
3458 public:
getSVMMemFlags()3459     static cl_svm_mem_flags getSVMMemFlags()
3460     {
3461         return CL_MEM_READ_WRITE |
3462             Trait::getSVMMemFlags();
3463     }
3464 };
3465 
3466 template<class Trait = detail::SVMTraitNull>
3467 class SVMTraitReadOnly
3468 {
3469 public:
getSVMMemFlags()3470     static cl_svm_mem_flags getSVMMemFlags()
3471     {
3472         return CL_MEM_READ_ONLY |
3473             Trait::getSVMMemFlags();
3474     }
3475 };
3476 
3477 template<class Trait = detail::SVMTraitNull>
3478 class SVMTraitWriteOnly
3479 {
3480 public:
getSVMMemFlags()3481     static cl_svm_mem_flags getSVMMemFlags()
3482     {
3483         return CL_MEM_WRITE_ONLY |
3484             Trait::getSVMMemFlags();
3485     }
3486 };
3487 
3488 template<class Trait = SVMTraitReadWrite<>>
3489 class SVMTraitCoarse
3490 {
3491 public:
getSVMMemFlags()3492     static cl_svm_mem_flags getSVMMemFlags()
3493     {
3494         return Trait::getSVMMemFlags();
3495     }
3496 };
3497 
3498 template<class Trait = SVMTraitReadWrite<>>
3499 class SVMTraitFine
3500 {
3501 public:
getSVMMemFlags()3502     static cl_svm_mem_flags getSVMMemFlags()
3503     {
3504         return CL_MEM_SVM_FINE_GRAIN_BUFFER |
3505             Trait::getSVMMemFlags();
3506     }
3507 };
3508 
3509 template<class Trait = SVMTraitReadWrite<>>
3510 class SVMTraitAtomic
3511 {
3512 public:
getSVMMemFlags()3513     static cl_svm_mem_flags getSVMMemFlags()
3514     {
3515         return
3516             CL_MEM_SVM_FINE_GRAIN_BUFFER |
3517             CL_MEM_SVM_ATOMICS |
3518             Trait::getSVMMemFlags();
3519     }
3520 };
3521 
3522 // Pre-declare SVM map function
3523 template<typename T>
3524 inline cl_int enqueueMapSVM(
3525     T* ptr,
3526     cl_bool blocking,
3527     cl_map_flags flags,
3528     size_type size,
3529     const vector<Event>* events = NULL,
3530     Event* event = NULL);
3531 
3532 /**
3533  * STL-like allocator class for managing SVM objects provided for convenience.
3534  *
3535  * Note that while this behaves like an allocator for the purposes of constructing vectors and similar objects,
3536  * care must be taken when using with smart pointers.
3537  * The allocator should not be used to construct a unique_ptr if we are using coarse-grained SVM mode because
3538  * the coarse-grained management behaviour would behave incorrectly with respect to reference counting.
3539  *
3540  * Instead the allocator embeds a Deleter which may be used with unique_ptr and is used
3541  * with the allocate_shared and allocate_ptr supplied operations.
3542  */
3543 template<typename T, class SVMTrait>
3544 class SVMAllocator {
3545 private:
3546     Context context_;
3547 
3548 public:
3549     typedef T value_type;
3550     typedef value_type* pointer;
3551     typedef const value_type* const_pointer;
3552     typedef value_type& reference;
3553     typedef const value_type& const_reference;
3554     typedef std::size_t size_type;
3555     typedef std::ptrdiff_t difference_type;
3556 
3557     template<typename U>
3558     struct rebind
3559     {
3560         typedef SVMAllocator<U, SVMTrait> other;
3561     };
3562 
3563     template<typename U, typename V>
3564     friend class SVMAllocator;
3565 
SVMAllocator()3566     SVMAllocator() :
3567         context_(Context::getDefault())
3568     {
3569     }
3570 
SVMAllocator(cl::Context context)3571     explicit SVMAllocator(cl::Context context) :
3572         context_(context)
3573     {
3574     }
3575 
3576 
SVMAllocator(const SVMAllocator & other)3577     SVMAllocator(const SVMAllocator &other) :
3578         context_(other.context_)
3579     {
3580     }
3581 
3582     template<typename U>
SVMAllocator(const SVMAllocator<U,SVMTrait> & other)3583     SVMAllocator(const SVMAllocator<U, SVMTrait> &other) :
3584         context_(other.context_)
3585     {
3586     }
3587 
~SVMAllocator()3588     ~SVMAllocator()
3589     {
3590     }
3591 
address(reference r)3592     pointer address(reference r) CL_HPP_NOEXCEPT_
3593     {
3594         return std::addressof(r);
3595     }
3596 
address(const_reference r)3597     const_pointer address(const_reference r) CL_HPP_NOEXCEPT_
3598     {
3599         return std::addressof(r);
3600     }
3601 
3602     /**
3603      * Allocate an SVM pointer.
3604      *
3605      * If the allocator is coarse-grained, this will take ownership to allow
3606      * containers to correctly construct data in place.
3607      */
allocate(size_type size,typename cl::SVMAllocator<void,SVMTrait>::const_pointer=0)3608     pointer allocate(
3609         size_type size,
3610         typename cl::SVMAllocator<void, SVMTrait>::const_pointer = 0)
3611     {
3612         // Allocate memory with default alignment matching the size of the type
3613         void* voidPointer =
3614             clSVMAlloc(
3615             context_(),
3616             SVMTrait::getSVMMemFlags(),
3617             size*sizeof(T),
3618             0);
3619         pointer retValue = reinterpret_cast<pointer>(
3620             voidPointer);
3621 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3622         if (!retValue) {
3623             std::bad_alloc excep;
3624             throw excep;
3625         }
3626 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3627 
3628         // If allocation was coarse-grained then map it
3629         if (!(SVMTrait::getSVMMemFlags() & CL_MEM_SVM_FINE_GRAIN_BUFFER)) {
3630             cl_int err = enqueueMapSVM(retValue, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, size*sizeof(T));
3631             if (err != CL_SUCCESS) {
3632                 std::bad_alloc excep;
3633                 throw excep;
3634             }
3635         }
3636 
3637         // If exceptions disabled, return null pointer from allocator
3638         return retValue;
3639     }
3640 
deallocate(pointer p,size_type)3641     void deallocate(pointer p, size_type)
3642     {
3643         clSVMFree(context_(), p);
3644     }
3645 
3646     /**
3647      * Return the maximum possible allocation size.
3648      * This is the minimum of the maximum sizes of all devices in the context.
3649      */
max_size() const3650     size_type max_size() const CL_HPP_NOEXCEPT_
3651     {
3652         size_type maxSize = std::numeric_limits<size_type>::max() / sizeof(T);
3653 
3654         for (const Device &d : context_.getInfo<CL_CONTEXT_DEVICES>()) {
3655             maxSize = std::min(
3656                 maxSize,
3657                 static_cast<size_type>(d.getInfo<CL_DEVICE_MAX_MEM_ALLOC_SIZE>()));
3658         }
3659 
3660         return maxSize;
3661     }
3662 
3663     template< class U, class... Args >
construct(U * p,Args &&...args)3664     void construct(U* p, Args&&... args)
3665     {
3666         new(p)T(args...);
3667     }
3668 
3669     template< class U >
destroy(U * p)3670     void destroy(U* p)
3671     {
3672         p->~U();
3673     }
3674 
3675     /**
3676      * Returns true if the contexts match.
3677      */
operator ==(SVMAllocator const & rhs)3678     inline bool operator==(SVMAllocator const& rhs)
3679     {
3680         return (context_==rhs.context_);
3681     }
3682 
operator !=(SVMAllocator const & a)3683     inline bool operator!=(SVMAllocator const& a)
3684     {
3685         return !operator==(a);
3686     }
3687 }; // class SVMAllocator        return cl::pointer<T>(tmp, detail::Deleter<T, Alloc>{alloc, copies});
3688 
3689 
3690 template<class SVMTrait>
3691 class SVMAllocator<void, SVMTrait> {
3692 public:
3693     typedef void value_type;
3694     typedef value_type* pointer;
3695     typedef const value_type* const_pointer;
3696 
3697     template<typename U>
3698     struct rebind
3699     {
3700         typedef SVMAllocator<U, SVMTrait> other;
3701     };
3702 
3703     template<typename U, typename V>
3704     friend class SVMAllocator;
3705 };
3706 
3707 #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
3708 namespace detail
3709 {
3710     template<class Alloc>
3711     class Deleter {
3712     private:
3713         Alloc alloc_;
3714         size_type copies_;
3715 
3716     public:
3717         typedef typename std::allocator_traits<Alloc>::pointer pointer;
3718 
Deleter(const Alloc & alloc,size_type copies)3719         Deleter(const Alloc &alloc, size_type copies) : alloc_{ alloc }, copies_{ copies }
3720         {
3721         }
3722 
operator ()(pointer ptr) const3723         void operator()(pointer ptr) const {
3724             Alloc tmpAlloc{ alloc_ };
3725             std::allocator_traits<Alloc>::destroy(tmpAlloc, std::addressof(*ptr));
3726             std::allocator_traits<Alloc>::deallocate(tmpAlloc, ptr, copies_);
3727         }
3728     };
3729 } // namespace detail
3730 
3731 /**
3732  * Allocation operation compatible with std::allocate_ptr.
3733  * Creates a unique_ptr<T> by default.
3734  * This requirement is to ensure that the control block is not
3735  * allocated in memory inaccessible to the host.
3736  */
3737 template <class T, class Alloc, class... Args>
allocate_pointer(const Alloc & alloc_,Args &&...args)3738 cl::pointer<T, detail::Deleter<Alloc>> allocate_pointer(const Alloc &alloc_, Args&&... args)
3739 {
3740     Alloc alloc(alloc_);
3741     static const size_type copies = 1;
3742 
3743     // Ensure that creation of the management block and the
3744     // object are dealt with separately such that we only provide a deleter
3745 
3746     T* tmp = std::allocator_traits<Alloc>::allocate(alloc, copies);
3747     if (!tmp) {
3748         std::bad_alloc excep;
3749         throw excep;
3750     }
3751     try {
3752         std::allocator_traits<Alloc>::construct(
3753             alloc,
3754             std::addressof(*tmp),
3755             std::forward<Args>(args)...);
3756 
3757         return cl::pointer<T, detail::Deleter<Alloc>>(tmp, detail::Deleter<Alloc>{alloc, copies});
3758     }
3759     catch (std::bad_alloc& b)
3760     {
3761         std::allocator_traits<Alloc>::deallocate(alloc, tmp, copies);
3762         throw;
3763     }
3764 }
3765 
3766 template< class T, class SVMTrait, class... Args >
allocate_svm(Args...args)3767 cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(Args... args)
3768 {
3769     SVMAllocator<T, SVMTrait> alloc;
3770     return cl::allocate_pointer<T>(alloc, args...);
3771 }
3772 
3773 template< class T, class SVMTrait, class... Args >
allocate_svm(const cl::Context & c,Args...args)3774 cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(const cl::Context &c, Args... args)
3775 {
3776     SVMAllocator<T, SVMTrait> alloc(c);
3777     return cl::allocate_pointer<T>(alloc, args...);
3778 }
3779 #endif // #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
3780 
3781 /*! \brief Vector alias to simplify contruction of coarse-grained SVM containers.
3782  *
3783  */
3784 template < class T >
3785 using coarse_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>>;
3786 
3787 /*! \brief Vector alias to simplify contruction of fine-grained SVM containers.
3788 *
3789 */
3790 template < class T >
3791 using fine_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitFine<>>>;
3792 
3793 /*! \brief Vector alias to simplify contruction of fine-grained SVM containers that support platform atomics.
3794 *
3795 */
3796 template < class T >
3797 using atomic_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitAtomic<>>>;
3798 
3799 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
3800 
3801 
3802 /*! \brief Class interface for Buffer Memory Objects.
3803  *
3804  *  See Memory for details about copy semantics, etc.
3805  *
3806  *  \see Memory
3807  */
3808 class Buffer : public Memory
3809 {
3810 public:
3811 
3812     /*! \brief Constructs a Buffer in a specified context.
3813      *
3814      *  Wraps clCreateBuffer().
3815      *
3816      *  \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
3817      *                  specified.  Note alignment & exclusivity requirements.
3818      */
Buffer(const Context & context,cl_mem_flags flags,size_type size,void * host_ptr=NULL,cl_int * err=NULL)3819     Buffer(
3820         const Context& context,
3821         cl_mem_flags flags,
3822         size_type size,
3823         void* host_ptr = NULL,
3824         cl_int* err = NULL)
3825     {
3826         cl_int error;
3827         object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
3828 
3829         detail::errHandler(error, __CREATE_BUFFER_ERR);
3830         if (err != NULL) {
3831             *err = error;
3832         }
3833     }
3834 
3835     /*! \brief Constructs a Buffer in the default context.
3836      *
3837      *  Wraps clCreateBuffer().
3838      *
3839      *  \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
3840      *                  specified.  Note alignment & exclusivity requirements.
3841      *
3842      *  \see Context::getDefault()
3843      */
Buffer(cl_mem_flags flags,size_type size,void * host_ptr=NULL,cl_int * err=NULL)3844     Buffer(
3845          cl_mem_flags flags,
3846         size_type size,
3847         void* host_ptr = NULL,
3848         cl_int* err = NULL)
3849     {
3850         cl_int error;
3851 
3852         Context context = Context::getDefault(err);
3853 
3854         object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
3855 
3856         detail::errHandler(error, __CREATE_BUFFER_ERR);
3857         if (err != NULL) {
3858             *err = error;
3859         }
3860     }
3861 
3862     /*!
3863      * \brief Construct a Buffer from a host container via iterators.
3864      * IteratorType must be random access.
3865      * If useHostPtr is specified iterators must represent contiguous data.
3866      */
3867     template< typename IteratorType >
Buffer(IteratorType startIterator,IteratorType endIterator,bool readOnly,bool useHostPtr=false,cl_int * err=NULL)3868     Buffer(
3869         IteratorType startIterator,
3870         IteratorType endIterator,
3871         bool readOnly,
3872         bool useHostPtr = false,
3873         cl_int* err = NULL)
3874     {
3875         typedef typename std::iterator_traits<IteratorType>::value_type DataType;
3876         cl_int error;
3877 
3878         cl_mem_flags flags = 0;
3879         if( readOnly ) {
3880             flags |= CL_MEM_READ_ONLY;
3881         }
3882         else {
3883             flags |= CL_MEM_READ_WRITE;
3884         }
3885         if( useHostPtr ) {
3886             flags |= CL_MEM_USE_HOST_PTR;
3887         }
3888 
3889         size_type size = sizeof(DataType)*(endIterator - startIterator);
3890 
3891         Context context = Context::getDefault(err);
3892 
3893         if( useHostPtr ) {
3894             object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
3895         } else {
3896             object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
3897         }
3898 
3899         detail::errHandler(error, __CREATE_BUFFER_ERR);
3900         if (err != NULL) {
3901             *err = error;
3902         }
3903 
3904         if( !useHostPtr ) {
3905             error = cl::copy(startIterator, endIterator, *this);
3906             detail::errHandler(error, __CREATE_BUFFER_ERR);
3907             if (err != NULL) {
3908                 *err = error;
3909             }
3910         }
3911     }
3912 
3913     /*!
3914      * \brief Construct a Buffer from a host container via iterators using a specified context.
3915      * IteratorType must be random access.
3916      * If useHostPtr is specified iterators must represent contiguous data.
3917      */
3918     template< typename IteratorType >
3919     Buffer(const Context &context, IteratorType startIterator, IteratorType endIterator,
3920         bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
3921 
3922     /*!
3923     * \brief Construct a Buffer from a host container via iterators using a specified queue.
3924     * If useHostPtr is specified iterators must be random access.
3925     */
3926     template< typename IteratorType >
3927     Buffer(const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator,
3928         bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
3929 
3930     //! \brief Default constructor - initializes to NULL.
Buffer()3931     Buffer() : Memory() { }
3932 
3933     /*! \brief Constructor from cl_mem - takes ownership.
3934      *
3935      * \param retainObject will cause the constructor to retain its cl object.
3936      *                     Defaults to false to maintain compatibility with earlier versions.
3937      *
3938      *  See Memory for further details.
3939      */
Buffer(const cl_mem & buffer,bool retainObject=false)3940     explicit Buffer(const cl_mem& buffer, bool retainObject = false) :
3941         Memory(buffer, retainObject) { }
3942 
3943     /*! \brief Assignment from cl_mem - performs shallow copy.
3944     *
3945     *  See Memory for further details.
3946     */
operator =(const cl_mem & rhs)3947     Buffer& operator = (const cl_mem& rhs)
3948     {
3949         Memory::operator=(rhs);
3950         return *this;
3951     }
3952 
3953     /*! \brief Copy constructor to forward copy to the superclass correctly.
3954      * Required for MSVC.
3955      */
Buffer(const Buffer & buf)3956     Buffer(const Buffer& buf) : Memory(buf) {}
3957 
3958     /*! \brief Copy assignment to forward copy to the superclass correctly.
3959      * Required for MSVC.
3960      */
operator =(const Buffer & buf)3961     Buffer& operator = (const Buffer &buf)
3962     {
3963         Memory::operator=(buf);
3964         return *this;
3965     }
3966 
3967     /*! \brief Move constructor to forward move to the superclass correctly.
3968      * Required for MSVC.
3969      */
Buffer(Buffer && buf)3970     Buffer(Buffer&& buf) CL_HPP_NOEXCEPT_ : Memory(std::move(buf)) {}
3971 
3972     /*! \brief Move assignment to forward move to the superclass correctly.
3973      * Required for MSVC.
3974      */
operator =(Buffer && buf)3975     Buffer& operator = (Buffer &&buf)
3976     {
3977         Memory::operator=(std::move(buf));
3978         return *this;
3979     }
3980 
3981 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3982     /*! \brief Creates a new buffer object from this.
3983      *
3984      *  Wraps clCreateSubBuffer().
3985      */
createSubBuffer(cl_mem_flags flags,cl_buffer_create_type buffer_create_type,const void * buffer_create_info,cl_int * err=NULL)3986     Buffer createSubBuffer(
3987         cl_mem_flags flags,
3988         cl_buffer_create_type buffer_create_type,
3989         const void * buffer_create_info,
3990         cl_int * err = NULL)
3991     {
3992         Buffer result;
3993         cl_int error;
3994         result.object_ = ::clCreateSubBuffer(
3995             object_,
3996             flags,
3997             buffer_create_type,
3998             buffer_create_info,
3999             &error);
4000 
4001         detail::errHandler(error, __CREATE_SUBBUFFER_ERR);
4002         if (err != NULL) {
4003             *err = error;
4004         }
4005 
4006         return result;
4007     }
4008 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
4009 };
4010 
4011 #if defined (CL_HPP_USE_DX_INTEROP)
4012 /*! \brief Class interface for creating OpenCL buffers from ID3D10Buffer's.
4013  *
4014  *  This is provided to facilitate interoperability with Direct3D.
4015  *
4016  *  See Memory for details about copy semantics, etc.
4017  *
4018  *  \see Memory
4019  */
4020 class BufferD3D10 : public Buffer
4021 {
4022 public:
4023 
4024 
4025     /*! \brief Constructs a BufferD3D10, in a specified context, from a
4026      *         given ID3D10Buffer.
4027      *
4028      *  Wraps clCreateFromD3D10BufferKHR().
4029      */
BufferD3D10(const Context & context,cl_mem_flags flags,ID3D10Buffer * bufobj,cl_int * err=NULL)4030     BufferD3D10(
4031         const Context& context,
4032         cl_mem_flags flags,
4033         ID3D10Buffer* bufobj,
4034         cl_int * err = NULL) : pfn_clCreateFromD3D10BufferKHR(nullptr)
4035     {
4036         typedef CL_API_ENTRY cl_mem (CL_API_CALL *PFN_clCreateFromD3D10BufferKHR)(
4037             cl_context context, cl_mem_flags flags, ID3D10Buffer*  buffer,
4038             cl_int* errcode_ret);
4039         PFN_clCreateFromD3D10BufferKHR pfn_clCreateFromD3D10BufferKHR;
4040 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4041         vector<cl_context_properties> props = context.getInfo<CL_CONTEXT_PROPERTIES>();
4042         cl_platform platform = -1;
4043         for( int i = 0; i < props.size(); ++i ) {
4044             if( props[i] == CL_CONTEXT_PLATFORM ) {
4045                 platform = props[i+1];
4046             }
4047         }
4048         CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateFromD3D10BufferKHR);
4049 #elif CL_HPP_TARGET_OPENCL_VERSION >= 110
4050         CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateFromD3D10BufferKHR);
4051 #endif
4052 
4053         cl_int error;
4054         object_ = pfn_clCreateFromD3D10BufferKHR(
4055             context(),
4056             flags,
4057             bufobj,
4058             &error);
4059 
4060         detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
4061         if (err != NULL) {
4062             *err = error;
4063         }
4064     }
4065 
4066     //! \brief Default constructor - initializes to NULL.
BufferD3D10()4067     BufferD3D10() : Buffer() { }
4068 
4069     /*! \brief Constructor from cl_mem - takes ownership.
4070      *
4071      * \param retainObject will cause the constructor to retain its cl object.
4072      *                     Defaults to false to maintain compatibility with
4073      *                     earlier versions.
4074      *  See Memory for further details.
4075      */
BufferD3D10(const cl_mem & buffer,bool retainObject=false)4076     explicit BufferD3D10(const cl_mem& buffer, bool retainObject = false) :
4077         Buffer(buffer, retainObject) { }
4078 
4079     /*! \brief Assignment from cl_mem - performs shallow copy.
4080      *
4081      *  See Memory for further details.
4082      */
operator =(const cl_mem & rhs)4083     BufferD3D10& operator = (const cl_mem& rhs)
4084     {
4085         Buffer::operator=(rhs);
4086         return *this;
4087     }
4088 
4089     /*! \brief Copy constructor to forward copy to the superclass correctly.
4090      * Required for MSVC.
4091      */
BufferD3D10(const BufferD3D10 & buf)4092     BufferD3D10(const BufferD3D10& buf) :
4093         Buffer(buf) {}
4094 
4095     /*! \brief Copy assignment to forward copy to the superclass correctly.
4096      * Required for MSVC.
4097      */
operator =(const BufferD3D10 & buf)4098     BufferD3D10& operator = (const BufferD3D10 &buf)
4099     {
4100         Buffer::operator=(buf);
4101         return *this;
4102     }
4103 
4104     /*! \brief Move constructor to forward move to the superclass correctly.
4105      * Required for MSVC.
4106      */
BufferD3D10(BufferD3D10 && buf)4107     BufferD3D10(BufferD3D10&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
4108 
4109     /*! \brief Move assignment to forward move to the superclass correctly.
4110      * Required for MSVC.
4111      */
operator =(BufferD3D10 && buf)4112     BufferD3D10& operator = (BufferD3D10 &&buf)
4113     {
4114         Buffer::operator=(std::move(buf));
4115         return *this;
4116     }
4117 };
4118 #endif
4119 
4120 /*! \brief Class interface for GL Buffer Memory Objects.
4121  *
4122  *  This is provided to facilitate interoperability with OpenGL.
4123  *
4124  *  See Memory for details about copy semantics, etc.
4125  *
4126  *  \see Memory
4127  */
4128 class BufferGL : public Buffer
4129 {
4130 public:
4131     /*! \brief Constructs a BufferGL in a specified context, from a given
4132      *         GL buffer.
4133      *
4134      *  Wraps clCreateFromGLBuffer().
4135      */
BufferGL(const Context & context,cl_mem_flags flags,cl_GLuint bufobj,cl_int * err=NULL)4136     BufferGL(
4137         const Context& context,
4138         cl_mem_flags flags,
4139         cl_GLuint bufobj,
4140         cl_int * err = NULL)
4141     {
4142         cl_int error;
4143         object_ = ::clCreateFromGLBuffer(
4144             context(),
4145             flags,
4146             bufobj,
4147             &error);
4148 
4149         detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
4150         if (err != NULL) {
4151             *err = error;
4152         }
4153     }
4154 
4155     //! \brief Default constructor - initializes to NULL.
BufferGL()4156     BufferGL() : Buffer() { }
4157 
4158     /*! \brief Constructor from cl_mem - takes ownership.
4159      *
4160      * \param retainObject will cause the constructor to retain its cl object.
4161      *                     Defaults to false to maintain compatibility with
4162      *                     earlier versions.
4163      *  See Memory for further details.
4164      */
BufferGL(const cl_mem & buffer,bool retainObject=false)4165     explicit BufferGL(const cl_mem& buffer, bool retainObject = false) :
4166         Buffer(buffer, retainObject) { }
4167 
4168     /*! \brief Assignment from cl_mem - performs shallow copy.
4169      *
4170      *  See Memory for further details.
4171      */
operator =(const cl_mem & rhs)4172     BufferGL& operator = (const cl_mem& rhs)
4173     {
4174         Buffer::operator=(rhs);
4175         return *this;
4176     }
4177 
4178     /*! \brief Copy constructor to forward copy to the superclass correctly.
4179      * Required for MSVC.
4180      */
BufferGL(const BufferGL & buf)4181     BufferGL(const BufferGL& buf) : Buffer(buf) {}
4182 
4183     /*! \brief Copy assignment to forward copy to the superclass correctly.
4184      * Required for MSVC.
4185      */
operator =(const BufferGL & buf)4186     BufferGL& operator = (const BufferGL &buf)
4187     {
4188         Buffer::operator=(buf);
4189         return *this;
4190     }
4191 
4192     /*! \brief Move constructor to forward move to the superclass correctly.
4193      * Required for MSVC.
4194      */
BufferGL(BufferGL && buf)4195     BufferGL(BufferGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
4196 
4197     /*! \brief Move assignment to forward move to the superclass correctly.
4198      * Required for MSVC.
4199      */
operator =(BufferGL && buf)4200     BufferGL& operator = (BufferGL &&buf)
4201     {
4202         Buffer::operator=(std::move(buf));
4203         return *this;
4204     }
4205 
4206     //! \brief Wrapper for clGetGLObjectInfo().
getObjectInfo(cl_gl_object_type * type,cl_GLuint * gl_object_name)4207     cl_int getObjectInfo(
4208         cl_gl_object_type *type,
4209         cl_GLuint * gl_object_name)
4210     {
4211         return detail::errHandler(
4212             ::clGetGLObjectInfo(object_,type,gl_object_name),
4213             __GET_GL_OBJECT_INFO_ERR);
4214     }
4215 };
4216 
4217 /*! \brief Class interface for GL Render Buffer Memory Objects.
4218  *
4219  *  This is provided to facilitate interoperability with OpenGL.
4220  *
4221  *  See Memory for details about copy semantics, etc.
4222  *
4223  *  \see Memory
4224  */
4225 class BufferRenderGL : public Buffer
4226 {
4227 public:
4228     /*! \brief Constructs a BufferRenderGL in a specified context, from a given
4229      *         GL Renderbuffer.
4230      *
4231      *  Wraps clCreateFromGLRenderbuffer().
4232      */
BufferRenderGL(const Context & context,cl_mem_flags flags,cl_GLuint bufobj,cl_int * err=NULL)4233     BufferRenderGL(
4234         const Context& context,
4235         cl_mem_flags flags,
4236         cl_GLuint bufobj,
4237         cl_int * err = NULL)
4238     {
4239         cl_int error;
4240         object_ = ::clCreateFromGLRenderbuffer(
4241             context(),
4242             flags,
4243             bufobj,
4244             &error);
4245 
4246         detail::errHandler(error, __CREATE_GL_RENDER_BUFFER_ERR);
4247         if (err != NULL) {
4248             *err = error;
4249         }
4250     }
4251 
4252     //! \brief Default constructor - initializes to NULL.
BufferRenderGL()4253     BufferRenderGL() : Buffer() { }
4254 
4255     /*! \brief Constructor from cl_mem - takes ownership.
4256      *
4257      * \param retainObject will cause the constructor to retain its cl object.
4258      *                     Defaults to false to maintain compatibility with
4259      *                     earlier versions.
4260      *  See Memory for further details.
4261      */
BufferRenderGL(const cl_mem & buffer,bool retainObject=false)4262     explicit BufferRenderGL(const cl_mem& buffer, bool retainObject = false) :
4263         Buffer(buffer, retainObject) { }
4264 
4265     /*! \brief Assignment from cl_mem - performs shallow copy.
4266      *
4267      *  See Memory for further details.
4268      */
operator =(const cl_mem & rhs)4269     BufferRenderGL& operator = (const cl_mem& rhs)
4270     {
4271         Buffer::operator=(rhs);
4272         return *this;
4273     }
4274 
4275     /*! \brief Copy constructor to forward copy to the superclass correctly.
4276      * Required for MSVC.
4277      */
BufferRenderGL(const BufferRenderGL & buf)4278     BufferRenderGL(const BufferRenderGL& buf) : Buffer(buf) {}
4279 
4280     /*! \brief Copy assignment to forward copy to the superclass correctly.
4281      * Required for MSVC.
4282      */
operator =(const BufferRenderGL & buf)4283     BufferRenderGL& operator = (const BufferRenderGL &buf)
4284     {
4285         Buffer::operator=(buf);
4286         return *this;
4287     }
4288 
4289     /*! \brief Move constructor to forward move to the superclass correctly.
4290      * Required for MSVC.
4291      */
BufferRenderGL(BufferRenderGL && buf)4292     BufferRenderGL(BufferRenderGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
4293 
4294     /*! \brief Move assignment to forward move to the superclass correctly.
4295      * Required for MSVC.
4296      */
operator =(BufferRenderGL && buf)4297     BufferRenderGL& operator = (BufferRenderGL &&buf)
4298     {
4299         Buffer::operator=(std::move(buf));
4300         return *this;
4301     }
4302 
4303     //! \brief Wrapper for clGetGLObjectInfo().
getObjectInfo(cl_gl_object_type * type,cl_GLuint * gl_object_name)4304     cl_int getObjectInfo(
4305         cl_gl_object_type *type,
4306         cl_GLuint * gl_object_name)
4307     {
4308         return detail::errHandler(
4309             ::clGetGLObjectInfo(object_,type,gl_object_name),
4310             __GET_GL_OBJECT_INFO_ERR);
4311     }
4312 };
4313 
4314 /*! \brief C++ base class for Image Memory objects.
4315  *
4316  *  See Memory for details about copy semantics, etc.
4317  *
4318  *  \see Memory
4319  */
4320 class Image : public Memory
4321 {
4322 protected:
4323     //! \brief Default constructor - initializes to NULL.
Image()4324     Image() : Memory() { }
4325 
4326     /*! \brief Constructor from cl_mem - takes ownership.
4327      *
4328      * \param retainObject will cause the constructor to retain its cl object.
4329      *                     Defaults to false to maintain compatibility with
4330      *                     earlier versions.
4331      *  See Memory for further details.
4332      */
Image(const cl_mem & image,bool retainObject=false)4333     explicit Image(const cl_mem& image, bool retainObject = false) :
4334         Memory(image, retainObject) { }
4335 
4336     /*! \brief Assignment from cl_mem - performs shallow copy.
4337      *
4338      *  See Memory for further details.
4339      */
operator =(const cl_mem & rhs)4340     Image& operator = (const cl_mem& rhs)
4341     {
4342         Memory::operator=(rhs);
4343         return *this;
4344     }
4345 
4346     /*! \brief Copy constructor to forward copy to the superclass correctly.
4347      * Required for MSVC.
4348      */
Image(const Image & img)4349     Image(const Image& img) : Memory(img) {}
4350 
4351     /*! \brief Copy assignment to forward copy to the superclass correctly.
4352      * Required for MSVC.
4353      */
operator =(const Image & img)4354     Image& operator = (const Image &img)
4355     {
4356         Memory::operator=(img);
4357         return *this;
4358     }
4359 
4360     /*! \brief Move constructor to forward move to the superclass correctly.
4361      * Required for MSVC.
4362      */
Image(Image && img)4363     Image(Image&& img) CL_HPP_NOEXCEPT_ : Memory(std::move(img)) {}
4364 
4365     /*! \brief Move assignment to forward move to the superclass correctly.
4366      * Required for MSVC.
4367      */
operator =(Image && img)4368     Image& operator = (Image &&img)
4369     {
4370         Memory::operator=(std::move(img));
4371         return *this;
4372     }
4373 
4374 
4375 public:
4376     //! \brief Wrapper for clGetImageInfo().
4377     template <typename T>
getImageInfo(cl_image_info name,T * param) const4378     cl_int getImageInfo(cl_image_info name, T* param) const
4379     {
4380         return detail::errHandler(
4381             detail::getInfo(&::clGetImageInfo, object_, name, param),
4382             __GET_IMAGE_INFO_ERR);
4383     }
4384 
4385     //! \brief Wrapper for clGetImageInfo() that returns by value.
4386     template <cl_int name> typename
4387     detail::param_traits<detail::cl_image_info, name>::param_type
getImageInfo(cl_int * err=NULL) const4388     getImageInfo(cl_int* err = NULL) const
4389     {
4390         typename detail::param_traits<
4391             detail::cl_image_info, name>::param_type param;
4392         cl_int result = getImageInfo(name, &param);
4393         if (err != NULL) {
4394             *err = result;
4395         }
4396         return param;
4397     }
4398 };
4399 
4400 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4401 /*! \brief Class interface for 1D Image Memory objects.
4402  *
4403  *  See Memory for details about copy semantics, etc.
4404  *
4405  *  \see Memory
4406  */
4407 class Image1D : public Image
4408 {
4409 public:
4410     /*! \brief Constructs a 1D Image in a specified context.
4411      *
4412      *  Wraps clCreateImage().
4413      */
Image1D(const Context & context,cl_mem_flags flags,ImageFormat format,size_type width,void * host_ptr=NULL,cl_int * err=NULL)4414     Image1D(
4415         const Context& context,
4416         cl_mem_flags flags,
4417         ImageFormat format,
4418         size_type width,
4419         void* host_ptr = NULL,
4420         cl_int* err = NULL)
4421     {
4422         cl_int error;
4423         cl_image_desc desc =
4424         {
4425             CL_MEM_OBJECT_IMAGE1D,
4426             width,
4427             0, 0, 0, 0, 0, 0, 0, 0
4428         };
4429         object_ = ::clCreateImage(
4430             context(),
4431             flags,
4432             &format,
4433             &desc,
4434             host_ptr,
4435             &error);
4436 
4437         detail::errHandler(error, __CREATE_IMAGE_ERR);
4438         if (err != NULL) {
4439             *err = error;
4440         }
4441     }
4442 
4443     //! \brief Default constructor - initializes to NULL.
Image1D()4444     Image1D() { }
4445 
4446     /*! \brief Constructor from cl_mem - takes ownership.
4447      *
4448      * \param retainObject will cause the constructor to retain its cl object.
4449      *                     Defaults to false to maintain compatibility with
4450      *                     earlier versions.
4451      *  See Memory for further details.
4452      */
Image1D(const cl_mem & image1D,bool retainObject=false)4453     explicit Image1D(const cl_mem& image1D, bool retainObject = false) :
4454         Image(image1D, retainObject) { }
4455 
4456     /*! \brief Assignment from cl_mem - performs shallow copy.
4457      *
4458      *  See Memory for further details.
4459      */
operator =(const cl_mem & rhs)4460     Image1D& operator = (const cl_mem& rhs)
4461     {
4462         Image::operator=(rhs);
4463         return *this;
4464     }
4465 
4466     /*! \brief Copy constructor to forward copy to the superclass correctly.
4467      * Required for MSVC.
4468      */
Image1D(const Image1D & img)4469     Image1D(const Image1D& img) : Image(img) {}
4470 
4471     /*! \brief Copy assignment to forward copy to the superclass correctly.
4472      * Required for MSVC.
4473      */
operator =(const Image1D & img)4474     Image1D& operator = (const Image1D &img)
4475     {
4476         Image::operator=(img);
4477         return *this;
4478     }
4479 
4480     /*! \brief Move constructor to forward move to the superclass correctly.
4481      * Required for MSVC.
4482      */
Image1D(Image1D && img)4483     Image1D(Image1D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4484 
4485     /*! \brief Move assignment to forward move to the superclass correctly.
4486      * Required for MSVC.
4487      */
operator =(Image1D && img)4488     Image1D& operator = (Image1D &&img)
4489     {
4490         Image::operator=(std::move(img));
4491         return *this;
4492     }
4493 
4494 };
4495 
4496 /*! \class Image1DBuffer
4497  * \brief Image interface for 1D buffer images.
4498  */
4499 class Image1DBuffer : public Image
4500 {
4501 public:
Image1DBuffer(const Context & context,cl_mem_flags flags,ImageFormat format,size_type width,const Buffer & buffer,cl_int * err=NULL)4502     Image1DBuffer(
4503         const Context& context,
4504         cl_mem_flags flags,
4505         ImageFormat format,
4506         size_type width,
4507         const Buffer &buffer,
4508         cl_int* err = NULL)
4509     {
4510         cl_int error;
4511         cl_image_desc desc =
4512         {
4513             CL_MEM_OBJECT_IMAGE1D_BUFFER,
4514             width,
4515             0, 0, 0, 0, 0, 0, 0,
4516             buffer()
4517         };
4518         object_ = ::clCreateImage(
4519             context(),
4520             flags,
4521             &format,
4522             &desc,
4523             NULL,
4524             &error);
4525 
4526         detail::errHandler(error, __CREATE_IMAGE_ERR);
4527         if (err != NULL) {
4528             *err = error;
4529         }
4530     }
4531 
Image1DBuffer()4532     Image1DBuffer() { }
4533 
4534     /*! \brief Constructor from cl_mem - takes ownership.
4535      *
4536      * \param retainObject will cause the constructor to retain its cl object.
4537      *                     Defaults to false to maintain compatibility with
4538      *                     earlier versions.
4539      *  See Memory for further details.
4540      */
Image1DBuffer(const cl_mem & image1D,bool retainObject=false)4541     explicit Image1DBuffer(const cl_mem& image1D, bool retainObject = false) :
4542         Image(image1D, retainObject) { }
4543 
operator =(const cl_mem & rhs)4544     Image1DBuffer& operator = (const cl_mem& rhs)
4545     {
4546         Image::operator=(rhs);
4547         return *this;
4548     }
4549 
4550     /*! \brief Copy constructor to forward copy to the superclass correctly.
4551      * Required for MSVC.
4552      */
Image1DBuffer(const Image1DBuffer & img)4553     Image1DBuffer(const Image1DBuffer& img) : Image(img) {}
4554 
4555     /*! \brief Copy assignment to forward copy to the superclass correctly.
4556      * Required for MSVC.
4557      */
operator =(const Image1DBuffer & img)4558     Image1DBuffer& operator = (const Image1DBuffer &img)
4559     {
4560         Image::operator=(img);
4561         return *this;
4562     }
4563 
4564     /*! \brief Move constructor to forward move to the superclass correctly.
4565      * Required for MSVC.
4566      */
Image1DBuffer(Image1DBuffer && img)4567     Image1DBuffer(Image1DBuffer&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4568 
4569     /*! \brief Move assignment to forward move to the superclass correctly.
4570      * Required for MSVC.
4571      */
operator =(Image1DBuffer && img)4572     Image1DBuffer& operator = (Image1DBuffer &&img)
4573     {
4574         Image::operator=(std::move(img));
4575         return *this;
4576     }
4577 
4578 };
4579 
4580 /*! \class Image1DArray
4581  * \brief Image interface for arrays of 1D images.
4582  */
4583 class Image1DArray : public Image
4584 {
4585 public:
Image1DArray(const Context & context,cl_mem_flags flags,ImageFormat format,size_type arraySize,size_type width,size_type rowPitch,void * host_ptr=NULL,cl_int * err=NULL)4586     Image1DArray(
4587         const Context& context,
4588         cl_mem_flags flags,
4589         ImageFormat format,
4590         size_type arraySize,
4591         size_type width,
4592         size_type rowPitch,
4593         void* host_ptr = NULL,
4594         cl_int* err = NULL)
4595     {
4596         cl_int error;
4597         cl_image_desc desc =
4598         {
4599             CL_MEM_OBJECT_IMAGE1D_ARRAY,
4600             width,
4601             0, 0,  // height, depth (unused)
4602             arraySize,
4603             rowPitch,
4604             0, 0, 0, 0
4605         };
4606         object_ = ::clCreateImage(
4607             context(),
4608             flags,
4609             &format,
4610             &desc,
4611             host_ptr,
4612             &error);
4613 
4614         detail::errHandler(error, __CREATE_IMAGE_ERR);
4615         if (err != NULL) {
4616             *err = error;
4617         }
4618     }
4619 
Image1DArray()4620     Image1DArray() { }
4621 
4622     /*! \brief Constructor from cl_mem - takes ownership.
4623      *
4624      * \param retainObject will cause the constructor to retain its cl object.
4625      *                     Defaults to false to maintain compatibility with
4626      *                     earlier versions.
4627      *  See Memory for further details.
4628      */
Image1DArray(const cl_mem & imageArray,bool retainObject=false)4629     explicit Image1DArray(const cl_mem& imageArray, bool retainObject = false) :
4630         Image(imageArray, retainObject) { }
4631 
4632 
operator =(const cl_mem & rhs)4633     Image1DArray& operator = (const cl_mem& rhs)
4634     {
4635         Image::operator=(rhs);
4636         return *this;
4637     }
4638 
4639     /*! \brief Copy constructor to forward copy to the superclass correctly.
4640      * Required for MSVC.
4641      */
Image1DArray(const Image1DArray & img)4642     Image1DArray(const Image1DArray& img) : Image(img) {}
4643 
4644     /*! \brief Copy assignment to forward copy to the superclass correctly.
4645      * Required for MSVC.
4646      */
operator =(const Image1DArray & img)4647     Image1DArray& operator = (const Image1DArray &img)
4648     {
4649         Image::operator=(img);
4650         return *this;
4651     }
4652 
4653     /*! \brief Move constructor to forward move to the superclass correctly.
4654      * Required for MSVC.
4655      */
Image1DArray(Image1DArray && img)4656     Image1DArray(Image1DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4657 
4658     /*! \brief Move assignment to forward move to the superclass correctly.
4659      * Required for MSVC.
4660      */
operator =(Image1DArray && img)4661     Image1DArray& operator = (Image1DArray &&img)
4662     {
4663         Image::operator=(std::move(img));
4664         return *this;
4665     }
4666 
4667 };
4668 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4669 
4670 
4671 /*! \brief Class interface for 2D Image Memory objects.
4672  *
4673  *  See Memory for details about copy semantics, etc.
4674  *
4675  *  \see Memory
4676  */
4677 class Image2D : public Image
4678 {
4679 public:
4680     /*! \brief Constructs a 2D Image in a specified context.
4681      *
4682      *  Wraps clCreateImage().
4683      */
Image2D(const Context & context,cl_mem_flags flags,ImageFormat format,size_type width,size_type height,size_type row_pitch=0,void * host_ptr=NULL,cl_int * err=NULL)4684     Image2D(
4685         const Context& context,
4686         cl_mem_flags flags,
4687         ImageFormat format,
4688         size_type width,
4689         size_type height,
4690         size_type row_pitch = 0,
4691         void* host_ptr = NULL,
4692         cl_int* err = NULL)
4693     {
4694         cl_int error;
4695         bool useCreateImage;
4696 
4697 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
4698         // Run-time decision based on the actual platform
4699         {
4700             cl_uint version = detail::getContextPlatformVersion(context());
4701             useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
4702         }
4703 #elif CL_HPP_TARGET_OPENCL_VERSION >= 120
4704         useCreateImage = true;
4705 #else
4706         useCreateImage = false;
4707 #endif
4708 
4709 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4710         if (useCreateImage)
4711         {
4712             cl_image_desc desc =
4713             {
4714                 CL_MEM_OBJECT_IMAGE2D,
4715                 width,
4716                 height,
4717                 0, 0, // depth, array size (unused)
4718                 row_pitch,
4719                 0, 0, 0, 0
4720             };
4721             object_ = ::clCreateImage(
4722                 context(),
4723                 flags,
4724                 &format,
4725                 &desc,
4726                 host_ptr,
4727                 &error);
4728 
4729             detail::errHandler(error, __CREATE_IMAGE_ERR);
4730             if (err != NULL) {
4731                 *err = error;
4732             }
4733         }
4734 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
4735 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
4736         if (!useCreateImage)
4737         {
4738             object_ = ::clCreateImage2D(
4739                 context(), flags,&format, width, height, row_pitch, host_ptr, &error);
4740 
4741             detail::errHandler(error, __CREATE_IMAGE2D_ERR);
4742             if (err != NULL) {
4743                 *err = error;
4744             }
4745         }
4746 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
4747     }
4748 
4749 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 || defined(CL_HPP_USE_CL_IMAGE2D_FROM_BUFFER_KHR)
4750     /*! \brief Constructs a 2D Image from a buffer.
4751     * \note This will share storage with the underlying buffer.
4752     *
4753     *  Wraps clCreateImage().
4754     */
Image2D(const Context & context,ImageFormat format,const Buffer & sourceBuffer,size_type width,size_type height,size_type row_pitch=0,cl_int * err=nullptr)4755     Image2D(
4756         const Context& context,
4757         ImageFormat format,
4758         const Buffer &sourceBuffer,
4759         size_type width,
4760         size_type height,
4761         size_type row_pitch = 0,
4762         cl_int* err = nullptr)
4763     {
4764         cl_int error;
4765 
4766         cl_image_desc desc =
4767         {
4768             CL_MEM_OBJECT_IMAGE2D,
4769             width,
4770             height,
4771             0, 0, // depth, array size (unused)
4772             row_pitch,
4773             0, 0, 0,
4774             // Use buffer as input to image
4775             sourceBuffer()
4776         };
4777         object_ = ::clCreateImage(
4778             context(),
4779             0, // flags inherited from buffer
4780             &format,
4781             &desc,
4782             nullptr,
4783             &error);
4784 
4785         detail::errHandler(error, __CREATE_IMAGE_ERR);
4786         if (err != nullptr) {
4787             *err = error;
4788         }
4789     }
4790 #endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200 || defined(CL_HPP_USE_CL_IMAGE2D_FROM_BUFFER_KHR)
4791 
4792 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
4793     /*! \brief Constructs a 2D Image from an image.
4794     * \note This will share storage with the underlying image but may
4795     *       reinterpret the channel order and type.
4796     *
4797     * The image will be created matching with a descriptor matching the source.
4798     *
4799     * \param order is the channel order to reinterpret the image data as.
4800     *              The channel order may differ as described in the OpenCL
4801     *              2.0 API specification.
4802     *
4803     * Wraps clCreateImage().
4804     */
Image2D(const Context & context,cl_channel_order order,const Image & sourceImage,cl_int * err=nullptr)4805     Image2D(
4806         const Context& context,
4807         cl_channel_order order,
4808         const Image &sourceImage,
4809         cl_int* err = nullptr)
4810     {
4811         cl_int error;
4812 
4813         // Descriptor fields have to match source image
4814         size_type sourceWidth =
4815             sourceImage.getImageInfo<CL_IMAGE_WIDTH>();
4816         size_type sourceHeight =
4817             sourceImage.getImageInfo<CL_IMAGE_HEIGHT>();
4818         size_type sourceRowPitch =
4819             sourceImage.getImageInfo<CL_IMAGE_ROW_PITCH>();
4820         cl_uint sourceNumMIPLevels =
4821             sourceImage.getImageInfo<CL_IMAGE_NUM_MIP_LEVELS>();
4822         cl_uint sourceNumSamples =
4823             sourceImage.getImageInfo<CL_IMAGE_NUM_SAMPLES>();
4824         cl_image_format sourceFormat =
4825             sourceImage.getImageInfo<CL_IMAGE_FORMAT>();
4826 
4827         // Update only the channel order.
4828         // Channel format inherited from source.
4829         sourceFormat.image_channel_order = order;
4830         cl_image_desc desc =
4831         {
4832             CL_MEM_OBJECT_IMAGE2D,
4833             sourceWidth,
4834             sourceHeight,
4835             0, 0, // depth (unused), array size (unused)
4836             sourceRowPitch,
4837             0, // slice pitch (unused)
4838             sourceNumMIPLevels,
4839             sourceNumSamples,
4840             // Use buffer as input to image
4841             sourceImage()
4842         };
4843         object_ = ::clCreateImage(
4844             context(),
4845             0, // flags should be inherited from mem_object
4846             &sourceFormat,
4847             &desc,
4848             nullptr,
4849             &error);
4850 
4851         detail::errHandler(error, __CREATE_IMAGE_ERR);
4852         if (err != nullptr) {
4853             *err = error;
4854         }
4855     }
4856 #endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200
4857 
4858     //! \brief Default constructor - initializes to NULL.
Image2D()4859     Image2D() { }
4860 
4861     /*! \brief Constructor from cl_mem - takes ownership.
4862      *
4863      * \param retainObject will cause the constructor to retain its cl object.
4864      *                     Defaults to false to maintain compatibility with
4865      *                     earlier versions.
4866      *  See Memory for further details.
4867      */
Image2D(const cl_mem & image2D,bool retainObject=false)4868     explicit Image2D(const cl_mem& image2D, bool retainObject = false) :
4869         Image(image2D, retainObject) { }
4870 
4871     /*! \brief Assignment from cl_mem - performs shallow copy.
4872      *
4873      *  See Memory for further details.
4874      */
operator =(const cl_mem & rhs)4875     Image2D& operator = (const cl_mem& rhs)
4876     {
4877         Image::operator=(rhs);
4878         return *this;
4879     }
4880 
4881     /*! \brief Copy constructor to forward copy to the superclass correctly.
4882      * Required for MSVC.
4883      */
Image2D(const Image2D & img)4884     Image2D(const Image2D& img) : Image(img) {}
4885 
4886     /*! \brief Copy assignment to forward copy to the superclass correctly.
4887      * Required for MSVC.
4888      */
operator =(const Image2D & img)4889     Image2D& operator = (const Image2D &img)
4890     {
4891         Image::operator=(img);
4892         return *this;
4893     }
4894 
4895     /*! \brief Move constructor to forward move to the superclass correctly.
4896      * Required for MSVC.
4897      */
Image2D(Image2D && img)4898     Image2D(Image2D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4899 
4900     /*! \brief Move assignment to forward move to the superclass correctly.
4901      * Required for MSVC.
4902      */
operator =(Image2D && img)4903     Image2D& operator = (Image2D &&img)
4904     {
4905         Image::operator=(std::move(img));
4906         return *this;
4907     }
4908 
4909 };
4910 
4911 
4912 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
4913 /*! \brief Class interface for GL 2D Image Memory objects.
4914  *
4915  *  This is provided to facilitate interoperability with OpenGL.
4916  *
4917  *  See Memory for details about copy semantics, etc.
4918  *
4919  *  \see Memory
4920  *  \note Deprecated for OpenCL 1.2. Please use ImageGL instead.
4921  */
4922 class CL_EXT_PREFIX__VERSION_1_1_DEPRECATED Image2DGL : public Image2D
4923 {
4924 public:
4925     /*! \brief Constructs an Image2DGL in a specified context, from a given
4926      *         GL Texture.
4927      *
4928      *  Wraps clCreateFromGLTexture2D().
4929      */
Image2DGL(const Context & context,cl_mem_flags flags,cl_GLenum target,cl_GLint miplevel,cl_GLuint texobj,cl_int * err=NULL)4930     Image2DGL(
4931         const Context& context,
4932         cl_mem_flags flags,
4933         cl_GLenum target,
4934         cl_GLint  miplevel,
4935         cl_GLuint texobj,
4936         cl_int * err = NULL)
4937     {
4938         cl_int error;
4939         object_ = ::clCreateFromGLTexture2D(
4940             context(),
4941             flags,
4942             target,
4943             miplevel,
4944             texobj,
4945             &error);
4946 
4947         detail::errHandler(error, __CREATE_GL_TEXTURE_2D_ERR);
4948         if (err != NULL) {
4949             *err = error;
4950         }
4951 
4952     }
4953 
4954     //! \brief Default constructor - initializes to NULL.
Image2DGL()4955     Image2DGL() : Image2D() { }
4956 
4957     /*! \brief Constructor from cl_mem - takes ownership.
4958      *
4959      * \param retainObject will cause the constructor to retain its cl object.
4960      *                     Defaults to false to maintain compatibility with
4961      *                     earlier versions.
4962      *  See Memory for further details.
4963      */
Image2DGL(const cl_mem & image,bool retainObject=false)4964     explicit Image2DGL(const cl_mem& image, bool retainObject = false) :
4965         Image2D(image, retainObject) { }
4966 
4967     /*! \brief Assignment from cl_mem - performs shallow copy.
4968      *c
4969      *  See Memory for further details.
4970      */
operator =(const cl_mem & rhs)4971     Image2DGL& operator = (const cl_mem& rhs)
4972     {
4973         Image2D::operator=(rhs);
4974         return *this;
4975     }
4976 
4977     /*! \brief Copy constructor to forward copy to the superclass correctly.
4978      * Required for MSVC.
4979      */
Image2DGL(const Image2DGL & img)4980     Image2DGL(const Image2DGL& img) : Image2D(img) {}
4981 
4982     /*! \brief Copy assignment to forward copy to the superclass correctly.
4983      * Required for MSVC.
4984      */
operator =(const Image2DGL & img)4985     Image2DGL& operator = (const Image2DGL &img)
4986     {
4987         Image2D::operator=(img);
4988         return *this;
4989     }
4990 
4991     /*! \brief Move constructor to forward move to the superclass correctly.
4992      * Required for MSVC.
4993      */
Image2DGL(Image2DGL && img)4994     Image2DGL(Image2DGL&& img) CL_HPP_NOEXCEPT_ : Image2D(std::move(img)) {}
4995 
4996     /*! \brief Move assignment to forward move to the superclass correctly.
4997      * Required for MSVC.
4998      */
operator =(Image2DGL && img)4999     Image2DGL& operator = (Image2DGL &&img)
5000     {
5001         Image2D::operator=(std::move(img));
5002         return *this;
5003     }
5004 
5005 } CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
5006 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
5007 
5008 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5009 /*! \class Image2DArray
5010  * \brief Image interface for arrays of 2D images.
5011  */
5012 class Image2DArray : public Image
5013 {
5014 public:
Image2DArray(const Context & context,cl_mem_flags flags,ImageFormat format,size_type arraySize,size_type width,size_type height,size_type rowPitch,size_type slicePitch,void * host_ptr=NULL,cl_int * err=NULL)5015     Image2DArray(
5016         const Context& context,
5017         cl_mem_flags flags,
5018         ImageFormat format,
5019         size_type arraySize,
5020         size_type width,
5021         size_type height,
5022         size_type rowPitch,
5023         size_type slicePitch,
5024         void* host_ptr = NULL,
5025         cl_int* err = NULL)
5026     {
5027         cl_int error;
5028         cl_image_desc desc =
5029         {
5030             CL_MEM_OBJECT_IMAGE2D_ARRAY,
5031             width,
5032             height,
5033             0,       // depth (unused)
5034             arraySize,
5035             rowPitch,
5036             slicePitch,
5037             0, 0, 0
5038         };
5039         object_ = ::clCreateImage(
5040             context(),
5041             flags,
5042             &format,
5043             &desc,
5044             host_ptr,
5045             &error);
5046 
5047         detail::errHandler(error, __CREATE_IMAGE_ERR);
5048         if (err != NULL) {
5049             *err = error;
5050         }
5051     }
5052 
Image2DArray()5053     Image2DArray() { }
5054 
5055     /*! \brief Constructor from cl_mem - takes ownership.
5056      *
5057      * \param retainObject will cause the constructor to retain its cl object.
5058      *                     Defaults to false to maintain compatibility with
5059      *                     earlier versions.
5060      *  See Memory for further details.
5061      */
Image2DArray(const cl_mem & imageArray,bool retainObject=false)5062     explicit Image2DArray(const cl_mem& imageArray, bool retainObject = false) : Image(imageArray, retainObject) { }
5063 
operator =(const cl_mem & rhs)5064     Image2DArray& operator = (const cl_mem& rhs)
5065     {
5066         Image::operator=(rhs);
5067         return *this;
5068     }
5069 
5070     /*! \brief Copy constructor to forward copy to the superclass correctly.
5071      * Required for MSVC.
5072      */
Image2DArray(const Image2DArray & img)5073     Image2DArray(const Image2DArray& img) : Image(img) {}
5074 
5075     /*! \brief Copy assignment to forward copy to the superclass correctly.
5076      * Required for MSVC.
5077      */
operator =(const Image2DArray & img)5078     Image2DArray& operator = (const Image2DArray &img)
5079     {
5080         Image::operator=(img);
5081         return *this;
5082     }
5083 
5084     /*! \brief Move constructor to forward move to the superclass correctly.
5085      * Required for MSVC.
5086      */
Image2DArray(Image2DArray && img)5087     Image2DArray(Image2DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
5088 
5089     /*! \brief Move assignment to forward move to the superclass correctly.
5090      * Required for MSVC.
5091      */
operator =(Image2DArray && img)5092     Image2DArray& operator = (Image2DArray &&img)
5093     {
5094         Image::operator=(std::move(img));
5095         return *this;
5096     }
5097 };
5098 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5099 
5100 /*! \brief Class interface for 3D Image Memory objects.
5101  *
5102  *  See Memory for details about copy semantics, etc.
5103  *
5104  *  \see Memory
5105  */
5106 class Image3D : public Image
5107 {
5108 public:
5109     /*! \brief Constructs a 3D Image in a specified context.
5110      *
5111      *  Wraps clCreateImage().
5112      */
Image3D(const Context & context,cl_mem_flags flags,ImageFormat format,size_type width,size_type height,size_type depth,size_type row_pitch=0,size_type slice_pitch=0,void * host_ptr=NULL,cl_int * err=NULL)5113     Image3D(
5114         const Context& context,
5115         cl_mem_flags flags,
5116         ImageFormat format,
5117         size_type width,
5118         size_type height,
5119         size_type depth,
5120         size_type row_pitch = 0,
5121         size_type slice_pitch = 0,
5122         void* host_ptr = NULL,
5123         cl_int* err = NULL)
5124     {
5125         cl_int error;
5126         bool useCreateImage;
5127 
5128 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
5129         // Run-time decision based on the actual platform
5130         {
5131             cl_uint version = detail::getContextPlatformVersion(context());
5132             useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
5133         }
5134 #elif CL_HPP_TARGET_OPENCL_VERSION >= 120
5135         useCreateImage = true;
5136 #else
5137         useCreateImage = false;
5138 #endif
5139 
5140 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5141         if (useCreateImage)
5142         {
5143             cl_image_desc desc =
5144             {
5145                 CL_MEM_OBJECT_IMAGE3D,
5146                 width,
5147                 height,
5148                 depth,
5149                 0,      // array size (unused)
5150                 row_pitch,
5151                 slice_pitch,
5152                 0, 0, 0
5153             };
5154             object_ = ::clCreateImage(
5155                 context(),
5156                 flags,
5157                 &format,
5158                 &desc,
5159                 host_ptr,
5160                 &error);
5161 
5162             detail::errHandler(error, __CREATE_IMAGE_ERR);
5163             if (err != NULL) {
5164                 *err = error;
5165             }
5166         }
5167 #endif  // CL_HPP_TARGET_OPENCL_VERSION >= 120
5168 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
5169         if (!useCreateImage)
5170         {
5171             object_ = ::clCreateImage3D(
5172                 context(), flags, &format, width, height, depth, row_pitch,
5173                 slice_pitch, host_ptr, &error);
5174 
5175             detail::errHandler(error, __CREATE_IMAGE3D_ERR);
5176             if (err != NULL) {
5177                 *err = error;
5178             }
5179         }
5180 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
5181     }
5182 
5183     //! \brief Default constructor - initializes to NULL.
Image3D()5184     Image3D() : Image() { }
5185 
5186     /*! \brief Constructor from cl_mem - takes ownership.
5187      *
5188      * \param retainObject will cause the constructor to retain its cl object.
5189      *                     Defaults to false to maintain compatibility with
5190      *                     earlier versions.
5191      *  See Memory for further details.
5192      */
Image3D(const cl_mem & image3D,bool retainObject=false)5193     explicit Image3D(const cl_mem& image3D, bool retainObject = false) :
5194         Image(image3D, retainObject) { }
5195 
5196     /*! \brief Assignment from cl_mem - performs shallow copy.
5197      *
5198      *  See Memory for further details.
5199      */
operator =(const cl_mem & rhs)5200     Image3D& operator = (const cl_mem& rhs)
5201     {
5202         Image::operator=(rhs);
5203         return *this;
5204     }
5205 
5206     /*! \brief Copy constructor to forward copy to the superclass correctly.
5207      * Required for MSVC.
5208      */
Image3D(const Image3D & img)5209     Image3D(const Image3D& img) : Image(img) {}
5210 
5211     /*! \brief Copy assignment to forward copy to the superclass correctly.
5212      * Required for MSVC.
5213      */
operator =(const Image3D & img)5214     Image3D& operator = (const Image3D &img)
5215     {
5216         Image::operator=(img);
5217         return *this;
5218     }
5219 
5220     /*! \brief Move constructor to forward move to the superclass correctly.
5221      * Required for MSVC.
5222      */
Image3D(Image3D && img)5223     Image3D(Image3D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
5224 
5225     /*! \brief Move assignment to forward move to the superclass correctly.
5226      * Required for MSVC.
5227      */
operator =(Image3D && img)5228     Image3D& operator = (Image3D &&img)
5229     {
5230         Image::operator=(std::move(img));
5231         return *this;
5232     }
5233 };
5234 
5235 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
5236 /*! \brief Class interface for GL 3D Image Memory objects.
5237  *
5238  *  This is provided to facilitate interoperability with OpenGL.
5239  *
5240  *  See Memory for details about copy semantics, etc.
5241  *
5242  *  \see Memory
5243  */
5244 class Image3DGL : public Image3D
5245 {
5246 public:
5247     /*! \brief Constructs an Image3DGL in a specified context, from a given
5248      *         GL Texture.
5249      *
5250      *  Wraps clCreateFromGLTexture3D().
5251      */
Image3DGL(const Context & context,cl_mem_flags flags,cl_GLenum target,cl_GLint miplevel,cl_GLuint texobj,cl_int * err=NULL)5252     Image3DGL(
5253         const Context& context,
5254         cl_mem_flags flags,
5255         cl_GLenum target,
5256         cl_GLint  miplevel,
5257         cl_GLuint texobj,
5258         cl_int * err = NULL)
5259     {
5260         cl_int error;
5261         object_ = ::clCreateFromGLTexture3D(
5262             context(),
5263             flags,
5264             target,
5265             miplevel,
5266             texobj,
5267             &error);
5268 
5269         detail::errHandler(error, __CREATE_GL_TEXTURE_3D_ERR);
5270         if (err != NULL) {
5271             *err = error;
5272         }
5273     }
5274 
5275     //! \brief Default constructor - initializes to NULL.
Image3DGL()5276     Image3DGL() : Image3D() { }
5277 
5278     /*! \brief Constructor from cl_mem - takes ownership.
5279      *
5280      * \param retainObject will cause the constructor to retain its cl object.
5281      *                     Defaults to false to maintain compatibility with
5282      *                     earlier versions.
5283      *  See Memory for further details.
5284      */
Image3DGL(const cl_mem & image,bool retainObject=false)5285     explicit Image3DGL(const cl_mem& image, bool retainObject = false) :
5286         Image3D(image, retainObject) { }
5287 
5288     /*! \brief Assignment from cl_mem - performs shallow copy.
5289      *
5290      *  See Memory for further details.
5291      */
operator =(const cl_mem & rhs)5292     Image3DGL& operator = (const cl_mem& rhs)
5293     {
5294         Image3D::operator=(rhs);
5295         return *this;
5296     }
5297 
5298     /*! \brief Copy constructor to forward copy to the superclass correctly.
5299      * Required for MSVC.
5300      */
Image3DGL(const Image3DGL & img)5301     Image3DGL(const Image3DGL& img) : Image3D(img) {}
5302 
5303     /*! \brief Copy assignment to forward copy to the superclass correctly.
5304      * Required for MSVC.
5305      */
operator =(const Image3DGL & img)5306     Image3DGL& operator = (const Image3DGL &img)
5307     {
5308         Image3D::operator=(img);
5309         return *this;
5310     }
5311 
5312     /*! \brief Move constructor to forward move to the superclass correctly.
5313      * Required for MSVC.
5314      */
Image3DGL(Image3DGL && img)5315     Image3DGL(Image3DGL&& img) CL_HPP_NOEXCEPT_ : Image3D(std::move(img)) {}
5316 
5317     /*! \brief Move assignment to forward move to the superclass correctly.
5318      * Required for MSVC.
5319      */
operator =(Image3DGL && img)5320     Image3DGL& operator = (Image3DGL &&img)
5321     {
5322         Image3D::operator=(std::move(img));
5323         return *this;
5324     }
5325 };
5326 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
5327 
5328 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5329 /*! \class ImageGL
5330  * \brief general image interface for GL interop.
5331  * We abstract the 2D and 3D GL images into a single instance here
5332  * that wraps all GL sourced images on the grounds that setup information
5333  * was performed by OpenCL anyway.
5334  */
5335 class ImageGL : public Image
5336 {
5337 public:
ImageGL(const Context & context,cl_mem_flags flags,cl_GLenum target,cl_GLint miplevel,cl_GLuint texobj,cl_int * err=NULL)5338     ImageGL(
5339         const Context& context,
5340         cl_mem_flags flags,
5341         cl_GLenum target,
5342         cl_GLint  miplevel,
5343         cl_GLuint texobj,
5344         cl_int * err = NULL)
5345     {
5346         cl_int error;
5347         object_ = ::clCreateFromGLTexture(
5348             context(),
5349             flags,
5350             target,
5351             miplevel,
5352             texobj,
5353             &error);
5354 
5355         detail::errHandler(error, __CREATE_GL_TEXTURE_ERR);
5356         if (err != NULL) {
5357             *err = error;
5358         }
5359     }
5360 
ImageGL()5361     ImageGL() : Image() { }
5362 
5363     /*! \brief Constructor from cl_mem - takes ownership.
5364      *
5365      * \param retainObject will cause the constructor to retain its cl object.
5366      *                     Defaults to false to maintain compatibility with
5367      *                     earlier versions.
5368      *  See Memory for further details.
5369      */
ImageGL(const cl_mem & image,bool retainObject=false)5370     explicit ImageGL(const cl_mem& image, bool retainObject = false) :
5371         Image(image, retainObject) { }
5372 
operator =(const cl_mem & rhs)5373     ImageGL& operator = (const cl_mem& rhs)
5374     {
5375         Image::operator=(rhs);
5376         return *this;
5377     }
5378 
5379     /*! \brief Copy constructor to forward copy to the superclass correctly.
5380      * Required for MSVC.
5381      */
ImageGL(const ImageGL & img)5382     ImageGL(const ImageGL& img) : Image(img) {}
5383 
5384     /*! \brief Copy assignment to forward copy to the superclass correctly.
5385      * Required for MSVC.
5386      */
operator =(const ImageGL & img)5387     ImageGL& operator = (const ImageGL &img)
5388     {
5389         Image::operator=(img);
5390         return *this;
5391     }
5392 
5393     /*! \brief Move constructor to forward move to the superclass correctly.
5394      * Required for MSVC.
5395      */
ImageGL(ImageGL && img)5396     ImageGL(ImageGL&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
5397 
5398     /*! \brief Move assignment to forward move to the superclass correctly.
5399      * Required for MSVC.
5400      */
operator =(ImageGL && img)5401     ImageGL& operator = (ImageGL &&img)
5402     {
5403         Image::operator=(std::move(img));
5404         return *this;
5405     }
5406 };
5407 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5408 
5409 
5410 
5411 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5412 /*! \brief Class interface for Pipe Memory Objects.
5413 *
5414 *  See Memory for details about copy semantics, etc.
5415 *
5416 *  \see Memory
5417 */
5418 class Pipe : public Memory
5419 {
5420 public:
5421 
5422     /*! \brief Constructs a Pipe in a specified context.
5423      *
5424      * Wraps clCreatePipe().
5425      * @param context Context in which to create the pipe.
5426      * @param flags Bitfield. Only CL_MEM_READ_WRITE and CL_MEM_HOST_NO_ACCESS are valid.
5427      * @param packet_size Size in bytes of a single packet of the pipe.
5428      * @param max_packets Number of packets that may be stored in the pipe.
5429      *
5430      */
Pipe(const Context & context,cl_uint packet_size,cl_uint max_packets,cl_int * err=NULL)5431     Pipe(
5432         const Context& context,
5433         cl_uint packet_size,
5434         cl_uint max_packets,
5435         cl_int* err = NULL)
5436     {
5437         cl_int error;
5438 
5439         cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5440         object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
5441 
5442         detail::errHandler(error, __CREATE_PIPE_ERR);
5443         if (err != NULL) {
5444             *err = error;
5445         }
5446     }
5447 
5448     /*! \brief Constructs a Pipe in a the default context.
5449      *
5450      * Wraps clCreatePipe().
5451      * @param flags Bitfield. Only CL_MEM_READ_WRITE and CL_MEM_HOST_NO_ACCESS are valid.
5452      * @param packet_size Size in bytes of a single packet of the pipe.
5453      * @param max_packets Number of packets that may be stored in the pipe.
5454      *
5455      */
Pipe(cl_uint packet_size,cl_uint max_packets,cl_int * err=NULL)5456     Pipe(
5457         cl_uint packet_size,
5458         cl_uint max_packets,
5459         cl_int* err = NULL)
5460     {
5461         cl_int error;
5462 
5463         Context context = Context::getDefault(err);
5464 
5465         cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5466         object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
5467 
5468         detail::errHandler(error, __CREATE_PIPE_ERR);
5469         if (err != NULL) {
5470             *err = error;
5471         }
5472     }
5473 
5474     //! \brief Default constructor - initializes to NULL.
Pipe()5475     Pipe() : Memory() { }
5476 
5477     /*! \brief Constructor from cl_mem - takes ownership.
5478      *
5479      * \param retainObject will cause the constructor to retain its cl object.
5480      *                     Defaults to false to maintain compatibility with earlier versions.
5481      *
5482      *  See Memory for further details.
5483      */
Pipe(const cl_mem & pipe,bool retainObject=false)5484     explicit Pipe(const cl_mem& pipe, bool retainObject = false) :
5485         Memory(pipe, retainObject) { }
5486 
5487     /*! \brief Assignment from cl_mem - performs shallow copy.
5488      *
5489      *  See Memory for further details.
5490      */
operator =(const cl_mem & rhs)5491     Pipe& operator = (const cl_mem& rhs)
5492     {
5493         Memory::operator=(rhs);
5494         return *this;
5495     }
5496 
5497     /*! \brief Copy constructor to forward copy to the superclass correctly.
5498      * Required for MSVC.
5499      */
Pipe(const Pipe & pipe)5500     Pipe(const Pipe& pipe) : Memory(pipe) {}
5501 
5502     /*! \brief Copy assignment to forward copy to the superclass correctly.
5503      * Required for MSVC.
5504      */
operator =(const Pipe & pipe)5505     Pipe& operator = (const Pipe &pipe)
5506     {
5507         Memory::operator=(pipe);
5508         return *this;
5509     }
5510 
5511     /*! \brief Move constructor to forward move to the superclass correctly.
5512      * Required for MSVC.
5513      */
Pipe(Pipe && pipe)5514     Pipe(Pipe&& pipe) CL_HPP_NOEXCEPT_ : Memory(std::move(pipe)) {}
5515 
5516     /*! \brief Move assignment to forward move to the superclass correctly.
5517      * Required for MSVC.
5518      */
operator =(Pipe && pipe)5519     Pipe& operator = (Pipe &&pipe)
5520     {
5521         Memory::operator=(std::move(pipe));
5522         return *this;
5523     }
5524 
5525     //! \brief Wrapper for clGetMemObjectInfo().
5526     template <typename T>
getInfo(cl_pipe_info name,T * param) const5527     cl_int getInfo(cl_pipe_info name, T* param) const
5528     {
5529         return detail::errHandler(
5530             detail::getInfo(&::clGetPipeInfo, object_, name, param),
5531             __GET_PIPE_INFO_ERR);
5532     }
5533 
5534     //! \brief Wrapper for clGetMemObjectInfo() that returns by value.
5535     template <cl_int name> typename
5536         detail::param_traits<detail::cl_pipe_info, name>::param_type
getInfo(cl_int * err=NULL) const5537         getInfo(cl_int* err = NULL) const
5538     {
5539         typename detail::param_traits<
5540             detail::cl_pipe_info, name>::param_type param;
5541         cl_int result = getInfo(name, &param);
5542         if (err != NULL) {
5543             *err = result;
5544         }
5545         return param;
5546     }
5547 }; // class Pipe
5548 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
5549 
5550 
5551 /*! \brief Class interface for cl_sampler.
5552  *
5553  *  \note Copies of these objects are shallow, meaning that the copy will refer
5554  *        to the same underlying cl_sampler as the original.  For details, see
5555  *        clRetainSampler() and clReleaseSampler().
5556  *
5557  *  \see cl_sampler
5558  */
5559 class Sampler : public detail::Wrapper<cl_sampler>
5560 {
5561 public:
5562     //! \brief Default constructor - initializes to NULL.
Sampler()5563     Sampler() { }
5564 
5565     /*! \brief Constructs a Sampler in a specified context.
5566      *
5567      *  Wraps clCreateSampler().
5568      */
Sampler(const Context & context,cl_bool normalized_coords,cl_addressing_mode addressing_mode,cl_filter_mode filter_mode,cl_int * err=NULL)5569     Sampler(
5570         const Context& context,
5571         cl_bool normalized_coords,
5572         cl_addressing_mode addressing_mode,
5573         cl_filter_mode filter_mode,
5574         cl_int* err = NULL)
5575     {
5576         cl_int error;
5577 
5578 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5579         cl_sampler_properties sampler_properties[] = {
5580             CL_SAMPLER_NORMALIZED_COORDS, normalized_coords,
5581             CL_SAMPLER_ADDRESSING_MODE, addressing_mode,
5582             CL_SAMPLER_FILTER_MODE, filter_mode,
5583             0 };
5584         object_ = ::clCreateSamplerWithProperties(
5585             context(),
5586             sampler_properties,
5587             &error);
5588 
5589         detail::errHandler(error, __CREATE_SAMPLER_WITH_PROPERTIES_ERR);
5590         if (err != NULL) {
5591             *err = error;
5592         }
5593 #else
5594         object_ = ::clCreateSampler(
5595             context(),
5596             normalized_coords,
5597             addressing_mode,
5598             filter_mode,
5599             &error);
5600 
5601         detail::errHandler(error, __CREATE_SAMPLER_ERR);
5602         if (err != NULL) {
5603             *err = error;
5604         }
5605 #endif
5606     }
5607 
5608     /*! \brief Constructor from cl_sampler - takes ownership.
5609      *
5610      * \param retainObject will cause the constructor to retain its cl object.
5611      *                     Defaults to false to maintain compatibility with
5612      *                     earlier versions.
5613      *  This effectively transfers ownership of a refcount on the cl_sampler
5614      *  into the new Sampler object.
5615      */
Sampler(const cl_sampler & sampler,bool retainObject=false)5616     explicit Sampler(const cl_sampler& sampler, bool retainObject = false) :
5617         detail::Wrapper<cl_type>(sampler, retainObject) { }
5618 
5619     /*! \brief Assignment operator from cl_sampler - takes ownership.
5620      *
5621      *  This effectively transfers ownership of a refcount on the rhs and calls
5622      *  clReleaseSampler() on the value previously held by this instance.
5623      */
operator =(const cl_sampler & rhs)5624     Sampler& operator = (const cl_sampler& rhs)
5625     {
5626         detail::Wrapper<cl_type>::operator=(rhs);
5627         return *this;
5628     }
5629 
5630     /*! \brief Copy constructor to forward copy to the superclass correctly.
5631      * Required for MSVC.
5632      */
Sampler(const Sampler & sam)5633     Sampler(const Sampler& sam) : detail::Wrapper<cl_type>(sam) {}
5634 
5635     /*! \brief Copy assignment to forward copy to the superclass correctly.
5636      * Required for MSVC.
5637      */
operator =(const Sampler & sam)5638     Sampler& operator = (const Sampler &sam)
5639     {
5640         detail::Wrapper<cl_type>::operator=(sam);
5641         return *this;
5642     }
5643 
5644     /*! \brief Move constructor to forward move to the superclass correctly.
5645      * Required for MSVC.
5646      */
Sampler(Sampler && sam)5647     Sampler(Sampler&& sam) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(sam)) {}
5648 
5649     /*! \brief Move assignment to forward move to the superclass correctly.
5650      * Required for MSVC.
5651      */
operator =(Sampler && sam)5652     Sampler& operator = (Sampler &&sam)
5653     {
5654         detail::Wrapper<cl_type>::operator=(std::move(sam));
5655         return *this;
5656     }
5657 
5658     //! \brief Wrapper for clGetSamplerInfo().
5659     template <typename T>
getInfo(cl_sampler_info name,T * param) const5660     cl_int getInfo(cl_sampler_info name, T* param) const
5661     {
5662         return detail::errHandler(
5663             detail::getInfo(&::clGetSamplerInfo, object_, name, param),
5664             __GET_SAMPLER_INFO_ERR);
5665     }
5666 
5667     //! \brief Wrapper for clGetSamplerInfo() that returns by value.
5668     template <cl_int name> typename
5669     detail::param_traits<detail::cl_sampler_info, name>::param_type
getInfo(cl_int * err=NULL) const5670     getInfo(cl_int* err = NULL) const
5671     {
5672         typename detail::param_traits<
5673             detail::cl_sampler_info, name>::param_type param;
5674         cl_int result = getInfo(name, &param);
5675         if (err != NULL) {
5676             *err = result;
5677         }
5678         return param;
5679     }
5680 };
5681 
5682 class Program;
5683 class CommandQueue;
5684 class DeviceCommandQueue;
5685 class Kernel;
5686 
5687 //! \brief Class interface for specifying NDRange values.
5688 class NDRange
5689 {
5690 private:
5691     size_type sizes_[3];
5692     cl_uint dimensions_;
5693 
5694 public:
5695     //! \brief Default constructor - resulting range has zero dimensions.
NDRange()5696     NDRange()
5697         : dimensions_(0)
5698     {
5699         sizes_[0] = 0;
5700         sizes_[1] = 0;
5701         sizes_[2] = 0;
5702     }
5703 
5704     //! \brief Constructs one-dimensional range.
NDRange(size_type size0)5705     NDRange(size_type size0)
5706         : dimensions_(1)
5707     {
5708         sizes_[0] = size0;
5709         sizes_[1] = 1;
5710         sizes_[2] = 1;
5711     }
5712 
5713     //! \brief Constructs two-dimensional range.
NDRange(size_type size0,size_type size1)5714     NDRange(size_type size0, size_type size1)
5715         : dimensions_(2)
5716     {
5717         sizes_[0] = size0;
5718         sizes_[1] = size1;
5719         sizes_[2] = 1;
5720     }
5721 
5722     //! \brief Constructs three-dimensional range.
NDRange(size_type size0,size_type size1,size_type size2)5723     NDRange(size_type size0, size_type size1, size_type size2)
5724         : dimensions_(3)
5725     {
5726         sizes_[0] = size0;
5727         sizes_[1] = size1;
5728         sizes_[2] = size2;
5729     }
5730 
5731     /*! \brief Conversion operator to const size_type *.
5732      *
5733      *  \returns a pointer to the size of the first dimension.
5734      */
operator const size_type*() const5735     operator const size_type*() const {
5736         return sizes_;
5737     }
5738 
5739     //! \brief Queries the number of dimensions in the range.
dimensions() const5740     size_type dimensions() const
5741     {
5742         return dimensions_;
5743     }
5744 
5745     //! \brief Returns the size of the object in bytes based on the
5746     // runtime number of dimensions
size() const5747     size_type size() const
5748     {
5749         return dimensions_*sizeof(size_type);
5750     }
5751 
get()5752     size_type* get()
5753     {
5754         return sizes_;
5755     }
5756 
get() const5757     const size_type* get() const
5758     {
5759         return sizes_;
5760     }
5761 };
5762 
5763 //! \brief A zero-dimensional range.
5764 static const NDRange NullRange;
5765 
5766 //! \brief Local address wrapper for use with Kernel::setArg
5767 struct LocalSpaceArg
5768 {
5769     size_type size_;
5770 };
5771 
5772 namespace detail {
5773 
5774 template <typename T, class Enable = void>
5775 struct KernelArgumentHandler;
5776 
5777 // Enable for objects that are not subclasses of memory
5778 // Pointers, constants etc
5779 template <typename T>
5780 struct KernelArgumentHandler<T, typename std::enable_if<!std::is_base_of<cl::Memory, T>::value>::type>
5781 {
sizecl::detail::KernelArgumentHandler5782     static size_type size(const T&) { return sizeof(T); }
ptrcl::detail::KernelArgumentHandler5783     static const T* ptr(const T& value) { return &value; }
5784 };
5785 
5786 // Enable for subclasses of memory where we want to get a reference to the cl_mem out
5787 // and pass that in for safety
5788 template <typename T>
5789 struct KernelArgumentHandler<T, typename std::enable_if<std::is_base_of<cl::Memory, T>::value>::type>
5790 {
sizecl::detail::KernelArgumentHandler5791     static size_type size(const T&) { return sizeof(cl_mem); }
ptrcl::detail::KernelArgumentHandler5792     static const cl_mem* ptr(const T& value) { return &(value()); }
5793 };
5794 
5795 // Specialization for DeviceCommandQueue defined later
5796 
5797 template <>
5798 struct KernelArgumentHandler<LocalSpaceArg, void>
5799 {
sizecl::detail::KernelArgumentHandler5800     static size_type size(const LocalSpaceArg& value) { return value.size_; }
ptrcl::detail::KernelArgumentHandler5801     static const void* ptr(const LocalSpaceArg&) { return NULL; }
5802 };
5803 
5804 }
5805 //! \endcond
5806 
5807 /*! Local
5808  * \brief Helper function for generating LocalSpaceArg objects.
5809  */
5810 inline LocalSpaceArg
Local(size_type size)5811 Local(size_type size)
5812 {
5813     LocalSpaceArg ret = { size };
5814     return ret;
5815 }
5816 
5817 /*! \brief Class interface for cl_kernel.
5818  *
5819  *  \note Copies of these objects are shallow, meaning that the copy will refer
5820  *        to the same underlying cl_kernel as the original.  For details, see
5821  *        clRetainKernel() and clReleaseKernel().
5822  *
5823  *  \see cl_kernel
5824  */
5825 class Kernel : public detail::Wrapper<cl_kernel>
5826 {
5827 public:
5828     inline Kernel(const Program& program, const char* name, cl_int* err = NULL);
5829 
5830     //! \brief Default constructor - initializes to NULL.
Kernel()5831     Kernel() { }
5832 
5833     /*! \brief Constructor from cl_kernel - takes ownership.
5834      *
5835      * \param retainObject will cause the constructor to retain its cl object.
5836      *                     Defaults to false to maintain compatibility with
5837      *                     earlier versions.
5838      *  This effectively transfers ownership of a refcount on the cl_kernel
5839      *  into the new Kernel object.
5840      */
Kernel(const cl_kernel & kernel,bool retainObject=false)5841     explicit Kernel(const cl_kernel& kernel, bool retainObject = false) :
5842         detail::Wrapper<cl_type>(kernel, retainObject) { }
5843 
5844     /*! \brief Assignment operator from cl_kernel - takes ownership.
5845      *
5846      *  This effectively transfers ownership of a refcount on the rhs and calls
5847      *  clReleaseKernel() on the value previously held by this instance.
5848      */
operator =(const cl_kernel & rhs)5849     Kernel& operator = (const cl_kernel& rhs)
5850     {
5851         detail::Wrapper<cl_type>::operator=(rhs);
5852         return *this;
5853     }
5854 
5855     /*! \brief Copy constructor to forward copy to the superclass correctly.
5856      * Required for MSVC.
5857      */
Kernel(const Kernel & kernel)5858     Kernel(const Kernel& kernel) : detail::Wrapper<cl_type>(kernel) {}
5859 
5860     /*! \brief Copy assignment to forward copy to the superclass correctly.
5861      * Required for MSVC.
5862      */
operator =(const Kernel & kernel)5863     Kernel& operator = (const Kernel &kernel)
5864     {
5865         detail::Wrapper<cl_type>::operator=(kernel);
5866         return *this;
5867     }
5868 
5869     /*! \brief Move constructor to forward move to the superclass correctly.
5870      * Required for MSVC.
5871      */
Kernel(Kernel && kernel)5872     Kernel(Kernel&& kernel) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(kernel)) {}
5873 
5874     /*! \brief Move assignment to forward move to the superclass correctly.
5875      * Required for MSVC.
5876      */
operator =(Kernel && kernel)5877     Kernel& operator = (Kernel &&kernel)
5878     {
5879         detail::Wrapper<cl_type>::operator=(std::move(kernel));
5880         return *this;
5881     }
5882 
5883     template <typename T>
getInfo(cl_kernel_info name,T * param) const5884     cl_int getInfo(cl_kernel_info name, T* param) const
5885     {
5886         return detail::errHandler(
5887             detail::getInfo(&::clGetKernelInfo, object_, name, param),
5888             __GET_KERNEL_INFO_ERR);
5889     }
5890 
5891     template <cl_int name> typename
5892     detail::param_traits<detail::cl_kernel_info, name>::param_type
getInfo(cl_int * err=NULL) const5893     getInfo(cl_int* err = NULL) const
5894     {
5895         typename detail::param_traits<
5896             detail::cl_kernel_info, name>::param_type param;
5897         cl_int result = getInfo(name, &param);
5898         if (err != NULL) {
5899             *err = result;
5900         }
5901         return param;
5902     }
5903 
5904 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5905     template <typename T>
getArgInfo(cl_uint argIndex,cl_kernel_arg_info name,T * param) const5906     cl_int getArgInfo(cl_uint argIndex, cl_kernel_arg_info name, T* param) const
5907     {
5908         return detail::errHandler(
5909             detail::getInfo(&::clGetKernelArgInfo, object_, argIndex, name, param),
5910             __GET_KERNEL_ARG_INFO_ERR);
5911     }
5912 
5913     template <cl_int name> typename
5914     detail::param_traits<detail::cl_kernel_arg_info, name>::param_type
getArgInfo(cl_uint argIndex,cl_int * err=NULL) const5915     getArgInfo(cl_uint argIndex, cl_int* err = NULL) const
5916     {
5917         typename detail::param_traits<
5918             detail::cl_kernel_arg_info, name>::param_type param;
5919         cl_int result = getArgInfo(argIndex, name, &param);
5920         if (err != NULL) {
5921             *err = result;
5922         }
5923         return param;
5924     }
5925 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5926 
5927     template <typename T>
getWorkGroupInfo(const Device & device,cl_kernel_work_group_info name,T * param) const5928     cl_int getWorkGroupInfo(
5929         const Device& device, cl_kernel_work_group_info name, T* param) const
5930     {
5931         return detail::errHandler(
5932             detail::getInfo(
5933                 &::clGetKernelWorkGroupInfo, object_, device(), name, param),
5934                 __GET_KERNEL_WORK_GROUP_INFO_ERR);
5935     }
5936 
5937     template <cl_int name> typename
5938     detail::param_traits<detail::cl_kernel_work_group_info, name>::param_type
getWorkGroupInfo(const Device & device,cl_int * err=NULL) const5939         getWorkGroupInfo(const Device& device, cl_int* err = NULL) const
5940     {
5941         typename detail::param_traits<
5942         detail::cl_kernel_work_group_info, name>::param_type param;
5943         cl_int result = getWorkGroupInfo(device, name, &param);
5944         if (err != NULL) {
5945             *err = result;
5946         }
5947         return param;
5948     }
5949 
5950 #if (CL_HPP_TARGET_OPENCL_VERSION >= 200 && defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)) || CL_HPP_TARGET_OPENCL_VERSION >= 210
getSubGroupInfo(const cl::Device & dev,cl_kernel_sub_group_info name,const cl::NDRange & range,size_type * param) const5951     cl_int getSubGroupInfo(const cl::Device &dev, cl_kernel_sub_group_info name, const cl::NDRange &range, size_type* param) const
5952     {
5953 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
5954 
5955         return detail::errHandler(
5956             clGetKernelSubGroupInfo(object_, dev(), name, range.size(), range.get(), sizeof(size_type), param, nullptr),
5957             __GET_KERNEL_SUB_GROUP_INFO_ERR);
5958 
5959 #else // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
5960 
5961         typedef clGetKernelSubGroupInfoKHR_fn PFN_clGetKernelSubGroupInfoKHR;
5962         static PFN_clGetKernelSubGroupInfoKHR pfn_clGetKernelSubGroupInfoKHR = NULL;
5963         CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetKernelSubGroupInfoKHR);
5964 
5965         return detail::errHandler(
5966             pfn_clGetKernelSubGroupInfoKHR(object_, dev(), name, range.size(), range.get(), sizeof(size_type), param, nullptr),
5967             __GET_KERNEL_SUB_GROUP_INFO_ERR);
5968 
5969 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
5970     }
5971 
5972     template <cl_int name>
getSubGroupInfo(const cl::Device & dev,const cl::NDRange & range,cl_int * err=NULL) const5973         size_type getSubGroupInfo(const cl::Device &dev, const cl::NDRange &range, cl_int* err = NULL) const
5974     {
5975         size_type param;
5976         cl_int result = getSubGroupInfo(dev, name, range, &param);
5977         if (err != NULL) {
5978             *err = result;
5979         }
5980         return param;
5981     }
5982 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5983 
5984 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5985     /*! \brief setArg overload taking a shared_ptr type
5986      */
5987     template<typename T, class D>
setArg(cl_uint index,const cl::pointer<T,D> & argPtr)5988     cl_int setArg(cl_uint index, const cl::pointer<T, D> &argPtr)
5989     {
5990         return detail::errHandler(
5991             ::clSetKernelArgSVMPointer(object_, index, argPtr.get()),
5992             __SET_KERNEL_ARGS_ERR);
5993     }
5994 
5995     /*! \brief setArg overload taking a vector type.
5996      */
5997     template<typename T, class Alloc>
setArg(cl_uint index,const cl::vector<T,Alloc> & argPtr)5998     cl_int setArg(cl_uint index, const cl::vector<T, Alloc> &argPtr)
5999     {
6000         return detail::errHandler(
6001             ::clSetKernelArgSVMPointer(object_, index, argPtr.data()),
6002             __SET_KERNEL_ARGS_ERR);
6003     }
6004 
6005     /*! \brief setArg overload taking a pointer type
6006      */
6007     template<typename T>
6008     typename std::enable_if<std::is_pointer<T>::value, cl_int>::type
setArg(cl_uint index,const T argPtr)6009         setArg(cl_uint index, const T argPtr)
6010     {
6011         return detail::errHandler(
6012             ::clSetKernelArgSVMPointer(object_, index, argPtr),
6013             __SET_KERNEL_ARGS_ERR);
6014     }
6015 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6016 
6017     /*! \brief setArg overload taking a POD type
6018      */
6019     template <typename T>
6020     typename std::enable_if<!std::is_pointer<T>::value, cl_int>::type
setArg(cl_uint index,const T & value)6021         setArg(cl_uint index, const T &value)
6022     {
6023         return detail::errHandler(
6024             ::clSetKernelArg(
6025                 object_,
6026                 index,
6027                 detail::KernelArgumentHandler<T>::size(value),
6028                 detail::KernelArgumentHandler<T>::ptr(value)),
6029             __SET_KERNEL_ARGS_ERR);
6030     }
6031 
setArg(cl_uint index,size_type size,const void * argPtr)6032     cl_int setArg(cl_uint index, size_type size, const void* argPtr)
6033     {
6034         return detail::errHandler(
6035             ::clSetKernelArg(object_, index, size, argPtr),
6036             __SET_KERNEL_ARGS_ERR);
6037     }
6038 
6039 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6040     /*!
6041      * Specify a vector of SVM pointers that the kernel may access in
6042      * addition to its arguments.
6043      */
setSVMPointers(const vector<void * > & pointerList)6044     cl_int setSVMPointers(const vector<void*> &pointerList)
6045     {
6046         return detail::errHandler(
6047             ::clSetKernelExecInfo(
6048                 object_,
6049                 CL_KERNEL_EXEC_INFO_SVM_PTRS,
6050                 sizeof(void*)*pointerList.size(),
6051                 pointerList.data()));
6052     }
6053 
6054     /*!
6055      * Specify a std::array of SVM pointers that the kernel may access in
6056      * addition to its arguments.
6057      */
6058     template<int ArrayLength>
setSVMPointers(const std::array<void *,ArrayLength> & pointerList)6059     cl_int setSVMPointers(const std::array<void*, ArrayLength> &pointerList)
6060     {
6061         return detail::errHandler(
6062             ::clSetKernelExecInfo(
6063                 object_,
6064                 CL_KERNEL_EXEC_INFO_SVM_PTRS,
6065                 sizeof(void*)*pointerList.size(),
6066                 pointerList.data()));
6067     }
6068 
6069     /*! \brief Enable fine-grained system SVM.
6070      *
6071      * \note It is only possible to enable fine-grained system SVM if all devices
6072      *       in the context associated with kernel support it.
6073      *
6074      * \param svmEnabled True if fine-grained system SVM is requested. False otherwise.
6075      * \return CL_SUCCESS if the function was executed succesfully. CL_INVALID_OPERATION
6076      *         if no devices in the context support fine-grained system SVM.
6077      *
6078      * \see clSetKernelExecInfo
6079      */
enableFineGrainedSystemSVM(bool svmEnabled)6080     cl_int enableFineGrainedSystemSVM(bool svmEnabled)
6081     {
6082         cl_bool svmEnabled_ = svmEnabled ? CL_TRUE : CL_FALSE;
6083         return detail::errHandler(
6084             ::clSetKernelExecInfo(
6085                 object_,
6086                 CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM,
6087                 sizeof(cl_bool),
6088                 &svmEnabled_
6089                 )
6090             );
6091     }
6092 
6093     template<int index, int ArrayLength, class D, typename T0, typename T1, typename... Ts>
setSVMPointersHelper(std::array<void *,ArrayLength> & pointerList,const pointer<T0,D> & t0,const pointer<T1,D> & t1,Ts &...ts)6094     void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0, const pointer<T1, D> &t1, Ts & ... ts)
6095     {
6096         pointerList[index] = static_cast<void*>(t0.get());
6097         setSVMPointersHelper<index + 1, ArrayLength>(pointerList, t1, ts...);
6098     }
6099 
6100     template<int index, int ArrayLength, typename T0, typename T1, typename... Ts>
6101     typename std::enable_if<std::is_pointer<T0>::value, void>::type
setSVMPointersHelper(std::array<void *,ArrayLength> & pointerList,T0 t0,T1 t1,Ts...ts)6102     setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0, T1 t1, Ts... ts)
6103     {
6104         pointerList[index] = static_cast<void*>(t0);
6105         setSVMPointersHelper<index + 1, ArrayLength>(pointerList, t1, ts...);
6106     }
6107 
6108     template<int index, int ArrayLength, typename T0, class D>
setSVMPointersHelper(std::array<void *,ArrayLength> & pointerList,const pointer<T0,D> & t0)6109     void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0)
6110     {
6111         pointerList[index] = static_cast<void*>(t0.get());
6112     }
6113 
6114 
6115     template<int index, int ArrayLength, typename T0>
6116     typename std::enable_if<std::is_pointer<T0>::value, void>::type
setSVMPointersHelper(std::array<void *,ArrayLength> & pointerList,T0 t0)6117     setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0)
6118     {
6119         pointerList[index] = static_cast<void*>(t0);
6120     }
6121 
6122     template<typename T0, typename... Ts>
setSVMPointers(const T0 & t0,Ts &...ts)6123     cl_int setSVMPointers(const T0 &t0, Ts & ... ts)
6124     {
6125         std::array<void*, 1 + sizeof...(Ts)> pointerList;
6126 
6127         setSVMPointersHelper<0, 1 + sizeof...(Ts)>(pointerList, t0, ts...);
6128         return detail::errHandler(
6129             ::clSetKernelExecInfo(
6130             object_,
6131             CL_KERNEL_EXEC_INFO_SVM_PTRS,
6132             sizeof(void*)*(1 + sizeof...(Ts)),
6133             pointerList.data()));
6134     }
6135 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6136 
6137 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6138     /**
6139      * Make a deep copy of the kernel object including its arguments.
6140      * @return A new kernel object with internal state entirely separate from that
6141      *         of the original but with any arguments set on the original intact.
6142      */
clone()6143     Kernel clone()
6144     {
6145         cl_int error;
6146         Kernel retValue(clCloneKernel(this->get(), &error));
6147 
6148         detail::errHandler(error, __CLONE_KERNEL_ERR);
6149         return retValue;
6150     }
6151 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6152 };
6153 
6154 /*! \class Program
6155  * \brief Program interface that implements cl_program.
6156  */
6157 class Program : public detail::Wrapper<cl_program>
6158 {
6159 public:
6160 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6161     typedef vector<vector<unsigned char>> Binaries;
6162     typedef vector<string> Sources;
6163 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6164     typedef vector<std::pair<const void*, size_type> > Binaries;
6165     typedef vector<std::pair<const char*, size_type> > Sources;
6166 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6167 
Program(const string & source,bool build=false,cl_int * err=NULL)6168     Program(
6169         const string& source,
6170         bool build = false,
6171         cl_int* err = NULL)
6172     {
6173         cl_int error;
6174 
6175         const char * strings = source.c_str();
6176         const size_type length  = source.size();
6177 
6178         Context context = Context::getDefault(err);
6179 
6180         object_ = ::clCreateProgramWithSource(
6181             context(), (cl_uint)1, &strings, &length, &error);
6182 
6183         detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6184 
6185         if (error == CL_SUCCESS && build) {
6186 
6187             error = ::clBuildProgram(
6188                 object_,
6189                 0,
6190                 NULL,
6191 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6192                 "-cl-std=CL2.0",
6193 #else
6194                 "",
6195 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6196                 NULL,
6197                 NULL);
6198 
6199             detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6200         }
6201 
6202         if (err != NULL) {
6203             *err = error;
6204         }
6205     }
6206 
Program(const Context & context,const string & source,bool build=false,cl_int * err=NULL)6207     Program(
6208         const Context& context,
6209         const string& source,
6210         bool build = false,
6211         cl_int* err = NULL)
6212     {
6213         cl_int error;
6214 
6215         const char * strings = source.c_str();
6216         const size_type length  = source.size();
6217 
6218         object_ = ::clCreateProgramWithSource(
6219             context(), (cl_uint)1, &strings, &length, &error);
6220 
6221         detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6222 
6223         if (error == CL_SUCCESS && build) {
6224             error = ::clBuildProgram(
6225                 object_,
6226                 0,
6227                 NULL,
6228 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6229                 "-cl-std=CL2.0",
6230 #else
6231                 "",
6232 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6233                 NULL,
6234                 NULL);
6235 
6236             detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6237         }
6238 
6239         if (err != NULL) {
6240             *err = error;
6241         }
6242     }
6243 
6244     /**
6245      * Create a program from a vector of source strings and the default context.
6246      * Does not compile or link the program.
6247      */
Program(const Sources & sources,cl_int * err=NULL)6248     Program(
6249         const Sources& sources,
6250         cl_int* err = NULL)
6251     {
6252         cl_int error;
6253         Context context = Context::getDefault(err);
6254 
6255         const size_type n = (size_type)sources.size();
6256 
6257         vector<size_type> lengths(n);
6258         vector<const char*> strings(n);
6259 
6260         for (size_type i = 0; i < n; ++i) {
6261 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6262             strings[i] = sources[(int)i].data();
6263             lengths[i] = sources[(int)i].length();
6264 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6265             strings[i] = sources[(int)i].first;
6266             lengths[i] = sources[(int)i].second;
6267 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6268         }
6269 
6270         object_ = ::clCreateProgramWithSource(
6271             context(), (cl_uint)n, strings.data(), lengths.data(), &error);
6272 
6273         detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6274         if (err != NULL) {
6275             *err = error;
6276         }
6277     }
6278 
6279     /**
6280      * Create a program from a vector of source strings and a provided context.
6281      * Does not compile or link the program.
6282      */
Program(const Context & context,const Sources & sources,cl_int * err=NULL)6283     Program(
6284         const Context& context,
6285         const Sources& sources,
6286         cl_int* err = NULL)
6287     {
6288         cl_int error;
6289 
6290         const size_type n = (size_type)sources.size();
6291 
6292         vector<size_type> lengths(n);
6293         vector<const char*> strings(n);
6294 
6295         for (size_type i = 0; i < n; ++i) {
6296 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6297             strings[i] = sources[(int)i].data();
6298             lengths[i] = sources[(int)i].length();
6299 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6300             strings[i] = sources[(int)i].first;
6301             lengths[i] = sources[(int)i].second;
6302 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6303         }
6304 
6305         object_ = ::clCreateProgramWithSource(
6306             context(), (cl_uint)n, strings.data(), lengths.data(), &error);
6307 
6308         detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6309         if (err != NULL) {
6310             *err = error;
6311         }
6312     }
6313 
6314 
6315 #if CL_HPP_TARGET_OPENCL_VERSION >= 210 || (CL_HPP_TARGET_OPENCL_VERSION==200 && defined(CL_HPP_USE_IL_KHR))
6316     /**
6317      * Program constructor to allow construction of program from SPIR-V or another IL.
6318      * Valid for either OpenCL >= 2.1 or when CL_HPP_USE_IL_KHR is defined.
6319      */
Program(const vector<char> & IL,bool build=false,cl_int * err=NULL)6320     Program(
6321         const vector<char>& IL,
6322         bool build = false,
6323         cl_int* err = NULL)
6324     {
6325         cl_int error;
6326 
6327         Context context = Context::getDefault(err);
6328 
6329 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6330 
6331         object_ = ::clCreateProgramWithIL(
6332             context(), static_cast<const void*>(IL.data()), IL.size(), &error);
6333 
6334 #else // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6335 
6336         typedef clCreateProgramWithILKHR_fn PFN_clCreateProgramWithILKHR;
6337         static PFN_clCreateProgramWithILKHR pfn_clCreateProgramWithILKHR = NULL;
6338         CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateProgramWithILKHR);
6339 
6340         return detail::errHandler(
6341             pfn_clCreateProgramWithILKHR(
6342                 context(), static_cast<const void*>(IL.data()), IL.size(), &error);
6343 
6344 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6345 
6346         detail::errHandler(error, __CREATE_PROGRAM_WITH_IL_ERR);
6347 
6348         if (error == CL_SUCCESS && build) {
6349 
6350             error = ::clBuildProgram(
6351                 object_,
6352                 0,
6353                 NULL,
6354 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6355                 "-cl-std=CL2.0",
6356 #else
6357                 "",
6358 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6359                 NULL,
6360                 NULL);
6361 
6362             detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6363         }
6364 
6365         if (err != NULL) {
6366             *err = error;
6367         }
6368     }
6369 
6370     /**
6371      * Program constructor to allow construction of program from SPIR-V or another IL
6372      * for a specific context.
6373      * Valid for either OpenCL >= 2.1 or when CL_HPP_USE_IL_KHR is defined.
6374      */
6375     Program(
6376         const Context& context,
6377         const vector<char>& IL,
6378         bool build = false,
6379         cl_int* err = NULL)
6380     {
6381         cl_int error;
6382 
6383 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6384 
6385         object_ = ::clCreateProgramWithIL(
6386             context(), static_cast<const void*>(IL.data()), IL.size(), &error);
6387 
6388 #else // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6389 
6390         typedef clCreateProgramWithILKHR_fn PFN_clCreateProgramWithILKHR;
6391         static PFN_clCreateProgramWithILKHR pfn_clCreateProgramWithILKHR = NULL;
6392         CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateProgramWithILKHR);
6393 
6394         return detail::errHandler(
6395             pfn_clCreateProgramWithILKHR(
6396             context(), static_cast<const void*>(IL.data()), IL.size(), &error);
6397 
6398 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6399 
6400         detail::errHandler(error, __CREATE_PROGRAM_WITH_IL_ERR);
6401 
6402         if (error == CL_SUCCESS && build) {
6403             error = ::clBuildProgram(
6404                 object_,
6405                 0,
6406                 NULL,
6407 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6408                 "-cl-std=CL2.0",
6409 #else
6410                 "",
6411 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6412                 NULL,
6413                 NULL);
6414 
6415             detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6416         }
6417 
6418         if (err != NULL) {
6419             *err = error;
6420         }
6421     }
6422 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6423 
6424     /**
6425      * Construct a program object from a list of devices and a per-device list of binaries.
6426      * \param context A valid OpenCL context in which to construct the program.
6427      * \param devices A vector of OpenCL device objects for which the program will be created.
6428      * \param binaries A vector of pairs of a pointer to a binary object and its length.
6429      * \param binaryStatus An optional vector that on completion will be resized to
6430      *   match the size of binaries and filled with values to specify if each binary
6431      *   was successfully loaded.
6432      *   Set to CL_SUCCESS if the binary was successfully loaded.
6433      *   Set to CL_INVALID_VALUE if the length is 0 or the binary pointer is NULL.
6434      *   Set to CL_INVALID_BINARY if the binary provided is not valid for the matching device.
6435      * \param err if non-NULL will be set to CL_SUCCESS on successful operation or one of the following errors:
6436      *   CL_INVALID_CONTEXT if context is not a valid context.
6437      *   CL_INVALID_VALUE if the length of devices is zero; or if the length of binaries does not match the length of devices;
6438      *     or if any entry in binaries is NULL or has length 0.
6439      *   CL_INVALID_DEVICE if OpenCL devices listed in devices are not in the list of devices associated with context.
6440      *   CL_INVALID_BINARY if an invalid program binary was encountered for any device. binaryStatus will return specific status for each device.
6441      *   CL_OUT_OF_HOST_MEMORY if there is a failure to allocate resources required by the OpenCL implementation on the host.
6442      */
6443     Program(
6444         const Context& context,
6445         const vector<Device>& devices,
6446         const Binaries& binaries,
6447         vector<cl_int>* binaryStatus = NULL,
6448         cl_int* err = NULL)
6449     {
6450         cl_int error;
6451 
6452         const size_type numDevices = devices.size();
6453 
6454         // Catch size mismatch early and return
6455         if(binaries.size() != numDevices) {
6456             error = CL_INVALID_VALUE;
6457             detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
6458             if (err != NULL) {
6459                 *err = error;
6460             }
6461             return;
6462         }
6463 
6464 
6465         vector<size_type> lengths(numDevices);
6466         vector<const unsigned char*> images(numDevices);
6467 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6468         for (size_type i = 0; i < numDevices; ++i) {
6469             images[i] = binaries[i].data();
6470             lengths[i] = binaries[(int)i].size();
6471         }
6472 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6473         for (size_type i = 0; i < numDevices; ++i) {
6474             images[i] = (const unsigned char*)binaries[i].first;
6475             lengths[i] = binaries[(int)i].second;
6476         }
6477 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6478 
6479         vector<cl_device_id> deviceIDs(numDevices);
6480         for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6481             deviceIDs[deviceIndex] = (devices[deviceIndex])();
6482         }
6483 
6484         if(binaryStatus) {
6485             binaryStatus->resize(numDevices);
6486         }
6487 
6488         object_ = ::clCreateProgramWithBinary(
6489             context(), (cl_uint) devices.size(),
6490             deviceIDs.data(),
6491             lengths.data(), images.data(), (binaryStatus != NULL && numDevices > 0)
6492                ? &binaryStatus->front()
6493                : NULL, &error);
6494 
6495         detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
6496         if (err != NULL) {
6497             *err = error;
6498         }
6499     }
6500 
6501 
6502 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
6503     /**
6504      * Create program using builtin kernels.
6505      * \param kernelNames Semi-colon separated list of builtin kernel names
6506      */
6507     Program(
6508         const Context& context,
6509         const vector<Device>& devices,
6510         const string& kernelNames,
6511         cl_int* err = NULL)
6512     {
6513         cl_int error;
6514 
6515 
6516         size_type numDevices = devices.size();
6517         vector<cl_device_id> deviceIDs(numDevices);
6518         for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6519             deviceIDs[deviceIndex] = (devices[deviceIndex])();
6520         }
6521 
6522         object_ = ::clCreateProgramWithBuiltInKernels(
6523             context(),
6524             (cl_uint) devices.size(),
6525             deviceIDs.data(),
6526             kernelNames.c_str(),
6527             &error);
6528 
6529         detail::errHandler(error, __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR);
6530         if (err != NULL) {
6531             *err = error;
6532         }
6533     }
6534 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6535 
6536     Program() { }
6537 
6538 
6539     /*! \brief Constructor from cl_mem - takes ownership.
6540      *
6541      * \param retainObject will cause the constructor to retain its cl object.
6542      *                     Defaults to false to maintain compatibility with
6543      *                     earlier versions.
6544      */
6545     explicit Program(const cl_program& program, bool retainObject = false) :
6546         detail::Wrapper<cl_type>(program, retainObject) { }
6547 
6548     Program& operator = (const cl_program& rhs)
6549     {
6550         detail::Wrapper<cl_type>::operator=(rhs);
6551         return *this;
6552     }
6553 
6554     /*! \brief Copy constructor to forward copy to the superclass correctly.
6555      * Required for MSVC.
6556      */
6557     Program(const Program& program) : detail::Wrapper<cl_type>(program) {}
6558 
6559     /*! \brief Copy assignment to forward copy to the superclass correctly.
6560      * Required for MSVC.
6561      */
6562     Program& operator = (const Program &program)
6563     {
6564         detail::Wrapper<cl_type>::operator=(program);
6565         return *this;
6566     }
6567 
6568     /*! \brief Move constructor to forward move to the superclass correctly.
6569      * Required for MSVC.
6570      */
6571     Program(Program&& program) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(program)) {}
6572 
6573     /*! \brief Move assignment to forward move to the superclass correctly.
6574      * Required for MSVC.
6575      */
6576     Program& operator = (Program &&program)
6577     {
6578         detail::Wrapper<cl_type>::operator=(std::move(program));
6579         return *this;
6580     }
6581 
6582     cl_int build(
6583         const vector<Device>& devices,
6584         const char* options = NULL,
6585         void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6586         void* data = NULL) const
6587     {
6588         size_type numDevices = devices.size();
6589         vector<cl_device_id> deviceIDs(numDevices);
6590 
6591         for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6592             deviceIDs[deviceIndex] = (devices[deviceIndex])();
6593         }
6594 
6595         cl_int buildError = ::clBuildProgram(
6596             object_,
6597             (cl_uint)
6598             devices.size(),
6599             deviceIDs.data(),
6600             options,
6601             notifyFptr,
6602             data);
6603 
6604         return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6605     }
6606 
6607     cl_int build(
6608         const char* options = NULL,
6609         void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6610         void* data = NULL) const
6611     {
6612         cl_int buildError = ::clBuildProgram(
6613             object_,
6614             0,
6615             NULL,
6616             options,
6617             notifyFptr,
6618             data);
6619 
6620 
6621         return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6622     }
6623 
6624 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
6625     cl_int compile(
6626         const char* options = NULL,
6627         void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6628         void* data = NULL) const
6629     {
6630         cl_int error = ::clCompileProgram(
6631             object_,
6632             0,
6633             NULL,
6634             options,
6635             0,
6636             NULL,
6637             NULL,
6638             notifyFptr,
6639             data);
6640         return detail::buildErrHandler(error, __COMPILE_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6641     }
6642 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6643 
6644     template <typename T>
6645     cl_int getInfo(cl_program_info name, T* param) const
6646     {
6647         return detail::errHandler(
6648             detail::getInfo(&::clGetProgramInfo, object_, name, param),
6649             __GET_PROGRAM_INFO_ERR);
6650     }
6651 
6652     template <cl_int name> typename
6653     detail::param_traits<detail::cl_program_info, name>::param_type
6654     getInfo(cl_int* err = NULL) const
6655     {
6656         typename detail::param_traits<
6657             detail::cl_program_info, name>::param_type param;
6658         cl_int result = getInfo(name, &param);
6659         if (err != NULL) {
6660             *err = result;
6661         }
6662         return param;
6663     }
6664 
6665     template <typename T>
6666     cl_int getBuildInfo(
6667         const Device& device, cl_program_build_info name, T* param) const
6668     {
6669         return detail::errHandler(
6670             detail::getInfo(
6671                 &::clGetProgramBuildInfo, object_, device(), name, param),
6672                 __GET_PROGRAM_BUILD_INFO_ERR);
6673     }
6674 
6675     template <cl_int name> typename
6676     detail::param_traits<detail::cl_program_build_info, name>::param_type
6677     getBuildInfo(const Device& device, cl_int* err = NULL) const
6678     {
6679         typename detail::param_traits<
6680             detail::cl_program_build_info, name>::param_type param;
6681         cl_int result = getBuildInfo(device, name, &param);
6682         if (err != NULL) {
6683             *err = result;
6684         }
6685         return param;
6686     }
6687 
6688     /**
6689      * Build info function that returns a vector of device/info pairs for the specified
6690      * info type and for all devices in the program.
6691      * On an error reading the info for any device, an empty vector of info will be returned.
6692      */
6693     template <cl_int name>
6694     vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
6695         getBuildInfo(cl_int *err = NULL) const
6696     {
6697         cl_int result = CL_SUCCESS;
6698 
6699         auto devs = getInfo<CL_PROGRAM_DEVICES>(&result);
6700         vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
6701             devInfo;
6702 
6703         // If there was an initial error from getInfo return the error
6704         if (result != CL_SUCCESS) {
6705             if (err != NULL) {
6706                 *err = result;
6707             }
6708             return devInfo;
6709         }
6710 
6711         for (const cl::Device &d : devs) {
6712             typename detail::param_traits<
6713                 detail::cl_program_build_info, name>::param_type param;
6714             result = getBuildInfo(d, name, &param);
6715             devInfo.push_back(
6716                 std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>
6717                 (d, param));
6718             if (result != CL_SUCCESS) {
6719                 // On error, leave the loop and return the error code
6720                 break;
6721             }
6722         }
6723         if (err != NULL) {
6724             *err = result;
6725         }
6726         if (result != CL_SUCCESS) {
6727             devInfo.clear();
6728         }
6729         return devInfo;
6730     }
6731 
6732     cl_int createKernels(vector<Kernel>* kernels)
6733     {
6734         cl_uint numKernels;
6735         cl_int err = ::clCreateKernelsInProgram(object_, 0, NULL, &numKernels);
6736         if (err != CL_SUCCESS) {
6737             return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
6738         }
6739 
6740         vector<cl_kernel> value(numKernels);
6741 
6742         err = ::clCreateKernelsInProgram(
6743             object_, numKernels, value.data(), NULL);
6744         if (err != CL_SUCCESS) {
6745             return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
6746         }
6747 
6748         if (kernels) {
6749             kernels->resize(value.size());
6750 
6751             // Assign to param, constructing with retain behaviour
6752             // to correctly capture each underlying CL object
6753             for (size_type i = 0; i < value.size(); i++) {
6754                 // We do not need to retain because this kernel is being created
6755                 // by the runtime
6756                 (*kernels)[i] = Kernel(value[i], false);
6757             }
6758         }
6759         return CL_SUCCESS;
6760     }
6761 
6762 #if CL_HPP_TARGET_OPENCL_VERSION >= 220
6763     /*! \brief Registers a callback function to be called when destructors for
6764      *         program scope global variables are complete and before the
6765      *         program is released.
6766      *
6767      *  Wraps clSetProgramReleaseCallback().
6768      *
6769      *  Each call to this function registers the specified user callback function
6770      *  on a callback stack associated with program. The registered user callback
6771      *  functions are called in the reverse order in which they were registered.
6772      */
6773     cl_int setReleaseCallback(
6774         void (CL_CALLBACK * pfn_notify)(cl_program program, void * user_data),
6775         void * user_data = NULL)
6776     {
6777         return detail::errHandler(
6778             ::clSetProgramReleaseCallback(
6779                 object_,
6780                 pfn_notify,
6781                 user_data),
6782             __SET_PROGRAM_RELEASE_CALLBACK_ERR);
6783     }
6784 
6785     /*! \brief Sets a SPIR-V specialization constant.
6786      *
6787      *  Wraps clSetProgramSpecializationConstant().
6788      */
6789     template <typename T>
6790     typename std::enable_if<!std::is_pointer<T>::value, cl_int>::type
6791         setSpecializationConstant(cl_uint index, const T &value)
6792     {
6793         return detail::errHandler(
6794             ::clSetProgramSpecializationConstant(
6795                 object_,
6796                 index,
6797                 sizeof(value),
6798                 &value),
6799             __SET_PROGRAM_SPECIALIZATION_CONSTANT_ERR);
6800     }
6801 
6802     /*! \brief Sets a SPIR-V specialization constant.
6803      *
6804      *  Wraps clSetProgramSpecializationConstant().
6805      */
6806     cl_int setSpecializationConstant(cl_uint index, size_type size, const void* value)
6807     {
6808         return detail::errHandler(
6809             ::clSetProgramSpecializationConstant(
6810                 object_,
6811                 index,
6812                 size,
6813                 value),
6814             __SET_PROGRAM_SPECIALIZATION_CONSTANT_ERR);
6815     }
6816 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 220
6817 };
6818 
6819 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
6820 inline Program linkProgram(
6821     Program input1,
6822     Program input2,
6823     const char* options = NULL,
6824     void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6825     void* data = NULL,
6826     cl_int* err = NULL)
6827 {
6828     cl_int error_local = CL_SUCCESS;
6829 
6830     cl_program programs[2] = { input1(), input2() };
6831 
6832     Context ctx = input1.getInfo<CL_PROGRAM_CONTEXT>(&error_local);
6833     if(error_local!=CL_SUCCESS) {
6834         detail::errHandler(error_local, __LINK_PROGRAM_ERR);
6835     }
6836 
6837     cl_program prog = ::clLinkProgram(
6838         ctx(),
6839         0,
6840         NULL,
6841         options,
6842         2,
6843         programs,
6844         notifyFptr,
6845         data,
6846         &error_local);
6847 
6848     detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
6849     if (err != NULL) {
6850         *err = error_local;
6851     }
6852 
6853     return Program(prog);
6854 }
6855 
6856 inline Program linkProgram(
6857     vector<Program> inputPrograms,
6858     const char* options = NULL,
6859     void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6860     void* data = NULL,
6861     cl_int* err = NULL)
6862 {
6863     cl_int error_local = CL_SUCCESS;
6864 
6865     vector<cl_program> programs(inputPrograms.size());
6866 
6867     for (unsigned int i = 0; i < inputPrograms.size(); i++) {
6868         programs[i] = inputPrograms[i]();
6869     }
6870 
6871     Context ctx;
6872     if(inputPrograms.size() > 0) {
6873         ctx = inputPrograms[0].getInfo<CL_PROGRAM_CONTEXT>(&error_local);
6874         if(error_local!=CL_SUCCESS) {
6875             detail::errHandler(error_local, __LINK_PROGRAM_ERR);
6876         }
6877     }
6878     cl_program prog = ::clLinkProgram(
6879         ctx(),
6880         0,
6881         NULL,
6882         options,
6883         (cl_uint)inputPrograms.size(),
6884         programs.data(),
6885         notifyFptr,
6886         data,
6887         &error_local);
6888 
6889     detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
6890     if (err != NULL) {
6891         *err = error_local;
6892     }
6893 
6894     return Program(prog, false);
6895 }
6896 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6897 
6898 // Template specialization for CL_PROGRAM_BINARIES
6899 template <>
6900 inline cl_int cl::Program::getInfo(cl_program_info name, vector<vector<unsigned char>>* param) const
6901 {
6902     if (name != CL_PROGRAM_BINARIES) {
6903         return CL_INVALID_VALUE;
6904     }
6905     if (param) {
6906         // Resize the parameter array appropriately for each allocation
6907         // and pass down to the helper
6908 
6909         vector<size_type> sizes = getInfo<CL_PROGRAM_BINARY_SIZES>();
6910         size_type numBinaries = sizes.size();
6911 
6912         // Resize the parameter array and constituent arrays
6913         param->resize(numBinaries);
6914         for (size_type i = 0; i < numBinaries; ++i) {
6915             (*param)[i].resize(sizes[i]);
6916         }
6917 
6918         return detail::errHandler(
6919             detail::getInfo(&::clGetProgramInfo, object_, name, param),
6920             __GET_PROGRAM_INFO_ERR);
6921     }
6922 
6923     return CL_SUCCESS;
6924 }
6925 
6926 template<>
6927 inline vector<vector<unsigned char>> cl::Program::getInfo<CL_PROGRAM_BINARIES>(cl_int* err) const
6928 {
6929     vector<vector<unsigned char>> binariesVectors;
6930 
6931     cl_int result = getInfo(CL_PROGRAM_BINARIES, &binariesVectors);
6932     if (err != NULL) {
6933         *err = result;
6934     }
6935     return binariesVectors;
6936 }
6937 
6938 #if CL_HPP_TARGET_OPENCL_VERSION >= 220
6939 // Template specialization for clSetProgramSpecializationConstant
6940 template <>
6941 inline cl_int cl::Program::setSpecializationConstant(cl_uint index, const bool &value)
6942 {
6943     cl_uchar ucValue = value ? CL_UCHAR_MAX : 0;
6944     return detail::errHandler(
6945         ::clSetProgramSpecializationConstant(
6946             object_,
6947             index,
6948             sizeof(ucValue),
6949             &ucValue),
6950         __SET_PROGRAM_SPECIALIZATION_CONSTANT_ERR);
6951 }
6952 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 220
6953 
6954 inline Kernel::Kernel(const Program& program, const char* name, cl_int* err)
6955 {
6956     cl_int error;
6957 
6958     object_ = ::clCreateKernel(program(), name, &error);
6959     detail::errHandler(error, __CREATE_KERNEL_ERR);
6960 
6961     if (err != NULL) {
6962         *err = error;
6963     }
6964 
6965 }
6966 
6967 enum class QueueProperties : cl_command_queue_properties
6968 {
6969     None = 0,
6970     Profiling = CL_QUEUE_PROFILING_ENABLE,
6971     OutOfOrder = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE,
6972 };
6973 
6974 inline QueueProperties operator|(QueueProperties lhs, QueueProperties rhs)
6975 {
6976     return static_cast<QueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
6977 }
6978 
6979 /*! \class CommandQueue
6980  * \brief CommandQueue interface for cl_command_queue.
6981  */
6982 class CommandQueue : public detail::Wrapper<cl_command_queue>
6983 {
6984 private:
6985     static std::once_flag default_initialized_;
6986     static CommandQueue default_;
6987     static cl_int default_error_;
6988 
6989     /*! \brief Create the default command queue returned by @ref getDefault.
6990      *
6991      * It sets default_error_ to indicate success or failure. It does not throw
6992      * @c cl::Error.
6993      */
6994     static void makeDefault()
6995     {
6996         /* We don't want to throw an error from this function, so we have to
6997          * catch and set the error flag.
6998          */
6999 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
7000         try
7001 #endif
7002         {
7003             int error;
7004             Context context = Context::getDefault(&error);
7005 
7006             if (error != CL_SUCCESS) {
7007                 default_error_ = error;
7008             }
7009             else {
7010                 Device device = Device::getDefault();
7011                 default_ = CommandQueue(context, device, 0, &default_error_);
7012             }
7013         }
7014 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
7015         catch (cl::Error &e) {
7016             default_error_ = e.err();
7017         }
7018 #endif
7019     }
7020 
7021     /*! \brief Create the default command queue.
7022      *
7023      * This sets @c default_. It does not throw
7024      * @c cl::Error.
7025      */
7026     static void makeDefaultProvided(const CommandQueue &c) {
7027         default_ = c;
7028     }
7029 
7030 public:
7031 #ifdef CL_HPP_UNIT_TEST_ENABLE
7032     /*! \brief Reset the default.
7033     *
7034     * This sets @c default_ to an empty value to support cleanup in
7035     * the unit test framework.
7036     * This function is not thread safe.
7037     */
7038     static void unitTestClearDefault() {
7039         default_ = CommandQueue();
7040     }
7041 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
7042 
7043 
7044     /*!
7045      * \brief Constructs a CommandQueue based on passed properties.
7046      * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7047      */
7048    CommandQueue(
7049         cl_command_queue_properties properties,
7050         cl_int* err = NULL)
7051     {
7052         cl_int error;
7053 
7054         Context context = Context::getDefault(&error);
7055         detail::errHandler(error, __CREATE_CONTEXT_ERR);
7056 
7057         if (error != CL_SUCCESS) {
7058             if (err != NULL) {
7059                 *err = error;
7060             }
7061         }
7062         else {
7063             Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
7064             bool useWithProperties;
7065 
7066 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7067             // Run-time decision based on the actual platform
7068             {
7069                 cl_uint version = detail::getContextPlatformVersion(context());
7070                 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7071             }
7072 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7073             useWithProperties = true;
7074 #else
7075             useWithProperties = false;
7076 #endif
7077 
7078 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7079             if (useWithProperties) {
7080                 cl_queue_properties queue_properties[] = {
7081                     CL_QUEUE_PROPERTIES, properties, 0 };
7082                 if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
7083                     object_ = ::clCreateCommandQueueWithProperties(
7084                         context(), device(), queue_properties, &error);
7085                 }
7086                 else {
7087                     error = CL_INVALID_QUEUE_PROPERTIES;
7088                 }
7089 
7090                 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7091                 if (err != NULL) {
7092                     *err = error;
7093                 }
7094             }
7095 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7096 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7097             if (!useWithProperties) {
7098                 object_ = ::clCreateCommandQueue(
7099                     context(), device(), properties, &error);
7100 
7101                 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7102                 if (err != NULL) {
7103                     *err = error;
7104                 }
7105             }
7106 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7107         }
7108     }
7109 
7110    /*!
7111     * \brief Constructs a CommandQueue based on passed properties.
7112     * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7113     */
7114    CommandQueue(
7115        QueueProperties properties,
7116        cl_int* err = NULL)
7117    {
7118        cl_int error;
7119 
7120        Context context = Context::getDefault(&error);
7121        detail::errHandler(error, __CREATE_CONTEXT_ERR);
7122 
7123        if (error != CL_SUCCESS) {
7124            if (err != NULL) {
7125                *err = error;
7126            }
7127        }
7128        else {
7129            Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
7130            bool useWithProperties;
7131 
7132 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7133            // Run-time decision based on the actual platform
7134            {
7135                cl_uint version = detail::getContextPlatformVersion(context());
7136                useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7137            }
7138 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7139            useWithProperties = true;
7140 #else
7141            useWithProperties = false;
7142 #endif
7143 
7144 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7145            if (useWithProperties) {
7146                cl_queue_properties queue_properties[] = {
7147                    CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
7148 
7149                object_ = ::clCreateCommandQueueWithProperties(
7150                    context(), device(), queue_properties, &error);
7151 
7152                detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7153                if (err != NULL) {
7154                    *err = error;
7155                }
7156            }
7157 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7158 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7159            if (!useWithProperties) {
7160                object_ = ::clCreateCommandQueue(
7161                    context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
7162 
7163                detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7164                if (err != NULL) {
7165                    *err = error;
7166                }
7167            }
7168 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7169 
7170        }
7171    }
7172 
7173     /*!
7174      * \brief Constructs a CommandQueue for an implementation defined device in the given context
7175      * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7176      */
7177     explicit CommandQueue(
7178         const Context& context,
7179         cl_command_queue_properties properties = 0,
7180         cl_int* err = NULL)
7181     {
7182         cl_int error;
7183         bool useWithProperties;
7184         vector<cl::Device> devices;
7185         error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
7186 
7187         detail::errHandler(error, __CREATE_CONTEXT_ERR);
7188 
7189         if (error != CL_SUCCESS)
7190         {
7191             if (err != NULL) {
7192                 *err = error;
7193             }
7194             return;
7195         }
7196 
7197 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7198         // Run-time decision based on the actual platform
7199         {
7200             cl_uint version = detail::getContextPlatformVersion(context());
7201             useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7202         }
7203 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7204         useWithProperties = true;
7205 #else
7206         useWithProperties = false;
7207 #endif
7208 
7209 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7210         if (useWithProperties) {
7211             cl_queue_properties queue_properties[] = {
7212                 CL_QUEUE_PROPERTIES, properties, 0 };
7213             if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
7214                 object_ = ::clCreateCommandQueueWithProperties(
7215                     context(), devices[0](), queue_properties, &error);
7216             }
7217             else {
7218                 error = CL_INVALID_QUEUE_PROPERTIES;
7219             }
7220 
7221             detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7222             if (err != NULL) {
7223                 *err = error;
7224             }
7225         }
7226 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7227 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7228         if (!useWithProperties) {
7229             object_ = ::clCreateCommandQueue(
7230                 context(), devices[0](), properties, &error);
7231 
7232             detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7233             if (err != NULL) {
7234                 *err = error;
7235             }
7236         }
7237 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7238     }
7239 
7240     /*!
7241     * \brief Constructs a CommandQueue for an implementation defined device in the given context
7242     * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7243     */
7244     explicit CommandQueue(
7245         const Context& context,
7246         QueueProperties properties,
7247         cl_int* err = NULL)
7248     {
7249         cl_int error;
7250         bool useWithProperties;
7251         vector<cl::Device> devices;
7252         error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
7253 
7254         detail::errHandler(error, __CREATE_CONTEXT_ERR);
7255 
7256         if (error != CL_SUCCESS)
7257         {
7258             if (err != NULL) {
7259                 *err = error;
7260             }
7261             return;
7262         }
7263 
7264 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7265         // Run-time decision based on the actual platform
7266         {
7267             cl_uint version = detail::getContextPlatformVersion(context());
7268             useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7269         }
7270 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7271         useWithProperties = true;
7272 #else
7273         useWithProperties = false;
7274 #endif
7275 
7276 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7277         if (useWithProperties) {
7278             cl_queue_properties queue_properties[] = {
7279                 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
7280             object_ = ::clCreateCommandQueueWithProperties(
7281                 context(), devices[0](), queue_properties, &error);
7282 
7283             detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7284             if (err != NULL) {
7285                 *err = error;
7286             }
7287         }
7288 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7289 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7290         if (!useWithProperties) {
7291             object_ = ::clCreateCommandQueue(
7292                 context(), devices[0](), static_cast<cl_command_queue_properties>(properties), &error);
7293 
7294             detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7295             if (err != NULL) {
7296                 *err = error;
7297             }
7298         }
7299 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7300     }
7301 
7302     /*!
7303      * \brief Constructs a CommandQueue for a passed device and context
7304      * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7305      */
7306     CommandQueue(
7307         const Context& context,
7308         const Device& device,
7309         cl_command_queue_properties properties = 0,
7310         cl_int* err = NULL)
7311     {
7312         cl_int error;
7313         bool useWithProperties;
7314 
7315 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7316         // Run-time decision based on the actual platform
7317         {
7318             cl_uint version = detail::getContextPlatformVersion(context());
7319             useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7320         }
7321 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7322         useWithProperties = true;
7323 #else
7324         useWithProperties = false;
7325 #endif
7326 
7327 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7328         if (useWithProperties) {
7329             cl_queue_properties queue_properties[] = {
7330                 CL_QUEUE_PROPERTIES, properties, 0 };
7331             object_ = ::clCreateCommandQueueWithProperties(
7332                 context(), device(), queue_properties, &error);
7333 
7334             detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7335             if (err != NULL) {
7336                 *err = error;
7337             }
7338         }
7339 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7340 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7341         if (!useWithProperties) {
7342             object_ = ::clCreateCommandQueue(
7343                 context(), device(), properties, &error);
7344 
7345             detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7346             if (err != NULL) {
7347                 *err = error;
7348             }
7349         }
7350 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7351     }
7352 
7353     /*!
7354      * \brief Constructs a CommandQueue for a passed device and context
7355      * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7356      */
7357     CommandQueue(
7358         const Context& context,
7359         const Device& device,
7360         QueueProperties properties,
7361         cl_int* err = NULL)
7362     {
7363         cl_int error;
7364         bool useWithProperties;
7365 
7366 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7367         // Run-time decision based on the actual platform
7368         {
7369             cl_uint version = detail::getContextPlatformVersion(context());
7370             useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7371         }
7372 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7373         useWithProperties = true;
7374 #else
7375         useWithProperties = false;
7376 #endif
7377 
7378 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7379         if (useWithProperties) {
7380             cl_queue_properties queue_properties[] = {
7381                 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
7382             object_ = ::clCreateCommandQueueWithProperties(
7383                 context(), device(), queue_properties, &error);
7384 
7385             detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7386             if (err != NULL) {
7387                 *err = error;
7388             }
7389         }
7390 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7391 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7392         if (!useWithProperties) {
7393             object_ = ::clCreateCommandQueue(
7394                 context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
7395 
7396             detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7397             if (err != NULL) {
7398                 *err = error;
7399             }
7400         }
7401 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7402     }
7403 
7404     static CommandQueue getDefault(cl_int * err = NULL)
7405     {
7406         std::call_once(default_initialized_, makeDefault);
7407 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7408         detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7409 #else // CL_HPP_TARGET_OPENCL_VERSION >= 200
7410         detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_ERR);
7411 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7412         if (err != NULL) {
7413             *err = default_error_;
7414         }
7415         return default_;
7416     }
7417 
7418     /**
7419      * Modify the default command queue to be used by
7420      * subsequent operations.
7421      * Will only set the default if no default was previously created.
7422      * @return updated default command queue.
7423      *         Should be compared to the passed value to ensure that it was updated.
7424      */
7425     static CommandQueue setDefault(const CommandQueue &default_queue)
7426     {
7427         std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_queue));
7428         detail::errHandler(default_error_);
7429         return default_;
7430     }
7431 
7432     CommandQueue() { }
7433 
7434 
7435     /*! \brief Constructor from cl_mem - takes ownership.
7436      *
7437      * \param retainObject will cause the constructor to retain its cl object.
7438      *                     Defaults to false to maintain compatibility with
7439      *                     earlier versions.
7440      */
7441     explicit CommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
7442         detail::Wrapper<cl_type>(commandQueue, retainObject) { }
7443 
7444     CommandQueue& operator = (const cl_command_queue& rhs)
7445     {
7446         detail::Wrapper<cl_type>::operator=(rhs);
7447         return *this;
7448     }
7449 
7450     /*! \brief Copy constructor to forward copy to the superclass correctly.
7451      * Required for MSVC.
7452      */
7453     CommandQueue(const CommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}
7454 
7455     /*! \brief Copy assignment to forward copy to the superclass correctly.
7456      * Required for MSVC.
7457      */
7458     CommandQueue& operator = (const CommandQueue &queue)
7459     {
7460         detail::Wrapper<cl_type>::operator=(queue);
7461         return *this;
7462     }
7463 
7464     /*! \brief Move constructor to forward move to the superclass correctly.
7465      * Required for MSVC.
7466      */
7467     CommandQueue(CommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}
7468 
7469     /*! \brief Move assignment to forward move to the superclass correctly.
7470      * Required for MSVC.
7471      */
7472     CommandQueue& operator = (CommandQueue &&queue)
7473     {
7474         detail::Wrapper<cl_type>::operator=(std::move(queue));
7475         return *this;
7476     }
7477 
7478     template <typename T>
7479     cl_int getInfo(cl_command_queue_info name, T* param) const
7480     {
7481         return detail::errHandler(
7482             detail::getInfo(
7483                 &::clGetCommandQueueInfo, object_, name, param),
7484                 __GET_COMMAND_QUEUE_INFO_ERR);
7485     }
7486 
7487     template <cl_int name> typename
7488     detail::param_traits<detail::cl_command_queue_info, name>::param_type
7489     getInfo(cl_int* err = NULL) const
7490     {
7491         typename detail::param_traits<
7492             detail::cl_command_queue_info, name>::param_type param;
7493         cl_int result = getInfo(name, &param);
7494         if (err != NULL) {
7495             *err = result;
7496         }
7497         return param;
7498     }
7499 
7500     cl_int enqueueReadBuffer(
7501         const Buffer& buffer,
7502         cl_bool blocking,
7503         size_type offset,
7504         size_type size,
7505         void* ptr,
7506         const vector<Event>* events = NULL,
7507         Event* event = NULL) const
7508     {
7509         cl_event tmp;
7510         cl_int err = detail::errHandler(
7511             ::clEnqueueReadBuffer(
7512                 object_, buffer(), blocking, offset, size,
7513                 ptr,
7514                 (events != NULL) ? (cl_uint) events->size() : 0,
7515                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7516                 (event != NULL) ? &tmp : NULL),
7517             __ENQUEUE_READ_BUFFER_ERR);
7518 
7519         if (event != NULL && err == CL_SUCCESS)
7520             *event = tmp;
7521 
7522         return err;
7523     }
7524 
7525     cl_int enqueueWriteBuffer(
7526         const Buffer& buffer,
7527         cl_bool blocking,
7528         size_type offset,
7529         size_type size,
7530         const void* ptr,
7531         const vector<Event>* events = NULL,
7532         Event* event = NULL) const
7533     {
7534         cl_event tmp;
7535         cl_int err = detail::errHandler(
7536             ::clEnqueueWriteBuffer(
7537                 object_, buffer(), blocking, offset, size,
7538                 ptr,
7539                 (events != NULL) ? (cl_uint) events->size() : 0,
7540                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7541                 (event != NULL) ? &tmp : NULL),
7542                 __ENQUEUE_WRITE_BUFFER_ERR);
7543 
7544         if (event != NULL && err == CL_SUCCESS)
7545             *event = tmp;
7546 
7547         return err;
7548     }
7549 
7550     cl_int enqueueCopyBuffer(
7551         const Buffer& src,
7552         const Buffer& dst,
7553         size_type src_offset,
7554         size_type dst_offset,
7555         size_type size,
7556         const vector<Event>* events = NULL,
7557         Event* event = NULL) const
7558     {
7559         cl_event tmp;
7560         cl_int err = detail::errHandler(
7561             ::clEnqueueCopyBuffer(
7562                 object_, src(), dst(), src_offset, dst_offset, size,
7563                 (events != NULL) ? (cl_uint) events->size() : 0,
7564                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7565                 (event != NULL) ? &tmp : NULL),
7566             __ENQEUE_COPY_BUFFER_ERR);
7567 
7568         if (event != NULL && err == CL_SUCCESS)
7569             *event = tmp;
7570 
7571         return err;
7572     }
7573 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
7574     cl_int enqueueReadBufferRect(
7575         const Buffer& buffer,
7576         cl_bool blocking,
7577         const array<size_type, 3>& buffer_offset,
7578         const array<size_type, 3>& host_offset,
7579         const array<size_type, 3>& region,
7580         size_type buffer_row_pitch,
7581         size_type buffer_slice_pitch,
7582         size_type host_row_pitch,
7583         size_type host_slice_pitch,
7584         void *ptr,
7585         const vector<Event>* events = NULL,
7586         Event* event = NULL) const
7587     {
7588         cl_event tmp;
7589         cl_int err = detail::errHandler(
7590             ::clEnqueueReadBufferRect(
7591                 object_,
7592                 buffer(),
7593                 blocking,
7594                 buffer_offset.data(),
7595                 host_offset.data(),
7596                 region.data(),
7597                 buffer_row_pitch,
7598                 buffer_slice_pitch,
7599                 host_row_pitch,
7600                 host_slice_pitch,
7601                 ptr,
7602                 (events != NULL) ? (cl_uint) events->size() : 0,
7603                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7604                 (event != NULL) ? &tmp : NULL),
7605                 __ENQUEUE_READ_BUFFER_RECT_ERR);
7606 
7607         if (event != NULL && err == CL_SUCCESS)
7608             *event = tmp;
7609 
7610         return err;
7611     }
7612 
7613     cl_int enqueueWriteBufferRect(
7614         const Buffer& buffer,
7615         cl_bool blocking,
7616         const array<size_type, 3>& buffer_offset,
7617         const array<size_type, 3>& host_offset,
7618         const array<size_type, 3>& region,
7619         size_type buffer_row_pitch,
7620         size_type buffer_slice_pitch,
7621         size_type host_row_pitch,
7622         size_type host_slice_pitch,
7623         const void *ptr,
7624         const vector<Event>* events = NULL,
7625         Event* event = NULL) const
7626     {
7627         cl_event tmp;
7628         cl_int err = detail::errHandler(
7629             ::clEnqueueWriteBufferRect(
7630                 object_,
7631                 buffer(),
7632                 blocking,
7633                 buffer_offset.data(),
7634                 host_offset.data(),
7635                 region.data(),
7636                 buffer_row_pitch,
7637                 buffer_slice_pitch,
7638                 host_row_pitch,
7639                 host_slice_pitch,
7640                 ptr,
7641                 (events != NULL) ? (cl_uint) events->size() : 0,
7642                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7643                 (event != NULL) ? &tmp : NULL),
7644                 __ENQUEUE_WRITE_BUFFER_RECT_ERR);
7645 
7646         if (event != NULL && err == CL_SUCCESS)
7647             *event = tmp;
7648 
7649         return err;
7650     }
7651 
7652     cl_int enqueueCopyBufferRect(
7653         const Buffer& src,
7654         const Buffer& dst,
7655         const array<size_type, 3>& src_origin,
7656         const array<size_type, 3>& dst_origin,
7657         const array<size_type, 3>& region,
7658         size_type src_row_pitch,
7659         size_type src_slice_pitch,
7660         size_type dst_row_pitch,
7661         size_type dst_slice_pitch,
7662         const vector<Event>* events = NULL,
7663         Event* event = NULL) const
7664     {
7665         cl_event tmp;
7666         cl_int err = detail::errHandler(
7667             ::clEnqueueCopyBufferRect(
7668                 object_,
7669                 src(),
7670                 dst(),
7671                 src_origin.data(),
7672                 dst_origin.data(),
7673                 region.data(),
7674                 src_row_pitch,
7675                 src_slice_pitch,
7676                 dst_row_pitch,
7677                 dst_slice_pitch,
7678                 (events != NULL) ? (cl_uint) events->size() : 0,
7679                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7680                 (event != NULL) ? &tmp : NULL),
7681             __ENQEUE_COPY_BUFFER_RECT_ERR);
7682 
7683         if (event != NULL && err == CL_SUCCESS)
7684             *event = tmp;
7685 
7686         return err;
7687     }
7688 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
7689 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7690     /**
7691      * Enqueue a command to fill a buffer object with a pattern
7692      * of a given size. The pattern is specified as a vector type.
7693      * \tparam PatternType The datatype of the pattern field.
7694      *     The pattern type must be an accepted OpenCL data type.
7695      * \tparam offset Is the offset in bytes into the buffer at
7696      *     which to start filling. This must be a multiple of
7697      *     the pattern size.
7698      * \tparam size Is the size in bytes of the region to fill.
7699      *     This must be a multiple of the pattern size.
7700      */
7701     template<typename PatternType>
7702     cl_int enqueueFillBuffer(
7703         const Buffer& buffer,
7704         PatternType pattern,
7705         size_type offset,
7706         size_type size,
7707         const vector<Event>* events = NULL,
7708         Event* event = NULL) const
7709     {
7710         cl_event tmp;
7711         cl_int err = detail::errHandler(
7712             ::clEnqueueFillBuffer(
7713                 object_,
7714                 buffer(),
7715                 static_cast<void*>(&pattern),
7716                 sizeof(PatternType),
7717                 offset,
7718                 size,
7719                 (events != NULL) ? (cl_uint) events->size() : 0,
7720                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7721                 (event != NULL) ? &tmp : NULL),
7722                 __ENQUEUE_FILL_BUFFER_ERR);
7723 
7724         if (event != NULL && err == CL_SUCCESS)
7725             *event = tmp;
7726 
7727         return err;
7728     }
7729 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
7730 
7731     cl_int enqueueReadImage(
7732         const Image& image,
7733         cl_bool blocking,
7734         const array<size_type, 3>& origin,
7735         const array<size_type, 3>& region,
7736         size_type row_pitch,
7737         size_type slice_pitch,
7738         void* ptr,
7739         const vector<Event>* events = NULL,
7740         Event* event = NULL) const
7741     {
7742         cl_event tmp;
7743         cl_int err = detail::errHandler(
7744             ::clEnqueueReadImage(
7745                 object_,
7746                 image(),
7747                 blocking,
7748                 origin.data(),
7749                 region.data(),
7750                 row_pitch,
7751                 slice_pitch,
7752                 ptr,
7753                 (events != NULL) ? (cl_uint) events->size() : 0,
7754                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7755                 (event != NULL) ? &tmp : NULL),
7756             __ENQUEUE_READ_IMAGE_ERR);
7757 
7758         if (event != NULL && err == CL_SUCCESS)
7759             *event = tmp;
7760 
7761         return err;
7762     }
7763 
7764     cl_int enqueueWriteImage(
7765         const Image& image,
7766         cl_bool blocking,
7767         const array<size_type, 3>& origin,
7768         const array<size_type, 3>& region,
7769         size_type row_pitch,
7770         size_type slice_pitch,
7771         const void* ptr,
7772         const vector<Event>* events = NULL,
7773         Event* event = NULL) const
7774     {
7775         cl_event tmp;
7776         cl_int err = detail::errHandler(
7777             ::clEnqueueWriteImage(
7778                 object_,
7779                 image(),
7780                 blocking,
7781                 origin.data(),
7782                 region.data(),
7783                 row_pitch,
7784                 slice_pitch,
7785                 ptr,
7786                 (events != NULL) ? (cl_uint) events->size() : 0,
7787                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7788                 (event != NULL) ? &tmp : NULL),
7789             __ENQUEUE_WRITE_IMAGE_ERR);
7790 
7791         if (event != NULL && err == CL_SUCCESS)
7792             *event = tmp;
7793 
7794         return err;
7795     }
7796 
7797     cl_int enqueueCopyImage(
7798         const Image& src,
7799         const Image& dst,
7800         const array<size_type, 3>& src_origin,
7801         const array<size_type, 3>& dst_origin,
7802         const array<size_type, 3>& region,
7803         const vector<Event>* events = NULL,
7804         Event* event = NULL) const
7805     {
7806         cl_event tmp;
7807         cl_int err = detail::errHandler(
7808             ::clEnqueueCopyImage(
7809                 object_,
7810                 src(),
7811                 dst(),
7812                 src_origin.data(),
7813                 dst_origin.data(),
7814                 region.data(),
7815                 (events != NULL) ? (cl_uint) events->size() : 0,
7816                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7817                 (event != NULL) ? &tmp : NULL),
7818             __ENQUEUE_COPY_IMAGE_ERR);
7819 
7820         if (event != NULL && err == CL_SUCCESS)
7821             *event = tmp;
7822 
7823         return err;
7824     }
7825 
7826 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7827     /**
7828      * Enqueue a command to fill an image object with a specified color.
7829      * \param fillColor is the color to use to fill the image.
7830      *     This is a four component RGBA floating-point color value if
7831      *     the image channel data type is not an unnormalized signed or
7832      *     unsigned data type.
7833      */
7834     cl_int enqueueFillImage(
7835         const Image& image,
7836         cl_float4 fillColor,
7837         const array<size_type, 3>& origin,
7838         const array<size_type, 3>& region,
7839         const vector<Event>* events = NULL,
7840         Event* event = NULL) const
7841     {
7842         cl_event tmp;
7843         cl_int err = detail::errHandler(
7844             ::clEnqueueFillImage(
7845                 object_,
7846                 image(),
7847                 static_cast<void*>(&fillColor),
7848                 origin.data(),
7849                 region.data(),
7850                 (events != NULL) ? (cl_uint) events->size() : 0,
7851                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7852                 (event != NULL) ? &tmp : NULL),
7853                 __ENQUEUE_FILL_IMAGE_ERR);
7854 
7855         if (event != NULL && err == CL_SUCCESS)
7856             *event = tmp;
7857 
7858         return err;
7859     }
7860 
7861     /**
7862      * Enqueue a command to fill an image object with a specified color.
7863      * \param fillColor is the color to use to fill the image.
7864      *     This is a four component RGBA signed integer color value if
7865      *     the image channel data type is an unnormalized signed integer
7866      *     type.
7867      */
7868     cl_int enqueueFillImage(
7869         const Image& image,
7870         cl_int4 fillColor,
7871         const array<size_type, 3>& origin,
7872         const array<size_type, 3>& region,
7873         const vector<Event>* events = NULL,
7874         Event* event = NULL) const
7875     {
7876         cl_event tmp;
7877         cl_int err = detail::errHandler(
7878             ::clEnqueueFillImage(
7879                 object_,
7880                 image(),
7881                 static_cast<void*>(&fillColor),
7882                 origin.data(),
7883                 region.data(),
7884                 (events != NULL) ? (cl_uint) events->size() : 0,
7885                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7886                 (event != NULL) ? &tmp : NULL),
7887                 __ENQUEUE_FILL_IMAGE_ERR);
7888 
7889         if (event != NULL && err == CL_SUCCESS)
7890             *event = tmp;
7891 
7892         return err;
7893     }
7894 
7895     /**
7896      * Enqueue a command to fill an image object with a specified color.
7897      * \param fillColor is the color to use to fill the image.
7898      *     This is a four component RGBA unsigned integer color value if
7899      *     the image channel data type is an unnormalized unsigned integer
7900      *     type.
7901      */
7902     cl_int enqueueFillImage(
7903         const Image& image,
7904         cl_uint4 fillColor,
7905         const array<size_type, 3>& origin,
7906         const array<size_type, 3>& region,
7907         const vector<Event>* events = NULL,
7908         Event* event = NULL) const
7909     {
7910         cl_event tmp;
7911         cl_int err = detail::errHandler(
7912             ::clEnqueueFillImage(
7913                 object_,
7914                 image(),
7915                 static_cast<void*>(&fillColor),
7916                 origin.data(),
7917                 region.data(),
7918                 (events != NULL) ? (cl_uint) events->size() : 0,
7919                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7920                 (event != NULL) ? &tmp : NULL),
7921                 __ENQUEUE_FILL_IMAGE_ERR);
7922 
7923         if (event != NULL && err == CL_SUCCESS)
7924             *event = tmp;
7925 
7926         return err;
7927     }
7928 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
7929 
7930     cl_int enqueueCopyImageToBuffer(
7931         const Image& src,
7932         const Buffer& dst,
7933         const array<size_type, 3>& src_origin,
7934         const array<size_type, 3>& region,
7935         size_type dst_offset,
7936         const vector<Event>* events = NULL,
7937         Event* event = NULL) const
7938     {
7939         cl_event tmp;
7940         cl_int err = detail::errHandler(
7941             ::clEnqueueCopyImageToBuffer(
7942                 object_,
7943                 src(),
7944                 dst(),
7945                 src_origin.data(),
7946                 region.data(),
7947                 dst_offset,
7948                 (events != NULL) ? (cl_uint) events->size() : 0,
7949                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7950                 (event != NULL) ? &tmp : NULL),
7951             __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR);
7952 
7953         if (event != NULL && err == CL_SUCCESS)
7954             *event = tmp;
7955 
7956         return err;
7957     }
7958 
7959     cl_int enqueueCopyBufferToImage(
7960         const Buffer& src,
7961         const Image& dst,
7962         size_type src_offset,
7963         const array<size_type, 3>& dst_origin,
7964         const array<size_type, 3>& region,
7965         const vector<Event>* events = NULL,
7966         Event* event = NULL) const
7967     {
7968         cl_event tmp;
7969         cl_int err = detail::errHandler(
7970             ::clEnqueueCopyBufferToImage(
7971                 object_,
7972                 src(),
7973                 dst(),
7974                 src_offset,
7975                 dst_origin.data(),
7976                 region.data(),
7977                 (events != NULL) ? (cl_uint) events->size() : 0,
7978                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7979                 (event != NULL) ? &tmp : NULL),
7980             __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR);
7981 
7982         if (event != NULL && err == CL_SUCCESS)
7983             *event = tmp;
7984 
7985         return err;
7986     }
7987 
7988     void* enqueueMapBuffer(
7989         const Buffer& buffer,
7990         cl_bool blocking,
7991         cl_map_flags flags,
7992         size_type offset,
7993         size_type size,
7994         const vector<Event>* events = NULL,
7995         Event* event = NULL,
7996         cl_int* err = NULL) const
7997     {
7998         cl_event tmp;
7999         cl_int error;
8000         void * result = ::clEnqueueMapBuffer(
8001             object_, buffer(), blocking, flags, offset, size,
8002             (events != NULL) ? (cl_uint) events->size() : 0,
8003             (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8004             (event != NULL) ? &tmp : NULL,
8005             &error);
8006 
8007         detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8008         if (err != NULL) {
8009             *err = error;
8010         }
8011         if (event != NULL && error == CL_SUCCESS)
8012             *event = tmp;
8013 
8014         return result;
8015     }
8016 
8017     void* enqueueMapImage(
8018         const Image& buffer,
8019         cl_bool blocking,
8020         cl_map_flags flags,
8021         const array<size_type, 3>& origin,
8022         const array<size_type, 3>& region,
8023         size_type * row_pitch,
8024         size_type * slice_pitch,
8025         const vector<Event>* events = NULL,
8026         Event* event = NULL,
8027         cl_int* err = NULL) const
8028     {
8029         cl_event tmp;
8030         cl_int error;
8031         void * result = ::clEnqueueMapImage(
8032             object_, buffer(), blocking, flags,
8033             origin.data(),
8034             region.data(),
8035             row_pitch, slice_pitch,
8036             (events != NULL) ? (cl_uint) events->size() : 0,
8037             (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8038             (event != NULL) ? &tmp : NULL,
8039             &error);
8040 
8041         detail::errHandler(error, __ENQUEUE_MAP_IMAGE_ERR);
8042         if (err != NULL) {
8043               *err = error;
8044         }
8045         if (event != NULL && error == CL_SUCCESS)
8046             *event = tmp;
8047         return result;
8048     }
8049 
8050 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8051     /**
8052      * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
8053      * This variant takes a raw SVM pointer.
8054      */
8055     template<typename T>
8056     cl_int enqueueMapSVM(
8057         T* ptr,
8058         cl_bool blocking,
8059         cl_map_flags flags,
8060         size_type size,
8061         const vector<Event>* events = NULL,
8062         Event* event = NULL) const
8063     {
8064         cl_event tmp;
8065         cl_int err = detail::errHandler(::clEnqueueSVMMap(
8066             object_, blocking, flags, static_cast<void*>(ptr), size,
8067             (events != NULL) ? (cl_uint)events->size() : 0,
8068             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8069             (event != NULL) ? &tmp : NULL),
8070             __ENQUEUE_MAP_BUFFER_ERR);
8071 
8072         if (event != NULL && err == CL_SUCCESS)
8073             *event = tmp;
8074 
8075         return err;
8076     }
8077 
8078 
8079     /**
8080      * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
8081      * This variant takes a cl::pointer instance.
8082      */
8083     template<typename T, class D>
8084     cl_int enqueueMapSVM(
8085         cl::pointer<T, D> &ptr,
8086         cl_bool blocking,
8087         cl_map_flags flags,
8088         size_type size,
8089         const vector<Event>* events = NULL,
8090         Event* event = NULL) const
8091     {
8092         cl_event tmp;
8093         cl_int err = detail::errHandler(::clEnqueueSVMMap(
8094             object_, blocking, flags, static_cast<void*>(ptr.get()), size,
8095             (events != NULL) ? (cl_uint)events->size() : 0,
8096             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8097             (event != NULL) ? &tmp : NULL),
8098             __ENQUEUE_MAP_BUFFER_ERR);
8099 
8100         if (event != NULL && err == CL_SUCCESS)
8101             *event = tmp;
8102 
8103         return err;
8104     }
8105 
8106     /**
8107      * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
8108      * This variant takes a cl::vector instance.
8109      */
8110     template<typename T, class Alloc>
8111     cl_int enqueueMapSVM(
8112         cl::vector<T, Alloc> &container,
8113         cl_bool blocking,
8114         cl_map_flags flags,
8115         const vector<Event>* events = NULL,
8116         Event* event = NULL) const
8117     {
8118         cl_event tmp;
8119         cl_int err = detail::errHandler(::clEnqueueSVMMap(
8120             object_, blocking, flags, static_cast<void*>(container.data()), container.size(),
8121             (events != NULL) ? (cl_uint)events->size() : 0,
8122             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8123             (event != NULL) ? &tmp : NULL),
8124             __ENQUEUE_MAP_BUFFER_ERR);
8125 
8126         if (event != NULL && err == CL_SUCCESS)
8127             *event = tmp;
8128 
8129         return err;
8130     }
8131 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8132 
8133     cl_int enqueueUnmapMemObject(
8134         const Memory& memory,
8135         void* mapped_ptr,
8136         const vector<Event>* events = NULL,
8137         Event* event = NULL) const
8138     {
8139         cl_event tmp;
8140         cl_int err = detail::errHandler(
8141             ::clEnqueueUnmapMemObject(
8142                 object_, memory(), mapped_ptr,
8143                 (events != NULL) ? (cl_uint) events->size() : 0,
8144                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8145                 (event != NULL) ? &tmp : NULL),
8146             __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8147 
8148         if (event != NULL && err == CL_SUCCESS)
8149             *event = tmp;
8150 
8151         return err;
8152     }
8153 
8154 
8155 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8156     /**
8157      * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
8158      * This variant takes a raw SVM pointer.
8159      */
8160     template<typename T>
8161     cl_int enqueueUnmapSVM(
8162         T* ptr,
8163         const vector<Event>* events = NULL,
8164         Event* event = NULL) const
8165     {
8166         cl_event tmp;
8167         cl_int err = detail::errHandler(
8168             ::clEnqueueSVMUnmap(
8169             object_, static_cast<void*>(ptr),
8170             (events != NULL) ? (cl_uint)events->size() : 0,
8171             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8172             (event != NULL) ? &tmp : NULL),
8173             __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8174 
8175         if (event != NULL && err == CL_SUCCESS)
8176             *event = tmp;
8177 
8178         return err;
8179     }
8180 
8181     /**
8182      * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
8183      * This variant takes a cl::pointer instance.
8184      */
8185     template<typename T, class D>
8186     cl_int enqueueUnmapSVM(
8187         cl::pointer<T, D> &ptr,
8188         const vector<Event>* events = NULL,
8189         Event* event = NULL) const
8190     {
8191         cl_event tmp;
8192         cl_int err = detail::errHandler(
8193             ::clEnqueueSVMUnmap(
8194             object_, static_cast<void*>(ptr.get()),
8195             (events != NULL) ? (cl_uint)events->size() : 0,
8196             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8197             (event != NULL) ? &tmp : NULL),
8198             __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8199 
8200         if (event != NULL && err == CL_SUCCESS)
8201             *event = tmp;
8202 
8203         return err;
8204     }
8205 
8206     /**
8207      * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
8208      * This variant takes a cl::vector instance.
8209      */
8210     template<typename T, class Alloc>
8211     cl_int enqueueUnmapSVM(
8212         cl::vector<T, Alloc> &container,
8213         const vector<Event>* events = NULL,
8214         Event* event = NULL) const
8215     {
8216         cl_event tmp;
8217         cl_int err = detail::errHandler(
8218             ::clEnqueueSVMUnmap(
8219             object_, static_cast<void*>(container.data()),
8220             (events != NULL) ? (cl_uint)events->size() : 0,
8221             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8222             (event != NULL) ? &tmp : NULL),
8223             __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8224 
8225         if (event != NULL && err == CL_SUCCESS)
8226             *event = tmp;
8227 
8228         return err;
8229     }
8230 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8231 
8232 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
8233     /**
8234      * Enqueues a marker command which waits for either a list of events to complete,
8235      * or all previously enqueued commands to complete.
8236      *
8237      * Enqueues a marker command which waits for either a list of events to complete,
8238      * or if the list is empty it waits for all commands previously enqueued in command_queue
8239      * to complete before it completes. This command returns an event which can be waited on,
8240      * i.e. this event can be waited on to insure that all events either in the event_wait_list
8241      * or all previously enqueued commands, queued before this command to command_queue,
8242      * have completed.
8243      */
8244     cl_int enqueueMarkerWithWaitList(
8245         const vector<Event> *events = 0,
8246         Event *event = 0) const
8247     {
8248         cl_event tmp;
8249         cl_int err = detail::errHandler(
8250             ::clEnqueueMarkerWithWaitList(
8251                 object_,
8252                 (events != NULL) ? (cl_uint) events->size() : 0,
8253                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8254                 (event != NULL) ? &tmp : NULL),
8255             __ENQUEUE_MARKER_WAIT_LIST_ERR);
8256 
8257         if (event != NULL && err == CL_SUCCESS)
8258             *event = tmp;
8259 
8260         return err;
8261     }
8262 
8263     /**
8264      * A synchronization point that enqueues a barrier operation.
8265      *
8266      * Enqueues a barrier command which waits for either a list of events to complete,
8267      * or if the list is empty it waits for all commands previously enqueued in command_queue
8268      * to complete before it completes. This command blocks command execution, that is, any
8269      * following commands enqueued after it do not execute until it completes. This command
8270      * returns an event which can be waited on, i.e. this event can be waited on to insure that
8271      * all events either in the event_wait_list or all previously enqueued commands, queued
8272      * before this command to command_queue, have completed.
8273      */
8274     cl_int enqueueBarrierWithWaitList(
8275         const vector<Event> *events = 0,
8276         Event *event = 0) const
8277     {
8278         cl_event tmp;
8279         cl_int err = detail::errHandler(
8280             ::clEnqueueBarrierWithWaitList(
8281                 object_,
8282                 (events != NULL) ? (cl_uint) events->size() : 0,
8283                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8284                 (event != NULL) ? &tmp : NULL),
8285             __ENQUEUE_BARRIER_WAIT_LIST_ERR);
8286 
8287         if (event != NULL && err == CL_SUCCESS)
8288             *event = tmp;
8289 
8290         return err;
8291     }
8292 
8293     /**
8294      * Enqueues a command to indicate with which device a set of memory objects
8295      * should be associated.
8296      */
8297     cl_int enqueueMigrateMemObjects(
8298         const vector<Memory> &memObjects,
8299         cl_mem_migration_flags flags,
8300         const vector<Event>* events = NULL,
8301         Event* event = NULL
8302         ) const
8303     {
8304         cl_event tmp;
8305 
8306         vector<cl_mem> localMemObjects(memObjects.size());
8307 
8308         for( int i = 0; i < (int)memObjects.size(); ++i ) {
8309             localMemObjects[i] = memObjects[i]();
8310         }
8311 
8312         cl_int err = detail::errHandler(
8313             ::clEnqueueMigrateMemObjects(
8314                 object_,
8315                 (cl_uint)memObjects.size(),
8316                 localMemObjects.data(),
8317                 flags,
8318                 (events != NULL) ? (cl_uint) events->size() : 0,
8319                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8320                 (event != NULL) ? &tmp : NULL),
8321             __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8322 
8323         if (event != NULL && err == CL_SUCCESS)
8324             *event = tmp;
8325 
8326         return err;
8327     }
8328 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
8329 
8330 
8331 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
8332     /**
8333      * Enqueues a command that will allow the host associate ranges within a set of
8334      * SVM allocations with a device.
8335      * @param sizes - The length from each pointer to migrate.
8336      */
8337     template<typename T>
8338     cl_int enqueueMigrateSVM(
8339         const cl::vector<T*> &svmRawPointers,
8340         const cl::vector<size_type> &sizes,
8341         cl_mem_migration_flags flags = 0,
8342         const vector<Event>* events = NULL,
8343         Event* event = NULL) const
8344     {
8345         cl_event tmp;
8346         cl_int err = detail::errHandler(::clEnqueueSVMMigrateMem(
8347             object_,
8348             svmRawPointers.size(), static_cast<void**>(svmRawPointers.data()),
8349             sizes.data(), // array of sizes not passed
8350             flags,
8351             (events != NULL) ? (cl_uint)events->size() : 0,
8352             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8353             (event != NULL) ? &tmp : NULL),
8354             __ENQUEUE_MIGRATE_SVM_ERR);
8355 
8356         if (event != NULL && err == CL_SUCCESS)
8357             *event = tmp;
8358 
8359         return err;
8360     }
8361 
8362     /**
8363      * Enqueues a command that will allow the host associate a set of SVM allocations with
8364      * a device.
8365      */
8366     template<typename T>
8367     cl_int enqueueMigrateSVM(
8368         const cl::vector<T*> &svmRawPointers,
8369         cl_mem_migration_flags flags = 0,
8370         const vector<Event>* events = NULL,
8371         Event* event = NULL) const
8372     {
8373         return enqueueMigrateSVM(svmRawPointers, cl::vector<size_type>(svmRawPointers.size()), flags, events, event);
8374     }
8375 
8376 
8377     /**
8378      * Enqueues a command that will allow the host associate ranges within a set of
8379      * SVM allocations with a device.
8380      * @param sizes - The length from each pointer to migrate.
8381      */
8382     template<typename T, class D>
8383     cl_int enqueueMigrateSVM(
8384         const cl::vector<cl::pointer<T, D>> &svmPointers,
8385         const cl::vector<size_type> &sizes,
8386         cl_mem_migration_flags flags = 0,
8387         const vector<Event>* events = NULL,
8388         Event* event = NULL) const
8389     {
8390         cl::vector<void*> svmRawPointers;
8391         svmRawPointers.reserve(svmPointers.size());
8392         for (auto p : svmPointers) {
8393             svmRawPointers.push_back(static_cast<void*>(p.get()));
8394         }
8395 
8396         return enqueueMigrateSVM(svmRawPointers, sizes, flags, events, event);
8397     }
8398 
8399 
8400     /**
8401      * Enqueues a command that will allow the host associate a set of SVM allocations with
8402      * a device.
8403      */
8404     template<typename T, class D>
8405     cl_int enqueueMigrateSVM(
8406         const cl::vector<cl::pointer<T, D>> &svmPointers,
8407         cl_mem_migration_flags flags = 0,
8408         const vector<Event>* events = NULL,
8409         Event* event = NULL) const
8410     {
8411         return enqueueMigrateSVM(svmPointers, cl::vector<size_type>(svmPointers.size()), flags, events, event);
8412     }
8413 
8414     /**
8415      * Enqueues a command that will allow the host associate ranges within a set of
8416      * SVM allocations with a device.
8417      * @param sizes - The length from the beginning of each container to migrate.
8418      */
8419     template<typename T, class Alloc>
8420     cl_int enqueueMigrateSVM(
8421         const cl::vector<cl::vector<T, Alloc>> &svmContainers,
8422         const cl::vector<size_type> &sizes,
8423         cl_mem_migration_flags flags = 0,
8424         const vector<Event>* events = NULL,
8425         Event* event = NULL) const
8426     {
8427         cl::vector<void*> svmRawPointers;
8428         svmRawPointers.reserve(svmContainers.size());
8429         for (auto p : svmContainers) {
8430             svmRawPointers.push_back(static_cast<void*>(p.data()));
8431         }
8432 
8433         return enqueueMigrateSVM(svmRawPointers, sizes, flags, events, event);
8434     }
8435 
8436     /**
8437      * Enqueues a command that will allow the host associate a set of SVM allocations with
8438      * a device.
8439      */
8440     template<typename T, class Alloc>
8441     cl_int enqueueMigrateSVM(
8442         const cl::vector<cl::vector<T, Alloc>> &svmContainers,
8443         cl_mem_migration_flags flags = 0,
8444         const vector<Event>* events = NULL,
8445         Event* event = NULL) const
8446     {
8447         return enqueueMigrateSVM(svmContainers, cl::vector<size_type>(svmContainers.size()), flags, events, event);
8448     }
8449 
8450 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
8451 
8452     cl_int enqueueNDRangeKernel(
8453         const Kernel& kernel,
8454         const NDRange& offset,
8455         const NDRange& global,
8456         const NDRange& local = NullRange,
8457         const vector<Event>* events = NULL,
8458         Event* event = NULL) const
8459     {
8460         cl_event tmp;
8461         cl_int err = detail::errHandler(
8462             ::clEnqueueNDRangeKernel(
8463                 object_, kernel(), (cl_uint) global.dimensions(),
8464                 offset.dimensions() != 0 ? (const size_type*) offset : NULL,
8465                 (const size_type*) global,
8466                 local.dimensions() != 0 ? (const size_type*) local : NULL,
8467                 (events != NULL) ? (cl_uint) events->size() : 0,
8468                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8469                 (event != NULL) ? &tmp : NULL),
8470             __ENQUEUE_NDRANGE_KERNEL_ERR);
8471 
8472         if (event != NULL && err == CL_SUCCESS)
8473             *event = tmp;
8474 
8475         return err;
8476     }
8477 
8478 #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
8479     CL_EXT_PREFIX__VERSION_1_2_DEPRECATED cl_int enqueueTask(
8480         const Kernel& kernel,
8481         const vector<Event>* events = NULL,
8482         Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED
8483     {
8484         cl_event tmp;
8485         cl_int err = detail::errHandler(
8486             ::clEnqueueTask(
8487                 object_, kernel(),
8488                 (events != NULL) ? (cl_uint) events->size() : 0,
8489                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8490                 (event != NULL) ? &tmp : NULL),
8491             __ENQUEUE_TASK_ERR);
8492 
8493         if (event != NULL && err == CL_SUCCESS)
8494             *event = tmp;
8495 
8496         return err;
8497     }
8498 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
8499 
8500     cl_int enqueueNativeKernel(
8501         void (CL_CALLBACK *userFptr)(void *),
8502         std::pair<void*, size_type> args,
8503         const vector<Memory>* mem_objects = NULL,
8504         const vector<const void*>* mem_locs = NULL,
8505         const vector<Event>* events = NULL,
8506         Event* event = NULL) const
8507     {
8508         size_type elements = 0;
8509         if (mem_objects != NULL) {
8510             elements = mem_objects->size();
8511         }
8512         vector<cl_mem> mems(elements);
8513         for (unsigned int i = 0; i < elements; i++) {
8514             mems[i] = ((*mem_objects)[i])();
8515         }
8516 
8517         cl_event tmp;
8518         cl_int err = detail::errHandler(
8519             ::clEnqueueNativeKernel(
8520                 object_, userFptr, args.first, args.second,
8521                 (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8522                 mems.data(),
8523                 (mem_locs != NULL && mem_locs->size() > 0) ? (const void **) &mem_locs->front() : NULL,
8524                 (events != NULL) ? (cl_uint) events->size() : 0,
8525                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8526                 (event != NULL) ? &tmp : NULL),
8527             __ENQUEUE_NATIVE_KERNEL);
8528 
8529         if (event != NULL && err == CL_SUCCESS)
8530             *event = tmp;
8531 
8532         return err;
8533     }
8534 
8535 /**
8536  * Deprecated APIs for 1.2
8537  */
8538 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
8539     CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
8540     cl_int enqueueMarker(Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8541     {
8542         cl_event tmp;
8543         cl_int err = detail::errHandler(
8544             ::clEnqueueMarker(
8545                 object_,
8546                 (event != NULL) ? &tmp : NULL),
8547             __ENQUEUE_MARKER_ERR);
8548 
8549         if (event != NULL && err == CL_SUCCESS)
8550             *event = tmp;
8551 
8552         return err;
8553     }
8554 
8555     CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
8556     cl_int enqueueWaitForEvents(const vector<Event>& events) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8557     {
8558         return detail::errHandler(
8559             ::clEnqueueWaitForEvents(
8560                 object_,
8561                 (cl_uint) events.size(),
8562                 events.size() > 0 ? (const cl_event*) &events.front() : NULL),
8563             __ENQUEUE_WAIT_FOR_EVENTS_ERR);
8564     }
8565 #endif // defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
8566 
8567     cl_int enqueueAcquireGLObjects(
8568          const vector<Memory>* mem_objects = NULL,
8569          const vector<Event>* events = NULL,
8570          Event* event = NULL) const
8571      {
8572         cl_event tmp;
8573         cl_int err = detail::errHandler(
8574              ::clEnqueueAcquireGLObjects(
8575                  object_,
8576                  (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8577                  (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8578                  (events != NULL) ? (cl_uint) events->size() : 0,
8579                  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8580                  (event != NULL) ? &tmp : NULL),
8581              __ENQUEUE_ACQUIRE_GL_ERR);
8582 
8583         if (event != NULL && err == CL_SUCCESS)
8584             *event = tmp;
8585 
8586         return err;
8587      }
8588 
8589     cl_int enqueueReleaseGLObjects(
8590          const vector<Memory>* mem_objects = NULL,
8591          const vector<Event>* events = NULL,
8592          Event* event = NULL) const
8593      {
8594         cl_event tmp;
8595         cl_int err = detail::errHandler(
8596              ::clEnqueueReleaseGLObjects(
8597                  object_,
8598                  (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8599                  (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8600                  (events != NULL) ? (cl_uint) events->size() : 0,
8601                  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8602                  (event != NULL) ? &tmp : NULL),
8603              __ENQUEUE_RELEASE_GL_ERR);
8604 
8605         if (event != NULL && err == CL_SUCCESS)
8606             *event = tmp;
8607 
8608         return err;
8609      }
8610 
8611 #if defined (CL_HPP_USE_DX_INTEROP)
8612 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueAcquireD3D10ObjectsKHR)(
8613     cl_command_queue command_queue, cl_uint num_objects,
8614     const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
8615     const cl_event* event_wait_list, cl_event* event);
8616 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueReleaseD3D10ObjectsKHR)(
8617     cl_command_queue command_queue, cl_uint num_objects,
8618     const cl_mem* mem_objects,  cl_uint num_events_in_wait_list,
8619     const cl_event* event_wait_list, cl_event* event);
8620 
8621     cl_int enqueueAcquireD3D10Objects(
8622          const vector<Memory>* mem_objects = NULL,
8623          const vector<Event>* events = NULL,
8624          Event* event = NULL) const
8625     {
8626         static PFN_clEnqueueAcquireD3D10ObjectsKHR pfn_clEnqueueAcquireD3D10ObjectsKHR = NULL;
8627 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
8628         cl_context context = getInfo<CL_QUEUE_CONTEXT>();
8629         cl::Device device(getInfo<CL_QUEUE_DEVICE>());
8630         cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
8631         CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueAcquireD3D10ObjectsKHR);
8632 #endif
8633 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8634         CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueAcquireD3D10ObjectsKHR);
8635 #endif
8636 
8637         cl_event tmp;
8638         cl_int err = detail::errHandler(
8639              pfn_clEnqueueAcquireD3D10ObjectsKHR(
8640                  object_,
8641                  (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8642                  (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8643                  (events != NULL) ? (cl_uint) events->size() : 0,
8644                  (events != NULL) ? (cl_event*) &events->front() : NULL,
8645                  (event != NULL) ? &tmp : NULL),
8646              __ENQUEUE_ACQUIRE_GL_ERR);
8647 
8648         if (event != NULL && err == CL_SUCCESS)
8649             *event = tmp;
8650 
8651         return err;
8652      }
8653 
8654     cl_int enqueueReleaseD3D10Objects(
8655          const vector<Memory>* mem_objects = NULL,
8656          const vector<Event>* events = NULL,
8657          Event* event = NULL) const
8658     {
8659         static PFN_clEnqueueReleaseD3D10ObjectsKHR pfn_clEnqueueReleaseD3D10ObjectsKHR = NULL;
8660 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
8661         cl_context context = getInfo<CL_QUEUE_CONTEXT>();
8662         cl::Device device(getInfo<CL_QUEUE_DEVICE>());
8663         cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
8664         CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueReleaseD3D10ObjectsKHR);
8665 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
8666 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8667         CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueReleaseD3D10ObjectsKHR);
8668 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
8669 
8670         cl_event tmp;
8671         cl_int err = detail::errHandler(
8672             pfn_clEnqueueReleaseD3D10ObjectsKHR(
8673                 object_,
8674                 (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8675                 (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8676                 (events != NULL) ? (cl_uint) events->size() : 0,
8677                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8678                 (event != NULL) ? &tmp : NULL),
8679             __ENQUEUE_RELEASE_GL_ERR);
8680 
8681         if (event != NULL && err == CL_SUCCESS)
8682             *event = tmp;
8683 
8684         return err;
8685     }
8686 #endif
8687 
8688 /**
8689  * Deprecated APIs for 1.2
8690  */
8691 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
8692     CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
8693     cl_int enqueueBarrier() const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8694     {
8695         return detail::errHandler(
8696             ::clEnqueueBarrier(object_),
8697             __ENQUEUE_BARRIER_ERR);
8698     }
8699 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
8700 
8701     cl_int flush() const
8702     {
8703         return detail::errHandler(::clFlush(object_), __FLUSH_ERR);
8704     }
8705 
8706     cl_int finish() const
8707     {
8708         return detail::errHandler(::clFinish(object_), __FINISH_ERR);
8709     }
8710 }; // CommandQueue
8711 
8712 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag CommandQueue::default_initialized_;
8713 CL_HPP_DEFINE_STATIC_MEMBER_ CommandQueue CommandQueue::default_;
8714 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int CommandQueue::default_error_ = CL_SUCCESS;
8715 
8716 
8717 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8718 enum class DeviceQueueProperties : cl_command_queue_properties
8719 {
8720     None = 0,
8721     Profiling = CL_QUEUE_PROFILING_ENABLE,
8722 };
8723 
8724 inline DeviceQueueProperties operator|(DeviceQueueProperties lhs, DeviceQueueProperties rhs)
8725 {
8726     return static_cast<DeviceQueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
8727 }
8728 
8729 /*! \class DeviceCommandQueue
8730  * \brief DeviceCommandQueue interface for device cl_command_queues.
8731  */
8732 class DeviceCommandQueue : public detail::Wrapper<cl_command_queue>
8733 {
8734 public:
8735 
8736     /*!
8737      * Trivial empty constructor to create a null queue.
8738      */
8739     DeviceCommandQueue() { }
8740 
8741     /*!
8742      * Default construct device command queue on default context and device
8743      */
8744     DeviceCommandQueue(DeviceQueueProperties properties, cl_int* err = NULL)
8745     {
8746         cl_int error;
8747         cl::Context context = cl::Context::getDefault();
8748         cl::Device device = cl::Device::getDefault();
8749 
8750         cl_command_queue_properties mergedProperties =
8751             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8752 
8753         cl_queue_properties queue_properties[] = {
8754             CL_QUEUE_PROPERTIES, mergedProperties, 0 };
8755         object_ = ::clCreateCommandQueueWithProperties(
8756             context(), device(), queue_properties, &error);
8757 
8758         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8759         if (err != NULL) {
8760             *err = error;
8761         }
8762     }
8763 
8764     /*!
8765      * Create a device command queue for a specified device in the passed context.
8766      */
8767     DeviceCommandQueue(
8768         const Context& context,
8769         const Device& device,
8770         DeviceQueueProperties properties = DeviceQueueProperties::None,
8771         cl_int* err = NULL)
8772     {
8773         cl_int error;
8774 
8775         cl_command_queue_properties mergedProperties =
8776             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8777         cl_queue_properties queue_properties[] = {
8778             CL_QUEUE_PROPERTIES, mergedProperties, 0 };
8779         object_ = ::clCreateCommandQueueWithProperties(
8780             context(), device(), queue_properties, &error);
8781 
8782         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8783         if (err != NULL) {
8784             *err = error;
8785         }
8786     }
8787 
8788     /*!
8789      * Create a device command queue for a specified device in the passed context.
8790      */
8791     DeviceCommandQueue(
8792         const Context& context,
8793         const Device& device,
8794         cl_uint queueSize,
8795         DeviceQueueProperties properties = DeviceQueueProperties::None,
8796         cl_int* err = NULL)
8797     {
8798         cl_int error;
8799 
8800         cl_command_queue_properties mergedProperties =
8801             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8802         cl_queue_properties queue_properties[] = {
8803             CL_QUEUE_PROPERTIES, mergedProperties,
8804             CL_QUEUE_SIZE, queueSize,
8805             0 };
8806         object_ = ::clCreateCommandQueueWithProperties(
8807             context(), device(), queue_properties, &error);
8808 
8809         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8810         if (err != NULL) {
8811             *err = error;
8812         }
8813     }
8814 
8815     /*! \brief Constructor from cl_command_queue - takes ownership.
8816     *
8817     * \param retainObject will cause the constructor to retain its cl object.
8818     *                     Defaults to false to maintain compatibility with
8819     *                     earlier versions.
8820     */
8821     explicit DeviceCommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
8822         detail::Wrapper<cl_type>(commandQueue, retainObject) { }
8823 
8824     DeviceCommandQueue& operator = (const cl_command_queue& rhs)
8825     {
8826         detail::Wrapper<cl_type>::operator=(rhs);
8827         return *this;
8828     }
8829 
8830     /*! \brief Copy constructor to forward copy to the superclass correctly.
8831      * Required for MSVC.
8832      */
8833     DeviceCommandQueue(const DeviceCommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}
8834 
8835     /*! \brief Copy assignment to forward copy to the superclass correctly.
8836      * Required for MSVC.
8837      */
8838     DeviceCommandQueue& operator = (const DeviceCommandQueue &queue)
8839     {
8840         detail::Wrapper<cl_type>::operator=(queue);
8841         return *this;
8842     }
8843 
8844     /*! \brief Move constructor to forward move to the superclass correctly.
8845      * Required for MSVC.
8846      */
8847     DeviceCommandQueue(DeviceCommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}
8848 
8849     /*! \brief Move assignment to forward move to the superclass correctly.
8850      * Required for MSVC.
8851      */
8852     DeviceCommandQueue& operator = (DeviceCommandQueue &&queue)
8853     {
8854         detail::Wrapper<cl_type>::operator=(std::move(queue));
8855         return *this;
8856     }
8857 
8858     template <typename T>
8859     cl_int getInfo(cl_command_queue_info name, T* param) const
8860     {
8861         return detail::errHandler(
8862             detail::getInfo(
8863             &::clGetCommandQueueInfo, object_, name, param),
8864             __GET_COMMAND_QUEUE_INFO_ERR);
8865     }
8866 
8867     template <cl_int name> typename
8868         detail::param_traits<detail::cl_command_queue_info, name>::param_type
8869         getInfo(cl_int* err = NULL) const
8870     {
8871         typename detail::param_traits<
8872             detail::cl_command_queue_info, name>::param_type param;
8873         cl_int result = getInfo(name, &param);
8874         if (err != NULL) {
8875             *err = result;
8876         }
8877         return param;
8878     }
8879 
8880     /*!
8881      * Create a new default device command queue for the default device,
8882      * in the default context and of the default size.
8883      * If there is already a default queue for the specified device this
8884      * function will return the pre-existing queue.
8885      */
8886     static DeviceCommandQueue makeDefault(
8887         cl_int *err = nullptr)
8888     {
8889         cl_int error;
8890         cl::Context context = cl::Context::getDefault();
8891         cl::Device device = cl::Device::getDefault();
8892 
8893         cl_command_queue_properties properties =
8894             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8895         cl_queue_properties queue_properties[] = {
8896             CL_QUEUE_PROPERTIES, properties,
8897             0 };
8898         DeviceCommandQueue deviceQueue(
8899             ::clCreateCommandQueueWithProperties(
8900             context(), device(), queue_properties, &error));
8901 
8902         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8903         if (err != NULL) {
8904             *err = error;
8905         }
8906 
8907         return deviceQueue;
8908     }
8909 
8910     /*!
8911      * Create a new default device command queue for the specified device
8912      * and of the default size.
8913      * If there is already a default queue for the specified device this
8914      * function will return the pre-existing queue.
8915      */
8916     static DeviceCommandQueue makeDefault(
8917         const Context &context, const Device &device, cl_int *err = nullptr)
8918     {
8919         cl_int error;
8920 
8921         cl_command_queue_properties properties =
8922             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8923         cl_queue_properties queue_properties[] = {
8924             CL_QUEUE_PROPERTIES, properties,
8925             0 };
8926         DeviceCommandQueue deviceQueue(
8927             ::clCreateCommandQueueWithProperties(
8928             context(), device(), queue_properties, &error));
8929 
8930         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8931         if (err != NULL) {
8932             *err = error;
8933         }
8934 
8935         return deviceQueue;
8936     }
8937 
8938     /*!
8939      * Create a new default device command queue for the specified device
8940      * and of the requested size in bytes.
8941      * If there is already a default queue for the specified device this
8942      * function will return the pre-existing queue.
8943      */
8944     static DeviceCommandQueue makeDefault(
8945         const Context &context, const Device &device, cl_uint queueSize, cl_int *err = nullptr)
8946     {
8947         cl_int error;
8948 
8949         cl_command_queue_properties properties =
8950             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8951         cl_queue_properties queue_properties[] = {
8952             CL_QUEUE_PROPERTIES, properties,
8953             CL_QUEUE_SIZE, queueSize,
8954             0 };
8955         DeviceCommandQueue deviceQueue(
8956             ::clCreateCommandQueueWithProperties(
8957                 context(), device(), queue_properties, &error));
8958 
8959         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8960         if (err != NULL) {
8961             *err = error;
8962         }
8963 
8964         return deviceQueue;
8965     }
8966 
8967 
8968 
8969 #if CL_HPP_TARGET_OPENCL_VERSION >= 210
8970     /*!
8971      * Modify the default device command queue to be used for subsequent kernels.
8972      * This can update the default command queue for a device repeatedly to account
8973      * for kernels that rely on the default.
8974      * @return updated default device command queue.
8975      */
8976     static DeviceCommandQueue updateDefault(const Context &context, const Device &device, const DeviceCommandQueue &default_queue, cl_int *err = nullptr)
8977     {
8978         cl_int error;
8979         error = clSetDefaultDeviceCommandQueue(context.get(), device.get(), default_queue.get());
8980 
8981         detail::errHandler(error, __SET_DEFAULT_DEVICE_COMMAND_QUEUE_ERR);
8982         if (err != NULL) {
8983             *err = error;
8984         }
8985         return default_queue;
8986     }
8987 
8988     /*!
8989      * Return the current default command queue for the specified command queue
8990      */
8991     static DeviceCommandQueue getDefault(const CommandQueue &queue, cl_int * err = NULL)
8992     {
8993         return queue.getInfo<CL_QUEUE_DEVICE_DEFAULT>(err);
8994     }
8995 
8996 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
8997 }; // DeviceCommandQueue
8998 
8999 namespace detail
9000 {
9001     // Specialization for device command queue
9002     template <>
9003     struct KernelArgumentHandler<cl::DeviceCommandQueue, void>
9004     {
9005         static size_type size(const cl::DeviceCommandQueue&) { return sizeof(cl_command_queue); }
9006         static const cl_command_queue* ptr(const cl::DeviceCommandQueue& value) { return &(value()); }
9007     };
9008 } // namespace detail
9009 
9010 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9011 
9012 
9013 template< typename IteratorType >
9014 Buffer::Buffer(
9015     const Context &context,
9016     IteratorType startIterator,
9017     IteratorType endIterator,
9018     bool readOnly,
9019     bool useHostPtr,
9020     cl_int* err)
9021 {
9022     typedef typename std::iterator_traits<IteratorType>::value_type DataType;
9023     cl_int error;
9024 
9025     cl_mem_flags flags = 0;
9026     if( readOnly ) {
9027         flags |= CL_MEM_READ_ONLY;
9028     }
9029     else {
9030         flags |= CL_MEM_READ_WRITE;
9031     }
9032     if( useHostPtr ) {
9033         flags |= CL_MEM_USE_HOST_PTR;
9034     }
9035 
9036     size_type size = sizeof(DataType)*(endIterator - startIterator);
9037 
9038     if( useHostPtr ) {
9039         object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
9040     } else {
9041         object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
9042     }
9043 
9044     detail::errHandler(error, __CREATE_BUFFER_ERR);
9045     if (err != NULL) {
9046         *err = error;
9047     }
9048 
9049     if( !useHostPtr ) {
9050         CommandQueue queue(context, 0, &error);
9051         detail::errHandler(error, __CREATE_BUFFER_ERR);
9052         if (err != NULL) {
9053             *err = error;
9054         }
9055 
9056         error = cl::copy(queue, startIterator, endIterator, *this);
9057         detail::errHandler(error, __CREATE_BUFFER_ERR);
9058         if (err != NULL) {
9059             *err = error;
9060         }
9061     }
9062 }
9063 
9064 template< typename IteratorType >
9065 Buffer::Buffer(
9066     const CommandQueue &queue,
9067     IteratorType startIterator,
9068     IteratorType endIterator,
9069     bool readOnly,
9070     bool useHostPtr,
9071     cl_int* err)
9072 {
9073     typedef typename std::iterator_traits<IteratorType>::value_type DataType;
9074     cl_int error;
9075 
9076     cl_mem_flags flags = 0;
9077     if (readOnly) {
9078         flags |= CL_MEM_READ_ONLY;
9079     }
9080     else {
9081         flags |= CL_MEM_READ_WRITE;
9082     }
9083     if (useHostPtr) {
9084         flags |= CL_MEM_USE_HOST_PTR;
9085     }
9086 
9087     size_type size = sizeof(DataType)*(endIterator - startIterator);
9088 
9089     Context context = queue.getInfo<CL_QUEUE_CONTEXT>();
9090 
9091     if (useHostPtr) {
9092         object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
9093     }
9094     else {
9095         object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
9096     }
9097 
9098     detail::errHandler(error, __CREATE_BUFFER_ERR);
9099     if (err != NULL) {
9100         *err = error;
9101     }
9102 
9103     if (!useHostPtr) {
9104         error = cl::copy(queue, startIterator, endIterator, *this);
9105         detail::errHandler(error, __CREATE_BUFFER_ERR);
9106         if (err != NULL) {
9107             *err = error;
9108         }
9109     }
9110 }
9111 
9112 inline cl_int enqueueReadBuffer(
9113     const Buffer& buffer,
9114     cl_bool blocking,
9115     size_type offset,
9116     size_type size,
9117     void* ptr,
9118     const vector<Event>* events = NULL,
9119     Event* event = NULL)
9120 {
9121     cl_int error;
9122     CommandQueue queue = CommandQueue::getDefault(&error);
9123 
9124     if (error != CL_SUCCESS) {
9125         return error;
9126     }
9127 
9128     return queue.enqueueReadBuffer(buffer, blocking, offset, size, ptr, events, event);
9129 }
9130 
9131 inline cl_int enqueueWriteBuffer(
9132         const Buffer& buffer,
9133         cl_bool blocking,
9134         size_type offset,
9135         size_type size,
9136         const void* ptr,
9137         const vector<Event>* events = NULL,
9138         Event* event = NULL)
9139 {
9140     cl_int error;
9141     CommandQueue queue = CommandQueue::getDefault(&error);
9142 
9143     if (error != CL_SUCCESS) {
9144         return error;
9145     }
9146 
9147     return queue.enqueueWriteBuffer(buffer, blocking, offset, size, ptr, events, event);
9148 }
9149 
9150 inline void* enqueueMapBuffer(
9151         const Buffer& buffer,
9152         cl_bool blocking,
9153         cl_map_flags flags,
9154         size_type offset,
9155         size_type size,
9156         const vector<Event>* events = NULL,
9157         Event* event = NULL,
9158         cl_int* err = NULL)
9159 {
9160     cl_int error;
9161     CommandQueue queue = CommandQueue::getDefault(&error);
9162     detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
9163     if (err != NULL) {
9164         *err = error;
9165     }
9166 
9167     void * result = ::clEnqueueMapBuffer(
9168             queue(), buffer(), blocking, flags, offset, size,
9169             (events != NULL) ? (cl_uint) events->size() : 0,
9170             (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
9171             (cl_event*) event,
9172             &error);
9173 
9174     detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
9175     if (err != NULL) {
9176         *err = error;
9177     }
9178     return result;
9179 }
9180 
9181 
9182 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9183 /**
9184  * Enqueues to the default queue a command that will allow the host to
9185  * update a region of a coarse-grained SVM buffer.
9186  * This variant takes a raw SVM pointer.
9187  */
9188 template<typename T>
9189 inline cl_int enqueueMapSVM(
9190     T* ptr,
9191     cl_bool blocking,
9192     cl_map_flags flags,
9193     size_type size,
9194     const vector<Event>* events,
9195     Event* event)
9196 {
9197     cl_int error;
9198     CommandQueue queue = CommandQueue::getDefault(&error);
9199     if (error != CL_SUCCESS) {
9200         return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
9201     }
9202 
9203     return queue.enqueueMapSVM(
9204         ptr, blocking, flags, size, events, event);
9205 }
9206 
9207 /**
9208  * Enqueues to the default queue a command that will allow the host to
9209  * update a region of a coarse-grained SVM buffer.
9210  * This variant takes a cl::pointer instance.
9211  */
9212 template<typename T, class D>
9213 inline cl_int enqueueMapSVM(
9214     cl::pointer<T, D> ptr,
9215     cl_bool blocking,
9216     cl_map_flags flags,
9217     size_type size,
9218     const vector<Event>* events = NULL,
9219     Event* event = NULL)
9220 {
9221     cl_int error;
9222     CommandQueue queue = CommandQueue::getDefault(&error);
9223     if (error != CL_SUCCESS) {
9224         return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
9225     }
9226 
9227     return queue.enqueueMapSVM(
9228         ptr, blocking, flags, size, events, event);
9229 }
9230 
9231 /**
9232  * Enqueues to the default queue a command that will allow the host to
9233  * update a region of a coarse-grained SVM buffer.
9234  * This variant takes a cl::vector instance.
9235  */
9236 template<typename T, class Alloc>
9237 inline cl_int enqueueMapSVM(
9238     cl::vector<T, Alloc> container,
9239     cl_bool blocking,
9240     cl_map_flags flags,
9241     const vector<Event>* events = NULL,
9242     Event* event = NULL)
9243 {
9244     cl_int error;
9245     CommandQueue queue = CommandQueue::getDefault(&error);
9246     if (error != CL_SUCCESS) {
9247         return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
9248     }
9249 
9250     return queue.enqueueMapSVM(
9251         container, blocking, flags, events, event);
9252 }
9253 
9254 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9255 
9256 inline cl_int enqueueUnmapMemObject(
9257     const Memory& memory,
9258     void* mapped_ptr,
9259     const vector<Event>* events = NULL,
9260     Event* event = NULL)
9261 {
9262     cl_int error;
9263     CommandQueue queue = CommandQueue::getDefault(&error);
9264     detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
9265     if (error != CL_SUCCESS) {
9266         return error;
9267     }
9268 
9269     cl_event tmp;
9270     cl_int err = detail::errHandler(
9271         ::clEnqueueUnmapMemObject(
9272         queue(), memory(), mapped_ptr,
9273         (events != NULL) ? (cl_uint)events->size() : 0,
9274         (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
9275         (event != NULL) ? &tmp : NULL),
9276         __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
9277 
9278     if (event != NULL && err == CL_SUCCESS)
9279         *event = tmp;
9280 
9281     return err;
9282 }
9283 
9284 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9285 /**
9286  * Enqueues to the default queue a command that will release a coarse-grained
9287  * SVM buffer back to the OpenCL runtime.
9288  * This variant takes a raw SVM pointer.
9289  */
9290 template<typename T>
9291 inline cl_int enqueueUnmapSVM(
9292     T* ptr,
9293     const vector<Event>* events = NULL,
9294     Event* event = NULL)
9295 {
9296     cl_int error;
9297     CommandQueue queue = CommandQueue::getDefault(&error);
9298     if (error != CL_SUCCESS) {
9299         return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
9300     }
9301 
9302     return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
9303         __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
9304 
9305 }
9306 
9307 /**
9308  * Enqueues to the default queue a command that will release a coarse-grained
9309  * SVM buffer back to the OpenCL runtime.
9310  * This variant takes a cl::pointer instance.
9311  */
9312 template<typename T, class D>
9313 inline cl_int enqueueUnmapSVM(
9314     cl::pointer<T, D> &ptr,
9315     const vector<Event>* events = NULL,
9316     Event* event = NULL)
9317 {
9318     cl_int error;
9319     CommandQueue queue = CommandQueue::getDefault(&error);
9320     if (error != CL_SUCCESS) {
9321         return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
9322     }
9323 
9324     return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
9325         __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
9326 }
9327 
9328 /**
9329  * Enqueues to the default queue a command that will release a coarse-grained
9330  * SVM buffer back to the OpenCL runtime.
9331  * This variant takes a cl::vector instance.
9332  */
9333 template<typename T, class Alloc>
9334 inline cl_int enqueueUnmapSVM(
9335     cl::vector<T, Alloc> &container,
9336     const vector<Event>* events = NULL,
9337     Event* event = NULL)
9338 {
9339     cl_int error;
9340     CommandQueue queue = CommandQueue::getDefault(&error);
9341     if (error != CL_SUCCESS) {
9342         return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
9343     }
9344 
9345     return detail::errHandler(queue.enqueueUnmapSVM(container, events, event),
9346         __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
9347 }
9348 
9349 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9350 
9351 inline cl_int enqueueCopyBuffer(
9352         const Buffer& src,
9353         const Buffer& dst,
9354         size_type src_offset,
9355         size_type dst_offset,
9356         size_type size,
9357         const vector<Event>* events = NULL,
9358         Event* event = NULL)
9359 {
9360     cl_int error;
9361     CommandQueue queue = CommandQueue::getDefault(&error);
9362 
9363     if (error != CL_SUCCESS) {
9364         return error;
9365     }
9366 
9367     return queue.enqueueCopyBuffer(src, dst, src_offset, dst_offset, size, events, event);
9368 }
9369 
9370 /**
9371  * Blocking copy operation between iterators and a buffer.
9372  * Host to Device.
9373  * Uses default command queue.
9374  */
9375 template< typename IteratorType >
9376 inline cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
9377 {
9378     cl_int error;
9379     CommandQueue queue = CommandQueue::getDefault(&error);
9380     if (error != CL_SUCCESS)
9381         return error;
9382 
9383     return cl::copy(queue, startIterator, endIterator, buffer);
9384 }
9385 
9386 /**
9387  * Blocking copy operation between iterators and a buffer.
9388  * Device to Host.
9389  * Uses default command queue.
9390  */
9391 template< typename IteratorType >
9392 inline cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
9393 {
9394     cl_int error;
9395     CommandQueue queue = CommandQueue::getDefault(&error);
9396     if (error != CL_SUCCESS)
9397         return error;
9398 
9399     return cl::copy(queue, buffer, startIterator, endIterator);
9400 }
9401 
9402 /**
9403  * Blocking copy operation between iterators and a buffer.
9404  * Host to Device.
9405  * Uses specified queue.
9406  */
9407 template< typename IteratorType >
9408 inline cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
9409 {
9410     typedef typename std::iterator_traits<IteratorType>::value_type DataType;
9411     cl_int error;
9412 
9413     size_type length = endIterator-startIterator;
9414     size_type byteLength = length*sizeof(DataType);
9415 
9416     DataType *pointer =
9417         static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_WRITE, 0, byteLength, 0, 0, &error));
9418     // if exceptions enabled, enqueueMapBuffer will throw
9419     if( error != CL_SUCCESS ) {
9420         return error;
9421     }
9422 #if defined(_MSC_VER)
9423     std::copy(
9424         startIterator,
9425         endIterator,
9426         stdext::checked_array_iterator<DataType*>(
9427             pointer, length));
9428 #else
9429     std::copy(startIterator, endIterator, pointer);
9430 #endif
9431     Event endEvent;
9432     error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
9433     // if exceptions enabled, enqueueUnmapMemObject will throw
9434     if( error != CL_SUCCESS ) {
9435         return error;
9436     }
9437     endEvent.wait();
9438     return CL_SUCCESS;
9439 }
9440 
9441 /**
9442  * Blocking copy operation between iterators and a buffer.
9443  * Device to Host.
9444  * Uses specified queue.
9445  */
9446 template< typename IteratorType >
9447 inline cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
9448 {
9449     typedef typename std::iterator_traits<IteratorType>::value_type DataType;
9450     cl_int error;
9451 
9452     size_type length = endIterator-startIterator;
9453     size_type byteLength = length*sizeof(DataType);
9454 
9455     DataType *pointer =
9456         static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_READ, 0, byteLength, 0, 0, &error));
9457     // if exceptions enabled, enqueueMapBuffer will throw
9458     if( error != CL_SUCCESS ) {
9459         return error;
9460     }
9461     std::copy(pointer, pointer + length, startIterator);
9462     Event endEvent;
9463     error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
9464     // if exceptions enabled, enqueueUnmapMemObject will throw
9465     if( error != CL_SUCCESS ) {
9466         return error;
9467     }
9468     endEvent.wait();
9469     return CL_SUCCESS;
9470 }
9471 
9472 
9473 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9474 /**
9475  * Blocking SVM map operation - performs a blocking map underneath.
9476  */
9477 template<typename T, class Alloc>
9478 inline cl_int mapSVM(cl::vector<T, Alloc> &container)
9479 {
9480     return enqueueMapSVM(container, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE);
9481 }
9482 
9483 /**
9484 * Blocking SVM map operation - performs a blocking map underneath.
9485 */
9486 template<typename T, class Alloc>
9487 inline cl_int unmapSVM(cl::vector<T, Alloc> &container)
9488 {
9489     return enqueueUnmapSVM(container);
9490 }
9491 
9492 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9493 
9494 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
9495 inline cl_int enqueueReadBufferRect(
9496     const Buffer& buffer,
9497     cl_bool blocking,
9498     const array<size_type, 3>& buffer_offset,
9499     const array<size_type, 3>& host_offset,
9500     const array<size_type, 3>& region,
9501     size_type buffer_row_pitch,
9502     size_type buffer_slice_pitch,
9503     size_type host_row_pitch,
9504     size_type host_slice_pitch,
9505     void *ptr,
9506     const vector<Event>* events = NULL,
9507     Event* event = NULL)
9508 {
9509     cl_int error;
9510     CommandQueue queue = CommandQueue::getDefault(&error);
9511 
9512     if (error != CL_SUCCESS) {
9513         return error;
9514     }
9515 
9516     return queue.enqueueReadBufferRect(
9517         buffer,
9518         blocking,
9519         buffer_offset,
9520         host_offset,
9521         region,
9522         buffer_row_pitch,
9523         buffer_slice_pitch,
9524         host_row_pitch,
9525         host_slice_pitch,
9526         ptr,
9527         events,
9528         event);
9529 }
9530 
9531 inline cl_int enqueueWriteBufferRect(
9532     const Buffer& buffer,
9533     cl_bool blocking,
9534     const array<size_type, 3>& buffer_offset,
9535     const array<size_type, 3>& host_offset,
9536     const array<size_type, 3>& region,
9537     size_type buffer_row_pitch,
9538     size_type buffer_slice_pitch,
9539     size_type host_row_pitch,
9540     size_type host_slice_pitch,
9541     const void *ptr,
9542     const vector<Event>* events = NULL,
9543     Event* event = NULL)
9544 {
9545     cl_int error;
9546     CommandQueue queue = CommandQueue::getDefault(&error);
9547 
9548     if (error != CL_SUCCESS) {
9549         return error;
9550     }
9551 
9552     return queue.enqueueWriteBufferRect(
9553         buffer,
9554         blocking,
9555         buffer_offset,
9556         host_offset,
9557         region,
9558         buffer_row_pitch,
9559         buffer_slice_pitch,
9560         host_row_pitch,
9561         host_slice_pitch,
9562         ptr,
9563         events,
9564         event);
9565 }
9566 
9567 inline cl_int enqueueCopyBufferRect(
9568     const Buffer& src,
9569     const Buffer& dst,
9570     const array<size_type, 3>& src_origin,
9571     const array<size_type, 3>& dst_origin,
9572     const array<size_type, 3>& region,
9573     size_type src_row_pitch,
9574     size_type src_slice_pitch,
9575     size_type dst_row_pitch,
9576     size_type dst_slice_pitch,
9577     const vector<Event>* events = NULL,
9578     Event* event = NULL)
9579 {
9580     cl_int error;
9581     CommandQueue queue = CommandQueue::getDefault(&error);
9582 
9583     if (error != CL_SUCCESS) {
9584         return error;
9585     }
9586 
9587     return queue.enqueueCopyBufferRect(
9588         src,
9589         dst,
9590         src_origin,
9591         dst_origin,
9592         region,
9593         src_row_pitch,
9594         src_slice_pitch,
9595         dst_row_pitch,
9596         dst_slice_pitch,
9597         events,
9598         event);
9599 }
9600 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
9601 
9602 inline cl_int enqueueReadImage(
9603     const Image& image,
9604     cl_bool blocking,
9605     const array<size_type, 3>& origin,
9606     const array<size_type, 3>& region,
9607     size_type row_pitch,
9608     size_type slice_pitch,
9609     void* ptr,
9610     const vector<Event>* events = NULL,
9611     Event* event = NULL)
9612 {
9613     cl_int error;
9614     CommandQueue queue = CommandQueue::getDefault(&error);
9615 
9616     if (error != CL_SUCCESS) {
9617         return error;
9618     }
9619 
9620     return queue.enqueueReadImage(
9621         image,
9622         blocking,
9623         origin,
9624         region,
9625         row_pitch,
9626         slice_pitch,
9627         ptr,
9628         events,
9629         event);
9630 }
9631 
9632 inline cl_int enqueueWriteImage(
9633     const Image& image,
9634     cl_bool blocking,
9635     const array<size_type, 3>& origin,
9636     const array<size_type, 3>& region,
9637     size_type row_pitch,
9638     size_type slice_pitch,
9639     const void* ptr,
9640     const vector<Event>* events = NULL,
9641     Event* event = NULL)
9642 {
9643     cl_int error;
9644     CommandQueue queue = CommandQueue::getDefault(&error);
9645 
9646     if (error != CL_SUCCESS) {
9647         return error;
9648     }
9649 
9650     return queue.enqueueWriteImage(
9651         image,
9652         blocking,
9653         origin,
9654         region,
9655         row_pitch,
9656         slice_pitch,
9657         ptr,
9658         events,
9659         event);
9660 }
9661 
9662 inline cl_int enqueueCopyImage(
9663     const Image& src,
9664     const Image& dst,
9665     const array<size_type, 3>& src_origin,
9666     const array<size_type, 3>& dst_origin,
9667     const array<size_type, 3>& region,
9668     const vector<Event>* events = NULL,
9669     Event* event = NULL)
9670 {
9671     cl_int error;
9672     CommandQueue queue = CommandQueue::getDefault(&error);
9673 
9674     if (error != CL_SUCCESS) {
9675         return error;
9676     }
9677 
9678     return queue.enqueueCopyImage(
9679         src,
9680         dst,
9681         src_origin,
9682         dst_origin,
9683         region,
9684         events,
9685         event);
9686 }
9687 
9688 inline cl_int enqueueCopyImageToBuffer(
9689     const Image& src,
9690     const Buffer& dst,
9691     const array<size_type, 3>& src_origin,
9692     const array<size_type, 3>& region,
9693     size_type dst_offset,
9694     const vector<Event>* events = NULL,
9695     Event* event = NULL)
9696 {
9697     cl_int error;
9698     CommandQueue queue = CommandQueue::getDefault(&error);
9699 
9700     if (error != CL_SUCCESS) {
9701         return error;
9702     }
9703 
9704     return queue.enqueueCopyImageToBuffer(
9705         src,
9706         dst,
9707         src_origin,
9708         region,
9709         dst_offset,
9710         events,
9711         event);
9712 }
9713 
9714 inline cl_int enqueueCopyBufferToImage(
9715     const Buffer& src,
9716     const Image& dst,
9717     size_type src_offset,
9718     const array<size_type, 3>& dst_origin,
9719     const array<size_type, 3>& region,
9720     const vector<Event>* events = NULL,
9721     Event* event = NULL)
9722 {
9723     cl_int error;
9724     CommandQueue queue = CommandQueue::getDefault(&error);
9725 
9726     if (error != CL_SUCCESS) {
9727         return error;
9728     }
9729 
9730     return queue.enqueueCopyBufferToImage(
9731         src,
9732         dst,
9733         src_offset,
9734         dst_origin,
9735         region,
9736         events,
9737         event);
9738 }
9739 
9740 
9741 inline cl_int flush(void)
9742 {
9743     cl_int error;
9744     CommandQueue queue = CommandQueue::getDefault(&error);
9745 
9746     if (error != CL_SUCCESS) {
9747         return error;
9748     }
9749 
9750     return queue.flush();
9751 }
9752 
9753 inline cl_int finish(void)
9754 {
9755     cl_int error;
9756     CommandQueue queue = CommandQueue::getDefault(&error);
9757 
9758     if (error != CL_SUCCESS) {
9759         return error;
9760     }
9761 
9762 
9763     return queue.finish();
9764 }
9765 
9766 class EnqueueArgs
9767 {
9768 private:
9769     CommandQueue queue_;
9770     const NDRange offset_;
9771     const NDRange global_;
9772     const NDRange local_;
9773     vector<Event> events_;
9774 
9775     template<typename... Ts>
9776     friend class KernelFunctor;
9777 
9778 public:
9779     EnqueueArgs(NDRange global) :
9780       queue_(CommandQueue::getDefault()),
9781       offset_(NullRange),
9782       global_(global),
9783       local_(NullRange)
9784     {
9785 
9786     }
9787 
9788     EnqueueArgs(NDRange global, NDRange local) :
9789       queue_(CommandQueue::getDefault()),
9790       offset_(NullRange),
9791       global_(global),
9792       local_(local)
9793     {
9794 
9795     }
9796 
9797     EnqueueArgs(NDRange offset, NDRange global, NDRange local) :
9798       queue_(CommandQueue::getDefault()),
9799       offset_(offset),
9800       global_(global),
9801       local_(local)
9802     {
9803 
9804     }
9805 
9806     EnqueueArgs(Event e, NDRange global) :
9807       queue_(CommandQueue::getDefault()),
9808       offset_(NullRange),
9809       global_(global),
9810       local_(NullRange)
9811     {
9812         events_.push_back(e);
9813     }
9814 
9815     EnqueueArgs(Event e, NDRange global, NDRange local) :
9816       queue_(CommandQueue::getDefault()),
9817       offset_(NullRange),
9818       global_(global),
9819       local_(local)
9820     {
9821         events_.push_back(e);
9822     }
9823 
9824     EnqueueArgs(Event e, NDRange offset, NDRange global, NDRange local) :
9825       queue_(CommandQueue::getDefault()),
9826       offset_(offset),
9827       global_(global),
9828       local_(local)
9829     {
9830         events_.push_back(e);
9831     }
9832 
9833     EnqueueArgs(const vector<Event> &events, NDRange global) :
9834       queue_(CommandQueue::getDefault()),
9835       offset_(NullRange),
9836       global_(global),
9837       local_(NullRange),
9838       events_(events)
9839     {
9840 
9841     }
9842 
9843     EnqueueArgs(const vector<Event> &events, NDRange global, NDRange local) :
9844       queue_(CommandQueue::getDefault()),
9845       offset_(NullRange),
9846       global_(global),
9847       local_(local),
9848       events_(events)
9849     {
9850 
9851     }
9852 
9853     EnqueueArgs(const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
9854       queue_(CommandQueue::getDefault()),
9855       offset_(offset),
9856       global_(global),
9857       local_(local),
9858       events_(events)
9859     {
9860 
9861     }
9862 
9863     EnqueueArgs(CommandQueue &queue, NDRange global) :
9864       queue_(queue),
9865       offset_(NullRange),
9866       global_(global),
9867       local_(NullRange)
9868     {
9869 
9870     }
9871 
9872     EnqueueArgs(CommandQueue &queue, NDRange global, NDRange local) :
9873       queue_(queue),
9874       offset_(NullRange),
9875       global_(global),
9876       local_(local)
9877     {
9878 
9879     }
9880 
9881     EnqueueArgs(CommandQueue &queue, NDRange offset, NDRange global, NDRange local) :
9882       queue_(queue),
9883       offset_(offset),
9884       global_(global),
9885       local_(local)
9886     {
9887 
9888     }
9889 
9890     EnqueueArgs(CommandQueue &queue, Event e, NDRange global) :
9891       queue_(queue),
9892       offset_(NullRange),
9893       global_(global),
9894       local_(NullRange)
9895     {
9896         events_.push_back(e);
9897     }
9898 
9899     EnqueueArgs(CommandQueue &queue, Event e, NDRange global, NDRange local) :
9900       queue_(queue),
9901       offset_(NullRange),
9902       global_(global),
9903       local_(local)
9904     {
9905         events_.push_back(e);
9906     }
9907 
9908     EnqueueArgs(CommandQueue &queue, Event e, NDRange offset, NDRange global, NDRange local) :
9909       queue_(queue),
9910       offset_(offset),
9911       global_(global),
9912       local_(local)
9913     {
9914         events_.push_back(e);
9915     }
9916 
9917     EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global) :
9918       queue_(queue),
9919       offset_(NullRange),
9920       global_(global),
9921       local_(NullRange),
9922       events_(events)
9923     {
9924 
9925     }
9926 
9927     EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global, NDRange local) :
9928       queue_(queue),
9929       offset_(NullRange),
9930       global_(global),
9931       local_(local),
9932       events_(events)
9933     {
9934 
9935     }
9936 
9937     EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
9938       queue_(queue),
9939       offset_(offset),
9940       global_(global),
9941       local_(local),
9942       events_(events)
9943     {
9944 
9945     }
9946 };
9947 
9948 
9949 //----------------------------------------------------------------------------------------------
9950 
9951 
9952 /**
9953  * Type safe kernel functor.
9954  *
9955  */
9956 template<typename... Ts>
9957 class KernelFunctor
9958 {
9959 private:
9960     Kernel kernel_;
9961 
9962     template<int index, typename T0, typename... T1s>
9963     void setArgs(T0&& t0, T1s&&... t1s)
9964     {
9965         kernel_.setArg(index, t0);
9966         setArgs<index + 1, T1s...>(std::forward<T1s>(t1s)...);
9967     }
9968 
9969     template<int index, typename T0>
9970     void setArgs(T0&& t0)
9971     {
9972         kernel_.setArg(index, t0);
9973     }
9974 
9975     template<int index>
9976     void setArgs()
9977     {
9978     }
9979 
9980 
9981 public:
9982     KernelFunctor(Kernel kernel) : kernel_(kernel)
9983     {}
9984 
9985     KernelFunctor(
9986         const Program& program,
9987         const string name,
9988         cl_int * err = NULL) :
9989         kernel_(program, name.c_str(), err)
9990     {}
9991 
9992     //! \brief Return type of the functor
9993     typedef Event result_type;
9994 
9995     /**
9996      * Enqueue kernel.
9997      * @param args Launch parameters of the kernel.
9998      * @param t0... List of kernel arguments based on the template type of the functor.
9999      */
10000     Event operator() (
10001         const EnqueueArgs& args,
10002         Ts... ts)
10003     {
10004         Event event;
10005         setArgs<0>(std::forward<Ts>(ts)...);
10006 
10007         args.queue_.enqueueNDRangeKernel(
10008             kernel_,
10009             args.offset_,
10010             args.global_,
10011             args.local_,
10012             &args.events_,
10013             &event);
10014 
10015         return event;
10016     }
10017 
10018     /**
10019     * Enqueue kernel with support for error code.
10020     * @param args Launch parameters of the kernel.
10021     * @param t0... List of kernel arguments based on the template type of the functor.
10022     * @param error Out parameter returning the error code from the execution.
10023     */
10024     Event operator() (
10025         const EnqueueArgs& args,
10026         Ts... ts,
10027         cl_int &error)
10028     {
10029         Event event;
10030         setArgs<0>(std::forward<Ts>(ts)...);
10031 
10032         error = args.queue_.enqueueNDRangeKernel(
10033             kernel_,
10034             args.offset_,
10035             args.global_,
10036             args.local_,
10037             &args.events_,
10038             &event);
10039 
10040         return event;
10041     }
10042 
10043 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
10044     cl_int setSVMPointers(const vector<void*> &pointerList)
10045     {
10046         return kernel_.setSVMPointers(pointerList);
10047     }
10048 
10049     template<typename T0, typename... T1s>
10050     cl_int setSVMPointers(const T0 &t0, T1s &... ts)
10051     {
10052         return kernel_.setSVMPointers(t0, ts...);
10053     }
10054 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
10055 
10056     Kernel getKernel()
10057     {
10058         return kernel_;
10059     }
10060 };
10061 
10062 namespace compatibility {
10063     /**
10064      * Backward compatibility class to ensure that cl.hpp code works with cl2.hpp.
10065      * Please use KernelFunctor directly.
10066      */
10067     template<typename... Ts>
10068     struct make_kernel
10069     {
10070         typedef KernelFunctor<Ts...> FunctorType;
10071 
10072         FunctorType functor_;
10073 
10074         make_kernel(
10075             const Program& program,
10076             const string name,
10077             cl_int * err = NULL) :
10078             functor_(FunctorType(program, name, err))
10079         {}
10080 
10081         make_kernel(
10082             const Kernel kernel) :
10083             functor_(FunctorType(kernel))
10084         {}
10085 
10086         //! \brief Return type of the functor
10087         typedef Event result_type;
10088 
10089         //! \brief Function signature of kernel functor with no event dependency.
10090         typedef Event type_(
10091             const EnqueueArgs&,
10092             Ts...);
10093 
10094         Event operator()(
10095             const EnqueueArgs& enqueueArgs,
10096             Ts... args)
10097         {
10098             return functor_(
10099                 enqueueArgs, args...);
10100         }
10101     };
10102 } // namespace compatibility
10103 
10104 
10105 //----------------------------------------------------------------------------------------------------------------------
10106 
10107 #undef CL_HPP_ERR_STR_
10108 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
10109 #undef __GET_DEVICE_INFO_ERR
10110 #undef __GET_PLATFORM_INFO_ERR
10111 #undef __GET_DEVICE_IDS_ERR
10112 #undef __GET_PLATFORM_IDS_ERR
10113 #undef __GET_CONTEXT_INFO_ERR
10114 #undef __GET_EVENT_INFO_ERR
10115 #undef __GET_EVENT_PROFILE_INFO_ERR
10116 #undef __GET_MEM_OBJECT_INFO_ERR
10117 #undef __GET_IMAGE_INFO_ERR
10118 #undef __GET_SAMPLER_INFO_ERR
10119 #undef __GET_KERNEL_INFO_ERR
10120 #undef __GET_KERNEL_ARG_INFO_ERR
10121 #undef __GET_KERNEL_SUB_GROUP_INFO_ERR
10122 #undef __GET_KERNEL_WORK_GROUP_INFO_ERR
10123 #undef __GET_PROGRAM_INFO_ERR
10124 #undef __GET_PROGRAM_BUILD_INFO_ERR
10125 #undef __GET_COMMAND_QUEUE_INFO_ERR
10126 #undef __CREATE_CONTEXT_ERR
10127 #undef __CREATE_CONTEXT_FROM_TYPE_ERR
10128 #undef __GET_SUPPORTED_IMAGE_FORMATS_ERR
10129 #undef __CREATE_BUFFER_ERR
10130 #undef __COPY_ERR
10131 #undef __CREATE_SUBBUFFER_ERR
10132 #undef __CREATE_GL_BUFFER_ERR
10133 #undef __CREATE_GL_RENDER_BUFFER_ERR
10134 #undef __GET_GL_OBJECT_INFO_ERR
10135 #undef __CREATE_IMAGE_ERR
10136 #undef __CREATE_GL_TEXTURE_ERR
10137 #undef __IMAGE_DIMENSION_ERR
10138 #undef __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR
10139 #undef __CREATE_USER_EVENT_ERR
10140 #undef __SET_USER_EVENT_STATUS_ERR
10141 #undef __SET_EVENT_CALLBACK_ERR
10142 #undef __WAIT_FOR_EVENTS_ERR
10143 #undef __CREATE_KERNEL_ERR
10144 #undef __SET_KERNEL_ARGS_ERR
10145 #undef __CREATE_PROGRAM_WITH_SOURCE_ERR
10146 #undef __CREATE_PROGRAM_WITH_IL_ERR
10147 #undef __CREATE_PROGRAM_WITH_BINARY_ERR
10148 #undef __CREATE_PROGRAM_WITH_IL_ERR
10149 #undef __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR
10150 #undef __BUILD_PROGRAM_ERR
10151 #undef __COMPILE_PROGRAM_ERR
10152 #undef __LINK_PROGRAM_ERR
10153 #undef __CREATE_KERNELS_IN_PROGRAM_ERR
10154 #undef __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR
10155 #undef __CREATE_SAMPLER_WITH_PROPERTIES_ERR
10156 #undef __SET_COMMAND_QUEUE_PROPERTY_ERR
10157 #undef __ENQUEUE_READ_BUFFER_ERR
10158 #undef __ENQUEUE_READ_BUFFER_RECT_ERR
10159 #undef __ENQUEUE_WRITE_BUFFER_ERR
10160 #undef __ENQUEUE_WRITE_BUFFER_RECT_ERR
10161 #undef __ENQEUE_COPY_BUFFER_ERR
10162 #undef __ENQEUE_COPY_BUFFER_RECT_ERR
10163 #undef __ENQUEUE_FILL_BUFFER_ERR
10164 #undef __ENQUEUE_READ_IMAGE_ERR
10165 #undef __ENQUEUE_WRITE_IMAGE_ERR
10166 #undef __ENQUEUE_COPY_IMAGE_ERR
10167 #undef __ENQUEUE_FILL_IMAGE_ERR
10168 #undef __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR
10169 #undef __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR
10170 #undef __ENQUEUE_MAP_BUFFER_ERR
10171 #undef __ENQUEUE_MAP_IMAGE_ERR
10172 #undef __ENQUEUE_UNMAP_MEM_OBJECT_ERR
10173 #undef __ENQUEUE_NDRANGE_KERNEL_ERR
10174 #undef __ENQUEUE_NATIVE_KERNEL
10175 #undef __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR
10176 #undef __ENQUEUE_MIGRATE_SVM_ERR
10177 #undef __ENQUEUE_ACQUIRE_GL_ERR
10178 #undef __ENQUEUE_RELEASE_GL_ERR
10179 #undef __CREATE_PIPE_ERR
10180 #undef __GET_PIPE_INFO_ERR
10181 #undef __RETAIN_ERR
10182 #undef __RELEASE_ERR
10183 #undef __FLUSH_ERR
10184 #undef __FINISH_ERR
10185 #undef __VECTOR_CAPACITY_ERR
10186 #undef __CREATE_SUB_DEVICES_ERR
10187 #undef __CREATE_SUB_DEVICES_ERR
10188 #undef __ENQUEUE_MARKER_ERR
10189 #undef __ENQUEUE_WAIT_FOR_EVENTS_ERR
10190 #undef __ENQUEUE_BARRIER_ERR
10191 #undef __UNLOAD_COMPILER_ERR
10192 #undef __CREATE_GL_TEXTURE_2D_ERR
10193 #undef __CREATE_GL_TEXTURE_3D_ERR
10194 #undef __CREATE_IMAGE2D_ERR
10195 #undef __CREATE_IMAGE3D_ERR
10196 #undef __CREATE_COMMAND_QUEUE_ERR
10197 #undef __ENQUEUE_TASK_ERR
10198 #undef __CREATE_SAMPLER_ERR
10199 #undef __ENQUEUE_MARKER_WAIT_LIST_ERR
10200 #undef __ENQUEUE_BARRIER_WAIT_LIST_ERR
10201 #undef __CLONE_KERNEL_ERR
10202 #undef __GET_HOST_TIMER_ERR
10203 #undef __GET_DEVICE_AND_HOST_TIMER_ERR
10204 
10205 #endif //CL_HPP_USER_OVERRIDE_ERROR_STRINGS
10206 
10207 // Extensions
10208 #undef CL_HPP_INIT_CL_EXT_FCN_PTR_
10209 #undef CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_
10210 
10211 #if defined(CL_HPP_USE_CL_DEVICE_FISSION)
10212 #undef CL_HPP_PARAM_NAME_DEVICE_FISSION_
10213 #endif // CL_HPP_USE_CL_DEVICE_FISSION
10214 
10215 #undef CL_HPP_NOEXCEPT_
10216 #undef CL_HPP_DEFINE_STATIC_MEMBER_
10217 
10218 } // namespace cl
10219 
10220 #endif // CL_HPP_
10221