OpenCL C++ Bindings
cl2.hpp
1 /*******************************************************************************
2  * Copyright (c) 2008-2016 The Khronos Group Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and/or associated documentation files (the
6  * "Materials"), to deal in the Materials without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sublicense, and/or sell copies of the Materials, and to
9  * permit persons to whom the Materials are furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included
13  * in all copies or substantial portions of the Materials.
14  *
15  * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS
16  * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS
17  * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT
18  * https://www.khronos.org/registry/
19  *
20  * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
24  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
27  ******************************************************************************/
28 
393 #ifndef CL_HPP_
394 #define CL_HPP_
395 
396 /* Handle deprecated preprocessor definitions. In each case, we only check for
397  * the old name if the new name is not defined, so that user code can define
398  * both and hence work with either version of the bindings.
399  */
400 #if !defined(CL_HPP_USE_DX_INTEROP) && defined(USE_DX_INTEROP)
401 # pragma message("cl2.hpp: USE_DX_INTEROP is deprecated. Define CL_HPP_USE_DX_INTEROP instead")
402 # define CL_HPP_USE_DX_INTEROP
403 #endif
404 #if !defined(CL_HPP_USE_CL_DEVICE_FISSION) && defined(USE_CL_DEVICE_FISSION)
405 # pragma message("cl2.hpp: USE_CL_DEVICE_FISSION is deprecated. Define CL_HPP_USE_CL_DEVICE_FISSION instead")
406 # define CL_HPP_USE_CL_DEVICE_FISSION
407 #endif
408 #if !defined(CL_HPP_ENABLE_EXCEPTIONS) && defined(__CL_ENABLE_EXCEPTIONS)
409 # pragma message("cl2.hpp: __CL_ENABLE_EXCEPTIONS is deprecated. Define CL_HPP_ENABLE_EXCEPTIONS instead")
410 # define CL_HPP_ENABLE_EXCEPTIONS
411 #endif
412 #if !defined(CL_HPP_NO_STD_VECTOR) && defined(__NO_STD_VECTOR)
413 # pragma message("cl2.hpp: __NO_STD_VECTOR is deprecated. Define CL_HPP_NO_STD_VECTOR instead")
414 # define CL_HPP_NO_STD_VECTOR
415 #endif
416 #if !defined(CL_HPP_NO_STD_STRING) && defined(__NO_STD_STRING)
417 # pragma message("cl2.hpp: __NO_STD_STRING is deprecated. Define CL_HPP_NO_STD_STRING instead")
418 # define CL_HPP_NO_STD_STRING
419 #endif
420 #if defined(VECTOR_CLASS)
421 # pragma message("cl2.hpp: VECTOR_CLASS is deprecated. Alias cl::vector instead")
422 #endif
423 #if defined(STRING_CLASS)
424 # pragma message("cl2.hpp: STRING_CLASS is deprecated. Alias cl::string instead.")
425 #endif
426 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS) && defined(__CL_USER_OVERRIDE_ERROR_STRINGS)
427 # pragma message("cl2.hpp: __CL_USER_OVERRIDE_ERROR_STRINGS is deprecated. Define CL_HPP_USER_OVERRIDE_ERROR_STRINGS instead")
428 # define CL_HPP_USER_OVERRIDE_ERROR_STRINGS
429 #endif
430 
431 /* Warn about features that are no longer supported
432  */
433 #if defined(__USE_DEV_VECTOR)
434 # pragma message("cl2.hpp: __USE_DEV_VECTOR is no longer supported. Expect compilation errors")
435 #endif
436 #if defined(__USE_DEV_STRING)
437 # pragma message("cl2.hpp: __USE_DEV_STRING is no longer supported. Expect compilation errors")
438 #endif
439 
440 /* Detect which version to target */
441 #if !defined(CL_HPP_TARGET_OPENCL_VERSION)
442 # pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not defined. It will default to 200 (OpenCL 2.0)")
443 # define CL_HPP_TARGET_OPENCL_VERSION 200
444 #endif
445 #if CL_HPP_TARGET_OPENCL_VERSION != 100 && CL_HPP_TARGET_OPENCL_VERSION != 110 && CL_HPP_TARGET_OPENCL_VERSION != 120 && CL_HPP_TARGET_OPENCL_VERSION != 200
446 # pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). It will be set to 200")
447 # undef CL_HPP_TARGET_OPENCL_VERSION
448 # define CL_HPP_TARGET_OPENCL_VERSION 200
449 #endif
450 /* Forward target OpenCL version to C headers */
451 #define CL_TARGET_OPENCL_VERSION CL_HPP_TARGET_OPENCL_VERSION
452 
453 #if !defined(CL_HPP_MINIMUM_OPENCL_VERSION)
454 # define CL_HPP_MINIMUM_OPENCL_VERSION 200
455 #endif
456 #if CL_HPP_MINIMUM_OPENCL_VERSION != 100 && CL_HPP_MINIMUM_OPENCL_VERSION != 110 && CL_HPP_MINIMUM_OPENCL_VERSION != 120 && CL_HPP_MINIMUM_OPENCL_VERSION != 200
457 # pragma message("cl2.hpp: CL_HPP_MINIMUM_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). It will be set to 100")
458 # undef CL_HPP_MINIMUM_OPENCL_VERSION
459 # define CL_HPP_MINIMUM_OPENCL_VERSION 100
460 #endif
461 #if CL_HPP_MINIMUM_OPENCL_VERSION > CL_HPP_TARGET_OPENCL_VERSION
462 # error "CL_HPP_MINIMUM_OPENCL_VERSION must not be greater than CL_HPP_TARGET_OPENCL_VERSION"
463 #endif
464 
465 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
466 # define CL_USE_DEPRECATED_OPENCL_1_0_APIS
467 #endif
468 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
469 # define CL_USE_DEPRECATED_OPENCL_1_1_APIS
470 #endif
471 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
472 # define CL_USE_DEPRECATED_OPENCL_1_2_APIS
473 #endif
474 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
475 # define CL_USE_DEPRECATED_OPENCL_2_0_APIS
476 #endif
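
/* Example: selecting the OpenCL versions the bindings should target. A
 * minimal sketch; the values below are only illustrative and should match
 * the platforms the application intends to run on. Both macros must be
 * defined before this header is included.
 * \code
 *     #define CL_HPP_MINIMUM_OPENCL_VERSION 120
 *     #define CL_HPP_TARGET_OPENCL_VERSION  200
 *     #include <CL/cl2.hpp>
 * \endcode
 */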
477 
478 #ifdef _WIN32
479 
480 #include <malloc.h>
481 
482 #if defined(CL_HPP_USE_DX_INTEROP)
483 #include <CL/cl_d3d10.h>
484 #include <CL/cl_dx9_media_sharing.h>
485 #endif
486 #endif // _WIN32
487 
488 #if defined(_MSC_VER)
489 #include <intrin.h>
490 #endif // _MSC_VER
491 
492  // Check for a valid C++ version
493 
494 // Need to do both tests here because MSVC does not update __cplusplus
495 // to reflect the supported language version
496 #if (!defined(_MSC_VER) && __cplusplus < 201103L) || (defined(_MSC_VER) && _MSC_VER < 1700)
497 #error Visual studio 2013 or another C++11-supporting compiler required
498 #endif
499 
500 //
501 #if defined(CL_HPP_USE_CL_DEVICE_FISSION) || defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
502 #include <CL/cl_ext.h>
503 #endif
504 
505 #if defined(__APPLE__) || defined(__MACOSX)
506 #include <OpenCL/opencl.h>
507 #else
508 #include <CL/opencl.h>
509 #endif // !__APPLE__
510 
511 #if (__cplusplus >= 201103L)
512 #define CL_HPP_NOEXCEPT_ noexcept
513 #else
514 #define CL_HPP_NOEXCEPT_
515 #endif
516 
517 #if defined(_MSC_VER)
518 # define CL_HPP_DEFINE_STATIC_MEMBER_ __declspec(selectany)
519 #else
520 # define CL_HPP_DEFINE_STATIC_MEMBER_ __attribute__((weak))
521 #endif // !_MSC_VER
522 
523 // Define deprecated prefixes and suffixes to ensure compilation
524 // in case they are not pre-defined
525 #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
526 #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
527 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
528 #if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
529 #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
530 #endif // #if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
531 
532 #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
533 #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED
534 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
535 #if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
536 #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED
537 #endif // #if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
538 
539 #if !defined(CL_CALLBACK)
540 #define CL_CALLBACK
541 #endif //CL_CALLBACK
542 
543 #include <utility>
544 #include <limits>
545 #include <iterator>
546 #include <mutex>
547 #include <cstring>
548 #include <functional>
549 
550 
551 // Define a size_type to represent a correctly resolved size_t
552 #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
553 namespace cl {
554  using size_type = ::size_t;
555 } // namespace cl
556 #else // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
557 namespace cl {
558  using size_type = size_t;
559 } // namespace cl
560 #endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
561 
562 
563 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
564 #include <exception>
565 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
566 
567 #if !defined(CL_HPP_NO_STD_VECTOR)
568 #include <vector>
569 namespace cl {
570  template < class T, class Alloc = std::allocator<T> >
571  using vector = std::vector<T, Alloc>;
572 } // namespace cl
573 #endif // #if !defined(CL_HPP_NO_STD_VECTOR)
574 
575 #if !defined(CL_HPP_NO_STD_STRING)
576 #include <string>
577 namespace cl {
578  using string = std::string;
579 } // namespace cl
580 #endif // #if !defined(CL_HPP_NO_STD_STRING)
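
/* Example: opting out of the std::vector/std::string aliases. A sketch only:
 * it assumes a user-supplied SmallVector template (hypothetical name); when
 * CL_HPP_NO_STD_VECTOR or CL_HPP_NO_STD_STRING is defined, the corresponding
 * cl::vector / cl::string alias must be provided by the user before the rest
 * of the bindings are parsed.
 * \code
 *     #define CL_HPP_NO_STD_VECTOR
 *     namespace cl {
 *         template<class T, class Alloc = std::allocator<T>>
 *         using vector = SmallVector<T, Alloc>; // hypothetical container
 *     } // namespace cl
 *     #include <CL/cl2.hpp>
 * \endcode
 */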
581 
582 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
583 
584 #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
585 #include <memory>
586 namespace cl {
587  // Alias unique_ptr as cl::pointer for internal use so that the user
588  // can replace it (and the matching allocate_pointer) with their own types
589  template<class T, class D>
590  using pointer = std::unique_ptr<T, D>;
591 } // namespace cl
592 #endif
593 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
594 #if !defined(CL_HPP_NO_STD_ARRAY)
595 #include <array>
596 namespace cl {
597  template < class T, size_type N >
598  using array = std::array<T, N>;
599 } // namespace cl
600 #endif // #if !defined(CL_HPP_NO_STD_ARRAY)
601 
602 // Define size_type appropriately to allow backward-compatibility
603 // use of the old size_t interface class
604 #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
605 namespace cl {
606  namespace compatibility {
611  template <int N>
612  class size_t
613  {
614  private:
615  size_type data_[N];
616 
617  public:
619  size_t()
620  {
621  for (int i = 0; i < N; ++i) {
622  data_[i] = 0;
623  }
624  }
625 
626  size_t(const array<size_type, N> &rhs)
627  {
628  for (int i = 0; i < N; ++i) {
629  data_[i] = rhs[i];
630  }
631  }
632 
633  size_type& operator[](int index)
634  {
635  return data_[index];
636  }
637 
638  const size_type& operator[](int index) const
639  {
640  return data_[index];
641  }
642 
644  operator size_type* () { return data_; }
645 
647  operator const size_type* () const { return data_; }
648 
649  operator array<size_type, N>() const
650  {
651  array<size_type, N> ret;
652 
653  for (int i = 0; i < N; ++i) {
654  ret[i] = data_[i];
655  }
656  return ret;
657  }
658  };
659  } // namespace compatibility
660 
661  template<int N>
662  using size_t = compatibility::size_t<N>;
663 } // namespace cl
664 #endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
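
/* Example: the compatibility cl::size_t<N> interoperates with the
 * cl::array<size_type, N> used by the current interfaces, so legacy cl.hpp
 * code can keep using it. A small sketch, assuming the compatibility macro
 * was defined before inclusion:
 * \code
 *     cl::size_t<3> region;
 *     region[0] = 512; region[1] = 512; region[2] = 1;
 *     cl::array<cl::size_type, 3> asArray = region; // implicit conversion
 * \endcode
 */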
665 
666 // Helper alias: the embedded comma in array<size_type, 3> would otherwise confuse the parameter-name macros
667 namespace cl {
668  namespace detail {
669  using size_t_array = array<size_type, 3>;
670  } // namespace detail
671 } // namespace cl
672 
673 
679 namespace cl {
680  class Memory;
681 
682 #define CL_HPP_INIT_CL_EXT_FCN_PTR_(name) \
683  if (!pfn_##name) { \
684  pfn_##name = (PFN_##name) \
685  clGetExtensionFunctionAddress(#name); \
686  if (!pfn_##name) { \
687  } \
688  }
689 
690 #define CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, name) \
691  if (!pfn_##name) { \
692  pfn_##name = (PFN_##name) \
693  clGetExtensionFunctionAddressForPlatform(platform, #name); \
694  if (!pfn_##name) { \
695  } \
696  }
697 
698  class Program;
699  class Device;
700  class Context;
701  class CommandQueue;
702  class DeviceCommandQueue;
703  class Memory;
704  class Buffer;
705  class Pipe;
706 
707 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
708 
712  class Error : public std::exception
713  {
714  private:
715  cl_int err_;
716  const char * errStr_;
717  public:
727  Error(cl_int err, const char * errStr = NULL) : err_(err), errStr_(errStr)
728  {}
729 
730  ~Error() throw() {}
731 
736  virtual const char * what() const throw ()
737  {
738  if (errStr_ == NULL) {
739  return "empty";
740  }
741  else {
742  return errStr_;
743  }
744  }
745 
750  cl_int err(void) const { return err_; }
751  };
752 #define CL_HPP_ERR_STR_(x) #x
753 #else
754 #define CL_HPP_ERR_STR_(x) NULL
755 #endif // CL_HPP_ENABLE_EXCEPTIONS
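
/* Example: with CL_HPP_ENABLE_EXCEPTIONS defined before inclusion, failing
 * calls throw cl::Error instead of returning error codes. A sketch, assuming
 * <iostream> is available and a GPU device exists:
 * \code
 *     try {
 *         cl::Context context(CL_DEVICE_TYPE_GPU);
 *     } catch (const cl::Error &e) {
 *         std::cerr << e.what() << " (" << e.err() << ")" << std::endl;
 *     }
 * \endcode
 */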
756 
757 
758 namespace detail
759 {
760 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
761 static inline cl_int errHandler (
762  cl_int err,
763  const char * errStr = NULL)
764 {
765  if (err != CL_SUCCESS) {
766  throw Error(err, errStr);
767  }
768  return err;
769 }
770 #else
771 static inline cl_int errHandler (cl_int err, const char * errStr = NULL)
772 {
773  (void) errStr; // suppress unused variable warning
774  return err;
775 }
776 #endif // CL_HPP_ENABLE_EXCEPTIONS
777 }
778 
779 
780 
782 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
783 #define __GET_DEVICE_INFO_ERR CL_HPP_ERR_STR_(clGetDeviceInfo)
784 #define __GET_PLATFORM_INFO_ERR CL_HPP_ERR_STR_(clGetPlatformInfo)
785 #define __GET_DEVICE_IDS_ERR CL_HPP_ERR_STR_(clGetDeviceIDs)
786 #define __GET_PLATFORM_IDS_ERR CL_HPP_ERR_STR_(clGetPlatformIDs)
787 #define __GET_CONTEXT_INFO_ERR CL_HPP_ERR_STR_(clGetContextInfo)
788 #define __GET_EVENT_INFO_ERR CL_HPP_ERR_STR_(clGetEventInfo)
789 #define __GET_EVENT_PROFILE_INFO_ERR CL_HPP_ERR_STR_(clGetEventProfilingInfo)
790 #define __GET_MEM_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetMemObjectInfo)
791 #define __GET_IMAGE_INFO_ERR CL_HPP_ERR_STR_(clGetImageInfo)
792 #define __GET_SAMPLER_INFO_ERR CL_HPP_ERR_STR_(clGetSamplerInfo)
793 #define __GET_KERNEL_INFO_ERR CL_HPP_ERR_STR_(clGetKernelInfo)
794 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
795 #define __GET_KERNEL_ARG_INFO_ERR CL_HPP_ERR_STR_(clGetKernelArgInfo)
796 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
797 #define __GET_KERNEL_WORK_GROUP_INFO_ERR CL_HPP_ERR_STR_(clGetKernelWorkGroupInfo)
798 #define __GET_PROGRAM_INFO_ERR CL_HPP_ERR_STR_(clGetProgramInfo)
799 #define __GET_PROGRAM_BUILD_INFO_ERR CL_HPP_ERR_STR_(clGetProgramBuildInfo)
800 #define __GET_COMMAND_QUEUE_INFO_ERR CL_HPP_ERR_STR_(clGetCommandQueueInfo)
801 
802 #define __CREATE_CONTEXT_ERR CL_HPP_ERR_STR_(clCreateContext)
803 #define __CREATE_CONTEXT_FROM_TYPE_ERR CL_HPP_ERR_STR_(clCreateContextFromType)
804 #define __GET_SUPPORTED_IMAGE_FORMATS_ERR CL_HPP_ERR_STR_(clGetSupportedImageFormats)
805 
806 #define __CREATE_BUFFER_ERR CL_HPP_ERR_STR_(clCreateBuffer)
807 #define __COPY_ERR CL_HPP_ERR_STR_(cl::copy)
808 #define __CREATE_SUBBUFFER_ERR CL_HPP_ERR_STR_(clCreateSubBuffer)
809 #define __CREATE_GL_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer)
810 #define __CREATE_GL_RENDER_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLRenderbuffer)
811 #define __GET_GL_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetGLObjectInfo)
812 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
813 #define __CREATE_IMAGE_ERR CL_HPP_ERR_STR_(clCreateImage)
814 #define __CREATE_GL_TEXTURE_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture)
815 #define __IMAGE_DIMENSION_ERR CL_HPP_ERR_STR_(Incorrect image dimensions)
816 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
817 #define __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR CL_HPP_ERR_STR_(clSetMemObjectDestructorCallback)
818 
819 #define __CREATE_USER_EVENT_ERR CL_HPP_ERR_STR_(clCreateUserEvent)
820 #define __SET_USER_EVENT_STATUS_ERR CL_HPP_ERR_STR_(clSetUserEventStatus)
821 #define __SET_EVENT_CALLBACK_ERR CL_HPP_ERR_STR_(clSetEventCallback)
822 #define __WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clWaitForEvents)
823 
824 #define __CREATE_KERNEL_ERR CL_HPP_ERR_STR_(clCreateKernel)
825 #define __SET_KERNEL_ARGS_ERR CL_HPP_ERR_STR_(clSetKernelArg)
826 #define __CREATE_PROGRAM_WITH_SOURCE_ERR CL_HPP_ERR_STR_(clCreateProgramWithSource)
827 #define __CREATE_PROGRAM_WITH_BINARY_ERR CL_HPP_ERR_STR_(clCreateProgramWithBinary)
828 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
829 #define __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR CL_HPP_ERR_STR_(clCreateProgramWithBuiltInKernels)
830 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
831 #define __BUILD_PROGRAM_ERR CL_HPP_ERR_STR_(clBuildProgram)
832 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
833 #define __COMPILE_PROGRAM_ERR CL_HPP_ERR_STR_(clCompileProgram)
834 #define __LINK_PROGRAM_ERR CL_HPP_ERR_STR_(clLinkProgram)
835 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
836 #define __CREATE_KERNELS_IN_PROGRAM_ERR CL_HPP_ERR_STR_(clCreateKernelsInProgram)
837 
838 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
839 #define __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateCommandQueueWithProperties)
840 #define __CREATE_SAMPLER_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateSamplerWithProperties)
841 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
842 #define __SET_COMMAND_QUEUE_PROPERTY_ERR CL_HPP_ERR_STR_(clSetCommandQueueProperty)
843 #define __ENQUEUE_READ_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueReadBuffer)
844 #define __ENQUEUE_READ_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueReadBufferRect)
845 #define __ENQUEUE_WRITE_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueWriteBuffer)
846 #define __ENQUEUE_WRITE_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueWriteBufferRect)
847 #define __ENQEUE_COPY_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyBuffer)
848 #define __ENQEUE_COPY_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferRect)
849 #define __ENQUEUE_FILL_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueFillBuffer)
850 #define __ENQUEUE_READ_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueReadImage)
851 #define __ENQUEUE_WRITE_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueWriteImage)
852 #define __ENQUEUE_COPY_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyImage)
853 #define __ENQUEUE_FILL_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueFillImage)
854 #define __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyImageToBuffer)
855 #define __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferToImage)
856 #define __ENQUEUE_MAP_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueMapBuffer)
857 #define __ENQUEUE_MAP_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueMapImage)
858 #define __ENQUEUE_UNMAP_MEM_OBJECT_ERR CL_HPP_ERR_STR_(clEnqueueUnmapMemObject)
859 #define __ENQUEUE_NDRANGE_KERNEL_ERR CL_HPP_ERR_STR_(clEnqueueNDRangeKernel)
860 #define __ENQUEUE_NATIVE_KERNEL CL_HPP_ERR_STR_(clEnqueueNativeKernel)
861 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
862 #define __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR CL_HPP_ERR_STR_(clEnqueueMigrateMemObjects)
863 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
864 
865 #define __ENQUEUE_ACQUIRE_GL_ERR CL_HPP_ERR_STR_(clEnqueueAcquireGLObjects)
866 #define __ENQUEUE_RELEASE_GL_ERR CL_HPP_ERR_STR_(clEnqueueReleaseGLObjects)
867 
868 #define __CREATE_PIPE_ERR CL_HPP_ERR_STR_(clCreatePipe)
869 #define __GET_PIPE_INFO_ERR CL_HPP_ERR_STR_(clGetPipeInfo)
870 
871 
872 #define __RETAIN_ERR CL_HPP_ERR_STR_(Retain Object)
873 #define __RELEASE_ERR CL_HPP_ERR_STR_(Release Object)
874 #define __FLUSH_ERR CL_HPP_ERR_STR_(clFlush)
875 #define __FINISH_ERR CL_HPP_ERR_STR_(clFinish)
876 #define __VECTOR_CAPACITY_ERR CL_HPP_ERR_STR_(Vector capacity error)
877 
881 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
882 #define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevices)
883 #else
884 #define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevicesEXT)
885 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
886 
890 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
891 #define __ENQUEUE_MARKER_ERR CL_HPP_ERR_STR_(clEnqueueMarker)
892 #define __ENQUEUE_WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clEnqueueWaitForEvents)
893 #define __ENQUEUE_BARRIER_ERR CL_HPP_ERR_STR_(clEnqueueBarrier)
894 #define __UNLOAD_COMPILER_ERR CL_HPP_ERR_STR_(clUnloadCompiler)
895 #define __CREATE_GL_TEXTURE_2D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture2D)
896 #define __CREATE_GL_TEXTURE_3D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture3D)
897 #define __CREATE_IMAGE2D_ERR CL_HPP_ERR_STR_(clCreateImage2D)
898 #define __CREATE_IMAGE3D_ERR CL_HPP_ERR_STR_(clCreateImage3D)
899 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
900 
904 #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
905 #define __CREATE_COMMAND_QUEUE_ERR CL_HPP_ERR_STR_(clCreateCommandQueue)
906 #define __ENQUEUE_TASK_ERR CL_HPP_ERR_STR_(clEnqueueTask)
907 #define __CREATE_SAMPLER_ERR CL_HPP_ERR_STR_(clCreateSampler)
908 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
909 
913 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
914 #define __ENQUEUE_MARKER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueMarkerWithWaitList)
915 #define __ENQUEUE_BARRIER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueBarrierWithWaitList)
916 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
917 
918 #endif // CL_HPP_USER_OVERRIDE_ERROR_STRINGS
919 
921 
922 namespace detail {
923 
924 // Generic getInfoHelper. The final parameter is used to guide overload
925 // resolution: the argument passed at the call sites is the int literal 0,
926 // and this generic overload takes a long, so any specialization declaring
927 // the tag parameter as an int is the better match whenever it is viable.
928 template<typename Functor, typename T>
929 inline cl_int getInfoHelper(Functor f, cl_uint name, T* param, long)
930 {
931  return f(name, sizeof(T), param, NULL);
932 }
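
/* The long/int pair above is a tag-dispatch trick: the public getInfo()
 * wrappers always pass the literal 0 (an int), so an overload whose tag
 * parameter is int beats this generic one, which needs an int -> long
 * conversion. The same idiom in isolation (illustrative only):
 * \code
 *     template <typename T> int pick(T*, long) { return 0; } // generic
 *     int pick(std::string*, int)              { return 1; } // specialized
 *
 *     std::string s; double d;
 *     // pick(&s, 0) selects the specialization and returns 1;
 *     // pick(&d, 0) falls back to the generic overload and returns 0.
 * \endcode
 */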
933 
934 // Specialized for getInfo<CL_PROGRAM_BINARIES>
935 // Assumes that the output vector was correctly resized on the way in
936 template <typename Func>
937 inline cl_int getInfoHelper(Func f, cl_uint name, vector<vector<unsigned char>>* param, int)
938 {
939  if (name != CL_PROGRAM_BINARIES) {
940  return CL_INVALID_VALUE;
941  }
942  if (param) {
943  // Create array of pointers, calculate total size and pass pointer array in
944  size_type numBinaries = param->size();
945  vector<unsigned char*> binariesPointers(numBinaries);
946 
947  for (size_type i = 0; i < numBinaries; ++i)
948  {
949  binariesPointers[i] = (*param)[i].data();
950  }
951 
952  cl_int err = f(name, numBinaries * sizeof(unsigned char*), binariesPointers.data(), NULL);
953 
954  if (err != CL_SUCCESS) {
955  return err;
956  }
957  }
958 
959 
960  return CL_SUCCESS;
961 }
962 
963 // Specialized getInfoHelper for vector params
964 template <typename Func, typename T>
965 inline cl_int getInfoHelper(Func f, cl_uint name, vector<T>* param, long)
966 {
967  size_type required;
968  cl_int err = f(name, 0, NULL, &required);
969  if (err != CL_SUCCESS) {
970  return err;
971  }
972  const size_type elements = required / sizeof(T);
973 
974  // Temporary to avoid changing param on an error
975  vector<T> localData(elements);
976  err = f(name, required, localData.data(), NULL);
977  if (err != CL_SUCCESS) {
978  return err;
979  }
980  if (param) {
981  *param = std::move(localData);
982  }
983 
984  return CL_SUCCESS;
985 }
986 
987 /* Specialization for reference-counted types. This depends on the
988  * existence of Wrapper<T>::cl_type, and none of the other types having the
989  * cl_type member. Note that simply specifying the parameter as Wrapper<T>
990  * does not work, because when using a derived type (e.g. Context) the generic
991  * template will provide a better match.
992  */
993 template <typename Func, typename T>
994 inline cl_int getInfoHelper(
995  Func f, cl_uint name, vector<T>* param, int, typename T::cl_type = 0)
996 {
997  size_type required;
998  cl_int err = f(name, 0, NULL, &required);
999  if (err != CL_SUCCESS) {
1000  return err;
1001  }
1002 
1003  const size_type elements = required / sizeof(typename T::cl_type);
1004 
1005  vector<typename T::cl_type> value(elements);
1006  err = f(name, required, value.data(), NULL);
1007  if (err != CL_SUCCESS) {
1008  return err;
1009  }
1010 
1011  if (param) {
1012  // Assign to convert CL type to T for each element
1013  param->resize(elements);
1014 
1015  // Assign to param, constructing with retain behaviour
1016  // to correctly capture each underlying CL object
1017  for (size_type i = 0; i < elements; i++) {
1018  (*param)[i] = T(value[i], true);
1019  }
1020  }
1021  return CL_SUCCESS;
1022 }
1023 
1024 // Specialized GetInfoHelper for string params
1025 template <typename Func>
1026 inline cl_int getInfoHelper(Func f, cl_uint name, string* param, long)
1027 {
1028  size_type required;
1029  cl_int err = f(name, 0, NULL, &required);
1030  if (err != CL_SUCCESS) {
1031  return err;
1032  }
1033 
1034  // std::string only exposes const access to its buffer (data() is const
1035  // in C++11), so read into a char vector first and then assign
1036  if (required > 0) {
1037  vector<char> value(required);
1038  err = f(name, required, value.data(), NULL);
1039  if (err != CL_SUCCESS) {
1040  return err;
1041  }
1042  if (param) {
1043  param->assign(begin(value), prev(end(value)));
1044  }
1045  }
1046  else if (param) {
1047  param->assign("");
1048  }
1049  return CL_SUCCESS;
1050 }
1051 
1052 // Specialized getInfoHelper for array<size_type, N> params
1053 template <typename Func, size_type N>
1054 inline cl_int getInfoHelper(Func f, cl_uint name, array<size_type, N>* param, long)
1055 {
1056  size_type required;
1057  cl_int err = f(name, 0, NULL, &required);
1058  if (err != CL_SUCCESS) {
1059  return err;
1060  }
1061 
1062  size_type elements = required / sizeof(size_type);
1063  vector<size_type> value(elements, 0);
1064 
1065  err = f(name, required, value.data(), NULL);
1066  if (err != CL_SUCCESS) {
1067  return err;
1068  }
1069 
1070  // Bound the copy with N to prevent overruns
1071  // if passed N > than the amount copied
1072  if (elements > N) {
1073  elements = N;
1074  }
1075  for (size_type i = 0; i < elements; ++i) {
1076  (*param)[i] = value[i];
1077  }
1078 
1079  return CL_SUCCESS;
1080 }
1081 
1082 template<typename T> struct ReferenceHandler;
1083 
1084 /* Specialization for reference-counted types. This depends on the
1085  * existence of Wrapper<T>::cl_type, and none of the other types having the
1086  * cl_type member. Note that simply specifying the parameter as Wrapper<T>
1087  * does not work, because when using a derived type (e.g. Context) the generic
1088  * template will provide a better match.
1089  */
1090 template<typename Func, typename T>
1091 inline cl_int getInfoHelper(Func f, cl_uint name, T* param, int, typename T::cl_type = 0)
1092 {
1093  typename T::cl_type value;
1094  cl_int err = f(name, sizeof(value), &value, NULL);
1095  if (err != CL_SUCCESS) {
1096  return err;
1097  }
1098  *param = value;
1099  if (value != NULL)
1100  {
1101  err = param->retain();
1102  if (err != CL_SUCCESS) {
1103  return err;
1104  }
1105  }
1106  return CL_SUCCESS;
1107 }
1108 
1109 #define CL_HPP_PARAM_NAME_INFO_1_0_(F) \
1110  F(cl_platform_info, CL_PLATFORM_PROFILE, string) \
1111  F(cl_platform_info, CL_PLATFORM_VERSION, string) \
1112  F(cl_platform_info, CL_PLATFORM_NAME, string) \
1113  F(cl_platform_info, CL_PLATFORM_VENDOR, string) \
1114  F(cl_platform_info, CL_PLATFORM_EXTENSIONS, string) \
1115  \
1116  F(cl_device_info, CL_DEVICE_TYPE, cl_device_type) \
1117  F(cl_device_info, CL_DEVICE_VENDOR_ID, cl_uint) \
1118  F(cl_device_info, CL_DEVICE_MAX_COMPUTE_UNITS, cl_uint) \
1119  F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, cl_uint) \
1120  F(cl_device_info, CL_DEVICE_MAX_WORK_GROUP_SIZE, size_type) \
1121  F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_SIZES, cl::vector<size_type>) \
1122  F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR, cl_uint) \
1123  F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT, cl_uint) \
1124  F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, cl_uint) \
1125  F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, cl_uint) \
1126  F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT, cl_uint) \
1127  F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE, cl_uint) \
1128  F(cl_device_info, CL_DEVICE_MAX_CLOCK_FREQUENCY, cl_uint) \
1129  F(cl_device_info, CL_DEVICE_ADDRESS_BITS, cl_uint) \
1130  F(cl_device_info, CL_DEVICE_MAX_READ_IMAGE_ARGS, cl_uint) \
1131  F(cl_device_info, CL_DEVICE_MAX_WRITE_IMAGE_ARGS, cl_uint) \
1132  F(cl_device_info, CL_DEVICE_MAX_MEM_ALLOC_SIZE, cl_ulong) \
1133  F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_WIDTH, size_type) \
1134  F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_HEIGHT, size_type) \
1135  F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_WIDTH, size_type) \
1136  F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_HEIGHT, size_type) \
1137  F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_DEPTH, size_type) \
1138  F(cl_device_info, CL_DEVICE_IMAGE_SUPPORT, cl_bool) \
1139  F(cl_device_info, CL_DEVICE_MAX_PARAMETER_SIZE, size_type) \
1140  F(cl_device_info, CL_DEVICE_MAX_SAMPLERS, cl_uint) \
1141  F(cl_device_info, CL_DEVICE_MEM_BASE_ADDR_ALIGN, cl_uint) \
1142  F(cl_device_info, CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE, cl_uint) \
1143  F(cl_device_info, CL_DEVICE_SINGLE_FP_CONFIG, cl_device_fp_config) \
1144  F(cl_device_info, CL_DEVICE_DOUBLE_FP_CONFIG, cl_device_fp_config) \
1145  F(cl_device_info, CL_DEVICE_HALF_FP_CONFIG, cl_device_fp_config) \
1146  F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, cl_device_mem_cache_type) \
1147  F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, cl_uint)\
1148  F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_SIZE, cl_ulong) \
1149  F(cl_device_info, CL_DEVICE_GLOBAL_MEM_SIZE, cl_ulong) \
1150  F(cl_device_info, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, cl_ulong) \
1151  F(cl_device_info, CL_DEVICE_MAX_CONSTANT_ARGS, cl_uint) \
1152  F(cl_device_info, CL_DEVICE_LOCAL_MEM_TYPE, cl_device_local_mem_type) \
1153  F(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE, cl_ulong) \
1154  F(cl_device_info, CL_DEVICE_ERROR_CORRECTION_SUPPORT, cl_bool) \
1155  F(cl_device_info, CL_DEVICE_PROFILING_TIMER_RESOLUTION, size_type) \
1156  F(cl_device_info, CL_DEVICE_ENDIAN_LITTLE, cl_bool) \
1157  F(cl_device_info, CL_DEVICE_AVAILABLE, cl_bool) \
1158  F(cl_device_info, CL_DEVICE_COMPILER_AVAILABLE, cl_bool) \
1159  F(cl_device_info, CL_DEVICE_EXECUTION_CAPABILITIES, cl_device_exec_capabilities) \
1160  F(cl_device_info, CL_DEVICE_PLATFORM, cl_platform_id) \
1161  F(cl_device_info, CL_DEVICE_NAME, string) \
1162  F(cl_device_info, CL_DEVICE_VENDOR, string) \
1163  F(cl_device_info, CL_DRIVER_VERSION, string) \
1164  F(cl_device_info, CL_DEVICE_PROFILE, string) \
1165  F(cl_device_info, CL_DEVICE_VERSION, string) \
1166  F(cl_device_info, CL_DEVICE_EXTENSIONS, string) \
1167  \
1168  F(cl_context_info, CL_CONTEXT_REFERENCE_COUNT, cl_uint) \
1169  F(cl_context_info, CL_CONTEXT_DEVICES, cl::vector<Device>) \
1170  F(cl_context_info, CL_CONTEXT_PROPERTIES, cl::vector<cl_context_properties>) \
1171  \
1172  F(cl_event_info, CL_EVENT_COMMAND_QUEUE, cl::CommandQueue) \
1173  F(cl_event_info, CL_EVENT_COMMAND_TYPE, cl_command_type) \
1174  F(cl_event_info, CL_EVENT_REFERENCE_COUNT, cl_uint) \
1175  F(cl_event_info, CL_EVENT_COMMAND_EXECUTION_STATUS, cl_int) \
1176  \
1177  F(cl_profiling_info, CL_PROFILING_COMMAND_QUEUED, cl_ulong) \
1178  F(cl_profiling_info, CL_PROFILING_COMMAND_SUBMIT, cl_ulong) \
1179  F(cl_profiling_info, CL_PROFILING_COMMAND_START, cl_ulong) \
1180  F(cl_profiling_info, CL_PROFILING_COMMAND_END, cl_ulong) \
1181  \
1182  F(cl_mem_info, CL_MEM_TYPE, cl_mem_object_type) \
1183  F(cl_mem_info, CL_MEM_FLAGS, cl_mem_flags) \
1184  F(cl_mem_info, CL_MEM_SIZE, size_type) \
1185  F(cl_mem_info, CL_MEM_HOST_PTR, void*) \
1186  F(cl_mem_info, CL_MEM_MAP_COUNT, cl_uint) \
1187  F(cl_mem_info, CL_MEM_REFERENCE_COUNT, cl_uint) \
1188  F(cl_mem_info, CL_MEM_CONTEXT, cl::Context) \
1189  \
1190  F(cl_image_info, CL_IMAGE_FORMAT, cl_image_format) \
1191  F(cl_image_info, CL_IMAGE_ELEMENT_SIZE, size_type) \
1192  F(cl_image_info, CL_IMAGE_ROW_PITCH, size_type) \
1193  F(cl_image_info, CL_IMAGE_SLICE_PITCH, size_type) \
1194  F(cl_image_info, CL_IMAGE_WIDTH, size_type) \
1195  F(cl_image_info, CL_IMAGE_HEIGHT, size_type) \
1196  F(cl_image_info, CL_IMAGE_DEPTH, size_type) \
1197  \
1198  F(cl_sampler_info, CL_SAMPLER_REFERENCE_COUNT, cl_uint) \
1199  F(cl_sampler_info, CL_SAMPLER_CONTEXT, cl::Context) \
1200  F(cl_sampler_info, CL_SAMPLER_NORMALIZED_COORDS, cl_bool) \
1201  F(cl_sampler_info, CL_SAMPLER_ADDRESSING_MODE, cl_addressing_mode) \
1202  F(cl_sampler_info, CL_SAMPLER_FILTER_MODE, cl_filter_mode) \
1203  \
1204  F(cl_program_info, CL_PROGRAM_REFERENCE_COUNT, cl_uint) \
1205  F(cl_program_info, CL_PROGRAM_CONTEXT, cl::Context) \
1206  F(cl_program_info, CL_PROGRAM_NUM_DEVICES, cl_uint) \
1207  F(cl_program_info, CL_PROGRAM_DEVICES, cl::vector<Device>) \
1208  F(cl_program_info, CL_PROGRAM_SOURCE, string) \
1209  F(cl_program_info, CL_PROGRAM_BINARY_SIZES, cl::vector<size_type>) \
1210  F(cl_program_info, CL_PROGRAM_BINARIES, cl::vector<cl::vector<unsigned char>>) \
1211  \
1212  F(cl_program_build_info, CL_PROGRAM_BUILD_STATUS, cl_build_status) \
1213  F(cl_program_build_info, CL_PROGRAM_BUILD_OPTIONS, string) \
1214  F(cl_program_build_info, CL_PROGRAM_BUILD_LOG, string) \
1215  \
1216  F(cl_kernel_info, CL_KERNEL_FUNCTION_NAME, string) \
1217  F(cl_kernel_info, CL_KERNEL_NUM_ARGS, cl_uint) \
1218  F(cl_kernel_info, CL_KERNEL_REFERENCE_COUNT, cl_uint) \
1219  F(cl_kernel_info, CL_KERNEL_CONTEXT, cl::Context) \
1220  F(cl_kernel_info, CL_KERNEL_PROGRAM, cl::Program) \
1221  \
1222  F(cl_kernel_work_group_info, CL_KERNEL_WORK_GROUP_SIZE, size_type) \
1223  F(cl_kernel_work_group_info, CL_KERNEL_COMPILE_WORK_GROUP_SIZE, cl::detail::size_t_array) \
1224  F(cl_kernel_work_group_info, CL_KERNEL_LOCAL_MEM_SIZE, cl_ulong) \
1225  \
1226  F(cl_command_queue_info, CL_QUEUE_CONTEXT, cl::Context) \
1227  F(cl_command_queue_info, CL_QUEUE_DEVICE, cl::Device) \
1228  F(cl_command_queue_info, CL_QUEUE_REFERENCE_COUNT, cl_uint) \
1229  F(cl_command_queue_info, CL_QUEUE_PROPERTIES, cl_command_queue_properties)
1230 
1231 
1232 #define CL_HPP_PARAM_NAME_INFO_1_1_(F) \
1233  F(cl_context_info, CL_CONTEXT_NUM_DEVICES, cl_uint)\
1234  F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF, cl_uint) \
1235  F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, cl_uint) \
1236  F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, cl_uint) \
1237  F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, cl_uint) \
1238  F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, cl_uint) \
1239  F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, cl_uint) \
1240  F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, cl_uint) \
1241  F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, cl_uint) \
1242  F(cl_device_info, CL_DEVICE_OPENCL_C_VERSION, string) \
1243  \
1244  F(cl_mem_info, CL_MEM_ASSOCIATED_MEMOBJECT, cl::Memory) \
1245  F(cl_mem_info, CL_MEM_OFFSET, size_type) \
1246  \
1247  F(cl_kernel_work_group_info, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, size_type) \
1248  F(cl_kernel_work_group_info, CL_KERNEL_PRIVATE_MEM_SIZE, cl_ulong) \
1249  \
1250  F(cl_event_info, CL_EVENT_CONTEXT, cl::Context)
1251 
1252 #define CL_HPP_PARAM_NAME_INFO_1_2_(F) \
1253  F(cl_program_info, CL_PROGRAM_NUM_KERNELS, size_type) \
1254  F(cl_program_info, CL_PROGRAM_KERNEL_NAMES, string) \
1255  \
1256  F(cl_program_build_info, CL_PROGRAM_BINARY_TYPE, cl_program_binary_type) \
1257  \
1258  F(cl_kernel_info, CL_KERNEL_ATTRIBUTES, string) \
1259  \
1260  F(cl_kernel_arg_info, CL_KERNEL_ARG_ADDRESS_QUALIFIER, cl_kernel_arg_address_qualifier) \
1261  F(cl_kernel_arg_info, CL_KERNEL_ARG_ACCESS_QUALIFIER, cl_kernel_arg_access_qualifier) \
1262  F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_NAME, string) \
1263  F(cl_kernel_arg_info, CL_KERNEL_ARG_NAME, string) \
1264  F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_QUALIFIER, cl_kernel_arg_type_qualifier) \
1265  \
1266  F(cl_device_info, CL_DEVICE_PARENT_DEVICE, cl::Device) \
1267  F(cl_device_info, CL_DEVICE_PARTITION_PROPERTIES, cl::vector<cl_device_partition_property>) \
1268  F(cl_device_info, CL_DEVICE_PARTITION_TYPE, cl::vector<cl_device_partition_property>) \
1269  F(cl_device_info, CL_DEVICE_REFERENCE_COUNT, cl_uint) \
1270  F(cl_device_info, CL_DEVICE_PREFERRED_INTEROP_USER_SYNC, size_type) \
1271  F(cl_device_info, CL_DEVICE_PARTITION_AFFINITY_DOMAIN, cl_device_affinity_domain) \
1272  F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS, string) \
1273  \
1274  F(cl_image_info, CL_IMAGE_ARRAY_SIZE, size_type) \
1275  F(cl_image_info, CL_IMAGE_NUM_MIP_LEVELS, cl_uint) \
1276  F(cl_image_info, CL_IMAGE_NUM_SAMPLES, cl_uint)
1277 
1278 #define CL_HPP_PARAM_NAME_INFO_2_0_(F) \
1279  F(cl_device_info, CL_DEVICE_QUEUE_ON_HOST_PROPERTIES, cl_command_queue_properties) \
1280  F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES, cl_command_queue_properties) \
1281  F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE, cl_uint) \
1282  F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE, cl_uint) \
1283  F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_QUEUES, cl_uint) \
1284  F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_EVENTS, cl_uint) \
1285  F(cl_device_info, CL_DEVICE_MAX_PIPE_ARGS, cl_uint) \
1286  F(cl_device_info, CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS, cl_uint) \
1287  F(cl_device_info, CL_DEVICE_PIPE_MAX_PACKET_SIZE, cl_uint) \
1288  F(cl_device_info, CL_DEVICE_SVM_CAPABILITIES, cl_device_svm_capabilities) \
1289  F(cl_device_info, CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT, cl_uint) \
1290  F(cl_device_info, CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT, cl_uint) \
1291  F(cl_device_info, CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT, cl_uint) \
1292  F(cl_command_queue_info, CL_QUEUE_SIZE, cl_uint) \
1293  F(cl_mem_info, CL_MEM_USES_SVM_POINTER, cl_bool) \
1294  F(cl_program_build_info, CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE, size_type) \
1295  F(cl_pipe_info, CL_PIPE_PACKET_SIZE, cl_uint) \
1296  F(cl_pipe_info, CL_PIPE_MAX_PACKETS, cl_uint)
1297 
1298 #define CL_HPP_PARAM_NAME_DEVICE_FISSION_(F) \
1299  F(cl_device_info, CL_DEVICE_PARENT_DEVICE_EXT, cl_device_id) \
1300  F(cl_device_info, CL_DEVICE_PARTITION_TYPES_EXT, cl::vector<cl_device_partition_property_ext>) \
1301  F(cl_device_info, CL_DEVICE_AFFINITY_DOMAINS_EXT, cl::vector<cl_device_partition_property_ext>) \
1302  F(cl_device_info, CL_DEVICE_REFERENCE_COUNT_EXT , cl_uint) \
1303  F(cl_device_info, CL_DEVICE_PARTITION_STYLE_EXT, cl::vector<cl_device_partition_property_ext>)
1304 
1305 template <typename enum_type, cl_int Name>
1306 struct param_traits {};
1307 
1308 #define CL_HPP_DECLARE_PARAM_TRAITS_(token, param_name, T) \
1309 struct token; \
1310 template<> \
1311 struct param_traits<detail:: token,param_name> \
1312 { \
1313  enum { value = param_name }; \
1314  typedef T param_type; \
1315 };
1316 
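
/* For reference, one expansion of the macro above: applied to the
 * (cl_device_info, CL_DEVICE_NAME, string) row it produces roughly
 * \code
 *     struct cl_device_info;
 *     template<>
 *     struct param_traits<detail::cl_device_info, CL_DEVICE_NAME>
 *     {
 *         enum { value = CL_DEVICE_NAME };
 *         typedef string param_type;
 *     };
 * \endcode
 * which is what allows the typed query Device::getInfo<CL_DEVICE_NAME>()
 * to deduce a string return type.
 */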
1317 CL_HPP_PARAM_NAME_INFO_1_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1318 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
1319 CL_HPP_PARAM_NAME_INFO_1_1_(CL_HPP_DECLARE_PARAM_TRAITS_)
1320 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
1321 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1322 CL_HPP_PARAM_NAME_INFO_1_2_(CL_HPP_DECLARE_PARAM_TRAITS_)
1323 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
1324 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
1325 CL_HPP_PARAM_NAME_INFO_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1326 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
1327 
1328 
1329 // Flags deprecated in OpenCL 2.0
1330 #define CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(F) \
1331  F(cl_device_info, CL_DEVICE_QUEUE_PROPERTIES, cl_command_queue_properties)
1332 
1333 #define CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(F) \
1334  F(cl_device_info, CL_DEVICE_HOST_UNIFIED_MEMORY, cl_bool)
1335 
1336 #define CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(F) \
1337  F(cl_image_info, CL_IMAGE_BUFFER, cl::Buffer)
1338 
1339 // Include deprecated query flags based on versions
1340 // Only include deprecated 1.0 flags if 2.0 not active as there is an enum clash
1341 #if CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
1342 CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1343 #endif // CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
1344 #if CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
1345 CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1346 #endif // CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
1347 #if CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
1348 CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1349 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
1350 
1351 #if defined(CL_HPP_USE_CL_DEVICE_FISSION)
1352 CL_HPP_PARAM_NAME_DEVICE_FISSION_(CL_HPP_DECLARE_PARAM_TRAITS_);
1353 #endif // CL_HPP_USE_CL_DEVICE_FISSION
1354 
1355 #ifdef CL_PLATFORM_ICD_SUFFIX_KHR
1356 CL_HPP_DECLARE_PARAM_TRAITS_(cl_platform_info, CL_PLATFORM_ICD_SUFFIX_KHR, string)
1357 #endif
1358 
1359 #ifdef CL_DEVICE_PROFILING_TIMER_OFFSET_AMD
1360 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_PROFILING_TIMER_OFFSET_AMD, cl_ulong)
1361 #endif
1362 
1363 #ifdef CL_DEVICE_GLOBAL_FREE_MEMORY_AMD
1364 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_FREE_MEMORY_AMD, vector<size_type>)
1365 #endif
1366 #ifdef CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD
1367 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD, cl_uint)
1368 #endif
1369 #ifdef CL_DEVICE_SIMD_WIDTH_AMD
1370 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_WIDTH_AMD, cl_uint)
1371 #endif
1372 #ifdef CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD
1373 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD, cl_uint)
1374 #endif
1375 #ifdef CL_DEVICE_WAVEFRONT_WIDTH_AMD
1376 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WAVEFRONT_WIDTH_AMD, cl_uint)
1377 #endif
1378 #ifdef CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD
1379 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD, cl_uint)
1380 #endif
1381 #ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD
1382 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD, cl_uint)
1383 #endif
1384 #ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD
1385 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD, cl_uint)
1386 #endif
1387 #ifdef CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD
1388 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD, cl_uint)
1389 #endif
1390 #ifdef CL_DEVICE_LOCAL_MEM_BANKS_AMD
1391 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_BANKS_AMD, cl_uint)
1392 #endif
1393 
1394 #ifdef CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV
1395 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, cl_uint)
1396 #endif
1397 #ifdef CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV
1398 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, cl_uint)
1399 #endif
1400 #ifdef CL_DEVICE_REGISTERS_PER_BLOCK_NV
1401 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_REGISTERS_PER_BLOCK_NV, cl_uint)
1402 #endif
1403 #ifdef CL_DEVICE_WARP_SIZE_NV
1404 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WARP_SIZE_NV, cl_uint)
1405 #endif
1406 #ifdef CL_DEVICE_GPU_OVERLAP_NV
1407 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GPU_OVERLAP_NV, cl_bool)
1408 #endif
1409 #ifdef CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV
1410 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, cl_bool)
1411 #endif
1412 #ifdef CL_DEVICE_INTEGRATED_MEMORY_NV
1413 CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGRATED_MEMORY_NV, cl_bool)
1414 #endif
1415 
1416 // Convenience functions
1417 
1418 template <typename Func, typename T>
1419 inline cl_int
1420 getInfo(Func f, cl_uint name, T* param)
1421 {
1422  return getInfoHelper(f, name, param, 0);
1423 }
1424 
1425 template <typename Func, typename Arg0>
1426 struct GetInfoFunctor0
1427 {
1428  Func f_; const Arg0& arg0_;
1429  cl_int operator ()(
1430  cl_uint param, size_type size, void* value, size_type* size_ret)
1431  { return f_(arg0_, param, size, value, size_ret); }
1432 };
1433 
1434 template <typename Func, typename Arg0, typename Arg1>
1435 struct GetInfoFunctor1
1436 {
1437  Func f_; const Arg0& arg0_; const Arg1& arg1_;
1438  cl_int operator ()(
1439  cl_uint param, size_type size, void* value, size_type* size_ret)
1440  { return f_(arg0_, arg1_, param, size, value, size_ret); }
1441 };
1442 
1443 template <typename Func, typename Arg0, typename T>
1444 inline cl_int
1445 getInfo(Func f, const Arg0& arg0, cl_uint name, T* param)
1446 {
1447  GetInfoFunctor0<Func, Arg0> f0 = { f, arg0 };
1448  return getInfoHelper(f0, name, param, 0);
1449 }
1450 
1451 template <typename Func, typename Arg0, typename Arg1, typename T>
1452 inline cl_int
1453 getInfo(Func f, const Arg0& arg0, const Arg1& arg1, cl_uint name, T* param)
1454 {
1455  GetInfoFunctor1<Func, Arg0, Arg1> f0 = { f, arg0, arg1 };
1456  return getInfoHelper(f0, name, param, 0);
1457 }
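
/* Example: how the bindings use these helpers internally. The functor binds
 * the leading object argument so getInfoHelper only sees the generic
 * (name, size, value, size_ret) signature. A sketch, assuming `device` holds
 * a valid cl_device_id (user code normally calls cl::Device::getInfo instead):
 * \code
 *     cl::string name;
 *     cl::detail::getInfo(&::clGetDeviceInfo, device, CL_DEVICE_NAME, &name);
 * \endcode
 */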
1458 
1459 
1460 template<typename T>
1461 struct ReferenceHandler
1462 { };
1463 
1464 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1465 
1468 template <>
1469 struct ReferenceHandler<cl_device_id>
1470 {
1480  static cl_int retain(cl_device_id device)
1481  { return ::clRetainDevice(device); }
1491  static cl_int release(cl_device_id device)
1492  { return ::clReleaseDevice(device); }
1493 };
1494 #else // CL_HPP_TARGET_OPENCL_VERSION >= 120
1495 
1498 template <>
1499 struct ReferenceHandler<cl_device_id>
1500 {
1501  // cl_device_id does not have retain().
1502  static cl_int retain(cl_device_id)
1503  { return CL_SUCCESS; }
1504  // cl_device_id does not have release().
1505  static cl_int release(cl_device_id)
1506  { return CL_SUCCESS; }
1507 };
1508 #endif // ! (CL_HPP_TARGET_OPENCL_VERSION >= 120)
1509 
1510 template <>
1511 struct ReferenceHandler<cl_platform_id>
1512 {
1513  // cl_platform_id does not have retain().
1514  static cl_int retain(cl_platform_id)
1515  { return CL_SUCCESS; }
1516  // cl_platform_id does not have release().
1517  static cl_int release(cl_platform_id)
1518  { return CL_SUCCESS; }
1519 };
1520 
1521 template <>
1522 struct ReferenceHandler<cl_context>
1523 {
1524  static cl_int retain(cl_context context)
1525  { return ::clRetainContext(context); }
1526  static cl_int release(cl_context context)
1527  { return ::clReleaseContext(context); }
1528 };
1529 
1530 template <>
1531 struct ReferenceHandler<cl_command_queue>
1532 {
1533  static cl_int retain(cl_command_queue queue)
1534  { return ::clRetainCommandQueue(queue); }
1535  static cl_int release(cl_command_queue queue)
1536  { return ::clReleaseCommandQueue(queue); }
1537 };
1538 
1539 template <>
1540 struct ReferenceHandler<cl_mem>
1541 {
1542  static cl_int retain(cl_mem memory)
1543  { return ::clRetainMemObject(memory); }
1544  static cl_int release(cl_mem memory)
1545  { return ::clReleaseMemObject(memory); }
1546 };
1547 
1548 template <>
1549 struct ReferenceHandler<cl_sampler>
1550 {
1551  static cl_int retain(cl_sampler sampler)
1552  { return ::clRetainSampler(sampler); }
1553  static cl_int release(cl_sampler sampler)
1554  { return ::clReleaseSampler(sampler); }
1555 };
1556 
1557 template <>
1558 struct ReferenceHandler<cl_program>
1559 {
1560  static cl_int retain(cl_program program)
1561  { return ::clRetainProgram(program); }
1562  static cl_int release(cl_program program)
1563  { return ::clReleaseProgram(program); }
1564 };
1565 
1566 template <>
1567 struct ReferenceHandler<cl_kernel>
1568 {
1569  static cl_int retain(cl_kernel kernel)
1570  { return ::clRetainKernel(kernel); }
1571  static cl_int release(cl_kernel kernel)
1572  { return ::clReleaseKernel(kernel); }
1573 };
1574 
1575 template <>
1576 struct ReferenceHandler<cl_event>
1577 {
1578  static cl_int retain(cl_event event)
1579  { return ::clRetainEvent(event); }
1580  static cl_int release(cl_event event)
1581  { return ::clReleaseEvent(event); }
1582 };
1583 
1584 
1585 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
1586 // Extracts version number with major in the upper 16 bits, minor in the lower 16
1587 static cl_uint getVersion(const vector<char> &versionInfo)
1588 {
1589  int highVersion = 0;
1590  int lowVersion = 0;
1591  int index = 7;
1592  while(versionInfo[index] != '.' ) {
1593  highVersion *= 10;
1594  highVersion += versionInfo[index]-'0';
1595  ++index;
1596  }
1597  ++index;
1598  while(versionInfo[index] != ' ' && versionInfo[index] != '\0') {
1599  lowVersion *= 10;
1600  lowVersion += versionInfo[index]-'0';
1601  ++index;
1602  }
1603  return (highVersion << 16) | lowVersion;
1604 }
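
/* Worked example of the encoding: for a CL_PLATFORM_VERSION string such as
 * "OpenCL 2.1 <vendor specific>", parsing starts at index 7 (just past the
 * "OpenCL " prefix), so getVersion() returns (2 << 16) | 1 == 0x20001, while
 * an OpenCL 1.2 platform yields 0x10002.
 */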
1605 
1606 static cl_uint getPlatformVersion(cl_platform_id platform)
1607 {
1608  size_type size = 0;
1609  clGetPlatformInfo(platform, CL_PLATFORM_VERSION, 0, NULL, &size);
1610 
1611  vector<char> versionInfo(size);
1612  clGetPlatformInfo(platform, CL_PLATFORM_VERSION, size, versionInfo.data(), &size);
1613  return getVersion(versionInfo);
1614 }
1615 
1616 static cl_uint getDevicePlatformVersion(cl_device_id device)
1617 {
1618  cl_platform_id platform;
1619  clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform), &platform, NULL);
1620  return getPlatformVersion(platform);
1621 }
1622 
1623 static cl_uint getContextPlatformVersion(cl_context context)
1624 {
1625  // The platform cannot be queried directly, so we first have to grab a
1626  // device and obtain its platform
1627  size_type size = 0;
1628  clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &size);
1629  if (size == 0)
1630  return 0;
1631  vector<cl_device_id> devices(size/sizeof(cl_device_id));
1632  clGetContextInfo(context, CL_CONTEXT_DEVICES, size, devices.data(), NULL);
1633  return getDevicePlatformVersion(devices[0]);
1634 }
1635 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
1636 
1637 template <typename T>
1638 class Wrapper
1639 {
1640 public:
1641  typedef T cl_type;
1642 
1643 protected:
1644  cl_type object_;
1645 
1646 public:
1647  Wrapper() : object_(NULL) { }
1648 
1649  Wrapper(const cl_type &obj, bool retainObject) : object_(obj)
1650  {
1651  if (retainObject) {
1652  detail::errHandler(retain(), __RETAIN_ERR);
1653  }
1654  }
1655 
1656  ~Wrapper()
1657  {
1658  if (object_ != NULL) { release(); }
1659  }
1660 
1661  Wrapper(const Wrapper<cl_type>& rhs)
1662  {
1663  object_ = rhs.object_;
1664  detail::errHandler(retain(), __RETAIN_ERR);
1665  }
1666 
1667  Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
1668  {
1669  object_ = rhs.object_;
1670  rhs.object_ = NULL;
1671  }
1672 
1673  Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
1674  {
1675  if (this != &rhs) {
1676  detail::errHandler(release(), __RELEASE_ERR);
1677  object_ = rhs.object_;
1678  detail::errHandler(retain(), __RETAIN_ERR);
1679  }
1680  return *this;
1681  }
1682 
1683  Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
1684  {
1685  if (this != &rhs) {
1686  detail::errHandler(release(), __RELEASE_ERR);
1687  object_ = rhs.object_;
1688  rhs.object_ = NULL;
1689  }
1690  return *this;
1691  }
1692 
1693  Wrapper<cl_type>& operator = (const cl_type &rhs)
1694  {
1695  detail::errHandler(release(), __RELEASE_ERR);
1696  object_ = rhs;
1697  return *this;
1698  }
1699 
1700  const cl_type& operator ()() const { return object_; }
1701 
1702  cl_type& operator ()() { return object_; }
1703 
1704  const cl_type get() const { return object_; }
1705 
1706  cl_type get() { return object_; }
1707 
1708 
1709 protected:
1710  template<typename Func, typename U>
1711  friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
1712 
1713  cl_int retain() const
1714  {
1715  if (object_ != nullptr) {
1716  return ReferenceHandler<cl_type>::retain(object_);
1717  }
1718  else {
1719  return CL_SUCCESS;
1720  }
1721  }
1722 
1723  cl_int release() const
1724  {
1725  if (object_ != nullptr) {
1726  return ReferenceHandler<cl_type>::release(object_);
1727  }
1728  else {
1729  return CL_SUCCESS;
1730  }
1731  }
1732 };
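
/* Example: the RAII behaviour of the wrappers in terms of the underlying CL
 * reference counts. A sketch, assuming `mem` is a valid cl_mem holding one
 * reference (cl::Buffer derives from Wrapper<cl_mem>):
 * \code
 *     {
 *         cl::Buffer a(mem, true);     // retainObject: refcount -> 2
 *         cl::Buffer b = a;            // copy retains:  refcount -> 3
 *         cl::Buffer c = std::move(a); // move transfers, no extra retain
 *     }                                // b and c release: refcount -> 1
 * \endcode
 */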
1733 
1734 template <>
1735 class Wrapper<cl_device_id>
1736 {
1737 public:
1738  typedef cl_device_id cl_type;
1739 
1740 protected:
1741  cl_type object_;
1742  bool referenceCountable_;
1743 
1744  static bool isReferenceCountable(cl_device_id device)
1745  {
1746  bool retVal = false;
1747 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1748 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
1749  if (device != NULL) {
1750  int version = getDevicePlatformVersion(device);
1751  if(version > ((1 << 16) + 1)) {
1752  retVal = true;
1753  }
1754  }
1755 #else // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1756  retVal = true;
1757 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1758 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
1759  return retVal;
1760  }
1761 
1762 public:
1763  Wrapper() : object_(NULL), referenceCountable_(false)
1764  {
1765  }
1766 
1767  Wrapper(const cl_type &obj, bool retainObject) :
1768  object_(obj),
1769  referenceCountable_(false)
1770  {
1771  referenceCountable_ = isReferenceCountable(obj);
1772 
1773  if (retainObject) {
1774  detail::errHandler(retain(), __RETAIN_ERR);
1775  }
1776  }
1777 
1778  ~Wrapper()
1779  {
1780  release();
1781  }
1782 
1783  Wrapper(const Wrapper<cl_type>& rhs)
1784  {
1785  object_ = rhs.object_;
1786  referenceCountable_ = isReferenceCountable(object_);
1787  detail::errHandler(retain(), __RETAIN_ERR);
1788  }
1789 
1790  Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
1791  {
1792  object_ = rhs.object_;
1793  referenceCountable_ = rhs.referenceCountable_;
1794  rhs.object_ = NULL;
1795  rhs.referenceCountable_ = false;
1796  }
1797 
1798  Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
1799  {
1800  if (this != &rhs) {
1801  detail::errHandler(release(), __RELEASE_ERR);
1802  object_ = rhs.object_;
1803  referenceCountable_ = rhs.referenceCountable_;
1804  detail::errHandler(retain(), __RETAIN_ERR);
1805  }
1806  return *this;
1807  }
1808 
1809  Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
1810  {
1811  if (this != &rhs) {
1812  detail::errHandler(release(), __RELEASE_ERR);
1813  object_ = rhs.object_;
1814  referenceCountable_ = rhs.referenceCountable_;
1815  rhs.object_ = NULL;
1816  rhs.referenceCountable_ = false;
1817  }
1818  return *this;
1819  }
1820 
1821  Wrapper<cl_type>& operator = (const cl_type &rhs)
1822  {
1823  detail::errHandler(release(), __RELEASE_ERR);
1824  object_ = rhs;
1825  referenceCountable_ = isReferenceCountable(object_);
1826  return *this;
1827  }
1828 
1829  const cl_type& operator ()() const { return object_; }
1830 
1831  cl_type& operator ()() { return object_; }
1832 
1833  cl_type get() const { return object_; }
1834 
1835 protected:
1836  template<typename Func, typename U>
1837  friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
1838 
1839  template<typename Func, typename U>
1840  friend inline cl_int getInfoHelper(Func, cl_uint, vector<U>*, int, typename U::cl_type);
1841 
1842  cl_int retain() const
1843  {
1844  if( object_ != nullptr && referenceCountable_ ) {
1845  return ReferenceHandler<cl_type>::retain(object_);
1846  }
1847  else {
1848  return CL_SUCCESS;
1849  }
1850  }
1851 
1852  cl_int release() const
1853  {
1854  if (object_ != nullptr && referenceCountable_) {
1855  return ReferenceHandler<cl_type>::release(object_);
1856  }
1857  else {
1858  return CL_SUCCESS;
1859  }
1860  }
1861 };
1862 
1863 template <typename T>
1864 inline bool operator==(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
1865 {
1866  return lhs() == rhs();
1867 }
1868 
1869 template <typename T>
1870 inline bool operator!=(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
1871 {
1872  return !operator==(lhs, rhs);
1873 }
1874 
1875 } // namespace detail
1877 
1878 
1879 using BuildLogType = vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, CL_PROGRAM_BUILD_LOG>::param_type>>;
1880 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
1881 
1884 class BuildError : public Error
1885 {
1886 private:
1887  BuildLogType buildLogs;
1888 public:
1889  BuildError(cl_int err, const char * errStr, const BuildLogType &vec) : Error(err, errStr), buildLogs(vec)
1890  {
1891  }
1892 
1893  BuildLogType getBuildLog() const
1894  {
1895  return buildLogs;
1896  }
1897 };
1898 namespace detail {
1899  static inline cl_int buildErrHandler(
1900  cl_int err,
1901  const char * errStr,
1902  const BuildLogType &buildLogs)
1903  {
1904  if (err != CL_SUCCESS) {
1905  throw BuildError(err, errStr, buildLogs);
1906  }
1907  return err;
1908  }
1909 } // namespace detail
1910 
1911 #else
1912 namespace detail {
1913  static inline cl_int buildErrHandler(
1914  cl_int err,
1915  const char * errStr,
1916  const BuildLogType &buildLogs)
1917  {
1918  (void)buildLogs; // suppress unused variable warning
1919  (void)errStr;
1920  return err;
1921  }
1922 } // namespace detail
1923 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
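
/* Example: reporting a failed program build. A sketch, assuming exceptions
 * are enabled, <iostream> is available and `program` is a cl::Program
 * created from source:
 * \code
 *     try {
 *         program.build("-cl-std=CL2.0");
 *     } catch (const cl::BuildError &e) {
 *         for (const auto &log : e.getBuildLog()) {
 *             std::cerr << log.second << std::endl; // CL_PROGRAM_BUILD_LOG text
 *         }
 *     }
 * \endcode
 */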
1924 
1925 
1931 struct ImageFormat : public cl_image_format
1932 {
1935 
1937  ImageFormat(cl_channel_order order, cl_channel_type type)
1938  {
1939  image_channel_order = order;
1940  image_channel_data_type = type;
1941  }
1942 
1944  ImageFormat& operator = (const ImageFormat& rhs)
1945  {
1946  if (this != &rhs) {
1947  this->image_channel_data_type = rhs.image_channel_data_type;
1948  this->image_channel_order = rhs.image_channel_order;
1949  }
1950  return *this;
1951  }
1952 };
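/* Illustrative usage sketch (not part of the header): ImageFormat is a thin
 * wrapper over cl_image_format, so a format is built from a channel order and
 * a channel data type, e.g. 8-bit normalized RGBA:
 *
 *     cl::ImageFormat rgba8(CL_RGBA, CL_UNORM_INT8);
 */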
1953 
1961 class Device : public detail::Wrapper<cl_device_id>
1962 {
1963 private:
1964  static std::once_flag default_initialized_;
1965  static Device default_;
1966  static cl_int default_error_;
1967 
1973  static void makeDefault();
1974 
1980  static void makeDefaultProvided(const Device &p) {
1981  default_ = p;
1982  }
1983 
1984 public:
1985 #ifdef CL_HPP_UNIT_TEST_ENABLE
1986 
1992  static void unitTestClearDefault() {
1993  default_ = Device();
1994  }
1995 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
1996 
1998  Device() : detail::Wrapper<cl_type>() { }
1999 
2004  explicit Device(const cl_device_id &device, bool retainObject = false) :
2005  detail::Wrapper<cl_type>(device, retainObject) { }
2006 
2011  static Device getDefault(
2012  cl_int *errResult = NULL)
2013  {
2014  std::call_once(default_initialized_, makeDefault);
2015  detail::errHandler(default_error_);
2016  if (errResult != NULL) {
2017  *errResult = default_error_;
2018  }
2019  return default_;
2020  }
2021 
2029  static Device setDefault(const Device &default_device)
2030  {
2031  std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_device));
2032  detail::errHandler(default_error_);
2033  return default_;
2034  }
2035 
2040  Device& operator = (const cl_device_id& rhs)
2041  {
2042  detail::Wrapper<cl_type>::operator=(rhs);
2043  return *this;
2044  }
2045 
2049  Device(const Device& dev) : detail::Wrapper<cl_type>(dev) {}
2050 
2054  Device& operator = (const Device &dev)
2055  {
2056  detail::Wrapper<cl_type>::operator=(dev);
2057  return *this;
2058  }
2059 
2063  Device(Device&& dev) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(dev)) {}
2064 
2068  Device& operator = (Device &&dev)
2069  {
2070  detail::Wrapper<cl_type>::operator=(std::move(dev));
2071  return *this;
2072  }
2073 
2075  template <typename T>
2076  cl_int getInfo(cl_device_info name, T* param) const
2077  {
2078  return detail::errHandler(
2079  detail::getInfo(&::clGetDeviceInfo, object_, name, param),
2080  __GET_DEVICE_INFO_ERR);
2081  }
2082 
2084  template <cl_int name> typename
2085  detail::param_traits<detail::cl_device_info, name>::param_type
2086  getInfo(cl_int* err = NULL) const
2087  {
2088  typename detail::param_traits<
2089  detail::cl_device_info, name>::param_type param;
2090  cl_int result = getInfo(name, &param);
2091  if (err != NULL) {
2092  *err = result;
2093  }
2094  return param;
2095  }
2096 
2100 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
2101  cl_int createSubDevices(
2103  const cl_device_partition_property * properties,
2104  vector<Device>* devices)
2105  {
2106  cl_uint n = 0;
2107  cl_int err = clCreateSubDevices(object_, properties, 0, NULL, &n);
2108  if (err != CL_SUCCESS) {
2109  return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2110  }
2111 
2112  vector<cl_device_id> ids(n);
2113  err = clCreateSubDevices(object_, properties, n, ids.data(), NULL);
2114  if (err != CL_SUCCESS) {
2115  return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2116  }
2117 
2118  // Cannot trivially assign because we need to capture intermediates
2119  // with safe construction
2120  if (devices) {
2121  devices->resize(ids.size());
2122 
2123  // Assign to param, constructing with retain behaviour
2124  // to correctly capture each underlying CL object
2125  for (size_type i = 0; i < ids.size(); i++) {
2126  // We do not need to retain because this device is being created
2127  // by the runtime
2128  (*devices)[i] = Device(ids[i], false);
2129  }
2130  }
2131 
2132  return CL_SUCCESS;
2133  }
2134 #elif defined(CL_HPP_USE_CL_DEVICE_FISSION)
2135 
2139  cl_int createSubDevices(
2140  const cl_device_partition_property_ext * properties,
2141  vector<Device>* devices)
2142  {
2143  typedef CL_API_ENTRY cl_int
2144  ( CL_API_CALL * PFN_clCreateSubDevicesEXT)(
2145  cl_device_id /*in_device*/,
2146  const cl_device_partition_property_ext * /* properties */,
2147  cl_uint /*num_entries*/,
2148  cl_device_id * /*out_devices*/,
2149  cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
2150 
2151  static PFN_clCreateSubDevicesEXT pfn_clCreateSubDevicesEXT = NULL;
2152  CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateSubDevicesEXT);
2153 
2154  cl_uint n = 0;
2155  cl_int err = pfn_clCreateSubDevicesEXT(object_, properties, 0, NULL, &n);
2156  if (err != CL_SUCCESS) {
2157  return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2158  }
2159 
2160  vector<cl_device_id> ids(n);
2161  err = pfn_clCreateSubDevicesEXT(object_, properties, n, ids.data(), NULL);
2162  if (err != CL_SUCCESS) {
2163  return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2164  }
2165  // Cannot trivially assign because we need to capture intermediates
2166  // with safe construction
2167  if (devices) {
2168  devices->resize(ids.size());
2169 
2170  // Assign to param, constructing with retain behaviour
2171  // to correctly capture each underlying CL object
2172  for (size_type i = 0; i < ids.size(); i++) {
2173  // We do not need to retain because this device is being created
2174  // by the runtime
2175  (*devices)[i] = Device(ids[i], false);
2176  }
2177  }
2178  return CL_SUCCESS;
2179  }
2180 #endif // defined(CL_HPP_USE_CL_DEVICE_FISSION)
2181 };
2182 
2183 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Device::default_initialized_;
2184 CL_HPP_DEFINE_STATIC_MEMBER_ Device Device::default_;
2185 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Device::default_error_ = CL_SUCCESS;
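/* Illustrative usage sketch (not part of the header): obtaining the default
 * device and querying it through the templated getInfo(). Variable names are
 * placeholders.
 *
 *     cl_int err = CL_SUCCESS;
 *     cl::Device device = cl::Device::getDefault(&err);
 *     if (err == CL_SUCCESS) {
 *         cl::string name    = device.getInfo<CL_DEVICE_NAME>();
 *         cl_ulong   memSize = device.getInfo<CL_DEVICE_GLOBAL_MEM_SIZE>();
 *     }
 */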
2186 
2194 class Platform : public detail::Wrapper<cl_platform_id>
2195 {
2196 private:
2197  static std::once_flag default_initialized_;
2198  static Platform default_;
2199  static cl_int default_error_;
2200 
2206  static void makeDefault() {
2207  /* Throwing an exception from a call_once invocation does not do
2208  * what we wish, so we catch it and save the error.
2209  */
2210 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2211  try
2212 #endif
2213  {
2214  // If default wasn't passed, generate one
2215  // Otherwise set it
2216  cl_uint n = 0;
2217 
2218  cl_int err = ::clGetPlatformIDs(0, NULL, &n);
2219  if (err != CL_SUCCESS) {
2220  default_error_ = err;
2221  return;
2222  }
2223  if (n == 0) {
2224  default_error_ = CL_INVALID_PLATFORM;
2225  return;
2226  }
2227 
2228  vector<cl_platform_id> ids(n);
2229  err = ::clGetPlatformIDs(n, ids.data(), NULL);
2230  if (err != CL_SUCCESS) {
2231  default_error_ = err;
2232  return;
2233  }
2234 
2235  default_ = Platform(ids[0]);
2236  }
2237 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2238  catch (cl::Error &e) {
2239  default_error_ = e.err();
2240  }
2241 #endif
2242  }
2243 
2249  static void makeDefaultProvided(const Platform &p) {
2250  default_ = p;
2251  }
2252 
2253 public:
2254 #ifdef CL_HPP_UNIT_TEST_ENABLE
2255 
2261  static void unitTestClearDefault() {
2262  default_ = Platform();
2263  }
2264 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2265 
2267  Platform() : detail::Wrapper<cl_type>() { }
2268 
2276  explicit Platform(const cl_platform_id &platform, bool retainObject = false) :
2277  detail::Wrapper<cl_type>(platform, retainObject) { }
2278 
2283  Platform& operator = (const cl_platform_id& rhs)
2284  {
2285  detail::Wrapper<cl_type>::operator=(rhs);
2286  return *this;
2287  }
2288 
2289  static Platform getDefault(
2290  cl_int *errResult = NULL)
2291  {
2292  std::call_once(default_initialized_, makeDefault);
2293  detail::errHandler(default_error_);
2294  if (errResult != NULL) {
2295  *errResult = default_error_;
2296  }
2297  return default_;
2298  }
2299 
2307  static Platform setDefault(const Platform &default_platform)
2308  {
2309  std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_platform));
2310  detail::errHandler(default_error_);
2311  return default_;
2312  }
2313 
2315  cl_int getInfo(cl_platform_info name, string* param) const
2316  {
2317  return detail::errHandler(
2318  detail::getInfo(&::clGetPlatformInfo, object_, name, param),
2319  __GET_PLATFORM_INFO_ERR);
2320  }
2321 
2323  template <cl_int name> typename
2324  detail::param_traits<detail::cl_platform_info, name>::param_type
2325  getInfo(cl_int* err = NULL) const
2326  {
2327  typename detail::param_traits<
2328  detail::cl_platform_info, name>::param_type param;
2329  cl_int result = getInfo(name, &param);
2330  if (err != NULL) {
2331  *err = result;
2332  }
2333  return param;
2334  }
2335 
2340  cl_int getDevices(
2341  cl_device_type type,
2342  vector<Device>* devices) const
2343  {
2344  cl_uint n = 0;
2345  if( devices == NULL ) {
2346  return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
2347  }
2348  cl_int err = ::clGetDeviceIDs(object_, type, 0, NULL, &n);
2349  if (err != CL_SUCCESS) {
2350  return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2351  }
2352 
2353  vector<cl_device_id> ids(n);
2354  err = ::clGetDeviceIDs(object_, type, n, ids.data(), NULL);
2355  if (err != CL_SUCCESS) {
2356  return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2357  }
2358 
2359  // Cannot trivially assign because we need to capture intermediates
2360  // with safe construction
2361  // We must retain things we obtain from the API to avoid releasing
2362  // API-owned objects.
2363  if (devices) {
2364  devices->resize(ids.size());
2365 
2366  // Assign to param, constructing with retain behaviour
2367  // to correctly capture each underlying CL object
2368  for (size_type i = 0; i < ids.size(); i++) {
2369  (*devices)[i] = Device(ids[i], true);
2370  }
2371  }
2372  return CL_SUCCESS;
2373  }
2374 
2375 #if defined(CL_HPP_USE_DX_INTEROP)
2376 
2399  cl_int getDevices(
2400  cl_d3d10_device_source_khr d3d_device_source,
2401  void * d3d_object,
2402  cl_d3d10_device_set_khr d3d_device_set,
2403  vector<Device>* devices) const
2404  {
2405  typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clGetDeviceIDsFromD3D10KHR)(
2406  cl_platform_id platform,
2407  cl_d3d10_device_source_khr d3d_device_source,
2408  void * d3d_object,
2409  cl_d3d10_device_set_khr d3d_device_set,
2410  cl_uint num_entries,
2411  cl_device_id * devices,
2412  cl_uint* num_devices);
2413 
2414  if( devices == NULL ) {
2415  return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
2416  }
2417 
2418  static PFN_clGetDeviceIDsFromD3D10KHR pfn_clGetDeviceIDsFromD3D10KHR = NULL;
2419  CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(object_, clGetDeviceIDsFromD3D10KHR);
2420 
2421  cl_uint n = 0;
2422  cl_int err = pfn_clGetDeviceIDsFromD3D10KHR(
2423  object_,
2424  d3d_device_source,
2425  d3d_object,
2426  d3d_device_set,
2427  0,
2428  NULL,
2429  &n);
2430  if (err != CL_SUCCESS) {
2431  return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2432  }
2433 
2434  vector<cl_device_id> ids(n);
2435  err = pfn_clGetDeviceIDsFromD3D10KHR(
2436  object_,
2437  d3d_device_source,
2438  d3d_object,
2439  d3d_device_set,
2440  n,
2441  ids.data(),
2442  NULL);
2443  if (err != CL_SUCCESS) {
2444  return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2445  }
2446 
2447  // Cannot trivially assign because we need to capture intermediates
2448  // with safe construction
2449  // We must retain things we obtain from the API to avoid releasing
2450  // API-owned objects.
2451  if (devices) {
2452  devices->resize(ids.size());
2453 
2454  // Assign to param, constructing with retain behaviour
2455  // to correctly capture each underlying CL object
2456  for (size_type i = 0; i < ids.size(); i++) {
2457  (*devices)[i] = Device(ids[i], true);
2458  }
2459  }
2460  return CL_SUCCESS;
2461  }
2462 #endif
2463 
2468  static cl_int get(
2469  vector<Platform>* platforms)
2470  {
2471  cl_uint n = 0;
2472 
2473  if( platforms == NULL ) {
2474  return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_PLATFORM_IDS_ERR);
2475  }
2476 
2477  cl_int err = ::clGetPlatformIDs(0, NULL, &n);
2478  if (err != CL_SUCCESS) {
2479  return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
2480  }
2481 
2482  vector<cl_platform_id> ids(n);
2483  err = ::clGetPlatformIDs(n, ids.data(), NULL);
2484  if (err != CL_SUCCESS) {
2485  return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
2486  }
2487 
2488  if (platforms) {
2489  platforms->resize(ids.size());
2490 
2491  // Platforms don't reference count
2492  for (size_type i = 0; i < ids.size(); i++) {
2493  (*platforms)[i] = Platform(ids[i]);
2494  }
2495  }
2496  return CL_SUCCESS;
2497  }
2498 
2503  static cl_int get(
2504  Platform * platform)
2505  {
2506  cl_int err;
2507  Platform default_platform = Platform::getDefault(&err);
2508  if (platform) {
2509  *platform = default_platform;
2510  }
2511  return err;
2512  }
2513 
2522  static Platform get(
2523  cl_int * errResult = NULL)
2524  {
2525  cl_int err;
2526  Platform default_platform = Platform::getDefault(&err);
2527  if (errResult) {
2528  *errResult = err;
2529  }
2530  return default_platform;
2531  }
2532 
2533 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
2534  cl_int
2536  unloadCompiler()
2537  {
2538  return ::clUnloadPlatformCompiler(object_);
2539  }
2540 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
2541 }; // class Platform
2542 
2543 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Platform::default_initialized_;
2544 CL_HPP_DEFINE_STATIC_MEMBER_ Platform Platform::default_;
2545 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Platform::default_error_ = CL_SUCCESS;
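/* Illustrative usage sketch (not part of the header): enumerating platforms
 * with Platform::get() and listing the GPU devices of the first one. Variable
 * names are placeholders.
 *
 *     cl::vector<cl::Platform> platforms;
 *     cl::Platform::get(&platforms);
 *     if (!platforms.empty()) {
 *         cl::string vendor = platforms[0].getInfo<CL_PLATFORM_VENDOR>();
 *         cl::vector<cl::Device> gpus;
 *         platforms[0].getDevices(CL_DEVICE_TYPE_GPU, &gpus);
 *     }
 */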
2546 
2547 
2551 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
2552 
2556 inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int
2557 UnloadCompiler() CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
2558 inline cl_int
2559  UnloadCompiler()
2560  {
2561  return ::clUnloadCompiler();
2562 }
2563 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
2564 
2573 class Context
2574  : public detail::Wrapper<cl_context>
2575 {
2576 private:
2577  static std::once_flag default_initialized_;
2578  static Context default_;
2579  static cl_int default_error_;
2580 
2586  static void makeDefault() {
2587  /* Throwing an exception from a call_once invocation does not do
2588  * what we wish, so we catch it and save the error.
2589  */
2590 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2591  try
2592 #endif
2593  {
2594 #if !defined(__APPLE__) && !defined(__MACOS)
2595  const Platform &p = Platform::getDefault();
2596  cl_platform_id defaultPlatform = p();
2597  cl_context_properties properties[3] = {
2598  CL_CONTEXT_PLATFORM, (cl_context_properties)defaultPlatform, 0
2599  };
2600 #else // #if !defined(__APPLE__) && !defined(__MACOS)
2601  cl_context_properties *properties = nullptr;
2602 #endif // #if !defined(__APPLE__) && !defined(__MACOS)
2603 
2604  default_ = Context(
2605  CL_DEVICE_TYPE_DEFAULT,
2606  properties,
2607  NULL,
2608  NULL,
2609  &default_error_);
2610  }
2611 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2612  catch (cl::Error &e) {
2613  default_error_ = e.err();
2614  }
2615 #endif
2616  }
2617 
2618 
2624  static void makeDefaultProvided(const Context &c) {
2625  default_ = c;
2626  }
2627 
2628 public:
2629 #ifdef CL_HPP_UNIT_TEST_ENABLE
2630 
2636  static void unitTestClearDefault() {
2637  default_ = Context();
2638  }
2639 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2640 
2645  Context(
2646  const vector<Device>& devices,
2647  cl_context_properties* properties = NULL,
2648  void (CL_CALLBACK * notifyFptr)(
2649  const char *,
2650  const void *,
2651  size_type,
2652  void *) = NULL,
2653  void* data = NULL,
2654  cl_int* err = NULL)
2655  {
2656  cl_int error;
2657 
2658  size_type numDevices = devices.size();
2659  vector<cl_device_id> deviceIDs(numDevices);
2660 
2661  for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
2662  deviceIDs[deviceIndex] = (devices[deviceIndex])();
2663  }
2664 
2665  object_ = ::clCreateContext(
2666  properties, (cl_uint) numDevices,
2667  deviceIDs.data(),
2668  notifyFptr, data, &error);
2669 
2670  detail::errHandler(error, __CREATE_CONTEXT_ERR);
2671  if (err != NULL) {
2672  *err = error;
2673  }
2674  }
2675 
2676  Context(
2677  const Device& device,
2678  cl_context_properties* properties = NULL,
2679  void (CL_CALLBACK * notifyFptr)(
2680  const char *,
2681  const void *,
2682  size_type,
2683  void *) = NULL,
2684  void* data = NULL,
2685  cl_int* err = NULL)
2686  {
2687  cl_int error;
2688 
2689  cl_device_id deviceID = device();
2690 
2691  object_ = ::clCreateContext(
2692  properties, 1,
2693  &deviceID,
2694  notifyFptr, data, &error);
2695 
2696  detail::errHandler(error, __CREATE_CONTEXT_ERR);
2697  if (err != NULL) {
2698  *err = error;
2699  }
2700  }
2701 
2706  Context(
2707  cl_device_type type,
2708  cl_context_properties* properties = NULL,
2709  void (CL_CALLBACK * notifyFptr)(
2710  const char *,
2711  const void *,
2712  size_type,
2713  void *) = NULL,
2714  void* data = NULL,
2715  cl_int* err = NULL)
2716  {
2717  cl_int error;
2718 
2719 #if !defined(__APPLE__) && !defined(__MACOS)
2720  cl_context_properties prop[4] = {CL_CONTEXT_PLATFORM, 0, 0, 0 };
2721 
2722  if (properties == NULL) {
2723  // Get a valid platform ID as we cannot send in a blank one
2724  vector<Platform> platforms;
2725  error = Platform::get(&platforms);
2726  if (error != CL_SUCCESS) {
2727  detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2728  if (err != NULL) {
2729  *err = error;
2730  }
2731  return;
2732  }
2733 
2734  // Check the platforms we found for a device of our specified type
2735  cl_context_properties platform_id = 0;
2736  for (unsigned int i = 0; i < platforms.size(); i++) {
2737 
2738  vector<Device> devices;
2739 
2740 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2741  try {
2742 #endif
2743 
2744  error = platforms[i].getDevices(type, &devices);
2745 
2746 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2747  } catch (cl::Error& e) {
2748  error = e.err();
2749  }
2750  // Catch if exceptions are enabled as we don't want to exit if first platform has no devices of type
2751  // We do error checking next anyway, and can throw there if needed
2752 #endif
2753 
2754  // Only squash CL_SUCCESS and CL_DEVICE_NOT_FOUND
2755  if (error != CL_SUCCESS && error != CL_DEVICE_NOT_FOUND) {
2756  detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2757  if (err != NULL) {
2758  *err = error;
2759  }
2760  }
2761 
2762  if (devices.size() > 0) {
2763  platform_id = (cl_context_properties)platforms[i]();
2764  break;
2765  }
2766  }
2767 
2768  if (platform_id == 0) {
2769  detail::errHandler(CL_DEVICE_NOT_FOUND, __CREATE_CONTEXT_FROM_TYPE_ERR);
2770  if (err != NULL) {
2771  *err = CL_DEVICE_NOT_FOUND;
2772  }
2773  return;
2774  }
2775 
2776  prop[1] = platform_id;
2777  properties = &prop[0];
2778  }
2779 #endif
2780  object_ = ::clCreateContextFromType(
2781  properties, type, notifyFptr, data, &error);
2782 
2783  detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2784  if (err != NULL) {
2785  *err = error;
2786  }
2787  }
2788 
2792  Context(const Context& ctx) : detail::Wrapper<cl_type>(ctx) {}
2793 
2797  Context& operator = (const Context &ctx)
2798  {
2799  detail::Wrapper<cl_type>::operator=(ctx);
2800  return *this;
2801  }
2802 
2806  Context(Context&& ctx) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(ctx)) {}
2807 
2811  Context& operator = (Context &&ctx)
2812  {
2813  detail::Wrapper<cl_type>::operator=(std::move(ctx));
2814  return *this;
2815  }
2816 
2817 
2822  static Context getDefault(cl_int * err = NULL)
2823  {
2824  std::call_once(default_initialized_, makeDefault);
2825  detail::errHandler(default_error_);
2826  if (err != NULL) {
2827  *err = default_error_;
2828  }
2829  return default_;
2830  }
2831 
2839  static Context setDefault(const Context &default_context)
2840  {
2841  std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_context));
2842  detail::errHandler(default_error_);
2843  return default_;
2844  }
2845 
2847  Context() : detail::Wrapper<cl_type>() { }
2848 
2854  explicit Context(const cl_context& context, bool retainObject = false) :
2855  detail::Wrapper<cl_type>(context, retainObject) { }
2856 
2862  Context& operator = (const cl_context& rhs)
2863  {
2864  detail::Wrapper<cl_type>::operator=(rhs);
2865  return *this;
2866  }
2867 
2869  template <typename T>
2870  cl_int getInfo(cl_context_info name, T* param) const
2871  {
2872  return detail::errHandler(
2873  detail::getInfo(&::clGetContextInfo, object_, name, param),
2874  __GET_CONTEXT_INFO_ERR);
2875  }
2876 
2878  template <cl_int name> typename
2879  detail::param_traits<detail::cl_context_info, name>::param_type
2880  getInfo(cl_int* err = NULL) const
2881  {
2882  typename detail::param_traits<
2883  detail::cl_context_info, name>::param_type param;
2884  cl_int result = getInfo(name, &param);
2885  if (err != NULL) {
2886  *err = result;
2887  }
2888  return param;
2889  }
2890 
2895  cl_int getSupportedImageFormats(
2896  cl_mem_flags flags,
2897  cl_mem_object_type type,
2898  vector<ImageFormat>* formats) const
2899  {
2900  cl_uint numEntries;
2901 
2902  if (!formats) {
2903  return CL_SUCCESS;
2904  }
2905 
2906  cl_int err = ::clGetSupportedImageFormats(
2907  object_,
2908  flags,
2909  type,
2910  0,
2911  NULL,
2912  &numEntries);
2913  if (err != CL_SUCCESS) {
2914  return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
2915  }
2916 
2917  if (numEntries > 0) {
2918  vector<ImageFormat> value(numEntries);
2919  err = ::clGetSupportedImageFormats(
2920  object_,
2921  flags,
2922  type,
2923  numEntries,
2924  (cl_image_format*)value.data(),
2925  NULL);
2926  if (err != CL_SUCCESS) {
2927  return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
2928  }
2929 
2930  formats->assign(begin(value), end(value));
2931  }
2932  else {
2933  // If no values are being returned, ensure an empty vector comes back
2934  formats->clear();
2935  }
2936 
2937  return CL_SUCCESS;
2938  }
2939 };
2940 
2941 inline void Device::makeDefault()
2942 {
2943  /* Throwing an exception from a call_once invocation does not do
2944  * what we wish, so we catch it and save the error.
2945  */
2946 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2947  try
2948 #endif
2949  {
2950  cl_int error = 0;
2951 
2952  Context context = Context::getDefault(&error);
2953  detail::errHandler(error, __CREATE_CONTEXT_ERR);
2954 
2955  if (error != CL_SUCCESS) {
2956  default_error_ = error;
2957  }
2958  else {
2959  default_ = context.getInfo<CL_CONTEXT_DEVICES>()[0];
2960  default_error_ = CL_SUCCESS;
2961  }
2962  }
2963 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2964  catch (cl::Error &e) {
2965  default_error_ = e.err();
2966  }
2967 #endif
2968 }
2969 
2970 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Context::default_initialized_;
2971 CL_HPP_DEFINE_STATIC_MEMBER_ Context Context::default_;
2972 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Context::default_error_ = CL_SUCCESS;
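/* Illustrative usage sketch (not part of the header): creating a context for
 * the default device type and listing its devices. Variable names are
 * placeholders.
 *
 *     cl_int err = CL_SUCCESS;
 *     cl::Context context(CL_DEVICE_TYPE_DEFAULT, nullptr, nullptr, nullptr, &err);
 *     cl::vector<cl::Device> devices = context.getInfo<CL_CONTEXT_DEVICES>();
 */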
2973 
2982 class Event : public detail::Wrapper<cl_event>
2983 {
2984 public:
2986  Event() : detail::Wrapper<cl_type>() { }
2987 
2996  explicit Event(const cl_event& event, bool retainObject = false) :
2997  detail::Wrapper<cl_type>(event, retainObject) { }
2998 
3004  Event& operator = (const cl_event& rhs)
3005  {
3006  detail::Wrapper<cl_type>::operator=(rhs);
3007  return *this;
3008  }
3009 
3011  template <typename T>
3012  cl_int getInfo(cl_event_info name, T* param) const
3013  {
3014  return detail::errHandler(
3015  detail::getInfo(&::clGetEventInfo, object_, name, param),
3016  __GET_EVENT_INFO_ERR);
3017  }
3018 
3020  template <cl_int name> typename
3021  detail::param_traits<detail::cl_event_info, name>::param_type
3022  getInfo(cl_int* err = NULL) const
3023  {
3024  typename detail::param_traits<
3025  detail::cl_event_info, name>::param_type param;
3026  cl_int result = getInfo(name, &param);
3027  if (err != NULL) {
3028  *err = result;
3029  }
3030  return param;
3031  }
3032 
3034  template <typename T>
3035  cl_int getProfilingInfo(cl_profiling_info name, T* param) const
3036  {
3037  return detail::errHandler(detail::getInfo(
3038  &::clGetEventProfilingInfo, object_, name, param),
3039  __GET_EVENT_PROFILE_INFO_ERR);
3040  }
3041 
3043  template <cl_int name> typename
3044  detail::param_traits<detail::cl_profiling_info, name>::param_type
3045  getProfilingInfo(cl_int* err = NULL) const
3046  {
3047  typename detail::param_traits<
3048  detail::cl_profiling_info, name>::param_type param;
3049  cl_int result = getProfilingInfo(name, &param);
3050  if (err != NULL) {
3051  *err = result;
3052  }
3053  return param;
3054  }
3055 
3060  cl_int wait() const
3061  {
3062  return detail::errHandler(
3063  ::clWaitForEvents(1, &object_),
3064  __WAIT_FOR_EVENTS_ERR);
3065  }
3066 
3067 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3068 
3072  cl_int setCallback(
3073  cl_int type,
3074  void (CL_CALLBACK * pfn_notify)(cl_event, cl_int, void *),
3075  void * user_data = NULL)
3076  {
3077  return detail::errHandler(
3078  ::clSetEventCallback(
3079  object_,
3080  type,
3081  pfn_notify,
3082  user_data),
3083  __SET_EVENT_CALLBACK_ERR);
3084  }
3085 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3086 
3091  static cl_int
3092  waitForEvents(const vector<Event>& events)
3093  {
3094  return detail::errHandler(
3095  ::clWaitForEvents(
3096  (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
3097  __WAIT_FOR_EVENTS_ERR);
3098  }
3099 };
3100 
3101 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3102 
3106 class UserEvent : public Event
3107 {
3108 public:
3113  UserEvent(
3114  const Context& context,
3115  cl_int * err = NULL)
3116  {
3117  cl_int error;
3118  object_ = ::clCreateUserEvent(
3119  context(),
3120  &error);
3121 
3122  detail::errHandler(error, __CREATE_USER_EVENT_ERR);
3123  if (err != NULL) {
3124  *err = error;
3125  }
3126  }
3127 
3129  UserEvent() : Event() { }
3130 
3135  cl_int setStatus(cl_int status)
3136  {
3137  return detail::errHandler(
3138  ::clSetUserEventStatus(object_,status),
3139  __SET_USER_EVENT_STATUS_ERR);
3140  }
3141 };
3142 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3143 
3148 inline static cl_int
3149 WaitForEvents(const vector<Event>& events)
3150 {
3151  return detail::errHandler(
3152  ::clWaitForEvents(
3153  (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
3154  __WAIT_FOR_EVENTS_ERR);
3155 }
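/* Illustrative usage sketch (not part of the header, requires OpenCL 1.1+): a
 * UserEvent can gate enqueued work; completing it releases commands that wait
 * on it. `context` and the enqueued commands are placeholders.
 *
 *     cl::UserEvent gate(context);
 *     cl::vector<cl::Event> waitList{ gate };
 *     // ... enqueue commands that pass &waitList as their event wait list ...
 *     gate.setStatus(CL_COMPLETE);
 *     cl::WaitForEvents(waitList);
 */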
3156 
3165 class Memory : public detail::Wrapper<cl_mem>
3166 {
3167 public:
3169  Memory() : detail::Wrapper<cl_type>() { }
3170 
3182  explicit Memory(const cl_mem& memory, bool retainObject) :
3183  detail::Wrapper<cl_type>(memory, retainObject) { }
3184 
3190  Memory& operator = (const cl_mem& rhs)
3191  {
3192  detail::Wrapper<cl_type>::operator=(rhs);
3193  return *this;
3194  }
3195 
3199  Memory(const Memory& mem) : detail::Wrapper<cl_type>(mem) {}
3200 
3204  Memory& operator = (const Memory &mem)
3205  {
3206  detail::Wrapper<cl_type>::operator=(mem);
3207  return *this;
3208  }
3209 
3213  Memory(Memory&& mem) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(mem)) {}
3214 
3218  Memory& operator = (Memory &&mem)
3219  {
3220  detail::Wrapper<cl_type>::operator=(std::move(mem));
3221  return *this;
3222  }
3223 
3224 
3226  template <typename T>
3227  cl_int getInfo(cl_mem_info name, T* param) const
3228  {
3229  return detail::errHandler(
3230  detail::getInfo(&::clGetMemObjectInfo, object_, name, param),
3231  __GET_MEM_OBJECT_INFO_ERR);
3232  }
3233 
3235  template <cl_int name> typename
3236  detail::param_traits<detail::cl_mem_info, name>::param_type
3237  getInfo(cl_int* err = NULL) const
3238  {
3239  typename detail::param_traits<
3240  detail::cl_mem_info, name>::param_type param;
3241  cl_int result = getInfo(name, &param);
3242  if (err != NULL) {
3243  *err = result;
3244  }
3245  return param;
3246  }
3247 
3248 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3249 
3262  cl_int setDestructorCallback(
3263  void (CL_CALLBACK * pfn_notify)(cl_mem, void *),
3264  void * user_data = NULL)
3265  {
3266  return detail::errHandler(
3267  ::clSetMemObjectDestructorCallback(
3268  object_,
3269  pfn_notify,
3270  user_data),
3271  __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR);
3272  }
3273 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3274 
3275 };
3276 
3277 // Pre-declare copy functions
3278 class Buffer;
3279 template< typename IteratorType >
3280 cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
3281 template< typename IteratorType >
3282 cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
3283 template< typename IteratorType >
3284 cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
3285 template< typename IteratorType >
3286 cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
3287 
3288 
3289 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
3290 namespace detail
3291 {
3292  class SVMTraitNull
3293  {
3294  public:
3295  static cl_svm_mem_flags getSVMMemFlags()
3296  {
3297  return 0;
3298  }
3299  };
3300 } // namespace detail
3301 
3302 template<class Trait = detail::SVMTraitNull>
3303  class SVMTraitReadWrite
3304  {
3305 public:
3306  static cl_svm_mem_flags getSVMMemFlags()
3307  {
3308  return CL_MEM_READ_WRITE |
3309  Trait::getSVMMemFlags();
3310  }
3311 };
3312 
3313 template<class Trait = detail::SVMTraitNull>
3314  class SVMTraitReadOnly
3315  {
3316 public:
3317  static cl_svm_mem_flags getSVMMemFlags()
3318  {
3319  return CL_MEM_READ_ONLY |
3320  Trait::getSVMMemFlags();
3321  }
3322 };
3323 
3324 template<class Trait = detail::SVMTraitNull>
3325  class SVMTraitWriteOnly
3326  {
3327 public:
3328  static cl_svm_mem_flags getSVMMemFlags()
3329  {
3330  return CL_MEM_WRITE_ONLY |
3331  Trait::getSVMMemFlags();
3332  }
3333 };
3334 
3335 template<class Trait = SVMTraitReadWrite<>>
3336  class SVMTraitCoarse
3337  {
3338 public:
3339  static cl_svm_mem_flags getSVMMemFlags()
3340  {
3341  return Trait::getSVMMemFlags();
3342  }
3343 };
3344 
3345 template<class Trait = SVMTraitReadWrite<>>
3346  class SVMTraitFine
3347  {
3348 public:
3349  static cl_svm_mem_flags getSVMMemFlags()
3350  {
3351  return CL_MEM_SVM_FINE_GRAIN_BUFFER |
3352  Trait::getSVMMemFlags();
3353  }
3354 };
3355 
3356 template<class Trait = SVMTraitReadWrite<>>
3357  class SVMTraitAtomic
3358  {
3359 public:
3360  static cl_svm_mem_flags getSVMMemFlags()
3361  {
3362  return
3363  CL_MEM_SVM_FINE_GRAIN_BUFFER |
3364  CL_MEM_SVM_ATOMICS |
3365  Trait::getSVMMemFlags();
3366  }
3367 };
3368 
3369 // Pre-declare SVM map function
3370 template<typename T>
3371 inline cl_int enqueueMapSVM(
3372  T* ptr,
3373  cl_bool blocking,
3374  cl_map_flags flags,
3375  size_type size,
3376  const vector<Event>* events = NULL,
3377  Event* event = NULL);
3378 
3390 template<typename T, class SVMTrait>
3391  class SVMAllocator {
3392  private:
3393  Context context_;
3394 
3395 public:
3396  typedef T value_type;
3397  typedef value_type* pointer;
3398  typedef const value_type* const_pointer;
3399  typedef value_type& reference;
3400  typedef const value_type& const_reference;
3401  typedef std::size_t size_type;
3402  typedef std::ptrdiff_t difference_type;
3403 
3404  template<typename U>
3405  struct rebind
3406  {
3407  typedef SVMAllocator<U, SVMTrait> other;
3408  };
3409 
3410  template<typename U, typename V>
3411  friend class SVMAllocator;
3412 
3413  SVMAllocator() :
3414  context_(Context::getDefault())
3415  {
3416  }
3417 
3418  explicit SVMAllocator(cl::Context context) :
3419  context_(context)
3420  {
3421  }
3422 
3423 
3424  SVMAllocator(const SVMAllocator &other) :
3425  context_(other.context_)
3426  {
3427  }
3428 
3429  template<typename U>
3430  SVMAllocator(const SVMAllocator<U, SVMTrait> &other) :
3431  context_(other.context_)
3432  {
3433  }
3434 
3435  ~SVMAllocator()
3436  {
3437  }
3438 
3439  pointer address(reference r) CL_HPP_NOEXCEPT_
3440  {
3441  return std::addressof(r);
3442  }
3443 
3444  const_pointer address(const_reference r) CL_HPP_NOEXCEPT_
3445  {
3446  return std::addressof(r);
3447  }
3448 
3455  pointer allocate(
3456  size_type size,
3457  typename cl::SVMAllocator<void, SVMTrait>::const_pointer = 0)
3458  {
3459  // Allocate memory with default alignment matching the size of the type
3460  void* voidPointer =
3461  clSVMAlloc(
3462  context_(),
3463  SVMTrait::getSVMMemFlags(),
3464  size*sizeof(T),
3465  0);
3466  pointer retValue = reinterpret_cast<pointer>(
3467  voidPointer);
3468 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3469  if (!retValue) {
3470  std::bad_alloc excep;
3471  throw excep;
3472  }
3473 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3474 
3475  // If allocation was coarse-grained then map it
3476  if (!(SVMTrait::getSVMMemFlags() & CL_MEM_SVM_FINE_GRAIN_BUFFER)) {
3477  cl_int err = enqueueMapSVM(retValue, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, size*sizeof(T));
3478  if (err != CL_SUCCESS) {
3479  std::bad_alloc excep;
3480  throw excep;
3481  }
3482  }
3483 
3484  // If exceptions disabled, return null pointer from allocator
3485  return retValue;
3486  }
3487 
3488  void deallocate(pointer p, size_type)
3489  {
3490  clSVMFree(context_(), p);
3491  }
3492 
3497  size_type max_size() const CL_HPP_NOEXCEPT_
3498  {
3499  size_type maxSize = std::numeric_limits<size_type>::max() / sizeof(T);
3500 
3501  for (const Device &d : context_.getInfo<CL_CONTEXT_DEVICES>()) {
3502  maxSize = std::min(
3503  maxSize,
3504  static_cast<size_type>(d.getInfo<CL_DEVICE_MAX_MEM_ALLOC_SIZE>()));
3505  }
3506 
3507  return maxSize;
3508  }
3509 
3510  template< class U, class... Args >
3511  void construct(U* p, Args&&... args)
3512  {
3513  new(p)T(args...);
3514  }
3515 
3516  template< class U >
3517  void destroy(U* p)
3518  {
3519  p->~U();
3520  }
3521 
3525  inline bool operator==(SVMAllocator const& rhs)
3526  {
3527  return (context_==rhs.context_);
3528  }
3529 
3530  inline bool operator!=(SVMAllocator const& a)
3531  {
3532  return !operator==(a);
3533  }
3534 }; // class SVMAllocator
3535 
3536 
3537 template<class SVMTrait>
3538 class SVMAllocator<void, SVMTrait> {
3539 public:
3540  typedef void value_type;
3541  typedef value_type* pointer;
3542  typedef const value_type* const_pointer;
3543 
3544  template<typename U>
3545  struct rebind
3546  {
3547  typedef SVMAllocator<U, SVMTrait> other;
3548  };
3549 
3550  template<typename U, typename V>
3551  friend class SVMAllocator;
3552 };
3553 
3554 #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
3555 namespace detail
3556 {
3557  template<class Alloc>
3558  class Deleter {
3559  private:
3560  Alloc alloc_;
3561  size_type copies_;
3562 
3563  public:
3564  typedef typename std::allocator_traits<Alloc>::pointer pointer;
3565 
3566  Deleter(const Alloc &alloc, size_type copies) : alloc_{ alloc }, copies_{ copies }
3567  {
3568  }
3569 
3570  void operator()(pointer ptr) const {
3571  Alloc tmpAlloc{ alloc_ };
3572  std::allocator_traits<Alloc>::destroy(tmpAlloc, std::addressof(*ptr));
3573  std::allocator_traits<Alloc>::deallocate(tmpAlloc, ptr, copies_);
3574  }
3575  };
3576 } // namespace detail
3577 
3584 template <class T, class Alloc, class... Args>
3585 cl::pointer<T, detail::Deleter<Alloc>> allocate_pointer(const Alloc &alloc_, Args&&... args)
3586 {
3587  Alloc alloc(alloc_);
3588  static const size_type copies = 1;
3589 
3590  // Ensure that creation of the management block and the
3591  // object are dealt with separately such that we only provide a deleter
3592 
3593  T* tmp = std::allocator_traits<Alloc>::allocate(alloc, copies);
3594  if (!tmp) {
3595  std::bad_alloc excep;
3596  throw excep;
3597  }
3598  try {
3599  std::allocator_traits<Alloc>::construct(
3600  alloc,
3601  std::addressof(*tmp),
3602  std::forward<Args>(args)...);
3603 
3604  return cl::pointer<T, detail::Deleter<Alloc>>(tmp, detail::Deleter<Alloc>{alloc, copies});
3605  }
3606  catch (std::bad_alloc b)
3607  {
3608  std::allocator_traits<Alloc>::deallocate(alloc, tmp, copies);
3609  throw;
3610  }
3611 }
3612 
3613 template< class T, class SVMTrait, class... Args >
3614 cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(Args... args)
3615 {
3616  SVMAllocator<T, SVMTrait> alloc;
3617  return cl::allocate_pointer<T>(alloc, args...);
3618 }
3619 
3620 template< class T, class SVMTrait, class... Args >
3621 cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(const cl::Context &c, Args... args)
3622 {
3623  SVMAllocator<T, SVMTrait> alloc(c);
3624  return cl::allocate_pointer<T>(alloc, args...);
3625 }
3626 #endif // #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
3627 
3631 template < class T >
3632 using coarse_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>>;
3633 
3637 template < class T >
3638 using fine_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitFine<>>>;
3639 
3643 template < class T >
3644 using atomic_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitAtomic<>>>;
3645 
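/* Illustrative usage sketch (not part of the header, requires OpenCL 2.0):
 * coarse-grained SVM through the allocator aliases and allocate_svm. Names and
 * sizes are placeholders.
 *
 *     cl::SVMAllocator<int, cl::SVMTraitCoarse<>> svmAlloc;
 *     cl::coarse_svm_vector<int> numbers(1024, 0, svmAlloc);
 *     auto single = cl::allocate_svm<int, cl::SVMTraitCoarse<>>(42);
 */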
3646 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
3647 
3648 
3655 class Buffer : public Memory
3656 {
3657 public:
3658 
3666  Buffer(
3667  const Context& context,
3668  cl_mem_flags flags,
3669  size_type size,
3670  void* host_ptr = NULL,
3671  cl_int* err = NULL)
3672  {
3673  cl_int error;
3674  object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
3675 
3676  detail::errHandler(error, __CREATE_BUFFER_ERR);
3677  if (err != NULL) {
3678  *err = error;
3679  }
3680  }
3681 
3691  Buffer(
3692  cl_mem_flags flags,
3693  size_type size,
3694  void* host_ptr = NULL,
3695  cl_int* err = NULL)
3696  {
3697  cl_int error;
3698 
3699  Context context = Context::getDefault(err);
3700 
3701  object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
3702 
3703  detail::errHandler(error, __CREATE_BUFFER_ERR);
3704  if (err != NULL) {
3705  *err = error;
3706  }
3707  }
3708 
3714  template< typename IteratorType >
3715  Buffer(
3716  IteratorType startIterator,
3717  IteratorType endIterator,
3718  bool readOnly,
3719  bool useHostPtr = false,
3720  cl_int* err = NULL)
3721  {
3722  typedef typename std::iterator_traits<IteratorType>::value_type DataType;
3723  cl_int error;
3724 
3725  cl_mem_flags flags = 0;
3726  if( readOnly ) {
3727  flags |= CL_MEM_READ_ONLY;
3728  }
3729  else {
3730  flags |= CL_MEM_READ_WRITE;
3731  }
3732  if( useHostPtr ) {
3733  flags |= CL_MEM_USE_HOST_PTR;
3734  }
3735 
3736  size_type size = sizeof(DataType)*(endIterator - startIterator);
3737 
3738  Context context = Context::getDefault(err);
3739 
3740  if( useHostPtr ) {
3741  object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
3742  } else {
3743  object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
3744  }
3745 
3746  detail::errHandler(error, __CREATE_BUFFER_ERR);
3747  if (err != NULL) {
3748  *err = error;
3749  }
3750 
3751  if( !useHostPtr ) {
3752  error = cl::copy(startIterator, endIterator, *this);
3753  detail::errHandler(error, __CREATE_BUFFER_ERR);
3754  if (err != NULL) {
3755  *err = error;
3756  }
3757  }
3758  }
3759 
3765  template< typename IteratorType >
3766  Buffer(const Context &context, IteratorType startIterator, IteratorType endIterator,
3767  bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
3768 
3773  template< typename IteratorType >
3774  Buffer(const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator,
3775  bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
3776 
3778  Buffer() : Memory() { }
3779 
3787  explicit Buffer(const cl_mem& buffer, bool retainObject = false) :
3788  Memory(buffer, retainObject) { }
3789 
3794  Buffer& operator = (const cl_mem& rhs)
3795  {
3796  Memory::operator=(rhs);
3797  return *this;
3798  }
3799 
3803  Buffer(const Buffer& buf) : Memory(buf) {}
3804 
3808  Buffer& operator = (const Buffer &buf)
3809  {
3810  Memory::operator=(buf);
3811  return *this;
3812  }
3813 
3817  Buffer(Buffer&& buf) CL_HPP_NOEXCEPT_ : Memory(std::move(buf)) {}
3818 
3822  Buffer& operator = (Buffer &&buf)
3823  {
3824  Memory::operator=(std::move(buf));
3825  return *this;
3826  }
3827 
3828 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3829 
3833  Buffer createSubBuffer(
3834  cl_mem_flags flags,
3835  cl_buffer_create_type buffer_create_type,
3836  const void * buffer_create_info,
3837  cl_int * err = NULL)
3838  {
3839  Buffer result;
3840  cl_int error;
3841  result.object_ = ::clCreateSubBuffer(
3842  object_,
3843  flags,
3844  buffer_create_type,
3845  buffer_create_info,
3846  &error);
3847 
3848  detail::errHandler(error, __CREATE_SUBBUFFER_ERR);
3849  if (err != NULL) {
3850  *err = error;
3851  }
3852 
3853  return result;
3854  }
3855 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3856 };
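/* Illustrative usage sketch (not part of the header): a read-only buffer
 * filled from a host container via the iterator constructor (which copies
 * through the default queue), and a write-only buffer on the default context.
 * Variable names are placeholders.
 *
 *     std::vector<float> hostData(256, 1.0f);
 *     cl::Buffer input(hostData.begin(), hostData.end(), true);
 *     cl::Buffer output(CL_MEM_WRITE_ONLY, hostData.size() * sizeof(float));
 */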
3857 
3858 #if defined (CL_HPP_USE_DX_INTEROP)
3859 
3867 class BufferD3D10 : public Buffer
3868 {
3869 public:
3870 
3871 
3877  BufferD3D10(
3878  const Context& context,
3879  cl_mem_flags flags,
3880  ID3D10Buffer* bufobj,
3881  cl_int * err = NULL)
3882  {
3883  typedef CL_API_ENTRY cl_mem (CL_API_CALL *PFN_clCreateFromD3D10BufferKHR)(
3884  cl_context context, cl_mem_flags flags, ID3D10Buffer* buffer,
3885  cl_int* errcode_ret);
3886  PFN_clCreateFromD3D10BufferKHR pfn_clCreateFromD3D10BufferKHR = nullptr;
3887 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
3888  vector<cl_context_properties> props = context.getInfo<CL_CONTEXT_PROPERTIES>();
3889  cl_platform_id platform = nullptr;
3890  for( size_type i = 0; i < props.size(); ++i ) {
3891  if( props[i] == CL_CONTEXT_PLATFORM ) {
3892  platform = (cl_platform_id)props[i+1];
3893  }
3894  }
3895  CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateFromD3D10BufferKHR);
3896 #elif CL_HPP_TARGET_OPENCL_VERSION >= 110
3897  CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateFromD3D10BufferKHR);
3898 #endif
3899 
3900  cl_int error;
3901  object_ = pfn_clCreateFromD3D10BufferKHR(
3902  context(),
3903  flags,
3904  bufobj,
3905  &error);
3906 
3907  detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
3908  if (err != NULL) {
3909  *err = error;
3910  }
3911  }
3912 
3914  BufferD3D10() : Buffer() { }
3915 
3923  explicit BufferD3D10(const cl_mem& buffer, bool retainObject = false) :
3924  Buffer(buffer, retainObject) { }
3925 
3930  BufferD3D10& operator = (const cl_mem& rhs)
3931  {
3932  Buffer::operator=(rhs);
3933  return *this;
3934  }
3935 
3939  BufferD3D10(const BufferD3D10& buf) :
3940  Buffer(buf) {}
3941 
3945  BufferD3D10& operator = (const BufferD3D10 &buf)
3946  {
3947  Buffer::operator=(buf);
3948  return *this;
3949  }
3950 
3954  BufferD3D10(BufferD3D10&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
3955 
3959  BufferD3D10& operator = (BufferD3D10 &&buf)
3960  {
3961  Buffer::operator=(std::move(buf));
3962  return *this;
3963  }
3964 };
3965 #endif
3966 
3975 class BufferGL : public Buffer
3976 {
3977 public:
3983  BufferGL(
3984  const Context& context,
3985  cl_mem_flags flags,
3986  cl_GLuint bufobj,
3987  cl_int * err = NULL)
3988  {
3989  cl_int error;
3990  object_ = ::clCreateFromGLBuffer(
3991  context(),
3992  flags,
3993  bufobj,
3994  &error);
3995 
3996  detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
3997  if (err != NULL) {
3998  *err = error;
3999  }
4000  }
4001 
4003  BufferGL() : Buffer() { }
4004 
4012  explicit BufferGL(const cl_mem& buffer, bool retainObject = false) :
4013  Buffer(buffer, retainObject) { }
4014 
4019  BufferGL& operator = (const cl_mem& rhs)
4020  {
4021  Buffer::operator=(rhs);
4022  return *this;
4023  }
4024 
4028  BufferGL(const BufferGL& buf) : Buffer(buf) {}
4029 
4033  BufferGL& operator = (const BufferGL &buf)
4034  {
4035  Buffer::operator=(buf);
4036  return *this;
4037  }
4038 
4042  BufferGL(BufferGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
4043 
4047  BufferGL& operator = (BufferGL &&buf)
4048  {
4049  Buffer::operator=(std::move(buf));
4050  return *this;
4051  }
4052 
4054  cl_int getObjectInfo(
4055  cl_gl_object_type *type,
4056  cl_GLuint * gl_object_name)
4057  {
4058  return detail::errHandler(
4059  ::clGetGLObjectInfo(object_,type,gl_object_name),
4060  __GET_GL_OBJECT_INFO_ERR);
4061  }
4062 };
4063 
4072 class BufferRenderGL : public Buffer
4073 {
4074 public:
4081  const Context& context,
4082  cl_mem_flags flags,
4083  cl_GLuint bufobj,
4084  cl_int * err = NULL)
4085  {
4086  cl_int error;
4087  object_ = ::clCreateFromGLRenderbuffer(
4088  context(),
4089  flags,
4090  bufobj,
4091  &error);
4092 
4093  detail::errHandler(error, __CREATE_GL_RENDER_BUFFER_ERR);
4094  if (err != NULL) {
4095  *err = error;
4096  }
4097  }
4098 
4101  BufferRenderGL() : Buffer() { }
4109  explicit BufferRenderGL(const cl_mem& buffer, bool retainObject = false) :
4110  Buffer(buffer, retainObject) { }
4111 
4116  BufferRenderGL& operator = (const cl_mem& rhs)
4117  {
4118  Buffer::operator=(rhs);
4119  return *this;
4120  }
4121 
4125  BufferRenderGL(const BufferRenderGL& buf) : Buffer(buf) {}
4126 
4130  BufferRenderGL& operator = (const BufferRenderGL &buf)
4131  {
4132  Buffer::operator=(buf);
4133  return *this;
4134  }
4135 
4139  BufferRenderGL(BufferRenderGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
4140 
4144  BufferRenderGL& operator = (BufferRenderGL &&buf)
4145  {
4146  Buffer::operator=(std::move(buf));
4147  return *this;
4148  }
4149 
4151  cl_int getObjectInfo(
4152  cl_gl_object_type *type,
4153  cl_GLuint * gl_object_name)
4154  {
4155  return detail::errHandler(
4156  ::clGetGLObjectInfo(object_,type,gl_object_name),
4157  __GET_GL_OBJECT_INFO_ERR);
4158  }
4159 };
4160 
4167 class Image : public Memory
4168 {
4169 protected:
4171  Image() : Memory() { }
4172 
4180  explicit Image(const cl_mem& image, bool retainObject = false) :
4181  Memory(image, retainObject) { }
4182 
4187  Image& operator = (const cl_mem& rhs)
4188  {
4189  Memory::operator=(rhs);
4190  return *this;
4191  }
4192 
4196  Image(const Image& img) : Memory(img) {}
4197 
4201  Image& operator = (const Image &img)
4202  {
4203  Memory::operator=(img);
4204  return *this;
4205  }
4206 
4210  Image(Image&& img) CL_HPP_NOEXCEPT_ : Memory(std::move(img)) {}
4211 
4215  Image& operator = (Image &&img)
4216  {
4217  Memory::operator=(std::move(img));
4218  return *this;
4219  }
4220 
4221 
4222 public:
4224  template <typename T>
4225  cl_int getImageInfo(cl_image_info name, T* param) const
4226  {
4227  return detail::errHandler(
4228  detail::getInfo(&::clGetImageInfo, object_, name, param),
4229  __GET_IMAGE_INFO_ERR);
4230  }
4231 
4233  template <cl_int name> typename
4234  detail::param_traits<detail::cl_image_info, name>::param_type
4235  getImageInfo(cl_int* err = NULL) const
4236  {
4237  typename detail::param_traits<
4238  detail::cl_image_info, name>::param_type param;
4239  cl_int result = getImageInfo(name, &param);
4240  if (err != NULL) {
4241  *err = result;
4242  }
4243  return param;
4244  }
4245 };
4246 
4247 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4248 
4254 class Image1D : public Image
4255 {
4256 public:
4261  Image1D(
4262  const Context& context,
4263  cl_mem_flags flags,
4264  ImageFormat format,
4265  size_type width,
4266  void* host_ptr = NULL,
4267  cl_int* err = NULL)
4268  {
4269  cl_int error;
4270  cl_image_desc desc =
4271  {
4272  CL_MEM_OBJECT_IMAGE1D,
4273  width,
4274  0, 0, 0, 0, 0, 0, 0, 0
4275  };
4276  object_ = ::clCreateImage(
4277  context(),
4278  flags,
4279  &format,
4280  &desc,
4281  host_ptr,
4282  &error);
4283 
4284  detail::errHandler(error, __CREATE_IMAGE_ERR);
4285  if (err != NULL) {
4286  *err = error;
4287  }
4288  }
4289 
4291  Image1D() { }
4292 
4300  explicit Image1D(const cl_mem& image1D, bool retainObject = false) :
4301  Image(image1D, retainObject) { }
4302 
4307  Image1D& operator = (const cl_mem& rhs)
4308  {
4309  Image::operator=(rhs);
4310  return *this;
4311  }
4312 
4316  Image1D(const Image1D& img) : Image(img) {}
4317 
4321  Image1D& operator = (const Image1D &img)
4322  {
4323  Image::operator=(img);
4324  return *this;
4325  }
4326 
4330  Image1D(Image1D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4331 
4335  Image1D& operator = (Image1D &&img)
4336  {
4337  Image::operator=(std::move(img));
4338  return *this;
4339  }
4340 
4341 };
4342 
4346 class Image1DBuffer : public Image
4347 {
4348 public:
4349  Image1DBuffer(
4350  const Context& context,
4351  cl_mem_flags flags,
4352  ImageFormat format,
4353  size_type width,
4354  const Buffer &buffer,
4355  cl_int* err = NULL)
4356  {
4357  cl_int error;
4358  cl_image_desc desc =
4359  {
4360  CL_MEM_OBJECT_IMAGE1D_BUFFER,
4361  width,
4362  0, 0, 0, 0, 0, 0, 0,
4363  buffer()
4364  };
4365  object_ = ::clCreateImage(
4366  context(),
4367  flags,
4368  &format,
4369  &desc,
4370  NULL,
4371  &error);
4372 
4373  detail::errHandler(error, __CREATE_IMAGE_ERR);
4374  if (err != NULL) {
4375  *err = error;
4376  }
4377  }
4378 
4379  Image1DBuffer() { }
4380 
4388  explicit Image1DBuffer(const cl_mem& image1D, bool retainObject = false) :
4389  Image(image1D, retainObject) { }
4390 
4391  Image1DBuffer& operator = (const cl_mem& rhs)
4392  {
4393  Image::operator=(rhs);
4394  return *this;
4395  }
4396 
4400  Image1DBuffer(const Image1DBuffer& img) : Image(img) {}
4401 
4405  Image1DBuffer& operator = (const Image1DBuffer &img)
4406  {
4407  Image::operator=(img);
4408  return *this;
4409  }
4410 
4414  Image1DBuffer(Image1DBuffer&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4415 
4419  Image1DBuffer& operator = (Image1DBuffer &&img)
4420  {
4421  Image::operator=(std::move(img));
4422  return *this;
4423  }
4424 
4425 };
4426 
4430 class Image1DArray : public Image
4431 {
4432 public:
4433  Image1DArray(
4434  const Context& context,
4435  cl_mem_flags flags,
4436  ImageFormat format,
4437  size_type arraySize,
4438  size_type width,
4439  size_type rowPitch,
4440  void* host_ptr = NULL,
4441  cl_int* err = NULL)
4442  {
4443  cl_int error;
4444  cl_image_desc desc =
4445  {
4446  CL_MEM_OBJECT_IMAGE1D_ARRAY,
4447  width,
4448  0, 0, // height, depth (unused)
4449  arraySize,
4450  rowPitch,
4451  0, 0, 0, 0
4452  };
4453  object_ = ::clCreateImage(
4454  context(),
4455  flags,
4456  &format,
4457  &desc,
4458  host_ptr,
4459  &error);
4460 
4461  detail::errHandler(error, __CREATE_IMAGE_ERR);
4462  if (err != NULL) {
4463  *err = error;
4464  }
4465  }
4466 
4467  Image1DArray() { }
4468 
4476  explicit Image1DArray(const cl_mem& imageArray, bool retainObject = false) :
4477  Image(imageArray, retainObject) { }
4478 
4479 
4480  Image1DArray& operator = (const cl_mem& rhs)
4481  {
4482  Image::operator=(rhs);
4483  return *this;
4484  }
4485 
4489  Image1DArray(const Image1DArray& img) : Image(img) {}
4490 
4494  Image1DArray& operator = (const Image1DArray &img)
4495  {
4496  Image::operator=(img);
4497  return *this;
4498  }
4499 
4503  Image1DArray(Image1DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4504 
4508  Image1DArray& operator = (Image1DArray &&img)
4509  {
4510  Image::operator=(std::move(img));
4511  return *this;
4512  }
4513 
4514 };
4515 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4516 
4517 
4524 class Image2D : public Image
4525 {
4526 public:
4531  Image2D(
4532  const Context& context,
4533  cl_mem_flags flags,
4534  ImageFormat format,
4535  size_type width,
4536  size_type height,
4537  size_type row_pitch = 0,
4538  void* host_ptr = NULL,
4539  cl_int* err = NULL)
4540  {
4541  cl_int error;
4542  bool useCreateImage;
4543 
4544 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
4545  // Run-time decision based on the actual platform
4546  {
4547  cl_uint version = detail::getContextPlatformVersion(context());
4548  useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
4549  }
4550 #elif CL_HPP_TARGET_OPENCL_VERSION >= 120
4551  useCreateImage = true;
4552 #else
4553  useCreateImage = false;
4554 #endif
4555 
4556 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4557  if (useCreateImage)
4558  {
4559  cl_image_desc desc =
4560  {
4561  CL_MEM_OBJECT_IMAGE2D,
4562  width,
4563  height,
4564  0, 0, // depth, array size (unused)
4565  row_pitch,
4566  0, 0, 0, 0
4567  };
4568  object_ = ::clCreateImage(
4569  context(),
4570  flags,
4571  &format,
4572  &desc,
4573  host_ptr,
4574  &error);
4575 
4576  detail::errHandler(error, __CREATE_IMAGE_ERR);
4577  if (err != NULL) {
4578  *err = error;
4579  }
4580  }
4581 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
4582 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
4583  if (!useCreateImage)
4584  {
4585  object_ = ::clCreateImage2D(
4586  context(), flags,&format, width, height, row_pitch, host_ptr, &error);
4587 
4588  detail::errHandler(error, __CREATE_IMAGE2D_ERR);
4589  if (err != NULL) {
4590  *err = error;
4591  }
4592  }
4593 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
4594  }
4595 
4596 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 || defined(CL_HPP_USE_CL_IMAGE2D_FROM_BUFFER_KHR)
4597 
4602  Image2D(
4603  const Context& context,
4604  ImageFormat format,
4605  const Buffer &sourceBuffer,
4606  size_type width,
4607  size_type height,
4608  size_type row_pitch = 0,
4609  cl_int* err = nullptr)
4610  {
4611  cl_int error;
4612 
4613  cl_image_desc desc =
4614  {
4615  CL_MEM_OBJECT_IMAGE2D,
4616  width,
4617  height,
4618  0, 0, // depth, array size (unused)
4619  row_pitch,
4620  0, 0, 0,
4621  // Use buffer as input to image
4622  sourceBuffer()
4623  };
4624  object_ = ::clCreateImage(
4625  context(),
4626  0, // flags inherited from buffer
4627  &format,
4628  &desc,
4629  nullptr,
4630  &error);
4631 
4632  detail::errHandler(error, __CREATE_IMAGE_ERR);
4633  if (err != nullptr) {
4634  *err = error;
4635  }
4636  }
4637 #endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200 || defined(CL_HPP_USE_CL_IMAGE2D_FROM_BUFFER_KHR)
4638 
4639 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
4640 
4652  Image2D(
4653  const Context& context,
4654  cl_channel_order order,
4655  const Image &sourceImage,
4656  cl_int* err = nullptr)
4657  {
4658  cl_int error;
4659 
4660  // Descriptor fields have to match source image
4661  size_type sourceWidth =
4662  sourceImage.getImageInfo<CL_IMAGE_WIDTH>();
4663  size_type sourceHeight =
4664  sourceImage.getImageInfo<CL_IMAGE_HEIGHT>();
4665  size_type sourceRowPitch =
4666  sourceImage.getImageInfo<CL_IMAGE_ROW_PITCH>();
4667  cl_uint sourceNumMIPLevels =
4668  sourceImage.getImageInfo<CL_IMAGE_NUM_MIP_LEVELS>();
4669  cl_uint sourceNumSamples =
4670  sourceImage.getImageInfo<CL_IMAGE_NUM_SAMPLES>();
4671  cl_image_format sourceFormat =
4672  sourceImage.getImageInfo<CL_IMAGE_FORMAT>();
4673 
4674  // Update only the channel order.
4675  // Channel format inherited from source.
4676  sourceFormat.image_channel_order = order;
4677  cl_image_desc desc =
4678  {
4679  CL_MEM_OBJECT_IMAGE2D,
4680  sourceWidth,
4681  sourceHeight,
4682  0, 0, // depth (unused), array size (unused)
4683  sourceRowPitch,
4684  0, // slice pitch (unused)
4685  sourceNumMIPLevels,
4686  sourceNumSamples,
4687  // Use buffer as input to image
4688  sourceImage()
4689  };
4690  object_ = ::clCreateImage(
4691  context(),
4692  0, // flags should be inherited from mem_object
4693  &sourceFormat,
4694  &desc,
4695  nullptr,
4696  &error);
4697 
4698  detail::errHandler(error, __CREATE_IMAGE_ERR);
4699  if (err != nullptr) {
4700  *err = error;
4701  }
4702  }
4703 #endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200
4704 
4706  Image2D() { }
4707 
4715  explicit Image2D(const cl_mem& image2D, bool retainObject = false) :
4716  Image(image2D, retainObject) { }
4717 
4722  Image2D& operator = (const cl_mem& rhs)
4723  {
4724  Image::operator=(rhs);
4725  return *this;
4726  }
4727 
4731  Image2D(const Image2D& img) : Image(img) {}
4732 
4736  Image2D& operator = (const Image2D &img)
4737  {
4738  Image::operator=(img);
4739  return *this;
4740  }
4741 
4745  Image2D(Image2D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4746 
4750  Image2D& operator = (Image2D &&img)
4751  {
4752  Image::operator=(std::move(img));
4753  return *this;
4754  }
4755 
4756 };
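/* Illustrative usage sketch (not part of the header): a 512x512 RGBA image
 * created on an existing context. `context` is a placeholder.
 *
 *     cl_int err = CL_SUCCESS;
 *     cl::Image2D image(context,
 *                       CL_MEM_READ_ONLY,
 *                       cl::ImageFormat(CL_RGBA, CL_UNORM_INT8),
 *                       512, 512, 0, nullptr, &err);
 */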
4757 
4758 
4759 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
4760 
4769 class CL_EXT_PREFIX__VERSION_1_1_DEPRECATED Image2DGL : public Image2D
4770 {
4771 public:
4777  Image2DGL(
4778  const Context& context,
4779  cl_mem_flags flags,
4780  cl_GLenum target,
4781  cl_GLint miplevel,
4782  cl_GLuint texobj,
4783  cl_int * err = NULL)
4784  {
4785  cl_int error;
4786  object_ = ::clCreateFromGLTexture2D(
4787  context(),
4788  flags,
4789  target,
4790  miplevel,
4791  texobj,
4792  &error);
4793 
4794  detail::errHandler(error, __CREATE_GL_TEXTURE_2D_ERR);
4795  if (err != NULL) {
4796  *err = error;
4797  }
4798 
4799  }
4800 
4802  Image2DGL() : Image2D() { }
4803 
4811  explicit Image2DGL(const cl_mem& image, bool retainObject = false) :
4812  Image2D(image, retainObject) { }
4813 
4818  Image2DGL& operator = (const cl_mem& rhs)
4819  {
4820  Image2D::operator=(rhs);
4821  return *this;
4822  }
4823 
4827  Image2DGL(const Image2DGL& img) : Image2D(img) {}
4828 
4832  Image2DGL& operator = (const Image2DGL &img)
4833  {
4834  Image2D::operator=(img);
4835  return *this;
4836  }
4837 
4841  Image2DGL(Image2DGL&& img) CL_HPP_NOEXCEPT_ : Image2D(std::move(img)) {}
4842 
4846  Image2DGL& operator = (Image2DGL &&img)
4847  {
4848  Image2D::operator=(std::move(img));
4849  return *this;
4850  }
4851 
4852 } CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
4853 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
4854 
4855 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4856 
4859 class Image2DArray : public Image
4860 {
4861 public:
4862  Image2DArray(
4863  const Context& context,
4864  cl_mem_flags flags,
4865  ImageFormat format,
4866  size_type arraySize,
4867  size_type width,
4868  size_type height,
4869  size_type rowPitch,
4870  size_type slicePitch,
4871  void* host_ptr = NULL,
4872  cl_int* err = NULL)
4873  {
4874  cl_int error;
4875  cl_image_desc desc =
4876  {
4877  CL_MEM_OBJECT_IMAGE2D_ARRAY,
4878  width,
4879  height,
4880  0, // depth (unused)
4881  arraySize,
4882  rowPitch,
4883  slicePitch,
4884  0, 0, 0
4885  };
4886  object_ = ::clCreateImage(
4887  context(),
4888  flags,
4889  &format,
4890  &desc,
4891  host_ptr,
4892  &error);
4893 
4894  detail::errHandler(error, __CREATE_IMAGE_ERR);
4895  if (err != NULL) {
4896  *err = error;
4897  }
4898  }
4899 
4900  Image2DArray() { }
4901 
4909  explicit Image2DArray(const cl_mem& imageArray, bool retainObject = false) : Image(imageArray, retainObject) { }
4910 
4911  Image2DArray& operator = (const cl_mem& rhs)
4912  {
4913  Image::operator=(rhs);
4914  return *this;
4915  }
4916 
4920  Image2DArray(const Image2DArray& img) : Image(img) {}
4921 
4925  Image2DArray& operator = (const Image2DArray &img)
4926  {
4927  Image::operator=(img);
4928  return *this;
4929  }
4930 
4934  Image2DArray(Image2DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4935 
4939  Image2DArray& operator = (Image2DArray &&img)
4940  {
4941  Image::operator=(std::move(img));
4942  return *this;
4943  }
4944 };
4945 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4946 
4953 class Image3D : public Image
4954 {
4955 public:
4960  Image3D(
4961  const Context& context,
4962  cl_mem_flags flags,
4963  ImageFormat format,
4964  size_type width,
4965  size_type height,
4966  size_type depth,
4967  size_type row_pitch = 0,
4968  size_type slice_pitch = 0,
4969  void* host_ptr = NULL,
4970  cl_int* err = NULL)
4971  {
4972  cl_int error;
4973  bool useCreateImage;
4974 
4975 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
4976  // Run-time decision based on the actual platform
4977  {
4978  cl_uint version = detail::getContextPlatformVersion(context());
4979  useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
4980  }
4981 #elif CL_HPP_TARGET_OPENCL_VERSION >= 120
4982  useCreateImage = true;
4983 #else
4984  useCreateImage = false;
4985 #endif
4986 
4987 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4988  if (useCreateImage)
4989  {
4990  cl_image_desc desc =
4991  {
4992  CL_MEM_OBJECT_IMAGE3D,
4993  width,
4994  height,
4995  depth,
4996  0, // array size (unused)
4997  row_pitch,
4998  slice_pitch,
4999  0, 0, 0
5000  };
5001  object_ = ::clCreateImage(
5002  context(),
5003  flags,
5004  &format,
5005  &desc,
5006  host_ptr,
5007  &error);
5008 
5009  detail::errHandler(error, __CREATE_IMAGE_ERR);
5010  if (err != NULL) {
5011  *err = error;
5012  }
5013  }
5014 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5015 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
5016  if (!useCreateImage)
5017  {
5018  object_ = ::clCreateImage3D(
5019  context(), flags, &format, width, height, depth, row_pitch,
5020  slice_pitch, host_ptr, &error);
5021 
5022  detail::errHandler(error, __CREATE_IMAGE3D_ERR);
5023  if (err != NULL) {
5024  *err = error;
5025  }
5026  }
5027 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
5028  }
5029 
5031  Image3D() : Image() { }
5032 
5040  explicit Image3D(const cl_mem& image3D, bool retainObject = false) :
5041  Image(image3D, retainObject) { }
5042 
5047  Image3D& operator = (const cl_mem& rhs)
5048  {
5049  Image::operator=(rhs);
5050  return *this;
5051  }
5052 
5056  Image3D(const Image3D& img) : Image(img) {}
5057 
5061  Image3D& operator = (const Image3D &img)
5062  {
5063  Image::operator=(img);
5064  return *this;
5065  }
5066 
5070  Image3D(Image3D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
5071 
5075  Image3D& operator = (Image3D &&img)
5076  {
5077  Image::operator=(std::move(img));
5078  return *this;
5079  }
5080 };
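 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * Creates a single-channel float volume from host data; `ctx` and `volumeData`
  * (a hypothetical host array of 64*64*64 floats) are assumed to exist.
  * \code{.cpp}
  *     cl::Image3D volume(
  *         ctx,
  *         CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
  *         cl::ImageFormat(CL_R, CL_FLOAT),
  *         64, 64, 64,          // width, height, depth
  *         0, 0,                // row/slice pitch derived from width/height
  *         volumeData.data(),
  *         &err);
  * \endcode
  */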
5081 
5082 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
5083 
5091 class Image3DGL : public Image3D
5092 {
5093 public:
5099  Image3DGL(
5100  const Context& context,
5101  cl_mem_flags flags,
5102  cl_GLenum target,
5103  cl_GLint miplevel,
5104  cl_GLuint texobj,
5105  cl_int * err = NULL)
5106  {
5107  cl_int error;
5108  object_ = ::clCreateFromGLTexture3D(
5109  context(),
5110  flags,
5111  target,
5112  miplevel,
5113  texobj,
5114  &error);
5115 
5116  detail::errHandler(error, __CREATE_GL_TEXTURE_3D_ERR);
5117  if (err != NULL) {
5118  *err = error;
5119  }
5120  }
5121 
5123  Image3DGL() : Image3D() { }
5124 
5132  explicit Image3DGL(const cl_mem& image, bool retainObject = false) :
5133  Image3D(image, retainObject) { }
5134 
5139  Image3DGL& operator = (const cl_mem& rhs)
5140  {
5141  Image3D::operator=(rhs);
5142  return *this;
5143  }
5144 
5148  Image3DGL(const Image3DGL& img) : Image3D(img) {}
5149 
5153  Image3DGL& operator = (const Image3DGL &img)
5154  {
5155  Image3D::operator=(img);
5156  return *this;
5157  }
5158 
5162  Image3DGL(Image3DGL&& img) CL_HPP_NOEXCEPT_ : Image3D(std::move(img)) {}
5163 
5167  Image3DGL& operator = (Image3DGL &&img)
5168  {
5169  Image3D::operator=(std::move(img));
5170  return *this;
5171  }
5172 };
5173 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
5174 
5175 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5176 
5182 class ImageGL : public Image
5183 {
5184 public:
5185  ImageGL(
5186  const Context& context,
5187  cl_mem_flags flags,
5188  cl_GLenum target,
5189  cl_GLint miplevel,
5190  cl_GLuint texobj,
5191  cl_int * err = NULL)
5192  {
5193  cl_int error;
5194  object_ = ::clCreateFromGLTexture(
5195  context(),
5196  flags,
5197  target,
5198  miplevel,
5199  texobj,
5200  &error);
5201 
5202  detail::errHandler(error, __CREATE_GL_TEXTURE_ERR);
5203  if (err != NULL) {
5204  *err = error;
5205  }
5206  }
5207 
5208  ImageGL() : Image() { }
5209 
5217  explicit ImageGL(const cl_mem& image, bool retainObject = false) :
5218  Image(image, retainObject) { }
5219 
5220  ImageGL& operator = (const cl_mem& rhs)
5221  {
5222  Image::operator=(rhs);
5223  return *this;
5224  }
5225 
5229  ImageGL(const ImageGL& img) : Image(img) {}
5230 
5234  ImageGL& operator = (const ImageGL &img)
5235  {
5236  Image::operator=(img);
5237  return *this;
5238  }
5239 
5243  ImageGL(ImageGL&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
5244 
5248  ImageGL& operator = (ImageGL &&img)
5249  {
5250  Image::operator=(std::move(img));
5251  return *this;
5252  }
5253 };
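 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * Wraps an existing OpenGL texture as a CL image, assuming `sharedCtx` was created
  * with CL-GL sharing enabled, GL headers are included, and `glTex` is a valid
  * GL_TEXTURE_2D object name.
  * \code{.cpp}
  *     cl::ImageGL clTex(
  *         sharedCtx,
  *         CL_MEM_READ_WRITE,
  *         GL_TEXTURE_2D,
  *         0,          // mip level
  *         glTex,
  *         &err);
  * \endcode
  */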
5254 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5255 
5256 
5257 
5258 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5259 
5265 class Pipe : public Memory
5266 {
5267 public:
5268 
5278  Pipe(
5279  const Context& context,
5280  cl_uint packet_size,
5281  cl_uint max_packets,
5282  cl_int* err = NULL)
5283  {
5284  cl_int error;
5285 
5286  cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5287  object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
5288 
5289  detail::errHandler(error, __CREATE_PIPE_ERR);
5290  if (err != NULL) {
5291  *err = error;
5292  }
5293  }
5294 
5303  Pipe(
5304  cl_uint packet_size,
5305  cl_uint max_packets,
5306  cl_int* err = NULL)
5307  {
5308  cl_int error;
5309 
5310  Context context = Context::getDefault(err);
5311 
5312  cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5313  object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
5314 
5315  detail::errHandler(error, __CREATE_PIPE_ERR);
5316  if (err != NULL) {
5317  *err = error;
5318  }
5319  }
5320 
5322  Pipe() : Memory() { }
5323 
5331  explicit Pipe(const cl_mem& pipe, bool retainObject = false) :
5332  Memory(pipe, retainObject) { }
5333 
5338  Pipe& operator = (const cl_mem& rhs)
5339  {
5340  Memory::operator=(rhs);
5341  return *this;
5342  }
5343 
5347  Pipe(const Pipe& pipe) : Memory(pipe) {}
5348 
5352  Pipe& operator = (const Pipe &pipe)
5353  {
5354  Memory::operator=(pipe);
5355  return *this;
5356  }
5357 
5361  Pipe(Pipe&& pipe) CL_HPP_NOEXCEPT_ : Memory(std::move(pipe)) {}
5362 
5366  Pipe& operator = (Pipe &&pipe)
5367  {
5368  Memory::operator=(std::move(pipe));
5369  return *this;
5370  }
5371 
5373  template <typename T>
5374  cl_int getInfo(cl_pipe_info name, T* param) const
5375  {
5376  return detail::errHandler(
5377  detail::getInfo(&::clGetPipeInfo, object_, name, param),
5378  __GET_PIPE_INFO_ERR);
5379  }
5380 
5382  template <cl_int name> typename
5383  detail::param_traits<detail::cl_pipe_info, name>::param_type
5384  getInfo(cl_int* err = NULL) const
5385  {
5386  typename detail::param_traits<
5387  detail::cl_pipe_info, name>::param_type param;
5388  cl_int result = getInfo(name, &param);
5389  if (err != NULL) {
5390  *err = result;
5391  }
5392  return param;
5393  }
5394 }; // class Pipe
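 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * Creates a pipe of 1024 cl_float packets and queries its capacity; requires an
  * OpenCL 2.0 device and assumes `ctx` is a valid cl::Context.
  * \code{.cpp}
  *     cl::Pipe pipe(ctx, sizeof(cl_float), 1024, &err);
  *     cl_uint maxPackets = pipe.getInfo<CL_PIPE_MAX_PACKETS>(&err);
  * \endcode
  */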
5395 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
5396 
5397 
5406 class Sampler : public detail::Wrapper<cl_sampler>
5407 {
5408 public:
5410  Sampler() { }
5411 
5416  Sampler(
5417  const Context& context,
5418  cl_bool normalized_coords,
5419  cl_addressing_mode addressing_mode,
5420  cl_filter_mode filter_mode,
5421  cl_int* err = NULL)
5422  {
5423  cl_int error;
5424 
5425 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5426  cl_sampler_properties sampler_properties[] = {
5427  CL_SAMPLER_NORMALIZED_COORDS, normalized_coords,
5428  CL_SAMPLER_ADDRESSING_MODE, addressing_mode,
5429  CL_SAMPLER_FILTER_MODE, filter_mode,
5430  0 };
5431  object_ = ::clCreateSamplerWithProperties(
5432  context(),
5433  sampler_properties,
5434  &error);
5435 
5436  detail::errHandler(error, __CREATE_SAMPLER_WITH_PROPERTIES_ERR);
5437  if (err != NULL) {
5438  *err = error;
5439  }
5440 #else
5441  object_ = ::clCreateSampler(
5442  context(),
5443  normalized_coords,
5444  addressing_mode,
5445  filter_mode,
5446  &error);
5447 
5448  detail::errHandler(error, __CREATE_SAMPLER_ERR);
5449  if (err != NULL) {
5450  *err = error;
5451  }
5452 #endif
5453  }
5454 
5463  explicit Sampler(const cl_sampler& sampler, bool retainObject = false) :
5464  detail::Wrapper<cl_type>(sampler, retainObject) { }
5465 
5471  Sampler& operator = (const cl_sampler& rhs)
5472  {
5473  detail::Wrapper<cl_type>::operator=(rhs);
5474  return *this;
5475  }
5476 
5480  Sampler(const Sampler& sam) : detail::Wrapper<cl_type>(sam) {}
5481 
5485  Sampler& operator = (const Sampler &sam)
5486  {
5487  detail::Wrapper<cl_type>::operator=(sam);
5488  return *this;
5489  }
5490 
5494  Sampler(Sampler&& sam) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(sam)) {}
5495 
5499  Sampler& operator = (Sampler &&sam)
5500  {
5501  detail::Wrapper<cl_type>::operator=(std::move(sam));
5502  return *this;
5503  }
5504 
5506  template <typename T>
5507  cl_int getInfo(cl_sampler_info name, T* param) const
5508  {
5509  return detail::errHandler(
5510  detail::getInfo(&::clGetSamplerInfo, object_, name, param),
5511  __GET_SAMPLER_INFO_ERR);
5512  }
5513 
5515  template <cl_int name> typename
5516  detail::param_traits<detail::cl_sampler_info, name>::param_type
5517  getInfo(cl_int* err = NULL) const
5518  {
5519  typename detail::param_traits<
5520  detail::cl_sampler_info, name>::param_type param;
5521  cl_int result = getInfo(name, &param);
5522  if (err != NULL) {
5523  *err = result;
5524  }
5525  return param;
5526  }
5527 };
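 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * Creates a linear-filtering sampler with unnormalized coordinates clamped to the
  * image edge; assumes `ctx` is a valid cl::Context.
  * \code{.cpp}
  *     cl::Sampler sampler(
  *         ctx,
  *         CL_FALSE,                  // normalized_coords
  *         CL_ADDRESS_CLAMP_TO_EDGE,
  *         CL_FILTER_LINEAR,
  *         &err);
  * \endcode
  */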
5528 
5529 class Program;
5530 class CommandQueue;
5531 class DeviceCommandQueue;
5532 class Kernel;
5533 
5535 class NDRange
5536 {
5537 private:
5538  size_type sizes_[3];
5539  cl_uint dimensions_;
5540 
5541 public:
5543  NDRange()
5544  : dimensions_(0)
5545  {
5546  sizes_[0] = 0;
5547  sizes_[1] = 0;
5548  sizes_[2] = 0;
5549  }
5550 
5552  NDRange(size_type size0)
5553  : dimensions_(1)
5554  {
5555  sizes_[0] = size0;
5556  sizes_[1] = 1;
5557  sizes_[2] = 1;
5558  }
5559 
5561  NDRange(size_type size0, size_type size1)
5562  : dimensions_(2)
5563  {
5564  sizes_[0] = size0;
5565  sizes_[1] = size1;
5566  sizes_[2] = 1;
5567  }
5568 
5570  NDRange(size_type size0, size_type size1, size_type size2)
5571  : dimensions_(3)
5572  {
5573  sizes_[0] = size0;
5574  sizes_[1] = size1;
5575  sizes_[2] = size2;
5576  }
5577 
5582  operator const size_type*() const {
5583  return sizes_;
5584  }
5585 
5587  size_type dimensions() const
5588  {
5589  return dimensions_;
5590  }
5591 
5593  // runtime number of dimensions
5594  size_type size() const
5595  {
5596  return dimensions_*sizeof(size_type);
5597  }
5598 
5599  size_type* get()
5600  {
5601  return sizes_;
5602  }
5603 
5604  const size_type* get() const
5605  {
5606  return sizes_;
5607  }
5608 };
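 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * NDRange values describe 1-, 2- or 3-dimensional global and local work sizes for
  * kernel enqueues; `queue` and `kernel` are assumed to exist.
  * \code{.cpp}
  *     cl::NDRange global(1024, 1024);  // 2D global size
  *     cl::NDRange local(16, 16);       // 2D work-group size
  *     queue.enqueueNDRangeKernel(kernel, cl::NullRange, global, local);
  * \endcode
  */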
5609 
5611 static const NDRange NullRange;
5612 
5614  struct LocalSpaceArg
5615  {
5616  size_type size_;
5617 };
5618 
5619 namespace detail {
5620 
5621 template <typename T, class Enable = void>
5622  struct KernelArgumentHandler;
5623 
5624 // Enable for objects that are not subclasses of memory
5625 // Pointers, constants etc
5626 template <typename T>
5627 struct KernelArgumentHandler<T, typename std::enable_if<!std::is_base_of<cl::Memory, T>::value>::type>
5628 {
5629  static size_type size(const T&) { return sizeof(T); }
5630  static const T* ptr(const T& value) { return &value; }
5631 };
5632 
5633 // Enable for subclasses of memory where we want to get a reference to the cl_mem out
5634 // and pass that in for safety
5635 template <typename T>
5636 struct KernelArgumentHandler<T, typename std::enable_if<std::is_base_of<cl::Memory, T>::value>::type>
5637 {
5638  static size_type size(const T&) { return sizeof(cl_mem); }
5639  static const cl_mem* ptr(const T& value) { return &(value()); }
5640 };
5641 
5642 // Specialization for DeviceCommandQueue defined later
5643 
5644 template <>
5645  struct KernelArgumentHandler<LocalSpaceArg, void>
5646  {
5647  static size_type size(const LocalSpaceArg& value) { return value.size_; }
5648  static const void* ptr(const LocalSpaceArg&) { return NULL; }
5649 };
5650 
5651 }
5653 
5657 inline LocalSpaceArg
5658 Local(size_type size)
5659 {
5660  LocalSpaceArg ret = { size };
5661  return ret;
5662 }
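 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * cl::Local reserves dynamically sized local (work-group) memory for a kernel
  * argument; `kernel` is assumed to be a valid cl::Kernel.
  * \code{.cpp}
  *     kernel.setArg(2, cl::Local(256 * sizeof(cl_float)));
  * \endcode
  */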
5663 
5672 class Kernel : public detail::Wrapper<cl_kernel>
5673 {
5674 public:
5675  inline Kernel(const Program& program, const char* name, cl_int* err = NULL);
5676 
5678  Kernel() { }
5679 
5688  explicit Kernel(const cl_kernel& kernel, bool retainObject = false) :
5689  detail::Wrapper<cl_type>(kernel, retainObject) { }
5690 
5696  Kernel& operator = (const cl_kernel& rhs)
5697  {
5698  detail::Wrapper<cl_type>::operator=(rhs);
5699  return *this;
5700  }
5701 
5705  Kernel(const Kernel& kernel) : detail::Wrapper<cl_type>(kernel) {}
5706 
5710  Kernel& operator = (const Kernel &kernel)
5711  {
5712  detail::Wrapper<cl_type>::operator=(kernel);
5713  return *this;
5714  }
5715 
5719  Kernel(Kernel&& kernel) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(kernel)) {}
5720 
5724  Kernel& operator = (Kernel &&kernel)
5725  {
5726  detail::Wrapper<cl_type>::operator=(std::move(kernel));
5727  return *this;
5728  }
5729 
5730  template <typename T>
5731  cl_int getInfo(cl_kernel_info name, T* param) const
5732  {
5733  return detail::errHandler(
5734  detail::getInfo(&::clGetKernelInfo, object_, name, param),
5735  __GET_KERNEL_INFO_ERR);
5736  }
5737 
5738  template <cl_int name> typename
5739  detail::param_traits<detail::cl_kernel_info, name>::param_type
5740  getInfo(cl_int* err = NULL) const
5741  {
5742  typename detail::param_traits<
5743  detail::cl_kernel_info, name>::param_type param;
5744  cl_int result = getInfo(name, &param);
5745  if (err != NULL) {
5746  *err = result;
5747  }
5748  return param;
5749  }
5750 
5751 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5752  template <typename T>
5753  cl_int getArgInfo(cl_uint argIndex, cl_kernel_arg_info name, T* param) const
5754  {
5755  return detail::errHandler(
5756  detail::getInfo(&::clGetKernelArgInfo, object_, argIndex, name, param),
5757  __GET_KERNEL_ARG_INFO_ERR);
5758  }
5759 
5760  template <cl_int name> typename
5761  detail::param_traits<detail::cl_kernel_arg_info, name>::param_type
5762  getArgInfo(cl_uint argIndex, cl_int* err = NULL) const
5763  {
5764  typename detail::param_traits<
5765  detail::cl_kernel_arg_info, name>::param_type param;
5766  cl_int result = getArgInfo(argIndex, name, &param);
5767  if (err != NULL) {
5768  *err = result;
5769  }
5770  return param;
5771  }
5772 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5773 
5774  template <typename T>
5775  cl_int getWorkGroupInfo(
5776  const Device& device, cl_kernel_work_group_info name, T* param) const
5777  {
5778  return detail::errHandler(
5779  detail::getInfo(
5780  &::clGetKernelWorkGroupInfo, object_, device(), name, param),
5781  __GET_KERNEL_WORK_GROUP_INFO_ERR);
5782  }
5783 
5784  template <cl_int name> typename
5785  detail::param_traits<detail::cl_kernel_work_group_info, name>::param_type
5786  getWorkGroupInfo(const Device& device, cl_int* err = NULL) const
5787  {
5788  typename detail::param_traits<
5789  detail::cl_kernel_work_group_info, name>::param_type param;
5790  cl_int result = getWorkGroupInfo(device, name, &param);
5791  if (err != NULL) {
5792  *err = result;
5793  }
5794  return param;
5795  }
5796 
5797 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5798 #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
5799  cl_int getSubGroupInfo(const cl::Device &dev, cl_kernel_sub_group_info name, const cl::NDRange &range, size_type* param) const
5800  {
5801  typedef clGetKernelSubGroupInfoKHR_fn PFN_clGetKernelSubGroupInfoKHR;
5802  static PFN_clGetKernelSubGroupInfoKHR pfn_clGetKernelSubGroupInfoKHR = NULL;
5803  CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetKernelSubGroupInfoKHR);
5804 
5805  return detail::errHandler(
5806  pfn_clGetKernelSubGroupInfoKHR(object_, dev(), name, range.size(), range.get(), sizeof(size_type), param, nullptr),
5807  __GET_KERNEL_ARG_INFO_ERR);
5808  }
5809 
5810  template <cl_int name>
5811  size_type getSubGroupInfo(const cl::Device &dev, const cl::NDRange &range, cl_int* err = NULL) const
5812  {
5813  size_type param;
5814  cl_int result = getSubGroupInfo(dev, name, range, &param);
5815  if (err != NULL) {
5816  *err = result;
5817  }
5818  return param;
5819  }
5820 #endif // #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
5821 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5822 
5823 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5824 
5826  template<typename T, class D>
5827  cl_int setArg(cl_uint index, const cl::pointer<T, D> &argPtr)
5828  {
5829  return detail::errHandler(
5830  ::clSetKernelArgSVMPointer(object_, index, argPtr.get()),
5831  __SET_KERNEL_ARGS_ERR);
5832  }
5833 
5836  template<typename T, class Alloc>
5837  cl_int setArg(cl_uint index, const cl::vector<T, Alloc> &argPtr)
5838  {
5839  return detail::errHandler(
5840  ::clSetKernelArgSVMPointer(object_, index, argPtr.data()),
5841  __SET_KERNEL_ARGS_ERR);
5842  }
5843 
5846  template<typename T>
5847  typename std::enable_if<std::is_pointer<T>::value, cl_int>::type
5848  setArg(cl_uint index, const T argPtr)
5849  {
5850  return detail::errHandler(
5851  ::clSetKernelArgSVMPointer(object_, index, argPtr),
5852  __SET_KERNEL_ARGS_ERR);
5853  }
5854 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5855 
5858  template <typename T>
5859  typename std::enable_if<!std::is_pointer<T>::value, cl_int>::type
5860  setArg(cl_uint index, const T &value)
5861  {
5862  return detail::errHandler(
5863  ::clSetKernelArg(
5864  object_,
5865  index,
5866  detail::KernelArgumentHandler<T>::size(value),
5867  detail::KernelArgumentHandler<T>::ptr(value)),
5868  __SET_KERNEL_ARGS_ERR);
5869  }
5870 
5871  cl_int setArg(cl_uint index, size_type size, const void* argPtr)
5872  {
5873  return detail::errHandler(
5874  ::clSetKernelArg(object_, index, size, argPtr),
5875  __SET_KERNEL_ARGS_ERR);
5876  }
5877 
5878 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5879 
5883  cl_int setSVMPointers(const vector<void*> &pointerList)
5884  {
5885  return detail::errHandler(
5886  ::clSetKernelExecInfo(
5887  object_,
5888  CL_KERNEL_EXEC_INFO_SVM_PTRS,
5889  sizeof(void*)*pointerList.size(),
5890  pointerList.data()));
5891  }
5892 
5897  template<int ArrayLength>
5898  cl_int setSVMPointers(const std::array<void*, ArrayLength> &pointerList)
5899  {
5900  return detail::errHandler(
5901  ::clSetKernelExecInfo(
5902  object_,
5903  CL_KERNEL_EXEC_INFO_SVM_PTRS,
5904  sizeof(void*)*pointerList.size(),
5905  pointerList.data()));
5906  }
5907 
5919  cl_int enableFineGrainedSystemSVM(bool svmEnabled)
5920  {
5921  cl_bool svmEnabled_ = svmEnabled ? CL_TRUE : CL_FALSE;
5922  return detail::errHandler(
5923  ::clSetKernelExecInfo(
5924  object_,
5925  CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM,
5926  sizeof(cl_bool),
5927  &svmEnabled_
5928  )
5929  );
5930  }
5931 
5932  template<int index, int ArrayLength, class D, typename T0, typename T1, typename... Ts>
5933  void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0, const pointer<T1, D> &t1, Ts & ... ts)
5934  {
5935  pointerList[index] = static_cast<void*>(t0.get());
5936  setSVMPointersHelper<index + 1, ArrayLength>(pointerList, t1, ts...);
5937  }
5938 
5939  template<int index, int ArrayLength, typename T0, typename T1, typename... Ts>
5940  typename std::enable_if<std::is_pointer<T0>::value, void>::type
5941  setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0, T1 t1, Ts... ts)
5942  {
5943  pointerList[index] = static_cast<void*>(t0);
5944  setSVMPointersHelper<index + 1, ArrayLength>(pointerList, t1, ts...);
5945  }
5946 
5947  template<int index, int ArrayLength, typename T0, class D>
5948  void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0)
5949  {
5950  pointerList[index] = static_cast<void*>(t0.get());
5951  }
5952 
5953 
5954  template<int index, int ArrayLength, typename T0>
5955  typename std::enable_if<std::is_pointer<T0>::value, void>::type
5956  setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0)
5957  {
5958  pointerList[index] = static_cast<void*>(t0);
5959  }
5960 
5961  template<typename T0, typename... Ts>
5962  cl_int setSVMPointers(const T0 &t0, Ts & ... ts)
5963  {
5964  std::array<void*, 1 + sizeof...(Ts)> pointerList;
5965 
5966  setSVMPointersHelper<0, 1 + sizeof...(Ts)>(pointerList, t0, ts...);
5967  return detail::errHandler(
5968  ::clSetKernelExecInfo(
5969  object_,
5970  CL_KERNEL_EXEC_INFO_SVM_PTRS,
5971  sizeof(void*)*(1 + sizeof...(Ts)),
5972  pointerList.data()));
5973  }
5974 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5975 };
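 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * Creates a kernel from a built program and sets memory-object and plain-value
  * arguments; "vadd", `bufA`, `bufB` and `n` are hypothetical names.
  * \code{.cpp}
  *     cl::Kernel k(program, "vadd", &err);
  *     k.setArg(0, bufA);                     // cl::Buffer, passed as cl_mem
  *     k.setArg(1, bufB);
  *     k.setArg(2, static_cast<cl_int>(n));   // plain value, passed by size/pointer
  * \endcode
  */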
5976 
5980 class Program : public detail::Wrapper<cl_program>
5981 {
5982 public:
5983 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
5984  typedef vector<vector<unsigned char>> Binaries;
5985  typedef vector<string> Sources;
5986 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
5987  typedef vector<std::pair<const void*, size_type> > Binaries;
5988  typedef vector<std::pair<const char*, size_type> > Sources;
5989 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
5990 
5991  Program(
5992  const string& source,
5993  bool build = false,
5994  cl_int* err = NULL)
5995  {
5996  cl_int error;
5997 
5998  const char * strings = source.c_str();
5999  const size_type length = source.size();
6000 
6001  Context context = Context::getDefault(err);
6002 
6003  object_ = ::clCreateProgramWithSource(
6004  context(), (cl_uint)1, &strings, &length, &error);
6005 
6006  detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6007 
6008  if (error == CL_SUCCESS && build) {
6009 
6010  error = ::clBuildProgram(
6011  object_,
6012  0,
6013  NULL,
6014 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6015  "-cl-std=CL2.0",
6016 #else
6017  "",
6018 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6019  NULL,
6020  NULL);
6021 
6022  detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6023  }
6024 
6025  if (err != NULL) {
6026  *err = error;
6027  }
6028  }
6029 
6030  Program(
6031  const Context& context,
6032  const string& source,
6033  bool build = false,
6034  cl_int* err = NULL)
6035  {
6036  cl_int error;
6037 
6038  const char * strings = source.c_str();
6039  const size_type length = source.size();
6040 
6041  object_ = ::clCreateProgramWithSource(
6042  context(), (cl_uint)1, &strings, &length, &error);
6043 
6044  detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6045 
6046  if (error == CL_SUCCESS && build) {
6047  error = ::clBuildProgram(
6048  object_,
6049  0,
6050  NULL,
6051 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6052  "-cl-std=CL2.0",
6053 #else
6054  "",
6055 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6056  NULL,
6057  NULL);
6058 
6059  detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6060  }
6061 
6062  if (err != NULL) {
6063  *err = error;
6064  }
6065  }
6066 
6071  Program(
6072  const Sources& sources,
6073  cl_int* err = NULL)
6074  {
6075  cl_int error;
6076  Context context = Context::getDefault(err);
6077 
6078  const size_type n = (size_type)sources.size();
6079 
6080  vector<size_type> lengths(n);
6081  vector<const char*> strings(n);
6082 
6083  for (size_type i = 0; i < n; ++i) {
6084 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6085  strings[i] = sources[(int)i].data();
6086  lengths[i] = sources[(int)i].length();
6087 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6088  strings[i] = sources[(int)i].first;
6089  lengths[i] = sources[(int)i].second;
6090 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6091  }
6092 
6093  object_ = ::clCreateProgramWithSource(
6094  context(), (cl_uint)n, strings.data(), lengths.data(), &error);
6095 
6096  detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6097  if (err != NULL) {
6098  *err = error;
6099  }
6100  }
6101 
6106  Program(
6107  const Context& context,
6108  const Sources& sources,
6109  cl_int* err = NULL)
6110  {
6111  cl_int error;
6112 
6113  const size_type n = (size_type)sources.size();
6114 
6115  vector<size_type> lengths(n);
6116  vector<const char*> strings(n);
6117 
6118  for (size_type i = 0; i < n; ++i) {
6119 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6120  strings[i] = sources[(int)i].data();
6121  lengths[i] = sources[(int)i].length();
6122 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6123  strings[i] = sources[(int)i].first;
6124  lengths[i] = sources[(int)i].second;
6125 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6126  }
6127 
6128  object_ = ::clCreateProgramWithSource(
6129  context(), (cl_uint)n, strings.data(), lengths.data(), &error);
6130 
6131  detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6132  if (err != NULL) {
6133  *err = error;
6134  }
6135  }
6136 
6156  Program(
6157  const Context& context,
6158  const vector<Device>& devices,
6159  const Binaries& binaries,
6160  vector<cl_int>* binaryStatus = NULL,
6161  cl_int* err = NULL)
6162  {
6163  cl_int error;
6164 
6165  const size_type numDevices = devices.size();
6166 
6167  // Catch size mismatch early and return
6168  if(binaries.size() != numDevices) {
6169  error = CL_INVALID_VALUE;
6170  detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
6171  if (err != NULL) {
6172  *err = error;
6173  }
6174  return;
6175  }
6176 
6177 
6178  vector<size_type> lengths(numDevices);
6179  vector<const unsigned char*> images(numDevices);
6180 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6181  for (size_type i = 0; i < numDevices; ++i) {
6182  images[i] = binaries[i].data();
6183  lengths[i] = binaries[(int)i].size();
6184  }
6185 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6186  for (size_type i = 0; i < numDevices; ++i) {
6187  images[i] = (const unsigned char*)binaries[i].first;
6188  lengths[i] = binaries[(int)i].second;
6189  }
6190 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6191 
6192  vector<cl_device_id> deviceIDs(numDevices);
6193  for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6194  deviceIDs[deviceIndex] = (devices[deviceIndex])();
6195  }
6196 
6197  if(binaryStatus) {
6198  binaryStatus->resize(numDevices);
6199  }
6200 
6201  object_ = ::clCreateProgramWithBinary(
6202  context(), (cl_uint) devices.size(),
6203  deviceIDs.data(),
6204  lengths.data(), images.data(), (binaryStatus != NULL && numDevices > 0)
6205  ? &binaryStatus->front()
6206  : NULL, &error);
6207 
6208  detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
6209  if (err != NULL) {
6210  *err = error;
6211  }
6212  }
6213 
6214 
6215 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
6216 
6220  Program(
6221  const Context& context,
6222  const vector<Device>& devices,
6223  const string& kernelNames,
6224  cl_int* err = NULL)
6225  {
6226  cl_int error;
6227 
6228 
6229  size_type numDevices = devices.size();
6230  vector<cl_device_id> deviceIDs(numDevices);
6231  for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6232  deviceIDs[deviceIndex] = (devices[deviceIndex])();
6233  }
6234 
6235  object_ = ::clCreateProgramWithBuiltInKernels(
6236  context(),
6237  (cl_uint) devices.size(),
6238  deviceIDs.data(),
6239  kernelNames.c_str(),
6240  &error);
6241 
6242  detail::errHandler(error, __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR);
6243  if (err != NULL) {
6244  *err = error;
6245  }
6246  }
6247 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6248 
6249  Program() { }
6250 
6251 
6258  explicit Program(const cl_program& program, bool retainObject = false) :
6259  detail::Wrapper<cl_type>(program, retainObject) { }
6260 
6261  Program& operator = (const cl_program& rhs)
6262  {
6263  detail::Wrapper<cl_type>::operator=(rhs);
6264  return *this;
6265  }
6266 
6270  Program(const Program& program) : detail::Wrapper<cl_type>(program) {}
6271 
6275  Program& operator = (const Program &program)
6276  {
6277  detail::Wrapper<cl_type>::operator=(program);
6278  return *this;
6279  }
6280 
6284  Program(Program&& program) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(program)) {}
6285 
6289  Program& operator = (Program &&program)
6290  {
6291  detail::Wrapper<cl_type>::operator=(std::move(program));
6292  return *this;
6293  }
6294 
6295  cl_int build(
6296  const vector<Device>& devices,
6297  const char* options = NULL,
6298  void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6299  void* data = NULL) const
6300  {
6301  size_type numDevices = devices.size();
6302  vector<cl_device_id> deviceIDs(numDevices);
6303 
6304  for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6305  deviceIDs[deviceIndex] = (devices[deviceIndex])();
6306  }
6307 
6308  cl_int buildError = ::clBuildProgram(
6309  object_,
6310  (cl_uint)
6311  devices.size(),
6312  deviceIDs.data(),
6313  options,
6314  notifyFptr,
6315  data);
6316 
6317  return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6318  }
6319 
6320  cl_int build(
6321  const char* options = NULL,
6322  void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6323  void* data = NULL) const
6324  {
6325  cl_int buildError = ::clBuildProgram(
6326  object_,
6327  0,
6328  NULL,
6329  options,
6330  notifyFptr,
6331  data);
6332 
6333 
6334  return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6335  }
6336 
6337 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
6338  cl_int compile(
6339  const char* options = NULL,
6340  void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6341  void* data = NULL) const
6342  {
6343  cl_int error = ::clCompileProgram(
6344  object_,
6345  0,
6346  NULL,
6347  options,
6348  0,
6349  NULL,
6350  NULL,
6351  notifyFptr,
6352  data);
6353  return detail::buildErrHandler(error, __COMPILE_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6354  }
6355 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6356 
6357  template <typename T>
6358  cl_int getInfo(cl_program_info name, T* param) const
6359  {
6360  return detail::errHandler(
6361  detail::getInfo(&::clGetProgramInfo, object_, name, param),
6362  __GET_PROGRAM_INFO_ERR);
6363  }
6364 
6365  template <cl_int name> typename
6366  detail::param_traits<detail::cl_program_info, name>::param_type
6367  getInfo(cl_int* err = NULL) const
6368  {
6369  typename detail::param_traits<
6370  detail::cl_program_info, name>::param_type param;
6371  cl_int result = getInfo(name, &param);
6372  if (err != NULL) {
6373  *err = result;
6374  }
6375  return param;
6376  }
6377 
6378  template <typename T>
6379  cl_int getBuildInfo(
6380  const Device& device, cl_program_build_info name, T* param) const
6381  {
6382  return detail::errHandler(
6383  detail::getInfo(
6384  &::clGetProgramBuildInfo, object_, device(), name, param),
6385  __GET_PROGRAM_BUILD_INFO_ERR);
6386  }
6387 
6388  template <cl_int name> typename
6389  detail::param_traits<detail::cl_program_build_info, name>::param_type
6390  getBuildInfo(const Device& device, cl_int* err = NULL) const
6391  {
6392  typename detail::param_traits<
6393  detail::cl_program_build_info, name>::param_type param;
6394  cl_int result = getBuildInfo(device, name, &param);
6395  if (err != NULL) {
6396  *err = result;
6397  }
6398  return param;
6399  }
6400 
6406  template <cl_int name>
6407  vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
6408  getBuildInfo(cl_int *err = NULL) const
6409  {
6410  cl_int result = CL_SUCCESS;
6411 
6412  auto devs = getInfo<CL_PROGRAM_DEVICES>(&result);
6413  vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
6414  devInfo;
6415 
6416  // If there was an initial error from getInfo return the error
6417  if (result != CL_SUCCESS) {
6418  if (err != NULL) {
6419  *err = result;
6420  }
6421  return devInfo;
6422  }
6423 
6424  for (const cl::Device &d : devs) {
6425  typename detail::param_traits<
6426  detail::cl_program_build_info, name>::param_type param;
6427  result = getBuildInfo(d, name, &param);
6428  devInfo.push_back(
6429  std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>
6430  (d, param));
6431  if (result != CL_SUCCESS) {
6432  // On error, leave the loop and return the error code
6433  break;
6434  }
6435  }
6436  if (err != NULL) {
6437  *err = result;
6438  }
6439  if (result != CL_SUCCESS) {
6440  devInfo.clear();
6441  }
6442  return devInfo;
6443  }
6444 
6445  cl_int createKernels(vector<Kernel>* kernels)
6446  {
6447  cl_uint numKernels;
6448  cl_int err = ::clCreateKernelsInProgram(object_, 0, NULL, &numKernels);
6449  if (err != CL_SUCCESS) {
6450  return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
6451  }
6452 
6453  vector<cl_kernel> value(numKernels);
6454 
6455  err = ::clCreateKernelsInProgram(
6456  object_, numKernels, value.data(), NULL);
6457  if (err != CL_SUCCESS) {
6458  return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
6459  }
6460 
6461  if (kernels) {
6462  kernels->resize(value.size());
6463 
6464  // Assign to param, constructing with retain behaviour
6465  // to correctly capture each underlying CL object
6466  for (size_type i = 0; i < value.size(); i++) {
6467  // We do not need to retain because this kernel is being created
6468  // by the runtime
6469  (*kernels)[i] = Kernel(value[i], false);
6470  }
6471  }
6472  return CL_SUCCESS;
6473  }
6474 };
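 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * Builds a program from source and, on failure, prints the per-device build logs.
  * Assumes exceptions are not enabled and that <iostream> is included; `ctx` and
  * `kernelSource` are hypothetical names.
  * \code{.cpp}
  *     cl::Program prog(ctx, kernelSource, false, &err);
  *     if (prog.build("-cl-std=CL2.0") != CL_SUCCESS) {
  *         for (const auto &log : prog.getBuildInfo<CL_PROGRAM_BUILD_LOG>()) {
  *             std::cerr << log.second << std::endl;
  *         }
  *     }
  * \endcode
  */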
6475 
6476 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
6477 inline Program linkProgram(
6478  Program input1,
6479  Program input2,
6480  const char* options = NULL,
6481  void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6482  void* data = NULL,
6483  cl_int* err = NULL)
6484 {
6485  cl_int error_local = CL_SUCCESS;
6486 
6487  cl_program programs[2] = { input1(), input2() };
6488 
6489  Context ctx = input1.getInfo<CL_PROGRAM_CONTEXT>(&error_local);
6490  if(error_local!=CL_SUCCESS) {
6491  detail::errHandler(error_local, __LINK_PROGRAM_ERR);
6492  }
6493 
6494  cl_program prog = ::clLinkProgram(
6495  ctx(),
6496  0,
6497  NULL,
6498  options,
6499  2,
6500  programs,
6501  notifyFptr,
6502  data,
6503  &error_local);
6504 
6505  detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
6506  if (err != NULL) {
6507  *err = error_local;
6508  }
6509 
6510  return Program(prog);
6511 }
6512 
6513 inline Program linkProgram(
6514  vector<Program> inputPrograms,
6515  const char* options = NULL,
6516  void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6517  void* data = NULL,
6518  cl_int* err = NULL)
6519 {
6520  cl_int error_local = CL_SUCCESS;
6521 
6522  vector<cl_program> programs(inputPrograms.size());
6523 
6524  for (unsigned int i = 0; i < inputPrograms.size(); i++) {
6525  programs[i] = inputPrograms[i]();
6526  }
6527 
6528  Context ctx;
6529  if(inputPrograms.size() > 0) {
6530  ctx = inputPrograms[0].getInfo<CL_PROGRAM_CONTEXT>(&error_local);
6531  if(error_local!=CL_SUCCESS) {
6532  detail::errHandler(error_local, __LINK_PROGRAM_ERR);
6533  }
6534  }
6535  cl_program prog = ::clLinkProgram(
6536  ctx(),
6537  0,
6538  NULL,
6539  options,
6540  (cl_uint)inputPrograms.size(),
6541  programs.data(),
6542  notifyFptr,
6543  data,
6544  &error_local);
6545 
6546  detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
6547  if (err != NULL) {
6548  *err = error_local;
6549  }
6550 
6551  return Program(prog, false);
6552 }
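 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * Compiles two programs separately and links them into one; requires an OpenCL 1.2
  * platform, and `sourceA`/`sourceB` are hypothetical source strings.
  * \code{.cpp}
  *     cl::Program a(ctx, sourceA);  a.compile();
  *     cl::Program b(ctx, sourceB);  b.compile();
  *     cl::Program linked = cl::linkProgram(a, b, nullptr, nullptr, nullptr, &err);
  * \endcode
  */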
6553 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6554 
6555 // Template specialization for CL_PROGRAM_BINARIES
6556 template <>
6557 inline cl_int cl::Program::getInfo(cl_program_info name, vector<vector<unsigned char>>* param) const
6558 {
6559  if (name != CL_PROGRAM_BINARIES) {
6560  return CL_INVALID_VALUE;
6561  }
6562  if (param) {
6563  // Resize the parameter array appropriately for each allocation
6564  // and pass down to the helper
6565 
6566  vector<size_type> sizes = getInfo<CL_PROGRAM_BINARY_SIZES>();
6567  size_type numBinaries = sizes.size();
6568 
6569  // Resize the parameter array and constituent arrays
6570  param->resize(numBinaries);
6571  for (size_type i = 0; i < numBinaries; ++i) {
6572  (*param)[i].resize(sizes[i]);
6573  }
6574 
6575  return detail::errHandler(
6576  detail::getInfo(&::clGetProgramInfo, object_, name, param),
6577  __GET_PROGRAM_INFO_ERR);
6578  }
6579 
6580  return CL_SUCCESS;
6581 }
6582 
6583 template<>
6584 inline vector<vector<unsigned char>> cl::Program::getInfo<CL_PROGRAM_BINARIES>(cl_int* err) const
6585 {
6586  vector<vector<unsigned char>> binariesVectors;
6587 
6588  cl_int result = getInfo(CL_PROGRAM_BINARIES, &binariesVectors);
6589  if (err != NULL) {
6590  *err = result;
6591  }
6592  return binariesVectors;
6593 }
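 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * Retrieves the device binaries of a built program via the specialization above;
  * each inner vector holds the binary for one device.
  * \code{.cpp}
  *     auto bins = prog.getInfo<CL_PROGRAM_BINARIES>(&err);
  * \endcode
  */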
6594 
6595 inline Kernel::Kernel(const Program& program, const char* name, cl_int* err)
6596 {
6597  cl_int error;
6598 
6599  object_ = ::clCreateKernel(program(), name, &error);
6600  detail::errHandler(error, __CREATE_KERNEL_ERR);
6601 
6602  if (err != NULL) {
6603  *err = error;
6604  }
6605 
6606 }
6607 
6608 enum class QueueProperties : cl_command_queue_properties
6609 {
6610  None = 0,
6611  Profiling = CL_QUEUE_PROFILING_ENABLE,
6612  OutOfOrder = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE,
6613 };
6614 
6615 inline QueueProperties operator|(QueueProperties lhs, QueueProperties rhs)
6616 {
6617  return static_cast<QueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
6618 }
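 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * QueueProperties values can be combined with operator| and passed to the
  * CommandQueue constructors defined below; `ctx` and `dev` are assumed to exist.
  * \code{.cpp}
  *     cl::CommandQueue q(
  *         ctx, dev,
  *         cl::QueueProperties::Profiling | cl::QueueProperties::OutOfOrder,
  *         &err);
  * \endcode
  */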
6619 
6623 class CommandQueue : public detail::Wrapper<cl_command_queue>
6624 {
6625 private:
6626  static std::once_flag default_initialized_;
6627  static CommandQueue default_;
6628  static cl_int default_error_;
6629 
6635  static void makeDefault()
6636  {
6637  /* We don't want to throw an error from this function, so we have to
6638  * catch and set the error flag.
6639  */
6640 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
6641  try
6642 #endif
6643  {
6644  int error;
6645  Context context = Context::getDefault(&error);
6646 
6647  if (error != CL_SUCCESS) {
6648  default_error_ = error;
6649  }
6650  else {
6651  Device device = Device::getDefault();
6652  default_ = CommandQueue(context, device, 0, &default_error_);
6653  }
6654  }
6655 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
6656  catch (cl::Error &e) {
6657  default_error_ = e.err();
6658  }
6659 #endif
6660  }
6661 
6667  static void makeDefaultProvided(const CommandQueue &c) {
6668  default_ = c;
6669  }
6670 
6671 public:
6672 #ifdef CL_HPP_UNIT_TEST_ENABLE
6673 
6679  static void unitTestClearDefault() {
6680  default_ = CommandQueue();
6681  }
6682 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
6683 
6684 
6689  CommandQueue(
6690  cl_command_queue_properties properties,
6691  cl_int* err = NULL)
6692  {
6693  cl_int error;
6694 
6695  Context context = Context::getDefault(&error);
6696  detail::errHandler(error, __CREATE_CONTEXT_ERR);
6697 
6698  if (error != CL_SUCCESS) {
6699  if (err != NULL) {
6700  *err = error;
6701  }
6702  }
6703  else {
6704  Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
6705  bool useWithProperties;
6706 
6707 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6708  // Run-time decision based on the actual platform
6709  {
6710  cl_uint version = detail::getContextPlatformVersion(context());
6711  useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6712  }
6713 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6714  useWithProperties = true;
6715 #else
6716  useWithProperties = false;
6717 #endif
6718 
6719 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6720  if (useWithProperties) {
6721  cl_queue_properties queue_properties[] = {
6722  CL_QUEUE_PROPERTIES, properties, 0 };
6723  if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
6724  object_ = ::clCreateCommandQueueWithProperties(
6725  context(), device(), queue_properties, &error);
6726  }
6727  else {
6728  error = CL_INVALID_QUEUE_PROPERTIES;
6729  }
6730 
6731  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6732  if (err != NULL) {
6733  *err = error;
6734  }
6735  }
6736 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6737 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6738  if (!useWithProperties) {
6739  object_ = ::clCreateCommandQueue(
6740  context(), device(), properties, &error);
6741 
6742  detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
6743  if (err != NULL) {
6744  *err = error;
6745  }
6746  }
6747 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
6748  }
6749  }
6750 
6755  CommandQueue(
6756  QueueProperties properties,
6757  cl_int* err = NULL)
6758  {
6759  cl_int error;
6760 
6761  Context context = Context::getDefault(&error);
6762  detail::errHandler(error, __CREATE_CONTEXT_ERR);
6763 
6764  if (error != CL_SUCCESS) {
6765  if (err != NULL) {
6766  *err = error;
6767  }
6768  }
6769  else {
6770  Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
6771  bool useWithProperties;
6772 
6773 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6774  // Run-time decision based on the actual platform
6775  {
6776  cl_uint version = detail::getContextPlatformVersion(context());
6777  useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6778  }
6779 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6780  useWithProperties = true;
6781 #else
6782  useWithProperties = false;
6783 #endif
6784 
6785 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6786  if (useWithProperties) {
6787  cl_queue_properties queue_properties[] = {
6788  CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
6789 
6790  object_ = ::clCreateCommandQueueWithProperties(
6791  context(), device(), queue_properties, &error);
6792 
6793  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6794  if (err != NULL) {
6795  *err = error;
6796  }
6797  }
6798 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6799 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6800  if (!useWithProperties) {
6801  object_ = ::clCreateCommandQueue(
6802  context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
6803 
6804  detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
6805  if (err != NULL) {
6806  *err = error;
6807  }
6808  }
6809 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
6810 
6811  }
6812  }
6813 
6818  explicit CommandQueue(
6819  const Context& context,
6820  cl_command_queue_properties properties = 0,
6821  cl_int* err = NULL)
6822  {
6823  cl_int error;
6824  bool useWithProperties;
6825  vector<cl::Device> devices;
6826  error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
6827 
6828  detail::errHandler(error, __CREATE_CONTEXT_ERR);
6829 
6830  if (error != CL_SUCCESS)
6831  {
6832  if (err != NULL) {
6833  *err = error;
6834  }
6835  return;
6836  }
6837 
6838 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6839  // Run-time decision based on the actual platform
6840  {
6841  cl_uint version = detail::getContextPlatformVersion(context());
6842  useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6843  }
6844 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6845  useWithProperties = true;
6846 #else
6847  useWithProperties = false;
6848 #endif
6849 
6850 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6851  if (useWithProperties) {
6852  cl_queue_properties queue_properties[] = {
6853  CL_QUEUE_PROPERTIES, properties, 0 };
6854  if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
6855  object_ = ::clCreateCommandQueueWithProperties(
6856  context(), devices[0](), queue_properties, &error);
6857  }
6858  else {
6859  error = CL_INVALID_QUEUE_PROPERTIES;
6860  }
6861 
6862  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6863  if (err != NULL) {
6864  *err = error;
6865  }
6866  }
6867 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6868 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6869  if (!useWithProperties) {
6870  object_ = ::clCreateCommandQueue(
6871  context(), devices[0](), properties, &error);
6872 
6873  detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
6874  if (err != NULL) {
6875  *err = error;
6876  }
6877  }
6878 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
6879  }
6880 
6885  explicit CommandQueue(
6886  const Context& context,
6887  QueueProperties properties,
6888  cl_int* err = NULL)
6889  {
6890  cl_int error;
6891  bool useWithProperties;
6892  vector<cl::Device> devices;
6893  error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
6894 
6895  detail::errHandler(error, __CREATE_CONTEXT_ERR);
6896 
6897  if (error != CL_SUCCESS)
6898  {
6899  if (err != NULL) {
6900  *err = error;
6901  }
6902  return;
6903  }
6904 
6905 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6906  // Run-time decision based on the actual platform
6907  {
6908  cl_uint version = detail::getContextPlatformVersion(context());
6909  useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6910  }
6911 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6912  useWithProperties = true;
6913 #else
6914  useWithProperties = false;
6915 #endif
6916 
6917 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6918  if (useWithProperties) {
6919  cl_queue_properties queue_properties[] = {
6920  CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
6921  object_ = ::clCreateCommandQueueWithProperties(
6922  context(), devices[0](), queue_properties, &error);
6923 
6924  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6925  if (err != NULL) {
6926  *err = error;
6927  }
6928  }
6929 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6930 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6931  if (!useWithProperties) {
6932  object_ = ::clCreateCommandQueue(
6933  context(), devices[0](), static_cast<cl_command_queue_properties>(properties), &error);
6934 
6935  detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
6936  if (err != NULL) {
6937  *err = error;
6938  }
6939  }
6940 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
6941  }
6942 
6947  CommandQueue(
6948  const Context& context,
6949  const Device& device,
6950  cl_command_queue_properties properties = 0,
6951  cl_int* err = NULL)
6952  {
6953  cl_int error;
6954  bool useWithProperties;
6955 
6956 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6957  // Run-time decision based on the actual platform
6958  {
6959  cl_uint version = detail::getContextPlatformVersion(context());
6960  useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6961  }
6962 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6963  useWithProperties = true;
6964 #else
6965  useWithProperties = false;
6966 #endif
6967 
6968 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6969  if (useWithProperties) {
6970  cl_queue_properties queue_properties[] = {
6971  CL_QUEUE_PROPERTIES, properties, 0 };
6972  object_ = ::clCreateCommandQueueWithProperties(
6973  context(), device(), queue_properties, &error);
6974 
6975  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6976  if (err != NULL) {
6977  *err = error;
6978  }
6979  }
6980 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6981 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6982  if (!useWithProperties) {
6983  object_ = ::clCreateCommandQueue(
6984  context(), device(), properties, &error);
6985 
6986  detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
6987  if (err != NULL) {
6988  *err = error;
6989  }
6990  }
6991 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
6992  }
6993 
6998  CommandQueue(
6999  const Context& context,
7000  const Device& device,
7001  QueueProperties properties,
7002  cl_int* err = NULL)
7003  {
7004  cl_int error;
7005  bool useWithProperties;
7006 
7007 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7008  // Run-time decision based on the actual platform
7009  {
7010  cl_uint version = detail::getContextPlatformVersion(context());
7011  useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7012  }
7013 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7014  useWithProperties = true;
7015 #else
7016  useWithProperties = false;
7017 #endif
7018 
7019 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7020  if (useWithProperties) {
7021  cl_queue_properties queue_properties[] = {
7022  CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
7023  object_ = ::clCreateCommandQueueWithProperties(
7024  context(), device(), queue_properties, &error);
7025 
7026  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7027  if (err != NULL) {
7028  *err = error;
7029  }
7030  }
7031 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7032 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7033  if (!useWithProperties) {
7034  object_ = ::clCreateCommandQueue(
7035  context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
7036 
7037  detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7038  if (err != NULL) {
7039  *err = error;
7040  }
7041  }
7042 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7043  }
7044 
7045  static CommandQueue getDefault(cl_int * err = NULL)
7046  {
7047  std::call_once(default_initialized_, makeDefault);
7048 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7049  detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7050 #else // CL_HPP_TARGET_OPENCL_VERSION >= 200
7051  detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_ERR);
7052 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7053  if (err != NULL) {
7054  *err = default_error_;
7055  }
7056  return default_;
7057  }
7058 
7066  static CommandQueue setDefault(const CommandQueue &default_queue)
7067  {
7068  std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_queue));
7069  detail::errHandler(default_error_);
7070  return default_;
7071  }
7072 
7073  CommandQueue() { }
7074 
7075 
7082  explicit CommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
7083  detail::Wrapper<cl_type>(commandQueue, retainObject) { }
7084 
7085  CommandQueue& operator = (const cl_command_queue& rhs)
7086  {
7087  detail::Wrapper<cl_type>::operator=(rhs);
7088  return *this;
7089  }
7090 
7094  CommandQueue(const CommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}
7095 
7099  CommandQueue& operator = (const CommandQueue &queue)
7100  {
7101  detail::Wrapper<cl_type>::operator=(queue);
7102  return *this;
7103  }
7104 
7108  CommandQueue(CommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}
7109 
7113  CommandQueue& operator = (CommandQueue &&queue)
7114  {
7115  detail::Wrapper<cl_type>::operator=(std::move(queue));
7116  return *this;
7117  }
7118 
7119  template <typename T>
7120  cl_int getInfo(cl_command_queue_info name, T* param) const
7121  {
7122  return detail::errHandler(
7123  detail::getInfo(
7124  &::clGetCommandQueueInfo, object_, name, param),
7125  __GET_COMMAND_QUEUE_INFO_ERR);
7126  }
7127 
7128  template <cl_int name> typename
7129  detail::param_traits<detail::cl_command_queue_info, name>::param_type
7130  getInfo(cl_int* err = NULL) const
7131  {
7132  typename detail::param_traits<
7133  detail::cl_command_queue_info, name>::param_type param;
7134  cl_int result = getInfo(name, &param);
7135  if (err != NULL) {
7136  *err = result;
7137  }
7138  return param;
7139  }
7140 
7141  cl_int enqueueReadBuffer(
7142  const Buffer& buffer,
7143  cl_bool blocking,
7144  size_type offset,
7145  size_type size,
7146  void* ptr,
7147  const vector<Event>* events = NULL,
7148  Event* event = NULL) const
7149  {
7150  cl_event tmp;
7151  cl_int err = detail::errHandler(
7152  ::clEnqueueReadBuffer(
7153  object_, buffer(), blocking, offset, size,
7154  ptr,
7155  (events != NULL) ? (cl_uint) events->size() : 0,
7156  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7157  (event != NULL) ? &tmp : NULL),
7158  __ENQUEUE_READ_BUFFER_ERR);
7159 
7160  if (event != NULL && err == CL_SUCCESS)
7161  *event = tmp;
7162 
7163  return err;
7164  }
7165 
7166  cl_int enqueueWriteBuffer(
7167  const Buffer& buffer,
7168  cl_bool blocking,
7169  size_type offset,
7170  size_type size,
7171  const void* ptr,
7172  const vector<Event>* events = NULL,
7173  Event* event = NULL) const
7174  {
7175  cl_event tmp;
7176  cl_int err = detail::errHandler(
7177  ::clEnqueueWriteBuffer(
7178  object_, buffer(), blocking, offset, size,
7179  ptr,
7180  (events != NULL) ? (cl_uint) events->size() : 0,
7181  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7182  (event != NULL) ? &tmp : NULL),
7183  __ENQUEUE_WRITE_BUFFER_ERR);
7184 
7185  if (event != NULL && err == CL_SUCCESS)
7186  *event = tmp;
7187 
7188  return err;
7189  }
7190 
7191  cl_int enqueueCopyBuffer(
7192  const Buffer& src,
7193  const Buffer& dst,
7194  size_type src_offset,
7195  size_type dst_offset,
7196  size_type size,
7197  const vector<Event>* events = NULL,
7198  Event* event = NULL) const
7199  {
7200  cl_event tmp;
7201  cl_int err = detail::errHandler(
7202  ::clEnqueueCopyBuffer(
7203  object_, src(), dst(), src_offset, dst_offset, size,
7204  (events != NULL) ? (cl_uint) events->size() : 0,
7205  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7206  (event != NULL) ? &tmp : NULL),
7207  __ENQEUE_COPY_BUFFER_ERR);
7208 
7209  if (event != NULL && err == CL_SUCCESS)
7210  *event = tmp;
7211 
7212  return err;
7213  }
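 /* Usage sketch (annotation added for illustration; not part of the Khronos header).
  * Blocking write and read of a buffer; `ctx` is a valid cl::Context and
  * `hostIn`/`hostOut` are hypothetical std::vector<float> instances of size n.
  * \code{.cpp}
  *     cl::Buffer buf(ctx, CL_MEM_READ_WRITE, n * sizeof(float));
  *     queue.enqueueWriteBuffer(buf, CL_TRUE, 0, n * sizeof(float), hostIn.data());
  *     queue.enqueueReadBuffer (buf, CL_TRUE, 0, n * sizeof(float), hostOut.data());
  * \endcode
  */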
7214 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
7215  cl_int enqueueReadBufferRect(
7216  const Buffer& buffer,
7217  cl_bool blocking,
7218  const array<size_type, 3>& buffer_offset,
7219  const array<size_type, 3>& host_offset,
7220  const array<size_type, 3>& region,
7221  size_type buffer_row_pitch,
7222  size_type buffer_slice_pitch,
7223  size_type host_row_pitch,
7224  size_type host_slice_pitch,
7225  void *ptr,
7226  const vector<Event>* events = NULL,
7227  Event* event = NULL) const
7228  {
7229  cl_event tmp;
7230  cl_int err = detail::errHandler(
7231  ::clEnqueueReadBufferRect(
7232  object_,
7233  buffer(),
7234  blocking,
7235  buffer_offset.data(),
7236  host_offset.data(),
7237  region.data(),
7238  buffer_row_pitch,
7239  buffer_slice_pitch,
7240  host_row_pitch,
7241  host_slice_pitch,
7242  ptr,
7243  (events != NULL) ? (cl_uint) events->size() : 0,
7244  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7245  (event != NULL) ? &tmp : NULL),
7246  __ENQUEUE_READ_BUFFER_RECT_ERR);
7247 
7248  if (event != NULL && err == CL_SUCCESS)
7249  *event = tmp;
7250 
7251  return err;
7252  }
7253 
7254  cl_int enqueueWriteBufferRect(
7255  const Buffer& buffer,
7256  cl_bool blocking,
7257  const array<size_type, 3>& buffer_offset,
7258  const array<size_type, 3>& host_offset,
7259  const array<size_type, 3>& region,
7260  size_type buffer_row_pitch,
7261  size_type buffer_slice_pitch,
7262  size_type host_row_pitch,
7263  size_type host_slice_pitch,
7264  const void *ptr,
7265  const vector<Event>* events = NULL,
7266  Event* event = NULL) const
7267  {
7268  cl_event tmp;
7269  cl_int err = detail::errHandler(
7270  ::clEnqueueWriteBufferRect(
7271  object_,
7272  buffer(),
7273  blocking,
7274  buffer_offset.data(),
7275  host_offset.data(),
7276  region.data(),
7277  buffer_row_pitch,
7278  buffer_slice_pitch,
7279  host_row_pitch,
7280  host_slice_pitch,
7281  ptr,
7282  (events != NULL) ? (cl_uint) events->size() : 0,
7283  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7284  (event != NULL) ? &tmp : NULL),
7285  __ENQUEUE_WRITE_BUFFER_RECT_ERR);
7286 
7287  if (event != NULL && err == CL_SUCCESS)
7288  *event = tmp;
7289 
7290  return err;
7291  }
7292 
7293  cl_int enqueueCopyBufferRect(
7294  const Buffer& src,
7295  const Buffer& dst,
7296  const array<size_type, 3>& src_origin,
7297  const array<size_type, 3>& dst_origin,
7298  const array<size_type, 3>& region,
7299  size_type src_row_pitch,
7300  size_type src_slice_pitch,
7301  size_type dst_row_pitch,
7302  size_type dst_slice_pitch,
7303  const vector<Event>* events = NULL,
7304  Event* event = NULL) const
7305  {
7306  cl_event tmp;
7307  cl_int err = detail::errHandler(
7308  ::clEnqueueCopyBufferRect(
7309  object_,
7310  src(),
7311  dst(),
7312  src_origin.data(),
7313  dst_origin.data(),
7314  region.data(),
7315  src_row_pitch,
7316  src_slice_pitch,
7317  dst_row_pitch,
7318  dst_slice_pitch,
7319  (events != NULL) ? (cl_uint) events->size() : 0,
7320  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7321  (event != NULL) ? &tmp : NULL),
7322  __ENQEUE_COPY_BUFFER_RECT_ERR);
7323 
7324  if (event != NULL && err == CL_SUCCESS)
7325  *event = tmp;
7326 
7327  return err;
7328  }
7329 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
7330 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7331 
7342  template<typename PatternType>
7343  cl_int enqueueFillBuffer(
7344  const Buffer& buffer,
7345  PatternType pattern,
7346  size_type offset,
7347  size_type size,
7348  const vector<Event>* events = NULL,
7349  Event* event = NULL) const
7350  {
7351  cl_event tmp;
7352  cl_int err = detail::errHandler(
7353  ::clEnqueueFillBuffer(
7354  object_,
7355  buffer(),
7356  static_cast<void*>(&pattern),
7357  sizeof(PatternType),
7358  offset,
7359  size,
7360  (events != NULL) ? (cl_uint) events->size() : 0,
7361  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7362  (event != NULL) ? &tmp : NULL),
7363  __ENQUEUE_FILL_BUFFER_ERR);
7364 
7365  if (event != NULL && err == CL_SUCCESS)
7366  *event = tmp;
7367 
7368  return err;
7369  }
7370 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
7371 
7372  cl_int enqueueReadImage(
7373  const Image& image,
7374  cl_bool blocking,
7375  const array<size_type, 3>& origin,
7376  const array<size_type, 3>& region,
7377  size_type row_pitch,
7378  size_type slice_pitch,
7379  void* ptr,
7380  const vector<Event>* events = NULL,
7381  Event* event = NULL) const
7382  {
7383  cl_event tmp;
7384  cl_int err = detail::errHandler(
7385  ::clEnqueueReadImage(
7386  object_,
7387  image(),
7388  blocking,
7389  origin.data(),
7390  region.data(),
7391  row_pitch,
7392  slice_pitch,
7393  ptr,
7394  (events != NULL) ? (cl_uint) events->size() : 0,
7395  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7396  (event != NULL) ? &tmp : NULL),
7397  __ENQUEUE_READ_IMAGE_ERR);
7398 
7399  if (event != NULL && err == CL_SUCCESS)
7400  *event = tmp;
7401 
7402  return err;
7403  }
7404 
7405  cl_int enqueueWriteImage(
7406  const Image& image,
7407  cl_bool blocking,
7408  const array<size_type, 3>& origin,
7409  const array<size_type, 3>& region,
7410  size_type row_pitch,
7411  size_type slice_pitch,
7412  const void* ptr,
7413  const vector<Event>* events = NULL,
7414  Event* event = NULL) const
7415  {
7416  cl_event tmp;
7417  cl_int err = detail::errHandler(
7418  ::clEnqueueWriteImage(
7419  object_,
7420  image(),
7421  blocking,
7422  origin.data(),
7423  region.data(),
7424  row_pitch,
7425  slice_pitch,
7426  ptr,
7427  (events != NULL) ? (cl_uint) events->size() : 0,
7428  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7429  (event != NULL) ? &tmp : NULL),
7430  __ENQUEUE_WRITE_IMAGE_ERR);
7431 
7432  if (event != NULL && err == CL_SUCCESS)
7433  *event = tmp;
7434 
7435  return err;
7436  }
7437 
7438  cl_int enqueueCopyImage(
7439  const Image& src,
7440  const Image& dst,
7441  const array<size_type, 3>& src_origin,
7442  const array<size_type, 3>& dst_origin,
7443  const array<size_type, 3>& region,
7444  const vector<Event>* events = NULL,
7445  Event* event = NULL) const
7446  {
7447  cl_event tmp;
7448  cl_int err = detail::errHandler(
7449  ::clEnqueueCopyImage(
7450  object_,
7451  src(),
7452  dst(),
7453  src_origin.data(),
7454  dst_origin.data(),
7455  region.data(),
7456  (events != NULL) ? (cl_uint) events->size() : 0,
7457  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7458  (event != NULL) ? &tmp : NULL),
7459  __ENQUEUE_COPY_IMAGE_ERR);
7460 
7461  if (event != NULL && err == CL_SUCCESS)
7462  *event = tmp;
7463 
7464  return err;
7465  }
7466 
7467 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7468 
7475  cl_int enqueueFillImage(
7476  const Image& image,
7477  cl_float4 fillColor,
7478  const array<size_type, 3>& origin,
7479  const array<size_type, 3>& region,
7480  const vector<Event>* events = NULL,
7481  Event* event = NULL) const
7482  {
7483  cl_event tmp;
7484  cl_int err = detail::errHandler(
7485  ::clEnqueueFillImage(
7486  object_,
7487  image(),
7488  static_cast<void*>(&fillColor),
7489  origin.data(),
7490  region.data(),
7491  (events != NULL) ? (cl_uint) events->size() : 0,
7492  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7493  (event != NULL) ? &tmp : NULL),
7494  __ENQUEUE_FILL_IMAGE_ERR);
7495 
7496  if (event != NULL && err == CL_SUCCESS)
7497  *event = tmp;
7498 
7499  return err;
7500  }
7501 
7509  cl_int enqueueFillImage(
7510  const Image& image,
7511  cl_int4 fillColor,
7512  const array<size_type, 3>& origin,
7513  const array<size_type, 3>& region,
7514  const vector<Event>* events = NULL,
7515  Event* event = NULL) const
7516  {
7517  cl_event tmp;
7518  cl_int err = detail::errHandler(
7519  ::clEnqueueFillImage(
7520  object_,
7521  image(),
7522  static_cast<void*>(&fillColor),
7523  origin.data(),
7524  region.data(),
7525  (events != NULL) ? (cl_uint) events->size() : 0,
7526  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7527  (event != NULL) ? &tmp : NULL),
7528  __ENQUEUE_FILL_IMAGE_ERR);
7529 
7530  if (event != NULL && err == CL_SUCCESS)
7531  *event = tmp;
7532 
7533  return err;
7534  }
7535 
7543  cl_int enqueueFillImage(
7544  const Image& image,
7545  cl_uint4 fillColor,
7546  const array<size_type, 3>& origin,
7547  const array<size_type, 3>& region,
7548  const vector<Event>* events = NULL,
7549  Event* event = NULL) const
7550  {
7551  cl_event tmp;
7552  cl_int err = detail::errHandler(
7553  ::clEnqueueFillImage(
7554  object_,
7555  image(),
7556  static_cast<void*>(&fillColor),
7557  origin.data(),
7558  region.data(),
7559  (events != NULL) ? (cl_uint) events->size() : 0,
7560  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7561  (event != NULL) ? &tmp : NULL),
7562  __ENQUEUE_FILL_IMAGE_ERR);
7563 
7564  if (event != NULL && err == CL_SUCCESS)
7565  *event = tmp;
7566 
7567  return err;
7568  }
7569 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
7570 
7571  cl_int enqueueCopyImageToBuffer(
7572  const Image& src,
7573  const Buffer& dst,
7574  const array<size_type, 3>& src_origin,
7575  const array<size_type, 3>& region,
7576  size_type dst_offset,
7577  const vector<Event>* events = NULL,
7578  Event* event = NULL) const
7579  {
7580  cl_event tmp;
7581  cl_int err = detail::errHandler(
7582  ::clEnqueueCopyImageToBuffer(
7583  object_,
7584  src(),
7585  dst(),
7586  src_origin.data(),
7587  region.data(),
7588  dst_offset,
7589  (events != NULL) ? (cl_uint) events->size() : 0,
7590  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7591  (event != NULL) ? &tmp : NULL),
7592  __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR);
7593 
7594  if (event != NULL && err == CL_SUCCESS)
7595  *event = tmp;
7596 
7597  return err;
7598  }
7599 
7600  cl_int enqueueCopyBufferToImage(
7601  const Buffer& src,
7602  const Image& dst,
7603  size_type src_offset,
7604  const array<size_type, 3>& dst_origin,
7605  const array<size_type, 3>& region,
7606  const vector<Event>* events = NULL,
7607  Event* event = NULL) const
7608  {
7609  cl_event tmp;
7610  cl_int err = detail::errHandler(
7611  ::clEnqueueCopyBufferToImage(
7612  object_,
7613  src(),
7614  dst(),
7615  src_offset,
7616  dst_origin.data(),
7617  region.data(),
7618  (events != NULL) ? (cl_uint) events->size() : 0,
7619  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7620  (event != NULL) ? &tmp : NULL),
7621  __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR);
7622 
7623  if (event != NULL && err == CL_SUCCESS)
7624  *event = tmp;
7625 
7626  return err;
7627  }
7628 
7629  void* enqueueMapBuffer(
7630  const Buffer& buffer,
7631  cl_bool blocking,
7632  cl_map_flags flags,
7633  size_type offset,
7634  size_type size,
7635  const vector<Event>* events = NULL,
7636  Event* event = NULL,
7637  cl_int* err = NULL) const
7638  {
7639  cl_event tmp;
7640  cl_int error;
7641  void * result = ::clEnqueueMapBuffer(
7642  object_, buffer(), blocking, flags, offset, size,
7643  (events != NULL) ? (cl_uint) events->size() : 0,
7644  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7645  (event != NULL) ? &tmp : NULL,
7646  &error);
7647 
7648  detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
7649  if (err != NULL) {
7650  *err = error;
7651  }
7652  if (event != NULL && error == CL_SUCCESS)
7653  *event = tmp;
7654 
7655  return result;
7656  }
7657 
7658  void* enqueueMapImage(
7659  const Image& buffer,
7660  cl_bool blocking,
7661  cl_map_flags flags,
7662  const array<size_type, 3>& origin,
7663  const array<size_type, 3>& region,
7664  size_type * row_pitch,
7665  size_type * slice_pitch,
7666  const vector<Event>* events = NULL,
7667  Event* event = NULL,
7668  cl_int* err = NULL) const
7669  {
7670  cl_event tmp;
7671  cl_int error;
7672  void * result = ::clEnqueueMapImage(
7673  object_, buffer(), blocking, flags,
7674  origin.data(),
7675  region.data(),
7676  row_pitch, slice_pitch,
7677  (events != NULL) ? (cl_uint) events->size() : 0,
7678  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7679  (event != NULL) ? &tmp : NULL,
7680  &error);
7681 
7682  detail::errHandler(error, __ENQUEUE_MAP_IMAGE_ERR);
7683  if (err != NULL) {
7684  *err = error;
7685  }
7686  if (event != NULL && error == CL_SUCCESS)
7687  *event = tmp;
7688  return result;
7689  }
7690 
7691 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7692 
7696  template<typename T>
7697  cl_int enqueueMapSVM(
7698  T* ptr,
7699  cl_bool blocking,
7700  cl_map_flags flags,
7701  size_type size,
7702  const vector<Event>* events = NULL,
7703  Event* event = NULL) const
7704  {
7705  cl_event tmp;
7706  cl_int err = detail::errHandler(::clEnqueueSVMMap(
7707  object_, blocking, flags, static_cast<void*>(ptr), size,
7708  (events != NULL) ? (cl_uint)events->size() : 0,
7709  (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7710  (event != NULL) ? &tmp : NULL),
7711  __ENQUEUE_MAP_BUFFER_ERR);
7712 
7713  if (event != NULL && err == CL_SUCCESS)
7714  *event = tmp;
7715 
7716  return err;
7717  }
7718 
7719 
7724  template<typename T, class D>
7725  cl_int enqueueMapSVM(
7726  cl::pointer<T, D> &ptr,
7727  cl_bool blocking,
7728  cl_map_flags flags,
7729  size_type size,
7730  const vector<Event>* events = NULL,
7731  Event* event = NULL) const
7732  {
7733  cl_event tmp;
7734  cl_int err = detail::errHandler(::clEnqueueSVMMap(
7735  object_, blocking, flags, static_cast<void*>(ptr.get()), size,
7736  (events != NULL) ? (cl_uint)events->size() : 0,
7737  (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7738  (event != NULL) ? &tmp : NULL),
7739  __ENQUEUE_MAP_BUFFER_ERR);
7740 
7741  if (event != NULL && err == CL_SUCCESS)
7742  *event = tmp;
7743 
7744  return err;
7745  }
7746 
7751  template<typename T, class Alloc>
7752  cl_int enqueueMapSVM(
7753  cl::vector<T, Alloc> &container,
7754  cl_bool blocking,
7755  cl_map_flags flags,
7756  const vector<Event>* events = NULL,
7757  Event* event = NULL) const
7758  {
7759  cl_event tmp;
7760  cl_int err = detail::errHandler(::clEnqueueSVMMap(
7761  object_, blocking, flags, static_cast<void*>(container.data()), container.size(),
7762  (events != NULL) ? (cl_uint)events->size() : 0,
7763  (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7764  (event != NULL) ? &tmp : NULL),
7765  __ENQUEUE_MAP_BUFFER_ERR);
7766 
7767  if (event != NULL && err == CL_SUCCESS)
7768  *event = tmp;
7769 
7770  return err;
7771  }
7772 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7773 
7774  cl_int enqueueUnmapMemObject(
7775  const Memory& memory,
7776  void* mapped_ptr,
7777  const vector<Event>* events = NULL,
7778  Event* event = NULL) const
7779  {
7780  cl_event tmp;
7781  cl_int err = detail::errHandler(
7782  ::clEnqueueUnmapMemObject(
7783  object_, memory(), mapped_ptr,
7784  (events != NULL) ? (cl_uint) events->size() : 0,
7785  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7786  (event != NULL) ? &tmp : NULL),
7787  __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7788 
7789  if (event != NULL && err == CL_SUCCESS)
7790  *event = tmp;
7791 
7792  return err;
7793  }
7794 
7795 
7796 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7797 
7801  template<typename T>
7802  cl_int enqueueUnmapSVM(
7803  T* ptr,
7804  const vector<Event>* events = NULL,
7805  Event* event = NULL) const
7806  {
7807  cl_event tmp;
7808  cl_int err = detail::errHandler(
7809  ::clEnqueueSVMUnmap(
7810  object_, static_cast<void*>(ptr),
7811  (events != NULL) ? (cl_uint)events->size() : 0,
7812  (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7813  (event != NULL) ? &tmp : NULL),
7814  __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7815 
7816  if (event != NULL && err == CL_SUCCESS)
7817  *event = tmp;
7818 
7819  return err;
7820  }
7821 
7826  template<typename T, class D>
7827  cl_int enqueueUnmapSVM(
7828  cl::pointer<T, D> &ptr,
7829  const vector<Event>* events = NULL,
7830  Event* event = NULL) const
7831  {
7832  cl_event tmp;
7833  cl_int err = detail::errHandler(
7834  ::clEnqueueSVMUnmap(
7835  object_, static_cast<void*>(ptr.get()),
7836  (events != NULL) ? (cl_uint)events->size() : 0,
7837  (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7838  (event != NULL) ? &tmp : NULL),
7839  __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7840 
7841  if (event != NULL && err == CL_SUCCESS)
7842  *event = tmp;
7843 
7844  return err;
7845  }
7846 
7851  template<typename T, class Alloc>
7852  cl_int enqueueUnmapSVM(
7853  cl::vector<T, Alloc> &container,
7854  const vector<Event>* events = NULL,
7855  Event* event = NULL) const
7856  {
7857  cl_event tmp;
7858  cl_int err = detail::errHandler(
7859  ::clEnqueueSVMUnmap(
7860  object_, static_cast<void*>(container.data()),
7861  (events != NULL) ? (cl_uint)events->size() : 0,
7862  (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7863  (event != NULL) ? &tmp : NULL),
7864  __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7865 
7866  if (event != NULL && err == CL_SUCCESS)
7867  *event = tmp;
7868 
7869  return err;
7870  }
7871 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7872 
7873 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7874 
7885  cl_int enqueueMarkerWithWaitList(
7886  const vector<Event> *events = 0,
7887  Event *event = 0) const
7888  {
7889  cl_event tmp;
7890  cl_int err = detail::errHandler(
7891  ::clEnqueueMarkerWithWaitList(
7892  object_,
7893  (events != NULL) ? (cl_uint) events->size() : 0,
7894  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7895  (event != NULL) ? &tmp : NULL),
7896  __ENQUEUE_MARKER_WAIT_LIST_ERR);
7897 
7898  if (event != NULL && err == CL_SUCCESS)
7899  *event = tmp;
7900 
7901  return err;
7902  }
7903 
7915  cl_int enqueueBarrierWithWaitList(
7916  const vector<Event> *events = 0,
7917  Event *event = 0) const
7918  {
7919  cl_event tmp;
7920  cl_int err = detail::errHandler(
7921  ::clEnqueueBarrierWithWaitList(
7922  object_,
7923  (events != NULL) ? (cl_uint) events->size() : 0,
7924  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7925  (event != NULL) ? &tmp : NULL),
7926  __ENQUEUE_BARRIER_WAIT_LIST_ERR);
7927 
7928  if (event != NULL && err == CL_SUCCESS)
7929  *event = tmp;
7930 
7931  return err;
7932  }
7933 
7938  cl_int enqueueMigrateMemObjects(
7939  const vector<Memory> &memObjects,
7940  cl_mem_migration_flags flags,
7941  const vector<Event>* events = NULL,
7942  Event* event = NULL
7943  ) const
7944  {
7945  cl_event tmp;
7946 
7947  vector<cl_mem> localMemObjects(memObjects.size());
7948 
7949  for( int i = 0; i < (int)memObjects.size(); ++i ) {
7950  localMemObjects[i] = memObjects[i]();
7951  }
7952 
7953 
7954  cl_int err = detail::errHandler(
7955  ::clEnqueueMigrateMemObjects(
7956  object_,
7957  (cl_uint)memObjects.size(),
7958  localMemObjects.data(),
7959  flags,
7960  (events != NULL) ? (cl_uint) events->size() : 0,
7961  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7962  (event != NULL) ? &tmp : NULL),
7963  __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7964 
7965  if (event != NULL && err == CL_SUCCESS)
7966  *event = tmp;
7967 
7968  return err;
7969  }
7970 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
7971 
7972  cl_int enqueueNDRangeKernel(
7973  const Kernel& kernel,
7974  const NDRange& offset,
7975  const NDRange& global,
7976  const NDRange& local = NullRange,
7977  const vector<Event>* events = NULL,
7978  Event* event = NULL) const
7979  {
7980  cl_event tmp;
7981  cl_int err = detail::errHandler(
7982  ::clEnqueueNDRangeKernel(
7983  object_, kernel(), (cl_uint) global.dimensions(),
7984  offset.dimensions() != 0 ? (const size_type*) offset : NULL,
7985  (const size_type*) global,
7986  local.dimensions() != 0 ? (const size_type*) local : NULL,
7987  (events != NULL) ? (cl_uint) events->size() : 0,
7988  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7989  (event != NULL) ? &tmp : NULL),
7990  __ENQUEUE_NDRANGE_KERNEL_ERR);
7991 
7992  if (event != NULL && err == CL_SUCCESS)
7993  *event = tmp;
7994 
7995  return err;
7996  }
7997 
7998 #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
7999  CL_EXT_PREFIX__VERSION_1_2_DEPRECATED cl_int enqueueTask(
8000  const Kernel& kernel,
8001  const vector<Event>* events = NULL,
8002  Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED
8003  {
8004  cl_event tmp;
8005  cl_int err = detail::errHandler(
8006  ::clEnqueueTask(
8007  object_, kernel(),
8008  (events != NULL) ? (cl_uint) events->size() : 0,
8009  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8010  (event != NULL) ? &tmp : NULL),
8011  __ENQUEUE_TASK_ERR);
8012 
8013  if (event != NULL && err == CL_SUCCESS)
8014  *event = tmp;
8015 
8016  return err;
8017  }
8018 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
8019 
8020  cl_int enqueueNativeKernel(
8021  void (CL_CALLBACK *userFptr)(void *),
8022  std::pair<void*, size_type> args,
8023  const vector<Memory>* mem_objects = NULL,
8024  const vector<const void*>* mem_locs = NULL,
8025  const vector<Event>* events = NULL,
8026  Event* event = NULL) const
8027  {
8028  size_type elements = 0;
8029  if (mem_objects != NULL) {
8030  elements = mem_objects->size();
8031  }
8032  vector<cl_mem> mems(elements);
8033  for (unsigned int i = 0; i < elements; i++) {
8034  mems[i] = ((*mem_objects)[i])();
8035  }
8036 
8037  cl_event tmp;
8038  cl_int err = detail::errHandler(
8039  ::clEnqueueNativeKernel(
8040  object_, userFptr, args.first, args.second,
8041  (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8042  mems.data(),
8043  (mem_locs != NULL && mem_locs->size() > 0) ? (const void **) &mem_locs->front() : NULL,
8044  (events != NULL) ? (cl_uint) events->size() : 0,
8045  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8046  (event != NULL) ? &tmp : NULL),
8047  __ENQUEUE_NATIVE_KERNEL);
8048 
8049  if (event != NULL && err == CL_SUCCESS)
8050  *event = tmp;
8051 
8052  return err;
8053  }
8054 
8058 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
8059  CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
8060  cl_int enqueueMarker(Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8061  {
8062  cl_event tmp;
8063  cl_int err = detail::errHandler(
8064  ::clEnqueueMarker(
8065  object_,
8066  (event != NULL) ? &tmp : NULL),
8067  __ENQUEUE_MARKER_ERR);
8068 
8069  if (event != NULL && err == CL_SUCCESS)
8070  *event = tmp;
8071 
8072  return err;
8073  }
8074 
8075  CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
8076  cl_int enqueueWaitForEvents(const vector<Event>& events) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8077  {
8078  return detail::errHandler(
8079  ::clEnqueueWaitForEvents(
8080  object_,
8081  (cl_uint) events.size(),
8082  events.size() > 0 ? (const cl_event*) &events.front() : NULL),
8083  __ENQUEUE_WAIT_FOR_EVENTS_ERR);
8084  }
8085 #endif // defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
8086 
8087  cl_int enqueueAcquireGLObjects(
8088  const vector<Memory>* mem_objects = NULL,
8089  const vector<Event>* events = NULL,
8090  Event* event = NULL) const
8091  {
8092  cl_event tmp;
8093  cl_int err = detail::errHandler(
8094  ::clEnqueueAcquireGLObjects(
8095  object_,
8096  (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8097  (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8098  (events != NULL) ? (cl_uint) events->size() : 0,
8099  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8100  (event != NULL) ? &tmp : NULL),
8101  __ENQUEUE_ACQUIRE_GL_ERR);
8102 
8103  if (event != NULL && err == CL_SUCCESS)
8104  *event = tmp;
8105 
8106  return err;
8107  }
8108 
8109  cl_int enqueueReleaseGLObjects(
8110  const vector<Memory>* mem_objects = NULL,
8111  const vector<Event>* events = NULL,
8112  Event* event = NULL) const
8113  {
8114  cl_event tmp;
8115  cl_int err = detail::errHandler(
8116  ::clEnqueueReleaseGLObjects(
8117  object_,
8118  (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8119  (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8120  (events != NULL) ? (cl_uint) events->size() : 0,
8121  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8122  (event != NULL) ? &tmp : NULL),
8123  __ENQUEUE_RELEASE_GL_ERR);
8124 
8125  if (event != NULL && err == CL_SUCCESS)
8126  *event = tmp;
8127 
8128  return err;
8129  }
8130 
8131 #if defined (CL_HPP_USE_DX_INTEROP)
8132 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueAcquireD3D10ObjectsKHR)(
8133  cl_command_queue command_queue, cl_uint num_objects,
8134  const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
8135  const cl_event* event_wait_list, cl_event* event);
8136 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueReleaseD3D10ObjectsKHR)(
8137  cl_command_queue command_queue, cl_uint num_objects,
8138  const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
8139  const cl_event* event_wait_list, cl_event* event);
8140 
8141  cl_int enqueueAcquireD3D10Objects(
8142  const vector<Memory>* mem_objects = NULL,
8143  const vector<Event>* events = NULL,
8144  Event* event = NULL) const
8145  {
8146  static PFN_clEnqueueAcquireD3D10ObjectsKHR pfn_clEnqueueAcquireD3D10ObjectsKHR = NULL;
8147 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
8148  cl_context context = getInfo<CL_QUEUE_CONTEXT>();
8149  cl::Device device(getInfo<CL_QUEUE_DEVICE>());
8150  cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
8151  CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueAcquireD3D10ObjectsKHR);
8152 #endif
8153 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8154  CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueAcquireD3D10ObjectsKHR);
8155 #endif
8156 
8157  cl_event tmp;
8158  cl_int err = detail::errHandler(
8159  pfn_clEnqueueAcquireD3D10ObjectsKHR(
8160  object_,
8161  (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8162  (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8163  (events != NULL) ? (cl_uint) events->size() : 0,
8164  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8165  (event != NULL) ? &tmp : NULL),
8166  __ENQUEUE_ACQUIRE_GL_ERR);
8167 
8168  if (event != NULL && err == CL_SUCCESS)
8169  *event = tmp;
8170 
8171  return err;
8172  }
8173 
8174  cl_int enqueueReleaseD3D10Objects(
8175  const vector<Memory>* mem_objects = NULL,
8176  const vector<Event>* events = NULL,
8177  Event* event = NULL) const
8178  {
8179  static PFN_clEnqueueReleaseD3D10ObjectsKHR pfn_clEnqueueReleaseD3D10ObjectsKHR = NULL;
8180 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
8181  cl_context context = getInfo<CL_QUEUE_CONTEXT>();
8182  cl::Device device(getInfo<CL_QUEUE_DEVICE>());
8183  cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
8184  CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueReleaseD3D10ObjectsKHR);
8185 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
8186 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8187  CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueReleaseD3D10ObjectsKHR);
8188 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
8189 
8190  cl_event tmp;
8191  cl_int err = detail::errHandler(
8192  pfn_clEnqueueReleaseD3D10ObjectsKHR(
8193  object_,
8194  (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8195  (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8196  (events != NULL) ? (cl_uint) events->size() : 0,
8197  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8198  (event != NULL) ? &tmp : NULL),
8199  __ENQUEUE_RELEASE_GL_ERR);
8200 
8201  if (event != NULL && err == CL_SUCCESS)
8202  *event = tmp;
8203 
8204  return err;
8205  }
8206 #endif
8207 
8211 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
8212  CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
8213  cl_int enqueueBarrier() const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8214  {
8215  return detail::errHandler(
8216  ::clEnqueueBarrier(object_),
8217  __ENQUEUE_BARRIER_ERR);
8218  }
8219 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
8220 
8221  cl_int flush() const
8222  {
8223  return detail::errHandler(::clFlush(object_), __FLUSH_ERR);
8224  }
8225 
8226  cl_int finish() const
8227  {
8228  return detail::errHandler(::clFinish(object_), __FINISH_ERR);
8229  }
8230 }; // CommandQueue
8231 
8232 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag CommandQueue::default_initialized_;
8233 CL_HPP_DEFINE_STATIC_MEMBER_ CommandQueue CommandQueue::default_;
8234 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int CommandQueue::default_error_ = CL_SUCCESS;
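// --------------------------------------------------------------------------
// Illustrative sketch (not part of the header): a typical host-side round
// trip through cl::CommandQueue. The kernel source, the "vadd" name and the
// problem size N are hypothetical; error handling is abbreviated.
// Assumes <CL/cl2.hpp> and <vector> are available.
inline void commandqueue_usage_sketch()
{
    const size_t N = 1024;                               // hypothetical size
    std::vector<float> a(N, 1.0f), b(N, 2.0f), c(N, 0.0f);

    cl::Context context = cl::Context::getDefault();
    cl::CommandQueue queue = cl::CommandQueue::getDefault();

    cl::Buffer bufA(context, CL_MEM_READ_ONLY,  sizeof(float) * N);
    cl::Buffer bufB(context, CL_MEM_READ_ONLY,  sizeof(float) * N);
    cl::Buffer bufC(context, CL_MEM_WRITE_ONLY, sizeof(float) * N);

    // Blocking writes of host data into the device buffers.
    queue.enqueueWriteBuffer(bufA, CL_TRUE, 0, sizeof(float) * N, a.data());
    queue.enqueueWriteBuffer(bufB, CL_TRUE, 0, sizeof(float) * N, b.data());

    // Build a trivial kernel from source and set its arguments.
    cl::Program program(context,
        "kernel void vadd(global const float* a, global const float* b, "
        "global float* c) { size_t i = get_global_id(0); c[i] = a[i] + b[i]; }",
        true /* build immediately */);
    cl::Kernel kernel(program, "vadd");
    kernel.setArg(0, bufA);
    kernel.setArg(1, bufB);
    kernel.setArg(2, bufC);

    // Launch over N work-items, then read the result back with a blocking read.
    queue.enqueueNDRangeKernel(kernel, cl::NullRange, cl::NDRange(N), cl::NullRange);
    queue.enqueueReadBuffer(bufC, CL_TRUE, 0, sizeof(float) * N, c.data());
}
// --------------------------------------------------------------------------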
8235 
8236 
8237 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8238 enum class DeviceQueueProperties : cl_command_queue_properties
8239 {
8240  None = 0,
8241  Profiling = CL_QUEUE_PROFILING_ENABLE,
8242 };
8243 
8244 inline DeviceQueueProperties operator|(DeviceQueueProperties lhs, DeviceQueueProperties rhs)
8245 {
8246  return static_cast<DeviceQueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
8247 }
8248 
8252 class DeviceCommandQueue : public detail::Wrapper<cl_command_queue>
8253 {
8254 public:
8255 
8259  DeviceCommandQueue() { }
8260 
8264  DeviceCommandQueue(DeviceQueueProperties properties, cl_int* err = NULL)
8265  {
8266  cl_int error;
8267  cl::Context context = cl::Context::getDefault(&error);
8268  cl::Device device = cl::Device::getDefault();
8269 
8270  cl_command_queue_properties mergedProperties =
8271  CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8272 
8273  cl_queue_properties queue_properties[] = {
8274  CL_QUEUE_PROPERTIES, mergedProperties, 0 };
8275  object_ = ::clCreateCommandQueueWithProperties(
8276  context(), device(), queue_properties, &error);
8277 
8278  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8279  if (err != NULL) {
8280  *err = error;
8281  }
8282  }
8283 
8287  DeviceCommandQueue(
8288  const Context& context,
8289  const Device& device,
8290  DeviceQueueProperties properties = DeviceQueueProperties::None,
8291  cl_int* err = NULL)
8292  {
8293  cl_int error;
8294 
8295  cl_command_queue_properties mergedProperties =
8296  CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8297  cl_queue_properties queue_properties[] = {
8298  CL_QUEUE_PROPERTIES, mergedProperties, 0 };
8299  object_ = ::clCreateCommandQueueWithProperties(
8300  context(), device(), queue_properties, &error);
8301 
8302  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8303  if (err != NULL) {
8304  *err = error;
8305  }
8306  }
8307 
8311  DeviceCommandQueue(
8312  const Context& context,
8313  const Device& device,
8314  cl_uint queueSize,
8315  DeviceQueueProperties properties = DeviceQueueProperties::None,
8316  cl_int* err = NULL)
8317  {
8318  cl_int error;
8319 
8320  cl_command_queue_properties mergedProperties =
8321  CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8322  cl_queue_properties queue_properties[] = {
8323  CL_QUEUE_PROPERTIES, mergedProperties,
8324  CL_QUEUE_SIZE, queueSize,
8325  0 };
8326  object_ = ::clCreateCommandQueueWithProperties(
8327  context(), device(), queue_properties, &error);
8328 
8329  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8330  if (err != NULL) {
8331  *err = error;
8332  }
8333  }
8334 
8341  explicit DeviceCommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
8342  detail::Wrapper<cl_type>(commandQueue, retainObject) { }
8343 
8344  DeviceCommandQueue& operator = (const cl_command_queue& rhs)
8345  {
8346  detail::Wrapper<cl_type>::operator=(rhs);
8347  return *this;
8348  }
8349 
8353  DeviceCommandQueue(const DeviceCommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}
8354 
8358  DeviceCommandQueue& operator = (const DeviceCommandQueue &queue)
8359  {
8360  detail::Wrapper<cl_type>::operator=(queue);
8361  return *this;
8362  }
8363 
8367  DeviceCommandQueue(DeviceCommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}
8368 
8372  DeviceCommandQueue& operator = (DeviceCommandQueue &&queue) CL_HPP_NOEXCEPT_
8373  {
8374  detail::Wrapper<cl_type>::operator=(std::move(queue));
8375  return *this;
8376  }
8377 
8378  template <typename T>
8379  cl_int getInfo(cl_command_queue_info name, T* param) const
8380  {
8381  return detail::errHandler(
8382  detail::getInfo(
8383  &::clGetCommandQueueInfo, object_, name, param),
8384  __GET_COMMAND_QUEUE_INFO_ERR);
8385  }
8386 
8387  template <cl_int name> typename
8388  detail::param_traits<detail::cl_command_queue_info, name>::param_type
8389  getInfo(cl_int* err = NULL) const
8390  {
8391  typename detail::param_traits<
8392  detail::cl_command_queue_info, name>::param_type param;
8393  cl_int result = getInfo(name, &param);
8394  if (err != NULL) {
8395  *err = result;
8396  }
8397  return param;
8398  }
8399 
8406  static DeviceCommandQueue makeDefault(
8407  cl_int *err = nullptr)
8408  {
8409  cl_int error;
8410  cl::Context context = cl::Context::getDefault(&error);
8411  cl::Device device = cl::Device::getDefault();
8412 
8413  cl_command_queue_properties properties =
8414  CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8415  cl_queue_properties queue_properties[] = {
8416  CL_QUEUE_PROPERTIES, properties,
8417  0 };
8418  DeviceCommandQueue deviceQueue(
8419  ::clCreateCommandQueueWithProperties(
8420  context(), device(), queue_properties, &error));
8421 
8422  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8423  if (err != NULL) {
8424  *err = error;
8425  }
8426 
8427  return deviceQueue;
8428  }
8429 
8436  static DeviceCommandQueue makeDefault(
8437  const Context &context, const Device &device, cl_int *err = nullptr)
8438  {
8439  cl_int error;
8440 
8441  cl_command_queue_properties properties =
8442  CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8443  cl_queue_properties queue_properties[] = {
8444  CL_QUEUE_PROPERTIES, properties,
8445  0 };
8446  DeviceCommandQueue deviceQueue(
8447  ::clCreateCommandQueueWithProperties(
8448  context(), device(), queue_properties, &error));
8449 
8450  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8451  if (err != NULL) {
8452  *err = error;
8453  }
8454 
8455  return deviceQueue;
8456  }
8457 
8464  static DeviceCommandQueue makeDefault(
8465  const Context &context, const Device &device, cl_uint queueSize, cl_int *err = nullptr)
8466  {
8467  cl_int error;
8468 
8469  cl_command_queue_properties properties =
8470  CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8471  cl_queue_properties queue_properties[] = {
8472  CL_QUEUE_PROPERTIES, properties,
8473  CL_QUEUE_SIZE, queueSize,
8474  0 };
8475  DeviceCommandQueue deviceQueue(
8476  ::clCreateCommandQueueWithProperties(
8477  context(), device(), queue_properties, &error));
8478 
8479  detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
8480  if (err != NULL) {
8481  *err = error;
8482  }
8483 
8484  return deviceQueue;
8485  }
8486 }; // DeviceCommandQueue
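// --------------------------------------------------------------------------
// Illustrative sketch (not part of the header): creating device-side queues
// for kernels that use OpenCL 2.0 device enqueue. The 16 KiB queue size is a
// hypothetical value; error handling is abbreviated.
inline void device_command_queue_sketch()
{
    cl_int err = CL_SUCCESS;

    // Default on-device queue on the default context and device.
    cl::DeviceCommandQueue defaultQueue = cl::DeviceCommandQueue::makeDefault(&err);

    // Explicitly sized on-device queue with profiling enabled.
    cl::Context context = cl::Context::getDefault();
    cl::Device device = cl::Device::getDefault();
    cl::DeviceCommandQueue sizedQueue(
        context, device, 16 * 1024,
        cl::DeviceQueueProperties::Profiling, &err);

    (void)defaultQueue;
    (void)sizedQueue;
}
// --------------------------------------------------------------------------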
8487 
8488 namespace detail
8489 {
8490  // Specialization for device command queue
8491  template <>
8492  struct KernelArgumentHandler<cl::DeviceCommandQueue, void>
8493  {
8494  static size_type size(const cl::DeviceCommandQueue&) { return sizeof(cl_command_queue); }
8495  static const cl_command_queue* ptr(const cl::DeviceCommandQueue& value) { return &(value()); }
8496  };
8497 } // namespace detail
8498 
8499 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8500 
8501 
8502 template< typename IteratorType >
8503 Buffer::Buffer(
8504  const Context &context,
8505  IteratorType startIterator,
8506  IteratorType endIterator,
8507  bool readOnly,
8508  bool useHostPtr,
8509  cl_int* err)
8510 {
8511  typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8512  cl_int error;
8513 
8514  cl_mem_flags flags = 0;
8515  if( readOnly ) {
8516  flags |= CL_MEM_READ_ONLY;
8517  }
8518  else {
8519  flags |= CL_MEM_READ_WRITE;
8520  }
8521  if( useHostPtr ) {
8522  flags |= CL_MEM_USE_HOST_PTR;
8523  }
8524 
8525  size_type size = sizeof(DataType)*(endIterator - startIterator);
8526 
8527  if( useHostPtr ) {
8528  object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
8529  } else {
8530  object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
8531  }
8532 
8533  detail::errHandler(error, __CREATE_BUFFER_ERR);
8534  if (err != NULL) {
8535  *err = error;
8536  }
8537 
8538  if( !useHostPtr ) {
8539  CommandQueue queue(context, 0, &error);
8540  detail::errHandler(error, __CREATE_BUFFER_ERR);
8541  if (err != NULL) {
8542  *err = error;
8543  }
8544 
8545  error = cl::copy(queue, startIterator, endIterator, *this);
8546  detail::errHandler(error, __CREATE_BUFFER_ERR);
8547  if (err != NULL) {
8548  *err = error;
8549  }
8550  }
8551 }
8552 
8553 template< typename IteratorType >
8554 Buffer::Buffer(
8555  const CommandQueue &queue,
8556  IteratorType startIterator,
8557  IteratorType endIterator,
8558  bool readOnly,
8559  bool useHostPtr,
8560  cl_int* err)
8561 {
8562  typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8563  cl_int error;
8564 
8565  cl_mem_flags flags = 0;
8566  if (readOnly) {
8567  flags |= CL_MEM_READ_ONLY;
8568  }
8569  else {
8570  flags |= CL_MEM_READ_WRITE;
8571  }
8572  if (useHostPtr) {
8573  flags |= CL_MEM_USE_HOST_PTR;
8574  }
8575 
8576  size_type size = sizeof(DataType)*(endIterator - startIterator);
8577 
8578  Context context = queue.getInfo<CL_QUEUE_CONTEXT>();
8579 
8580  if (useHostPtr) {
8581  object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
8582  }
8583  else {
8584  object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
8585  }
8586 
8587  detail::errHandler(error, __CREATE_BUFFER_ERR);
8588  if (err != NULL) {
8589  *err = error;
8590  }
8591 
8592  if (!useHostPtr) {
8593  error = cl::copy(queue, startIterator, endIterator, *this);
8594  detail::errHandler(error, __CREATE_BUFFER_ERR);
8595  if (err != NULL) {
8596  *err = error;
8597  }
8598  }
8599 }
8600 
8601 inline cl_int enqueueReadBuffer(
8602  const Buffer& buffer,
8603  cl_bool blocking,
8604  size_type offset,
8605  size_type size,
8606  void* ptr,
8607  const vector<Event>* events = NULL,
8608  Event* event = NULL)
8609 {
8610  cl_int error;
8611  CommandQueue queue = CommandQueue::getDefault(&error);
8612 
8613  if (error != CL_SUCCESS) {
8614  return error;
8615  }
8616 
8617  return queue.enqueueReadBuffer(buffer, blocking, offset, size, ptr, events, event);
8618 }
8619 
8620 inline cl_int enqueueWriteBuffer(
8621  const Buffer& buffer,
8622  cl_bool blocking,
8623  size_type offset,
8624  size_type size,
8625  const void* ptr,
8626  const vector<Event>* events = NULL,
8627  Event* event = NULL)
8628 {
8629  cl_int error;
8630  CommandQueue queue = CommandQueue::getDefault(&error);
8631 
8632  if (error != CL_SUCCESS) {
8633  return error;
8634  }
8635 
8636  return queue.enqueueWriteBuffer(buffer, blocking, offset, size, ptr, events, event);
8637 }
8638 
8639 inline void* enqueueMapBuffer(
8640  const Buffer& buffer,
8641  cl_bool blocking,
8642  cl_map_flags flags,
8643  size_type offset,
8644  size_type size,
8645  const vector<Event>* events = NULL,
8646  Event* event = NULL,
8647  cl_int* err = NULL)
8648 {
8649  cl_int error;
8650  CommandQueue queue = CommandQueue::getDefault(&error);
8651  detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8652  if (err != NULL) {
8653  *err = error;
8654  }
8655 
8656  void * result = ::clEnqueueMapBuffer(
8657  queue(), buffer(), blocking, flags, offset, size,
8658  (events != NULL) ? (cl_uint) events->size() : 0,
8659  (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8660  (cl_event*) event,
8661  &error);
8662 
8663  detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8664  if (err != NULL) {
8665  *err = error;
8666  }
8667  return result;
8668 }
8669 
8670 
8671 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8672 
8677 template<typename T>
8678 inline cl_int enqueueMapSVM(
8679  T* ptr,
8680  cl_bool blocking,
8681  cl_map_flags flags,
8682  size_type size,
8683  const vector<Event>* events,
8684  Event* event)
8685 {
8686  cl_int error;
8687  CommandQueue queue = CommandQueue::getDefault(&error);
8688  if (error != CL_SUCCESS) {
8689  return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8690  }
8691 
8692  return queue.enqueueMapSVM(
8693  ptr, blocking, flags, size, events, event);
8694 }
8695 
8701 template<typename T, class D>
8702 inline cl_int enqueueMapSVM(
8703  cl::pointer<T, D> ptr,
8704  cl_bool blocking,
8705  cl_map_flags flags,
8706  size_type size,
8707  const vector<Event>* events = NULL,
8708  Event* event = NULL)
8709 {
8710  cl_int error;
8711  CommandQueue queue = CommandQueue::getDefault(&error);
8712  if (error != CL_SUCCESS) {
8713  return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8714  }
8715 
8716  return queue.enqueueMapSVM(
8717  ptr, blocking, flags, size, events, event);
8718 }
8719 
8725 template<typename T, class Alloc>
8726 inline cl_int enqueueMapSVM(
8727  cl::vector<T, Alloc> container,
8728  cl_bool blocking,
8729  cl_map_flags flags,
8730  const vector<Event>* events = NULL,
8731  Event* event = NULL)
8732 {
8733  cl_int error;
8734  CommandQueue queue = CommandQueue::getDefault(&error);
8735  if (error != CL_SUCCESS) {
8736  return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8737  }
8738 
8739  return queue.enqueueMapSVM(
8740  container, blocking, flags, events, event);
8741 }
8742 
8743 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8744 
8745 inline cl_int enqueueUnmapMemObject(
8746  const Memory& memory,
8747  void* mapped_ptr,
8748  const vector<Event>* events = NULL,
8749  Event* event = NULL)
8750 {
8751  cl_int error;
8752  CommandQueue queue = CommandQueue::getDefault(&error);
8753  detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8754  if (error != CL_SUCCESS) {
8755  return error;
8756  }
8757 
8758  cl_event tmp;
8759  cl_int err = detail::errHandler(
8760  ::clEnqueueUnmapMemObject(
8761  queue(), memory(), mapped_ptr,
8762  (events != NULL) ? (cl_uint)events->size() : 0,
8763  (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8764  (event != NULL) ? &tmp : NULL),
8765  __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8766 
8767  if (event != NULL && err == CL_SUCCESS)
8768  *event = tmp;
8769 
8770  return err;
8771 }
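// --------------------------------------------------------------------------
// Illustrative sketch (not part of the header): the map / host-update / unmap
// pattern using the free enqueueMapBuffer() and enqueueUnmapMemObject()
// helpers above on the default queue. "buf" and "bytes" are hypothetical.
inline cl_int map_update_unmap_sketch(const cl::Buffer &buf, size_t bytes)
{
    cl_int err = CL_SUCCESS;

    // Blocking map for writing; returns a host-visible pointer into the buffer.
    void *p = cl::enqueueMapBuffer(buf, CL_TRUE, CL_MAP_WRITE, 0, bytes,
                                   NULL, NULL, &err);
    if (err != CL_SUCCESS) {
        return err;
    }

    // Touch the mapped region on the host (zero it here).
    unsigned char *host = static_cast<unsigned char*>(p);
    for (size_t i = 0; i < bytes; ++i) {
        host[i] = 0;
    }

    // Hand the region back to the runtime before any kernel uses the buffer.
    return cl::enqueueUnmapMemObject(buf, p);
}
// --------------------------------------------------------------------------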
8772 
8773 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8774 
8779 template<typename T>
8780 inline cl_int enqueueUnmapSVM(
8781  T* ptr,
8782  const vector<Event>* events = NULL,
8783  Event* event = NULL)
8784 {
8785  cl_int error;
8786  CommandQueue queue = CommandQueue::getDefault(&error);
8787  if (error != CL_SUCCESS) {
8788  return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8789  }
8790 
8791  return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
8792  __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8793 
8794 }
8795 
8801 template<typename T, class D>
8802 inline cl_int enqueueUnmapSVM(
8803  cl::pointer<T, D> &ptr,
8804  const vector<Event>* events = NULL,
8805  Event* event = NULL)
8806 {
8807  cl_int error;
8808  CommandQueue queue = CommandQueue::getDefault(&error);
8809  if (error != CL_SUCCESS) {
8810  return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8811  }
8812 
8813  return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
8814  __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8815 }
8816 
8822 template<typename T, class Alloc>
8823 inline cl_int enqueueUnmapSVM(
8824  cl::vector<T, Alloc> &container,
8825  const vector<Event>* events = NULL,
8826  Event* event = NULL)
8827 {
8828  cl_int error;
8829  CommandQueue queue = CommandQueue::getDefault(&error);
8830  if (error != CL_SUCCESS) {
8831  return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8832  }
8833 
8834  return detail::errHandler(queue.enqueueUnmapSVM(container, events, event),
8835  __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8836 }
8837 
8838 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8839 
8840 inline cl_int enqueueCopyBuffer(
8841  const Buffer& src,
8842  const Buffer& dst,
8843  size_type src_offset,
8844  size_type dst_offset,
8845  size_type size,
8846  const vector<Event>* events = NULL,
8847  Event* event = NULL)
8848 {
8849  cl_int error;
8850  CommandQueue queue = CommandQueue::getDefault(&error);
8851 
8852  if (error != CL_SUCCESS) {
8853  return error;
8854  }
8855 
8856  return queue.enqueueCopyBuffer(src, dst, src_offset, dst_offset, size, events, event);
8857 }
8858 
8864 template< typename IteratorType >
8865 inline cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
8866 {
8867  cl_int error;
8868  CommandQueue queue = CommandQueue::getDefault(&error);
8869  if (error != CL_SUCCESS)
8870  return error;
8871 
8872  return cl::copy(queue, startIterator, endIterator, buffer);
8873 }
8874 
8880 template< typename IteratorType >
8881 inline cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
8882 {
8883  cl_int error;
8884  CommandQueue queue = CommandQueue::getDefault(&error);
8885  if (error != CL_SUCCESS)
8886  return error;
8887 
8888  return cl::copy(queue, buffer, startIterator, endIterator);
8889 }
8890 
8896 template< typename IteratorType >
8897 inline cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
8898 {
8899  typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8900  cl_int error;
8901 
8902  size_type length = endIterator-startIterator;
8903  size_type byteLength = length*sizeof(DataType);
8904 
8905  DataType *pointer =
8906  static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_WRITE, 0, byteLength, 0, 0, &error));
8907  // if exceptions enabled, enqueueMapBuffer will throw
8908  if( error != CL_SUCCESS ) {
8909  return error;
8910  }
8911 #if defined(_MSC_VER)
8912  std::copy(
8913  startIterator,
8914  endIterator,
8915  stdext::checked_array_iterator<DataType*>(
8916  pointer, length));
8917 #else
8918  std::copy(startIterator, endIterator, pointer);
8919 #endif
8920  Event endEvent;
8921  error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
8922  // if exceptions enabled, enqueueUnmapMemObject will throw
8923  if( error != CL_SUCCESS ) {
8924  return error;
8925  }
8926  endEvent.wait();
8927  return CL_SUCCESS;
8928 }
8929 
8935 template< typename IteratorType >
8936 inline cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
8937 {
8938  typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8939  cl_int error;
8940 
8941  size_type length = endIterator-startIterator;
8942  size_type byteLength = length*sizeof(DataType);
8943 
8944  DataType *pointer =
8945  static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_READ, 0, byteLength, 0, 0, &error));
8946  // if exceptions enabled, enqueueMapBuffer will throw
8947  if( error != CL_SUCCESS ) {
8948  return error;
8949  }
8950  std::copy(pointer, pointer + length, startIterator);
8951  Event endEvent;
8952  error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
8953  // if exceptions enabled, enqueueUnmapMemObject will throw
8954  if( error != CL_SUCCESS ) {
8955  return error;
8956  }
8957  endEvent.wait();
8958  return CL_SUCCESS;
8959 }
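// --------------------------------------------------------------------------
// Illustrative sketch (not part of the header): round-tripping a std::vector
// through a device buffer with the cl::copy() helpers above, using the
// default command queue. Sizes and values are hypothetical.
inline void copy_helpers_sketch()
{
    std::vector<int> host(256, 42);
    cl::Buffer device_buf(cl::Context::getDefault(), CL_MEM_READ_WRITE,
                          host.size() * sizeof(int));

    cl::copy(host.begin(), host.end(), device_buf);   // host -> device
    cl::copy(device_buf, host.begin(), host.end());   // device -> host
}
// --------------------------------------------------------------------------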
8960 
8961 
8962 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8963 
8966 template<typename T, class Alloc>
8967 inline cl_int mapSVM(cl::vector<T, Alloc> &container)
8968 {
8969  return enqueueMapSVM(container, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE);
8970 }
8971 
8975 template<typename T, class Alloc>
8976 inline cl_int unmapSVM(cl::vector<T, Alloc> &container)
8977 {
8978  return enqueueUnmapSVM(container);
8979 }
8980 
8981 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8982 
8983 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8984 inline cl_int enqueueReadBufferRect(
8985  const Buffer& buffer,
8986  cl_bool blocking,
8987  const array<size_type, 3>& buffer_offset,
8988  const array<size_type, 3>& host_offset,
8989  const array<size_type, 3>& region,
8990  size_type buffer_row_pitch,
8991  size_type buffer_slice_pitch,
8992  size_type host_row_pitch,
8993  size_type host_slice_pitch,
8994  void *ptr,
8995  const vector<Event>* events = NULL,
8996  Event* event = NULL)
8997 {
8998  cl_int error;
8999  CommandQueue queue = CommandQueue::getDefault(&error);
9000 
9001  if (error != CL_SUCCESS) {
9002  return error;
9003  }
9004 
9005  return queue.enqueueReadBufferRect(
9006  buffer,
9007  blocking,
9008  buffer_offset,
9009  host_offset,
9010  region,
9011  buffer_row_pitch,
9012  buffer_slice_pitch,
9013  host_row_pitch,
9014  host_slice_pitch,
9015  ptr,
9016  events,
9017  event);
9018 }
9019 
9020 inline cl_int enqueueWriteBufferRect(
9021  const Buffer& buffer,
9022  cl_bool blocking,
9023  const array<size_type, 3>& buffer_offset,
9024  const array<size_type, 3>& host_offset,
9025  const array<size_type, 3>& region,
9026  size_type buffer_row_pitch,
9027  size_type buffer_slice_pitch,
9028  size_type host_row_pitch,
9029  size_type host_slice_pitch,
9030  const void *ptr,
9031  const vector<Event>* events = NULL,
9032  Event* event = NULL)
9033 {
9034  cl_int error;
9035  CommandQueue queue = CommandQueue::getDefault(&error);
9036 
9037  if (error != CL_SUCCESS) {
9038  return error;
9039  }
9040 
9041  return queue.enqueueWriteBufferRect(
9042  buffer,
9043  blocking,
9044  buffer_offset,
9045  host_offset,
9046  region,
9047  buffer_row_pitch,
9048  buffer_slice_pitch,
9049  host_row_pitch,
9050  host_slice_pitch,
9051  ptr,
9052  events,
9053  event);
9054 }
9055 
9056 inline cl_int enqueueCopyBufferRect(
9057  const Buffer& src,
9058  const Buffer& dst,
9059  const array<size_type, 3>& src_origin,
9060  const array<size_type, 3>& dst_origin,
9061  const array<size_type, 3>& region,
9062  size_type src_row_pitch,
9063  size_type src_slice_pitch,
9064  size_type dst_row_pitch,
9065  size_type dst_slice_pitch,
9066  const vector<Event>* events = NULL,
9067  Event* event = NULL)
9068 {
9069  cl_int error;
9070  CommandQueue queue = CommandQueue::getDefault(&error);
9071 
9072  if (error != CL_SUCCESS) {
9073  return error;
9074  }
9075 
9076  return queue.enqueueCopyBufferRect(
9077  src,
9078  dst,
9079  src_origin,
9080  dst_origin,
9081  region,
9082  src_row_pitch,
9083  src_slice_pitch,
9084  dst_row_pitch,
9085  dst_slice_pitch,
9086  events,
9087  event);
9088 }
9089 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
9090 
9091 inline cl_int enqueueReadImage(
9092  const Image& image,
9093  cl_bool blocking,
9094  const array<size_type, 3>& origin,
9095  const array<size_type, 3>& region,
9096  size_type row_pitch,
9097  size_type slice_pitch,
9098  void* ptr,
9099  const vector<Event>* events = NULL,
9100  Event* event = NULL)
9101 {
9102  cl_int error;
9103  CommandQueue queue = CommandQueue::getDefault(&error);
9104 
9105  if (error != CL_SUCCESS) {
9106  return error;
9107  }
9108 
9109  return queue.enqueueReadImage(
9110  image,
9111  blocking,
9112  origin,
9113  region,
9114  row_pitch,
9115  slice_pitch,
9116  ptr,
9117  events,
9118  event);
9119 }
9120 
9121 inline cl_int enqueueWriteImage(
9122  const Image& image,
9123  cl_bool blocking,
9124  const array<size_type, 3>& origin,
9125  const array<size_type, 3>& region,
9126  size_type row_pitch,
9127  size_type slice_pitch,
9128  const void* ptr,
9129  const vector<Event>* events = NULL,
9130  Event* event = NULL)
9131 {
9132  cl_int error;
9133  CommandQueue queue = CommandQueue::getDefault(&error);
9134 
9135  if (error != CL_SUCCESS) {
9136  return error;
9137  }
9138 
9139  return queue.enqueueWriteImage(
9140  image,
9141  blocking,
9142  origin,
9143  region,
9144  row_pitch,
9145  slice_pitch,
9146  ptr,
9147  events,
9148  event);
9149 }
9150 
9151 inline cl_int enqueueCopyImage(
9152  const Image& src,
9153  const Image& dst,
9154  const array<size_type, 3>& src_origin,
9155  const array<size_type, 3>& dst_origin,
9156  const array<size_type, 3>& region,
9157  const vector<Event>* events = NULL,
9158  Event* event = NULL)
9159 {
9160  cl_int error;
9161  CommandQueue queue = CommandQueue::getDefault(&error);
9162 
9163  if (error != CL_SUCCESS) {
9164  return error;
9165  }
9166 
9167  return queue.enqueueCopyImage(
9168  src,
9169  dst,
9170  src_origin,
9171  dst_origin,
9172  region,
9173  events,
9174  event);
9175 }
9176 
9177 inline cl_int enqueueCopyImageToBuffer(
9178  const Image& src,
9179  const Buffer& dst,
9180  const array<size_type, 3>& src_origin,
9181  const array<size_type, 3>& region,
9182  size_type dst_offset,
9183  const vector<Event>* events = NULL,
9184  Event* event = NULL)
9185 {
9186  cl_int error;
9187  CommandQueue queue = CommandQueue::getDefault(&error);
9188 
9189  if (error != CL_SUCCESS) {
9190  return error;
9191  }
9192 
9193  return queue.enqueueCopyImageToBuffer(
9194  src,
9195  dst,
9196  src_origin,
9197  region,
9198  dst_offset,
9199  events,
9200  event);
9201 }
9202 
9203 inline cl_int enqueueCopyBufferToImage(
9204  const Buffer& src,
9205  const Image& dst,
9206  size_type src_offset,
9207  const array<size_type, 3>& dst_origin,
9208  const array<size_type, 3>& region,
9209  const vector<Event>* events = NULL,
9210  Event* event = NULL)
9211 {
9212  cl_int error;
9213  CommandQueue queue = CommandQueue::getDefault(&error);
9214 
9215  if (error != CL_SUCCESS) {
9216  return error;
9217  }
9218 
9219  return queue.enqueueCopyBufferToImage(
9220  src,
9221  dst,
9222  src_offset,
9223  dst_origin,
9224  region,
9225  events,
9226  event);
9227 }
9228 
9229 
9230 inline cl_int flush(void)
9231 {
9232  cl_int error;
9233  CommandQueue queue = CommandQueue::getDefault(&error);
9234 
9235  if (error != CL_SUCCESS) {
9236  return error;
9237  }
9238 
9239  return queue.flush();
9240 }
9241 
9242 inline cl_int finish(void)
9243 {
9244  cl_int error;
9245  CommandQueue queue = CommandQueue::getDefault(&error);
9246 
9247  if (error != CL_SUCCESS) {
9248  return error;
9249  }
9250 
9251 
9252  return queue.finish();
9253 }
9254 
9255 class EnqueueArgs
9256 {
9257 private:
9258  CommandQueue queue_;
9259  const NDRange offset_;
9260  const NDRange global_;
9261  const NDRange local_;
9262  vector<Event> events_;
9263 
9264  template<typename... Ts>
9265  friend class KernelFunctor;
9266 
9267 public:
9268  EnqueueArgs(NDRange global) :
9269  queue_(CommandQueue::getDefault()),
9270  offset_(NullRange),
9271  global_(global),
9272  local_(NullRange)
9273  {
9274 
9275  }
9276 
9277  EnqueueArgs(NDRange global, NDRange local) :
9278  queue_(CommandQueue::getDefault()),
9279  offset_(NullRange),
9280  global_(global),
9281  local_(local)
9282  {
9283 
9284  }
9285 
9286  EnqueueArgs(NDRange offset, NDRange global, NDRange local) :
9287  queue_(CommandQueue::getDefault()),
9288  offset_(offset),
9289  global_(global),
9290  local_(local)
9291  {
9292 
9293  }
9294 
9295  EnqueueArgs(Event e, NDRange global) :
9296  queue_(CommandQueue::getDefault()),
9297  offset_(NullRange),
9298  global_(global),
9299  local_(NullRange)
9300  {
9301  events_.push_back(e);
9302  }
9303 
9304  EnqueueArgs(Event e, NDRange global, NDRange local) :
9305  queue_(CommandQueue::getDefault()),
9306  offset_(NullRange),
9307  global_(global),
9308  local_(local)
9309  {
9310  events_.push_back(e);
9311  }
9312 
9313  EnqueueArgs(Event e, NDRange offset, NDRange global, NDRange local) :
9314  queue_(CommandQueue::getDefault()),
9315  offset_(offset),
9316  global_(global),
9317  local_(local)
9318  {
9319  events_.push_back(e);
9320  }
9321 
9322  EnqueueArgs(const vector<Event> &events, NDRange global) :
9323  queue_(CommandQueue::getDefault()),
9324  offset_(NullRange),
9325  global_(global),
9326  local_(NullRange),
9327  events_(events)
9328  {
9329 
9330  }
9331 
9332  EnqueueArgs(const vector<Event> &events, NDRange global, NDRange local) :
9333  queue_(CommandQueue::getDefault()),
9334  offset_(NullRange),
9335  global_(global),
9336  local_(local),
9337  events_(events)
9338  {
9339 
9340  }
9341 
9342  EnqueueArgs(const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
9343  queue_(CommandQueue::getDefault()),
9344  offset_(offset),
9345  global_(global),
9346  local_(local),
9347  events_(events)
9348  {
9349 
9350  }
9351 
9352  EnqueueArgs(CommandQueue &queue, NDRange global) :
9353  queue_(queue),
9354  offset_(NullRange),
9355  global_(global),
9356  local_(NullRange)
9357  {
9358 
9359  }
9360 
9361  EnqueueArgs(CommandQueue &queue, NDRange global, NDRange local) :
9362  queue_(queue),
9363  offset_(NullRange),
9364  global_(global),
9365  local_(local)
9366  {
9367 
9368  }
9369 
9370  EnqueueArgs(CommandQueue &queue, NDRange offset, NDRange global, NDRange local) :
9371  queue_(queue),
9372  offset_(offset),
9373  global_(global),
9374  local_(local)
9375  {
9376 
9377  }
9378 
9379  EnqueueArgs(CommandQueue &queue, Event e, NDRange global) :
9380  queue_(queue),
9381  offset_(NullRange),
9382  global_(global),
9383  local_(NullRange)
9384  {
9385  events_.push_back(e);
9386  }
9387 
9388  EnqueueArgs(CommandQueue &queue, Event e, NDRange global, NDRange local) :
9389  queue_(queue),
9390  offset_(NullRange),
9391  global_(global),
9392  local_(local)
9393  {
9394  events_.push_back(e);
9395  }
9396 
9397  EnqueueArgs(CommandQueue &queue, Event e, NDRange offset, NDRange global, NDRange local) :
9398  queue_(queue),
9399  offset_(offset),
9400  global_(global),
9401  local_(local)
9402  {
9403  events_.push_back(e);
9404  }
9405 
9406  EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global) :
9407  queue_(queue),
9408  offset_(NullRange),
9409  global_(global),
9410  local_(NullRange),
9411  events_(events)
9412  {
9413 
9414  }
9415 
9416  EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global, NDRange local) :
9417  queue_(queue),
9418  offset_(NullRange),
9419  global_(global),
9420  local_(local),
9421  events_(events)
9422  {
9423 
9424  }
9425 
9426  EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
9427  queue_(queue),
9428  offset_(offset),
9429  global_(global),
9430  local_(local),
9431  events_(events)
9432  {
9433 
9434  }
9435 };
9436 
9437 
9438 //----------------------------------------------------------------------------------------------
9439 
9440 
9445 template<typename... Ts>
9446 class KernelFunctor
9447 {
9448 private:
9449  Kernel kernel_;
9450 
9451  template<int index, typename T0, typename... T1s>
9452  void setArgs(T0&& t0, T1s&&... t1s)
9453  {
9454  kernel_.setArg(index, t0);
9455  setArgs<index + 1, T1s...>(std::forward<T1s>(t1s)...);
9456  }
9457 
9458  template<int index, typename T0>
9459  void setArgs(T0&& t0)
9460  {
9461  kernel_.setArg(index, t0);
9462  }
9463 
9464  template<int index>
9465  void setArgs()
9466  {
9467  }
9468 
9469 
9470 public:
9471  KernelFunctor(Kernel kernel) : kernel_(kernel)
9472  {}
9473 
9474  KernelFunctor(
9475  const Program& program,
9476  const string name,
9477  cl_int * err = NULL) :
9478  kernel_(program, name.c_str(), err)
9479  {}
9480 
9483 
9489  Event operator() (
9490  const EnqueueArgs& args,
9491  Ts... ts)
9492  {
9493  Event event;
9494  setArgs<0>(std::forward<Ts>(ts)...);
9495 
9496  args.queue_.enqueueNDRangeKernel(
9497  kernel_,
9498  args.offset_,
9499  args.global_,
9500  args.local_,
9501  &args.events_,
9502  &event);
9503 
9504  return event;
9505  }
9506 
9513  Event operator() (
9514  const EnqueueArgs& args,
9515  Ts... ts,
9516  cl_int &error)
9517  {
9518  Event event;
9519  setArgs<0>(std::forward<Ts>(ts)...);
9520 
9521  error = args.queue_.enqueueNDRangeKernel(
9522  kernel_,
9523  args.offset_,
9524  args.global_,
9525  args.local_,
9526  &args.events_,
9527  &event);
9528 
9529  return event;
9530  }
9531 
9532 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9533  cl_int setSVMPointers(const vector<void*> &pointerList)
9534  {
9535  return kernel_.setSVMPointers(pointerList);
9536  }
9537 
9538  template<typename T0, typename... T1s>
9539  cl_int setSVMPointers(const T0 &t0, T1s &... ts)
9540  {
9541  return kernel_.setSVMPointers(t0, ts...);
9542  }
9543 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9544 
9545  Kernel getKernel()
9546  {
9547  return kernel_;
9548  }
9549 };
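// --------------------------------------------------------------------------
// Illustrative sketch (not part of the header): launching a kernel through a
// typed KernelFunctor. The program, the "vadd" kernel name and the buffers
// are hypothetical; the functor sets the arguments and enqueues in one call.
inline void kernel_functor_sketch(const cl::Program &program,
                                  const cl::Buffer &a,
                                  const cl::Buffer &b,
                                  const cl::Buffer &c,
                                  size_t n)
{
    cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer> vadd(program, "vadd");

    // EnqueueArgs bundles the queue (the default one here), the offset,
    // the global and local NDRanges and any event dependencies.
    cl::Event done = vadd(cl::EnqueueArgs(cl::NDRange(n)), a, b, c);
    done.wait();
}
// --------------------------------------------------------------------------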
9550 
9551 namespace compatibility {
9556  template<typename... Ts>
9557  struct make_kernel
9558  {
9559  typedef KernelFunctor<Ts...> FunctorType;
9560 
9561  FunctorType functor_;
9562 
9563  make_kernel(
9564  const Program& program,
9565  const string name,
9566  cl_int * err = NULL) :
9567  functor_(FunctorType(program, name, err))
9568  {}
9569 
9570  make_kernel(
9571  const Kernel kernel) :
9572  functor_(FunctorType(kernel))
9573  {}
9574 
9576  typedef Event result_type;
9577 
9579  typedef Event type_(
9580  const EnqueueArgs&,
9581  Ts...);
9582 
9583  Event operator()(
9584  const EnqueueArgs& enqueueArgs,
9585  Ts... args)
9586  {
9587  return functor_(
9588  enqueueArgs, args...);
9589  }
9590  };
9591 } // namespace compatibility
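// Usage sketch (illustrative, not part of the cl2.hpp source):
// compatibility::make_kernel keeps the cl.hpp-era interface available for
// older code; new code can use cl::KernelFunctor directly. The program,
// queue, buffer and size named here are assumed to exist in caller code.
//
//     cl::compatibility::make_kernel<cl::Buffer, int> scale(program, "scale");
//     scale(cl::EnqueueArgs(queue, cl::NDRange(n)), dataBuf, n);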
9592 
9593 
9594 //----------------------------------------------------------------------------------------------------------------------
9595 
9596 #undef CL_HPP_ERR_STR_
9597 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
9598 #undef __GET_DEVICE_INFO_ERR
9599 #undef __GET_PLATFORM_INFO_ERR
9600 #undef __GET_DEVICE_IDS_ERR
9601 #undef __GET_CONTEXT_INFO_ERR
9602 #undef __GET_EVENT_INFO_ERR
9603 #undef __GET_EVENT_PROFILE_INFO_ERR
9604 #undef __GET_MEM_OBJECT_INFO_ERR
9605 #undef __GET_IMAGE_INFO_ERR
9606 #undef __GET_SAMPLER_INFO_ERR
9607 #undef __GET_KERNEL_INFO_ERR
9608 #undef __GET_KERNEL_ARG_INFO_ERR
9609 #undef __GET_KERNEL_WORK_GROUP_INFO_ERR
9610 #undef __GET_PROGRAM_INFO_ERR
9611 #undef __GET_PROGRAM_BUILD_INFO_ERR
9612 #undef __GET_COMMAND_QUEUE_INFO_ERR
9613 
9614 #undef __CREATE_CONTEXT_ERR
9615 #undef __CREATE_CONTEXT_FROM_TYPE_ERR
9616 #undef __GET_SUPPORTED_IMAGE_FORMATS_ERR
9617 
9618 #undef __CREATE_BUFFER_ERR
9619 #undef __CREATE_SUBBUFFER_ERR
9620 #undef __CREATE_IMAGE2D_ERR
9621 #undef __CREATE_IMAGE3D_ERR
9622 #undef __CREATE_SAMPLER_ERR
9623 #undef __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR
9624 
9625 #undef __CREATE_USER_EVENT_ERR
9626 #undef __SET_USER_EVENT_STATUS_ERR
9627 #undef __SET_EVENT_CALLBACK_ERR
9628 #undef __SET_PRINTF_CALLBACK_ERR
9629 
9630 #undef __WAIT_FOR_EVENTS_ERR
9631 
9632 #undef __CREATE_KERNEL_ERR
9633 #undef __SET_KERNEL_ARGS_ERR
9634 #undef __CREATE_PROGRAM_WITH_SOURCE_ERR
9635 #undef __CREATE_PROGRAM_WITH_BINARY_ERR
9636 #undef __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR
9637 #undef __BUILD_PROGRAM_ERR
9638 #undef __CREATE_KERNELS_IN_PROGRAM_ERR
9639 
9640 #undef __CREATE_COMMAND_QUEUE_ERR
9641 #undef __SET_COMMAND_QUEUE_PROPERTY_ERR
9642 #undef __ENQUEUE_READ_BUFFER_ERR
9643 #undef __ENQUEUE_WRITE_BUFFER_ERR
9644 #undef __ENQUEUE_READ_BUFFER_RECT_ERR
9645 #undef __ENQUEUE_WRITE_BUFFER_RECT_ERR
9646 #undef __ENQEUE_COPY_BUFFER_ERR
9647 #undef __ENQEUE_COPY_BUFFER_RECT_ERR
9648 #undef __ENQUEUE_READ_IMAGE_ERR
9649 #undef __ENQUEUE_WRITE_IMAGE_ERR
9650 #undef __ENQUEUE_COPY_IMAGE_ERR
9651 #undef __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR
9652 #undef __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR
9653 #undef __ENQUEUE_MAP_BUFFER_ERR
9654 #undef __ENQUEUE_MAP_IMAGE_ERR
9655 #undef __ENQUEUE_UNMAP_MEM_OBJECT_ERR
9656 #undef __ENQUEUE_NDRANGE_KERNEL_ERR
9657 #undef __ENQUEUE_TASK_ERR
9658 #undef __ENQUEUE_NATIVE_KERNEL
9659 
9660 #undef __UNLOAD_COMPILER_ERR
9661 #undef __CREATE_SUB_DEVICES_ERR
9662 
9663 #undef __CREATE_PIPE_ERR
9664 #undef __GET_PIPE_INFO_ERR
9665 
9666 #endif //CL_HPP_USER_OVERRIDE_ERROR_STRINGS
9667 
9668 // Extensions
9669 #undef CL_HPP_INIT_CL_EXT_FCN_PTR_
9670 #undef CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_
9671 
9672 #if defined(CL_HPP_USE_CL_DEVICE_FISSION)
9673 #undef CL_HPP_PARAM_NAME_DEVICE_FISSION_
9674 #endif // CL_HPP_USE_CL_DEVICE_FISSION
9675 
9676 #undef CL_HPP_NOEXCEPT_
9677 #undef CL_HPP_DEFINE_STATIC_MEMBER_
9678 
9679 } // namespace cl
9680 
9681 #endif // CL_HPP_
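A minimal sketch of how the functor machinery at the end of the header is
typically driven, assuming a built cl::Program and a cl::CommandQueue already
exist; the kernel name and variables below are placeholders:

    cl::KernelFunctor<cl::Buffer, int> process(program, "process");

    // Overload that reports the enqueue status through an out-parameter.
    cl_int enqueueError;
    cl::Event ev = process(cl::EnqueueArgs(queue, cl::NDRange(n)),
                           buf, n, enqueueError);

    // The wrapped cl::Kernel remains accessible for direct queries.
    cl::Kernel k = process.getKernel();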