1[/ 2 Copyright Oliver Kowalke 2017. 3 Distributed under the Boost Software License, Version 1.0. 4 (See accompanying file LICENSE_1_0.txt or copy at 5 http://www.boost.org/LICENSE_1_0.txt 6] 7 8[#hip] 9[section:hip ROCm/HIP] 10 11[@http://github.com/ROCm-Developer-Tools/HIP/tree/roc-1.6.0/ HIP] is part of the 12[@http://rocm.github.io/ ROC (Radeon Open Compute)] platform for parallel computing 13on AMD and NVIDIA GPUs. The application programming interface of HIP gives access to 14the GPU's instruction set and computation resources (execution of compute kernels). 15 16 17[heading Synchronization with ROCm/HIP streams] 18 19HIP operations such as compute kernels or memory transfers (between host and 20device) can be grouped/queued by HIP streams and are executed on the GPUs. 21Boost.Fiber enables a fiber to sleep (suspend) till a HIP stream has completed 22its operations. This enables applications to run other fibers on the CPU without 23the need to spawn additional OS-threads. The fiber is resumed when the HIP 24stream has finished. 
25 26 __global__ 27 void kernel( int size, int * a, int * b, int * c) { 28 int idx = threadIdx.x + blockIdx.x * blockDim.x; 29 if ( idx < size) { 30 int idx1 = (idx + 1) % 256; 31 int idx2 = (idx + 2) % 256; 32 float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; 33 float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; 34 c[idx] = (as + bs) / 2; 35 } 36 } 37 38 boost::fibers::fiber f([&done]{ 39 hipStream_t stream; 40 hipStreamCreate( & stream); 41 int size = 1024 * 1024; 42 int full_size = 20 * size; 43 int * host_a, * host_b, * host_c; 44 hipHostMalloc( & host_a, full_size * sizeof( int), hipHostMallocDefault); 45 hipHostMalloc( & host_b, full_size * sizeof( int), hipHostMallocDefault); 46 hipHostMalloc( & host_c, full_size * sizeof( int), hipHostMallocDefault); 47 int * dev_a, * dev_b, * dev_c; 48 hipMalloc( & dev_a, size * sizeof( int) ); 49 hipMalloc( & dev_b, size * sizeof( int) ); 50 hipMalloc( & dev_c, size * sizeof( int) ); 51 std::minstd_rand generator; 52 std::uniform_int_distribution<> distribution(1, 6); 53 for ( int i = 0; i < full_size; ++i) { 54 host_a[i] = distribution( generator); 55 host_b[i] = distribution( generator); 56 } 57 for ( int i = 0; i < full_size; i += size) { 58 hipMemcpyAsync( dev_a, host_a + i, size * sizeof( int), hipMemcpyHostToDevice, stream); 59 hipMemcpyAsync( dev_b, host_b + i, size * sizeof( int), hipMemcpyHostToDevice, stream); 60 hipLaunchKernel(kernel, dim3(size / 256), dim3(256), 0, stream, size, dev_a, dev_b, dev_c); 61 hipMemcpyAsync( host_c + i, dev_c, size * sizeof( int), hipMemcpyDeviceToHost, stream); 62 } 63 auto result = boost::fibers::hip::waitfor_all( stream); // suspend fiber till HIP stream has finished 64 BOOST_ASSERT( stream == std::get< 0 >( result) ); 65 BOOST_ASSERT( hipSuccess == std::get< 1 >( result) ); 66 std::cout << "f1: GPU computation finished" << std::endl; 67 hipHostFree( host_a); 68 hipHostFree( host_b); 69 hipHostFree( host_c); 70 hipFree( dev_a); 71 hipFree( dev_b); 72 hipFree( dev_c); 73 
hipStreamDestroy( stream); 74 }); 75 f.join(); 76 77 78[heading Synopsis] 79 80 #include <boost/fiber/hip/waitfor.hpp> 81 82 namespace boost { 83 namespace fibers { 84 namespace hip { 85 86 std::tuple< hipStream_t, hipError_t > waitfor_all( hipStream_t st); 87 std::vector< std::tuple< hipStream_t, hipError_t > > waitfor_all( hipStream_t ... st); 88 89 }}} 90 91 92[ns_function_heading hip..waitfor] 93 94 #include <boost/fiber/hip/waitfor.hpp> 95 96 namespace boost { 97 namespace fibers { 98 namespace hip { 99 100 std::tuple< hipStream_t, hipError_t > waitfor_all( hipStream_t st); 101 std::vector< std::tuple< hipStream_t, hipError_t > > waitfor_all( hipStream_t ... st); 102 103 }}} 104 105[variablelist 106[[Effects:] [Suspends the active fiber till the HIP stream has finished its operations.]] 107[[Returns:] [a tuple of the stream reference and the HIP stream status]] 108] 109 110 111[endsect] 112