1 /*========================== begin_copyright_notice ============================
2 
3 Copyright (C) 2021 Intel Corporation
4 
5 SPDX-License-Identifier: MIT
6 
7 ============================= end_copyright_notice ===========================*/
8 
9 #ifndef CM_CL_ATOMIC_H
10 #define CM_CL_ATOMIC_H
11 
12 #include "define.h"
13 #include "detail/builtins.h"
14 #include "vector.h"
15 
16 #include <opencl_def.h>
17 #include <opencl_type_traits.h>
18 
19 namespace cm {
20 namespace atomic {
21 
// Load is emulated as "atomicrmw or" with zero, which is why an integral
// element type is required.
23 template <memory_order semantics, memory_scope scope, typename PtrT>
load(PtrT * ptr)24 auto load(PtrT *ptr) {
25   using element_type = typename cl::pointer_traits<PtrT *>::element_type;
26   static_assert(cl::is_integral<element_type>::value, "integral type expected");
27   element_type zero_init = 0;
28   return detail::atomicrmw<operation::orl, semantics, scope>(ptr, zero_init);
29 }
30 
31 template <memory_order semantics, memory_scope scope, typename PtrT,
32           typename OpT>
store(PtrT * ptr,OpT operand)33 void store(PtrT *ptr, OpT operand) {
34   detail::atomicrmw<operation::xchg, semantics, scope>(ptr, operand);
35 }
36 
// Infer the failure-path memory semantics from the success-path semantics,
// following the C++ rules for compare-exchange failure ordering.
38 constexpr memory_order
get_cmpxch_memory_semantics_on_failure(memory_order semantics_on_success)39 get_cmpxch_memory_semantics_on_failure(memory_order semantics_on_success) {
40   switch (semantics_on_success) {
41   default:
42     return semantics_on_success;
43   case memory_order_acq_rel:
44     return memory_order_acquire;
45   case memory_order_release:
46     return memory_order_relaxed;
47   }
48 }
49 
50 template <atomic::operation op, memory_order semantics, memory_scope scope,
51           typename PtrT, typename... OpT>
execute(PtrT * ptr,OpT...operands)52 auto execute(PtrT *ptr, OpT... operands) {
53   constexpr unsigned NumArgs = sizeof...(operands);
54   if constexpr (op == operation::cmpxchg) {
55     static_assert(NumArgs == 2, "illegal number of arguments for cmpxchg");
56     return detail::cmpxchg<
57         semantics, get_cmpxch_memory_semantics_on_failure(semantics), scope>(
58         ptr, operands...);
59   } else if constexpr (op == operation::load) {
60     static_assert(NumArgs == 0, "illegal number of arguments for load");
61     return load<semantics, scope, PtrT>(ptr);
62   } else if constexpr (op == operation::store) {
63     static_assert(NumArgs == 1, "illegal number of arguments for store");
64     store<semantics, scope, PtrT>(ptr, operands...);
65     return;
66   } else {
67     static_assert(NumArgs == 1, "illegal number of arguments for atomicrmw");
68     return detail::atomicrmw<op, semantics, scope>(ptr, operands...);
69   }
70 }
71 
72 } // namespace atomic
73 
74 } // namespace cm
75 
76 #endif // CM_CL_ATOMIC_H
77