1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -verify -fopenmp -triple x86_64-apple-darwin10.6.0 -fopenmp-targets=nvptx64-nvidia-cuda  -emit-llvm-bc -o %t-host.bc %s
3 // RUN: %clang_cc1 -verify -fopenmp -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK1
4 // expected-no-diagnostics
5 
6 #ifndef HEADER
7 #define HEADER
8 
// Everything in this test lives in a `declare target` region so it is
// emitted for the NVPTX device (the second RUN line compiles in device mode).
#pragma omp declare target
// Test-local stand-ins for the omp.h allocator handles, so the test does not
// depend on any runtime header. Only the identities of these handles matter;
// the underlying type is arbitrary.
typedef void **omp_allocator_handle_t;
extern const omp_allocator_handle_t omp_null_allocator;
extern const omp_allocator_handle_t omp_default_mem_alloc;
extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
extern const omp_allocator_handle_t omp_const_mem_alloc;
extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
extern const omp_allocator_handle_t omp_pteam_mem_alloc;
extern const omp_allocator_handle_t omp_thread_mem_alloc;
20 
// Plain aggregate type (no allocate directive of its own); used below as the
// type of global `d`, which IS placed under an allocator via #pragma omp allocate.
struct St{
 int a;
};
24 
// Exercises #pragma omp allocate applied to a static data member from inside
// the class body. `d` is a global instance; its storage allocator is set by a
// separate file-scope allocate directive further down.
struct St1{
 int a;
 static int b;
#pragma omp allocate(b) allocator(omp_default_mem_alloc)
} d;
30 
// File-scope variables bound to distinct memory-space allocators, including a
// single directive covering multiple variables of different types (d, c).
// NOTE(review): on the NVPTX device these presumably lower to specific address
// spaces (the generated assertions show addrspace(3), i.e. shared memory, for
// omp_pteam_mem_alloc); the mapping for the other allocators is not visible here.
int a, b, c;
#pragma omp allocate(a) allocator(omp_large_cap_mem_alloc)
#pragma omp allocate(b) allocator(omp_const_mem_alloc)
#pragma omp allocate(d, c) allocator(omp_high_bw_mem_alloc)
35 
// Allocate directive on a static data member of a class template; the
// directive must be carried through to each instantiation (ST&lt;int&gt;::m is
// referenced from foo&lt;int&gt; below).
template <class T>
struct ST {
  static T m;
  #pragma omp allocate(m) allocator(omp_low_lat_mem_alloc)
};
41 
foo()42 template <class T> T foo() {
43   T v;
44   #pragma omp allocate(v) allocator(omp_cgroup_mem_alloc)
45   v = ST<T>::m;
46   return v;
47 }
48 
// Allocate directive naming a variable through a namespace-qualified id.
namespace ns{
  int a;
}
#pragma omp allocate(ns::a) allocator(omp_pteam_mem_alloc)
53 
main()54 int main () {
55   static int a;
56 #pragma omp allocate(a) allocator(omp_thread_mem_alloc)
57   a=2;
58   double b = 3;
59   float c;
60 #pragma omp allocate(b) allocator(omp_default_mem_alloc)
61 #pragma omp allocate(c) allocator(omp_cgroup_mem_alloc)
62   return (foo<int>());
63 }
64 
65 
// Explicit instantiation declaration: ST<int>::m's definition lives elsewhere,
// but the allocate directive attached to it must still take effect here.
extern template int ST<int>::m;

// Declared only; called from bar() so `bar_a` has its address taken.
void baz(float &);
69 
bar()70 void bar() {
71   float bar_a;
72   double bar_b;
73   int bar_c;
74 #pragma omp allocate(bar_c) allocator(omp_cgroup_mem_alloc)
75 #pragma omp parallel private(bar_a, bar_b) allocate(omp_thread_mem_alloc                  \
76                                                     : bar_a) allocate(omp_pteam_mem_alloc \
77                                                                       : bar_b)
78   {
79     bar_b = bar_a;
80     baz(bar_a);
81   }
82 }
83 
84 #pragma omp end declare target
85 #endif
86 // CHECK1-LABEL: define {{[^@]+}}@main
87 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
88 // CHECK1-NEXT:  entry:
89 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
90 // CHECK1-NEXT:    [[B:%.*]] = alloca double, align 8
91 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
92 // CHECK1-NEXT:    store i32 2, i32* @_ZZ4mainE1a, align 4
93 // CHECK1-NEXT:    store double 3.000000e+00, double* [[B]], align 8
94 // CHECK1-NEXT:    [[CALL:%.*]] = call i32 @_Z3fooIiET_v() #[[ATTR6:[0-9]+]]
95 // CHECK1-NEXT:    ret i32 [[CALL]]
96 //
97 //
98 // CHECK1-LABEL: define {{[^@]+}}@_Z3fooIiET_v
99 // CHECK1-SAME: () #[[ATTR1:[0-9]+]] comdat {
100 // CHECK1-NEXT:  entry:
101 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* @_ZN2STIiE1mE, align 4
102 // CHECK1-NEXT:    store i32 [[TMP0]], i32* @v, align 4
103 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* @v, align 4
104 // CHECK1-NEXT:    ret i32 [[TMP1]]
105 //
106 //
107 // CHECK1-LABEL: define {{[^@]+}}@_Z3barv
108 // CHECK1-SAME: () #[[ATTR1]] {
109 // CHECK1-NEXT:  entry:
110 // CHECK1-NEXT:    [[BAR_A:%.*]] = alloca float, align 4
111 // CHECK1-NEXT:    [[BAR_B:%.*]] = alloca double, align 8
112 // CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
113 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
114 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
115 // CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP1]], i64 0)
116 // CHECK1-NEXT:    ret void
117 //
118 //
119 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
120 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2:[0-9]+]] {
121 // CHECK1-NEXT:  entry:
122 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
123 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
124 // CHECK1-NEXT:    [[BAR_A:%.*]] = alloca float, align 4
125 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
126 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
127 // CHECK1-NEXT:    [[TMP0:%.*]] = load float, float* [[BAR_A]], align 4
128 // CHECK1-NEXT:    [[CONV:%.*]] = fpext float [[TMP0]] to double
129 // CHECK1-NEXT:    store double [[CONV]], double* addrspacecast (double addrspace(3)* @bar_b to double*), align 8
130 // CHECK1-NEXT:    call void @_Z3bazRf(float* nonnull align 4 dereferenceable(4) [[BAR_A]]) #[[ATTR6]]
131 // CHECK1-NEXT:    ret void
132 //
133 //
134 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
135 // CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
136 // CHECK1-NEXT:  entry:
137 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
138 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
139 // CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
140 // CHECK1-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
141 // CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
142 // CHECK1-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
143 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
144 // CHECK1-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
145 // CHECK1-NEXT:    call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR5:[0-9]+]]
146 // CHECK1-NEXT:    ret void
147 //
148