// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// Check that the execution mode of both target regions on the GPU is set to SPMD mode.
// CHECK-DAG: {{@__omp_offloading_.+l21}}_exec_mode = weak constant i8 0
// CHECK-DAG: {{@__omp_offloading_.+l26}}_exec_mode = weak constant i8 0

template<typename tx>
tx ftemplate(int n) {
  tx a = 0;
  short aa = 0;
  tx b[10];

#pragma omp target parallel map(tofrom: aa) num_threads(1024)
  {
    aa += 1;
  }

#pragma omp target parallel map(tofrom:a, aa, b) if(target: n>40) num_threads(n)
  {
    a += 1;
    aa += 1;
    b[2] += 1;
  }

  return a;
}

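// bar() instantiates ftemplate<int>; that instantiation is what causes both
// target regions above to be emitted for the device.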
int bar(int n){
  int a = 0;

  a += ftemplate<int>(n);

  return a;
}

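// The checks below describe the expected SPMD entry/exit sequence for the
// target region at line 21: read the launch bound via
// @llvm.nvvm.read.ptx.sreg.ntid.x, initialize SPMD execution and the
// data-sharing stack, obtain the global thread id, call the outlined parallel
// function with the captured 'aa', and deinitialize the kernel.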
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l21}}(
// CHECK: [[AA_ADDR:%.+]] = alloca i16*, align
// CHECK: store i16* {{%.+}}, i16** [[AA_ADDR]], align
// CHECK: [[AA:%.+]] = load i16*, i16** [[AA_ADDR]], align
// CHECK: [[THREAD_LIMIT:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK: call void @__kmpc_spmd_kernel_init(i32 [[THREAD_LIMIT]], i16 1, i16 0)
// CHECK: call void @__kmpc_data_sharing_init_stack_spmd()
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{.+}})
// CHECK: store i32 [[GTID]], i32* [[THREADID:%.+]],
// CHECK: call void [[OUTLINED:@.+]](i32* [[THREADID]], i32* %{{.+}}, i16* [[AA]])
// CHECK: call void @__kmpc_spmd_kernel_deinit_v2(i16 1)
// CHECK: ret void
// CHECK: }

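// Outlined parallel body for the region at line 21: 'aa' is received by
// pointer and updated through a load followed by a store.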
// CHECK: define internal void [[OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i16* {{[^%]*}}[[ARG:%.+]])
// CHECK: = alloca i32*, align
// CHECK: = alloca i32*, align
// CHECK: [[AA_ADDR:%.+]] = alloca i16*, align
// CHECK: store i16* [[ARG]], i16** [[AA_ADDR]], align
// CHECK: [[AA:%.+]] = load i16*, i16** [[AA_ADDR]], align
// CHECK: [[VAL:%.+]] = load i16, i16* [[AA]], align
// CHECK: store i16 {{%.+}}, i16* [[AA]], align
// CHECK: ret void
// CHECK: }

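// SPMD entry/exit sequence for the target region at line 26; same structure as
// the first kernel, but with three captures ('a', 'aa', and 'b') forwarded to
// the outlined parallel function.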
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l26}}(
// CHECK: [[A_ADDR:%.+]] = alloca i32*, align
// CHECK: [[AA_ADDR:%.+]] = alloca i16*, align
// CHECK: [[B_ADDR:%.+]] = alloca [10 x i32]*, align
// CHECK: store i32* {{%.+}}, i32** [[A_ADDR]], align
// CHECK: store i16* {{%.+}}, i16** [[AA_ADDR]], align
// CHECK: store [10 x i32]* {{%.+}}, [10 x i32]** [[B_ADDR]], align
// CHECK: [[A:%.+]] = load i32*, i32** [[A_ADDR]], align
// CHECK: [[AA:%.+]] = load i16*, i16** [[AA_ADDR]], align
// CHECK: [[B:%.+]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align
// CHECK: [[THREAD_LIMIT:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK: call void @__kmpc_spmd_kernel_init(i32 [[THREAD_LIMIT]], i16 1, i16 0)
// CHECK: call void @__kmpc_data_sharing_init_stack_spmd()
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{.+}})
// CHECK: store i32 [[GTID]], i32* [[THREADID:%.+]],
// CHECK: call void [[OUTLINED:@.+]](i32* [[THREADID]], i32* %{{.+}}, i32* [[A]], i16* [[AA]], [10 x i32]* [[B]])
// CHECK: call void @__kmpc_spmd_kernel_deinit_v2(i16 1)
// CHECK: ret void
// CHECK: }

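// Outlined parallel body for the region at line 26: 'a', 'aa', and 'b' are
// received by pointer, and each update is stored back through the
// corresponding pointer (with a GEP for the array element of 'b').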
// CHECK: define internal void [[OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* {{[^%]*}}[[ARG1:%.+]], i16* {{[^%]*}}[[ARG2:%.+]], [10 x i32]* {{[^%]*}}[[ARG3:%.+]])
// CHECK: = alloca i32*, align
// CHECK: = alloca i32*, align
// CHECK: [[A_ADDR:%.+]] = alloca i32*, align
// CHECK: [[AA_ADDR:%.+]] = alloca i16*, align
// CHECK: [[B_ADDR:%.+]] = alloca [10 x i32]*, align
// CHECK: store i32* [[ARG1]], i32** [[A_ADDR]], align
// CHECK: store i16* [[ARG2]], i16** [[AA_ADDR]], align
// CHECK: store [10 x i32]* [[ARG3]], [10 x i32]** [[B_ADDR]], align
// CHECK: [[A:%.+]] = load i32*, i32** [[A_ADDR]], align
// CHECK: [[AA:%.+]] = load i16*, i16** [[AA_ADDR]], align
// CHECK: [[B:%.+]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align
// CHECK: store i32 {{%.+}}, i32* [[A]], align
// CHECK: store i16 {{%.+}}, i16* [[AA]], align
// CHECK: [[ELT:%.+]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]],
// CHECK: store i32 {{%.+}}, i32* [[ELT]], align
// CHECK: ret void
// CHECK: }
#endif