/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_intel64_H)
#error Do not #include this internal file directly; use public TBB headers instead.
#endif

#define __TBB_machine_linux_intel64_H

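// gcc_ia32_common.h carries the machinery shared between the gcc IA-32 and
// Intel 64 ports (spin-wait pause, log2, and similar helpers).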
#include <stdint.h>
#include "gcc_ia32_common.h"

#define __TBB_WORDSIZE 8
#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE

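// A compiler-only barrier: the empty asm with a "memory" clobber keeps the
// compiler from reordering or caching memory accesses across it, and emits
// no instructions. On x86-64, whose hardware model already gives ordinary
// loads acquire semantics and ordinary stores release semantics, that is all
// the consistency helpers below require.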
#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory")
#define __TBB_control_consistency_helper() __TBB_compiler_fence()
#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()
#define __TBB_release_consistency_helper() __TBB_compiler_fence()

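// The one reordering x86-64 hardware does permit is a store followed by a
// load from a different location, so a full fence needs a real instruction
// (MFENCE) on top of the compiler barrier.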
#ifndef __TBB_full_memory_fence
#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory")
#endif

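// Generates the three basic atomic read-modify-write primitives for an
// operand of size S bytes and type T: compare-and-swap (lock cmpxchg),
// fetch-and-add (lock xadd), and fetch-and-store (lock xchg). X is an
// explicit instruction-size suffix ("q" for the 64-bit variants); for the
// smaller sizes the register operand alone determines the operand size.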
#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X)                                          \
static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \
{                                                                                    \
    T result;                                                                        \
                                                                                     \
    __asm__ __volatile__("lock\ncmpxchg" X " %2,%1"                                  \
                          : "=a"(result), "=m"(*(volatile T*)ptr)                    \
                          : "q"(value), "0"(comparand), "m"(*(volatile T*)ptr)       \
                          : "memory");                                               \
    return result;                                                                   \
}                                                                                    \
                                                                                     \
static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend)              \
{                                                                                    \
    T result;                                                                        \
    __asm__ __volatile__("lock\nxadd" X " %0,%1"                                     \
                          : "=r"(result),"=m"(*(volatile T*)ptr)                     \
                          : "0"(addend), "m"(*(volatile T*)ptr)                      \
                          : "memory");                                               \
    return result;                                                                   \
}                                                                                    \
                                                                                     \
static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value)             \
{                                                                                    \
    T result;                                                                        \
    __asm__ __volatile__("lock\nxchg" X " %0,%1"                                     \
                          : "=r"(result),"=m"(*(volatile T*)ptr)                     \
                          : "0"(value), "m"(*(volatile T*)ptr)                       \
                          : "memory");                                               \
    return result;                                                                   \
}                                                                                    \

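// Instantiate the primitives for 1-, 2-, 4-, and 8-byte operands; the numeric
// suffix of each generated function is the operand size in bytes.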
__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,"")
__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"")
__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"")
__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t,"q")

#undef __TBB_MACHINE_DEFINE_ATOMICS

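// Atomic 64-bit bitwise OR and AND; the lock prefix makes each
// read-modify-write of *ptr atomic.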
static inline void __TBB_machine_or( volatile void *ptr, uint64_t value ) {
    __asm__ __volatile__("lock\norq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory");
}

static inline void __TBB_machine_and( volatile void *ptr, uint64_t value ) {
    __asm__ __volatile__("lock\nandq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory");
}

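// Publish the 64-bit OR/AND above as this port's atomic bit operations.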
#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)
#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)

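// Tell the generic port layer (tbb_machine.h) what to synthesize from the
// primitives above: sequentially consistent stores are built on
// fetch-and-store (xchg is a full fence on x86), and the half-fenced,
// relaxed, and sequentially consistent load/store variants use the generic
// implementations.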
#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1
#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1
#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1
#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1