; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=-sse | FileCheck %s --check-prefix=X64-NOSSE
; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs | FileCheck %s --check-prefix=X64-SSE

; Note: This test checks that the lowering for atomics matches what we
; currently emit for non-atomics, plus the atomic restriction. The presence
; of a particular lowering detail in these tests should not be read as
; requiring that detail for correctness unless it is related to the
; atomicity itself. (Specifically, there were reviewer questions about the
; lowering for half values and their calling convention which remain
; unresolved.)
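;
; For illustration only (this function name is hypothetical, and it is not
; exercised by the RUN lines above nor covered by the autogenerated CHECK
; lines), the non-atomic counterpart of the store tested below would be:
;
;   define void @store_fp128_plain(fp128* %fptr, fp128 %v) {
;     store fp128 %v, fp128* %fptr, align 16
;     ret void
;   }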

define void @store_fp128(fp128* %fptr, fp128 %v) {
; X64-NOSSE-LABEL: store_fp128:
; X64-NOSSE:       # %bb.0:
; X64-NOSSE-NEXT:    pushq %rax
; X64-NOSSE-NEXT:    .cfi_def_cfa_offset 16
; X64-NOSSE-NEXT:    callq __sync_lock_test_and_set_16@PLT
; X64-NOSSE-NEXT:    popq %rax
; X64-NOSSE-NEXT:    .cfi_def_cfa_offset 8
; X64-NOSSE-NEXT:    retq
;
; X64-SSE-LABEL: store_fp128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    subq $24, %rsp
; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
; X64-SSE-NEXT:    movq (%rsp), %rsi
; X64-SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
; X64-SSE-NEXT:    callq __sync_lock_test_and_set_16@PLT
; X64-SSE-NEXT:    addq $24, %rsp
; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
; X64-SSE-NEXT:    retq
  store atomic fp128 %v, fp128* %fptr unordered, align 16
  ret void
}