; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-pc-win32 -mcpu=corei7 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-pc-win32 -mcpu=corei7 | FileCheck %s --check-prefix=X64

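; The <4 x i16> load and store should each use a single 64-bit movq, with the
; vector add widened to a full-width paddw in an XMM register.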
define void @load_store(<4 x i16>* %in) {
; X86-LABEL: load_store:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    paddw %xmm0, %xmm0
; X86-NEXT:    movq %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: load_store:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    paddw %xmm0, %xmm0
; X64-NEXT:    movq %xmm0, (%rcx)
; X64-NEXT:    retq
entry:
  %A27 = load <4 x i16>, <4 x i16>* %in, align 4
  %A28 = add <4 x i16> %A27, %A27
  store <4 x i16> %A28, <4 x i16>* %in, align 4
  ret void
}

; Make sure that we store a 64-bit value, even on 32-bit systems.
define void @store_64(<2 x i32>* %ptr) {
; X86-LABEL: store_64:
; X86:       # %bb.0: # %BB
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorps %xmm0, %xmm0
; X86-NEXT:    movlps %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: store_64:
; X64:       # %bb.0: # %BB
; X64-NEXT:    movq $0, (%rcx)
; X64-NEXT:    retq
BB:
  store <2 x i32> zeroinitializer, <2 x i32>* %ptr
  ret void
}

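; Make sure that we load a 64-bit value, even on 32-bit systems.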
define <2 x i32> @load_64(<2 x i32>* %ptr) {
; X86-LABEL: load_64:
; X86:       # %bb.0: # %BB
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    retl
;
; X64-LABEL: load_64:
; X64:       # %bb.0: # %BB
; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    retq
BB:
  %t = load <2 x i32>, <2 x i32>* %ptr
  ret <2 x i32> %t
}