; RUN: opt < %s -scalar-evolution -analyze -enable-new-pm=0 | FileCheck %s
; RUN: opt < %s "-passes=print<scalar-evolution>" -disable-output 2>&1 | FileCheck %s

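; Both %n and %y should be analyzed as zext expressions, and no other
; zext expressions should appear in the output for @test1.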
; CHECK-LABEL: @test1
; CHECK: -->  (zext
; CHECK: -->  (zext
; CHECK-NOT: -->  (zext

define i32 @test1(i32 %x) {
  %n = and i32 %x, 255
  %y = xor i32 %n, 255
  ret i32 %y
}

; ScalarEvolution shouldn't try to analyze %z into something like
;   -->  (zext i4 (-1 + (-1 * (trunc i64 (8 * %x) to i4))) to i64)
; or
;   -->  (8 * (zext i1 (trunc i64 ((8 * %x) /u 8) to i1) to i64))

; CHECK-LABEL: @test2
; CHECK: -->  (8 * (zext i1 (trunc i64 %x to i1) to i64))

define i64 @test2(i64 %x) {
  %a = shl i64 %x, 3
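; %t isolates bit 3 of %a, i.e. 8 * (%x & 1), and %z flips that bit.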
  %t = and i64 %a, 8
  %z = xor i64 %t, 8
  ret i64 %z
}

; Check that we transform the naive lowering of the sequence below,
;   (4 * (zext i5 (2 * (trunc i32 %x to i5)) to i32)),
; to
;   (8 * (zext i4 (trunc i32 %x to i4) to i32))
;
; CHECK-LABEL: @test3
define i32 @test3(i32 %x) {
  %a = mul i32 %x, 8
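; %a = 8 * %x has its low three bits clear, so masking with 124 (0b1111100)
; keeps only bits 3..6 of %a, i.e. 8 * (%x mod 16).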
; CHECK: %b
; CHECK-NEXT: --> (8 * (zext i4 (trunc i32 %x to i4) to i32))
  %b = and i32 %a, 124
  ret i32 %b
}