;; Apply basic simplifications.
;;
;; This folds constants with arithmetic to form `_imm` instructions, and other
;; minor simplifications.
;;
;; Doesn't apply some simplifications if the native word width (in bytes) is
;; smaller than the controlling type's width of the instruction. This would
;; result in an illegal instruction that would likely be expanded back into an
;; instruction on smaller types with the same initial opcode, creating
;; unnecessary churn.

;; Binary instructions whose second argument is constant.
(=> (when (iadd $x $C)
          (fits-in-native-word $C))
    (iadd_imm $C $x))
(=> (when (imul $x $C)
          (fits-in-native-word $C))
    (imul_imm $C $x))
(=> (when (sdiv $x $C)
          (fits-in-native-word $C))
    (sdiv_imm $C $x))
(=> (when (udiv $x $C)
          (fits-in-native-word $C))
    (udiv_imm $C $x))
(=> (when (srem $x $C)
          (fits-in-native-word $C))
    (srem_imm $C $x))
(=> (when (urem $x $C)
          (fits-in-native-word $C))
    (urem_imm $C $x))
(=> (when (band $x $C)
          (fits-in-native-word $C))
    (band_imm $C $x))
(=> (when (bor $x $C)
          (fits-in-native-word $C))
    (bor_imm $C $x))
(=> (when (bxor $x $C)
          (fits-in-native-word $C))
    (bxor_imm $C $x))
(=> (when (rotl $x $C)
          (fits-in-native-word $C))
    (rotl_imm $C $x))
(=> (when (rotr $x $C)
          (fits-in-native-word $C))
    (rotr_imm $C $x))
(=> (when (ishl $x $C)
          (fits-in-native-word $C))
    (ishl_imm $C $x))
(=> (when (ushr $x $C)
          (fits-in-native-word $C))
    (ushr_imm $C $x))
(=> (when (sshr $x $C)
          (fits-in-native-word $C))
    (sshr_imm $C $x))
;; There is no `isub_imm`: rewrite `x - C` as `x + (-C)` instead.
(=> (when (isub $x $C)
          (fits-in-native-word $C))
    (iadd_imm $(neg $C) $x))
(=> (when (ifcmp $x $C)
          (fits-in-native-word $C))
    (ifcmp_imm $C $x))
(=> (when (icmp $cond $x $C)
          (fits-in-native-word $C))
    (icmp_imm $cond $C $x))

;; Binary instructions whose first operand is constant.
;; Only the commutative operations can take their constant as the *first*
;; operand; subtraction gets the dedicated reverse-subtract immediate form.
(=> (when (iadd $C $x)
          (fits-in-native-word $C))
    (iadd_imm $C $x))
(=> (when (imul $C $x)
          (fits-in-native-word $C))
    (imul_imm $C $x))
(=> (when (band $C $x)
          (fits-in-native-word $C))
    (band_imm $C $x))
(=> (when (bor $C $x)
          (fits-in-native-word $C))
    (bor_imm $C $x))
(=> (when (bxor $C $x)
          (fits-in-native-word $C))
    (bxor_imm $C $x))
(=> (when (isub $C $x)
          (fits-in-native-word $C))
    (irsub_imm $C $x))

;; Unary instructions whose operand is constant.
(=> (adjust_sp_down $C) (adjust_sp_down_imm $C))

;; Fold `(binop_imm $C1 (binop_imm $C2 $x))` into `(binop_imm $(binop $C1 $C2) $x)`.
(=> (iadd_imm $C1 (iadd_imm $C2 $x)) (iadd_imm $(iadd $C1 $C2) $x))
(=> (imul_imm $C1 (imul_imm $C2 $x)) (imul_imm $(imul $C1 $C2) $x))
(=> (bor_imm $C1 (bor_imm $C2 $x)) (bor_imm $(bor $C1 $C2) $x))
(=> (band_imm $C1 (band_imm $C2 $x)) (band_imm $(band $C1 $C2) $x))
(=> (bxor_imm $C1 (bxor_imm $C2 $x)) (bxor_imm $(bxor $C1 $C2) $x))

;; Remove operations that are no-ops.
(=> (iadd_imm 0 $x) $x)
(=> (imul_imm 1 $x) $x)
(=> (sdiv_imm 1 $x) $x)
(=> (udiv_imm 1 $x) $x)
(=> (bor_imm 0 $x) $x)
(=> (band_imm -1 $x) $x)
(=> (bxor_imm 0 $x) $x)
(=> (rotl_imm 0 $x) $x)
(=> (rotr_imm 0 $x) $x)
(=> (ishl_imm 0 $x) $x)
(=> (ushr_imm 0 $x) $x)
(=> (sshr_imm 0 $x) $x)

;; Replace with zero.
(=> (imul_imm 0 $x) 0)
(=> (band_imm 0 $x) 0)

;; Replace with negative 1.
(=> (bor_imm -1 $x) -1)

;; Transform `[(x << N) >> N]` into a (un)signed-extending move.
;;
;; Each pattern below matches a left shift followed by a right shift of the
;; same constant amount N, where N = (width of $x) - (width of the narrow
;; type). An unsigned right shift is a zero-extend of the low bits; a signed
;; right shift is a sign-extend.
;;
;; i16 -> i8 -> i16
(=> (when (ushr_imm 8 (ishl_imm 8 $x))
          (bit-width $x 16))
    (uextend{i16} (ireduce{i8} $x)))
(=> (when (sshr_imm 8 (ishl_imm 8 $x))
          (bit-width $x 16))
    (sextend{i16} (ireduce{i8} $x)))
;; i32 -> i8 -> i32
(=> (when (ushr_imm 24 (ishl_imm 24 $x))
          (bit-width $x 32))
    (uextend{i32} (ireduce{i8} $x)))
(=> (when (sshr_imm 24 (ishl_imm 24 $x))
          (bit-width $x 32))
    (sextend{i32} (ireduce{i8} $x)))
;; i32 -> i16 -> i32
(=> (when (ushr_imm 16 (ishl_imm 16 $x))
          (bit-width $x 32))
    (uextend{i32} (ireduce{i16} $x)))
(=> (when (sshr_imm 16 (ishl_imm 16 $x))
          (bit-width $x 32))
    (sextend{i32} (ireduce{i16} $x)))
;; i64 -> i8 -> i64
(=> (when (ushr_imm 56 (ishl_imm 56 $x))
          (bit-width $x 64))
    (uextend{i64} (ireduce{i8} $x)))
(=> (when (sshr_imm 56 (ishl_imm 56 $x))
          (bit-width $x 64))
    (sextend{i64} (ireduce{i8} $x)))
;; i64 -> i16 -> i64
(=> (when (ushr_imm 48 (ishl_imm 48 $x))
          (bit-width $x 64))
    (uextend{i64} (ireduce{i16} $x)))
(=> (when (sshr_imm 48 (ishl_imm 48 $x))
          (bit-width $x 64))
    (sextend{i64} (ireduce{i16} $x)))
;; i64 -> i32 -> i64
(=> (when (ushr_imm 32 (ishl_imm 32 $x))
          (bit-width $x 64))
    (uextend{i64} (ireduce{i32} $x)))
(=> (when (sshr_imm 32 (ishl_imm 32 $x))
          (bit-width $x 64))
    (sextend{i64} (ireduce{i32} $x)))

;; Fold away redundant `bint` instructions that accept both integer and boolean
;; arguments.
(=> (select (bint $x) $y $z) (select $x $y $z))
(=> (brz (bint $x)) (brz $x))
(=> (brnz (bint $x)) (brnz $x))
(=> (trapz (bint $x)) (trapz $x))
(=> (trapnz (bint $x)) (trapnz $x))

;; Fold comparisons into branch operations when possible.
;;
;; This matches against operations which compare against zero, then use the
;; result in a `brz` or `brnz` branch. It folds those two operations into a
;; single `brz` or `brnz`.
;; `ne 0` agrees with the branch's own zero test, so the compare disappears;
;; `eq 0` inverts it, so the branch kind flips as well.
(=> (brnz (icmp_imm ne 0 $x)) (brnz $x))
(=> (brz (icmp_imm ne 0 $x)) (brz $x))
(=> (brnz (icmp_imm eq 0 $x)) (brz $x))
(=> (brz (icmp_imm eq 0 $x)) (brnz $x))

;; Division and remainder by constants.
;;
;; TODO: this section is incomplete, and a bunch of related optimizations are
;; still hand-coded in `simple_preopt.rs`.

;; (Division by one is handled above.)

;; Remainder by one is zero.
(=> (urem_imm 1 $x) 0)
(=> (srem_imm 1 $x) 0)

;; Division by a power of two -> shift right.
(=> (when (udiv_imm $C $x)
          (is-power-of-two $C))
    (ushr_imm $(log2 $C) $x))