/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <stdlib.h>

#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
#include "jsapi-tests/tests.h"
#include "vm/JSAtom.h"

// Tests for classes in:
//
//   jit/shared/IonAssemblerBuffer.h
//   jit/shared/IonAssemblerBufferWithConstantPools.h
//
// Classes in js::jit tested:
//
//   BufferOffset
//   BufferSlice (implicitly)
//   AssemblerBuffer
//
//   BranchDeadlineSet
//   Pool (implicitly)
//   AssemblerBufferWithConstantPools
//

BEGIN_TEST(testAssemblerBuffer_BufferOffset) {
  using js::jit::BufferOffset;

  BufferOffset off1;
  BufferOffset off2(10);

  CHECK(!off1.assigned());
  CHECK(off2.assigned());
  CHECK_EQUAL(off2.getOffset(), 10);
  off1 = off2;
  CHECK(off1.assigned());
  CHECK_EQUAL(off1.getOffset(), 10);

  return true;
}
END_TEST(testAssemblerBuffer_BufferOffset)

BEGIN_TEST(testAssemblerBuffer_AssemblerBuffer) {
  using js::jit::BufferOffset;
  typedef js::jit::AssemblerBuffer<5 * sizeof(uint32_t), uint32_t> AsmBuf;

  AsmBuf ab;
  CHECK(ab.isAligned(16));
  CHECK_EQUAL(ab.size(), 0u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 0);
  CHECK(!ab.oom());
  CHECK(!ab.bail());

  BufferOffset off1 = ab.putInt(1000017);
  CHECK_EQUAL(off1.getOffset(), 0);
  CHECK_EQUAL(ab.size(), 4u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 4);
  CHECK(!ab.isAligned(16));
  CHECK(ab.isAligned(4));
  CHECK(ab.isAligned(1));
  CHECK_EQUAL(*ab.getInst(off1), 1000017u);

  BufferOffset off2 = ab.putInt(1000018);
  CHECK_EQUAL(off2.getOffset(), 4);

  BufferOffset off3 = ab.putInt(1000019);
  CHECK_EQUAL(off3.getOffset(), 8);

  BufferOffset off4 = ab.putInt(1000020);
  CHECK_EQUAL(off4.getOffset(), 12);
  CHECK_EQUAL(ab.size(), 16u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 16);

  // Last one in the slice.
  BufferOffset off5 = ab.putInt(1000021);
  CHECK_EQUAL(off5.getOffset(), 16);
  CHECK_EQUAL(ab.size(), 20u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 20);

  BufferOffset off6 = ab.putInt(1000022);
  CHECK_EQUAL(off6.getOffset(), 20);
  CHECK_EQUAL(ab.size(), 24u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 24);

  // Reference the previous slice. Exercise the finger.
  CHECK_EQUAL(*ab.getInst(off1), 1000017u);
  CHECK_EQUAL(*ab.getInst(off6), 1000022u);
  CHECK_EQUAL(*ab.getInst(off1), 1000017u);
  CHECK_EQUAL(*ab.getInst(off5), 1000021u);

  // Too much data for one slice.
  const uint32_t fixdata[] = {2000036, 2000037, 2000038,
                              2000039, 2000040, 2000041};

  // Split payload across multiple slices.
  CHECK_EQUAL(ab.nextOffset().getOffset(), 24);
  BufferOffset good1 = ab.putBytesLarge(sizeof(fixdata), fixdata);
  CHECK_EQUAL(good1.getOffset(), 24);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 48);
  CHECK_EQUAL(*ab.getInst(good1), 2000036u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(32)), 2000038u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(36)), 2000039u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(40)), 2000040u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 2000041u);

  return true;
}
END_TEST(testAssemblerBuffer_AssemblerBuffer)

BEGIN_TEST(testAssemblerBuffer_BranchDeadlineSet) {
  typedef js::jit::BranchDeadlineSet<3> DLSet;
  using js::jit::BufferOffset;

  js::LifoAlloc alloc(1024);
  DLSet dls(alloc);

  CHECK(dls.empty());
  CHECK(alloc.isEmpty());  // Constructor must be infallible.
  CHECK_EQUAL(dls.size(), 0u);
  CHECK_EQUAL(dls.maxRangeSize(), 0u);
  // Removing a non-existent deadline is OK.
  dls.removeDeadline(1, BufferOffset(7));

  // Add deadlines in increasing order as intended. This is optimal.
  dls.addDeadline(1, BufferOffset(10));
  CHECK(!dls.empty());
  CHECK_EQUAL(dls.size(), 1u);
  CHECK_EQUAL(dls.maxRangeSize(), 1u);
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 1u);

  // Removing non-existent deadlines is OK.
  dls.removeDeadline(1, BufferOffset(7));
  dls.removeDeadline(1, BufferOffset(17));
  dls.removeDeadline(0, BufferOffset(10));
  CHECK_EQUAL(dls.size(), 1u);
  CHECK_EQUAL(dls.maxRangeSize(), 1u);

  // Two identical deadlines for different ranges.
  dls.addDeadline(2, BufferOffset(10));
  CHECK(!dls.empty());
  CHECK_EQUAL(dls.size(), 2u);
  CHECK_EQUAL(dls.maxRangeSize(), 1u);
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);

  // It doesn't matter which range earliestDeadlineRange() reports first,
  // but it must report both.
  if (dls.earliestDeadlineRange() == 1) {
    dls.removeDeadline(1, BufferOffset(10));
    CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
    CHECK_EQUAL(dls.earliestDeadlineRange(), 2u);
  } else {
    CHECK_EQUAL(dls.earliestDeadlineRange(), 2u);
    dls.removeDeadline(2, BufferOffset(10));
    CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
    CHECK_EQUAL(dls.earliestDeadlineRange(), 1u);
  }

  // Add deadline which is the front of range 0, but not the global earliest.
  dls.addDeadline(0, BufferOffset(20));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Non-optimal add to front of single-entry range 0.
  dls.addDeadline(0, BufferOffset(15));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Append to 2-entry range 0.
  dls.addDeadline(0, BufferOffset(30));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Add penultimate entry.
  dls.addDeadline(0, BufferOffset(25));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Prepend, stealing earliest from other range.
  dls.addDeadline(0, BufferOffset(5));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 5);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Remove central element.
  dls.removeDeadline(0, BufferOffset(20));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 5);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Remove front, giving back the lead.
  dls.removeDeadline(0, BufferOffset(5));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Remove front, giving back earliest to range 0.
  dls.removeDeadline(dls.earliestDeadlineRange(), BufferOffset(10));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 15);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Remove tail.
  dls.removeDeadline(0, BufferOffset(30));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 15);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Now range 0 = [15, 25].
  CHECK_EQUAL(dls.size(), 2u);
  dls.removeDeadline(0, BufferOffset(25));
  dls.removeDeadline(0, BufferOffset(15));
  CHECK(dls.empty());

  return true;
}
END_TEST(testAssemblerBuffer_BranchDeadlineSet)

// Mock Assembler class for testing the AssemblerBufferWithConstantPools
// callbacks.
namespace {

struct TestAssembler;

typedef js::jit::AssemblerBufferWithConstantPools<
    /* SliceSize */ 5 * sizeof(uint32_t),
    /* InstSize */ 4,
    /* Inst */ uint32_t,
    /* Asm */ TestAssembler,
    /* NumShortBranchRanges */ 3>
    AsmBufWithPool;

struct TestAssembler {
  // Mock instruction set:
  //
  //   0x1111xxxx - align filler instructions.
  //   0x2222xxxx - manually inserted 'arith' instructions.
  //   0xaaaaxxxx - noop filler instruction.
  //   0xb0bbxxxx - branch xxxx bytes forward. (Pool guard).
  //   0xb1bbxxxx - branch xxxx bytes forward. (Short-range branch).
  //   0xb2bbxxxx - branch xxxx bytes forward. (Veneer branch).
  //   0xb3bbxxxx - branch xxxx bytes forward. (Patched short-range branch).
  //   0xc0ccxxxx - constant pool load (uninitialized).
  //   0xc1ccxxxx - constant pool load to index xxxx.
  //   0xc2ccxxxx - constant pool load xxxx bytes ahead.
  //   0xffffxxxx - pool header with xxxx bytes.

  static const unsigned BranchRange = 36;
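
  // Illustrative helpers, not part of the original mock: every instruction
  // above packs a 16-bit opcode tag in the high half-word and a 16-bit
  // payload (offset, index, or byte count) in the low half-word. A minimal
  // sketch for readers decoding the CHECK_EQUAL constants in the tests.
  static constexpr uint32_t TagOf(uint32_t inst) { return inst & 0xffff0000; }
  static constexpr uint32_t PayloadOf(uint32_t inst) { return inst & 0xffff; }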

  static void InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
    uint32_t* load = reinterpret_cast<uint32_t*>(load_);
    MOZ_ASSERT(*load == 0xc0cc0000,
               "Expected uninitialized constant pool load");
    MOZ_ASSERT(index < 0x10000);
    *load = 0xc1cc0000 + index;
  }

  static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
    uint32_t* load = reinterpret_cast<uint32_t*>(loadAddr);
    uint32_t index = *load & 0xffff;
    MOZ_ASSERT(*load == (0xc1cc0000 | index),
               "Expected constant pool load(index)");
    ptrdiff_t offset = reinterpret_cast<uint8_t*>(constPoolAddr) -
                       reinterpret_cast<uint8_t*>(loadAddr);
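    // Each pool entry is one 4-byte word, so scale the entry index to a
    // byte offset within the pool.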
    offset += index * 4;
    MOZ_ASSERT(offset % 4 == 0, "Unaligned constant pool");
    MOZ_ASSERT(offset > 0 && offset < 0x10000, "Pool out of range");
    *load = 0xc2cc0000 + offset;
  }

  static void WritePoolGuard(js::jit::BufferOffset branch, uint32_t* dest,
                             js::jit::BufferOffset afterPool) {
    MOZ_ASSERT(branch.assigned());
    MOZ_ASSERT(afterPool.assigned());
    size_t branchOff = branch.getOffset();
    size_t afterPoolOff = afterPool.getOffset();
    MOZ_ASSERT(afterPoolOff > branchOff);
    uint32_t delta = afterPoolOff - branchOff;
    *dest = 0xb0bb0000 + delta;
  }

  static void WritePoolHeader(void* start, js::jit::Pool* p, bool isNatural) {
    MOZ_ASSERT(!isNatural, "Natural pool guards not implemented.");
    uint32_t* hdr = reinterpret_cast<uint32_t*>(start);
    *hdr = 0xffff0000 + p->getPoolSize();
  }

  static void PatchShortRangeBranchToVeneer(AsmBufWithPool* buffer,
                                            unsigned rangeIdx,
                                            js::jit::BufferOffset deadline,
                                            js::jit::BufferOffset veneer) {
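    // Short-range branches are registered by their deadline, which is the
    // branch offset plus BranchRange, so the branch itself sits BranchRange
    // bytes before the deadline.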
    size_t branchOff = deadline.getOffset() - BranchRange;
    size_t veneerOff = veneer.getOffset();
    uint32_t* branch = buffer->getInst(js::jit::BufferOffset(branchOff));

    MOZ_ASSERT((*branch & 0xffff0000) == 0xb1bb0000,
               "Expected short-range branch instruction");
    // Copy branch offset to veneer. A real instruction set would require
    // some adjustment of the label linked-list.
    *buffer->getInst(veneer) = 0xb2bb0000 | (*branch & 0xffff);
    MOZ_ASSERT(veneerOff > branchOff, "Veneer should follow branch");
    *branch = 0xb3bb0000 + (veneerOff - branchOff);
  }
};
}  // namespace

BEGIN_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools) {
  using js::jit::BufferOffset;

  AsmBufWithPool ab(/* guardSize= */ 1,
                    /* headerSize= */ 1,
                    /* instBufferAlign(unused)= */ 0,
                    /* poolMaxOffset= */ 17,
                    /* pcBias= */ 0,
                    /* alignFillInst= */ 0x11110000,
                    /* nopFillInst= */ 0xaaaa0000,
                    /* nopFill= */ 0);

  CHECK(ab.isAligned(16));
  CHECK_EQUAL(ab.size(), 0u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 0);
  CHECK(!ab.oom());
  CHECK(!ab.bail());

  // Each slice holds 5 instructions. Trigger a constant pool inside the slice.
  uint32_t poolLoad[] = {0xc0cc0000};
  uint32_t poolData[] = {0xdddd0000, 0xdddd0001, 0xdddd0002, 0xdddd0003};
  AsmBufWithPool::PoolEntry pe;
  BufferOffset load =
      ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(), 0u);
  CHECK_EQUAL(load.getOffset(), 0);

  // Pool hasn't been emitted yet. Load has been patched by
  // InsertIndexIntoTag.
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);

  // Expected layout:
  //
  //   0: load [pc+16]
  //   4: 0x22220001
  //   8: guard branch pc+12
  //  12: pool header
  //  16: poolData
  //  20: 0x22220002
  //
  ab.putInt(0x22220001);
  // One could argue that the pool should be flushed here since there is no
  // more room. However, the current implementation doesn't dump the pool
  // until asked to add more data:
  ab.putInt(0x22220002);

  CHECK_EQUAL(*ab.getInst(BufferOffset(0)), 0xc2cc0010u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(4)), 0x22220001u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(8)), 0xb0bb000cu);
  CHECK_EQUAL(*ab.getInst(BufferOffset(12)), 0xffff0004u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(16)), 0xdddd0000u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(20)), 0x22220002u);

  // allocEntry() overwrites the load instruction! Restore the original.
  poolLoad[0] = 0xc0cc0000;

  // Now try with load and pool data on separate slices.
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(), 1u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 24);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.
  ab.putInt(0x22220001);
  ab.putInt(0x22220002);
  CHECK_EQUAL(*ab.getInst(BufferOffset(24)), 0xc2cc0010u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(28)), 0x22220001u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(32)), 0xb0bb000cu);
  CHECK_EQUAL(*ab.getInst(BufferOffset(36)), 0xffff0004u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(40)), 0xdddd0000u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 0x22220002u);

  // Two adjacent loads to the same pool.
  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(), 2u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 48);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.

  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)(poolData + 1), &pe);
  CHECK_EQUAL(pe.index(), 3u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 52);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0001);  // Index into current pool.

  ab.putInt(0x22220005);

  CHECK_EQUAL(*ab.getInst(BufferOffset(48)), 0xc2cc0010u);  // load pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(52)), 0xc2cc0010u);  // load pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(56)),
              0xb0bb0010u);  // guard branch pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(60)), 0xffff0008u);  // header 8 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(64)), 0xdddd0000u);  // datum 1.
  CHECK_EQUAL(*ab.getInst(BufferOffset(68)), 0xdddd0001u);  // datum 2.
  CHECK_EQUAL(*ab.getInst(BufferOffset(72)),
              0x22220005u);  // putInt(0x22220005)

  // Two loads as above, but the first load has an 8-byte pool entry, and the
  // second load wouldn't be able to reach its data. This must produce two
  // pools.
  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 2, (uint8_t*)poolLoad, (uint8_t*)(poolData + 2), &pe);
  CHECK_EQUAL(pe.index(), 4u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 76);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.

  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(),
              6u);  // Global pool entry index. (Prev one is two indexes).
  CHECK_EQUAL(load.getOffset(), 96);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.

  CHECK_EQUAL(*ab.getInst(BufferOffset(76)), 0xc2cc000cu);  // load pc+12.
  CHECK_EQUAL(*ab.getInst(BufferOffset(80)),
              0xb0bb0010u);  // guard branch pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(84)), 0xffff0008u);  // header 8 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(88)), 0xdddd0002u);  // datum 1.
  CHECK_EQUAL(*ab.getInst(BufferOffset(92)), 0xdddd0003u);  // datum 2.

  // Second pool is not flushed yet, and there is room for one instruction
  // after the load. Test the keep-together feature.
  ab.enterNoPool(2);
  ab.putInt(0x22220006);
  ab.putInt(0x22220007);
  ab.leaveNoPool();

  CHECK_EQUAL(*ab.getInst(BufferOffset(96)), 0xc2cc000cu);  // load pc+12.
  CHECK_EQUAL(*ab.getInst(BufferOffset(100)),
              0xb0bb000cu);  // guard branch pc+12.
  CHECK_EQUAL(*ab.getInst(BufferOffset(104)), 0xffff0004u);  // header 4 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(108)), 0xdddd0000u);  // datum 1.
  CHECK_EQUAL(*ab.getInst(BufferOffset(112)), 0x22220006u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(116)), 0x22220007u);

  return true;
}
END_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools)

BEGIN_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools_ShortBranch) {
  using js::jit::BufferOffset;

  AsmBufWithPool ab(/* guardSize= */ 1,
                    /* headerSize= */ 1,
                    /* instBufferAlign(unused)= */ 0,
                    /* poolMaxOffset= */ 17,
                    /* pcBias= */ 0,
                    /* alignFillInst= */ 0x11110000,
                    /* nopFillInst= */ 0xaaaa0000,
                    /* nopFill= */ 0);

  // Insert short-range branch.
  BufferOffset br1 = ab.putInt(0xb1bb00cc);
  ab.registerBranchDeadline(
      1, BufferOffset(br1.getOffset() + TestAssembler::BranchRange));
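  // The deadline is the farthest offset the short-range branch can reach;
  // the buffer must emit a veneer before the code grows past it.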
  ab.putInt(0x22220001);
  BufferOffset off = ab.putInt(0x22220002);
  ab.registerBranchDeadline(
      1, BufferOffset(off.getOffset() + TestAssembler::BranchRange));
  ab.putInt(0x22220003);
  ab.putInt(0x22220004);

  // Second short-range branch that will be swept up by hysteresis.
  BufferOffset br2 = ab.putInt(0xb1bb0d2d);
  ab.registerBranchDeadline(
      1, BufferOffset(br2.getOffset() + TestAssembler::BranchRange));

  // Neither branch should have been patched yet.
  CHECK_EQUAL(*ab.getInst(br1), 0xb1bb00cc);
  CHECK_EQUAL(*ab.getInst(br2), 0xb1bb0d2d);

  // Cancel one of the pending branches.
  // This is what will happen to most branches as they are bound before
  // expiring by Assembler::bind().
  ab.unregisterBranchDeadline(
      1, BufferOffset(off.getOffset() + TestAssembler::BranchRange));

  off = ab.putInt(0x22220006);
  // Here we may or may not have patched the branch yet, but it is inevitable
  // now:
  //
  //  0: br1 pc+36
  //  4: 0x22220001
  //  8: 0x22220002 (unpatched)
  // 12: 0x22220003
  // 16: 0x22220004
  // 20: br2 pc+20
  // 24: 0x22220006
  CHECK_EQUAL(off.getOffset(), 24);
  // 28: guard branch pc+16
  // 32: pool header
  // 36: veneer1
  // 40: veneer2
  // 44: 0x22220007

  off = ab.putInt(0x22220007);
  CHECK_EQUAL(off.getOffset(), 44);

  // Now the branches must have been patched.
  CHECK_EQUAL(*ab.getInst(br1), 0xb3bb0000 + 36);  // br1 pc+36 (patched)
  CHECK_EQUAL(*ab.getInst(BufferOffset(8)),
              0x22220002u);                        // 0x22220002 (unpatched)
  CHECK_EQUAL(*ab.getInst(br2), 0xb3bb0000 + 20);  // br2 pc+20 (patched)
  CHECK_EQUAL(*ab.getInst(BufferOffset(28)), 0xb0bb0010u);  // br pc+16 (guard)
  CHECK_EQUAL(*ab.getInst(BufferOffset(32)),
              0xffff0000u);  // pool header 0 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(36)),
              0xb2bb00ccu);  // veneer1 w/ original 'cc' offset.
  CHECK_EQUAL(*ab.getInst(BufferOffset(40)),
              0xb2bb0d2du);  // veneer2 w/ original 'd2d' offset.
  CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 0x22220007u);

  return true;
}
END_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools_ShortBranch)

// Test that everything is put together correctly in the ARM64 assembler.
#if defined(JS_CODEGEN_ARM64)

#include "jit/MacroAssembler-inl.h"

BEGIN_TEST(testAssemblerBuffer_ARM64) {
  using namespace js::jit;

  js::LifoAlloc lifo(4096);
  TempAllocator alloc(&lifo);
  JitContext jc(cx, &alloc);
  cx->runtime()->getJitRuntime(cx);
  MacroAssembler masm;

  // Branches to an unbound label.
  Label lab1;
  masm.branch(Assembler::Equal, &lab1);
  masm.branch(Assembler::LessThan, &lab1);
  masm.bind(&lab1);
  masm.branch(Assembler::Equal, &lab1);

  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(0))->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(2) | vixl::eq);
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(4))->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(1) | vixl::lt);
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(8))->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(0) | vixl::eq);

  // Branches can reach the label, but the linked list of uses needs to be
  // rearranged. The final conditional branch cannot reach the first branch.
  Label lab2a;
  Label lab2b;
  masm.bind(&lab2a);
  masm.B(&lab2b);
  // Generate 1,100,000 bytes of NOPs.
  for (unsigned n = 0; n < 1100000; n += 4) masm.Nop();
  masm.branch(Assembler::LessThan, &lab2b);
  masm.bind(&lab2b);
  CHECK_EQUAL(
      masm.getInstructionAt(BufferOffset(lab2a.offset()))->InstructionBits(),
      vixl::B | vixl::Assembler::ImmUncondBranch(1100000 / 4 + 2));
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(lab2b.offset() - 4))
                  ->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(1) | vixl::lt);

  // Generate a conditional branch that can't reach its label.
  Label lab3a;
  Label lab3b;
  masm.bind(&lab3a);
  masm.branch(Assembler::LessThan, &lab3b);
  for (unsigned n = 0; n < 1100000; n += 4) masm.Nop();
  masm.bind(&lab3b);
  masm.B(&lab3a);
  Instruction* bcond3 = masm.getInstructionAt(BufferOffset(lab3a.offset()));
  CHECK_EQUAL(bcond3->BranchType(), vixl::CondBranchType);
  ptrdiff_t delta = bcond3->ImmPCRawOffset() * 4;
  Instruction* veneer =
      masm.getInstructionAt(BufferOffset(lab3a.offset() + delta));
  CHECK_EQUAL(veneer->BranchType(), vixl::UncondBranchType);
  delta += veneer->ImmPCRawOffset() * 4;
  CHECK_EQUAL(delta, lab3b.offset() - lab3a.offset());
  Instruction* b3 = masm.getInstructionAt(BufferOffset(lab3b.offset()));
  CHECK_EQUAL(b3->BranchType(), vixl::UncondBranchType);
  CHECK_EQUAL(4 * b3->ImmPCRawOffset(), -delta);

  return true;
}
END_TEST(testAssemblerBuffer_ARM64)
#endif /* JS_CODEGEN_ARM64 */
587