// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

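// Convenience shorthand: in this file, "__" emits code through the local
// MacroAssembler instance named |masm|.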
#define __ masm.

UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  size_t allocated = 0;
  byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
                      CodeObjectRequired::kNo);
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Move double input into registers.
  {
    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
    __ sqrtsd(xmm0, xmm0);
    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
    // Load the result into x87 st(0); the IA-32 C calling convention returns
    // doubles there.
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(isolate, &desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)
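// In these helpers, "__" emits through the |masm| argument rather than a
// local MacroAssembler instance.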

enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };

// Expects registers:
// esi - source, aligned if alignment == MOVE_ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
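// Copies all but the final (count & 0xF) bytes; the caller handles those at
// |move_last_15|.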
void MemMoveEmitMainLoop(MacroAssembler* masm,
                         Label* move_last_15,
                         Direction direction,
                         Alignment alignment) {
  Register src = esi;
  Register dst = edi;
  Register count = ecx;
  Register loop_count = edx;
  Label loop, move_last_31, move_last_63;
  __ cmp(loop_count, 0);
  __ j(equal, &move_last_63);
  __ bind(&loop);
  // Main loop. Copy in 64 byte chunks.
  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
  if (direction == FORWARD) __ add(src, Immediate(0x40));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  __ movdqa(Operand(dst, 0x20), xmm2);
  __ movdqa(Operand(dst, 0x30), xmm3);
  if (direction == FORWARD) __ add(dst, Immediate(0x40));
  __ dec(loop_count);
  __ j(not_zero, &loop);
  // At most 63 bytes left to copy.
  __ bind(&move_last_63);
  __ test(count, Immediate(0x20));
  __ j(zero, &move_last_31);
  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  if (direction == FORWARD) __ add(src, Immediate(0x20));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  if (direction == FORWARD) __ add(dst, Immediate(0x20));
  // At most 31 bytes left to copy.
  __ bind(&move_last_31);
  __ test(count, Immediate(0x10));
  __ j(zero, move_last_15);
  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
  if (direction == FORWARD) __ add(src, Immediate(0x10));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
  __ movdqa(Operand(dst, 0), xmm0);
  if (direction == FORWARD) __ add(dst, Immediate(0x10));
}


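// Shared epilogue: restores esi and edi pushed in CreateMemMoveFunction's
// prologue, then returns to the C caller.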
void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.


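// Converts a bound Label into an absolute address within the code buffer, so
// that jump-table entries can be emitted as raw 32-bit pointers.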
class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};


MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
  size_t allocated = 0;
  byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
                      CodeObjectRequired::kNo);
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // The 32-bit C calling convention passes all arguments on the stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address
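  //
  // The generated stub is returned as a MemMoveFunction, whose C signature is
  // roughly void (*)(void* dest, const void* src, size_t size).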

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  // When copying up to this many bytes, use special "small" handlers.
  const size_t kSmallCopySize = 8;
  // When copying up to this many bytes, use special "medium" handlers.
  const size_t kMediumCopySize = 63;
  // When the non-overlapping region of src and dst is smaller than this,
  // use a more careful (slightly slower) implementation.
  const size_t kMinMoveDistance = 16;
  // Note that these values are dictated by the implementation below;
  // do not just change them and hope things will work!

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  Register loop_count = edx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

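  // Hint the CPU to prefetch the start of the source region.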
  __ prefetch(Operand(src, 0), 1);
  __ cmp(count, kSmallCopySize);
  __ j(below_equal, &small_size);
  __ cmp(count, kMediumCopySize);
  __ j(below_equal, &medium_size);
  __ cmp(dst, src);
  __ j(above, &backward);

  {
    // |dst| is a lower address than |src|. Copy front-to-back.
    Label unaligned_source, move_last_15, skip_last_move;
    __ mov(eax, src);
    __ sub(eax, dst);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &forward_much_overlap);
    // Copy first 16 bytes.
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    // Determine distance to alignment: 16 - (dst & 0xF).
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ neg(edx);
    __ add(edx, Immediate(16));
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
    // At most 15 bytes left to copy. Copy the last 16 bytes of the region,
    // overlapping bytes that were already copied.
    __ bind(&move_last_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
    __ jmp(&move_last_15);

    // Less than kMinMoveDistance offset between dst and src.
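    // The writes of the fast path above could clobber source bytes that have
    // not been read yet, so align dst one byte at a time instead.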
    Label loop_until_aligned, last_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ mov_b(eax, Operand(src, 0));
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ dec(count);
    __ bind(&forward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
                        FORWARD, MOVE_UNALIGNED);
    __ bind(&last_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }

  {
    // |dst| is a higher address than |src|. Copy backwards.
    Label unaligned_source, move_first_15, skip_last_move;
    __ bind(&backward);
    // |dst| and |src| always point to the end of what's left to copy.
    __ add(dst, count);
    __ add(src, count);
    __ mov(eax, dst);
    __ sub(eax, src);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &backward_much_overlap);
    // Copy last 16 bytes.
    __ movdqu(xmm0, Operand(src, -0x10));
    __ movdqu(Operand(dst, -0x10), xmm0);
    // Find distance to alignment: dst & 0xF
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ sub(dst, edx);
    __ sub(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
    // At most 15 bytes left to copy. Copy the first 16 bytes of the region,
    // overlapping bytes that were already copied.
    __ bind(&move_first_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ sub(src, count);
    __ sub(dst, count);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
    __ jmp(&move_first_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, first_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ dec(src);
    __ dec(dst);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    __ dec(count);
    __ bind(&backward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
                        BACKWARD, MOVE_UNALIGNED);
    __ bind(&first_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    // Small/medium handlers expect dst/src to point to the beginning.
    __ sub(dst, count);
    __ sub(src, count);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }
  {
    // Special handlers for 9 <= copy_size < 64. No assumptions about
    // alignment or move distance, so all reads must be unaligned and
    // must happen before any writes.
    Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
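    // Each fA_B handler below copies a size in the range [A, B] bytes.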

    __ bind(&f9_16);
    __ movsd(xmm0, Operand(src, 0));
    __ movsd(xmm1, Operand(src, count, times_1, -8));
    __ movsd(Operand(dst, 0), xmm0);
    __ movsd(Operand(dst, count, times_1, -8), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f17_32);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f33_48);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f49_63);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, 0x20));
    __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, 0x20), xmm2);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
    MemMoveEmitPopAndReturn(&masm);

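    // Jump table with one entry per 16-byte size class.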
    __ bind(&medium_handlers);
    __ dd(conv.address(&f9_16));
    __ dd(conv.address(&f17_32));
    __ dd(conv.address(&f33_48));
    __ dd(conv.address(&f49_63));

    __ bind(&medium_size);  // Entry point into this block.
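    // Handler index is (count - 1) / 16:
    // 9-16 -> 0, 17-32 -> 1, 33-48 -> 2, 49-63 -> 3.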
    __ mov(eax, count);
    __ dec(eax);
    __ shr(eax, 4);
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(eax, 3);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
    __ jmp(eax);
  }
  {
    // Specialized copiers for copy_size <= 8 bytes.
    Label small_handlers, f0, f1, f2, f3, f4, f5_8;
    __ bind(&f0);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f1);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f2);
    __ mov_w(eax, Operand(src, 0));
    __ mov_w(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f3);
    __ mov_w(eax, Operand(src, 0));
    __ mov_b(edx, Operand(src, 2));
    __ mov_w(Operand(dst, 0), eax);
    __ mov_b(Operand(dst, 2), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f4);
    __ mov(eax, Operand(src, 0));
    __ mov(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f5_8);
    __ mov(eax, Operand(src, 0));
    __ mov(edx, Operand(src, count, times_1, -4));
    __ mov(Operand(dst, 0), eax);
    __ mov(Operand(dst, count, times_1, -4), edx);
    MemMoveEmitPopAndReturn(&masm);

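    // Jump table indexed directly by |count| (0..8); counts 5 through 8 all
    // share the f5_8 handler.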
    __ bind(&small_handlers);
    __ dd(conv.address(&f0));
    __ dd(conv.address(&f1));
    __ dd(conv.address(&f2));
    __ dd(conv.address(&f3));
    __ dd(conv.address(&f4));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));

    __ bind(&small_size);  // Entry point into this block.
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(count, 8);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
    __ jmp(eax);
  }

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(isolate, &desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32