/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef vm_TypedArrayObject_inl_h
#define vm_TypedArrayObject_inl_h

/* Utilities and common inline code for TypedArray */

#include "vm/TypedArrayObject.h"

#include "mozilla/Assertions.h"
#include "mozilla/Compiler.h"
#include "mozilla/FloatingPoint.h"

#include <algorithm>
#include <type_traits>

#include "jsnum.h"

#include "builtin/Array.h"
#include "gc/Zone.h"
#include "jit/AtomicOperations.h"
#include "js/Conversions.h"
#include "js/ScalarType.h"  // js::Scalar::Type
#include "js/Value.h"
#include "util/DifferentialTesting.h"
#include "util/Memory.h"
#include "vm/BigIntType.h"
#include "vm/JSContext.h"
#include "vm/NativeObject.h"
#include "vm/Uint8Clamped.h"

#include "gc/ObjectKind-inl.h"
#include "vm/ObjectOperations-inl.h"
namespace js {

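// ConvertNumber converts |src| to the element type |To|. The specializations
// below apply the ECMAScript ToInt8/ToUint8/.../ToUint64 conversions (or
// clamping, for uint8_clamped) when narrowing from a floating-point source;
// the generic definition further down handles the remaining cases with a
// plain cast. Illustrative examples: ConvertNumber<int32_t, double>(3.7)
// yields 3 via JS::ToInt32, and ConvertNumber<uint8_clamped, double>(300.0)
// clamps to 255.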
template <typename To, typename From>
inline To ConvertNumber(From src);

template <>
inline int8_t ConvertNumber<int8_t, float>(float src) {
  return JS::ToInt8(src);
}

template <>
inline uint8_t ConvertNumber<uint8_t, float>(float src) {
  return JS::ToUint8(src);
}

template <>
inline uint8_clamped ConvertNumber<uint8_clamped, float>(float src) {
  return uint8_clamped(src);
}

template <>
inline int16_t ConvertNumber<int16_t, float>(float src) {
  return JS::ToInt16(src);
}

template <>
inline uint16_t ConvertNumber<uint16_t, float>(float src) {
  return JS::ToUint16(src);
}

template <>
inline int32_t ConvertNumber<int32_t, float>(float src) {
  return JS::ToInt32(src);
}

template <>
inline uint32_t ConvertNumber<uint32_t, float>(float src) {
  return JS::ToUint32(src);
}

template <>
inline int64_t ConvertNumber<int64_t, float>(float src) {
  return JS::ToInt64(src);
}

template <>
inline uint64_t ConvertNumber<uint64_t, float>(float src) {
  return JS::ToUint64(src);
}

template <>
inline int8_t ConvertNumber<int8_t, double>(double src) {
  return JS::ToInt8(src);
}

template <>
inline uint8_t ConvertNumber<uint8_t, double>(double src) {
  return JS::ToUint8(src);
}

template <>
inline uint8_clamped ConvertNumber<uint8_clamped, double>(double src) {
  return uint8_clamped(src);
}

template <>
inline int16_t ConvertNumber<int16_t, double>(double src) {
  return JS::ToInt16(src);
}

template <>
inline uint16_t ConvertNumber<uint16_t, double>(double src) {
  return JS::ToUint16(src);
}

template <>
inline int32_t ConvertNumber<int32_t, double>(double src) {
  return JS::ToInt32(src);
}

template <>
inline uint32_t ConvertNumber<uint32_t, double>(double src) {
  return JS::ToUint32(src);
}

template <>
inline int64_t ConvertNumber<int64_t, double>(double src) {
  return JS::ToInt64(src);
}

template <>
inline uint64_t ConvertNumber<uint64_t, double>(double src) {
  return JS::ToUint64(src);
}

template <typename To, typename From>
inline To ConvertNumber(From src) {
  static_assert(
      !std::is_floating_point_v<From> ||
          (std::is_floating_point_v<From> && std::is_floating_point_v<To>),
      "conversion from floating point to int should have been handled by "
      "specializations above");
  return To(src);
}

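// TypeIDOfType maps a native element type to its Scalar::Type id and to the
// JSProtoKey of the matching typed array constructor, e.g.
// TypeIDOfType<int32_t>::id == Scalar::Int32 and
// TypeIDOfType<int32_t>::protoKey == JSProto_Int32Array.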
template <typename NativeType>
struct TypeIDOfType;
template <>
struct TypeIDOfType<int8_t> {
  static const Scalar::Type id = Scalar::Int8;
  static const JSProtoKey protoKey = JSProto_Int8Array;
};
template <>
struct TypeIDOfType<uint8_t> {
  static const Scalar::Type id = Scalar::Uint8;
  static const JSProtoKey protoKey = JSProto_Uint8Array;
};
template <>
struct TypeIDOfType<int16_t> {
  static const Scalar::Type id = Scalar::Int16;
  static const JSProtoKey protoKey = JSProto_Int16Array;
};
template <>
struct TypeIDOfType<uint16_t> {
  static const Scalar::Type id = Scalar::Uint16;
  static const JSProtoKey protoKey = JSProto_Uint16Array;
};
template <>
struct TypeIDOfType<int32_t> {
  static const Scalar::Type id = Scalar::Int32;
  static const JSProtoKey protoKey = JSProto_Int32Array;
};
template <>
struct TypeIDOfType<uint32_t> {
  static const Scalar::Type id = Scalar::Uint32;
  static const JSProtoKey protoKey = JSProto_Uint32Array;
};
template <>
struct TypeIDOfType<int64_t> {
  static const Scalar::Type id = Scalar::BigInt64;
  static const JSProtoKey protoKey = JSProto_BigInt64Array;
};
template <>
struct TypeIDOfType<uint64_t> {
  static const Scalar::Type id = Scalar::BigUint64;
  static const JSProtoKey protoKey = JSProto_BigUint64Array;
};
template <>
struct TypeIDOfType<float> {
  static const Scalar::Type id = Scalar::Float32;
  static const JSProtoKey protoKey = JSProto_Float32Array;
};
template <>
struct TypeIDOfType<double> {
  static const Scalar::Type id = Scalar::Float64;
  static const JSProtoKey protoKey = JSProto_Float64Array;
};
template <>
struct TypeIDOfType<uint8_clamped> {
  static const Scalar::Type id = Scalar::Uint8Clamped;
  static const JSProtoKey protoKey = JSProto_Uint8ClampedArray;
};

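// SharedOps routes element accesses through the jit::AtomicOperations
// *SafeWhenRacy primitives, so it is safe to use when the underlying buffer
// may be a SharedArrayBuffer that other threads access concurrently.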
class SharedOps {
 public:
  template <typename T>
  static T load(SharedMem<T*> addr) {
    return js::jit::AtomicOperations::loadSafeWhenRacy(addr);
  }

  template <typename T>
  static void store(SharedMem<T*> addr, T value) {
    js::jit::AtomicOperations::storeSafeWhenRacy(addr, value);
  }

  template <typename T>
  static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    js::jit::AtomicOperations::memcpySafeWhenRacy(dest, src, size);
  }

  template <typename T>
  static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    js::jit::AtomicOperations::memmoveSafeWhenRacy(dest, src, size);
  }

  template <typename T>
  static void podCopy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
    js::jit::AtomicOperations::podCopySafeWhenRacy(dest, src, nelem);
  }

  template <typename T>
  static void podMove(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
    js::jit::AtomicOperations::podMoveSafeWhenRacy(dest, src, nelem);
  }

  static SharedMem<void*> extract(TypedArrayObject* obj) {
    return obj->dataPointerEither();
  }
};

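// UnsharedOps is the non-shared counterpart: it assumes the buffer is not
// shared memory and uses ordinary loads, stores, and memcpy/memmove.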
class UnsharedOps {
 public:
  template <typename T>
  static T load(SharedMem<T*> addr) {
    return *addr.unwrapUnshared();
  }

  template <typename T>
  static void store(SharedMem<T*> addr, T value) {
    *addr.unwrapUnshared() = value;
  }

  template <typename T>
  static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    ::memcpy(dest.unwrapUnshared(), src.unwrapUnshared(), size);
  }

  template <typename T>
  static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    ::memmove(dest.unwrapUnshared(), src.unwrapUnshared(), size);
  }

  template <typename T>
  static void podCopy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
    // std::copy_n better matches the argument values/types of this
    // function, but as noted below it allows the input/output ranges to
    // overlap. std::copy does not, so use it so the compiler has extra
    // ability to optimize.
    const auto* first = src.unwrapUnshared();
    const auto* last = first + nelem;
    auto* result = dest.unwrapUnshared();
    std::copy(first, last, result);
  }

  template <typename T>
  static void podMove(SharedMem<T*> dest, SharedMem<T*> src, size_t n) {
    // std::copy_n copies from |src| to |dest| starting from |src|, so
    // input/output ranges *may* permissibly overlap, as this function
    // allows.
    const auto* start = src.unwrapUnshared();
    auto* result = dest.unwrapUnshared();
    std::copy_n(start, n, result);
  }

  static SharedMem<void*> extract(TypedArrayObject* obj) {
    return SharedMem<void*>::unshared(obj->dataPointerUnshared());
  }
};

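// ElementSpecific<T, Ops> implements the element-wise copy and conversion
// operations for typed arrays with element type T, using Ops (SharedOps or
// UnsharedOps) for the raw memory accesses. Illustrative instantiation:
// ElementSpecific<int32_t, UnsharedOps>::setFromTypedArray(target, source, 0).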
template <typename T, typename Ops>
class ElementSpecific {
 public:
  /*
   * Copy |source|'s elements into |target|, starting at |target[offset]|.
   * Act as if the assignments occurred from a fresh copy of |source|, in
   * case the two memory ranges overlap.
   */
  static bool setFromTypedArray(Handle<TypedArrayObject*> target,
                                Handle<TypedArrayObject*> source,
                                size_t offset) {
    // WARNING: |source| may be an unwrapped typed array from a different
    // compartment. Proceed with caution!

    MOZ_ASSERT(TypeIDOfType<T>::id == target->type(),
               "calling wrong setFromTypedArray specialization");
    MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
    MOZ_ASSERT(!source->hasDetachedBuffer(), "source isn't detached");

    MOZ_ASSERT(offset <= target->length());
    MOZ_ASSERT(source->length() <= target->length() - offset);

    if (TypedArrayObject::sameBuffer(target, source)) {
      return setFromOverlappingTypedArray(target, source, offset);
    }

    SharedMem<T*> dest =
        target->dataPointerEither().template cast<T*>() + offset;
    size_t count = source->length();

    if (source->type() == target->type()) {
      Ops::podCopy(dest, source->dataPointerEither().template cast<T*>(),
                   count);
      return true;
    }

    SharedMem<void*> data = Ops::extract(source);
    switch (source->type()) {
      case Scalar::Int8: {
        SharedMem<int8_t*> src = data.cast<int8_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Uint8:
      case Scalar::Uint8Clamped: {
        SharedMem<uint8_t*> src = data.cast<uint8_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Int16: {
        SharedMem<int16_t*> src = data.cast<int16_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Uint16: {
        SharedMem<uint16_t*> src = data.cast<uint16_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Int32: {
        SharedMem<int32_t*> src = data.cast<int32_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Uint32: {
        SharedMem<uint32_t*> src = data.cast<uint32_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::BigInt64: {
        SharedMem<int64_t*> src = data.cast<int64_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::BigUint64: {
        SharedMem<uint64_t*> src = data.cast<uint64_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Float32: {
        SharedMem<float*> src = data.cast<float*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Float64: {
        SharedMem<double*> src = data.cast<double*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      default:
        MOZ_CRASH("setFromTypedArray with a typed array with bogus type");
    }

    return true;
  }

  /*
   * Copy the elements |source[0]| up to, but not including, |source[len]|
   * into the typed array |target|, starting at index |offset|. |source| must
   * not be a typed array.
   */
  static bool setFromNonTypedArray(JSContext* cx,
                                   Handle<TypedArrayObject*> target,
                                   HandleObject source, size_t len,
                                   size_t offset = 0) {
    MOZ_ASSERT(target->type() == TypeIDOfType<T>::id,
               "target type and NativeType must match");
    MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
    MOZ_ASSERT(!source->is<TypedArrayObject>(),
               "use setFromTypedArray instead of this method");

    size_t i = 0;
    if (source->is<NativeObject>()) {
      // Attempt fast-path infallible conversion of dense elements up to
      // the first potentially side-effectful lookup or conversion.
      size_t bound = std::min<size_t>(
          source->as<NativeObject>().getDenseInitializedLength(), len);

      SharedMem<T*> dest =
          target->dataPointerEither().template cast<T*>() + offset;

      MOZ_ASSERT(!canConvertInfallibly(MagicValue(JS_ELEMENTS_HOLE)),
                 "the following loop must abort on holes");

      const Value* srcValues = source->as<NativeObject>().getDenseElements();
      for (; i < bound; i++) {
        if (!canConvertInfallibly(srcValues[i])) {
          break;
        }
        Ops::store(dest + i, infallibleValueToNative(srcValues[i]));
      }
      if (i == len) {
        return true;
      }
    }

    // Convert and copy any remaining elements generically.
    RootedValue v(cx);
    for (; i < len; i++) {
      if constexpr (sizeof(i) == sizeof(uint32_t)) {
        if (!GetElement(cx, source, source, uint32_t(i), &v)) {
          return false;
        }
      } else {
        if (!GetElementLargeIndex(cx, source, source, i, &v)) {
          return false;
        }
      }

      T n;
      if (!valueToNative(cx, v, &n)) {
        return false;
      }

      len = std::min<size_t>(len, target->length());
      if (i >= len) {
        break;
      }

      // Compute every iteration in case getElement/valueToNative
      // detaches the underlying array buffer or GC moves the data.
      SharedMem<T*> dest =
          target->dataPointerEither().template cast<T*>() + offset + i;
      Ops::store(dest, n);
    }

    return true;
  }


  /*
   * Copy |source| into the typed array |target|.
   */
  static bool initFromIterablePackedArray(JSContext* cx,
                                          Handle<TypedArrayObject*> target,
                                          HandleArrayObject source) {
    MOZ_ASSERT(target->type() == TypeIDOfType<T>::id,
               "target type and NativeType must match");
    MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
    MOZ_ASSERT(IsPackedArray(source), "source array must be packed");
    MOZ_ASSERT(source->getDenseInitializedLength() <= target->length());

    size_t len = source->getDenseInitializedLength();
    size_t i = 0;

    // Attempt fast-path infallible conversion of dense elements up to the
    // first potentially side-effectful conversion.

    SharedMem<T*> dest = target->dataPointerEither().template cast<T*>();

    const Value* srcValues = source->getDenseElements();
    for (; i < len; i++) {
      if (!canConvertInfallibly(srcValues[i])) {
        break;
      }
      Ops::store(dest + i, infallibleValueToNative(srcValues[i]));
    }
    if (i == len) {
      return true;
    }

    // Convert any remaining elements by first collecting them into a
    // temporary list, and then copying them into the typed array.
    RootedValueVector values(cx);
    if (!values.append(srcValues + i, len - i)) {
      return false;
    }

    RootedValue v(cx);
    for (size_t j = 0; j < values.length(); i++, j++) {
      v = values[j];

      T n;
      if (!valueToNative(cx, v, &n)) {
        return false;
      }

      // |target| is a newly allocated typed array and not yet visible to
      // content script, so valueToNative can't detach the underlying
      // buffer.
      MOZ_ASSERT(i < target->length());

      // Compute every iteration in case GC moves the data.
      SharedMem<T*> newDest = target->dataPointerEither().template cast<T*>();
      Ops::store(newDest + i, n);
    }

    return true;
  }

 private:
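  // Slow path for setFromTypedArray when |source| and |target| share the same
  // buffer. If the element types differ, the source data is first copied into
  // a temporary heap allocation so the element-wise conversion never reads
  // memory that the loop has already overwritten.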
  static bool setFromOverlappingTypedArray(Handle<TypedArrayObject*> target,
                                           Handle<TypedArrayObject*> source,
                                           size_t offset) {
    // WARNING: |source| may be an unwrapped typed array from a different
    // compartment. Proceed with caution!

    MOZ_ASSERT(TypeIDOfType<T>::id == target->type(),
               "calling wrong setFromTypedArray specialization");
    MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
    MOZ_ASSERT(!source->hasDetachedBuffer(), "source isn't detached");
    MOZ_ASSERT(TypedArrayObject::sameBuffer(target, source),
               "the provided arrays don't actually overlap, so it's "
               "undesirable to use this method");

    MOZ_ASSERT(offset <= target->length());
    MOZ_ASSERT(source->length() <= target->length() - offset);

    SharedMem<T*> dest =
        target->dataPointerEither().template cast<T*>() + offset;
    size_t len = source->length();

    if (source->type() == target->type()) {
      SharedMem<T*> src = source->dataPointerEither().template cast<T*>();
      Ops::podMove(dest, src, len);
      return true;
    }

    // Copy |source| in case it overlaps the target elements being set.
    size_t sourceByteLen = len * source->bytesPerElement();
    void* data = target->zone()->template pod_malloc<uint8_t>(sourceByteLen);
    if (!data) {
      return false;
    }
    Ops::memcpy(SharedMem<void*>::unshared(data), source->dataPointerEither(),
                sourceByteLen);

    switch (source->type()) {
      case Scalar::Int8: {
        int8_t* src = static_cast<int8_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Uint8:
      case Scalar::Uint8Clamped: {
        uint8_t* src = static_cast<uint8_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Int16: {
        int16_t* src = static_cast<int16_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Uint16: {
        uint16_t* src = static_cast<uint16_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Int32: {
        int32_t* src = static_cast<int32_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Uint32: {
        uint32_t* src = static_cast<uint32_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::BigInt64: {
        int64_t* src = static_cast<int64_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::BigUint64: {
        uint64_t* src = static_cast<uint64_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Float32: {
        float* src = static_cast<float*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Float64: {
        double* src = static_cast<double*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      default:
        MOZ_CRASH(
            "setFromOverlappingTypedArray with a typed array with bogus type");
    }

    js_free(data);
    return true;
  }

  static bool canConvertInfallibly(const Value& v) {
    if (TypeIDOfType<T>::id == Scalar::BigInt64 ||
        TypeIDOfType<T>::id == Scalar::BigUint64) {
      // Numbers, Null, Undefined, and Symbols throw a TypeError. Strings may
      // OOM and Objects may have side-effects.
      return v.isBigInt() || v.isBoolean();
    }
    // BigInts and Symbols throw a TypeError. Strings may OOM and Objects may
    // have side-effects.
    return v.isNumber() || v.isBoolean() || v.isNull() || v.isUndefined();
  }

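  // Convert |v| to the native element type without any observable side
  // effects. Only valid for values accepted by canConvertInfallibly.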
  static T infallibleValueToNative(const Value& v) {
    if (TypeIDOfType<T>::id == Scalar::BigInt64) {
      if (v.isBigInt()) {
        return T(BigInt::toInt64(v.toBigInt()));
      }
      return T(v.toBoolean());
    }
    if (TypeIDOfType<T>::id == Scalar::BigUint64) {
      if (v.isBigInt()) {
        return T(BigInt::toUint64(v.toBigInt()));
      }
      return T(v.toBoolean());
    }
    if (v.isInt32()) {
      return T(v.toInt32());
    }
    if (v.isDouble()) {
      return doubleToNative(v.toDouble());
    }
    if (v.isBoolean()) {
      return T(v.toBoolean());
    }
    if (v.isNull()) {
      return T(0);
    }

    MOZ_ASSERT(v.isUndefined());
    return TypeIsFloatingPoint<T>() ? T(JS::GenericNaN()) : T(0);
  }

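  // Fallible conversion for the remaining value kinds: BigInt64/BigUint64
  // element types go through ToBigInt64/ToBigUint64; for everything else the
  // value is coerced to a double (strings directly via StringToNumber, other
  // values via ToNumber) and then narrowed by doubleToNative.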
  static bool valueToNative(JSContext* cx, HandleValue v, T* result) {
    MOZ_ASSERT(!v.isMagic());

    if (MOZ_LIKELY(canConvertInfallibly(v))) {
      *result = infallibleValueToNative(v);
      return true;
    }

    if (std::is_same_v<T, int64_t>) {
      JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigInt64(cx, v));
      return true;
    }

    if (std::is_same_v<T, uint64_t>) {
      JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigUint64(cx, v));
      return true;
    }

    double d;
    MOZ_ASSERT(v.isString() || v.isObject() || v.isSymbol() || v.isBigInt());
    if (!(v.isString() ? StringToNumber(cx, v.toString(), &d)
                       : ToNumber(cx, v, &d))) {
      return false;
    }

    *result = doubleToNative(d);
    return true;
  }

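  // Narrow a double to the native element type with the same semantics as
  // ConvertNumber: floating-point targets keep the value (modulo NaN
  // canonicalization under differential testing), NaN becomes 0 for integer
  // targets, Uint8Clamped clamps, and the remaining integer targets use
  // ToUint32/ToInt32 modular semantics. For example, for uint8_t a source
  // value of 3.7 stores 3, and NaN stores 0.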
  static T doubleToNative(double d) {
    if (TypeIsFloatingPoint<T>()) {
      // The JS spec doesn't distinguish among different NaN values, and
      // it deliberately doesn't specify the bit pattern written to a
      // typed array when NaN is written into it. This bit-pattern
      // inconsistency could confuse differential testing, so always
      // canonicalize NaN values in differential testing.
      if (js::SupportDifferentialTesting()) {
        d = JS::CanonicalizeNaN(d);
      }
      return T(d);
    }
    if (MOZ_UNLIKELY(mozilla::IsNaN(d))) {
      return T(0);
    }
    if (TypeIDOfType<T>::id == Scalar::Uint8Clamped) {
      return T(d);
    }
    if (TypeIsUnsigned<T>()) {
      return T(JS::ToUint32(d));
    }
    return T(JS::ToInt32(d));
  }
};

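// Pick the GC allocation kind for a typed array whose (small) buffer is
// stored inline in the object: round the byte length up to whole Value-sized
// slots (treating zero bytes as one) and reserve FIXED_DATA_START slots of
// header in front. For example, with 8-byte Values a 12-byte inline buffer
// needs two data slots.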
/* static */ gc::AllocKind js::TypedArrayObject::AllocKindForLazyBuffer(
    size_t nbytes) {
  MOZ_ASSERT(nbytes <= INLINE_BUFFER_LIMIT);
  if (nbytes == 0) {
    nbytes += sizeof(uint8_t);
  }
  size_t dataSlots = AlignBytes(nbytes, sizeof(Value)) / sizeof(Value);
  MOZ_ASSERT(nbytes <= dataSlots * sizeof(Value));
  return gc::GetGCObjectKind(FIXED_DATA_START + dataSlots);
}

}  // namespace js

#endif  // vm_TypedArrayObject_inl_h