1 /*========================== begin_copyright_notice ============================
2
3 Copyright (C) 2017-2021 Intel Corporation
4
5 SPDX-License-Identifier: MIT
6
7 ============================= end_copyright_notice ===========================*/
8
9 #include <cstring>
10 #include <iomanip>
11 #include <algorithm>
12 #include "common/ged_string_utils.h"
13 #include "xcoder/ged_ins.h"
14
15 using std::setw;
16 using std::setfill;
17 using std::hex;
18 using std::memset;
19 #if GED_VALIDATION_API
20 using std::sort;
21 using std::end;
22 using std::next;
23 using std::cout;
24 using std::endl;
25 using std::dec;
26 using std::left;
27 using std::stringstream;
28 #endif
29
#if ((!defined _WIN32) && ( !defined __STDC_LIB_EXT1__ ))
#include <errno.h>
#include <string.h>
#include <stdio.h>
typedef int errno_t;

// Fallback memcpy_s for toolchains that provide neither the Microsoft CRT nor a
// C11 Annex K implementation.
//
// Copies "count" bytes from src to dst, where numberOfElements is the size of
// the destination buffer in bytes.
//
// Returns 0 on success, EINVAL if either pointer is NULL, or ERANGE if the
// destination buffer is too small to hold "count" bytes. On a runtime-constraint
// violation the destination buffer (when valid) is zeroed, as required by
// C11 Annex K and matching the implementations this fallback substitutes for.
inline errno_t memcpy_s( void *dst, size_t numberOfElements, const void *src, size_t count )
{
    if( dst == NULL )
    {
        return EINVAL;
    }
    if( src == NULL )
    {
        memset( dst, 0, numberOfElements ); // Annex K: zero dst on constraint violation
        return EINVAL;
    }
    if( numberOfElements < count )
    {
        memset( dst, 0, numberOfElements ); // Annex K: zero dst on constraint violation
        return ERANGE;
    }
    memcpy( dst, src, count );
    return 0;
}
#endif
49
50
51 /*************************************************************************************************
52 * class GEDIns static data members
53 *************************************************************************************************/
54
55 const int64_t GEDIns::signExtendTable[] =
56 {
57 (int64_t)0xffffffffffffffff, // 0
58 (int64_t)0xfffffffffffffffe, // 1
59 (int64_t)0xfffffffffffffffc, // 2
60 (int64_t)0xfffffffffffffff8, // 3
61 (int64_t)0xfffffffffffffff0, // 4
62 (int64_t)0xffffffffffffffe0, // 5
63 (int64_t)0xffffffffffffffc0, // 6
64 (int64_t)0xffffffffffffff80, // 7
65 (int64_t)0xffffffffffffff00, // 8
66 (int64_t)0xfffffffffffffe00, // 9
67 (int64_t)0xfffffffffffffc00, // 10
68 (int64_t)0xfffffffffffff800, // 11
69 (int64_t)0xfffffffffffff000, // 12
70 (int64_t)0xffffffffffffe000, // 13
71 (int64_t)0xffffffffffffc000, // 14
72 (int64_t)0xffffffffffff8000, // 15
73 (int64_t)0xffffffffffff0000, // 16
74 (int64_t)0xfffffffffffe0000, // 17
75 (int64_t)0xfffffffffffc0000, // 18
76 (int64_t)0xfffffffffff80000, // 19
77 (int64_t)0xfffffffffff00000, // 20
78 (int64_t)0xffffffffffe00000, // 21
79 (int64_t)0xffffffffffc00000, // 22
80 (int64_t)0xffffffffff800000, // 23
81 (int64_t)0xffffffffff000000, // 24
82 (int64_t)0xfffffffffe000000, // 25
83 (int64_t)0xfffffffffc000000, // 26
84 (int64_t)0xfffffffff8000000, // 27
85 (int64_t)0xfffffffff0000000, // 28
86 (int64_t)0xffffffffe0000000, // 29
87 (int64_t)0xffffffffc0000000, // 30
88 (int64_t)0xffffffff80000000, // 31
89 (int64_t)0xffffffff00000000, // 32
90 (int64_t)0xfffffffe00000000, // 33
91 (int64_t)0xfffffffc00000000, // 34
92 (int64_t)0xfffffff800000000, // 35
93 (int64_t)0xfffffff000000000, // 36
94 (int64_t)0xffffffe000000000, // 37
95 (int64_t)0xffffffc000000000, // 38
96 (int64_t)0xffffff8000000000, // 39
97 (int64_t)0xffffff0000000000, // 40
98 (int64_t)0xfffffe0000000000, // 41
99 (int64_t)0xfffffc0000000000, // 42
100 (int64_t)0xfffff80000000000, // 43
101 (int64_t)0xfffff00000000000, // 44
102 (int64_t)0xffffe00000000000, // 45
103 (int64_t)0xffffc00000000000, // 46
104 (int64_t)0xffff800000000000, // 47
105 (int64_t)0xffff000000000000, // 48
106 (int64_t)0xfffe000000000000, // 49
107 (int64_t)0xfffc000000000000, // 50
108 (int64_t)0xfff8000000000000, // 51
109 (int64_t)0xfff0000000000000, // 52
110 (int64_t)0xffe0000000000000, // 53
111 (int64_t)0xffc0000000000000, // 54
112 (int64_t)0xff80000000000000, // 55
113 (int64_t)0xff00000000000000, // 56
114 (int64_t)0xfe00000000000000, // 57
115 (int64_t)0xfc00000000000000, // 58
116 (int64_t)0xf800000000000000, // 59
117 (int64_t)0xf000000000000000, // 60
118 (int64_t)0xe000000000000000, // 61
119 (int64_t)0xc000000000000000, // 62
120 (int64_t)0x8000000000000000, // 63
121 };
122
123
124 /*************************************************************************************************
125 * class GEDIns API functions
126 *************************************************************************************************/
127
Init(const uint8_t modelId,uint32_t opcode)128 GED_RETURN_VALUE GEDIns::Init(const /* GED_MODEL */ uint8_t modelId, /* GED_OPCODE */ uint32_t opcode)
129 {
130 GED_RETURN_VALUE ret = GED_RETURN_VALUE_SUCCESS;
131 if (modelId >= numOfSupportedModels)
132 {
133 ret = GED_RETURN_VALUE_INVALID_MODEL;
134 return ret;
135 }
136 _modelId = modelId;
137 _opcode = invalidOpcode;
138 _decodingTable = NULL;
139 ret = SetOpcode(opcode);
140 #if defined(GED_VALIDATE)
141 if (GED_RETURN_VALUE_OPCODE_NOT_SUPPORTED != ret)
142 {
143 GEDASSERT(invalidOpcode != _opcode);
144 GEDASSERT(NULL != _decodingTable);
145 }
146 #endif // GED_VALIDATE
147 return ret;
148 }
149
150
Decode(const uint8_t modelId,const unsigned char * rawBytes,const unsigned int size)151 GED_RETURN_VALUE GEDIns::Decode(const /* GED_MODEL */ uint8_t modelId, const unsigned char* rawBytes, const unsigned int size)
152 {
153 if (NULL == rawBytes)
154 {
155 return GED_RETURN_VALUE_NULL_POINTER;
156 }
157 if (modelId >= numOfSupportedModels)
158 {
159 return GED_RETURN_VALUE_INVALID_MODEL;
160 }
161 if (size < GED_COMPACT_INS_SIZE)
162 {
163 // Buffer isn't large enough to extract opcode and compact bit.
164 return GED_RETURN_VALUE_BUFFER_TOO_SHORT;
165 }
166 ClearStatus();
167 _modelId = modelId;
168 ExtractOpcode(rawBytes);
169 _decodingTable = GetCurrentModelData().opcodeTables[_opcode].nativeDecoding;
170 if (NULL == _decodingTable)
171 {
172 return GED_RETURN_VALUE_OPCODE_NOT_SUPPORTED;
173 }
174 ExtractCmptCtrl(rawBytes);
175 if (!IsCompactValid())
176 {
177 if (size < GED_NATIVE_INS_SIZE)
178 {
179 // Buffer isn't large enough for the given Native instruction.
180 return GED_RETURN_VALUE_BUFFER_TOO_SHORT;
181 }
182 }
183
184 GED_RETURN_VALUE ret = GED_RETURN_VALUE_SUCCESS;
185 if (IsCompactValid())
186 {
187 SetInstructionBytes(_compactBytes, rawBytes, size, GED_COMPACT_INS_SIZE);
188 ret = BuildNativeInsFromCompact(); // should always succeed, however this is not true, see Mantis 3755
189 if (GED_RETURN_VALUE_SUCCESS != ret) return ret;
190
191 // The API function IsCompact() checks the GED_INS_STATUS_COMPACT_ENCODED bit, so we must enforce the encoding restrictions
192 // and set the instruction's compact form as encoded.
193 ApplyCompactEncodingMasks(_compactBytes); // enforce the per-instruction encoding restrictions for the compact format
194
195 #if defined (GED_VALIDATE)
196 // TODO: Save the current instruction bytes, then call BuildNativeInsFromCompact again and compare the two
197 // to prevent field/padding conflicts in the XMLs.
198 #endif // GED_VALIDATE
199 }
200 else
201 {
202 SetInstructionBytes(_nativeBytes, rawBytes, size, GED_NATIVE_INS_SIZE);
203 SetNativeValid();
204
205 // The API function IsModified() checks the GED_INS_STATUS_NATIVE_ENCODED bit, so we must enforce the encoding restrictions
206 // and set the instruction's native format as encoded.
207 ApplyNativeEncodingMasks(); // enforce the per-instruction encoding restrictions for the native format
208 }
209 GEDASSERT(IsNativeValid());
210 return ret;
211 }
212
213
Encode(const GED_INS_TYPE insType,unsigned char * rawBytes)214 GED_RETURN_VALUE GEDIns::Encode(const GED_INS_TYPE insType, unsigned char* rawBytes)
215 {
216 GED_RETURN_VALUE ret = GED_RETURN_VALUE_SUCCESS;
217 if (GED_INS_TYPE_COMPACT == insType) // retrieve the compact instruction bytes
218 {
219 if (!IsCompactEncoded())
220 {
221 if (!IsCompactValid())
222 {
223 GEDASSERT(IsNativeValid());
224 // Try to build a compact format for this instruction.
225 if (!BuildCompactInsFromNative())
226 {
227 GEDASSERT(!IsCompactValid());
228 ret = GED_RETURN_VALUE_NO_COMPACT_FORM;
229 return ret; // the instruction does not have a compact format in its current state
230 }
231 GEDASSERT(IsCompactValid());
232 }
233 ApplyCompactEncodingMasks(_compactBytes); // enforce the per-instruction encoding restrictions for the compact format
234 }
235 GEDASSERT(IsCompactValid());
236 if (NULL != rawBytes)
237 {
238 memcpy_s(rawBytes, GED_COMPACT_INS_SIZE, _compactBytes, GED_COMPACT_INS_SIZE); // copy the compact instruction bytes
239 }
240 }
241 else // retrieve the native instruction bytes
242 {
243 GEDASSERT(GED_INS_TYPE_NATIVE == insType);
244 if (!IsNativeEncoded())
245 {
246 if (!IsNativeValid())
247 {
248 GEDASSERT(IsCompactValid());
249 BuildNativeInsFromCompact(); // should always succeed, however this is not true, see Mantis 3755
250 GEDASSERT(IsNativeValid());
251 }
252 ApplyNativeEncodingMasks(); // enforce the per-instruction encoding restrictions for the native format
253 }
254 GEDASSERT(IsNativeValid());
255 if (NULL != rawBytes)
256 {
257 memcpy_s(rawBytes, GED_NATIVE_INS_SIZE, _nativeBytes, GED_NATIVE_INS_SIZE); // copy the native instruction bytes
258 }
259 }
260 return ret;
261 }
262
263
264 #if GED_VALIDATION_API
265
CountCompacted(unsigned int & count)266 GED_RETURN_VALUE GEDIns::CountCompacted(unsigned int& count)
267 {
268 if (!IsNativeEncoded())
269 {
270 if (!IsNativeValid())
271 {
272 // Can only use the native format as a reference, so first uncompact the instruction
273 GEDASSERT(IsCompactValid());
274 BuildNativeInsFromCompact(); // Should always succeed, however this is not true, see Mantis 3755
275 GEDASSERT(IsNativeValid());
276 }
277 ApplyNativeEncodingMasks(); // enforce the per-instruction encoding restrictions for the native format
278 }
279 GEDASSERT(IsNativeValid());
280 if (!CountCompactFormats(count))
281 {
282 GEDASSERT(!IsCompactValid());
283 return GED_RETURN_VALUE_NO_COMPACT_FORM; // the instruction does not have a compact form in its current state
284 }
285 return GED_RETURN_VALUE_SUCCESS;
286 }
287
288
RetrieveAllCompactedFormats(const unsigned int size,unsigned char * compactBytesArray)289 GED_RETURN_VALUE GEDIns::RetrieveAllCompactedFormats(const unsigned int size, unsigned char* compactBytesArray)
290 {
291 GEDFORASSERT(unsigned int count = 0);
292 GEDASSERT(GED_RETURN_VALUE_SUCCESS == CountCompacted(count));
293 GEDASSERT((size / GED_COMPACT_INS_SIZE) <= count);
294 if (NULL == compactBytesArray)
295 {
296 return GED_RETURN_VALUE_NULL_POINTER;
297 }
298 if (!IsNativeEncoded())
299 {
300 if (!IsNativeValid())
301 {
302 // Can only use the native format as a reference, so first uncompact the instruction
303 GEDASSERT(IsCompactValid());
304 BuildNativeInsFromCompact(); // should always succeed, however this is not true, see Mantis 3755
305 GEDASSERT(IsNativeValid());
306 }
307 ApplyNativeEncodingMasks(); // enforce the per-instruction encoding restrictions for the native format
308 }
309 GEDASSERT(IsNativeValid());
310 if (!BuildAllCompactedFormats(compactBytesArray, size))
311 {
312 GEDASSERT(!IsCompactValid());
313 return GED_RETURN_VALUE_NO_COMPACT_FORM; // the instruction does not have a compact form in its current state
314 }
315 unsigned char* compactBytesPtr = compactBytesArray;
316 for (unsigned int i = 0; i < size / GED_COMPACT_INS_SIZE; ++i)
317 {
318 ApplyCompactEncodingMasks(compactBytesPtr);
319 compactBytesPtr += GED_COMPACT_INS_SIZE;
320 }
321 return GED_RETURN_VALUE_SUCCESS;
322 }
323
324
// Prints (to stdout) the bit layout of the given field in the instruction's current format: the field's width, its raw encoded
// value, and each bit-range fragment, showing where the fragment's bits live in the instruction and their binary value.
// Fixed-value (implicit) fragments, e.g. padding, are printed as "Implicit". Returns GED_RETURN_VALUE_SUCCESS, or
// GED_RETURN_VALUE_INVALID_FIELD if the field is not valid for the current instruction.
GED_RETURN_VALUE GEDIns::PrintFieldBitLocation(const /* GED_INS_FIELD */ uint32_t field) const
{
    GEDASSERT(field < GetCurrentModelData().numberOfInstructionFields);
    GED_RETURN_VALUE status = GED_RETURN_VALUE_INVALID_FIELD;
    // Default to the native format; switch to the compact tables/bytes if the instruction is currently compact.
    ged_ins_decoding_table_t table = _decodingTable;
    const unsigned char* bytes = _nativeBytes;
    if (IsCompactValid())
    {
        table = GetCurrentModelData().opcodeTables[_opcode].compactDecoding;
        bytes = _compactBytes;
    }
    const string& fieldName = fieldNameByField[field];
    const ged_ins_field_entry_t* dataEntry = GetInstructionDataEntry(table, field);
    if (NULL == dataEntry)
    {
        cout << "Field " << fieldName << " is invalid for the current instruction." << endl;
        return status;
    }

    vector<ged_ins_field_mapping_fragment_t> mappingFragments;
    bool hasFixedValue = false; // TRUE if any fragment has a fixed (implicit) value; affects column alignment below

    // Record padding, if exists.
    hasFixedValue |= RecordPadding(mappingFragments, dataEntry);

    // Record the explicit position fragments, if exists.
    hasFixedValue |= RecordPosition(mappingFragments, dataEntry);

    // Sort the vector by the from.lowBit
    sort(mappingFragments.begin(), mappingFragments.end());

    // Merge fragments which are consecutive by both from and to values.
    MergeFragments(mappingFragments);

    // Print the bit mapping of the field.
    // Column width is derived from the widest bit-position string, so the fragment rows line up.
    const uint32_t maxBitPosition = (uint32_t)mappingFragments.back()._from._highBit;
    const uint32_t maxBitPositionLength = static_cast<uint32_t>(DecStr(maxBitPosition).length());
    const uint32_t bitSpacing = maxBitPositionLength * 2 + 1; // printing 2 bit positions, plus a colon
    const uint64_t rawValue = GetField<uint64_t>(bytes, table, field, GED_VALUE_TYPE_ENCODED, status);
    static const string implicitString = "Implicit";
    GEDASSERT(GED_RETURN_VALUE_SUCCESS == status);
    // Header line: field name, width and full raw value (hex and binary).
    cout << "Field " << fieldName << ", width: " << (uint32_t)dataEntry->_bitSize <<
        ((uint32_t)dataEntry->_bitSize == 1 ? " bit, " : " bits, ") << "raw value: " <<
        HexStr(rawValue) << " (" << BinStr(rawValue, dataEntry->_bitSize) << "b)" << endl;
    // One line per (merged) fragment: "fieldLow:fieldHigh = insLow:insHigh (bits)" or "... = Implicit (bits)".
    for (const auto& it : mappingFragments)
    {
        const size_t fragmentBitLength = it._from._highBit - it._from._lowBit + 1;
        stringstream strm;
        strm << (uint32_t)it._from._lowBit << ":" << (uint32_t)it._from._highBit;
        cout << left << setfill(' ') << setw(bitSpacing) << strm.str() << " = ";
        if (it._fixed)
            cout << implicitString << " (" << BinStr(it._value, fragmentBitLength) << "b)" << endl;
        else
        {
            strm.str(string()); // clear stream
            strm.clear();
            // Build a mask covering the fragment's bits within the field's raw value.
            uint64_t mask = 0;
            for (uint8_t i = it._from._lowBit; i <= it._from._highBit; ++i)
            {
                mask |= (1ULL << i);
            }
            const uint64_t fragmentVal = ((rawValue & mask) >> it._from._lowBit);
            strm << (uint32_t)it._to._lowBit << ":" << (uint32_t)it._to._highBit;
            cout << left << setfill(' ') << setw(hasFixedValue ? implicitString.length() : bitSpacing) << strm.str() << " (" <<
                BinStr(fragmentVal, fragmentBitLength) << "b)" << endl;
        }
    }
    return GED_RETURN_VALUE_SUCCESS;
}
394 #endif //GED_VALIDATION_API
395
QueryFieldBitLocation(const uint32_t field,uint32_t * fragments,uint32_t * length) const396 GED_RETURN_VALUE GEDIns::QueryFieldBitLocation(const /* GED_INS_FIELD */ uint32_t field, uint32_t *fragments, uint32_t *length) const
397 {
398 GEDASSERT(field < GetCurrentModelData().numberOfInstructionFields);
399 GEDASSERT((fragments != NULL) || (length != NULL));
400 GED_RETURN_VALUE status = GED_RETURN_VALUE_INVALID_FIELD;
401 ged_ins_decoding_table_t table = _decodingTable;
402
403 if (length != NULL)
404 {
405 *length = 0;
406 }
407 const ged_ins_field_entry_t* dataEntry = GetInstructionDataEntry(table, field);
408 if (NULL == dataEntry)
409 {
410 return status;
411 }
412
413 vector<ged_ins_field_mapping_fragment_t> mappingFragments;
414 RecordPosition(mappingFragments, dataEntry);
415 sort(mappingFragments.begin(), mappingFragments.end());
416 MergeFragments(mappingFragments);
417
418 uint32_t index = 0;
419 for (const auto& it : mappingFragments)
420 {
421 if (it._fixed)
422 ;
423 else
424 {
425 if (fragments != NULL)
426 {
427 uint32_t len = it._from._highBit - it._from._lowBit + 1;
428 fragments[index++] = (len << 16) | it._to._lowBit;
429 }
430 if (length != NULL)
431 {
432 (*length)++;
433 }
434 }
435 }
436 return GED_RETURN_VALUE_SUCCESS;
437 }
438
GetFieldSize(const uint32_t field) const439 uint32_t GEDIns::GetFieldSize(const /* GED_INS_FIELD */ uint32_t field) const
440 {
441 if (GetCurrentModelData().numberOfInstructionFields <= field) return 0;
442
443 // First try the native format.
444 const ged_ins_field_entry_t* dataEntry = GetInstructionDataEntry(_decodingTable, field);
445 if (NULL != dataEntry) return (uint32_t)dataEntry->_bitSize;
446
447 // The field was not found in the native format, try the compact format (if it exists).
448 const ged_ins_decoding_table_t compactTable = GetCurrentModelData().opcodeTables[_opcode].compactDecoding;
449 if (NULL != compactTable)
450 {
451 const ged_ins_field_entry_t* compactDataEntry = GetInstructionDataEntry(compactTable, field);
452 if (NULL != compactDataEntry) return (uint32_t)compactDataEntry->_bitSize;
453 }
454
455 // The field is not valid in either format.
456 return 0;
457 }
458
459
SetOpcode(uint32_t opcode)460 GED_RETURN_VALUE GEDIns::SetOpcode(/* GED_OPCODE */ uint32_t opcode)
461 {
462 GED_RETURN_VALUE ret = GED_RETURN_VALUE_SUCCESS;
463 const uint32_t highestOpcodeValue = GED_MAX_ENTRIES_IN_OPCODE_TABLE - 1;
464 if (!GEDRestrictionsHandler::ConvertEnumeratedValueToRawEncodedValue(opcode, highestOpcodeValue, GetCurrentModelData().Opcodes))
465 {
466 ret = GED_RETURN_VALUE_OPCODE_NOT_SUPPORTED;
467 return ret;
468 }
469
470 if (opcode == _opcode)
471 {
472 return ret;
473 }
474 _opcode = opcode;
475
476 const ged_ins_decoding_table_t decodingTable = GetCurrentModelData().opcodeTables[opcode].nativeDecoding;
477 GEDASSERT(NULL != decodingTable);
478 if (decodingTable != _decodingTable)
479 {
480 ClearStatus();
481 memset(_nativeBytes, 0, GED_NATIVE_INS_SIZE);
482
483 // Set the new decoding table.
484 _decodingTable = decodingTable;
485 SetNativeValid(); // mark that the native instruction bytes are valid
486 SetNativeOpcode(); // set the opcode in the native instruction bytes
487
488 // Check if the new opcode has a compact format.
489 if (GetCurrentModelData().opcodeTables[_opcode].compactDecoding)
490 {
491 // TODO: Once the compact bit is set in the encoding masks, remove the next line and uncomment the one after that (Mantis 3732).
492 *(reinterpret_cast<uint64_t*>(_compactBytes)) = compactInsInitializer;
493 //memset(_compactBytes, 0, GED_COMPACT_INS_SIZE);
494 SetCompactValid(); // mark that the compact instruction bytes are valid
495 SetCompactOpcode(); // set the opcode in the compact instruction bytes
496 }
497 }
498 GEDASSERT(IsValid()); // at least one format must be valid
499 return ret;
500 }
501
502 # if GED_EXPERIMENTAL
SetRawBits(const uint8_t lowBit,const uint8_t highBit,const uint64_t val)503 GED_RETURN_VALUE GEDIns::SetRawBits(const uint8_t lowBit, const uint8_t highBit, const uint64_t val)
504 {
505 GEDASSERT(lowBit < GED_NATIVE_INS_SIZE_BITS);
506 GEDASSERT(highBit < GED_NATIVE_INS_SIZE_BITS);
507 GEDASSERT(lowBit <= highBit);
508 GEDASSERT(IsNativeValid());
509 const uint8_t lowDword = lowBit / GED_DWORD_BITS;
510 const uint8_t highDword = highBit / GED_DWORD_BITS;
511 //const unsigned int numberOfDwords = highDword - lowDword;
512 ged_ins_field_position_fragment_t pos;
513 if (lowDword == highDword)
514 {
515 pos._lowBit = lowBit;
516 pos._highBit = highBit;
517 pos._dwordIndex = lowBit / GED_DWORD_BITS;
518 pos._shift = lowBit % GED_DWORD_BITS;
519 pos._bitMask = rightShiftedMasks[(highBit - lowBit)] << pos._shift;
520 SetFragment(_nativeBytes, pos, val);
521 }
522 else
523 {
524 NYI;
525 }
526 SetEncodingMasksDisabled();
527 return GED_RETURN_VALUE_SUCCESS;
528 }
529 # endif // GED_EXPERIMENTAL
530
531 /*************************************************************************************************
532 * class GEDIns protected member functions
533 *************************************************************************************************/
534
GetFieldWidth(const uint16_t field,const bool interpField) const535 uint8_t GEDIns::GetFieldWidth(const uint16_t field, const bool interpField /* = false */) const
536 {
537 const ged_ins_field_entry_t* entry =
538 GetInstructionDataEntry((interpField) ? GetCurrentModelData().pseudoFields : _decodingTable, field);
539 if (NULL == entry) return 0;
540 if (IsVariableField(entry))
541 {
542 return entry->_restrictions[0]->_fieldType._attr._bits;
543 }
544 else
545 {
546 return entry->_bitSize;
547 }
548 }
549
550
GetInstructionBytes() const551 string GEDIns::GetInstructionBytes() const
552 {
553 return (IsCompact()) ? GetInstructionBytes(_compactBytes, GED_NUM_OF_COMPACT_INS_DWORDS)
554 : GetInstructionBytes(_nativeBytes, GED_NUM_OF_NATIVE_INS_DWORDS);
555 }
556
557
558 /*************************************************************************************************
559 * class GEDIns private member functions
560 *************************************************************************************************/
561
SetInstructionBytes(unsigned char * dst,const unsigned char * src,unsigned int size,const unsigned int maxSize) const562 void GEDIns::SetInstructionBytes(unsigned char* dst, const unsigned char* src, unsigned int size, const unsigned int maxSize) const
563 {
564 if (size > maxSize)
565 {
566 size = maxSize; // given buffer is too large, ignore the rest of the bytes.
567 }
568 memcpy_s(dst, size, src, size);
569 }
570
571
// Returns the value of the given field as extracted from the _nativeBytes array, but only if all of the field's bits were
// already mapped. validBits is a scoreboard over the native bytes in which set bits mean "not yet mapped" (they start all set
// and are cleared as bits are mapped). On success "extracted" is set to TRUE and the value is returned; otherwise "extracted"
// is FALSE and MAX_UINT32_T is returned.
uint32_t GEDIns::GetMappedField(const /* GED_INS_FIELD */ uint32_t field, const unsigned char* validBits, bool& extracted) const
{
    GEDASSERT(NULL != validBits);
    GEDASSERT(field < GetCurrentModelData().numberOfInstructionFields);
    GEDASSERT(field == _decodingTable[field]._field);

    // Traverse the intermediate tables (if necessary).
    extracted = false;
    const ged_ins_field_entry_t* dataEntry = GetMappedInstructionDataEntry(_decodingTable, field, validBits, extracted);
    if (!extracted) return MAX_UINT32_T;
    GEDASSERT(NULL != dataEntry);

    // Now that we are at the bottommost table, extract the actual data if they were already mapped. The "extracted" out parameter is
    // already set to TRUE.
    switch (dataEntry->_entryType)
    {
    case GED_TABLE_ENTRY_TYPE_CONSECUTIVE:
        // A non-zero extraction from validBits means some of the field's bits are still marked as unmapped.
        if (0 != ExtractConsecutiveEntryValue(validBits, dataEntry->_consecutive._position)) break;
        return ExtractConsecutiveEntryValue(_nativeBytes, dataEntry->_consecutive._position);
    case GED_TABLE_ENTRY_TYPE_FRAGMENTED:
        // Same check as above, applied across all of the field's fragments.
        if (0 != ExtractFragmentedEntryValue<uint32_t>(validBits, dataEntry)) break;
        return ExtractFragmentedEntryValue<uint32_t>(_nativeBytes, dataEntry);
    default:
        GEDASSERT(0);
    }

    // Mark that some (or all) necessary bits for this field were not yet mapped.
    extracted = false;
    return MAX_UINT32_T;
}
602
603
// Resolves a field whose layout depends on the value of other fields by walking the chain of NEXT_TABLE entries: at each step
// the dependee (key) field's value is extracted from the native bytes and used as the index into the next table. Returns the
// final data entry, or NULL if the field is not supported in this format or the instruction's encoding is invalid.
const ged_ins_field_entry_t* GEDIns::GetDependentInstructionDataEntry(ged_ins_decoding_table_t table,
                                                                      /* GED_INS_FIELD */ uint32_t tableIndex) const
{
    GEDASSERT(NULL != table);
    while (GED_TABLE_ENTRY_TYPE_NEXT_TABLE == table[tableIndex]._entryType)
    {
        const ged_ins_field_next_table_t* nextTable = &table[tableIndex]._nextTable;
        GED_RETURN_VALUE ret = GED_RETURN_VALUE_INVALID_FIELD;
        // The key field's value selects the entry in the next table.
        tableIndex = GetField<uint32_t>(_nativeBytes, _decodingTable, nextTable->_tableKey, GED_VALUE_TYPE_ENCODED, ret);

        // GetField is expected to succeed since the models ensure that the dependent fields are always valid. However, we need to
        // account for invalid instructions, i.e. instructions with an illegal encoding. Such instructions may have invalid values for
        // valid fields. If a dependee field is in itself a dependent field, it may even be an invalid field in the given (poorly
        // encoded) instruction.
        if (GED_RETURN_VALUE_INVALID_FIELD == ret || GED_RETURN_VALUE_INVALID_VALUE == ret) return NULL;
        GEDASSERT(GED_RETURN_VALUE_SUCCESS == ret);
        table = nextTable->_tablePtr;
        GEDASSERT(NULL != table);
    }

    // Verify that this is a legal entry.
    GEDASSERT(table[tableIndex]._entryType < GED_TABLE_ENTRY_TYPE_SIZE);

    // Verify that the field is supported in this format.
    if (GED_TABLE_ENTRY_TYPE_NOT_SUPPORTED == table[tableIndex]._entryType)
    {
        return NULL;
    }

    // Verify that the final entry actually holds data.
    GEDASSERT(GED_TABLE_ENTRY_TYPE_NEXT_TABLE != table[tableIndex]._entryType);

    return &table[tableIndex];
}
638
639
GetDependentMappedInstructionDataEntry(ged_ins_decoding_table_t table,uint32_t tableIndex,const unsigned char * validBits,bool & extracted) const640 const ged_ins_field_entry_t* GEDIns::GetDependentMappedInstructionDataEntry(ged_ins_decoding_table_t table,
641 /* GED_INS_FIELD */ uint32_t tableIndex,
642 const unsigned char* validBits, bool& extracted) const
643 {
644 GEDASSERT(NULL != table);
645 GEDASSERT(NULL != validBits);
646 while (GED_TABLE_ENTRY_TYPE_NEXT_TABLE == table[tableIndex]._entryType)
647 {
648 // Extract the value from the _nativeBytes array if it is valid (i.e. if it has already been mapped)
649 const ged_ins_field_next_table_t* nextTable = &table[tableIndex]._nextTable;
650 tableIndex = GetMappedField(nextTable->_tableKey, validBits, extracted);
651 if (!extracted) return NULL; // the field is not valid in the _nativeBytes array (i.e. it was not yet mapped)
652 GEDASSERT(MAX_UINT32_T != tableIndex);
653 table = nextTable->_tablePtr;
654 GEDASSERT(NULL != table);
655 }
656
657 // Verify that this is a legal entry.
658 GEDASSERT(table[tableIndex]._entryType < GED_TABLE_ENTRY_TYPE_SIZE);
659
660 // Verify that the field is supported in this format.
661 GEDASSERT(GED_TABLE_ENTRY_TYPE_NOT_SUPPORTED != table[tableIndex]._entryType);
662
663 // Verify that the final entry actually holds data.
664 GEDASSERT(GED_TABLE_ENTRY_TYPE_NEXT_TABLE != table[tableIndex]._entryType);
665
666 extracted = true;
667 return &table[tableIndex];
668 }
669
670
// Expands the instruction's compact bytes (_compactBytes) into the full native encoding (_nativeBytes). On success the
// instruction is marked non-compact and its native bytes are marked valid. Returns GED_RETURN_VALUE_NO_COMPACT_FORM if the
// opcode has no compact decoding table, or GED_RETURN_VALUE_BAD_COMPACT_ENCODING if the remaining unmapped fields form a
// cyclic dependency (which can only happen for an illegally encoded instruction).
GED_RETURN_VALUE GEDIns::BuildNativeInsFromCompact()
{
    GEDASSERT(IsCompactValid()); // nothing to do if the instruction is not compacted
    memset(_nativeBytes, 0, GED_NATIVE_INS_SIZE);

    const ged_ins_decoding_table_t compactTable = GetCurrentModelData().opcodeTables[_opcode].compactDecoding; // map from this
    if (NULL == compactTable) return GED_RETURN_VALUE_NO_COMPACT_FORM;

    const ged_compact_mapping_table_t mappingTable = GetCurrentModelData().opcodeTables[_opcode].compactMapping;
    GEDASSERT(NULL != mappingTable);

    // Mapping algorithm:
    //
    // Iterate compactTable. For each valid entry i.e. GED_TABLE_ENTRY_TYPE_CONSECUTIVE, GED_TABLE_ENTRY_TYPE_FRAGMENTED
    //   or GED_TABLE_ENTRY_TYPE_NEXT_TABLE:
    // a) Get compact field value (from _compactBytes using compactTable). If the field depends on another field which was not
    //    yet mapped, add it to the unmapped list and continue to the next field.
    // b) Find mapping type:
    //      One of the four combinations between value/index and consecutive/fragmented. The mapping type may have dependencies,
    //      in which case there will be a next table entry and finding the mapping type will require iterating the mapping chain.
    //      The mapping type may be GED_MAPPING_TABLE_ENTRY_TYPE_NOT_SUPPORTED if the field is not supported in this format, in
    //      which case the corresponding entry in the decoding table will be of type GED_TABLE_ENTRY_TYPE_NOT_SUPPORTED as well.
    // c) If this is a value mapping (either consecutive or fragmented):
    //      Map bits from _compactBytes to _nativeBytes according to the mapping table entry.
    // d) If this is an index mapping, i.e. mapping from a compaction table:
    //      i) Get the value from the compaction table (ged_compaction_table_t), in which case the field's value serves
    //         as the index of the entry in the compaction table.
    //     ii) Map the bits from the compaction table entry value to _nativeBytes.
    // e) Repeat steps (a)-(d) for every field in the unmapped list until it is empty.

    // The validBits array will be used to note which bits of the instruction's native format were already mapped (and can be used as
    // dependencies). We start with all bits set and clear the relevant bits as they are being mapped. Thus, in order to check if a
    // certain field was mapped or not, the relevant bits are simply compared to zero.
    unsigned char validBits[GED_NATIVE_INS_SIZE];
    memset(validBits, 0xff, GED_NATIVE_INS_SIZE);
    set<uint32_t> unMapped;

    // First pass: map every field that does not depend on a not-yet-mapped field.
    for (uint32_t i = 0; i < GetCurrentModelData().numberOfInstructionFields; ++i)
    {
        GEDASSERT(compactTable[i]._entryType < GED_TABLE_ENTRY_TYPE_SIZE); // verify that this is a valid entry
        if (GED_TABLE_ENTRY_TYPE_NOT_SUPPORTED == compactTable[i]._entryType) continue; // this field is not supported in this format
        if (!MapCurrentField(compactTable, mappingTable, i, validBits))
        {
            // The field depends on another field which was not yet mapped. Add it to the unmapped list for later processing.
            GEDASSERT(0 == unMapped.count(i));
            unMapped.insert(i);
            continue;
        }
    }

    // In every iteration of the outer loop, try to map the first unmapped (dependent) field in the unMapped set. If it depends on
    // another dependent field which was not yet mapped, skip it and try the rest one by one (the inner loop) until one field is
    // mapped successfully. If no field was mapped successfully, it means that the remaining fields have a cyclic dependency on each
    // other, in which case an error is emitted.
    const set<uint32_t>::const_iterator end = unMapped.end();
    while (!unMapped.empty())
    {
        set<uint32_t>::const_iterator it = unMapped.begin();
        for (; end != it; ++it)
        {
            if (MapCurrentField(compactTable, mappingTable, *it, validBits)) break;
        }

        // Make sure a dependent field was mapped successfully.
        if (end == it)
        {
            // TODO: Call this function after adding logging to GED.
            // EmitMappingCyclicDependencyError(unMapped, validBits);
            return GED_RETURN_VALUE_BAD_COMPACT_ENCODING;
        }
        unMapped.erase(it);
    }

    // This instruction is marked as compact in the _status field, but the instruction bits in the _nativeBytes array should reflect
    // a native instruction now that it is expanded.
    SetNonCompact();
    SetNativeValid();
    return GED_RETURN_VALUE_SUCCESS;
}
750
751
// Maps a single compact field into the native instruction bytes.
//   compactTable - decoding table used to extract the field's value from _compactBytes.
//   mappingTable - table describing how that value maps into _nativeBytes.
//   field        - the field to map.
//   validBits    - scoreboard of native bits not yet mapped; the mapped bits are cleared as a side effect (inside MapRawBytes).
// Returns true if the field was mapped (or needs no mapping), false if it depends on a field that was not yet mapped.
bool GEDIns::MapCurrentField(const ged_ins_decoding_table_t compactTable, const ged_compact_mapping_table_t mappingTable,
                             const /* GED_INS_FIELD */ uint32_t field, unsigned char* validBits)
{
    GEDASSERT(NULL != compactTable);
    GEDASSERT(NULL != mappingTable);
    GEDASSERT(NULL != validBits);

    // Get the mapping table entry for this field.
    const ged_compact_mapping_entry_t* mappingEntry = GetCompactionMappingEntry(mappingTable, field, validBits);
    if (NULL == mappingEntry) return false; // when building compact from native but the mapping is invalid
    if (GED_MAPPING_TABLE_ENTRY_TYPE_NO_MAPPING == mappingEntry->_entryType) return true; // early out
    GEDASSERT(mappingEntry->_field == field);

    // Get the compact field value.
    GED_RETURN_VALUE ret = GED_RETURN_VALUE_INVALID_FIELD;
    uint32_t mappingValue = GetField<uint32_t>(_compactBytes, compactTable, field, GED_VALUE_TYPE_ENCODED, ret);
    GEDASSERT(GED_RETURN_VALUE_SUCCESS == ret);

    // Map the compact field to the native field(s).
    // "Index" mappings treat the compact value as an index into a compaction table whose entry supplies the native bits;
    // "value" mappings copy the compact value's bits directly. Either may be consecutive or fragmented in the native format.
    switch (mappingEntry->_entryType)
    {
    case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_CONSECUTIVE:
        GEDASSERT(NULL != mappingEntry->_compactionTable);
        GEDASSERT(MAX_UINT32_T >= mappingEntry->_compactionTable[mappingValue]);
        MapRawBytes((uint32_t)mappingEntry->_compactionTable[mappingValue], &mappingEntry->_consecutive._to,
                    mappingEntry->_consecutive._fromMask, validBits);
        break;
    case GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_CONSECUTIVE:
        MapRawBytes(mappingValue, &mappingEntry->_consecutive._to, mappingEntry->_consecutive._fromMask, validBits);
        break;
    case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_FRAGMENTED:
        GEDASSERT(NULL != mappingEntry->_compactionTable);
        MapRawBytes(mappingEntry->_compactionTable[mappingValue], mappingEntry->_fragmented._numOfMappingFragments,
                    mappingEntry->_fragmented._fragments, validBits);
        break;
    case GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_FRAGMENTED:
        MapRawBytes(mappingValue, mappingEntry->_fragmented._numOfMappingFragments, mappingEntry->_fragmented._fragments, validBits);
        break;
    default:
        GEDASSERT(0);
    }
    return true;
}
795
796
// Resolve the compaction-mapping entry for a field whose mapping depends on the value of other
// native fields. Each NEXT_TABLE hop is keyed by another field that must already be present in
// the _nativeBytes array (as recorded in validBits). Returns NULL if a controlling field has not
// been mapped yet (the caller may retry later) or if the field is unsupported in this format.
const ged_compact_mapping_entry_t* GEDIns::GetDependentCompactionMappingEntry(ged_compact_mapping_table_t table,
                                                                              /* GED_INS_FIELD */ uint32_t tableIndex,
                                                                              const unsigned char* validBits) const
{
    GEDASSERT(NULL != table);
    GEDASSERT(NULL != validBits);
    // Follow the chain of dependent tables until a concrete (non-NEXT_TABLE) entry is reached.
    while (GED_MAPPING_TABLE_ENTRY_TYPE_NEXT_TABLE == table[tableIndex]._entryType)
    {
        const ged_compact_mapping_next_table_t* nextTable = &table[tableIndex]._nextTable;
        // Extract the value from the _nativeBytes array if it is valid (i.e. if it has already been mapped)
        bool extracted = false;
        tableIndex = GetMappedField(nextTable->_tableKey, validBits, extracted);
        if (!extracted) return NULL; // the field is not valid in the _nativeBytes array (i.e. it was not yet mapped)
        table = nextTable->_tablePtr;
        GEDASSERT(NULL != table);
    }

    // Verify that this is a legal entry.
    GEDASSERT(table[tableIndex]._entryType < GED_MAPPING_TABLE_ENTRY_TYPE_SIZE);

    // Verify that the field is supported in this format.
    if (GED_MAPPING_TABLE_ENTRY_TYPE_NOT_SUPPORTED == table[tableIndex]._entryType) return NULL;

    // Verify that the final entry actually holds data.
    GEDASSERT(GED_MAPPING_TABLE_ENTRY_TYPE_NEXT_TABLE != table[tableIndex]._entryType);

    return &table[tableIndex];
}
825
826
MapRawBytes(const uint32_t src,const ged_ins_field_position_fragment_t * to,const uint32_t fromMask,unsigned char * validBits)827 void GEDIns::MapRawBytes(const uint32_t src, const ged_ins_field_position_fragment_t* to, const uint32_t fromMask,
828 unsigned char* validBits)
829 {
830 GEDASSERT(NULL != to);
831 GEDASSERT(NULL != validBits);
832 uint32_t value = (src & fromMask);
833 GEDASSERT(0 <= to->_shift);
834 value <<= to->_shift;
835 SetMappedBits(to->_dwordIndex, to->_bitMask, value, validBits);
836 }
837
838
MapRawBytes(const uint32_t src,const uint32_t numOfFragments,const ged_compact_mapping_fragment_t * fragments,unsigned char * validBits)839 void GEDIns::MapRawBytes(const uint32_t src, const uint32_t numOfFragments, const ged_compact_mapping_fragment_t* fragments,
840 unsigned char* validBits)
841 {
842 GEDASSERT(NULL != fragments);
843 GEDASSERT(NULL != validBits);
844 for (uint32_t i = 0; i < numOfFragments; ++i)
845 {
846 if (GED_COMPACT_MAPPING_TYPE_REP == fragments[i]._mappingType)
847 {
848 GEDASSERT(0 == fragments[i]._from._dwordIndex); // src only has 32 bits
849 MapReppedValue(src, &fragments[i]._to, &fragments[i]._from, validBits);
850 }
851 else if (GED_COMPACT_MAPPING_TYPE_1x1 == fragments[i]._mappingType)
852 {
853 GEDASSERT(0 == fragments[i]._from._dwordIndex); // src only has 32 bits
854 MapOneToOneValue(src, &fragments[i]._to, &fragments[i]._from, validBits);
855 }
856 else if (GED_COMPACT_MAPPING_TYPE_FIXED == fragments[i]._mappingType)
857 {
858 GEDASSERT(0 == fragments[i]._from._dwordIndex); // src only has 32 bits
859 MapFixedValue(fragments[i]._from._lowBit, &fragments[i]._to, validBits);
860 }
861 else { GEDASSERT(0); }
862 }
863 }
864
865
MapRawBytes(const uint64_t src,const uint32_t numOfFragments,const ged_compact_mapping_fragment_t * fragments,unsigned char * validBits)866 void GEDIns::MapRawBytes(const uint64_t src, const uint32_t numOfFragments, const ged_compact_mapping_fragment_t* fragments,
867 unsigned char* validBits)
868 {
869 GEDASSERT(NULL != fragments);
870 GEDASSERT(NULL != validBits);
871 for (uint32_t i = 0; i < numOfFragments; ++i)
872 {
873 if (GED_COMPACT_MAPPING_TYPE_REP == fragments[i]._mappingType)
874 {
875 GEDASSERT(fragments[i]._from._dwordIndex < 2); // src only has 64 bits
876 MapReppedValue((0 == fragments[i]._from._dwordIndex) ? (uint32_t)src : (uint32_t)(src >> GED_DWORD_BITS),
877 &fragments[i]._to, &fragments[i]._from, validBits);
878 }
879 else if (GED_COMPACT_MAPPING_TYPE_1x1 == fragments[i]._mappingType)
880 {
881 GEDASSERT(fragments[i]._from._dwordIndex < 2); // src only has 64 bits
882 MapOneToOneValue((0 == fragments[i]._from._dwordIndex) ? (uint32_t)src : (uint32_t)(src >> GED_DWORD_BITS),
883 &fragments[i]._to, &fragments[i]._from, validBits);
884 }
885 else if (GED_COMPACT_MAPPING_TYPE_FIXED == fragments[i]._mappingType)
886 {
887 NYI; // no fixed mapping for fragmented index mapping
888 }
889 else { GEDASSERT(0); }
890 }
891 }
892
893
MapReppedValue(const uint32_t src,const ged_ins_field_position_fragment_t * to,const ged_ins_field_position_fragment_t * from,unsigned char * validBits)894 void GEDIns::MapReppedValue(const uint32_t src, const ged_ins_field_position_fragment_t* to,
895 const ged_ins_field_position_fragment_t* from, unsigned char* validBits)
896 {
897 GEDASSERT(NULL != to);
898 GEDASSERT(NULL != from);
899 const uint8_t fromSize = FragmentSize(from);
900 const uint8_t toSize = FragmentSize(to);
901 GEDASSERT(fromSize > 0); // verify that the source size is non-zero (about to be used as a denominator)
902 GEDASSERT(toSize >= fromSize); // verify that the target is at least as wide as the source
903 GEDASSERT(0 == (toSize % fromSize)); // verify that the target is a whole multiple of the source
904 const uint8_t numOfReps = toSize / fromSize;
905 const uint32_t rep = ((src & from->_bitMask) >> from->_shift);
906 uint32_t value = rep;
907 for (uint8_t i = 1; i < numOfReps; ++i)
908 {
909 value <<= fromSize;
910 value |= rep;
911 }
912 value <<= to->_shift;
913 SetMappedBits(to->_dwordIndex, to->_bitMask, value, validBits);
914 }
915
916
MapOneToOneValue(const uint32_t src,const ged_ins_field_position_fragment_t * to,const ged_ins_field_position_fragment_t * from,unsigned char * validBits)917 void GEDIns::MapOneToOneValue(const uint32_t src, const ged_ins_field_position_fragment_t* to,
918 const ged_ins_field_position_fragment_t* from, unsigned char* validBits)
919 {
920 GEDASSERT(NULL != to);
921 GEDASSERT(NULL != from);
922 uint32_t value = (src & from->_bitMask);
923 const int8_t shift = from->_shift - to->_shift;
924 if (shift > 0)
925 {
926 value >>= shift;
927 }
928 else
929 {
930 value <<= abs(shift);
931 }
932 SetMappedBits(to->_dwordIndex, to->_bitMask, value, validBits);
933 }
934
935
MapFixedValue(const uint32_t value,const ged_ins_field_position_fragment_t * to,unsigned char * validBits)936 void GEDIns::MapFixedValue(const uint32_t value, const ged_ins_field_position_fragment_t* to, unsigned char* validBits)
937 {
938 GEDASSERT(NULL != to);
939 SetMappedBits(to->_dwordIndex, to->_bitMask, value << to->_shift, validBits);
940 }
941
942
EmitMappingCyclicDependencyError(const set<uint32_t> & unMapped,const unsigned char * validBits) const943 void GEDIns::EmitMappingCyclicDependencyError(const set<uint32_t>& unMapped, const unsigned char* validBits) const
944 {
945 GEDASSERT(NULL != validBits);
946 set<uint32_t>::const_iterator it = unMapped.begin();
947 const set<uint32_t>::const_iterator end = unMapped.end();
948 string depErrorStr = DecStr(*it);
949 for (++it; end != it; ++it)
950 {
951 depErrorStr += ", " + DecStr(*it);
952 }
953 #if defined(GED_VALIDATE)
954 stringstream strm;
955 strm << setfill('0') << hex;
956 for (int i = GED_NUM_OF_NATIVE_INS_DWORDS - 1; i >= 0; --i)
957 {
958 strm << setw(8) << ((uint32_t*)validBits)[i];
959 }
960 depErrorStr += "\nValidBits: 0x" + strm.str();
961 #endif // GED_VALIDATE
962 // TODO: This should use GED logs instead of emitting an error. Also, consider changing the function name.
963 GEDERROR("Unable to map remaining unmapped fields, probably due to an implicit dependency cycle: " + depErrorStr);
964 }
965
966
// Attempt to build the compact encoding (_compactBytes) from the (valid) native encoding.
// Returns false if the opcode has no compact format or if some field's native value has no
// matching compaction-table entry, i.e. this particular instruction cannot be compacted.
bool GEDIns::BuildCompactInsFromNative()
{
    GEDASSERT(IsNativeValid());
    GEDASSERT(!IsCompactValid());
    GEDASSERT(!IsCompactEncoded());
    memset(_compactBytes, 0, GED_COMPACT_INS_SIZE); // clear the compact bytes

    const ged_ins_decoding_table_t compactTable = GetCurrentModelData().opcodeTables[_opcode].compactDecoding; // map to this
    if (NULL == compactTable) return false; // the instruction doesn't have a compact format

    const ged_compact_mapping_table_t mappingTable = GetCurrentModelData().opcodeTables[_opcode].compactMapping;
    GEDASSERT(NULL != mappingTable);

    // Traverse all the compact instruction fields. For every valid field, collect its value from the native instruction and set that
    // value in the compact instruction bytes.
    unsigned char orMask[GED_NATIVE_INS_SIZE] = { 0 }; // used in CollectCurrentField, see documentation therein for its usage
    BuildNativeOrMask(orMask); // prepare the or-mask
    for (uint32_t i = 0; i < GetCurrentModelData().numberOfInstructionFields; ++i)
    {
        GEDASSERT(compactTable[i]._entryType < GED_TABLE_ENTRY_TYPE_SIZE); // verify that this is a valid entry
        if (GED_TABLE_ENTRY_TYPE_NOT_SUPPORTED == compactTable[i]._entryType) continue; // this field is not supported in this format
        if (!CollectCurrentField(compactTable, mappingTable, orMask, i)) return false;
    }

    // Set the CmptCtrl bit - it was cleared by the loop above when reverse-mapping the CmptCtrl field from the native instruction.
    SetCompact(_compactBytes);
    SetCompactValid(); // the compact format is now valid
    return true;
}
996
997
998 #if GED_VALIDATION_API
CountCompactFormats(unsigned int & count)999 bool GEDIns::CountCompactFormats(unsigned int& count)
1000 {
1001 unsigned int tempCounter = 1;
1002 unsigned char orMask[GED_NATIVE_INS_SIZE] = { 0 }; // used in CountCurrentField, see documentation therein for its usage
1003 BuildNativeOrMask(orMask); // prepare the or-mask
1004 const ged_ins_decoding_table_t compactTable = GetCurrentModelData().opcodeTables[_opcode].compactDecoding; // map to this
1005 if (NULL == compactTable) return false; // the instruction doesn't have a compact format
1006
1007 const ged_compact_mapping_table_t mappingTable = GetCurrentModelData().opcodeTables[_opcode].compactMapping;
1008 GEDASSERT(NULL != mappingTable);
1009 for (uint32_t i = 0; i < GetCurrentModelData().numberOfInstructionFields; ++i)
1010 {
1011 GEDASSERT(compactTable[i]._entryType < GED_TABLE_ENTRY_TYPE_SIZE); // verify that this is a valid entry
1012 if (GED_TABLE_ENTRY_TYPE_NOT_SUPPORTED == compactTable[i]._entryType) continue; // this field is not supported in this format
1013 if (!CountCurrentField(compactTable, mappingTable, orMask, i, tempCounter)) return false;
1014 }
1015 count = tempCounter;
1016 return true;
1017 }
1018
1019
// Count, for a single compact field, how many compaction-table entries match the current native
// instruction bits, multiplying the result into "count". Only index-mapped fields can contribute
// more than one option; value-mapped and unmapped fields leave "count" unchanged.
// Returns false if the field's mapping is invalid or no compaction-table entry matches.
bool GEDIns::CountCurrentField(const ged_ins_decoding_table_t compactTable, const ged_compact_mapping_table_t mappingTable,
                               const unsigned char* const orMask, const /* GED_INS_FIELD */ uint32_t field, unsigned int& count)
{
    GEDASSERT(NULL != compactTable);
    GEDASSERT(NULL != mappingTable);
    // Get the mapping table entry for this field.
    static const unsigned char validBits[GED_NATIVE_INS_SIZE] = { 0 }; // assume that all the native instruction fields are valid
    const ged_compact_mapping_entry_t* mappingEntry = GetCompactionMappingEntry(mappingTable, field, validBits);
    if (NULL == mappingEntry) return false; // when building compact from native but the mapping is invalid
    GEDASSERT(mappingEntry->_field == field);

    // For each entry type, count the amount of valid values: Value mappings have 1 valid value, and index mappings require iterating
    // over all indexes, counting the valid ones and multiplying in the end.
    switch (mappingEntry->_entryType)
    {
    case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_CONSECUTIVE:
    {
        // This is an index field, so the actual value to be encoded is an index in the field's compaction table. Try to find a
        // compaction table entry that matches the collected value. If such an entry is found, "count" is multiplied by the amount
        // of valid entries in the compact table, otherwise the function returns FALSE without setting the field.
        uint64_t qwval = (uint64_t)ExtractConsecutiveEntryValue(_nativeBytes, mappingEntry->_consecutive._to);
        // Or-mask of reserved native bits; see CollectCurrentField for why it is applied in the comparison.
        const uint64_t valMask = (uint64_t)ExtractConsecutiveEntryValue(orMask, mappingEntry->_consecutive._to);
        GEDASSERT(GED_TABLE_ENTRY_TYPE_NEXT_TABLE != compactTable[field]._entryType); // index fields must be explicit
        GEDASSERT(MAX_UINT32_T > MaxValue(compactTable[field]));
        const uint32_t tableSize = BitsToNumOfValues(compactTable[field]._bitSize); // size of the compaction table
        GEDASSERT(NULL != mappingEntry->_compactionTable);
        if (!CountCompactionTableEntry(qwval, valMask, tableSize, mappingEntry->_compactionTable, count)) return false;
        break;
    }
    case GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_CONSECUTIVE:
        break; // a value mapping contributes exactly one option
    case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_FRAGMENTED:
    {
        // See documentation in the GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_CONSECUTIVE case above.
        uint64_t qwval = 0;
        uint64_t tempValMask = 0;
        if (!CollectFragmentedEntryQWValue(qwval, _nativeBytes, mappingEntry)) return false;
        if (!CollectFragmentedEntryQWValue(tempValMask, orMask, mappingEntry)) return false;
        const uint64_t valMask = tempValMask;
        GEDASSERT(GED_TABLE_ENTRY_TYPE_NEXT_TABLE != compactTable[field]._entryType); // index fields must be explicit
        GEDASSERT(MAX_UINT32_T > MaxValue(compactTable[field]));
        const uint32_t tableSize = BitsToNumOfValues(compactTable[field]._bitSize); // size of the compaction table
        GEDASSERT(NULL != mappingEntry->_compactionTable);
        if (!CountCompactionTableEntry(qwval, valMask, tableSize, mappingEntry->_compactionTable, count)) return false;
        break;
    }
    case GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_FRAGMENTED:
        break; // a value mapping contributes exactly one option
    case GED_MAPPING_TABLE_ENTRY_TYPE_NO_MAPPING:
        break; // nothing to count for unmapped fields
    default:
        GEDASSERT(0);
    }
    return true;
}
1075
1076
CountCompactionTableEntry(uint64_t & val,const uint64_t & valMask,const uint32_t tableSize,ged_compaction_table_t table,unsigned int & count) const1077 bool GEDIns::CountCompactionTableEntry(uint64_t& val, const uint64_t& valMask, const uint32_t tableSize,
1078 ged_compaction_table_t table, unsigned int& count) const
1079 {
1080 uint32_t counter = 0;
1081 GEDASSERT(0 != tableSize);
1082 GEDASSERT(tableSize < GED_MAX_ENTRIES_IN_COMPACT_TABLE); // sanity check
1083 val |= valMask;
1084 for (uint32_t i = 0; i < tableSize; ++i)
1085 {
1086 if ((table[i] | valMask) == val)
1087 {
1088 ++counter;
1089 }
1090 }
1091 count *= counter;
1092 return (0 != counter);
1093 }
1094
1095
BuildAllCompactedFormats(unsigned char * compactBytesArray,const unsigned int size)1096 bool GEDIns::BuildAllCompactedFormats(unsigned char* compactBytesArray, const unsigned int size)
1097 {
1098 const ged_ins_decoding_table_t compactTable = GetCurrentModelData().opcodeTables[_opcode].compactDecoding; // map to this
1099 if (NULL == compactTable) return false; // the instruction doesn't have a compact format
1100
1101 const ged_compact_mapping_table_t mappingTable = GetCurrentModelData().opcodeTables[_opcode].compactMapping;
1102 GEDASSERT(NULL != mappingTable);
1103
1104 // Traverse all the compact instruction fields. For every valid non-indexed field, collect its value from the native instruction
1105 // and set that value in the compact instruction template.
1106 unsigned char compactBytesTemplate[GED_COMPACT_INS_SIZE] = { 0 };
1107 for (uint32_t i = 0; i < GetCurrentModelData().numberOfInstructionFields; ++i)
1108 {
1109 GEDASSERT(compactTable[i]._entryType < GED_TABLE_ENTRY_TYPE_SIZE); // verify that this is a valid entry
1110 if (GED_TABLE_ENTRY_TYPE_NOT_SUPPORTED == compactTable[i]._entryType) continue; // this field is not supported in this format
1111 if (!CollectCurrentValueField(compactTable, mappingTable, i, compactBytesTemplate)) return false;
1112 }
1113 SetCompact(compactBytesTemplate);
1114
1115 // Traverse all the compact instruction fields. For every valid indexed field, collect all possible options.
1116 // Basically, create a Cartesian product of all possible indexes which create a valid compact encoding.
1117 unsigned char orMask[GED_NATIVE_INS_SIZE] = { 0 }; // used in CollectCurrentMappedFields, see documentation therein for its usage
1118 BuildNativeOrMask(orMask); // prepare the or-mask
1119 bool succeeded = true;
1120 vector<vector<unsigned char> > compactBytesIndexedVector = CollectCurrentMappedFields(compactTable, mappingTable, orMask, 0,
1121 succeeded);
1122 if (!succeeded) return false;
1123
1124 // Merge the mapped values with the indexed ones
1125 memset(compactBytesArray, 0x0, size); // clear the entire preallocated buffer
1126 unsigned char* ptr = compactBytesArray;
1127 for (uint32_t i = 0; i < size / GED_COMPACT_INS_SIZE; ++i)
1128 {
1129 for (uint32_t j = 0; j < GED_NUM_OF_COMPACT_INS_DWORDS; ++j)
1130 {
1131 vector<unsigned char>* tmpVecPtr = &compactBytesIndexedVector[i];
1132 unsigned char* tmpWordPtr = &((*tmpVecPtr)[0]);
1133 ((uint32_t*)ptr)[j] = ((uint32_t*)compactBytesTemplate)[j] | ((uint32_t*)tmpWordPtr)[j];
1134 }
1135 ptr += GED_COMPACT_INS_SIZE;
1136 }
1137 return true;
1138
1139 // Set an arbitrary valid encoding into the instruction's compact bytes.
1140 memcpy_s(_compactBytes, GED_COMPACT_INS_SIZE, compactBytesArray, GED_COMPACT_INS_SIZE);
1141 SetCompactValid();
1142 return true;
1143 }
1144
1145
// Collect a single *value-mapped* compact field from the native instruction bytes and set it in
// "buf" (a compact-instruction-sized buffer). Index-mapped and unmapped fields are skipped here —
// they are enumerated separately by CollectCurrentMappedFields.
// Returns false if the field's mapping entry is invalid or a fragmented value cannot be collected.
bool GEDIns::CollectCurrentValueField(const ged_ins_decoding_table_t compactTable, const ged_compact_mapping_table_t mappingTable,
                                      const /* GED_INS_FIELD */ uint32_t field, unsigned char* buf)
{
    GEDASSERT(NULL != compactTable);
    GEDASSERT(NULL != mappingTable);

    // Get the mapping table entry for this field.
    static const unsigned char validBits[GED_NATIVE_INS_SIZE] = { 0 }; // assume that all the native instruction fields are valid
    const ged_compact_mapping_entry_t* mappingEntry = GetCompactionMappingEntry(mappingTable, field, validBits);
    if (NULL == mappingEntry) return false; // when building compact from native but the mapping is invalid
    GEDASSERT(mappingEntry->_field == field);

    // Collect the compact field from the native field(s).
    uint32_t val = MAX_UINT32_T;
    switch (mappingEntry->_entryType)
    {
    case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_CONSECUTIVE:
        return true; // index fields are handled by CollectCurrentMappedFields
    case GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_CONSECUTIVE:
        val = ExtractConsecutiveEntryValue(_nativeBytes, mappingEntry->_consecutive._to);
        break;
    case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_FRAGMENTED:
        return true; // index fields are handled by CollectCurrentMappedFields
    case GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_FRAGMENTED:
        if (!CollectFragmentedEntryDWValue(val, _nativeBytes, mappingEntry)) return false;
        break;
    case GED_MAPPING_TABLE_ENTRY_TYPE_NO_MAPPING:
        return true; // nothing to collect
    default:
        GEDASSERT(0);
    }

    // Set the field in the compact bytes. The given value is already the raw value, no need to convert it if it has an enumeration.
    GEDFORASSERT(GED_RETURN_VALUE ret = )
        SetField(buf, compactTable, field, GED_VALUE_TYPE_ENCODED, val);
    GEDASSERT(GED_RETURN_VALUE_SUCCESS == ret);
    return true;
}
1184
1185
CollectCurrentMappedFields(const ged_ins_decoding_table_t compactTable,const ged_compact_mapping_table_t mappingTable,const unsigned char * const orMask,uint32_t field,bool & succeeded)1186 vector<vector<unsigned char> > GEDIns::CollectCurrentMappedFields(const ged_ins_decoding_table_t compactTable,
1187 const ged_compact_mapping_table_t mappingTable,
1188 const unsigned char* const orMask,
1189 /* GED_INS_FIELD */ uint32_t field, bool& succeeded)
1190 {
1191 // Collecting all mapped fields algorithm:
1192 //
1193 // CollectCurrentMappedFields is a backtracking recursion designed to generate a Cartesian product of all mapped fields.
1194 // The slight difference from a regular AxBxCx... Cartesian product where each element is on its own in the n-tuple, here each
1195 // element in the n-tuple consists of fields and their location. A is a field, set into its position in the compacted bytes by
1196 // calling SetField()
1197 //
1198 // How the recursion works:
1199 // Stop condition: when finished traversing all fields, return a vector with a single, empty encoding.
1200 // Recursion step:
1201 // 1 Declare temporary vector of compact instruction "res"
1202 // 2 For each Compact Encoding "it" in CollectCurrentMappedFields(..., field + 1, ...):
1203 // 2.1 For each valid index "compactedTableIndex" in the compacted table:
1204 // 2.1.1 Create a buffer "buf" and copy the compact encoding of "it" to it
1205 // 2.1.2 Set the compactedTableIndex in the appropriate location in buf
1206 // 2.1.3 Push "buf" into the vector of compacted instructions "res".
1207 // 3 Return "res".
1208
1209 GEDASSERT(NULL != compactTable);
1210 GEDASSERT(NULL != mappingTable);
1211 GEDASSERT(field < GetCurrentModelData().numberOfInstructionFields);
1212
1213 static const unsigned char validBits[GED_NATIVE_INS_SIZE] = { 0 }; // assume that all the native instruction fields are valid
1214 const ged_compact_mapping_entry_t* mappingEntry;
1215
1216 // Locate the next Index-mapped field.
1217 while (true)
1218 {
1219 if (GetCurrentModelData().numberOfInstructionFields == field)
1220 {
1221 // Stop condition.
1222 vector<vector<unsigned char> > stubVector;
1223 vector<unsigned char> stubEncoding(8); // insert an empty encoding to the vector
1224 stubVector.push_back(stubEncoding);
1225 return stubVector;
1226 }
1227 if (GED_TABLE_ENTRY_TYPE_NOT_SUPPORTED == compactTable[field]._entryType)
1228 {
1229 ++field;
1230 continue;
1231 }
1232 mappingEntry = GetCompactionMappingEntry(mappingTable, field, validBits);
1233 GEDASSERT(NULL != mappingTable); // this is potentially wrong, see other GetCompactionMappingEntry "if" checks
1234 if (GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_FRAGMENTED == mappingEntry->_entryType ||
1235 GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_CONSECUTIVE == mappingEntry->_entryType)
1236 {
1237 break; // Handle index fields - enter the body of the function.
1238 }
1239 ++field;
1240 }
1241
1242 // Step 1
1243 vector<vector<unsigned char> > res;
1244
1245 // Step 2
1246 vector<vector<unsigned char> > v = CollectCurrentMappedFields(compactTable, mappingTable, orMask, field + 1, succeeded);
1247 if (!succeeded) return v;
1248
1249 for (vector<vector<unsigned char> >::const_iterator it = v.begin(); it != v.end(); ++it)
1250 {
1251 switch (mappingEntry->_entryType)
1252 {
1253 case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_CONSECUTIVE:
1254 {
1255 uint32_t val = MAX_UINT32_T;
1256 uint64_t qwval = (uint64_t)ExtractConsecutiveEntryValue(_nativeBytes, mappingEntry->_consecutive._to);
1257 const uint64_t valMask = (uint64_t)ExtractConsecutiveEntryValue(orMask, mappingEntry->_consecutive._to);
1258 GEDASSERT(GED_TABLE_ENTRY_TYPE_NEXT_TABLE != compactTable[field]._entryType); // index fields must be explicit
1259 GEDASSERT(MAX_UINT32_T > MaxValue(compactTable[field]));
1260 const uint32_t tableSize = BitsToNumOfValues(compactTable[field]._bitSize); // size of the compaction table
1261 GEDASSERT(NULL != mappingEntry->_compactionTable);
1262 GEDASSERT(0 != tableSize);
1263 GEDASSERT(tableSize < GED_MAX_ENTRIES_IN_COMPACT_TABLE); // sanity check
1264 qwval |= valMask;
1265 // Step 2.1
1266 for (unsigned int compactedTableIndex = 0; compactedTableIndex < tableSize; ++compactedTableIndex)
1267 {
1268 if ((mappingEntry->_compactionTable[compactedTableIndex] | valMask) == qwval)
1269 {
1270 val = (uint32_t)compactedTableIndex;
1271 unsigned char buf[GED_COMPACT_INS_SIZE];
1272 // Step 2.1.1
1273 std::copy(it->begin(), it->end(), buf);
1274 // Step 2.1.2
1275 GEDFORASSERT(GED_RETURN_VALUE ret = )
1276 SetField(buf, compactTable, field, GED_VALUE_TYPE_ENCODED, val);
1277 GEDASSERT(GED_RETURN_VALUE_SUCCESS == ret);
1278 // Step 2.1.3
1279 res.push_back(vector<unsigned char>(buf, buf + GED_COMPACT_INS_SIZE * sizeof(buf[0])));
1280 }
1281 }
1282 if (MAX_UINT32_T == val) succeeded = false;
1283 break;
1284 }
1285 case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_FRAGMENTED:
1286 {
1287 uint32_t val = MAX_UINT32_T;
1288 uint64_t qwval = 0;
1289 uint64_t tempValMask = 0;
1290 if (!CollectFragmentedEntryQWValue(qwval, _nativeBytes, mappingEntry)) succeeded = false;
1291 if (!CollectFragmentedEntryQWValue(tempValMask, orMask, mappingEntry)) succeeded = false;
1292 if (!succeeded) break;
1293 const uint64_t valMask = tempValMask;
1294 GEDASSERT(GED_TABLE_ENTRY_TYPE_NEXT_TABLE != compactTable[field]._entryType); // index fields must be explicit
1295 GEDASSERT(MAX_UINT32_T > MaxValue(compactTable[field]));
1296 const uint32_t tableSize = BitsToNumOfValues(compactTable[field]._bitSize); // size of the compaction table
1297 GEDASSERT(NULL != mappingEntry->_compactionTable);
1298 GEDASSERT(0 != tableSize);
1299 GEDASSERT(tableSize < GED_MAX_ENTRIES_IN_COMPACT_TABLE); // sanity check
1300 qwval |= valMask;
1301 // Step 2.1
1302 for (unsigned int compactedTableIndex = 0; compactedTableIndex < tableSize; ++compactedTableIndex)
1303 {
1304 if ((mappingEntry->_compactionTable[compactedTableIndex] | valMask) == qwval)
1305 {
1306 val = (uint32_t)compactedTableIndex;
1307 unsigned char buf[GED_COMPACT_INS_SIZE];
1308 // Step 2.1.1
1309 std::copy(it->begin(), it->end(), buf);
1310 // Step 2.1.2
1311 GEDFORASSERT(GED_RETURN_VALUE ret = )
1312 SetField(buf, compactTable, field, GED_VALUE_TYPE_ENCODED, val);
1313 GEDASSERT(GED_RETURN_VALUE_SUCCESS == ret);
1314 // Step 2.1.3
1315 res.push_back(vector<unsigned char>(buf, buf + GED_COMPACT_INS_SIZE * sizeof(buf[0])));
1316 }
1317 }
1318 if (MAX_UINT32_T == val) succeeded = false;
1319 break;
1320 }
1321 default:
1322 GEDASSERT(0);
1323 }
1324 }
1325 // Step 3.
1326 return res;
1327 }
1328 #endif // GED_VALIDATION_API
1329
1330
// Collect a single compact field from the native instruction bytes and set it in _compactBytes.
// For index-mapped fields the collected native bits are translated back into a compaction-table
// index (the first matching entry wins). Returns false if the mapping entry is invalid or no
// compaction-table entry matches, i.e. the instruction cannot be compacted.
bool GEDIns::CollectCurrentField(const ged_ins_decoding_table_t compactTable, const ged_compact_mapping_table_t mappingTable,
                                 const unsigned char* const orMask, const /* GED_INS_FIELD */ uint32_t field)
{
    GEDASSERT(NULL != compactTable);
    GEDASSERT(NULL != mappingTable);

    // Get the mapping table entry for this field.
    static const unsigned char validBits[GED_NATIVE_INS_SIZE] = { 0 }; // assume that all the native instruction fields are valid
    const ged_compact_mapping_entry_t* mappingEntry = GetCompactionMappingEntry(mappingTable, field, validBits);
    if (NULL == mappingEntry) return false; // when building compact from native but the mapping is invalid
    GEDASSERT(mappingEntry->_field == field);

    // Collect the compact field from the native field(s).
    uint32_t val = MAX_UINT32_T;
    switch (mappingEntry->_entryType)
    {
    case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_CONSECUTIVE:
    {
        // This is an index field, so the actual value to be encoded is an index in the field's compaction table. Try to find a
        // compaction table entry that matches the collected value. If such an entry is found, "val" is changed to that entry's
        // index, otherwise the function returns FALSE without setting the field.

        // If a certain bit in a compaction table entry is set but it is mapped to a reserved field in the native instruction, it
        // will be cleared in the native instruction. As a result it will also be cleared in the collected value, which means that
        // the collected value will not match the compaction table entry. To overcome this, an or-mask of the reserved bits is
        // applied to the collected value and to the compaction table entries when they are being compared. This may result in
        // several compaction table entries that match the collected value, in which case the first entry found will be returned.

        uint64_t qwval = (uint64_t)ExtractConsecutiveEntryValue(_nativeBytes, mappingEntry->_consecutive._to);
        const uint64_t valMask = (uint64_t)ExtractConsecutiveEntryValue(orMask, mappingEntry->_consecutive._to);
        GEDASSERT(GED_TABLE_ENTRY_TYPE_NEXT_TABLE != compactTable[field]._entryType); // index fields must be explicit
        GEDASSERT(MAX_UINT32_T > MaxValue(compactTable[field]));
        const uint32_t tableSize = BitsToNumOfValues(compactTable[field]._bitSize); // size of the compaction table
        GEDASSERT(NULL != mappingEntry->_compactionTable);
        // On success, qwval is replaced by the index of the matching compaction-table entry.
        if (!FindCompactionTableEntry(qwval, valMask, tableSize, mappingEntry->_compactionTable)) return false;
        GEDASSERT(MAX_UINT32_T >= qwval);
        val = (uint32_t)qwval;
        break;
    }
    case GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_CONSECUTIVE:
        val = ExtractConsecutiveEntryValue(_nativeBytes, mappingEntry->_consecutive._to);
        break;
    case GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_FRAGMENTED:
    {
        // See documentation in the GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_CONSECUTIVE case above.
        uint64_t qwval = 0;
        uint64_t tempValMask = 0;
        if (!CollectFragmentedEntryQWValue(qwval, _nativeBytes, mappingEntry)) return false;
        if (!CollectFragmentedEntryQWValue(tempValMask, orMask, mappingEntry)) return false;
        const uint64_t valMask = tempValMask;
        GEDASSERT(GED_TABLE_ENTRY_TYPE_NEXT_TABLE != compactTable[field]._entryType); // index fields must be explicit
        GEDASSERT(MAX_UINT32_T > MaxValue(compactTable[field]));
        const uint32_t tableSize = BitsToNumOfValues(compactTable[field]._bitSize); // size of the compaction table
        GEDASSERT(NULL != mappingEntry->_compactionTable);
        if (!FindCompactionTableEntry(qwval, valMask, tableSize, mappingEntry->_compactionTable)) return false;
        GEDASSERT(MAX_UINT32_T >= qwval);
        val = (uint32_t)qwval;
        break;
    }
    case GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_FRAGMENTED:
        if (!CollectFragmentedEntryDWValue(val, _nativeBytes, mappingEntry)) return false;
        break;
    case GED_MAPPING_TABLE_ENTRY_TYPE_NO_MAPPING:
        return true; // nothing to collect
    default:
        GEDASSERT(0);
    }

    // Set the field in the compact bytes. The given value is already the raw value, no need to convert it if it has an enumeration.
    GEDFORASSERT(GED_RETURN_VALUE ret = )
        SetField(_compactBytes, compactTable, field, GED_VALUE_TYPE_ENCODED, val);
    GEDASSERT(GED_RETURN_VALUE_SUCCESS == ret);
    return true;
}
1405
1406
CollectFragmentedEntryDWValue(uint32_t & fullVal,const unsigned char * bytes,const ged_compact_mapping_entry_t * mappingEntry) const1407 bool GEDIns::CollectFragmentedEntryDWValue(uint32_t& fullVal, const unsigned char* bytes, const ged_compact_mapping_entry_t* mappingEntry) const
1408 {
1409 GEDASSERT(NULL != bytes);
1410 GEDASSERT(NULL != mappingEntry);
1411 GEDASSERT(GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_FRAGMENTED == mappingEntry->_entryType ||
1412 GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_FRAGMENTED == mappingEntry->_entryType);
1413 GEDASSERT(mappingEntry->_fragmented._numOfMappingFragments > 1);
1414
1415 // The field is fragmented, so all fragments will eventually be gathered in fullVal.
1416 fullVal = 0;
1417 for (unsigned int i = 0; i < mappingEntry->_fragmented._numOfMappingFragments; ++i)
1418 {
1419 GEDASSERT(0 == mappingEntry->_fragmented._fragments[i]._from._dwordIndex); // the mapping source may only have 32 bits
1420 uint32_t fragmentVal = 0;
1421 if (!CollectFragmentValue(fragmentVal, bytes, mappingEntry->_fragmented._fragments[i])) return false;
1422 fullVal |= fragmentVal;
1423 }
1424 return true;
1425 }
1426
1427
CollectFragmentedEntryQWValue(uint64_t & val,const unsigned char * bytes,const ged_compact_mapping_entry_t * mappingEntry) const1428 bool GEDIns::CollectFragmentedEntryQWValue(uint64_t& val, const unsigned char* bytes, const ged_compact_mapping_entry_t* mappingEntry) const
1429 {
1430 GEDASSERT(NULL != bytes);
1431 GEDASSERT(NULL != mappingEntry);
1432 GEDASSERT(GED_MAPPING_TABLE_ENTRY_TYPE_INDEX_MAPPING_FRAGMENTED == mappingEntry->_entryType ||
1433 GED_MAPPING_TABLE_ENTRY_TYPE_VALUE_MAPPING_FRAGMENTED == mappingEntry->_entryType);
1434 GEDASSERT(mappingEntry->_fragmented._numOfMappingFragments > 1);
1435
1436 // The field is fragmented, so all fragments will eventually be gathered in fullVal. A fragment can't be larger than 32 bits, but
1437 // the full value may be an entry in a compaction table, which may be 64-bit long.
1438 val = 0;
1439 for (unsigned int i = 0; i < mappingEntry->_fragmented._numOfMappingFragments; ++i)
1440 {
1441 GEDASSERT(mappingEntry->_fragmented._fragments[i]._from._dwordIndex < 2); // the mapping source may only have 64 bits
1442 uint32_t fragmentVal = 0;
1443 if (!CollectFragmentValue(fragmentVal, bytes, mappingEntry->_fragmented._fragments[i])) return false;
1444 (reinterpret_cast<uint32_t*>(&val))[mappingEntry->_fragmented._fragments[i]._from._dwordIndex] |= fragmentVal;
1445 }
1446 return true;
1447 }
1448
1449
// Extract a single mapping fragment from the instruction bytes (its "to" position) and shift it into its place within
// the full field value (its "from" position). Returns false if the extracted bits contradict the fragment's mapping
// type, i.e. the bytes cannot be a valid compaction.
// val     - (out) the fragment's value, relocated to its position within the full value
// bytes   - the instruction bytes to extract the fragment from
// mapping - the fragment's compact-mapping descriptor
bool GEDIns::CollectFragmentValue(uint32_t& val, const unsigned char* bytes, const ged_compact_mapping_fragment_t& mapping) const
{
    // Load the dword that holds the fragment and isolate the fragment's bits.
    val = ((uint32_t*)bytes)[mapping._to._dwordIndex];
    val &= mapping._to._bitMask;
    // NOTE(review): for GED_COMPACT_MAPPING_TYPE_FIXED this early-out accepts a zero value without comparing it to the
    // expected value below - presumably fixed fragments never expect a non-zero value when the masked source is zero;
    // confirm against the mapping tables.
    if (0 == val) return true; // early out - nothing to do
    // Relocate the fragment from its encoded position to its position within the full value.
    const int8_t shift = mapping._to._shift - mapping._from._shift;
    if (shift > 0)
    {
        val >>= shift;
    }
    else if (shift < 0)
    {
        val <<= abs(shift);
    }

    // For 1-to-1 mappings there is nothing more to do.
    if (GED_COMPACT_MAPPING_TYPE_1x1 == mapping._mappingType) return true;
    if (GED_COMPACT_MAPPING_TYPE_FIXED == mapping._mappingType)
    {
        // For fixed mappings the expected value is (apparently) carried in _from._lowBit; accept only an exact match.
        return (mapping._from._lowBit == val);
    }
    // Now handle the repped mapping case.
    GEDASSERT(GED_COMPACT_MAPPING_TYPE_REP == mapping._mappingType);
    const uint8_t fromSize = FragmentSize(mapping._from);
    const uint8_t toSize = FragmentSize(mapping._to);
    GEDASSERT(fromSize > 0); // verify that the source size is non-zero
    GEDASSERT(toSize >= fromSize); // verify that the target is at least as wide as the source
    GEDASSERT(0 == (toSize % fromSize)); // verify that the target is a whole multiple of the source
    GEDASSERT(0 <= mapping._from._shift); // verify that the shifting amount is non-negative (in order to shift left)
    // Validate the value is repeated by its substring.
    const uint32_t maxSourceValue = MaxFragmentValue(mapping._from) << mapping._from._shift; // shift the max value to its position
    uint32_t shiftedMask = maxSourceValue;
    uint32_t shiftedRep = val & maxSourceValue;
    // TODO: optimization opportunity: if FragmentSize(fragment) is 1 (which right now, always is), it's possible to simply
    // compare with all ones (using MaxGragmentValue(mapping._to)) or all-zeros.
    // The current method used is a general ways, which uses loops and thus slower.
    // Walk the target in fromSize-wide steps, verifying every slice repeats the source pattern.
    for (unsigned int i = 0; i < toSize; i += fromSize)
    {
        if (((val & shiftedMask) != shiftedRep)) return false;
        shiftedRep <<= fromSize;
        shiftedMask <<= fromSize;
    }
    // Keep only the source-pattern bits as the collected value.
    val &= maxSourceValue;
    return true;
}
1495
1496
FindCompactionTableEntry(uint64_t & val,const uint64_t & valMask,const uint32_t tableSize,ged_compaction_table_t table) const1497 bool GEDIns::FindCompactionTableEntry(uint64_t& val, const uint64_t& valMask, const uint32_t tableSize,
1498 ged_compaction_table_t table) const
1499 {
1500 GEDASSERT(0 != tableSize);
1501 GEDASSERT(tableSize < GED_MAX_ENTRIES_IN_COMPACT_TABLE); // sanity check
1502 val |= valMask;
1503 for (uint32_t i = 0; i < tableSize; ++i)
1504 {
1505 if ((table[i] | valMask) == val)
1506 {
1507 val = i;
1508 return true;
1509 }
1510 }
1511 return false;
1512 }
1513
1514
// Apply the native-encoding OR/AND masks mandated by the current opcode to _nativeBytes, then mark the instruction as
// native-encoded. A mask may depend on the values of other native fields, in which case intermediate tables are
// traversed using those field values as keys. Returns early (without marking the instruction as encoded) if a keying
// field turns out to be invalid, which can only happen for illegally encoded instructions.
void GEDIns::ApplyNativeEncodingMasks()
{
# if GED_EXPERIMENTAL
    if (!ShouldApplyEncodingMasks())
    {
        return;
    }
# endif // GED_EXPERIMENTAL
    const ged_instruction_masks_table_t topLevelTable = GetCurrentModelData().opcodeTables[_opcode].nativeEncodingMasks;
    GEDASSERT(NULL != topLevelTable);

    // Iterate the top-level entries until the NO_MASKS terminator.
    for (unsigned int i = 0; GED_MASKS_TABLE_ENTRY_TYPE_NO_MASKS != topLevelTable[i]._entryType; ++i)
    {
        // Traverse the intermediate tables (if necessary).
        unsigned int tableIndex = i;
        ged_instruction_masks_table_t table = topLevelTable;
        while (GED_MASKS_TABLE_ENTRY_TYPE_NEXT_TABLE == table[tableIndex]._entryType)
        {
            const ged_instruction_masks_next_table_t* nextTable = &table[tableIndex]._nextTable;
            GED_RETURN_VALUE ret = GED_RETURN_VALUE_INVALID_FIELD;
            tableIndex = GetField<uint32_t>(_nativeBytes, _decodingTable, nextTable->_tableKey, GED_VALUE_TYPE_ENCODED, ret);

            // GetField is expected to succeed since the models ensure that the dependent fields are always valid. However, we need to
            // account for invalid instructions, i.e. instructions with an illegal encoding. Such instructions may have invalid values
            // for valid fields. If a dependee field is in itself a dependent field, it may even be an invalid field in the given
            // (poorly encoded) instruction.
            if (GED_RETURN_VALUE_INVALID_FIELD == ret || GED_RETURN_VALUE_INVALID_VALUE == ret) return;
            GEDASSERT(GED_RETURN_VALUE_SUCCESS == ret);
            table = nextTable->_tablePtr;
            GEDASSERT(NULL != table);
        }
        if (GED_MASKS_TABLE_ENTRY_TYPE_NO_MASKS == table[tableIndex]._entryType) continue; // no mask necessary

        // Verify that this is a masks entry.
        GEDASSERTM(GED_MASKS_TABLE_ENTRY_TYPE_MASKS == table[tableIndex]._entryType, DecStr(table[tableIndex]._entryType));
        // Force the mandated bits dword by dword: the OR mask sets bits that must be one, the AND mask clears bits
        // that must be zero.
        for (unsigned int dword = 0; dword < GED_NUM_OF_NATIVE_INS_DWORDS; ++dword)
        {
            ((uint32_t*)_nativeBytes)[dword] |= ((uint32_t*)(table[tableIndex]._masks._or))[dword];
            ((uint32_t*)_nativeBytes)[dword] &= ((uint32_t*)(table[tableIndex]._masks._and))[dword];
        }
    }
    SetNativeEncoded();
}
1558
1559
ApplyCompactEncodingMasks(unsigned char * compactBytes)1560 void GEDIns::ApplyCompactEncodingMasks(unsigned char* compactBytes)
1561 {
1562 const ged_instruction_masks_table_t topLevelTable = GetCurrentModelData().opcodeTables[_opcode].compactEncodingMasks;
1563 GEDASSERT(NULL != topLevelTable);
1564
1565 for (unsigned int i = 0; GED_MASKS_TABLE_ENTRY_TYPE_NO_MASKS != topLevelTable[i]._entryType; ++i)
1566 {
1567 // Traverse the intermediate tables (if necessary).
1568 unsigned int tableIndex = i;
1569 ged_instruction_masks_table_t table = topLevelTable;
1570 while (GED_MASKS_TABLE_ENTRY_TYPE_NEXT_TABLE == table[tableIndex]._entryType)
1571 {
1572 const ged_instruction_masks_next_table_t* nextTable = &table[tableIndex]._nextTable;
1573 GED_RETURN_VALUE ret = GED_RETURN_VALUE_INVALID_FIELD;
1574 tableIndex = GetField<uint32_t>(_nativeBytes, _decodingTable, nextTable->_tableKey, GED_VALUE_TYPE_ENCODED, ret);
1575
1576 // GetField is expected to succeed since the models ensure that the dependent fields are always valid. However, we need to
1577 // account for invalid instructions, i.e. instructions with an illegal encoding. Such instructions may have invalid values
1578 // for valid fields. If a dependee field is in itself a dependent field, it may even be an invalid field in the given
1579 // (poorly encoded) instruction.
1580 if (GED_RETURN_VALUE_INVALID_FIELD == ret || GED_RETURN_VALUE_INVALID_VALUE == ret) return;
1581 GEDASSERT(GED_RETURN_VALUE_SUCCESS == ret);
1582 table = nextTable->_tablePtr;
1583 GEDASSERT(NULL != table);
1584 }
1585 if (GED_MASKS_TABLE_ENTRY_TYPE_NO_MASKS == table[tableIndex]._entryType) continue; // no mask necessary
1586
1587 // Verify that this is a masks entry.
1588 GEDASSERT(GED_MASKS_TABLE_ENTRY_TYPE_MASKS == table[tableIndex]._entryType);
1589 for (unsigned int dword = 0; dword < GED_NUM_OF_COMPACT_INS_DWORDS; ++dword)
1590 {
1591 ((uint32_t*)compactBytes)[dword] |= ((uint32_t*)(table[tableIndex]._masks._or))[dword];
1592 ((uint32_t*)compactBytes)[dword] &= ((uint32_t*)(table[tableIndex]._masks._and))[dword];
1593 }
1594 }
1595 SetCompactEncoded();
1596 }
1597
1598
// Fold the current opcode's native encoding masks into the caller-supplied buffer: each applicable masks-entry's OR
// mask sets bits in orMask and its AND mask clears bits. The buffer must hold GED_NUM_OF_NATIVE_INS_DWORDS dwords and
// is only ORed/ANDed into here, so the caller is expected to initialize it.
// NOTE(review): unlike ApplyNativeEncodingMasks/ApplyCompactEncodingMasks, a GetField failure here is a hard assert
// rather than an early return - presumably this is only invoked for instructions already known to be validly encoded;
// confirm with callers.
void GEDIns::BuildNativeOrMask(unsigned char* orMask) const
{
    const ged_instruction_masks_table_t topLevelTable = GetCurrentModelData().opcodeTables[_opcode].nativeEncodingMasks;
    GEDASSERT(NULL != topLevelTable);

    // Iterate the top-level entries until the NO_MASKS terminator.
    for (unsigned int i = 0; GED_MASKS_TABLE_ENTRY_TYPE_NO_MASKS != topLevelTable[i]._entryType; ++i)
    {
        // Traverse the intermediate tables (if necessary).
        unsigned int tableIndex = i;
        ged_instruction_masks_table_t table = topLevelTable;
        while (GED_MASKS_TABLE_ENTRY_TYPE_NEXT_TABLE == table[tableIndex]._entryType)
        {
            const ged_instruction_masks_next_table_t* nextTable = &table[tableIndex]._nextTable;
            GED_RETURN_VALUE ret = GED_RETURN_VALUE_INVALID_FIELD;
            tableIndex = GetField<uint32_t>(_nativeBytes, _decodingTable, nextTable->_tableKey, GED_VALUE_TYPE_ENCODED, ret);
            GEDASSERT(GED_RETURN_VALUE_SUCCESS == ret);
            table = nextTable->_tablePtr;
            GEDASSERT(NULL != table);
        }
        if (GED_MASKS_TABLE_ENTRY_TYPE_NO_MASKS == table[tableIndex]._entryType) continue; // no mask necessary

        // Verify that this is a masks entry.
        GEDASSERTM(GED_MASKS_TABLE_ENTRY_TYPE_MASKS == table[tableIndex]._entryType, DecStr(table[tableIndex]._entryType));
        // Accumulate the entry's masks into the buffer, dword by dword.
        for (unsigned int dword = 0; dword < GED_NUM_OF_NATIVE_INS_DWORDS; ++dword)
        {
            ((uint32_t*)orMask)[dword] |= ((uint32_t*)(table[tableIndex]._masks._or))[dword];
            ((uint32_t*)orMask)[dword] &= ((uint32_t*)(table[tableIndex]._masks._and))[dword];
        }
    }
}
1629
1630
GetInstructionBytes(const unsigned char * instructionBytes,int dwords) const1631 string GEDIns::GetInstructionBytes(const unsigned char* instructionBytes, int dwords) const
1632 {
1633 stringstream strm;
1634 strm << "0x" << setfill('0') << hex;
1635 for (--dwords; dwords >= 0; --dwords)
1636 {
1637 strm << setw(8) << reinterpret_cast<const uint32_t*>(instructionBytes)[dwords];
1638 }
1639 return strm.str();
1640 }
1641
1642
1643 #if GED_VALIDATION_API
RecordPadding(vector<ged_ins_field_mapping_fragment_t> & mappingFragments,const ged_ins_field_entry_t * dataEntry) const1644 bool GEDIns::RecordPadding(vector<ged_ins_field_mapping_fragment_t> &mappingFragments, const ged_ins_field_entry_t* dataEntry) const
1645 {
1646 if (dataEntry->_restrictions && (dataEntry->_restrictions[0]->_restrictionType == GED_FIELD_RESTRICTIONS_TYPE_PADDING))
1647 {
1648 const get_field_restriction_padding_t& padding = dataEntry->_restrictions[0]->_padding;
1649 const uint32_t paddingMask = padding._mask;
1650 const uint32_t paddingValue = padding._value;
1651 for (unsigned int i = 0; i < GED_DWORD_BITS; ++i)
1652 {
1653 if (0 == (paddingMask & (1 << i))) continue;
1654
1655 // Bit number i is set to one, count the length of sequential ones.
1656 unsigned int sequenceLength = 0;
1657 for (; paddingMask & (1 << i); ++i)
1658 {
1659 ++sequenceLength;
1660 }
1661
1662 // Record the padding fragment.
1663 const uint8_t lowBit = i - sequenceLength;
1664 const uint8_t mask = (1 << (sequenceLength + 1)) - 1; // starting from LSB
1665 ged_ins_field_mapping_fragment_t fixedFragment;
1666 fixedFragment._fixed = true;
1667 fixedFragment._from._lowBit = lowBit;
1668 fixedFragment._from._highBit = i - 1;
1669 fixedFragment._value = (paddingValue >> lowBit) & mask;
1670 mappingFragments.emplace_back(fixedFragment);
1671 }
1672 return true;
1673 }
1674 return false;
1675 }
1676 #endif // GED_VALIDATION_API
1677
RecordPosition(vector<ged_ins_field_mapping_fragment_t> & mappingFragments,const ged_ins_field_entry_t * dataEntry) const1678 bool GEDIns::RecordPosition(vector<ged_ins_field_mapping_fragment_t> &mappingFragments, const ged_ins_field_entry_t* dataEntry) const
1679 {
1680 bool hasFixedValue = false;
1681 switch (dataEntry->_entryType)
1682 {
1683 case GED_TABLE_ENTRY_TYPE_CONSECUTIVE:
1684 {
1685 GEDASSERT(dataEntry->_bitSize <= GED_DWORD_BITS);
1686 if (0 == dataEntry->_consecutive._position._bitMask)
1687 {
1688 // This field consists only from padded positions, which was already recorded previously.
1689 GEDASSERT(dataEntry->_restrictions);
1690 GEDASSERT(dataEntry->_restrictions[0]->_restrictionType == GED_FIELD_RESTRICTIONS_TYPE_PADDING);
1691 break;
1692 }
1693 RecordSingleFragment(mappingFragments, dataEntry->_consecutive._position);
1694 break;
1695 }
1696 case GED_TABLE_ENTRY_TYPE_FRAGMENTED:
1697 {
1698 const ged_ins_field_multiple_fragments_t& fragmentedPosition = dataEntry->_fragmented;
1699 const uint32_t numOfFragments = fragmentedPosition._numOfPositionFragments;
1700 for (uint8_t i = 0; i < numOfFragments; ++i)
1701 {
1702 RecordSingleFragment(mappingFragments, fragmentedPosition._fragments[i]);
1703 }
1704 break;
1705 }
1706 case GED_TABLE_ENTRY_TYPE_FIXED_VALUE:
1707 {
1708 GEDASSERT(dataEntry->_bitSize <= GED_DWORD_BITS);
1709 ged_ins_field_mapping_fragment_t fixedFragment;
1710 fixedFragment._fixed = true;
1711 fixedFragment._from._lowBit = 0;
1712 fixedFragment._from._highBit = dataEntry->_bitSize - 1;
1713 fixedFragment._value = dataEntry->_fixed._value;
1714 mappingFragments.emplace_back(fixedFragment);
1715 hasFixedValue = true;
1716 break;
1717 }
1718 default:
1719 GEDASSERT(0);
1720 }
1721 GEDASSERT(!mappingFragments.empty());
1722 return hasFixedValue;
1723 }
1724
1725
RecordSingleFragment(vector<ged_ins_field_mapping_fragment_t> & mappingFragments,const ged_ins_field_position_fragment_t & position) const1726 void GEDIns::RecordSingleFragment(vector<ged_ins_field_mapping_fragment_t> &mappingFragments,
1727 const ged_ins_field_position_fragment_t &position) const
1728 {
1729 const uint8_t normalizedLowBit = position._lowBit - position._dwordIndex * GED_DWORD_BITS - position._shift;
1730 ged_ins_field_mapping_fragment_t fragment;
1731 fragment._fixed = false;
1732 fragment._from._lowBit = normalizedLowBit;
1733 fragment._from._highBit = normalizedLowBit + position._highBit - position._lowBit;
1734 fragment._to = position;
1735 mappingFragments.emplace_back(fragment);
1736 }
1737
1738
MergeFragments(vector<ged_ins_field_mapping_fragment_t> & mappingFragments) const1739 void GEDIns::MergeFragments(vector<ged_ins_field_mapping_fragment_t> &mappingFragments) const
1740 {
1741 for (auto iter = mappingFragments.begin(); iter != mappingFragments.end(); ++iter)
1742 {
1743 if (mappingFragments.end() == iter + 1) break;
1744
1745 const auto nextIter = next(iter);
1746 if ((iter->_from._highBit + 1 == nextIter->_from._lowBit) &&
1747 (iter->_fixed == nextIter->_fixed) &&
1748 (iter->_to._highBit + 1 == nextIter->_to._lowBit))
1749 {
1750 // Merge the next cell into current
1751 iter->_from._highBit = nextIter->_from._highBit;
1752 iter->_to._highBit = nextIter->_to._highBit;
1753 mappingFragments.erase(nextIter);
1754 }
1755 }
1756 }
1757
1758