//===-- xray_fdr_log_writer.h ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a function call tracing system.
//
//===----------------------------------------------------------------------===//
#ifndef COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_
#define COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_

#include "xray_buffer_queue.h"
#include "xray_fdr_log_records.h"
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>

namespace __xray {

template <size_t Index> struct SerializerImpl {
  template <class Tuple,
            typename std::enable_if<
                Index < std::tuple_size<
                            typename std::remove_reference<Tuple>::type>::value,
                int>::type = 0>
  static void serializeTo(char *Buffer, Tuple &&T) {
    auto P = reinterpret_cast<const char *>(&std::get<Index>(T));
    constexpr auto Size = sizeof(std::get<Index>(T));
    internal_memcpy(Buffer, P, Size);
    SerializerImpl<Index + 1>::serializeTo(Buffer + Size,
                                           std::forward<Tuple>(T));
  }

  template <class Tuple,
            typename std::enable_if<
                Index >= std::tuple_size<typename std::remove_reference<
                             Tuple>::type>::value,
                int>::type = 0>
  static void serializeTo(char *, Tuple &&) {}
};

using Serializer = SerializerImpl<0>;
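
// As an illustration of how the serializer lays out data: each tuple element's
// bytes are copied back-to-back into the destination, with no padding between
// elements. For example (hypothetical values, not part of the API):
//
//   char Buf[12];
//   Serializer::serializeTo(Buf, std::make_tuple(int32_t{1}, uint64_t{2}));
//   // Buf now holds the 4 bytes of the int32_t followed by the 8 bytes of
//   // the uint64_t.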

template <class Tuple, size_t Index> struct AggregateSizesImpl {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<Index, Tuple>::type) +
      AggregateSizesImpl<Tuple, Index - 1>::value;
};

template <class Tuple> struct AggregateSizesImpl<Tuple, 0> {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<0, Tuple>::type);
};

template <class Tuple> struct AggregateSizes {
  static constexpr size_t value =
      AggregateSizesImpl<Tuple, std::tuple_size<Tuple>::value - 1>::value;
};
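
// For example, AggregateSizes<std::tuple<int32_t, uint64_t>>::value is 12: the
// sum of sizeof(int32_t) and sizeof(uint64_t), ignoring any padding an
// equivalent struct would carry.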

template <MetadataRecord::RecordKinds Kind, class... DataTypes>
MetadataRecord createMetadataRecord(DataTypes &&... Ds) {
  static_assert(AggregateSizes<std::tuple<DataTypes...>>::value <=
                    sizeof(MetadataRecord) - 1,
                "Metadata payload longer than metadata buffer!");
  MetadataRecord R;
  R.Type = 1;
  R.RecordKind = static_cast<uint8_t>(Kind);
  Serializer::serializeTo(R.Data,
                          std::make_tuple(std::forward<DataTypes>(Ds)...));
  return R;
}
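
// A usage sketch (hypothetical values; the production call sites live in the
// FDR runtime): a NewCPUId record carries the CPU id followed by the full TSC
// reading, so it could be built as
//
//   auto R = createMetadataRecord<MetadataRecord::RecordKinds::NewCPUId>(
//       uint16_t{3}, uint64_t{TSC});
//
// The static_assert above rejects payloads larger than the
// sizeof(MetadataRecord) - 1 data bytes a metadata record can hold.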

class FDRLogWriter {
  BufferQueue::Buffer &Buffer;
  char *NextRecord = nullptr;

  template <class T> void writeRecord(const T &R) {
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(&R), sizeof(T));
    NextRecord += sizeof(T);
    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(T), memory_order_acq_rel);
  }

public:
  explicit FDRLogWriter(BufferQueue::Buffer &B, char *P)
      : Buffer(B), NextRecord(P) {
    DCHECK_NE(Buffer.Data, nullptr);
    DCHECK_NE(NextRecord, nullptr);
  }

  explicit FDRLogWriter(BufferQueue::Buffer &B)
      : FDRLogWriter(B, static_cast<char *>(B.Data)) {}

  template <MetadataRecord::RecordKinds Kind, class... Data>
  bool writeMetadata(Data &&... Ds) {
    // TODO: Check boundary conditions:
    // 1) Buffer is full, and cannot handle one metadata record.
    // 2) Buffer queue is finalising.
    writeRecord(createMetadataRecord<Kind>(std::forward<Data>(Ds)...));
    return true;
  }

  template <size_t N> size_t writeMetadataRecords(MetadataRecord (&Recs)[N]) {
    constexpr auto Size = sizeof(MetadataRecord) * N;
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(Recs), Size);
    NextRecord += Size;
    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, Size, memory_order_acq_rel);
    return Size;
  }
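
  // A sketch of how the batch variant above might be used (hypothetical
  // identifiers; the real buffer preamble is assembled by the FDR runtime):
  //
  //   MetadataRecord Preamble[] = {
  //       createMetadataRecord<MetadataRecord::RecordKinds::NewBuffer>(TId),
  //       createMetadataRecord<MetadataRecord::RecordKinds::WalltimeMarker>(
  //           Seconds, Micros),
  //   };
  //   Writer.writeMetadataRecords(Preamble);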

  enum class FunctionRecordKind : uint8_t {
    Enter = 0x00,
    Exit = 0x01,
    TailExit = 0x02,
    EnterArg = 0x03,
  };

  bool writeFunction(FunctionRecordKind Kind, int32_t FuncId, int32_t Delta) {
    FunctionRecord R;
    R.Type = 0;
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    writeRecord(R);
    return true;
  }
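
  // For instance (hypothetical values), a matched entry/exit pair, with each
  // TSC delta measured from the previous record, would be emitted as:
  //
  //   Writer.writeFunction(FDRLogWriter::FunctionRecordKind::Enter, FuncId,
  //                        EntryDelta);
  //   Writer.writeFunction(FDRLogWriter::FunctionRecordKind::Exit, FuncId,
  //                        ExitDelta);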

  bool writeFunctionWithArg(FunctionRecordKind Kind, int32_t FuncId,
                            int32_t Delta, uint64_t Arg) {
    // We need to write the function record with its argument into the buffer,
    // and then atomically update the buffer extents. This ensures that any
    // reads synchronised on the buffer extents record will always see the
    // writes that happen before the atomic update.
    FunctionRecord R;
    R.Type = 0;
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    MetadataRecord A =
        createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(Arg);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&A), sizeof(A))) +
                 sizeof(A);
    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + sizeof(A),
                     memory_order_acq_rel);
    return true;
  }

  bool writeCustomEvent(int32_t Delta, const void *Event, int32_t EventSize) {
    // We write the metadata record and the custom event data into the buffer
    // first, before we atomically update the extents for the buffer. This
    // ensures that any threads reading the extents of the buffer will only
    // ever see the full metadata and custom event payload accounted for (no
    // partial writes).
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::CustomEventMarker>(
            EventSize, Delta);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;

    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }
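
  // After a successful writeCustomEvent(...), the buffer holds the
  // CustomEventMarker metadata record immediately followed by the raw event
  // payload, and Extents accounts for both:
  //
  //   [MetadataRecord R: EventSize, Delta][EventSize bytes of Event data]
  //
  // writeTypedEvent(...) below produces the same layout, with the event type
  // additionally carried in the marker record.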

  bool writeTypedEvent(int32_t Delta, uint16_t EventType, const void *Event,
                       int32_t EventSize) {
    // We do something similar when writing out typed events, see
    // writeCustomEvent(...) above for details.
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::TypedEventMarker>(
            EventSize, Delta, EventType);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;

    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated.
    atomic_thread_fence(memory_order_release);
    // Account for both the marker record and the event payload, mirroring
    // writeCustomEvent(...) above.
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }

  char *getNextRecord() const { return NextRecord; }

  void resetRecord() {
    NextRecord = reinterpret_cast<char *>(Buffer.Data);
    atomic_store(Buffer.Extents, 0, memory_order_release);
  }

  void undoWrites(size_t B) {
    DCHECK_GE(NextRecord - B, reinterpret_cast<char *>(Buffer.Data));
    NextRecord -= B;
    atomic_fetch_sub(Buffer.Extents, B, memory_order_acq_rel);
  }

}; // class FDRLogWriter
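
// A minimal end-to-end sketch, assuming a valid Buffer handed out by a
// BufferQueue (hypothetical identifiers; the production call sites live in the
// FDR runtime):
//
//   BufferQueue::Buffer Buf;
//   // ... acquire Buf from a BufferQueue ...
//   FDRLogWriter Writer(Buf);
//   Writer.writeMetadata<MetadataRecord::RecordKinds::NewBuffer>(ThreadId);
//   Writer.writeFunction(FDRLogWriter::FunctionRecordKind::Enter, FuncId,
//                        Delta);
//   Writer.writeFunction(FDRLogWriter::FunctionRecordKind::Exit, FuncId,
//                        Delta);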

} // namespace __xray

#endif // COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_