/*
 * Copyright (c) Glow Contributors. See CONTRIBUTORS file.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "Base.h"

#include "glow/Exporter/ONNXModelWriter.h"
#include "glow/Importer/ONNXIFIModelLoader.h"
#include "glow/Optimizer/GraphOptimizer/FunctionPasses.h"
#include "glow/Optimizer/GraphOptimizer/GraphOptimizer.h"

#include "llvm/Support/Format.h"
#include <glog/logging.h>

namespace glow {
namespace onnxifi {
bool GlowSaveOnnxifiModel = false;
bool GlowSaveOnnxifiIO = false;
bool GlowEnablePartialTensors = true;
bool GlowUseCustomOpsForExport = true;

extern bool GlowDumpDebugTraces;

namespace {
const char *compatibilityFunctionName = "check";

/// Get the width of the \p dtype. If dtype is not recognized or undefined, we
/// return 0 width.
unsigned getOnnxTensorDescriptorElementSize(unsigned dtype) {
  constexpr unsigned size = 17;
  const static std::array<unsigned, size> mapping{
      0u /* ONNXIFI_DATATYPE_UNDEFINED */,
      4u /* ONNXIFI_DATATYPE_FLOAT32 */,
      1u /* ONNXIFI_DATATYPE_UINT8 */,
      1u /* ONNXIFI_DATATYPE_INT8 */,
      2u /* ONNXIFI_DATATYPE_UINT16 */,
      2u /* ONNXIFI_DATATYPE_INT16 */,
      4u /* ONNXIFI_DATATYPE_INT32 */,
      8u /* ONNXIFI_DATATYPE_INT64 */,
      0u /* undefined */,
      0u /* undefined */,
      2u /* ONNXIFI_DATATYPE_FLOAT16 */,
      8u /* ONNXIFI_DATATYPE_FLOAT64 */,
      4u /* ONNXIFI_DATATYPE_UINT32 */,
      8u /* ONNXIFI_DATATYPE_UINT64 */,
      16u /* ONNXIFI_DATATYPE_COMPLEX64 */,
      32u /* ONNXIFI_DATATYPE_COMPLEX128 */,
      2u /* ONNXIFI_DATATYPE_BFLOAT16 */};
  return (dtype < size) ? mapping[dtype] : 0;
}
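
// A minimal sanity-check sketch (dtype indices follow the ONNXIFI enum values
// noted in the table above):
//   assert(getOnnxTensorDescriptorElementSize(1) == 4);  // FLOAT32
//   assert(getOnnxTensorDescriptorElementSize(10) == 2); // FLOAT16
//   assert(getOnnxTensorDescriptorElementSize(99) == 0); // out of range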

} // namespace

void saveOnnxifiModel(Function *F) {
  std::string fname = F->getName().str() + ".zip";
  LOG(INFO) << "Saving model to " << fname;
  Error err = Error::empty();
  constexpr size_t kIrVer = 7, kOpsetVer = 9;
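  // Scope the writer so its destructor runs, likely finalizing the zip file,
  // before err is inspected below.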
  {
    ONNXModelWriter onnxWR(fname, *F, kIrVer, kOpsetVer, &err, false, true,
                           GlowUseCustomOpsForExport);
  }
  if (ERR_TO_BOOL(std::move(err))) {
    LOG(ERROR) << "ONNXModelWriter failed to write model: " << fname;
  }
}

onnxStatus Backend::checkGraphCompatibility(const void *onnxModel,
                                            size_t onnxModelSize) {
  Module module;

  std::unique_ptr<ONNXIFIModelLoader> loader;
  // Note: Because we are not loading inputs as Placeholders, we must
  // explicitly disable constant folding in the loader, since the inputs will
  // be loaded as uninitialized Constants. We load them as Constants for now
  // because backends may require that some ops have Constant inputs, such as
  // a Convolution's weights. In the future we should clean this up so that we
  // load Constants and Placeholders based on the actual eventual input graph.
  auto loaderOrErr = ONNXIFIModelLoader::parse(
      onnxModel, onnxModelSize, 0 /*weightCount*/,
      nullptr /*weightDescriptors*/, module, compatibilityFunctionName,
      /* PPC */ nullptr, false /*loadInputsAsPlaceholdersForOnnx*/,
      getUseOnnx(),
      /*constFoldInLoader*/ false);
  if (loaderOrErr) {
    loader = std::move(*loaderOrErr);
  } else {
    // TODO: Use a more specific ONNXIFI error code here to denote what about
    // this operator is not supported (shape, type, etc).
    LOG(INFO)
        << "ONNXIFI checkGraphCompatibility incompatibility found when loading "
           "protobuf: "
        << ERR_TO_STRING(loaderOrErr.takeError(), /*warning*/ true);
    return ONNXIFI_STATUS_UNSUPPORTED_OPERATOR;
  }

  if (!glowBackend_) {
    return ONNXIFI_STATUS_INTERNAL_ERROR;
  }

  if (module.getFunctions().size() != 1) {
    LOG(ERROR) << "Should have exactly one Function in compatibility mode.";
    return ONNXIFI_STATUS_INTERNAL_ERROR;
  }
  Function *function = *module.getFunctions().begin();

  // Check if the function is verified as valid for Glow/the backend -- if not
  // then conservatively early return on unsupported operator.
  if (!function->verify(glowBackend_.get())) {
    LOG(INFO)
        << "ONNXIFI checkGraphCompatibility incompatibility: Glow function "
           "verification failed.";
    return ONNXIFI_STATUS_UNSUPPORTED_OPERATOR;
  }

  // Perform the normal optimization pipeline, returning an internal error if we
  // encounter an issue during optimization.
  CompilationContext cctx;
  auto optErr = glow::optimizeFunction(function, *glowBackend_, cctx);
  if (optErr) {
    LOG(ERROR) << "Error during glow::optimizeFunction():\n" +
                      ERR_TO_STRING(std::move(optErr));
    return ONNXIFI_STATUS_INTERNAL_ERROR;
  }

  const auto &nodes = function->getNodes();
  for (const auto &node : nodes) {
    if (!glowBackend_->acceptForExecution(node)) {
      LOG(INFO) << "ONNXIFI checkGraphCompatibility incompatibility, op "
                   "rejected by backend: "
                << node.getDebugDesc();
      // TODO: Use a more specific ONNXIFI error code here to denote what
      // about this operator is not supported (shape, type, etc).
      return ONNXIFI_STATUS_UNSUPPORTED_OPERATOR;
    }
  }

  return ONNXIFI_STATUS_SUCCESS;
}

bool Event::signal(onnxStatus status) {
  {
    std::lock_guard<std::mutex> guard(mutex_);
    if (fired_) {
      return false;
    }
    status_ = status;
    fired_ = true;
  }
  cond_.notify_all();
  return true;
}
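
// A minimal usage sketch (hypothetical call sites): a producer calls
// event->signal(status) exactly once, while consumers block in event->wait()
// or event->waitFor() and then read the stored status. A second signal() call
// returns false and leaves the originally stored status untouched.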

onnxStatus Event::wait() {
  std::unique_lock<std::mutex> guard(mutex_);
  cond_.wait(guard, [this] { return fired_ == true; });
  return status_;
}

std::pair<bool, onnxStatus> Event::waitFor(size_t timeoutMs) {
  DCHECK_GT(timeoutMs, 0)
      << "0 timeoutMs should instead use Event::wait to wait indefinitely";

  auto endTime =
      std::chrono::steady_clock::now() + std::chrono::milliseconds(timeoutMs);

  std::unique_lock<std::mutex> guard(mutex_);
  while (!fired_) {
    if (std::cv_status::timeout == cond_.wait_until(guard, endTime)) {
      return {/*signalled*/ false, status_};
    }
  }

  return {/*signalled*/ true, status_};
}
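
// For example, waitFor(100) returns {true, status} as soon as signal() fires,
// or {false, status_} once 100ms elapse; in the timeout case the returned
// status is simply whatever is currently stored, since signal() never set it.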

void Graph::setZeroLengthSequence(dim_t maxSeqLength) {
  Type ty(ElemKind::Int64ITy, {maxSeqLength});
  zeroLengthSequence_.reset(ty);
  zeroLengthSequence_.zero();
}
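
// For example, setZeroLengthSequence(4) leaves zeroLengthSequence_ as an
// Int64ITy tensor of shape {4} holding {0, 0, 0, 0}; adjustInputs() below
// lends this zeroed buffer to inputs that arrive with a null backing pointer.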

void Graph::bindPlaceholders(const ONNXIFIModelLoader &loader) {
  onnxInputToPlaceholder_ = loader.getInputVarsMapping();
  onnxOutputToPlaceholder_ = loader.getOutputVarsMapping();
  onnxInputNames_ = loader.getPositionalInputNames();
  onnxInputPlaceholders_.reserve(onnxInputNames_.size());
  for (const auto &i : onnxInputNames_) {
    const auto it = onnxInputToPlaceholder_.find(i);
    if (it == onnxInputToPlaceholder_.end()) {
      break;
    }
    onnxInputPlaceholders_.push_back(it->second);
  }
  if (onnxInputPlaceholders_.size() != onnxInputToPlaceholder_.size()) {
    onnxInputPlaceholders_.clear();
  }
  onnxOutputNames_ = loader.getPositionalOutputNames();
  onnxOutputPlaceholders_.reserve(onnxOutputNames_.size());
  for (const auto &i : onnxOutputNames_) {
    const auto it = onnxOutputToPlaceholder_.find(i);
    if (it == onnxOutputToPlaceholder_.end()) {
      break;
    }
    onnxOutputPlaceholders_.push_back(it->second);
  }
  if (onnxOutputPlaceholders_.size() != onnxOutputToPlaceholder_.size()) {
    onnxOutputPlaceholders_.clear();
  }
}

onnxStatus Graph::adjustInputs(uint32_t inputsCount,
                               const onnxTensorDescriptorV1 *inputDescriptors,
                               ExecutionContext *ctx) {
  // Create tensors for input placeholders
  auto &externalIOBindings = ctx->getExternalIOBindings();
  for (unsigned i = 0; i < inputsCount; ++i) {
    const auto &inOnnxTensor = inputDescriptors[i];
    auto *inOnnxBuffer = reinterpret_cast<void *>(inOnnxTensor.buffer);
    Placeholder *inPhPtr;

    if (onnxInputNames_.size() == inputsCount) {
      inPhPtr = onnxInputPlaceholders_[i];
    } else {
      auto inPhIt = onnxInputToPlaceholder_.find(inOnnxTensor.name);
      if (inPhIt == onnxInputToPlaceholder_.end()) {
        LOG(ERROR) << "Input Name Unknown: " << inOnnxTensor.name;
        return ONNXIFI_STATUS_UNIDENTIFIED_NAME;
      }
      inPhPtr = inPhIt->getValue();
    }

    std::vector<dim_t> inOnnxTensorDims(inOnnxTensor.dimensions);
    size_t inOnnxTensorSize = 1;
    for (unsigned j = 0; j < inOnnxTensor.dimensions; ++j) {
      inOnnxTensorDims[j] = inOnnxTensor.shape[j];
      inOnnxTensorSize *= inOnnxTensorDims[j];
    }

    if (inOnnxTensorSize > inPhPtr->getType()->size()) {
      std::stringstream ss;
      for (const auto j : inOnnxTensorDims) {
        ss << j << ", ";
      }
      ss << " vs ";
      auto sizes = inPhPtr->getType()->dims();
      for (const auto j : sizes) {
        ss << j << ", ";
      }
      LOG(ERROR) << "Input tensor is too large: " << inOnnxTensorSize << " vs "
                 << inPhPtr->getType()->size() << ": " << inOnnxTensor.name
                 << ", shape: " << ss.str();
      return ONNXIFI_STATUS_INVALID_SHAPE;
    }

    // Only allocate a tensor if insufficient backing storage is provided.
    const unsigned elementSize =
        getOnnxTensorDescriptorElementSize(inOnnxTensor.dataType);
    const unsigned glowElementSize = inPhPtr->getType()->getElementSize();
    if (elementSize != glowElementSize) {
      LOG(ERROR) << "Input data width (" << elementSize
                 << ") is different from glow placeholder data width ("
                 << glowElementSize << "), tensor: " << inOnnxTensor.name
                 << ", onnxifi data type: " << inOnnxTensor.dataType
                 << ", glow data type: "
                 << inPhPtr->getType()->getElementName().data();
      return ONNXIFI_STATUS_INVALID_DATATYPE;
    }
    size_t onnxBytes = inOnnxTensorSize * elementSize;
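    // Worked example: a FLOAT32 input with shape [2, 3] has
    // inOnnxTensorSize == 6, so onnxBytes == 6 * 4 == 24.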
    if (inPhPtr->dims().equals(inOnnxTensorDims)) {
      externalIOBindings.emplace_back(
          std::piecewise_construct, std::forward_as_tuple(inPhPtr),
          std::forward_as_tuple(inOnnxBuffer, inPhPtr->getType()));
    } else if (GlowEnablePartialTensors &&
               backendPtr_->getBackend().supportsPartialTensors()) {
      // We have a partial input buffer.  Create a padded unowned tensor that
      // remembers the actual size of the input.
      externalIOBindings.emplace_back(
          std::piecewise_construct, std::forward_as_tuple(inPhPtr),
          std::forward_as_tuple(inOnnxBuffer, inPhPtr->getType(), onnxBytes));
    } else if (!inOnnxBuffer && inPhPtr->getType()->size() <=
                                    zeroLengthSequence_.getType().size()) {
      externalIOBindings.emplace_back(
          std::piecewise_construct, std::forward_as_tuple(inPhPtr),
          std::forward_as_tuple((void *)(zeroLengthSequence_.getUnsafePtr()),
                                inPhPtr->getType()));
    } else {
      llvm::Optional<Tensor> inputTensorOpt =
          tensorPool_.get(inPhPtr->getType());
      if (!inputTensorOpt.hasValue()) {
        DLOG(FATAL) << "Tensorpool tensor not found for input "
                    << inOnnxTensor.name;
        return ONNXIFI_STATUS_INTERNAL_ERROR;
      }
      // We want fresh DeviceResidencyInfo for this fresh Tensor.
      externalIOBindings.emplace_back(inPhPtr,
                                      std::move(inputTensorOpt.getValue()));
      Tensor &inputTensor = externalIOBindings.back().second;
      inputTensor.resetDeviceInfo();
      // Copy the input from onnxTensorDescriptor unless it has a NULL buffer
      // pointer (which is a valid case if the tensor is empty).
      if (inOnnxBuffer) {
        memcpy(inputTensor.getUnsafePtr(), inOnnxBuffer, onnxBytes);
        // Pad remaining space with zeroes.
        memset(inputTensor.getUnsafePtr() + onnxBytes, 0,
               inputTensor.getSizeInBytes() - onnxBytes);
      } else {
        inputTensor.zero();
      }
    }
  }
  return ONNXIFI_STATUS_SUCCESS;
}

onnxStatus Graph::setIOAndRun(uint32_t inputsCount,
                              const onnxTensorDescriptorV1 *inputDescriptors,
                              uint32_t outputsCount,
                              const onnxTensorDescriptorV1 *outputDescriptors,
                              EventPtr outputEvent,
                              onnxTraceEventList *traceEvents) {
  auto ctx = glow::make_unique<ExecutionContext>();

  TraceContext *traceContext = nullptr;
  if (traceEvents || GlowDumpDebugTraces) {
    ctx->setTraceContext(glow::make_unique<TraceContext>(TraceLevel::STANDARD));
    traceContext = ctx->getTraceContext();
    traceContext->setThreadName("Onnxifi");
  }
  TRACE_EVENT_SCOPE(traceContext, TraceLevel::RUNTIME, "Onnxifi::setIOAndRun");
  TRACE_EVENT_SCOPE_NAMED(traceContext, TraceLevel::RUNTIME, "adjustInputs",
                          aiEvent);

  auto r = adjustInputs(inputsCount, inputDescriptors, ctx.get());
  if (r != ONNXIFI_STATUS_SUCCESS) {
    return r;
  }

  size_t seq = 0;
  if (GlowSaveOnnxifiIO) {
    seq = ioDumpCounter_++;
    std::stringstream ss;
    ss << "input_" << seq << ".onnx";
    std::ofstream of(ss.str(), std::ios::binary);
    if (!of) {
      LOG(ERROR) << "Cannot create input file " << ss.str();
    } else {
      ONNX_NAMESPACE::GraphProto inputG;
      for (const auto &p : ctx->getExternalIOBindings()) {
        auto *t = inputG.add_initializer();
        const auto &inputTensor = p.second;
        size_t unpaddedSize = inputTensor.getUnpaddedSizeInBytes();
        size_t tensorSize = inputTensor.getSizeInBytes();
        if (unpaddedSize == tensorSize) {
          ONNXModelWriter::writeTensor(inputTensor, t,
                                       GlowUseCustomOpsForExport);
        } else {
          // If the input is a partial tensor, then save only the part that has
          // data.
          auto ty = inputTensor.getType();
          auto dims = ty.dims().vec();
          dims[0] = dims[0] * unpaddedSize / tensorSize;
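          // E.g. a [10, 4] float tensor (tensorSize == 160 bytes) with
          // unpaddedSize == 80 bytes is saved as [5, 4]: 10 * 80 / 160 == 5.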
          const auto &resized = inputTensor.getUnowned(dims);
          ONNXModelWriter::writeTensor(resized, t, GlowUseCustomOpsForExport);
          VLOG(1) << "Writing partial tensor " << p.first->getName().str()
                  << " full size=" << inputTensor.getType().toString()
                  << " partial size=" << inputTensor.getUnpaddedSizeInBytes()
                  << " resized size=" << resized.getType().toString();
        }
        t->set_name(p.first->getName());
      }
      std::string buffer;
      inputG.SerializeToString(&buffer);
      of << buffer;
    }
  }

  TRACE_EVENT_SCOPE_END_NAMED(aiEvent);
  TRACE_EVENT_SCOPE_NAMED(traceContext, TraceLevel::RUNTIME,
                          "setOnnxifiOutputs", soEvent);

  // Create tensors for output placeholders
  auto &externalIOBindings = ctx->getExternalIOBindings();
  for (unsigned i = 0; i < outputsCount; ++i) {
    auto &outOnnxTensor =
        const_cast<onnxTensorDescriptorV1 &>(outputDescriptors[i]);
    auto *outOnnxBuffer = reinterpret_cast<void *>(outOnnxTensor.buffer);
    Placeholder *outPhPtr;

    if (outputsCount == onnxOutputNames_.size()) {
      outPhPtr = onnxOutputPlaceholders_[i];
    } else {
      auto outPhIt = onnxOutputToPlaceholder_.find(outOnnxTensor.name);
      if (outPhIt == onnxOutputToPlaceholder_.end()) {
        LOG(ERROR) << "Output name unknown: " << outOnnxTensor.name;
        return ONNXIFI_STATUS_UNIDENTIFIED_NAME;
      }
      outPhPtr = outPhIt->getValue();
    }
    // Compute the total size of the onnxifi tensor.
    std::vector<dim_t> outOnnxTensorDims(outOnnxTensor.dimensions);
    dim_t outOnnxTensorSize = 1;
    for (unsigned j = 0; j < outOnnxTensor.dimensions; ++j) {
      outOnnxTensorDims[j] = outOnnxTensor.shape[j];
      outOnnxTensorSize *= outOnnxTensorDims[j];
    }

    // Check that tensor provided by onnxifi is the correct size.
    if (!outPhPtr->dims().equals(outOnnxTensorDims)) {
      LOG(ERROR) << "Output tensor is the wrong shape: " << outOnnxTensorSize
                 << " total elements vs " << outPhPtr->getType()->size() << ": "
                 << outOnnxTensor.name;
      return ONNXIFI_STATUS_INVALID_SHAPE;
    }

    // Set the quantized output scale/offset. Do not support channelwise
    // quantized output with multiple quantization parameters for now.
    auto type = outPhPtr->getType();
    if (outOnnxTensor.quantizationParams == 1 && type->isQuantizedType()) {
      const_cast<float *>(outOnnxTensor.scales)[0] = type->getScale();
      const_cast<int32_t *>(outOnnxTensor.biases)[0] = type->getOffset();
    }

    // Create a Glow tensor backed by the memory from the provided onnxifi
    // tensor and bind it to the appropriate placeholder for the graph output.
    Tensor outputTensor(outOnnxBuffer, outPhPtr->getType());
    externalIOBindings.emplace_back(outPhPtr, std::move(outputTensor));
  }
  TRACE_EVENT_SCOPE_END_NAMED(soEvent);

  if (ctx->getTraceContext()) {
    ctx->getTraceContext()->setThreadName("Caller");
  }

  // End trace scope before calling into run. run() can trigger the completion
  // callback which deallocates ctx and traceContext. So it will no longer be
  // safe to access the trace context after calling into run().
  TRACE_EVENT_SCOPE_END();
  auto ret = run(std::move(ctx), outputEvent, traceEvents);
  if (GlowSaveOnnxifiIO) {
    // We need to wait for the execution to finish in order to extract output
    // values.
    outputEvent->wait();
    std::stringstream ss;
    ss << "output_" << seq << ".onnx";
    std::ofstream of(ss.str(), std::ios::binary);
    if (!of) {
      LOG(ERROR) << "Cannot create output file " << ss.str();
    } else {
      ONNX_NAMESPACE::GraphProto inputG;
      for (unsigned i = 0; i < outputsCount; ++i) {
        const auto &outOnnxTensor = outputDescriptors[i];
        auto *outOnnxBuffer = reinterpret_cast<void *>(outOnnxTensor.buffer);
        Placeholder *outPhPtr;
        if (outputsCount == onnxOutputNames_.size()) {
          outPhPtr = onnxOutputPlaceholders_[i];
        } else {
          auto outPhIt = onnxOutputToPlaceholder_.find(outOnnxTensor.name);
          CHECK(outPhIt != onnxOutputToPlaceholder_.end());
          outPhPtr = outPhIt->getValue();
        }
        Tensor outputTensor(outOnnxBuffer, outPhPtr->getType());
        auto *t = inputG.add_initializer();
        ONNXModelWriter::writeTensor(outputTensor, t,
                                     GlowUseCustomOpsForExport);
        t->set_name(outPhPtr->getName());
      }
      std::string buffer;
      inputG.SerializeToString(&buffer);
      of << buffer;
    }
  }

  return ret;
}

void Graph::setTraceEvents(onnxTraceEventList *traceEvents,
                           TraceContext *traceContext) {
  if (!traceEvents || !traceContext) {
    return;
  }

  /// Internally we use steady_clock, but our interface is system_clock
  /// timestamps. Do a simple conversion.
  auto steadyTS = TraceEvent::now();
  auto systemTS = std::chrono::duration_cast<std::chrono::microseconds>(
                      std::chrono::system_clock::now().time_since_epoch())
                      .count();
  // Timestamps are uint64_t, so branch rather than using abs(): always
  // subtract the smaller value from the larger one to avoid underflowing the
  // uint64_t, then negate the result if the timestamp should be moved
  // backwards.
  int64_t offset =
      steadyTS > systemTS ? -(steadyTS - systemTS) : (systemTS - steadyTS);
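  // E.g. if steadyTS == 1000 and systemTS == 4000 (microseconds, per the cast
  // above), then offset == +3000, and each steady timestamp below is shifted
  // forward by 3000 to approximate its system_clock equivalent.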
  TRACE_EVENT_SCOPE(traceContext, TraceLevel::RUNTIME,
                    "Onnxifi::setTraceEvents");

  std::vector<onnxTraceEvent *> traceEventsVec;
  for (const auto &glowTraceEvent : traceContext->getTraceEvents()) {
    auto *traceEvent = new onnxTraceEvent();
    traceEvent->eventType = glowTraceEvent.type;
    traceEvent->timestamp = glowTraceEvent.timestamp + offset;
    traceEvent->tid = glowTraceEvent.tid;
    traceEvent->duration = glowTraceEvent.duration;
    size_t nameSize = std::min(glowTraceEvent.name.size(),
                               (size_t)ONNXIFI_TRACE_EVENT_NAME_SIZE);
    strncpy(traceEvent->eventName, glowTraceEvent.name.c_str(), nameSize);
    traceEvent->eventName[nameSize] = '\0';
    traceEventsVec.push_back(traceEvent);
  }

  traceEvents->numEvents = traceEventsVec.size();
  traceEvents->traceEvents = new onnxTraceEvent *[traceEventsVec.size()];
  DCHECK(traceEvents->traceEvents);
  std::copy(traceEventsVec.begin(), traceEventsVec.end(),
            traceEvents->traceEvents);
}

void Graph::releaseTraceEvents(onnxTraceEventList *traceEvents) {
  DCHECK(traceEvents);
  for (uint64_t i = 0; i < traceEvents->numEvents; ++i) {
    onnxTraceEvent *traceEvent = traceEvents->traceEvents[i];
    delete traceEvent;
  }

  delete[] traceEvents->traceEvents;
}

Graph::Graph(BackendPtr backendPtr) : backendPtr_(backendPtr) {}

} // namespace onnxifi
} // namespace glow