1 /* Generated by the protocol buffer compiler.  DO NOT EDIT! */
2 /* Generated from: NeuralNetwork.proto */
3 
4 #ifndef PROTOBUF_C_NeuralNetwork_2eproto__INCLUDED
5 #define PROTOBUF_C_NeuralNetwork_2eproto__INCLUDED
6 
7 #include <protobuf-c/protobuf-c.h>
8 
9 PROTOBUF_C__BEGIN_DECLS
10 
11 #if PROTOBUF_C_VERSION_NUMBER < 1003000
12 # error This file was generated by a newer version of protoc-c which is incompatible with your libprotobuf-c headers. Please update your headers.
13 #elif 1003003 < PROTOBUF_C_MIN_COMPILER_VERSION
14 # error This file was generated by an older version of protoc-c which is incompatible with your libprotobuf-c headers. Please regenerate this file with a newer version of protoc-c.
15 #endif
16 
17 #include "DataStructures.pb-c.h"
18 #include "Parameters.pb-c.h"
19 
20 typedef struct _CoreML__Specification__NeuralNetwork CoreML__Specification__NeuralNetwork;
21 typedef struct _CoreML__Specification__NeuralNetworkImageScaler CoreML__Specification__NeuralNetworkImageScaler;
22 typedef struct _CoreML__Specification__NeuralNetworkMeanImage CoreML__Specification__NeuralNetworkMeanImage;
23 typedef struct _CoreML__Specification__NeuralNetworkPreprocessing CoreML__Specification__NeuralNetworkPreprocessing;
24 typedef struct _CoreML__Specification__ActivationReLU CoreML__Specification__ActivationReLU;
25 typedef struct _CoreML__Specification__ActivationLeakyReLU CoreML__Specification__ActivationLeakyReLU;
26 typedef struct _CoreML__Specification__ActivationTanh CoreML__Specification__ActivationTanh;
27 typedef struct _CoreML__Specification__ActivationScaledTanh CoreML__Specification__ActivationScaledTanh;
28 typedef struct _CoreML__Specification__ActivationSigmoid CoreML__Specification__ActivationSigmoid;
29 typedef struct _CoreML__Specification__ActivationLinear CoreML__Specification__ActivationLinear;
30 typedef struct _CoreML__Specification__ActivationSigmoidHard CoreML__Specification__ActivationSigmoidHard;
31 typedef struct _CoreML__Specification__ActivationPReLU CoreML__Specification__ActivationPReLU;
32 typedef struct _CoreML__Specification__ActivationELU CoreML__Specification__ActivationELU;
33 typedef struct _CoreML__Specification__ActivationThresholdedReLU CoreML__Specification__ActivationThresholdedReLU;
34 typedef struct _CoreML__Specification__ActivationSoftsign CoreML__Specification__ActivationSoftsign;
35 typedef struct _CoreML__Specification__ActivationSoftplus CoreML__Specification__ActivationSoftplus;
36 typedef struct _CoreML__Specification__ActivationParametricSoftplus CoreML__Specification__ActivationParametricSoftplus;
37 typedef struct _CoreML__Specification__ActivationParams CoreML__Specification__ActivationParams;
38 typedef struct _CoreML__Specification__Tensor CoreML__Specification__Tensor;
39 typedef struct _CoreML__Specification__NeuralNetworkLayer CoreML__Specification__NeuralNetworkLayer;
40 typedef struct _CoreML__Specification__BranchLayerParams CoreML__Specification__BranchLayerParams;
41 typedef struct _CoreML__Specification__LoopLayerParams CoreML__Specification__LoopLayerParams;
42 typedef struct _CoreML__Specification__LoopBreakLayerParams CoreML__Specification__LoopBreakLayerParams;
43 typedef struct _CoreML__Specification__LoopContinueLayerParams CoreML__Specification__LoopContinueLayerParams;
44 typedef struct _CoreML__Specification__CopyLayerParams CoreML__Specification__CopyLayerParams;
45 typedef struct _CoreML__Specification__GreaterThanLayerParams CoreML__Specification__GreaterThanLayerParams;
46 typedef struct _CoreML__Specification__GreaterEqualLayerParams CoreML__Specification__GreaterEqualLayerParams;
47 typedef struct _CoreML__Specification__LessThanLayerParams CoreML__Specification__LessThanLayerParams;
48 typedef struct _CoreML__Specification__LessEqualLayerParams CoreML__Specification__LessEqualLayerParams;
49 typedef struct _CoreML__Specification__EqualLayerParams CoreML__Specification__EqualLayerParams;
50 typedef struct _CoreML__Specification__NotEqualLayerParams CoreML__Specification__NotEqualLayerParams;
51 typedef struct _CoreML__Specification__LogicalAndLayerParams CoreML__Specification__LogicalAndLayerParams;
52 typedef struct _CoreML__Specification__LogicalOrLayerParams CoreML__Specification__LogicalOrLayerParams;
53 typedef struct _CoreML__Specification__LogicalXorLayerParams CoreML__Specification__LogicalXorLayerParams;
54 typedef struct _CoreML__Specification__LogicalNotLayerParams CoreML__Specification__LogicalNotLayerParams;
55 typedef struct _CoreML__Specification__BorderAmounts CoreML__Specification__BorderAmounts;
56 typedef struct _CoreML__Specification__BorderAmounts__EdgeSizes CoreML__Specification__BorderAmounts__EdgeSizes;
57 typedef struct _CoreML__Specification__ValidPadding CoreML__Specification__ValidPadding;
58 typedef struct _CoreML__Specification__SamePadding CoreML__Specification__SamePadding;
59 typedef struct _CoreML__Specification__SamplingMode CoreML__Specification__SamplingMode;
60 typedef struct _CoreML__Specification__BoxCoordinatesMode CoreML__Specification__BoxCoordinatesMode;
61 typedef struct _CoreML__Specification__WeightParams CoreML__Specification__WeightParams;
62 typedef struct _CoreML__Specification__QuantizationParams CoreML__Specification__QuantizationParams;
63 typedef struct _CoreML__Specification__LinearQuantizationParams CoreML__Specification__LinearQuantizationParams;
64 typedef struct _CoreML__Specification__LookUpTableQuantizationParams CoreML__Specification__LookUpTableQuantizationParams;
65 typedef struct _CoreML__Specification__ConvolutionLayerParams CoreML__Specification__ConvolutionLayerParams;
66 typedef struct _CoreML__Specification__Convolution3DLayerParams CoreML__Specification__Convolution3DLayerParams;
67 typedef struct _CoreML__Specification__InnerProductLayerParams CoreML__Specification__InnerProductLayerParams;
68 typedef struct _CoreML__Specification__EmbeddingLayerParams CoreML__Specification__EmbeddingLayerParams;
69 typedef struct _CoreML__Specification__EmbeddingNDLayerParams CoreML__Specification__EmbeddingNDLayerParams;
70 typedef struct _CoreML__Specification__BatchnormLayerParams CoreML__Specification__BatchnormLayerParams;
71 typedef struct _CoreML__Specification__PoolingLayerParams CoreML__Specification__PoolingLayerParams;
72 typedef struct _CoreML__Specification__PoolingLayerParams__ValidCompletePadding CoreML__Specification__PoolingLayerParams__ValidCompletePadding;
73 typedef struct _CoreML__Specification__Pooling3DLayerParams CoreML__Specification__Pooling3DLayerParams;
74 typedef struct _CoreML__Specification__GlobalPooling3DLayerParams CoreML__Specification__GlobalPooling3DLayerParams;
75 typedef struct _CoreML__Specification__PaddingLayerParams CoreML__Specification__PaddingLayerParams;
76 typedef struct _CoreML__Specification__PaddingLayerParams__PaddingConstant CoreML__Specification__PaddingLayerParams__PaddingConstant;
77 typedef struct _CoreML__Specification__PaddingLayerParams__PaddingReflection CoreML__Specification__PaddingLayerParams__PaddingReflection;
78 typedef struct _CoreML__Specification__PaddingLayerParams__PaddingReplication CoreML__Specification__PaddingLayerParams__PaddingReplication;
79 typedef struct _CoreML__Specification__ConcatLayerParams CoreML__Specification__ConcatLayerParams;
80 typedef struct _CoreML__Specification__LRNLayerParams CoreML__Specification__LRNLayerParams;
81 typedef struct _CoreML__Specification__SoftmaxLayerParams CoreML__Specification__SoftmaxLayerParams;
82 typedef struct _CoreML__Specification__SplitLayerParams CoreML__Specification__SplitLayerParams;
83 typedef struct _CoreML__Specification__AddLayerParams CoreML__Specification__AddLayerParams;
84 typedef struct _CoreML__Specification__MultiplyLayerParams CoreML__Specification__MultiplyLayerParams;
85 typedef struct _CoreML__Specification__UnaryFunctionLayerParams CoreML__Specification__UnaryFunctionLayerParams;
86 typedef struct _CoreML__Specification__UpsampleLayerParams CoreML__Specification__UpsampleLayerParams;
87 typedef struct _CoreML__Specification__ResizeBilinearLayerParams CoreML__Specification__ResizeBilinearLayerParams;
88 typedef struct _CoreML__Specification__CropResizeLayerParams CoreML__Specification__CropResizeLayerParams;
89 typedef struct _CoreML__Specification__BiasLayerParams CoreML__Specification__BiasLayerParams;
90 typedef struct _CoreML__Specification__ScaleLayerParams CoreML__Specification__ScaleLayerParams;
91 typedef struct _CoreML__Specification__LoadConstantLayerParams CoreML__Specification__LoadConstantLayerParams;
92 typedef struct _CoreML__Specification__L2NormalizeLayerParams CoreML__Specification__L2NormalizeLayerParams;
93 typedef struct _CoreML__Specification__FlattenLayerParams CoreML__Specification__FlattenLayerParams;
94 typedef struct _CoreML__Specification__ReshapeLayerParams CoreML__Specification__ReshapeLayerParams;
95 typedef struct _CoreML__Specification__PermuteLayerParams CoreML__Specification__PermuteLayerParams;
96 typedef struct _CoreML__Specification__ReorganizeDataLayerParams CoreML__Specification__ReorganizeDataLayerParams;
97 typedef struct _CoreML__Specification__SliceLayerParams CoreML__Specification__SliceLayerParams;
98 typedef struct _CoreML__Specification__ReduceLayerParams CoreML__Specification__ReduceLayerParams;
99 typedef struct _CoreML__Specification__CropLayerParams CoreML__Specification__CropLayerParams;
100 typedef struct _CoreML__Specification__AverageLayerParams CoreML__Specification__AverageLayerParams;
101 typedef struct _CoreML__Specification__MaxLayerParams CoreML__Specification__MaxLayerParams;
102 typedef struct _CoreML__Specification__MinLayerParams CoreML__Specification__MinLayerParams;
103 typedef struct _CoreML__Specification__DotProductLayerParams CoreML__Specification__DotProductLayerParams;
104 typedef struct _CoreML__Specification__MeanVarianceNormalizeLayerParams CoreML__Specification__MeanVarianceNormalizeLayerParams;
105 typedef struct _CoreML__Specification__SequenceRepeatLayerParams CoreML__Specification__SequenceRepeatLayerParams;
106 typedef struct _CoreML__Specification__SimpleRecurrentLayerParams CoreML__Specification__SimpleRecurrentLayerParams;
107 typedef struct _CoreML__Specification__GRULayerParams CoreML__Specification__GRULayerParams;
108 typedef struct _CoreML__Specification__LSTMParams CoreML__Specification__LSTMParams;
109 typedef struct _CoreML__Specification__LSTMWeightParams CoreML__Specification__LSTMWeightParams;
110 typedef struct _CoreML__Specification__UniDirectionalLSTMLayerParams CoreML__Specification__UniDirectionalLSTMLayerParams;
111 typedef struct _CoreML__Specification__BiDirectionalLSTMLayerParams CoreML__Specification__BiDirectionalLSTMLayerParams;
112 typedef struct _CoreML__Specification__CustomLayerParams CoreML__Specification__CustomLayerParams;
113 typedef struct _CoreML__Specification__CustomLayerParams__CustomLayerParamValue CoreML__Specification__CustomLayerParams__CustomLayerParamValue;
114 typedef struct _CoreML__Specification__CustomLayerParams__ParametersEntry CoreML__Specification__CustomLayerParams__ParametersEntry;
115 typedef struct _CoreML__Specification__TransposeLayerParams CoreML__Specification__TransposeLayerParams;
116 typedef struct _CoreML__Specification__BatchedMatMulLayerParams CoreML__Specification__BatchedMatMulLayerParams;
117 typedef struct _CoreML__Specification__ConcatNDLayerParams CoreML__Specification__ConcatNDLayerParams;
118 typedef struct _CoreML__Specification__SoftmaxNDLayerParams CoreML__Specification__SoftmaxNDLayerParams;
119 typedef struct _CoreML__Specification__ReverseLayerParams CoreML__Specification__ReverseLayerParams;
120 typedef struct _CoreML__Specification__ReverseSeqLayerParams CoreML__Specification__ReverseSeqLayerParams;
121 typedef struct _CoreML__Specification__LoadConstantNDLayerParams CoreML__Specification__LoadConstantNDLayerParams;
122 typedef struct _CoreML__Specification__FillLikeLayerParams CoreML__Specification__FillLikeLayerParams;
123 typedef struct _CoreML__Specification__FillStaticLayerParams CoreML__Specification__FillStaticLayerParams;
124 typedef struct _CoreML__Specification__FillDynamicLayerParams CoreML__Specification__FillDynamicLayerParams;
125 typedef struct _CoreML__Specification__WhereBroadcastableLayerParams CoreML__Specification__WhereBroadcastableLayerParams;
126 typedef struct _CoreML__Specification__SinLayerParams CoreML__Specification__SinLayerParams;
127 typedef struct _CoreML__Specification__CosLayerParams CoreML__Specification__CosLayerParams;
128 typedef struct _CoreML__Specification__TanLayerParams CoreML__Specification__TanLayerParams;
129 typedef struct _CoreML__Specification__AsinLayerParams CoreML__Specification__AsinLayerParams;
130 typedef struct _CoreML__Specification__AcosLayerParams CoreML__Specification__AcosLayerParams;
131 typedef struct _CoreML__Specification__AtanLayerParams CoreML__Specification__AtanLayerParams;
132 typedef struct _CoreML__Specification__SinhLayerParams CoreML__Specification__SinhLayerParams;
133 typedef struct _CoreML__Specification__CoshLayerParams CoreML__Specification__CoshLayerParams;
134 typedef struct _CoreML__Specification__TanhLayerParams CoreML__Specification__TanhLayerParams;
135 typedef struct _CoreML__Specification__AsinhLayerParams CoreML__Specification__AsinhLayerParams;
136 typedef struct _CoreML__Specification__AcoshLayerParams CoreML__Specification__AcoshLayerParams;
137 typedef struct _CoreML__Specification__AtanhLayerParams CoreML__Specification__AtanhLayerParams;
138 typedef struct _CoreML__Specification__PowBroadcastableLayerParams CoreML__Specification__PowBroadcastableLayerParams;
139 typedef struct _CoreML__Specification__Exp2LayerParams CoreML__Specification__Exp2LayerParams;
140 typedef struct _CoreML__Specification__WhereNonZeroLayerParams CoreML__Specification__WhereNonZeroLayerParams;
141 typedef struct _CoreML__Specification__MatrixBandPartLayerParams CoreML__Specification__MatrixBandPartLayerParams;
142 typedef struct _CoreML__Specification__UpperTriangularLayerParams CoreML__Specification__UpperTriangularLayerParams;
143 typedef struct _CoreML__Specification__LowerTriangularLayerParams CoreML__Specification__LowerTriangularLayerParams;
144 typedef struct _CoreML__Specification__BroadcastToLikeLayerParams CoreML__Specification__BroadcastToLikeLayerParams;
145 typedef struct _CoreML__Specification__BroadcastToStaticLayerParams CoreML__Specification__BroadcastToStaticLayerParams;
146 typedef struct _CoreML__Specification__BroadcastToDynamicLayerParams CoreML__Specification__BroadcastToDynamicLayerParams;
147 typedef struct _CoreML__Specification__AddBroadcastableLayerParams CoreML__Specification__AddBroadcastableLayerParams;
148 typedef struct _CoreML__Specification__MaxBroadcastableLayerParams CoreML__Specification__MaxBroadcastableLayerParams;
149 typedef struct _CoreML__Specification__MinBroadcastableLayerParams CoreML__Specification__MinBroadcastableLayerParams;
150 typedef struct _CoreML__Specification__ModBroadcastableLayerParams CoreML__Specification__ModBroadcastableLayerParams;
151 typedef struct _CoreML__Specification__FloorDivBroadcastableLayerParams CoreML__Specification__FloorDivBroadcastableLayerParams;
152 typedef struct _CoreML__Specification__SubtractBroadcastableLayerParams CoreML__Specification__SubtractBroadcastableLayerParams;
153 typedef struct _CoreML__Specification__MultiplyBroadcastableLayerParams CoreML__Specification__MultiplyBroadcastableLayerParams;
154 typedef struct _CoreML__Specification__DivideBroadcastableLayerParams CoreML__Specification__DivideBroadcastableLayerParams;
155 typedef struct _CoreML__Specification__GatherLayerParams CoreML__Specification__GatherLayerParams;
156 typedef struct _CoreML__Specification__ScatterLayerParams CoreML__Specification__ScatterLayerParams;
157 typedef struct _CoreML__Specification__GatherNDLayerParams CoreML__Specification__GatherNDLayerParams;
158 typedef struct _CoreML__Specification__ScatterNDLayerParams CoreML__Specification__ScatterNDLayerParams;
159 typedef struct _CoreML__Specification__GatherAlongAxisLayerParams CoreML__Specification__GatherAlongAxisLayerParams;
160 typedef struct _CoreML__Specification__ScatterAlongAxisLayerParams CoreML__Specification__ScatterAlongAxisLayerParams;
161 typedef struct _CoreML__Specification__StackLayerParams CoreML__Specification__StackLayerParams;
162 typedef struct _CoreML__Specification__RankPreservingReshapeLayerParams CoreML__Specification__RankPreservingReshapeLayerParams;
163 typedef struct _CoreML__Specification__ConstantPaddingLayerParams CoreML__Specification__ConstantPaddingLayerParams;
164 typedef struct _CoreML__Specification__RandomNormalLikeLayerParams CoreML__Specification__RandomNormalLikeLayerParams;
165 typedef struct _CoreML__Specification__RandomNormalStaticLayerParams CoreML__Specification__RandomNormalStaticLayerParams;
166 typedef struct _CoreML__Specification__RandomNormalDynamicLayerParams CoreML__Specification__RandomNormalDynamicLayerParams;
167 typedef struct _CoreML__Specification__RandomUniformLikeLayerParams CoreML__Specification__RandomUniformLikeLayerParams;
168 typedef struct _CoreML__Specification__RandomUniformStaticLayerParams CoreML__Specification__RandomUniformStaticLayerParams;
169 typedef struct _CoreML__Specification__RandomUniformDynamicLayerParams CoreML__Specification__RandomUniformDynamicLayerParams;
170 typedef struct _CoreML__Specification__RandomBernoulliLikeLayerParams CoreML__Specification__RandomBernoulliLikeLayerParams;
171 typedef struct _CoreML__Specification__RandomBernoulliStaticLayerParams CoreML__Specification__RandomBernoulliStaticLayerParams;
172 typedef struct _CoreML__Specification__RandomBernoulliDynamicLayerParams CoreML__Specification__RandomBernoulliDynamicLayerParams;
173 typedef struct _CoreML__Specification__CategoricalDistributionLayerParams CoreML__Specification__CategoricalDistributionLayerParams;
174 typedef struct _CoreML__Specification__ReduceL1LayerParams CoreML__Specification__ReduceL1LayerParams;
175 typedef struct _CoreML__Specification__ReduceL2LayerParams CoreML__Specification__ReduceL2LayerParams;
176 typedef struct _CoreML__Specification__ReduceMaxLayerParams CoreML__Specification__ReduceMaxLayerParams;
177 typedef struct _CoreML__Specification__ReduceMinLayerParams CoreML__Specification__ReduceMinLayerParams;
178 typedef struct _CoreML__Specification__ReduceSumLayerParams CoreML__Specification__ReduceSumLayerParams;
179 typedef struct _CoreML__Specification__ReduceProdLayerParams CoreML__Specification__ReduceProdLayerParams;
180 typedef struct _CoreML__Specification__ReduceMeanLayerParams CoreML__Specification__ReduceMeanLayerParams;
181 typedef struct _CoreML__Specification__ReduceLogSumLayerParams CoreML__Specification__ReduceLogSumLayerParams;
182 typedef struct _CoreML__Specification__ReduceSumSquareLayerParams CoreML__Specification__ReduceSumSquareLayerParams;
183 typedef struct _CoreML__Specification__ReduceLogSumExpLayerParams CoreML__Specification__ReduceLogSumExpLayerParams;
184 typedef struct _CoreML__Specification__ExpandDimsLayerParams CoreML__Specification__ExpandDimsLayerParams;
185 typedef struct _CoreML__Specification__FlattenTo2DLayerParams CoreML__Specification__FlattenTo2DLayerParams;
186 typedef struct _CoreML__Specification__ReshapeStaticLayerParams CoreML__Specification__ReshapeStaticLayerParams;
187 typedef struct _CoreML__Specification__ReshapeLikeLayerParams CoreML__Specification__ReshapeLikeLayerParams;
188 typedef struct _CoreML__Specification__ReshapeDynamicLayerParams CoreML__Specification__ReshapeDynamicLayerParams;
189 typedef struct _CoreML__Specification__SqueezeLayerParams CoreML__Specification__SqueezeLayerParams;
190 typedef struct _CoreML__Specification__TopKLayerParams CoreML__Specification__TopKLayerParams;
191 typedef struct _CoreML__Specification__ArgMaxLayerParams CoreML__Specification__ArgMaxLayerParams;
192 typedef struct _CoreML__Specification__ArgMinLayerParams CoreML__Specification__ArgMinLayerParams;
193 typedef struct _CoreML__Specification__SplitNDLayerParams CoreML__Specification__SplitNDLayerParams;
194 typedef struct _CoreML__Specification__CeilLayerParams CoreML__Specification__CeilLayerParams;
195 typedef struct _CoreML__Specification__RoundLayerParams CoreML__Specification__RoundLayerParams;
196 typedef struct _CoreML__Specification__FloorLayerParams CoreML__Specification__FloorLayerParams;
197 typedef struct _CoreML__Specification__SignLayerParams CoreML__Specification__SignLayerParams;
198 typedef struct _CoreML__Specification__ClipLayerParams CoreML__Specification__ClipLayerParams;
199 typedef struct _CoreML__Specification__SliceStaticLayerParams CoreML__Specification__SliceStaticLayerParams;
200 typedef struct _CoreML__Specification__SliceDynamicLayerParams CoreML__Specification__SliceDynamicLayerParams;
201 typedef struct _CoreML__Specification__TileLayerParams CoreML__Specification__TileLayerParams;
202 typedef struct _CoreML__Specification__GetShapeLayerParams CoreML__Specification__GetShapeLayerParams;
203 typedef struct _CoreML__Specification__ErfLayerParams CoreML__Specification__ErfLayerParams;
204 typedef struct _CoreML__Specification__GeluLayerParams CoreML__Specification__GeluLayerParams;
205 typedef struct _CoreML__Specification__RangeStaticLayerParams CoreML__Specification__RangeStaticLayerParams;
206 typedef struct _CoreML__Specification__RangeDynamicLayerParams CoreML__Specification__RangeDynamicLayerParams;
207 typedef struct _CoreML__Specification__SlidingWindowsLayerParams CoreML__Specification__SlidingWindowsLayerParams;
208 typedef struct _CoreML__Specification__LayerNormalizationLayerParams CoreML__Specification__LayerNormalizationLayerParams;
209 typedef struct _CoreML__Specification__NonMaximumSuppressionLayerParams CoreML__Specification__NonMaximumSuppressionLayerParams;
210 typedef struct _CoreML__Specification__ClampedReLULayerParams CoreML__Specification__ClampedReLULayerParams;
211 typedef struct _CoreML__Specification__ArgSortLayerParams CoreML__Specification__ArgSortLayerParams;
212 typedef struct _CoreML__Specification__SliceBySizeLayerParams CoreML__Specification__SliceBySizeLayerParams;
213 typedef struct _CoreML__Specification__NeuralNetworkClassifier CoreML__Specification__NeuralNetworkClassifier;
214 typedef struct _CoreML__Specification__OneHotLayerParams CoreML__Specification__OneHotLayerParams;
215 typedef struct _CoreML__Specification__CumSumLayerParams CoreML__Specification__CumSumLayerParams;
216 typedef struct _CoreML__Specification__NeuralNetworkRegressor CoreML__Specification__NeuralNetworkRegressor;
217 typedef struct _CoreML__Specification__NetworkUpdateParameters CoreML__Specification__NetworkUpdateParameters;
218 typedef struct _CoreML__Specification__LossLayer CoreML__Specification__LossLayer;
219 typedef struct _CoreML__Specification__CategoricalCrossEntropyLossLayer CoreML__Specification__CategoricalCrossEntropyLossLayer;
220 typedef struct _CoreML__Specification__MeanSquaredErrorLossLayer CoreML__Specification__MeanSquaredErrorLossLayer;
221 typedef struct _CoreML__Specification__Optimizer CoreML__Specification__Optimizer;
222 typedef struct _CoreML__Specification__SGDOptimizer CoreML__Specification__SGDOptimizer;
223 typedef struct _CoreML__Specification__AdamOptimizer CoreML__Specification__AdamOptimizer;
224 
225 
226 /* --- enums --- */
227 
228 typedef enum _CoreML__Specification__SamePadding__SamePaddingMode {
229   CORE_ML__SPECIFICATION__SAME_PADDING__SAME_PADDING_MODE__BOTTOM_RIGHT_HEAVY = 0,
230   CORE_ML__SPECIFICATION__SAME_PADDING__SAME_PADDING_MODE__TOP_LEFT_HEAVY = 1
231     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__SAME_PADDING__SAME_PADDING_MODE)
232 } CoreML__Specification__SamePadding__SamePaddingMode;
233 typedef enum _CoreML__Specification__SamplingMode__Method {
234   /*
235    **
236    * start = 0, end = X-1
237    * grid points = numpy.linspace(start, end)
238    */
239   CORE_ML__SPECIFICATION__SAMPLING_MODE__METHOD__STRICT_ALIGN_ENDPOINTS_MODE = 0,
240   /*
241    **
242    * if N == 1: start = end = (X-1)/2
243    * otherwise, start = 0, end = X-1
244    * grid points = numpy.linspace(start, end)
245    */
246   CORE_ML__SPECIFICATION__SAMPLING_MODE__METHOD__ALIGN_ENDPOINTS_MODE = 1,
247   /*
248    **
249    * start = 0, end = X - X/N
250    * grid points = min(X-1, numpy.linspace(start, end))
   * This is the same as the mode used in the upsample layer in this specification, when used with bilinear interpolation. In that case N/X = upsample ratio.
252    */
253   CORE_ML__SPECIFICATION__SAMPLING_MODE__METHOD__UPSAMPLE_MODE = 2,
254   /*
255    **
256    * spacing = max(1, X-1)/N
257    * start = 0.5 * spacing
258    * end = start + (N-1) * spacing
259    * grid points = min(X-1, numpy.linspace(start, end))
260    */
261   CORE_ML__SPECIFICATION__SAMPLING_MODE__METHOD__ROI_ALIGN_MODE = 3
262     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__SAMPLING_MODE__METHOD)
263 } CoreML__Specification__SamplingMode__Method;
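/*
 * Worked example (illustrative; not part of the generated interface), applying the
 * formulas above with input size X = 5 and N = 3 grid points:
 *   STRICT_ALIGN_ENDPOINTS_MODE: linspace(0, 4, 3)                    -> [0, 2, 4]
 *   ALIGN_ENDPOINTS_MODE:        identical for N > 1; for N = 1 it yields [(X-1)/2] = [2]
 *                                where STRICT_ALIGN_ENDPOINTS_MODE yields [0]
 *   UPSAMPLE_MODE:               end = 5 - 5/3 ~ 3.33, linspace(0, 3.33, 3) -> [0, 1.67, 3.33]
 *   ROI_ALIGN_MODE:              spacing = 4/3, start = 0.67          -> [0.67, 2.0, 3.33]
 * Each grid point is then clamped to X-1 = 4 wherever the formulas take min(X-1, .).
 */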
264 typedef enum _CoreML__Specification__BoxCoordinatesMode__Coordinates {
265   /*
266    **
267    * [h_start, w_start, h_end, w_end]
268    */
269   CORE_ML__SPECIFICATION__BOX_COORDINATES_MODE__COORDINATES__CORNERS_HEIGHT_FIRST = 0,
270   /*
271    **
272    * [w_start, h_start, w_end, h_end]
273    */
274   CORE_ML__SPECIFICATION__BOX_COORDINATES_MODE__COORDINATES__CORNERS_WIDTH_FIRST = 1,
275   /*
276    **
277    * [h_center, w_center, box_height, box_width]
278    */
279   CORE_ML__SPECIFICATION__BOX_COORDINATES_MODE__COORDINATES__CENTER_SIZE_HEIGHT_FIRST = 2,
280   /*
281    **
282    * [w_center, h_center, box_width, box_height]
283    */
284   CORE_ML__SPECIFICATION__BOX_COORDINATES_MODE__COORDINATES__CENTER_SIZE_WIDTH_FIRST = 3
285     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__BOX_COORDINATES_MODE__COORDINATES)
286 } CoreML__Specification__BoxCoordinatesMode__Coordinates;
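/*
 * Note (illustrative, and an assumption about the usual box convention rather than
 * something stated here): the corner and center-size layouts above are related by
 *   h_center = (h_start + h_end) / 2,   box_height = h_end - h_start
 *   w_center = (w_start + w_end) / 2,   box_width  = w_end - w_start
 * so a CORNERS_HEIGHT_FIRST box [1, 2, 5, 10] would correspond to a
 * CENTER_SIZE_HEIGHT_FIRST box [3, 6, 4, 8].
 */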
287 /*
288  **
289  * The type of padding.
290  * All padding types pad the input shape with zeros.
291  * CUSTOM padding will add the custom padding values specified below to their respective
292  * dimensions, e.g., `customPaddingFront` number of zeros will be added to one side of the
293  * input's depth dimension and `customPaddingBack` number of zeros will be added to the other
294  * side of the input's depth dimension.
295  * VALID padding adds no padding to any dimension. In this case, the last convolution along
296  * each dimension will be dropped if the input dimension and the kernel size, stride, and
297  * dilation do not match.
298  * SAME padding adds enough padding to each dimension such that the output of the convolution
299  * has size ``Ceiling(inputShape / stride)``. Padding is added evenly to both sides of each
300  * dimension unless the total padding to add is odd, in which case it is added to the
301  * back/bottom/right side of the respective dimension. For example, if the total padding needed
302  * in the depth dimension is 3, 1 zero will be added to the front side of the depth dimension
303  * and 2 zeros will be added to the back side.
304  */
305 typedef enum _CoreML__Specification__Convolution3DLayerParams__PaddingType {
306   CORE_ML__SPECIFICATION__CONVOLUTION3_DLAYER_PARAMS__PADDING_TYPE__CUSTOM = 0,
307   CORE_ML__SPECIFICATION__CONVOLUTION3_DLAYER_PARAMS__PADDING_TYPE__VALID = 1,
308   CORE_ML__SPECIFICATION__CONVOLUTION3_DLAYER_PARAMS__PADDING_TYPE__SAME = 2
309     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__CONVOLUTION3_DLAYER_PARAMS__PADDING_TYPE)
310 } CoreML__Specification__Convolution3DLayerParams__PaddingType;
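/*
 * Worked example (illustrative; only the output-size rule above is stated by the
 * specification, the total-padding formula below is the standard SAME-padding
 * arithmetic and is an assumption): for an input depth of 5, kernel size 4,
 * stride 2, dilation 1:
 *   output depth  = Ceiling(5 / 2) = 3
 *   total padding = (3 - 1) * 2 + 4 - 5 = 3
 * so 1 zero is added to the front of the depth dimension and 2 to the back.
 */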
311 typedef enum _CoreML__Specification__PoolingLayerParams__PoolingType {
312   CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_TYPE__MAX = 0,
313   CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_TYPE__AVERAGE = 1,
314   CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_TYPE__L2 = 2
315     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_TYPE)
316 } CoreML__Specification__PoolingLayerParams__PoolingType;
317 typedef enum _CoreML__Specification__Pooling3DLayerParams__PoolingType3D {
318   CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__POOLING_TYPE3_D__MAX = 0,
319   CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__POOLING_TYPE3_D__AVERAGE = 1
320     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__POOLING_TYPE3_D)
321 } CoreML__Specification__Pooling3DLayerParams__PoolingType3D;
322 /*
323  **
324  * The type of padding.
325  * All padding types pad the input shape with zeros.
326  * CUSTOM padding will add the custom padding values specified below to their respective
327  * dimensions, e.g., `customPaddingFront` number of zeros will be added to one side of the
328  * input's depth dimension and `customPaddingBack` number of zeros will be added to the other
329  * side of the input's depth dimension.
 * VALID padding adds no padding to any dimension. In this case, the last pool along
 * each dimension will be dropped if the input dimension, kernel size, and stride do not match.
 * SAME padding adds enough padding to each dimension such that the output
 * has the same spatial dimensions as the input. Padding is added evenly to both
 * sides of each dimension unless the total padding to add is odd, in which case the extra padding
 * is added to the back/bottom/right side of the respective dimension. For example, if the
 * total horizontal padding is 3, then 1 column of padding is added on the left and 2 on the right.
337  */
338 typedef enum _CoreML__Specification__Pooling3DLayerParams__Pooling3DPaddingType {
339   CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__POOLING3_DPADDING_TYPE__CUSTOM = 0,
340   CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__POOLING3_DPADDING_TYPE__VALID = 1,
341   CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__POOLING3_DPADDING_TYPE__SAME = 2
342     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__POOLING3_DPADDING_TYPE)
343 } CoreML__Specification__Pooling3DLayerParams__Pooling3DPaddingType;
344 typedef enum _CoreML__Specification__GlobalPooling3DLayerParams__GlobalPoolingType3D {
345   CORE_ML__SPECIFICATION__GLOBAL_POOLING3_DLAYER_PARAMS__GLOBAL_POOLING_TYPE3_D__MAX = 0,
346   CORE_ML__SPECIFICATION__GLOBAL_POOLING3_DLAYER_PARAMS__GLOBAL_POOLING_TYPE3_D__AVERAGE = 1
347     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__GLOBAL_POOLING3_DLAYER_PARAMS__GLOBAL_POOLING_TYPE3_D)
348 } CoreML__Specification__GlobalPooling3DLayerParams__GlobalPoolingType3D;
349 /*
350  **
351  * A unary operator.
352  * The following functions are supported:
353  * ``SQRT``
354  *     .. math:: f(x) = \sqrt{x}
355  * ``RSQRT``
356  *     .. math:: f(x) = \dfrac{1}{\sqrt{x + \epsilon}}
357  * ``INVERSE``
358  *     .. math:: f(x) = \dfrac{1}{x + \epsilon}
359  * ``POWER``
360  *     .. math:: f(x) = x^\alpha
361  * ``EXP``
362  *     .. math:: f(x) = e^x
363  * ``LOG``
364  *     .. math:: f(x) = \log x
365  * ``ABS``
366  *     .. math:: f(x) = |x|
367  * ``THRESHOLD``
368  *     .. math:: f(x) = \text{max}(\alpha, x)
369  */
370 typedef enum _CoreML__Specification__UnaryFunctionLayerParams__Operation {
371   CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION__SQRT = 0,
372   CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION__RSQRT = 1,
373   CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION__INVERSE = 2,
374   CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION__POWER = 3,
375   CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION__EXP = 4,
376   CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION__LOG = 5,
377   CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION__ABS = 6,
378   CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION__THRESHOLD = 7
379     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION)
380 } CoreML__Specification__UnaryFunctionLayerParams__Operation;
381 /*
382  * Overall mode for interpolating new elements when upsampling.
383  * NN - Nearest Neighbors - simply pick the nearest true value for interpolated values.
384  * BILINEAR - Use bilinear interpolation. See LinearUpsamplingMode for behavior.
385  */
386 typedef enum _CoreML__Specification__UpsampleLayerParams__InterpolationMode {
387   /*
388    * / Nearest Neighbour
389    */
390   CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__INTERPOLATION_MODE__NN = 0,
391   /*
392    * / Bilinear
393    */
394   CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__INTERPOLATION_MODE__BILINEAR = 1
395     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__INTERPOLATION_MODE)
396 } CoreML__Specification__UpsampleLayerParams__InterpolationMode;
397 /*
398  **
399  * LinearUpsampleMode specifies the behavior for linear upsampling. Only valid when Interpolation Mode is BILINEAR.
400  * If input grid is [0, Xin-1] (corresponding to an input size of Xin), and if the output size is Xout,
401  * then the grid points are sampled in the following manner:
402  * DEFAULT:
403  *   spacing = (Xin-Xin/Xout) / (Xout-1)
 *   grid_point[i] = min(Xin-1, max(0, i * spacing)), for i = 0,1,2,...,Xout-1
 * ALIGN_CORNERS_TRUE:
 *   spacing = (Xin-1) / (Xout-1)
 *   grid_point[i] = min(Xin-1, max(0, i * spacing)), for i = 0,1,2,...,Xout-1
 * ALIGN_CORNERS_FALSE:
 *   spacing = Xin / Xout
 *   grid_point[i] = min(Xin-1, max(0, i * spacing + 0.5 * spacing - 0.5)), for i = 0,1,2,...,Xout-1
411  */
412 typedef enum _CoreML__Specification__UpsampleLayerParams__LinearUpsampleMode {
413   CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__LINEAR_UPSAMPLE_MODE__DEFAULT = 0,
414   CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__LINEAR_UPSAMPLE_MODE__ALIGN_CORNERS_TRUE = 1,
415   CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__LINEAR_UPSAMPLE_MODE__ALIGN_CORNERS_FALSE = 2
416     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__LINEAR_UPSAMPLE_MODE)
417 } CoreML__Specification__UpsampleLayerParams__LinearUpsampleMode;
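/*
 * Worked example (illustrative; not part of the generated interface), with
 * Xin = 2 and Xout = 4:
 *   DEFAULT:             spacing = (2 - 2/4) / 3 = 0.5   -> grid = [0, 0.5, 1.0, 1.0]
 *   ALIGN_CORNERS_TRUE:  spacing = 1 / 3                 -> grid = [0, 0.33, 0.67, 1.0]
 *   ALIGN_CORNERS_FALSE: spacing = 2 / 4 = 0.5           -> grid = [0, 0.25, 0.75, 1.0]
 * Each grid point is clamped to the range [0, Xin-1] = [0, 1] as in the formulas above.
 */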
418 typedef enum _CoreML__Specification__FlattenLayerParams__FlattenOrder {
419   CORE_ML__SPECIFICATION__FLATTEN_LAYER_PARAMS__FLATTEN_ORDER__CHANNEL_FIRST = 0,
420   CORE_ML__SPECIFICATION__FLATTEN_LAYER_PARAMS__FLATTEN_ORDER__CHANNEL_LAST = 1
421     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__FLATTEN_LAYER_PARAMS__FLATTEN_ORDER)
422 } CoreML__Specification__FlattenLayerParams__FlattenOrder;
423 typedef enum _CoreML__Specification__ReshapeLayerParams__ReshapeOrder {
424   CORE_ML__SPECIFICATION__RESHAPE_LAYER_PARAMS__RESHAPE_ORDER__CHANNEL_FIRST = 0,
425   CORE_ML__SPECIFICATION__RESHAPE_LAYER_PARAMS__RESHAPE_ORDER__CHANNEL_LAST = 1
426     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__RESHAPE_LAYER_PARAMS__RESHAPE_ORDER)
427 } CoreML__Specification__ReshapeLayerParams__ReshapeOrder;
428 typedef enum _CoreML__Specification__ReorganizeDataLayerParams__ReorganizationType {
429   CORE_ML__SPECIFICATION__REORGANIZE_DATA_LAYER_PARAMS__REORGANIZATION_TYPE__SPACE_TO_DEPTH = 0,
430   CORE_ML__SPECIFICATION__REORGANIZE_DATA_LAYER_PARAMS__REORGANIZATION_TYPE__DEPTH_TO_SPACE = 1,
431   CORE_ML__SPECIFICATION__REORGANIZE_DATA_LAYER_PARAMS__REORGANIZATION_TYPE__PIXEL_SHUFFLE = 2
432     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__REORGANIZE_DATA_LAYER_PARAMS__REORGANIZATION_TYPE)
433 } CoreML__Specification__ReorganizeDataLayerParams__ReorganizationType;
434 typedef enum _CoreML__Specification__SliceLayerParams__SliceAxis {
435   CORE_ML__SPECIFICATION__SLICE_LAYER_PARAMS__SLICE_AXIS__CHANNEL_AXIS = 0,
436   CORE_ML__SPECIFICATION__SLICE_LAYER_PARAMS__SLICE_AXIS__HEIGHT_AXIS = 1,
437   CORE_ML__SPECIFICATION__SLICE_LAYER_PARAMS__SLICE_AXIS__WIDTH_AXIS = 2
438     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__SLICE_LAYER_PARAMS__SLICE_AXIS)
439 } CoreML__Specification__SliceLayerParams__SliceAxis;
440 /*
441  * The following reduction operations are supported
442  * and are applied on the specified axis of the input array:
443  * ``SUM``
444  *     Sum of all elements
445  *     .. math:: \sum{x_i}
446  * ``AVG``
447  *     Sum of all elements divided by the number of elements
448  *     .. math:: \dfrac{\sum^n{x_i}}{n}
449  * ``PROD``
450  *     Product of all elements
451  *     .. math:: \prod{x_i}
452  * ``LOGSUM``
453  *     Sum of the natural logarithm of all elements
454  *     .. math:: \sum{\ln{(x_i + \epsilon)}}
455  * ``SUMSQUARE``
456  *     Sum of squares of all elements
457  *     .. math:: \sum{x^2}
458  * ``L1``
459  *     L1 normalization of all elements
460  *     .. math:: ||x||_1 = \sum{|x_i|}
461  * ``L2``
462  *     L2 normalization of all elements
463  *     .. math:: ||x||_2 = \sqrt{\sum{x_i^2}}
464  * ``MAX``
465  *     Maximum of all elements
466  *     .. math:: \text{max}(x_i)
467  * ``MIN``
 *     Minimum of all elements
469  *     .. math:: \text{min}(x_i)
470  * ``ARGMAX``
471  *     Argument of the maximum of all elements
472  *     .. math:: \text{argmax}(x_i)
473  */
474 typedef enum _CoreML__Specification__ReduceLayerParams__ReduceOperation {
475   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__SUM = 0,
476   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__AVG = 1,
477   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__PROD = 2,
478   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__LOGSUM = 3,
479   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__SUMSQUARE = 4,
480   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__L1 = 5,
481   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__L2 = 6,
482   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__MAX = 7,
483   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__MIN = 8,
484   /*
485    * / only supported with axis = C, H or W.
486    */
487   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__ARGMAX = 9
488     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION)
489 } CoreML__Specification__ReduceLayerParams__ReduceOperation;
490 typedef enum _CoreML__Specification__ReduceLayerParams__ReduceAxis {
491   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_AXIS__CHW = 0,
492   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_AXIS__HW = 1,
493   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_AXIS__C = 2,
494   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_AXIS__H = 3,
495   CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_AXIS__W = 4
496     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_AXIS)
497 } CoreML__Specification__ReduceLayerParams__ReduceAxis;
498 typedef enum _CoreML__Specification__GeluLayerParams__GeluMode {
499   CORE_ML__SPECIFICATION__GELU_LAYER_PARAMS__GELU_MODE__EXACT = 0,
500   CORE_ML__SPECIFICATION__GELU_LAYER_PARAMS__GELU_MODE__TANH_APPROXIMATION = 1,
501   CORE_ML__SPECIFICATION__GELU_LAYER_PARAMS__GELU_MODE__SIGMOID_APPROXIMATION = 2
502     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__GELU_LAYER_PARAMS__GELU_MODE)
503 } CoreML__Specification__GeluLayerParams__GeluMode;
504 typedef enum _CoreML__Specification__NeuralNetworkMultiArrayShapeMapping {
505   /*
506    * Default legacy value. Only supported for Core ML Specification version <= 3.
507    * The default legacy shape mapping resolves all input shapes to a rank 5 equivalent
508    * with axis notation of [Seq, Batch, Channel, Height, Width].
   * When this enum value is selected,
   * the repeated shape field in the message "ArrayFeatureType" in the feature types proto
   * must be either length 1 or length 3.
512    * The following rule is used to map the values in the shape field to the actual tensor shape:
513    * rank 1 shape is mapped to shape [1,1,C,1,1]
514    * rank 3 shape is mapped to shape [1,1,C,H,W]
515    * At runtime, the first two dimensions (Seq or Batch) can be presented as well, with non-1 values.
   * It is invalid to use this enum value if any of the layers added in
   * Specification version 4 (iOS >= 13, macOS >= 10.15) onwards are used in the network.
518    * Validator will raise an error in that case.
519    */
520   CORE_ML__SPECIFICATION__NEURAL_NETWORK_MULTI_ARRAY_SHAPE_MAPPING__RANK5_ARRAY_MAPPING = 0,
521   /*
522    * The exact shape and rank (i.e. number of dimensions in the shape) of the input,
523    * as specified in the message "ArrayFeatureType", is passed through to the layers.
524    * Supported only for Specification version >= 4 (iOS >= 13, macOS >= 10.15).
525    */
526   CORE_ML__SPECIFICATION__NEURAL_NETWORK_MULTI_ARRAY_SHAPE_MAPPING__EXACT_ARRAY_MAPPING = 1
527     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__NEURAL_NETWORK_MULTI_ARRAY_SHAPE_MAPPING)
528 } CoreML__Specification__NeuralNetworkMultiArrayShapeMapping;
529 typedef enum _CoreML__Specification__NeuralNetworkImageShapeMapping {
530   /*
531    * In this case, image input is mapped to a rank 5 tensor.
532    * For Color images, input tensor is shaped as [1,1,3,H,W].
533    * For Gray images, input tensor is shaped as [1,1,1,H,W].
534    */
535   CORE_ML__SPECIFICATION__NEURAL_NETWORK_IMAGE_SHAPE_MAPPING__RANK5_IMAGE_MAPPING = 0,
536   /*
537    * For Color images, input tensor is shaped as [1,3,H,W].
538    * For Gray images, input tensor is shaped as [1,1,H,W].
539    * Supported only for Specification version >= 4 (iOS >= 13, macOS >= 10.15).
540    */
541   CORE_ML__SPECIFICATION__NEURAL_NETWORK_IMAGE_SHAPE_MAPPING__RANK4_IMAGE_MAPPING = 1
542     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__NEURAL_NETWORK_IMAGE_SHAPE_MAPPING)
543 } CoreML__Specification__NeuralNetworkImageShapeMapping;
544 /*
545  * Scatter accumulation mode.
546  */
547 typedef enum _CoreML__Specification__ScatterMode {
548   CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_UPDATE = 0,
549   /*
550    * / add
551    */
552   CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_ADD = 1,
553   /*
554    * / subtract
555    */
556   CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_SUB = 2,
557   /*
558    * / multiply
559    */
560   CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_MUL = 3,
561   /*
562    * / divide
563    */
564   CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_DIV = 4,
565   /*
566    * / maximum
567    */
568   CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_MAX = 5,
569   /*
570    * / minimum
571    */
572   CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_MIN = 6
573     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__SCATTER_MODE)
574 } CoreML__Specification__ScatterMode;
575 
576 /* --- messages --- */
577 
578 /*
579  **
580  *A neural network.
581  */
582 struct  _CoreML__Specification__NeuralNetwork
583 {
584   ProtobufCMessage base;
585   size_t n_layers;
586   CoreML__Specification__NeuralNetworkLayer **layers;
587   size_t n_preprocessing;
588   CoreML__Specification__NeuralNetworkPreprocessing **preprocessing;
589   /*
590    * use this enum value to determine the input tensor shapes to the neural network, for multiarray inputs
591    */
592   CoreML__Specification__NeuralNetworkMultiArrayShapeMapping arrayinputshapemapping;
593   /*
594    * use this enum value to determine the input tensor shapes to the neural network, for image inputs
595    */
596   CoreML__Specification__NeuralNetworkImageShapeMapping imageinputshapemapping;
597   CoreML__Specification__NetworkUpdateParameters *updateparams;
598 };
599 #define CORE_ML__SPECIFICATION__NEURAL_NETWORK__INIT \
600  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__neural_network__descriptor) \
601     , 0,NULL, 0,NULL, CORE_ML__SPECIFICATION__NEURAL_NETWORK_MULTI_ARRAY_SHAPE_MAPPING__RANK5_ARRAY_MAPPING, CORE_ML__SPECIFICATION__NEURAL_NETWORK_IMAGE_SHAPE_MAPPING__RANK5_IMAGE_MAPPING, NULL }
602 
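/*
 * Example (illustrative sketch, not generated code): decoding a serialized
 * NeuralNetwork message with the generic protobuf-c API. `data` and `len` are
 * assumed to hold the encoded bytes of the message.
 *
 *     CoreML__Specification__NeuralNetwork *nn =
 *         (CoreML__Specification__NeuralNetwork *) protobuf_c_message_unpack(
 *             &core_ml__specification__neural_network__descriptor, NULL, len, data);
 *     if (nn != NULL) {
 *         for (size_t i = 0; i < nn->n_layers; i++) {
 *             const CoreML__Specification__NeuralNetworkLayer *layer = nn->layers[i];
 *             // inspect layer here
 *         }
 *         protobuf_c_message_free_unpacked(&nn->base, NULL);
 *     }
 */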
603 
604 /*
605  **
606  * A neural network preprocessor that
607  * performs a scalar multiplication of an image
608  * followed by addition of scalar biases to the channels.
609  * Input: X
610  *    An image in BGR or RGB format with shape ``[3, H, W]``
611  *    or in grayscale format with shape ``[1, H, W]``.
612  * Output: Y
613  *    An image with format and shape corresponding to the input.
614  * If the input image is in BGR format:
615  * .. code::
616  *     Y[0, :, :] = channelScale * X[0, :, :] + blueBias
617  *     Y[1, :, :] = channelScale * X[1, :, :] + greenBias
618  *     Y[2, :, :] = channelScale * X[2, :, :] + redBias
619  * If the input image is in RGB format:
620  * .. code::
621  *     Y[0, :, :] = channelScale * X[0, :, :] + redBias
622  *     Y[1, :, :] = channelScale * X[1, :, :] + greenBias
623  *     Y[2, :, :] = channelScale * X[2, :, :] + blueBias
624  * If the input image is in grayscale format:
625  * .. code::
626  *     Y[0, :, :] = channelScale * X[0, :, :] + grayBias
627  */
628 struct  _CoreML__Specification__NeuralNetworkImageScaler
629 {
630   ProtobufCMessage base;
631   /*
632    * /Scalar to be multiplied.
633    */
634   float channelscale;
635   /*
636    * /Scalar blue bias to be added.
637    */
638   float bluebias;
639   /*
640    * /Scalar green bias to be added.
641    */
642   float greenbias;
643   /*
644    * /Scalar red bias to be added.
645    */
646   float redbias;
647   /*
648    * /Scalar bias to be added for grayscale images.
649    */
650   float graybias;
651 };
652 #define CORE_ML__SPECIFICATION__NEURAL_NETWORK_IMAGE_SCALER__INIT \
653  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__neural_network_image_scaler__descriptor) \
654     , 0, 0, 0, 0, 0 }
655 
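/*
 * Example (illustrative sketch; the numeric values are placeholders, not defaults):
 * populating the scaler for a typical "scale by 1/255, then add per-channel biases"
 * preprocessing step, following the formulas above.
 *
 *     CoreML__Specification__NeuralNetworkImageScaler scaler =
 *         CORE_ML__SPECIFICATION__NEURAL_NETWORK_IMAGE_SCALER__INIT;
 *     scaler.channelscale = 1.0f / 255.0f;
 *     scaler.redbias      = -0.485f;
 *     scaler.greenbias    = -0.456f;
 *     scaler.bluebias     = -0.406f;
 */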
656 
657 /*
658  **
659  * A neural network preprocessor that
660  * subtracts the provided mean image from the input image.
661  * The mean image is subtracted from the input named
662  * ``NeuralNetworkPreprocessing.featureName``.
663  */
664 struct  _CoreML__Specification__NeuralNetworkMeanImage
665 {
666   ProtobufCMessage base;
667   /*
668    **
669    * Mean image stored as a flattened array of floats,
670    * representing shape [Channel,Height,Width].
671    */
672   size_t n_meanimage;
673   float *meanimage;
674 };
675 #define CORE_ML__SPECIFICATION__NEURAL_NETWORK_MEAN_IMAGE__INIT \
676  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__neural_network_mean_image__descriptor) \
677     , 0,NULL }
678 
679 
680 typedef enum {
681   CORE_ML__SPECIFICATION__NEURAL_NETWORK_PREPROCESSING__PREPROCESSOR__NOT_SET = 0,
682   CORE_ML__SPECIFICATION__NEURAL_NETWORK_PREPROCESSING__PREPROCESSOR_SCALER = 10,
683   CORE_ML__SPECIFICATION__NEURAL_NETWORK_PREPROCESSING__PREPROCESSOR_MEAN_IMAGE = 11
684     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__NEURAL_NETWORK_PREPROCESSING__PREPROCESSOR)
685 } CoreML__Specification__NeuralNetworkPreprocessing__PreprocessorCase;
686 
687 /*
688  * / Preprocessing parameters for image inputs.
689  */
690 struct  _CoreML__Specification__NeuralNetworkPreprocessing
691 {
692   ProtobufCMessage base;
693   /*
694    * / must be equal to the input name to which the preprocessing is applied
695    */
696   char *featurename;
697   CoreML__Specification__NeuralNetworkPreprocessing__PreprocessorCase preprocessor_case;
698   union {
699     CoreML__Specification__NeuralNetworkImageScaler *scaler;
700     CoreML__Specification__NeuralNetworkMeanImage *meanimage;
701   };
702 };
703 #define CORE_ML__SPECIFICATION__NEURAL_NETWORK_PREPROCESSING__INIT \
704  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__neural_network_preprocessing__descriptor) \
705     , (char *)protobuf_c_empty_string, CORE_ML__SPECIFICATION__NEURAL_NETWORK_PREPROCESSING__PREPROCESSOR__NOT_SET, {0} }
706 
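/*
 * Example (illustrative sketch): attaching an image scaler, such as the one sketched
 * above, to a named input. For protobuf-c oneof fields, the `_case` selector and the
 * matching union member are set together; "image" is an assumed input feature name.
 *
 *     CoreML__Specification__NeuralNetworkPreprocessing prep =
 *         CORE_ML__SPECIFICATION__NEURAL_NETWORK_PREPROCESSING__INIT;
 *     prep.featurename = (char *) "image";
 *     prep.preprocessor_case =
 *         CORE_ML__SPECIFICATION__NEURAL_NETWORK_PREPROCESSING__PREPROCESSOR_SCALER;
 *     prep.scaler = &scaler;
 */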
707 
708 /*
709  **
710  * A rectified linear unit (ReLU) activation function.
711  * This function has the following formula:
712  * .. math::
713  *     f(x) = \text{max}(0, x)
714  */
715 struct  _CoreML__Specification__ActivationReLU
716 {
717   ProtobufCMessage base;
718 };
719 #define CORE_ML__SPECIFICATION__ACTIVATION_RE_LU__INIT \
720  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_re_lu__descriptor) \
721      }
722 
723 
724 /*
725  **
726  * A leaky rectified linear unit (ReLU) activation function.
727  * This function has the following formula:
728  * .. math::
729  *     f(x) = \begin{cases}
730  *             x      & \text{if } x \geq 0 \\
731  *             \alpha x & \text{if } x < 0
732  *            \end{cases}
733  */
734 struct  _CoreML__Specification__ActivationLeakyReLU
735 {
736   ProtobufCMessage base;
737   /*
738    *negative slope value for leakyReLU
739    */
740   float alpha;
741 };
742 #define CORE_ML__SPECIFICATION__ACTIVATION_LEAKY_RE_LU__INIT \
743  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_leaky_re_lu__descriptor) \
744     , 0 }
745 
746 
747 /*
748  **
749  * A hyperbolic tangent activation function.
750  * This function has the following formula:
751  * .. math::
752  *     f(x) = \dfrac{1 - e^{-2x}}{1 + e^{-2x}}
753  */
754 struct  _CoreML__Specification__ActivationTanh
755 {
756   ProtobufCMessage base;
757 };
758 #define CORE_ML__SPECIFICATION__ACTIVATION_TANH__INIT \
759  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_tanh__descriptor) \
760      }
761 
762 
763 /*
764  **
765  * A scaled hyperbolic tangent activation function.
766  * This function has the following formula:
767  * .. math::
768  *     f(x) = \alpha \tanh(\beta x)
769  */
770 struct  _CoreML__Specification__ActivationScaledTanh
771 {
772   ProtobufCMessage base;
773   float alpha;
774   float beta;
775 };
776 #define CORE_ML__SPECIFICATION__ACTIVATION_SCALED_TANH__INIT \
777  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_scaled_tanh__descriptor) \
778     , 0, 0 }
779 
780 
781 /*
782  **
783  * A sigmoid activation function.
784  * This function has the following formula:
785  * .. math::
786  *     f(x) = \dfrac{1}{1 + e^{-x}}
787  */
788 struct  _CoreML__Specification__ActivationSigmoid
789 {
790   ProtobufCMessage base;
791 };
792 #define CORE_ML__SPECIFICATION__ACTIVATION_SIGMOID__INIT \
793  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_sigmoid__descriptor) \
794      }
795 
796 
797 /*
798  **
799  * A linear activation function.
800  * This function has the following formula:
801  * .. math::
802  *     f(x) = \alpha x + \beta
803  */
804 struct  _CoreML__Specification__ActivationLinear
805 {
806   ProtobufCMessage base;
807   float alpha;
808   float beta;
809 };
810 #define CORE_ML__SPECIFICATION__ACTIVATION_LINEAR__INIT \
811  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_linear__descriptor) \
812     , 0, 0 }
813 
814 
815 /*
816  **
817  * A hard sigmoid activation function.
818  * This function has the following formula:
819  * .. math::
820  *     f(x) = \text{min}(\text{max}(\alpha x + \beta, 0), 1)
821  */
822 struct  _CoreML__Specification__ActivationSigmoidHard
823 {
824   ProtobufCMessage base;
825   float alpha;
826   float beta;
827 };
828 #define CORE_ML__SPECIFICATION__ACTIVATION_SIGMOID_HARD__INIT \
829  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_sigmoid_hard__descriptor) \
830     , 0, 0 }
831 
832 
833 /*
834  **
835  * A parameterized rectified linear unit (PReLU) activation function.
836  * Input must be at least rank 3. Axis = -3 is denoted by "C", or channels.
837  * "alpha" parameter can be a vector of length C.
838  * This function has the following formula:
839  * .. math::
840  *    f(x_i) = \begin{cases}
841  *                 x_i          & \text{if } x_i \geq 0 \\
842  *                 \alpha_i x_i & \text{if } x_i < 0
843  *             \end{cases} \;,\;i=1,...,C
844  */
845 struct  _CoreML__Specification__ActivationPReLU
846 {
847   ProtobufCMessage base;
848   /*
849    * parameter of length C or 1.
850    * If length is 1, same value is used for all channels
851    */
852   CoreML__Specification__WeightParams *alpha;
853 };
854 #define CORE_ML__SPECIFICATION__ACTIVATION_PRE_LU__INIT \
855  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_pre_lu__descriptor) \
856     , NULL }
857 
858 
859 /*
860  **
861  * An exponential linear unit (ELU) activation function.
862  * This function has the following formula:
863  * .. math::
864  *     f(x) = \begin{cases}
865  *             x              & \text{if } x \geq 0 \\
866  *             \alpha (e^x - 1) & \text{if } x < 0
867  *            \end{cases}
868  */
869 struct  _CoreML__Specification__ActivationELU
870 {
871   ProtobufCMessage base;
872   float alpha;
873 };
874 #define CORE_ML__SPECIFICATION__ACTIVATION_ELU__INIT \
875  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_elu__descriptor) \
876     , 0 }
877 
878 
879 /*
880  **
881  * A thresholded rectified linear unit (ReLU) activation function.
882  * This function has the following formula:
883  * .. math::
884  *     f(x) = \begin{cases}
885  *             x & \text{if } x \geq \alpha \\
886  *             0 & \text{if } x < \alpha
887  *            \end{cases}
888  */
889 struct  _CoreML__Specification__ActivationThresholdedReLU
890 {
891   ProtobufCMessage base;
892   float alpha;
893 };
894 #define CORE_ML__SPECIFICATION__ACTIVATION_THRESHOLDED_RE_LU__INIT \
895  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_thresholded_re_lu__descriptor) \
896     , 0 }
897 
898 
899 /*
900  **
901  * A softsign activation function.
902  * This function has the following formula:
903  * .. math::
904  *     f(x) = \dfrac{x}{1 + |x|}
905  */
906 struct  _CoreML__Specification__ActivationSoftsign
907 {
908   ProtobufCMessage base;
909 };
910 #define CORE_ML__SPECIFICATION__ACTIVATION_SOFTSIGN__INIT \
911  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_softsign__descriptor) \
912      }
913 
914 
915 /*
916  **
917  * A softplus activation function.
918  * This function has the following formula:
919  * .. math::
920  *     f(x) = \text{log}(1 + e^x)
921  */
922 struct  _CoreML__Specification__ActivationSoftplus
923 {
924   ProtobufCMessage base;
925 };
926 #define CORE_ML__SPECIFICATION__ACTIVATION_SOFTPLUS__INIT \
927  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_softplus__descriptor) \
928      }
929 
930 
931 /*
932  **
933  * A parametric softplus activation function.
934  * Input must be at least rank 3. axis = -3 is denoted by "C", or channels.
935  * "alpha"/"beta" parameter can be a vector of length C.
936  * This function has the following formula:
937  * .. math::
938  *     f(x_i) = \alpha_i \text{log}(1 + e^{\beta_i x_i}) \;,\;i=1,...,C
939  */
940 struct  _CoreML__Specification__ActivationParametricSoftplus
941 {
942   ProtobufCMessage base;
943   /*
944    * If length is 1, same value is used for all channels
945    */
946   /*
947    *parameter of length C or 1
948    */
949   CoreML__Specification__WeightParams *alpha;
950   /*
951    *parameter of length C or 1
952    */
953   CoreML__Specification__WeightParams *beta;
954 };
955 #define CORE_ML__SPECIFICATION__ACTIVATION_PARAMETRIC_SOFTPLUS__INIT \
956  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_parametric_softplus__descriptor) \
957     , NULL, NULL }
958 
959 
960 typedef enum {
961   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE__NOT_SET = 0,
962   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_LINEAR = 5,
963   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_RE_LU = 10,
964   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_LEAKY_RE_LU = 15,
965   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_THRESHOLDED_RE_LU = 20,
966   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_PRE_LU = 25,
967   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_TANH = 30,
968   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_SCALED_TANH = 31,
969   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_SIGMOID = 40,
970   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_SIGMOID_HARD = 41,
971   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_ELU = 50,
972   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_SOFTSIGN = 60,
973   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_SOFTPLUS = 70,
974   CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_PARAMETRIC_SOFTPLUS = 71
975     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE)
976 } CoreML__Specification__ActivationParams__NonlinearityTypeCase;
977 
978 struct  _CoreML__Specification__ActivationParams
979 {
980   ProtobufCMessage base;
981   CoreML__Specification__ActivationParams__NonlinearityTypeCase nonlinearity_type_case;
982   union {
983     CoreML__Specification__ActivationLinear *linear;
984     CoreML__Specification__ActivationReLU *relu;
985     CoreML__Specification__ActivationLeakyReLU *leakyrelu;
986     CoreML__Specification__ActivationThresholdedReLU *thresholdedrelu;
987     CoreML__Specification__ActivationPReLU *prelu;
988     CoreML__Specification__ActivationTanh *tanh;
989     CoreML__Specification__ActivationScaledTanh *scaledtanh;
990     CoreML__Specification__ActivationSigmoid *sigmoid;
991     CoreML__Specification__ActivationSigmoidHard *sigmoidhard;
992     CoreML__Specification__ActivationELU *elu;
993     CoreML__Specification__ActivationSoftsign *softsign;
994     CoreML__Specification__ActivationSoftplus *softplus;
995     CoreML__Specification__ActivationParametricSoftplus *parametricsoftplus;
996   };
997 };
998 #define CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__INIT \
999  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__activation_params__descriptor) \
1000     , CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE__NOT_SET, {0} }
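
/*
 **
 * Illustrative usage sketch (not generated code): with a protobuf-c "oneof" field,
 * the case tag and the matching union member are set together. The local variables
 * below are hypothetical, and the ReLU initializer macro is assumed to be the one
 * generated for ActivationReLU.
 * .. code::
 *     CoreML__Specification__ActivationReLU relu =
 *         CORE_ML__SPECIFICATION__ACTIVATION_RE_LU__INIT;
 *     CoreML__Specification__ActivationParams act =
 *         CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__INIT;
 *     act.nonlinearity_type_case =
 *         CORE_ML__SPECIFICATION__ACTIVATION_PARAMS__NONLINEARITY_TYPE_RE_LU;
 *     act.relu = &relu;
 */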
1001 
1002 
1003 /*
1004  **
1005  * Representation of the intermediate tensors
1006  */
1007 struct  _CoreML__Specification__Tensor
1008 {
1009   ProtobufCMessage base;
1010   /*
1011    * Number of dimensions in the tensor shape
1012    */
1013   uint32_t rank;
1014   /*
1015    * actual value of the tensor shape.
1016    * must be of length "rank". Can contain -1s for unknown dimensions.
1017    */
1018   size_t n_dimvalue;
1019   int64_t *dimvalue;
1020 };
1021 #define CORE_ML__SPECIFICATION__TENSOR__INIT \
1022  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__tensor__descriptor) \
1023     , 0, 0,NULL }
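
/*
 **
 * Illustrative usage sketch (not generated code): describing a rank-3 shape with one
 * unknown dimension. The variables are hypothetical.
 * .. code::
 *     static int64_t dims[3] = { 1, 3, -1 };  // -1 marks an unknown dimension
 *     CoreML__Specification__Tensor t = CORE_ML__SPECIFICATION__TENSOR__INIT;
 *     t.rank = 3;
 *     t.n_dimvalue = 3;                       // must equal "rank"
 *     t.dimvalue = dims;
 */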
1024 
1025 
1026 typedef enum {
1027   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER__NOT_SET = 0,
1028   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CONVOLUTION = 100,
1029   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_POOLING = 120,
1030   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ACTIVATION = 130,
1031   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_INNER_PRODUCT = 140,
1032   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_EMBEDDING = 150,
1033   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_BATCHNORM = 160,
1034   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_MVN = 165,
1035   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_L2NORMALIZE = 170,
1036   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SOFTMAX = 175,
1037   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LRN = 180,
1038   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CROP = 190,
1039   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_PADDING = 200,
1040   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_UPSAMPLE = 210,
1041   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RESIZE_BILINEAR = 211,
1042   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CROP_RESIZE = 212,
1043   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_UNARY = 220,
1044   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ADD = 230,
1045   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_MULTIPLY = 231,
1046   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_AVERAGE = 240,
1047   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SCALE = 245,
1048   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_BIAS = 250,
1049   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_MAX = 260,
1050   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_MIN = 261,
1051   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_DOT = 270,
1052   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE = 280,
1053   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOAD_CONSTANT = 290,
1054   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RESHAPE = 300,
1055   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_FLATTEN = 301,
1056   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_PERMUTE = 310,
1057   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CONCAT = 320,
1058   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SPLIT = 330,
1059   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SEQUENCE_REPEAT = 340,
1060   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REORGANIZE_DATA = 345,
1061   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SLICE = 350,
1062   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SIMPLE_RECURRENT = 400,
1063   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_GRU = 410,
1064   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_UNI_DIRECTIONAL_LSTM = 420,
1065   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_BI_DIRECTIONAL_LSTM = 430,
1066   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CUSTOM = 500,
1067   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_COPY = 600,
1068   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_BRANCH = 605,
1069   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOOP = 615,
1070   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOOP_BREAK = 620,
1071   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOOP_CONTINUE = 625,
1072   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANGE_STATIC = 635,
1073   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANGE_DYNAMIC = 640,
1074   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CLIP = 660,
1075   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CEIL = 665,
1076   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_FLOOR = 670,
1077   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SIGN = 680,
1078   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ROUND = 685,
1079   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_EXP2 = 700,
1080   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SIN = 710,
1081   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_COS = 715,
1082   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_TAN = 720,
1083   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ASIN = 730,
1084   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ACOS = 735,
1085   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ATAN = 740,
1086   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SINH = 750,
1087   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_COSH = 755,
1088   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_TANH = 760,
1089   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ASINH = 770,
1090   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ACOSH = 775,
1091   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ATANH = 780,
1092   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ERF = 790,
1093   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_GELU = 795,
1094   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_EQUAL = 815,
1095   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_NOT_EQUAL = 820,
1096   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LESS_THAN = 825,
1097   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LESS_EQUAL = 827,
1098   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_GREATER_THAN = 830,
1099   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_GREATER_EQUAL = 832,
1100   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOGICAL_OR = 840,
1101   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOGICAL_XOR = 845,
1102   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOGICAL_NOT = 850,
1103   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOGICAL_AND = 855,
1104   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_MOD_BROADCASTABLE = 865,
1105   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_MIN_BROADCASTABLE = 870,
1106   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_MAX_BROADCASTABLE = 875,
1107   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ADD_BROADCASTABLE = 880,
1108   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_POW_BROADCASTABLE = 885,
1109   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_DIVIDE_BROADCASTABLE = 890,
1110   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_FLOOR_DIV_BROADCASTABLE = 895,
1111   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_MULTIPLY_BROADCASTABLE = 900,
1112   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SUBTRACT_BROADCASTABLE = 905,
1113   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_TILE = 920,
1114   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_STACK = 925,
1115   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_GATHER = 930,
1116   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SCATTER = 935,
1117   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_GATHER_ND = 940,
1118   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SCATTER_ND = 945,
1119   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SOFTMAX_ND = 950,
1120   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_GATHER_ALONG_AXIS = 952,
1121   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SCATTER_ALONG_AXIS = 954,
1122   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REVERSE = 960,
1123   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REVERSE_SEQ = 965,
1124   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SPLIT_ND = 975,
1125   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CONCAT_ND = 980,
1126   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_TRANSPOSE = 985,
1127   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SLICE_STATIC = 995,
1128   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SLICE_DYNAMIC = 1000,
1129   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SLIDING_WINDOWS = 1005,
1130   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_TOP_K = 1015,
1131   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ARG_MIN = 1020,
1132   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ARG_MAX = 1025,
1133   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_EMBEDDING_ND = 1040,
1134   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_BATCHED_MATMUL = 1045,
1135   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_GET_SHAPE = 1065,
1136   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOAD_CONSTANT_ND = 1070,
1137   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_FILL_LIKE = 1080,
1138   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_FILL_STATIC = 1085,
1139   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_FILL_DYNAMIC = 1090,
1140   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_BROADCAST_TO_LIKE = 1100,
1141   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_BROADCAST_TO_STATIC = 1105,
1142   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_BROADCAST_TO_DYNAMIC = 1110,
1143   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SQUEEZE = 1120,
1144   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_EXPAND_DIMS = 1125,
1145   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_FLATTEN_TO2_D = 1130,
1146   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RESHAPE_LIKE = 1135,
1147   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RESHAPE_STATIC = 1140,
1148   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RESHAPE_DYNAMIC = 1145,
1149   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANK_PRESERVING_RESHAPE = 1150,
1150   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CONSTANT_PAD = 1155,
1151   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANDOM_NORMAL_LIKE = 1170,
1152   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANDOM_NORMAL_STATIC = 1175,
1153   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANDOM_NORMAL_DYNAMIC = 1180,
1154   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANDOM_UNIFORM_LIKE = 1190,
1155   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANDOM_UNIFORM_STATIC = 1195,
1156   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANDOM_UNIFORM_DYNAMIC = 1200,
1157   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANDOM_BERNOULLI_LIKE = 1210,
1158   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANDOM_BERNOULLI_STATIC = 1215,
1159   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RANDOM_BERNOULLI_DYNAMIC = 1220,
1160   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CATEGORICAL_DISTRIBUTION = 1230,
1161   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_L1 = 1250,
1162   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_L2 = 1255,
1163   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_MAX = 1260,
1164   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_MIN = 1265,
1165   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_SUM = 1270,
1166   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_PROD = 1275,
1167   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_MEAN = 1280,
1168   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_LOG_SUM = 1285,
1169   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_SUM_SQUARE = 1290,
1170   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_REDUCE_LOG_SUM_EXP = 1295,
1171   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_WHERE_NON_ZERO = 1313,
1172   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_MATRIX_BAND_PART = 1315,
1173   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LOWER_TRIANGULAR = 1320,
1174   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_UPPER_TRIANGULAR = 1325,
1175   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_WHERE_BROADCASTABLE = 1330,
1176   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_LAYER_NORMALIZATION = 1350,
1177   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_NON_MAXIMUM_SUPPRESSION = 1400,
1178   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ONE_HOT = 1450,
1179   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CUM_SUM = 1455,
1180   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CLAMPED_RE_LU = 1460,
1181   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ARG_SORT = 1461,
1182   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_POOLING3D = 1465,
1183   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_GLOBAL_POOLING3D = 1466,
1184   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SLICE_BY_SIZE = 1470,
1185   CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CONVOLUTION3D = 1471
1186     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER)
1187 } CoreML__Specification__NeuralNetworkLayer__LayerCase;
1188 
1189 /*
1190  **
1191  * A single neural network layer.
1192  */
1193 struct  _CoreML__Specification__NeuralNetworkLayer
1194 {
1195   ProtobufCMessage base;
1196   /*
   * Descriptive name of the layer.
1198    */
1199   char *name;
1200   size_t n_input;
1201   char **input;
1202   size_t n_output;
1203   char **output;
1204   /*
1205    * must be the same length as the "input" field
1206    */
1207   size_t n_inputtensor;
1208   CoreML__Specification__Tensor **inputtensor;
1209   /*
1210    * must be the same length as the "output" field
1211    */
1212   size_t n_outputtensor;
1213   CoreML__Specification__Tensor **outputtensor;
1214   /*
   * Must be set to true to mark the layer as updatable.
   * If true, the weightParams in the layer's properties must also be set to updatable.
   * If false, the value of the isUpdatable parameter within the layer's weights is ignored.
1218    */
1219   protobuf_c_boolean isupdatable;
1220   CoreML__Specification__NeuralNetworkLayer__LayerCase layer_case;
1221   union {
1222     /*
1223      * Start at 100 here
1224      */
1225     CoreML__Specification__ConvolutionLayerParams *convolution;
1226     CoreML__Specification__PoolingLayerParams *pooling;
1227     CoreML__Specification__ActivationParams *activation;
1228     CoreML__Specification__InnerProductLayerParams *innerproduct;
1229     CoreML__Specification__EmbeddingLayerParams *embedding;
1230     /*
1231      * Normalization-related Layers
1232      */
1233     CoreML__Specification__BatchnormLayerParams *batchnorm;
1234     CoreML__Specification__MeanVarianceNormalizeLayerParams *mvn;
1235     CoreML__Specification__L2NormalizeLayerParams *l2normalize;
1236     CoreML__Specification__SoftmaxLayerParams *softmax;
1237     CoreML__Specification__LRNLayerParams *lrn;
1238     CoreML__Specification__CropLayerParams *crop;
1239     CoreML__Specification__PaddingLayerParams *padding;
1240     CoreML__Specification__UpsampleLayerParams *upsample;
1241     CoreML__Specification__ResizeBilinearLayerParams *resizebilinear;
1242     CoreML__Specification__CropResizeLayerParams *cropresize;
1243     CoreML__Specification__UnaryFunctionLayerParams *unary;
1244     /*
1245      * Element-wise Operations
1246      */
1247     CoreML__Specification__AddLayerParams *add;
1248     CoreML__Specification__MultiplyLayerParams *multiply;
1249     CoreML__Specification__AverageLayerParams *average;
1250     CoreML__Specification__ScaleLayerParams *scale;
1251     CoreML__Specification__BiasLayerParams *bias;
1252     CoreML__Specification__MaxLayerParams *max;
1253     CoreML__Specification__MinLayerParams *min;
1254     CoreML__Specification__DotProductLayerParams *dot;
1255     CoreML__Specification__ReduceLayerParams *reduce;
1256     CoreML__Specification__LoadConstantLayerParams *loadconstant;
1257     /*
1258      * Data Reorganization
1259      */
1260     CoreML__Specification__ReshapeLayerParams *reshape;
1261     CoreML__Specification__FlattenLayerParams *flatten;
1262     CoreML__Specification__PermuteLayerParams *permute;
1263     CoreML__Specification__ConcatLayerParams *concat;
1264     CoreML__Specification__SplitLayerParams *split;
1265     CoreML__Specification__SequenceRepeatLayerParams *sequencerepeat;
1266     CoreML__Specification__ReorganizeDataLayerParams *reorganizedata;
1267     CoreML__Specification__SliceLayerParams *slice;
1268     /*
1269      * Recurrent Layers
1270      */
1271     CoreML__Specification__SimpleRecurrentLayerParams *simplerecurrent;
1272     CoreML__Specification__GRULayerParams *gru;
1273     CoreML__Specification__UniDirectionalLSTMLayerParams *unidirectionallstm;
1274     CoreML__Specification__BiDirectionalLSTMLayerParams *bidirectionallstm;
1275     /*
1276      * Custom (user-implemented) Layer
1277      */
1278     CoreML__Specification__CustomLayerParams *custom;
1279     /*
1280      * Control Flow related Layers
1281      */
1282     CoreML__Specification__CopyLayerParams *copy;
1283     CoreML__Specification__BranchLayerParams *branch;
1284     CoreML__Specification__LoopLayerParams *loop;
1285     CoreML__Specification__LoopBreakLayerParams *loopbreak;
1286     CoreML__Specification__LoopContinueLayerParams *loopcontinue;
1287     CoreML__Specification__RangeStaticLayerParams *rangestatic;
1288     CoreML__Specification__RangeDynamicLayerParams *rangedynamic;
1289     /*
1290      * Element-wise Unary Layers
1291      */
1292     CoreML__Specification__ClipLayerParams *clip;
1293     CoreML__Specification__CeilLayerParams *ceil;
1294     CoreML__Specification__FloorLayerParams *floor;
1295     CoreML__Specification__SignLayerParams *sign;
1296     CoreML__Specification__RoundLayerParams *round;
1297     CoreML__Specification__Exp2LayerParams *exp2;
1298     CoreML__Specification__SinLayerParams *sin;
1299     CoreML__Specification__CosLayerParams *cos;
1300     CoreML__Specification__TanLayerParams *tan;
1301     CoreML__Specification__AsinLayerParams *asin;
1302     CoreML__Specification__AcosLayerParams *acos;
1303     CoreML__Specification__AtanLayerParams *atan;
1304     CoreML__Specification__SinhLayerParams *sinh;
1305     CoreML__Specification__CoshLayerParams *cosh;
1306     CoreML__Specification__TanhLayerParams *tanh;
1307     CoreML__Specification__AsinhLayerParams *asinh;
1308     CoreML__Specification__AcoshLayerParams *acosh;
1309     CoreML__Specification__AtanhLayerParams *atanh;
1310     CoreML__Specification__ErfLayerParams *erf;
1311     CoreML__Specification__GeluLayerParams *gelu;
1312     /*
1313      * Element-wise Binary with Broadcasting Support
1314      */
1315     CoreML__Specification__EqualLayerParams *equal;
1316     CoreML__Specification__NotEqualLayerParams *notequal;
1317     CoreML__Specification__LessThanLayerParams *lessthan;
1318     CoreML__Specification__LessEqualLayerParams *lessequal;
1319     CoreML__Specification__GreaterThanLayerParams *greaterthan;
1320     CoreML__Specification__GreaterEqualLayerParams *greaterequal;
1321     CoreML__Specification__LogicalOrLayerParams *logicalor;
1322     CoreML__Specification__LogicalXorLayerParams *logicalxor;
1323     CoreML__Specification__LogicalNotLayerParams *logicalnot;
1324     CoreML__Specification__LogicalAndLayerParams *logicaland;
1325     CoreML__Specification__ModBroadcastableLayerParams *modbroadcastable;
1326     CoreML__Specification__MinBroadcastableLayerParams *minbroadcastable;
1327     CoreML__Specification__MaxBroadcastableLayerParams *maxbroadcastable;
1328     CoreML__Specification__AddBroadcastableLayerParams *addbroadcastable;
1329     CoreML__Specification__PowBroadcastableLayerParams *powbroadcastable;
1330     CoreML__Specification__DivideBroadcastableLayerParams *dividebroadcastable;
1331     CoreML__Specification__FloorDivBroadcastableLayerParams *floordivbroadcastable;
1332     CoreML__Specification__MultiplyBroadcastableLayerParams *multiplybroadcastable;
1333     CoreML__Specification__SubtractBroadcastableLayerParams *subtractbroadcastable;
1334     /*
1335      * Tensor Manipulations
1336      */
1337     CoreML__Specification__TileLayerParams *tile;
1338     CoreML__Specification__StackLayerParams *stack;
1339     CoreML__Specification__GatherLayerParams *gather;
1340     CoreML__Specification__ScatterLayerParams *scatter;
1341     CoreML__Specification__GatherNDLayerParams *gathernd;
1342     CoreML__Specification__ScatterNDLayerParams *scatternd;
1343     CoreML__Specification__SoftmaxNDLayerParams *softmaxnd;
1344     CoreML__Specification__GatherAlongAxisLayerParams *gatheralongaxis;
1345     CoreML__Specification__ScatterAlongAxisLayerParams *scatteralongaxis;
1346     CoreML__Specification__ReverseLayerParams *reverse;
1347     CoreML__Specification__ReverseSeqLayerParams *reverseseq;
1348     CoreML__Specification__SplitNDLayerParams *splitnd;
1349     CoreML__Specification__ConcatNDLayerParams *concatnd;
1350     CoreML__Specification__TransposeLayerParams *transpose;
1351     CoreML__Specification__SliceStaticLayerParams *slicestatic;
1352     CoreML__Specification__SliceDynamicLayerParams *slicedynamic;
1353     CoreML__Specification__SlidingWindowsLayerParams *slidingwindows;
1354     CoreML__Specification__TopKLayerParams *topk;
1355     CoreML__Specification__ArgMinLayerParams *argmin;
1356     CoreML__Specification__ArgMaxLayerParams *argmax;
1357     CoreML__Specification__EmbeddingNDLayerParams *embeddingnd;
1358     CoreML__Specification__BatchedMatMulLayerParams *batchedmatmul;
1359     /*
1360      * Tensor Allocation / Reshape-related Operations
1361      */
1362     CoreML__Specification__GetShapeLayerParams *getshape;
1363     CoreML__Specification__LoadConstantNDLayerParams *loadconstantnd;
1364     CoreML__Specification__FillLikeLayerParams *filllike;
1365     CoreML__Specification__FillStaticLayerParams *fillstatic;
1366     CoreML__Specification__FillDynamicLayerParams *filldynamic;
1367     CoreML__Specification__BroadcastToLikeLayerParams *broadcasttolike;
1368     CoreML__Specification__BroadcastToStaticLayerParams *broadcasttostatic;
1369     CoreML__Specification__BroadcastToDynamicLayerParams *broadcasttodynamic;
1370     CoreML__Specification__SqueezeLayerParams *squeeze;
1371     CoreML__Specification__ExpandDimsLayerParams *expanddims;
1372     CoreML__Specification__FlattenTo2DLayerParams *flattento2d;
1373     CoreML__Specification__ReshapeLikeLayerParams *reshapelike;
1374     CoreML__Specification__ReshapeStaticLayerParams *reshapestatic;
1375     CoreML__Specification__ReshapeDynamicLayerParams *reshapedynamic;
1376     CoreML__Specification__RankPreservingReshapeLayerParams *rankpreservingreshape;
1377     CoreML__Specification__ConstantPaddingLayerParams *constantpad;
1378     /*
1379      * Random Distributions
1380      */
1381     CoreML__Specification__RandomNormalLikeLayerParams *randomnormallike;
1382     CoreML__Specification__RandomNormalStaticLayerParams *randomnormalstatic;
1383     CoreML__Specification__RandomNormalDynamicLayerParams *randomnormaldynamic;
1384     CoreML__Specification__RandomUniformLikeLayerParams *randomuniformlike;
1385     CoreML__Specification__RandomUniformStaticLayerParams *randomuniformstatic;
1386     CoreML__Specification__RandomUniformDynamicLayerParams *randomuniformdynamic;
1387     CoreML__Specification__RandomBernoulliLikeLayerParams *randombernoullilike;
1388     CoreML__Specification__RandomBernoulliStaticLayerParams *randombernoullistatic;
1389     CoreML__Specification__RandomBernoulliDynamicLayerParams *randombernoullidynamic;
1390     CoreML__Specification__CategoricalDistributionLayerParams *categoricaldistribution;
1391     /*
1392      * Reduction-related Layers:
1393      */
1394     CoreML__Specification__ReduceL1LayerParams *reducel1;
1395     CoreML__Specification__ReduceL2LayerParams *reducel2;
1396     CoreML__Specification__ReduceMaxLayerParams *reducemax;
1397     CoreML__Specification__ReduceMinLayerParams *reducemin;
1398     CoreML__Specification__ReduceSumLayerParams *reducesum;
1399     CoreML__Specification__ReduceProdLayerParams *reduceprod;
1400     CoreML__Specification__ReduceMeanLayerParams *reducemean;
1401     CoreML__Specification__ReduceLogSumLayerParams *reducelogsum;
1402     CoreML__Specification__ReduceSumSquareLayerParams *reducesumsquare;
1403     CoreML__Specification__ReduceLogSumExpLayerParams *reducelogsumexp;
1404     /*
1405      * Masking / Selection Layers
1406      */
1407     CoreML__Specification__WhereNonZeroLayerParams *wherenonzero;
1408     CoreML__Specification__MatrixBandPartLayerParams *matrixbandpart;
1409     CoreML__Specification__LowerTriangularLayerParams *lowertriangular;
1410     CoreML__Specification__UpperTriangularLayerParams *uppertriangular;
1411     CoreML__Specification__WhereBroadcastableLayerParams *wherebroadcastable;
1412     /*
1413      * Normalization Layers
1414      */
1415     CoreML__Specification__LayerNormalizationLayerParams *layernormalization;
1416     CoreML__Specification__NonMaximumSuppressionLayerParams *nonmaximumsuppression;
1417     /*
     * The following layers are available only in Core ML Specification
     * version >= 5 (iOS >= 14, macOS >= 11.0).
1420      */
1421     CoreML__Specification__OneHotLayerParams *onehot;
1422     CoreML__Specification__CumSumLayerParams *cumsum;
1423     CoreML__Specification__ClampedReLULayerParams *clampedrelu;
1424     CoreML__Specification__ArgSortLayerParams *argsort;
1425     CoreML__Specification__Pooling3DLayerParams *pooling3d;
1426     CoreML__Specification__GlobalPooling3DLayerParams *globalpooling3d;
1427     CoreML__Specification__SliceBySizeLayerParams *slicebysize;
1428     CoreML__Specification__Convolution3DLayerParams *convolution3d;
1429   };
1430 };
1431 #define CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__INIT \
1432  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__neural_network_layer__descriptor) \
1433     , (char *)protobuf_c_empty_string, 0,NULL, 0,NULL, 0,NULL, 0,NULL, 0, CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER__NOT_SET, {0} }
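
/*
 **
 * Illustrative usage sketch (not generated code): a minimal activation layer wiring
 * one input blob to one output blob. "act" stands for an ActivationParams message
 * populated elsewhere; all names are hypothetical.
 * .. code::
 *     char *ins[1]  = { "conv1_out" };
 *     char *outs[1] = { "relu1_out" };
 *     CoreML__Specification__NeuralNetworkLayer layer =
 *         CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__INIT;
 *     layer.name = "relu1";
 *     layer.n_input  = 1;  layer.input  = ins;
 *     layer.n_output = 1;  layer.output = outs;
 *     layer.layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_ACTIVATION;
 *     layer.activation = &act;
 */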
1434 
1435 
1436 /*
1437  **
1438  * Branching Layer
1439  * A layer that provides the functionality of branching or an If-Else block.
1440  * Must have 1 input. There are no outputs as the execution is transferred to either the
1441  * if or the else branch based on the value of the input.
1442  * Input is the condition predicate. Must be a scalar (length 1 tensor).
1443  */
1444 struct  _CoreML__Specification__BranchLayerParams
1445 {
1446   ProtobufCMessage base;
1447   /*
1448    **
1449    * execute this graph if the absolute value of the input Tensor is greater than 1e-6
1450    * This must be present.
1451    */
1452   CoreML__Specification__NeuralNetwork *ifbranch;
1453   /*
1454    **
1455    * execute this graph if the absolute value of the input Tensor is less than 1e-6
1456    * This is optional.
1457    */
1458   CoreML__Specification__NeuralNetwork *elsebranch;
1459 };
1460 #define CORE_ML__SPECIFICATION__BRANCH_LAYER_PARAMS__INIT \
1461  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__branch_layer_params__descriptor) \
1462     , NULL, NULL }
1463 
1464 
1465 /*
1466  **
1467  * Loop Layer
1468  * A layer that provides the functionality of a "for" loop or a "while" loop.
 * There are either no inputs or 1 input. When an input is present, it corresponds to the maximum loop count,
 * in which case the value of the "maxLoopIterations" field is ignored. The input must be a scalar.
 * (For the description below, maxLoopIterations is assumed to be the value of the input, when it is present.)
1472  * No outputs are produced. Blobs produced by the condition or the body network are visible in the scope of the overall network.
1473  * "conditionNetwork" must produce a tensor with the name specified in the "conditionVar" field.
1474  * There are 3 possible cases for determining the termination condition:
1475  * Case 1:
 * If there is no "conditionNetwork", the layer corresponds to a pure for loop, which is run "maxLoopIterations" times.
1477  * Equivalent pseudo-code:
1478  * for loopIterator = 0 : maxLoopIterations
1479  *      bodyNetwork()
1480  * Case 2:
1481  * "conditionNetwork" is present, and "maxLoopIterations" is 0 and there is no input,
 * in which case the layer corresponds to a while loop. Equivalent pseudo-code:
1483  * conditionVar = conditionNetwork()
1484  * while conditionVar:
1485  *      bodyNetwork()
1486  *      conditionVar = conditionNetwork()
1487  * Case 3:
1488  * "conditionNetwork" is provided, and "maxLoopIterations" is positive or there is an input,
 * in which case the layer corresponds to a while loop with a joint condition. Equivalent pseudo-code:
1490  * loopIterator = 0
1491  * conditionVar = conditionNetwork()
1492  * while (conditionVar and loopIterator < maxLoopIterations):
1493  *      bodyNetwork()
1494  *      loopIterator = loopIterator + 1
1495  *      conditionVar = conditionNetwork()
1496  */
1497 struct  _CoreML__Specification__LoopLayerParams
1498 {
1499   ProtobufCMessage base;
1500   /*
1501    **
1502    * maximum number of iterations. Ignored if input is present.
1503    */
1504   uint64_t maxloopiterations;
1505   /*
1506    **
1507    * This field provides the name of the tensor which is produced by the conditionNetwork
1508    * and whose value is checked to start/continue/terminate the loop. Value close to 0.0f is treated as False.
1509    * This field is optional.
1510    * Must be a non empty string if and only if "conditionNetwork" is present.
1511    */
1512   char *conditionvar;
1513   /*
1514    **
1515    * Must generate a tensor with the name provided in the "conditionVar" field.
1516    * This field is optional.
1517    * Must be present if and only if "conditionVar" field is a non empty string.
1518    */
1519   CoreML__Specification__NeuralNetwork *conditionnetwork;
1520   /*
1521    **
1522    * Body of the loop.
1523    * This field must be present.
1524    */
1525   CoreML__Specification__NeuralNetwork *bodynetwork;
1526 };
1527 #define CORE_ML__SPECIFICATION__LOOP_LAYER_PARAMS__INIT \
1528  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__loop_layer_params__descriptor) \
1529     , 0, (char *)protobuf_c_empty_string, NULL, NULL }
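
/*
 **
 * Illustrative usage sketch (not generated code): a "while"-style loop (Case 2 above)
 * pairs a condition network, which must write the blob named by "conditionVar", with a
 * body network. "cond_net" and "body_net" are hypothetical NeuralNetwork messages
 * built elsewhere.
 * .. code::
 *     CoreML__Specification__LoopLayerParams loop =
 *         CORE_ML__SPECIFICATION__LOOP_LAYER_PARAMS__INIT;
 *     loop.maxloopiterations = 0;       // 0 + conditionNetwork => plain while loop
 *     loop.conditionvar = "keep_going";
 *     loop.conditionnetwork = cond_net;
 *     loop.bodynetwork = body_net;
 */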
1530 
1531 
1532 /*
1533  **
1534  * Loop break Layer
1535  * Terminate the loop that has this layer.
1536  * If present, it should always reside in the "bodyNetwork" of the loop layer
1537  * No inputs/outputs
1538  */
1539 struct  _CoreML__Specification__LoopBreakLayerParams
1540 {
1541   ProtobufCMessage base;
1542 };
1543 #define CORE_ML__SPECIFICATION__LOOP_BREAK_LAYER_PARAMS__INIT \
1544  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__loop_break_layer_params__descriptor) \
1545      }
1546 
1547 
1548 /*
1549  **
1550  * Loop Continue Layer
1551  * Stop the current loop iteration and continue on the next iteration.
1552  * If present, it should always reside in the "bodyNetwork" of the loop layer
1553  * No inputs/outputs
1554  */
1555 struct  _CoreML__Specification__LoopContinueLayerParams
1556 {
1557   ProtobufCMessage base;
1558 };
1559 #define CORE_ML__SPECIFICATION__LOOP_CONTINUE_LAYER_PARAMS__INIT \
1560  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__loop_continue_layer_params__descriptor) \
1561      }
1562 
1563 
1564 /*
1565  **
1566  * Copy Layer
1567  * A layer that copies its input tensor to the output tensor.
1568  * Must have 1 input and 1 output, with distinct names.
1569  * This is the only layer that is allowed to re-generate an output that is already present in the neural network prior to this layer,
1570  * in which case it will overwrite the output tensor.
1571  */
1572 struct  _CoreML__Specification__CopyLayerParams
1573 {
1574   ProtobufCMessage base;
1575 };
1576 #define CORE_ML__SPECIFICATION__COPY_LAYER_PARAMS__INIT \
1577  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__copy_layer_params__descriptor) \
1578      }
1579 
1580 
1581 /*
1582  **
1583  * GreaterThan Layer
1584  * Either 1 or 2 inputs.
1585  * Produces 1 output.
 * Performs an elementwise greater-than operation.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1588  * .. code::
1589  *      y = x1 > x2
1590  *          or
1591  *      y = x1 > alpha, if only one input is provided
1592  * Broadcasting is supported.
1593  */
1594 struct  _CoreML__Specification__GreaterThanLayerParams
1595 {
1596   ProtobufCMessage base;
1597   /*
1598    **
1599    * Compare to the scalar value provided here if there is 1 input
1600    */
1601   float alpha;
1602 };
1603 #define CORE_ML__SPECIFICATION__GREATER_THAN_LAYER_PARAMS__INIT \
1604  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__greater_than_layer_params__descriptor) \
1605     , 0 }
1606 
1607 
1608 /*
1609  **
1610  * GreaterEqual Layer
1611  * Either 1 or 2 inputs.
1612  * Produces 1 output.
 * Performs an elementwise greater-or-equal operation.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1615  * .. code::
1616  *      y = x1 >= x2
1617  *          or
1618  *      y = x1 >= alpha, if only one input is provided
1619  * Broadcasting is supported.
1620  */
1621 struct  _CoreML__Specification__GreaterEqualLayerParams
1622 {
1623   ProtobufCMessage base;
1624   /*
1625    **
1626    * Compare to the scalar value provided here if there is 1 input
1627    */
1628   float alpha;
1629 };
1630 #define CORE_ML__SPECIFICATION__GREATER_EQUAL_LAYER_PARAMS__INIT \
1631  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__greater_equal_layer_params__descriptor) \
1632     , 0 }
1633 
1634 
1635 /*
1636  **
1637  * LessThan Layer
1638  * Either 1 or 2 inputs.
1639  * Produces 1 output.
 * Performs an elementwise less-than operation.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1642  * .. code::
1643  *      y = x1 < x2
1644  *          or
1645  *      y = x1 < alpha, if only one input is provided
1646  * Broadcasting is supported.
1647  */
1648 struct  _CoreML__Specification__LessThanLayerParams
1649 {
1650   ProtobufCMessage base;
1651   /*
1652    **
1653    * Compare to the scalar value provided here if there is 1 input
1654    */
1655   float alpha;
1656 };
1657 #define CORE_ML__SPECIFICATION__LESS_THAN_LAYER_PARAMS__INIT \
1658  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__less_than_layer_params__descriptor) \
1659     , 0 }
1660 
1661 
1662 /*
1663  **
1664  * LessEqual Layer
1665  * Either 1 or 2 inputs.
1666  * Produces 1 output.
 * Performs an elementwise less-or-equal operation.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1669  * .. code::
1670  *      y = x1 <= x2
1671  *          or
1672  *      y = x1 <= alpha, if only one input is provided
1673  * Broadcasting is supported.
1674  */
1675 struct  _CoreML__Specification__LessEqualLayerParams
1676 {
1677   ProtobufCMessage base;
1678   /*
1679    **
1680    * Compare to the scalar value provided here if there is 1 input
1681    */
1682   float alpha;
1683 };
1684 #define CORE_ML__SPECIFICATION__LESS_EQUAL_LAYER_PARAMS__INIT \
1685  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__less_equal_layer_params__descriptor) \
1686     , 0 }
1687 
1688 
1689 /*
1690  **
1691  * Equal Layer
1692  * Either 1 or 2 inputs.
1693  * Produces 1 output.
 * Performs an elementwise equality operation.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1696  * .. code::
1697  *      y = x1 == x2
1698  *          or
1699  *      y = x1 == alpha, if only one input is provided
1700  * Broadcasting is supported.
1701  */
1702 struct  _CoreML__Specification__EqualLayerParams
1703 {
1704   ProtobufCMessage base;
1705   /*
1706    **
1707    * Compare to the scalar value provided here if there is 1 input
1708    */
1709   float alpha;
1710 };
1711 #define CORE_ML__SPECIFICATION__EQUAL_LAYER_PARAMS__INIT \
1712  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__equal_layer_params__descriptor) \
1713     , 0 }
1714 
1715 
1716 /*
1717  **
1718  * NotEqual Layer
1719  * Either 1 or 2 inputs.
1720  * Produces 1 output.
 * Performs an elementwise not-equal operation.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1723  * .. code::
1724  *      y = x1 != x2
1725  *          or
1726  *      y = x1 != alpha, if only one input is provided
1727  * Broadcasting is supported.
1728  */
1729 struct  _CoreML__Specification__NotEqualLayerParams
1730 {
1731   ProtobufCMessage base;
1732   /*
1733    **
1734    * Compare to the scalar value provided here if there is 1 input
1735    */
1736   float alpha;
1737 };
1738 #define CORE_ML__SPECIFICATION__NOT_EQUAL_LAYER_PARAMS__INIT \
1739  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__not_equal_layer_params__descriptor) \
1740     , 0 }
1741 
1742 
1743 /*
1744  **
1745  * LogicalAnd Layer
1746  * Must have 2 inputs, produces 1 output.
 * Performs an elementwise logical AND operation.
 * An input is considered False if equal to 0.0f, otherwise True.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1750  * .. code::
1751  *      y = AND(x1, x2)
1752  * Broadcasting is supported.
1753  */
1754 struct  _CoreML__Specification__LogicalAndLayerParams
1755 {
1756   ProtobufCMessage base;
1757 };
1758 #define CORE_ML__SPECIFICATION__LOGICAL_AND_LAYER_PARAMS__INIT \
1759  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__logical_and_layer_params__descriptor) \
1760      }
1761 
1762 
1763 /*
1764  **
1765  * LogicalOr Layer
1766  * Must have 2 inputs, produces 1 output.
 * Performs an elementwise logical OR operation.
 * An input is considered False if equal to 0.0f, otherwise True.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1770  * .. code::
1771  *      y = OR(x1, x2)
1772  * Broadcasting is supported.
1773  */
1774 struct  _CoreML__Specification__LogicalOrLayerParams
1775 {
1776   ProtobufCMessage base;
1777 };
1778 #define CORE_ML__SPECIFICATION__LOGICAL_OR_LAYER_PARAMS__INIT \
1779  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__logical_or_layer_params__descriptor) \
1780      }
1781 
1782 
1783 /*
1784  **
1785  * LogicalXor Layer
1786  * Must have 2 inputs, produces 1 output.
 * Performs an elementwise logical XOR operation.
 * An input is considered False if equal to 0.0f, otherwise True.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1790  * .. code::
1791  *      y = XOR(x1, x2)
1792  * Broadcasting is supported.
1793  */
1794 struct  _CoreML__Specification__LogicalXorLayerParams
1795 {
1796   ProtobufCMessage base;
1797 };
1798 #define CORE_ML__SPECIFICATION__LOGICAL_XOR_LAYER_PARAMS__INIT \
1799  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__logical_xor_layer_params__descriptor) \
1800      }
1801 
1802 
1803 /*
1804  **
1805  * LogicalNot Layer
1806  * Must have 1 input, produces 1 output.
 * Performs an elementwise logical NOT operation.
 * An input is considered False if equal to 0.0f, otherwise True.
 * Output is 1.0f if the condition is true, otherwise 0.0f.
1810  * .. code::
1811  *      y = NOT(x)
1812  */
1813 struct  _CoreML__Specification__LogicalNotLayerParams
1814 {
1815   ProtobufCMessage base;
1816 };
1817 #define CORE_ML__SPECIFICATION__LOGICAL_NOT_LAYER_PARAMS__INIT \
1818  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__logical_not_layer_params__descriptor) \
1819      }
1820 
1821 
1822 struct  _CoreML__Specification__BorderAmounts__EdgeSizes
1823 {
1824   ProtobufCMessage base;
1825   /*
1826    **
1827    * The amount to be padded or cropped from the beginning.
1828    */
1829   uint64_t startedgesize;
1830   /*
1831    **
1832    * The amount to be padded or cropped from the end.
1833    */
1834   uint64_t endedgesize;
1835 };
1836 #define CORE_ML__SPECIFICATION__BORDER_AMOUNTS__EDGE_SIZES__INIT \
1837  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__border_amounts__edge_sizes__descriptor) \
1838     , 0, 0 }
1839 
1840 
1841 /*
1842  **
1843  * Specifies the amount of spatial border to be either padded or cropped.
1844  * For padding:
1845  * .. code::
1846  *     H_out = borderAmounts[0].startEdgeSize + H_in + borderAmounts[0].endEdgeSize
1847  *     W_out = borderAmounts[1].startEdgeSize + W_in + borderAmounts[1].endEdgeSize
1848  *     topPaddingAmount == Height startEdgeSize
1849  *     bottomPaddingAmount == Height endEdgeSize
1850  *     leftPaddingAmount == Width startEdgeSize
1851  *     rightPaddingAmount == Width endEdgeSize
1852  * For cropping:
1853  * .. code::
1854  *     H_out = (-borderAmounts[0].startEdgeSize) + H_in + (-borderAmounts[0].endEdgeSize)
1855  *     W_out = (-borderAmounts[1].startEdgeSize) + W_in + (-borderAmounts[1].endEdgeSize)
1856  *     topCropAmount == Height startEdgeSize
1857  *     bottomCropAmount == Height endEdgeSize
1858  *     leftCropAmount == Width startEdgeSize
1859  *     rightCropAmount == Width endEdgeSize
1860  */
1861 struct  _CoreML__Specification__BorderAmounts
1862 {
1863   ProtobufCMessage base;
1864   /*
1865    **
1866    * The border amounts.
1867    * This must be length 2 in the order ``[H, W]``.
1868    */
1869   size_t n_borderamounts;
1870   CoreML__Specification__BorderAmounts__EdgeSizes **borderamounts;
1871 };
1872 #define CORE_ML__SPECIFICATION__BORDER_AMOUNTS__INIT \
1873  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__border_amounts__descriptor) \
1874     , 0,NULL }
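
/*
 **
 * Illustrative usage sketch (not generated code): the two entries are ordered [H, W];
 * here one row is added/removed at each height edge and two columns at each width
 * edge. The variables are hypothetical.
 * .. code::
 *     CoreML__Specification__BorderAmounts__EdgeSizes h =
 *         CORE_ML__SPECIFICATION__BORDER_AMOUNTS__EDGE_SIZES__INIT;
 *     CoreML__Specification__BorderAmounts__EdgeSizes w =
 *         CORE_ML__SPECIFICATION__BORDER_AMOUNTS__EDGE_SIZES__INIT;
 *     h.startedgesize = 1;  h.endedgesize = 1;
 *     w.startedgesize = 2;  w.endedgesize = 2;
 *     CoreML__Specification__BorderAmounts__EdgeSizes *edges[2] = { &h, &w };
 *     CoreML__Specification__BorderAmounts border =
 *         CORE_ML__SPECIFICATION__BORDER_AMOUNTS__INIT;
 *     border.n_borderamounts = 2;
 *     border.borderamounts = edges;
 */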
1875 
1876 
1877 /*
1878  **
1879  * Specifies the type of padding to be used with Convolution/Deconvolution and Pooling layers.
1880  * After padding, input spatial shape: ``[H_in, W_in]``, gets modified to the
1881  * output spatial shape ``[H_out, W_out]``.
1882  * .. code::
1883  *      topPaddingAmount == Height startEdgeSize == borderAmounts[0].startEdgeSize
1884  *      bottomPaddingAmount == Height endEdgeSize == borderAmounts[0].endEdgeSize
1885  *      leftPaddingAmount == Width startEdgeSize == borderAmounts[1].startEdgeSize
1886  *      rightPaddingAmount == Width endEdgeSize == borderAmounts[1].endEdgeSize
1887  * With Convolution or Pooling:
1888  * .. code::
1889  *    H_out = int_division_round_down((H_in + topPaddingAmount + bottomPaddingAmount - KernelSize[0]),stride[0]) + 1
 * which is the same as:
1891  * .. code::
1892  *    H_out = int_division_round_up((H_in + topPaddingAmount + bottomPaddingAmount - KernelSize[0] + 1),stride[0])
1893  * With Deconvolution:
1894  * .. code::
1895  *    H_out = (H_in-1) * stride[0] + kernelSize[0] - (topPaddingAmount + bottomPaddingAmount)
1896  * The equivalent expressions hold true for ``W_out`` as well.
1897  * By default, the values of ``paddingAmounts`` are set to ``0``,
1898  * which results in a "true" valid padding.
1899  * If non-zero values are provided for ``paddingAmounts``,
1900  * "valid" convolution/pooling is performed within the spatially expanded input.
1901  */
1902 struct  _CoreML__Specification__ValidPadding
1903 {
1904   ProtobufCMessage base;
1905   CoreML__Specification__BorderAmounts *paddingamounts;
1906 };
1907 #define CORE_ML__SPECIFICATION__VALID_PADDING__INIT \
1908  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__valid_padding__descriptor) \
1909     , NULL }
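
/*
 **
 * Worked sketch (not generated code) of the valid-padding output-size formula above,
 * written as plain C; the helper name and parameters are hypothetical.
 * .. code::
 *     static uint64_t valid_h_out(uint64_t h_in, uint64_t top, uint64_t bottom,
 *                                 uint64_t kernel, uint64_t stride)
 *     {
 *         return (h_in + top + bottom - kernel) / stride + 1;  // round-down division
 *     }
 *     // e.g. h_in = 7, kernel = 3, stride = 2, no padding  ->  (7 - 3) / 2 + 1 = 3
 */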
1910 
1911 
1912 /*
1913  **
1914  * Specifies the type of padding to be used with Convolution/Deconvolution and pooling layers.
1915  * After padding, input spatial shape: ``[H_in, W_in]``, gets modified to the
1916  * output spatial shape ``[H_out, W_out]``.
1917  * With Convolution or pooling:
1918  * .. code::
1919  *      H_out = int_division_round_up(H_in,stride[0])
1920  *      W_out = int_division_round_up(W_in,stride[1])
1921  * This is achieved by using the following padding amounts:
1922  * .. code::
 *     totalPaddingHeight = max(0,(H_out-1) * stride[0] + KernelSize[0] - H_in)
 *     totalPaddingWidth = max(0,(W_out-1) * stride[1] + KernelSize[1] - W_in)
1925  * There are two modes of asymmetry:
1926  * ``BOTTOM_RIGHT_HEAVY``, and ``TOP_LEFT_HEAVY``.
1927  * If the mode is ``BOTTOM_RIGHT_HEAVY``:
1928  * .. code::
1929  *     topPaddingAmount = floor(totalPaddingHeight / 2)
1930  *     bottomPaddingAmount = totalPaddingHeight - topPaddingAmount
1931  *     leftPaddingAmount = floor(totalPaddingWidth / 2)
1932  *     rightPaddingAmount = totalPaddingWidth - leftPaddingAmount
1933  * If the mode is ``TOP_LEFT_HEAVY``:
1934  * .. code::
1935  *     bottomPaddingAmount = floor(totalPaddingHeight / 2)
1936  *     topPaddingAmount = totalPaddingHeight - bottomPaddingAmount
1937  *     rightPaddingAmount = floor(totalPaddingWidth / 2)
1938  *     leftPaddingAmount = totalPaddingWidth - rightPaddingAmount
1939  * With Deconvolution:
1940  * .. code::
1941  *    H_out = H_in * stride[0]
1942  *    W_out = W_in * stride[1]
1943  */
1944 struct  _CoreML__Specification__SamePadding
1945 {
1946   ProtobufCMessage base;
1947   CoreML__Specification__SamePadding__SamePaddingMode asymmetrymode;
1948 };
1949 #define CORE_ML__SPECIFICATION__SAME_PADDING__INIT \
1950  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__same_padding__descriptor) \
1951     , CORE_ML__SPECIFICATION__SAME_PADDING__SAME_PADDING_MODE__BOTTOM_RIGHT_HEAVY }
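
/*
 **
 * Worked sketch (not generated code) of the same-padding arithmetic above for the
 * default BOTTOM_RIGHT_HEAVY mode; h_in, stride, and kernel are hypothetical uint64_t
 * values.
 * .. code::
 *     uint64_t h_out  = (h_in + stride - 1) / stride;       // ceil(h_in / stride)
 *     uint64_t needed = (h_out - 1) * stride + kernel;
 *     uint64_t total  = needed > h_in ? needed - h_in : 0;  // max(0, ...)
 *     uint64_t top    = total / 2;                          // floor(total / 2)
 *     uint64_t bottom = total - top;                        // remainder goes to the bottom
 */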
1952 
1953 
1954 /*
1955  **
1956  * Specifies how grid points are sampled from an interval.
 * Without loss of generality, assume the interval to be [0, X-1] from which N points are to be sampled.
1958  * Here X may correspond to an input image's height or width.
1959  * All the methods can be expressed in terms of numpy's linspace function, along with the constraint that grid points have to lie in the interval [0, X-1].
 * Note: numpy.linspace(start = start, stop = end, num = N, endpoint = True) corresponds to sampling
1961  * N points uniformly from the interval [start, end], endpoints included.
1962  * The methods vary in how the ``start`` and ``end`` values are computed.
1963  */
1964 struct  _CoreML__Specification__SamplingMode
1965 {
1966   ProtobufCMessage base;
1967   CoreML__Specification__SamplingMode__Method samplingmethod;
1968 };
1969 #define CORE_ML__SPECIFICATION__SAMPLING_MODE__INIT \
1970  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__sampling_mode__descriptor) \
1971     , CORE_ML__SPECIFICATION__SAMPLING_MODE__METHOD__STRICT_ALIGN_ENDPOINTS_MODE }
1972 
1973 
1974 /*
1975  **
1976  * Specifies the convention used to specify four bounding box coordinates for an image of size (Height, Width).
1977  * The (0,0) coordinate corresponds to the top-left corner of the image.
1978  */
1979 struct  _CoreML__Specification__BoxCoordinatesMode
1980 {
1981   ProtobufCMessage base;
1982   CoreML__Specification__BoxCoordinatesMode__Coordinates boxmode;
1983 };
1984 #define CORE_ML__SPECIFICATION__BOX_COORDINATES_MODE__INIT \
1985  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__box_coordinates_mode__descriptor) \
1986     , CORE_ML__SPECIFICATION__BOX_COORDINATES_MODE__COORDINATES__CORNERS_HEIGHT_FIRST }
1987 
1988 
1989 /*
1990  **
1991  * Weights for layer parameters.
1992  * Weights are stored as repeated floating point numbers
1993  * using row-major ordering
1994  * and can represent 1-, 2-, 3-, or 4-dimensional data.
1995  */
1996 struct  _CoreML__Specification__WeightParams
1997 {
1998   ProtobufCMessage base;
1999   /*
2000    **
2001    * Values specified in single / float / FP32 precision.
2002    */
2003   size_t n_floatvalue;
2004   float *floatvalue;
2005   /*
2006    **
2007    * Values in 16-bit half precision floating point.
2008    */
2009   ProtobufCBinaryData float16value;
2010   /*
2011    **
2012    * Raw value specification for quantized lower precisions.
2013    * This field is interpreted as uintN, where N is the number of bits in quantization.
   * For example, if N = 8, the field is interpreted as an array of UINT8.
2015    * Use this field for quantized parameters unless specifically noted to use
2016    * int8RawValue.
2017    */
2018   ProtobufCBinaryData rawvalue;
2019   /*
2020    **
2021    * Field to be used if int8DynamicQuantize is set in the parent layer.
2022    * Cannot be set if rawValue is also set.
2023    * The values in this field are interpreted as INT8.
   * If this field is set, the following conditions must hold true:
2025    * * QuantizationType == LinearQuantizationParams, such that
2026    *   * size of the "scale" field is 1 and "bias" field is empty in "LinearQuantizationParams"
2027    */
2028   ProtobufCBinaryData int8rawvalue;
2029   /*
2030    **
2031    * Quantization related parameters.
2032    */
2033   CoreML__Specification__QuantizationParams *quantization;
2034   protobuf_c_boolean isupdatable;
2035 };
2036 #define CORE_ML__SPECIFICATION__WEIGHT_PARAMS__INIT \
2037  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__weight_params__descriptor) \
2038     , 0,NULL, {0,NULL}, {0,NULL}, {0,NULL}, NULL, 0 }
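
/*
 **
 * Illustrative usage sketch (not generated code): FP32 weights are supplied through
 * the repeated "floatValue" field in row-major order. The 2x3 array below is
 * hypothetical.
 * .. code::
 *     static float w[6] = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f };
 *     CoreML__Specification__WeightParams weights =
 *         CORE_ML__SPECIFICATION__WEIGHT_PARAMS__INIT;
 *     weights.n_floatvalue = 6;
 *     weights.floatvalue = w;
 */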
2039 
2040 
2041 typedef enum {
2042   CORE_ML__SPECIFICATION__QUANTIZATION_PARAMS__QUANTIZATION_TYPE__NOT_SET = 0,
2043   CORE_ML__SPECIFICATION__QUANTIZATION_PARAMS__QUANTIZATION_TYPE_LINEAR_QUANTIZATION = 101,
2044   CORE_ML__SPECIFICATION__QUANTIZATION_PARAMS__QUANTIZATION_TYPE_LOOKUP_TABLE_QUANTIZATION = 102
2045     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__QUANTIZATION_PARAMS__QUANTIZATION_TYPE)
2046 } CoreML__Specification__QuantizationParams__QuantizationTypeCase;
2047 
2048 /*
2049  **
2050  * Quantization parameters.
2051  */
2052 struct  _CoreML__Specification__QuantizationParams
2053 {
2054   ProtobufCMessage base;
2055   uint64_t numberofbits;
2056   CoreML__Specification__QuantizationParams__QuantizationTypeCase quantization_type_case;
2057   union {
2058     CoreML__Specification__LinearQuantizationParams *linearquantization;
2059     CoreML__Specification__LookUpTableQuantizationParams *lookuptablequantization;
2060   };
2061 };
2062 #define CORE_ML__SPECIFICATION__QUANTIZATION_PARAMS__INIT \
2063  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__quantization_params__descriptor) \
2064     , 0, CORE_ML__SPECIFICATION__QUANTIZATION_PARAMS__QUANTIZATION_TYPE__NOT_SET, {0} }
2065 
2066 
2067 struct  _CoreML__Specification__LinearQuantizationParams
2068 {
2069   ProtobufCMessage base;
2070   /*
2071    **
2072    * Stores scale and bias values corresponding to the quantized weights.
2073    * Must be an array of 1 element, or an array of C elements, where C
   * is the number of output channels. For recurrent layers it is equal to
2075    * the output vector size.
2076    * Relationship between quantized weights, unquantized weights, scale and bias:
2077    * W_unquantized = W_quantized * scale + bias
2078    */
2079   size_t n_scale;
2080   float *scale;
2081   size_t n_bias;
2082   float *bias;
2083 };
2084 #define CORE_ML__SPECIFICATION__LINEAR_QUANTIZATION_PARAMS__INIT \
2085  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__linear_quantization_params__descriptor) \
2086     , 0,NULL, 0,NULL }
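
/*
 **
 * Worked sketch (not generated code): recovering approximate FP32 weights from an
 * 8-bit quantized buffer with per-channel scale and bias, following
 * W_unquantized = W_quantized * scale + bias. "q", "w", "scale", "bias", "C", and "K"
 * are hypothetical.
 * .. code::
 *     // q: uint8_t values, laid out as C channels of K weights each
 *     for (size_t c = 0; c < C; ++c)
 *         for (size_t k = 0; k < K; ++k)
 *             w[c * K + k] = (float)q[c * K + k] * scale[c] + bias[c];
 */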
2087 
2088 
2089 struct  _CoreML__Specification__LookUpTableQuantizationParams
2090 {
2091   ProtobufCMessage base;
2092   /*
2093    * Stores look-up table quantization values. Must be an array of
   * (2^numberOfBits) elements.
2095    */
2096   size_t n_floatvalue;
2097   float *floatvalue;
2098 };
2099 #define CORE_ML__SPECIFICATION__LOOK_UP_TABLE_QUANTIZATION_PARAMS__INIT \
2100  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__look_up_table_quantization_params__descriptor) \
2101     , 0,NULL }
2102 
2103 
2104 typedef enum {
2105   CORE_ML__SPECIFICATION__CONVOLUTION_LAYER_PARAMS__CONVOLUTION_PADDING_TYPE__NOT_SET = 0,
2106   CORE_ML__SPECIFICATION__CONVOLUTION_LAYER_PARAMS__CONVOLUTION_PADDING_TYPE_VALID = 50,
2107   CORE_ML__SPECIFICATION__CONVOLUTION_LAYER_PARAMS__CONVOLUTION_PADDING_TYPE_SAME = 51
2108     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__CONVOLUTION_LAYER_PARAMS__CONVOLUTION_PADDING_TYPE)
2109 } CoreML__Specification__ConvolutionLayerParams__ConvolutionPaddingTypeCase;
2110 
2111 /*
2112  **
2113  * A layer that performs spatial convolution or deconvolution.
2114  * .. code::
2115  *      y = ConvolutionLayer(x)
2116  * Requires 1 or 2 inputs and produces 1 output.
2117  * Input
2118  *    First Input:
2119  *      A blob with rank greater than or equal to 4.
2120  *      Rank 4 blob represents [Batch, channels, height, width].
2121  *      For ranks greater than 4, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
 *     From Core ML specification version 4 onwards (iOS >= 13, macOS >= 10.15),
 *     the convolution layer can have 2 inputs, in which case the second input is
2124  *     the blob representing the weights. This is allowed when "isDeconvolution" = False.
2125  *     The weight blob should have shape
2126  *     ``[outputChannels, kernelChannels, kernelHeight, kernelWidth]``,
2127  *     where kernelChannels == inputChannels / nGroups.
2128  * Output
2129  *   Rank is same as the input. e.g.: for rank 4 input, output shape is [B, C_out, H_out, W_out]
2130  * If ``dilationFactor`` is not 1, effective kernel size is
2131  * modified as follows:
2132  * .. code::
2133  *      KernelSize[0] <-- (kernelSize[0]-1) * dilationFactor[0] + 1
2134  *      KernelSize[1] <-- (kernelSize[1]-1) * dilationFactor[1] + 1
 * Type of padding can be ``valid`` or ``same``. Output spatial dimensions depend on
 * the type of padding. For details, refer to the descriptions of the messages "ValidPadding"
2137  * and "SamePadding". Padded values are all zeros.
2138  * For Deconvolution, ``ConvolutionPaddingType`` (``valid`` or ``same``) is ignored when ``outputShape`` is set.
2139  */
2140 struct  _CoreML__Specification__ConvolutionLayerParams
2141 {
2142   ProtobufCMessage base;
2143   /*
2144    **
2145    * The number of kernels.
2146    * Same as ``C_out`` used in the layer description.
2147    */
2148   uint64_t outputchannels;
2149   /*
2150    **
2151    * Channel dimension of the kernels.
2152    * Must be equal to ``inputChannels / nGroups``, if isDeconvolution == False
2153    * Must be equal to ``inputChannels``, if isDeconvolution == True
2154    */
2155   uint64_t kernelchannels;
2156   /*
2157    **
2158    * Group convolution, i.e. weight reuse along channel axis.
2159    * Input and kernels are divided into g groups
2160    * and convolution / deconvolution is applied within the groups independently.
2161    * If not set or 0, it is set to the default value 1.
2162    */
2163   uint64_t ngroups;
2164   /*
2165    **
2166    * Must be length 2 in the order ``[H, W]``.
2167    * If not set, default value ``[3, 3]`` is used.
2168    */
2169   size_t n_kernelsize;
2170   uint64_t *kernelsize;
2171   /*
2172    **
2173    * Must be length 2 in the order ``[H, W]``.
2174    * If not set, default value ``[1, 1]`` is used.
2175    */
2176   size_t n_stride;
2177   uint64_t *stride;
2178   /*
2179    **
2180    * Must be length 2 in order ``[H, W]``.
2181    * If not set, default value ``[1, 1]`` is used.
2182    * It is ignored if ``isDeconvolution == true``.
2183    */
2184   size_t n_dilationfactor;
2185   uint64_t *dilationfactor;
2186   /*
2187    **
2188    * Flag to specify whether it is a deconvolution layer.
2189    */
2190   protobuf_c_boolean isdeconvolution;
2191   /*
2192    **
2193    * Flag to specify whether a bias is to be added or not.
2194    */
2195   protobuf_c_boolean hasbias;
2196   /*
2197    **
2198    * Weights associated with this layer.
2199    * If convolution (``isDeconvolution == false``), weights have the shape
2200    * ``[outputChannels, kernelChannels, kernelHeight, kernelWidth]``, where kernelChannels == inputChannels / nGroups
2201    * If deconvolution (``isDeconvolution == true``) weights have the shape
2202    * ``[kernelChannels, outputChannels / nGroups, kernelHeight, kernelWidth]``, where kernelChannels == inputChannels
2203    */
2204   CoreML__Specification__WeightParams *weights;
2205   /*
2206    * / Must be of size [outputChannels].
2207    */
2208   CoreML__Specification__WeightParams *bias;
2209   /*
2210    **
2211    * The output shape, which has length 2 ``[H_out, W_out]``.
2212    * This is used only for deconvolution (``isDeconvolution == true``).
2213    * If not set, the deconvolution output shape is calculated
2214    * based on ``ConvolutionPaddingType``.
2215    */
2216   size_t n_outputshape;
2217   uint64_t *outputshape;
2218   CoreML__Specification__ConvolutionLayerParams__ConvolutionPaddingTypeCase convolution_padding_type_case;
2219   union {
2220     CoreML__Specification__ValidPadding *valid;
2221     CoreML__Specification__SamePadding *same;
2222   };
2223 };
2224 #define CORE_ML__SPECIFICATION__CONVOLUTION_LAYER_PARAMS__INIT \
2225  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__convolution_layer_params__descriptor) \
2226     , 0, 0, 0, 0,NULL, 0,NULL, 0,NULL, 0, 0, NULL, NULL, 0,NULL, CORE_ML__SPECIFICATION__CONVOLUTION_LAYER_PARAMS__CONVOLUTION_PADDING_TYPE__NOT_SET, {0} }
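
/*
 * Illustrative sketch (not part of the generated header): one way to populate a
 * ``ConvolutionLayerParams`` message for a plain 3x3 convolution with 32 input
 * channels, 64 output channels and a single group. The ``weights`` and ``bias``
 * WeightParams submessages, and the ``valid``/``same`` padding oneof (selected
 * via ``convolution_padding_type_case``), are assumed to be filled in elsewhere.
 */
static inline void
example_fill_convolution(CoreML__Specification__ConvolutionLayerParams *conv)
{
  static uint64_t kernel[2] = {3, 3};   /* [H, W] */
  static uint64_t stride[2] = {1, 1};   /* [H, W] */

  CoreML__Specification__ConvolutionLayerParams init =
      CORE_ML__SPECIFICATION__CONVOLUTION_LAYER_PARAMS__INIT;
  *conv = init;

  conv->outputchannels = 64;   /* C_out: number of kernels */
  conv->kernelchannels = 32;   /* inputChannels / nGroups for convolution */
  conv->ngroups = 1;
  conv->n_kernelsize = 2;
  conv->kernelsize = kernel;
  conv->n_stride = 2;
  conv->stride = stride;
  conv->isdeconvolution = 0;
  conv->hasbias = 0;           /* no bias in this sketch */
}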
2227 
2228 
2229 /*
2230  **
2231  * A layer that performs a 3-dimensional convolution.
2232  * .. code::
2233  *      y = Convolution3DLayer(x)
2234  * Input
2235  *    A blob of rank 5.
2236  *    The input blob's shape should be ``[batch, channels, depth, height, width]``.
2237  * Fields
2238  *   The bias field, if set, should have shape of ``[channelsOut]``.
2239  * Output
2240  *   A blob of rank 5.
2241  *   The output blob's shape is ``[batch, channelsOut, depthOut, heightOut, widthOut]``.
2242  * Type of padding can be ``custom``, ``valid``, or ``same``. Padded values are all zeros.
 * Output spatial dimensions depend on the type of padding. For details, refer to the
2244  * descriptions of the ``PaddingType`` field of this ``Convolution3DLayerParams`` message.
2245  * Example
2246  *   For example, given an input of size ``[1, 3, 3, 8, 8]``, a stride of 2 in each dimension,
2247  *   a kernel of 3 in each dimension, 2 output channels, and ``same`` padding, this layer will
2248  *   compute the total padding applied in the depth, height, and width dimensions to be 2, 1, and 1,
2249  *   respectively. The depth padding is even and will be applied equally to both sides of the depth
2250  *   dimension. Since the height and width padding values are odd, they'll be applied to the
2251  *   bottom/right of the height/width dimensions. Thus, the padding applied to the input will be
2252  *   ``[1, 1, 0, 1, 0, 1]`` (front, back, top, bottom, left, right). Finally, the output produced
2253  *   will have size ``[1, 2, 2, 4, 4]``.
2254  */
2255 struct  _CoreML__Specification__Convolution3DLayerParams
2256 {
2257   ProtobufCMessage base;
2258   /*
2259    **
2260    * The number of channels in the output (channelsOut). Must be a positive integer.
2261    */
2262   int32_t outputchannels;
2263   /*
2264    **
2265    * The number of channels in the input (channels). Must be a positive integer.
2266    */
2267   int32_t inputchannels;
2268   /*
2269    **
2270    * Group convolution, i.e., weight reuse along the channel axis.
2271    * It must evenly divide both the number of input and output channels and be at most the number
2272    * of input channels (a depthwise convolution).
2273    * Input and kernels are divided into g groups and convolution is applied within the groups
2274    * independently.
2275    */
2276   int32_t ngroups;
2277   /*
2278    * Depth of the convolution kernel. Must be a positive integer.
2279    */
2280   int32_t kerneldepth;
2281   /*
2282    * Height of the convolution kernel. Must be a positive integer.
2283    */
2284   int32_t kernelheight;
2285   /*
2286    * Width of the convolution kernel. Must be a positive integer.
2287    */
2288   int32_t kernelwidth;
2289   /*
2290    * Stride along the depth direction. Must be a positive integer.
2291    */
2292   int32_t stridedepth;
2293   /*
2294    * Stride along the height direction. Must be a positive integer.
2295    */
2296   int32_t strideheight;
2297   /*
2298    * Stride along the width direction. Must be a positive integer.
2299    */
2300   int32_t stridewidth;
2301   /*
2302    * Dilation along the depth direction. Must be a positive integer.
2303    */
2304   int32_t dilationdepth;
2305   /*
2306    * Dilation along the height direction. Must be a positive integer.
2307    */
2308   int32_t dilationheight;
2309   /*
2310    * Dilation along the width direction. Must be a positive integer.
2311    */
2312   int32_t dilationwidth;
2313   /*
2314    **
2315    * Flag to specify whether a bias is to be added or not.
2316    * If false, then no bias is added.
2317    */
2318   protobuf_c_boolean hasbias;
2319   /*
2320    **
2321    * Weights associated with this layer.
2322    * Weights have the shape
2323    * if deconvolution == False
2324    * ``[outputChannels, kernelChannels, kernelDepth, kernelHeight, kernelWidth]``, where
2325    * kernelChannels == inputChannels / nGroups
2326    * else if deconvolution == True
   * ``[outputChannels / nGroups, kernelChannels, kernelDepth, kernelHeight, kernelWidth]``, where
   * kernelChannels == inputChannels
   */
2329   CoreML__Specification__WeightParams *weights;
2330   /*
2331    **
2332    * Must be of size ``[outputChannels]``.
2333    */
2334   CoreML__Specification__WeightParams *bias;
2335   CoreML__Specification__Convolution3DLayerParams__PaddingType paddingtype;
2336   /*
2337    * Padding before the input in the depth direction. Must be zero or a positive integer.
   * Used when the `PaddingType` is `CustomPadding`; otherwise it is ignored.
2339    */
2340   int32_t custompaddingfront;
2341   /*
2342    * Padding after the input in the depth direction. Must be zero or a positive integer.
   * Used when the `PaddingType` is `CustomPadding`; otherwise it is ignored.
2344    */
2345   int32_t custompaddingback;
2346   /*
2347    * Padding before the input in the height direction. Must be zero or a positive integer.
   * Used when the `PaddingType` is `CustomPadding`; otherwise it is ignored.
2349    */
2350   int32_t custompaddingtop;
2351   /*
2352    * Padding after the input in the height direction. Must be zero or a positive integer.
   * Used when the `PaddingType` is `CustomPadding`; otherwise it is ignored.
2354    */
2355   int32_t custompaddingbottom;
2356   /*
2357    * Padding before the input in the width direction. Must be zero or a positive integer.
   * Used when the `PaddingType` is `CustomPadding`; otherwise it is ignored.
2359    */
2360   int32_t custompaddingleft;
2361   /*
2362    * Padding after the input in the width direction. Must be zero or a positive integer.
   * Used when the `PaddingType` is `CustomPadding`; otherwise it is ignored.
2364    */
2365   int32_t custompaddingright;
2366   /*
2367    * Flag to specify if this is Convolution Transpose or not.
2368    */
2369   protobuf_c_boolean isdeconvolution;
2370   /*
2371    * The output shape, which has length 3 ``[D_out, H_out, W_out]``.
2372    * This is used only for deconvolution (``isDeconvolution == true``).
2373    * If not set, the deconvolution output shape is calculated
2374    * based on ``PaddingType``.
2375    */
2376   size_t n_outputshape;
2377   uint64_t *outputshape;
2378 };
2379 #define CORE_ML__SPECIFICATION__CONVOLUTION3_DLAYER_PARAMS__INIT \
2380  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__convolution3_dlayer_params__descriptor) \
2381     , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, NULL, CORE_ML__SPECIFICATION__CONVOLUTION3_DLAYER_PARAMS__PADDING_TYPE__CUSTOM, 0, 0, 0, 0, 0, 0, 0, 0,NULL }
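
/*
 * Minimal sketch (not part of the generated header) of the usual ``same``-padding
 * arithmetic for one dimension, consistent with the worked example above: for input
 * size 8, stride 2 and kernel 3 the total padding is 1; for input size 3, stride 2
 * and kernel 3 it is 2. When the total is odd, the extra element goes to the
 * back/bottom/right side.
 */
static inline void
example_same_padding_1d(int in, int kernel, int stride, int *pad_front, int *pad_back)
{
  int out = (in + stride - 1) / stride;            /* ceil(in / stride) */
  int total = (out - 1) * stride + kernel - in;    /* total padding needed */
  if (total < 0)
    total = 0;
  *pad_front = total / 2;                          /* smaller half in front */
  *pad_back = total - *pad_front;                  /* extra element at the back */
}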
2382 
2383 
2384 /*
2385  **
2386  * A layer that performs a matrix-vector or matrix-matrix product.
2387  * This is equivalent to a fully-connected, or dense layer.
2388  * The weight parameters correspond to a matrix of dimensions (inputChannels, outputChannels) i.e. (C_in, C_out)
2389  * .. code::
2390  *      y = InnerProductLayer(x)
2391  * Requires 1 input and produces 1 output.
2392  * Input
 *      Input can have rank 1 to rank 5. This is how it is reshaped into the matrix (for rank > 1):
2394  *      rank 1 (x1) : in this case, the layer corresponds to a matrix-vector product. x1 must be equal to C_in
2395  *      rank 2 (x1, x2): x2 must be equal to C_in
2396  *      rank 3 (x1, x2, x3) --> (x1 * x2, x3). x3 must be equal to C_in
2397  *      rank 4 (x1, x2, x3, x4) ---> (x1, x2 * x3 * x4). x2 * x3 * x4 must be equal to C_in
2398  *      rank 5 (x1, x2, x3, x4, x5) ---> (x1 * x2, x3 * x4 * x5). x3 * x4 * x5 must be equal to C_in
2399  * Output
2400  *      Output rank is same as the input rank
2401  *      rank 1: (C_out)
2402  *      rank 2: (x1, C_out)
2403  *      rank 3: (x1, x2, C_out)
2404  *      rank 4: (x1, C_out, 1, 1)
2405  *      rank 5: (x1, x2, C_out, 1, 1)
2406  */
2407 struct  _CoreML__Specification__InnerProductLayerParams
2408 {
2409   ProtobufCMessage base;
2410   /*
2411    * / Input size: C_in.
2412    */
2413   uint64_t inputchannels;
2414   /*
2415    * / Output size: C_out.
2416    */
2417   uint64_t outputchannels;
2418   /*
2419    * / Whether a bias is added or not.
2420    */
2421   protobuf_c_boolean hasbias;
2422   /*
2423    * / Weight matrix [C_out, C_in].
2424    */
2425   CoreML__Specification__WeightParams *weights;
2426   /*
2427    * / Bias vector [C_out].
2428    */
2429   CoreML__Specification__WeightParams *bias;
2430   /*
2431    **
2432    * If set, this layer, at runtime, quantizes the floating point input blob to int8 before applying an
2433    * inner product using INT8 weight matrix parameters, as provided in weights->int8RawValue. The
2434    * result is then dequantized.
2435    * Requires:
2436    * * hasBias == false
2437    * * QuantizationType == LinearQuantizationParams, such that
2438    *   * size of the "scale" field is 1 and "bias" field is empty in "LinearQuantizationParams"
2439    * * numberOfBits == 8
2440    * * weights->rawValue_size to be empty
2441    */
2442   protobuf_c_boolean int8dynamicquantize;
2443 };
2444 #define CORE_ML__SPECIFICATION__INNER_PRODUCT_LAYER_PARAMS__INIT \
2445  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__inner_product_layer_params__descriptor) \
2446     , 0, 0, 0, NULL, NULL, 0 }
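
/*
 * Illustrative sketch (not part of the generated header): populating an
 * ``InnerProductLayerParams`` message for a 512 -> 10 dense layer. The ``weights``
 * (shape [C_out, C_in]) and ``bias`` (shape [C_out]) WeightParams submessages are
 * assumed to be built elsewhere and are left NULL here.
 */
static inline void
example_fill_inner_product(CoreML__Specification__InnerProductLayerParams *ip)
{
  CoreML__Specification__InnerProductLayerParams init =
      CORE_ML__SPECIFICATION__INNER_PRODUCT_LAYER_PARAMS__INIT;
  *ip = init;

  ip->inputchannels = 512;      /* C_in  */
  ip->outputchannels = 10;      /* C_out */
  ip->hasbias = 0;              /* set to 1 and attach a [C_out] bias to add one */
  ip->int8dynamicquantize = 0;
}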
2447 
2448 
2449 /*
2450  **
2451  * A layer that performs a matrix lookup and optionally adds a bias.
2452  * The weights matrix is stored with dimensions [outputChannels, inputDim].
2453  * .. code::
2454  *      y = EmbeddingLayer(x)
2455  * Requires 1 input and produces 1 output.
2456  * Input
2457  *     Input values must be in the range ``[0, inputDim - 1]``.
2458  *     Input must have rank equal to 4 or 5, such that the last 3 dimensions are all 1.
2459  *     rank 4: shape (x1, 1, 1, 1). x1 is effectively the batch/sequence length.
 *     rank 5: shape (x1, x2, 1, 1, 1). x1 * x2 is effectively the combined batch/sequence length.
2461  * Output
2462  *      Output rank is same as the input rank. Please see input description above.
2463  *      rank 4: shape (x1, outputChannels, 1, 1)
2464  *      rank 5: shape (x1, x2, outputChannels, 1, 1)
2465  */
2466 struct  _CoreML__Specification__EmbeddingLayerParams
2467 {
2468   ProtobufCMessage base;
2469   /*
2470    * / Size of the input dictionary.
2471    */
2472   uint64_t inputdim;
2473   /*
2474    * / Size of the output vectors.
2475    */
2476   uint64_t outputchannels;
2477   /*
2478    * / Whether a bias is added or not.
2479    */
2480   protobuf_c_boolean hasbias;
2481   /*
2482    * / 2-D weights of dimensions [outputChannels, inputDim].
2483    */
2484   CoreML__Specification__WeightParams *weights;
2485   /*
2486    * / Bias of size [outputChannels].
2487    */
2488   CoreML__Specification__WeightParams *bias;
2489 };
2490 #define CORE_ML__SPECIFICATION__EMBEDDING_LAYER_PARAMS__INIT \
2491  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__embedding_layer_params__descriptor) \
2492     , 0, 0, 0, NULL, NULL }
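
/*
 * Illustrative sketch (not part of the generated header): an embedding lookup from
 * a vocabulary of 10000 indices to 300-dimensional vectors. The 2-D ``weights``
 * WeightParams of shape [outputChannels, inputDim] is assumed to be filled elsewhere.
 */
static inline void
example_fill_embedding(CoreML__Specification__EmbeddingLayerParams *emb)
{
  CoreML__Specification__EmbeddingLayerParams init =
      CORE_ML__SPECIFICATION__EMBEDDING_LAYER_PARAMS__INIT;
  *emb = init;

  emb->inputdim = 10000;        /* valid input values are [0, 9999] */
  emb->outputchannels = 300;    /* size of each output vector */
  emb->hasbias = 0;
}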
2493 
2494 
2495 /*
2496  **
2497  * A layer that performs a matrix lookup and optionally adds a bias.
2498  * The weights matrix is stored with dimensions [embeddingSize, vocabSize].
2499  * .. code::
2500  *      y = EmbeddingNDLayer(x)
2501  * Requires 1 input and produces 1 output.
2502  * Input
2503  *     Input values must be in the range ``[0, vocabSize - 1]``.
2504  *     Input must have rank at least 2. The last dimension must always be 1.
2505  *     rank 2: shape (x1, 1). x1 is the batch/sequence length.
2506  *     rank 3: shape (x1, x2, 1). x1 * x2 is effectively the combined batch/sequence length.
 *     rank 4: shape (x1, x2, x3, 1). x1 * x2 * x3 is effectively the combined batch/sequence length.
 *     rank 5: shape (x1, x2, x3, x4, 1). x1 * x2 * x3 * x4 is effectively the combined batch/sequence length.
2509  * Output
2510  *      Output rank is same as the input rank. Please see input description above.
2511  *      rank 2: shape (x1, embeddingSize)
2512  *      rank 3: shape (x1, x2, embeddingSize)
2513  *      rank 4: shape (x1, x2, x3, embeddingSize)
2514  *      rank 5: shape (x1, x2, x3, x4, embeddingSize)
2515  */
2516 struct  _CoreML__Specification__EmbeddingNDLayerParams
2517 {
2518   ProtobufCMessage base;
2519   /*
2520    * / Size of the input dictionary.
2521    */
2522   uint64_t vocabsize;
2523   /*
2524    * / Size of the output vectors.
2525    */
2526   uint64_t embeddingsize;
2527   /*
2528    * / Whether a bias is added or not.
2529    */
2530   protobuf_c_boolean hasbias;
2531   /*
2532    * / 2-D weights of dimensions [embeddingSize, vocabSize].
2533    */
2534   CoreML__Specification__WeightParams *weights;
2535   /*
2536    * / Bias of size [embeddingSize].
2537    */
2538   CoreML__Specification__WeightParams *bias;
2539 };
2540 #define CORE_ML__SPECIFICATION__EMBEDDING_NDLAYER_PARAMS__INIT \
2541  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__embedding_ndlayer_params__descriptor) \
2542     , 0, 0, 0, NULL, NULL }
2543 
2544 
2545 /*
2546  **
2547  * A layer that performs batch normalization,
2548  * which is performed along axis = -3,
2549  * and repeated along the other axes, if present.
2550  * .. code::
2551  *      y = BatchnormLayer(x)
2552  * Requires 1 input and produces 1 output.
2553  * This operation is described by the following formula:
2554  * .. math::
2555  *     y_i = \gamma_i \dfrac{ (x_i - \mu_i)}{\sqrt{\sigma_i^2 + \epsilon}} + \beta_i \;,\;i=1,....,C
2556  * Input
 *     A blob with rank greater than or equal to 3.
2558  *     Example: Rank 4 blob represents [Batch, channels, height, width]
2559  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
2560  * Output
2561  *     A blob with the same shape as the input.
2562  */
2563 struct  _CoreML__Specification__BatchnormLayerParams
2564 {
2565   ProtobufCMessage base;
2566   /*
2567    * / Size of the channel dimension in the input.
2568    */
2569   uint64_t channels;
2570   /*
2571    **
2572    * If ``computeMeanVar == true``,
2573    * the mean and variance are calculated from either
2574    * the single input instance, if ``instanceNormalization == true``,
   * or the whole batch, if ``instanceNormalization == false``,
   * and the values provided in the parameters "mean" and "variance" are ignored.
2577    */
2578   protobuf_c_boolean computemeanvar;
2579   protobuf_c_boolean instancenormalization;
2580   /*
2581    **
2582    * A small constant to avoid division by 0 while normalizing by variance.
2583    * Defaults to ``1e-5`` if not set or set to ``0``.
2584    */
2585   float epsilon;
2586   /*
2587    * / Parameter of length [channels]
2588    */
2589   CoreML__Specification__WeightParams *gamma;
2590   /*
2591    * / Parameter of length [channels]
2592    */
2593   CoreML__Specification__WeightParams *beta;
2594   /*
2595    * / Parameter of length [channels]
2596    */
2597   CoreML__Specification__WeightParams *mean;
2598   /*
2599    * / Parameter of length [channels]
2600    */
2601   CoreML__Specification__WeightParams *variance;
2602 };
2603 #define CORE_ML__SPECIFICATION__BATCHNORM_LAYER_PARAMS__INIT \
2604  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__batchnorm_layer_params__descriptor) \
2605     , 0, 0, 0, 0, NULL, NULL, NULL, NULL }
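
/*
 * Illustrative sketch (not part of the generated header): a batch-norm layer in
 * inference mode, using stored statistics for 64 channels. The gamma, beta, mean
 * and variance WeightParams (each of length [channels]) are assumed to be attached
 * elsewhere.
 */
static inline void
example_fill_batchnorm(CoreML__Specification__BatchnormLayerParams *bn)
{
  CoreML__Specification__BatchnormLayerParams init =
      CORE_ML__SPECIFICATION__BATCHNORM_LAYER_PARAMS__INIT;
  *bn = init;

  bn->channels = 64;
  bn->computemeanvar = 0;          /* use the provided mean/variance parameters */
  bn->instancenormalization = 0;
  bn->epsilon = 1e-5f;             /* same value as the documented default */
}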
2606 
2607 
2608 struct  _CoreML__Specification__PoolingLayerParams__ValidCompletePadding
2609 {
2610   ProtobufCMessage base;
2611   /*
2612    **
2613    * Must be length 2 in order ``[H, W]``.
2614    * If not set, value ``[0, 0]`` is used.
2615    */
2616   size_t n_paddingamounts;
2617   uint64_t *paddingamounts;
2618 };
2619 #define CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__VALID_COMPLETE_PADDING__INIT \
2620  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__pooling_layer_params__valid_complete_padding__descriptor) \
2621     , 0,NULL }
2622 
2623 
2624 typedef enum {
2625   CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_PADDING_TYPE__NOT_SET = 0,
2626   CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_PADDING_TYPE_VALID = 30,
2627   CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_PADDING_TYPE_SAME = 31,
2628   CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_PADDING_TYPE_INCLUDE_LAST_PIXEL = 32
2629     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_PADDING_TYPE)
2630 } CoreML__Specification__PoolingLayerParams__PoolingPaddingTypeCase;
2631 
2632 /*
2633  **
2634  * A spatial pooling layer.
2635  * .. code::
2636  *      y = PoolingLayer(x)
2637  * Requires 1 input and produces 1 output.
2638  * Input
 *     A blob with rank greater than or equal to 4.
2640  *     Rank 4 blob represents [Batch, channels, height, width]
2641  *     For ranks greater than 4, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
2642  * Output
2643  *     Rank is same as the input. e.g.: for rank 4 input, output shape is [B, C, H_out, W_out]
2644  * Padding options are similar to ``ConvolutionLayerParams``
2645  * with the additional option of ``ValidCompletePadding`` (``includeLastPixel``),
2646  * which ensures that the last application of the kernel
2647  * always includes the last pixel of the input image, if there is padding.
2648  * .. code::
2649  *     H_out = ceil(float(H_in + 2 * paddingAmounts[0] - kernelSize[0])/float(Stride[0])) + 1
 *     if (paddingAmounts[0] > 0 or paddingAmounts[1] > 0) {
2651  *          if ((H_out - 1) * Stride >= H_in + paddingAmounts[0]) {
2652  *              H_out = H_out - 1
2653  *          }
2654  *     }
2655  * The equivalent expressions hold true for ``W_out`` as well.
2656  * Only symmetric padding is supported with this option.
2657  */
2658 struct  _CoreML__Specification__PoolingLayerParams
2659 {
2660   ProtobufCMessage base;
2661   /*
2662    * / Type of pooling operation.
2663    */
2664   CoreML__Specification__PoolingLayerParams__PoolingType type;
2665   /*
2666    **
2667    * Must be length 2 in the order ``[H, W]``.
2668    * If not set, default value ``[3, 3]`` is used.
2669    */
2670   size_t n_kernelsize;
2671   uint64_t *kernelsize;
2672   /*
2673    **
2674    * Must be length 2 in the order ``[H, W]``.
2675    * If not set, default value ``[1, 1]`` is used.
2676    */
2677   size_t n_stride;
2678   uint64_t *stride;
2679   /*
2680    **
2681    * If true, padded values are excluded from the count (denominator)
2682    * when computing average pooling.
2683    */
2684   protobuf_c_boolean avgpoolexcludepadding;
2685   /*
2686    **
2687    * If true, global pooling is performed.
2688    * Kernel size is inferred from the input data spatial dimensions.
2689    */
2690   protobuf_c_boolean globalpooling;
2691   CoreML__Specification__PoolingLayerParams__PoolingPaddingTypeCase pooling_padding_type_case;
2692   union {
2693     CoreML__Specification__ValidPadding *valid;
2694     CoreML__Specification__SamePadding *same;
2695     CoreML__Specification__PoolingLayerParams__ValidCompletePadding *includelastpixel;
2696   };
2697 };
2698 #define CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__INIT \
2699  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__pooling_layer_params__descriptor) \
2700     , CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_TYPE__MAX, 0,NULL, 0,NULL, 0, 0, CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_PADDING_TYPE__NOT_SET, {0} }
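
/*
 * Minimal sketch (not part of the generated header) of the ``includeLastPixel``
 * output-size rule quoted above, written for one spatial dimension at a time
 * (the original formula checks whether either dimension is padded before the
 * correction step). ceil() is expanded for non-negative integer operands.
 */
static inline int
example_include_last_pixel_out_size(int in, int kernel, int stride, int pad)
{
  /* out = ceil((in + 2*pad - kernel) / stride) + 1 */
  int out = (in + 2 * pad - kernel + stride - 1) / stride + 1;

  /* Drop the last window if it would start entirely inside the padding. */
  if (pad > 0 && (out - 1) * stride >= in + pad)
    out = out - 1;
  return out;
}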
2701 
2702 
2703 /*
2704  * A layer to pool three spatial dimensions
2705  * Input
2706  *      A blob with rank equal to 5, representing [Batch, channels, depth, height, width].
2707  * Output
2708  *      Rank is same as the input: A blob with rank equal to 5, representing [Batch, channels, depth, height, width].
2709  * Requires 1 input and produces 1 output.
2710  * For example, given an input of shape (1,1,2,3,3):
2711  *        +----+----+----+
2712  *      / | 10 | 11 | 12 |
2713  *     /  +----+----+----+
2714  *    /   | 13 | 14 | 15 |
2715  *   /    +----+----+----+
2716  *  /     | 16 | 17 | 18 |
2717  * /      +----+----+----+
2718  * +----+----+----+      /
2719  * |  1 |  2 |  3 |     /
2720  * +----+----+----+    /
2721  * |  4 |  5 |  6 |   /
2722  * +----+----+----+  /
2723  * |  7 |  8 |  9 | /
2724  * +----+----+----+
2725  * And applying MAX pooling using:
2726  *      Kernel: 2x2x2
2727  *      Stride: 1x1x1
2728  *      Valid Padding
2729  * We expect to get an output with shape: (1,1,1,2,2) and value:
2730  * +----+----+
2731  * | 14 | 15 |
2732  * +----+----+
2733  * | 17 | 18 |
2734  * +----+----+
2735  */
2736 struct  _CoreML__Specification__Pooling3DLayerParams
2737 {
2738   ProtobufCMessage base;
2739   /*
2740    * Whether to use Max or Average
2741    */
2742   CoreML__Specification__Pooling3DLayerParams__PoolingType3D type;
2743   /*
2744    * Depth of the pooling region.
2745    */
2746   int32_t kerneldepth;
2747   /*
2748    * Height of the pooling region.
2749    */
2750   int32_t kernelheight;
2751   /*
2752    * Width of the pooling region.
2753    */
2754   int32_t kernelwidth;
2755   /*
2756    * Stride along the depth direction
2757    */
2758   int32_t stridedepth;
2759   /*
2760    * Stride along the height direction
2761    */
2762   int32_t strideheight;
2763   /*
2764    * Stride along the width direction
2765    */
2766   int32_t stridewidth;
2767   CoreML__Specification__Pooling3DLayerParams__Pooling3DPaddingType paddingtype;
2768   /*
2769    * Padding before the input in the depth direction.
2770    */
2771   int32_t custompaddingfront;
2772   /*
2773    * Padding after the input in the depth direction.
2774    */
2775   int32_t custompaddingback;
2776   /*
2777    * Padding before the input in the height direction.
2778    */
2779   int32_t custompaddingtop;
2780   /*
2781    * Padding after the input in the height direction.
2782    */
2783   int32_t custompaddingbottom;
2784   /*
2785    * Padding before the input in the width direction.
2786    */
2787   int32_t custompaddingleft;
2788   /*
2789    * Padding after the input in the width direction.
2790    */
2791   int32_t custompaddingright;
2792   /*
2793    * If true, exclude zeros from padding in Average pooling.  Meaningless in Max Pooling.
2794    */
2795   protobuf_c_boolean countexcludepadding;
2796 };
2797 #define CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__INIT \
2798  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__pooling3_dlayer_params__descriptor) \
2799     , CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__POOLING_TYPE3_D__MAX, 0, 0, 0, 0, 0, 0, CORE_ML__SPECIFICATION__POOLING3_DLAYER_PARAMS__POOLING3_DPADDING_TYPE__CUSTOM, 0, 0, 0, 0, 0, 0, 0 }
2800 
2801 
2802 /*
2803  * A layer to pool three spatial dimensions down to one value.
2804  * This behaves like a special case of Pooling3DLayerParams in which
2805  * the Kernel is the size of the input and there is no padding.
2806  * Input
2807  *      A blob with rank equal to 5, representing [Batch, channels, depth, height, width].
2808  * Output
2809  *      Rank is same as the input: A blob with rank equal to 5, representing [Batch, channels, depth, height, width].
2810  *      Depth, height, and width of the output will always be 1.
2811  * Requires 1 input and produces 1 output.
2812  * For example, given an input of shape (1,1,2,3,3):
2813  *        +----+----+----+
2814  *      / | 10 | 11 | 12 |
2815  *     /  +----+----+----+
2816  *    /   | 13 | 14 | 15 |
2817  *   /    +----+----+----+
2818  *  /     | 16 | 17 | 18 |
2819  * /      +----+----+----+
2820  * +----+----+----+      /
2821  * |  1 |  2 |  3 |     /
2822  * +----+----+----+    /
2823  * |  4 |  5 |  6 |   /
2824  * +----+----+----+  /
2825  * |  7 |  8 |  9 | /
2826  * +----+----+----+
2827  * And applying MAX global 3d pooling, we expect to get an output with shape: (1,1,1,1,1) and value:
2828  * +----+
2829  * | 18 |
2830  * +----+
2831  */
2832 struct  _CoreML__Specification__GlobalPooling3DLayerParams
2833 {
2834   ProtobufCMessage base;
2835   /*
2836    * Whether to use Max or Average
2837    */
2838   CoreML__Specification__GlobalPooling3DLayerParams__GlobalPoolingType3D type;
2839 };
2840 #define CORE_ML__SPECIFICATION__GLOBAL_POOLING3_DLAYER_PARAMS__INIT \
2841  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__global_pooling3_dlayer_params__descriptor) \
2842     , CORE_ML__SPECIFICATION__GLOBAL_POOLING3_DLAYER_PARAMS__GLOBAL_POOLING_TYPE3_D__MAX }
2843 
2844 
2845 /*
2846  **
2847  * Fill a constant value in the padded region.
2848  */
2849 struct  _CoreML__Specification__PaddingLayerParams__PaddingConstant
2850 {
2851   ProtobufCMessage base;
2852   float value;
2853 };
2854 #define CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_CONSTANT__INIT \
2855  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__padding_layer_params__padding_constant__descriptor) \
2856     , 0 }
2857 
2858 
2859 /*
2860  **
2861  * Reflect the values at the border for padding.
2862  */
2863 struct  _CoreML__Specification__PaddingLayerParams__PaddingReflection
2864 {
2865   ProtobufCMessage base;
2866 };
2867 #define CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_REFLECTION__INIT \
2868  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__padding_layer_params__padding_reflection__descriptor) \
2869      }
2870 
2871 
2872 /*
2873  **
2874  * Replicate the values at the border for padding.
2875  */
2876 struct  _CoreML__Specification__PaddingLayerParams__PaddingReplication
2877 {
2878   ProtobufCMessage base;
2879 };
2880 #define CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_REPLICATION__INIT \
2881  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__padding_layer_params__padding_replication__descriptor) \
2882      }
2883 
2884 
2885 typedef enum {
2886   CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_TYPE__NOT_SET = 0,
2887   CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_TYPE_CONSTANT = 1,
2888   CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_TYPE_REFLECTION = 2,
2889   CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_TYPE_REPLICATION = 3
2890     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_TYPE)
2891 } CoreML__Specification__PaddingLayerParams__PaddingTypeCase;
2892 
2893 /*
2894  **
2895  * A layer that performs padding along spatial dimensions.
2896  * .. code::
2897  *      y = PaddingLayer(x)
2898  * Requires 1 input and produces 1 output.
2899  * Input
2900  *     A blob with rank at least 2.
2901  *     e.g.: blob with shape ``[H_in, W_in]``.
 *     For ranks greater than 2, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch,
 *     i.e. padding is applied on the last two dimensions.
2904  * Output
2905  *     Same rank as the input.
2906  *     e.g.: blob with shape ``[H_out, W_out]``.
2907  * Output dimensions are calculated as follows:
2908  * .. code::
2909  *     H_out = H_in + topPaddingAmount + bottomPaddingAmount
2910  *     W_out = W_in + leftPaddingAmount + rightPaddingAmount
2911  *     topPaddingAmount == Height startEdgeSize == borderAmounts[0].startEdgeSize
2912  *     bottomPaddingAmount == Height endEdgeSize == borderAmounts[0].endEdgeSize
2913  *     leftPaddingAmount == Width startEdgeSize == borderAmounts[1].startEdgeSize
2914  *     rightPaddingAmount == Width endEdgeSize == borderAmounts[1].endEdgeSize
2915  * There are three types of padding:
2916  * - ``PaddingConstant``, which fills a constant value at the border.
2917  * - ``PaddingReflection``, which reflects the values at the border.
2918  * - ``PaddingReplication``, which replicates the values at the border.
2919  * Given the following input:
2920  * .. code::
2921  *     [1, 3, 4]  :  1   2   3   4
2922  *                   5   6   7   8
2923  *                   9   10  11  12
2924  * Here is the output of applying the padding
2925  * ``(top=2, left=2, bottom=0, right=0)``
2926  * with each of the supported types:
2927  * - ``PaddingConstant`` (``value = 0``):
2928  *   .. code::
2929  *       [1, 5, 6]  :  0   0   0  0   0   0
2930  *                     0   0   0  0   0   0
2931  *                     0   0   1  2   3   4
2932  *                     0   0   5  6   7   8
2933  *                     0   0   9  10  11  12
2934  * - ``PaddingReflection``:
2935  *   .. code::
2936  *       [1, 5, 6]  :  11  10  9  10  11  12
2937  *                     7   6   5  6   7   8
2938  *                     3   2   1  2   3   4
2939  *                     7   6   5  6   7   8
2940  *                     11  10  9  10  11  12
2941  * - ``PaddingReplication``:
2942  *   .. code::
2943  *       [1, 5, 6]  :  1   1   1  2   3   4
2944  *                     1   1   1  2   3   4
2945  *                     1   1   1  2   3   4
2946  *                     5   5   5  6   7   8
2947  *                     9   9   9  10  11  12
2948  */
2949 struct  _CoreML__Specification__PaddingLayerParams
2950 {
2951   ProtobufCMessage base;
2952   /*
2953    * / Amounts to be padded to the input.
2954    */
2955   CoreML__Specification__BorderAmounts *paddingamounts;
2956   CoreML__Specification__PaddingLayerParams__PaddingTypeCase padding_type_case;
2957   union {
2958     CoreML__Specification__PaddingLayerParams__PaddingConstant *constant;
2959     CoreML__Specification__PaddingLayerParams__PaddingReflection *reflection;
2960     CoreML__Specification__PaddingLayerParams__PaddingReplication *replication;
2961   };
2962 };
2963 #define CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__INIT \
2964  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__padding_layer_params__descriptor) \
2965     , NULL, CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_TYPE__NOT_SET, {0} }
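
/*
 * Illustrative sketch (not part of the generated header): selecting reflection
 * padding through the padding_type oneof. The ``paddingamounts`` BorderAmounts
 * submessage (start/end edge sizes for height and width) is assumed to be built
 * elsewhere.
 */
static inline void
example_fill_reflection_padding(CoreML__Specification__PaddingLayerParams *pad,
                                CoreML__Specification__PaddingLayerParams__PaddingReflection *refl)
{
  CoreML__Specification__PaddingLayerParams init =
      CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__INIT;
  CoreML__Specification__PaddingLayerParams__PaddingReflection refl_init =
      CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_REFLECTION__INIT;

  *pad = init;
  *refl = refl_init;

  /* Select the oneof member and point the union at the reflection submessage. */
  pad->padding_type_case =
      CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_TYPE_REFLECTION;
  pad->reflection = refl;
}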
2966 
2967 
2968 /*
2969  **
2970  * A layer that concatenates along the axis = -3 or -5.
2971  * For general concatenation along any axis, see ConcatNDLayer.
2972  * .. code::
2973  *      y = ConcatLayer(x1,x2,....)
2974  * Requires more than 1 input and produces 1 output.
2975  * Input
2976  *   All input blobs must have same rank.
2977  *   If "sequenceConcat" = False, rank must be greater than equal to 3. In this case concatenation is along axis = -3
2978  *   If "sequenceConcat" = True, rank must be greater than equal to 5. In this case concatenation is along axis = -5
2979  * Output
2980  *   Same rank as the input.
2981  */
2982 struct  _CoreML__Specification__ConcatLayerParams
2983 {
2984   ProtobufCMessage base;
2985   /*
2986    **
2987    * If true, concatenate along the axis = -5 instead of axis = -3.
2988    */
2989   protobuf_c_boolean sequenceconcat;
2990 };
2991 #define CORE_ML__SPECIFICATION__CONCAT_LAYER_PARAMS__INIT \
2992  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__concat_layer_params__descriptor) \
2993     , 0 }
2994 
2995 
2996 /*
2997  **
2998  * A layer that performs local response normalization (LRN).
2999  * .. code::
3000  *      y = LRNLayer(x)
3001  * Requires 1 input and produces 1 output.
3002  * Input
 *     A blob with rank greater than or equal to 3.
3004  *     Example: Rank 4 blob represents [Batch, channels, height, width]
3005  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3006  * Output
3007  *     A blob with the same shape as the input.
3008  * This layer is described by the following formula:
3009  * .. math::
3010  *     x_i \leftarrow  \dfrac{x_i}{\left ( k + \dfrac{\alpha}{\text{localSize}} \sum_j x_j^2 \right )^\beta}
3011  * where the summation is done over a ``(localSize, 1, 1)`` neighborhood ---
3012  * that is, over a window "across" channels in 1x1 spatial neighborhoods.
3013  */
3014 struct  _CoreML__Specification__LRNLayerParams
3015 {
3016   ProtobufCMessage base;
3017   float alpha;
3018   float beta;
3019   /*
3020    * / Number of channels in the normalization window.
3021    */
3022   uint64_t localsize;
3023   /*
3024    * / Defaults to 1 if not set or 0. Must be strictly positive.
3025    */
3026   float k;
3027 };
3028 #define CORE_ML__SPECIFICATION__LRNLAYER_PARAMS__INIT \
3029  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__lrnlayer_params__descriptor) \
3030     , 0, 0, 0, 0 }
3031 
3032 
3033 /*
3034  **
3035  * Softmax Normalization Layer
3036  * A layer that performs softmax normalization.
3037  * Normalization is applied along axis = -3 or N-3 (where N is the rank of the input)
3038  * For softmax layer that can operate on any axis, see SoftmaxNDLayer.
3039  * .. code::
3040  *      y = SoftmaxLayer(x)
3041  * Requires 1 input and produces 1 output.
3042  * Input
3043  *     Must be a blob with rank >= 3.
3044  * Output
3045  *     A blob with the same shape as the input.
3046  * This layer is described by the following formula:
3047  * .. math::
3048  *     x_i \leftarrow \dfrac{e^{x_i}}{\sum_i{e^{x_i}}}
3049  */
3050 struct  _CoreML__Specification__SoftmaxLayerParams
3051 {
3052   ProtobufCMessage base;
3053 };
3054 #define CORE_ML__SPECIFICATION__SOFTMAX_LAYER_PARAMS__INIT \
3055  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__softmax_layer_params__descriptor) \
3056      }
3057 
3058 
3059 /*
3060  **
3061  * A layer that uniformly splits across axis = -3 to produce a specified number of outputs.
3062  * For general split operation along any axis, see SplitNDLayer.
3063  * .. code::
3064  *      (y1,y2,...yN) = SplitLayer(x), where N = nOutputs
3065  * Requires 1 input and produces multiple outputs.
3066  * Input
3067  *     A blob with rank at least 3.
3068  *     e.g.: blob with shape ``[C, H, W]``
3069  * Output
3070  *     ``nOutputs`` blobs each with same rank as the input.
3071  *     e.g.: For input that is of shape ``[C, H, W]``, output shapes will be ``[C/nOutputs, H, W]``
3072  */
3073 struct  _CoreML__Specification__SplitLayerParams
3074 {
3075   ProtobufCMessage base;
3076   /*
3077    * / The number of outputs.
3078    */
3079   uint64_t noutputs;
3080 };
3081 #define CORE_ML__SPECIFICATION__SPLIT_LAYER_PARAMS__INIT \
3082  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__split_layer_params__descriptor) \
3083     , 0 }
3084 
3085 
3086 /*
3087  **
3088  * A layer that performs elementwise addition.
3089  * This layer has limited broadcasting support. For general broadcasting see AddBroadcastableLayer.
3090  * .. code::
3091  *      y = AddLayer(x1,x2,...)
3092  * Requires 1 or more than 1 input and produces 1 output.
3093  * Input
3094  *     In general, there are no rank constraints.
3095  *     However, only certain set of shapes are broadcastable. For example:
3096  *     [B, 1, 1, 1], [B, C, 1, 1], [B, 1, H, W], [B, C, H, W]
3097  * Output
3098  *     A blob with shape equal to the input blob.
3099  * If only one input is provided, scalar addition is performed:
3100  * .. math::
3101  *     y = x + \alpha
3102  */
3103 struct  _CoreML__Specification__AddLayerParams
3104 {
3105   ProtobufCMessage base;
3106   /*
3107    **
3108    * Scalar to be added to the input.
3109    * Only used if there is a single input.
3110    */
3111   float alpha;
3112 };
3113 #define CORE_ML__SPECIFICATION__ADD_LAYER_PARAMS__INIT \
3114  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__add_layer_params__descriptor) \
3115     , 0 }
3116 
3117 
3118 /*
3119  **
3120  * A layer that performs elementwise multiplication.
3121  * This layer has limited broadcasting support. For general broadcasting see MultiplyBroadcastableLayer.
3122  * .. code::
3123  *      y = MultiplyLayer(x1,x2,...)
3124  * Requires 1 or more than 1 input and produces 1 output.
3125  * Input
3126  *     In general, there are no rank constraints.
3127  *     However, only certain set of shapes are broadcastable. For example:
3128  *     [B, 1, 1, 1], [B, C, 1, 1], [B, 1, H, W], [B, C, H, W]
3129  * Output
3130  *     A blob with shape equal to the first input blob.
3131  * If only one input is provided, scalar multiplication is performed:
3132  * .. math::
3133  *     y = \alpha x
3134  */
3135 struct  _CoreML__Specification__MultiplyLayerParams
3136 {
3137   ProtobufCMessage base;
3138   /*
3139    **
3140    * Scalar to be multiplied with the input.
3141    * Only used if there is a single input.
3142    */
3143   float alpha;
3144 };
3145 #define CORE_ML__SPECIFICATION__MULTIPLY_LAYER_PARAMS__INIT \
3146  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__multiply_layer_params__descriptor) \
3147     , 0 }
3148 
3149 
3150 /*
3151  **
3152  * A layer that applies a unary function.
3153  * .. code::
3154  *      y = UnaryFunctionLayer(x)
3155  * Requires 1 input and produces 1 output.
3156  * Input
3157  *     A blob with no rank constraints.
3158  * Output
3159  *     A blob with the same shape as the input.
3160  * The input is first modified by shifting and scaling:
3161  * .. math::
3162  *     x \leftarrow \text{scale} \cdot x + \text{shift}
3163  */
3164 struct  _CoreML__Specification__UnaryFunctionLayerParams
3165 {
3166   ProtobufCMessage base;
3167   /*
3168    * / The type of unary function.
3169    */
3170   CoreML__Specification__UnaryFunctionLayerParams__Operation type;
3171   /*
3172    **
3173    * A constant used in ``POWER`` and ``THRESHOLD`` functions.
3174    */
3175   float alpha;
3176   /*
3177    **
3178    * A small constant to avoid division by 0 while normalizing variance.
3179    * Defaults to ``1e-6`` if not set or set to ``0``.
3180    */
3181   float epsilon;
3182   /*
3183    **
3184    * Input is shifted by this amount
3185    * before the unary function is applied.
3186    * Defaults to ``0.0`` if not set.
3187    */
3188   float shift;
3189   /*
3190    **
3191    * Input is scaled by this amount
3192    * before the unary function is applied.
3193    * Defaults to ``1.0`` if not set or set to ``0``.
3194    */
3195   float scale;
3196 };
3197 #define CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__INIT \
3198  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__unary_function_layer_params__descriptor) \
3199     , CORE_ML__SPECIFICATION__UNARY_FUNCTION_LAYER_PARAMS__OPERATION__SQRT, 0, 0, 0, 0 }
3200 
3201 
3202 /*
3203  **
3204  * A layer that scales up spatial dimensions.
3205  * It supports two modes: nearest neighbour (default) and bilinear.
3206  * .. code::
3207  *      y = UpsampleLayer(x)
3208  * Requires 1 input and produces 1 output.
3209  * Input
3210  *     A blob with rank at least 3.
3211  *     e.g.: blob with shape ``[C, H, W]``.
3212  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3213  * Output
3214  *     Same rank as the input.
3215  *     e.g.: blob with shape ``[C, scalingFactor[0] * H, scalingFactor[1] * W]``
3216  */
3217 struct  _CoreML__Specification__UpsampleLayerParams
3218 {
3219   ProtobufCMessage base;
3220   /*
3221    **
3222    * Scaling Factor. Mutually exclusive with fractionalScalingFactor.
3223    * Must be length 2 in order ``[H, W]``.
3224    * If not set, default value ``[1, 1]`` is used.
3225    */
3226   size_t n_scalingfactor;
3227   uint64_t *scalingfactor;
3228   /*
3229    **
3230    * Fractional scaling factor. Mutually exclusive with scalingFactor.
3231    * Must be length 2 in order ``[H, W]``.
3232    * If not set, default value ``[1.0, 1.0]`` is used.
3233    */
3234   size_t n_fractionalscalingfactor;
3235   float *fractionalscalingfactor;
3236   CoreML__Specification__UpsampleLayerParams__InterpolationMode mode;
3237   CoreML__Specification__UpsampleLayerParams__LinearUpsampleMode linearupsamplemode;
3238 };
3239 #define CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__INIT \
3240  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__upsample_layer_params__descriptor) \
3241     , 0,NULL, 0,NULL, CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__INTERPOLATION_MODE__NN, CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__LINEAR_UPSAMPLE_MODE__DEFAULT }
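
/*
 * Illustrative sketch (not part of the generated header): a 2x nearest-neighbour
 * upsampling layer. Only the integer scaling factors are set here; the mutually
 * exclusive fractional scaling factor is left empty.
 */
static inline void
example_fill_upsample_2x(CoreML__Specification__UpsampleLayerParams *up)
{
  static uint64_t scale[2] = {2, 2};   /* [H, W] */

  CoreML__Specification__UpsampleLayerParams init =
      CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__INIT;
  *up = init;

  up->n_scalingfactor = 2;
  up->scalingfactor = scale;
  up->mode = CORE_ML__SPECIFICATION__UPSAMPLE_LAYER_PARAMS__INTERPOLATION_MODE__NN;
}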
3242 
3243 
3244 /*
3245  **
3246  * A layer that resizes the input to a pre-specified spatial size using bilinear interpolation.
3247  * .. code::
3248  *      y = ResizeBilinearLayer(x)
3249  * Requires 1 input and produces 1 output.
3250  * Input
3251  *     A blob with rank at least 3.
3252  *     e.g.: blob with shape ``[C, H_in, W_in]``.
3253  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3254  * Output
3255  *     Same rank as the input.
3256  *     e.g.: blob with shape ``[C, H_out, W_out]``.
3257  */
3258 struct  _CoreML__Specification__ResizeBilinearLayerParams
3259 {
3260   ProtobufCMessage base;
3261   /*
3262    **
3263    * Target Spatial Size.
3264    * Must be length 2 in order ``[Height, Width]``, i.e. ``[H_out, W_out]``.
3265    * If not set, default value ``[1, 1]`` is used.
3266    */
3267   size_t n_targetsize;
3268   uint64_t *targetsize;
3269   /*
3270    **
3271    * Mode used to compute the grid on which the spatial output values are evaluated.
3272    * Same mode is applied to both the height and width axes.
3273    */
3274   CoreML__Specification__SamplingMode *mode;
3275 };
3276 #define CORE_ML__SPECIFICATION__RESIZE_BILINEAR_LAYER_PARAMS__INIT \
3277  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__resize_bilinear_layer_params__descriptor) \
3278     , 0,NULL, NULL }
3279 
3280 
3281 /*
3282  **
3283  * A layer that extracts cropped spatial patches or RoIs (regions of interest) from the input and resizes them to a pre-specified size using
3284  * bilinear interpolation.
3285  * Note that RoI Align layer can be implemented with this layer followed by a pooling layer.
3286  * .. code::
3287  *      y = CropResizeLayer(x)
3288  * Requires 2 inputs and produces 1 output.
3289  * Input
3290  *     There are two inputs.
3291  *     First input represents an image feature map.
3292  *     Second input represents the bounding box coordinates for N patches or RoIs (region of interest).
3293  *     First input is rank 5: [1, Batch, C, H_in, W_in].
3294  *     Second input is rank 5. Its shape can be either [N, 1, 4, 1, 1] or [N, 1, 5, 1, 1].
3295  *     N: number of patches/RoIs to be extracted
 *     If RoI shape = ``[N, 1, 4, 1, 1]``
 *         The axis=-3 corresponds to the four coordinates specifying the bounding box.
 *         All the N RoIs are extracted from all the batches of the input.
 *     If RoI shape = ``[N, 1, 5, 1, 1]``
 *         The first element of the axis=-3 specifies the input batch id from which to extract the RoI,
 *         and must be in the interval ``[0, Batch - 1]``. That is, the n-th RoI is extracted from the
 *         RoI[n,0,0,0,0]-th input batch. The last four elements of the axis=-3 specify the bounding box coordinates.
3303  * Output
3304  *     A blob with rank 5.
3305  *           - Shape is [N, Batch, C, H_out, W_out] if input RoI shape is [N, 1, 4, 1, 1]
3306  *           - Shape is [N, 1, C, H_out, W_out] if input RoI shape is [N, 1, 5, 1, 1]
3307  */
3308 struct  _CoreML__Specification__CropResizeLayerParams
3309 {
3310   ProtobufCMessage base;
3311   /*
3312    **
3313    * Target Spatial Size.
3314    * Must be length 2 in order ``[Height, Width]``, i.e. ``[H_out, W_out]``.
3315    * If not set, default value ``[1, 1]`` is used.
3316    */
3317   size_t n_targetsize;
3318   uint64_t *targetsize;
3319   /*
3320    **
3321    * If true the bounding box coordinates must be in the interval [0, 1].
3322    * They are scaled by (H_in - 1), (W_in - 1), i.e. based on the input spatial dimensions.
3323    * If false the bounding box coordinates must be in the interval
3324    * [0, H_in -1] and [0, W_in - 1], respectively for height and width dimensions.
3325    */
3326   protobuf_c_boolean normalizedcoordinates;
3327   /*
3328    **
3329    * Mode used to compute the grid on which the spatial output values are evaluated.
3330    * Same mode is applied to both the height and width axes.
3331    */
3332   CoreML__Specification__SamplingMode *mode;
3333   /*
3334    **
3335    * Representation used to express the bounding box coordinates.
3336    * It determines how the values of the second input are interpreted.
3337    */
3338   CoreML__Specification__BoxCoordinatesMode *boxindicesmode;
3339   /*
3340    **
3341    * Additional spatial scale that multiplies the bounding box coordinates.
3342    * Generally used while implementing the RoI Align layer,
3343    * which uses unnormalized RoI coordinates along with a spatial scale less than or equal to 1.
3344    */
3345   float spatialscale;
3346 };
3347 #define CORE_ML__SPECIFICATION__CROP_RESIZE_LAYER_PARAMS__INIT \
3348  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__crop_resize_layer_params__descriptor) \
3349     , 0,NULL, 0, NULL, NULL, 0 }
3350 
3351 
3352 /*
3353  **
3354  * A layer that performs elementwise addition of a bias,
3355  * which is broadcasted to match the input shape.
3356  * .. code::
3357  *      y = BiasLayer(x)
3358  * Requires 1 input and produces 1 output.
3359  * Input
3360  *     A blob with rank at least 3.
3361  *     e.g.: blob with shape ``[C, H, W]``.
3362  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3363  * Output
3364  *     A blob with the same shape as the input.
3365  */
3366 struct  _CoreML__Specification__BiasLayerParams
3367 {
3368   ProtobufCMessage base;
3369   /*
3370    **
3371    * The shape of the bias.
3372    * Must be one of the following:
3373    * ``[1]``, ``[C]``, ``[1, H, W]`` or ``[C, H, W]``.
3374    */
3375   size_t n_shape;
3376   uint64_t *shape;
3377   /*
3378    **
3379    * The bias values.
3380    * The size must be equal to the product of the ``shape`` dimensions.
3381    */
3382   CoreML__Specification__WeightParams *bias;
3383 };
3384 #define CORE_ML__SPECIFICATION__BIAS_LAYER_PARAMS__INIT \
3385  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__bias_layer_params__descriptor) \
3386     , 0,NULL, NULL }
3387 
3388 
3389 /*
3390  **
 * A layer that performs elementwise multiplication by a scale factor
3392  * and optionally adds a bias;
3393  * both the scale and bias are broadcasted to match the input shape.
3394  * .. code::
3395  *      y = ScaleLayer(x)
3396  * Requires 1 input and produces 1 output.
3397  * Input
3398  *     A blob with rank at least 3.
3399  *     e.g.: blob with shape ``[C, H, W]``.
3400  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3401  * Output
3402  *     A blob with the same shape as the input.
3403  */
3404 struct  _CoreML__Specification__ScaleLayerParams
3405 {
3406   ProtobufCMessage base;
3407   /*
3408    **
3409    * The shape of the scale.
3410    * Must be one of the following:
3411    * ``[1]``, ``[C]``, ``[1, H, W]`` or ``[C, H, W]``.
3412    */
3413   size_t n_shapescale;
3414   uint64_t *shapescale;
  /*
   **
   * The scale values.
   * The size must be equal to the product of the dimensions specified in ``shapeScale``.
   */
3423   CoreML__Specification__WeightParams *scale;
3424   /*
3425    * / If true, a bias is added after scaling.
3426    */
3427   protobuf_c_boolean hasbias;
3428   /*
3429    **
3430    * The shape of the bias.
3431    * Must be one of the following:
3432    * ``[1]``, ``[C]``, ``[1, H, W]`` or ``[C, H, W]``.
3433    */
3434   size_t n_shapebias;
3435   uint64_t *shapebias;
3436   /*
3437    **
3438    * The bias values.
3439    * The size must be equal to the product of the ``shape`` dimensions.
3440    */
3441   CoreML__Specification__WeightParams *bias;
3442 };
3443 #define CORE_ML__SPECIFICATION__SCALE_LAYER_PARAMS__INIT \
3444  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__scale_layer_params__descriptor) \
3445     , 0,NULL, NULL, 0, 0,NULL, NULL }
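
/*
 * Illustrative sketch (not part of the generated header): a per-channel scale of
 * 64 channels without a bias. The 1-D ``scale`` WeightParams of size 64 is assumed
 * to be built elsewhere and attached separately.
 */
static inline void
example_fill_channel_scale(CoreML__Specification__ScaleLayerParams *sc)
{
  static uint64_t shape_scale[1] = {64};   /* broadcastable shape [C] */

  CoreML__Specification__ScaleLayerParams init =
      CORE_ML__SPECIFICATION__SCALE_LAYER_PARAMS__INIT;
  *sc = init;

  sc->n_shapescale = 1;
  sc->shapescale = shape_scale;
  sc->hasbias = 0;
}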
3446 
3447 
3448 /*
3449  **
3450  * A layer that loads data as a parameter and provides it as an output.
3451  * The output is rank 5. For general rank, see LoadConstantNDLayer.
3452  * .. code::
3453  *      y = LoadConstantLayer()
3454  * Requires no input and produces 1 output.
3455  * Output:
3456  *     A blob with rank 5 and shape ``[1, 1, C, H, W]``
3457  */
3458 struct  _CoreML__Specification__LoadConstantLayerParams
3459 {
3460   ProtobufCMessage base;
3461   /*
3462    **
3463    * The shape of the constant to be loaded,
   * which must be ``[C, H, W]``, that is, of length 3.
3465    */
3466   size_t n_shape;
3467   uint64_t *shape;
3468   /*
3469    **
3470    * The data values,
3471    * of size ``C * H * W``.
3472    */
3473   CoreML__Specification__WeightParams *data;
3474 };
3475 #define CORE_ML__SPECIFICATION__LOAD_CONSTANT_LAYER_PARAMS__INIT \
3476  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__load_constant_layer_params__descriptor) \
3477     , 0,NULL, NULL }
3478 
3479 
3480 /*
3481  **
 * A layer that performs L2 normalization, i.e. divides by
 * the square root of the sum of squares of all elements of the input.
3484  * .. code::
3485  *      y = L2NormalizeLayer(x)
3486  * Requires 1 input and produces 1 output.
3487  * Input
 *     A blob with rank greater than or equal to 3.
3489  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3490  * Output
3491  *     A blob with the same shape as the input.
3492  * This layer is described by the following formula:
3493  * .. math::
3494  *     x_i \leftarrow \dfrac{x_i}{\sqrt{\sum{x_i^2} + \epsilon}}
3495  */
3496 struct  _CoreML__Specification__L2NormalizeLayerParams
3497 {
3498   ProtobufCMessage base;
3499   /*
3500    **
3501    * A small constant to avoid division by 0 while normalizing variance.
3502    * Defaults to ``1e-6`` if not set or set to ``0``.
3503    */
3504   float epsilon;
3505 };
3506 #define CORE_ML__SPECIFICATION__L2_NORMALIZE_LAYER_PARAMS__INIT \
3507  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__l2_normalize_layer_params__descriptor) \
3508     , 0 }
3509 
3510 
3511 /*
3512  **
3513  * A layer that flattens the input.
3514  * .. code::
3515  *      y = FlattenLayer(x)
3516  * Requires 1 input and produces 1 output.
3517  * Input
 *     A blob with rank greater than or equal to 3.
3519  *     e.g.: Rank 4 blob represents [Batch, C, H, W]
3520  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3521  * Output
3522  *     Same rank as the input, such that last two dimensions are both 1.
3523  *     e.g.: For rank 4 input, output shape is ``[Batch, C * H * W, 1, 1]``
 * There are two flatten orders: ``CHANNEL_FIRST`` and ``CHANNEL_LAST``.
3525  * ``CHANNEL_FIRST`` does not require data to be rearranged,
3526  * because row major ordering is used by internal storage.
3527  * ``CHANNEL_LAST`` requires data to be rearranged.
3528  */
3529 struct  _CoreML__Specification__FlattenLayerParams
3530 {
3531   ProtobufCMessage base;
3532   CoreML__Specification__FlattenLayerParams__FlattenOrder mode;
3533 };
3534 #define CORE_ML__SPECIFICATION__FLATTEN_LAYER_PARAMS__INIT \
3535  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__flatten_layer_params__descriptor) \
3536     , CORE_ML__SPECIFICATION__FLATTEN_LAYER_PARAMS__FLATTEN_ORDER__CHANNEL_FIRST }
3537 
3538 
3539 /*
3540  **
3541  * A layer that recasts the input into a new shape.
3542  * .. code::
3543  *      y = ReshapeLayer(x)
3544  * Requires 1 input and produces 1 output.
3545  * Input
3546  *     A blob with rank 5.
3547  *     e.g.: ``[1, 1, C, H, W]`` or ``[Seq, 1, C, H, W]``.
3548  * Output
3549  *     A blob with rank 5.
3550  *     e.g.: ``[1, 1, C_out, H_out, W_out]`` or ``[Seq_out, 1, C_out, H_out, W_out]``.
3551  * There are two reshape orders: ``CHANNEL_FIRST`` and ``CHANNEL_LAST``.
3552  * ``CHANNEL_FIRST`` is equivalent to
3553  * flattening the input to ``[Seq, 1, C * H * W, 1, 1]`` in channel first order
3554  * and then reshaping it to the target shape;
3555  * no data rearrangement is required.
3556  * ``CHANNEL_LAST`` is equivalent to
3557  * flattening the input to ``[Seq, 1, H * W * C, 1, 1]`` in channel last order,
 * reshaping it to ``[Seq_out, 1, H_out, W_out, C_out]`` (it is now in "H_out-major" order),
 * and then permuting it to ``[C_out, H_out, W_out]``;
 * both the flattening and the permuting require the data to be rearranged.
3561  */
3562 struct  _CoreML__Specification__ReshapeLayerParams
3563 {
3564   ProtobufCMessage base;
3565   /*
3566    **
3567    * The shape of the output.
3568    * Must be of length 3 or 4.
3569    * If set to 3, ``targetShape`` is interpreted as
3570    * ``[1, 1, C_out, H_out, W_out]``, and sequence length of the input is preserved.
3571    * If set to 4, ``targetShape`` is interpreted as
3572    * ``[Seq_out, 1, C_out, H_out, W_out]``,
3573    * where ``Seq_out`` is the new sequence length.
3574    */
3575   size_t n_targetshape;
3576   int64_t *targetshape;
3577   CoreML__Specification__ReshapeLayerParams__ReshapeOrder mode;
3578 };
3579 #define CORE_ML__SPECIFICATION__RESHAPE_LAYER_PARAMS__INIT \
3580  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reshape_layer_params__descriptor) \
3581     , 0,NULL, CORE_ML__SPECIFICATION__RESHAPE_LAYER_PARAMS__RESHAPE_ORDER__CHANNEL_FIRST }
3582 
3583 
3584 /*
3585  **
3586  * A layer that rearranges the dimensions and data of an input.
3587  * For generic transpose/permute operation see TransposeLayer.
3588  * .. code::
3589  *      y = PermuteLayer(x)
3590  * Requires 1 input and produces 1 output.
3591  * Input
3592  *     Must be a rank 5 blob.
3593  *     e.g.: shape ``[Seq, B, C, H, W]``.
3594  * Output
 *     Rank 5 blob. Transposed version of the input, such that the dimension at axis=1 (equivalently axis=-4) is unchanged.
3596  * Examples:
3597  *  Assume input shape is [Seq, B, C, H, W]
3598  * - If ``axis`` is set to ``[0, 3, 1, 2]``,
3599  *   then the output has shape ``[Seq, B, W, C, H]``
3600  * - If ``axis`` is set to ``[3, 1, 2, 0]``,
3601  *   then the output has shape ``[W, B, C, H, Seq]``
3602  * - If ``axis`` is set to ``[0, 3, 2, 1]``,
3603  *   then the output has shape ``[Seq, B, W, H, C]``
3604  * - If ``axis`` is not set, or is set to ``[0, 1, 2, 3]``,
3605  *   the output is the same as the input.
3606  */
3607 struct  _CoreML__Specification__PermuteLayerParams
3608 {
3609   ProtobufCMessage base;
3610   /*
3611    **
3612    * The order in which to permute the dimensions.
3613    * Must have length 4 and a permutation of ``[0, 1, 2, 3]``.
3614    */
3615   size_t n_axis;
3616   uint64_t *axis;
3617 };
3618 #define CORE_ML__SPECIFICATION__PERMUTE_LAYER_PARAMS__INIT \
3619  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__permute_layer_params__descriptor) \
3620     , 0,NULL }
3621 
3622 
3623 /*
3624  **
3625  * A layer that reorganizes data in the input in specific ways.
3626  * .. code::
3627  *      y = ReorganizeDataLayer(x)
3628  * Requires 1 input and produces 1 output.
3629  * Input
3630  *     A blob with rank at least 3.
3631  *     e.g.: blob with shape ``[C, H, W]``.
3632  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3633  * Output
3634  *     Same rank as the input.
3635  *     e.g.: blob with shape ``[C_out, H_out, W_out]``.
3636  * mode == SPACE_TO_DEPTH
3637  *  ``[C_out, H_out, W_out]`` : ``[C * blockSize * blockSize, H/blockSize, W/blockSize]``.
3638  *  blockSize must divide H and W.
3639  *  Data is moved from the spatial dimensions to the channel dimension. Input is spatially divided into
3640  *  non-overlapping blocks of size blockSize X blockSize and data from each block is moved into the
3641  *  channel dimension.
3642  * mode == DEPTH_TO_SPACE
3643  *  ``[C_out, H_out, W_out]`` : ``[C/(blockSize * blockSize), H * blockSize, W * blockSize]``.
3644  *  Square of blockSize must divide C.
3645  *  Reverse of SPACE_TO_DEPTH. Data is moved from the channel dimension to the spatial dimensions.
3646  * mode == PIXEL_SHUFFLE
3647  *  ``[C_out, H_out, W_out]`` : ``[C/(blockSize * blockSize), H * blockSize, W *  blockSize]``.
3648  *  Square of blockSize must divide C.
3649  *  Similar to DEPTH_TO_SPACE, but using the pixel-shuffle semantics for channel order in the output space.
3650  *  In both modes, elements along the channel dimension are collapsed into
3651  *  blocks in the spatial dimensions. The difference is in the arrangement of
3652  *  the input-channels' data in the output space. See below example for more
3653  *  detail.
3654  *  (Only available in Core ML Specification >= 5 (iOS >= 14, macOS >= 11.0))
3655  * Examples:
3656  * Assume input is the following [C = 8, H = 1, W = 2] tensor:
3657  * .. code::
3658  *    [[[1 2]] [[3 4]] [[5 6]] [[7 8]] [[9 10]] [[11 12]] [[13 14]] [[15 16]]]
3659  * If block_size == 2 and mode == DEPTH_TO_SPACE, output will be the following
3660  * [C = 2, H = 2, W = 4] tensor:
3661  * .. code::
3662  *    [[[ 1  5  2  6]
3663  *      [ 9 13 10 14]]
3664  *     [[ 3  7  4  8]
3665  *      [11 15 12 16]]]
3666  * For mode == SPACE_TO_DEPTH, the behavior is the same as mode ==
3667  * DEPTH_TO_SPACE, but with the input and output swapped.
3668  * If block_size == 2 and mode == PIXEL_SHUFFLE, output will be the following
3669  * [C = 2, H = 2, W = 4] tensor:
3670  * .. code::
3671  *    [[[ 1  3  2  4]
3672  *      [ 5  7  6  8]]
3673  *     [[ 9 11 10 12]
3674  *      [13 15 14 16]]]
3675  */
3676 struct  _CoreML__Specification__ReorganizeDataLayerParams
3677 {
3678   ProtobufCMessage base;
3679   CoreML__Specification__ReorganizeDataLayerParams__ReorganizationType mode;
3680   /*
3681    * / must be greater than 1
3682    */
3683   uint64_t blocksize;
3684 };
3685 #define CORE_ML__SPECIFICATION__REORGANIZE_DATA_LAYER_PARAMS__INIT \
3686  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reorganize_data_layer_params__descriptor) \
3687     , CORE_ML__SPECIFICATION__REORGANIZE_DATA_LAYER_PARAMS__REORGANIZATION_TYPE__SPACE_TO_DEPTH, 0 }
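
/*
 * Illustrative sketch, not part of the generated API: reference index mappings
 * for DEPTH_TO_SPACE and PIXEL_SHUFFLE on a single [C, H, W] blob stored in
 * C-order, matching the worked examples above. The helper name and the
 * ``pixel_shuffle`` flag are hypothetical.
 */
static void example_depth_to_space(const float *in, float *out,
                                   int C, int H, int W, int b /* blockSize */,
                                   int pixel_shuffle)
{
  const int C_out = C / (b * b), H_out = H * b, W_out = W * b;
  for (int c = 0; c < C; ++c)
    for (int h = 0; h < H; ++h)
      for (int w = 0; w < W; ++w) {
        /* Which output channel, and which slot inside the b x b spatial block,
         * this input channel feeds is the only difference between the modes. */
        int c_out = pixel_shuffle ? c / (b * b) : c % C_out;
        int block = pixel_shuffle ? c % (b * b) : c / C_out;
        int h_out = h * b + block / b;
        int w_out = w * b + block % b;
        out[(c_out * H_out + h_out) * W_out + w_out] = in[(c * H + h) * W + w];
      }
}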
3688 
3689 
3690 /*
3691  **
3692  * A layer that slices the input data along axis = -1 or -2 or -3.
3693  * For general slice along any axis, please see SliceStaticLayer/SliceDynamicLayer.
3694  * .. code::
3695  *      y = SliceLayer(x)
3696  * Requires 1 input and produces 1 output.
3697  * Input
3698  *     A blob that can, in general, have any rank. However, depending on the value of "axis" ,
3699  *     there may be additional rank constraints.
3700  * Output
3701  *     A blob with the same rank as the input.
3702  * Sliced section is taken from the interval ``[startIndex, endIndex)``, i.e.
3703  * startIndex is inclusive while endIndex is exclusive.
3704  * stride must be positive and represents the step size for slicing.
3705  * Negative indexing is supported for startIndex and endIndex.
3706  * -1 denotes N-1, -2 denotes N-2 and so on, where N is the length of the dimension to be sliced.
3707  */
3708 struct  _CoreML__Specification__SliceLayerParams
3709 {
3710   ProtobufCMessage base;
3711   /*
3712    * / start of the sliced section. Inclusive.
3713    */
3714   int64_t startindex;
3715   /*
3716    * / end of sliced section. Exclusive.
3717    */
3718   int64_t endindex;
3719   /*
3720    * / The step size. Must be positive.
3721    */
3722   uint64_t stride;
3723   /*
3724    * The following mapping is used for interpreting this parameter:
3725    * CHANNEL_AXIS => axis = -3, input must have rank at least 3.
3726    * HEIGHT_AXIS => axis = -2, input must have rank at least 2.
3727    * WIDTH_AXIS => axis = -1
3728    */
3729   CoreML__Specification__SliceLayerParams__SliceAxis axis;
3730 };
3731 #define CORE_ML__SPECIFICATION__SLICE_LAYER_PARAMS__INIT \
3732  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__slice_layer_params__descriptor) \
3733     , 0, 0, 0, CORE_ML__SPECIFICATION__SLICE_LAYER_PARAMS__SLICE_AXIS__CHANNEL_AXIS }
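
/*
 * Illustrative sketch, not part of the generated API: resolving the documented
 * slice semantics (inclusive start, exclusive end, negative indices counted
 * from the end, positive stride) for one dimension of length N. The helper
 * name is hypothetical; it copies the selected elements of a 1-D array.
 */
static int64_t example_slice_1d(const float *in, float *out, int64_t N,
                                int64_t startindex, int64_t endindex, uint64_t stride)
{
  int64_t start = startindex < 0 ? startindex + N : startindex;
  int64_t end   = endindex   < 0 ? endindex   + N : endindex;
  int64_t count = 0;
  for (int64_t i = start; i < end; i += (int64_t)stride)
    out[count++] = in[i];              /* elements start, start+stride, ... < end */
  return count;                        /* size of the sliced dimension */
}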
3734 
3735 
3736 /*
3737  **
3738  * A layer that reduces the input using a specified operation.
3739  * .. code::
3740  *      y = ReduceLayer(x)
3741  * Requires 1 input and produces 1 output.
3742  * Input
3743  *     A blob that can, in general, have any rank. However, depending on the value of "axis" ,
3744  *      there may be additional rank constraints.
3745  * Output
3746  *     A blob with the same rank as the input, which has 1s on the dimensions specified in the parameter "axis"
3747  *     Values supported for axis are [-1], [-2], [-3], [-2,-1], [-3,-2,-1]
3748  *     and the equivalent positive values (depending on the rank of the input)
3749  *     For mode == 'ArgMax', axis must be [-1] or [-2] or [-3].
3750  */
3751 struct  _CoreML__Specification__ReduceLayerParams
3752 {
3753   ProtobufCMessage base;
3754   /*
3755    * / Specifies function used to reduce.
3756    */
3757   CoreML__Specification__ReduceLayerParams__ReduceOperation mode;
3758   /*
3759    **
3760    * Used if mode is ``LOGSUM``.
3761    * Defaults to ``1e-6`` if not set or is set to ``0``.
3762    */
3763   float epsilon;
3764   /*
3765    * The following mapping is used for interpreting this parameter:
3766    * CHW = axis [-3, -2, -1], input must have rank at least 3.
3767    * HW = axis [-2, -1], input must have rank at least 2.
3768    * C = axis [-3]
3769    * H = axis [-2]
3770    * W = axis [-1]
3771    */
3772   CoreML__Specification__ReduceLayerParams__ReduceAxis axis;
3773 };
3774 #define CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__INIT \
3775  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_layer_params__descriptor) \
3776     , CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_OPERATION__SUM, 0, CORE_ML__SPECIFICATION__REDUCE_LAYER_PARAMS__REDUCE_AXIS__CHW }
3777 
3778 
3779 /*
3780  **
3781  * A layer that crops the spatial dimensions of an input.
3782  * If two inputs are provided, the shape of the second input is used as the reference shape.
3783  * .. code::
3784  *      y = CropLayer(x1) or y = CropLayer(x1,x2)
3785  * Requires 1 or 2 inputs and produces 1 output.
3786  * Input
3787  *    1 or 2 tensors, each with rank at least 3; if two inputs are provided, they must have equal rank.
3788  *    Example:
3789  *     - 1 input case: A blob with shape ``[C, H_in, W_in]``.
3790  *     - 2 input case: 1st blob with shape ``[C, H_in, W_in]``, 2nd blob with shape ``[C, H_out, W_out]``.
3791  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3792  * Output
3793  *     Same rank as the inputs.
3794  *     e.g.: A blob with shape ``[C, H_out, W_out]``.
3795  * If one input is used, output is computed as follows:
3796  * .. code::
3797  *      y = x1[:, topCropAmount:H_in - bottomCropAmount, leftCropAmount:W_in - rightCropAmount]
3798  *      topCropAmount == Height startEdgeSize == borderAmounts[0].startEdgeSize
3799  *      bottomCropAmount == Height endEdgeSize == borderAmounts[0].endEdgeSize
3800  *      leftCropAmount == Width startEdgeSize == borderAmounts[1].startEdgeSize
3801  *      rightCropAmount == Width endEdgeSize == borderAmounts[1].endEdgeSize
3802  *      H_out = H_in - topCropAmount - bottomCropAmount
3803  *      W_out = W_in - leftCropAmount - rightCropAmount
3804  * If two inputs are used, output is computed as follows:
3805  * .. code::
3806  *      y = x1[:, offset[0]:offset[0] + H_out, offset[1]:offset[1] + W_out]
3807  */
3808 struct  _CoreML__Specification__CropLayerParams
3809 {
3810   ProtobufCMessage base;
3811   /*
3812    **
3813    * The amounts to be cropped from the input.
3814    * Used only if a single input is provided.
3815    */
3816   CoreML__Specification__BorderAmounts *cropamounts;
3817   /*
3818    **
3819    * The offset amounts.
3820    * Used only if two inputs are provided.
3821    * Must be of length 2, in order ``[H, W]``.
3822    */
3823   size_t n_offset;
3824   uint64_t *offset;
3825 };
3826 #define CORE_ML__SPECIFICATION__CROP_LAYER_PARAMS__INIT \
3827  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__crop_layer_params__descriptor) \
3828     , NULL, 0,NULL }
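
/*
 * Illustrative sketch, not part of the generated API: the single-input crop
 * spelled out by the formula above, applied to one [C, H_in, W_in] blob in
 * C-order. The helper name is hypothetical; top/bottom/left/right correspond
 * to the start/end edge sizes of borderAmounts[0] and borderAmounts[1].
 */
static void example_crop_chw(const float *in, float *out,
                             int C, int H_in, int W_in,
                             int top, int bottom, int left, int right)
{
  const int H_out = H_in - top - bottom;
  const int W_out = W_in - left - right;
  for (int c = 0; c < C; ++c)
    for (int h = 0; h < H_out; ++h)
      for (int w = 0; w < W_out; ++w)
        out[(c * H_out + h) * W_out + w] =
            in[(c * H_in + (h + top)) * W_in + (w + left)];
}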
3829 
3830 
3831 /*
3832  **
3833  * A layer that computes the elementwise average of the inputs.
3834  * This layer has limited broadcasting support. For general broadcasting see AddBroadcastableLayer.
3835  * .. code::
3836  *      y = AverageLayer(x1,x2,...)
3837  * Requires multiple inputs and produces 1 output.
3838  * Input
3839  *     In general, there are no rank constraints.
3840  *     However, only certain set of shapes are broadcastable. For example:
3841  *     [B, 1, 1, 1], [B, C, 1, 1], [B, 1, H, W], [B, C, H, W]
3842  * Output
3843  *     A blob with the same shape as each input.
3844  */
3845 struct  _CoreML__Specification__AverageLayerParams
3846 {
3847   ProtobufCMessage base;
3848 };
3849 #define CORE_ML__SPECIFICATION__AVERAGE_LAYER_PARAMS__INIT \
3850  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__average_layer_params__descriptor) \
3851      }
3852 
3853 
3854 /*
3855  **
3856  * A layer that computes the elementwise maximum over the inputs.
3857  * .. code::
3858  *      y = MaxLayer(x1,x2,...)
3859  * Requires multiple inputs and produces 1 output.
3860  * Input
3861  *     In general, there are no rank constraints.
3862  *     However, only certain set of shapes are broadcastable. For example:
3863  *     [B, C, 1, 1], [B, C, H, W]
3864  * Output
3865  *     A blob with the same shape as each input.
3866  */
3867 struct  _CoreML__Specification__MaxLayerParams
3868 {
3869   ProtobufCMessage base;
3870 };
3871 #define CORE_ML__SPECIFICATION__MAX_LAYER_PARAMS__INIT \
3872  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__max_layer_params__descriptor) \
3873      }
3874 
3875 
3876 /*
3877  **
3878  * A layer that computes the elementwise minimum over the inputs.
3879  * .. code::
3880  *      y = MinLayer(x1,x2,...)
3881  * Requires multiple inputs and produces 1 output.
3882  * Input
3883  *     In general, there are no rank constraints.
3884  *     However, only certain set of shapes are broadcastable. For example:
3885  *     [B, C, 1, 1], [B, C, H, W]
3886  * Output
3887  *     A blob with the same shape as each input.
3888  */
3889 struct  _CoreML__Specification__MinLayerParams
3890 {
3891   ProtobufCMessage base;
3892 };
3893 #define CORE_ML__SPECIFICATION__MIN_LAYER_PARAMS__INIT \
3894  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__min_layer_params__descriptor) \
3895      }
3896 
3897 
3898 /*
3899  **
3900  * A layer that computes the dot product of two vectors.
3901  * .. code::
3902  *      y = DotProductLayer(x1,x2)
3903  * Requires 2 inputs and produces 1 output.
3904  * Input
3905  *     Two blobs with rank at least 3, such that the last two dimensions must be 1.
3906  *     e.g.: blobs with shape ``[B, C, 1, 1]``.
3907  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3908  * Output
3909  *     Same rank as the input.
3910  *     e.g. for rank 4 inputs, output shape: [B, 1, 1, 1]
3911  */
3912 struct  _CoreML__Specification__DotProductLayerParams
3913 {
3914   ProtobufCMessage base;
3915   /*
3916    **
3917    * If true, inputs are normalized first,
3918    * thereby computing the cosine similarity.
3919    */
3920   protobuf_c_boolean cosinesimilarity;
3921 };
3922 #define CORE_ML__SPECIFICATION__DOT_PRODUCT_LAYER_PARAMS__INIT \
3923  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__dot_product_layer_params__descriptor) \
3924     , 0 }
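
/*
 * Illustrative sketch, not part of the generated API: the dot product of two
 * length-C vectors, optionally normalized into a cosine similarity when
 * ``cosinesimilarity`` is true. The helper name is hypothetical; <math.h> is
 * pulled in only for sqrtf.
 */
#include <math.h>

static float example_dot_product(const float *x1, const float *x2, int C,
                                 protobuf_c_boolean cosinesimilarity)
{
  float dot = 0.0f, n1 = 0.0f, n2 = 0.0f;
  for (int i = 0; i < C; ++i) {
    dot += x1[i] * x2[i];
    n1  += x1[i] * x1[i];
    n2  += x2[i] * x2[i];
  }
  if (!cosinesimilarity)
    return dot;
  return dot / (sqrtf(n1) * sqrtf(n2));  /* inputs are normalized first */
}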
3925 
3926 
3927 /*
3928  **
3929  * A layer that performs mean variance normalization, along axis = -3.
3930  * .. code::
3931  *      y = MeanVarianceNormalizeLayer(x)
3932  * Requires 1 input and produces 1 output.
3933  * Input
3934  *     A blob with rank greater than or equal to 3.
3935  *     Example: Rank 4 blob represents [Batch, channels, height, width]
3936  *     For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
3937  * Output
3938  *     A blob with the same shape as the input.
3939  * If ``acrossChannels == true``
3940  * normalization is performed on flattened input, i.e. the input is reshaped to (Batch,C), where "Batch" contains
3941  * all dimensions from 0 to -4 (inclusive), and C contains dimensions -1, -2, -3.
3942  * If ``acrossChannels == false``
3943  * normalization is performed within a channel,
3944  * across spatial dimensions (i.e. last two dimensions).
3945  */
3946 struct  _CoreML__Specification__MeanVarianceNormalizeLayerParams
3947 {
3948   ProtobufCMessage base;
3949   /*
3950    **
3951    * If true, mean and variance are computed across channels.
3952    */
3953   protobuf_c_boolean acrosschannels;
3954   /*
3955    **
3956    * If false, only mean is subtracted.
3957    */
3958   protobuf_c_boolean normalizevariance;
3959   /*
3960    **
3961    * A small constant to avoid division by 0 while normalizing variance.
3962    * Defaults to ``1e-6`` if not set or set to ``0``.
3963    */
3964   float epsilon;
3965 };
3966 #define CORE_ML__SPECIFICATION__MEAN_VARIANCE_NORMALIZE_LAYER_PARAMS__INIT \
3967  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__mean_variance_normalize_layer_params__descriptor) \
3968     , 0, 0, 0 }
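
/*
 * Illustrative sketch, not part of the generated API: mean-variance
 * normalization of one channel over its H*W spatial elements, i.e. the
 * ``acrossChannels == false`` case described above. The helper name is
 * hypothetical; <math.h> is needed only for sqrtf.
 */
#include <math.h>

static void example_mvn_channel(float *x, int n /* = H * W */,
                                protobuf_c_boolean normalizevariance,
                                float epsilon)
{
  float mean = 0.0f, var = 0.0f;
  if (epsilon == 0.0f)
    epsilon = 1e-6f;                     /* documented default */
  for (int i = 0; i < n; ++i) mean += x[i];
  mean /= (float)n;
  for (int i = 0; i < n; ++i) var += (x[i] - mean) * (x[i] - mean);
  var /= (float)n;
  for (int i = 0; i < n; ++i) {
    x[i] -= mean;                        /* the mean is always subtracted */
    if (normalizevariance)
      x[i] /= sqrtf(var + epsilon);
  }
}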
3969 
3970 
3971 /*
3972  **
3973  * A layer that repeats a sequence or the dimension sitting at axis = -5
3974  * .. code::
3975  *      y = SequenceRepeatLayer(x)
3976  * Requires 1 input and produces 1 output.
3977  * Input
3978  *     A blob with rank at least 5.
3979  *     e.g: shape ``[Seq, B, C, H, W]``
3980  * Output
3981  *     A blob with the same rank as the input.
3982  *     e.g.: for input shape ``[Seq, B, C, H, W]``, output shape is ``[nRepetitions * Seq, B, C, H, W]``.
3983  */
3984 struct  _CoreML__Specification__SequenceRepeatLayerParams
3985 {
3986   ProtobufCMessage base;
3987   /*
3988    **
3989    * Number of repetitions.
3990    * Defaults to ``1`` if not set or set to ``0``.
3991    */
3992   uint64_t nrepetitions;
3993 };
3994 #define CORE_ML__SPECIFICATION__SEQUENCE_REPEAT_LAYER_PARAMS__INIT \
3995  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__sequence_repeat_layer_params__descriptor) \
3996     , 0 }
3997 
3998 
3999 /*
4000  **
4001  * A simple recurrent layer.
4002  * .. code::
4003  *      y_t = SimpleRecurrentLayer(x_t, y_{t-1})
4004  * Input
4005  *    A blob of rank 5, with shape ``[Seq, Batch, inputVectorSize, 1, 1]``.
4006  *    This represents a sequence of vectors of size ``inputVectorSize``.
4007  * Output
4008  *    Same rank as the input.
4009  *    Represents a vector of size ``outputVectorSize``. It is either the final output or a sequence of outputs at all time steps.
4010  * - Output Shape: ``[1, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == false``
4011  * - Output Shape: ``[Seq, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == true``
4012  * This layer is described by the following equation:
4013  * .. math::
4014  *     \boldsymbol{y_t} = f(\mathrm{clip}(W \boldsymbol{x_t} + \
4015  *                                        R \boldsymbol{y_{t-1}} + b))
4016  * - ``W`` is a 2-dimensional weight matrix
4017  *   (``[outputVectorSize, inputVectorSize]``, row-major)
4018  * - ``R`` is a 2-dimensional recursion matrix
4019  *   (``[outputVectorSize, outputVectorSize]``, row-major)
4020  * - ``b`` is a 1-dimensional bias vector (``[outputVectorSize]``)
4021  * - ``f()`` is an activation
4022  * - ``clip()`` is a function that constrains values between ``[-50.0, 50.0]``
4023  */
4024 struct  _CoreML__Specification__SimpleRecurrentLayerParams
4025 {
4026   ProtobufCMessage base;
4027   /*
4028    * / The size of the input vectors.
4029    */
4030   uint64_t inputvectorsize;
4031   /*
4032    * / The size of the output vectors.
4033    */
4034   uint64_t outputvectorsize;
4035   /*
4036    **
4037    * Activations supported are Linear, Sigmoid, Tanh, ReLU, Scaled Tanh (alpha = 1.71, beta = 2/3), Hard sigmoid (alpha = 0.2, beta = 0.5)
4038    */
4039   /*
4040    * / The activation function.
4041    */
4042   CoreML__Specification__ActivationParams *activation;
4043   /*
4044    **
4045    * If false, output is just the result after final state update.
4046    * If true, output is a sequence, containing outputs at all time steps.
4047    */
4048   protobuf_c_boolean sequenceoutput;
4049   /*
4050    * / If false, no bias is added.
4051    */
4052   protobuf_c_boolean hasbiasvector;
4053   /*
4054    * / Weight matrix W.
4055    */
4056   CoreML__Specification__WeightParams *weightmatrix;
4057   /*
4058    * / Recursion Weight matrix R.
4059    */
4060   CoreML__Specification__WeightParams *recursionmatrix;
4061   /*
4062    * / Bias vector b.
4063    */
4064   CoreML__Specification__WeightParams *biasvector;
4065   /*
4066    * If true, then the node processes the input sequence from right to left
4067    */
4068   protobuf_c_boolean reverseinput;
4069 };
4070 #define CORE_ML__SPECIFICATION__SIMPLE_RECURRENT_LAYER_PARAMS__INIT \
4071  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__simple_recurrent_layer_params__descriptor) \
4072     , 0, 0, NULL, 0, 0, NULL, NULL, NULL, 0 }
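
/*
 * Illustrative sketch, not part of the generated API: one time step of the
 * recurrence y_t = f(clip(W x_t + R y_{t-1} + b)) given above, with W and R
 * row-major and the activation f supplied by the caller. The helper names
 * (including example_clip, reused by the GRU and LSTM sketches below) are
 * hypothetical.
 */
static float example_clip(float v)       /* constrains values to [-50.0, 50.0] */
{
  return v > 50.0f ? 50.0f : (v < -50.0f ? -50.0f : v);
}

static void example_simple_recurrent_step(const float *W,      /* [out, in], row-major  */
                                          const float *R,      /* [out, out], row-major */
                                          const float *b,      /* [out], or NULL if hasBiasVector is false */
                                          const float *x_t,    /* [in]  */
                                          const float *y_prev, /* [out] */
                                          float *y_t,          /* [out] */
                                          int in_size, int out_size,
                                          float (*f)(float))
{
  for (int o = 0; o < out_size; ++o) {
    float acc = b ? b[o] : 0.0f;
    for (int i = 0; i < in_size; ++i)  acc += W[o * in_size + i] * x_t[i];
    for (int j = 0; j < out_size; ++j) acc += R[o * out_size + j] * y_prev[j];
    y_t[o] = f(example_clip(acc));
  }
}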
4073 
4074 
4075 /*
4076  **
4077  * Gated-Recurrent Unit (GRU) Layer
4078  * .. code::
4079  *      y_t = GRULayer(x_t, y_{t-1})
4080  * Input
4081  *    A blob of rank 5, with shape ``[Seq, Batch, inputVectorSize, 1, 1]``.
4082  *    This represents a sequence of vectors of size ``inputVectorSize``.
4083  * Output
4084  *    Same rank as the input.
4085  *    Represents a vector of size ``outputVectorSize``. It is either the final output or a sequence of outputs at all time steps.
4086  * - Output Shape: ``[1, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == false``
4087  * - Output Shape: ``[Seq, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == true``
4088  * This layer is described by the following equations:
4089  * Update Gate
4090  *     .. math::
4091  *         \boldsymbol{z_t} = \
4092  *             f(\mathrm{clip}(W_z \boldsymbol{x_t} + \
4093  *                             R_z \boldsymbol{y_{t-1}} + b_z))
4094  * Reset Gate
4095  *     .. math::
4096  *         \boldsymbol{r_t} = \
4097  *             f(\mathrm{clip}(W_r \boldsymbol{x_t} + \
4098  *                             R_r \boldsymbol{y_{t-1}} + b_r))
4099  * Cell Memory State
4100  *     .. math::
4101  *         \boldsymbol{c_t} = \
4102  *             \boldsymbol{y_{t-1}} \odot \boldsymbol{r_t}
4103  * Output Gate
4104  *     .. math::
4105  *         \boldsymbol{o_t} = \
4106  *             g(\mathrm{clip}(W_o \boldsymbol{x_t} + \
4107  *                             R_o \boldsymbol{c_t} + b_o))
4108  * Output
4109  *     .. math::
4110  *         \boldsymbol{y_t} = \
4111  *             (1 - \boldsymbol{z_t}) \odot \boldsymbol{o_t} + \
4112  *              \boldsymbol{z_t} \odot \boldsymbol{y_{t-1}}
4113  * - ``W_z``, ``W_r``, ``W_o`` are 2-dimensional input weight matrices
4114  *   (``[outputVectorSize, inputVectorSize]``, row-major)
4115  * - ``R_z``, ``R_r``, ``R_o`` are 2-dimensional recursion matrices
4116  *   (``[outputVectorSize, outputVectorSize]``, row-major)
4117  * - ``b_z``, ``b_r``, ``b_o`` are 1-dimensional bias vectors
4118  *   (``[outputVectorSize]``)
4119  * - ``f()``, ``g()`` are activations
4120  * - ``clip()`` is a function that constrains values between ``[-50.0, 50.0]``
4121  * - ``⊙`` denotes the elementwise product of matrices
4122  */
4123 struct  _CoreML__Specification__GRULayerParams
4124 {
4125   ProtobufCMessage base;
4126   /*
4127    * / Size of the input vectors.
4128    */
4129   uint64_t inputvectorsize;
4130   /*
4131    * / Size of the output vectors.
4132    */
4133   uint64_t outputvectorsize;
4134   /*
4135    **
4136    * 2 element array representing activations [f(), g()] in that order.
4137    * Typical values used = [sigmoid, tanh].
4138    * Activations supported are Linear, Sigmoid, Tanh, ReLU, Scaled Tanh (alpha = 1.71, beta = 2/3), Hard sigmoid (alpha = 0.2, beta = 0.5)
4139    */
4140   size_t n_activations;
4141   CoreML__Specification__ActivationParams **activations;
4142   /*
4143    **
4144    * If false output is just the result after final state update.
4145    * If true, output is a sequence, containing outputs at all time steps.
4146    */
4147   protobuf_c_boolean sequenceoutput;
4148   /*
4149    **
4150    * If false, no biases (``b_z``, ``b_r``, ``b_o``) are added.
4151    */
4152   protobuf_c_boolean hasbiasvectors;
4153   /*
4154    * / Weight Matrix W_z.
4155    */
4156   CoreML__Specification__WeightParams *updategateweightmatrix;
4157   /*
4158    * / Weight Matrix W_r.
4159    */
4160   CoreML__Specification__WeightParams *resetgateweightmatrix;
4161   /*
4162    * / Weight Matrix W_o.
4163    */
4164   CoreML__Specification__WeightParams *outputgateweightmatrix;
4165   /*
4166    * / Recursion Weight Matrix R_z.
4167    */
4168   CoreML__Specification__WeightParams *updategaterecursionmatrix;
4169   /*
4170    * / Recursion Weight Matrix R_r.
4171    */
4172   CoreML__Specification__WeightParams *resetgaterecursionmatrix;
4173   /*
4174    * / Recursion Weight Matrix R_o.
4175    */
4176   CoreML__Specification__WeightParams *outputgaterecursionmatrix;
4177   /*
4178    * / Bias vector b_z.
4179    */
4180   CoreML__Specification__WeightParams *updategatebiasvector;
4181   /*
4182    * / Bias vector b_r.
4183    */
4184   CoreML__Specification__WeightParams *resetgatebiasvector;
4185   /*
4186    * / Bias vector b_o.
4187    */
4188   CoreML__Specification__WeightParams *outputgatebiasvector;
4189   /*
4190    * / If true, then the node processes the input sequence from right to left
4191    */
4192   protobuf_c_boolean reverseinput;
4193 };
4194 #define CORE_ML__SPECIFICATION__GRULAYER_PARAMS__INIT \
4195  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__grulayer_params__descriptor) \
4196     , 0, 0, 0,NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0 }
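
/*
 * Illustrative sketch, not part of the generated API: one GRU time step
 * following the equations above. Matrices are row-major; f and g are the two
 * activations; z, r, o, c are caller-provided scratch buffers of length
 * out_size. The helper name is hypothetical; example_clip is the helper
 * defined in the SimpleRecurrentLayer sketch above.
 */
static void example_gru_step(const float *Wz, const float *Wr, const float *Wo, /* [out, in]      */
                             const float *Rz, const float *Rr, const float *Ro, /* [out, out]     */
                             const float *bz, const float *br, const float *bo, /* [out], or NULL */
                             const float *x_t, const float *y_prev,
                             float *y_t,                             /* [out] */
                             float *z, float *r, float *o, float *c, /* scratch, [out] each */
                             int in_size, int out_size,
                             float (*f)(float), float (*g)(float))
{
  for (int k = 0; k < out_size; ++k) {                 /* update and reset gates */
    float az = bz ? bz[k] : 0.0f, ar = br ? br[k] : 0.0f;
    for (int i = 0; i < in_size; ++i) {
      az += Wz[k * in_size + i] * x_t[i];
      ar += Wr[k * in_size + i] * x_t[i];
    }
    for (int j = 0; j < out_size; ++j) {
      az += Rz[k * out_size + j] * y_prev[j];
      ar += Rr[k * out_size + j] * y_prev[j];
    }
    z[k] = f(example_clip(az));
    r[k] = f(example_clip(ar));
  }
  for (int k = 0; k < out_size; ++k)                   /* cell memory state */
    c[k] = y_prev[k] * r[k];
  for (int k = 0; k < out_size; ++k) {                 /* output gate */
    float ao = bo ? bo[k] : 0.0f;
    for (int i = 0; i < in_size; ++i)  ao += Wo[k * in_size + i] * x_t[i];
    for (int j = 0; j < out_size; ++j) ao += Ro[k * out_size + j] * c[j];
    o[k] = g(example_clip(ao));
  }
  for (int k = 0; k < out_size; ++k)                   /* final output */
    y_t[k] = (1.0f - z[k]) * o[k] + z[k] * y_prev[k];
}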
4197 
4198 
4199 /*
4200  **
4201  * Long short-term memory (LSTM) parameters.
4202  * This is described by the following equations:
4203  * Input Gate
4204  *     .. math::
4205  *         \boldsymbol{i_t} = \
4206  *             f(\mathrm{clip}(W_i \boldsymbol{x_t} + \
4207  *                             R_i \boldsymbol{y_{t-1}} + \
4208  *                             p_i \odot c_{t-1} + b_i))
4209  * Forget Gate
4210  *     .. math::
4211  *         \boldsymbol{f_t} = \
4212  *             f(\mathrm{clip}(W_f \boldsymbol{x_t} + \
4213  *                             R_f \boldsymbol{y_{t-1}} + \
4214  *                             p_f \odot c_{t-1} + b_f))
4215  * Block Input
4216  *     .. math::
4217  *         \boldsymbol{z_t} = \
4218  *             g(\mathrm{clip}(W_z \boldsymbol{x_t} + \
4219  *                             R_z \boldsymbol{y_{t-1}} + b_z))
4220  * Cell Memory State
4221  *     .. math::
4222  *         \boldsymbol{c_t} = \
4223  *             \boldsymbol{c_{t-1}} \odot \boldsymbol{f_t} + \
4224  *             \boldsymbol{i_t} \odot \boldsymbol{z_t}
4225  * Output Gate
4226  *     .. math::
4227  *         \boldsymbol{o_t} = \
4228  *             f(\mathrm{clip}(W_o \boldsymbol{x_t} + \
4229  *                             R_o \boldsymbol{y_{t-1}} + \
4230  *                             p_o \odot c_t + b_o))
4231  * Output
4232  *     .. math::
4233  *         \boldsymbol{y_t} = \
4234  *             h(\boldsymbol{c_t}) \odot \boldsymbol{o_t}
4235  * - ``W_i``, ``W_f``, ``W_z``, ``W_o`` are 2-dimensional input weight matrices
4236  *   (``[outputVectorSize, inputVectorSize]``, row-major)
4237  * - ``R_i``, ``R_f``, ``R_z``, ``R_o`` are 2-dimensional recursion matrices
4238  *   (``[outputVectorSize, outputVectorSize]``, row-major)
4239  * - ``b_i``, ``b_f``, ``b_z``, ``b_o`` are 1-dimensional bias vectors
4240  *   (``[outputVectorSize]``)
4241  * - ``p_i``, ``p_f``, ``p_o`` are 1-dimensional peephole vectors
4242  *   (``[outputVectorSize]``)
4243  * - ``f()``, ``g()``, ``h()`` are activations
4244  * - ``clip()`` is a function that constrains values between ``[-50.0, 50.0]``
4245  * - ``⊙`` denotes the elementwise product of matrices
4246  */
4247 struct  _CoreML__Specification__LSTMParams
4248 {
4249   ProtobufCMessage base;
4250   /*
4251    **
4252    * If true, output is a sequence, containing outputs at all time steps.
4253    * If false, output is just the result after final state update.
4254    */
4255   protobuf_c_boolean sequenceoutput;
4256   /*
4257    **
4258    * If false, no biases (``b_i``, ``b_f``, ``b_z``, ``b_o``) are added.
4259    */
4260   protobuf_c_boolean hasbiasvectors;
4261   /*
4262    **
4263    * If true, a vector of ``1`` values is added to ``b_f``.
4264    */
4265   protobuf_c_boolean forgetbias;
4266   /*
4267    **
4268    * If true, peephole vectors are included.
4269    */
4270   protobuf_c_boolean haspeepholevectors;
4271   /*
4272    **
4273    * If the coupled Input and Forget flag is on, the behaviour of
4274    * ``c_t`` is changed to the following (i.e. forget gate is not used):
4275    * .. math::
4276    *     \boldsymbol{c_t} = \
4277    *         \boldsymbol{c_{t-1}} \odot (1 - \boldsymbol{i_t}) + \
4278    *         \boldsymbol{i_t} \odot \boldsymbol{z_t}
4279    */
4280   protobuf_c_boolean coupledinputandforgetgate;
4281   /*
4282    **
4283    * Places a limit on the maximum and minimum values of ``c_t``.
4284    * c_t = min(c_t, cellClipThreshold)
4285    * c_t = max(c_t, -cellClipThreshold)
4286    * If 0, it is set to its default value = 50.0.
4287    */
4288   float cellclipthreshold;
4289 };
4290 #define CORE_ML__SPECIFICATION__LSTMPARAMS__INIT \
4291  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__lstmparams__descriptor) \
4292     , 0, 0, 0, 0, 0, 0 }
4293 
4294 
4295 /*
4296  **
4297  * Weights for long short-term memory (LSTM) layers
4298  */
4299 struct  _CoreML__Specification__LSTMWeightParams
4300 {
4301   ProtobufCMessage base;
4302   /*
4303    * / Weight Matrix W_i.
4304    */
4305   CoreML__Specification__WeightParams *inputgateweightmatrix;
4306   /*
4307    * / Weight Matrix W_f.
4308    */
4309   CoreML__Specification__WeightParams *forgetgateweightmatrix;
4310   /*
4311    * / Weight Matrix W_z.
4312    */
4313   CoreML__Specification__WeightParams *blockinputweightmatrix;
4314   /*
4315    * / Weight Matrix W_o.
4316    */
4317   CoreML__Specification__WeightParams *outputgateweightmatrix;
4318   /*
4319    * / Recursion Weight Matrix R_i.
4320    */
4321   CoreML__Specification__WeightParams *inputgaterecursionmatrix;
4322   /*
4323    * / Recursion Weight Matrix R_f.
4324    */
4325   CoreML__Specification__WeightParams *forgetgaterecursionmatrix;
4326   /*
4327    * / Recursion Weight Matrix R_z.
4328    */
4329   CoreML__Specification__WeightParams *blockinputrecursionmatrix;
4330   /*
4331    * / Recursion Weight Matrix R_o.
4332    */
4333   CoreML__Specification__WeightParams *outputgaterecursionmatrix;
4334   /*
4335    *biases:
4336    */
4337   /*
4338    * / Bias vector b_i.
4339    */
4340   CoreML__Specification__WeightParams *inputgatebiasvector;
4341   /*
4342    * / Bias vector b_f.
4343    */
4344   CoreML__Specification__WeightParams *forgetgatebiasvector;
4345   /*
4346    * / Bias vector b_z.
4347    */
4348   CoreML__Specification__WeightParams *blockinputbiasvector;
4349   /*
4350    * / Bias vector b_o.
4351    */
4352   CoreML__Specification__WeightParams *outputgatebiasvector;
4353   /*
4354    *peepholes:
4355    */
4356   /*
4357    * / Peephole vector p_i.
4358    */
4359   CoreML__Specification__WeightParams *inputgatepeepholevector;
4360   /*
4361    * / Peephole vector p_f.
4362    */
4363   CoreML__Specification__WeightParams *forgetgatepeepholevector;
4364   /*
4365    * / Peephole vector p_o.
4366    */
4367   CoreML__Specification__WeightParams *outputgatepeepholevector;
4368 };
4369 #define CORE_ML__SPECIFICATION__LSTMWEIGHT_PARAMS__INIT \
4370  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__lstmweight_params__descriptor) \
4371     , NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
4372 
4373 
4374 /*
4375  **
4376  * A unidirectional long short-term memory (LSTM) layer.
4377  * .. code::
4378  *      (y_t, c_t) = UniDirectionalLSTMLayer(x_t, y_{t-1}, c_{t-1})
4379  * Input
4380  *    A blob of rank 5, with shape ``[Seq, Batch, inputVectorSize, 1, 1]``.
4381  *    This represents a sequence of vectors of size ``inputVectorSize``.
4382  * Output
4383  *    Same rank as the input.
4384  *    Represents a vector of size ``outputVectorSize``. It is either the final output or a sequence of outputs at all time steps.
4385  * - Output Shape: ``[1, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == false``
4386  * - Output Shape: ``[Seq, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == true``
4387  */
4388 struct  _CoreML__Specification__UniDirectionalLSTMLayerParams
4389 {
4390   ProtobufCMessage base;
4391   /*
4392    * / Size of the input vectors.
4393    */
4394   uint64_t inputvectorsize;
4395   /*
4396    * / Size of the output vectors.
4397    */
4398   uint64_t outputvectorsize;
4399   /*
4400    **
4401    * 3 element array representing activations [f(),g(),h()] in that order.
4402    * Typical values used = [sigmoid, tanh, tanh].
4403    * Activations supported are Linear, Sigmoid, Tanh, ReLU, Scaled Tanh (alpha = 1.71, beta = 2/3), Hard sigmoid (alpha = 0.2, beta = 0.5)
4404    */
4405   size_t n_activations;
4406   CoreML__Specification__ActivationParams **activations;
4407   CoreML__Specification__LSTMParams *params;
4408   /*
4409    * / Weights, biases and peepholes.
4410    */
4411   CoreML__Specification__LSTMWeightParams *weightparams;
4412   /*
4413    * / If true, then the node processes the input sequence from right to left
4414    */
4415   protobuf_c_boolean reverseinput;
4416 };
4417 #define CORE_ML__SPECIFICATION__UNI_DIRECTIONAL_LSTMLAYER_PARAMS__INIT \
4418  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__uni_directional_lstmlayer_params__descriptor) \
4419     , 0, 0, 0,NULL, NULL, NULL, 0 }
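
/*
 * Illustrative sketch, not part of the generated API: one LSTM cell step
 * following the LSTMParams equations above, using the gate naming of
 * LSTMWeightParams. Matrices are row-major; bias and peephole pointers may be
 * NULL when the corresponding flags are false; coupledInputAndForgetGate and
 * cellClipThreshold are not modeled. The helper name is hypothetical;
 * example_clip is the helper defined in the SimpleRecurrentLayer sketch above.
 */
static void example_lstm_step(const float *Wi, const float *Wf, const float *Wz, const float *Wo, /* [out, in]     */
                              const float *Ri, const float *Rf, const float *Rz, const float *Ro, /* [out, out]    */
                              const float *bi, const float *bf, const float *bz, const float *bo, /* [out] or NULL */
                              const float *pi, const float *pf, const float *po,                  /* [out] or NULL */
                              const float *x_t, const float *y_prev, const float *c_prev,
                              float *y_t, float *c_t,  /* [out] each */
                              int in_size, int out_size,
                              float (*f)(float), float (*g)(float), float (*h)(float))
{
  for (int k = 0; k < out_size; ++k) {
    float ai = bi ? bi[k] : 0.0f, af = bf ? bf[k] : 0.0f;
    float az = bz ? bz[k] : 0.0f, ao = bo ? bo[k] : 0.0f;
    for (int i = 0; i < in_size; ++i) {
      ai += Wi[k * in_size + i] * x_t[i]; af += Wf[k * in_size + i] * x_t[i];
      az += Wz[k * in_size + i] * x_t[i]; ao += Wo[k * in_size + i] * x_t[i];
    }
    for (int j = 0; j < out_size; ++j) {
      ai += Ri[k * out_size + j] * y_prev[j]; af += Rf[k * out_size + j] * y_prev[j];
      az += Rz[k * out_size + j] * y_prev[j]; ao += Ro[k * out_size + j] * y_prev[j];
    }
    if (pi) ai += pi[k] * c_prev[k];             /* input-gate peephole  */
    if (pf) af += pf[k] * c_prev[k];             /* forget-gate peephole */
    float i_t = f(example_clip(ai));
    float f_t = f(example_clip(af));
    float z_t = g(example_clip(az));
    c_t[k] = c_prev[k] * f_t + i_t * z_t;        /* cell memory state */
    if (po) ao += po[k] * c_t[k];                /* output-gate peephole uses c_t */
    y_t[k] = h(c_t[k]) * f(example_clip(ao));    /* y_t = h(c_t) * o_t */
  }
}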
4420 
4421 
4422 /*
4423  **
4424  * Bidirectional long short-term memory (LSTM) layer
4425  * .. code::
4426  *      (y_t, c_t, y_t_reverse, c_t_reverse) = BiDirectionalLSTMLayer(x_t, y_{t-1}, c_{t-1}, y_{t-1}_reverse, c_{t-1}_reverse)
4427  * Input
4428  *    A blob of rank 5, with shape ``[Seq, Batch, inputVectorSize, 1, 1]``.
4429  *    This represents a sequence of vectors of size ``inputVectorSize``.
4430  * Output
4431  *    Same rank as the input.
4432  *    Represents a vector of size ``2 * outputVectorSize``. It is either the final output or a sequence of outputs at all time steps.
4433  * - Output Shape: ``[1, Batch, 2 * outputVectorSize, 1, 1]`` , if ``sequenceOutput == false``
4434  * - Output Shape: ``[Seq, Batch, 2 * outputVectorSize, 1, 1]`` , if ``sequenceOutput == true``
4435  * The first LSTM operates on the input sequence in the forward direction.
4436  * The second LSTM operates on the input sequence in the reverse direction.
4437  * Example: given the input sequence ``[x_1, x_2, x_3]``,
4438  * where ``x_i`` are vectors at time index ``i``:
4439  * The forward LSTM output is ``[yf_1, yf_2, yf_3]``,
4440  * where ``yf_i`` are vectors of size ``outputVectorSize``:
4441  * - ``yf_1`` is the output at the end of sequence {``x_1``}
4442  * - ``yf_2`` is the output at the end of sequence {``x_1``, ``x_2``}
4443  * - ``yf_3`` is the output at the end of sequence {``x_1``, ``x_2``, ``x_3``}
4444  * The backward LSTM output: ``[yb_1, yb_2, yb_3]``,
4445  * where ``yb_i`` are vectors of size ``outputVectorSize``:
4446  * - ``yb_1`` is the output at the end of sequence {``x_3``}
4447  * - ``yb_2`` is the output at the end of sequence {``x_3``, ``x_2``}
4448  * - ``yb_3`` is the output at the end of sequence {``x_3``, ``x_2``, ``x_1``}
4449  * Output of the bi-dir layer:
4450  * - if ``sequenceOutput = True`` : { ``[yf_1, yb_3]``,  ``[yf_2, yb_2]``,  ``[yf_3, yb_1]`` }
4451  * - if ``sequenceOutput = False`` : { ``[yf_3, yb_3]`` }
4452  */
4453 struct  _CoreML__Specification__BiDirectionalLSTMLayerParams
4454 {
4455   ProtobufCMessage base;
4456   /*
4457    **
4458    * Size of the input vectors.
4459    */
4460   uint64_t inputvectorsize;
4461   /*
4462    **
4463    * Size of the output vectors.
4464    * It is same for both forward and backward LSTMs.
4465    */
4466   uint64_t outputvectorsize;
4467   /*
4468    **
4469    * 3 element array representing activations [f(),g(),h()] in that order.
4470    * Typical values used = [sigmoid, tanh, tanh].
4471    * Activations supported are Linear, Sigmoid, Tanh, ReLU, Scaled Tanh (alpha = 1.71, beta = 2/3), Hard sigmoid (alpha = 0.2, beta = 0.5)
4472    */
4473   size_t n_activationsforwardlstm;
4474   CoreML__Specification__ActivationParams **activationsforwardlstm;
4475   /*
4476    **
4477    * Currently, backward LSTM activations
4478    * must be same as the ones for the forward LSTM.
4479    */
4480   size_t n_activationsbackwardlstm;
4481   CoreML__Specification__ActivationParams **activationsbackwardlstm;
4482   /*
4483    **
4484    * Common parameters shared by the forward and backward LSTMs.
4485    */
4486   CoreML__Specification__LSTMParams *params;
4487   /*
4488    **
4489    * Weights and biases.
4490    * Must be of length 2,
4491    * for the forward and backward LSTM respectively.
4492    */
4493   size_t n_weightparams;
4494   CoreML__Specification__LSTMWeightParams **weightparams;
4495 };
4496 #define CORE_ML__SPECIFICATION__BI_DIRECTIONAL_LSTMLAYER_PARAMS__INIT \
4497  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__bi_directional_lstmlayer_params__descriptor) \
4498     , 0, 0, 0,NULL, 0,NULL, NULL, 0,NULL }
4499 
4500 
4501 typedef enum {
4502   CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE__NOT_SET = 0,
4503   CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_DOUBLE_VALUE = 10,
4504   CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_STRING_VALUE = 20,
4505   CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_INT_VALUE = 30,
4506   CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_LONG_VALUE = 40,
4507   CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_BOOL_VALUE = 50
4508     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE)
4509 } CoreML__Specification__CustomLayerParams__CustomLayerParamValue__ValueCase;
4510 
4511 struct  _CoreML__Specification__CustomLayerParams__CustomLayerParamValue
4512 {
4513   ProtobufCMessage base;
4514   CoreML__Specification__CustomLayerParams__CustomLayerParamValue__ValueCase value_case;
4515   union {
4516     double doublevalue;
4517     char *stringvalue;
4518     int32_t intvalue;
4519     int64_t longvalue;
4520     protobuf_c_boolean boolvalue;
4521   };
4522 };
4523 #define CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__INIT \
4524  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__custom_layer_params__custom_layer_param_value__descriptor) \
4525     , CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE__NOT_SET, {0} }
4526 
4527 
4528 struct  _CoreML__Specification__CustomLayerParams__ParametersEntry
4529 {
4530   ProtobufCMessage base;
4531   char *key;
4532   CoreML__Specification__CustomLayerParams__CustomLayerParamValue *value;
4533 };
4534 #define CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__PARAMETERS_ENTRY__INIT \
4535  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__custom_layer_params__parameters_entry__descriptor) \
4536     , (char *)protobuf_c_empty_string, NULL }
4537 
4538 
4539 struct  _CoreML__Specification__CustomLayerParams
4540 {
4541   ProtobufCMessage base;
4542   /*
4543    * The name of the class (conforming to MLCustomLayer) corresponding to this layer
4544    */
4545   char *classname;
4546   /*
4547    * Any weights -- these are serialized in binary format and memmapped at runtime
4548    */
4549   size_t n_weights;
4550   CoreML__Specification__WeightParams **weights;
4551   /*
4552    * these may be handled as strings, so this should not be large
4553    */
4554   size_t n_parameters;
4555   CoreML__Specification__CustomLayerParams__ParametersEntry **parameters;
4556   /*
4557    * An (optional) description of the layer provided by the model creator. This information is displayed when viewing the model, but does not affect the model's execution on device.
4558    */
4559   char *description;
4560 };
4561 #define CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__INIT \
4562  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__custom_layer_params__descriptor) \
4563     , (char *)protobuf_c_empty_string, 0,NULL, 0,NULL, (char *)protobuf_c_empty_string }
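
/*
 * Illustrative sketch, not part of the generated API: reading one entry of the
 * ``parameters`` map from an unpacked CustomLayerParams message, switching on
 * the generated ``value_case`` discriminator of the oneof. The helper name is
 * hypothetical; <stdio.h> is pulled in only for printf.
 */
#include <stdio.h>

static void example_print_custom_param(const CoreML__Specification__CustomLayerParams__ParametersEntry *e)
{
  const CoreML__Specification__CustomLayerParams__CustomLayerParamValue *v = e->value;
  switch (v->value_case) {
  case CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_DOUBLE_VALUE:
    printf("%s = %f\n", e->key, v->doublevalue);
    break;
  case CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_STRING_VALUE:
    printf("%s = %s\n", e->key, v->stringvalue);
    break;
  case CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_INT_VALUE:
    printf("%s = %d\n", e->key, (int)v->intvalue);
    break;
  case CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_LONG_VALUE:
    printf("%s = %lld\n", e->key, (long long)v->longvalue);
    break;
  case CORE_ML__SPECIFICATION__CUSTOM_LAYER_PARAMS__CUSTOM_LAYER_PARAM_VALUE__VALUE_BOOL_VALUE:
    printf("%s = %s\n", e->key, v->boolvalue ? "true" : "false");
    break;
  default:
    printf("%s is unset\n", e->key);
    break;
  }
}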
4564 
4565 
4566 struct  _CoreML__Specification__TransposeLayerParams
4567 {
4568   ProtobufCMessage base;
4569   /*
4570    **
4571    * Length of "axes" should match the rank of input & output tensor
4572    * "axes" should be a permutation of "[0,1,2,...,N-1]" where N is the rank.
4573    */
4574   /*
4575    */
4576   size_t n_axes;
4577   uint64_t *axes;
4578 };
4579 #define CORE_ML__SPECIFICATION__TRANSPOSE_LAYER_PARAMS__INIT \
4580  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__transpose_layer_params__descriptor) \
4581     , 0,NULL }
4582 
4583 
4584 /*
4585  **
4586  * A layer that computes the matrix multiplication of two tensors with numpy-like broadcasting
4587  * where the matrices reside in the last two indices of the tensor.
4588  * .. code::
4589  *      y = BatchedMatMul(a,b)
4590  * Requires 1 or 2 inputs and produces 1 output.
4591  * The first tensor, "a", must be provided as an input. The second tensor can either be an input or provided as a weight matrix parameter.
4592  * Input
4593  *     - a: First N-Dimensional tensor
4594  *     - b: Second N-Dimensional tensor (either a rank-N input or a matrix, i.e. N=2, provided as a layer parameter)
4595  * Output
4596  *     A tensor containing the matrix product of two tensors.
4597  *     When there are two inputs: rank is max(2, rank(a), rank(b))
4598  *     When there is one input: rank is same as that of the input.
4599  * This operation behaves as following:
4600  *  When there are two inputs:
4601  *      - If N >= 2 for both tensors, it is treated as a batch of matrices residing in the last two indices.
4602  *        All the indices, except for the last two, are broadcasted using conventional rules.
4603  *      - If the first tensor is 1-D, it is converted to a 2-D tensor by prepending a 1 to its shape. Eg. (D) -> (1,D)
4604  *      - If the second tensor is 1-D, it is converted to a 2-D tensor by appending a 1 to its shape. Eg. (D) -> (D,1)
4605  *  When there is one input:
4606  *      - The weight matrix corresponds to a matrix, of shape (X1, X2). Values of X1, X2 must be provided as layer parameters.
4607  *      - The input, "a", is reshaped into a matrix by combining all the leading dimensions, except the last, into a batch dimension. eg:
4608  *             - if "a" is rank 1 (X1,) -->  (1, X1). Output shape will be (X2,)
4609  *             - if "a" is rank 2 (B1, X1) --> no need to reshape. Output shape will be (B1, X2)
4610  *             - if "a" is rank 3 (B1, B2, X1) --> (B1 * B2, X1). Output shape will be (B1, B2, X2)
4611  *             - etc
4612  */
4613 struct  _CoreML__Specification__BatchedMatMulLayerParams
4614 {
4615   ProtobufCMessage base;
4616   /*
4617    **
4618    * If transposeA is true, it transposes the left matrix on the fly before matrix multiplication.
4619    * (is ignored when there is one input)
4620    */
4621   protobuf_c_boolean transposea;
4622   /*
4623    **
4624    * If transposeB is true, it transposes the right matrix on the fly before matrix multiplication.
4625    * (is ignored when there is one input)
4626    */
4627   protobuf_c_boolean transposeb;
4628   /*
4629    * / X1: same as the last dimension of the input tensor
4630    */
4631   uint64_t weightmatrixfirstdimension;
4632   /*
4633    * / X2: same as the last dimension of the output tensor
4634    */
4635   uint64_t weightmatrixseconddimension;
4636   /*
4637    * / Whether a bias is added or not. Supported only when there is one input.
4638    */
4639   protobuf_c_boolean hasbias;
4640   /*
4641    * Weight matrix representing shape [X1, X2].
4642    * Values are, however, stored in column-major order,
4643    * in the "repeated float" or "bytes" fields of the message "WeightParams"
4644    */
4645   CoreML__Specification__WeightParams *weights;
4646   /*
4647    * / Bias vector [X2]. Supported only when there is one input.
4648    */
4649   CoreML__Specification__WeightParams *bias;
4650   /*
4651    **
4652    * If set, this layer, at runtime, quantizes the floating point input blob to int8 before applying the
4653    * matrix multiplication using the INT8 weight parameters provided in weights->int8RawValue. The
4654    * result is then dequantized.
4655    * Requires:
4656    * * number of inputs to be 1
4657    * * hasBias == false
4658    * * QuantizationType == LinearQuantizationParams, such that
4659    *   * size of the "scale" field is 1 and "bias" field is empty in "LinearQuantizationParams"
4660    * * numberOfBits == 8
4661    * * weights->rawValue_size to be empty
4662    */
4663   protobuf_c_boolean int8dynamicquantize;
4664 };
4665 #define CORE_ML__SPECIFICATION__BATCHED_MAT_MUL_LAYER_PARAMS__INIT \
4666  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__batched_mat_mul_layer_params__descriptor) \
4667     , 0, 0, 0, 0, 0, NULL, NULL, 0 }
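
/*
 * Illustrative sketch, not part of the generated API: the single-input case
 * described above, where the input "a" has already been flattened to a
 * (batch, X1) matrix and the [X1, X2] weight matrix is stored in column-major
 * order, as noted for the ``weights`` field. The helper name is hypothetical.
 */
static void example_batched_matmul_single_input(const float *a,    /* (batch, X1), row-major */
                                                const float *w_cm, /* [X1, X2], column-major */
                                                const float *bias, /* [X2], or NULL if hasBias is false */
                                                float *y,          /* (batch, X2), row-major */
                                                int batch, int X1, int X2)
{
  for (int n = 0; n < batch; ++n)
    for (int j = 0; j < X2; ++j) {
      float acc = bias ? bias[j] : 0.0f;
      for (int i = 0; i < X1; ++i)
        acc += a[n * X1 + i] * w_cm[j * X1 + i];   /* column j of the weight matrix is contiguous */
      y[n * X2 + j] = acc;
    }
}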
4668 
4669 
4670 /*
4671  **
4672  * A layer that concatenates a list of tensors along a specified axis.
4673  * .. code::
4674  *      y = ConcatNDLayer(x1,x2,....)
4675  * Requires at least 2 inputs and produces 1 output.
4676  * Input
4677  *     The rank of the input tensors must match and all dimensions also must match, except for the dimension 'axis'.
4678  * Output
4679  *     Same rank as the input. The dimension along "axis", is the sum of the dimensions of the inputs.
4680  * example:
4681  * in1 : shape (3, 2), value = [[1, 2], [3, 4], [5, 6]]
4682  * in2 : shape (3, 2), value = [[7, 8], [9, 10], [11, 12]]
4683  * axis = 0
4684  * if interleave = False (default)
4685  * output : shape (6, 2)
4686  * output[0:3, :] = in1
4687  * output[3:6, :] = in2
4688  * value = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]
4689  * if interleave = True
4690  * output : shape (6, 2)
4691  * output[0::2, :] = in1
4692  * output[1::2, :] = in2
4693  * value = [[1, 2], [7, 8], [3, 4], [9, 10], [5, 6], [11, 12]]
4694  */
4695 struct  _CoreML__Specification__ConcatNDLayerParams
4696 {
4697   ProtobufCMessage base;
4698   /*
4699    **
4700    * Dimension along which to concatenate. Supports negative values of the parameter 'axis'.
4701    */
4702   int64_t axis;
4703   /*
4704    **
4705    * (Only available in Core ML Specification >= 5 (iOS >= 14, macOS >= 11.0))
4706    * Interleave option. If True, concatenation is done via interleaving the inputs.
4707    * This requires all inputs to have the exact same shape.
4708    */
4709   protobuf_c_boolean interleave;
4710 };
4711 #define CORE_ML__SPECIFICATION__CONCAT_NDLAYER_PARAMS__INIT \
4712  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__concat_ndlayer_params__descriptor) \
4713     , 0, 0 }
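
/*
 * Illustrative sketch, not part of the generated API: concatenating two
 * equal-shape (R, K) matrices along axis 0, with and without the interleave
 * option, matching the worked example above. The helper name is hypothetical.
 */
static void example_concat_axis0(const float *in1, const float *in2, float *out,
                                 int R, int K, protobuf_c_boolean interleave)
{
  for (int r = 0; r < R; ++r)
    for (int k = 0; k < K; ++k) {
      if (interleave) {
        out[(2 * r)     * K + k] = in1[r * K + k];  /* out[0::2, :] = in1 */
        out[(2 * r + 1) * K + k] = in2[r * K + k];  /* out[1::2, :] = in2 */
      } else {
        out[r       * K + k] = in1[r * K + k];      /* out[0:R, :]  = in1 */
        out[(R + r) * K + k] = in2[r * K + k];      /* out[R:2R, :] = in2 */
      }
    }
}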
4714 
4715 
4716 /*
4717  **
4718  * A layer that performs softmax normalization along a specified axis.
4719  * .. code::
4720  *      y = SoftmaxNDLayer(x)
4721  * Requires 1 input and produces 1 output.
4722  * Output shape is same as the input.
4723  */
4724 struct  _CoreML__Specification__SoftmaxNDLayerParams
4725 {
4726   ProtobufCMessage base;
4727   /*
4728    **
4729    * Dimension on which the softmax would be performed. Supports negative values of the parameter 'axis'.
4730    */
4731   int64_t axis;
4732 };
4733 #define CORE_ML__SPECIFICATION__SOFTMAX_NDLAYER_PARAMS__INIT \
4734  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__softmax_ndlayer_params__descriptor) \
4735     , 0 }
4736 
4737 
4738 /*
4739  **
4740  * A layer that reverses specific dimensions of the input tensor.
4741  * It is similar in functionality to the numpy.flip method.
4742  * Requires 1 input and produces 1 output.
4743  * Output shape is same as the input.
4744  */
4745 struct  _CoreML__Specification__ReverseLayerParams
4746 {
4747   ProtobufCMessage base;
4748   /*
4749    **
4750    * Reverses each dimension of the input tensor for which corresponding reverseDim is set to True.
4751    * Requires len(reverseDim) == rank(inputTensor)
4752    */
4753   size_t n_reversedim;
4754   protobuf_c_boolean *reversedim;
4755 };
4756 #define CORE_ML__SPECIFICATION__REVERSE_LAYER_PARAMS__INIT \
4757  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reverse_layer_params__descriptor) \
4758     , 0,NULL }
4759 
4760 
4761 /*
4762  **
4763  * A layer that reverses variable length slices.
4764  * Requires 2 inputs and produces 1 output.
4765  * 2 inputs, in order are denoted by "data", "seq_lengths".
4766  * "seq_lenghts" must be a rank 1 tensor, i.e. seq_lengths.shape = (B,)
4767  * which contains the lengths of the amount of sequence to be reversed, for each element of the batch.
4768  * Dimension "batchAxis" in "data" must be equal to B, i.e,
4769  * data.shape[batchAxis] = B.
4770  * According to the batch axis, input "data" is first divided into a batch of B inputs,
4771  * each of which is flipped along the dimension "sequenceAxis", by the amount specified in
4772  * "seq_lengths", the second input.
4773  * e.g.:
4774  * data [shape = (2,4)]:
4775  * [0 1 2 3]
4776  * [4 5 6 7]
4777  * seq_lengths [shape = (2,)]:
4778  * [3, 0]
4779  * batchAxis = 0
4780  * sequenceAxis = 1
4781  * output [shape = (2,4)]:
4782  * [2 1 0 3]
4783  * [4 5 6 7]
4784  * data [shape = (2,3,2)]:
4785  * [0 1]
4786  * [2 3]
4787  * [4 5] (slice = 0)
4788  * [6 7]
4789  * [8 9]
4790  * [10 11] (slice = 1)
4791  * seq_lengths [shape = (2,)]:
4792  * [2, 3]
4793  * batchAxis = 0
4794  * sequenceAxis = 1
4795  * output [shape = (2,3,2)]:
4796  * [2 3]
4797  * [0 1]
4798  * [4 5] (slice = 0)
4799  * [10 11]
4800  * [8 9]
4801  * [6 7] (slice = 1)
4802  * Output shape is same as the input.
4803  */
4804 struct  _CoreML__Specification__ReverseSeqLayerParams
4805 {
4806   ProtobufCMessage base;
4807   /*
4808    * batchAxis has to be strictly less than sequenceAxis
4809    */
4810   int64_t batchaxis;
4811   int64_t sequenceaxis;
4812 };
4813 #define CORE_ML__SPECIFICATION__REVERSE_SEQ_LAYER_PARAMS__INIT \
4814  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reverse_seq_layer_params__descriptor) \
4815     , 0, 0 }
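
/*
 * Illustrative sketch, not part of the generated API: reversing variable
 * length slices for the common case batchAxis = 0, sequenceAxis = 1 of a
 * (B, S, F) blob in C-order, matching the worked examples above (F is the
 * product of the trailing dimensions). The helper name is hypothetical.
 */
static void example_reverse_seq(const float *data, const int64_t *seq_lengths, /* (B,) */
                                float *out, int B, int S, int F)
{
  for (int b = 0; b < B; ++b)
    for (int s = 0; s < S; ++s) {
      /* the first seq_lengths[b] steps are flipped; the rest are copied as-is */
      int src = (s < seq_lengths[b]) ? (int)seq_lengths[b] - 1 - s : s;
      for (int f = 0; f < F; ++f)
        out[(b * S + s) * F + f] = data[(b * S + src) * F + f];
    }
}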
4816 
4817 
4818 /*
4819  **
4820  * A layer that loads data as a parameter and provides it as an output.
4821  * .. code::
4822  *      y = LoadConstantNDLayer()
4823  * Requires no input and produces 1 output.
4824  * Output: A tensor with shape as provided in the parameter "shape"
4825  */
4826 struct  _CoreML__Specification__LoadConstantNDLayerParams
4827 {
4828   ProtobufCMessage base;
4829   /*
4830    **
4831    * The shape of the constant to be loaded.
4832    */
4833   size_t n_shape;
4834   uint64_t *shape;
4835   CoreML__Specification__WeightParams *data;
4836 };
4837 #define CORE_ML__SPECIFICATION__LOAD_CONSTANT_NDLAYER_PARAMS__INIT \
4838  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__load_constant_ndlayer_params__descriptor) \
4839     , 0,NULL, NULL }
4840 
4841 
4842 /*
4843  **
4844  * A layer that generates an output tensor with a constant value.
4845  * Input is only used to determine the shape of the output.
4846  * This layer is used to allocate a tensor with a dynamic shape (that of the input) and constant value.
4847  * Requires 1 input and produces 1 output.
4848  * .. code::
4849  *      y = FillLikeLayer(x)
4850  * Input
4851  *     An N-Dimensional tensor, whose values are ignored. Only the shape is used to
4852  *     infer the shape of the output.
4853  * Output
4854  *     An N-Dimensional tensor with the same shape as the input tensor.
4855  */
4856 struct  _CoreML__Specification__FillLikeLayerParams
4857 {
4858   ProtobufCMessage base;
4859   float value;
4860 };
4861 #define CORE_ML__SPECIFICATION__FILL_LIKE_LAYER_PARAMS__INIT \
4862  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__fill_like_layer_params__descriptor) \
4863     , 0 }
4864 
4865 
4866 /*
4867  **
4868  * A layer that generates an output tensor with a constant value.
4869  * This layer is used to allocate a tensor with a static shape and constant value.
4870  * Requires no input and produces 1 output.
4871  * .. code::
4872  *      y = FillStaticLayer(x)
4873  * Output
4874  *     An N-Dimensional tensor of shape "targetShape".
4875  */
4876 struct  _CoreML__Specification__FillStaticLayerParams
4877 {
4878   ProtobufCMessage base;
4879   float value;
4880   size_t n_targetshape;
4881   uint64_t *targetshape;
4882 };
4883 #define CORE_ML__SPECIFICATION__FILL_STATIC_LAYER_PARAMS__INIT \
4884  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__fill_static_layer_params__descriptor) \
4885     , 0, 0,NULL }
4886 
4887 
4888 /*
4889  **
4890  * A layer that generates an output tensor with a constant value.
4891  * This layer is used to allocate a tensor with a dynamic shape (as specified by the input) and constant value.
4892  * Requires 1 input and produces 1 output.
4893  * .. code::
4894  *      y = FillDynamicLayer(x)
4895  * Input
4896  *     A rank 1 tensor specifying the shape of the output
4897  * Output
4898  *     An N-Dimensional tensor with the shape specified by the values in the input tensor.
4899  */
4900 struct  _CoreML__Specification__FillDynamicLayerParams
4901 {
4902   ProtobufCMessage base;
4903   float value;
4904 };
4905 #define CORE_ML__SPECIFICATION__FILL_DYNAMIC_LAYER_PARAMS__INIT \
4906  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__fill_dynamic_layer_params__descriptor) \
4907     , 0 }
4908 
4909 
4910 /*
4911  **
4912  * A layer that returns the elements either from tensor x or tensor y,
4913  * depending on the value in the condition tensor.
4914  * It is similar in functionality to the numpy.where method with 3 inputs.
4915  * Requires 3 inputs and produces 1 output.
4916  * Inputs, in order, are the condition tensor, x and y.
4917  * for each vector index (i,...,j):
4918  *    output[i,...,j] = x[i,...,j] if condition[i,...,j] = True
4919  *                      y[i,...,j] if condition[i,...,j] = False
4920  * All the 3 inputs are first broadcasted to a common shape.
4921  * (the shapes must be broadcastable)
4922  * output.rank = max(input[0].rank, input[1].rank, input[2].rank)
4923  */
4924 struct  _CoreML__Specification__WhereBroadcastableLayerParams
4925 {
4926   ProtobufCMessage base;
4927 };
4928 #define CORE_ML__SPECIFICATION__WHERE_BROADCASTABLE_LAYER_PARAMS__INIT \
4929  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__where_broadcastable_layer_params__descriptor) \
4930      }
4931 
4932 
4933 /*
4934  **
4935  * A layer that computes elementwise trigonometric sine function.
4936  * .. code::
4937  *      y = SinLayer(x)
4938  * Requires 1 input and produces 1 output.
4939  * Output shape is same as the input.
4940  */
4941 struct  _CoreML__Specification__SinLayerParams
4942 {
4943   ProtobufCMessage base;
4944 };
4945 #define CORE_ML__SPECIFICATION__SIN_LAYER_PARAMS__INIT \
4946  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__sin_layer_params__descriptor) \
4947      }
4948 
4949 
4950 /*
4951  **
4952  * A layer that computes elementwise trigonometric cosine function.
4953  * .. code::
4954  *      y = CosLayer(x)
4955  * Requires 1 input and produces 1 output.
4956  * Output shape is same as the input.
4957  */
4958 struct  _CoreML__Specification__CosLayerParams
4959 {
4960   ProtobufCMessage base;
4961 };
4962 #define CORE_ML__SPECIFICATION__COS_LAYER_PARAMS__INIT \
4963  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__cos_layer_params__descriptor) \
4964      }
4965 
4966 
4967 /*
4968  **
4969  * A layer that computes elementwise trigonometric tangent function.
4970  * .. code::
4971  *      y = TanLayer(x)
4972  * Requires 1 input and produces 1 output.
4973  * Output shape is same as the input.
4974  */
4975 struct  _CoreML__Specification__TanLayerParams
4976 {
4977   ProtobufCMessage base;
4978 };
4979 #define CORE_ML__SPECIFICATION__TAN_LAYER_PARAMS__INIT \
4980  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__tan_layer_params__descriptor) \
4981      }
4982 
4983 
4984 /*
4985  **
4986  * A layer that computes elementwise trigonometric arcsine function.
4987  * .. code::
4988  *      y = AsinLayer(x)
4989  * Requires 1 input and produces 1 output.
4990  * Output shape is same as the input.
4991  */
4992 struct  _CoreML__Specification__AsinLayerParams
4993 {
4994   ProtobufCMessage base;
4995 };
4996 #define CORE_ML__SPECIFICATION__ASIN_LAYER_PARAMS__INIT \
4997  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__asin_layer_params__descriptor) \
4998      }
4999 
5000 
5001 /*
5002  **
5003  * A layer that computes elementwise trigonometric arccosine function.
5004  * .. code::
5005  *      y = AcosLayer(x)
5006  * Requires 1 input and produces 1 output.
5007  * Output shape is same as the input.
5008  */
5009 struct  _CoreML__Specification__AcosLayerParams
5010 {
5011   ProtobufCMessage base;
5012 };
5013 #define CORE_ML__SPECIFICATION__ACOS_LAYER_PARAMS__INIT \
5014  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__acos_layer_params__descriptor) \
5015      }
5016 
5017 
5018 /*
5019  **
5020  * A layer that computes elementwise trigonometric arctangent function.
5021  * .. code::
5022  *      y = AtanLayer(x)
5023  * Requires 1 input and produces 1 output.
5024  * Output shape is same as the input.
5025  */
5026 struct  _CoreML__Specification__AtanLayerParams
5027 {
5028   ProtobufCMessage base;
5029 };
5030 #define CORE_ML__SPECIFICATION__ATAN_LAYER_PARAMS__INIT \
5031  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__atan_layer_params__descriptor) \
5032      }
5033 
5034 
5035 /*
5036  **
5037  * A layer that computes the elementwise hyperbolic sine function.
5038  * .. code::
5039  *      y = SinhLayer(x)
5040  * Requires 1 input and produces 1 output.
5041  * Output shape is same as the input.
5042  */
5043 struct  _CoreML__Specification__SinhLayerParams
5044 {
5045   ProtobufCMessage base;
5046 };
5047 #define CORE_ML__SPECIFICATION__SINH_LAYER_PARAMS__INIT \
5048  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__sinh_layer_params__descriptor) \
5049      }
5050 
5051 
5052 /*
5053  **
5054  * A layer that computes the elementwise hyperbolic cosine function.
5055  * .. code::
5056  *      y = CoshLayer(x)
5057  * Requires 1 input and produces 1 output.
5058  * Output shape is same as the input.
5059  */
5060 struct  _CoreML__Specification__CoshLayerParams
5061 {
5062   ProtobufCMessage base;
5063 };
5064 #define CORE_ML__SPECIFICATION__COSH_LAYER_PARAMS__INIT \
5065  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__cosh_layer_params__descriptor) \
5066      }
5067 
5068 
5069 /*
5070  **
5071  * A layer that computes the elementwise hyperbolic tangent function.
5072  * .. code::
5073  *      y = TanhLayer(x)
5074  * Requires 1 input and produces 1 output.
5075  * Output shape is same as the input.
5076  */
5077 struct  _CoreML__Specification__TanhLayerParams
5078 {
5079   ProtobufCMessage base;
5080 };
5081 #define CORE_ML__SPECIFICATION__TANH_LAYER_PARAMS__INIT \
5082  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__tanh_layer_params__descriptor) \
5083      }
5084 
5085 
5086 /*
5087  **
5088  * A layer that computes the elementwise hyperbolic arcsine function.
5089  * .. code::
5090  *      y = AsinhLayer(x)
5091  * Requires 1 input and produces 1 output.
5092  * Output shape is same as the input.
5093  */
5094 struct  _CoreML__Specification__AsinhLayerParams
5095 {
5096   ProtobufCMessage base;
5097 };
5098 #define CORE_ML__SPECIFICATION__ASINH_LAYER_PARAMS__INIT \
5099  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__asinh_layer_params__descriptor) \
5100      }
5101 
5102 
5103 /*
5104  **
 * A layer that computes the elementwise inverse hyperbolic cosine (arccosh) function.
5106  * .. code::
5107  *      y = AcoshLayer(x)
5108  * Requires 1 input and produces 1 output.
5109  * Output shape is same as the input.
5110  */
5111 struct  _CoreML__Specification__AcoshLayerParams
5112 {
5113   ProtobufCMessage base;
5114 };
5115 #define CORE_ML__SPECIFICATION__ACOSH_LAYER_PARAMS__INIT \
5116  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__acosh_layer_params__descriptor) \
5117      }
5118 
5119 
5120 /*
5121  **
 * A layer that computes the elementwise inverse hyperbolic tangent (arctanh) function.
5123  * .. code::
5124  *      y = AtanhLayer(x)
5125  * Requires 1 input and produces 1 output.
5126  * Output shape is same as the input.
5127  */
5128 struct  _CoreML__Specification__AtanhLayerParams
5129 {
5130   ProtobufCMessage base;
5131 };
5132 #define CORE_ML__SPECIFICATION__ATANH_LAYER_PARAMS__INIT \
5133  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__atanh_layer_params__descriptor) \
5134      }
5135 
5136 
5137 /*
5138  **
5139  * A layer that raises each element in first tensor to the power of
5140  * corresponding element in the second tensor.
5141  * Supports conventional numpy-like broadcasting.
5142  * .. code::
5143  *      y = PowBroadcastableLayer(x)
5144  * Requires 2 inputs and produces 1 output.
5145  * Input
5146  *     - First N-Dimensional tensor
5147  *     - Second N-Dimensional tensor
5148  * Output
5149  *     An N-Dimensional tensor with the broadcast shape.
5150  */
5151 struct  _CoreML__Specification__PowBroadcastableLayerParams
5152 {
5153   ProtobufCMessage base;
5154 };
5155 #define CORE_ML__SPECIFICATION__POW_BROADCASTABLE_LAYER_PARAMS__INIT \
5156  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__pow_broadcastable_layer_params__descriptor) \
5157      }
5158 
5159 
5160 /*
5161  **
5162  * A layer that computes the exponential of all elements in the input tensor, with the base 2.
5163  * .. code::
5164  *      y = Exp2Layer(x)
5165  * Requires 1 input and produces 1 output.
5166  * Output shape is same as the input.
5167  */
5168 struct  _CoreML__Specification__Exp2LayerParams
5169 {
5170   ProtobufCMessage base;
5171 };
5172 #define CORE_ML__SPECIFICATION__EXP2_LAYER_PARAMS__INIT \
5173  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__exp2_layer_params__descriptor) \
5174      }
5175 
5176 
5177 /*
5178  **
5179  * A layer that returns a tensor containing the indices of all non-zero
5180  * elements of input tensor.
5181  * It is similar in functionality to the numpy.where method with 1 input.
5182  * Requires 1 input and produces 1 output.
5183  * Output is of rank 2, of shape (N,R),
5184  * where N is the number of non-zero elements in the input and R is the rank of the input.
5185  * Output contains indices represented in the multi-index form
5186  * e.g.:
5187  * input {shape = (4,)}:
5188  * [0 1 0 2]
5189  * output {shape = (2,1)}:
5190  * [1]
5191  * [3]
5192  * input {shape = (3, 3)}:
5193  * [1 2 1]
5194  * [0 2 2]
5195  * [2 1 0]
 * output {shape = (7,2)}:
5197  * [0. 0.]
5198  * [0. 1.]
5199  * [0. 2.]
5200  * [1. 1.]
5201  * [1. 2.]
5202  * [2. 0.]
5203  * [2. 1.]
5204  */
5205 struct  _CoreML__Specification__WhereNonZeroLayerParams
5206 {
5207   ProtobufCMessage base;
5208 };
5209 #define CORE_ML__SPECIFICATION__WHERE_NON_ZERO_LAYER_PARAMS__INIT \
5210  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__where_non_zero_layer_params__descriptor) \
5211      }
5212 
5213 
5214 /*
5215  **
5216  * A layer that copies a tensor setting everything outside a central band in
5217  * each inner-most matrix to zero.
5218  * Requires 1 input and produces 1 output.
5219  * Parameters for matrix_band_part layer
5220  * band(m, n) = (num_lower < 0 || (m-n) <= num_lower) && (num_upper < 0 || (n-m) <= num_upper).
5221  * output[i, j, k, ..., m, n] = band(m, n) * input[i, j, k, ..., m, n]
5222  * Output shape is same as the input shape.
5223  * Rank of the input must be at least 2.
5224  * For rank higher than 2, the last 2 dimensions are treated as the matrix, while the rest are treated as batch.
5225  */
5226 struct  _CoreML__Specification__MatrixBandPartLayerParams
5227 {
5228   ProtobufCMessage base;
5229   int64_t numlower;
5230   int64_t numupper;
5231 };
5232 #define CORE_ML__SPECIFICATION__MATRIX_BAND_PART_LAYER_PARAMS__INIT \
5233  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__matrix_band_part_layer_params__descriptor) \
5234     , 0, 0 }
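
/*
 * Editorial usage sketch (not emitted by protoc-c): a minimal example of populating
 * the message above through its INIT macro and fields, following the band(m, n)
 * definition in the comment. Variable names and values are illustrative only.
 * .. code::
 *      CoreML__Specification__MatrixBandPartLayerParams band =
 *          CORE_ML__SPECIFICATION__MATRIX_BAND_PART_LAYER_PARAMS__INIT;
 *      band.numlower = 0;    // zero everything below the main diagonal
 *      band.numupper = -1;   // a negative value keeps the entire upper band
 */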
5235 
5236 
5237 /*
5238  **
5239  * A layer that copies a tensor setting everything outside upper triangular to zero.
5240  * Requires 1 input and produces 1 output.
5241  * Output shape is same as the input shape.
5242  * Rank of the input must be at least 2.
5243  * For rank higher than 2, the last 2 dimensions are treated as the matrix, while the rest are treated as batch.
5244  */
5245 struct  _CoreML__Specification__UpperTriangularLayerParams
5246 {
5247   ProtobufCMessage base;
5248   /*
5249    * Diagonal below which to zero elements. k = 0 (the default) is the main diagonal, k < 0 is below it and k > 0 is above
5250    */
5251   int64_t k;
5252 };
5253 #define CORE_ML__SPECIFICATION__UPPER_TRIANGULAR_LAYER_PARAMS__INIT \
5254  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__upper_triangular_layer_params__descriptor) \
5255     , 0 }
5256 
5257 
5258 /*
5259  **
5260  * A layer that copies a tensor setting everything outside lower triangular to zero.
5261  * Requires 1 input and produces 1 output.
5262  * Output shape is same as the input shape.
5263  * Rank of the input must be at least 2.
5264  * For rank higher than 2, the last 2 dimensions are treated as the matrix, while the rest are treated as batch.
5265  */
5266 struct  _CoreML__Specification__LowerTriangularLayerParams
5267 {
5268   ProtobufCMessage base;
5269   /*
5270    * Diagonal above which to zero elements. k = 0 (the default) is the main diagonal, k < 0 is below it and k > 0 is above
5271    */
5272   int64_t k;
5273 };
5274 #define CORE_ML__SPECIFICATION__LOWER_TRIANGULAR_LAYER_PARAMS__INIT \
5275  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__lower_triangular_layer_params__descriptor) \
5276     , 0 }
5277 
5278 
5279 /*
5280  **
5281  * A layer that broadcasts a tensor to a new shape.
5282  * Requires 2 inputs and produces 1 output.
5283  * First input is broadcast to produce the output, while the second input is only
 * used to determine the shape of the output. Values of the second input are not used.
5285  * Output is a tensor with the same shape as the second input.
5286  */
5287 struct  _CoreML__Specification__BroadcastToLikeLayerParams
5288 {
5289   ProtobufCMessage base;
5290 };
5291 #define CORE_ML__SPECIFICATION__BROADCAST_TO_LIKE_LAYER_PARAMS__INIT \
5292  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__broadcast_to_like_layer_params__descriptor) \
5293      }
5294 
5295 
5296 /*
5297  **
5298  * A layer that broadcasts a tensor to a new shape.
5299  * Requires 1 input and produces 1 output.
5300  * Output tensor is the broadcasted version of the input and has shape as specified in the
5301  * parameter "targetShape".
5302  */
5303 struct  _CoreML__Specification__BroadcastToStaticLayerParams
5304 {
5305   ProtobufCMessage base;
5306   size_t n_targetshape;
5307   uint64_t *targetshape;
5308 };
5309 #define CORE_ML__SPECIFICATION__BROADCAST_TO_STATIC_LAYER_PARAMS__INIT \
5310  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__broadcast_to_static_layer_params__descriptor) \
5311     , 0,NULL }
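
/*
 * Editorial usage sketch (not emitted by protoc-c): setting the repeated
 * "targetShape" field, which protobuf-c exposes as an element count plus a pointer.
 * The shape values below are illustrative only.
 * .. code::
 *      static uint64_t target_shape[3] = {2, 6, 8};
 *      CoreML__Specification__BroadcastToStaticLayerParams bcast =
 *          CORE_ML__SPECIFICATION__BROADCAST_TO_STATIC_LAYER_PARAMS__INIT;
 *      bcast.n_targetshape = 3;
 *      bcast.targetshape = target_shape;
 */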
5312 
5313 
5314 /*
5315  **
5316  * A layer that broadcasts a tensor to a new shape.
5317  * Requires 2 inputs and produces 1 output.
5318  * First input is the one that is broadcasted to produce the output.
5319  * Second input is a rank 1 tensor specifying the shape of the output.
5320  * Output tensor has shape as specified by the values in the 2nd input tensor.
5321  */
5322 struct  _CoreML__Specification__BroadcastToDynamicLayerParams
5323 {
5324   ProtobufCMessage base;
5325 };
5326 #define CORE_ML__SPECIFICATION__BROADCAST_TO_DYNAMIC_LAYER_PARAMS__INIT \
5327  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__broadcast_to_dynamic_layer_params__descriptor) \
5328      }
5329 
5330 
5331 /*
5332  **
5333  * A layer that performs element-wise addition operation with broadcast support.
5334  * Requires 2 inputs and produces 1 output.
5335  */
5336 struct  _CoreML__Specification__AddBroadcastableLayerParams
5337 {
5338   ProtobufCMessage base;
5339 };
5340 #define CORE_ML__SPECIFICATION__ADD_BROADCASTABLE_LAYER_PARAMS__INIT \
5341  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__add_broadcastable_layer_params__descriptor) \
5342      }
5343 
5344 
5345 /*
5346  **
5347  * A layer that performs element-wise maximum operation with broadcast support.
5348  * Requires 2 inputs and produces 1 output.
5349  */
5350 struct  _CoreML__Specification__MaxBroadcastableLayerParams
5351 {
5352   ProtobufCMessage base;
5353 };
5354 #define CORE_ML__SPECIFICATION__MAX_BROADCASTABLE_LAYER_PARAMS__INIT \
5355  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__max_broadcastable_layer_params__descriptor) \
5356      }
5357 
5358 
5359 /*
5360  **
5361  * A layer that performs element-wise minimum operation with broadcast support.
5362  * Requires 2 inputs and produces 1 output.
5363  */
5364 struct  _CoreML__Specification__MinBroadcastableLayerParams
5365 {
5366   ProtobufCMessage base;
5367 };
5368 #define CORE_ML__SPECIFICATION__MIN_BROADCASTABLE_LAYER_PARAMS__INIT \
5369  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__min_broadcastable_layer_params__descriptor) \
5370      }
5371 
5372 
5373 /*
5374  **
5375  * A layer that performs element-wise modular operation with broadcast support.
5376  * Requires 2 inputs and produces 1 output.
5377  */
5378 struct  _CoreML__Specification__ModBroadcastableLayerParams
5379 {
5380   ProtobufCMessage base;
5381 };
5382 #define CORE_ML__SPECIFICATION__MOD_BROADCASTABLE_LAYER_PARAMS__INIT \
5383  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__mod_broadcastable_layer_params__descriptor) \
5384      }
5385 
5386 
5387 /*
5388  **
5389  * A layer that performs element-wise floor division operation with broadcast support.
5390  * Requires 2 inputs and produces 1 output.
5391  */
5392 struct  _CoreML__Specification__FloorDivBroadcastableLayerParams
5393 {
5394   ProtobufCMessage base;
5395 };
5396 #define CORE_ML__SPECIFICATION__FLOOR_DIV_BROADCASTABLE_LAYER_PARAMS__INIT \
5397  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__floor_div_broadcastable_layer_params__descriptor) \
5398      }
5399 
5400 
5401 /*
5402  **
5403  * A layer that performs element-wise subtract operation with broadcast support.
5404  * Requires 2 inputs and produces 1 output.
5405  */
5406 struct  _CoreML__Specification__SubtractBroadcastableLayerParams
5407 {
5408   ProtobufCMessage base;
5409 };
5410 #define CORE_ML__SPECIFICATION__SUBTRACT_BROADCASTABLE_LAYER_PARAMS__INIT \
5411  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__subtract_broadcastable_layer_params__descriptor) \
5412      }
5413 
5414 
5415 /*
5416  **
5417  * A layer that performs element-wise multiply operation with broadcast support.
5418  * Requires 2 inputs and produces 1 output.
5419  */
5420 struct  _CoreML__Specification__MultiplyBroadcastableLayerParams
5421 {
5422   ProtobufCMessage base;
5423 };
5424 #define CORE_ML__SPECIFICATION__MULTIPLY_BROADCASTABLE_LAYER_PARAMS__INIT \
5425  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__multiply_broadcastable_layer_params__descriptor) \
5426      }
5427 
5428 
5429 /*
5430  **
5431  * A layer that performs element-wise division operation with broadcast support.
5432  * Requires 2 inputs and produces 1 output.
5433  */
5434 struct  _CoreML__Specification__DivideBroadcastableLayerParams
5435 {
5436   ProtobufCMessage base;
5437 };
5438 #define CORE_ML__SPECIFICATION__DIVIDE_BROADCASTABLE_LAYER_PARAMS__INIT \
5439  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__divide_broadcastable_layer_params__descriptor) \
5440      }
5441 
5442 
5443 /*
5444  **
5445  * Gather layer that gathers elements from the first input, along a specified axis,
5446  * at indices specified in the second input.
5447  * It is similar in functionality to the numpy.take method.
5448  * Requires 2 inputs and produces 1 output.
5449  * Given two inputs, 'data' and 'indices', gather the slices of 'data'
5450  * and store into output.
5451  * e.g.
5452  * for i in [0, length(indices) - 1]
5453  *    output[i] = data[indices[i]]  (1-D case, axis=0)
5454  * if axis = 0:
5455  * for each vector index (i,...,j)
5456  *    output[i,...,j,:,..,:] = data[indices[i,...,j],:,..,:]
5457  * output.rank = (data.rank - 1) + indices.rank
5458  * Negative indices and negative axis are supported.
5459  * e.g:
5460  * data shape = (2, 3)
5461  * indices shape = (6, 8)
5462  * axis = 0
5463  * output shape = (6, 8) + (3,) = (6, 8, 3)
5464  * data shape = (2, 3, 5)
5465  * indices shape = (6, 8)
5466  * axis = 1
5467  * output shape = (2,) + (6, 8) + (5,) =  (2, 6, 8, 5)
5468  */
5469 struct  _CoreML__Specification__GatherLayerParams
5470 {
5471   ProtobufCMessage base;
5472   int64_t axis;
5473 };
5474 #define CORE_ML__SPECIFICATION__GATHER_LAYER_PARAMS__INIT \
5475  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__gather_layer_params__descriptor) \
5476     , 0 }
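
/*
 * Editorial usage sketch (not emitted by protoc-c): populating the gather axis and
 * serializing the message through the generic protobuf-c entry points declared in
 * <protobuf-c/protobuf-c.h>. The buffer handling is illustrative and assumes
 * <stdlib.h> is available.
 * .. code::
 *      CoreML__Specification__GatherLayerParams gather =
 *          CORE_ML__SPECIFICATION__GATHER_LAYER_PARAMS__INIT;
 *      gather.axis = -1;    // gather along the last axis; negative indexing is allowed
 *      size_t len = protobuf_c_message_get_packed_size(&gather.base);
 *      uint8_t *buf = malloc(len);
 *      if (buf != NULL)
 *          protobuf_c_message_pack(&gather.base, buf);
 */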
5477 
5478 
5479 struct  _CoreML__Specification__ScatterLayerParams
5480 {
5481   ProtobufCMessage base;
5482   int64_t axis;
5483   /*
5484    * / mode of accumulation.
5485    */
5486   CoreML__Specification__ScatterMode mode;
5487 };
5488 #define CORE_ML__SPECIFICATION__SCATTER_LAYER_PARAMS__INIT \
5489  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__scatter_layer_params__descriptor) \
5490     , 0, CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_UPDATE }
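
/*
 * Editorial usage sketch (not emitted by protoc-c): selecting the scatter axis and
 * accumulation mode. Only SCATTER_UPDATE is referenced here because it is the value
 * used by the INIT macro above; the other accumulation modes are declared with the
 * ScatterMode enum.
 * .. code::
 *      CoreML__Specification__ScatterLayerParams scatter =
 *          CORE_ML__SPECIFICATION__SCATTER_LAYER_PARAMS__INIT;
 *      scatter.axis = 0;
 *      scatter.mode = CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_UPDATE;
 */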
5491 
5492 
5493 /*
5494  **
5495  * A layer that gathers elements from the first input, 'params', at the multi-indices specified
5496  * by the second input, 'indices'.
5497  * Requires 2 inputs and produces 1 output.
5498  * 'params' = input[0], 'indices' = input[1]
5499  * 'indices' is a rank K+1 tensor of shape [I_0, I_1, .., I_(K-1), I_K] which is viewed as a collection of
5500  * indices of (I_0 * I_1 * ... * I_(K-1)) points in the I_K dimensional space. For instance, the multi-index of the first point
5501  * is indices[0,0,...,0,:].
5502  * Here is how the output is constructed:
5503  * for i = 0,1,...,(I_0-1)
5504  *   ...
5505  *     for j = 0,1,....,(I_(K-1)-1)
5506  *          output[i,....,j,:,:,..,:] = params[indices[i,...,j,:], :,:,..,:]
5507  * Hence, output shape is [I_0, I_1,...,I(K-1)] + params.shape[I_K:]
5508  * output.rank = indices.rank - 1 + params.rank - indices.shape[-1]
5509  * e.g:
5510  * input[0] shape = (4, 2, 3, 4)
5511  * input[1] shape = (6, 2)
5512  * output shape = (6,) + (3, 4) = (6, 3, 4)
5513  * input[0] shape = (3, 3, 3, 4, 7)
5514  * input[1] shape = (3, 5)
5515  * output shape = (3,) + () = (3,)
5516  * input[0] shape = (5, 3, 2, 5)
5517  * input[1] shape = (2, 7, 3, 2)
5518  * output shape = (2, 7, 3) + (2, 5) = (2, 7, 3, 2, 5)
5519  */
5520 struct  _CoreML__Specification__GatherNDLayerParams
5521 {
5522   ProtobufCMessage base;
5523 };
5524 #define CORE_ML__SPECIFICATION__GATHER_NDLAYER_PARAMS__INIT \
5525  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__gather_ndlayer_params__descriptor) \
5526      }
5527 
5528 
5529 /*
5530  * A layer that scatters data into a new tensor according to multi-indices from the input.
5531  * This is the inverse operation of GatherND.
5532  * Requires 3 inputs and produces 1 output.
5533  * 3 inputs, in order are denoted as "container", "indices", "updates".
5534  * 'indices' is a rank K+1 tensor of shape [I_0, I_1, .., I_(K-1), I_K] which is viewed as a collection of
5535  * indices of (I_0 * I_1 * ... * I_(K-1)) points in the I_K dimensional space. For instance, the multi-index of the first point
5536  * is indices[0,0,...,0,:].
5537  * container.rank >= I_K
5538  * updates.rank = K + (container.rank - I_K)
5539  * shape of 'updates' = [I_0, I_1,...,I(K-1)] + container.shape[I_K:]
5540  * output = container
 * For each vector index (i,...,j) s.t. 0<=i<I_0,..., 0<=j<I_(K-1)
5542  *   output[indices[i,...,j,:], :,:,..,:] = updates[i,....,j,:,:,..,:] // if mode == "SCATTER_UPDATE"
5543  * The output has the same shape as the first input.
5544  * e.g:
5545  * container shape = (3, 2)
5546  * indices shape = (4, 2)
5547  * updates shape = (4,)
5548  * output shape = (3, 2)
5549  * container shape = (7, 6)
5550  * indices shape = (4, 7, 2, 5, 1)
5551  * updates shape = (4, 7, 2, 5, 6)
5552  * output shape = (7, 6)
5553  */
5554 struct  _CoreML__Specification__ScatterNDLayerParams
5555 {
5556   ProtobufCMessage base;
5557   /*
5558    * / mode of accumulation.
5559    */
5560   CoreML__Specification__ScatterMode mode;
5561 };
5562 #define CORE_ML__SPECIFICATION__SCATTER_NDLAYER_PARAMS__INIT \
5563  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__scatter_ndlayer_params__descriptor) \
5564     , CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_UPDATE }
5565 
5566 
5567 /*
5568  **
5569  * Gather layer that gathers elements from the first input, along a specified axis,
5570  * at indices specified in the second input.
5571  * It is similar in functionality to the numpy.take_along_axis method.
5572  * Requires 2 inputs and produces 1 output.
5573  * Given two inputs, 'data' and 'indices', gather the slices of 'data'
5574  * and store into output.
5575  * Both inputs and output have the same rank.
5576  * Output shape is same as the shape of 'indices'
5577  * Shapes of 'indices' and 'data' match, except at the 'axis' dimension.
5578  * This operation performs the following operation for axis=0:
5579  * for each vector index (i,j,....,k)
5580  *    output[i,j,....,k] = data[index[i,j,....,k],j,....,k]
5581  * Negative indices and negative axis are supported.
5582  * e.g:
5583  * data shape = (4, 4, 7)
5584  * indices shape = (4, 5, 7)
5585  * axis = 1
5586  * output shape = (4, 5, 7)
5587  */
5588 struct  _CoreML__Specification__GatherAlongAxisLayerParams
5589 {
5590   ProtobufCMessage base;
5591   int64_t axis;
5592 };
5593 #define CORE_ML__SPECIFICATION__GATHER_ALONG_AXIS_LAYER_PARAMS__INIT \
5594  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__gather_along_axis_layer_params__descriptor) \
5595     , 0 }
5596 
5597 
5598 /*
5599  **
5600  * A layer that scatters data into a new tensor according to indices from
5601  * the input along the given axis into the output tensor.
5602  * This is the inverse operation of GatherAlongAxis.
5603  * It is similar in functionality to the numpy.put_along_axis method.
5604  * Requires 3 inputs and produces 1 output.
5605  * 3 inputs, in order are denoted as "container", "indices", "updates".
5606  * All inputs and output have the same rank.
5607  * Output shape is same as the shape of 'container'
5608  * Shapes of 'indices' and 'updates' match, which is same as the shape of 'container' except at the 'axis' dimension.
5609  * Negative indices and negative axis are supported.
5610  * This operation performs the following operation for axis=0:
5611  * output = container
5612  * for each vector index (i,j,....,k)
5613  *    output[index[i,j,....,k],j,....,k] = updates[i,j,....,k]
5614  * e.g.:
5615  * container shape = (2, 5, 6)
5616  * indices shape = (2, 2, 6)
5617  * updates shape = (2, 2, 6)
5618  * axis = -2
5619  * output shape = (2, 5, 6)
5620  */
5621 struct  _CoreML__Specification__ScatterAlongAxisLayerParams
5622 {
5623   ProtobufCMessage base;
5624   int64_t axis;
5625   /*
5626    * / mode of accumulation.
5627    */
5628   CoreML__Specification__ScatterMode mode;
5629 };
5630 #define CORE_ML__SPECIFICATION__SCATTER_ALONG_AXIS_LAYER_PARAMS__INIT \
5631  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__scatter_along_axis_layer_params__descriptor) \
5632     , 0, CORE_ML__SPECIFICATION__SCATTER_MODE__SCATTER_UPDATE }
5633 
5634 
5635 /*
5636  **
5637  * A layer that stacks the input tensors along the given axis.
5638  * It is similar in functionality to the numpy.stack method.
5639  * Requires at least 2 inputs and produces 1 output.
5640  * All inputs must have the same shape.
5641  * Rank of the output is 1 greater than the rank of the inputs.
5642  * Negative indexing is supported for the "axis" parameter.
5643  * e.g.:
5644  * input shape = (2, 4, 2)
5645  * number of inputs = 5
5646  * axis = 3
5647  * output shape = (2, 4, 2, 5)
5648  * input shape = (2, 4, 2)
5649  * number of inputs = 5
5650  * axis = -2
5651  * output shape = (2, 4, 5, 2)
5652  */
5653 struct  _CoreML__Specification__StackLayerParams
5654 {
5655   ProtobufCMessage base;
5656   int64_t axis;
5657 };
5658 #define CORE_ML__SPECIFICATION__STACK_LAYER_PARAMS__INIT \
5659  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__stack_layer_params__descriptor) \
5660     , 0 }
5661 
5662 
5663 /*
5664  **
 * A layer that reshapes a tensor without altering the rank of the input.
5666  * Order of the data is left unchanged.
5667  * Requires 1 input and produces 1 output.
5668  * e.g:
5669  * input shape = (20,10)
5670  * targetShape = (5,-1)
5671  * output shape = (5,40)
5672  * input shape = (20,10,5)
5673  * targetShape = (0,2,25)
5674  * output shape = (20,2,25)
5675  * input shape = (10,3,5)
5676  * targetShape = (25,0,-1)
5677  * output shape = (25,3,2)
5678  */
5679 struct  _CoreML__Specification__RankPreservingReshapeLayerParams
5680 {
5681   ProtobufCMessage base;
5682   /*
5683    **
5684    * Length of this field must be same as the input/output rank.
5685    * It can have 0's, in which case the corresponding input dimension is kept intact.
5686    * At most one element can be -1, in which case the output dimension is calculated from rest of the shape.
5687    */
5688   size_t n_targetshape;
5689   int64_t *targetshape;
5690 };
5691 #define CORE_ML__SPECIFICATION__RANK_PRESERVING_RESHAPE_LAYER_PARAMS__INIT \
5692  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__rank_preserving_reshape_layer_params__descriptor) \
5693     , 0,NULL }
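
/*
 * Editorial usage sketch (not emitted by protoc-c): the targetShape below mirrors the
 * (20,10,5) -> (20,2,25) example in the comment above, using 0 to keep a dimension
 * unchanged. Variable names are illustrative only.
 * .. code::
 *      static int64_t target_shape[3] = {0, 2, 25};
 *      CoreML__Specification__RankPreservingReshapeLayerParams reshape =
 *          CORE_ML__SPECIFICATION__RANK_PRESERVING_RESHAPE_LAYER_PARAMS__INIT;
 *      reshape.n_targetshape = 3;
 *      reshape.targetshape = target_shape;
 */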
5694 
5695 
5696 /*
5697  **
5698  * Constant padding layer.
5699  * Pad the input array with a constant value, either along a single given axis or along a set of axes.
5700  * Requires 1 or 2 inputs and produces 1 output.
5701  * The amount of padding can be either set as a parameter ("padAmounts") or provided as a second input.
5702  * Output rank is same as the rank of the first input.
5703  * when "padToGivenOutputSizeMode" is False:
5704  * output_shape[i] = input_shape[i] + padAmounts[2*i] + padAmounts[2*i+1], i=0,...,rank-1
5705  * Examples:
5706  * input shape = (20,10)
5707  * padAmounts = [0,1,4,0]
5708  * output shape = (21,14)
5709  * input shape = (20,10,5)
5710  * padAmounts = [0,0,3,4,0,9]
5711  * output shape = (20,17,14)
5712  * when "padToGivenOutputSizeMode" is True
 * output_shape[i] = max(input_shape[i], max(padAmounts[2*i], padAmounts[2*i+1])), i=0,...,rank-1
5714  * input shape = (20,10)
5715  * padAmounts = [0,21,14,0]
5716  * output shape = (21,14)
5717  * input shape = (20,10,5)
5718  * padAmounts = [0,0,17,0,0,14]
5719  * output shape = (20,17,14)
5720  */
5721 struct  _CoreML__Specification__ConstantPaddingLayerParams
5722 {
5723   ProtobufCMessage base;
5724   /*
5725    **
5726    * The value to be used for padding.
5727    */
5728   float value;
5729   /*
5730    **
5731    * Length of this repeated field must be twice the rank of the first input.
   * 2*i-th and (2*i+1)-th values represent the amount of padding to be applied to the i-th input
5733    * dimension, "before" and "after" the input values, respectively.
5734    */
5735   size_t n_padamounts;
5736   uint64_t *padamounts;
5737   /*
5738    **
5739    * When this is True, positive values in "padAmounts" are equivalent to the output shape.
5740    * In that case only one of padAmounts[2*i] and padAmounts[2*i+1] can be non zero, for i=0,..,rank-1.
5741    */
5742   protobuf_c_boolean padtogivenoutputsizemode;
5743 };
5744 #define CORE_ML__SPECIFICATION__CONSTANT_PADDING_LAYER_PARAMS__INIT \
5745  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__constant_padding_layer_params__descriptor) \
5746     , 0, 0,NULL, 0 }
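
/*
 * Editorial usage sketch (not emitted by protoc-c): the padAmounts below reproduce the
 * (20,10,5) -> (20,17,14) example from the comment above; the pad value and the mode
 * flag are illustrative only.
 * .. code::
 *      static uint64_t pad_amounts[6] = {0, 0, 3, 4, 0, 9};
 *      CoreML__Specification__ConstantPaddingLayerParams pad =
 *          CORE_ML__SPECIFICATION__CONSTANT_PADDING_LAYER_PARAMS__INIT;
 *      pad.value = 0.0f;                       // constant used to fill the padding
 *      pad.n_padamounts = 6;                   // twice the rank of the first input
 *      pad.padamounts = pad_amounts;
 *      pad.padtogivenoutputsizemode = 0;       // interpret values as pad amounts
 */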
5747 
5748 
5749 /*
5750  **
5751  * A layer that returns a tensor filled with values from the normal distribution.
5752  * Requires 1 input and produces 1 output.
5753  * Parameters
5754  *     seed: seed used for the normal distribution.
5755  *     mean: mean of the normal distribution.
5756  *     stdDev: standard deviation of the normal distribution.
5757  * Input
5758  *     An N-Dimensional tensor, whose values are ignored. Only the shape is used to
5759  *     infer the shape of the output.
5760  * Output
5761  *     An N-Dimensional tensor with the same shape as the input tensor.
5762  */
5763 struct  _CoreML__Specification__RandomNormalLikeLayerParams
5764 {
5765   ProtobufCMessage base;
5766   int64_t seed;
5767   float mean;
5768   float stddev;
5769 };
5770 #define CORE_ML__SPECIFICATION__RANDOM_NORMAL_LIKE_LAYER_PARAMS__INIT \
5771  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__random_normal_like_layer_params__descriptor) \
5772     , 0, 0, 0 }
5773 
5774 
5775 /*
5776  **
5777  * A layer that returns a tensor filled with values from the normal distribution.
5778  * Requires no input and produces 1 output.
5779  * Parameters
5780  *     seed: seed used for the normal distribution.
5781  *     mean: mean of the normal distribution.
5782  *     stdDev: standard deviation of the normal distribution.
5783  *     outputShape: shape of the output tensor.
5784  * Output
5785  *     An N-Dimensional tensor of shape "outputShape".
5786  */
5787 struct  _CoreML__Specification__RandomNormalStaticLayerParams
5788 {
5789   ProtobufCMessage base;
5790   int64_t seed;
5791   float mean;
5792   float stddev;
5793   size_t n_outputshape;
5794   uint64_t *outputshape;
5795 };
5796 #define CORE_ML__SPECIFICATION__RANDOM_NORMAL_STATIC_LAYER_PARAMS__INIT \
5797  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__random_normal_static_layer_params__descriptor) \
5798     , 0, 0, 0, 0,NULL }
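
/*
 * Editorial usage sketch (not emitted by protoc-c): a standard-normal fill of a fixed
 * (3, 4) output shape. The seed and shape values are illustrative only.
 * .. code::
 *      static uint64_t out_shape[2] = {3, 4};
 *      CoreML__Specification__RandomNormalStaticLayerParams rng =
 *          CORE_ML__SPECIFICATION__RANDOM_NORMAL_STATIC_LAYER_PARAMS__INIT;
 *      rng.seed = 42;
 *      rng.mean = 0.0f;
 *      rng.stddev = 1.0f;
 *      rng.n_outputshape = 2;
 *      rng.outputshape = out_shape;
 */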
5799 
5800 
5801 /*
5802  **
5803  * A layer that returns a tensor filled with values from the normal distribution.
5804  * Requires 1 input and produces 1 output.
5805  * Parameters:
5806  *     seed: seed used for the normal distribution.
5807  *     mean: mean of the normal distribution.
5808  *     stdDev: standard deviation of the normal distribution.
5809  * Input
5810  *     A rank 1 tensor specifying the shape of the output
5811  * Output
5812  *     An N-Dimensional tensor with the shape specified by the values in the input tensor.
5813  */
5814 struct  _CoreML__Specification__RandomNormalDynamicLayerParams
5815 {
5816   ProtobufCMessage base;
5817   int64_t seed;
5818   float mean;
5819   float stddev;
5820 };
5821 #define CORE_ML__SPECIFICATION__RANDOM_NORMAL_DYNAMIC_LAYER_PARAMS__INIT \
5822  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__random_normal_dynamic_layer_params__descriptor) \
5823     , 0, 0, 0 }
5824 
5825 
5826 /*
5827  **
5828  * A layer that returns a tensor filled with values from the uniform distribution.
5829  * Requires 1 input and produces 1 output.
5830  * Parameters
5831  *     seed: seed used for the uniform distribution.
5832  *     minVal: lower bound on the range of random values for the uniform distribution.
5833  *     maxVal: upper bound on the range of random values for the uniform distribution.
5834  * Input
5835  *     An N-Dimensional tensor, whose values are ignored. Only the shape is used to
5836  *     infer the shape of the output.
5837  * Output
5838  *     An N-Dimensional tensor with the same shape as the input tensor.
5839  */
5840 struct  _CoreML__Specification__RandomUniformLikeLayerParams
5841 {
5842   ProtobufCMessage base;
5843   int64_t seed;
5844   float minval;
5845   float maxval;
5846 };
5847 #define CORE_ML__SPECIFICATION__RANDOM_UNIFORM_LIKE_LAYER_PARAMS__INIT \
5848  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__random_uniform_like_layer_params__descriptor) \
5849     , 0, 0, 0 }
5850 
5851 
5852 /*
5853  **
5854  * A layer that returns a tensor filled with values from the uniform distribution.
5855  * Requires no input and produces 1 output.
5856  * Parameters
5857  *     seed: seed used for the uniform distribution.
5858  *     minVal: lower bound on the range of random values for the uniform distribution.
5859  *     maxVal: upper bound on the range of random values for the uniform distribution.
5860  *     outputShape: shape of the output tensor.
5861  * Output
5862  *     An N-Dimensional tensor of shape "outputShape".
5863  */
5864 struct  _CoreML__Specification__RandomUniformStaticLayerParams
5865 {
5866   ProtobufCMessage base;
5867   int64_t seed;
5868   float minval;
5869   float maxval;
5870   size_t n_outputshape;
5871   uint64_t *outputshape;
5872 };
5873 #define CORE_ML__SPECIFICATION__RANDOM_UNIFORM_STATIC_LAYER_PARAMS__INIT \
5874  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__random_uniform_static_layer_params__descriptor) \
5875     , 0, 0, 0, 0,NULL }
5876 
5877 
5878 /*
5879  **
5880  * A layer that returns a tensor filled with values from the uniform distribution.
5881  * Requires 1 input and produces 1 output.
5882  * Parameters:
5883  *     seed: seed used for the uniform distribution.
5884  *     minVal: lower bound on the range of random values for the uniform distribution.
5885  *     maxVal: upper bound on the range of random values for the uniform distribution.
5886  * Input
5887  *     A rank 1 tensor specifying the shape of the output
5888  * Output
5889  *     An N-Dimensional tensor with the shape specified by the values in the input tensor.
5890  */
5891 struct  _CoreML__Specification__RandomUniformDynamicLayerParams
5892 {
5893   ProtobufCMessage base;
5894   int64_t seed;
5895   float minval;
5896   float maxval;
5897 };
5898 #define CORE_ML__SPECIFICATION__RANDOM_UNIFORM_DYNAMIC_LAYER_PARAMS__INIT \
5899  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__random_uniform_dynamic_layer_params__descriptor) \
5900     , 0, 0, 0 }
5901 
5902 
5903 /*
5904  **
5905  * A layer that returns a tensor filled with values from the Bernoulli distribution.
5906  * Requires 1 input and produces 1 output.
5907  * Parameters
5908  *     seed: seed used for the Bernoulli distribution.
5909  *     prob: probability of a 1 event.
5910  * Input
5911  *     An N-Dimensional tensor, whose values are ignored. Only the shape is used to
5912  *     infer the shape of the output.
5913  * Output
5914  *     An N-Dimensional tensor with the same shape as the input tensor.
5915  */
5916 struct  _CoreML__Specification__RandomBernoulliLikeLayerParams
5917 {
5918   ProtobufCMessage base;
5919   int64_t seed;
5920   float prob;
5921 };
5922 #define CORE_ML__SPECIFICATION__RANDOM_BERNOULLI_LIKE_LAYER_PARAMS__INIT \
5923  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__random_bernoulli_like_layer_params__descriptor) \
5924     , 0, 0 }
5925 
5926 
5927 /*
5928  **
5929  * A layer that returns a tensor filled with values from the Bernoulli distribution.
5930  * Requires no input and produces 1 output.
5931  * Parameters
5932  *     seed: seed used for the Bernoulli distribution.
5933  *     prob: probability of a 1 event.
5934  *     outputShape: shape of the output tensor.
5935  * Output
5936  *     An N-Dimensional tensor of shape "outputShape".
5937  */
5938 struct  _CoreML__Specification__RandomBernoulliStaticLayerParams
5939 {
5940   ProtobufCMessage base;
5941   int64_t seed;
5942   float prob;
5943   size_t n_outputshape;
5944   uint64_t *outputshape;
5945 };
5946 #define CORE_ML__SPECIFICATION__RANDOM_BERNOULLI_STATIC_LAYER_PARAMS__INIT \
5947  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__random_bernoulli_static_layer_params__descriptor) \
5948     , 0, 0, 0,NULL }
5949 
5950 
5951 /*
5952  **
5953  * A layer that returns a tensor filled with values from the Bernoulli distribution.
5954  * Requires 1 input and produces 1 output.
5955  * Parameters:
5956  *     seed: seed used for the Bernoulli distribution.
5957  *     prob: probability of a 1 event.
5958  * Input
5959  *     A rank 1 tensor specifying the shape of the output
5960  * Output
5961  *     An N-Dimensional tensor with the shape specified by the values in the input tensor.
5962  */
5963 struct  _CoreML__Specification__RandomBernoulliDynamicLayerParams
5964 {
5965   ProtobufCMessage base;
5966   int64_t seed;
5967   float prob;
5968 };
5969 #define CORE_ML__SPECIFICATION__RANDOM_BERNOULLI_DYNAMIC_LAYER_PARAMS__INIT \
5970  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__random_bernoulli_dynamic_layer_params__descriptor) \
5971     , 0, 0 }
5972 
5973 
5974 /*
5975  **
5976  * A layer that returns a tensor of the specified shape filled with values from the categorical distribution.
5977  * Requires 1 input and produces 1 output.
5978  * Parameter:
5979  *     seed: seed used for the categorical distribution.
5980  *     numSamples: number of samples to draw.
5981  *     isLogits: true if the inputs are logits, false if the inputs are probabilities.
5982  *     eps: default value is 1e-10.
5983  *     temperature: default value is 1.0.
5984  * Input tensor shape = [D_1, D_2, ... , D_(R-1), D_R] (Rank = R)
5985  * Then the shape of the output is [D_1, D_2, ... , D_(R-1), numSamples] (Rank = R)
5986  */
5987 struct  _CoreML__Specification__CategoricalDistributionLayerParams
5988 {
5989   ProtobufCMessage base;
5990   int64_t seed;
5991   int64_t numsamples;
5992   protobuf_c_boolean islogits;
5993   float eps;
5994   float temperature;
5995 };
5996 #define CORE_ML__SPECIFICATION__CATEGORICAL_DISTRIBUTION_LAYER_PARAMS__INIT \
5997  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__categorical_distribution_layer_params__descriptor) \
5998     , 0, 0, 0, 0, 0 }
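
/*
 * Editorial usage sketch (not emitted by protoc-c): drawing 10 samples from logits,
 * with eps and temperature set to the default values stated in the comment above.
 * The seed is illustrative only.
 * .. code::
 *      CoreML__Specification__CategoricalDistributionLayerParams cat =
 *          CORE_ML__SPECIFICATION__CATEGORICAL_DISTRIBUTION_LAYER_PARAMS__INIT;
 *      cat.seed = 1;
 *      cat.numsamples = 10;
 *      cat.islogits = 1;          // inputs are logits rather than probabilities
 *      cat.eps = 1e-10f;
 *      cat.temperature = 1.0f;
 */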
5999 
6000 
6001 /*
6002  **
6003  * A layer that performs reduction with L1 normalization operation.
6004  * Negative indexing is supported.
6005  * Requires 1 input and produces 1 output.
6006  * Parameters:
6007  *    axes: dimensions along which to perform reduction
6008  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6009  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6010  */
6011 struct  _CoreML__Specification__ReduceL1LayerParams
6012 {
6013   ProtobufCMessage base;
6014   size_t n_axes;
6015   int64_t *axes;
6016   protobuf_c_boolean keepdims;
6017   protobuf_c_boolean reduceall;
6018 };
6019 #define CORE_ML__SPECIFICATION__REDUCE_L1_LAYER_PARAMS__INIT \
6020  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_l1_layer_params__descriptor) \
6021     , 0,NULL, 0, 0 }
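
/*
 * Editorial usage sketch (not emitted by protoc-c): reducing over the last axis while
 * keeping the reduced dimension. The same field layout applies to the other
 * Reduce*LayerParams messages below; the axis choice is illustrative only.
 * .. code::
 *      static int64_t reduce_axes[1] = {-1};
 *      CoreML__Specification__ReduceL1LayerParams reduce =
 *          CORE_ML__SPECIFICATION__REDUCE_L1_LAYER_PARAMS__INIT;
 *      reduce.n_axes = 1;
 *      reduce.axes = reduce_axes;
 *      reduce.keepdims = 1;       // reduced dimensions are kept with size 1
 *      reduce.reduceall = 0;      // honor "axes" instead of reducing everything
 */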
6022 
6023 
6024 /*
6025  **
6026  * A layer that performs reduction with L2 normalization operation.
6027  * Negative indexing is supported.
6028  * Requires 1 input and produces 1 output.
6029  * Parameters:
6030  *    axes: dimensions along which to perform reduction
6031  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6032  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6033  */
6034 struct  _CoreML__Specification__ReduceL2LayerParams
6035 {
6036   ProtobufCMessage base;
6037   size_t n_axes;
6038   int64_t *axes;
6039   protobuf_c_boolean keepdims;
6040   protobuf_c_boolean reduceall;
6041 };
6042 #define CORE_ML__SPECIFICATION__REDUCE_L2_LAYER_PARAMS__INIT \
6043  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_l2_layer_params__descriptor) \
6044     , 0,NULL, 0, 0 }
6045 
6046 
6047 /*
6048  **
6049  * A layer that performs reduction with max operation.
6050  * Negative indexing is supported.
6051  * Requires 1 input and produces 1 output.
6052  * Parameters:
6053  *    axes: dimensions along which to perform reduction
6054  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6055  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6056  */
6057 struct  _CoreML__Specification__ReduceMaxLayerParams
6058 {
6059   ProtobufCMessage base;
6060   size_t n_axes;
6061   int64_t *axes;
6062   protobuf_c_boolean keepdims;
6063   protobuf_c_boolean reduceall;
6064 };
6065 #define CORE_ML__SPECIFICATION__REDUCE_MAX_LAYER_PARAMS__INIT \
6066  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_max_layer_params__descriptor) \
6067     , 0,NULL, 0, 0 }
6068 
6069 
6070 /*
6071  **
6072  * A layer that performs reduction with min operation.
6073  * Negative indexing is supported.
6074  * Requires 1 input and produces 1 output.
6075  * Parameters:
6076  *    axes: dimensions along which to perform reduction
6077  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6078  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6079  */
6080 struct  _CoreML__Specification__ReduceMinLayerParams
6081 {
6082   ProtobufCMessage base;
6083   size_t n_axes;
6084   int64_t *axes;
6085   protobuf_c_boolean keepdims;
6086   protobuf_c_boolean reduceall;
6087 };
6088 #define CORE_ML__SPECIFICATION__REDUCE_MIN_LAYER_PARAMS__INIT \
6089  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_min_layer_params__descriptor) \
6090     , 0,NULL, 0, 0 }
6091 
6092 
6093 /*
6094  **
6095  * A layer that performs reduction with sum operation.
6096  * Negative indexing is supported.
6097  * Requires 1 input and produces 1 output.
6098  * Parameters:
6099  *    axes: dimensions along which to perform reduction
6100  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6101  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6102  */
6103 struct  _CoreML__Specification__ReduceSumLayerParams
6104 {
6105   ProtobufCMessage base;
6106   size_t n_axes;
6107   int64_t *axes;
6108   protobuf_c_boolean keepdims;
6109   protobuf_c_boolean reduceall;
6110 };
6111 #define CORE_ML__SPECIFICATION__REDUCE_SUM_LAYER_PARAMS__INIT \
6112  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_sum_layer_params__descriptor) \
6113     , 0,NULL, 0, 0 }
6114 
6115 
6116 /*
6117  **
6118  * A layer that performs reduction with prod operation.
6119  * Negative indexing is supported.
6120  * Requires 1 input and produces 1 output.
6121  * Parameters:
6122  *    axes: dimensions along which to perform reduction
6123  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6124  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6125  */
6126 struct  _CoreML__Specification__ReduceProdLayerParams
6127 {
6128   ProtobufCMessage base;
6129   size_t n_axes;
6130   int64_t *axes;
6131   protobuf_c_boolean keepdims;
6132   protobuf_c_boolean reduceall;
6133 };
6134 #define CORE_ML__SPECIFICATION__REDUCE_PROD_LAYER_PARAMS__INIT \
6135  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_prod_layer_params__descriptor) \
6136     , 0,NULL, 0, 0 }
6137 
6138 
6139 /*
6140  **
6141  * A layer that performs reduction with mean operation.
6142  * Negative indexing is supported.
6143  * Requires 1 input and produces 1 output.
6144  * Parameters:
6145  *    axes: dimensions along which to perform reduction
6146  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6147  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6148  */
6149 struct  _CoreML__Specification__ReduceMeanLayerParams
6150 {
6151   ProtobufCMessage base;
6152   size_t n_axes;
6153   int64_t *axes;
6154   protobuf_c_boolean keepdims;
6155   protobuf_c_boolean reduceall;
6156 };
6157 #define CORE_ML__SPECIFICATION__REDUCE_MEAN_LAYER_PARAMS__INIT \
6158  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_mean_layer_params__descriptor) \
6159     , 0,NULL, 0, 0 }
6160 
6161 
6162 /*
6163  **
6164  * A layer that performs reduction with logSum operation.
6165  * Negative indexing is supported.
6166  * Requires 1 input and produces 1 output.
6167  * Parameters:
6168  *    axes: dimensions along which to perform reduction
6169  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6170  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6171  */
6172 struct  _CoreML__Specification__ReduceLogSumLayerParams
6173 {
6174   ProtobufCMessage base;
6175   size_t n_axes;
6176   int64_t *axes;
6177   protobuf_c_boolean keepdims;
6178   protobuf_c_boolean reduceall;
6179 };
6180 #define CORE_ML__SPECIFICATION__REDUCE_LOG_SUM_LAYER_PARAMS__INIT \
6181  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_log_sum_layer_params__descriptor) \
6182     , 0,NULL, 0, 0 }
6183 
6184 
6185 /*
6186  **
 * A layer that performs reduction with sumSquare operation.
6188  * Negative indexing is supported.
6189  * Requires 1 input and produces 1 output.
6190  * Parameters:
6191  *    axes: dimensions along which to perform reduction
6192  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6193  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6194  */
6195 struct  _CoreML__Specification__ReduceSumSquareLayerParams
6196 {
6197   ProtobufCMessage base;
6198   size_t n_axes;
6199   int64_t *axes;
6200   protobuf_c_boolean keepdims;
6201   protobuf_c_boolean reduceall;
6202 };
6203 #define CORE_ML__SPECIFICATION__REDUCE_SUM_SQUARE_LAYER_PARAMS__INIT \
6204  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_sum_square_layer_params__descriptor) \
6205     , 0,NULL, 0, 0 }
6206 
6207 
6208 /*
6209  **
6210  * A layer that performs reduction with logSumExp operation.
6211  * Negative indexing is supported.
6212  * Requires 1 input and produces 1 output.
6213  * Parameters:
6214  *    axes: dimensions along which to perform reduction
6215  *    keepDims: if True, keep the reduced dimensions (value will be 1), otherwise, reduced dimensions are squeezed
6216  *    reduceAll: ignore the "axes" parameter, perform reduction along all axes
6217  */
6218 struct  _CoreML__Specification__ReduceLogSumExpLayerParams
6219 {
6220   ProtobufCMessage base;
6221   size_t n_axes;
6222   int64_t *axes;
6223   protobuf_c_boolean keepdims;
6224   protobuf_c_boolean reduceall;
6225 };
6226 #define CORE_ML__SPECIFICATION__REDUCE_LOG_SUM_EXP_LAYER_PARAMS__INIT \
6227  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reduce_log_sum_exp_layer_params__descriptor) \
6228     , 0,NULL, 0, 0 }
6229 
6230 
6231 /*
6232  **
6233  * A layer that increases the rank of the input tensor by adding unit dimensions.
6234  * Requires 1 input and produces 1 output.
6235  * e.g.:
6236  * input shape = (10,5)
6237  * axes = (0,1)
6238  * output shape = (1,1,10,5)
6239  * input shape = (10,5)
6240  * axes = (0,2)
6241  * output shape = (1,10,1,5)
6242  * input shape = (10,5)
6243  * axes = (-2,-1)
6244  * output shape = (10,5,1,1)
6245  */
6246 struct  _CoreML__Specification__ExpandDimsLayerParams
6247 {
6248   ProtobufCMessage base;
6249   /*
6250    **
6251    * Axis values provided here get dimension 1 in the output tensor.
6252    * Negative indexing is supported.
6253    */
6254   size_t n_axes;
6255   int64_t *axes;
6256 };
6257 #define CORE_ML__SPECIFICATION__EXPAND_DIMS_LAYER_PARAMS__INIT \
6258  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__expand_dims_layer_params__descriptor) \
6259     , 0,NULL }
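
/*
 * Editorial usage sketch (not emitted by protoc-c): the axes below reproduce the
 * (10,5) -> (1,10,1,5) example from the comment above.
 * .. code::
 *      static int64_t expand_axes[2] = {0, 2};
 *      CoreML__Specification__ExpandDimsLayerParams expand =
 *          CORE_ML__SPECIFICATION__EXPAND_DIMS_LAYER_PARAMS__INIT;
 *      expand.n_axes = 2;
 *      expand.axes = expand_axes;
 */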
6260 
6261 
6262 /*
6263  **
6264  * A layer that flattens the input tensor into a 2-dimensional matrix.
6265  * Requires 1 input and produces 1 output.
6266  * Output tensor is always rank 2.
6267  * First dimension of output is the product of all the dimensions in input[:axis] ("axis" is exclusive)
6268  * Second dimension of output is the product of all the dimensions in input[axis:] ("axis" is inclusive)
6269  * e.g.:
6270  * input shape:  (3,)
6271  * axis:  -1
6272  * output shape:  (1, 3)
6273  * input shape:  (3,)
6274  * axis:  1
6275  * output shape:  (3, 1)
6276  * input shape:  (4, 3)
6277  * axis:  -1
6278  * output shape:  (4, 3)
6279  * input shape:  (5, 2)
6280  * axis:  0
6281  * output shape:  (1, 10)
6282  * input shape:  (5, 5, 3)
6283  * axis:  -2
6284  * output shape:  (5, 15)
6285  * input shape:  (2, 3, 2)
6286  * axis:  -1
6287  * output shape:  (6, 2)
6288  */
6289 struct  _CoreML__Specification__FlattenTo2DLayerParams
6290 {
6291   ProtobufCMessage base;
6292   int64_t axis;
6293 };
6294 #define CORE_ML__SPECIFICATION__FLATTEN_TO2_DLAYER_PARAMS__INIT \
6295  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__flatten_to2_dlayer_params__descriptor) \
6296     , 0 }
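
/*
 * Editorial usage sketch (not emitted by protoc-c): axis = -2 matches the
 * (5, 5, 3) -> (5, 15) example from the comment above.
 * .. code::
 *      CoreML__Specification__FlattenTo2DLayerParams flatten =
 *          CORE_ML__SPECIFICATION__FLATTEN_TO2_DLAYER_PARAMS__INIT;
 *      flatten.axis = -2;
 */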
6297 
6298 
6299 /*
6300  **
6301  * A layer that reshapes a tensor.
6302  * Requires 1 input and produces 1 output.
6303  * Output tensor is the reshaped version of the input and has shape as specified in the
6304  * parameter "targetShape".
6305  */
6306 struct  _CoreML__Specification__ReshapeStaticLayerParams
6307 {
6308   ProtobufCMessage base;
6309   size_t n_targetshape;
6310   int64_t *targetshape;
6311 };
6312 #define CORE_ML__SPECIFICATION__RESHAPE_STATIC_LAYER_PARAMS__INIT \
6313  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reshape_static_layer_params__descriptor) \
6314     , 0,NULL }
6315 
6316 
6317 /*
6318  **
6319  * A layer that reshapes a tensor.
6320  * Requires 2 inputs and produces 1 output.
6321  * First input is reshaped to produce the output, while the second input is only
6322  * used to determine the shape of the output. Values of the second input are not used.
6323  * Output is a tensor with the same shape as the second input.
6324  */
6325 struct  _CoreML__Specification__ReshapeLikeLayerParams
6326 {
6327   ProtobufCMessage base;
6328 };
6329 #define CORE_ML__SPECIFICATION__RESHAPE_LIKE_LAYER_PARAMS__INIT \
6330  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reshape_like_layer_params__descriptor) \
6331      }
6332 
6333 
6334 /*
6335  **
6336  * A layer that reshapes a tensor.
6337  * Requires 2 inputs and produces 1 output.
6338  * First input is the one that is reshaped to produce the output.
6339  * Second input is a rank 1 tensor specifying the shape of the output.
6340  * Output tensor has shape as specified by the values in the 2nd input tensor.
6341  */
6342 struct  _CoreML__Specification__ReshapeDynamicLayerParams
6343 {
6344   ProtobufCMessage base;
6345 };
6346 #define CORE_ML__SPECIFICATION__RESHAPE_DYNAMIC_LAYER_PARAMS__INIT \
6347  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__reshape_dynamic_layer_params__descriptor) \
6348      }
6349 
6350 
6351 /*
6352  **
6353  * A layer that decreases the rank of the input tensor by removing unit dimensions.
6354  * Requires 1 input and produces 1 output.
6355  * Output rank is one less than input rank, if input rank is more than 1.
6356  * If input rank is 1, output rank is also 1.
6357  * e.g.:
6358  * input shape = (1,1,10,5)
6359  * axes = (0,1)
6360  * output shape = (10,5)
6361  * input shape = (1,10,5,1)
6362  * axes = (0,3)
6363  * output shape = (10,5)
6364  * input shape = (10,5,1,1)
6365  * axes = (-2,-1)
6366  * output shape = (10,5)
6367  * input shape = (1,)
6368  * axes = (0)
6369  * output shape = (1,)
6370  */
6371 struct  _CoreML__Specification__SqueezeLayerParams
6372 {
6373   ProtobufCMessage base;
6374   /*
6375    **
6376    * Axis values provided here get removed from the input tensor.
6377    * Negative indexing is supported.
6378    */
6379   size_t n_axes;
6380   int64_t *axes;
6381   /*
6382    * if true squeeze all dimensions that are 1.
6383    */
6384   protobuf_c_boolean squeezeall;
6385 };
6386 #define CORE_ML__SPECIFICATION__SQUEEZE_LAYER_PARAMS__INIT \
6387  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__squeeze_layer_params__descriptor) \
6388     , 0,NULL, 0 }
6389 
6390 
6391 /*
6392  **
6393  * A layer that returns top K (or bottom K) values and the corresponding indices
6394  * of the input along a given axis.
6395  * Requires 1 or 2 inputs and produces 2 outputs.
6396  * The second input is the value of the K, and is optional.
6397  * If there is only one input, value of K that is specified in the layer parameter is used.
6398  * Both outputs have the same rank as the first input.
6399  * Second input must correspond to a scalar tensor.
6400  * e.g.:
6401  * first input's shape = (45, 34, 10, 5)
6402  * axis = 1
6403  * output shape, for both outputs = (45, K, 10, 5)
6404  */
6405 struct  _CoreML__Specification__TopKLayerParams
6406 {
6407   ProtobufCMessage base;
6408   /*
6409    * /  negative indexing is supported
6410    */
6411   int64_t axis;
6412   /*
6413    * / is ignored if a second input is present.
6414    */
6415   uint64_t k;
6416   /*
6417    * / if true, bottom K (values, indices) are returned instead
6418    */
6419   protobuf_c_boolean usebottomk;
6420 };
6421 #define CORE_ML__SPECIFICATION__TOP_KLAYER_PARAMS__INIT \
6422  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__top_klayer_params__descriptor) \
6423     , 0, 0, 0 }
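
/*
 * Editorial usage sketch (not emitted by protoc-c): requesting the top 3 values along
 * axis 1; per the comment above, the "k" parameter is ignored when a second input
 * supplies K. Values are illustrative only.
 * .. code::
 *      CoreML__Specification__TopKLayerParams topk =
 *          CORE_ML__SPECIFICATION__TOP_KLAYER_PARAMS__INIT;
 *      topk.axis = 1;
 *      topk.k = 3;
 *      topk.usebottomk = 0;       // set to 1 to return the bottom K instead
 */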
6424 
6425 
6426 /*
6427  **
6428  * A layer that returns the indices of the maximum value along a specified axis in a tensor.
6429  * Requires 1 input and produces 1 output. Negative indexing is supported.
6430  * Output has the same rank as the input if "removeDim" is False (default).
6431  * Output has rank one less than the input if "removeDim" is True and input rank is more than 1.
6432  * e.g.:
6433  * input shape = (45, 34, 10, 5)
6434  * axis = -2
6435  * output shape = (45, 1, 10, 5), if removeDim = False (default)
6436  * output shape = (45, 10, 5), if removeDim = True
6437  * input shape = (5,)
6438  * axis = 0
6439  * output shape = (1,), if removeDim = False or True
6440  */
6441 struct  _CoreML__Specification__ArgMaxLayerParams
6442 {
6443   ProtobufCMessage base;
6444   int64_t axis;
6445   protobuf_c_boolean removedim;
6446 };
6447 #define CORE_ML__SPECIFICATION__ARG_MAX_LAYER_PARAMS__INIT \
6448  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__arg_max_layer_params__descriptor) \
6449     , 0, 0 }
6450 
6451 
6452 /*
6453  **
6454  * A layer that returns the indices of the minimum value along a specified axis in a tensor.
6455  * Requires 1 input and produces 1 output. Negative indexing is supported.
6456  * Output has the same rank as the input if "removeDim" is False (default).
6457  * Output has rank one less than the input if "removeDim" is True and input rank is more than 1.
6458  * e.g.:
6459  * input shape = (45, 34, 10, 5)
6460  * axis = -2
6461  * output shape = (45, 1, 10, 5), if removeDim = False (default)
6462  * output shape = (45, 10, 5), if removeDim = True
6463  * input shape = (5,)
6464  * axis = 0
6465  * output shape = (1,), if removeDim = False or True
6466  */
6467 struct  _CoreML__Specification__ArgMinLayerParams
6468 {
6469   ProtobufCMessage base;
6470   int64_t axis;
6471   protobuf_c_boolean removedim;
6472 };
6473 #define CORE_ML__SPECIFICATION__ARG_MIN_LAYER_PARAMS__INIT \
6474  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__arg_min_layer_params__descriptor) \
6475     , 0, 0 }
6476 
6477 
6478 /*
6479  **
 * A layer that splits the input tensor into multiple output tensors,
6481  * along the specified axis.
6482  * The layer either uniformly splits the input tensor into ``num_splits`` tensors, or
6483  * splits according to the given split sizes in ``split_sizes``.
6484  * Supports unequal splits and negative indexing.
6485  * Requires 1 input and produces at least 2 outputs.
6486  * Rank of all the outputs is same as that of the input.
6487  * If parameter "splitSizes" is provided, value of the parameter "numSplits" is ignored, since in that case
6488  * "numSplits" is automatically inferred to be the length of "splitSizes".
6489  * e.g.:
6490  * input shape:  (5, 3, 4)
6491  * axis = -3, split_sizes = [3, 2]
6492  * output shape:  (3, 3, 4)
6493  * output shape:  (2, 3, 4)
6494  */
6495 struct  _CoreML__Specification__SplitNDLayerParams
6496 {
6497   ProtobufCMessage base;
6498   int64_t axis;
6499   uint64_t numsplits;
6500   size_t n_splitsizes;
6501   uint64_t *splitsizes;
6502 };
6503 #define CORE_ML__SPECIFICATION__SPLIT_NDLAYER_PARAMS__INIT \
6504  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__split_ndlayer_params__descriptor) \
6505     , 0, 0, 0,NULL }
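
/*
 * Editorial usage sketch (not emitted by protoc-c): the values reproduce the unequal
 * split example from the comment above, (5, 3, 4) -> (3, 3, 4) and (2, 3, 4). Because
 * "splitSizes" is provided, "numSplits" is inferred from its length and left at 0 here.
 * .. code::
 *      static uint64_t split_sizes[2] = {3, 2};
 *      CoreML__Specification__SplitNDLayerParams split =
 *          CORE_ML__SPECIFICATION__SPLIT_NDLAYER_PARAMS__INIT;
 *      split.axis = -3;
 *      split.n_splitsizes = 2;
 *      split.splitsizes = split_sizes;
 */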
6506 
6507 
6508 /*
6509  **
6510  * A layer that performs element-wise ceil operation on the input tensor that
6511  * rounds the value to the smallest integer not less than x.
6512  * Requires 1 input and produces 1 output.
6513  * Output shape is same as the input.
6514  */
6515 struct  _CoreML__Specification__CeilLayerParams
6516 {
6517   ProtobufCMessage base;
6518 };
6519 #define CORE_ML__SPECIFICATION__CEIL_LAYER_PARAMS__INIT \
6520  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__ceil_layer_params__descriptor) \
6521      }
6522 
6523 
6524 /*
6525  **
6526  * A layer that performs element-wise round operation on the input tensor
6527  * that rounds the value to the nearest integer.
6528  * Requires 1 input and produces 1 output.
6529  * Output shape is same as the input.
6530  */
6531 struct  _CoreML__Specification__RoundLayerParams
6532 {
6533   ProtobufCMessage base;
6534 };
6535 #define CORE_ML__SPECIFICATION__ROUND_LAYER_PARAMS__INIT \
6536  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__round_layer_params__descriptor) \
6537      }
6538 
6539 
6540 /*
6541  **
6542  * A layer that performs element-wise floor operation on the input tensor
6543  * that rounds the value to the largest integer not greater than x.
6544  * Requires 1 input and produces 1 output.
6545  * Output shape is same as the input.
6546  */
6547 struct  _CoreML__Specification__FloorLayerParams
6548 {
6549   ProtobufCMessage base;
6550 };
6551 #define CORE_ML__SPECIFICATION__FLOOR_LAYER_PARAMS__INIT \
6552  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__floor_layer_params__descriptor) \
6553      }
6554 
6555 
6556 /*
6557  **
6558  * A layer that performs element-wise sign operation (+1 for positive values,
6559  * -1 for negative values, 0 for zeros).
6560  * Requires 1 input and produces 1 output.
6561  * Output shape is same as the input.
6562  */
6563 struct  _CoreML__Specification__SignLayerParams
6564 {
6565   ProtobufCMessage base;
6566 };
6567 #define CORE_ML__SPECIFICATION__SIGN_LAYER_PARAMS__INIT \
6568  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__sign_layer_params__descriptor) \
6569      }
6570 
6571 
6572 /*
6573  **
6574  * A layer that performs element-wise clip operation. Clip the values in the
6575  * input tensor to the threshold values [min_value, max_value].
6576  * Requires 1 input and produces 1 output.
6577  * Parameter minVal: the minimum threshold.
6578  * Parameter maxVal: the maximum threshold.
6579  * output =  min(max(input, minVal), maxVal)
6580  * Output shape is same as the input.
6581  */
6582 struct  _CoreML__Specification__ClipLayerParams
6583 {
6584   ProtobufCMessage base;
6585   float minval;
6586   float maxval;
6587 };
6588 #define CORE_ML__SPECIFICATION__CLIP_LAYER_PARAMS__INIT \
6589  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__clip_layer_params__descriptor) \
6590     , 0, 0 }
6591 
6592 
6593 /*
6594  **
6595  * A layer that extracts a slice of size ``(end - begin) / stride``
6596  * from the given input tensor.
 * Supports negative indexing and negative strides.
6598  * Requires 1 input and produces 1 output.
6599  * Output rank is same as the input rank.
6600  * Value of beginIds, beginMasks, endIds, endMasks, strides are required parameters.
6601  * Lengths of all the parameters must equal the rank of the input.
6602  * i-th element of "beginIds" is ignored and assumed to be 0 if the i-th element of
6603  * "beginMasks" is True
6604  * i-th element of "endIds" is ignored and assumed to be -1 if the i-th element of
6605  * "endMasks" is True
 * If the i-th element of "squeezeMasks" is set to True, only beginIds[i] is sliced
 * out for that dimension, and all other masks and inputs are ignored.
6609  * e.g. (without squeezeMasks):
6610  * input shape:  (5, 5, 5)
6611  * beginIds:  [1, 2, 3]
6612  * beginMasks:  [True, False, True]
6613  * endIds:  [3, -3, 2]
6614  * endMasks:  [False, True, True]
6615  * strides:  [2, 2, 2]
6616  * SqueezeMasks:  [False, False, False]
6617  * output shape:  (2, 2, 3)
6618  * This is equivalent to input[:3:2, 2::2, ::2]
6619  * e.g. (with squeezeMasks):
6620  * input shape:  (5, 5, 5)
6621  * beginIds:  [1, 2, 3]
6622  * beginMasks:  [True, False, True]
6623  * endIds:  [3, -3, 2]
6624  * endMasks:  [False, True, True]
6625  * strides:  [2, 2, 2]
6626  * SqueezeMasks:  [False, True, False]
6627  * output shape:  (2, 3)
6628  * This is equivalent to input[:3:2, 2, ::2]
6629  */
6630 struct  _CoreML__Specification__SliceStaticLayerParams
6631 {
6632   ProtobufCMessage base;
6633   size_t n_beginids;
6634   int64_t *beginids;
6635   size_t n_beginmasks;
6636   protobuf_c_boolean *beginmasks;
6637   size_t n_endids;
6638   int64_t *endids;
6639   size_t n_endmasks;
6640   protobuf_c_boolean *endmasks;
6641   size_t n_strides;
6642   int64_t *strides;
6643   size_t n_squeezemasks;
6644   protobuf_c_boolean *squeezemasks;
6645 };
6646 #define CORE_ML__SPECIFICATION__SLICE_STATIC_LAYER_PARAMS__INIT \
6647  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__slice_static_layer_params__descriptor) \
6648     , 0,NULL, 0,NULL, 0,NULL, 0,NULL, 0,NULL, 0,NULL }
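
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): populating a
 * SliceStaticLayerParams message to reproduce the "without squeezeMasks"
 * example above, i.e. input[:3:2, 2::2, ::2] on a rank-3 input of shape (5, 5, 5).
 *
 *     static int64_t begin_ids[3]                = {1, 2, 3};
 *     static protobuf_c_boolean begin_masks[3]   = {1, 0, 1};
 *     static int64_t end_ids[3]                  = {3, -3, 2};
 *     static protobuf_c_boolean end_masks[3]     = {0, 1, 1};
 *     static int64_t strides[3]                  = {2, 2, 2};
 *     static protobuf_c_boolean squeeze_masks[3] = {0, 0, 0};
 *
 *     CoreML__Specification__SliceStaticLayerParams slice =
 *         CORE_ML__SPECIFICATION__SLICE_STATIC_LAYER_PARAMS__INIT;
 *     slice.n_beginids = 3;     slice.beginids = begin_ids;
 *     slice.n_beginmasks = 3;   slice.beginmasks = begin_masks;
 *     slice.n_endids = 3;       slice.endids = end_ids;
 *     slice.n_endmasks = 3;     slice.endmasks = end_masks;
 *     slice.n_strides = 3;      slice.strides = strides;
 *     slice.n_squeezemasks = 3; slice.squeezemasks = squeeze_masks;
 */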
6649 
6650 
6651 /*
6652  **
6653  * A layer that extracts a slice of size ``(end - begin) / stride``
6654  * from the given input tensor.
6655  * Supports negative indexing and negative strides.
6656  * See "SliceStaticLayerParams" for the description and an example of the functionality of the layer.
6657  * Requires 2 to 7 inputs and produces 1 output.
6658  * Rank of the output is the same as the rank of the first input unless squeezeMask is set.
6659  * Values of beginIds, beginMasks, endIds, endMasks, and strides can be passed in either
6660  * as dynamic inputs or as static parameters.
6661  * Lengths of all the parameters, and of inputs 2 through 7 when present, must equal the rank of the first input.
6662  * The 2nd input represents the "beginIds".
6663  * The 3rd input, if present, corresponds to "endIds". In this case the value of the "endIds" parameter is ignored.
6664  * The 4th input, if present, corresponds to "strides". In this case the value of the "strides" parameter is ignored.
6665  * The 5th input, if present, corresponds to "beginMasks". In this case the value of the "beginMasks" parameter is ignored.
6666  * The 6th input, if present, corresponds to "endMasks". In this case the value of the "endMasks" parameter is ignored.
6667  * The 7th input, if present, corresponds to "squeezeMasks". In this case the value of the "squeezeMasks" parameter is ignored.
6668  */
6669 struct  _CoreML__Specification__SliceDynamicLayerParams
6670 {
6671   ProtobufCMessage base;
6672   size_t n_beginmasks;
6673   protobuf_c_boolean *beginmasks;
6674   size_t n_endids;
6675   int64_t *endids;
6676   size_t n_endmasks;
6677   protobuf_c_boolean *endmasks;
6678   size_t n_strides;
6679   int64_t *strides;
6680   size_t n_squeezemasks;
6681   protobuf_c_boolean *squeezemasks;
6682 };
6683 #define CORE_ML__SPECIFICATION__SLICE_DYNAMIC_LAYER_PARAMS__INIT \
6684  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__slice_dynamic_layer_params__descriptor) \
6685     , 0,NULL, 0,NULL, 0,NULL, 0,NULL, 0,NULL }
6686 
6687 
6688 /*
6689  **
6690  * A layer that constructs a tensor by repeating the input tensor a specified
6691  * number of times along each dimension.
6692  * Requires 1 or 2 inputs and produces 1 output.
6693  * Output rank is same as the input rank.
6694  * If two inputs are provided, the second input is used as "reps"
6695  * and the "reps" parameter is ignored.
6696  * If only one input is provided,
6697  * the length of the "reps" parameter must be at least 1 and
6698  * not greater than the rank of the input.
6699  * If it is less than the input rank, it is made equal to the input rank by prepending 1's to it.
6700  * e.g.:
6701  * input shape = (2, 4, 2)
6702  * reps = (1, 2, 6)
6703  * output shape = (2, 8, 12)
6704  * input shape = (2, 4, 2)
6705  * reps = (6)
6706  * reps after prepending ones = (1, 1, 6)
6707  * output shape = (2, 4, 12)
6708  * input shape = (2, 4, 2)
6709  * second input = [1, 2, 6] -> shape: (3,)
6710  * reps = N/A [Ignored]
6711  * output shape = (2, 8, 12)
6712  */
6713 struct  _CoreML__Specification__TileLayerParams
6714 {
6715   ProtobufCMessage base;
6716   size_t n_reps;
6717   uint64_t *reps;
6718 };
6719 #define CORE_ML__SPECIFICATION__TILE_LAYER_PARAMS__INIT \
6720  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__tile_layer_params__descriptor) \
6721     , 0,NULL }
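
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): the
 * single-input form of TileLayerParams for the first example above
 * (input shape (2, 4, 2), reps (1, 2, 6), output shape (2, 8, 12)).
 *
 *     static uint64_t reps[3] = {1, 2, 6};
 *
 *     CoreML__Specification__TileLayerParams tile =
 *         CORE_ML__SPECIFICATION__TILE_LAYER_PARAMS__INIT;
 *     tile.n_reps = 3;
 *     tile.reps = reps;
 */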
6722 
6723 
6724 /*
6725  **
6726  * A layer that returns the shape of an input tensor.
6727  * Requires 1 input and produces 1 output.
6728  * Input: a tensor.
6729  * Output: a vector of length R, where R is the rank of the input tensor
6730  * Output is always a rank 1 tensor.
6731  */
6732 struct  _CoreML__Specification__GetShapeLayerParams
6733 {
6734   ProtobufCMessage base;
6735 };
6736 #define CORE_ML__SPECIFICATION__GET_SHAPE_LAYER_PARAMS__INIT \
6737  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__get_shape_layer_params__descriptor) \
6738      }
6739 
6740 
6741 /*
6742  **
6743  * A layer that computes the Gauss error function,
6744  * which is defined as:
6745  * .. math::
6746  *     f(x) = \dfrac{1}{\sqrt{\pi}}\int_{-x}^{x}{e^{-t^2}dt}
6747  * Requires 1 input and produces 1 output.
6748  * Output shape is same as the input.
6749  */
6750 struct  _CoreML__Specification__ErfLayerParams
6751 {
6752   ProtobufCMessage base;
6753 };
6754 #define CORE_ML__SPECIFICATION__ERF_LAYER_PARAMS__INIT \
6755  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__erf_layer_params__descriptor) \
6756      }
6757 
6758 
6759 /*
6760  **
6761  * A layer that evaluates the Gaussian Error Linear Unit (GELU) activation.
6762  * Following equations are used to compute the activation based on the value of the "mode" parameter:
6763  * mode == 'EXACT':
6764  * .. math::
6765  *     f(x) = 0.5x\left ( 1+\rm{erf}\left ( \frac{x}{\sqrt{2}} \right ) \right )
6766  * mode == 'TANH_APPROXIMATION':
6767  * .. math::
6768  *     f(x) = 0.5x\left ( 1+\rm{tanh}\left ( \sqrt{2/\pi}\left ( x + 0.044715x^3 \right ) \right ) \right )
6769  * mode == 'SIGMOID_APPROXIMATION':
6770  * .. math::
6771  *     f(x) = x*\rm{sigmoid}(1.702x)
6772  * Requires 1 input and produces 1 output.
6773  * Output shape is same as the input.
6774  */
6775 struct  _CoreML__Specification__GeluLayerParams
6776 {
6777   ProtobufCMessage base;
6778   /*
6779    * / mode of GELU operation.
6780    */
6781   CoreML__Specification__GeluLayerParams__GeluMode mode;
6782 };
6783 #define CORE_ML__SPECIFICATION__GELU_LAYER_PARAMS__INIT \
6784  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__gelu_layer_params__descriptor) \
6785     , CORE_ML__SPECIFICATION__GELU_LAYER_PARAMS__GELU_MODE__EXACT }
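
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): selecting the
 * GELU mode. Only the EXACT enumerator is shown because it is the one used by
 * the INIT macro above; the approximation modes follow the same naming pattern.
 *
 *     CoreML__Specification__GeluLayerParams gelu =
 *         CORE_ML__SPECIFICATION__GELU_LAYER_PARAMS__INIT;
 *     gelu.mode = CORE_ML__SPECIFICATION__GELU_LAYER_PARAMS__GELU_MODE__EXACT;
 */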
6786 
6787 
6788 /*
6789  **
6790  * RangeStatic layer that returns a tensor that contains evenly spaced values.
6791  * It is similar in functionality to the numpy.arange method.
6792  * Requires no input and produces 1 output.
6793  * Output is a rank 1 tensor.
6794  */
6795 struct  _CoreML__Specification__RangeStaticLayerParams
6796 {
6797   ProtobufCMessage base;
6798   float endvalue;
6799   float startvalue;
6800   float stepsizevalue;
6801 };
6802 #define CORE_ML__SPECIFICATION__RANGE_STATIC_LAYER_PARAMS__INIT \
6803  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__range_static_layer_params__descriptor) \
6804     , 0, 0, 0 }
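
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): assuming
 * numpy.arange-style semantics as described above, this configuration would
 * produce the rank-1 tensor [0, 2, 4, 6, 8].
 *
 *     CoreML__Specification__RangeStaticLayerParams range_params =
 *         CORE_ML__SPECIFICATION__RANGE_STATIC_LAYER_PARAMS__INIT;
 *     range_params.startvalue    = 0.0f;
 *     range_params.endvalue      = 10.0f;  // end is exclusive, as in numpy.arange
 *     range_params.stepsizevalue = 2.0f;
 */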
6805 
6806 
6807 /*
6808  **
6809  * A layer that returns a tensor that contains evenly spaced values.
6810  * Its functionality is similar to the numpy.arange method.
6811  * Requires at least 1 input, up to a maximum of 3 inputs.
6812  * Produces 1 output, which is a rank 1 tensor.
6813  * Each input must be a scalar, or rank 1 and shape (1,).
6814  * The first input represents the "endValue".
6815  * The second input, if present, corresponds to "startValue". In this case the value of the "startValue" parameter is ignored.
6816  * The third input, if present, corresponds to "stepSizeValue". In this case the value of the "stepSizeValue" parameter is ignored.
6817  */
6818 struct  _CoreML__Specification__RangeDynamicLayerParams
6819 {
6820   ProtobufCMessage base;
6821   float startvalue;
6822   float stepsizevalue;
6823 };
6824 #define CORE_ML__SPECIFICATION__RANGE_DYNAMIC_LAYER_PARAMS__INIT \
6825  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__range_dynamic_layer_params__descriptor) \
6826     , 0, 0 }
6827 
6828 
6829 /*
6830  **
6831  * A layer that returns a tensor containing all windows of size ``windowSize``
6832  * separated by ``step`` along the dimension ``axis``.
6833  * .. code::
6834  *      y = SlidingWindows(x)
6835  * Requires 1 input and produces 1 output.
6836  * Input
6837  *     An N-Dimensional tensor.
6838  * Output
6839  *     An (N+1)-Dimensional tensor.
6840  * This operation behaves as follows:
6841  *      - if axis = 0 & input is rank 1 (L,). Output shape will be (M, W).
6842  *      - if axis = 1 & input is rank 3 (B1, L, C1). Output shape will be (B1, M, W, C1)
6843  *      - if axis = 2 & input is rank 5 (B1, B2, L, C1, C2) --> (B1 * B2, L, C1 * C2) --> (B1 * B2, M, W, C1 * C2). Output shape will be (B1, B2, M, W, C1, C2)
6844  *      - etc.
6845  * where
6846  *      - L, C, B refer to input length, feature dimension length & batch size respectively
6847  *      - W is the window size.
6848  *      - M is the number of windows/slices calculated as M = (L - W) / step + 1
6849  */
6850 struct  _CoreML__Specification__SlidingWindowsLayerParams
6851 {
6852   ProtobufCMessage base;
6853   int64_t axis;
6854   uint64_t windowsize;
6855   uint64_t step;
6856 };
6857 #define CORE_ML__SPECIFICATION__SLIDING_WINDOWS_LAYER_PARAMS__INIT \
6858  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__sliding_windows_layer_params__descriptor) \
6859     , 0, 0, 0 }
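
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): for a rank-1
 * input of length L = 10 with windowSize W = 4 and step = 2, the formula above
 * gives M = (10 - 4) / 2 + 1 = 4 windows, so the output shape is (4, 4).
 *
 *     CoreML__Specification__SlidingWindowsLayerParams sw =
 *         CORE_ML__SPECIFICATION__SLIDING_WINDOWS_LAYER_PARAMS__INIT;
 *     sw.axis       = 0;
 *     sw.windowsize = 4;
 *     sw.step       = 2;
 */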
6860 
6861 
6862 /*
6863  **
6864  * A layer that applies layer normalization over the input tensor.
6865  * Requires 1 input and produces 1 output.
6866  * output = gamma * (input - computed_mean) / (sqrt(computed_variance + eps)) + beta
6867  * Parameters
6868  *     normalizedShape: the subset of the input shape along which layer normalization is performed; the rest of the input shape is treated as the batch dimension. The mean and variance are computed for the input over the last few dimensions, as specified by the normalizedShape parameter.
6869  *     gamma: must have shape = "normalizedShape"
6870  *     beta: must have shape = "normalizedShape"
6871  *     eps: small constant to avoid division by 0
6872  * Output shape is same as the input.
6873  * e.g.:
6874  * input shape = (10,5)
6875  * normalized shape = (5,) or (10,5)
6876  * input shape = (10,5,6,7)
6877  * normalized shape = (7,) or (6,7) or (5,6,7) or (10,5,6,7)
6878  */
6879 struct  _CoreML__Specification__LayerNormalizationLayerParams
6880 {
6881   ProtobufCMessage base;
6882   size_t n_normalizedshape;
6883   int64_t *normalizedshape;
6884   float eps;
6885   CoreML__Specification__WeightParams *gamma;
6886   CoreML__Specification__WeightParams *beta;
6887 };
6888 #define CORE_ML__SPECIFICATION__LAYER_NORMALIZATION_LAYER_PARAMS__INIT \
6889  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__layer_normalization_layer_params__descriptor) \
6890     , 0,NULL, 0, NULL, NULL }
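
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): normalizing
 * over the last dimension of a (10, 5) input. gamma and beta point at
 * CoreML__Specification__WeightParams messages (declared elsewhere) whose shape
 * must match normalizedShape; constructing those is omitted here.
 *
 *     static int64_t normalized_shape[1] = {5};
 *
 *     CoreML__Specification__LayerNormalizationLayerParams ln =
 *         CORE_ML__SPECIFICATION__LAYER_NORMALIZATION_LAYER_PARAMS__INIT;
 *     ln.n_normalizedshape = 1;
 *     ln.normalizedshape   = normalized_shape;
 *     ln.eps               = 1e-5f;  // illustrative value
 *     // ln.gamma = ...;  ln.beta = ...;
 */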
6891 
6892 
6893 /*
6894  **
6895  * Non maximum suppression (NMS) layer.
6896  * Applies the non maximum suppression algorithm to input bounding box coordinates.
6897  * The effect of this layer is similar to the functionality of the "NonMaximumSuppression"
6898  * model type (for details please see NonMaximumSuppression.proto) with a couple of differences.
6899  * First, this is a layer in a neural network model, whereas that is a separate model type. Second,
6900  * this layer supports a batch of bounding boxes.
6901  * The NMS layer requires at least 2 inputs, and up to a maximum of 5 inputs. It produces 4 outputs.
6902  * Following is the description of inputs and outputs:
6903  * input 1, shape (B,N,4): coordinates of N boxes, for a batch size B.
6904  * input 2, shape (B,N,C): class scores for each box. C can be 1 when there is only 1 score per box, i.e., no class specific score.
6905  * input 3, optional, shape (1,): IoU threshold. When present, it overwrites the value provided in layer parameter "iouThreshold".
6906  * input 4, optional, shape (1,): Score threshold. When present, it overwrites the value provided in layer parameter "scoreThreshold".
6907  * input 5, optional, shape (1,): Maximum number of boxes. When present, it overwrites the value provided in layer parameter "maxBoxes".
6908  * output 1, shape (B,maxBoxes,4): box coordinates, corresponding to the surviving boxes.
6909  * output 2, shape (B,maxBoxes,C): box scores, corresponding to the surviving boxes.
6910  * output 3, shape (B,maxBoxes): indices of the surviving boxes. Hence it will have values in the range [0,N-1], except for padding.
6911  * output 4, shape (B,): number of boxes selected after the NMS algorithm, for each batch.
6912  * When fewer than "maxBoxes" boxes survive, the first 3 outputs are padded.
6913  * For the first two outputs, the padding is done using the value 0, whereas for the third output the
6914  * padding value used is -1, since the output values represent indices.
6915  * If no box survives, that is, all the scores are below the "scoreThreshold",
6916  * then for that batch, the number of boxes (the value of the fourth output) will be 1. The first 3 outputs will
6917  * correspond to the box with the highest score. This is to avoid generating an "empty" output.
6918  * The four values that describe the box dimensions are (in order):
6919  *  - x (center location of the box along the horizontal axis)
6920  *  - y (center location of the box along the vertical axis)
6921  *  - width (size of box along the horizontal axis)
6922  *  - height (size of the box along the vertical axis)
6923  * In each batch,
6924  * the N scores for N boxes, used for suppression, are generated by taking the max of the matrix (N,C)
6925  * along the columns.
6926  * If "perClassSuppression" flag is false, suppression happens across all classes.
6927  * If "perClassSuppression" flag is true, each box is assigned to the class with the highest
6928  * score and then the suppression happens separately for boxes within the same class.
6929  * Note that the 4th output can be used to dynamically slice the first 3 outputs, in case
6930  * the padded outputs are not required.
6931  */
6932 struct  _CoreML__Specification__NonMaximumSuppressionLayerParams
6933 {
6934   ProtobufCMessage base;
6935   /*
6936    **
6937    * The intersection over union (IoU) threshold over which boxes are suppressed.
6938    */
6939   float iouthreshold;
6940   /*
6941    **
6942    * Before IoU suppression is performed, boxes with class scores below this threshold are rejected.
6943    */
6944   float scorethreshold;
6945   /*
6946    **
6947    * The maximum number of boxes to be given out as output.
6948  * If the number of surviving boxes is less than this, the output is padded up to this number.
6949    */
6950   uint64_t maxboxes;
6951   /*
6952    **
6953    * If true, suppression is performed independently within boxes of each class.
6954    */
6955   protobuf_c_boolean perclasssuppression;
6956 };
6957 #define CORE_ML__SPECIFICATION__NON_MAXIMUM_SUPPRESSION_LAYER_PARAMS__INIT \
6958  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__non_maximum_suppression_layer_params__descriptor) \
6959     , 0, 0, 0, 0 }
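
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): static NMS
 * parameters; the optional 3rd-5th inputs of the layer, when supplied, override
 * these values at runtime.
 *
 *     CoreML__Specification__NonMaximumSuppressionLayerParams nms =
 *         CORE_ML__SPECIFICATION__NON_MAXIMUM_SUPPRESSION_LAYER_PARAMS__INIT;
 *     nms.iouthreshold        = 0.5f;   // suppress boxes overlapping above 0.5 IoU
 *     nms.scorethreshold      = 0.25f;  // reject boxes scoring below 0.25 first
 *     nms.maxboxes            = 100;    // pad the outputs up to 100 boxes
 *     nms.perclasssuppression = 1;      // suppress separately within each class
 */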
6960 
6961 
6962 /*
6963  **
6964  * A layer that performs element-wise clamped ReLU operation.
6965  * Requires 1 input and produces 1 output.
6966  * This function has the following formula:
6967  * .. math::
6968  *     f(x) = \begin{cases}
6969  *               \text{min}(\text{beta},x) \;\; \text{if} \;\; x \geq 0\\
6970  *               \text{min}(\text{beta} ,\text{alpha}\cdot x) \;\; \text{if} \;\; x<0
6971  *            \end{cases}
6972  * Output shape is same as the input.
6973  * Available (iOS >= 14, macOS >= 11.0, watchOS >= 7)
6974  */
6975 struct  _CoreML__Specification__ClampedReLULayerParams
6976 {
6977   ProtobufCMessage base;
6978   float alpha;
6979   float beta;
6980 };
6981 #define CORE_ML__SPECIFICATION__CLAMPED_RE_LULAYER_PARAMS__INIT \
6982  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__clamped_re_lulayer_params__descriptor) \
6983     , 0, 0 }
6984 
6985 
6986 /*
6987  **
6988  * A layer that returns the indices that would sort the input tensor, along a specified axis.
6989  * Requires 1 input and produces 1 output.
6990  * Output has the same rank and shape as the input.
6991  * Value of "axis" must be non-negative and less than the rank of the input.
6992  * e.g.:
6993  * input shape = (5,)
6994  * axis = 0
6995  * input values = [3.1, 5.4, 32.9, 3.2, 77.0]
6996  * output shape = (5,)
6997  * output values = [0, 3, 1, 2, 4], descending = False
6998  * output values = [4, 2, 1, 3, 0], descending = True
6999  * input shape = (2,3)
7000  * axis = 1
7001  * input values = [[3, 5, 32], [3, 77, 6]]
7002  * output shape = (2,3)
7003  * output values = [[0, 1, 2], [0, 2, 1]], descending = False
7004  * output values = [[2, 1, 0], [1, 2, 0]], descending = True
7005  */
7006 struct  _CoreML__Specification__ArgSortLayerParams
7007 {
7008   ProtobufCMessage base;
7009   /*
7010    * / must be between [0, input_rank - 1]
7011    */
7012   int64_t axis;
7013   protobuf_c_boolean descending;
7014 };
7015 #define CORE_ML__SPECIFICATION__ARG_SORT_LAYER_PARAMS__INIT \
7016  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__arg_sort_layer_params__descriptor) \
7017     , 0, 0 }
7018 
7019 
7020 /*
7021  **
7022  * A layer that performs a slice operation, given the size of the slice to be
7023  * extracted from the input tensor.
7024  * Requires 2 inputs and produces 1 output.
7025  * Rank of the output is the same as the rank of the first input.
7026  * The 1st input represents the tensor to be sliced.
7027  * The 2nd input represents the beginning index to be sliced from.
7028  * Example:
7029  * Input 1: x (x.shape = (2, 3, 4))
7030  * Input 2: begin
7031  * size: 2
7032  * axis: 1
7033  * Output: x[:, begin:begin+2, :]
7034  */
7035 struct  _CoreML__Specification__SliceBySizeLayerParams
7036 {
7037   ProtobufCMessage base;
7038   int64_t size;
7039   int64_t axis;
7040 };
7041 #define CORE_ML__SPECIFICATION__SLICE_BY_SIZE_LAYER_PARAMS__INIT \
7042  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__slice_by_size_layer_params__descriptor) \
7043     , 0, 0 }
7044 
7045 
7046 typedef enum {
7047   CORE_ML__SPECIFICATION__NEURAL_NETWORK_CLASSIFIER__CLASS_LABELS__NOT_SET = 0,
7048   CORE_ML__SPECIFICATION__NEURAL_NETWORK_CLASSIFIER__CLASS_LABELS_STRING_CLASS_LABELS = 100,
7049   CORE_ML__SPECIFICATION__NEURAL_NETWORK_CLASSIFIER__CLASS_LABELS_INT64_CLASS_LABELS = 101
7050     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__NEURAL_NETWORK_CLASSIFIER__CLASS_LABELS)
7051 } CoreML__Specification__NeuralNetworkClassifier__ClassLabelsCase;
7052 
7053 /*
7054  **
7055  * A neural network specialized as a classifier.
7056  */
7057 struct  _CoreML__Specification__NeuralNetworkClassifier
7058 {
7059   ProtobufCMessage base;
7060   size_t n_layers;
7061   CoreML__Specification__NeuralNetworkLayer **layers;
7062   size_t n_preprocessing;
7063   CoreML__Specification__NeuralNetworkPreprocessing **preprocessing;
7064   /*
7065    * use this enum value to determine the input tensor shapes to the neural network, for multiarray inputs
7066    */
7067   CoreML__Specification__NeuralNetworkMultiArrayShapeMapping arrayinputshapemapping;
7068   /*
7069    * use this enum value to determine the input tensor shapes to the neural network, for image inputs
7070    */
7071   CoreML__Specification__NeuralNetworkImageShapeMapping imageinputshapemapping;
7072   CoreML__Specification__NetworkUpdateParameters *updateparams;
7073   /*
7074    * The name of the output blob containing the probability of each class.
7075    * In other words, the score vector. Must be a 1-D tensor with the same
7076    * number and order of elements as ClassLabels.
7077    */
7078   char *labelprobabilitylayername;
7079   CoreML__Specification__NeuralNetworkClassifier__ClassLabelsCase class_labels_case;
7080   union {
7081     CoreML__Specification__StringVector *stringclasslabels;
7082     CoreML__Specification__Int64Vector *int64classlabels;
7083   };
7084 };
7085 #define CORE_ML__SPECIFICATION__NEURAL_NETWORK_CLASSIFIER__INIT \
7086  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__neural_network_classifier__descriptor) \
7087     , 0,NULL, 0,NULL, CORE_ML__SPECIFICATION__NEURAL_NETWORK_MULTI_ARRAY_SHAPE_MAPPING__RANK5_ARRAY_MAPPING, CORE_ML__SPECIFICATION__NEURAL_NETWORK_IMAGE_SHAPE_MAPPING__RANK5_IMAGE_MAPPING, NULL, (char *)protobuf_c_empty_string, CORE_ML__SPECIFICATION__NEURAL_NETWORK_CLASSIFIER__CLASS_LABELS__NOT_SET, {0} }
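
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): selecting the
 * classLabels oneof. With protobuf-c the active member is recorded in
 * class_labels_case and the matching union member carries the pointer. "labels"
 * stands for an already-built CoreML__Specification__StringVector
 * (defined in DataStructures.pb-c.h).
 *
 *     CoreML__Specification__NeuralNetworkClassifier clf =
 *         CORE_ML__SPECIFICATION__NEURAL_NETWORK_CLASSIFIER__INIT;
 *     clf.labelprobabilitylayername = (char *)"classLabelProbs";
 *     clf.class_labels_case =
 *         CORE_ML__SPECIFICATION__NEURAL_NETWORK_CLASSIFIER__CLASS_LABELS_STRING_CLASS_LABELS;
 *     clf.stringclasslabels = labels;
 */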
7088 
7089 
7090 struct  _CoreML__Specification__OneHotLayerParams
7091 {
7092   ProtobufCMessage base;
7093   /*
7094    * / size of the one hot vector
7095    */
7096   uint64_t onehotvectorsize;
7097   /*
7098    * /  negative indexing is supported. It refers to the axis in the output tensor.
7099    */
7100   int64_t axis;
7101   float onvalue;
7102   float offvalue;
7103 };
7104 #define CORE_ML__SPECIFICATION__ONE_HOT_LAYER_PARAMS__INIT \
7105  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__one_hot_layer_params__descriptor) \
7106     , 0, 0, 0, 0 }
7107 
7108 
7109 struct  _CoreML__Specification__CumSumLayerParams
7110 {
7111   ProtobufCMessage base;
7112   /*
7113    * /  negative indexing is supported
7114    */
7115   int64_t axis;
7116   /*
7117    * / if true, the first element of the output is 0, and the last element contains the sum of the input up to the penultimate value
7118    * / if false, the first element of the output is the same as the first element of the input and the last element is the sum of all the input values
7119    * / (this behavior is reversed when the "reverse" flag is True)
7120    */
7121   protobuf_c_boolean excludefinalsum;
7122   /*
7123    * / if true, cumsum is performed in the opposite direction
7124    */
7125   protobuf_c_boolean reverse;
7126 };
7127 #define CORE_ML__SPECIFICATION__CUM_SUM_LAYER_PARAMS__INIT \
7128  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__cum_sum_layer_params__descriptor) \
7129     , 0, 0, 0 }
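
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): with these
 * settings a forward, inclusive cumulative sum is taken along the last axis,
 * e.g. [1, 2, 3] -> [1, 3, 6].
 *
 *     CoreML__Specification__CumSumLayerParams cumsum =
 *         CORE_ML__SPECIFICATION__CUM_SUM_LAYER_PARAMS__INIT;
 *     cumsum.axis            = -1;  // negative indexing is supported
 *     cumsum.excludefinalsum = 0;   // last element is the sum of all inputs
 *     cumsum.reverse         = 0;   // accumulate in the forward direction
 */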
7130 
7131 
7132 /*
7133  **
7134  * A neural network specialized as a regressor.
7135  */
7136 struct  _CoreML__Specification__NeuralNetworkRegressor
7137 {
7138   ProtobufCMessage base;
7139   size_t n_layers;
7140   CoreML__Specification__NeuralNetworkLayer **layers;
7141   size_t n_preprocessing;
7142   CoreML__Specification__NeuralNetworkPreprocessing **preprocessing;
7143   /*
7144    * use this enum value to determine the input tensor shapes to the neural network, for multiarray inputs
7145    */
7146   CoreML__Specification__NeuralNetworkMultiArrayShapeMapping arrayinputshapemapping;
7147   /*
7148    * use this enum value to determine the input tensor shapes to the neural network, for image inputs
7149    */
7150   CoreML__Specification__NeuralNetworkImageShapeMapping imageinputshapemapping;
7151   CoreML__Specification__NetworkUpdateParameters *updateparams;
7152 };
7153 #define CORE_ML__SPECIFICATION__NEURAL_NETWORK_REGRESSOR__INIT \
7154  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__neural_network_regressor__descriptor) \
7155     , 0,NULL, 0,NULL, CORE_ML__SPECIFICATION__NEURAL_NETWORK_MULTI_ARRAY_SHAPE_MAPPING__RANK5_ARRAY_MAPPING, CORE_ML__SPECIFICATION__NEURAL_NETWORK_IMAGE_SHAPE_MAPPING__RANK5_IMAGE_MAPPING, NULL }
7156 
7157 
7158 /*
7159  **
7160  * Details on how the network will be updated
7161  */
7162 struct  _CoreML__Specification__NetworkUpdateParameters
7163 {
7164   ProtobufCMessage base;
7165   size_t n_losslayers;
7166   CoreML__Specification__LossLayer **losslayers;
7167   CoreML__Specification__Optimizer *optimizer;
7168   CoreML__Specification__Int64Parameter *epochs;
7169   /*
7170    **
7171    * Describes whether to shuffle the batch of data between epochs.
7172    */
7173   CoreML__Specification__BoolParameter *shuffle;
7174   /*
7175    **
7176    * The seed to be used in an associated random number generator.
7177    */
7178   CoreML__Specification__Int64Parameter *seed;
7179 };
7180 #define CORE_ML__SPECIFICATION__NETWORK_UPDATE_PARAMETERS__INIT \
7181  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__network_update_parameters__descriptor) \
7182     , 0,NULL, NULL, NULL, NULL, NULL }
7183 
7184 
7185 typedef enum {
7186   CORE_ML__SPECIFICATION__LOSS_LAYER__LOSS_LAYER_TYPE__NOT_SET = 0,
7187   CORE_ML__SPECIFICATION__LOSS_LAYER__LOSS_LAYER_TYPE_CATEGORICAL_CROSS_ENTROPY_LOSS_LAYER = 10,
7188   CORE_ML__SPECIFICATION__LOSS_LAYER__LOSS_LAYER_TYPE_MEAN_SQUARED_ERROR_LOSS_LAYER = 11
7189     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__LOSS_LAYER__LOSS_LAYER_TYPE)
7190 } CoreML__Specification__LossLayer__LossLayerTypeCase;
7191 
7192 /*
7193  **
7194  * Loss layer - categorical cross entropy and mean squared error are currently the only supported loss functions
7195  */
7196 struct  _CoreML__Specification__LossLayer
7197 {
7198   ProtobufCMessage base;
7199   char *name;
7200   CoreML__Specification__LossLayer__LossLayerTypeCase loss_layer_type_case;
7201   union {
7202     CoreML__Specification__CategoricalCrossEntropyLossLayer *categoricalcrossentropylosslayer;
7203     CoreML__Specification__MeanSquaredErrorLossLayer *meansquarederrorlosslayer;
7204   };
7205 };
7206 #define CORE_ML__SPECIFICATION__LOSS_LAYER__INIT \
7207  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__loss_layer__descriptor) \
7208     , (char *)protobuf_c_empty_string, CORE_ML__SPECIFICATION__LOSS_LAYER__LOSS_LAYER_TYPE__NOT_SET, {0} }
7209 
7210 
7211 /*
7212  **
7213  * Categorical cross entropy loss layer
7214  * Categorical cross entropy is used for single label categorization (only one category is applicable for each data point).
7215  * The input is a vector of length N representing the distribution over N categories. It must be the output of a softmax.
7216  * The target is a single value representing the true category or class label. If the target is the predictedFeatureName of a neural network classifier, it will be inverse-mapped to the corresponding categorical index for you.
7217  * .. math::
7218  *     Loss_{CCE}(input, target) = -\sum_{i=1}^{N} (target == i) \log(input[i]) = -\log(input[target])
7219  */
7220 struct  _CoreML__Specification__CategoricalCrossEntropyLossLayer
7221 {
7222   ProtobufCMessage base;
7223   char *input;
7224   char *target;
7225 };
7226 #define CORE_ML__SPECIFICATION__CATEGORICAL_CROSS_ENTROPY_LOSS_LAYER__INIT \
7227  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__categorical_cross_entropy_loss_layer__descriptor) \
7228     , (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string }
7229 
7230 
7231 /*
7232  **
7233  * Mean squared error loss layer,
7234  * specifying input and target
7235  */
7236 struct  _CoreML__Specification__MeanSquaredErrorLossLayer
7237 {
7238   ProtobufCMessage base;
7239   char *input;
7240   char *target;
7241 };
7242 #define CORE_ML__SPECIFICATION__MEAN_SQUARED_ERROR_LOSS_LAYER__INIT \
7243  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__mean_squared_error_loss_layer__descriptor) \
7244     , (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string }
7245 
7246 
7247 typedef enum {
7248   CORE_ML__SPECIFICATION__OPTIMIZER__OPTIMIZER_TYPE__NOT_SET = 0,
7249   CORE_ML__SPECIFICATION__OPTIMIZER__OPTIMIZER_TYPE_SGD_OPTIMIZER = 10,
7250   CORE_ML__SPECIFICATION__OPTIMIZER__OPTIMIZER_TYPE_ADAM_OPTIMIZER = 11
7251     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(CORE_ML__SPECIFICATION__OPTIMIZER__OPTIMIZER_TYPE)
7252 } CoreML__Specification__Optimizer__OptimizerTypeCase;
7253 
7254 /*
7255  **
7256  * Optimizer - stochastic gradient descent (SGD) and Adam are currently the only supported optimizers
7257  */
7258 struct  _CoreML__Specification__Optimizer
7259 {
7260   ProtobufCMessage base;
7261   CoreML__Specification__Optimizer__OptimizerTypeCase optimizer_type_case;
7262   union {
7263     CoreML__Specification__SGDOptimizer *sgdoptimizer;
7264     CoreML__Specification__AdamOptimizer *adamoptimizer;
7265   };
7266 };
7267 #define CORE_ML__SPECIFICATION__OPTIMIZER__INIT \
7268  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__optimizer__descriptor) \
7269     , CORE_ML__SPECIFICATION__OPTIMIZER__OPTIMIZER_TYPE__NOT_SET, {0} }
7270 
7271 
7272 /*
7273  **
7274  * Stochastic gradient descent optimizer,
7275  * specifying configurable learning rate, mini batch size, and momentum
7276  */
7277 struct  _CoreML__Specification__SGDOptimizer
7278 {
7279   ProtobufCMessage base;
7280   CoreML__Specification__DoubleParameter *learningrate;
7281   CoreML__Specification__Int64Parameter *minibatchsize;
7282   CoreML__Specification__DoubleParameter *momentum;
7283 };
7284 #define CORE_ML__SPECIFICATION__SGDOPTIMIZER__INIT \
7285  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__sgdoptimizer__descriptor) \
7286     , NULL, NULL, NULL }
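
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): wiring an
 * SGDOptimizer into the Optimizer oneof. The learning-rate, mini-batch-size and
 * momentum fields point at parameter messages declared elsewhere and are left
 * NULL here.
 *
 *     CoreML__Specification__SGDOptimizer sgd =
 *         CORE_ML__SPECIFICATION__SGDOPTIMIZER__INIT;
 *
 *     CoreML__Specification__Optimizer opt =
 *         CORE_ML__SPECIFICATION__OPTIMIZER__INIT;
 *     opt.optimizer_type_case =
 *         CORE_ML__SPECIFICATION__OPTIMIZER__OPTIMIZER_TYPE_SGD_OPTIMIZER;
 *     opt.sgdoptimizer = &sgd;
 */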
7287 
7288 
7289 /*
7290  **
7291  * Adam optimizer,
7292  * specifying configurable learning rate, mini batch size, betas, and eps
7293  */
7294 struct  _CoreML__Specification__AdamOptimizer
7295 {
7296   ProtobufCMessage base;
7297   CoreML__Specification__DoubleParameter *learningrate;
7298   CoreML__Specification__Int64Parameter *minibatchsize;
7299   CoreML__Specification__DoubleParameter *beta1;
7300   CoreML__Specification__DoubleParameter *beta2;
7301   CoreML__Specification__DoubleParameter *eps;
7302 };
7303 #define CORE_ML__SPECIFICATION__ADAM_OPTIMIZER__INIT \
7304  { PROTOBUF_C_MESSAGE_INIT (&core_ml__specification__adam_optimizer__descriptor) \
7305     , NULL, NULL, NULL, NULL, NULL }
7306 
7307 
7308 /* CoreML__Specification__NeuralNetwork methods */
7309 void   core_ml__specification__neural_network__init
7310                      (CoreML__Specification__NeuralNetwork         *message);
7311 size_t core_ml__specification__neural_network__get_packed_size
7312                      (const CoreML__Specification__NeuralNetwork   *message);
7313 size_t core_ml__specification__neural_network__pack
7314                      (const CoreML__Specification__NeuralNetwork   *message,
7315                       uint8_t             *out);
7316 size_t core_ml__specification__neural_network__pack_to_buffer
7317                      (const CoreML__Specification__NeuralNetwork   *message,
7318                       ProtobufCBuffer     *buffer);
7319 CoreML__Specification__NeuralNetwork *
7320        core_ml__specification__neural_network__unpack
7321                      (ProtobufCAllocator  *allocator,
7322                       size_t               len,
7323                       const uint8_t       *data);
7324 void   core_ml__specification__neural_network__free_unpacked
7325                      (CoreML__Specification__NeuralNetwork *message,
7326                       ProtobufCAllocator *allocator);
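
/*
 **
 * Usage sketch (editor's illustration, not emitted by protoc-c): the standard
 * protobuf-c round trip with the methods declared above. Error handling is
 * omitted; the buffer uses malloc/free from <stdlib.h>.
 *
 *     CoreML__Specification__NeuralNetwork nn;
 *     core_ml__specification__neural_network__init(&nn);
 *     // ... populate nn.layers / nn.n_layers and friends ...
 *
 *     size_t len = core_ml__specification__neural_network__get_packed_size(&nn);
 *     uint8_t *buf = malloc(len);
 *     core_ml__specification__neural_network__pack(&nn, buf);
 *
 *     CoreML__Specification__NeuralNetwork *copy =
 *         core_ml__specification__neural_network__unpack(NULL, len, buf);
 *     // ... use the unpacked copy ...
 *     core_ml__specification__neural_network__free_unpacked(copy, NULL);
 *     free(buf);
 */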
7327 /* CoreML__Specification__NeuralNetworkImageScaler methods */
7328 void   core_ml__specification__neural_network_image_scaler__init
7329                      (CoreML__Specification__NeuralNetworkImageScaler         *message);
7330 size_t core_ml__specification__neural_network_image_scaler__get_packed_size
7331                      (const CoreML__Specification__NeuralNetworkImageScaler   *message);
7332 size_t core_ml__specification__neural_network_image_scaler__pack
7333                      (const CoreML__Specification__NeuralNetworkImageScaler   *message,
7334                       uint8_t             *out);
7335 size_t core_ml__specification__neural_network_image_scaler__pack_to_buffer
7336                      (const CoreML__Specification__NeuralNetworkImageScaler   *message,
7337                       ProtobufCBuffer     *buffer);
7338 CoreML__Specification__NeuralNetworkImageScaler *
7339        core_ml__specification__neural_network_image_scaler__unpack
7340                      (ProtobufCAllocator  *allocator,
7341                       size_t               len,
7342                       const uint8_t       *data);
7343 void   core_ml__specification__neural_network_image_scaler__free_unpacked
7344                      (CoreML__Specification__NeuralNetworkImageScaler *message,
7345                       ProtobufCAllocator *allocator);
7346 /* CoreML__Specification__NeuralNetworkMeanImage methods */
7347 void   core_ml__specification__neural_network_mean_image__init
7348                      (CoreML__Specification__NeuralNetworkMeanImage         *message);
7349 size_t core_ml__specification__neural_network_mean_image__get_packed_size
7350                      (const CoreML__Specification__NeuralNetworkMeanImage   *message);
7351 size_t core_ml__specification__neural_network_mean_image__pack
7352                      (const CoreML__Specification__NeuralNetworkMeanImage   *message,
7353                       uint8_t             *out);
7354 size_t core_ml__specification__neural_network_mean_image__pack_to_buffer
7355                      (const CoreML__Specification__NeuralNetworkMeanImage   *message,
7356                       ProtobufCBuffer     *buffer);
7357 CoreML__Specification__NeuralNetworkMeanImage *
7358        core_ml__specification__neural_network_mean_image__unpack
7359                      (ProtobufCAllocator  *allocator,
7360                       size_t               len,
7361                       const uint8_t       *data);
7362 void   core_ml__specification__neural_network_mean_image__free_unpacked
7363                      (CoreML__Specification__NeuralNetworkMeanImage *message,
7364                       ProtobufCAllocator *allocator);
7365 /* CoreML__Specification__NeuralNetworkPreprocessing methods */
7366 void   core_ml__specification__neural_network_preprocessing__init
7367                      (CoreML__Specification__NeuralNetworkPreprocessing         *message);
7368 size_t core_ml__specification__neural_network_preprocessing__get_packed_size
7369                      (const CoreML__Specification__NeuralNetworkPreprocessing   *message);
7370 size_t core_ml__specification__neural_network_preprocessing__pack
7371                      (const CoreML__Specification__NeuralNetworkPreprocessing   *message,
7372                       uint8_t             *out);
7373 size_t core_ml__specification__neural_network_preprocessing__pack_to_buffer
7374                      (const CoreML__Specification__NeuralNetworkPreprocessing   *message,
7375                       ProtobufCBuffer     *buffer);
7376 CoreML__Specification__NeuralNetworkPreprocessing *
7377        core_ml__specification__neural_network_preprocessing__unpack
7378                      (ProtobufCAllocator  *allocator,
7379                       size_t               len,
7380                       const uint8_t       *data);
7381 void   core_ml__specification__neural_network_preprocessing__free_unpacked
7382                      (CoreML__Specification__NeuralNetworkPreprocessing *message,
7383                       ProtobufCAllocator *allocator);
7384 /* CoreML__Specification__ActivationReLU methods */
7385 void   core_ml__specification__activation_re_lu__init
7386                      (CoreML__Specification__ActivationReLU         *message);
7387 size_t core_ml__specification__activation_re_lu__get_packed_size
7388                      (const CoreML__Specification__ActivationReLU   *message);
7389 size_t core_ml__specification__activation_re_lu__pack
7390                      (const CoreML__Specification__ActivationReLU   *message,
7391                       uint8_t             *out);
7392 size_t core_ml__specification__activation_re_lu__pack_to_buffer
7393                      (const CoreML__Specification__ActivationReLU   *message,
7394                       ProtobufCBuffer     *buffer);
7395 CoreML__Specification__ActivationReLU *
7396        core_ml__specification__activation_re_lu__unpack
7397                      (ProtobufCAllocator  *allocator,
7398                       size_t               len,
7399                       const uint8_t       *data);
7400 void   core_ml__specification__activation_re_lu__free_unpacked
7401                      (CoreML__Specification__ActivationReLU *message,
7402                       ProtobufCAllocator *allocator);
7403 /* CoreML__Specification__ActivationLeakyReLU methods */
7404 void   core_ml__specification__activation_leaky_re_lu__init
7405                      (CoreML__Specification__ActivationLeakyReLU         *message);
7406 size_t core_ml__specification__activation_leaky_re_lu__get_packed_size
7407                      (const CoreML__Specification__ActivationLeakyReLU   *message);
7408 size_t core_ml__specification__activation_leaky_re_lu__pack
7409                      (const CoreML__Specification__ActivationLeakyReLU   *message,
7410                       uint8_t             *out);
7411 size_t core_ml__specification__activation_leaky_re_lu__pack_to_buffer
7412                      (const CoreML__Specification__ActivationLeakyReLU   *message,
7413                       ProtobufCBuffer     *buffer);
7414 CoreML__Specification__ActivationLeakyReLU *
7415        core_ml__specification__activation_leaky_re_lu__unpack
7416                      (ProtobufCAllocator  *allocator,
7417                       size_t               len,
7418                       const uint8_t       *data);
7419 void   core_ml__specification__activation_leaky_re_lu__free_unpacked
7420                      (CoreML__Specification__ActivationLeakyReLU *message,
7421                       ProtobufCAllocator *allocator);
7422 /* CoreML__Specification__ActivationTanh methods */
7423 void   core_ml__specification__activation_tanh__init
7424                      (CoreML__Specification__ActivationTanh         *message);
7425 size_t core_ml__specification__activation_tanh__get_packed_size
7426                      (const CoreML__Specification__ActivationTanh   *message);
7427 size_t core_ml__specification__activation_tanh__pack
7428                      (const CoreML__Specification__ActivationTanh   *message,
7429                       uint8_t             *out);
7430 size_t core_ml__specification__activation_tanh__pack_to_buffer
7431                      (const CoreML__Specification__ActivationTanh   *message,
7432                       ProtobufCBuffer     *buffer);
7433 CoreML__Specification__ActivationTanh *
7434        core_ml__specification__activation_tanh__unpack
7435                      (ProtobufCAllocator  *allocator,
7436                       size_t               len,
7437                       const uint8_t       *data);
7438 void   core_ml__specification__activation_tanh__free_unpacked
7439                      (CoreML__Specification__ActivationTanh *message,
7440                       ProtobufCAllocator *allocator);
7441 /* CoreML__Specification__ActivationScaledTanh methods */
7442 void   core_ml__specification__activation_scaled_tanh__init
7443                      (CoreML__Specification__ActivationScaledTanh         *message);
7444 size_t core_ml__specification__activation_scaled_tanh__get_packed_size
7445                      (const CoreML__Specification__ActivationScaledTanh   *message);
7446 size_t core_ml__specification__activation_scaled_tanh__pack
7447                      (const CoreML__Specification__ActivationScaledTanh   *message,
7448                       uint8_t             *out);
7449 size_t core_ml__specification__activation_scaled_tanh__pack_to_buffer
7450                      (const CoreML__Specification__ActivationScaledTanh   *message,
7451                       ProtobufCBuffer     *buffer);
7452 CoreML__Specification__ActivationScaledTanh *
7453        core_ml__specification__activation_scaled_tanh__unpack
7454                      (ProtobufCAllocator  *allocator,
7455                       size_t               len,
7456                       const uint8_t       *data);
7457 void   core_ml__specification__activation_scaled_tanh__free_unpacked
7458                      (CoreML__Specification__ActivationScaledTanh *message,
7459                       ProtobufCAllocator *allocator);
7460 /* CoreML__Specification__ActivationSigmoid methods */
7461 void   core_ml__specification__activation_sigmoid__init
7462                      (CoreML__Specification__ActivationSigmoid         *message);
7463 size_t core_ml__specification__activation_sigmoid__get_packed_size
7464                      (const CoreML__Specification__ActivationSigmoid   *message);
7465 size_t core_ml__specification__activation_sigmoid__pack
7466                      (const CoreML__Specification__ActivationSigmoid   *message,
7467                       uint8_t             *out);
7468 size_t core_ml__specification__activation_sigmoid__pack_to_buffer
7469                      (const CoreML__Specification__ActivationSigmoid   *message,
7470                       ProtobufCBuffer     *buffer);
7471 CoreML__Specification__ActivationSigmoid *
7472        core_ml__specification__activation_sigmoid__unpack
7473                      (ProtobufCAllocator  *allocator,
7474                       size_t               len,
7475                       const uint8_t       *data);
7476 void   core_ml__specification__activation_sigmoid__free_unpacked
7477                      (CoreML__Specification__ActivationSigmoid *message,
7478                       ProtobufCAllocator *allocator);
7479 /* CoreML__Specification__ActivationLinear methods */
7480 void   core_ml__specification__activation_linear__init
7481                      (CoreML__Specification__ActivationLinear         *message);
7482 size_t core_ml__specification__activation_linear__get_packed_size
7483                      (const CoreML__Specification__ActivationLinear   *message);
7484 size_t core_ml__specification__activation_linear__pack
7485                      (const CoreML__Specification__ActivationLinear   *message,
7486                       uint8_t             *out);
7487 size_t core_ml__specification__activation_linear__pack_to_buffer
7488                      (const CoreML__Specification__ActivationLinear   *message,
7489                       ProtobufCBuffer     *buffer);
7490 CoreML__Specification__ActivationLinear *
7491        core_ml__specification__activation_linear__unpack
7492                      (ProtobufCAllocator  *allocator,
7493                       size_t               len,
7494                       const uint8_t       *data);
7495 void   core_ml__specification__activation_linear__free_unpacked
7496                      (CoreML__Specification__ActivationLinear *message,
7497                       ProtobufCAllocator *allocator);
7498 /* CoreML__Specification__ActivationSigmoidHard methods */
7499 void   core_ml__specification__activation_sigmoid_hard__init
7500                      (CoreML__Specification__ActivationSigmoidHard         *message);
7501 size_t core_ml__specification__activation_sigmoid_hard__get_packed_size
7502                      (const CoreML__Specification__ActivationSigmoidHard   *message);
7503 size_t core_ml__specification__activation_sigmoid_hard__pack
7504                      (const CoreML__Specification__ActivationSigmoidHard   *message,
7505                       uint8_t             *out);
7506 size_t core_ml__specification__activation_sigmoid_hard__pack_to_buffer
7507                      (const CoreML__Specification__ActivationSigmoidHard   *message,
7508                       ProtobufCBuffer     *buffer);
7509 CoreML__Specification__ActivationSigmoidHard *
7510        core_ml__specification__activation_sigmoid_hard__unpack
7511                      (ProtobufCAllocator  *allocator,
7512                       size_t               len,
7513                       const uint8_t       *data);
7514 void   core_ml__specification__activation_sigmoid_hard__free_unpacked
7515                      (CoreML__Specification__ActivationSigmoidHard *message,
7516                       ProtobufCAllocator *allocator);
7517 /* CoreML__Specification__ActivationPReLU methods */
7518 void   core_ml__specification__activation_pre_lu__init
7519                      (CoreML__Specification__ActivationPReLU         *message);
7520 size_t core_ml__specification__activation_pre_lu__get_packed_size
7521                      (const CoreML__Specification__ActivationPReLU   *message);
7522 size_t core_ml__specification__activation_pre_lu__pack
7523                      (const CoreML__Specification__ActivationPReLU   *message,
7524                       uint8_t             *out);
7525 size_t core_ml__specification__activation_pre_lu__pack_to_buffer
7526                      (const CoreML__Specification__ActivationPReLU   *message,
7527                       ProtobufCBuffer     *buffer);
7528 CoreML__Specification__ActivationPReLU *
7529        core_ml__specification__activation_pre_lu__unpack
7530                      (ProtobufCAllocator  *allocator,
7531                       size_t               len,
7532                       const uint8_t       *data);
7533 void   core_ml__specification__activation_pre_lu__free_unpacked
7534                      (CoreML__Specification__ActivationPReLU *message,
7535                       ProtobufCAllocator *allocator);
7536 /* CoreML__Specification__ActivationELU methods */
7537 void   core_ml__specification__activation_elu__init
7538                      (CoreML__Specification__ActivationELU         *message);
7539 size_t core_ml__specification__activation_elu__get_packed_size
7540                      (const CoreML__Specification__ActivationELU   *message);
7541 size_t core_ml__specification__activation_elu__pack
7542                      (const CoreML__Specification__ActivationELU   *message,
7543                       uint8_t             *out);
7544 size_t core_ml__specification__activation_elu__pack_to_buffer
7545                      (const CoreML__Specification__ActivationELU   *message,
7546                       ProtobufCBuffer     *buffer);
7547 CoreML__Specification__ActivationELU *
7548        core_ml__specification__activation_elu__unpack
7549                      (ProtobufCAllocator  *allocator,
7550                       size_t               len,
7551                       const uint8_t       *data);
7552 void   core_ml__specification__activation_elu__free_unpacked
7553                      (CoreML__Specification__ActivationELU *message,
7554                       ProtobufCAllocator *allocator);
7555 /* CoreML__Specification__ActivationThresholdedReLU methods */
7556 void   core_ml__specification__activation_thresholded_re_lu__init
7557                      (CoreML__Specification__ActivationThresholdedReLU         *message);
7558 size_t core_ml__specification__activation_thresholded_re_lu__get_packed_size
7559                      (const CoreML__Specification__ActivationThresholdedReLU   *message);
7560 size_t core_ml__specification__activation_thresholded_re_lu__pack
7561                      (const CoreML__Specification__ActivationThresholdedReLU   *message,
7562                       uint8_t             *out);
7563 size_t core_ml__specification__activation_thresholded_re_lu__pack_to_buffer
7564                      (const CoreML__Specification__ActivationThresholdedReLU   *message,
7565                       ProtobufCBuffer     *buffer);
7566 CoreML__Specification__ActivationThresholdedReLU *
7567        core_ml__specification__activation_thresholded_re_lu__unpack
7568                      (ProtobufCAllocator  *allocator,
7569                       size_t               len,
7570                       const uint8_t       *data);
7571 void   core_ml__specification__activation_thresholded_re_lu__free_unpacked
7572                      (CoreML__Specification__ActivationThresholdedReLU *message,
7573                       ProtobufCAllocator *allocator);
7574 /* CoreML__Specification__ActivationSoftsign methods */
7575 void   core_ml__specification__activation_softsign__init
7576                      (CoreML__Specification__ActivationSoftsign         *message);
7577 size_t core_ml__specification__activation_softsign__get_packed_size
7578                      (const CoreML__Specification__ActivationSoftsign   *message);
7579 size_t core_ml__specification__activation_softsign__pack
7580                      (const CoreML__Specification__ActivationSoftsign   *message,
7581                       uint8_t             *out);
7582 size_t core_ml__specification__activation_softsign__pack_to_buffer
7583                      (const CoreML__Specification__ActivationSoftsign   *message,
7584                       ProtobufCBuffer     *buffer);
7585 CoreML__Specification__ActivationSoftsign *
7586        core_ml__specification__activation_softsign__unpack
7587                      (ProtobufCAllocator  *allocator,
7588                       size_t               len,
7589                       const uint8_t       *data);
7590 void   core_ml__specification__activation_softsign__free_unpacked
7591                      (CoreML__Specification__ActivationSoftsign *message,
7592                       ProtobufCAllocator *allocator);
7593 /* CoreML__Specification__ActivationSoftplus methods */
7594 void   core_ml__specification__activation_softplus__init
7595                      (CoreML__Specification__ActivationSoftplus         *message);
7596 size_t core_ml__specification__activation_softplus__get_packed_size
7597                      (const CoreML__Specification__ActivationSoftplus   *message);
7598 size_t core_ml__specification__activation_softplus__pack
7599                      (const CoreML__Specification__ActivationSoftplus   *message,
7600                       uint8_t             *out);
7601 size_t core_ml__specification__activation_softplus__pack_to_buffer
7602                      (const CoreML__Specification__ActivationSoftplus   *message,
7603                       ProtobufCBuffer     *buffer);
7604 CoreML__Specification__ActivationSoftplus *
7605        core_ml__specification__activation_softplus__unpack
7606                      (ProtobufCAllocator  *allocator,
7607                       size_t               len,
7608                       const uint8_t       *data);
7609 void   core_ml__specification__activation_softplus__free_unpacked
7610                      (CoreML__Specification__ActivationSoftplus *message,
7611                       ProtobufCAllocator *allocator);
7612 /* CoreML__Specification__ActivationParametricSoftplus methods */
7613 void   core_ml__specification__activation_parametric_softplus__init
7614                      (CoreML__Specification__ActivationParametricSoftplus         *message);
7615 size_t core_ml__specification__activation_parametric_softplus__get_packed_size
7616                      (const CoreML__Specification__ActivationParametricSoftplus   *message);
7617 size_t core_ml__specification__activation_parametric_softplus__pack
7618                      (const CoreML__Specification__ActivationParametricSoftplus   *message,
7619                       uint8_t             *out);
7620 size_t core_ml__specification__activation_parametric_softplus__pack_to_buffer
7621                      (const CoreML__Specification__ActivationParametricSoftplus   *message,
7622                       ProtobufCBuffer     *buffer);
7623 CoreML__Specification__ActivationParametricSoftplus *
7624        core_ml__specification__activation_parametric_softplus__unpack
7625                      (ProtobufCAllocator  *allocator,
7626                       size_t               len,
7627                       const uint8_t       *data);
7628 void   core_ml__specification__activation_parametric_softplus__free_unpacked
7629                      (CoreML__Specification__ActivationParametricSoftplus *message,
7630                       ProtobufCAllocator *allocator);
7631 /* CoreML__Specification__ActivationParams methods */
7632 void   core_ml__specification__activation_params__init
7633                      (CoreML__Specification__ActivationParams         *message);
7634 size_t core_ml__specification__activation_params__get_packed_size
7635                      (const CoreML__Specification__ActivationParams   *message);
7636 size_t core_ml__specification__activation_params__pack
7637                      (const CoreML__Specification__ActivationParams   *message,
7638                       uint8_t             *out);
7639 size_t core_ml__specification__activation_params__pack_to_buffer
7640                      (const CoreML__Specification__ActivationParams   *message,
7641                       ProtobufCBuffer     *buffer);
7642 CoreML__Specification__ActivationParams *
7643        core_ml__specification__activation_params__unpack
7644                      (ProtobufCAllocator  *allocator,
7645                       size_t               len,
7646                       const uint8_t       *data);
7647 void   core_ml__specification__activation_params__free_unpacked
7648                      (CoreML__Specification__ActivationParams *message,
7649                       ProtobufCAllocator *allocator);
7650 /* CoreML__Specification__Tensor methods */
7651 void   core_ml__specification__tensor__init
7652                      (CoreML__Specification__Tensor         *message);
7653 size_t core_ml__specification__tensor__get_packed_size
7654                      (const CoreML__Specification__Tensor   *message);
7655 size_t core_ml__specification__tensor__pack
7656                      (const CoreML__Specification__Tensor   *message,
7657                       uint8_t             *out);
7658 size_t core_ml__specification__tensor__pack_to_buffer
7659                      (const CoreML__Specification__Tensor   *message,
7660                       ProtobufCBuffer     *buffer);
7661 CoreML__Specification__Tensor *
7662        core_ml__specification__tensor__unpack
7663                      (ProtobufCAllocator  *allocator,
7664                       size_t               len,
7665                       const uint8_t       *data);
7666 void   core_ml__specification__tensor__free_unpacked
7667                      (CoreML__Specification__Tensor *message,
7668                       ProtobufCAllocator *allocator);
7669 /* CoreML__Specification__NeuralNetworkLayer methods */
7670 void   core_ml__specification__neural_network_layer__init
7671                      (CoreML__Specification__NeuralNetworkLayer         *message);
7672 size_t core_ml__specification__neural_network_layer__get_packed_size
7673                      (const CoreML__Specification__NeuralNetworkLayer   *message);
7674 size_t core_ml__specification__neural_network_layer__pack
7675                      (const CoreML__Specification__NeuralNetworkLayer   *message,
7676                       uint8_t             *out);
7677 size_t core_ml__specification__neural_network_layer__pack_to_buffer
7678                      (const CoreML__Specification__NeuralNetworkLayer   *message,
7679                       ProtobufCBuffer     *buffer);
7680 CoreML__Specification__NeuralNetworkLayer *
7681        core_ml__specification__neural_network_layer__unpack
7682                      (ProtobufCAllocator  *allocator,
7683                       size_t               len,
7684                       const uint8_t       *data);
7685 void   core_ml__specification__neural_network_layer__free_unpacked
7686                      (CoreML__Specification__NeuralNetworkLayer *message,
7687                       ProtobufCAllocator *allocator);
7688 /* CoreML__Specification__BranchLayerParams methods */
7689 void   core_ml__specification__branch_layer_params__init
7690                      (CoreML__Specification__BranchLayerParams         *message);
7691 size_t core_ml__specification__branch_layer_params__get_packed_size
7692                      (const CoreML__Specification__BranchLayerParams   *message);
7693 size_t core_ml__specification__branch_layer_params__pack
7694                      (const CoreML__Specification__BranchLayerParams   *message,
7695                       uint8_t             *out);
7696 size_t core_ml__specification__branch_layer_params__pack_to_buffer
7697                      (const CoreML__Specification__BranchLayerParams   *message,
7698                       ProtobufCBuffer     *buffer);
7699 CoreML__Specification__BranchLayerParams *
7700        core_ml__specification__branch_layer_params__unpack
7701                      (ProtobufCAllocator  *allocator,
7702                       size_t               len,
7703                       const uint8_t       *data);
7704 void   core_ml__specification__branch_layer_params__free_unpacked
7705                      (CoreML__Specification__BranchLayerParams *message,
7706                       ProtobufCAllocator *allocator);
7707 /* CoreML__Specification__LoopLayerParams methods */
7708 void   core_ml__specification__loop_layer_params__init
7709                      (CoreML__Specification__LoopLayerParams         *message);
7710 size_t core_ml__specification__loop_layer_params__get_packed_size
7711                      (const CoreML__Specification__LoopLayerParams   *message);
7712 size_t core_ml__specification__loop_layer_params__pack
7713                      (const CoreML__Specification__LoopLayerParams   *message,
7714                       uint8_t             *out);
7715 size_t core_ml__specification__loop_layer_params__pack_to_buffer
7716                      (const CoreML__Specification__LoopLayerParams   *message,
7717                       ProtobufCBuffer     *buffer);
7718 CoreML__Specification__LoopLayerParams *
7719        core_ml__specification__loop_layer_params__unpack
7720                      (ProtobufCAllocator  *allocator,
7721                       size_t               len,
7722                       const uint8_t       *data);
7723 void   core_ml__specification__loop_layer_params__free_unpacked
7724                      (CoreML__Specification__LoopLayerParams *message,
7725                       ProtobufCAllocator *allocator);
7726 /* CoreML__Specification__LoopBreakLayerParams methods */
7727 void   core_ml__specification__loop_break_layer_params__init
7728                      (CoreML__Specification__LoopBreakLayerParams         *message);
7729 size_t core_ml__specification__loop_break_layer_params__get_packed_size
7730                      (const CoreML__Specification__LoopBreakLayerParams   *message);
7731 size_t core_ml__specification__loop_break_layer_params__pack
7732                      (const CoreML__Specification__LoopBreakLayerParams   *message,
7733                       uint8_t             *out);
7734 size_t core_ml__specification__loop_break_layer_params__pack_to_buffer
7735                      (const CoreML__Specification__LoopBreakLayerParams   *message,
7736                       ProtobufCBuffer     *buffer);
7737 CoreML__Specification__LoopBreakLayerParams *
7738        core_ml__specification__loop_break_layer_params__unpack
7739                      (ProtobufCAllocator  *allocator,
7740                       size_t               len,
7741                       const uint8_t       *data);
7742 void   core_ml__specification__loop_break_layer_params__free_unpacked
7743                      (CoreML__Specification__LoopBreakLayerParams *message,
7744                       ProtobufCAllocator *allocator);
7745 /* CoreML__Specification__LoopContinueLayerParams methods */
7746 void   core_ml__specification__loop_continue_layer_params__init
7747                      (CoreML__Specification__LoopContinueLayerParams         *message);
7748 size_t core_ml__specification__loop_continue_layer_params__get_packed_size
7749                      (const CoreML__Specification__LoopContinueLayerParams   *message);
7750 size_t core_ml__specification__loop_continue_layer_params__pack
7751                      (const CoreML__Specification__LoopContinueLayerParams   *message,
7752                       uint8_t             *out);
7753 size_t core_ml__specification__loop_continue_layer_params__pack_to_buffer
7754                      (const CoreML__Specification__LoopContinueLayerParams   *message,
7755                       ProtobufCBuffer     *buffer);
7756 CoreML__Specification__LoopContinueLayerParams *
7757        core_ml__specification__loop_continue_layer_params__unpack
7758                      (ProtobufCAllocator  *allocator,
7759                       size_t               len,
7760                       const uint8_t       *data);
7761 void   core_ml__specification__loop_continue_layer_params__free_unpacked
7762                      (CoreML__Specification__LoopContinueLayerParams *message,
7763                       ProtobufCAllocator *allocator);
7764 /* CoreML__Specification__CopyLayerParams methods */
7765 void   core_ml__specification__copy_layer_params__init
7766                      (CoreML__Specification__CopyLayerParams         *message);
7767 size_t core_ml__specification__copy_layer_params__get_packed_size
7768                      (const CoreML__Specification__CopyLayerParams   *message);
7769 size_t core_ml__specification__copy_layer_params__pack
7770                      (const CoreML__Specification__CopyLayerParams   *message,
7771                       uint8_t             *out);
7772 size_t core_ml__specification__copy_layer_params__pack_to_buffer
7773                      (const CoreML__Specification__CopyLayerParams   *message,
7774                       ProtobufCBuffer     *buffer);
7775 CoreML__Specification__CopyLayerParams *
7776        core_ml__specification__copy_layer_params__unpack
7777                      (ProtobufCAllocator  *allocator,
7778                       size_t               len,
7779                       const uint8_t       *data);
7780 void   core_ml__specification__copy_layer_params__free_unpacked
7781                      (CoreML__Specification__CopyLayerParams *message,
7782                       ProtobufCAllocator *allocator);
7783 /* CoreML__Specification__GreaterThanLayerParams methods */
7784 void   core_ml__specification__greater_than_layer_params__init
7785                      (CoreML__Specification__GreaterThanLayerParams         *message);
7786 size_t core_ml__specification__greater_than_layer_params__get_packed_size
7787                      (const CoreML__Specification__GreaterThanLayerParams   *message);
7788 size_t core_ml__specification__greater_than_layer_params__pack
7789                      (const CoreML__Specification__GreaterThanLayerParams   *message,
7790                       uint8_t             *out);
7791 size_t core_ml__specification__greater_than_layer_params__pack_to_buffer
7792                      (const CoreML__Specification__GreaterThanLayerParams   *message,
7793                       ProtobufCBuffer     *buffer);
7794 CoreML__Specification__GreaterThanLayerParams *
7795        core_ml__specification__greater_than_layer_params__unpack
7796                      (ProtobufCAllocator  *allocator,
7797                       size_t               len,
7798                       const uint8_t       *data);
7799 void   core_ml__specification__greater_than_layer_params__free_unpacked
7800                      (CoreML__Specification__GreaterThanLayerParams *message,
7801                       ProtobufCAllocator *allocator);
7802 /* CoreML__Specification__GreaterEqualLayerParams methods */
7803 void   core_ml__specification__greater_equal_layer_params__init
7804                      (CoreML__Specification__GreaterEqualLayerParams         *message);
7805 size_t core_ml__specification__greater_equal_layer_params__get_packed_size
7806                      (const CoreML__Specification__GreaterEqualLayerParams   *message);
7807 size_t core_ml__specification__greater_equal_layer_params__pack
7808                      (const CoreML__Specification__GreaterEqualLayerParams   *message,
7809                       uint8_t             *out);
7810 size_t core_ml__specification__greater_equal_layer_params__pack_to_buffer
7811                      (const CoreML__Specification__GreaterEqualLayerParams   *message,
7812                       ProtobufCBuffer     *buffer);
7813 CoreML__Specification__GreaterEqualLayerParams *
7814        core_ml__specification__greater_equal_layer_params__unpack
7815                      (ProtobufCAllocator  *allocator,
7816                       size_t               len,
7817                       const uint8_t       *data);
7818 void   core_ml__specification__greater_equal_layer_params__free_unpacked
7819                      (CoreML__Specification__GreaterEqualLayerParams *message,
7820                       ProtobufCAllocator *allocator);
7821 /* CoreML__Specification__LessThanLayerParams methods */
7822 void   core_ml__specification__less_than_layer_params__init
7823                      (CoreML__Specification__LessThanLayerParams         *message);
7824 size_t core_ml__specification__less_than_layer_params__get_packed_size
7825                      (const CoreML__Specification__LessThanLayerParams   *message);
7826 size_t core_ml__specification__less_than_layer_params__pack
7827                      (const CoreML__Specification__LessThanLayerParams   *message,
7828                       uint8_t             *out);
7829 size_t core_ml__specification__less_than_layer_params__pack_to_buffer
7830                      (const CoreML__Specification__LessThanLayerParams   *message,
7831                       ProtobufCBuffer     *buffer);
7832 CoreML__Specification__LessThanLayerParams *
7833        core_ml__specification__less_than_layer_params__unpack
7834                      (ProtobufCAllocator  *allocator,
7835                       size_t               len,
7836                       const uint8_t       *data);
7837 void   core_ml__specification__less_than_layer_params__free_unpacked
7838                      (CoreML__Specification__LessThanLayerParams *message,
7839                       ProtobufCAllocator *allocator);
7840 /* CoreML__Specification__LessEqualLayerParams methods */
7841 void   core_ml__specification__less_equal_layer_params__init
7842                      (CoreML__Specification__LessEqualLayerParams         *message);
7843 size_t core_ml__specification__less_equal_layer_params__get_packed_size
7844                      (const CoreML__Specification__LessEqualLayerParams   *message);
7845 size_t core_ml__specification__less_equal_layer_params__pack
7846                      (const CoreML__Specification__LessEqualLayerParams   *message,
7847                       uint8_t             *out);
7848 size_t core_ml__specification__less_equal_layer_params__pack_to_buffer
7849                      (const CoreML__Specification__LessEqualLayerParams   *message,
7850                       ProtobufCBuffer     *buffer);
7851 CoreML__Specification__LessEqualLayerParams *
7852        core_ml__specification__less_equal_layer_params__unpack
7853                      (ProtobufCAllocator  *allocator,
7854                       size_t               len,
7855                       const uint8_t       *data);
7856 void   core_ml__specification__less_equal_layer_params__free_unpacked
7857                      (CoreML__Specification__LessEqualLayerParams *message,
7858                       ProtobufCAllocator *allocator);
7859 /* CoreML__Specification__EqualLayerParams methods */
7860 void   core_ml__specification__equal_layer_params__init
7861                      (CoreML__Specification__EqualLayerParams         *message);
7862 size_t core_ml__specification__equal_layer_params__get_packed_size
7863                      (const CoreML__Specification__EqualLayerParams   *message);
7864 size_t core_ml__specification__equal_layer_params__pack
7865                      (const CoreML__Specification__EqualLayerParams   *message,
7866                       uint8_t             *out);
7867 size_t core_ml__specification__equal_layer_params__pack_to_buffer
7868                      (const CoreML__Specification__EqualLayerParams   *message,
7869                       ProtobufCBuffer     *buffer);
7870 CoreML__Specification__EqualLayerParams *
7871        core_ml__specification__equal_layer_params__unpack
7872                      (ProtobufCAllocator  *allocator,
7873                       size_t               len,
7874                       const uint8_t       *data);
7875 void   core_ml__specification__equal_layer_params__free_unpacked
7876                      (CoreML__Specification__EqualLayerParams *message,
7877                       ProtobufCAllocator *allocator);
7878 /* CoreML__Specification__NotEqualLayerParams methods */
7879 void   core_ml__specification__not_equal_layer_params__init
7880                      (CoreML__Specification__NotEqualLayerParams         *message);
7881 size_t core_ml__specification__not_equal_layer_params__get_packed_size
7882                      (const CoreML__Specification__NotEqualLayerParams   *message);
7883 size_t core_ml__specification__not_equal_layer_params__pack
7884                      (const CoreML__Specification__NotEqualLayerParams   *message,
7885                       uint8_t             *out);
7886 size_t core_ml__specification__not_equal_layer_params__pack_to_buffer
7887                      (const CoreML__Specification__NotEqualLayerParams   *message,
7888                       ProtobufCBuffer     *buffer);
7889 CoreML__Specification__NotEqualLayerParams *
7890        core_ml__specification__not_equal_layer_params__unpack
7891                      (ProtobufCAllocator  *allocator,
7892                       size_t               len,
7893                       const uint8_t       *data);
7894 void   core_ml__specification__not_equal_layer_params__free_unpacked
7895                      (CoreML__Specification__NotEqualLayerParams *message,
7896                       ProtobufCAllocator *allocator);
7897 /* CoreML__Specification__LogicalAndLayerParams methods */
7898 void   core_ml__specification__logical_and_layer_params__init
7899                      (CoreML__Specification__LogicalAndLayerParams         *message);
7900 size_t core_ml__specification__logical_and_layer_params__get_packed_size
7901                      (const CoreML__Specification__LogicalAndLayerParams   *message);
7902 size_t core_ml__specification__logical_and_layer_params__pack
7903                      (const CoreML__Specification__LogicalAndLayerParams   *message,
7904                       uint8_t             *out);
7905 size_t core_ml__specification__logical_and_layer_params__pack_to_buffer
7906                      (const CoreML__Specification__LogicalAndLayerParams   *message,
7907                       ProtobufCBuffer     *buffer);
7908 CoreML__Specification__LogicalAndLayerParams *
7909        core_ml__specification__logical_and_layer_params__unpack
7910                      (ProtobufCAllocator  *allocator,
7911                       size_t               len,
7912                       const uint8_t       *data);
7913 void   core_ml__specification__logical_and_layer_params__free_unpacked
7914                      (CoreML__Specification__LogicalAndLayerParams *message,
7915                       ProtobufCAllocator *allocator);
7916 /* CoreML__Specification__LogicalOrLayerParams methods */
7917 void   core_ml__specification__logical_or_layer_params__init
7918                      (CoreML__Specification__LogicalOrLayerParams         *message);
7919 size_t core_ml__specification__logical_or_layer_params__get_packed_size
7920                      (const CoreML__Specification__LogicalOrLayerParams   *message);
7921 size_t core_ml__specification__logical_or_layer_params__pack
7922                      (const CoreML__Specification__LogicalOrLayerParams   *message,
7923                       uint8_t             *out);
7924 size_t core_ml__specification__logical_or_layer_params__pack_to_buffer
7925                      (const CoreML__Specification__LogicalOrLayerParams   *message,
7926                       ProtobufCBuffer     *buffer);
7927 CoreML__Specification__LogicalOrLayerParams *
7928        core_ml__specification__logical_or_layer_params__unpack
7929                      (ProtobufCAllocator  *allocator,
7930                       size_t               len,
7931                       const uint8_t       *data);
7932 void   core_ml__specification__logical_or_layer_params__free_unpacked
7933                      (CoreML__Specification__LogicalOrLayerParams *message,
7934                       ProtobufCAllocator *allocator);
7935 /* CoreML__Specification__LogicalXorLayerParams methods */
7936 void   core_ml__specification__logical_xor_layer_params__init
7937                      (CoreML__Specification__LogicalXorLayerParams         *message);
7938 size_t core_ml__specification__logical_xor_layer_params__get_packed_size
7939                      (const CoreML__Specification__LogicalXorLayerParams   *message);
7940 size_t core_ml__specification__logical_xor_layer_params__pack
7941                      (const CoreML__Specification__LogicalXorLayerParams   *message,
7942                       uint8_t             *out);
7943 size_t core_ml__specification__logical_xor_layer_params__pack_to_buffer
7944                      (const CoreML__Specification__LogicalXorLayerParams   *message,
7945                       ProtobufCBuffer     *buffer);
7946 CoreML__Specification__LogicalXorLayerParams *
7947        core_ml__specification__logical_xor_layer_params__unpack
7948                      (ProtobufCAllocator  *allocator,
7949                       size_t               len,
7950                       const uint8_t       *data);
7951 void   core_ml__specification__logical_xor_layer_params__free_unpacked
7952                      (CoreML__Specification__LogicalXorLayerParams *message,
7953                       ProtobufCAllocator *allocator);
7954 /* CoreML__Specification__LogicalNotLayerParams methods */
7955 void   core_ml__specification__logical_not_layer_params__init
7956                      (CoreML__Specification__LogicalNotLayerParams         *message);
7957 size_t core_ml__specification__logical_not_layer_params__get_packed_size
7958                      (const CoreML__Specification__LogicalNotLayerParams   *message);
7959 size_t core_ml__specification__logical_not_layer_params__pack
7960                      (const CoreML__Specification__LogicalNotLayerParams   *message,
7961                       uint8_t             *out);
7962 size_t core_ml__specification__logical_not_layer_params__pack_to_buffer
7963                      (const CoreML__Specification__LogicalNotLayerParams   *message,
7964                       ProtobufCBuffer     *buffer);
7965 CoreML__Specification__LogicalNotLayerParams *
7966        core_ml__specification__logical_not_layer_params__unpack
7967                      (ProtobufCAllocator  *allocator,
7968                       size_t               len,
7969                       const uint8_t       *data);
7970 void   core_ml__specification__logical_not_layer_params__free_unpacked
7971                      (CoreML__Specification__LogicalNotLayerParams *message,
7972                       ProtobufCAllocator *allocator);
7973 /* CoreML__Specification__BorderAmounts__EdgeSizes methods */
7974 void   core_ml__specification__border_amounts__edge_sizes__init
7975                      (CoreML__Specification__BorderAmounts__EdgeSizes         *message);
7976 /* CoreML__Specification__BorderAmounts methods */
7977 void   core_ml__specification__border_amounts__init
7978                      (CoreML__Specification__BorderAmounts         *message);
7979 size_t core_ml__specification__border_amounts__get_packed_size
7980                      (const CoreML__Specification__BorderAmounts   *message);
7981 size_t core_ml__specification__border_amounts__pack
7982                      (const CoreML__Specification__BorderAmounts   *message,
7983                       uint8_t             *out);
7984 size_t core_ml__specification__border_amounts__pack_to_buffer
7985                      (const CoreML__Specification__BorderAmounts   *message,
7986                       ProtobufCBuffer     *buffer);
7987 CoreML__Specification__BorderAmounts *
7988        core_ml__specification__border_amounts__unpack
7989                      (ProtobufCAllocator  *allocator,
7990                       size_t               len,
7991                       const uint8_t       *data);
7992 void   core_ml__specification__border_amounts__free_unpacked
7993                      (CoreML__Specification__BorderAmounts *message,
7994                       ProtobufCAllocator *allocator);
7995 /* CoreML__Specification__ValidPadding methods */
7996 void   core_ml__specification__valid_padding__init
7997                      (CoreML__Specification__ValidPadding         *message);
7998 size_t core_ml__specification__valid_padding__get_packed_size
7999                      (const CoreML__Specification__ValidPadding   *message);
8000 size_t core_ml__specification__valid_padding__pack
8001                      (const CoreML__Specification__ValidPadding   *message,
8002                       uint8_t             *out);
8003 size_t core_ml__specification__valid_padding__pack_to_buffer
8004                      (const CoreML__Specification__ValidPadding   *message,
8005                       ProtobufCBuffer     *buffer);
8006 CoreML__Specification__ValidPadding *
8007        core_ml__specification__valid_padding__unpack
8008                      (ProtobufCAllocator  *allocator,
8009                       size_t               len,
8010                       const uint8_t       *data);
8011 void   core_ml__specification__valid_padding__free_unpacked
8012                      (CoreML__Specification__ValidPadding *message,
8013                       ProtobufCAllocator *allocator);
8014 /* CoreML__Specification__SamePadding methods */
8015 void   core_ml__specification__same_padding__init
8016                      (CoreML__Specification__SamePadding         *message);
8017 size_t core_ml__specification__same_padding__get_packed_size
8018                      (const CoreML__Specification__SamePadding   *message);
8019 size_t core_ml__specification__same_padding__pack
8020                      (const CoreML__Specification__SamePadding   *message,
8021                       uint8_t             *out);
8022 size_t core_ml__specification__same_padding__pack_to_buffer
8023                      (const CoreML__Specification__SamePadding   *message,
8024                       ProtobufCBuffer     *buffer);
8025 CoreML__Specification__SamePadding *
8026        core_ml__specification__same_padding__unpack
8027                      (ProtobufCAllocator  *allocator,
8028                       size_t               len,
8029                       const uint8_t       *data);
8030 void   core_ml__specification__same_padding__free_unpacked
8031                      (CoreML__Specification__SamePadding *message,
8032                       ProtobufCAllocator *allocator);
8033 /* CoreML__Specification__SamplingMode methods */
8034 void   core_ml__specification__sampling_mode__init
8035                      (CoreML__Specification__SamplingMode         *message);
8036 size_t core_ml__specification__sampling_mode__get_packed_size
8037                      (const CoreML__Specification__SamplingMode   *message);
8038 size_t core_ml__specification__sampling_mode__pack
8039                      (const CoreML__Specification__SamplingMode   *message,
8040                       uint8_t             *out);
8041 size_t core_ml__specification__sampling_mode__pack_to_buffer
8042                      (const CoreML__Specification__SamplingMode   *message,
8043                       ProtobufCBuffer     *buffer);
8044 CoreML__Specification__SamplingMode *
8045        core_ml__specification__sampling_mode__unpack
8046                      (ProtobufCAllocator  *allocator,
8047                       size_t               len,
8048                       const uint8_t       *data);
8049 void   core_ml__specification__sampling_mode__free_unpacked
8050                      (CoreML__Specification__SamplingMode *message,
8051                       ProtobufCAllocator *allocator);
8052 /* CoreML__Specification__BoxCoordinatesMode methods */
8053 void   core_ml__specification__box_coordinates_mode__init
8054                      (CoreML__Specification__BoxCoordinatesMode         *message);
8055 size_t core_ml__specification__box_coordinates_mode__get_packed_size
8056                      (const CoreML__Specification__BoxCoordinatesMode   *message);
8057 size_t core_ml__specification__box_coordinates_mode__pack
8058                      (const CoreML__Specification__BoxCoordinatesMode   *message,
8059                       uint8_t             *out);
8060 size_t core_ml__specification__box_coordinates_mode__pack_to_buffer
8061                      (const CoreML__Specification__BoxCoordinatesMode   *message,
8062                       ProtobufCBuffer     *buffer);
8063 CoreML__Specification__BoxCoordinatesMode *
8064        core_ml__specification__box_coordinates_mode__unpack
8065                      (ProtobufCAllocator  *allocator,
8066                       size_t               len,
8067                       const uint8_t       *data);
8068 void   core_ml__specification__box_coordinates_mode__free_unpacked
8069                      (CoreML__Specification__BoxCoordinatesMode *message,
8070                       ProtobufCAllocator *allocator);
8071 /* CoreML__Specification__WeightParams methods */
8072 void   core_ml__specification__weight_params__init
8073                      (CoreML__Specification__WeightParams         *message);
8074 size_t core_ml__specification__weight_params__get_packed_size
8075                      (const CoreML__Specification__WeightParams   *message);
8076 size_t core_ml__specification__weight_params__pack
8077                      (const CoreML__Specification__WeightParams   *message,
8078                       uint8_t             *out);
8079 size_t core_ml__specification__weight_params__pack_to_buffer
8080                      (const CoreML__Specification__WeightParams   *message,
8081                       ProtobufCBuffer     *buffer);
8082 CoreML__Specification__WeightParams *
8083        core_ml__specification__weight_params__unpack
8084                      (ProtobufCAllocator  *allocator,
8085                       size_t               len,
8086                       const uint8_t       *data);
8087 void   core_ml__specification__weight_params__free_unpacked
8088                      (CoreML__Specification__WeightParams *message,
8089                       ProtobufCAllocator *allocator);
8090 /* CoreML__Specification__QuantizationParams methods */
8091 void   core_ml__specification__quantization_params__init
8092                      (CoreML__Specification__QuantizationParams         *message);
8093 size_t core_ml__specification__quantization_params__get_packed_size
8094                      (const CoreML__Specification__QuantizationParams   *message);
8095 size_t core_ml__specification__quantization_params__pack
8096                      (const CoreML__Specification__QuantizationParams   *message,
8097                       uint8_t             *out);
8098 size_t core_ml__specification__quantization_params__pack_to_buffer
8099                      (const CoreML__Specification__QuantizationParams   *message,
8100                       ProtobufCBuffer     *buffer);
8101 CoreML__Specification__QuantizationParams *
8102        core_ml__specification__quantization_params__unpack
8103                      (ProtobufCAllocator  *allocator,
8104                       size_t               len,
8105                       const uint8_t       *data);
8106 void   core_ml__specification__quantization_params__free_unpacked
8107                      (CoreML__Specification__QuantizationParams *message,
8108                       ProtobufCAllocator *allocator);
8109 /* CoreML__Specification__LinearQuantizationParams methods */
8110 void   core_ml__specification__linear_quantization_params__init
8111                      (CoreML__Specification__LinearQuantizationParams         *message);
8112 size_t core_ml__specification__linear_quantization_params__get_packed_size
8113                      (const CoreML__Specification__LinearQuantizationParams   *message);
8114 size_t core_ml__specification__linear_quantization_params__pack
8115                      (const CoreML__Specification__LinearQuantizationParams   *message,
8116                       uint8_t             *out);
8117 size_t core_ml__specification__linear_quantization_params__pack_to_buffer
8118                      (const CoreML__Specification__LinearQuantizationParams   *message,
8119                       ProtobufCBuffer     *buffer);
8120 CoreML__Specification__LinearQuantizationParams *
8121        core_ml__specification__linear_quantization_params__unpack
8122                      (ProtobufCAllocator  *allocator,
8123                       size_t               len,
8124                       const uint8_t       *data);
8125 void   core_ml__specification__linear_quantization_params__free_unpacked
8126                      (CoreML__Specification__LinearQuantizationParams *message,
8127                       ProtobufCAllocator *allocator);
8128 /* CoreML__Specification__LookUpTableQuantizationParams methods */
8129 void   core_ml__specification__look_up_table_quantization_params__init
8130                      (CoreML__Specification__LookUpTableQuantizationParams         *message);
8131 size_t core_ml__specification__look_up_table_quantization_params__get_packed_size
8132                      (const CoreML__Specification__LookUpTableQuantizationParams   *message);
8133 size_t core_ml__specification__look_up_table_quantization_params__pack
8134                      (const CoreML__Specification__LookUpTableQuantizationParams   *message,
8135                       uint8_t             *out);
8136 size_t core_ml__specification__look_up_table_quantization_params__pack_to_buffer
8137                      (const CoreML__Specification__LookUpTableQuantizationParams   *message,
8138                       ProtobufCBuffer     *buffer);
8139 CoreML__Specification__LookUpTableQuantizationParams *
8140        core_ml__specification__look_up_table_quantization_params__unpack
8141                      (ProtobufCAllocator  *allocator,
8142                       size_t               len,
8143                       const uint8_t       *data);
8144 void   core_ml__specification__look_up_table_quantization_params__free_unpacked
8145                      (CoreML__Specification__LookUpTableQuantizationParams *message,
8146                       ProtobufCAllocator *allocator);
8147 /* CoreML__Specification__ConvolutionLayerParams methods */
8148 void   core_ml__specification__convolution_layer_params__init
8149                      (CoreML__Specification__ConvolutionLayerParams         *message);
8150 size_t core_ml__specification__convolution_layer_params__get_packed_size
8151                      (const CoreML__Specification__ConvolutionLayerParams   *message);
8152 size_t core_ml__specification__convolution_layer_params__pack
8153                      (const CoreML__Specification__ConvolutionLayerParams   *message,
8154                       uint8_t             *out);
8155 size_t core_ml__specification__convolution_layer_params__pack_to_buffer
8156                      (const CoreML__Specification__ConvolutionLayerParams   *message,
8157                       ProtobufCBuffer     *buffer);
8158 CoreML__Specification__ConvolutionLayerParams *
8159        core_ml__specification__convolution_layer_params__unpack
8160                      (ProtobufCAllocator  *allocator,
8161                       size_t               len,
8162                       const uint8_t       *data);
8163 void   core_ml__specification__convolution_layer_params__free_unpacked
8164                      (CoreML__Specification__ConvolutionLayerParams *message,
8165                       ProtobufCAllocator *allocator);
8166 /* CoreML__Specification__Convolution3DLayerParams methods */
8167 void   core_ml__specification__convolution3_dlayer_params__init
8168                      (CoreML__Specification__Convolution3DLayerParams         *message);
8169 size_t core_ml__specification__convolution3_dlayer_params__get_packed_size
8170                      (const CoreML__Specification__Convolution3DLayerParams   *message);
8171 size_t core_ml__specification__convolution3_dlayer_params__pack
8172                      (const CoreML__Specification__Convolution3DLayerParams   *message,
8173                       uint8_t             *out);
8174 size_t core_ml__specification__convolution3_dlayer_params__pack_to_buffer
8175                      (const CoreML__Specification__Convolution3DLayerParams   *message,
8176                       ProtobufCBuffer     *buffer);
8177 CoreML__Specification__Convolution3DLayerParams *
8178        core_ml__specification__convolution3_dlayer_params__unpack
8179                      (ProtobufCAllocator  *allocator,
8180                       size_t               len,
8181                       const uint8_t       *data);
8182 void   core_ml__specification__convolution3_dlayer_params__free_unpacked
8183                      (CoreML__Specification__Convolution3DLayerParams *message,
8184                       ProtobufCAllocator *allocator);
8185 /* CoreML__Specification__InnerProductLayerParams methods */
8186 void   core_ml__specification__inner_product_layer_params__init
8187                      (CoreML__Specification__InnerProductLayerParams         *message);
8188 size_t core_ml__specification__inner_product_layer_params__get_packed_size
8189                      (const CoreML__Specification__InnerProductLayerParams   *message);
8190 size_t core_ml__specification__inner_product_layer_params__pack
8191                      (const CoreML__Specification__InnerProductLayerParams   *message,
8192                       uint8_t             *out);
8193 size_t core_ml__specification__inner_product_layer_params__pack_to_buffer
8194                      (const CoreML__Specification__InnerProductLayerParams   *message,
8195                       ProtobufCBuffer     *buffer);
8196 CoreML__Specification__InnerProductLayerParams *
8197        core_ml__specification__inner_product_layer_params__unpack
8198                      (ProtobufCAllocator  *allocator,
8199                       size_t               len,
8200                       const uint8_t       *data);
8201 void   core_ml__specification__inner_product_layer_params__free_unpacked
8202                      (CoreML__Specification__InnerProductLayerParams *message,
8203                       ProtobufCAllocator *allocator);
8204 /* CoreML__Specification__EmbeddingLayerParams methods */
8205 void   core_ml__specification__embedding_layer_params__init
8206                      (CoreML__Specification__EmbeddingLayerParams         *message);
8207 size_t core_ml__specification__embedding_layer_params__get_packed_size
8208                      (const CoreML__Specification__EmbeddingLayerParams   *message);
8209 size_t core_ml__specification__embedding_layer_params__pack
8210                      (const CoreML__Specification__EmbeddingLayerParams   *message,
8211                       uint8_t             *out);
8212 size_t core_ml__specification__embedding_layer_params__pack_to_buffer
8213                      (const CoreML__Specification__EmbeddingLayerParams   *message,
8214                       ProtobufCBuffer     *buffer);
8215 CoreML__Specification__EmbeddingLayerParams *
8216        core_ml__specification__embedding_layer_params__unpack
8217                      (ProtobufCAllocator  *allocator,
8218                       size_t               len,
8219                       const uint8_t       *data);
8220 void   core_ml__specification__embedding_layer_params__free_unpacked
8221                      (CoreML__Specification__EmbeddingLayerParams *message,
8222                       ProtobufCAllocator *allocator);
8223 /* CoreML__Specification__EmbeddingNDLayerParams methods */
8224 void   core_ml__specification__embedding_ndlayer_params__init
8225                      (CoreML__Specification__EmbeddingNDLayerParams         *message);
8226 size_t core_ml__specification__embedding_ndlayer_params__get_packed_size
8227                      (const CoreML__Specification__EmbeddingNDLayerParams   *message);
8228 size_t core_ml__specification__embedding_ndlayer_params__pack
8229                      (const CoreML__Specification__EmbeddingNDLayerParams   *message,
8230                       uint8_t             *out);
8231 size_t core_ml__specification__embedding_ndlayer_params__pack_to_buffer
8232                      (const CoreML__Specification__EmbeddingNDLayerParams   *message,
8233                       ProtobufCBuffer     *buffer);
8234 CoreML__Specification__EmbeddingNDLayerParams *
8235        core_ml__specification__embedding_ndlayer_params__unpack
8236                      (ProtobufCAllocator  *allocator,
8237                       size_t               len,
8238                       const uint8_t       *data);
8239 void   core_ml__specification__embedding_ndlayer_params__free_unpacked
8240                      (CoreML__Specification__EmbeddingNDLayerParams *message,
8241                       ProtobufCAllocator *allocator);
8242 /* CoreML__Specification__BatchnormLayerParams methods */
8243 void   core_ml__specification__batchnorm_layer_params__init
8244                      (CoreML__Specification__BatchnormLayerParams         *message);
8245 size_t core_ml__specification__batchnorm_layer_params__get_packed_size
8246                      (const CoreML__Specification__BatchnormLayerParams   *message);
8247 size_t core_ml__specification__batchnorm_layer_params__pack
8248                      (const CoreML__Specification__BatchnormLayerParams   *message,
8249                       uint8_t             *out);
8250 size_t core_ml__specification__batchnorm_layer_params__pack_to_buffer
8251                      (const CoreML__Specification__BatchnormLayerParams   *message,
8252                       ProtobufCBuffer     *buffer);
8253 CoreML__Specification__BatchnormLayerParams *
8254        core_ml__specification__batchnorm_layer_params__unpack
8255                      (ProtobufCAllocator  *allocator,
8256                       size_t               len,
8257                       const uint8_t       *data);
8258 void   core_ml__specification__batchnorm_layer_params__free_unpacked
8259                      (CoreML__Specification__BatchnormLayerParams *message,
8260                       ProtobufCAllocator *allocator);
8261 /* CoreML__Specification__PoolingLayerParams__ValidCompletePadding methods */
8262 void   core_ml__specification__pooling_layer_params__valid_complete_padding__init
8263                      (CoreML__Specification__PoolingLayerParams__ValidCompletePadding         *message);
8264 /* CoreML__Specification__PoolingLayerParams methods */
8265 void   core_ml__specification__pooling_layer_params__init
8266                      (CoreML__Specification__PoolingLayerParams         *message);
8267 size_t core_ml__specification__pooling_layer_params__get_packed_size
8268                      (const CoreML__Specification__PoolingLayerParams   *message);
8269 size_t core_ml__specification__pooling_layer_params__pack
8270                      (const CoreML__Specification__PoolingLayerParams   *message,
8271                       uint8_t             *out);
8272 size_t core_ml__specification__pooling_layer_params__pack_to_buffer
8273                      (const CoreML__Specification__PoolingLayerParams   *message,
8274                       ProtobufCBuffer     *buffer);
8275 CoreML__Specification__PoolingLayerParams *
8276        core_ml__specification__pooling_layer_params__unpack
8277                      (ProtobufCAllocator  *allocator,
8278                       size_t               len,
8279                       const uint8_t       *data);
8280 void   core_ml__specification__pooling_layer_params__free_unpacked
8281                      (CoreML__Specification__PoolingLayerParams *message,
8282                       ProtobufCAllocator *allocator);
8283 /* CoreML__Specification__Pooling3DLayerParams methods */
8284 void   core_ml__specification__pooling3_dlayer_params__init
8285                      (CoreML__Specification__Pooling3DLayerParams         *message);
8286 size_t core_ml__specification__pooling3_dlayer_params__get_packed_size
8287                      (const CoreML__Specification__Pooling3DLayerParams   *message);
8288 size_t core_ml__specification__pooling3_dlayer_params__pack
8289                      (const CoreML__Specification__Pooling3DLayerParams   *message,
8290                       uint8_t             *out);
8291 size_t core_ml__specification__pooling3_dlayer_params__pack_to_buffer
8292                      (const CoreML__Specification__Pooling3DLayerParams   *message,
8293                       ProtobufCBuffer     *buffer);
8294 CoreML__Specification__Pooling3DLayerParams *
8295        core_ml__specification__pooling3_dlayer_params__unpack
8296                      (ProtobufCAllocator  *allocator,
8297                       size_t               len,
8298                       const uint8_t       *data);
8299 void   core_ml__specification__pooling3_dlayer_params__free_unpacked
8300                      (CoreML__Specification__Pooling3DLayerParams *message,
8301                       ProtobufCAllocator *allocator);
8302 /* CoreML__Specification__GlobalPooling3DLayerParams methods */
8303 void   core_ml__specification__global_pooling3_dlayer_params__init
8304                      (CoreML__Specification__GlobalPooling3DLayerParams         *message);
8305 size_t core_ml__specification__global_pooling3_dlayer_params__get_packed_size
8306                      (const CoreML__Specification__GlobalPooling3DLayerParams   *message);
8307 size_t core_ml__specification__global_pooling3_dlayer_params__pack
8308                      (const CoreML__Specification__GlobalPooling3DLayerParams   *message,
8309                       uint8_t             *out);
8310 size_t core_ml__specification__global_pooling3_dlayer_params__pack_to_buffer
8311                      (const CoreML__Specification__GlobalPooling3DLayerParams   *message,
8312                       ProtobufCBuffer     *buffer);
8313 CoreML__Specification__GlobalPooling3DLayerParams *
8314        core_ml__specification__global_pooling3_dlayer_params__unpack
8315                      (ProtobufCAllocator  *allocator,
8316                       size_t               len,
8317                       const uint8_t       *data);
8318 void   core_ml__specification__global_pooling3_dlayer_params__free_unpacked
8319                      (CoreML__Specification__GlobalPooling3DLayerParams *message,
8320                       ProtobufCAllocator *allocator);
8321 /* CoreML__Specification__PaddingLayerParams__PaddingConstant methods */
8322 void   core_ml__specification__padding_layer_params__padding_constant__init
8323                      (CoreML__Specification__PaddingLayerParams__PaddingConstant         *message);
8324 /* CoreML__Specification__PaddingLayerParams__PaddingReflection methods */
8325 void   core_ml__specification__padding_layer_params__padding_reflection__init
8326                      (CoreML__Specification__PaddingLayerParams__PaddingReflection         *message);
8327 /* CoreML__Specification__PaddingLayerParams__PaddingReplication methods */
8328 void   core_ml__specification__padding_layer_params__padding_replication__init
8329                      (CoreML__Specification__PaddingLayerParams__PaddingReplication         *message);
8330 /* CoreML__Specification__PaddingLayerParams methods */
8331 void   core_ml__specification__padding_layer_params__init
8332                      (CoreML__Specification__PaddingLayerParams         *message);
8333 size_t core_ml__specification__padding_layer_params__get_packed_size
8334                      (const CoreML__Specification__PaddingLayerParams   *message);
8335 size_t core_ml__specification__padding_layer_params__pack
8336                      (const CoreML__Specification__PaddingLayerParams   *message,
8337                       uint8_t             *out);
8338 size_t core_ml__specification__padding_layer_params__pack_to_buffer
8339                      (const CoreML__Specification__PaddingLayerParams   *message,
8340                       ProtobufCBuffer     *buffer);
8341 CoreML__Specification__PaddingLayerParams *
8342        core_ml__specification__padding_layer_params__unpack
8343                      (ProtobufCAllocator  *allocator,
8344                       size_t               len,
8345                       const uint8_t       *data);
8346 void   core_ml__specification__padding_layer_params__free_unpacked
8347                      (CoreML__Specification__PaddingLayerParams *message,
8348                       ProtobufCAllocator *allocator);
8349 /* CoreML__Specification__ConcatLayerParams methods */
8350 void   core_ml__specification__concat_layer_params__init
8351                      (CoreML__Specification__ConcatLayerParams         *message);
8352 size_t core_ml__specification__concat_layer_params__get_packed_size
8353                      (const CoreML__Specification__ConcatLayerParams   *message);
8354 size_t core_ml__specification__concat_layer_params__pack
8355                      (const CoreML__Specification__ConcatLayerParams   *message,
8356                       uint8_t             *out);
8357 size_t core_ml__specification__concat_layer_params__pack_to_buffer
8358                      (const CoreML__Specification__ConcatLayerParams   *message,
8359                       ProtobufCBuffer     *buffer);
8360 CoreML__Specification__ConcatLayerParams *
8361        core_ml__specification__concat_layer_params__unpack
8362                      (ProtobufCAllocator  *allocator,
8363                       size_t               len,
8364                       const uint8_t       *data);
8365 void   core_ml__specification__concat_layer_params__free_unpacked
8366                      (CoreML__Specification__ConcatLayerParams *message,
8367                       ProtobufCAllocator *allocator);
8368 /* CoreML__Specification__LRNLayerParams methods */
8369 void   core_ml__specification__lrnlayer_params__init
8370                      (CoreML__Specification__LRNLayerParams         *message);
8371 size_t core_ml__specification__lrnlayer_params__get_packed_size
8372                      (const CoreML__Specification__LRNLayerParams   *message);
8373 size_t core_ml__specification__lrnlayer_params__pack
8374                      (const CoreML__Specification__LRNLayerParams   *message,
8375                       uint8_t             *out);
8376 size_t core_ml__specification__lrnlayer_params__pack_to_buffer
8377                      (const CoreML__Specification__LRNLayerParams   *message,
8378                       ProtobufCBuffer     *buffer);
8379 CoreML__Specification__LRNLayerParams *
8380        core_ml__specification__lrnlayer_params__unpack
8381                      (ProtobufCAllocator  *allocator,
8382                       size_t               len,
8383                       const uint8_t       *data);
8384 void   core_ml__specification__lrnlayer_params__free_unpacked
8385                      (CoreML__Specification__LRNLayerParams *message,
8386                       ProtobufCAllocator *allocator);
8387 /* CoreML__Specification__SoftmaxLayerParams methods */
8388 void   core_ml__specification__softmax_layer_params__init
8389                      (CoreML__Specification__SoftmaxLayerParams         *message);
8390 size_t core_ml__specification__softmax_layer_params__get_packed_size
8391                      (const CoreML__Specification__SoftmaxLayerParams   *message);
8392 size_t core_ml__specification__softmax_layer_params__pack
8393                      (const CoreML__Specification__SoftmaxLayerParams   *message,
8394                       uint8_t             *out);
8395 size_t core_ml__specification__softmax_layer_params__pack_to_buffer
8396                      (const CoreML__Specification__SoftmaxLayerParams   *message,
8397                       ProtobufCBuffer     *buffer);
8398 CoreML__Specification__SoftmaxLayerParams *
8399        core_ml__specification__softmax_layer_params__unpack
8400                      (ProtobufCAllocator  *allocator,
8401                       size_t               len,
8402                       const uint8_t       *data);
8403 void   core_ml__specification__softmax_layer_params__free_unpacked
8404                      (CoreML__Specification__SoftmaxLayerParams *message,
8405                       ProtobufCAllocator *allocator);
8406 /* CoreML__Specification__SplitLayerParams methods */
8407 void   core_ml__specification__split_layer_params__init
8408                      (CoreML__Specification__SplitLayerParams         *message);
8409 size_t core_ml__specification__split_layer_params__get_packed_size
8410                      (const CoreML__Specification__SplitLayerParams   *message);
8411 size_t core_ml__specification__split_layer_params__pack
8412                      (const CoreML__Specification__SplitLayerParams   *message,
8413                       uint8_t             *out);
8414 size_t core_ml__specification__split_layer_params__pack_to_buffer
8415                      (const CoreML__Specification__SplitLayerParams   *message,
8416                       ProtobufCBuffer     *buffer);
8417 CoreML__Specification__SplitLayerParams *
8418        core_ml__specification__split_layer_params__unpack
8419                      (ProtobufCAllocator  *allocator,
8420                       size_t               len,
8421                       const uint8_t       *data);
8422 void   core_ml__specification__split_layer_params__free_unpacked
8423                      (CoreML__Specification__SplitLayerParams *message,
8424                       ProtobufCAllocator *allocator);
8425 /* CoreML__Specification__AddLayerParams methods */
8426 void   core_ml__specification__add_layer_params__init
8427                      (CoreML__Specification__AddLayerParams         *message);
8428 size_t core_ml__specification__add_layer_params__get_packed_size
8429                      (const CoreML__Specification__AddLayerParams   *message);
8430 size_t core_ml__specification__add_layer_params__pack
8431                      (const CoreML__Specification__AddLayerParams   *message,
8432                       uint8_t             *out);
8433 size_t core_ml__specification__add_layer_params__pack_to_buffer
8434                      (const CoreML__Specification__AddLayerParams   *message,
8435                       ProtobufCBuffer     *buffer);
8436 CoreML__Specification__AddLayerParams *
8437        core_ml__specification__add_layer_params__unpack
8438                      (ProtobufCAllocator  *allocator,
8439                       size_t               len,
8440                       const uint8_t       *data);
8441 void   core_ml__specification__add_layer_params__free_unpacked
8442                      (CoreML__Specification__AddLayerParams *message,
8443                       ProtobufCAllocator *allocator);
8444 /* CoreML__Specification__MultiplyLayerParams methods */
8445 void   core_ml__specification__multiply_layer_params__init
8446                      (CoreML__Specification__MultiplyLayerParams         *message);
8447 size_t core_ml__specification__multiply_layer_params__get_packed_size
8448                      (const CoreML__Specification__MultiplyLayerParams   *message);
8449 size_t core_ml__specification__multiply_layer_params__pack
8450                      (const CoreML__Specification__MultiplyLayerParams   *message,
8451                       uint8_t             *out);
8452 size_t core_ml__specification__multiply_layer_params__pack_to_buffer
8453                      (const CoreML__Specification__MultiplyLayerParams   *message,
8454                       ProtobufCBuffer     *buffer);
8455 CoreML__Specification__MultiplyLayerParams *
8456        core_ml__specification__multiply_layer_params__unpack
8457                      (ProtobufCAllocator  *allocator,
8458                       size_t               len,
8459                       const uint8_t       *data);
8460 void   core_ml__specification__multiply_layer_params__free_unpacked
8461                      (CoreML__Specification__MultiplyLayerParams *message,
8462                       ProtobufCAllocator *allocator);
8463 /* CoreML__Specification__UnaryFunctionLayerParams methods */
8464 void   core_ml__specification__unary_function_layer_params__init
8465                      (CoreML__Specification__UnaryFunctionLayerParams         *message);
8466 size_t core_ml__specification__unary_function_layer_params__get_packed_size
8467                      (const CoreML__Specification__UnaryFunctionLayerParams   *message);
8468 size_t core_ml__specification__unary_function_layer_params__pack
8469                      (const CoreML__Specification__UnaryFunctionLayerParams   *message,
8470                       uint8_t             *out);
8471 size_t core_ml__specification__unary_function_layer_params__pack_to_buffer
8472                      (const CoreML__Specification__UnaryFunctionLayerParams   *message,
8473                       ProtobufCBuffer     *buffer);
8474 CoreML__Specification__UnaryFunctionLayerParams *
8475        core_ml__specification__unary_function_layer_params__unpack
8476                      (ProtobufCAllocator  *allocator,
8477                       size_t               len,
8478                       const uint8_t       *data);
8479 void   core_ml__specification__unary_function_layer_params__free_unpacked
8480                      (CoreML__Specification__UnaryFunctionLayerParams *message,
8481                       ProtobufCAllocator *allocator);
8482 /* CoreML__Specification__UpsampleLayerParams methods */
8483 void   core_ml__specification__upsample_layer_params__init
8484                      (CoreML__Specification__UpsampleLayerParams         *message);
8485 size_t core_ml__specification__upsample_layer_params__get_packed_size
8486                      (const CoreML__Specification__UpsampleLayerParams   *message);
8487 size_t core_ml__specification__upsample_layer_params__pack
8488                      (const CoreML__Specification__UpsampleLayerParams   *message,
8489                       uint8_t             *out);
8490 size_t core_ml__specification__upsample_layer_params__pack_to_buffer
8491                      (const CoreML__Specification__UpsampleLayerParams   *message,
8492                       ProtobufCBuffer     *buffer);
8493 CoreML__Specification__UpsampleLayerParams *
8494        core_ml__specification__upsample_layer_params__unpack
8495                      (ProtobufCAllocator  *allocator,
8496                       size_t               len,
8497                       const uint8_t       *data);
8498 void   core_ml__specification__upsample_layer_params__free_unpacked
8499                      (CoreML__Specification__UpsampleLayerParams *message,
8500                       ProtobufCAllocator *allocator);
8501 /* CoreML__Specification__ResizeBilinearLayerParams methods */
8502 void   core_ml__specification__resize_bilinear_layer_params__init
8503                      (CoreML__Specification__ResizeBilinearLayerParams         *message);
8504 size_t core_ml__specification__resize_bilinear_layer_params__get_packed_size
8505                      (const CoreML__Specification__ResizeBilinearLayerParams   *message);
8506 size_t core_ml__specification__resize_bilinear_layer_params__pack
8507                      (const CoreML__Specification__ResizeBilinearLayerParams   *message,
8508                       uint8_t             *out);
8509 size_t core_ml__specification__resize_bilinear_layer_params__pack_to_buffer
8510                      (const CoreML__Specification__ResizeBilinearLayerParams   *message,
8511                       ProtobufCBuffer     *buffer);
8512 CoreML__Specification__ResizeBilinearLayerParams *
8513        core_ml__specification__resize_bilinear_layer_params__unpack
8514                      (ProtobufCAllocator  *allocator,
8515                       size_t               len,
8516                       const uint8_t       *data);
8517 void   core_ml__specification__resize_bilinear_layer_params__free_unpacked
8518                      (CoreML__Specification__ResizeBilinearLayerParams *message,
8519                       ProtobufCAllocator *allocator);
8520 /* CoreML__Specification__CropResizeLayerParams methods */
8521 void   core_ml__specification__crop_resize_layer_params__init
8522                      (CoreML__Specification__CropResizeLayerParams         *message);
8523 size_t core_ml__specification__crop_resize_layer_params__get_packed_size
8524                      (const CoreML__Specification__CropResizeLayerParams   *message);
8525 size_t core_ml__specification__crop_resize_layer_params__pack
8526                      (const CoreML__Specification__CropResizeLayerParams   *message,
8527                       uint8_t             *out);
8528 size_t core_ml__specification__crop_resize_layer_params__pack_to_buffer
8529                      (const CoreML__Specification__CropResizeLayerParams   *message,
8530                       ProtobufCBuffer     *buffer);
8531 CoreML__Specification__CropResizeLayerParams *
8532        core_ml__specification__crop_resize_layer_params__unpack
8533                      (ProtobufCAllocator  *allocator,
8534                       size_t               len,
8535                       const uint8_t       *data);
8536 void   core_ml__specification__crop_resize_layer_params__free_unpacked
8537                      (CoreML__Specification__CropResizeLayerParams *message,
8538                       ProtobufCAllocator *allocator);
8539 /* CoreML__Specification__BiasLayerParams methods */
8540 void   core_ml__specification__bias_layer_params__init
8541                      (CoreML__Specification__BiasLayerParams         *message);
8542 size_t core_ml__specification__bias_layer_params__get_packed_size
8543                      (const CoreML__Specification__BiasLayerParams   *message);
8544 size_t core_ml__specification__bias_layer_params__pack
8545                      (const CoreML__Specification__BiasLayerParams   *message,
8546                       uint8_t             *out);
8547 size_t core_ml__specification__bias_layer_params__pack_to_buffer
8548                      (const CoreML__Specification__BiasLayerParams   *message,
8549                       ProtobufCBuffer     *buffer);
8550 CoreML__Specification__BiasLayerParams *
8551        core_ml__specification__bias_layer_params__unpack
8552                      (ProtobufCAllocator  *allocator,
8553                       size_t               len,
8554                       const uint8_t       *data);
8555 void   core_ml__specification__bias_layer_params__free_unpacked
8556                      (CoreML__Specification__BiasLayerParams *message,
8557                       ProtobufCAllocator *allocator);
8558 /* CoreML__Specification__ScaleLayerParams methods */
8559 void   core_ml__specification__scale_layer_params__init
8560                      (CoreML__Specification__ScaleLayerParams         *message);
8561 size_t core_ml__specification__scale_layer_params__get_packed_size
8562                      (const CoreML__Specification__ScaleLayerParams   *message);
8563 size_t core_ml__specification__scale_layer_params__pack
8564                      (const CoreML__Specification__ScaleLayerParams   *message,
8565                       uint8_t             *out);
8566 size_t core_ml__specification__scale_layer_params__pack_to_buffer
8567                      (const CoreML__Specification__ScaleLayerParams   *message,
8568                       ProtobufCBuffer     *buffer);
8569 CoreML__Specification__ScaleLayerParams *
8570        core_ml__specification__scale_layer_params__unpack
8571                      (ProtobufCAllocator  *allocator,
8572                       size_t               len,
8573                       const uint8_t       *data);
8574 void   core_ml__specification__scale_layer_params__free_unpacked
8575                      (CoreML__Specification__ScaleLayerParams *message,
8576                       ProtobufCAllocator *allocator);
8577 /* CoreML__Specification__LoadConstantLayerParams methods */
8578 void   core_ml__specification__load_constant_layer_params__init
8579                      (CoreML__Specification__LoadConstantLayerParams         *message);
8580 size_t core_ml__specification__load_constant_layer_params__get_packed_size
8581                      (const CoreML__Specification__LoadConstantLayerParams   *message);
8582 size_t core_ml__specification__load_constant_layer_params__pack
8583                      (const CoreML__Specification__LoadConstantLayerParams   *message,
8584                       uint8_t             *out);
8585 size_t core_ml__specification__load_constant_layer_params__pack_to_buffer
8586                      (const CoreML__Specification__LoadConstantLayerParams   *message,
8587                       ProtobufCBuffer     *buffer);
8588 CoreML__Specification__LoadConstantLayerParams *
8589        core_ml__specification__load_constant_layer_params__unpack
8590                      (ProtobufCAllocator  *allocator,
8591                       size_t               len,
8592                       const uint8_t       *data);
8593 void   core_ml__specification__load_constant_layer_params__free_unpacked
8594                      (CoreML__Specification__LoadConstantLayerParams *message,
8595                       ProtobufCAllocator *allocator);
8596 /* CoreML__Specification__L2NormalizeLayerParams methods */
8597 void   core_ml__specification__l2_normalize_layer_params__init
8598                      (CoreML__Specification__L2NormalizeLayerParams         *message);
8599 size_t core_ml__specification__l2_normalize_layer_params__get_packed_size
8600                      (const CoreML__Specification__L2NormalizeLayerParams   *message);
8601 size_t core_ml__specification__l2_normalize_layer_params__pack
8602                      (const CoreML__Specification__L2NormalizeLayerParams   *message,
8603                       uint8_t             *out);
8604 size_t core_ml__specification__l2_normalize_layer_params__pack_to_buffer
8605                      (const CoreML__Specification__L2NormalizeLayerParams   *message,
8606                       ProtobufCBuffer     *buffer);
8607 CoreML__Specification__L2NormalizeLayerParams *
8608        core_ml__specification__l2_normalize_layer_params__unpack
8609                      (ProtobufCAllocator  *allocator,
8610                       size_t               len,
8611                       const uint8_t       *data);
8612 void   core_ml__specification__l2_normalize_layer_params__free_unpacked
8613                      (CoreML__Specification__L2NormalizeLayerParams *message,
8614                       ProtobufCAllocator *allocator);
8615 /* CoreML__Specification__FlattenLayerParams methods */
8616 void   core_ml__specification__flatten_layer_params__init
8617                      (CoreML__Specification__FlattenLayerParams         *message);
8618 size_t core_ml__specification__flatten_layer_params__get_packed_size
8619                      (const CoreML__Specification__FlattenLayerParams   *message);
8620 size_t core_ml__specification__flatten_layer_params__pack
8621                      (const CoreML__Specification__FlattenLayerParams   *message,
8622                       uint8_t             *out);
8623 size_t core_ml__specification__flatten_layer_params__pack_to_buffer
8624                      (const CoreML__Specification__FlattenLayerParams   *message,
8625                       ProtobufCBuffer     *buffer);
8626 CoreML__Specification__FlattenLayerParams *
8627        core_ml__specification__flatten_layer_params__unpack
8628                      (ProtobufCAllocator  *allocator,
8629                       size_t               len,
8630                       const uint8_t       *data);
8631 void   core_ml__specification__flatten_layer_params__free_unpacked
8632                      (CoreML__Specification__FlattenLayerParams *message,
8633                       ProtobufCAllocator *allocator);
8634 /* CoreML__Specification__ReshapeLayerParams methods */
8635 void   core_ml__specification__reshape_layer_params__init
8636                      (CoreML__Specification__ReshapeLayerParams         *message);
8637 size_t core_ml__specification__reshape_layer_params__get_packed_size
8638                      (const CoreML__Specification__ReshapeLayerParams   *message);
8639 size_t core_ml__specification__reshape_layer_params__pack
8640                      (const CoreML__Specification__ReshapeLayerParams   *message,
8641                       uint8_t             *out);
8642 size_t core_ml__specification__reshape_layer_params__pack_to_buffer
8643                      (const CoreML__Specification__ReshapeLayerParams   *message,
8644                       ProtobufCBuffer     *buffer);
8645 CoreML__Specification__ReshapeLayerParams *
8646        core_ml__specification__reshape_layer_params__unpack
8647                      (ProtobufCAllocator  *allocator,
8648                       size_t               len,
8649                       const uint8_t       *data);
8650 void   core_ml__specification__reshape_layer_params__free_unpacked
8651                      (CoreML__Specification__ReshapeLayerParams *message,
8652                       ProtobufCAllocator *allocator);
8653 /* CoreML__Specification__PermuteLayerParams methods */
8654 void   core_ml__specification__permute_layer_params__init
8655                      (CoreML__Specification__PermuteLayerParams         *message);
8656 size_t core_ml__specification__permute_layer_params__get_packed_size
8657                      (const CoreML__Specification__PermuteLayerParams   *message);
8658 size_t core_ml__specification__permute_layer_params__pack
8659                      (const CoreML__Specification__PermuteLayerParams   *message,
8660                       uint8_t             *out);
8661 size_t core_ml__specification__permute_layer_params__pack_to_buffer
8662                      (const CoreML__Specification__PermuteLayerParams   *message,
8663                       ProtobufCBuffer     *buffer);
8664 CoreML__Specification__PermuteLayerParams *
8665        core_ml__specification__permute_layer_params__unpack
8666                      (ProtobufCAllocator  *allocator,
8667                       size_t               len,
8668                       const uint8_t       *data);
8669 void   core_ml__specification__permute_layer_params__free_unpacked
8670                      (CoreML__Specification__PermuteLayerParams *message,
8671                       ProtobufCAllocator *allocator);
8672 /* CoreML__Specification__ReorganizeDataLayerParams methods */
8673 void   core_ml__specification__reorganize_data_layer_params__init
8674                      (CoreML__Specification__ReorganizeDataLayerParams         *message);
8675 size_t core_ml__specification__reorganize_data_layer_params__get_packed_size
8676                      (const CoreML__Specification__ReorganizeDataLayerParams   *message);
8677 size_t core_ml__specification__reorganize_data_layer_params__pack
8678                      (const CoreML__Specification__ReorganizeDataLayerParams   *message,
8679                       uint8_t             *out);
8680 size_t core_ml__specification__reorganize_data_layer_params__pack_to_buffer
8681                      (const CoreML__Specification__ReorganizeDataLayerParams   *message,
8682                       ProtobufCBuffer     *buffer);
8683 CoreML__Specification__ReorganizeDataLayerParams *
8684        core_ml__specification__reorganize_data_layer_params__unpack
8685                      (ProtobufCAllocator  *allocator,
8686                       size_t               len,
8687                       const uint8_t       *data);
8688 void   core_ml__specification__reorganize_data_layer_params__free_unpacked
8689                      (CoreML__Specification__ReorganizeDataLayerParams *message,
8690                       ProtobufCAllocator *allocator);
8691 /* CoreML__Specification__SliceLayerParams methods */
8692 void   core_ml__specification__slice_layer_params__init
8693                      (CoreML__Specification__SliceLayerParams         *message);
8694 size_t core_ml__specification__slice_layer_params__get_packed_size
8695                      (const CoreML__Specification__SliceLayerParams   *message);
8696 size_t core_ml__specification__slice_layer_params__pack
8697                      (const CoreML__Specification__SliceLayerParams   *message,
8698                       uint8_t             *out);
8699 size_t core_ml__specification__slice_layer_params__pack_to_buffer
8700                      (const CoreML__Specification__SliceLayerParams   *message,
8701                       ProtobufCBuffer     *buffer);
8702 CoreML__Specification__SliceLayerParams *
8703        core_ml__specification__slice_layer_params__unpack
8704                      (ProtobufCAllocator  *allocator,
8705                       size_t               len,
8706                       const uint8_t       *data);
8707 void   core_ml__specification__slice_layer_params__free_unpacked
8708                      (CoreML__Specification__SliceLayerParams *message,
8709                       ProtobufCAllocator *allocator);
8710 /* CoreML__Specification__ReduceLayerParams methods */
8711 void   core_ml__specification__reduce_layer_params__init
8712                      (CoreML__Specification__ReduceLayerParams         *message);
8713 size_t core_ml__specification__reduce_layer_params__get_packed_size
8714                      (const CoreML__Specification__ReduceLayerParams   *message);
8715 size_t core_ml__specification__reduce_layer_params__pack
8716                      (const CoreML__Specification__ReduceLayerParams   *message,
8717                       uint8_t             *out);
8718 size_t core_ml__specification__reduce_layer_params__pack_to_buffer
8719                      (const CoreML__Specification__ReduceLayerParams   *message,
8720                       ProtobufCBuffer     *buffer);
8721 CoreML__Specification__ReduceLayerParams *
8722        core_ml__specification__reduce_layer_params__unpack
8723                      (ProtobufCAllocator  *allocator,
8724                       size_t               len,
8725                       const uint8_t       *data);
8726 void   core_ml__specification__reduce_layer_params__free_unpacked
8727                      (CoreML__Specification__ReduceLayerParams *message,
8728                       ProtobufCAllocator *allocator);
8729 /* CoreML__Specification__CropLayerParams methods */
8730 void   core_ml__specification__crop_layer_params__init
8731                      (CoreML__Specification__CropLayerParams         *message);
8732 size_t core_ml__specification__crop_layer_params__get_packed_size
8733                      (const CoreML__Specification__CropLayerParams   *message);
8734 size_t core_ml__specification__crop_layer_params__pack
8735                      (const CoreML__Specification__CropLayerParams   *message,
8736                       uint8_t             *out);
8737 size_t core_ml__specification__crop_layer_params__pack_to_buffer
8738                      (const CoreML__Specification__CropLayerParams   *message,
8739                       ProtobufCBuffer     *buffer);
8740 CoreML__Specification__CropLayerParams *
8741        core_ml__specification__crop_layer_params__unpack
8742                      (ProtobufCAllocator  *allocator,
8743                       size_t               len,
8744                       const uint8_t       *data);
8745 void   core_ml__specification__crop_layer_params__free_unpacked
8746                      (CoreML__Specification__CropLayerParams *message,
8747                       ProtobufCAllocator *allocator);
8748 /* CoreML__Specification__AverageLayerParams methods */
8749 void   core_ml__specification__average_layer_params__init
8750                      (CoreML__Specification__AverageLayerParams         *message);
8751 size_t core_ml__specification__average_layer_params__get_packed_size
8752                      (const CoreML__Specification__AverageLayerParams   *message);
8753 size_t core_ml__specification__average_layer_params__pack
8754                      (const CoreML__Specification__AverageLayerParams   *message,
8755                       uint8_t             *out);
8756 size_t core_ml__specification__average_layer_params__pack_to_buffer
8757                      (const CoreML__Specification__AverageLayerParams   *message,
8758                       ProtobufCBuffer     *buffer);
8759 CoreML__Specification__AverageLayerParams *
8760        core_ml__specification__average_layer_params__unpack
8761                      (ProtobufCAllocator  *allocator,
8762                       size_t               len,
8763                       const uint8_t       *data);
8764 void   core_ml__specification__average_layer_params__free_unpacked
8765                      (CoreML__Specification__AverageLayerParams *message,
8766                       ProtobufCAllocator *allocator);
8767 /* CoreML__Specification__MaxLayerParams methods */
8768 void   core_ml__specification__max_layer_params__init
8769                      (CoreML__Specification__MaxLayerParams         *message);
8770 size_t core_ml__specification__max_layer_params__get_packed_size
8771                      (const CoreML__Specification__MaxLayerParams   *message);
8772 size_t core_ml__specification__max_layer_params__pack
8773                      (const CoreML__Specification__MaxLayerParams   *message,
8774                       uint8_t             *out);
8775 size_t core_ml__specification__max_layer_params__pack_to_buffer
8776                      (const CoreML__Specification__MaxLayerParams   *message,
8777                       ProtobufCBuffer     *buffer);
8778 CoreML__Specification__MaxLayerParams *
8779        core_ml__specification__max_layer_params__unpack
8780                      (ProtobufCAllocator  *allocator,
8781                       size_t               len,
8782                       const uint8_t       *data);
8783 void   core_ml__specification__max_layer_params__free_unpacked
8784                      (CoreML__Specification__MaxLayerParams *message,
8785                       ProtobufCAllocator *allocator);
8786 /* CoreML__Specification__MinLayerParams methods */
8787 void   core_ml__specification__min_layer_params__init
8788                      (CoreML__Specification__MinLayerParams         *message);
8789 size_t core_ml__specification__min_layer_params__get_packed_size
8790                      (const CoreML__Specification__MinLayerParams   *message);
8791 size_t core_ml__specification__min_layer_params__pack
8792                      (const CoreML__Specification__MinLayerParams   *message,
8793                       uint8_t             *out);
8794 size_t core_ml__specification__min_layer_params__pack_to_buffer
8795                      (const CoreML__Specification__MinLayerParams   *message,
8796                       ProtobufCBuffer     *buffer);
8797 CoreML__Specification__MinLayerParams *
8798        core_ml__specification__min_layer_params__unpack
8799                      (ProtobufCAllocator  *allocator,
8800                       size_t               len,
8801                       const uint8_t       *data);
8802 void   core_ml__specification__min_layer_params__free_unpacked
8803                      (CoreML__Specification__MinLayerParams *message,
8804                       ProtobufCAllocator *allocator);
8805 /* CoreML__Specification__DotProductLayerParams methods */
8806 void   core_ml__specification__dot_product_layer_params__init
8807                      (CoreML__Specification__DotProductLayerParams         *message);
8808 size_t core_ml__specification__dot_product_layer_params__get_packed_size
8809                      (const CoreML__Specification__DotProductLayerParams   *message);
8810 size_t core_ml__specification__dot_product_layer_params__pack
8811                      (const CoreML__Specification__DotProductLayerParams   *message,
8812                       uint8_t             *out);
8813 size_t core_ml__specification__dot_product_layer_params__pack_to_buffer
8814                      (const CoreML__Specification__DotProductLayerParams   *message,
8815                       ProtobufCBuffer     *buffer);
8816 CoreML__Specification__DotProductLayerParams *
8817        core_ml__specification__dot_product_layer_params__unpack
8818                      (ProtobufCAllocator  *allocator,
8819                       size_t               len,
8820                       const uint8_t       *data);
8821 void   core_ml__specification__dot_product_layer_params__free_unpacked
8822                      (CoreML__Specification__DotProductLayerParams *message,
8823                       ProtobufCAllocator *allocator);
8824 /* CoreML__Specification__MeanVarianceNormalizeLayerParams methods */
8825 void   core_ml__specification__mean_variance_normalize_layer_params__init
8826                      (CoreML__Specification__MeanVarianceNormalizeLayerParams         *message);
8827 size_t core_ml__specification__mean_variance_normalize_layer_params__get_packed_size
8828                      (const CoreML__Specification__MeanVarianceNormalizeLayerParams   *message);
8829 size_t core_ml__specification__mean_variance_normalize_layer_params__pack
8830                      (const CoreML__Specification__MeanVarianceNormalizeLayerParams   *message,
8831                       uint8_t             *out);
8832 size_t core_ml__specification__mean_variance_normalize_layer_params__pack_to_buffer
8833                      (const CoreML__Specification__MeanVarianceNormalizeLayerParams   *message,
8834                       ProtobufCBuffer     *buffer);
8835 CoreML__Specification__MeanVarianceNormalizeLayerParams *
8836        core_ml__specification__mean_variance_normalize_layer_params__unpack
8837                      (ProtobufCAllocator  *allocator,
8838                       size_t               len,
8839                       const uint8_t       *data);
8840 void   core_ml__specification__mean_variance_normalize_layer_params__free_unpacked
8841                      (CoreML__Specification__MeanVarianceNormalizeLayerParams *message,
8842                       ProtobufCAllocator *allocator);
8843 /* CoreML__Specification__SequenceRepeatLayerParams methods */
8844 void   core_ml__specification__sequence_repeat_layer_params__init
8845                      (CoreML__Specification__SequenceRepeatLayerParams         *message);
8846 size_t core_ml__specification__sequence_repeat_layer_params__get_packed_size
8847                      (const CoreML__Specification__SequenceRepeatLayerParams   *message);
8848 size_t core_ml__specification__sequence_repeat_layer_params__pack
8849                      (const CoreML__Specification__SequenceRepeatLayerParams   *message,
8850                       uint8_t             *out);
8851 size_t core_ml__specification__sequence_repeat_layer_params__pack_to_buffer
8852                      (const CoreML__Specification__SequenceRepeatLayerParams   *message,
8853                       ProtobufCBuffer     *buffer);
8854 CoreML__Specification__SequenceRepeatLayerParams *
8855        core_ml__specification__sequence_repeat_layer_params__unpack
8856                      (ProtobufCAllocator  *allocator,
8857                       size_t               len,
8858                       const uint8_t       *data);
8859 void   core_ml__specification__sequence_repeat_layer_params__free_unpacked
8860                      (CoreML__Specification__SequenceRepeatLayerParams *message,
8861                       ProtobufCAllocator *allocator);
8862 /* CoreML__Specification__SimpleRecurrentLayerParams methods */
8863 void   core_ml__specification__simple_recurrent_layer_params__init
8864                      (CoreML__Specification__SimpleRecurrentLayerParams         *message);
8865 size_t core_ml__specification__simple_recurrent_layer_params__get_packed_size
8866                      (const CoreML__Specification__SimpleRecurrentLayerParams   *message);
8867 size_t core_ml__specification__simple_recurrent_layer_params__pack
8868                      (const CoreML__Specification__SimpleRecurrentLayerParams   *message,
8869                       uint8_t             *out);
8870 size_t core_ml__specification__simple_recurrent_layer_params__pack_to_buffer
8871                      (const CoreML__Specification__SimpleRecurrentLayerParams   *message,
8872                       ProtobufCBuffer     *buffer);
8873 CoreML__Specification__SimpleRecurrentLayerParams *
8874        core_ml__specification__simple_recurrent_layer_params__unpack
8875                      (ProtobufCAllocator  *allocator,
8876                       size_t               len,
8877                       const uint8_t       *data);
8878 void   core_ml__specification__simple_recurrent_layer_params__free_unpacked
8879                      (CoreML__Specification__SimpleRecurrentLayerParams *message,
8880                       ProtobufCAllocator *allocator);
8881 /* CoreML__Specification__GRULayerParams methods */
8882 void   core_ml__specification__grulayer_params__init
8883                      (CoreML__Specification__GRULayerParams         *message);
8884 size_t core_ml__specification__grulayer_params__get_packed_size
8885                      (const CoreML__Specification__GRULayerParams   *message);
8886 size_t core_ml__specification__grulayer_params__pack
8887                      (const CoreML__Specification__GRULayerParams   *message,
8888                       uint8_t             *out);
8889 size_t core_ml__specification__grulayer_params__pack_to_buffer
8890                      (const CoreML__Specification__GRULayerParams   *message,
8891                       ProtobufCBuffer     *buffer);
8892 CoreML__Specification__GRULayerParams *
8893        core_ml__specification__grulayer_params__unpack
8894                      (ProtobufCAllocator  *allocator,
8895                       size_t               len,
8896                       const uint8_t       *data);
8897 void   core_ml__specification__grulayer_params__free_unpacked
8898                      (CoreML__Specification__GRULayerParams *message,
8899                       ProtobufCAllocator *allocator);
8900 /* CoreML__Specification__LSTMParams methods */
8901 void   core_ml__specification__lstmparams__init
8902                      (CoreML__Specification__LSTMParams         *message);
8903 size_t core_ml__specification__lstmparams__get_packed_size
8904                      (const CoreML__Specification__LSTMParams   *message);
8905 size_t core_ml__specification__lstmparams__pack
8906                      (const CoreML__Specification__LSTMParams   *message,
8907                       uint8_t             *out);
8908 size_t core_ml__specification__lstmparams__pack_to_buffer
8909                      (const CoreML__Specification__LSTMParams   *message,
8910                       ProtobufCBuffer     *buffer);
8911 CoreML__Specification__LSTMParams *
8912        core_ml__specification__lstmparams__unpack
8913                      (ProtobufCAllocator  *allocator,
8914                       size_t               len,
8915                       const uint8_t       *data);
8916 void   core_ml__specification__lstmparams__free_unpacked
8917                      (CoreML__Specification__LSTMParams *message,
8918                       ProtobufCAllocator *allocator);
8919 /* CoreML__Specification__LSTMWeightParams methods */
8920 void   core_ml__specification__lstmweight_params__init
8921                      (CoreML__Specification__LSTMWeightParams         *message);
8922 size_t core_ml__specification__lstmweight_params__get_packed_size
8923                      (const CoreML__Specification__LSTMWeightParams   *message);
8924 size_t core_ml__specification__lstmweight_params__pack
8925                      (const CoreML__Specification__LSTMWeightParams   *message,
8926                       uint8_t             *out);
8927 size_t core_ml__specification__lstmweight_params__pack_to_buffer
8928                      (const CoreML__Specification__LSTMWeightParams   *message,
8929                       ProtobufCBuffer     *buffer);
8930 CoreML__Specification__LSTMWeightParams *
8931        core_ml__specification__lstmweight_params__unpack
8932                      (ProtobufCAllocator  *allocator,
8933                       size_t               len,
8934                       const uint8_t       *data);
8935 void   core_ml__specification__lstmweight_params__free_unpacked
8936                      (CoreML__Specification__LSTMWeightParams *message,
8937                       ProtobufCAllocator *allocator);
8938 /* CoreML__Specification__UniDirectionalLSTMLayerParams methods */
8939 void   core_ml__specification__uni_directional_lstmlayer_params__init
8940                      (CoreML__Specification__UniDirectionalLSTMLayerParams         *message);
8941 size_t core_ml__specification__uni_directional_lstmlayer_params__get_packed_size
8942                      (const CoreML__Specification__UniDirectionalLSTMLayerParams   *message);
8943 size_t core_ml__specification__uni_directional_lstmlayer_params__pack
8944                      (const CoreML__Specification__UniDirectionalLSTMLayerParams   *message,
8945                       uint8_t             *out);
8946 size_t core_ml__specification__uni_directional_lstmlayer_params__pack_to_buffer
8947                      (const CoreML__Specification__UniDirectionalLSTMLayerParams   *message,
8948                       ProtobufCBuffer     *buffer);
8949 CoreML__Specification__UniDirectionalLSTMLayerParams *
8950        core_ml__specification__uni_directional_lstmlayer_params__unpack
8951                      (ProtobufCAllocator  *allocator,
8952                       size_t               len,
8953                       const uint8_t       *data);
8954 void   core_ml__specification__uni_directional_lstmlayer_params__free_unpacked
8955                      (CoreML__Specification__UniDirectionalLSTMLayerParams *message,
8956                       ProtobufCAllocator *allocator);
8957 /* CoreML__Specification__BiDirectionalLSTMLayerParams methods */
8958 void   core_ml__specification__bi_directional_lstmlayer_params__init
8959                      (CoreML__Specification__BiDirectionalLSTMLayerParams         *message);
8960 size_t core_ml__specification__bi_directional_lstmlayer_params__get_packed_size
8961                      (const CoreML__Specification__BiDirectionalLSTMLayerParams   *message);
8962 size_t core_ml__specification__bi_directional_lstmlayer_params__pack
8963                      (const CoreML__Specification__BiDirectionalLSTMLayerParams   *message,
8964                       uint8_t             *out);
8965 size_t core_ml__specification__bi_directional_lstmlayer_params__pack_to_buffer
8966                      (const CoreML__Specification__BiDirectionalLSTMLayerParams   *message,
8967                       ProtobufCBuffer     *buffer);
8968 CoreML__Specification__BiDirectionalLSTMLayerParams *
8969        core_ml__specification__bi_directional_lstmlayer_params__unpack
8970                      (ProtobufCAllocator  *allocator,
8971                       size_t               len,
8972                       const uint8_t       *data);
8973 void   core_ml__specification__bi_directional_lstmlayer_params__free_unpacked
8974                      (CoreML__Specification__BiDirectionalLSTMLayerParams *message,
8975                       ProtobufCAllocator *allocator);
8976 /* CoreML__Specification__CustomLayerParams__CustomLayerParamValue methods */
8977 void   core_ml__specification__custom_layer_params__custom_layer_param_value__init
8978                      (CoreML__Specification__CustomLayerParams__CustomLayerParamValue         *message);
8979 /* CoreML__Specification__CustomLayerParams__ParametersEntry methods */
8980 void   core_ml__specification__custom_layer_params__parameters_entry__init
8981                      (CoreML__Specification__CustomLayerParams__ParametersEntry         *message);
8982 /* CoreML__Specification__CustomLayerParams methods */
8983 void   core_ml__specification__custom_layer_params__init
8984                      (CoreML__Specification__CustomLayerParams         *message);
8985 size_t core_ml__specification__custom_layer_params__get_packed_size
8986                      (const CoreML__Specification__CustomLayerParams   *message);
8987 size_t core_ml__specification__custom_layer_params__pack
8988                      (const CoreML__Specification__CustomLayerParams   *message,
8989                       uint8_t             *out);
8990 size_t core_ml__specification__custom_layer_params__pack_to_buffer
8991                      (const CoreML__Specification__CustomLayerParams   *message,
8992                       ProtobufCBuffer     *buffer);
8993 CoreML__Specification__CustomLayerParams *
8994        core_ml__specification__custom_layer_params__unpack
8995                      (ProtobufCAllocator  *allocator,
8996                       size_t               len,
8997                       const uint8_t       *data);
8998 void   core_ml__specification__custom_layer_params__free_unpacked
8999                      (CoreML__Specification__CustomLayerParams *message,
9000                       ProtobufCAllocator *allocator);
9001 /* CoreML__Specification__TransposeLayerParams methods */
9002 void   core_ml__specification__transpose_layer_params__init
9003                      (CoreML__Specification__TransposeLayerParams         *message);
9004 size_t core_ml__specification__transpose_layer_params__get_packed_size
9005                      (const CoreML__Specification__TransposeLayerParams   *message);
9006 size_t core_ml__specification__transpose_layer_params__pack
9007                      (const CoreML__Specification__TransposeLayerParams   *message,
9008                       uint8_t             *out);
9009 size_t core_ml__specification__transpose_layer_params__pack_to_buffer
9010                      (const CoreML__Specification__TransposeLayerParams   *message,
9011                       ProtobufCBuffer     *buffer);
9012 CoreML__Specification__TransposeLayerParams *
9013        core_ml__specification__transpose_layer_params__unpack
9014                      (ProtobufCAllocator  *allocator,
9015                       size_t               len,
9016                       const uint8_t       *data);
9017 void   core_ml__specification__transpose_layer_params__free_unpacked
9018                      (CoreML__Specification__TransposeLayerParams *message,
9019                       ProtobufCAllocator *allocator);
9020 /* CoreML__Specification__BatchedMatMulLayerParams methods */
9021 void   core_ml__specification__batched_mat_mul_layer_params__init
9022                      (CoreML__Specification__BatchedMatMulLayerParams         *message);
9023 size_t core_ml__specification__batched_mat_mul_layer_params__get_packed_size
9024                      (const CoreML__Specification__BatchedMatMulLayerParams   *message);
9025 size_t core_ml__specification__batched_mat_mul_layer_params__pack
9026                      (const CoreML__Specification__BatchedMatMulLayerParams   *message,
9027                       uint8_t             *out);
9028 size_t core_ml__specification__batched_mat_mul_layer_params__pack_to_buffer
9029                      (const CoreML__Specification__BatchedMatMulLayerParams   *message,
9030                       ProtobufCBuffer     *buffer);
9031 CoreML__Specification__BatchedMatMulLayerParams *
9032        core_ml__specification__batched_mat_mul_layer_params__unpack
9033                      (ProtobufCAllocator  *allocator,
9034                       size_t               len,
9035                       const uint8_t       *data);
9036 void   core_ml__specification__batched_mat_mul_layer_params__free_unpacked
9037                      (CoreML__Specification__BatchedMatMulLayerParams *message,
9038                       ProtobufCAllocator *allocator);
9039 /* CoreML__Specification__ConcatNDLayerParams methods */
9040 void   core_ml__specification__concat_ndlayer_params__init
9041                      (CoreML__Specification__ConcatNDLayerParams         *message);
9042 size_t core_ml__specification__concat_ndlayer_params__get_packed_size
9043                      (const CoreML__Specification__ConcatNDLayerParams   *message);
9044 size_t core_ml__specification__concat_ndlayer_params__pack
9045                      (const CoreML__Specification__ConcatNDLayerParams   *message,
9046                       uint8_t             *out);
9047 size_t core_ml__specification__concat_ndlayer_params__pack_to_buffer
9048                      (const CoreML__Specification__ConcatNDLayerParams   *message,
9049                       ProtobufCBuffer     *buffer);
9050 CoreML__Specification__ConcatNDLayerParams *
9051        core_ml__specification__concat_ndlayer_params__unpack
9052                      (ProtobufCAllocator  *allocator,
9053                       size_t               len,
9054                       const uint8_t       *data);
9055 void   core_ml__specification__concat_ndlayer_params__free_unpacked
9056                      (CoreML__Specification__ConcatNDLayerParams *message,
9057                       ProtobufCAllocator *allocator);
9058 /* CoreML__Specification__SoftmaxNDLayerParams methods */
9059 void   core_ml__specification__softmax_ndlayer_params__init
9060                      (CoreML__Specification__SoftmaxNDLayerParams         *message);
9061 size_t core_ml__specification__softmax_ndlayer_params__get_packed_size
9062                      (const CoreML__Specification__SoftmaxNDLayerParams   *message);
9063 size_t core_ml__specification__softmax_ndlayer_params__pack
9064                      (const CoreML__Specification__SoftmaxNDLayerParams   *message,
9065                       uint8_t             *out);
9066 size_t core_ml__specification__softmax_ndlayer_params__pack_to_buffer
9067                      (const CoreML__Specification__SoftmaxNDLayerParams   *message,
9068                       ProtobufCBuffer     *buffer);
9069 CoreML__Specification__SoftmaxNDLayerParams *
9070        core_ml__specification__softmax_ndlayer_params__unpack
9071                      (ProtobufCAllocator  *allocator,
9072                       size_t               len,
9073                       const uint8_t       *data);
9074 void   core_ml__specification__softmax_ndlayer_params__free_unpacked
9075                      (CoreML__Specification__SoftmaxNDLayerParams *message,
9076                       ProtobufCAllocator *allocator);
9077 /* CoreML__Specification__ReverseLayerParams methods */
9078 void   core_ml__specification__reverse_layer_params__init
9079                      (CoreML__Specification__ReverseLayerParams         *message);
9080 size_t core_ml__specification__reverse_layer_params__get_packed_size
9081                      (const CoreML__Specification__ReverseLayerParams   *message);
9082 size_t core_ml__specification__reverse_layer_params__pack
9083                      (const CoreML__Specification__ReverseLayerParams   *message,
9084                       uint8_t             *out);
9085 size_t core_ml__specification__reverse_layer_params__pack_to_buffer
9086                      (const CoreML__Specification__ReverseLayerParams   *message,
9087                       ProtobufCBuffer     *buffer);
9088 CoreML__Specification__ReverseLayerParams *
9089        core_ml__specification__reverse_layer_params__unpack
9090                      (ProtobufCAllocator  *allocator,
9091                       size_t               len,
9092                       const uint8_t       *data);
9093 void   core_ml__specification__reverse_layer_params__free_unpacked
9094                      (CoreML__Specification__ReverseLayerParams *message,
9095                       ProtobufCAllocator *allocator);
9096 /* CoreML__Specification__ReverseSeqLayerParams methods */
9097 void   core_ml__specification__reverse_seq_layer_params__init
9098                      (CoreML__Specification__ReverseSeqLayerParams         *message);
9099 size_t core_ml__specification__reverse_seq_layer_params__get_packed_size
9100                      (const CoreML__Specification__ReverseSeqLayerParams   *message);
9101 size_t core_ml__specification__reverse_seq_layer_params__pack
9102                      (const CoreML__Specification__ReverseSeqLayerParams   *message,
9103                       uint8_t             *out);
9104 size_t core_ml__specification__reverse_seq_layer_params__pack_to_buffer
9105                      (const CoreML__Specification__ReverseSeqLayerParams   *message,
9106                       ProtobufCBuffer     *buffer);
9107 CoreML__Specification__ReverseSeqLayerParams *
9108        core_ml__specification__reverse_seq_layer_params__unpack
9109                      (ProtobufCAllocator  *allocator,
9110                       size_t               len,
9111                       const uint8_t       *data);
9112 void   core_ml__specification__reverse_seq_layer_params__free_unpacked
9113                      (CoreML__Specification__ReverseSeqLayerParams *message,
9114                       ProtobufCAllocator *allocator);
9115 /* CoreML__Specification__LoadConstantNDLayerParams methods */
9116 void   core_ml__specification__load_constant_ndlayer_params__init
9117                      (CoreML__Specification__LoadConstantNDLayerParams         *message);
9118 size_t core_ml__specification__load_constant_ndlayer_params__get_packed_size
9119                      (const CoreML__Specification__LoadConstantNDLayerParams   *message);
9120 size_t core_ml__specification__load_constant_ndlayer_params__pack
9121                      (const CoreML__Specification__LoadConstantNDLayerParams   *message,
9122                       uint8_t             *out);
9123 size_t core_ml__specification__load_constant_ndlayer_params__pack_to_buffer
9124                      (const CoreML__Specification__LoadConstantNDLayerParams   *message,
9125                       ProtobufCBuffer     *buffer);
9126 CoreML__Specification__LoadConstantNDLayerParams *
9127        core_ml__specification__load_constant_ndlayer_params__unpack
9128                      (ProtobufCAllocator  *allocator,
9129                       size_t               len,
9130                       const uint8_t       *data);
9131 void   core_ml__specification__load_constant_ndlayer_params__free_unpacked
9132                      (CoreML__Specification__LoadConstantNDLayerParams *message,
9133                       ProtobufCAllocator *allocator);
9134 /* CoreML__Specification__FillLikeLayerParams methods */
9135 void   core_ml__specification__fill_like_layer_params__init
9136                      (CoreML__Specification__FillLikeLayerParams         *message);
9137 size_t core_ml__specification__fill_like_layer_params__get_packed_size
9138                      (const CoreML__Specification__FillLikeLayerParams   *message);
9139 size_t core_ml__specification__fill_like_layer_params__pack
9140                      (const CoreML__Specification__FillLikeLayerParams   *message,
9141                       uint8_t             *out);
9142 size_t core_ml__specification__fill_like_layer_params__pack_to_buffer
9143                      (const CoreML__Specification__FillLikeLayerParams   *message,
9144                       ProtobufCBuffer     *buffer);
9145 CoreML__Specification__FillLikeLayerParams *
9146        core_ml__specification__fill_like_layer_params__unpack
9147                      (ProtobufCAllocator  *allocator,
9148                       size_t               len,
9149                       const uint8_t       *data);
9150 void   core_ml__specification__fill_like_layer_params__free_unpacked
9151                      (CoreML__Specification__FillLikeLayerParams *message,
9152                       ProtobufCAllocator *allocator);
9153 /* CoreML__Specification__FillStaticLayerParams methods */
9154 void   core_ml__specification__fill_static_layer_params__init
9155                      (CoreML__Specification__FillStaticLayerParams         *message);
9156 size_t core_ml__specification__fill_static_layer_params__get_packed_size
9157                      (const CoreML__Specification__FillStaticLayerParams   *message);
9158 size_t core_ml__specification__fill_static_layer_params__pack
9159                      (const CoreML__Specification__FillStaticLayerParams   *message,
9160                       uint8_t             *out);
9161 size_t core_ml__specification__fill_static_layer_params__pack_to_buffer
9162                      (const CoreML__Specification__FillStaticLayerParams   *message,
9163                       ProtobufCBuffer     *buffer);
9164 CoreML__Specification__FillStaticLayerParams *
9165        core_ml__specification__fill_static_layer_params__unpack
9166                      (ProtobufCAllocator  *allocator,
9167                       size_t               len,
9168                       const uint8_t       *data);
9169 void   core_ml__specification__fill_static_layer_params__free_unpacked
9170                      (CoreML__Specification__FillStaticLayerParams *message,
9171                       ProtobufCAllocator *allocator);
9172 /* CoreML__Specification__FillDynamicLayerParams methods */
9173 void   core_ml__specification__fill_dynamic_layer_params__init
9174                      (CoreML__Specification__FillDynamicLayerParams         *message);
9175 size_t core_ml__specification__fill_dynamic_layer_params__get_packed_size
9176                      (const CoreML__Specification__FillDynamicLayerParams   *message);
9177 size_t core_ml__specification__fill_dynamic_layer_params__pack
9178                      (const CoreML__Specification__FillDynamicLayerParams   *message,
9179                       uint8_t             *out);
9180 size_t core_ml__specification__fill_dynamic_layer_params__pack_to_buffer
9181                      (const CoreML__Specification__FillDynamicLayerParams   *message,
9182                       ProtobufCBuffer     *buffer);
9183 CoreML__Specification__FillDynamicLayerParams *
9184        core_ml__specification__fill_dynamic_layer_params__unpack
9185                      (ProtobufCAllocator  *allocator,
9186                       size_t               len,
9187                       const uint8_t       *data);
9188 void   core_ml__specification__fill_dynamic_layer_params__free_unpacked
9189                      (CoreML__Specification__FillDynamicLayerParams *message,
9190                       ProtobufCAllocator *allocator);
9191 /* CoreML__Specification__WhereBroadcastableLayerParams methods */
9192 void   core_ml__specification__where_broadcastable_layer_params__init
9193                      (CoreML__Specification__WhereBroadcastableLayerParams         *message);
9194 size_t core_ml__specification__where_broadcastable_layer_params__get_packed_size
9195                      (const CoreML__Specification__WhereBroadcastableLayerParams   *message);
9196 size_t core_ml__specification__where_broadcastable_layer_params__pack
9197                      (const CoreML__Specification__WhereBroadcastableLayerParams   *message,
9198                       uint8_t             *out);
9199 size_t core_ml__specification__where_broadcastable_layer_params__pack_to_buffer
9200                      (const CoreML__Specification__WhereBroadcastableLayerParams   *message,
9201                       ProtobufCBuffer     *buffer);
9202 CoreML__Specification__WhereBroadcastableLayerParams *
9203        core_ml__specification__where_broadcastable_layer_params__unpack
9204                      (ProtobufCAllocator  *allocator,
9205                       size_t               len,
9206                       const uint8_t       *data);
9207 void   core_ml__specification__where_broadcastable_layer_params__free_unpacked
9208                      (CoreML__Specification__WhereBroadcastableLayerParams *message,
9209                       ProtobufCAllocator *allocator);
9210 /* CoreML__Specification__SinLayerParams methods */
9211 void   core_ml__specification__sin_layer_params__init
9212                      (CoreML__Specification__SinLayerParams         *message);
9213 size_t core_ml__specification__sin_layer_params__get_packed_size
9214                      (const CoreML__Specification__SinLayerParams   *message);
9215 size_t core_ml__specification__sin_layer_params__pack
9216                      (const CoreML__Specification__SinLayerParams   *message,
9217                       uint8_t             *out);
9218 size_t core_ml__specification__sin_layer_params__pack_to_buffer
9219                      (const CoreML__Specification__SinLayerParams   *message,
9220                       ProtobufCBuffer     *buffer);
9221 CoreML__Specification__SinLayerParams *
9222        core_ml__specification__sin_layer_params__unpack
9223                      (ProtobufCAllocator  *allocator,
9224                       size_t               len,
9225                       const uint8_t       *data);
9226 void   core_ml__specification__sin_layer_params__free_unpacked
9227                      (CoreML__Specification__SinLayerParams *message,
9228                       ProtobufCAllocator *allocator);
9229 /* CoreML__Specification__CosLayerParams methods */
9230 void   core_ml__specification__cos_layer_params__init
9231                      (CoreML__Specification__CosLayerParams         *message);
9232 size_t core_ml__specification__cos_layer_params__get_packed_size
9233                      (const CoreML__Specification__CosLayerParams   *message);
9234 size_t core_ml__specification__cos_layer_params__pack
9235                      (const CoreML__Specification__CosLayerParams   *message,
9236                       uint8_t             *out);
9237 size_t core_ml__specification__cos_layer_params__pack_to_buffer
9238                      (const CoreML__Specification__CosLayerParams   *message,
9239                       ProtobufCBuffer     *buffer);
9240 CoreML__Specification__CosLayerParams *
9241        core_ml__specification__cos_layer_params__unpack
9242                      (ProtobufCAllocator  *allocator,
9243                       size_t               len,
9244                       const uint8_t       *data);
9245 void   core_ml__specification__cos_layer_params__free_unpacked
9246                      (CoreML__Specification__CosLayerParams *message,
9247                       ProtobufCAllocator *allocator);
9248 /* CoreML__Specification__TanLayerParams methods */
9249 void   core_ml__specification__tan_layer_params__init
9250                      (CoreML__Specification__TanLayerParams         *message);
9251 size_t core_ml__specification__tan_layer_params__get_packed_size
9252                      (const CoreML__Specification__TanLayerParams   *message);
9253 size_t core_ml__specification__tan_layer_params__pack
9254                      (const CoreML__Specification__TanLayerParams   *message,
9255                       uint8_t             *out);
9256 size_t core_ml__specification__tan_layer_params__pack_to_buffer
9257                      (const CoreML__Specification__TanLayerParams   *message,
9258                       ProtobufCBuffer     *buffer);
9259 CoreML__Specification__TanLayerParams *
9260        core_ml__specification__tan_layer_params__unpack
9261                      (ProtobufCAllocator  *allocator,
9262                       size_t               len,
9263                       const uint8_t       *data);
9264 void   core_ml__specification__tan_layer_params__free_unpacked
9265                      (CoreML__Specification__TanLayerParams *message,
9266                       ProtobufCAllocator *allocator);
9267 /* CoreML__Specification__AsinLayerParams methods */
9268 void   core_ml__specification__asin_layer_params__init
9269                      (CoreML__Specification__AsinLayerParams         *message);
9270 size_t core_ml__specification__asin_layer_params__get_packed_size
9271                      (const CoreML__Specification__AsinLayerParams   *message);
9272 size_t core_ml__specification__asin_layer_params__pack
9273                      (const CoreML__Specification__AsinLayerParams   *message,
9274                       uint8_t             *out);
9275 size_t core_ml__specification__asin_layer_params__pack_to_buffer
9276                      (const CoreML__Specification__AsinLayerParams   *message,
9277                       ProtobufCBuffer     *buffer);
9278 CoreML__Specification__AsinLayerParams *
9279        core_ml__specification__asin_layer_params__unpack
9280                      (ProtobufCAllocator  *allocator,
9281                       size_t               len,
9282                       const uint8_t       *data);
9283 void   core_ml__specification__asin_layer_params__free_unpacked
9284                      (CoreML__Specification__AsinLayerParams *message,
9285                       ProtobufCAllocator *allocator);
9286 /* CoreML__Specification__AcosLayerParams methods */
9287 void   core_ml__specification__acos_layer_params__init
9288                      (CoreML__Specification__AcosLayerParams         *message);
9289 size_t core_ml__specification__acos_layer_params__get_packed_size
9290                      (const CoreML__Specification__AcosLayerParams   *message);
9291 size_t core_ml__specification__acos_layer_params__pack
9292                      (const CoreML__Specification__AcosLayerParams   *message,
9293                       uint8_t             *out);
9294 size_t core_ml__specification__acos_layer_params__pack_to_buffer
9295                      (const CoreML__Specification__AcosLayerParams   *message,
9296                       ProtobufCBuffer     *buffer);
9297 CoreML__Specification__AcosLayerParams *
9298        core_ml__specification__acos_layer_params__unpack
9299                      (ProtobufCAllocator  *allocator,
9300                       size_t               len,
9301                       const uint8_t       *data);
9302 void   core_ml__specification__acos_layer_params__free_unpacked
9303                      (CoreML__Specification__AcosLayerParams *message,
9304                       ProtobufCAllocator *allocator);
9305 /* CoreML__Specification__AtanLayerParams methods */
9306 void   core_ml__specification__atan_layer_params__init
9307                      (CoreML__Specification__AtanLayerParams         *message);
9308 size_t core_ml__specification__atan_layer_params__get_packed_size
9309                      (const CoreML__Specification__AtanLayerParams   *message);
9310 size_t core_ml__specification__atan_layer_params__pack
9311                      (const CoreML__Specification__AtanLayerParams   *message,
9312                       uint8_t             *out);
9313 size_t core_ml__specification__atan_layer_params__pack_to_buffer
9314                      (const CoreML__Specification__AtanLayerParams   *message,
9315                       ProtobufCBuffer     *buffer);
9316 CoreML__Specification__AtanLayerParams *
9317        core_ml__specification__atan_layer_params__unpack
9318                      (ProtobufCAllocator  *allocator,
9319                       size_t               len,
9320                       const uint8_t       *data);
9321 void   core_ml__specification__atan_layer_params__free_unpacked
9322                      (CoreML__Specification__AtanLayerParams *message,
9323                       ProtobufCAllocator *allocator);
/* CoreML__Specification__SinhLayerParams methods */
void   core_ml__specification__sinh_layer_params__init
                     (CoreML__Specification__SinhLayerParams         *message);
size_t core_ml__specification__sinh_layer_params__get_packed_size
                     (const CoreML__Specification__SinhLayerParams   *message);
size_t core_ml__specification__sinh_layer_params__pack
                     (const CoreML__Specification__SinhLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__sinh_layer_params__pack_to_buffer
                     (const CoreML__Specification__SinhLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__SinhLayerParams *
       core_ml__specification__sinh_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__sinh_layer_params__free_unpacked
                     (CoreML__Specification__SinhLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__CoshLayerParams methods */
void   core_ml__specification__cosh_layer_params__init
                     (CoreML__Specification__CoshLayerParams         *message);
size_t core_ml__specification__cosh_layer_params__get_packed_size
                     (const CoreML__Specification__CoshLayerParams   *message);
size_t core_ml__specification__cosh_layer_params__pack
                     (const CoreML__Specification__CoshLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__cosh_layer_params__pack_to_buffer
                     (const CoreML__Specification__CoshLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__CoshLayerParams *
       core_ml__specification__cosh_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__cosh_layer_params__free_unpacked
                     (CoreML__Specification__CoshLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__TanhLayerParams methods */
void   core_ml__specification__tanh_layer_params__init
                     (CoreML__Specification__TanhLayerParams         *message);
size_t core_ml__specification__tanh_layer_params__get_packed_size
                     (const CoreML__Specification__TanhLayerParams   *message);
size_t core_ml__specification__tanh_layer_params__pack
                     (const CoreML__Specification__TanhLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__tanh_layer_params__pack_to_buffer
                     (const CoreML__Specification__TanhLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__TanhLayerParams *
       core_ml__specification__tanh_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__tanh_layer_params__free_unpacked
                     (CoreML__Specification__TanhLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__AsinhLayerParams methods */
void   core_ml__specification__asinh_layer_params__init
                     (CoreML__Specification__AsinhLayerParams         *message);
size_t core_ml__specification__asinh_layer_params__get_packed_size
                     (const CoreML__Specification__AsinhLayerParams   *message);
size_t core_ml__specification__asinh_layer_params__pack
                     (const CoreML__Specification__AsinhLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__asinh_layer_params__pack_to_buffer
                     (const CoreML__Specification__AsinhLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__AsinhLayerParams *
       core_ml__specification__asinh_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__asinh_layer_params__free_unpacked
                     (CoreML__Specification__AsinhLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__AcoshLayerParams methods */
void   core_ml__specification__acosh_layer_params__init
                     (CoreML__Specification__AcoshLayerParams         *message);
size_t core_ml__specification__acosh_layer_params__get_packed_size
                     (const CoreML__Specification__AcoshLayerParams   *message);
size_t core_ml__specification__acosh_layer_params__pack
                     (const CoreML__Specification__AcoshLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__acosh_layer_params__pack_to_buffer
                     (const CoreML__Specification__AcoshLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__AcoshLayerParams *
       core_ml__specification__acosh_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__acosh_layer_params__free_unpacked
                     (CoreML__Specification__AcoshLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__AtanhLayerParams methods */
void   core_ml__specification__atanh_layer_params__init
                     (CoreML__Specification__AtanhLayerParams         *message);
size_t core_ml__specification__atanh_layer_params__get_packed_size
                     (const CoreML__Specification__AtanhLayerParams   *message);
size_t core_ml__specification__atanh_layer_params__pack
                     (const CoreML__Specification__AtanhLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__atanh_layer_params__pack_to_buffer
                     (const CoreML__Specification__AtanhLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__AtanhLayerParams *
       core_ml__specification__atanh_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__atanh_layer_params__free_unpacked
                     (CoreML__Specification__AtanhLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__PowBroadcastableLayerParams methods */
void   core_ml__specification__pow_broadcastable_layer_params__init
                     (CoreML__Specification__PowBroadcastableLayerParams         *message);
size_t core_ml__specification__pow_broadcastable_layer_params__get_packed_size
                     (const CoreML__Specification__PowBroadcastableLayerParams   *message);
size_t core_ml__specification__pow_broadcastable_layer_params__pack
                     (const CoreML__Specification__PowBroadcastableLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__pow_broadcastable_layer_params__pack_to_buffer
                     (const CoreML__Specification__PowBroadcastableLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__PowBroadcastableLayerParams *
       core_ml__specification__pow_broadcastable_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__pow_broadcastable_layer_params__free_unpacked
                     (CoreML__Specification__PowBroadcastableLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__Exp2LayerParams methods */
void   core_ml__specification__exp2_layer_params__init
                     (CoreML__Specification__Exp2LayerParams         *message);
size_t core_ml__specification__exp2_layer_params__get_packed_size
                     (const CoreML__Specification__Exp2LayerParams   *message);
size_t core_ml__specification__exp2_layer_params__pack
                     (const CoreML__Specification__Exp2LayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__exp2_layer_params__pack_to_buffer
                     (const CoreML__Specification__Exp2LayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__Exp2LayerParams *
       core_ml__specification__exp2_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__exp2_layer_params__free_unpacked
                     (CoreML__Specification__Exp2LayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__WhereNonZeroLayerParams methods */
void   core_ml__specification__where_non_zero_layer_params__init
                     (CoreML__Specification__WhereNonZeroLayerParams         *message);
size_t core_ml__specification__where_non_zero_layer_params__get_packed_size
                     (const CoreML__Specification__WhereNonZeroLayerParams   *message);
size_t core_ml__specification__where_non_zero_layer_params__pack
                     (const CoreML__Specification__WhereNonZeroLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__where_non_zero_layer_params__pack_to_buffer
                     (const CoreML__Specification__WhereNonZeroLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__WhereNonZeroLayerParams *
       core_ml__specification__where_non_zero_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__where_non_zero_layer_params__free_unpacked
                     (CoreML__Specification__WhereNonZeroLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__MatrixBandPartLayerParams methods */
void   core_ml__specification__matrix_band_part_layer_params__init
                     (CoreML__Specification__MatrixBandPartLayerParams         *message);
size_t core_ml__specification__matrix_band_part_layer_params__get_packed_size
                     (const CoreML__Specification__MatrixBandPartLayerParams   *message);
size_t core_ml__specification__matrix_band_part_layer_params__pack
                     (const CoreML__Specification__MatrixBandPartLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__matrix_band_part_layer_params__pack_to_buffer
                     (const CoreML__Specification__MatrixBandPartLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__MatrixBandPartLayerParams *
       core_ml__specification__matrix_band_part_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__matrix_band_part_layer_params__free_unpacked
                     (CoreML__Specification__MatrixBandPartLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__UpperTriangularLayerParams methods */
void   core_ml__specification__upper_triangular_layer_params__init
                     (CoreML__Specification__UpperTriangularLayerParams         *message);
size_t core_ml__specification__upper_triangular_layer_params__get_packed_size
                     (const CoreML__Specification__UpperTriangularLayerParams   *message);
size_t core_ml__specification__upper_triangular_layer_params__pack
                     (const CoreML__Specification__UpperTriangularLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__upper_triangular_layer_params__pack_to_buffer
                     (const CoreML__Specification__UpperTriangularLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__UpperTriangularLayerParams *
       core_ml__specification__upper_triangular_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__upper_triangular_layer_params__free_unpacked
                     (CoreML__Specification__UpperTriangularLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__LowerTriangularLayerParams methods */
void   core_ml__specification__lower_triangular_layer_params__init
                     (CoreML__Specification__LowerTriangularLayerParams         *message);
size_t core_ml__specification__lower_triangular_layer_params__get_packed_size
                     (const CoreML__Specification__LowerTriangularLayerParams   *message);
size_t core_ml__specification__lower_triangular_layer_params__pack
                     (const CoreML__Specification__LowerTriangularLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__lower_triangular_layer_params__pack_to_buffer
                     (const CoreML__Specification__LowerTriangularLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__LowerTriangularLayerParams *
       core_ml__specification__lower_triangular_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__lower_triangular_layer_params__free_unpacked
                     (CoreML__Specification__LowerTriangularLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__BroadcastToLikeLayerParams methods */
void   core_ml__specification__broadcast_to_like_layer_params__init
                     (CoreML__Specification__BroadcastToLikeLayerParams         *message);
size_t core_ml__specification__broadcast_to_like_layer_params__get_packed_size
                     (const CoreML__Specification__BroadcastToLikeLayerParams   *message);
size_t core_ml__specification__broadcast_to_like_layer_params__pack
                     (const CoreML__Specification__BroadcastToLikeLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__broadcast_to_like_layer_params__pack_to_buffer
                     (const CoreML__Specification__BroadcastToLikeLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__BroadcastToLikeLayerParams *
       core_ml__specification__broadcast_to_like_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__broadcast_to_like_layer_params__free_unpacked
                     (CoreML__Specification__BroadcastToLikeLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__BroadcastToStaticLayerParams methods */
void   core_ml__specification__broadcast_to_static_layer_params__init
                     (CoreML__Specification__BroadcastToStaticLayerParams         *message);
size_t core_ml__specification__broadcast_to_static_layer_params__get_packed_size
                     (const CoreML__Specification__BroadcastToStaticLayerParams   *message);
size_t core_ml__specification__broadcast_to_static_layer_params__pack
                     (const CoreML__Specification__BroadcastToStaticLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__broadcast_to_static_layer_params__pack_to_buffer
                     (const CoreML__Specification__BroadcastToStaticLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__BroadcastToStaticLayerParams *
       core_ml__specification__broadcast_to_static_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__broadcast_to_static_layer_params__free_unpacked
                     (CoreML__Specification__BroadcastToStaticLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__BroadcastToDynamicLayerParams methods */
void   core_ml__specification__broadcast_to_dynamic_layer_params__init
                     (CoreML__Specification__BroadcastToDynamicLayerParams         *message);
size_t core_ml__specification__broadcast_to_dynamic_layer_params__get_packed_size
                     (const CoreML__Specification__BroadcastToDynamicLayerParams   *message);
size_t core_ml__specification__broadcast_to_dynamic_layer_params__pack
                     (const CoreML__Specification__BroadcastToDynamicLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__broadcast_to_dynamic_layer_params__pack_to_buffer
                     (const CoreML__Specification__BroadcastToDynamicLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__BroadcastToDynamicLayerParams *
       core_ml__specification__broadcast_to_dynamic_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__broadcast_to_dynamic_layer_params__free_unpacked
                     (CoreML__Specification__BroadcastToDynamicLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__AddBroadcastableLayerParams methods */
void   core_ml__specification__add_broadcastable_layer_params__init
                     (CoreML__Specification__AddBroadcastableLayerParams         *message);
size_t core_ml__specification__add_broadcastable_layer_params__get_packed_size
                     (const CoreML__Specification__AddBroadcastableLayerParams   *message);
size_t core_ml__specification__add_broadcastable_layer_params__pack
                     (const CoreML__Specification__AddBroadcastableLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__add_broadcastable_layer_params__pack_to_buffer
                     (const CoreML__Specification__AddBroadcastableLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__AddBroadcastableLayerParams *
       core_ml__specification__add_broadcastable_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__add_broadcastable_layer_params__free_unpacked
                     (CoreML__Specification__AddBroadcastableLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__MaxBroadcastableLayerParams methods */
void   core_ml__specification__max_broadcastable_layer_params__init
                     (CoreML__Specification__MaxBroadcastableLayerParams         *message);
size_t core_ml__specification__max_broadcastable_layer_params__get_packed_size
                     (const CoreML__Specification__MaxBroadcastableLayerParams   *message);
size_t core_ml__specification__max_broadcastable_layer_params__pack
                     (const CoreML__Specification__MaxBroadcastableLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__max_broadcastable_layer_params__pack_to_buffer
                     (const CoreML__Specification__MaxBroadcastableLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__MaxBroadcastableLayerParams *
       core_ml__specification__max_broadcastable_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__max_broadcastable_layer_params__free_unpacked
                     (CoreML__Specification__MaxBroadcastableLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__MinBroadcastableLayerParams methods */
void   core_ml__specification__min_broadcastable_layer_params__init
                     (CoreML__Specification__MinBroadcastableLayerParams         *message);
size_t core_ml__specification__min_broadcastable_layer_params__get_packed_size
                     (const CoreML__Specification__MinBroadcastableLayerParams   *message);
size_t core_ml__specification__min_broadcastable_layer_params__pack
                     (const CoreML__Specification__MinBroadcastableLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__min_broadcastable_layer_params__pack_to_buffer
                     (const CoreML__Specification__MinBroadcastableLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__MinBroadcastableLayerParams *
       core_ml__specification__min_broadcastable_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__min_broadcastable_layer_params__free_unpacked
                     (CoreML__Specification__MinBroadcastableLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__ModBroadcastableLayerParams methods */
void   core_ml__specification__mod_broadcastable_layer_params__init
                     (CoreML__Specification__ModBroadcastableLayerParams         *message);
size_t core_ml__specification__mod_broadcastable_layer_params__get_packed_size
                     (const CoreML__Specification__ModBroadcastableLayerParams   *message);
size_t core_ml__specification__mod_broadcastable_layer_params__pack
                     (const CoreML__Specification__ModBroadcastableLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__mod_broadcastable_layer_params__pack_to_buffer
                     (const CoreML__Specification__ModBroadcastableLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__ModBroadcastableLayerParams *
       core_ml__specification__mod_broadcastable_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__mod_broadcastable_layer_params__free_unpacked
                     (CoreML__Specification__ModBroadcastableLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__FloorDivBroadcastableLayerParams methods */
void   core_ml__specification__floor_div_broadcastable_layer_params__init
                     (CoreML__Specification__FloorDivBroadcastableLayerParams         *message);
size_t core_ml__specification__floor_div_broadcastable_layer_params__get_packed_size
                     (const CoreML__Specification__FloorDivBroadcastableLayerParams   *message);
size_t core_ml__specification__floor_div_broadcastable_layer_params__pack
                     (const CoreML__Specification__FloorDivBroadcastableLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__floor_div_broadcastable_layer_params__pack_to_buffer
                     (const CoreML__Specification__FloorDivBroadcastableLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__FloorDivBroadcastableLayerParams *
       core_ml__specification__floor_div_broadcastable_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__floor_div_broadcastable_layer_params__free_unpacked
                     (CoreML__Specification__FloorDivBroadcastableLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__SubtractBroadcastableLayerParams methods */
void   core_ml__specification__subtract_broadcastable_layer_params__init
                     (CoreML__Specification__SubtractBroadcastableLayerParams         *message);
size_t core_ml__specification__subtract_broadcastable_layer_params__get_packed_size
                     (const CoreML__Specification__SubtractBroadcastableLayerParams   *message);
size_t core_ml__specification__subtract_broadcastable_layer_params__pack
                     (const CoreML__Specification__SubtractBroadcastableLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__subtract_broadcastable_layer_params__pack_to_buffer
                     (const CoreML__Specification__SubtractBroadcastableLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__SubtractBroadcastableLayerParams *
       core_ml__specification__subtract_broadcastable_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__subtract_broadcastable_layer_params__free_unpacked
                     (CoreML__Specification__SubtractBroadcastableLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__MultiplyBroadcastableLayerParams methods */
void   core_ml__specification__multiply_broadcastable_layer_params__init
                     (CoreML__Specification__MultiplyBroadcastableLayerParams         *message);
size_t core_ml__specification__multiply_broadcastable_layer_params__get_packed_size
                     (const CoreML__Specification__MultiplyBroadcastableLayerParams   *message);
size_t core_ml__specification__multiply_broadcastable_layer_params__pack
                     (const CoreML__Specification__MultiplyBroadcastableLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__multiply_broadcastable_layer_params__pack_to_buffer
                     (const CoreML__Specification__MultiplyBroadcastableLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__MultiplyBroadcastableLayerParams *
       core_ml__specification__multiply_broadcastable_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__multiply_broadcastable_layer_params__free_unpacked
                     (CoreML__Specification__MultiplyBroadcastableLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__DivideBroadcastableLayerParams methods */
void   core_ml__specification__divide_broadcastable_layer_params__init
                     (CoreML__Specification__DivideBroadcastableLayerParams         *message);
size_t core_ml__specification__divide_broadcastable_layer_params__get_packed_size
                     (const CoreML__Specification__DivideBroadcastableLayerParams   *message);
size_t core_ml__specification__divide_broadcastable_layer_params__pack
                     (const CoreML__Specification__DivideBroadcastableLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__divide_broadcastable_layer_params__pack_to_buffer
                     (const CoreML__Specification__DivideBroadcastableLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__DivideBroadcastableLayerParams *
       core_ml__specification__divide_broadcastable_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__divide_broadcastable_layer_params__free_unpacked
                     (CoreML__Specification__DivideBroadcastableLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__GatherLayerParams methods */
void   core_ml__specification__gather_layer_params__init
                     (CoreML__Specification__GatherLayerParams         *message);
size_t core_ml__specification__gather_layer_params__get_packed_size
                     (const CoreML__Specification__GatherLayerParams   *message);
size_t core_ml__specification__gather_layer_params__pack
                     (const CoreML__Specification__GatherLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__gather_layer_params__pack_to_buffer
                     (const CoreML__Specification__GatherLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__GatherLayerParams *
       core_ml__specification__gather_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__gather_layer_params__free_unpacked
                     (CoreML__Specification__GatherLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__ScatterLayerParams methods */
void   core_ml__specification__scatter_layer_params__init
                     (CoreML__Specification__ScatterLayerParams         *message);
size_t core_ml__specification__scatter_layer_params__get_packed_size
                     (const CoreML__Specification__ScatterLayerParams   *message);
size_t core_ml__specification__scatter_layer_params__pack
                     (const CoreML__Specification__ScatterLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__scatter_layer_params__pack_to_buffer
                     (const CoreML__Specification__ScatterLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__ScatterLayerParams *
       core_ml__specification__scatter_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__scatter_layer_params__free_unpacked
                     (CoreML__Specification__ScatterLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__GatherNDLayerParams methods */
void   core_ml__specification__gather_ndlayer_params__init
                     (CoreML__Specification__GatherNDLayerParams         *message);
size_t core_ml__specification__gather_ndlayer_params__get_packed_size
                     (const CoreML__Specification__GatherNDLayerParams   *message);
size_t core_ml__specification__gather_ndlayer_params__pack
                     (const CoreML__Specification__GatherNDLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__gather_ndlayer_params__pack_to_buffer
                     (const CoreML__Specification__GatherNDLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__GatherNDLayerParams *
       core_ml__specification__gather_ndlayer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__gather_ndlayer_params__free_unpacked
                     (CoreML__Specification__GatherNDLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__ScatterNDLayerParams methods */
void   core_ml__specification__scatter_ndlayer_params__init
                     (CoreML__Specification__ScatterNDLayerParams         *message);
size_t core_ml__specification__scatter_ndlayer_params__get_packed_size
                     (const CoreML__Specification__ScatterNDLayerParams   *message);
size_t core_ml__specification__scatter_ndlayer_params__pack
                     (const CoreML__Specification__ScatterNDLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__scatter_ndlayer_params__pack_to_buffer
                     (const CoreML__Specification__ScatterNDLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__ScatterNDLayerParams *
       core_ml__specification__scatter_ndlayer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__scatter_ndlayer_params__free_unpacked
                     (CoreML__Specification__ScatterNDLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__GatherAlongAxisLayerParams methods */
void   core_ml__specification__gather_along_axis_layer_params__init
                     (CoreML__Specification__GatherAlongAxisLayerParams         *message);
size_t core_ml__specification__gather_along_axis_layer_params__get_packed_size
                     (const CoreML__Specification__GatherAlongAxisLayerParams   *message);
size_t core_ml__specification__gather_along_axis_layer_params__pack
                     (const CoreML__Specification__GatherAlongAxisLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__gather_along_axis_layer_params__pack_to_buffer
                     (const CoreML__Specification__GatherAlongAxisLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__GatherAlongAxisLayerParams *
       core_ml__specification__gather_along_axis_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__gather_along_axis_layer_params__free_unpacked
                     (CoreML__Specification__GatherAlongAxisLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__ScatterAlongAxisLayerParams methods */
void   core_ml__specification__scatter_along_axis_layer_params__init
                     (CoreML__Specification__ScatterAlongAxisLayerParams         *message);
size_t core_ml__specification__scatter_along_axis_layer_params__get_packed_size
                     (const CoreML__Specification__ScatterAlongAxisLayerParams   *message);
size_t core_ml__specification__scatter_along_axis_layer_params__pack
                     (const CoreML__Specification__ScatterAlongAxisLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__scatter_along_axis_layer_params__pack_to_buffer
                     (const CoreML__Specification__ScatterAlongAxisLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__ScatterAlongAxisLayerParams *
       core_ml__specification__scatter_along_axis_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__scatter_along_axis_layer_params__free_unpacked
                     (CoreML__Specification__ScatterAlongAxisLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__StackLayerParams methods */
void   core_ml__specification__stack_layer_params__init
                     (CoreML__Specification__StackLayerParams         *message);
size_t core_ml__specification__stack_layer_params__get_packed_size
                     (const CoreML__Specification__StackLayerParams   *message);
size_t core_ml__specification__stack_layer_params__pack
                     (const CoreML__Specification__StackLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__stack_layer_params__pack_to_buffer
                     (const CoreML__Specification__StackLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__StackLayerParams *
       core_ml__specification__stack_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__stack_layer_params__free_unpacked
                     (CoreML__Specification__StackLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__RankPreservingReshapeLayerParams methods */
void   core_ml__specification__rank_preserving_reshape_layer_params__init
                     (CoreML__Specification__RankPreservingReshapeLayerParams         *message);
size_t core_ml__specification__rank_preserving_reshape_layer_params__get_packed_size
                     (const CoreML__Specification__RankPreservingReshapeLayerParams   *message);
size_t core_ml__specification__rank_preserving_reshape_layer_params__pack
                     (const CoreML__Specification__RankPreservingReshapeLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__rank_preserving_reshape_layer_params__pack_to_buffer
                     (const CoreML__Specification__RankPreservingReshapeLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RankPreservingReshapeLayerParams *
       core_ml__specification__rank_preserving_reshape_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__rank_preserving_reshape_layer_params__free_unpacked
                     (CoreML__Specification__RankPreservingReshapeLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__ConstantPaddingLayerParams methods */
void   core_ml__specification__constant_padding_layer_params__init
                     (CoreML__Specification__ConstantPaddingLayerParams         *message);
size_t core_ml__specification__constant_padding_layer_params__get_packed_size
                     (const CoreML__Specification__ConstantPaddingLayerParams   *message);
size_t core_ml__specification__constant_padding_layer_params__pack
                     (const CoreML__Specification__ConstantPaddingLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__constant_padding_layer_params__pack_to_buffer
                     (const CoreML__Specification__ConstantPaddingLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__ConstantPaddingLayerParams *
       core_ml__specification__constant_padding_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__constant_padding_layer_params__free_unpacked
                     (CoreML__Specification__ConstantPaddingLayerParams *message,
                      ProtobufCAllocator *allocator);
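/*
 * Sketch of the pack_to_buffer variant using the ProtobufCBufferSimple helper
 * from <protobuf-c/protobuf-c.h> (illustrative only; the scratch array and its
 * size are arbitrary choices for this example):
 *
 *   CoreML__Specification__ConstantPaddingLayerParams msg;
 *   core_ml__specification__constant_padding_layer_params__init(&msg);
 *
 *   uint8_t scratch[256];
 *   ProtobufCBufferSimple simple = PROTOBUF_C_BUFFER_SIMPLE_INIT(scratch);
 *   core_ml__specification__constant_padding_layer_params__pack_to_buffer(&msg, &simple.base);
 *   (serialized bytes are in simple.data, length simple.len)
 *   PROTOBUF_C_BUFFER_SIMPLE_CLEAR(&simple);
 */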
/* CoreML__Specification__RandomNormalLikeLayerParams methods */
void   core_ml__specification__random_normal_like_layer_params__init
                     (CoreML__Specification__RandomNormalLikeLayerParams         *message);
size_t core_ml__specification__random_normal_like_layer_params__get_packed_size
                     (const CoreML__Specification__RandomNormalLikeLayerParams   *message);
size_t core_ml__specification__random_normal_like_layer_params__pack
                     (const CoreML__Specification__RandomNormalLikeLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__random_normal_like_layer_params__pack_to_buffer
                     (const CoreML__Specification__RandomNormalLikeLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RandomNormalLikeLayerParams *
       core_ml__specification__random_normal_like_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__random_normal_like_layer_params__free_unpacked
                     (CoreML__Specification__RandomNormalLikeLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__RandomNormalStaticLayerParams methods */
void   core_ml__specification__random_normal_static_layer_params__init
                     (CoreML__Specification__RandomNormalStaticLayerParams         *message);
size_t core_ml__specification__random_normal_static_layer_params__get_packed_size
                     (const CoreML__Specification__RandomNormalStaticLayerParams   *message);
size_t core_ml__specification__random_normal_static_layer_params__pack
                     (const CoreML__Specification__RandomNormalStaticLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__random_normal_static_layer_params__pack_to_buffer
                     (const CoreML__Specification__RandomNormalStaticLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RandomNormalStaticLayerParams *
       core_ml__specification__random_normal_static_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__random_normal_static_layer_params__free_unpacked
                     (CoreML__Specification__RandomNormalStaticLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__RandomNormalDynamicLayerParams methods */
void   core_ml__specification__random_normal_dynamic_layer_params__init
                     (CoreML__Specification__RandomNormalDynamicLayerParams         *message);
size_t core_ml__specification__random_normal_dynamic_layer_params__get_packed_size
                     (const CoreML__Specification__RandomNormalDynamicLayerParams   *message);
size_t core_ml__specification__random_normal_dynamic_layer_params__pack
                     (const CoreML__Specification__RandomNormalDynamicLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__random_normal_dynamic_layer_params__pack_to_buffer
                     (const CoreML__Specification__RandomNormalDynamicLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RandomNormalDynamicLayerParams *
       core_ml__specification__random_normal_dynamic_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__random_normal_dynamic_layer_params__free_unpacked
                     (CoreML__Specification__RandomNormalDynamicLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__RandomUniformLikeLayerParams methods */
void   core_ml__specification__random_uniform_like_layer_params__init
                     (CoreML__Specification__RandomUniformLikeLayerParams         *message);
size_t core_ml__specification__random_uniform_like_layer_params__get_packed_size
                     (const CoreML__Specification__RandomUniformLikeLayerParams   *message);
size_t core_ml__specification__random_uniform_like_layer_params__pack
                     (const CoreML__Specification__RandomUniformLikeLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__random_uniform_like_layer_params__pack_to_buffer
                     (const CoreML__Specification__RandomUniformLikeLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RandomUniformLikeLayerParams *
       core_ml__specification__random_uniform_like_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__random_uniform_like_layer_params__free_unpacked
                     (CoreML__Specification__RandomUniformLikeLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__RandomUniformStaticLayerParams methods */
void   core_ml__specification__random_uniform_static_layer_params__init
                     (CoreML__Specification__RandomUniformStaticLayerParams         *message);
size_t core_ml__specification__random_uniform_static_layer_params__get_packed_size
                     (const CoreML__Specification__RandomUniformStaticLayerParams   *message);
size_t core_ml__specification__random_uniform_static_layer_params__pack
                     (const CoreML__Specification__RandomUniformStaticLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__random_uniform_static_layer_params__pack_to_buffer
                     (const CoreML__Specification__RandomUniformStaticLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RandomUniformStaticLayerParams *
       core_ml__specification__random_uniform_static_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__random_uniform_static_layer_params__free_unpacked
                     (CoreML__Specification__RandomUniformStaticLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__RandomUniformDynamicLayerParams methods */
void   core_ml__specification__random_uniform_dynamic_layer_params__init
                     (CoreML__Specification__RandomUniformDynamicLayerParams         *message);
size_t core_ml__specification__random_uniform_dynamic_layer_params__get_packed_size
                     (const CoreML__Specification__RandomUniformDynamicLayerParams   *message);
size_t core_ml__specification__random_uniform_dynamic_layer_params__pack
                     (const CoreML__Specification__RandomUniformDynamicLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__random_uniform_dynamic_layer_params__pack_to_buffer
                     (const CoreML__Specification__RandomUniformDynamicLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RandomUniformDynamicLayerParams *
       core_ml__specification__random_uniform_dynamic_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__random_uniform_dynamic_layer_params__free_unpacked
                     (CoreML__Specification__RandomUniformDynamicLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__RandomBernoulliLikeLayerParams methods */
void   core_ml__specification__random_bernoulli_like_layer_params__init
                     (CoreML__Specification__RandomBernoulliLikeLayerParams         *message);
size_t core_ml__specification__random_bernoulli_like_layer_params__get_packed_size
                     (const CoreML__Specification__RandomBernoulliLikeLayerParams   *message);
size_t core_ml__specification__random_bernoulli_like_layer_params__pack
                     (const CoreML__Specification__RandomBernoulliLikeLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__random_bernoulli_like_layer_params__pack_to_buffer
                     (const CoreML__Specification__RandomBernoulliLikeLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RandomBernoulliLikeLayerParams *
       core_ml__specification__random_bernoulli_like_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__random_bernoulli_like_layer_params__free_unpacked
                     (CoreML__Specification__RandomBernoulliLikeLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__RandomBernoulliStaticLayerParams methods */
void   core_ml__specification__random_bernoulli_static_layer_params__init
                     (CoreML__Specification__RandomBernoulliStaticLayerParams         *message);
size_t core_ml__specification__random_bernoulli_static_layer_params__get_packed_size
                     (const CoreML__Specification__RandomBernoulliStaticLayerParams   *message);
size_t core_ml__specification__random_bernoulli_static_layer_params__pack
                     (const CoreML__Specification__RandomBernoulliStaticLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__random_bernoulli_static_layer_params__pack_to_buffer
                     (const CoreML__Specification__RandomBernoulliStaticLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RandomBernoulliStaticLayerParams *
       core_ml__specification__random_bernoulli_static_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__random_bernoulli_static_layer_params__free_unpacked
                     (CoreML__Specification__RandomBernoulliStaticLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__RandomBernoulliDynamicLayerParams methods */
void   core_ml__specification__random_bernoulli_dynamic_layer_params__init
                     (CoreML__Specification__RandomBernoulliDynamicLayerParams         *message);
size_t core_ml__specification__random_bernoulli_dynamic_layer_params__get_packed_size
                     (const CoreML__Specification__RandomBernoulliDynamicLayerParams   *message);
size_t core_ml__specification__random_bernoulli_dynamic_layer_params__pack
                     (const CoreML__Specification__RandomBernoulliDynamicLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__random_bernoulli_dynamic_layer_params__pack_to_buffer
                     (const CoreML__Specification__RandomBernoulliDynamicLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__RandomBernoulliDynamicLayerParams *
       core_ml__specification__random_bernoulli_dynamic_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__random_bernoulli_dynamic_layer_params__free_unpacked
                     (CoreML__Specification__RandomBernoulliDynamicLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__CategoricalDistributionLayerParams methods */
void   core_ml__specification__categorical_distribution_layer_params__init
                     (CoreML__Specification__CategoricalDistributionLayerParams         *message);
size_t core_ml__specification__categorical_distribution_layer_params__get_packed_size
                     (const CoreML__Specification__CategoricalDistributionLayerParams   *message);
size_t core_ml__specification__categorical_distribution_layer_params__pack
                     (const CoreML__Specification__CategoricalDistributionLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__categorical_distribution_layer_params__pack_to_buffer
                     (const CoreML__Specification__CategoricalDistributionLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__CategoricalDistributionLayerParams *
       core_ml__specification__categorical_distribution_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__categorical_distribution_layer_params__free_unpacked
                     (CoreML__Specification__CategoricalDistributionLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__ReduceL1LayerParams methods */
void   core_ml__specification__reduce_l1_layer_params__init
                     (CoreML__Specification__ReduceL1LayerParams         *message);
size_t core_ml__specification__reduce_l1_layer_params__get_packed_size
                     (const CoreML__Specification__ReduceL1LayerParams   *message);
size_t core_ml__specification__reduce_l1_layer_params__pack
                     (const CoreML__Specification__ReduceL1LayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__reduce_l1_layer_params__pack_to_buffer
                     (const CoreML__Specification__ReduceL1LayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__ReduceL1LayerParams *
       core_ml__specification__reduce_l1_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__reduce_l1_layer_params__free_unpacked
                     (CoreML__Specification__ReduceL1LayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__ReduceL2LayerParams methods */
void   core_ml__specification__reduce_l2_layer_params__init
                     (CoreML__Specification__ReduceL2LayerParams         *message);
size_t core_ml__specification__reduce_l2_layer_params__get_packed_size
                     (const CoreML__Specification__ReduceL2LayerParams   *message);
size_t core_ml__specification__reduce_l2_layer_params__pack
                     (const CoreML__Specification__ReduceL2LayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__reduce_l2_layer_params__pack_to_buffer
                     (const CoreML__Specification__ReduceL2LayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__ReduceL2LayerParams *
       core_ml__specification__reduce_l2_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__reduce_l2_layer_params__free_unpacked
                     (CoreML__Specification__ReduceL2LayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__ReduceMaxLayerParams methods */
void   core_ml__specification__reduce_max_layer_params__init
                     (CoreML__Specification__ReduceMaxLayerParams         *message);
size_t core_ml__specification__reduce_max_layer_params__get_packed_size
                     (const CoreML__Specification__ReduceMaxLayerParams   *message);
size_t core_ml__specification__reduce_max_layer_params__pack
                     (const CoreML__Specification__ReduceMaxLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__reduce_max_layer_params__pack_to_buffer
                     (const CoreML__Specification__ReduceMaxLayerParams   *message,
                      ProtobufCBuffer     *buffer);
CoreML__Specification__ReduceMaxLayerParams *
       core_ml__specification__reduce_max_layer_params__unpack
                     (ProtobufCAllocator  *allocator,
                      size_t               len,
                      const uint8_t       *data);
void   core_ml__specification__reduce_max_layer_params__free_unpacked
                     (CoreML__Specification__ReduceMaxLayerParams *message,
                      ProtobufCAllocator *allocator);
/* CoreML__Specification__ReduceMinLayerParams methods */
void   core_ml__specification__reduce_min_layer_params__init
                     (CoreML__Specification__ReduceMinLayerParams         *message);
size_t core_ml__specification__reduce_min_layer_params__get_packed_size
                     (const CoreML__Specification__ReduceMinLayerParams   *message);
size_t core_ml__specification__reduce_min_layer_params__pack
                     (const CoreML__Specification__ReduceMinLayerParams   *message,
                      uint8_t             *out);
size_t core_ml__specification__reduce_min_layer_params__pack_to_buffer
10188                      (const CoreML__Specification__ReduceMinLayerParams   *message,
10189                       ProtobufCBuffer     *buffer);
10190 CoreML__Specification__ReduceMinLayerParams *
10191        core_ml__specification__reduce_min_layer_params__unpack
10192                      (ProtobufCAllocator  *allocator,
10193                       size_t               len,
10194                       const uint8_t       *data);
10195 void   core_ml__specification__reduce_min_layer_params__free_unpacked
10196                      (CoreML__Specification__ReduceMinLayerParams *message,
10197                       ProtobufCAllocator *allocator);
10198 /* CoreML__Specification__ReduceSumLayerParams methods */
10199 void   core_ml__specification__reduce_sum_layer_params__init
10200                      (CoreML__Specification__ReduceSumLayerParams         *message);
10201 size_t core_ml__specification__reduce_sum_layer_params__get_packed_size
10202                      (const CoreML__Specification__ReduceSumLayerParams   *message);
10203 size_t core_ml__specification__reduce_sum_layer_params__pack
10204                      (const CoreML__Specification__ReduceSumLayerParams   *message,
10205                       uint8_t             *out);
10206 size_t core_ml__specification__reduce_sum_layer_params__pack_to_buffer
10207                      (const CoreML__Specification__ReduceSumLayerParams   *message,
10208                       ProtobufCBuffer     *buffer);
10209 CoreML__Specification__ReduceSumLayerParams *
10210        core_ml__specification__reduce_sum_layer_params__unpack
10211                      (ProtobufCAllocator  *allocator,
10212                       size_t               len,
10213                       const uint8_t       *data);
10214 void   core_ml__specification__reduce_sum_layer_params__free_unpacked
10215                      (CoreML__Specification__ReduceSumLayerParams *message,
10216                       ProtobufCAllocator *allocator);
10217 /* CoreML__Specification__ReduceProdLayerParams methods */
10218 void   core_ml__specification__reduce_prod_layer_params__init
10219                      (CoreML__Specification__ReduceProdLayerParams         *message);
10220 size_t core_ml__specification__reduce_prod_layer_params__get_packed_size
10221                      (const CoreML__Specification__ReduceProdLayerParams   *message);
10222 size_t core_ml__specification__reduce_prod_layer_params__pack
10223                      (const CoreML__Specification__ReduceProdLayerParams   *message,
10224                       uint8_t             *out);
10225 size_t core_ml__specification__reduce_prod_layer_params__pack_to_buffer
10226                      (const CoreML__Specification__ReduceProdLayerParams   *message,
10227                       ProtobufCBuffer     *buffer);
10228 CoreML__Specification__ReduceProdLayerParams *
10229        core_ml__specification__reduce_prod_layer_params__unpack
10230                      (ProtobufCAllocator  *allocator,
10231                       size_t               len,
10232                       const uint8_t       *data);
10233 void   core_ml__specification__reduce_prod_layer_params__free_unpacked
10234                      (CoreML__Specification__ReduceProdLayerParams *message,
10235                       ProtobufCAllocator *allocator);
10236 /* CoreML__Specification__ReduceMeanLayerParams methods */
10237 void   core_ml__specification__reduce_mean_layer_params__init
10238                      (CoreML__Specification__ReduceMeanLayerParams         *message);
10239 size_t core_ml__specification__reduce_mean_layer_params__get_packed_size
10240                      (const CoreML__Specification__ReduceMeanLayerParams   *message);
10241 size_t core_ml__specification__reduce_mean_layer_params__pack
10242                      (const CoreML__Specification__ReduceMeanLayerParams   *message,
10243                       uint8_t             *out);
10244 size_t core_ml__specification__reduce_mean_layer_params__pack_to_buffer
10245                      (const CoreML__Specification__ReduceMeanLayerParams   *message,
10246                       ProtobufCBuffer     *buffer);
10247 CoreML__Specification__ReduceMeanLayerParams *
10248        core_ml__specification__reduce_mean_layer_params__unpack
10249                      (ProtobufCAllocator  *allocator,
10250                       size_t               len,
10251                       const uint8_t       *data);
10252 void   core_ml__specification__reduce_mean_layer_params__free_unpacked
10253                      (CoreML__Specification__ReduceMeanLayerParams *message,
10254                       ProtobufCAllocator *allocator);
10255 /* CoreML__Specification__ReduceLogSumLayerParams methods */
10256 void   core_ml__specification__reduce_log_sum_layer_params__init
10257                      (CoreML__Specification__ReduceLogSumLayerParams         *message);
10258 size_t core_ml__specification__reduce_log_sum_layer_params__get_packed_size
10259                      (const CoreML__Specification__ReduceLogSumLayerParams   *message);
10260 size_t core_ml__specification__reduce_log_sum_layer_params__pack
10261                      (const CoreML__Specification__ReduceLogSumLayerParams   *message,
10262                       uint8_t             *out);
10263 size_t core_ml__specification__reduce_log_sum_layer_params__pack_to_buffer
10264                      (const CoreML__Specification__ReduceLogSumLayerParams   *message,
10265                       ProtobufCBuffer     *buffer);
10266 CoreML__Specification__ReduceLogSumLayerParams *
10267        core_ml__specification__reduce_log_sum_layer_params__unpack
10268                      (ProtobufCAllocator  *allocator,
10269                       size_t               len,
10270                       const uint8_t       *data);
10271 void   core_ml__specification__reduce_log_sum_layer_params__free_unpacked
10272                      (CoreML__Specification__ReduceLogSumLayerParams *message,
10273                       ProtobufCAllocator *allocator);
10274 /* CoreML__Specification__ReduceSumSquareLayerParams methods */
10275 void   core_ml__specification__reduce_sum_square_layer_params__init
10276                      (CoreML__Specification__ReduceSumSquareLayerParams         *message);
10277 size_t core_ml__specification__reduce_sum_square_layer_params__get_packed_size
10278                      (const CoreML__Specification__ReduceSumSquareLayerParams   *message);
10279 size_t core_ml__specification__reduce_sum_square_layer_params__pack
10280                      (const CoreML__Specification__ReduceSumSquareLayerParams   *message,
10281                       uint8_t             *out);
10282 size_t core_ml__specification__reduce_sum_square_layer_params__pack_to_buffer
10283                      (const CoreML__Specification__ReduceSumSquareLayerParams   *message,
10284                       ProtobufCBuffer     *buffer);
10285 CoreML__Specification__ReduceSumSquareLayerParams *
10286        core_ml__specification__reduce_sum_square_layer_params__unpack
10287                      (ProtobufCAllocator  *allocator,
10288                       size_t               len,
10289                       const uint8_t       *data);
10290 void   core_ml__specification__reduce_sum_square_layer_params__free_unpacked
10291                      (CoreML__Specification__ReduceSumSquareLayerParams *message,
10292                       ProtobufCAllocator *allocator);
10293 /* CoreML__Specification__ReduceLogSumExpLayerParams methods */
10294 void   core_ml__specification__reduce_log_sum_exp_layer_params__init
10295                      (CoreML__Specification__ReduceLogSumExpLayerParams         *message);
10296 size_t core_ml__specification__reduce_log_sum_exp_layer_params__get_packed_size
10297                      (const CoreML__Specification__ReduceLogSumExpLayerParams   *message);
10298 size_t core_ml__specification__reduce_log_sum_exp_layer_params__pack
10299                      (const CoreML__Specification__ReduceLogSumExpLayerParams   *message,
10300                       uint8_t             *out);
10301 size_t core_ml__specification__reduce_log_sum_exp_layer_params__pack_to_buffer
10302                      (const CoreML__Specification__ReduceLogSumExpLayerParams   *message,
10303                       ProtobufCBuffer     *buffer);
10304 CoreML__Specification__ReduceLogSumExpLayerParams *
10305        core_ml__specification__reduce_log_sum_exp_layer_params__unpack
10306                      (ProtobufCAllocator  *allocator,
10307                       size_t               len,
10308                       const uint8_t       *data);
10309 void   core_ml__specification__reduce_log_sum_exp_layer_params__free_unpacked
10310                      (CoreML__Specification__ReduceLogSumExpLayerParams *message,
10311                       ProtobufCAllocator *allocator);
10312 /* CoreML__Specification__ExpandDimsLayerParams methods */
10313 void   core_ml__specification__expand_dims_layer_params__init
10314                      (CoreML__Specification__ExpandDimsLayerParams         *message);
10315 size_t core_ml__specification__expand_dims_layer_params__get_packed_size
10316                      (const CoreML__Specification__ExpandDimsLayerParams   *message);
10317 size_t core_ml__specification__expand_dims_layer_params__pack
10318                      (const CoreML__Specification__ExpandDimsLayerParams   *message,
10319                       uint8_t             *out);
10320 size_t core_ml__specification__expand_dims_layer_params__pack_to_buffer
10321                      (const CoreML__Specification__ExpandDimsLayerParams   *message,
10322                       ProtobufCBuffer     *buffer);
10323 CoreML__Specification__ExpandDimsLayerParams *
10324        core_ml__specification__expand_dims_layer_params__unpack
10325                      (ProtobufCAllocator  *allocator,
10326                       size_t               len,
10327                       const uint8_t       *data);
10328 void   core_ml__specification__expand_dims_layer_params__free_unpacked
10329                      (CoreML__Specification__ExpandDimsLayerParams *message,
10330                       ProtobufCAllocator *allocator);
10331 /* CoreML__Specification__FlattenTo2DLayerParams methods */
10332 void   core_ml__specification__flatten_to2_dlayer_params__init
10333                      (CoreML__Specification__FlattenTo2DLayerParams         *message);
10334 size_t core_ml__specification__flatten_to2_dlayer_params__get_packed_size
10335                      (const CoreML__Specification__FlattenTo2DLayerParams   *message);
10336 size_t core_ml__specification__flatten_to2_dlayer_params__pack
10337                      (const CoreML__Specification__FlattenTo2DLayerParams   *message,
10338                       uint8_t             *out);
10339 size_t core_ml__specification__flatten_to2_dlayer_params__pack_to_buffer
10340                      (const CoreML__Specification__FlattenTo2DLayerParams   *message,
10341                       ProtobufCBuffer     *buffer);
10342 CoreML__Specification__FlattenTo2DLayerParams *
10343        core_ml__specification__flatten_to2_dlayer_params__unpack
10344                      (ProtobufCAllocator  *allocator,
10345                       size_t               len,
10346                       const uint8_t       *data);
10347 void   core_ml__specification__flatten_to2_dlayer_params__free_unpacked
10348                      (CoreML__Specification__FlattenTo2DLayerParams *message,
10349                       ProtobufCAllocator *allocator);
10350 /* CoreML__Specification__ReshapeStaticLayerParams methods */
10351 void   core_ml__specification__reshape_static_layer_params__init
10352                      (CoreML__Specification__ReshapeStaticLayerParams         *message);
10353 size_t core_ml__specification__reshape_static_layer_params__get_packed_size
10354                      (const CoreML__Specification__ReshapeStaticLayerParams   *message);
10355 size_t core_ml__specification__reshape_static_layer_params__pack
10356                      (const CoreML__Specification__ReshapeStaticLayerParams   *message,
10357                       uint8_t             *out);
10358 size_t core_ml__specification__reshape_static_layer_params__pack_to_buffer
10359                      (const CoreML__Specification__ReshapeStaticLayerParams   *message,
10360                       ProtobufCBuffer     *buffer);
10361 CoreML__Specification__ReshapeStaticLayerParams *
10362        core_ml__specification__reshape_static_layer_params__unpack
10363                      (ProtobufCAllocator  *allocator,
10364                       size_t               len,
10365                       const uint8_t       *data);
10366 void   core_ml__specification__reshape_static_layer_params__free_unpacked
10367                      (CoreML__Specification__ReshapeStaticLayerParams *message,
10368                       ProtobufCAllocator *allocator);
10369 /* CoreML__Specification__ReshapeLikeLayerParams methods */
10370 void   core_ml__specification__reshape_like_layer_params__init
10371                      (CoreML__Specification__ReshapeLikeLayerParams         *message);
10372 size_t core_ml__specification__reshape_like_layer_params__get_packed_size
10373                      (const CoreML__Specification__ReshapeLikeLayerParams   *message);
10374 size_t core_ml__specification__reshape_like_layer_params__pack
10375                      (const CoreML__Specification__ReshapeLikeLayerParams   *message,
10376                       uint8_t             *out);
10377 size_t core_ml__specification__reshape_like_layer_params__pack_to_buffer
10378                      (const CoreML__Specification__ReshapeLikeLayerParams   *message,
10379                       ProtobufCBuffer     *buffer);
10380 CoreML__Specification__ReshapeLikeLayerParams *
10381        core_ml__specification__reshape_like_layer_params__unpack
10382                      (ProtobufCAllocator  *allocator,
10383                       size_t               len,
10384                       const uint8_t       *data);
10385 void   core_ml__specification__reshape_like_layer_params__free_unpacked
10386                      (CoreML__Specification__ReshapeLikeLayerParams *message,
10387                       ProtobufCAllocator *allocator);
10388 /* CoreML__Specification__ReshapeDynamicLayerParams methods */
10389 void   core_ml__specification__reshape_dynamic_layer_params__init
10390                      (CoreML__Specification__ReshapeDynamicLayerParams         *message);
10391 size_t core_ml__specification__reshape_dynamic_layer_params__get_packed_size
10392                      (const CoreML__Specification__ReshapeDynamicLayerParams   *message);
10393 size_t core_ml__specification__reshape_dynamic_layer_params__pack
10394                      (const CoreML__Specification__ReshapeDynamicLayerParams   *message,
10395                       uint8_t             *out);
10396 size_t core_ml__specification__reshape_dynamic_layer_params__pack_to_buffer
10397                      (const CoreML__Specification__ReshapeDynamicLayerParams   *message,
10398                       ProtobufCBuffer     *buffer);
10399 CoreML__Specification__ReshapeDynamicLayerParams *
10400        core_ml__specification__reshape_dynamic_layer_params__unpack
10401                      (ProtobufCAllocator  *allocator,
10402                       size_t               len,
10403                       const uint8_t       *data);
10404 void   core_ml__specification__reshape_dynamic_layer_params__free_unpacked
10405                      (CoreML__Specification__ReshapeDynamicLayerParams *message,
10406                       ProtobufCAllocator *allocator);
10407 /* CoreML__Specification__SqueezeLayerParams methods */
10408 void   core_ml__specification__squeeze_layer_params__init
10409                      (CoreML__Specification__SqueezeLayerParams         *message);
10410 size_t core_ml__specification__squeeze_layer_params__get_packed_size
10411                      (const CoreML__Specification__SqueezeLayerParams   *message);
10412 size_t core_ml__specification__squeeze_layer_params__pack
10413                      (const CoreML__Specification__SqueezeLayerParams   *message,
10414                       uint8_t             *out);
10415 size_t core_ml__specification__squeeze_layer_params__pack_to_buffer
10416                      (const CoreML__Specification__SqueezeLayerParams   *message,
10417                       ProtobufCBuffer     *buffer);
10418 CoreML__Specification__SqueezeLayerParams *
10419        core_ml__specification__squeeze_layer_params__unpack
10420                      (ProtobufCAllocator  *allocator,
10421                       size_t               len,
10422                       const uint8_t       *data);
10423 void   core_ml__specification__squeeze_layer_params__free_unpacked
10424                      (CoreML__Specification__SqueezeLayerParams *message,
10425                       ProtobufCAllocator *allocator);
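/*
 * Usage sketch (illustrative only): pack_to_buffer appends the encoding to a
 * ProtobufCBuffer instead of a caller-sized array. The sketch below, shown
 * for SqueezeLayerParams, assumes the ProtobufCBufferSimple helper and its
 * PROTOBUF_C_BUFFER_SIMPLE_INIT / PROTOBUF_C_BUFFER_SIMPLE_CLEAR macros from
 * <protobuf-c/protobuf-c.h>, plus <stdio.h> for fwrite. The scratch array
 * spills to heap storage if the encoding is larger, and the CLEAR macro
 * releases any such storage.
 *
 *   static size_t
 *   write_squeeze_params(FILE *fp, const CoreML__Specification__SqueezeLayerParams *msg)
 *   {
 *       uint8_t scratch[256];
 *       ProtobufCBufferSimple out = PROTOBUF_C_BUFFER_SIMPLE_INIT(scratch);
 *       size_t written =
 *           core_ml__specification__squeeze_layer_params__pack_to_buffer(msg, &out.base);
 *       fwrite(out.data, 1, out.len, fp);
 *       PROTOBUF_C_BUFFER_SIMPLE_CLEAR(&out);
 *       return written;
 *   }
 */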
10426 /* CoreML__Specification__TopKLayerParams methods */
10427 void   core_ml__specification__top_klayer_params__init
10428                      (CoreML__Specification__TopKLayerParams         *message);
10429 size_t core_ml__specification__top_klayer_params__get_packed_size
10430                      (const CoreML__Specification__TopKLayerParams   *message);
10431 size_t core_ml__specification__top_klayer_params__pack
10432                      (const CoreML__Specification__TopKLayerParams   *message,
10433                       uint8_t             *out);
10434 size_t core_ml__specification__top_klayer_params__pack_to_buffer
10435                      (const CoreML__Specification__TopKLayerParams   *message,
10436                       ProtobufCBuffer     *buffer);
10437 CoreML__Specification__TopKLayerParams *
10438        core_ml__specification__top_klayer_params__unpack
10439                      (ProtobufCAllocator  *allocator,
10440                       size_t               len,
10441                       const uint8_t       *data);
10442 void   core_ml__specification__top_klayer_params__free_unpacked
10443                      (CoreML__Specification__TopKLayerParams *message,
10444                       ProtobufCAllocator *allocator);
10445 /* CoreML__Specification__ArgMaxLayerParams methods */
10446 void   core_ml__specification__arg_max_layer_params__init
10447                      (CoreML__Specification__ArgMaxLayerParams         *message);
10448 size_t core_ml__specification__arg_max_layer_params__get_packed_size
10449                      (const CoreML__Specification__ArgMaxLayerParams   *message);
10450 size_t core_ml__specification__arg_max_layer_params__pack
10451                      (const CoreML__Specification__ArgMaxLayerParams   *message,
10452                       uint8_t             *out);
10453 size_t core_ml__specification__arg_max_layer_params__pack_to_buffer
10454                      (const CoreML__Specification__ArgMaxLayerParams   *message,
10455                       ProtobufCBuffer     *buffer);
10456 CoreML__Specification__ArgMaxLayerParams *
10457        core_ml__specification__arg_max_layer_params__unpack
10458                      (ProtobufCAllocator  *allocator,
10459                       size_t               len,
10460                       const uint8_t       *data);
10461 void   core_ml__specification__arg_max_layer_params__free_unpacked
10462                      (CoreML__Specification__ArgMaxLayerParams *message,
10463                       ProtobufCAllocator *allocator);
10464 /* CoreML__Specification__ArgMinLayerParams methods */
10465 void   core_ml__specification__arg_min_layer_params__init
10466                      (CoreML__Specification__ArgMinLayerParams         *message);
10467 size_t core_ml__specification__arg_min_layer_params__get_packed_size
10468                      (const CoreML__Specification__ArgMinLayerParams   *message);
10469 size_t core_ml__specification__arg_min_layer_params__pack
10470                      (const CoreML__Specification__ArgMinLayerParams   *message,
10471                       uint8_t             *out);
10472 size_t core_ml__specification__arg_min_layer_params__pack_to_buffer
10473                      (const CoreML__Specification__ArgMinLayerParams   *message,
10474                       ProtobufCBuffer     *buffer);
10475 CoreML__Specification__ArgMinLayerParams *
10476        core_ml__specification__arg_min_layer_params__unpack
10477                      (ProtobufCAllocator  *allocator,
10478                       size_t               len,
10479                       const uint8_t       *data);
10480 void   core_ml__specification__arg_min_layer_params__free_unpacked
10481                      (CoreML__Specification__ArgMinLayerParams *message,
10482                       ProtobufCAllocator *allocator);
10483 /* CoreML__Specification__SplitNDLayerParams methods */
10484 void   core_ml__specification__split_ndlayer_params__init
10485                      (CoreML__Specification__SplitNDLayerParams         *message);
10486 size_t core_ml__specification__split_ndlayer_params__get_packed_size
10487                      (const CoreML__Specification__SplitNDLayerParams   *message);
10488 size_t core_ml__specification__split_ndlayer_params__pack
10489                      (const CoreML__Specification__SplitNDLayerParams   *message,
10490                       uint8_t             *out);
10491 size_t core_ml__specification__split_ndlayer_params__pack_to_buffer
10492                      (const CoreML__Specification__SplitNDLayerParams   *message,
10493                       ProtobufCBuffer     *buffer);
10494 CoreML__Specification__SplitNDLayerParams *
10495        core_ml__specification__split_ndlayer_params__unpack
10496                      (ProtobufCAllocator  *allocator,
10497                       size_t               len,
10498                       const uint8_t       *data);
10499 void   core_ml__specification__split_ndlayer_params__free_unpacked
10500                      (CoreML__Specification__SplitNDLayerParams *message,
10501                       ProtobufCAllocator *allocator);
10502 /* CoreML__Specification__CeilLayerParams methods */
10503 void   core_ml__specification__ceil_layer_params__init
10504                      (CoreML__Specification__CeilLayerParams         *message);
10505 size_t core_ml__specification__ceil_layer_params__get_packed_size
10506                      (const CoreML__Specification__CeilLayerParams   *message);
10507 size_t core_ml__specification__ceil_layer_params__pack
10508                      (const CoreML__Specification__CeilLayerParams   *message,
10509                       uint8_t             *out);
10510 size_t core_ml__specification__ceil_layer_params__pack_to_buffer
10511                      (const CoreML__Specification__CeilLayerParams   *message,
10512                       ProtobufCBuffer     *buffer);
10513 CoreML__Specification__CeilLayerParams *
10514        core_ml__specification__ceil_layer_params__unpack
10515                      (ProtobufCAllocator  *allocator,
10516                       size_t               len,
10517                       const uint8_t       *data);
10518 void   core_ml__specification__ceil_layer_params__free_unpacked
10519                      (CoreML__Specification__CeilLayerParams *message,
10520                       ProtobufCAllocator *allocator);
10521 /* CoreML__Specification__RoundLayerParams methods */
10522 void   core_ml__specification__round_layer_params__init
10523                      (CoreML__Specification__RoundLayerParams         *message);
10524 size_t core_ml__specification__round_layer_params__get_packed_size
10525                      (const CoreML__Specification__RoundLayerParams   *message);
10526 size_t core_ml__specification__round_layer_params__pack
10527                      (const CoreML__Specification__RoundLayerParams   *message,
10528                       uint8_t             *out);
10529 size_t core_ml__specification__round_layer_params__pack_to_buffer
10530                      (const CoreML__Specification__RoundLayerParams   *message,
10531                       ProtobufCBuffer     *buffer);
10532 CoreML__Specification__RoundLayerParams *
10533        core_ml__specification__round_layer_params__unpack
10534                      (ProtobufCAllocator  *allocator,
10535                       size_t               len,
10536                       const uint8_t       *data);
10537 void   core_ml__specification__round_layer_params__free_unpacked
10538                      (CoreML__Specification__RoundLayerParams *message,
10539                       ProtobufCAllocator *allocator);
10540 /* CoreML__Specification__FloorLayerParams methods */
10541 void   core_ml__specification__floor_layer_params__init
10542                      (CoreML__Specification__FloorLayerParams         *message);
10543 size_t core_ml__specification__floor_layer_params__get_packed_size
10544                      (const CoreML__Specification__FloorLayerParams   *message);
10545 size_t core_ml__specification__floor_layer_params__pack
10546                      (const CoreML__Specification__FloorLayerParams   *message,
10547                       uint8_t             *out);
10548 size_t core_ml__specification__floor_layer_params__pack_to_buffer
10549                      (const CoreML__Specification__FloorLayerParams   *message,
10550                       ProtobufCBuffer     *buffer);
10551 CoreML__Specification__FloorLayerParams *
10552        core_ml__specification__floor_layer_params__unpack
10553                      (ProtobufCAllocator  *allocator,
10554                       size_t               len,
10555                       const uint8_t       *data);
10556 void   core_ml__specification__floor_layer_params__free_unpacked
10557                      (CoreML__Specification__FloorLayerParams *message,
10558                       ProtobufCAllocator *allocator);
10559 /* CoreML__Specification__SignLayerParams methods */
10560 void   core_ml__specification__sign_layer_params__init
10561                      (CoreML__Specification__SignLayerParams         *message);
10562 size_t core_ml__specification__sign_layer_params__get_packed_size
10563                      (const CoreML__Specification__SignLayerParams   *message);
10564 size_t core_ml__specification__sign_layer_params__pack
10565                      (const CoreML__Specification__SignLayerParams   *message,
10566                       uint8_t             *out);
10567 size_t core_ml__specification__sign_layer_params__pack_to_buffer
10568                      (const CoreML__Specification__SignLayerParams   *message,
10569                       ProtobufCBuffer     *buffer);
10570 CoreML__Specification__SignLayerParams *
10571        core_ml__specification__sign_layer_params__unpack
10572                      (ProtobufCAllocator  *allocator,
10573                       size_t               len,
10574                       const uint8_t       *data);
10575 void   core_ml__specification__sign_layer_params__free_unpacked
10576                      (CoreML__Specification__SignLayerParams *message,
10577                       ProtobufCAllocator *allocator);
10578 /* CoreML__Specification__ClipLayerParams methods */
10579 void   core_ml__specification__clip_layer_params__init
10580                      (CoreML__Specification__ClipLayerParams         *message);
10581 size_t core_ml__specification__clip_layer_params__get_packed_size
10582                      (const CoreML__Specification__ClipLayerParams   *message);
10583 size_t core_ml__specification__clip_layer_params__pack
10584                      (const CoreML__Specification__ClipLayerParams   *message,
10585                       uint8_t             *out);
10586 size_t core_ml__specification__clip_layer_params__pack_to_buffer
10587                      (const CoreML__Specification__ClipLayerParams   *message,
10588                       ProtobufCBuffer     *buffer);
10589 CoreML__Specification__ClipLayerParams *
10590        core_ml__specification__clip_layer_params__unpack
10591                      (ProtobufCAllocator  *allocator,
10592                       size_t               len,
10593                       const uint8_t       *data);
10594 void   core_ml__specification__clip_layer_params__free_unpacked
10595                      (CoreML__Specification__ClipLayerParams *message,
10596                       ProtobufCAllocator *allocator);
10597 /* CoreML__Specification__SliceStaticLayerParams methods */
10598 void   core_ml__specification__slice_static_layer_params__init
10599                      (CoreML__Specification__SliceStaticLayerParams         *message);
10600 size_t core_ml__specification__slice_static_layer_params__get_packed_size
10601                      (const CoreML__Specification__SliceStaticLayerParams   *message);
10602 size_t core_ml__specification__slice_static_layer_params__pack
10603                      (const CoreML__Specification__SliceStaticLayerParams   *message,
10604                       uint8_t             *out);
10605 size_t core_ml__specification__slice_static_layer_params__pack_to_buffer
10606                      (const CoreML__Specification__SliceStaticLayerParams   *message,
10607                       ProtobufCBuffer     *buffer);
10608 CoreML__Specification__SliceStaticLayerParams *
10609        core_ml__specification__slice_static_layer_params__unpack
10610                      (ProtobufCAllocator  *allocator,
10611                       size_t               len,
10612                       const uint8_t       *data);
10613 void   core_ml__specification__slice_static_layer_params__free_unpacked
10614                      (CoreML__Specification__SliceStaticLayerParams *message,
10615                       ProtobufCAllocator *allocator);
10616 /* CoreML__Specification__SliceDynamicLayerParams methods */
10617 void   core_ml__specification__slice_dynamic_layer_params__init
10618                      (CoreML__Specification__SliceDynamicLayerParams         *message);
10619 size_t core_ml__specification__slice_dynamic_layer_params__get_packed_size
10620                      (const CoreML__Specification__SliceDynamicLayerParams   *message);
10621 size_t core_ml__specification__slice_dynamic_layer_params__pack
10622                      (const CoreML__Specification__SliceDynamicLayerParams   *message,
10623                       uint8_t             *out);
10624 size_t core_ml__specification__slice_dynamic_layer_params__pack_to_buffer
10625                      (const CoreML__Specification__SliceDynamicLayerParams   *message,
10626                       ProtobufCBuffer     *buffer);
10627 CoreML__Specification__SliceDynamicLayerParams *
10628        core_ml__specification__slice_dynamic_layer_params__unpack
10629                      (ProtobufCAllocator  *allocator,
10630                       size_t               len,
10631                       const uint8_t       *data);
10632 void   core_ml__specification__slice_dynamic_layer_params__free_unpacked
10633                      (CoreML__Specification__SliceDynamicLayerParams *message,
10634                       ProtobufCAllocator *allocator);
10635 /* CoreML__Specification__TileLayerParams methods */
10636 void   core_ml__specification__tile_layer_params__init
10637                      (CoreML__Specification__TileLayerParams         *message);
10638 size_t core_ml__specification__tile_layer_params__get_packed_size
10639                      (const CoreML__Specification__TileLayerParams   *message);
10640 size_t core_ml__specification__tile_layer_params__pack
10641                      (const CoreML__Specification__TileLayerParams   *message,
10642                       uint8_t             *out);
10643 size_t core_ml__specification__tile_layer_params__pack_to_buffer
10644                      (const CoreML__Specification__TileLayerParams   *message,
10645                       ProtobufCBuffer     *buffer);
10646 CoreML__Specification__TileLayerParams *
10647        core_ml__specification__tile_layer_params__unpack
10648                      (ProtobufCAllocator  *allocator,
10649                       size_t               len,
10650                       const uint8_t       *data);
10651 void   core_ml__specification__tile_layer_params__free_unpacked
10652                      (CoreML__Specification__TileLayerParams *message,
10653                       ProtobufCAllocator *allocator);
10654 /* CoreML__Specification__GetShapeLayerParams methods */
10655 void   core_ml__specification__get_shape_layer_params__init
10656                      (CoreML__Specification__GetShapeLayerParams         *message);
10657 size_t core_ml__specification__get_shape_layer_params__get_packed_size
10658                      (const CoreML__Specification__GetShapeLayerParams   *message);
10659 size_t core_ml__specification__get_shape_layer_params__pack
10660                      (const CoreML__Specification__GetShapeLayerParams   *message,
10661                       uint8_t             *out);
10662 size_t core_ml__specification__get_shape_layer_params__pack_to_buffer
10663                      (const CoreML__Specification__GetShapeLayerParams   *message,
10664                       ProtobufCBuffer     *buffer);
10665 CoreML__Specification__GetShapeLayerParams *
10666        core_ml__specification__get_shape_layer_params__unpack
10667                      (ProtobufCAllocator  *allocator,
10668                       size_t               len,
10669                       const uint8_t       *data);
10670 void   core_ml__specification__get_shape_layer_params__free_unpacked
10671                      (CoreML__Specification__GetShapeLayerParams *message,
10672                       ProtobufCAllocator *allocator);
10673 /* CoreML__Specification__ErfLayerParams methods */
10674 void   core_ml__specification__erf_layer_params__init
10675                      (CoreML__Specification__ErfLayerParams         *message);
10676 size_t core_ml__specification__erf_layer_params__get_packed_size
10677                      (const CoreML__Specification__ErfLayerParams   *message);
10678 size_t core_ml__specification__erf_layer_params__pack
10679                      (const CoreML__Specification__ErfLayerParams   *message,
10680                       uint8_t             *out);
10681 size_t core_ml__specification__erf_layer_params__pack_to_buffer
10682                      (const CoreML__Specification__ErfLayerParams   *message,
10683                       ProtobufCBuffer     *buffer);
10684 CoreML__Specification__ErfLayerParams *
10685        core_ml__specification__erf_layer_params__unpack
10686                      (ProtobufCAllocator  *allocator,
10687                       size_t               len,
10688                       const uint8_t       *data);
10689 void   core_ml__specification__erf_layer_params__free_unpacked
10690                      (CoreML__Specification__ErfLayerParams *message,
10691                       ProtobufCAllocator *allocator);
10692 /* CoreML__Specification__GeluLayerParams methods */
10693 void   core_ml__specification__gelu_layer_params__init
10694                      (CoreML__Specification__GeluLayerParams         *message);
10695 size_t core_ml__specification__gelu_layer_params__get_packed_size
10696                      (const CoreML__Specification__GeluLayerParams   *message);
10697 size_t core_ml__specification__gelu_layer_params__pack
10698                      (const CoreML__Specification__GeluLayerParams   *message,
10699                       uint8_t             *out);
10700 size_t core_ml__specification__gelu_layer_params__pack_to_buffer
10701                      (const CoreML__Specification__GeluLayerParams   *message,
10702                       ProtobufCBuffer     *buffer);
10703 CoreML__Specification__GeluLayerParams *
10704        core_ml__specification__gelu_layer_params__unpack
10705                      (ProtobufCAllocator  *allocator,
10706                       size_t               len,
10707                       const uint8_t       *data);
10708 void   core_ml__specification__gelu_layer_params__free_unpacked
10709                      (CoreML__Specification__GeluLayerParams *message,
10710                       ProtobufCAllocator *allocator);
10711 /* CoreML__Specification__RangeStaticLayerParams methods */
10712 void   core_ml__specification__range_static_layer_params__init
10713                      (CoreML__Specification__RangeStaticLayerParams         *message);
10714 size_t core_ml__specification__range_static_layer_params__get_packed_size
10715                      (const CoreML__Specification__RangeStaticLayerParams   *message);
10716 size_t core_ml__specification__range_static_layer_params__pack
10717                      (const CoreML__Specification__RangeStaticLayerParams   *message,
10718                       uint8_t             *out);
10719 size_t core_ml__specification__range_static_layer_params__pack_to_buffer
10720                      (const CoreML__Specification__RangeStaticLayerParams   *message,
10721                       ProtobufCBuffer     *buffer);
10722 CoreML__Specification__RangeStaticLayerParams *
10723        core_ml__specification__range_static_layer_params__unpack
10724                      (ProtobufCAllocator  *allocator,
10725                       size_t               len,
10726                       const uint8_t       *data);
10727 void   core_ml__specification__range_static_layer_params__free_unpacked
10728                      (CoreML__Specification__RangeStaticLayerParams *message,
10729                       ProtobufCAllocator *allocator);
10730 /* CoreML__Specification__RangeDynamicLayerParams methods */
10731 void   core_ml__specification__range_dynamic_layer_params__init
10732                      (CoreML__Specification__RangeDynamicLayerParams         *message);
10733 size_t core_ml__specification__range_dynamic_layer_params__get_packed_size
10734                      (const CoreML__Specification__RangeDynamicLayerParams   *message);
10735 size_t core_ml__specification__range_dynamic_layer_params__pack
10736                      (const CoreML__Specification__RangeDynamicLayerParams   *message,
10737                       uint8_t             *out);
10738 size_t core_ml__specification__range_dynamic_layer_params__pack_to_buffer
10739                      (const CoreML__Specification__RangeDynamicLayerParams   *message,
10740                       ProtobufCBuffer     *buffer);
10741 CoreML__Specification__RangeDynamicLayerParams *
10742        core_ml__specification__range_dynamic_layer_params__unpack
10743                      (ProtobufCAllocator  *allocator,
10744                       size_t               len,
10745                       const uint8_t       *data);
10746 void   core_ml__specification__range_dynamic_layer_params__free_unpacked
10747                      (CoreML__Specification__RangeDynamicLayerParams *message,
10748                       ProtobufCAllocator *allocator);
10749 /* CoreML__Specification__SlidingWindowsLayerParams methods */
10750 void   core_ml__specification__sliding_windows_layer_params__init
10751                      (CoreML__Specification__SlidingWindowsLayerParams         *message);
10752 size_t core_ml__specification__sliding_windows_layer_params__get_packed_size
10753                      (const CoreML__Specification__SlidingWindowsLayerParams   *message);
10754 size_t core_ml__specification__sliding_windows_layer_params__pack
10755                      (const CoreML__Specification__SlidingWindowsLayerParams   *message,
10756                       uint8_t             *out);
10757 size_t core_ml__specification__sliding_windows_layer_params__pack_to_buffer
10758                      (const CoreML__Specification__SlidingWindowsLayerParams   *message,
10759                       ProtobufCBuffer     *buffer);
10760 CoreML__Specification__SlidingWindowsLayerParams *
10761        core_ml__specification__sliding_windows_layer_params__unpack
10762                      (ProtobufCAllocator  *allocator,
10763                       size_t               len,
10764                       const uint8_t       *data);
10765 void   core_ml__specification__sliding_windows_layer_params__free_unpacked
10766                      (CoreML__Specification__SlidingWindowsLayerParams *message,
10767                       ProtobufCAllocator *allocator);
10768 /* CoreML__Specification__LayerNormalizationLayerParams methods */
10769 void   core_ml__specification__layer_normalization_layer_params__init
10770                      (CoreML__Specification__LayerNormalizationLayerParams         *message);
10771 size_t core_ml__specification__layer_normalization_layer_params__get_packed_size
10772                      (const CoreML__Specification__LayerNormalizationLayerParams   *message);
10773 size_t core_ml__specification__layer_normalization_layer_params__pack
10774                      (const CoreML__Specification__LayerNormalizationLayerParams   *message,
10775                       uint8_t             *out);
10776 size_t core_ml__specification__layer_normalization_layer_params__pack_to_buffer
10777                      (const CoreML__Specification__LayerNormalizationLayerParams   *message,
10778                       ProtobufCBuffer     *buffer);
10779 CoreML__Specification__LayerNormalizationLayerParams *
10780        core_ml__specification__layer_normalization_layer_params__unpack
10781                      (ProtobufCAllocator  *allocator,
10782                       size_t               len,
10783                       const uint8_t       *data);
10784 void   core_ml__specification__layer_normalization_layer_params__free_unpacked
10785                      (CoreML__Specification__LayerNormalizationLayerParams *message,
10786                       ProtobufCAllocator *allocator);
10787 /* CoreML__Specification__NonMaximumSuppressionLayerParams methods */
10788 void   core_ml__specification__non_maximum_suppression_layer_params__init
10789                      (CoreML__Specification__NonMaximumSuppressionLayerParams         *message);
10790 size_t core_ml__specification__non_maximum_suppression_layer_params__get_packed_size
10791                      (const CoreML__Specification__NonMaximumSuppressionLayerParams   *message);
10792 size_t core_ml__specification__non_maximum_suppression_layer_params__pack
10793                      (const CoreML__Specification__NonMaximumSuppressionLayerParams   *message,
10794                       uint8_t             *out);
10795 size_t core_ml__specification__non_maximum_suppression_layer_params__pack_to_buffer
10796                      (const CoreML__Specification__NonMaximumSuppressionLayerParams   *message,
10797                       ProtobufCBuffer     *buffer);
10798 CoreML__Specification__NonMaximumSuppressionLayerParams *
10799        core_ml__specification__non_maximum_suppression_layer_params__unpack
10800                      (ProtobufCAllocator  *allocator,
10801                       size_t               len,
10802                       const uint8_t       *data);
10803 void   core_ml__specification__non_maximum_suppression_layer_params__free_unpacked
10804                      (CoreML__Specification__NonMaximumSuppressionLayerParams *message,
10805                       ProtobufCAllocator *allocator);
10806 /* CoreML__Specification__ClampedReLULayerParams methods */
10807 void   core_ml__specification__clamped_re_lulayer_params__init
10808                      (CoreML__Specification__ClampedReLULayerParams         *message);
10809 size_t core_ml__specification__clamped_re_lulayer_params__get_packed_size
10810                      (const CoreML__Specification__ClampedReLULayerParams   *message);
10811 size_t core_ml__specification__clamped_re_lulayer_params__pack
10812                      (const CoreML__Specification__ClampedReLULayerParams   *message,
10813                       uint8_t             *out);
10814 size_t core_ml__specification__clamped_re_lulayer_params__pack_to_buffer
10815                      (const CoreML__Specification__ClampedReLULayerParams   *message,
10816                       ProtobufCBuffer     *buffer);
10817 CoreML__Specification__ClampedReLULayerParams *
10818        core_ml__specification__clamped_re_lulayer_params__unpack
10819                      (ProtobufCAllocator  *allocator,
10820                       size_t               len,
10821                       const uint8_t       *data);
10822 void   core_ml__specification__clamped_re_lulayer_params__free_unpacked
10823                      (CoreML__Specification__ClampedReLULayerParams *message,
10824                       ProtobufCAllocator *allocator);
10825 /* CoreML__Specification__ArgSortLayerParams methods */
10826 void   core_ml__specification__arg_sort_layer_params__init
10827                      (CoreML__Specification__ArgSortLayerParams         *message);
10828 size_t core_ml__specification__arg_sort_layer_params__get_packed_size
10829                      (const CoreML__Specification__ArgSortLayerParams   *message);
10830 size_t core_ml__specification__arg_sort_layer_params__pack
10831                      (const CoreML__Specification__ArgSortLayerParams   *message,
10832                       uint8_t             *out);
10833 size_t core_ml__specification__arg_sort_layer_params__pack_to_buffer
10834                      (const CoreML__Specification__ArgSortLayerParams   *message,
10835                       ProtobufCBuffer     *buffer);
10836 CoreML__Specification__ArgSortLayerParams *
10837        core_ml__specification__arg_sort_layer_params__unpack
10838                      (ProtobufCAllocator  *allocator,
10839                       size_t               len,
10840                       const uint8_t       *data);
10841 void   core_ml__specification__arg_sort_layer_params__free_unpacked
10842                      (CoreML__Specification__ArgSortLayerParams *message,
10843                       ProtobufCAllocator *allocator);
10844 /* CoreML__Specification__SliceBySizeLayerParams methods */
10845 void   core_ml__specification__slice_by_size_layer_params__init
10846                      (CoreML__Specification__SliceBySizeLayerParams         *message);
10847 size_t core_ml__specification__slice_by_size_layer_params__get_packed_size
10848                      (const CoreML__Specification__SliceBySizeLayerParams   *message);
10849 size_t core_ml__specification__slice_by_size_layer_params__pack
10850                      (const CoreML__Specification__SliceBySizeLayerParams   *message,
10851                       uint8_t             *out);
10852 size_t core_ml__specification__slice_by_size_layer_params__pack_to_buffer
10853                      (const CoreML__Specification__SliceBySizeLayerParams   *message,
10854                       ProtobufCBuffer     *buffer);
10855 CoreML__Specification__SliceBySizeLayerParams *
10856        core_ml__specification__slice_by_size_layer_params__unpack
10857                      (ProtobufCAllocator  *allocator,
10858                       size_t               len,
10859                       const uint8_t       *data);
10860 void   core_ml__specification__slice_by_size_layer_params__free_unpacked
10861                      (CoreML__Specification__SliceBySizeLayerParams *message,
10862                       ProtobufCAllocator *allocator);
10863 /* CoreML__Specification__NeuralNetworkClassifier methods */
10864 void   core_ml__specification__neural_network_classifier__init
10865                      (CoreML__Specification__NeuralNetworkClassifier         *message);
10866 size_t core_ml__specification__neural_network_classifier__get_packed_size
10867                      (const CoreML__Specification__NeuralNetworkClassifier   *message);
10868 size_t core_ml__specification__neural_network_classifier__pack
10869                      (const CoreML__Specification__NeuralNetworkClassifier   *message,
10870                       uint8_t             *out);
10871 size_t core_ml__specification__neural_network_classifier__pack_to_buffer
10872                      (const CoreML__Specification__NeuralNetworkClassifier   *message,
10873                       ProtobufCBuffer     *buffer);
10874 CoreML__Specification__NeuralNetworkClassifier *
10875        core_ml__specification__neural_network_classifier__unpack
10876                      (ProtobufCAllocator  *allocator,
10877                       size_t               len,
10878                       const uint8_t       *data);
10879 void   core_ml__specification__neural_network_classifier__free_unpacked
10880                      (CoreML__Specification__NeuralNetworkClassifier *message,
10881                       ProtobufCAllocator *allocator);
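/*
 * Usage sketch (illustrative only): unpack and free_unpacked also accept a
 * caller-supplied ProtobufCAllocator. The counting_alloc/counting_free
 * wrappers and decode_classifier below are hypothetical helpers, not part of
 * this header; they only illustrate the callback shape declared in
 * <protobuf-c/protobuf-c.h>. The same allocator configuration must later be
 * passed to core_ml__specification__neural_network_classifier__free_unpacked.
 *
 *   static void *counting_alloc(void *allocator_data, size_t size)
 *   {
 *       (*(size_t *) allocator_data)++;
 *       return malloc(size);
 *   }
 *
 *   static void counting_free(void *allocator_data, void *pointer)
 *   {
 *       (void) allocator_data;
 *       free(pointer);
 *   }
 *
 *   static CoreML__Specification__NeuralNetworkClassifier *
 *   decode_classifier(const uint8_t *data, size_t len, size_t *allocations)
 *   {
 *       ProtobufCAllocator counting = {
 *           .alloc = counting_alloc,
 *           .free = counting_free,
 *           .allocator_data = allocations,
 *       };
 *       return core_ml__specification__neural_network_classifier__unpack(&counting, len, data);
 *   }
 */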
10882 /* CoreML__Specification__OneHotLayerParams methods */
10883 void   core_ml__specification__one_hot_layer_params__init
10884                      (CoreML__Specification__OneHotLayerParams         *message);
10885 size_t core_ml__specification__one_hot_layer_params__get_packed_size
10886                      (const CoreML__Specification__OneHotLayerParams   *message);
10887 size_t core_ml__specification__one_hot_layer_params__pack
10888                      (const CoreML__Specification__OneHotLayerParams   *message,
10889                       uint8_t             *out);
10890 size_t core_ml__specification__one_hot_layer_params__pack_to_buffer
10891                      (const CoreML__Specification__OneHotLayerParams   *message,
10892                       ProtobufCBuffer     *buffer);
10893 CoreML__Specification__OneHotLayerParams *
10894        core_ml__specification__one_hot_layer_params__unpack
10895                      (ProtobufCAllocator  *allocator,
10896                       size_t               len,
10897                       const uint8_t       *data);
10898 void   core_ml__specification__one_hot_layer_params__free_unpacked
10899                      (CoreML__Specification__OneHotLayerParams *message,
10900                       ProtobufCAllocator *allocator);
10901 /* CoreML__Specification__CumSumLayerParams methods */
10902 void   core_ml__specification__cum_sum_layer_params__init
10903                      (CoreML__Specification__CumSumLayerParams         *message);
10904 size_t core_ml__specification__cum_sum_layer_params__get_packed_size
10905                      (const CoreML__Specification__CumSumLayerParams   *message);
10906 size_t core_ml__specification__cum_sum_layer_params__pack
10907                      (const CoreML__Specification__CumSumLayerParams   *message,
10908                       uint8_t             *out);
10909 size_t core_ml__specification__cum_sum_layer_params__pack_to_buffer
10910                      (const CoreML__Specification__CumSumLayerParams   *message,
10911                       ProtobufCBuffer     *buffer);
10912 CoreML__Specification__CumSumLayerParams *
10913        core_ml__specification__cum_sum_layer_params__unpack
10914                      (ProtobufCAllocator  *allocator,
10915                       size_t               len,
10916                       const uint8_t       *data);
10917 void   core_ml__specification__cum_sum_layer_params__free_unpacked
10918                      (CoreML__Specification__CumSumLayerParams *message,
10919                       ProtobufCAllocator *allocator);
10920 /* CoreML__Specification__NeuralNetworkRegressor methods */
10921 void   core_ml__specification__neural_network_regressor__init
10922                      (CoreML__Specification__NeuralNetworkRegressor         *message);
10923 size_t core_ml__specification__neural_network_regressor__get_packed_size
10924                      (const CoreML__Specification__NeuralNetworkRegressor   *message);
10925 size_t core_ml__specification__neural_network_regressor__pack
10926                      (const CoreML__Specification__NeuralNetworkRegressor   *message,
10927                       uint8_t             *out);
10928 size_t core_ml__specification__neural_network_regressor__pack_to_buffer
10929                      (const CoreML__Specification__NeuralNetworkRegressor   *message,
10930                       ProtobufCBuffer     *buffer);
10931 CoreML__Specification__NeuralNetworkRegressor *
10932        core_ml__specification__neural_network_regressor__unpack
10933                      (ProtobufCAllocator  *allocator,
10934                       size_t               len,
10935                       const uint8_t       *data);
10936 void   core_ml__specification__neural_network_regressor__free_unpacked
10937                      (CoreML__Specification__NeuralNetworkRegressor *message,
10938                       ProtobufCAllocator *allocator);
10939 /* CoreML__Specification__NetworkUpdateParameters methods */
10940 void   core_ml__specification__network_update_parameters__init
10941                      (CoreML__Specification__NetworkUpdateParameters         *message);
10942 size_t core_ml__specification__network_update_parameters__get_packed_size
10943                      (const CoreML__Specification__NetworkUpdateParameters   *message);
10944 size_t core_ml__specification__network_update_parameters__pack
10945                      (const CoreML__Specification__NetworkUpdateParameters   *message,
10946                       uint8_t             *out);
10947 size_t core_ml__specification__network_update_parameters__pack_to_buffer
10948                      (const CoreML__Specification__NetworkUpdateParameters   *message,
10949                       ProtobufCBuffer     *buffer);
10950 CoreML__Specification__NetworkUpdateParameters *
10951        core_ml__specification__network_update_parameters__unpack
10952                      (ProtobufCAllocator  *allocator,
10953                       size_t               len,
10954                       const uint8_t       *data);
10955 void   core_ml__specification__network_update_parameters__free_unpacked
10956                      (CoreML__Specification__NetworkUpdateParameters *message,
10957                       ProtobufCAllocator *allocator);
10958 /* CoreML__Specification__LossLayer methods */
10959 void   core_ml__specification__loss_layer__init
10960                      (CoreML__Specification__LossLayer         *message);
10961 size_t core_ml__specification__loss_layer__get_packed_size
10962                      (const CoreML__Specification__LossLayer   *message);
10963 size_t core_ml__specification__loss_layer__pack
10964                      (const CoreML__Specification__LossLayer   *message,
10965                       uint8_t             *out);
10966 size_t core_ml__specification__loss_layer__pack_to_buffer
10967                      (const CoreML__Specification__LossLayer   *message,
10968                       ProtobufCBuffer     *buffer);
10969 CoreML__Specification__LossLayer *
10970        core_ml__specification__loss_layer__unpack
10971                      (ProtobufCAllocator  *allocator,
10972                       size_t               len,
10973                       const uint8_t       *data);
10974 void   core_ml__specification__loss_layer__free_unpacked
10975                      (CoreML__Specification__LossLayer *message,
10976                       ProtobufCAllocator *allocator);
10977 /* CoreML__Specification__CategoricalCrossEntropyLossLayer methods */
10978 void   core_ml__specification__categorical_cross_entropy_loss_layer__init
10979                      (CoreML__Specification__CategoricalCrossEntropyLossLayer         *message);
10980 size_t core_ml__specification__categorical_cross_entropy_loss_layer__get_packed_size
10981                      (const CoreML__Specification__CategoricalCrossEntropyLossLayer   *message);
10982 size_t core_ml__specification__categorical_cross_entropy_loss_layer__pack
10983                      (const CoreML__Specification__CategoricalCrossEntropyLossLayer   *message,
10984                       uint8_t             *out);
10985 size_t core_ml__specification__categorical_cross_entropy_loss_layer__pack_to_buffer
10986                      (const CoreML__Specification__CategoricalCrossEntropyLossLayer   *message,
10987                       ProtobufCBuffer     *buffer);
10988 CoreML__Specification__CategoricalCrossEntropyLossLayer *
10989        core_ml__specification__categorical_cross_entropy_loss_layer__unpack
10990                      (ProtobufCAllocator  *allocator,
10991                       size_t               len,
10992                       const uint8_t       *data);
10993 void   core_ml__specification__categorical_cross_entropy_loss_layer__free_unpacked
10994                      (CoreML__Specification__CategoricalCrossEntropyLossLayer *message,
10995                       ProtobufCAllocator *allocator);
10996 /* CoreML__Specification__MeanSquaredErrorLossLayer methods */
10997 void   core_ml__specification__mean_squared_error_loss_layer__init
10998                      (CoreML__Specification__MeanSquaredErrorLossLayer         *message);
10999 size_t core_ml__specification__mean_squared_error_loss_layer__get_packed_size
11000                      (const CoreML__Specification__MeanSquaredErrorLossLayer   *message);
11001 size_t core_ml__specification__mean_squared_error_loss_layer__pack
11002                      (const CoreML__Specification__MeanSquaredErrorLossLayer   *message,
11003                       uint8_t             *out);
11004 size_t core_ml__specification__mean_squared_error_loss_layer__pack_to_buffer
11005                      (const CoreML__Specification__MeanSquaredErrorLossLayer   *message,
11006                       ProtobufCBuffer     *buffer);
11007 CoreML__Specification__MeanSquaredErrorLossLayer *
11008        core_ml__specification__mean_squared_error_loss_layer__unpack
11009                      (ProtobufCAllocator  *allocator,
11010                       size_t               len,
11011                       const uint8_t       *data);
11012 void   core_ml__specification__mean_squared_error_loss_layer__free_unpacked
11013                      (CoreML__Specification__MeanSquaredErrorLossLayer *message,
11014                       ProtobufCAllocator *allocator);
11015 /* CoreML__Specification__Optimizer methods */
11016 void   core_ml__specification__optimizer__init
11017                      (CoreML__Specification__Optimizer         *message);
11018 size_t core_ml__specification__optimizer__get_packed_size
11019                      (const CoreML__Specification__Optimizer   *message);
11020 size_t core_ml__specification__optimizer__pack
11021                      (const CoreML__Specification__Optimizer   *message,
11022                       uint8_t             *out);
11023 size_t core_ml__specification__optimizer__pack_to_buffer
11024                      (const CoreML__Specification__Optimizer   *message,
11025                       ProtobufCBuffer     *buffer);
11026 CoreML__Specification__Optimizer *
11027        core_ml__specification__optimizer__unpack
11028                      (ProtobufCAllocator  *allocator,
11029                       size_t               len,
11030                       const uint8_t       *data);
11031 void   core_ml__specification__optimizer__free_unpacked
11032                      (CoreML__Specification__Optimizer *message,
11033                       ProtobufCAllocator *allocator);
11034 /* CoreML__Specification__SGDOptimizer methods */
11035 void   core_ml__specification__sgdoptimizer__init
11036                      (CoreML__Specification__SGDOptimizer         *message);
11037 size_t core_ml__specification__sgdoptimizer__get_packed_size
11038                      (const CoreML__Specification__SGDOptimizer   *message);
11039 size_t core_ml__specification__sgdoptimizer__pack
11040                      (const CoreML__Specification__SGDOptimizer   *message,
11041                       uint8_t             *out);
11042 size_t core_ml__specification__sgdoptimizer__pack_to_buffer
11043                      (const CoreML__Specification__SGDOptimizer   *message,
11044                       ProtobufCBuffer     *buffer);
11045 CoreML__Specification__SGDOptimizer *
11046        core_ml__specification__sgdoptimizer__unpack
11047                      (ProtobufCAllocator  *allocator,
11048                       size_t               len,
11049                       const uint8_t       *data);
11050 void   core_ml__specification__sgdoptimizer__free_unpacked
11051                      (CoreML__Specification__SGDOptimizer *message,
11052                       ProtobufCAllocator *allocator);
11053 /* CoreML__Specification__AdamOptimizer methods */
11054 void   core_ml__specification__adam_optimizer__init
11055                      (CoreML__Specification__AdamOptimizer         *message);
11056 size_t core_ml__specification__adam_optimizer__get_packed_size
11057                      (const CoreML__Specification__AdamOptimizer   *message);
11058 size_t core_ml__specification__adam_optimizer__pack
11059                      (const CoreML__Specification__AdamOptimizer   *message,
11060                       uint8_t             *out);
11061 size_t core_ml__specification__adam_optimizer__pack_to_buffer
11062                      (const CoreML__Specification__AdamOptimizer   *message,
11063                       ProtobufCBuffer     *buffer);
11064 CoreML__Specification__AdamOptimizer *
11065        core_ml__specification__adam_optimizer__unpack
11066                      (ProtobufCAllocator  *allocator,
11067                       size_t               len,
11068                       const uint8_t       *data);
11069 void   core_ml__specification__adam_optimizer__free_unpacked
11070                      (CoreML__Specification__AdamOptimizer *message,
11071                       ProtobufCAllocator *allocator);
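
/*
 * Illustrative sketch only (not part of the generated API): every message
 * declared above follows the same protobuf-c lifecycle of init / get_packed_size
 * / pack / unpack / free_unpacked. The sketch below round-trips an AdamOptimizer
 * as one example; the choice of message, the use of malloc/free, and the buffer
 * handling are assumptions, and any of the other *__pack / *__unpack pairs in
 * this header work the same way. It assumes a translation unit that includes
 * this header.
 *
 *     #include <stdlib.h>
 *
 *     CoreML__Specification__AdamOptimizer msg;
 *     core_ml__specification__adam_optimizer__init(&msg);
 *     // ... populate msg fields as needed ...
 *
 *     size_t len = core_ml__specification__adam_optimizer__get_packed_size(&msg);
 *     uint8_t *buf = malloc(len);
 *     core_ml__specification__adam_optimizer__pack(&msg, buf);
 *
 *     // Decode with the default allocator (NULL); unpack returns NULL on
 *     // malformed input, and the unpacked copy must be released explicitly.
 *     CoreML__Specification__AdamOptimizer *copy =
 *         core_ml__specification__adam_optimizer__unpack(NULL, len, buf);
 *     if (copy != NULL)
 *         core_ml__specification__adam_optimizer__free_unpacked(copy, NULL);
 *     free(buf);
 */
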
11072 /* --- per-message closures --- */
11073 
11074 typedef void (*CoreML__Specification__NeuralNetwork_Closure)
11075                  (const CoreML__Specification__NeuralNetwork *message,
11076                   void *closure_data);
11077 typedef void (*CoreML__Specification__NeuralNetworkImageScaler_Closure)
11078                  (const CoreML__Specification__NeuralNetworkImageScaler *message,
11079                   void *closure_data);
11080 typedef void (*CoreML__Specification__NeuralNetworkMeanImage_Closure)
11081                  (const CoreML__Specification__NeuralNetworkMeanImage *message,
11082                   void *closure_data);
11083 typedef void (*CoreML__Specification__NeuralNetworkPreprocessing_Closure)
11084                  (const CoreML__Specification__NeuralNetworkPreprocessing *message,
11085                   void *closure_data);
11086 typedef void (*CoreML__Specification__ActivationReLU_Closure)
11087                  (const CoreML__Specification__ActivationReLU *message,
11088                   void *closure_data);
11089 typedef void (*CoreML__Specification__ActivationLeakyReLU_Closure)
11090                  (const CoreML__Specification__ActivationLeakyReLU *message,
11091                   void *closure_data);
11092 typedef void (*CoreML__Specification__ActivationTanh_Closure)
11093                  (const CoreML__Specification__ActivationTanh *message,
11094                   void *closure_data);
11095 typedef void (*CoreML__Specification__ActivationScaledTanh_Closure)
11096                  (const CoreML__Specification__ActivationScaledTanh *message,
11097                   void *closure_data);
11098 typedef void (*CoreML__Specification__ActivationSigmoid_Closure)
11099                  (const CoreML__Specification__ActivationSigmoid *message,
11100                   void *closure_data);
11101 typedef void (*CoreML__Specification__ActivationLinear_Closure)
11102                  (const CoreML__Specification__ActivationLinear *message,
11103                   void *closure_data);
11104 typedef void (*CoreML__Specification__ActivationSigmoidHard_Closure)
11105                  (const CoreML__Specification__ActivationSigmoidHard *message,
11106                   void *closure_data);
11107 typedef void (*CoreML__Specification__ActivationPReLU_Closure)
11108                  (const CoreML__Specification__ActivationPReLU *message,
11109                   void *closure_data);
11110 typedef void (*CoreML__Specification__ActivationELU_Closure)
11111                  (const CoreML__Specification__ActivationELU *message,
11112                   void *closure_data);
11113 typedef void (*CoreML__Specification__ActivationThresholdedReLU_Closure)
11114                  (const CoreML__Specification__ActivationThresholdedReLU *message,
11115                   void *closure_data);
11116 typedef void (*CoreML__Specification__ActivationSoftsign_Closure)
11117                  (const CoreML__Specification__ActivationSoftsign *message,
11118                   void *closure_data);
11119 typedef void (*CoreML__Specification__ActivationSoftplus_Closure)
11120                  (const CoreML__Specification__ActivationSoftplus *message,
11121                   void *closure_data);
11122 typedef void (*CoreML__Specification__ActivationParametricSoftplus_Closure)
11123                  (const CoreML__Specification__ActivationParametricSoftplus *message,
11124                   void *closure_data);
11125 typedef void (*CoreML__Specification__ActivationParams_Closure)
11126                  (const CoreML__Specification__ActivationParams *message,
11127                   void *closure_data);
11128 typedef void (*CoreML__Specification__Tensor_Closure)
11129                  (const CoreML__Specification__Tensor *message,
11130                   void *closure_data);
11131 typedef void (*CoreML__Specification__NeuralNetworkLayer_Closure)
11132                  (const CoreML__Specification__NeuralNetworkLayer *message,
11133                   void *closure_data);
11134 typedef void (*CoreML__Specification__BranchLayerParams_Closure)
11135                  (const CoreML__Specification__BranchLayerParams *message,
11136                   void *closure_data);
11137 typedef void (*CoreML__Specification__LoopLayerParams_Closure)
11138                  (const CoreML__Specification__LoopLayerParams *message,
11139                   void *closure_data);
11140 typedef void (*CoreML__Specification__LoopBreakLayerParams_Closure)
11141                  (const CoreML__Specification__LoopBreakLayerParams *message,
11142                   void *closure_data);
11143 typedef void (*CoreML__Specification__LoopContinueLayerParams_Closure)
11144                  (const CoreML__Specification__LoopContinueLayerParams *message,
11145                   void *closure_data);
11146 typedef void (*CoreML__Specification__CopyLayerParams_Closure)
11147                  (const CoreML__Specification__CopyLayerParams *message,
11148                   void *closure_data);
11149 typedef void (*CoreML__Specification__GreaterThanLayerParams_Closure)
11150                  (const CoreML__Specification__GreaterThanLayerParams *message,
11151                   void *closure_data);
11152 typedef void (*CoreML__Specification__GreaterEqualLayerParams_Closure)
11153                  (const CoreML__Specification__GreaterEqualLayerParams *message,
11154                   void *closure_data);
11155 typedef void (*CoreML__Specification__LessThanLayerParams_Closure)
11156                  (const CoreML__Specification__LessThanLayerParams *message,
11157                   void *closure_data);
11158 typedef void (*CoreML__Specification__LessEqualLayerParams_Closure)
11159                  (const CoreML__Specification__LessEqualLayerParams *message,
11160                   void *closure_data);
11161 typedef void (*CoreML__Specification__EqualLayerParams_Closure)
11162                  (const CoreML__Specification__EqualLayerParams *message,
11163                   void *closure_data);
11164 typedef void (*CoreML__Specification__NotEqualLayerParams_Closure)
11165                  (const CoreML__Specification__NotEqualLayerParams *message,
11166                   void *closure_data);
11167 typedef void (*CoreML__Specification__LogicalAndLayerParams_Closure)
11168                  (const CoreML__Specification__LogicalAndLayerParams *message,
11169                   void *closure_data);
11170 typedef void (*CoreML__Specification__LogicalOrLayerParams_Closure)
11171                  (const CoreML__Specification__LogicalOrLayerParams *message,
11172                   void *closure_data);
11173 typedef void (*CoreML__Specification__LogicalXorLayerParams_Closure)
11174                  (const CoreML__Specification__LogicalXorLayerParams *message,
11175                   void *closure_data);
11176 typedef void (*CoreML__Specification__LogicalNotLayerParams_Closure)
11177                  (const CoreML__Specification__LogicalNotLayerParams *message,
11178                   void *closure_data);
11179 typedef void (*CoreML__Specification__BorderAmounts__EdgeSizes_Closure)
11180                  (const CoreML__Specification__BorderAmounts__EdgeSizes *message,
11181                   void *closure_data);
11182 typedef void (*CoreML__Specification__BorderAmounts_Closure)
11183                  (const CoreML__Specification__BorderAmounts *message,
11184                   void *closure_data);
11185 typedef void (*CoreML__Specification__ValidPadding_Closure)
11186                  (const CoreML__Specification__ValidPadding *message,
11187                   void *closure_data);
11188 typedef void (*CoreML__Specification__SamePadding_Closure)
11189                  (const CoreML__Specification__SamePadding *message,
11190                   void *closure_data);
11191 typedef void (*CoreML__Specification__SamplingMode_Closure)
11192                  (const CoreML__Specification__SamplingMode *message,
11193                   void *closure_data);
11194 typedef void (*CoreML__Specification__BoxCoordinatesMode_Closure)
11195                  (const CoreML__Specification__BoxCoordinatesMode *message,
11196                   void *closure_data);
11197 typedef void (*CoreML__Specification__WeightParams_Closure)
11198                  (const CoreML__Specification__WeightParams *message,
11199                   void *closure_data);
11200 typedef void (*CoreML__Specification__QuantizationParams_Closure)
11201                  (const CoreML__Specification__QuantizationParams *message,
11202                   void *closure_data);
11203 typedef void (*CoreML__Specification__LinearQuantizationParams_Closure)
11204                  (const CoreML__Specification__LinearQuantizationParams *message,
11205                   void *closure_data);
11206 typedef void (*CoreML__Specification__LookUpTableQuantizationParams_Closure)
11207                  (const CoreML__Specification__LookUpTableQuantizationParams *message,
11208                   void *closure_data);
11209 typedef void (*CoreML__Specification__ConvolutionLayerParams_Closure)
11210                  (const CoreML__Specification__ConvolutionLayerParams *message,
11211                   void *closure_data);
11212 typedef void (*CoreML__Specification__Convolution3DLayerParams_Closure)
11213                  (const CoreML__Specification__Convolution3DLayerParams *message,
11214                   void *closure_data);
11215 typedef void (*CoreML__Specification__InnerProductLayerParams_Closure)
11216                  (const CoreML__Specification__InnerProductLayerParams *message,
11217                   void *closure_data);
11218 typedef void (*CoreML__Specification__EmbeddingLayerParams_Closure)
11219                  (const CoreML__Specification__EmbeddingLayerParams *message,
11220                   void *closure_data);
11221 typedef void (*CoreML__Specification__EmbeddingNDLayerParams_Closure)
11222                  (const CoreML__Specification__EmbeddingNDLayerParams *message,
11223                   void *closure_data);
11224 typedef void (*CoreML__Specification__BatchnormLayerParams_Closure)
11225                  (const CoreML__Specification__BatchnormLayerParams *message,
11226                   void *closure_data);
11227 typedef void (*CoreML__Specification__PoolingLayerParams__ValidCompletePadding_Closure)
11228                  (const CoreML__Specification__PoolingLayerParams__ValidCompletePadding *message,
11229                   void *closure_data);
11230 typedef void (*CoreML__Specification__PoolingLayerParams_Closure)
11231                  (const CoreML__Specification__PoolingLayerParams *message,
11232                   void *closure_data);
11233 typedef void (*CoreML__Specification__Pooling3DLayerParams_Closure)
11234                  (const CoreML__Specification__Pooling3DLayerParams *message,
11235                   void *closure_data);
11236 typedef void (*CoreML__Specification__GlobalPooling3DLayerParams_Closure)
11237                  (const CoreML__Specification__GlobalPooling3DLayerParams *message,
11238                   void *closure_data);
11239 typedef void (*CoreML__Specification__PaddingLayerParams__PaddingConstant_Closure)
11240                  (const CoreML__Specification__PaddingLayerParams__PaddingConstant *message,
11241                   void *closure_data);
11242 typedef void (*CoreML__Specification__PaddingLayerParams__PaddingReflection_Closure)
11243                  (const CoreML__Specification__PaddingLayerParams__PaddingReflection *message,
11244                   void *closure_data);
11245 typedef void (*CoreML__Specification__PaddingLayerParams__PaddingReplication_Closure)
11246                  (const CoreML__Specification__PaddingLayerParams__PaddingReplication *message,
11247                   void *closure_data);
11248 typedef void (*CoreML__Specification__PaddingLayerParams_Closure)
11249                  (const CoreML__Specification__PaddingLayerParams *message,
11250                   void *closure_data);
11251 typedef void (*CoreML__Specification__ConcatLayerParams_Closure)
11252                  (const CoreML__Specification__ConcatLayerParams *message,
11253                   void *closure_data);
11254 typedef void (*CoreML__Specification__LRNLayerParams_Closure)
11255                  (const CoreML__Specification__LRNLayerParams *message,
11256                   void *closure_data);
11257 typedef void (*CoreML__Specification__SoftmaxLayerParams_Closure)
11258                  (const CoreML__Specification__SoftmaxLayerParams *message,
11259                   void *closure_data);
11260 typedef void (*CoreML__Specification__SplitLayerParams_Closure)
11261                  (const CoreML__Specification__SplitLayerParams *message,
11262                   void *closure_data);
11263 typedef void (*CoreML__Specification__AddLayerParams_Closure)
11264                  (const CoreML__Specification__AddLayerParams *message,
11265                   void *closure_data);
11266 typedef void (*CoreML__Specification__MultiplyLayerParams_Closure)
11267                  (const CoreML__Specification__MultiplyLayerParams *message,
11268                   void *closure_data);
11269 typedef void (*CoreML__Specification__UnaryFunctionLayerParams_Closure)
11270                  (const CoreML__Specification__UnaryFunctionLayerParams *message,
11271                   void *closure_data);
11272 typedef void (*CoreML__Specification__UpsampleLayerParams_Closure)
11273                  (const CoreML__Specification__UpsampleLayerParams *message,
11274                   void *closure_data);
11275 typedef void (*CoreML__Specification__ResizeBilinearLayerParams_Closure)
11276                  (const CoreML__Specification__ResizeBilinearLayerParams *message,
11277                   void *closure_data);
11278 typedef void (*CoreML__Specification__CropResizeLayerParams_Closure)
11279                  (const CoreML__Specification__CropResizeLayerParams *message,
11280                   void *closure_data);
11281 typedef void (*CoreML__Specification__BiasLayerParams_Closure)
11282                  (const CoreML__Specification__BiasLayerParams *message,
11283                   void *closure_data);
11284 typedef void (*CoreML__Specification__ScaleLayerParams_Closure)
11285                  (const CoreML__Specification__ScaleLayerParams *message,
11286                   void *closure_data);
11287 typedef void (*CoreML__Specification__LoadConstantLayerParams_Closure)
11288                  (const CoreML__Specification__LoadConstantLayerParams *message,
11289                   void *closure_data);
11290 typedef void (*CoreML__Specification__L2NormalizeLayerParams_Closure)
11291                  (const CoreML__Specification__L2NormalizeLayerParams *message,
11292                   void *closure_data);
11293 typedef void (*CoreML__Specification__FlattenLayerParams_Closure)
11294                  (const CoreML__Specification__FlattenLayerParams *message,
11295                   void *closure_data);
11296 typedef void (*CoreML__Specification__ReshapeLayerParams_Closure)
11297                  (const CoreML__Specification__ReshapeLayerParams *message,
11298                   void *closure_data);
11299 typedef void (*CoreML__Specification__PermuteLayerParams_Closure)
11300                  (const CoreML__Specification__PermuteLayerParams *message,
11301                   void *closure_data);
11302 typedef void (*CoreML__Specification__ReorganizeDataLayerParams_Closure)
11303                  (const CoreML__Specification__ReorganizeDataLayerParams *message,
11304                   void *closure_data);
11305 typedef void (*CoreML__Specification__SliceLayerParams_Closure)
11306                  (const CoreML__Specification__SliceLayerParams *message,
11307                   void *closure_data);
11308 typedef void (*CoreML__Specification__ReduceLayerParams_Closure)
11309                  (const CoreML__Specification__ReduceLayerParams *message,
11310                   void *closure_data);
11311 typedef void (*CoreML__Specification__CropLayerParams_Closure)
11312                  (const CoreML__Specification__CropLayerParams *message,
11313                   void *closure_data);
11314 typedef void (*CoreML__Specification__AverageLayerParams_Closure)
11315                  (const CoreML__Specification__AverageLayerParams *message,
11316                   void *closure_data);
11317 typedef void (*CoreML__Specification__MaxLayerParams_Closure)
11318                  (const CoreML__Specification__MaxLayerParams *message,
11319                   void *closure_data);
11320 typedef void (*CoreML__Specification__MinLayerParams_Closure)
11321                  (const CoreML__Specification__MinLayerParams *message,
11322                   void *closure_data);
11323 typedef void (*CoreML__Specification__DotProductLayerParams_Closure)
11324                  (const CoreML__Specification__DotProductLayerParams *message,
11325                   void *closure_data);
11326 typedef void (*CoreML__Specification__MeanVarianceNormalizeLayerParams_Closure)
11327                  (const CoreML__Specification__MeanVarianceNormalizeLayerParams *message,
11328                   void *closure_data);
11329 typedef void (*CoreML__Specification__SequenceRepeatLayerParams_Closure)
11330                  (const CoreML__Specification__SequenceRepeatLayerParams *message,
11331                   void *closure_data);
11332 typedef void (*CoreML__Specification__SimpleRecurrentLayerParams_Closure)
11333                  (const CoreML__Specification__SimpleRecurrentLayerParams *message,
11334                   void *closure_data);
11335 typedef void (*CoreML__Specification__GRULayerParams_Closure)
11336                  (const CoreML__Specification__GRULayerParams *message,
11337                   void *closure_data);
11338 typedef void (*CoreML__Specification__LSTMParams_Closure)
11339                  (const CoreML__Specification__LSTMParams *message,
11340                   void *closure_data);
11341 typedef void (*CoreML__Specification__LSTMWeightParams_Closure)
11342                  (const CoreML__Specification__LSTMWeightParams *message,
11343                   void *closure_data);
11344 typedef void (*CoreML__Specification__UniDirectionalLSTMLayerParams_Closure)
11345                  (const CoreML__Specification__UniDirectionalLSTMLayerParams *message,
11346                   void *closure_data);
11347 typedef void (*CoreML__Specification__BiDirectionalLSTMLayerParams_Closure)
11348                  (const CoreML__Specification__BiDirectionalLSTMLayerParams *message,
11349                   void *closure_data);
11350 typedef void (*CoreML__Specification__CustomLayerParams__CustomLayerParamValue_Closure)
11351                  (const CoreML__Specification__CustomLayerParams__CustomLayerParamValue *message,
11352                   void *closure_data);
11353 typedef void (*CoreML__Specification__CustomLayerParams__ParametersEntry_Closure)
11354                  (const CoreML__Specification__CustomLayerParams__ParametersEntry *message,
11355                   void *closure_data);
11356 typedef void (*CoreML__Specification__CustomLayerParams_Closure)
11357                  (const CoreML__Specification__CustomLayerParams *message,
11358                   void *closure_data);
11359 typedef void (*CoreML__Specification__TransposeLayerParams_Closure)
11360                  (const CoreML__Specification__TransposeLayerParams *message,
11361                   void *closure_data);
11362 typedef void (*CoreML__Specification__BatchedMatMulLayerParams_Closure)
11363                  (const CoreML__Specification__BatchedMatMulLayerParams *message,
11364                   void *closure_data);
11365 typedef void (*CoreML__Specification__ConcatNDLayerParams_Closure)
11366                  (const CoreML__Specification__ConcatNDLayerParams *message,
11367                   void *closure_data);
11368 typedef void (*CoreML__Specification__SoftmaxNDLayerParams_Closure)
11369                  (const CoreML__Specification__SoftmaxNDLayerParams *message,
11370                   void *closure_data);
11371 typedef void (*CoreML__Specification__ReverseLayerParams_Closure)
11372                  (const CoreML__Specification__ReverseLayerParams *message,
11373                   void *closure_data);
11374 typedef void (*CoreML__Specification__ReverseSeqLayerParams_Closure)
11375                  (const CoreML__Specification__ReverseSeqLayerParams *message,
11376                   void *closure_data);
11377 typedef void (*CoreML__Specification__LoadConstantNDLayerParams_Closure)
11378                  (const CoreML__Specification__LoadConstantNDLayerParams *message,
11379                   void *closure_data);
11380 typedef void (*CoreML__Specification__FillLikeLayerParams_Closure)
11381                  (const CoreML__Specification__FillLikeLayerParams *message,
11382                   void *closure_data);
11383 typedef void (*CoreML__Specification__FillStaticLayerParams_Closure)
11384                  (const CoreML__Specification__FillStaticLayerParams *message,
11385                   void *closure_data);
11386 typedef void (*CoreML__Specification__FillDynamicLayerParams_Closure)
11387                  (const CoreML__Specification__FillDynamicLayerParams *message,
11388                   void *closure_data);
11389 typedef void (*CoreML__Specification__WhereBroadcastableLayerParams_Closure)
11390                  (const CoreML__Specification__WhereBroadcastableLayerParams *message,
11391                   void *closure_data);
11392 typedef void (*CoreML__Specification__SinLayerParams_Closure)
11393                  (const CoreML__Specification__SinLayerParams *message,
11394                   void *closure_data);
11395 typedef void (*CoreML__Specification__CosLayerParams_Closure)
11396                  (const CoreML__Specification__CosLayerParams *message,
11397                   void *closure_data);
11398 typedef void (*CoreML__Specification__TanLayerParams_Closure)
11399                  (const CoreML__Specification__TanLayerParams *message,
11400                   void *closure_data);
11401 typedef void (*CoreML__Specification__AsinLayerParams_Closure)
11402                  (const CoreML__Specification__AsinLayerParams *message,
11403                   void *closure_data);
11404 typedef void (*CoreML__Specification__AcosLayerParams_Closure)
11405                  (const CoreML__Specification__AcosLayerParams *message,
11406                   void *closure_data);
11407 typedef void (*CoreML__Specification__AtanLayerParams_Closure)
11408                  (const CoreML__Specification__AtanLayerParams *message,
11409                   void *closure_data);
11410 typedef void (*CoreML__Specification__SinhLayerParams_Closure)
11411                  (const CoreML__Specification__SinhLayerParams *message,
11412                   void *closure_data);
11413 typedef void (*CoreML__Specification__CoshLayerParams_Closure)
11414                  (const CoreML__Specification__CoshLayerParams *message,
11415                   void *closure_data);
11416 typedef void (*CoreML__Specification__TanhLayerParams_Closure)
11417                  (const CoreML__Specification__TanhLayerParams *message,
11418                   void *closure_data);
11419 typedef void (*CoreML__Specification__AsinhLayerParams_Closure)
11420                  (const CoreML__Specification__AsinhLayerParams *message,
11421                   void *closure_data);
11422 typedef void (*CoreML__Specification__AcoshLayerParams_Closure)
11423                  (const CoreML__Specification__AcoshLayerParams *message,
11424                   void *closure_data);
11425 typedef void (*CoreML__Specification__AtanhLayerParams_Closure)
11426                  (const CoreML__Specification__AtanhLayerParams *message,
11427                   void *closure_data);
11428 typedef void (*CoreML__Specification__PowBroadcastableLayerParams_Closure)
11429                  (const CoreML__Specification__PowBroadcastableLayerParams *message,
11430                   void *closure_data);
11431 typedef void (*CoreML__Specification__Exp2LayerParams_Closure)
11432                  (const CoreML__Specification__Exp2LayerParams *message,
11433                   void *closure_data);
11434 typedef void (*CoreML__Specification__WhereNonZeroLayerParams_Closure)
11435                  (const CoreML__Specification__WhereNonZeroLayerParams *message,
11436                   void *closure_data);
11437 typedef void (*CoreML__Specification__MatrixBandPartLayerParams_Closure)
11438                  (const CoreML__Specification__MatrixBandPartLayerParams *message,
11439                   void *closure_data);
11440 typedef void (*CoreML__Specification__UpperTriangularLayerParams_Closure)
11441                  (const CoreML__Specification__UpperTriangularLayerParams *message,
11442                   void *closure_data);
11443 typedef void (*CoreML__Specification__LowerTriangularLayerParams_Closure)
11444                  (const CoreML__Specification__LowerTriangularLayerParams *message,
11445                   void *closure_data);
11446 typedef void (*CoreML__Specification__BroadcastToLikeLayerParams_Closure)
11447                  (const CoreML__Specification__BroadcastToLikeLayerParams *message,
11448                   void *closure_data);
11449 typedef void (*CoreML__Specification__BroadcastToStaticLayerParams_Closure)
11450                  (const CoreML__Specification__BroadcastToStaticLayerParams *message,
11451                   void *closure_data);
11452 typedef void (*CoreML__Specification__BroadcastToDynamicLayerParams_Closure)
11453                  (const CoreML__Specification__BroadcastToDynamicLayerParams *message,
11454                   void *closure_data);
11455 typedef void (*CoreML__Specification__AddBroadcastableLayerParams_Closure)
11456                  (const CoreML__Specification__AddBroadcastableLayerParams *message,
11457                   void *closure_data);
11458 typedef void (*CoreML__Specification__MaxBroadcastableLayerParams_Closure)
11459                  (const CoreML__Specification__MaxBroadcastableLayerParams *message,
11460                   void *closure_data);
11461 typedef void (*CoreML__Specification__MinBroadcastableLayerParams_Closure)
11462                  (const CoreML__Specification__MinBroadcastableLayerParams *message,
11463                   void *closure_data);
11464 typedef void (*CoreML__Specification__ModBroadcastableLayerParams_Closure)
11465                  (const CoreML__Specification__ModBroadcastableLayerParams *message,
11466                   void *closure_data);
11467 typedef void (*CoreML__Specification__FloorDivBroadcastableLayerParams_Closure)
11468                  (const CoreML__Specification__FloorDivBroadcastableLayerParams *message,
11469                   void *closure_data);
11470 typedef void (*CoreML__Specification__SubtractBroadcastableLayerParams_Closure)
11471                  (const CoreML__Specification__SubtractBroadcastableLayerParams *message,
11472                   void *closure_data);
11473 typedef void (*CoreML__Specification__MultiplyBroadcastableLayerParams_Closure)
11474                  (const CoreML__Specification__MultiplyBroadcastableLayerParams *message,
11475                   void *closure_data);
11476 typedef void (*CoreML__Specification__DivideBroadcastableLayerParams_Closure)
11477                  (const CoreML__Specification__DivideBroadcastableLayerParams *message,
11478                   void *closure_data);
11479 typedef void (*CoreML__Specification__GatherLayerParams_Closure)
11480                  (const CoreML__Specification__GatherLayerParams *message,
11481                   void *closure_data);
11482 typedef void (*CoreML__Specification__ScatterLayerParams_Closure)
11483                  (const CoreML__Specification__ScatterLayerParams *message,
11484                   void *closure_data);
11485 typedef void (*CoreML__Specification__GatherNDLayerParams_Closure)
11486                  (const CoreML__Specification__GatherNDLayerParams *message,
11487                   void *closure_data);
11488 typedef void (*CoreML__Specification__ScatterNDLayerParams_Closure)
11489                  (const CoreML__Specification__ScatterNDLayerParams *message,
11490                   void *closure_data);
11491 typedef void (*CoreML__Specification__GatherAlongAxisLayerParams_Closure)
11492                  (const CoreML__Specification__GatherAlongAxisLayerParams *message,
11493                   void *closure_data);
11494 typedef void (*CoreML__Specification__ScatterAlongAxisLayerParams_Closure)
11495                  (const CoreML__Specification__ScatterAlongAxisLayerParams *message,
11496                   void *closure_data);
11497 typedef void (*CoreML__Specification__StackLayerParams_Closure)
11498                  (const CoreML__Specification__StackLayerParams *message,
11499                   void *closure_data);
11500 typedef void (*CoreML__Specification__RankPreservingReshapeLayerParams_Closure)
11501                  (const CoreML__Specification__RankPreservingReshapeLayerParams *message,
11502                   void *closure_data);
11503 typedef void (*CoreML__Specification__ConstantPaddingLayerParams_Closure)
11504                  (const CoreML__Specification__ConstantPaddingLayerParams *message,
11505                   void *closure_data);
11506 typedef void (*CoreML__Specification__RandomNormalLikeLayerParams_Closure)
11507                  (const CoreML__Specification__RandomNormalLikeLayerParams *message,
11508                   void *closure_data);
11509 typedef void (*CoreML__Specification__RandomNormalStaticLayerParams_Closure)
11510                  (const CoreML__Specification__RandomNormalStaticLayerParams *message,
11511                   void *closure_data);
11512 typedef void (*CoreML__Specification__RandomNormalDynamicLayerParams_Closure)
11513                  (const CoreML__Specification__RandomNormalDynamicLayerParams *message,
11514                   void *closure_data);
11515 typedef void (*CoreML__Specification__RandomUniformLikeLayerParams_Closure)
11516                  (const CoreML__Specification__RandomUniformLikeLayerParams *message,
11517                   void *closure_data);
11518 typedef void (*CoreML__Specification__RandomUniformStaticLayerParams_Closure)
11519                  (const CoreML__Specification__RandomUniformStaticLayerParams *message,
11520                   void *closure_data);
11521 typedef void (*CoreML__Specification__RandomUniformDynamicLayerParams_Closure)
11522                  (const CoreML__Specification__RandomUniformDynamicLayerParams *message,
11523                   void *closure_data);
11524 typedef void (*CoreML__Specification__RandomBernoulliLikeLayerParams_Closure)
11525                  (const CoreML__Specification__RandomBernoulliLikeLayerParams *message,
11526                   void *closure_data);
11527 typedef void (*CoreML__Specification__RandomBernoulliStaticLayerParams_Closure)
11528                  (const CoreML__Specification__RandomBernoulliStaticLayerParams *message,
11529                   void *closure_data);
11530 typedef void (*CoreML__Specification__RandomBernoulliDynamicLayerParams_Closure)
11531                  (const CoreML__Specification__RandomBernoulliDynamicLayerParams *message,
11532                   void *closure_data);
11533 typedef void (*CoreML__Specification__CategoricalDistributionLayerParams_Closure)
11534                  (const CoreML__Specification__CategoricalDistributionLayerParams *message,
11535                   void *closure_data);
11536 typedef void (*CoreML__Specification__ReduceL1LayerParams_Closure)
11537                  (const CoreML__Specification__ReduceL1LayerParams *message,
11538                   void *closure_data);
11539 typedef void (*CoreML__Specification__ReduceL2LayerParams_Closure)
11540                  (const CoreML__Specification__ReduceL2LayerParams *message,
11541                   void *closure_data);
11542 typedef void (*CoreML__Specification__ReduceMaxLayerParams_Closure)
11543                  (const CoreML__Specification__ReduceMaxLayerParams *message,
11544                   void *closure_data);
11545 typedef void (*CoreML__Specification__ReduceMinLayerParams_Closure)
11546                  (const CoreML__Specification__ReduceMinLayerParams *message,
11547                   void *closure_data);
11548 typedef void (*CoreML__Specification__ReduceSumLayerParams_Closure)
11549                  (const CoreML__Specification__ReduceSumLayerParams *message,
11550                   void *closure_data);
11551 typedef void (*CoreML__Specification__ReduceProdLayerParams_Closure)
11552                  (const CoreML__Specification__ReduceProdLayerParams *message,
11553                   void *closure_data);
11554 typedef void (*CoreML__Specification__ReduceMeanLayerParams_Closure)
11555                  (const CoreML__Specification__ReduceMeanLayerParams *message,
11556                   void *closure_data);
11557 typedef void (*CoreML__Specification__ReduceLogSumLayerParams_Closure)
11558                  (const CoreML__Specification__ReduceLogSumLayerParams *message,
11559                   void *closure_data);
11560 typedef void (*CoreML__Specification__ReduceSumSquareLayerParams_Closure)
11561                  (const CoreML__Specification__ReduceSumSquareLayerParams *message,
11562                   void *closure_data);
11563 typedef void (*CoreML__Specification__ReduceLogSumExpLayerParams_Closure)
11564                  (const CoreML__Specification__ReduceLogSumExpLayerParams *message,
11565                   void *closure_data);
11566 typedef void (*CoreML__Specification__ExpandDimsLayerParams_Closure)
11567                  (const CoreML__Specification__ExpandDimsLayerParams *message,
11568                   void *closure_data);
11569 typedef void (*CoreML__Specification__FlattenTo2DLayerParams_Closure)
11570                  (const CoreML__Specification__FlattenTo2DLayerParams *message,
11571                   void *closure_data);
11572 typedef void (*CoreML__Specification__ReshapeStaticLayerParams_Closure)
11573                  (const CoreML__Specification__ReshapeStaticLayerParams *message,
11574                   void *closure_data);
11575 typedef void (*CoreML__Specification__ReshapeLikeLayerParams_Closure)
11576                  (const CoreML__Specification__ReshapeLikeLayerParams *message,
11577                   void *closure_data);
11578 typedef void (*CoreML__Specification__ReshapeDynamicLayerParams_Closure)
11579                  (const CoreML__Specification__ReshapeDynamicLayerParams *message,
11580                   void *closure_data);
11581 typedef void (*CoreML__Specification__SqueezeLayerParams_Closure)
11582                  (const CoreML__Specification__SqueezeLayerParams *message,
11583                   void *closure_data);
11584 typedef void (*CoreML__Specification__TopKLayerParams_Closure)
11585                  (const CoreML__Specification__TopKLayerParams *message,
11586                   void *closure_data);
11587 typedef void (*CoreML__Specification__ArgMaxLayerParams_Closure)
11588                  (const CoreML__Specification__ArgMaxLayerParams *message,
11589                   void *closure_data);
11590 typedef void (*CoreML__Specification__ArgMinLayerParams_Closure)
11591                  (const CoreML__Specification__ArgMinLayerParams *message,
11592                   void *closure_data);
11593 typedef void (*CoreML__Specification__SplitNDLayerParams_Closure)
11594                  (const CoreML__Specification__SplitNDLayerParams *message,
11595                   void *closure_data);
11596 typedef void (*CoreML__Specification__CeilLayerParams_Closure)
11597                  (const CoreML__Specification__CeilLayerParams *message,
11598                   void *closure_data);
11599 typedef void (*CoreML__Specification__RoundLayerParams_Closure)
11600                  (const CoreML__Specification__RoundLayerParams *message,
11601                   void *closure_data);
11602 typedef void (*CoreML__Specification__FloorLayerParams_Closure)
11603                  (const CoreML__Specification__FloorLayerParams *message,
11604                   void *closure_data);
11605 typedef void (*CoreML__Specification__SignLayerParams_Closure)
11606                  (const CoreML__Specification__SignLayerParams *message,
11607                   void *closure_data);
11608 typedef void (*CoreML__Specification__ClipLayerParams_Closure)
11609                  (const CoreML__Specification__ClipLayerParams *message,
11610                   void *closure_data);
11611 typedef void (*CoreML__Specification__SliceStaticLayerParams_Closure)
11612                  (const CoreML__Specification__SliceStaticLayerParams *message,
11613                   void *closure_data);
11614 typedef void (*CoreML__Specification__SliceDynamicLayerParams_Closure)
11615                  (const CoreML__Specification__SliceDynamicLayerParams *message,
11616                   void *closure_data);
11617 typedef void (*CoreML__Specification__TileLayerParams_Closure)
11618                  (const CoreML__Specification__TileLayerParams *message,
11619                   void *closure_data);
11620 typedef void (*CoreML__Specification__GetShapeLayerParams_Closure)
11621                  (const CoreML__Specification__GetShapeLayerParams *message,
11622                   void *closure_data);
11623 typedef void (*CoreML__Specification__ErfLayerParams_Closure)
11624                  (const CoreML__Specification__ErfLayerParams *message,
11625                   void *closure_data);
11626 typedef void (*CoreML__Specification__GeluLayerParams_Closure)
11627                  (const CoreML__Specification__GeluLayerParams *message,
11628                   void *closure_data);
11629 typedef void (*CoreML__Specification__RangeStaticLayerParams_Closure)
11630                  (const CoreML__Specification__RangeStaticLayerParams *message,
11631                   void *closure_data);
11632 typedef void (*CoreML__Specification__RangeDynamicLayerParams_Closure)
11633                  (const CoreML__Specification__RangeDynamicLayerParams *message,
11634                   void *closure_data);
11635 typedef void (*CoreML__Specification__SlidingWindowsLayerParams_Closure)
11636                  (const CoreML__Specification__SlidingWindowsLayerParams *message,
11637                   void *closure_data);
11638 typedef void (*CoreML__Specification__LayerNormalizationLayerParams_Closure)
11639                  (const CoreML__Specification__LayerNormalizationLayerParams *message,
11640                   void *closure_data);
11641 typedef void (*CoreML__Specification__NonMaximumSuppressionLayerParams_Closure)
11642                  (const CoreML__Specification__NonMaximumSuppressionLayerParams *message,
11643                   void *closure_data);
11644 typedef void (*CoreML__Specification__ClampedReLULayerParams_Closure)
11645                  (const CoreML__Specification__ClampedReLULayerParams *message,
11646                   void *closure_data);
11647 typedef void (*CoreML__Specification__ArgSortLayerParams_Closure)
11648                  (const CoreML__Specification__ArgSortLayerParams *message,
11649                   void *closure_data);
11650 typedef void (*CoreML__Specification__SliceBySizeLayerParams_Closure)
11651                  (const CoreML__Specification__SliceBySizeLayerParams *message,
11652                   void *closure_data);
11653 typedef void (*CoreML__Specification__NeuralNetworkClassifier_Closure)
11654                  (const CoreML__Specification__NeuralNetworkClassifier *message,
11655                   void *closure_data);
11656 typedef void (*CoreML__Specification__OneHotLayerParams_Closure)
11657                  (const CoreML__Specification__OneHotLayerParams *message,
11658                   void *closure_data);
11659 typedef void (*CoreML__Specification__CumSumLayerParams_Closure)
11660                  (const CoreML__Specification__CumSumLayerParams *message,
11661                   void *closure_data);
11662 typedef void (*CoreML__Specification__NeuralNetworkRegressor_Closure)
11663                  (const CoreML__Specification__NeuralNetworkRegressor *message,
11664                   void *closure_data);
11665 typedef void (*CoreML__Specification__NetworkUpdateParameters_Closure)
11666                  (const CoreML__Specification__NetworkUpdateParameters *message,
11667                   void *closure_data);
11668 typedef void (*CoreML__Specification__LossLayer_Closure)
11669                  (const CoreML__Specification__LossLayer *message,
11670                   void *closure_data);
11671 typedef void (*CoreML__Specification__CategoricalCrossEntropyLossLayer_Closure)
11672                  (const CoreML__Specification__CategoricalCrossEntropyLossLayer *message,
11673                   void *closure_data);
11674 typedef void (*CoreML__Specification__MeanSquaredErrorLossLayer_Closure)
11675                  (const CoreML__Specification__MeanSquaredErrorLossLayer *message,
11676                   void *closure_data);
11677 typedef void (*CoreML__Specification__Optimizer_Closure)
11678                  (const CoreML__Specification__Optimizer *message,
11679                   void *closure_data);
11680 typedef void (*CoreML__Specification__SGDOptimizer_Closure)
11681                  (const CoreML__Specification__SGDOptimizer *message,
11682                   void *closure_data);
11683 typedef void (*CoreML__Specification__AdamOptimizer_Closure)
11684                  (const CoreML__Specification__AdamOptimizer *message,
11685                   void *closure_data);
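
/*
 * Illustrative sketch only: the *_Closure typedefs above describe the callback
 * shape protobuf-c uses for RPC-style service methods. No services are defined
 * in this file, but a callback matching one of these typedefs would look like
 * the following (the function name and the closure_data usage are assumptions):
 *
 *     static void handle_adam_optimizer
 *                      (const CoreML__Specification__AdamOptimizer *message,
 *                       void *closure_data)
 *     {
 *         // Inspect the received message; closure_data carries caller context.
 *         (void) message;
 *         (void) closure_data;
 *     }
 *
 *     CoreML__Specification__AdamOptimizer_Closure cb = handle_adam_optimizer;
 */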
11686 
11687 /* --- services --- */
11688 
11689 
11690 /* --- descriptors --- */
11691 
11692 extern const ProtobufCEnumDescriptor    core_ml__specification__neural_network_multi_array_shape_mapping__descriptor;
11693 extern const ProtobufCEnumDescriptor    core_ml__specification__neural_network_image_shape_mapping__descriptor;
11694 extern const ProtobufCEnumDescriptor    core_ml__specification__scatter_mode__descriptor;
11695 extern const ProtobufCMessageDescriptor core_ml__specification__neural_network__descriptor;
11696 extern const ProtobufCMessageDescriptor core_ml__specification__neural_network_image_scaler__descriptor;
11697 extern const ProtobufCMessageDescriptor core_ml__specification__neural_network_mean_image__descriptor;
11698 extern const ProtobufCMessageDescriptor core_ml__specification__neural_network_preprocessing__descriptor;
11699 extern const ProtobufCMessageDescriptor core_ml__specification__activation_re_lu__descriptor;
11700 extern const ProtobufCMessageDescriptor core_ml__specification__activation_leaky_re_lu__descriptor;
11701 extern const ProtobufCMessageDescriptor core_ml__specification__activation_tanh__descriptor;
11702 extern const ProtobufCMessageDescriptor core_ml__specification__activation_scaled_tanh__descriptor;
11703 extern const ProtobufCMessageDescriptor core_ml__specification__activation_sigmoid__descriptor;
11704 extern const ProtobufCMessageDescriptor core_ml__specification__activation_linear__descriptor;
11705 extern const ProtobufCMessageDescriptor core_ml__specification__activation_sigmoid_hard__descriptor;
11706 extern const ProtobufCMessageDescriptor core_ml__specification__activation_pre_lu__descriptor;
11707 extern const ProtobufCMessageDescriptor core_ml__specification__activation_elu__descriptor;
11708 extern const ProtobufCMessageDescriptor core_ml__specification__activation_thresholded_re_lu__descriptor;
11709 extern const ProtobufCMessageDescriptor core_ml__specification__activation_softsign__descriptor;
11710 extern const ProtobufCMessageDescriptor core_ml__specification__activation_softplus__descriptor;
11711 extern const ProtobufCMessageDescriptor core_ml__specification__activation_parametric_softplus__descriptor;
11712 extern const ProtobufCMessageDescriptor core_ml__specification__activation_params__descriptor;
11713 extern const ProtobufCMessageDescriptor core_ml__specification__tensor__descriptor;
11714 extern const ProtobufCMessageDescriptor core_ml__specification__neural_network_layer__descriptor;
11715 extern const ProtobufCMessageDescriptor core_ml__specification__branch_layer_params__descriptor;
11716 extern const ProtobufCMessageDescriptor core_ml__specification__loop_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__loop_break_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__loop_continue_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__copy_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__greater_than_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__greater_equal_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__less_than_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__less_equal_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__equal_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__not_equal_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__logical_and_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__logical_or_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__logical_xor_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__logical_not_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__border_amounts__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__border_amounts__edge_sizes__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__valid_padding__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__same_padding__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__same_padding__same_padding_mode__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__sampling_mode__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__sampling_mode__method__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__box_coordinates_mode__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__box_coordinates_mode__coordinates__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__weight_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__quantization_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__linear_quantization_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__look_up_table_quantization_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__convolution_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__convolution3_dlayer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__convolution3_dlayer_params__padding_type__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__inner_product_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__embedding_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__embedding_ndlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__batchnorm_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__pooling_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__pooling_layer_params__valid_complete_padding__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__pooling_layer_params__pooling_type__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__pooling3_dlayer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__pooling3_dlayer_params__pooling_type3_d__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__pooling3_dlayer_params__pooling3_dpadding_type__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__global_pooling3_dlayer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__global_pooling3_dlayer_params__global_pooling_type3_d__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__padding_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__padding_layer_params__padding_constant__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__padding_layer_params__padding_reflection__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__padding_layer_params__padding_replication__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__concat_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__lrnlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__softmax_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__split_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__add_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__multiply_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__unary_function_layer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__unary_function_layer_params__operation__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__upsample_layer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__upsample_layer_params__interpolation_mode__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__upsample_layer_params__linear_upsample_mode__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__resize_bilinear_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__crop_resize_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__bias_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__scale_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__load_constant_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__l2_normalize_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__flatten_layer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__flatten_layer_params__flatten_order__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reshape_layer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__reshape_layer_params__reshape_order__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__permute_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reorganize_data_layer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__reorganize_data_layer_params__reorganization_type__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__slice_layer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__slice_layer_params__slice_axis__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_layer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__reduce_layer_params__reduce_operation__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__reduce_layer_params__reduce_axis__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__crop_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__average_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__max_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__min_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__dot_product_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__mean_variance_normalize_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__sequence_repeat_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__simple_recurrent_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__grulayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__lstmparams__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__lstmweight_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__uni_directional_lstmlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__bi_directional_lstmlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__custom_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__custom_layer_params__custom_layer_param_value__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__custom_layer_params__parameters_entry__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__transpose_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__batched_mat_mul_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__concat_ndlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__softmax_ndlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reverse_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reverse_seq_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__load_constant_ndlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__fill_like_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__fill_static_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__fill_dynamic_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__where_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__sin_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__cos_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__tan_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__asin_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__acos_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__atan_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__sinh_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__cosh_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__tanh_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__asinh_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__acosh_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__atanh_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__pow_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__exp2_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__where_non_zero_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__matrix_band_part_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__upper_triangular_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__lower_triangular_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__broadcast_to_like_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__broadcast_to_static_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__broadcast_to_dynamic_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__add_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__max_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__min_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__mod_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__floor_div_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__subtract_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__multiply_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__divide_broadcastable_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__gather_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__scatter_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__gather_ndlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__scatter_ndlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__gather_along_axis_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__scatter_along_axis_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__stack_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__rank_preserving_reshape_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__constant_padding_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__random_normal_like_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__random_normal_static_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__random_normal_dynamic_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__random_uniform_like_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__random_uniform_static_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__random_uniform_dynamic_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__random_bernoulli_like_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__random_bernoulli_static_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__random_bernoulli_dynamic_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__categorical_distribution_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_l1_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_l2_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_max_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_min_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_sum_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_prod_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_mean_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_log_sum_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_sum_square_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reduce_log_sum_exp_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__expand_dims_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__flatten_to2_dlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reshape_static_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reshape_like_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__reshape_dynamic_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__squeeze_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__top_klayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__arg_max_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__arg_min_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__split_ndlayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__ceil_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__round_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__floor_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__sign_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__clip_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__slice_static_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__slice_dynamic_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__tile_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__get_shape_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__erf_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__gelu_layer_params__descriptor;
extern const ProtobufCEnumDescriptor    core_ml__specification__gelu_layer_params__gelu_mode__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__range_static_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__range_dynamic_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__sliding_windows_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__layer_normalization_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__non_maximum_suppression_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__clamped_re_lulayer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__arg_sort_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__slice_by_size_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__neural_network_classifier__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__one_hot_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__cum_sum_layer_params__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__neural_network_regressor__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__network_update_parameters__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__loss_layer__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__categorical_cross_entropy_loss_layer__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__mean_squared_error_loss_layer__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__optimizer__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__sgdoptimizer__descriptor;
extern const ProtobufCMessageDescriptor core_ml__specification__adam_optimizer__descriptor;
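/*
 * Usage sketch (illustrative only, not part of the generated declarations):
 * each descriptor declared above can drive the generic protobuf-c runtime
 * API.  Assuming `buf` and `len` hold a serialized ConvolutionLayerParams
 * message, it could be decoded and released like this:
 *
 *     ProtobufCMessage *msg = protobuf_c_message_unpack(
 *         &core_ml__specification__convolution_layer_params__descriptor,
 *         NULL,      /- default allocator -/
 *         len, buf); /- serialized bytes   -/
 *     if (msg != NULL)
 *         protobuf_c_message_free_unpacked(msg, NULL);
 *
 * (`/-` stands in for nested comment markers.)  The per-message helpers that
 * protoc-c also generates, e.g. *__unpack and *__free_unpacked, wrap these
 * same generic calls with concrete struct types.
 */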

PROTOBUF_C__END_DECLS


#endif  /* PROTOBUF_C_NeuralNetwork_2eproto__INCLUDED */