1#!/usr/local/bin/thrift --gen java:beans,nocamel,hashcode
2
3/*
4 * Licensed to the Apache Software Foundation (ASF) under one
5 * or more contributor license agreements. See the NOTICE file
6 * distributed with this work for additional information
7 * regarding copyright ownership. The ASF licenses this file
8 * to you under the Apache License, Version 2.0 (the
9 * "License"); you may not use this file except in compliance
10 * with the License. You may obtain a copy of the License at
11 *
12 *   http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing,
15 * software distributed under the License is distributed on an
16 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
17 * KIND, either express or implied. See the License for the
18 * specific language governing permissions and limitations
19 * under the License.
20 *
21 * Contains some contributions under the Thrift Software License.
22 * Please see doc/old-thrift-license.txt in the Thrift distribution for
23 * details.
24 */
25
26namespace java backtype.storm.generated
27
// One constructor argument for a JavaObject: exactly one of the
// supported primitive/binary alternatives is set.
union JavaObjectArg {
  1: i32 int_arg;
  2: i64 long_arg;
  3: string string_arg;
  4: bool bool_arg;
  5: binary binary_arg;
  6: double double_arg;
}
36
// Describes a Java object to be instantiated from this spec:
// a fully qualified class name plus the ordered constructor arguments.
struct JavaObject {
  // Fully qualified class name, e.g. "com.example.MyGrouping".
  1: required string full_class_name;
  // Constructor arguments, in declaration order.
  2: required list<JavaObjectArg> args_list;
}
41
// Intentionally empty struct. Used as a payload-less alternative inside
// unions (see Grouping) where only the *choice* of field carries meaning.
struct NullStruct {

}
45
// Globally identifies a stream within a topology: the id of the component
// that emits it plus the component-local stream id.
struct GlobalStreamId {
  1: required string componentId;
  2: required string streamId;
  #Going to need to add an enum for the stream type (NORMAL or FAILURE)
}
51
// Declares how tuples from a source stream are partitioned across the
// tasks of the consuming component. Exactly one alternative is set; for
// the NullStruct alternatives only the field choice itself is meaningful.
union Grouping {
  1: list<string> fields; //empty list means global grouping
  2: NullStruct shuffle; // tuple is sent to random task
  3: NullStruct all; // tuple is sent to every task
  4: NullStruct none; // tuple is sent to a single task (storm's choice) -> allows storm to optimize the topology by bundling tasks into a single process
  5: NullStruct direct; // this bolt expects the source bolt to send tuples directly to it
  6: JavaObject custom_object; // custom grouping instantiated from a class name + args
  7: binary custom_serialized; // custom grouping as a pre-serialized Java object
  8: NullStruct local_or_shuffle; // prefer sending to tasks in the same worker process, otherwise shuffle
}
62
// Declaration of a single output stream of a component.
struct StreamInfo {
  // Names of the fields in each tuple emitted on this stream.
  1: required list<string> output_fields;
  // Whether this is a direct stream (consumers must use direct grouping).
  2: required bool direct;
}
67
// A component implemented outside the JVM, run as a subprocess
// (multilang protocol). E.g. execution_command "python" with script "bolt.py".
struct ShellComponent {
  // should change this to 1: required list<string> execution_command;
  1: string execution_command;
  2: string script;
}
73
// The implementation of a spout/bolt, in exactly one of three forms.
union ComponentObject {
  // Java-serialized implementation object.
  1: binary serialized_java;
  // Non-JVM implementation run via the shell/multilang mechanism.
  2: ShellComponent shell;
  // Implementation described by class name + constructor args,
  // instantiated reflectively.
  3: JavaObject java_object;
}
79
// Configuration shared by every component type (spout, bolt, state spout):
// what it consumes, what it emits, and how it is parallelized/configured.
struct ComponentCommon {
  // Input streams this component subscribes to, and how each is grouped.
  1: required map<GlobalStreamId, Grouping> inputs;
  2: required map<string, StreamInfo> streams; //key is stream id
  3: optional i32 parallelism_hint; //how many threads across the cluster should be dedicated to this component

  // component specific configuration respects:
  // topology.debug: false
  // topology.max.task.parallelism: null // can replace isDistributed with this
  // topology.max.spout.pending: null
  // topology.kryo.register // this is the only additive one

  // component specific configuration
  4: optional string json_conf;
}
94
// A spout definition: its implementation plus the common component config.
struct SpoutSpec {
  1: required ComponentObject spout_object;
  2: required ComponentCommon common;
  // can force a spout to be non-distributed by overriding the component configuration
  // and setting TOPOLOGY_MAX_TASK_PARALLELISM to 1
}
101
// A bolt definition: its implementation plus the common component config.
struct Bolt {
  1: required ComponentObject bolt_object;
  2: required ComponentCommon common;
}
106
// not implemented yet
// this will eventually be the basis for subscription implementation in storm
struct StateSpoutSpec {
  1: required ComponentObject state_spout_object;
  2: required ComponentCommon common;
}
113
// The complete wiring of a topology: every component keyed by its id.
// Connectivity is implied by each component's ComponentCommon.inputs.
struct StormTopology {
  //ids must be unique across maps
  // #workers to use is in conf
  1: required map<string, SpoutSpec> spouts;
  2: required map<string, Bolt> bolts;
  3: required map<string, StateSpoutSpec> state_spouts;
}
121
// Thrown when submitting a topology whose name is already running.
exception AlreadyAliveException {
  1: required string msg;
}
125
// Thrown when an operation references a topology that is not running.
exception NotAliveException {
  1: required string msg;
}
129
// Thrown when a submitted/rebalanced topology fails validation.
exception InvalidTopologyException {
  1: required string msg;
}
133
// Cluster-level summary of one running topology (as shown in getClusterInfo).
struct TopologySummary {
  1: required string id;
  2: required string name;
  3: required i32 num_tasks;
  4: required i32 num_executors;
  5: required i32 num_workers;
  // Seconds since the topology was launched.
  6: required i32 uptime_secs;
  // e.g. ACTIVE / INACTIVE / KILLED / REBALANCING — NOTE(review): free-form
  // string here; exact vocabulary is defined by the server, not this IDL.
  7: required string status;
}
143
// Summary of one supervisor node in the cluster.
struct SupervisorSummary {
  1: required string host;
  // Seconds since this supervisor started.
  2: required i32 uptime_secs;
  // Total worker slots on this node.
  3: required i32 num_workers;
  // Worker slots currently assigned.
  4: required i32 num_used_workers;
  5: required string supervisor_id;
}
151
// Snapshot of the whole cluster: supervisors, nimbus uptime, topologies.
struct ClusterSummary {
  1: required list<SupervisorSummary> supervisors;
  2: required i32 nimbus_uptime_secs;
  3: required list<TopologySummary> topologies;
}
157
// A single reported component error with its report time (epoch seconds,
// presumably — confirm against the server implementation).
struct ErrorInfo {
  1: required string error;
  2: required i32 error_time_secs;
}
162
// Per-bolt statistics. Outer map key is the time window (see ExecutorStats
// comment); inner map key is the input stream the metric is counted against.
struct BoltStats {
  1: required map<string, map<GlobalStreamId, i64>> acked;
  2: required map<string, map<GlobalStreamId, i64>> failed;
  // Average process latency in milliseconds.
  3: required map<string, map<GlobalStreamId, double>> process_ms_avg;
  4: required map<string, map<GlobalStreamId, i64>> executed;
  // Average execute latency in milliseconds.
  5: required map<string, map<GlobalStreamId, double>> execute_ms_avg;
}
170
// Per-spout statistics. Outer map key is the time window; inner map key
// is the stream id.
struct SpoutStats {
  1: required map<string, map<string, i64>> acked;
  2: required map<string, map<string, i64>> failed;
  // Average complete latency in milliseconds.
  3: required map<string, map<string, double>> complete_ms_avg;
}
176
// Stats specific to the executor's component type: exactly one of
// bolt-shaped or spout-shaped statistics.
union ExecutorSpecificStats {
  1: BoltStats bolt;
  2: SpoutStats spout;
}
181
// Stats are a map from the time window (all time or a number indicating number of seconds in the window)
//    to the stats. Usually stats are a stream id to a count or average.
struct ExecutorStats {
  // window -> (stream id -> count) of tuples emitted.
  1: required map<string, map<string, i64>> emitted;
  // window -> (stream id -> count) of tuples transferred to other tasks.
  2: required map<string, map<string, i64>> transferred;
  // Bolt- or spout-specific stats, depending on the component type.
  3: required ExecutorSpecificStats specific;
}
189
// The contiguous range of task ids this executor runs
// (an executor is a thread that hosts one or more tasks).
struct ExecutorInfo {
  1: required i32 task_start;
  2: required i32 task_end;
}
194
// Runtime summary of one executor: where it runs and its stats.
struct ExecutorSummary {
  1: required ExecutorInfo executor_info;
  2: required string component_id;
  3: required string host;
  4: required i32 port;
  5: required i32 uptime_secs;
  // NOTE: field id 6 is skipped — presumably removed historically.
  // Do not reuse id 6 for a new field (wire compatibility).
  7: optional ExecutorStats stats;
}
203
// Detailed runtime information for a single topology.
struct TopologyInfo {
  1: required string id;
  2: required string name;
  3: required i32 uptime_secs;
  4: required list<ExecutorSummary> executors;
  5: required string status;
  // component id -> list of errors reported by that component.
  6: required map<string, list<ErrorInfo>> errors;
}
212
// Options for killTopologyWithOpts.
struct KillOptions {
  // Seconds to wait (allowing in-flight processing to drain) before the
  // topology is actually killed; server default applies when unset.
  1: optional i32 wait_secs;
}
216
// Options for rebalance. Unset fields keep the topology's current values.
struct RebalanceOptions {
  // Seconds to wait before the rebalance takes effect.
  1: optional i32 wait_secs;
  // New total number of worker processes.
  2: optional i32 num_workers;
  // component id -> new number of executors for that component.
  3: optional map<string, i32> num_executors;
}
222
// State a topology starts in when submitted with SubmitOptions.
enum TopologyInitialStatus {
    ACTIVE = 1,
    INACTIVE = 2
}
// Options for submitTopologyWithOpts.
struct SubmitOptions {
  1: required TopologyInitialStatus initial_status;
}
230
// The master (Nimbus) service: topology lifecycle management, jar
// upload/download, and cluster/topology introspection.
service Nimbus {
  // Submit a new topology. jsonConf is a JSON-encoded config map;
  // uploadedJarLocation is the path returned by the file-upload sequence.
  void submitTopology(1: string name, 2: string uploadedJarLocation, 3: string jsonConf, 4: StormTopology topology) throws (1: AlreadyAliveException e, 2: InvalidTopologyException ite);
  // Same as submitTopology, with an explicit initial status.
  void submitTopologyWithOpts(1: string name, 2: string uploadedJarLocation, 3: string jsonConf, 4: StormTopology topology, 5: SubmitOptions options) throws (1: AlreadyAliveException e, 2: InvalidTopologyException ite);
  void killTopology(1: string name) throws (1: NotAliveException e);
  void killTopologyWithOpts(1: string name, 2: KillOptions options) throws (1: NotAliveException e);
  void activate(1: string name) throws (1: NotAliveException e);
  void deactivate(1: string name) throws (1: NotAliveException e);
  void rebalance(1: string name, 2: RebalanceOptions options) throws (1: NotAliveException e, 2: InvalidTopologyException ite);

  // need to add functions for asking about status of storms, what nodes they're running on, looking at task logs

  // File upload protocol: beginFileUpload returns an upload location,
  // then the jar is streamed with uploadChunk, then finishFileUpload.
  string beginFileUpload();
  void uploadChunk(1: string location, 2: binary chunk);
  void finishFileUpload(1: string location);

  string beginFileDownload(1: string file);
  //can stop downloading chunks when receive 0-length byte array back
  binary downloadChunk(1: string id);

  // returns json
  string getNimbusConf();
  // stats functions
  ClusterSummary getClusterInfo();
  TopologyInfo getTopologyInfo(1: string id) throws (1: NotAliveException e);
  //returns json
  string getTopologyConf(1: string id) throws (1: NotAliveException e);
  StormTopology getTopology(1: string id) throws (1: NotAliveException e);
  // The topology as the user submitted it (before any system-added
  // components/streams are included).
  StormTopology getUserTopology(1: string id) throws (1: NotAliveException e);
}
260
// A pending DRPC invocation handed to a topology for execution.
struct DRPCRequest {
  // Serialized arguments for the DRPC function call.
  1: required string func_args;
  // Id used to correlate the eventual result/failRequest call.
  2: required string request_id;
}
265
// Thrown when a DRPC request fails to execute.
exception DRPCExecutionException {
  1: required string msg;
}
269
// Client-facing DRPC service: blocking request/response execution of a
// named function by a topology.
service DistributedRPC {
  string execute(1: string functionName, 2: string funcArgs) throws (1: DRPCExecutionException e);
}
273
// Topology-facing side of DRPC: topologies poll for pending requests with
// fetchRequest and report outcomes with result/failRequest, keyed by the
// request id.
service DistributedRPCInvocations {
  void result(1: string id, 2: string result);
  DRPCRequest fetchRequest(1: string functionName);
  void failRequest(1: string id);
}
279