1 /**********************************************************************
2
3 iseq.h -
4
5 $Author: ko1 $
6 created at: 04/01/01 23:36:57 JST
7
8 Copyright (C) 2004-2008 Koichi Sasada
9
10 **********************************************************************/
11
12 #ifndef RUBY_ISEQ_H
13 #define RUBY_ISEQ_H 1
14
15 RUBY_EXTERN const int ruby_api_version[];
16 #define ISEQ_MAJOR_VERSION ((unsigned int)ruby_api_version[0])
17 #define ISEQ_MINOR_VERSION ((unsigned int)ruby_api_version[1])
18
19 #ifndef rb_iseq_t
20 typedef struct rb_iseq_struct rb_iseq_t;
21 #define rb_iseq_t rb_iseq_t
22 #endif
23
static inline size_t
rb_call_info_kw_arg_bytes(int keyword_len)
{
    /* Number of bytes to allocate for an rb_call_info_kw_arg carrying
     * `keyword_len` keyword symbols.  The `- 1` suggests the struct embeds
     * a one-element VALUE array as a flexible-array stand-in -- confirm
     * against its declaration.
     * NOTE(review): assumes keyword_len >= 1; with 0 the signed -1 would
     * wrap to a huge size_t -- verify callers never pass 0. */
    return sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (keyword_len - 1);
}
29
/* Coverage bookkeeping lives in iseq->body->variable.  The *_SET variants
 * store through RB_OBJ_WRITE so the GC write barrier records the new
 * reference from the iseq to the coverage object. */
#define ISEQ_COVERAGE(iseq) iseq->body->variable.coverage
#define ISEQ_COVERAGE_SET(iseq, cov) RB_OBJ_WRITE(iseq, &iseq->body->variable.coverage, cov)
#define ISEQ_LINE_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_LINES)
#define ISEQ_BRANCH_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_BRANCHES)

/* Per-iseq table mapping a program counter to its branch-coverage index;
 * written with the GC write barrier like the coverage slot above. */
#define ISEQ_PC2BRANCHINDEX(iseq) iseq->body->variable.pc2branchindex
#define ISEQ_PC2BRANCHINDEX_SET(iseq, h) RB_OBJ_WRITE(iseq, &iseq->body->variable.pc2branchindex, h)

/* Monotonic counter per iseq; presumably numbers flip-flop states -- see
 * ISEQ_FLIP_CNT_INCREMENT below. */
#define ISEQ_FLIP_CNT(iseq) (iseq)->body->variable.flip_count
39
40 static inline rb_snum_t
ISEQ_FLIP_CNT_INCREMENT(const rb_iseq_t * iseq)41 ISEQ_FLIP_CNT_INCREMENT(const rb_iseq_t *iseq)
42 {
43 rb_snum_t cnt = iseq->body->variable.flip_count;
44 iseq->body->variable.flip_count += 1;
45 return cnt;
46 }
47
48 static inline VALUE *
ISEQ_ORIGINAL_ISEQ(const rb_iseq_t * iseq)49 ISEQ_ORIGINAL_ISEQ(const rb_iseq_t *iseq)
50 {
51 return iseq->body->variable.original_iseq;
52 }
53
54 static inline void
ISEQ_ORIGINAL_ISEQ_CLEAR(const rb_iseq_t * iseq)55 ISEQ_ORIGINAL_ISEQ_CLEAR(const rb_iseq_t *iseq)
56 {
57 void *ptr = iseq->body->variable.original_iseq;
58 iseq->body->variable.original_iseq = NULL;
59 if (ptr) {
60 ruby_xfree(ptr);
61 }
62 }
63
64 static inline VALUE *
ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t * iseq,long size)65 ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
66 {
67 return iseq->body->variable.original_iseq =
68 ruby_xmalloc2(size, sizeof(VALUE));
69 }
70
71 #define ISEQ_TRACE_EVENTS (RUBY_EVENT_LINE | \
72 RUBY_EVENT_CLASS | \
73 RUBY_EVENT_END | \
74 RUBY_EVENT_CALL | \
75 RUBY_EVENT_RETURN| \
76 RUBY_EVENT_B_CALL| \
77 RUBY_EVENT_B_RETURN| \
78 RUBY_EVENT_COVERAGE_LINE| \
79 RUBY_EVENT_COVERAGE_BRANCH)
80
81 #define ISEQ_NOT_LOADED_YET IMEMO_FL_USER1
82 #define ISEQ_USE_COMPILE_DATA IMEMO_FL_USER2
83 #define ISEQ_TRANSLATED IMEMO_FL_USER3
84 #define ISEQ_MARKABLE_ISEQ IMEMO_FL_USER4
85
86 #define ISEQ_EXECUTABLE_P(iseq) (FL_TEST_RAW((iseq), ISEQ_NOT_LOADED_YET | ISEQ_USE_COMPILE_DATA) == 0)
87
/* Transient per-iseq state used only while the iseq is being compiled.
 * Attached via ISEQ_COMPILE_DATA_ALLOC and detached by
 * ISEQ_COMPILE_DATA_CLEAR once compilation is done. */
struct iseq_compile_data {
    /* GC is needed */
    const VALUE err_info;            /* pending compile error, if any */
    VALUE mark_ary;                  /* keeps compile-time objects alive */
    const VALUE catch_table_ary;     /* Array */

    /* GC is not needed */
    struct iseq_label_data *start_label;  /* entry label of current construct */
    struct iseq_label_data *end_label;    /* exit label of current construct */
    struct iseq_label_data *redo_label;   /* target for `redo`, when applicable */
    const rb_iseq_t *current_block;       /* enclosing block iseq, or NULL */
    VALUE ensure_node;
    VALUE for_iseq;
    struct iseq_compile_data_ensure_node_stack *ensure_node_stack;
    /* bump-allocator chain for compile-time scratch memory (see
     * struct iseq_compile_data_storage below) */
    struct iseq_compile_data_storage *storage_head;
    struct iseq_compile_data_storage *storage_current;
    int loopval_popped;	/* used by NODE_BREAK */
    int last_line;                   /* last line number emitted, for tracing */
    int label_no;                    /* counter for generating unique labels */
    int node_level;
    unsigned int ci_index;           /* next call-info slot to assign */
    unsigned int ci_kw_index;        /* next keyword call-info slot */
    const rb_compile_option_t *option;    /* optimization switches in effect */
    struct rb_id_table *ivar_cache_table;
#if SUPPORT_JOKE
    st_table *labels_table;
#endif
};
116
117 static inline struct iseq_compile_data *
ISEQ_COMPILE_DATA(const rb_iseq_t * iseq)118 ISEQ_COMPILE_DATA(const rb_iseq_t *iseq)
119 {
120 if (iseq->flags & ISEQ_USE_COMPILE_DATA) {
121 return iseq->aux.compile_data;
122 }
123 else {
124 return NULL;
125 }
126 }
127
128 static inline void
ISEQ_COMPILE_DATA_ALLOC(rb_iseq_t * iseq)129 ISEQ_COMPILE_DATA_ALLOC(rb_iseq_t *iseq)
130 {
131 iseq->aux.compile_data = ZALLOC(struct iseq_compile_data);
132 iseq->flags |= ISEQ_USE_COMPILE_DATA;
133 }
134
135 static inline void
ISEQ_COMPILE_DATA_CLEAR(rb_iseq_t * iseq)136 ISEQ_COMPILE_DATA_CLEAR(rb_iseq_t *iseq)
137 {
138 iseq->flags &= ~ISEQ_USE_COMPILE_DATA;
139 iseq->aux.compile_data = NULL;
140 }
141
142 static inline rb_iseq_t *
iseq_imemo_alloc(void)143 iseq_imemo_alloc(void)
144 {
145 return (rb_iseq_t *)rb_imemo_new(imemo_iseq, 0, 0, 0, 0);
146 }
147
148 VALUE rb_iseq_ibf_dump(const rb_iseq_t *iseq, VALUE opt);
149 void rb_ibf_load_iseq_complete(rb_iseq_t *iseq);
150 const rb_iseq_t *rb_iseq_ibf_load(VALUE str);
151 VALUE rb_iseq_ibf_load_extra_data(VALUE str);
152 void rb_iseq_init_trace(rb_iseq_t *iseq);
153 int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line);
154 int rb_iseq_remove_local_tracepoint_recursively(const rb_iseq_t *iseq, VALUE tpval);
155 const rb_iseq_t *rb_iseq_load_iseq(VALUE fname);
156
157 #if VM_INSN_INFO_TABLE_IMPL == 2
158 unsigned int *rb_iseq_insns_info_decode_positions(const struct rb_iseq_constant_body *body);
159 #endif
160
161 RUBY_SYMBOL_EXPORT_BEGIN
162
163 /* compile.c */
164 VALUE rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node);
165 VALUE rb_iseq_compile_ifunc(rb_iseq_t *iseq, const struct vm_ifunc *ifunc);
166 int rb_iseq_translate_threaded_code(rb_iseq_t *iseq);
167 VALUE *rb_iseq_original_iseq(const rb_iseq_t *iseq);
168 void rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc,
169 VALUE locals, VALUE args,
170 VALUE exception, VALUE body);
171
172 /* iseq.c */
173 VALUE rb_iseq_load(VALUE data, VALUE parent, VALUE opt);
174 VALUE rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc);
175 struct st_table *ruby_insn_make_insn_table(void);
176 unsigned int rb_iseq_line_no(const rb_iseq_t *iseq, size_t pos);
177 void rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events);
178 void rb_iseq_trace_set_all(rb_event_flag_t turnon_events);
179 void rb_iseq_trace_on_all(void);
180 void rb_iseq_insns_info_encode_positions(const rb_iseq_t *iseq);
181
182 VALUE rb_iseqw_new(const rb_iseq_t *iseq);
183 const rb_iseq_t *rb_iseqw_to_iseq(VALUE iseqw);
184
185 VALUE rb_iseq_absolute_path(const rb_iseq_t *iseq); /* obsolete */
186 VALUE rb_iseq_label(const rb_iseq_t *iseq);
187 VALUE rb_iseq_base_label(const rb_iseq_t *iseq);
188 VALUE rb_iseq_first_lineno(const rb_iseq_t *iseq);
189 VALUE rb_iseq_method_name(const rb_iseq_t *iseq);
190 void rb_iseq_code_location(const rb_iseq_t *iseq, int *first_lineno, int *first_column, int *last_lineno, int *last_column);
191
192 void rb_iseq_remove_coverage_all(void);
193
194 /* proc.c */
195 const rb_iseq_t *rb_method_iseq(VALUE body);
196 const rb_iseq_t *rb_proc_get_iseq(VALUE proc, int *is_proc);
197
/* Compiler switches; populated by rb_iseq_make_compile_option.  Each bit
 * toggles one optimization or instrumentation pass. */
struct rb_compile_option_struct {
    unsigned int inline_const_cache: 1;
    unsigned int peephole_optimization: 1;
    unsigned int tailcall_optimization: 1;
    unsigned int specialized_instruction: 1;
    unsigned int operands_unification: 1;
    unsigned int instructions_unification: 1;
    unsigned int stack_caching: 1;
    unsigned int frozen_string_literal: 1;        /* treat string literals as frozen */
    unsigned int debug_frozen_string_literal: 1;
    unsigned int coverage_enabled: 1;
    int debug_level;
};
211
/* Per-instruction metadata: source line and the trace events that may fire
 * at that instruction. */
struct iseq_insn_info_entry {
    int line_no;
    rb_event_flag_t events;
};
216
/* One row of an iseq's exception/catch table. */
struct iseq_catch_table_entry {
    enum catch_type {
        /* NOTE(review): the values are Fixnum-encoded (INT2FIX), not plain
         * ints -- presumably so the type survives being stored/dumped as a
         * VALUE; confirm against the loader/dumper before changing. */
        CATCH_TYPE_RESCUE = INT2FIX(1),
        CATCH_TYPE_ENSURE = INT2FIX(2),
        CATCH_TYPE_RETRY  = INT2FIX(3),
        CATCH_TYPE_BREAK  = INT2FIX(4),
        CATCH_TYPE_REDO   = INT2FIX(5),
        CATCH_TYPE_NEXT   = INT2FIX(6)
    } type;

    /*
     * iseq type:
     * CATCH_TYPE_RESCUE, CATCH_TYPE_ENSURE:
     *   use iseq as continuation.
     *
     * CATCH_TYPE_BREAK (iter):
     *   use iseq as key.
     *
     * CATCH_TYPE_BREAK (while), CATCH_TYPE_RETRY,
     * CATCH_TYPE_REDO, CATCH_TYPE_NEXT:
     *   NULL.
     */
    const rb_iseq_t *iseq;

    unsigned int start;   /* region start, as an instruction position */
    unsigned int end;     /* region end */
    unsigned int cont;    /* position to continue at after handling */
    unsigned int sp;      /* stack pointer to restore */
};
246
/* Catch table: a size-prefixed flexible array of entries; sized with
 * iseq_catch_table_bytes below.  Packed/unaligned per the project macro. */
PACKED_STRUCT_UNALIGNED(struct iseq_catch_table {
    unsigned int size;
    struct iseq_catch_table_entry entries[FLEX_ARY_LEN];
});
251
252 static inline int
iseq_catch_table_bytes(int n)253 iseq_catch_table_bytes(int n)
254 {
255 enum {
256 catch_table_entry_size = sizeof(struct iseq_catch_table_entry),
257 catch_table_entries_max = (INT_MAX - offsetof(struct iseq_catch_table, entries)) / catch_table_entry_size
258 };
259 if (n > catch_table_entries_max) rb_fatal("too large iseq_catch_table - %d", n);
260 return (int)(offsetof(struct iseq_catch_table, entries) +
261 n * catch_table_entry_size);
262 }
263
#define INITIAL_ISEQ_COMPILE_DATA_STORAGE_BUFF_SIZE (512)

/* One arena in the compile-time bump allocator: a singly linked chain of
 * buffers; `pos` is the next free offset within `buff`, `size` its capacity. */
struct iseq_compile_data_storage {
    struct iseq_compile_data_storage *next;
    unsigned int pos;
    unsigned int size;
    char buff[FLEX_ARY_LEN];
};
272
273 /* defined? */
274
/* Result categories for the `defined?` keyword; converted to the Ruby
 * string answer by rb_iseq_defined_string.  DEFINED_NOT_DEFINED (0) means
 * "not defined" so the value can double as a boolean. */
enum defined_type {
    DEFINED_NOT_DEFINED,
    DEFINED_NIL = 1,
    DEFINED_IVAR,
    DEFINED_LVAR,
    DEFINED_GVAR,
    DEFINED_CVAR,
    DEFINED_CONST,
    DEFINED_METHOD,
    DEFINED_YIELD,
    DEFINED_ZSUPER,
    DEFINED_SELF,
    DEFINED_TRUE,
    DEFINED_FALSE,
    DEFINED_ASGN,
    DEFINED_EXPR,
    DEFINED_IVAR2,
    DEFINED_REF,     /* regexp back-reference / special variable */
    DEFINED_FUNC
};
295
296 VALUE rb_iseq_defined_string(enum defined_type type);
297 void rb_iseq_make_compile_option(struct rb_compile_option_struct *option, VALUE opt);
298
299 /* vm.c */
300 VALUE rb_iseq_local_variables(const rb_iseq_t *iseq);
301
302 RUBY_SYMBOL_EXPORT_END
303
304 #endif /* RUBY_ISEQ_H */
305