/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#include <stdio.h>
#include <string.h>

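/* This file implements the core allocation paths of the collector: */
/* small- and large-object allocation (GC_generic_malloc, GC_malloc, */
/* GC_malloc_atomic, GC_malloc_uncollectable), explicit deallocation */
/* (GC_free), and the optional redirection of malloc/calloc/free. */
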
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      GC_bool ignore_off_page,
                                      GC_bool retry); /* from alloc.c */

/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
    struct hblk * h;
    word n_blocks;
    ptr_t result;
    GC_bool retry = FALSE;

    /* Round up to a multiple of a granule. */
    lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    /* Do our share of marking work. */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lb, k, flags);
# ifdef USE_MUNMAP
    if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lb, k, flags);
    }
# endif
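    /* If the block allocation failed, let the collector reclaim space */
    /* or expand the heap, then retry; retry records that at least one */
    /* such attempt has already been made. */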
    while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
        h = GC_allochblk(lb, k, flags);
        retry = TRUE;
    }
    if (h == 0) {
        result = 0;
    } else {
        size_t total_bytes = n_blocks * HBLKSIZE;
        if (n_blocks > 1) {
            GC_large_allocd_bytes += total_bytes;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        /* FIXME: Do we need some way to reset GC_max_large_allocd_bytes? */
        result = h -> hb_body;
    }
    return result;
}

/* Allocate a large block of size lb bytes.  Clear if appropriate. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result = GC_alloc_large(lb, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

    if (0 == result) return 0;
    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}

/* Allocate lb bytes for an object of kind k. */
/* Should not be used directly to allocate */
/* objects such as STUBBORN objects that */
/* require special handling on allocation. */
/* First a version that assumes we already */
/* hold the lock: */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        op = *opp;
        if (EXPECT(0 == op, FALSE)) {
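            /* The free list for this size is empty.  A zero lg means */
            /* GC_size_map has not yet been set up (or extended) to */
            /* cover this request size. */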
            if (lg == 0) {
                if (!EXPECT(GC_is_initialized, TRUE)) {
                    GC_init();
                    lg = GC_size_map[lb];
                }
                if (0 == lg) {
                    GC_extend_size_map(lb);
                    lg = GC_size_map[lb];
                    GC_ASSERT(lg != 0);
                }
                /* Retry */
                opp = &(kind -> ok_freelist[lg]);
                op = *opp;
            }
            if (0 == op) {
                if (0 == kind -> ok_reclaim_list &&
                        !GC_alloc_reclaim_list(kind))
                    return NULL;
                op = GC_allocobj(lg, k);
                if (0 == op)
                    return NULL;
            }
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        GC_bytes_allocd += lb;
    }

    return op;
}

#if defined(DBG_HDRS_ALL) || defined(GC_GCJ_SUPPORT) \
    || !defined(GC_NO_FINALIZATION)
/* Allocate a composite object of size n bytes.  The caller */
/* guarantees that pointers past the first page are not relevant. */
GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
    word lb_adjusted;
    void * op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner(lb, k));
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    GC_bytes_allocd += lb_adjusted;
    return op;
}
#endif

#ifdef GC_COLLECT_AT_MALLOC
  /* Parameter to force GC at every malloc of size greater than or */
  /* equal to the given value.  This might be handy during debugging. */
  size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
#endif

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
    void * result;
    DCL_LOCK_STATE;

    if (EXPECT(GC_have_errors, FALSE))
        GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb)) {
        LOCK();
        result = GC_generic_malloc_inner((word)lb, k);
        UNLOCK();
    } else {
        size_t lg;
        size_t lb_rounded;
        word n_blocks;
        GC_bool init;

        lg = ROUNDED_UP_GRANULES(lb);
        lb_rounded = GRANULES_TO_BYTES(lg);
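        /* If the rounded size wrapped around, the request is too large; */
        /* fail it through the client's out-of-memory handler. */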
        if (lb_rounded < lb)
            return((*GC_get_oom_fn())(lb));
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        LOCK();
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
        if (0 != result) {
            if (GC_debugging_started) {
                BZERO(result, n_blocks * HBLKSIZE);
            } else {
# ifdef THREADS
                /* Clear any memory that might be used for GC descriptors */
                /* before we release the lock. */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
# endif
            }
        }
        GC_bytes_allocd += lb_rounded;
        UNLOCK();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_get_oom_fn())(lb));
    } else {
        return(result);
    }
}

/* Allocate lb bytes of atomic (pointer-free) data. */
#ifdef THREAD_LOCAL_ALLOC
  GC_INNER void * GC_core_malloc_atomic(size_t lb)
#else
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t lb)
#endif
{
    void *op;
    void ** opp;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        lg = GC_size_map[lb];
        opp = &(GC_aobjfreelist[lg]);
        LOCK();
        if (EXPECT((op = *opp) == 0, FALSE)) {
            UNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        *opp = obj_link(op);
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return((void *) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}

/* Allocate lb bytes of composite (pointerful) data. */
#ifdef THREAD_LOCAL_ALLOC
  GC_INNER void * GC_core_malloc(size_t lb)
#else
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t lb)
#endif
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        lg = GC_size_map[lb];
        opp = (void **)&(GC_objfreelist[lg]);
        LOCK();
        if (EXPECT((op = *opp) == 0, FALSE)) {
            UNLOCK();
            return (GENERAL_MALLOC((word)lb, NORMAL));
        }
        GC_ASSERT(0 == obj_link(op)
                  || ((word)obj_link(op)
                         <= (word)GC_greatest_plausible_heap_addr
                      && (word)obj_link(op)
                         >= (word)GC_least_plausible_heap_addr));
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return op;
    } else {
        return(GENERAL_MALLOC(lb, NORMAL));
    }
}
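
/* Illustrative client-side usage (not part of this file's logic): the */
/* public gc.h macros ultimately reach the allocation paths defined */
/* here, e.g.: */
/*   struct node *n = GC_MALLOC(sizeof(struct node));   (pointerful) */
/*   char *buf = GC_MALLOC_ATOMIC(256);                 (pointer-free) */
/*   void *tab = GC_MALLOC_UNCOLLECTABLE(1024);         (never collected) */
/*   GC_FREE(buf);                                      (optional explicit free) */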

/* Allocate lb bytes of pointerful, traced, but not collectible data. */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
            /* We don't need the extra byte, since this won't be */
            /* collected anyway. */
        lg = GC_size_map[lb];
        opp = &(GC_uobjfreelist[lg]);
        LOCK();
        op = *opp;
        if (EXPECT(0 != op, TRUE)) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be */
            /* cleared only temporarily during a collection, as a */
            /* result of the normal free list mark bit clearing. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        hhdr = HDR(op);
        /* We don't need the lock here, since we have an undisguised */
        /* pointer.  We do need to hold the lock while we adjust */
        /* mark bits. */
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
# ifndef THREADS
            GC_ASSERT(hhdr -> hb_n_marks == 0);
                /* This is not guaranteed in the multi-threaded case */
                /* because the counter could be updated before locking. */
# endif
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
}

#ifdef REDIRECT_MALLOC

# ifndef MSWINCE
#   include <errno.h>
# endif

/* Avoid unnecessary nested procedure calls here by #defining some */
/* malloc replacements.  Otherwise we end up saving a */
/* meaningless return address in the object.  It also speeds things up, */
/* but it is admittedly quite ugly. */
# define GC_debug_malloc_replacement(lb) GC_debug_malloc(lb, GC_DBG_EXTRAS)

void * malloc(size_t lb)
{
    /* It might help to manually inline the GC_malloc call here. */
    /* But any decent compiler should reduce the extra procedure call */
    /* to at most a jump instruction in this case. */
# if defined(I386) && defined(GC_SOLARIS_THREADS)
      /* Thread initialization can call malloc before we are ready for it. */
      /* It's not clear that this is enough to help matters. */
      /* The thread implementation may well call malloc at other */
      /* inopportune times. */
      if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
# endif /* I386 && GC_SOLARIS_THREADS */
    return((void *)REDIRECT_MALLOC(lb));
}

#if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
  STATIC ptr_t GC_libpthread_start = 0;
  STATIC ptr_t GC_libpthread_end = 0;
  STATIC ptr_t GC_libld_start = 0;
  STATIC ptr_t GC_libld_end = 0;

  STATIC void GC_init_lib_bounds(void)
  {
    if (GC_libpthread_start != 0) return;
    GC_init(); /* if not called yet */
    if (!GC_text_mapping("libpthread-",
                         &GC_libpthread_start, &GC_libpthread_end)) {
        WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
        /* This might still work with some versions of libpthread, */
        /* so we don't abort.  Perhaps we should. */
        /* Generate message only once: */
        GC_libpthread_start = (ptr_t)1;
    }
    if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
        WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
    }
  }
#endif /* GC_LINUX_THREADS */

#include <limits.h>
#ifdef SIZE_MAX
# define GC_SIZE_MAX SIZE_MAX
#else
# define GC_SIZE_MAX (~(size_t)0)
#endif

/* Use a size_t-wide shift; 1U << 32 would be undefined on LP64 targets. */
#define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)
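/* If both operands of a product are at most GC_SQRT_SIZE_MAX, the */
/* product cannot exceed GC_SIZE_MAX; calloc() below uses this as a */
/* cheap first check before resorting to a division-based overflow test. */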

void * calloc(size_t n, size_t lb)
{
    if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
        && lb && n > GC_SIZE_MAX / lb)
        return NULL;
# if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
      /* libpthread allocated some memory that is only pointed to by */
      /* mmapped thread stacks.  Make sure it is not collectible. */
      {
        static GC_bool lib_bounds_set = FALSE;
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since */
        /* the bounds will be set when/if we create another thread. */
        if (!EXPECT(lib_bounds_set, TRUE)) {
            GC_init_lib_bounds();
            lib_bounds_set = TRUE;
        }
        if (((word)caller >= (word)GC_libpthread_start
             && (word)caller < (word)GC_libpthread_end)
            || ((word)caller >= (word)GC_libld_start
                && (word)caller < (word)GC_libld_end))
            return GC_malloc_uncollectable(n*lb);
            /* The two ranges are actually usually adjacent, so there may */
            /* be a way to speed this up. */
      }
# endif
    return((void *)REDIRECT_MALLOC(n*lb));
}

#ifndef strdup
  char *strdup(const char *s)
  {
    size_t lb = strlen(s) + 1;
    char *result = (char *)REDIRECT_MALLOC(lb);
    if (result == 0) {
        errno = ENOMEM;
        return 0;
    }
    BCOPY(s, result, lb);
    return result;
  }
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
/* and thus the right thing will happen even without overriding it. */
/* This seems to be true on most Linux systems. */

#ifndef strndup
  /* This is similar to strdup(). */
  char *strndup(const char *str, size_t size)
  {
    char *copy;
    size_t len = strlen(str);
    if (len > size)
        len = size;
    copy = (char *)REDIRECT_MALLOC(len + 1);
    if (copy == NULL) {
        errno = ENOMEM;
        return NULL;
    }
    BCOPY(str, copy, len);
    copy[len] = '\0';
    return copy;
  }
#endif /* !strndup */

#undef GC_debug_malloc_replacement

#endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p. */
GC_API void GC_CALL GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* In bytes */
    size_t ngranules;   /* sz in granules */
    void **flh;
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.  It's not my fault ... */
# ifdef LOG_ALLOCS
    GC_log_printf("GC_free(%p) after GC #%lu\n",
                  p, (unsigned long)GC_gc_no);
# endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
# if defined(REDIRECT_MALLOC) && \
     (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
      || defined(MSWIN32))
        /* For Solaris, we have to redirect malloc calls during */
        /* initialization.  For the others, this seems to happen */
        /* implicitly. */
        /* Don't try to deallocate that memory. */
        if (0 == hhdr) return;
# endif
    GC_ASSERT(GC_base(p) == p);
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
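        /* Small object: thread it back onto the free list for its */
        /* kind and size (in granules); the memory stays in the heap. */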
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
            /* It's unnecessary to clear the mark bit.  If the */
            /* object is reallocated, it doesn't matter.  Otherwise */
            /* the collector will do it, since it's on a free list. */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
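        /* Large object: hand the underlying block(s) back to the */
        /* block allocator and adjust the large-allocation statistics. */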
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}

/* Explicitly deallocate an object p when we already hold the lock. */
/* Only used for internally allocated objects, so we can take some */
/* shortcuts. */
#ifdef THREADS
  GC_INNER void GC_free_inner(void * p)
  {
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* bytes */
    size_t ngranules;   /* sz in granules */
    void ** flh;
    int knd;
    struct obj_kind * ok;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
    }
  }
#endif /* THREADS */

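/* If malloc calls were redirected to the collector but no explicit */
/* free() replacement was requested, redirect free() to GC_free too. */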
#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
#endif

#ifdef REDIRECT_FREE
  void free(void * p)
  {
# if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
      {
        /* Don't bother with initialization checks.  If nothing */
        /* has been initialized, the check fails, and that's safe, */
        /* since we have not allocated any uncollectible objects either. */
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since */
        /* the bounds will be set when/if we create another thread. */
        if (((word)caller >= (word)GC_libpthread_start
             && (word)caller < (word)GC_libpthread_end)
            || ((word)caller >= (word)GC_libld_start
                && (word)caller < (word)GC_libld_end)) {
            GC_free(p);
            return;
        }
      }
# endif
# ifndef IGNORE_FREE
      REDIRECT_FREE(p);
# endif
  }
#endif /* REDIRECT_FREE */