1 /*
2  * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #include "precompiled.hpp"
26 #include "jfr/jfrEvents.hpp"
27 #include "jfr/jni/jfrJavaSupport.hpp"
28 #include "jfr/recorder/jfrRecorder.hpp"
29 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
30 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
31 #include "jfr/recorder/service/jfrOptionSet.hpp"
32 #include "jfr/recorder/service/jfrPostBox.hpp"
33 #include "jfr/recorder/storage/jfrFullStorage.inline.hpp"
34 #include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
35 #include "jfr/recorder/storage/jfrStorage.hpp"
36 #include "jfr/recorder/storage/jfrStorageControl.hpp"
37 #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
38 #include "jfr/utilities/jfrIterator.hpp"
39 #include "jfr/utilities/jfrLinkedList.inline.hpp"
40 #include "jfr/utilities/jfrTime.hpp"
41 #include "jfr/writers/jfrNativeEventWriter.hpp"
42 #include "logging/log.hpp"
43 #include "runtime/mutexLocker.hpp"
44 #include "runtime/os.inline.hpp"
45 #include "runtime/safepoint.hpp"
46 #include "runtime/thread.hpp"
47 
typedef JfrStorage::BufferPtr BufferPtr;

// Singleton storage instance, managed by create()/destroy().
static JfrStorage* _instance = NULL;
// NOTE(review): this file-static _control shadows the member JfrStorage::_control
// used by control(); it is never assigned in this chunk — verify it is needed.
static JfrStorageControl* _control;
52 
// Returns the singleton storage instance; create() must have been called first.
JfrStorage& JfrStorage::instance() {
  return *_instance;
}
56 
// Creates the singleton storage instance. Heavyweight members are not
// allocated here; the caller is expected to invoke initialize() afterwards.
JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) {
  assert(_instance == NULL, "invariant");
  _instance = new JfrStorage(chunkwriter, post_box);
  return _instance;
}
62 
destroy()63 void JfrStorage::destroy() {
64   if (_instance != NULL) {
65     delete _instance;
66     _instance = NULL;
67   }
68 }
69 
JfrStorage(JfrChunkWriter & chunkwriter,JfrPostBox & post_box)70 JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) :
71   _control(NULL),
72   _global_mspace(NULL),
73   _thread_local_mspace(NULL),
74   _chunkwriter(chunkwriter),
75   _post_box(post_box) {}
76 
~JfrStorage()77 JfrStorage::~JfrStorage() {
78   if (_control != NULL) {
79     delete _control;
80   }
81   if (_global_mspace != NULL) {
82     delete _global_mspace;
83   }
84   if (_thread_local_mspace != NULL) {
85     delete _thread_local_mspace;
86   }
87   if (_full_list != NULL) {
88     delete _full_list;
89   }
90   _instance = NULL;
91 }
92 
// Cache count limit and preallocation count for the thread-local mspace.
static const size_t thread_local_cache_count = 8;
// Start to discard data when only this many free global buffers remain.
static const size_t in_memory_discard_threshold_delta = 2;
96 
// Allocates and wires up the storage subsystem: the storage control, the
// global and thread-local memory spaces and the full-buffer list.
// Returns false on any allocation failure; partially constructed state is
// reclaimed by the destructor via destroy().
bool JfrStorage::initialize() {
  assert(_control == NULL, "invariant");
  assert(_global_mspace == NULL, "invariant");
  assert(_thread_local_mspace == NULL, "invariant");

  const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
  assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
  const size_t global_buffer_size = (size_t)JfrOptionSet::global_buffer_size();
  const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();

  // discard threshold: all but 'delta' global buffers in use
  _control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta);
  if (_control == NULL) {
    return false;
  }
  _global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size,
                                                   num_global_buffers, // cache count limit
                                                   num_global_buffers, // cache_preallocate count
                                                   false, // preallocate_to_free_list (== preallocate directly to live list)
                                                   this);
  if (_global_mspace == NULL) {
    return false;
  }
  assert(_global_mspace->live_list_is_nonempty(), "invariant");
  _thread_local_mspace = create_mspace<JfrThreadLocalMspace>(thread_buffer_size,
                                                             thread_local_cache_count, // cache count limit
                                                             thread_local_cache_count, // cache preallocate count
                                                             true,  // preallocate_to_free_list
                                                             this);
  if (_thread_local_mspace == NULL) {
    return false;
  }
  assert(_thread_local_mspace->free_list_is_nonempty(), "invariant");
  // The full list will contain nodes pointing to retired global and transient buffers.
  _full_list = new JfrFullList(*_control);
  return _full_list != NULL && _full_list->initialize(num_global_buffers * 2);
}
133 
// Accessor for the storage control object of the singleton instance.
JfrStorageControl& JfrStorage::control() {
  return *instance()._control;
}
137 
// Emits a warning when a buffer allocation request cannot be satisfied.
static void log_allocation_failure(const char* msg, size_t size) {
  log_warning(jfr)("Unable to allocate " SIZE_FORMAT " bytes of %s.", size, msg);
}
141 
acquire_thread_local(Thread * thread,size_t size)142 BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
143   BufferPtr buffer = mspace_acquire_to_live_list(size, instance()._thread_local_mspace, thread);
144   if (buffer == NULL) {
145     log_allocation_failure("thread local_memory", size);
146     return NULL;
147   }
148   assert(buffer->acquired_by_self(), "invariant");
149   return buffer;
150 }
151 
acquire_transient(size_t size,Thread * thread)152 BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
153   BufferPtr buffer = mspace_allocate_transient_lease(size, instance()._thread_local_mspace, thread);
154   if (buffer == NULL) {
155     log_allocation_failure("transient memory", size);
156     return NULL;
157   }
158   assert(buffer->acquired_by_self(), "invariant");
159   assert(buffer->transient(), "invariant");
160   assert(buffer->lease(), "invariant");
161   return buffer;
162 }
163 
acquire_lease(size_t size,JfrStorageMspace * mspace,JfrStorage & storage_instance,size_t retry_count,Thread * thread)164 static BufferPtr acquire_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
165   assert(size <= mspace->min_element_size(), "invariant");
166   while (true) {
167     BufferPtr buffer = mspace_acquire_lease_with_retry(size, mspace, retry_count, thread);
168     if (buffer == NULL && storage_instance.control().should_discard()) {
169       storage_instance.discard_oldest(thread);
170       continue;
171     }
172     return buffer;
173   }
174 }
175 
acquire_promotion_buffer(size_t size,JfrStorageMspace * mspace,JfrStorage & storage_instance,size_t retry_count,Thread * thread)176 static BufferPtr acquire_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
177   assert(size <= mspace->min_element_size(), "invariant");
178   while (true) {
179     BufferPtr buffer= mspace_acquire_live_with_retry(size, mspace, retry_count, thread);
180     if (buffer == NULL && storage_instance.control().should_discard()) {
181       storage_instance.discard_oldest(thread);
182       continue;
183     }
184     return buffer;
185   }
186 }
187 
188 static const size_t lease_retry = 10;
189 
acquire_large(size_t size,Thread * thread)190 BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
191   JfrStorage& storage_instance = instance();
192   const size_t max_elem_size = storage_instance._global_mspace->min_element_size(); // min is also max
193   // if not too large and capacity is still available, ask for a lease from the global system
194   if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
195     BufferPtr const buffer = acquire_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
196     if (buffer != NULL) {
197       assert(buffer->acquired_by_self(), "invariant");
198       assert(!buffer->transient(), "invariant");
199       assert(buffer->lease(), "invariant");
200       storage_instance.control().increment_leased();
201       return buffer;
202     }
203   }
204   return acquire_transient(size, thread);
205 }
206 
// Accumulates the lost byte count in the thread-local accounting and, when the
// DataLoss event is enabled, writes an EventDataLoss record into 'buffer'
// (which must already have been emptied of the lost data).
static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->empty(), "invariant");
  const u8 total_data_loss = thread->jfr_thread_local()->add_data_lost(unflushed_size);
  if (EventDataLoss::is_enabled()) {
    JfrNativeEventWriter writer(buffer, thread);
    writer.begin_event_write(false);
    // event payload order: id, timestamp, bytes lost now, cumulative bytes lost
    writer.write<u8>(EventDataLoss::eventId);
    writer.write(JfrTicks::now());
    writer.write(unflushed_size);
    writer.write(total_data_loss);
    writer.end_event_write(false);
  }
}
221 
write_data_loss(BufferPtr buffer,Thread * thread)222 static void write_data_loss(BufferPtr buffer, Thread* thread) {
223   assert(buffer != NULL, "invariant");
224   const size_t unflushed_size = buffer->unflushed_size();
225   buffer->reinitialize();
226   if (unflushed_size == 0) {
227     return;
228   }
229   write_data_loss_event(buffer, unflushed_size, thread);
230 }
231 
232 static const size_t promotion_retry = 100;
233 
// Promotes the unflushed contents of a regular (thread-stable) buffer into the
// global system. Returns true when the buffer ends up empty (including the
// trivially-empty and excluded cases); returns false when no promotion buffer
// could be acquired, in which case the data is dropped and a DataLoss recorded.
bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  if (unflushed_size == 0) {
    // nothing to promote
    buffer->reinitialize();
    assert(buffer->empty(), "invariant");
    return true;
  }

  if (buffer->excluded()) {
    // data written while excluded is dropped, not promoted
    const bool thread_is_excluded = thread->jfr_thread_local()->is_excluded();
    buffer->reinitialize(thread_is_excluded);
    assert(buffer->empty(), "invariant");
    if (!thread_is_excluded) {
      // state change from exclusion to inclusion requires a thread checkpoint
      JfrCheckpointManager::write_thread_checkpoint(thread);
    }
    return true;
  }

  BufferPtr const promotion_buffer = acquire_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
  if (promotion_buffer == NULL) {
    write_data_loss(buffer, thread);
    return false;
  }
  assert(promotion_buffer->acquired_by_self(), "invariant");
  assert(promotion_buffer->free_size() >= unflushed_size, "invariant");
  // migrate the unflushed bytes into the global buffer
  buffer->move(promotion_buffer, unflushed_size);
  assert(buffer->empty(), "invariant");
  return true;
}
267 
268 /*
269 * 1. If the buffer was a "lease" from the global system, release back.
270 * 2. If the buffer is transient (temporal dynamically allocated), retire and register full.
271 *
272 * The buffer is effectively invalidated for the thread post-return,
273 * and the caller should take means to ensure that it is not referenced any longer.
274 */
void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->lease(), "invariant");
  assert(buffer->acquired_by_self(), "invariant");
  buffer->clear_lease();
  if (buffer->transient()) {
    // transient buffers are single-use: retire and hand over to the full list
    buffer->set_retired();
    register_full(buffer, thread);
  } else {
    // a global lease: publish the buffer back and update lease accounting
    buffer->release();
    control().decrement_leased();
  }
}
288 
// Adds a retired buffer to the full list and, when the add succeeds, notifies
// the recorder service by posting MSG_FULLBUFFER.
void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->acquired_by(thread), "invariant");
  assert(buffer->retired(), "invariant");
  if (_full_list->add(buffer)) {
    _post_box.post(MSG_FULLBUFFER);
  }
}
297 
// don't use buffer on return, it is gone
// Final release of a regular buffer: flushes outstanding data (falling back to
// dropping it if the flush fails) and marks the buffer retired for reclamation.
void JfrStorage::release(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  assert(!buffer->retired(), "invariant");
  if (!buffer->empty()) {
    if (!flush_regular_buffer(buffer, thread)) {
      // flush failed; the data has been dropped, just reset the buffer
      buffer->reinitialize();
    }
  }
  assert(buffer->empty(), "invariant");
  assert(buffer->identity() != NULL, "invariant");
  buffer->clear_excluded();
  buffer->set_retired();
}
314 
release_thread_local(BufferPtr buffer,Thread * thread)315 void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) {
316   assert(buffer != NULL, "invariant");
317   JfrStorage& storage_instance = instance();
318   storage_instance.release(buffer, thread);
319 }
320 
log_discard(size_t pre_full_count,size_t post_full_count,size_t amount)321 static void log_discard(size_t pre_full_count, size_t post_full_count, size_t amount) {
322   if (log_is_enabled(Debug, jfr, system)) {
323     const size_t number_of_discards = pre_full_count - post_full_count;
324     if (number_of_discards > 0) {
325       log_debug(jfr, system)("Cleared " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" bytes.", number_of_discards, amount);
326       log_debug(jfr, system)("Current number of full buffers " SIZE_FORMAT "", number_of_discards);
327     }
328   }
329 }
330 
discard_oldest(Thread * thread)331 void JfrStorage::discard_oldest(Thread* thread) {
332   if (JfrBuffer_lock->try_lock()) {
333     if (!control().should_discard()) {
334       // another thread handled it
335       return;
336     }
337     const size_t num_full_pre_discard = control().full_count();
338     size_t discarded_size = 0;
339     while (_full_list->is_nonempty()) {
340       BufferPtr oldest = _full_list->remove();
341       assert(oldest != NULL, "invariant");
342       assert(oldest->identity() != NULL, "invariant");
343       discarded_size += oldest->discard();
344       assert(oldest->unflushed_size() == 0, "invariant");
345       if (oldest->transient()) {
346         mspace_release(oldest, _thread_local_mspace);
347         continue;
348       }
349       oldest->reinitialize();
350       assert(!oldest->retired(), "invariant");
351       oldest->release(); // publish
352       break;
353     }
354     JfrBuffer_lock->unlock();
355     log_discard(num_full_pre_discard, control().full_count(), discarded_size);
356   }
357 }
358 
359 #ifdef ASSERT
typedef const BufferPtr ConstBufferPtr;

// Verifies 'cur' is the thread's current (native or java) buffer and that the
// 'used' region lies within the buffer's bounds.
static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->pos() + used <= cur->end(), "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
}
368 
// Preconditions for flushing a regular (non-leased) buffer.
static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(!cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(req >= used, "invariant");
}
376 
// Preconditions for provisioning a large buffer; the thread's regular buffer
// must already have been shelved.
static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
}
383 
// Preconditions for flushing a leased ("large") buffer: it must be the
// thread's current buffer, distinct from the shelved regular buffer.
static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->lease(), "invariant");
  assert(!cur->excluded(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
  assert(cur != t->jfr_thread_local()->shelved_buffer(), "invariant");
}
395 #endif // ASSERT
396 
flush(BufferPtr cur,size_t used,size_t req,bool native,Thread * t)397 BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) {
398   debug_only(assert_flush_precondition(cur, used, native, t);)
399   const u1* const cur_pos = cur->pos();
400   req += used;
401   // requested size now encompass the outstanding used size
402   return cur->lease() ? instance().flush_large(cur, cur_pos, used, req, native, t) :
403                           instance().flush_regular(cur, cur_pos, used, req, native, t);
404 }
405 
// Flushes a regular buffer and keeps using it when the post-flush free space
// covers 'req' bytes; otherwise shelves it and provisions a larger lease.
// Uncommitted data ('used' bytes at cur_pos) is migrated along.
BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
  // A flush is needed before memmove since a non-large buffer is thread stable
  // (thread local). The flush will not modify memory in addresses above pos()
  // which is where the "used / uncommitted" data resides. It is therefore both
  // possible and valid to migrate data after the flush. This is however only
  // the case for stable thread local buffers; it is not the case for large buffers.
  flush_regular_buffer(cur, t);
  if (cur->excluded()) {
    return cur;
  }
  if (cur->free_size() >= req) {
    // simplest case, no switching of buffers
    if (used > 0) {
      // source and destination may overlap so memmove must be used instead of memcpy
      memmove(cur->pos(), (void*)cur_pos, used);
    }
    assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
    return cur;
  }
  // Going for a "larger-than-regular" buffer.
  // Shelve the current buffer to make room for a temporary lease.
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  t->jfr_thread_local()->shelve_buffer(cur);
  return provision_large(cur, cur_pos, used, req, native, t);
}
432 
store_buffer_to_thread_local(BufferPtr buffer,JfrThreadLocal * jfr_thread_local,bool native)433 static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* jfr_thread_local, bool native) {
434   assert(buffer != NULL, "invariant");
435   if (native) {
436     jfr_thread_local->set_native_buffer(buffer);
437   } else {
438     jfr_thread_local->set_java_buffer(buffer);
439   }
440   return buffer;
441 }
442 
restore_shelved_buffer(bool native,Thread * t)443 static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
444   JfrThreadLocal* const tl = t->jfr_thread_local();
445   BufferPtr shelved = tl->shelved_buffer();
446   assert(shelved != NULL, "invariant");
447   tl->shelve_buffer(NULL);
448   // restore shelved buffer back as primary
449   return store_buffer_to_thread_local(shelved, tl, native);
450 }
451 
// Flush for a leased ("large") buffer. If the shelved regular buffer can hold
// the request, the uncommitted bytes are copied back, the lease released, and
// the regular buffer reinstated; otherwise a new, larger lease is provisioned.
BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
  // Can the "regular" buffer (now shelved) accommodate the requested size?
  BufferPtr shelved = t->jfr_thread_local()->shelved_buffer();
  assert(shelved != NULL, "invariant");
  if (shelved->free_size() >= req) {
    if (req > 0) {
      // copy the 'used' uncommitted bytes back into the regular buffer
      memcpy(shelved->pos(), (void*)cur_pos, (size_t)used);
    }
    // release and invalidate
    release_large(cur, t);
    return restore_shelved_buffer(native, t);
  }
  // regular too small
  return provision_large(cur, cur_pos,  used, req, native, t);
}
468 
// Fallback when a large buffer could not be provisioned: release any lease
// still held and reinstate the shelved regular buffer as the thread's primary.
static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  if (cur->lease()) {
    storage_instance.release_large(cur, t);
  }
  return restore_shelved_buffer(native, t);
}
477 
// Always returns a non-null buffer.
// If accommodating the large request fails, the shelved buffer is returned
// even though it might be smaller than the requested size.
// Caller needs to ensure if the size was successfully accommodated.
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_provision_large_precondition(cur, used, req, t);)
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  BufferPtr const buffer = acquire_large(req, t);
  if (buffer == NULL) {
    // unable to allocate and serve the request
    return large_fail(cur, native, *this, t);
  }
  // ok managed to acquire a "large" buffer for the requested size
  assert(buffer->free_size() >= req, "invariant");
  assert(buffer->lease(), "invariant");
  // transfer outstanding data
  memcpy(buffer->pos(), (void*)cur_pos, used);
  if (cur->lease()) {
    release_large(cur, t);
    // don't use current anymore, it is gone
  }
  return store_buffer_to_thread_local(buffer, t->jfr_thread_local(), native);
}
501 
// Operation types composing the write pipelines below.
typedef UnBufferedWriteToChunk<JfrBuffer> WriteOperation;
typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;

// Predicate filtering out buffers marked as excluded.
typedef Excluded<JfrBuffer, true> NonExcluded;
typedef PredicatedConcurrentWriteOp<WriteOperation, NonExcluded>  ConcurrentNonExcludedWriteOperation;

// Scavenges (releases) retired thread-local buffers while traversing the live list.
typedef ScavengingReleaseOp<JfrThreadLocalMspace, JfrThreadLocalMspace::LiveList> ReleaseThreadLocalOperation;
typedef CompositeOperation<ConcurrentNonExcludedWriteOperation, ReleaseThreadLocalOperation> ConcurrentWriteReleaseThreadLocalOperation;
511 
// Writes all full, thread-local and global buffers to the current chunk.
// Returns the total number of elements (buffers) processed.
size_t JfrStorage::write() {
  const size_t full_elements = write_full();
  WriteOperation wo(_chunkwriter);
  NonExcluded ne;
  ConcurrentNonExcludedWriteOperation cnewo(wo, ne);
  // thread-local buffers are scavenged (released) during the same traversal
  ReleaseThreadLocalOperation rtlo(_thread_local_mspace, _thread_local_mspace->live_list());
  ConcurrentWriteReleaseThreadLocalOperation tlop(&cnewo, &rtlo);
  process_live_list(tlop, _thread_local_mspace);
  assert(_global_mspace->free_list_is_empty(), "invariant");
  assert(_global_mspace->live_list_is_nonempty(), "invariant");
  process_live_list(cnewo, _global_mspace);
  return full_elements + wo.elements();
}
525 
// Safepoint variant of write(): no thread-local scavenging is performed.
// Returns the total number of elements (buffers) processed.
size_t JfrStorage::write_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  const size_t full_elements = write_full();
  WriteOperation wo(_chunkwriter);
  NonExcluded ne;
  ConcurrentNonExcludedWriteOperation cnewo(wo, ne); // concurrent because of gc's
  process_live_list(cnewo, _thread_local_mspace);
  assert(_global_mspace->free_list_is_empty(), "invariant");
  assert(_global_mspace->live_list_is_nonempty(), "invariant");
  process_live_list(cnewo, _global_mspace);
  return full_elements + wo.elements();
}
538 
// Discard pipeline: drop buffer contents, scavenging thread-local buffers.
typedef DiscardOp<DefaultDiscarder<JfrStorage::Buffer> > DiscardOperation;
typedef CompositeOperation<DiscardOperation, ReleaseThreadLocalOperation> DiscardReleaseThreadLocalOperation;
541 
// Discards all outstanding data without writing it to the chunk.
// Returns the total number of elements (buffers) processed.
size_t JfrStorage::clear() {
  const size_t full_elements = clear_full();
  DiscardOperation discarder(concurrent); // concurrent discard mode
  ReleaseThreadLocalOperation rtlo(_thread_local_mspace, _thread_local_mspace->live_list());
  DiscardReleaseThreadLocalOperation tldo(&discarder, &rtlo);
  process_live_list(tldo, _thread_local_mspace);
  assert(_global_mspace->free_list_is_empty(), "invariant");
  assert(_global_mspace->live_list_is_nonempty(), "invariant");
  process_live_list(discarder, _global_mspace);
  return full_elements + discarder.elements();
}
553 
554 template <typename Processor>
process_full(Processor & processor,JfrFullList * list,JfrStorageControl & control)555 static size_t process_full(Processor& processor, JfrFullList* list, JfrStorageControl& control) {
556   assert(list != NULL, "invariant");
557   assert(list->is_nonempty(), "invariant");
558   size_t count = 0;
559   do {
560     BufferPtr full = list->remove();
561     if (full == NULL) break;
562     assert(full->retired(), "invariant");
563     processor.process(full);
564     // at this point, the buffer is already live or destroyed
565     ++count;
566   } while (list->is_nonempty());
567   return count;
568 }
569 
log(size_t count,size_t amount,bool clear=false)570 static void log(size_t count, size_t amount, bool clear = false) {
571   if (log_is_enabled(Debug, jfr, system)) {
572     if (count > 0) {
573       log_debug(jfr, system)("%s " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" B of data%s",
574         clear ? "Discarded" : "Wrote", count, amount, clear ? "." : " to chunk.");
575     }
576   }
577 }
578 
// Write-full pipeline: mutexed write followed by release back to the mspace.
typedef ReleaseOp<JfrThreadLocalMspace> ReleaseFullOperation;
typedef CompositeOperation<MutexedWriteOperation, ReleaseFullOperation> WriteFullOperation;
581 
// full writer
// Assumption is retired only; exclusive access
// MutexedWriter -> ReleaseOp
//
// Writes every buffer on the full list to the current chunk and releases it.
// Returns the number of full buffers processed.
size_t JfrStorage::write_full() {
  assert(_chunkwriter.is_valid(), "invariant");
  if (_full_list->is_empty()) {
    return 0;
  }
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // a retired buffer implies mutexed access
  ReleaseFullOperation rfo(_thread_local_mspace);
  WriteFullOperation wfo(&writer, &rfo);
  const size_t count = process_full(wfo, _full_list, control());
  if (count != 0) {
    log(count, writer.size());
  }
  return count;
}
601 
clear_full()602 size_t JfrStorage::clear_full() {
603   if (_full_list->is_empty()) {
604     return 0;
605   }
606   DiscardOperation discarder(mutexed); // a retired buffer implies mutexed access
607   const size_t count = process_full(discarder, _full_list, control());
608   if (count != 0) {
609     log(count, discarder.size());
610   }
611   return count;
612 }
613