/*
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP
#define SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP

#include "jfr/recorder/storage/jfrStorageUtils.hpp"
#include "runtime/atomic.hpp"
#include "runtime/thread.inline.hpp"
32 template <typename T>
write(T * t,const u1 * data,size_t size)33 inline bool UnBufferedWriteToChunk<T>::write(T* t, const u1* data, size_t size) {
34 assert((intptr_t)size >= 0, "invariant");
35 _writer.write_unbuffered(data, (intptr_t)size);
36 ++_elements;
37 _size += size;
38 return true;
39 }
40
41 template <typename T>
discard(T * t,const u1 * data,size_t size)42 inline bool DefaultDiscarder<T>::discard(T* t, const u1* data, size_t size) {
43 ++_elements;
44 _size += size;
45 return true;
46 }
47
48 template <typename Type>
get_unflushed_size(const u1 * top,Type * t)49 inline size_t get_unflushed_size(const u1* top, Type* t) {
50 assert(t != NULL, "invariant");
51 return Atomic::load_acquire(t->pos_address()) - top;
52 }
53
54 template <typename Operation>
process(typename Operation::Type * t)55 inline bool ConcurrentWriteOp<Operation>::process(typename Operation::Type* t) {
56 const bool is_retired = t->retired();
57 // acquire_critical_section_top() must be read before pos() for stable access
58 const u1* const top = is_retired ? t->top() : t->acquire_critical_section_top();
59 const size_t unflushed_size = get_unflushed_size(top, t);
60 assert((intptr_t)unflushed_size >= 0, "invariant");
61 if (unflushed_size == 0) {
62 if (is_retired) {
63 t->set_top(top);
64 } else {
65 t->release_critical_section_top(top);
66 }
67 return true;
68 }
69 const bool result = _operation.write(t, top, unflushed_size);
70 if (is_retired) {
71 t->set_top(top + unflushed_size);
72 } else {
73 t->release_critical_section_top(top + unflushed_size);
74 }
75 return result;
76 }
77
78 template <typename Operation>
process(typename Operation::Type * t)79 inline bool MutexedWriteOp<Operation>::process(typename Operation::Type* t) {
80 assert(t != NULL, "invariant");
81 const u1* const top = t->top();
82 const size_t unflushed_size = get_unflushed_size(top, t);
83 assert((intptr_t)unflushed_size >= 0, "invariant");
84 if (unflushed_size == 0) {
85 return true;
86 }
87 const bool result = _operation.write(t, top, unflushed_size);
88 t->set_top(top + unflushed_size);
89 return result;
90 }
91
92 template <typename Type>
retired_sensitive_acquire(Type * t)93 static void retired_sensitive_acquire(Type* t) {
94 assert(t != NULL, "invariant");
95 if (t->retired()) {
96 return;
97 }
98 Thread* const thread = Thread::current();
99 while (!t->try_acquire(thread)) {
100 if (t->retired()) {
101 return;
102 }
103 }
104 }
105
106 template <typename Operation>
process(typename Operation::Type * t)107 inline bool ExclusiveOp<Operation>::process(typename Operation::Type* t) {
108 retired_sensitive_acquire(t);
109 assert(t->acquired_by_self() || t->retired(), "invariant");
110 // User is required to ensure proper release of the acquisition
111 return MutexedWriteOp<Operation>::process(t);
112 }
113
114 template <typename Operation>
process(typename Operation::Type * t)115 inline bool DiscardOp<Operation>::process(typename Operation::Type* t) {
116 assert(t != NULL, "invariant");
117 const u1* const top = _mode == concurrent ? t->acquire_critical_section_top() : t->top();
118 const size_t unflushed_size = get_unflushed_size(top, t);
119 assert((intptr_t)unflushed_size >= 0, "invariant");
120 if (unflushed_size == 0) {
121 if (_mode == concurrent) {
122 t->release_critical_section_top(top);
123 }
124 return true;
125 }
126 const bool result = _operation.discard(t, top, unflushed_size);
127 if (_mode == concurrent) {
128 t->release_critical_section_top(top + unflushed_size);
129 } else {
130 t->set_top(top + unflushed_size);
131 }
132 return result;
133 }
134
135 template <typename Operation>
process(typename Operation::Type * t)136 inline bool ExclusiveDiscardOp<Operation>::process(typename Operation::Type* t) {
137 retired_sensitive_acquire(t);
138 assert(t->acquired_by_self() || t->retired(), "invariant");
139 // User is required to ensure proper release of the acquisition
140 return DiscardOp<Operation>::process(t);
141 }
142
143 template <typename Operation>
process(typename Operation::Type * t)144 inline bool EpochDispatchOp<Operation>::process(typename Operation::Type* t) {
145 assert(t != NULL, "invariant");
146 const u1* const current_top = _previous_epoch ? t->start() : t->top();
147 const size_t unflushed_size = Atomic::load_acquire(t->pos_address()) - current_top;
148 assert((intptr_t)unflushed_size >= 0, "invariant");
149 if (unflushed_size == 0) {
150 return true;
151 }
152 _elements = dispatch(_previous_epoch, current_top, unflushed_size);
153 t->set_top(current_top + unflushed_size);
154 return true;
155 }
156
157 template <typename Operation>
dispatch(bool previous_epoch,const u1 * element,size_t size)158 size_t EpochDispatchOp<Operation>::dispatch(bool previous_epoch, const u1* element, size_t size) {
159 assert(element != NULL, "invariant");
160 const u1* const limit = element + size;
161 size_t elements = 0;
162 while (element < limit) {
163 element += _operation(element, previous_epoch);
164 ++elements;
165 }
166 assert(element == limit, "invariant");
167 return elements;
168 }
169
#endif // SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP