/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
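// A BoxLockNode represents the address of the stack slot pair used for a
// lock. Its input register mask is restricted to that single stack slot
// (see the constructor below).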
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
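  // Pin this node to the monitor's stack slot: build an input mask that
  // contains only the register name for that slot.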
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has its own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
bool BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxLockNode after register allocation, which may have
  // spilled the box nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged,
    // so it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

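// The register assigned to a box is the single stack slot in its input mask.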
OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is this BoxLock node used for one simple lock region (same box and obj)?
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
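  // Walk all users of this box: every lock or unlock that uses it must
  // refer to the same object, otherwise the region is not simple.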
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check the lock's box since the box could be referenced by the Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj), "");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi node of different
    // cast nodes which point to this locked object.
    // We assume that no other objects could be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
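// FastLock and FastUnlock nodes implement the fast-path monitor enter and
// exit inserted by Parse::do_monitor_enter() and Parse::do_monitor_exit()
// below.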
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
bool FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
bool FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//
// Create a counter which counts the number of times this lock is acquired
//
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}

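// Create RTM locking counters when RTM profiling is enabled, or when precise
// RTM locking statistics are requested and RTM is in use; a separate counter
// set is created for stack locks when UseRTMForStackLocks is set.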
void FastLockNode::create_rtm_lock_counter(JVMState* state) {
#if INCLUDE_RTM_OPT
  Compile* C = Compile::current();
  if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
    RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
    _rtm_counters = rlnc->counters();
    if (UseRTMForStackLocks) {
      rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
      _stack_rtm_counters = rlnc->counters();
    }
  }
#endif
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get the casted pointer.
  Node* obj = null_check(peek());
  // Check for locking a null object.
  if (stopped()) return;

  // The monitor object is not part of the debug info expression stack.
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer and the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop the oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}