File: jdk/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
Warning: line 252, column 21: Called C++ object pointer is null
1 | /* | |||
2 | * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. | |||
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |||
4 | * | |||
5 | * This code is free software; you can redistribute it and/or modify it | |||
6 | * under the terms of the GNU General Public License version 2 only, as | |||
7 | * published by the Free Software Foundation. | |||
8 | * | |||
9 | * This code is distributed in the hope that it will be useful, but WITHOUT | |||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |||
12 | * version 2 for more details (a copy is included in the LICENSE file that | |||
13 | * accompanied this code). | |||
14 | * | |||
15 | * You should have received a copy of the GNU General Public License version | |||
16 | * 2 along with this work; if not, write to the Free Software Foundation, | |||
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |||
18 | * | |||
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |||
20 | * or visit www.oracle.com if you need additional information or have any | |||
21 | * questions. | |||
22 | * | |||
23 | */ | |||
24 | ||||
25 | #include "precompiled.hpp" | |||
26 | #include "cds/archiveBuilder.hpp" | |||
27 | #include "cds/dynamicArchive.hpp" | |||
28 | #include "classfile/altHashing.hpp" | |||
29 | #include "classfile/classLoaderData.hpp" | |||
30 | #include "classfile/compactHashtable.hpp" | |||
31 | #include "classfile/javaClasses.hpp" | |||
32 | #include "classfile/symbolTable.hpp" | |||
33 | #include "memory/allocation.inline.hpp" | |||
34 | #include "memory/metaspaceClosure.hpp" | |||
35 | #include "memory/resourceArea.hpp" | |||
36 | #include "oops/oop.inline.hpp" | |||
37 | #include "runtime/atomic.hpp" | |||
38 | #include "runtime/interfaceSupport.inline.hpp" | |||
39 | #include "runtime/timerTrace.hpp" | |||
40 | #include "services/diagnosticCommand.hpp" | |||
41 | #include "utilities/concurrentHashTable.inline.hpp" | |||
42 | #include "utilities/concurrentHashTableTasks.inline.hpp" | |||
43 | #include "utilities/utf8.hpp" | |||
44 | ||||
45 | // We used to not resize at all, so let's be conservative | |||
46 | // and not set it too short before we decide to resize, | |||
47 | // to match previous startup behavior | |||
48 | const double PREF_AVG_LIST_LEN = 8.0; | |||
49 | // 2^24 is max size, like StringTable. | |||
50 | const size_t END_SIZE = 24; | |||
51 | // If a chain gets to 100 something might be wrong | |||
52 | const size_t REHASH_LEN = 100; | |||
53 | ||||
54 | const size_t ON_STACK_BUFFER_LENGTH = 128; | |||
55 | ||||
56 | // -------------------------------------------------------------------------- | |||
57 | ||||
58 | inline bool symbol_equals_compact_hashtable_entry(Symbol* value, const char* key, int len) { | |||
59 | if (value->equals(key, len)) { | |||
60 | return true; | |||
61 | } else { | |||
62 | return false; | |||
63 | } | |||
64 | } | |||
65 | ||||
66 | static OffsetCompactHashtable< | |||
67 | const char*, Symbol*, | |||
68 | symbol_equals_compact_hashtable_entry | |||
69 | > _shared_table; | |||
70 | ||||
71 | static OffsetCompactHashtable< | |||
72 | const char*, Symbol*, | |||
73 | symbol_equals_compact_hashtable_entry | |||
74 | > _dynamic_shared_table; | |||
75 | ||||
76 | // -------------------------------------------------------------------------- | |||
77 | ||||
78 | typedef ConcurrentHashTable<SymbolTableConfig, mtSymbol> SymbolTableHash; | |||
79 | static SymbolTableHash* _local_table = NULL; | |||
80 | ||||
81 | volatile bool SymbolTable::_has_work = 0; | |||
82 | volatile bool SymbolTable::_needs_rehashing = false; | |||
83 | ||||
84 | // For statistics | |||
85 | static size_t _symbols_removed = 0; | |||
86 | static size_t _symbols_counted = 0; | |||
87 | static size_t _current_size = 0; | |||
88 | ||||
89 | static volatile size_t _items_count = 0; | |||
90 | static volatile bool _has_items_to_clean = false; | |||
91 | ||||
92 | ||||
93 | static volatile bool _alt_hash = false; | |||
94 | ||||
95 | #ifdef USE_LIBRARY_BASED_TLS_ONLY | |||
96 | static volatile bool _lookup_shared_first = false; | |||
97 | #else | |||
98 | // "_lookup_shared_first" can get highly contended with many cores if multiple threads | |||
99 | // are updating "lookup success history" in a global shared variable. If built-in TLS is available, use it. | |||
100 | static THREAD_LOCAL bool _lookup_shared_first = false; | |||
101 | #endif | |||
102 | ||||
103 | // Static arena for symbols that are not deallocated | |||
104 | Arena* SymbolTable::_arena = NULL; | |||
105 | ||||
106 | static uint64_t _alt_hash_seed = 0; | |||
107 | ||||
108 | static inline void log_trace_symboltable_helper(Symbol* sym, const char* msg) { | |||
109 | #ifndef PRODUCT | |||
110 | ResourceMark rm; | |||
111 | log_trace(symboltable)("%s [%s]", msg, sym->as_quoted_ascii()); | |||
112 | #endif // PRODUCT | |||
113 | } | |||
114 | ||||
115 | // Pick hashing algorithm. | |||
116 | static uintx hash_symbol(const char* s, int len, bool useAlt) { | |||
117 | return useAlt ? | |||
118 | AltHashing::halfsiphash_32(_alt_hash_seed, (const uint8_t*)s, len) : | |||
119 | java_lang_String::hash_code((const jbyte*)s, len); | |||
120 | } | |||
121 | ||||
122 | #if INCLUDE_CDS | |||
123 | static uintx hash_shared_symbol(const char* s, int len) { | |||
124 | return java_lang_String::hash_code((const jbyte*)s, len); | |||
125 | } | |||
126 | #endif | |||
127 | ||||
128 | class SymbolTableConfig : public AllStatic { | |||
129 | private: | |||
130 | public: | |||
131 | typedef Symbol* Value; // value of the Node in the hashtable | |||
132 | ||||
133 | static uintx get_hash(Value const& value, bool* is_dead) { | |||
134 | *is_dead = (value->refcount() == 0); | |||
135 | if (*is_dead) { | |||
136 | return 0; | |||
137 | } else { | |||
138 | return hash_symbol((const char*)value->bytes(), value->utf8_length(), _alt_hash); | |||
139 | } | |||
140 | } | |||
141 | // We use default allocation/deallocation but counted | |||
142 | static void* allocate_node(void* context, size_t size, Value const& value) { | |||
143 | SymbolTable::item_added(); | |||
144 | return AllocateHeap(size, mtSymbol); | |||
145 | } | |||
146 | static void free_node(void* context, void* memory, Value const& value) { | |||
147 | // We get here because #1 some threads lost a race to insert a newly created Symbol | |||
148 | // or #2 we're cleaning up unused symbol. | |||
149 | // If #1, then the symbol can be either permanent, | |||
150 | // or regular newly created one (refcount==1) | |||
151 | // If #2, then the symbol is dead (refcount==0) | |||
152 | assert(value->is_permanent() || (value->refcount() == 1) || (value->refcount() == 0), | |||
153 |        "refcount %d", value->refcount()); | |||
154 | if (value->refcount() == 1) { | |||
155 | value->decrement_refcount(); | |||
156 | assert(value->refcount() == 0, "expected dead symbol"); | |||
157 | } | |||
158 | SymbolTable::delete_symbol(value); | |||
159 | FreeHeap(memory); | |||
160 | SymbolTable::item_removed(); | |||
161 | } | |||
162 | }; | |||
163 | ||||
164 | static size_t ceil_log2(size_t value) { | |||
165 | size_t ret; | |||
166 | for (ret = 1; ((size_t)1 << ret) < value; ++ret); | |||
167 | return ret; | |||
168 | } | |||
169 | ||||
170 | void SymbolTable::create_table () { | |||
171 | size_t start_size_log_2 = ceil_log2(SymbolTableSize); | |||
172 | _current_size = ((size_t)1) << start_size_log_2; | |||
173 | log_trace(symboltable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")", | |||
174 |                        _current_size, start_size_log_2); | |||
175 | _local_table = new SymbolTableHash(start_size_log_2, END_SIZE, REHASH_LEN); | |||
176 | ||||
177 | // Initialize the arena for global symbols, size passed in depends on CDS. | |||
178 | if (symbol_alloc_arena_size == 0) { | |||
179 | _arena = new (mtSymbol) Arena(mtSymbol); | |||
180 | } else { | |||
181 | _arena = new (mtSymbol) Arena(mtSymbol, symbol_alloc_arena_size); | |||
182 | } | |||
183 | } | |||
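// A quick worked example of the sizing above (the value is hypothetical, not a
// stated default): with -XX:SymbolTableSize=20000, ceil_log2(20000) = 15, so
// start_size_log_2 = 15 and the table starts with 2^15 = 32768 buckets
// (_current_size = 32768).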
184 | ||||
185 | void SymbolTable::delete_symbol(Symbol* sym) { | |||
186 | if (sym->is_permanent()) { | |||
187 | MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena | |||
188 | // Deleting permanent symbol should not occur very often (insert race condition), | |||
189 | // so log it. | |||
190 | log_trace_symboltable_helper(sym, "Freeing permanent symbol"); | |||
191 | if (!arena()->Afree(sym, sym->size())) { | |||
192 | log_trace_symboltable_helper(sym, "Leaked permanent symbol"); | |||
193 | } | |||
194 | } else { | |||
195 | delete sym; | |||
196 | } | |||
197 | } | |||
198 | ||||
199 | void SymbolTable::reset_has_items_to_clean() { Atomic::store(&_has_items_to_clean, false); } | |||
200 | void SymbolTable::mark_has_items_to_clean() { Atomic::store(&_has_items_to_clean, true); } | |||
201 | bool SymbolTable::has_items_to_clean() { return Atomic::load(&_has_items_to_clean); } | |||
202 | ||||
203 | void SymbolTable::item_added() { | |||
204 | Atomic::inc(&_items_count); | |||
205 | } | |||
206 | ||||
207 | void SymbolTable::item_removed() { | |||
208 | Atomic::inc(&(_symbols_removed)); | |||
209 | Atomic::dec(&_items_count); | |||
210 | } | |||
211 | ||||
212 | double SymbolTable::get_load_factor() { | |||
213 | return (double)_items_count/_current_size; | |||
214 | } | |||
215 | ||||
216 | size_t SymbolTable::table_size() { | |||
217 | return ((size_t)1) << _local_table->get_size_log2(Thread::current()); | |||
218 | } | |||
219 | ||||
220 | void SymbolTable::trigger_cleanup() { | |||
221 | MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); | |||
222 | _has_work = true; | |||
223 | Service_lock->notify_all(); | |||
224 | } | |||
225 | ||||
226 | Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap) { | |||
227 | assert(len <= Symbol::max_length(), "should be checked by caller"); | |||
228 | ||||
229 | Symbol* sym; | |||
230 | if (DumpSharedSpaces) { | |||
231 | // TODO: Special handling of Symbol allocation for DumpSharedSpaces will be removed | |||
232 | // in JDK-8250989 | |||
233 | c_heap = false; | |||
234 | } | |||
235 | if (c_heap) { | |||
236 | // refcount starts as 1 | |||
237 | sym = new (len) Symbol((const u1*)name, len, 1); | |||
238 | assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted"); | |||
239 | } else if (DumpSharedSpaces) { | |||
240 | // See comments inside Symbol::operator new(size_t, int) | |||
241 | sym = new (len) Symbol((const u1*)name, len, PERM_REFCOUNT); | |||
242 | assert(sym != NULL, "new should call vm_exit_out_of_memory if failed to allocate symbol during DumpSharedSpaces"); | |||
243 | } else { | |||
244 | // Allocate to global arena | |||
245 | MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena | |||
246 | sym = new (len, arena()) Symbol((const u1*)name, len, PERM_REFCOUNT); | |||
247 | } | |||
248 | return sym; | |||
249 | } | |||
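// Refcount summary for the branches above, as read from this code: C-heap
// symbols start at refcount 1 and can die when the count reaches 0, while
// arena-allocated and DumpSharedSpaces symbols get PERM_REFCOUNT and are only
// handed back via delete_symbol() when an insert race is lost.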
250 | ||||
251 | class SymbolsDo : StackObj { | |||
252 | SymbolClosure *_cl; | |||
253 | public: | |||
254 | SymbolsDo(SymbolClosure *cl) : _cl(cl) {} | |||
255 | bool operator()(Symbol** value) { | |||
256 | assert(value != NULL, "expected valid value"); | |||
257 | assert(*value != NULL, "value should point to a symbol"); | |||
258 | _cl->do_symbol(value); | |||
259 | return true; | |||
260 | }; | |||
261 | }; | |||
262 | ||||
263 | class SharedSymbolIterator { | |||
264 | SymbolClosure* _symbol_closure; | |||
265 | public: | |||
266 | SharedSymbolIterator(SymbolClosure* f) : _symbol_closure(f) {} | |||
267 | void do_value(Symbol* symbol) { | |||
268 | _symbol_closure->do_symbol(&symbol); | |||
269 | } | |||
270 | }; | |||
271 | ||||
272 | // Call function for all symbols in the symbol table. | |||
273 | void SymbolTable::symbols_do(SymbolClosure *cl) { | |||
274 | assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); | |||
275 | // all symbols from shared table | |||
276 | SharedSymbolIterator iter(cl); | |||
277 | _shared_table.iterate(&iter); | |||
278 | _dynamic_shared_table.iterate(&iter); | |||
279 | ||||
280 | // all symbols from the dynamic table | |||
281 | SymbolsDo sd(cl); | |||
282 | _local_table->do_safepoint_scan(sd); | |||
283 | } | |||
284 | ||||
285 | // Call function for all symbols in shared table. Used by -XX:+PrintSharedArchiveAndExit | |||
286 | void SymbolTable::shared_symbols_do(SymbolClosure *cl) { | |||
287 | SharedSymbolIterator iter(cl); | |||
288 | _shared_table.iterate(&iter); | |||
289 | _dynamic_shared_table.iterate(&iter); | |||
290 | } | |||
291 | ||||
292 | Symbol* SymbolTable::lookup_dynamic(const char* name, | |||
293 | int len, unsigned int hash) { | |||
294 | Symbol* sym = do_lookup(name, len, hash); | |||
295 | assert((sym == NULL) || sym->refcount() != 0, "refcount must not be zero"); | |||
296 | return sym; | |||
297 | } | |||
298 | ||||
299 | #if INCLUDE_CDS | |||
300 | Symbol* SymbolTable::lookup_shared(const char* name, | |||
301 | int len, unsigned int hash) { | |||
302 | Symbol* sym = NULL; | |||
303 | if (!_shared_table.empty()) { | |||
304 | if (_alt_hash) { | |||
305 | // hash_code parameter may use alternate hashing algorithm but the shared table | |||
306 | // always uses the same original hash code. | |||
307 | hash = hash_shared_symbol(name, len); | |||
308 | } | |||
309 | sym = _shared_table.lookup(name, hash, len); | |||
310 | if (sym == NULL && DynamicArchive::is_mapped()) { | |||
311 | sym = _dynamic_shared_table.lookup(name, hash, len); | |||
312 | } | |||
313 | } | |||
314 | return sym; | |||
315 | } | |||
316 | #endif | |||
317 | ||||
318 | Symbol* SymbolTable::lookup_common(const char* name, | |||
319 | int len, unsigned int hash) { | |||
320 | Symbol* sym; | |||
321 | if (_lookup_shared_first) { | |||
322 | sym = lookup_shared(name, len, hash); | |||
323 | if (sym == NULL) { | |||
324 | _lookup_shared_first = false; | |||
325 | sym = lookup_dynamic(name, len, hash); | |||
326 | } | |||
327 | } else { | |||
328 | sym = lookup_dynamic(name, len, hash); | |||
329 | if (sym == NULL) { | |||
330 | sym = lookup_shared(name, len, hash); | |||
331 | if (sym != NULL) { | |||
332 | _lookup_shared_first = true; | |||
333 | } | |||
334 | } | |||
335 | } | |||
336 | return sym; | |||
337 | } | |||
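// Sketch of the heuristic above (illustrative): a thread that keeps finding
// its symbols in the CDS shared table (after missing the dynamic table) flips
// its _lookup_shared_first flag to true and probes the shared table first; a
// single miss there flips it back, so each thread (or the single global flag
// in the TLS-less build) adapts to its own recent lookup mix.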
338 | ||||
339 | Symbol* SymbolTable::new_symbol(const char* name, int len) { | |||
340 | unsigned int hash = hash_symbol(name, len, _alt_hash); | |||
341 | Symbol* sym = lookup_common(name, len, hash); | |||
342 | if (sym == NULL) { | |||
343 | sym = do_add_if_needed(name, len, hash, true); | |||
344 | } | |||
345 | assert(sym->refcount() != 0, "lookup should have incremented the count"); | |||
346 | assert(sym->equals(name, len), "symbol must be properly initialized"); | |||
347 | return sym; | |||
348 | } | |||
349 | ||||
350 | Symbol* SymbolTable::new_symbol(const Symbol* sym, int begin, int end) { | |||
351 | assert(begin <= end && end <= sym->utf8_length(), "just checking"); | |||
352 | assert(sym->refcount() != 0, "require a valid symbol"); | |||
353 | const char* name = (const char*)sym->base() + begin; | |||
354 | int len = end - begin; | |||
355 | unsigned int hash = hash_symbol(name, len, _alt_hash); | |||
356 | Symbol* found = lookup_common(name, len, hash); | |||
357 | if (found == NULL) { | |||
358 | found = do_add_if_needed(name, len, hash, true); | |||
359 | } | |||
360 | return found; | |||
361 | } | |||
362 | ||||
363 | class SymbolTableLookup : StackObj { | |||
364 | private: | |||
365 | uintx _hash; | |||
366 | int _len; | |||
367 | const char* _str; | |||
368 | public: | |||
369 | SymbolTableLookup(const char* key, int len, uintx hash) | |||
370 | : _hash(hash), _len(len), _str(key) {} | |||
371 | uintx get_hash() const { | |||
372 | return _hash; | |||
373 | } | |||
374 | bool equals(Symbol** value, bool* is_dead) { | |||
375 | assert(value != NULL, "expected valid value"); | |||
376 | assert(*value != NULL, "value should point to a symbol"); | |||
377 | Symbol *sym = *value; | |||
378 | if (sym->equals(_str, _len)) { | |||
379 | if (sym->try_increment_refcount()) { | |||
380 | // something is referencing this symbol now. | |||
381 | return true; | |||
382 | } else { | |||
383 | assert(sym->refcount() == 0, "expected dead symbol"); | |||
384 | *is_dead = true; | |||
385 | return false; | |||
386 | } | |||
387 | } else { | |||
388 | *is_dead = (sym->refcount() == 0); | |||
389 | return false; | |||
390 | } | |||
391 | } | |||
392 | }; | |||
393 | ||||
394 | class SymbolTableGet : public StackObj { | |||
395 | Symbol* _return; | |||
396 | public: | |||
397 | SymbolTableGet() : _return(NULL) {} | |||
398 | void operator()(Symbol** value) { | |||
399 | assert(value != NULL, "expected valid value"); | |||
400 | assert(*value != NULL, "value should point to a symbol"); | |||
401 | _return = *value; | |||
402 | } | |||
403 | Symbol* get_res_sym() const { | |||
404 | return _return; | |||
405 | } | |||
406 | }; | |||
407 | ||||
408 | Symbol* SymbolTable::do_lookup(const char* name, int len, uintx hash) { | |||
409 | Thread* thread = Thread::current(); | |||
410 | SymbolTableLookup lookup(name, len, hash); | |||
411 | SymbolTableGet stg; | |||
412 | bool rehash_warning = false; | |||
413 | _local_table->get(thread, lookup, stg, &rehash_warning); | |||
414 | update_needs_rehash(rehash_warning); | |||
415 | Symbol* sym = stg.get_res_sym(); | |||
416 | assert((sym == NULL) || sym->refcount() != 0, "found dead symbol"); | |||
417 | return sym; | |||
418 | } | |||
419 | ||||
420 | Symbol* SymbolTable::lookup_only(const char* name, int len, unsigned int& hash) { | |||
421 | hash = hash_symbol(name, len, _alt_hash); | |||
422 | return lookup_common(name, len, hash); | |||
423 | } | |||
424 | ||||
425 | // Suggestion: Push unicode-based lookup all the way into the hashing | |||
426 | // and probing logic, so there is no need for convert_to_utf8 until | |||
427 | // an actual new Symbol* is created. | |||
428 | Symbol* SymbolTable::new_symbol(const jchar* name, int utf16_length) { | |||
429 | int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length); | |||
430 | char stack_buf[ON_STACK_BUFFER_LENGTH]; | |||
431 | if (utf8_length < (int) sizeof(stack_buf)) { | |||
432 | char* chars = stack_buf; | |||
433 | UNICODE::convert_to_utf8(name, utf16_length, chars); | |||
434 | return new_symbol(chars, utf8_length); | |||
435 | } else { | |||
436 | ResourceMark rm; | |||
437 | char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1); | |||
438 | UNICODE::convert_to_utf8(name, utf16_length, chars); | |||
439 | return new_symbol(chars, utf8_length); | |||
440 | } | |||
441 | } | |||
442 | ||||
443 | Symbol* SymbolTable::lookup_only_unicode(const jchar* name, int utf16_length, | |||
444 | unsigned int& hash) { | |||
445 | int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length); | |||
446 | char stack_buf[ON_STACK_BUFFER_LENGTH]; | |||
447 | if (utf8_length < (int) sizeof(stack_buf)) { | |||
448 | char* chars = stack_buf; | |||
449 | UNICODE::convert_to_utf8(name, utf16_length, chars); | |||
450 | return lookup_only(chars, utf8_length, hash); | |||
451 | } else { | |||
452 | ResourceMark rm; | |||
453 | char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1); | |||
454 | UNICODE::convert_to_utf8(name, utf16_length, chars); | |||
455 | return lookup_only(chars, utf8_length, hash); | |||
456 | } | |||
457 | } | |||
458 | ||||
459 | void SymbolTable::new_symbols(ClassLoaderData* loader_data, const constantPoolHandle& cp, | |||
460 | int names_count, const char** names, int* lengths, | |||
461 | int* cp_indices, unsigned int* hashValues) { | |||
462 | // Note that c_heap will be true for non-strong hidden classes even if their | |||
463 | // loader is the boot loader, because they will have a different cld. | |||
464 | bool c_heap = !loader_data->is_the_null_class_loader_data(); | |||
465 | for (int i = 0; i < names_count; i++) { | |||
466 | const char *name = names[i]; | |||
467 | int len = lengths[i]; | |||
468 | unsigned int hash = hashValues[i]; | |||
469 | assert(lookup_shared(name, len, hash) == NULL, "must have checked already"); | |||
470 | Symbol* sym = do_add_if_needed(name, len, hash, c_heap); | |||
471 | assert(sym->refcount() != 0, "lookup should have incremented the count"); | |||
472 | cp->symbol_at_put(cp_indices[i], sym); | |||
473 | } | |||
474 | } | |||
475 | ||||
476 | Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, bool heap) { | |||
477 | SymbolTableLookup lookup(name, len, hash); | |||
478 | SymbolTableGet stg; | |||
479 | bool clean_hint = false; | |||
480 | bool rehash_warning = false; | |||
481 | Symbol* sym = NULL; | |||
482 | Thread* current = Thread::current(); | |||
483 | ||||
484 | do { | |||
485 | // Callers have looked up the symbol once, insert the symbol. | |||
486 | sym = allocate_symbol(name, len, heap); | |||
487 | if (_local_table->insert(current, lookup, sym, &rehash_warning, &clean_hint)) { | |||
488 | break; | |||
489 | } | |||
490 | // In case another thread did a concurrent add, return value already in the table. | |||
491 | // This could fail if the symbol got deleted concurrently, so loop back until success. | |||
492 | if (_local_table->get(current, lookup, stg, &rehash_warning)) { | |||
493 | sym = stg.get_res_sym(); | |||
494 | break; | |||
495 | } | |||
496 | } while(true); | |||
497 | ||||
498 | update_needs_rehash(rehash_warning); | |||
499 | ||||
500 | if (clean_hint) { | |||
501 | mark_has_items_to_clean(); | |||
502 | check_concurrent_work(); | |||
503 | } | |||
504 | ||||
505 | assert((sym == NULL) || sym->refcount() != 0, "found dead symbol"); | |||
506 | return sym; | |||
507 | } | |||
508 | ||||
509 | Symbol* SymbolTable::new_permanent_symbol(const char* name) { | |||
510 | unsigned int hash = 0; | |||
511 | int len = (int)strlen(name); | |||
512 | Symbol* sym = SymbolTable::lookup_only(name, len, hash); | |||
513 | if (sym == NULL) { | |||
514 | sym = do_add_if_needed(name, len, hash, false); | |||
515 | } | |||
516 | if (!sym->is_permanent()) { | |||
517 | sym->make_permanent(); | |||
518 | log_trace_symboltable_helper(sym, "Asked for a permanent symbol, but got a regular one"); | |||
519 | } | |||
520 | return sym; | |||
521 | } | |||
522 | ||||
523 | struct SizeFunc : StackObj { | |||
524 | size_t operator()(Symbol** value) { | |||
525 | assert(value != NULL, "expected valid value"); | |||
526 | assert(*value != NULL, "value should point to a symbol"); | |||
527 | return (*value)->size() * HeapWordSize; | |||
528 | }; | |||
529 | }; | |||
530 | ||||
531 | TableStatistics SymbolTable::get_table_statistics() { | |||
532 | static TableStatistics ts; | |||
533 | SizeFunc sz; | |||
534 | ts = _local_table->statistics_get(Thread::current(), sz, ts); | |||
535 | return ts; | |||
536 | } | |||
537 | ||||
538 | void SymbolTable::print_table_statistics(outputStream* st, | |||
539 | const char* table_name) { | |||
540 | SizeFunc sz; | |||
541 | _local_table->statistics_to(Thread::current(), sz, st, table_name); | |||
542 | } | |||
543 | ||||
544 | // Verification | |||
545 | class VerifySymbols : StackObj { | |||
546 | public: | |||
547 | bool operator()(Symbol** value) { | |||
548 | guarantee(value != NULL, "expected valid value"); | |||
549 | guarantee(*value != NULL, "value should point to a symbol"); | |||
550 | Symbol* sym = *value; | |||
551 | guarantee(sym->equals((const char*)sym->bytes(), sym->utf8_length()), | |||
552 |           "symbol must be internally consistent"); | |||
553 | return true; | |||
554 | }; | |||
555 | }; | |||
556 | ||||
557 | void SymbolTable::verify() { | |||
558 | Thread* thr = Thread::current(); | |||
559 | VerifySymbols vs; | |||
560 | if (!_local_table->try_scan(thr, vs)) { | |||
561 | log_info(symboltable)("verify unavailable at this moment"); | |||
562 | } | |||
563 | } | |||
564 | ||||
565 | // Dumping | |||
566 | class DumpSymbol : StackObj { | |||
567 | Thread* _thr; | |||
568 | outputStream* _st; | |||
569 | public: | |||
570 | DumpSymbol(Thread* thr, outputStream* st) : _thr(thr), _st(st) {} | |||
571 | bool operator()(Symbol** value) { | |||
572 | assert(value != NULL, "expected valid value"); | |||
573 | assert(*value != NULL, "value should point to a symbol"); | |||
574 | Symbol* sym = *value; | |||
575 | const char* utf8_string = (const char*)sym->bytes(); | |||
576 | int utf8_length = sym->utf8_length(); | |||
577 | _st->print("%d %d: ", utf8_length, sym->refcount()); | |||
578 | HashtableTextDump::put_utf8(_st, utf8_string, utf8_length); | |||
579 | _st->cr(); | |||
580 | return true; | |||
581 | }; | |||
582 | }; | |||
583 | ||||
584 | void SymbolTable::dump(outputStream* st, bool verbose) { | |||
585 | if (!verbose) { | |||
586 | print_table_statistics(st, "SymbolTable"); | |||
587 | } else { | |||
588 | Thread* thr = Thread::current(); | |||
589 | ResourceMark rm(thr); | |||
590 | st->print_cr("VERSION: 1.1"); | |||
591 | DumpSymbol ds(thr, st); | |||
592 | if (!_local_table->try_scan(thr, ds)) { | |||
593 | log_info(symboltable)("dump unavailable at this moment"); | |||
594 | } | |||
595 | } | |||
596 | } | |||
597 | ||||
598 | #if INCLUDE_CDS | |||
599 | void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols, | |||
600 | CompactHashtableWriter* writer) { | |||
601 | ArchiveBuilder* builder = ArchiveBuilder::current(); | |||
602 | int len = symbols->length(); | |||
603 | for (int i = 0; i < len; i++) { | |||
604 | Symbol* sym = ArchiveBuilder::get_relocated_symbol(symbols->at(i)); | |||
605 | unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length()); | |||
606 | assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false), | |||
607 |        "must not rehash during dumping"); | |||
608 | sym->set_permanent(); | |||
609 | writer->add(fixed_hash, builder->buffer_to_offset_u4((address)sym)); | |||
610 | } | |||
611 | } | |||
612 | ||||
613 | size_t SymbolTable::estimate_size_for_archive() { | |||
614 | return CompactHashtableWriter::estimate_size(int(_items_count)); | |||
615 | } | |||
616 | ||||
617 | void SymbolTable::write_to_archive(GrowableArray<Symbol*>* symbols) { | |||
618 | CompactHashtableWriter writer(int(_items_count), ArchiveBuilder::symbol_stats()); | |||
619 | copy_shared_symbol_table(symbols, &writer); | |||
620 | if (!DynamicDumpSharedSpaces) { | |||
621 | _shared_table.reset(); | |||
622 | writer.dump(&_shared_table, "symbol"); | |||
623 | } else { | |||
624 | _dynamic_shared_table.reset(); | |||
625 | writer.dump(&_dynamic_shared_table, "symbol"); | |||
626 | } | |||
627 | } | |||
628 | ||||
629 | void SymbolTable::serialize_shared_table_header(SerializeClosure* soc, | |||
630 | bool is_static_archive) { | |||
631 | OffsetCompactHashtable<const char*, Symbol*, symbol_equals_compact_hashtable_entry> * table; | |||
632 | if (is_static_archive) { | |||
633 | table = &_shared_table; | |||
634 | } else { | |||
635 | table = &_dynamic_shared_table; | |||
636 | } | |||
637 | table->serialize_header(soc); | |||
638 | if (soc->writing()) { | |||
639 | // Sanity. Make sure we don't use the shared table at dump time | |||
640 | table->reset(); | |||
641 | } | |||
642 | } | |||
643 | #endif //INCLUDE_CDS | |||
644 | ||||
645 | // Concurrent work | |||
646 | void SymbolTable::grow(JavaThread* jt) { | |||
647 | SymbolTableHash::GrowTask gt(_local_table); | |||
648 | if (!gt.prepare(jt)) { | |||
649 | return; | |||
650 | } | |||
651 | log_trace(symboltable)("Started to grow"); | |||
652 | { | |||
653 | TraceTime timer("Grow", TRACETIME_LOG(Debug, symboltable, perf)); | |||
654 | while (gt.do_task(jt)) { | |||
655 | gt.pause(jt); | |||
656 | { | |||
657 | ThreadBlockInVM tbivm(jt); | |||
658 | } | |||
659 | gt.cont(jt); | |||
660 | } | |||
661 | } | |||
662 | gt.done(jt); | |||
663 | _current_size = table_size(); | |||
664 | log_debug(symboltable)("Grown to size:" SIZE_FORMAT, _current_size); | |||
665 | } | |||
666 | ||||
667 | struct SymbolTableDoDelete : StackObj { | |||
668 | size_t _deleted; | |||
669 | SymbolTableDoDelete() : _deleted(0) {} | |||
670 | void operator()(Symbol** value) { | |||
671 | assert(value != NULL, "expected valid value"); | |||
672 | assert(*value != NULL, "value should point to a symbol"); | |||
673 | Symbol *sym = *value; | |||
674 | assert(sym->refcount() == 0, "refcount"); | |||
675 | _deleted++; | |||
676 | } | |||
677 | }; | |||
678 | ||||
679 | struct SymbolTableDeleteCheck : StackObj { | |||
680 | size_t _processed; | |||
681 | SymbolTableDeleteCheck() : _processed(0) {} | |||
682 | bool operator()(Symbol** value) { | |||
683 | assert(value != NULL, "expected valid value"); | |||
684 | assert(*value != NULL, "value should point to a symbol"); | |||
685 | _processed++; | |||
686 | Symbol *sym = *value; | |||
687 | return (sym->refcount() == 0); | |||
688 | } | |||
689 | }; | |||
690 | ||||
691 | void SymbolTable::clean_dead_entries(JavaThread* jt) { | |||
692 | SymbolTableHash::BulkDeleteTask bdt(_local_table); | |||
693 | if (!bdt.prepare(jt)) { | |||
694 | return; | |||
695 | } | |||
696 | ||||
697 | SymbolTableDeleteCheck stdc; | |||
698 | SymbolTableDoDelete stdd; | |||
699 | { | |||
700 | TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf)); | |||
701 | while (bdt.do_task(jt, stdc, stdd)) { | |||
702 | bdt.pause(jt); | |||
703 | { | |||
704 | ThreadBlockInVM tbivm(jt); | |||
705 | } | |||
706 | bdt.cont(jt); | |||
707 | } | |||
708 | reset_has_items_to_clean(); | |||
709 | bdt.done(jt); | |||
710 | } | |||
711 | ||||
712 | Atomic::add(&_symbols_counted, stdc._processed); | |||
713 | ||||
714 | log_debug(symboltable)("Cleaned " SIZE_FORMAT " of " SIZE_FORMAT, | |||
715 |           stdd._deleted, stdc._processed); | |||
716 | } | |||
717 | ||||
718 | void SymbolTable::check_concurrent_work() { | |||
719 | if (_has_work) { | |||
720 | return; | |||
721 | } | |||
722 | // We should clean/resize if we have | |||
723 | // more items than preferred load factor or | |||
724 | // more dead items than water mark. | |||
725 | if (has_items_to_clean() || (get_load_factor() > PREF_AVG_LIST_LEN)) { | |||
726 | log_debug(symboltable)("Concurrent work triggered, load factor: %f, items to clean: %s", | |||
727 |           get_load_factor(), has_items_to_clean() ? "true" : "false"); | |||
728 | trigger_cleanup(); | |||
729 | } | |||
730 | } | |||
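// Illustrative numbers for the check above: with PREF_AVG_LIST_LEN = 8.0 and a
// hypothetical table of 32768 buckets, get_load_factor() exceeds the threshold
// once _items_count passes 8 * 32768 = 262144, so cleanup/growth is triggered
// even when no dead items have been hinted.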
731 | ||||
732 | void SymbolTable::do_concurrent_work(JavaThread* jt) { | |||
733 | double load_factor = get_load_factor(); | |||
734 | log_debug(symboltable, perf)("Concurrent work, live factor: %g", load_factor); | |||
735 | // We prefer growing, since that also removes dead items | |||
736 | if (load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) { | |||
737 | grow(jt); | |||
738 | } else { | |||
739 | clean_dead_entries(jt); | |||
740 | } | |||
741 | _has_work = false; | |||
742 | } | |||
743 | ||||
744 | // Rehash | |||
745 | bool SymbolTable::do_rehash() { | |||
746 | if (!_local_table->is_safepoint_safe()) { | |||
747 | return false; | |||
748 | } | |||
749 | ||||
750 | // We use current size | |||
751 | size_t new_size = _local_table->get_size_log2(Thread::current()); | |||
752 | SymbolTableHash* new_table = new SymbolTableHash(new_size, END_SIZE, REHASH_LEN); | |||
753 | // Use alt hash from now on | |||
754 | _alt_hash = true; | |||
755 | if (!_local_table->try_move_nodes_to(Thread::current(), new_table)) { | |||
756 | _alt_hash = false; | |||
757 | delete new_table; | |||
758 | return false; | |||
759 | } | |||
760 | ||||
761 | // free old table | |||
762 | delete _local_table; | |||
763 | _local_table = new_table; | |||
764 | ||||
765 | return true; | |||
766 | } | |||
767 | ||||
768 | void SymbolTable::rehash_table() { | |||
769 | static bool rehashed = false; | |||
770 | log_debug(symboltable)("Table imbalanced, rehashing called."); | |||
771 | ||||
772 | // Grow instead of rehash. | |||
773 | if (get_load_factor() > PREF_AVG_LIST_LEN && | |||
774 | !_local_table->is_max_size_reached()) { | |||
775 | log_debug(symboltable)("Choosing growing over rehashing."); | |||
776 | trigger_cleanup(); | |||
777 | _needs_rehashing = false; | |||
778 | return; | |||
779 | } | |||
780 | ||||
781 | // Already rehashed. | |||
782 | if (rehashed) { | |||
783 | log_warning(symboltable)("Rehashing already done, still long lists."); | |||
784 | trigger_cleanup(); | |||
785 | _needs_rehashing = false; | |||
786 | return; | |||
787 | } | |||
788 | ||||
789 | _alt_hash_seed = AltHashing::compute_seed(); | |||
790 | ||||
791 | if (do_rehash()) { | |||
792 | rehashed = true; | |||
793 | } else { | |||
794 | log_info(symboltable)("Resizes in progress rehashing skipped."); | |||
795 | } | |||
796 | ||||
797 | _needs_rehashing = false; | |||
798 | } | |||
799 | ||||
800 | //--------------------------------------------------------------------------- | |||
801 | // Non-product code | |||
802 | ||||
803 | #ifndef PRODUCT | |||
804 | ||||
805 | class HistogramIterator : StackObj { | |||
806 | public: | |||
807 | static const size_t results_length = 100; | |||
808 | size_t counts[results_length]; | |||
809 | size_t sizes[results_length]; | |||
810 | size_t total_size; | |||
811 | size_t total_count; | |||
812 | size_t total_length; | |||
813 | size_t max_length; | |||
814 | size_t out_of_range_count; | |||
815 | size_t out_of_range_size; | |||
816 | HistogramIterator() : total_size(0), total_count(0), total_length(0), | |||
817 | max_length(0), out_of_range_count(0), out_of_range_size(0) { | |||
818 | // initialize results to zero | |||
819 | for (size_t i = 0; i < results_length; i++) { | |||
820 | counts[i] = 0; | |||
821 | sizes[i] = 0; | |||
822 | } | |||
823 | } | |||
824 | bool operator()(Symbol** value) { | |||
825 | assert(value != NULL, "expected valid value"); | |||
826 | assert(*value != NULL, "value should point to a symbol"); | |||
827 | Symbol* sym = *value; | |||
828 | size_t size = sym->size(); | |||
829 | size_t len = sym->utf8_length(); | |||
830 | if (len < results_length) { | |||
831 | counts[len]++; | |||
832 | sizes[len] += size; | |||
833 | } else { | |||
834 | out_of_range_count++; | |||
835 | out_of_range_size += size; | |||
836 | } | |||
837 | total_count++; | |||
838 | total_size += size; | |||
839 | total_length += len; | |||
840 | max_length = MAX2(max_length, len); | |||
841 | ||||
842 | return true; | |||
843 | }; | |||
844 | }; | |||
845 | ||||
846 | void SymbolTable::print_histogram() { | |||
847 | HistogramIterator hi; | |||
848 | _local_table->do_scan(Thread::current(), hi); | |||
849 | tty->print_cr("Symbol Table Histogram:"); | |||
850 | tty->print_cr(" Total number of symbols " SIZE_FORMAT_W(7), hi.total_count); | |||
851 | tty->print_cr(" Total size in memory " SIZE_FORMAT_W(7) "K", | |||
852 | (hi.total_size * wordSize) / 1024); | |||
853 | tty->print_cr(" Total counted " SIZE_FORMAT_W(7), _symbols_counted); | |||
854 | tty->print_cr(" Total removed " SIZE_FORMAT_W(7), _symbols_removed); | |||
855 | if (_symbols_counted > 0) { | |||
856 | tty->print_cr(" Percent removed %3.2f", | |||
857 | ((float)_symbols_removed / _symbols_counted) * 100); | |||
858 | } | |||
859 | tty->print_cr(" Reference counts " SIZE_FORMAT_W(7), Symbol::_total_count); | |||
860 | tty->print_cr(" Symbol arena used " SIZE_FORMAT_W(7) "K", arena()->used() / 1024); | |||
861 | tty->print_cr(" Symbol arena size " SIZE_FORMAT_W(7) "K", arena()->size_in_bytes() / 1024); | |||
862 | tty->print_cr(" Total symbol length " SIZE_FORMAT_W(7), hi.total_length); | |||
863 | tty->print_cr(" Maximum symbol length " SIZE_FORMAT_W(7), hi.max_length); | |||
864 | tty->print_cr(" Average symbol length %7.2f", ((float)hi.total_length / hi.total_count)); | |||
865 | tty->print_cr(" Symbol length histogram:"); | |||
866 | tty->print_cr(" %6s %10s %10s", "Length", "#Symbols", "Size"); | |||
867 | for (size_t i = 0; i < hi.results_length; i++) { | |||
868 | if (hi.counts[i] > 0) { | |||
869 | tty->print_cr(" " SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K", | |||
870 | i, hi.counts[i], (hi.sizes[i] * wordSize) / 1024); | |||
871 | } | |||
872 | } | |||
873 | tty->print_cr(" >=" SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K\n", | |||
874 | hi.results_length, hi.out_of_range_count, (hi.out_of_range_size*wordSize) / 1024); | |||
875 | } | |||
876 | #endif // PRODUCT | |||
877 | ||||
878 | // Utility for dumping symbols | |||
879 | SymboltableDCmd::SymboltableDCmd(outputStream* output, bool heap) : | |||
880 | DCmdWithParser(output, heap), | |||
881 | _verbose("-verbose", "Dump the content of each symbol in the table", | |||
882 | "BOOLEAN", false, "false") { | |||
883 | _dcmdparser.add_dcmd_option(&_verbose); | |||
884 | } | |||
885 | ||||
886 | void SymboltableDCmd::execute(DCmdSource source, TRAPS) { | |||
887 | VM_DumpHashtable dumper(output(), VM_DumpHashtable::DumpSymbols, | |||
888 | _verbose.value()); | |||
889 | VMThread::execute(&dumper); | |||
890 | } |
1 | /* |
2 | * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP |
26 | #define SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP |
27 | |
28 | // No concurrentHashTableTasks.hpp |
29 | |
30 | #include "runtime/atomic.hpp" |
31 | #include "utilities/globalDefinitions.hpp" |
32 | #include "utilities/concurrentHashTable.inline.hpp" |
33 | |
34 | // This inline file contains BulkDeleteTask and GrowTask, which are both bucket
35 | // operations and are serialized with each other.
36 | |
37 | // Base class for pause and/or parallel bulk operations. |
38 | template <typename CONFIG, MEMFLAGS F> |
39 | class ConcurrentHashTable<CONFIG, F>::BucketsOperation { |
40 | protected: |
41 | ConcurrentHashTable<CONFIG, F>* _cht; |
42 | |
43 | // Default size of _task_size_log2 |
44 | static const size_t DEFAULT_TASK_SIZE_LOG2 = 12; |
45 | |
46 | // The table is split into ranges; every increment of _next_to_claim claims one range.
47 | volatile size_t _next_to_claim; |
48 | size_t _task_size_log2; // Log2 of the number of buckets per task.
49 | size_t _stop_task; // Last task |
50 | size_t _size_log2; // Table size. |
51 | bool _is_mt; |
52 | |
53 | BucketsOperation(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false) |
54 | : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2), |
55 | _stop_task(0), _size_log2(0), _is_mt(is_mt) {} |
56 | |
57 |   // Returns true if the caller succeeded in claiming the range start -> (stop-1).
58 | bool claim(size_t* start, size_t* stop) { |
59 | size_t claimed = Atomic::fetch_and_add(&_next_to_claim, 1u); |
60 | if (claimed >= _stop_task) { |
61 | return false; |
62 | } |
63 | *start = claimed * (((size_t)1) << _task_size_log2); |
64 | *stop = ((*start) + (((size_t)1) << _task_size_log2)); |
65 | return true; |
66 | } |
67 | |
68 | // Calculate starting values. |
69 | void setup(Thread* thread) { |
70 | thread_owns_resize_lock(thread); |
71 | _size_log2 = _cht->_table->_log2_size; |
72 | _task_size_log2 = MIN2(_task_size_log2, _size_log2); |
73 | size_t tmp = _size_log2 > _task_size_log2 ? |
74 | _size_log2 - _task_size_log2 : 0; |
75 | _stop_task = (((size_t)1) << tmp); |
76 | } |
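  // Illustrative example (not from the original file): with the default
  // _task_size_log2 of 12 and a table of _size_log2 == 15 (32768 buckets),
  // setup() computes _stop_task = 1 << (15 - 12) = 8, so there are 8 claimable
  // ranges; each successful claim() then covers 1 << 12 = 4096 buckets, e.g.
  // the claim that returns claimed == 2 yields *start = 8192 and *stop = 12288.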
77 | |
78 | // Returns false if all ranges are claimed. |
79 | bool have_more_work() { |
80 | return Atomic::load_acquire(&_next_to_claim) >= _stop_task; |
81 | } |
82 | |
83 | void thread_owns_resize_lock(Thread* thread) { |
84 |     assert(BucketsOperation::_cht->_resize_lock_owner == thread,
85 |            "Should be locked by me");
86 |     assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
87 |            "Operations lock not held");
88 | } |
89 | void thread_owns_only_state_lock(Thread* thread) { |
90 |     assert(BucketsOperation::_cht->_resize_lock_owner == thread,
91 |            "Should be locked by me");
92 |     assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
93 |            "Operations lock held");
94 | } |
95 | void thread_do_not_own_resize_lock(Thread* thread) { |
96 |     assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
97 |            "Operations lock held");
98 |     assert(BucketsOperation::_cht->_resize_lock_owner != thread,
99 |            "Should not be locked by me");
100 | } |
101 | |
102 | public: |
103 | // Pauses for safepoint |
104 | void pause(Thread* thread) { |
105 | // This leaves internal state locked. |
106 | this->thread_owns_resize_lock(thread); |
107 | BucketsOperation::_cht->_resize_lock->unlock(); |
108 | this->thread_owns_only_state_lock(thread); |
109 | } |
110 | |
111 | // Continues after safepoint. |
112 | void cont(Thread* thread) { |
113 | this->thread_owns_only_state_lock(thread); |
114 | // If someone slips in here directly after safepoint. |
115 | while (!BucketsOperation::_cht->_resize_lock->try_lock()) |
116 | { /* for ever */ }; |
117 | this->thread_owns_resize_lock(thread); |
118 | } |
119 | }; |
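// Illustrative sketch (not from the original file) of how pause()/cont() are
// meant to bracket a safepoint while a bulk operation is in flight, assuming
// `op` is an instance of a BucketsOperation subclass and `thread` the worker:
//
//   op.pause(thread);  // releases the mutex but keeps _resize_lock_owner set
//   /* ... safepoint may occur here ... */
//   op.cont(thread);   // re-acquires the mutex and resumes claiming ranges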
120 | |
121 | // For doing pausable/parallel bulk delete. |
122 | template <typename CONFIG, MEMFLAGS F> |
123 | class ConcurrentHashTable<CONFIG, F>::BulkDeleteTask : |
124 | public BucketsOperation |
125 | { |
126 | public: |
127 | BulkDeleteTask(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false) |
128 | : BucketsOperation(cht, is_mt) { |
129 | } |
130 |   // prepare() must be called before starting the task.
131 | bool prepare(Thread* thread) { |
132 | bool lock = BucketsOperation::_cht->try_resize_lock(thread); |
133 | if (!lock) { |
134 | return false; |
135 | } |
136 | this->setup(thread); |
137 | return true; |
138 | } |
139 | |
140 |   // Does one range, destroying all nodes for which EVALUATE_FUNC returns true;
141 |   // DELETE_FUNC is called before destruction. Returns true if there is more work.
142 | template <typename EVALUATE_FUNC, typename DELETE_FUNC> |
143 | bool do_task(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) { |
144 | size_t start, stop; |
145 |     assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
146 |            "Should be locked");
147 | if (!this->claim(&start, &stop)) { |
148 | return false; |
149 | } |
150 | BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop, |
151 | eval_f, del_f, |
152 | BucketsOperation::_is_mt); |
153 |     assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
154 |            "Should be locked");
155 | return true; |
156 | } |
157 | |
158 | // Must be called after ranges are done. |
159 | void done(Thread* thread) { |
160 | this->thread_owns_resize_lock(thread); |
161 | BucketsOperation::_cht->unlock_resize_lock(thread); |
162 | this->thread_do_not_own_resize_lock(thread); |
163 | } |
164 | }; |
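// Illustrative sketch (not from the original file): the prepare/do_task/done
// protocol for a single-threaded bulk delete, where `cht` is the table,
// `thread` the current Thread*, and `eval_f`/`del_f` are hypothetical functors
// (eval_f returns true for values to remove, del_f runs before destruction):
//
//   typename ConcurrentHashTable<CONFIG, F>::BulkDeleteTask task(cht);
//   if (task.prepare(thread)) {
//     while (task.do_task(thread, eval_f, del_f)) {
//       // one claimed range processed; loop until claim() fails
//     }
//     task.done(thread);
//   }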
165 | |
166 | template <typename CONFIG, MEMFLAGS F> |
167 | class ConcurrentHashTable<CONFIG, F>::GrowTask : |
168 | public BucketsOperation |
169 | { |
170 | public: |
171 | GrowTask(ConcurrentHashTable<CONFIG, F>* cht) : BucketsOperation(cht) { |
172 | } |
173 |   // prepare() must be called before starting the task.
174 | bool prepare(Thread* thread) { |
175 | if (!BucketsOperation::_cht->internal_grow_prolog( |
176 | thread, BucketsOperation::_cht->_log2_size_limit)) { |
177 | return false; |
178 | } |
179 | this->setup(thread); |
180 | return true; |
181 | } |
182 | |
183 | // Re-sizes a portion of the table. Returns true if there is more work. |
184 | bool do_task(Thread* thread) { |
185 | size_t start, stop; |
186 |     assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
187 |            "Should be locked");
188 | if (!this->claim(&start, &stop)) { |
189 | return false; |
190 | } |
191 | BucketsOperation::_cht->internal_grow_range(thread, start, stop); |
192 |     assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
193 |            "Should be locked");
194 | return true; |
195 | } |
196 | |
197 | // Must be called after do_task returns false. |
198 | void done(Thread* thread) { |
199 | this->thread_owns_resize_lock(thread); |
200 | BucketsOperation::_cht->internal_grow_epilog(thread); |
201 | this->thread_do_not_own_resize_lock(thread); |
202 | } |
203 | }; |
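// Illustrative sketch (not from the original file): a GrowTask follows the
// same claim-based protocol, but each do_task() call splits one claimed range
// of old buckets into the doubled table:
//
//   typename ConcurrentHashTable<CONFIG, F>::GrowTask task(cht);
//   if (task.prepare(thread)) {      // internal_grow_prolog(): lock + new table
//     while (task.do_task(thread)) { }
//     task.done(thread);             // finishes the resize (internal_grow_epilog())
//   }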
204 | |
205 | #endif // SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP |
1 | /* | ||||||||
2 | * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. | ||||||||
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | ||||||||
4 | * | ||||||||
5 | * This code is free software; you can redistribute it and/or modify it | ||||||||
6 | * under the terms of the GNU General Public License version 2 only, as | ||||||||
7 | * published by the Free Software Foundation. | ||||||||
8 | * | ||||||||
9 | * This code is distributed in the hope that it will be useful, but WITHOUT | ||||||||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||||||||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||||||||
12 | * version 2 for more details (a copy is included in the LICENSE file that | ||||||||
13 | * accompanied this code). | ||||||||
14 | * | ||||||||
15 | * You should have received a copy of the GNU General Public License version | ||||||||
16 | * 2 along with this work; if not, write to the Free Software Foundation, | ||||||||
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||||
18 | * | ||||||||
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | ||||||||
20 | * or visit www.oracle.com if you need additional information or have any | ||||||||
21 | * questions. | ||||||||
22 | * | ||||||||
23 | */ | ||||||||
24 | |||||||||
25 | #ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP | ||||||||
26 | #define SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP | ||||||||
27 | |||||||||
28 | #include "utilities/concurrentHashTable.hpp" | ||||||||
29 | |||||||||
30 | #include "memory/allocation.inline.hpp" | ||||||||
31 | #include "runtime/atomic.hpp" | ||||||||
32 | #include "runtime/orderAccess.hpp" | ||||||||
33 | #include "runtime/prefetch.inline.hpp" | ||||||||
34 | #include "utilities/globalCounter.inline.hpp" | ||||||||
35 | #include "utilities/numberSeq.hpp" | ||||||||
36 | #include "utilities/spinYield.hpp" | ||||||||
37 | |||||||||
38 | // 2^30 = 1G buckets | ||||||||
39 | #define SIZE_BIG_LOG2 30
40 | // 2^2 = 4 buckets | ||||||||
41 | #define SIZE_SMALL_LOG2 2
42 | |||||||||
43 | // Number from spinYield.hpp. In some loops SpinYield would be unfair. | ||||||||
44 | #define SPINPAUSES_PER_YIELD 8192
45 | |||||||||
46 | #ifdef ASSERT
47 | #ifdef _LP64
48 | // Two low bits are not usable.
49 | static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
50 | #else | ||||||||
51 | // Two low bits are not usable. | ||||||||
52 | static const void* POISON_PTR = (void*)0xffbadbac; | ||||||||
53 | #endif | ||||||||
54 | #endif | ||||||||
55 | |||||||||
56 | // Node | ||||||||
57 | template <typename CONFIG, MEMFLAGS F> | ||||||||
58 | inline typename ConcurrentHashTable<CONFIG, F>::Node* | ||||||||
59 | ConcurrentHashTable<CONFIG, F>:: | ||||||||
60 | Node::next() const | ||||||||
61 | { | ||||||||
62 | return Atomic::load_acquire(&_next); | ||||||||
63 | } | ||||||||
64 | |||||||||
65 | // Bucket | ||||||||
66 | template <typename CONFIG, MEMFLAGS F> | ||||||||
67 | inline typename ConcurrentHashTable<CONFIG, F>::Node* | ||||||||
68 | ConcurrentHashTable<CONFIG, F>:: | ||||||||
69 | Bucket::first_raw() const | ||||||||
70 | { | ||||||||
71 | return Atomic::load_acquire(&_first); | ||||||||
72 | } | ||||||||
73 | |||||||||
74 | template <typename CONFIG, MEMFLAGS F> | ||||||||
75 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
76 | Bucket::release_assign_node_ptr( | ||||||||
77 | typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst, | ||||||||
78 | typename ConcurrentHashTable<CONFIG, F>::Node* node) const | ||||||||
79 | { | ||||||||
80 |   // Due to this assert, this method is not static.
81 |   assert(is_locked(), "Must be locked.");
82 | Node** tmp = (Node**)dst; | ||||||||
83 | Atomic::release_store(tmp, clear_set_state(node, *dst)); | ||||||||
84 | } | ||||||||
85 | |||||||||
86 | template <typename CONFIG, MEMFLAGS F> | ||||||||
87 | inline typename ConcurrentHashTable<CONFIG, F>::Node* | ||||||||
88 | ConcurrentHashTable<CONFIG, F>:: | ||||||||
89 | Bucket::first() const | ||||||||
90 | { | ||||||||
91 |   // We strip the state bits before returning the ptr.
92 | return clear_state(Atomic::load_acquire(&_first)); | ||||||||
93 | } | ||||||||
94 | |||||||||
95 | template <typename CONFIG, MEMFLAGS F> | ||||||||
96 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
97 | Bucket::have_redirect() const | ||||||||
98 | { | ||||||||
99 | return is_state(first_raw(), STATE_REDIRECT_BIT); | ||||||||
100 | } | ||||||||
101 | |||||||||
102 | template <typename CONFIG, MEMFLAGS F> | ||||||||
103 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
104 | Bucket::is_locked() const | ||||||||
105 | { | ||||||||
106 | return is_state(first_raw(), STATE_LOCK_BIT); | ||||||||
107 | } | ||||||||
108 | |||||||||
109 | template <typename CONFIG, MEMFLAGS F> | ||||||||
110 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
111 | Bucket::lock() | ||||||||
112 | { | ||||||||
113 | int i = 0; | ||||||||
114 | // SpinYield would be unfair here | ||||||||
115 | while (!this->trylock()) { | ||||||||
116 |     if ((++i) == SPINPAUSES_PER_YIELD) {
117 |       // On a contemporary OS, yielding will give the CPU to another runnable
118 |       // thread if there is no free CPU available.
119 | os::naked_yield(); | ||||||||
120 | i = 0; | ||||||||
121 | } else { | ||||||||
122 | SpinPause(); | ||||||||
123 | } | ||||||||
124 | } | ||||||||
125 | } | ||||||||
126 | |||||||||
127 | template <typename CONFIG, MEMFLAGS F> | ||||||||
128 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
129 | Bucket::release_assign_last_node_next( | ||||||||
130 | typename ConcurrentHashTable<CONFIG, F>::Node* node) | ||||||||
131 | { | ||||||||
132 |   assert(is_locked(), "Must be locked.");
133 |   Node* const volatile * ret = first_ptr();
134 |   while (clear_state(*ret) != NULL) {
135 | ret = clear_state(*ret)->next_ptr(); | ||||||||
136 | } | ||||||||
137 | release_assign_node_ptr(ret, node); | ||||||||
138 | } | ||||||||
139 | |||||||||
140 | template <typename CONFIG, MEMFLAGS F> | ||||||||
141 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
142 | Bucket::cas_first(typename ConcurrentHashTable<CONFIG, F>::Node* node, | ||||||||
143 | typename ConcurrentHashTable<CONFIG, F>::Node* expect | ||||||||
144 | ) | ||||||||
145 | { | ||||||||
146 | if (is_locked()) { | ||||||||
147 | return false; | ||||||||
148 | } | ||||||||
149 | if (Atomic::cmpxchg(&_first, expect, node) == expect) { | ||||||||
150 | return true; | ||||||||
151 | } | ||||||||
152 | return false; | ||||||||
153 | } | ||||||||
154 | |||||||||
155 | template <typename CONFIG, MEMFLAGS F> | ||||||||
156 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
157 | Bucket::trylock() | ||||||||
158 | { | ||||||||
159 | if (is_locked()) { | ||||||||
160 | return false; | ||||||||
161 | } | ||||||||
162 | // We will expect a clean first pointer. | ||||||||
163 | Node* tmp = first(); | ||||||||
164 | if (Atomic::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) { | ||||||||
165 | return true; | ||||||||
166 | } | ||||||||
167 | return false; | ||||||||
168 | } | ||||||||
169 | |||||||||
170 | template <typename CONFIG, MEMFLAGS F> | ||||||||
171 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
172 | Bucket::unlock() | ||||||||
173 | { | ||||||||
174 |   assert(is_locked(), "Must be locked.");
175 |   assert(!have_redirect(),
176 |          "Unlocking a bucket after it has reached terminal state.");
177 | Atomic::release_store(&_first, clear_state(first())); | ||||||||
178 | } | ||||||||
179 | |||||||||
180 | template <typename CONFIG, MEMFLAGS F> | ||||||||
181 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
182 | Bucket::redirect() | ||||||||
183 | { | ||||||||
184 |   assert(is_locked(), "Must be locked.");
185 | Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT)); | ||||||||
186 | } | ||||||||
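// Illustrative note (not from the original file): the two unusable low bits of
// a node pointer (see the POISON_PTR comment above) carry the state tags
// STATE_LOCK_BIT and STATE_REDIRECT_BIT on _first; trylock() CASes a "clean"
// first pointer to the same pointer with the lock bit set, and
// first()/clear_state() strip the tag bits before the pointer is dereferenced.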
187 | |||||||||
188 | // InternalTable | ||||||||
189 | template <typename CONFIG, MEMFLAGS F> | ||||||||
190 | inline ConcurrentHashTable<CONFIG, F>:: | ||||||||
191 | InternalTable::InternalTable(size_t log2_size) | ||||||||
192 | : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size), | ||||||||
193 | _hash_mask(~(~((size_t)0) << _log2_size)) | ||||||||
194 | { | ||||||||
195 |   assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
196 |          "Bad size");
197 |   _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F);
198 | // Use placement new for each element instead of new[] which could use more | ||||||||
199 | // memory than allocated. | ||||||||
200 | for (size_t i = 0; i < _size; ++i) { | ||||||||
201 | new (_buckets + i) Bucket(); | ||||||||
202 | } | ||||||||
203 | } | ||||||||
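// Illustrative example (not from the original file): for log2_size == 4 the
// initializer above gives _size = 1 << 4 = 16 buckets and
// _hash_mask = ~(~0 << 4) = 0xF, so a bucket index can be derived as
// (hash & _hash_mask).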
204 | |||||||||
205 | template <typename CONFIG, MEMFLAGS F> | ||||||||
206 | inline ConcurrentHashTable<CONFIG, F>:: | ||||||||
207 | InternalTable::~InternalTable() | ||||||||
208 | { | ||||||||
209 |   FREE_C_HEAP_ARRAY(Bucket, _buckets);
210 | } | ||||||||
211 | |||||||||
212 | // ScopedCS | ||||||||
213 | template <typename CONFIG, MEMFLAGS F> | ||||||||
214 | inline ConcurrentHashTable<CONFIG, F>:: | ||||||||
215 | ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht) | ||||||||
216 | : _thread(thread), | ||||||||
217 | _cht(cht), | ||||||||
218 | _cs_context(GlobalCounter::critical_section_begin(_thread)) | ||||||||
219 | { | ||||||||
220 | // This version is published now. | ||||||||
221 |   if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
222 |     Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
223 | } | ||||||||
224 | } | ||||||||
225 | |||||||||
226 | template <typename CONFIG, MEMFLAGS F> | ||||||||
227 | inline ConcurrentHashTable<CONFIG, F>:: | ||||||||
228 | ScopedCS::~ScopedCS() | ||||||||
229 | { | ||||||||
230 | GlobalCounter::critical_section_end(_thread, _cs_context); | ||||||||
231 | } | ||||||||
232 | |||||||||
233 | template <typename CONFIG, MEMFLAGS F> | ||||||||
234 | template <typename LOOKUP_FUNC> | ||||||||
235 | inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>:: | ||||||||
236 | MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint) | ||||||||
237 | { | ||||||||
238 | return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint); | ||||||||
239 | } | ||||||||
240 | |||||||||
241 | // HaveDeletables | ||||||||
242 | template <typename CONFIG, MEMFLAGS F> | ||||||||
243 | template <typename EVALUATE_FUNC> | ||||||||
244 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
245 | HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket, | ||||||||
246 | EVALUATE_FUNC& eval_f, | ||||||||
247 | Bucket* prefetch_bucket) | ||||||||
248 | { | ||||||||
249 | // Instantiated for pointer type (true), so we can use prefetch. | ||||||||
250 |   // When visiting all Nodes, doing this prefetch gives around a 30% improvement.
251 |   Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
252 |   for (Node* next = bucket->first(); next != NULL; next = next->next()) {
253 |     if (pref != NULL) {
254 | Prefetch::read(*pref->value(), 0); | ||||||||
255 | pref = pref->next(); | ||||||||
256 | } | ||||||||
257 | // Read next() Node* once. May be racing with a thread moving the next | ||||||||
258 | // pointers. | ||||||||
259 | Node* next_pref = next->next(); | ||||||||
260 |     if (next_pref != NULL) {
261 | Prefetch::read(*next_pref->value(), 0); | ||||||||
262 | } | ||||||||
263 | if (eval_f(next->value())) { | ||||||||
264 | return true; | ||||||||
265 | } | ||||||||
266 | } | ||||||||
267 | return false; | ||||||||
268 | } | ||||||||
269 | |||||||||
270 | template <typename CONFIG, MEMFLAGS F> | ||||||||
271 | template <bool b, typename EVALUATE_FUNC> | ||||||||
272 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
273 | HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket, | ||||||||
274 | EVALUATE_FUNC& eval_f, | ||||||||
275 | Bucket* preb) | ||||||||
276 | { | ||||||||
277 |   for (Node* next = bucket->first(); next != NULL; next = next->next()) {
278 | if (eval_f(next->value())) { | ||||||||
279 | return true; | ||||||||
280 | } | ||||||||
281 | } | ||||||||
282 | return false; | ||||||||
283 | } | ||||||||
284 | |||||||||
285 | // ConcurrentHashTable | ||||||||
286 | template <typename CONFIG, MEMFLAGS F> | ||||||||
287 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
288 | write_synchonize_on_visible_epoch(Thread* thread) | ||||||||
289 | { | ||||||||
290 |   assert(_resize_lock_owner == thread, "Re-size lock not held");
291 | OrderAccess::fence(); // Prevent below load from floating up. | ||||||||
292 | // If no reader saw this version we can skip write_synchronize. | ||||||||
293 | if (Atomic::load_acquire(&_invisible_epoch) == thread) { | ||||||||
294 | return; | ||||||||
295 | } | ||||||||
296 |   assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
297 |   // We set this/next version that we are synchronizing for as not published.
298 |   // A reader will zero this flag if it reads this/next version.
299 | Atomic::release_store(&_invisible_epoch, thread); | ||||||||
300 | GlobalCounter::write_synchronize(); | ||||||||
301 | } | ||||||||
302 | |||||||||
303 | template <typename CONFIG, MEMFLAGS F> | ||||||||
304 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
305 | try_resize_lock(Thread* locker) | ||||||||
306 | { | ||||||||
307 | if (_resize_lock->try_lock()) { | ||||||||
308 |     if (_resize_lock_owner != NULL) {
309 |       assert(locker != _resize_lock_owner, "Already own lock");
310 | // We got mutex but internal state is locked. | ||||||||
311 | _resize_lock->unlock(); | ||||||||
312 | return false; | ||||||||
313 | } | ||||||||
314 | } else { | ||||||||
315 | return false; | ||||||||
316 | } | ||||||||
317 | _invisible_epoch = 0; | ||||||||
318 | _resize_lock_owner = locker; | ||||||||
319 | return true; | ||||||||
320 | } | ||||||||
321 | |||||||||
322 | template <typename CONFIG, MEMFLAGS F> | ||||||||
323 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
324 | lock_resize_lock(Thread* locker) | ||||||||
325 | { | ||||||||
326 | size_t i = 0; | ||||||||
327 |   // If the lock is held by some other thread, the chances that it is
328 |   // released quickly are low, so we prefer yielding.
329 | SpinYield yield(1, 512); | ||||||||
330 | do { | ||||||||
331 | _resize_lock->lock_without_safepoint_check(); | ||||||||
332 |     // If the holder of the lock dropped the mutex for a safepoint, the mutex
333 |     // might be unlocked while _resize_lock_owner still contains the owner.
334 |     if (_resize_lock_owner != NULL) {
335 |       assert(locker != _resize_lock_owner, "Already own lock");
336 | // We got mutex but internal state is locked. | ||||||||
337 | _resize_lock->unlock(); | ||||||||
338 | yield.wait(); | ||||||||
339 | } else { | ||||||||
340 | break; | ||||||||
341 | } | ||||||||
342 | } while(true); | ||||||||
343 | _resize_lock_owner = locker; | ||||||||
344 | _invisible_epoch = 0; | ||||||||
345 | } | ||||||||
346 | |||||||||
347 | template <typename CONFIG, MEMFLAGS F> | ||||||||
348 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
349 | unlock_resize_lock(Thread* locker) | ||||||||
350 | { | ||||||||
351 | _invisible_epoch = 0; | ||||||||
352 |   assert(locker == _resize_lock_owner, "Not unlocked by locker.");
353 |   _resize_lock_owner = NULL;
354 | _resize_lock->unlock(); | ||||||||
355 | } | ||||||||
356 | |||||||||
357 | template <typename CONFIG, MEMFLAGS F> | ||||||||
358 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
359 | free_nodes() | ||||||||
360 | { | ||||||||
361 | // We assume we are not MT during freeing. | ||||||||
362 | for (size_t node_it = 0; node_it < _table->_size; node_it++) { | ||||||||
363 | Bucket* bucket = _table->get_buckets() + node_it; | ||||||||
364 | Node* node = bucket->first(); | ||||||||
365 |     while (node != NULL) {
366 | Node* free_node = node; | ||||||||
367 | node = node->next(); | ||||||||
368 | Node::destroy_node(_context, free_node); | ||||||||
369 | } | ||||||||
370 | } | ||||||||
371 | } | ||||||||
372 | |||||||||
373 | template <typename CONFIG, MEMFLAGS F> | ||||||||
374 | inline typename ConcurrentHashTable<CONFIG, F>::InternalTable* | ||||||||
375 | ConcurrentHashTable<CONFIG, F>:: | ||||||||
376 | get_table() const | ||||||||
377 | { | ||||||||
378 | return Atomic::load_acquire(&_table); | ||||||||
379 | } | ||||||||
380 | |||||||||
381 | template <typename CONFIG, MEMFLAGS F> | ||||||||
382 | inline typename ConcurrentHashTable<CONFIG, F>::InternalTable* | ||||||||
383 | ConcurrentHashTable<CONFIG, F>:: | ||||||||
384 | get_new_table() const | ||||||||
385 | { | ||||||||
386 | return Atomic::load_acquire(&_new_table); | ||||||||
387 | } | ||||||||
388 | |||||||||
389 | template <typename CONFIG, MEMFLAGS F> | ||||||||
390 | inline typename ConcurrentHashTable<CONFIG, F>::InternalTable* | ||||||||
391 | ConcurrentHashTable<CONFIG, F>:: | ||||||||
392 | set_table_from_new() | ||||||||
393 | { | ||||||||
394 | InternalTable* old_table = _table; | ||||||||
395 | // Publish the new table. | ||||||||
396 | Atomic::release_store(&_table, _new_table); | ||||||||
397 | // All must see this. | ||||||||
398 | GlobalCounter::write_synchronize(); | ||||||||
399 | // _new_table not read any more. | ||||||||
400 |   _new_table = NULL;
401 |   DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
402 | return old_table; | ||||||||
403 | } | ||||||||
404 | |||||||||
405 | template <typename CONFIG, MEMFLAGS F> | ||||||||
406 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
407 | internal_grow_range(Thread* thread, size_t start, size_t stop) | ||||||||
408 | { | ||||||||
409 |   assert(stop <= _table->_size, "Outside backing array");
410 |   assert(_new_table != NULL, "Grow not properly set up before start");
411 |   // The bucket state is also copied here, so all buckets in the new table will
412 |   // be locked. We call the sibling buckets even/odd, where even has high bit 0
413 |   // and odd has high bit 1.
414 | for (size_t even_index = start; even_index < stop; even_index++) { | ||||||||
415 | Bucket* bucket = _table->get_bucket(even_index); | ||||||||
416 | |||||||||
417 | bucket->lock(); | ||||||||
418 | |||||||||
419 | size_t odd_index = even_index + _table->_size; | ||||||||
420 | _new_table->get_buckets()[even_index] = *bucket; | ||||||||
421 | _new_table->get_buckets()[odd_index] = *bucket; | ||||||||
422 | |||||||||
423 |     // Lockers are moved to the new table, where they will wait until unlock() below.
424 | bucket->redirect(); /* Must release stores above */ | ||||||||
425 | |||||||||
426 | // When this is done we have separated the nodes into corresponding buckets | ||||||||
427 | // in new table. | ||||||||
428 | if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) { | ||||||||
429 | // If bucket is empty, unzip does nothing. | ||||||||
430 | // We must make sure readers go to new table before we poison the bucket. | ||||||||
431 |       DEBUG_ONLY(GlobalCounter::write_synchronize();)
432 | } | ||||||||
433 | |||||||||
434 | // Unlock for writes into the new table buckets. | ||||||||
435 | _new_table->get_bucket(even_index)->unlock(); | ||||||||
436 | _new_table->get_bucket(odd_index)->unlock(); | ||||||||
437 | |||||||||
438 |     DEBUG_ONLY(
439 |        bucket->release_assign_node_ptr(
440 |           _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
441 |     )
442 | } | ||||||||
443 | } | ||||||||
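// Illustrative example (not from the original file): when growing from
// _table->_size == 8 to a table of 16, old bucket 3 is copied to new "even"
// bucket 3 and new "odd" bucket 3 + 8 = 11; unzip_bucket() then walks the
// shared chain and leaves each node only on the sibling whose index matches
// its hash, so a reader redirected from old bucket 3 still finds its node.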
444 | |||||||||
445 | template <typename CONFIG, MEMFLAGS F> | ||||||||
446 | template <typename LOOKUP_FUNC, typename DELETE_FUNC> | ||||||||
447 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
448 | internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f) | ||||||||
449 | { | ||||||||
450 | Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash()); | ||||||||
451 |   assert(bucket->is_locked(), "Must be locked.");
452 | Node* const volatile * rem_n_prev = bucket->first_ptr(); | ||||||||
453 | Node* rem_n = bucket->first(); | ||||||||
454 | bool have_dead = false; | ||||||||
455 |   while (rem_n != NULL) {
456 | if (lookup_f.equals(rem_n->value(), &have_dead)) { | ||||||||
457 | bucket->release_assign_node_ptr(rem_n_prev, rem_n->next()); | ||||||||
458 | break; | ||||||||
459 | } else { | ||||||||
460 | rem_n_prev = rem_n->next_ptr(); | ||||||||
461 | rem_n = rem_n->next(); | ||||||||
462 | } | ||||||||
463 | } | ||||||||
464 | |||||||||
465 | bucket->unlock(); | ||||||||
466 | |||||||||
467 |   if (rem_n == NULL) {
468 | return false; | ||||||||
469 | } | ||||||||
470 | // Publish the deletion. | ||||||||
471 | GlobalCounter::write_synchronize(); | ||||||||
472 | delete_f(rem_n->value()); | ||||||||
473 | Node::destroy_node(_context, rem_n); | ||||||||
474 |   JFR_ONLY(_stats_rate.remove();)
475 | return true; | ||||||||
476 | } | ||||||||
477 | |||||||||
478 | template <typename CONFIG, MEMFLAGS F> | ||||||||
479 | template <typename EVALUATE_FUNC, typename DELETE_FUNC> | ||||||||
480 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
481 | do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx, | ||||||||
482 | EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt) | ||||||||
483 | { | ||||||||
484 | // Here we have resize lock so table is SMR safe, and there is no new | ||||||||
485 | // table. Can do this in parallel if we want. | ||||||||
486 |   assert((is_mt && _resize_lock_owner != NULL) ||
487 |          (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
488 | Node* ndel[BULK_DELETE_LIMIT]; | ||||||||
489 | InternalTable* table = get_table(); | ||||||||
490 |   assert(start_idx < stop_idx, "Must be");
491 |   assert(stop_idx <= _table->_size, "Must be");
492 |   // Here we enter the critical section manually, since we don't want to take
493 |   // the cost of locking the bucket if there is nothing to delete. But we can
494 |   // have concurrent single deletes. The _invisible_epoch can only be used by
495 |   // the owner of _resize_lock, which is us here. Therefore we should not
496 |   // change it in our own read-side.
497 | GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(thread); | ||||||||
498 |   for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
499 | Bucket* bucket = table->get_bucket(bucket_it); | ||||||||
500 | Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ? | ||||||||
501 |                              table->get_bucket(bucket_it+1) : NULL;
502 | |||||||||
503 | if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>:: | ||||||||
504 | have_deletable(bucket, eval_f, prefetch_bucket)) { | ||||||||
505 | // Nothing to remove in this bucket. | ||||||||
506 | continue; | ||||||||
507 | } | ||||||||
508 | |||||||||
509 | GlobalCounter::critical_section_end(thread, cs_context); | ||||||||
510 | // We left critical section but the bucket cannot be removed while we hold | ||||||||
511 | // the _resize_lock. | ||||||||
512 | bucket->lock(); | ||||||||
513 | size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel); | ||||||||
514 | bucket->unlock(); | ||||||||
515 | if (is_mt) { | ||||||||
516 | GlobalCounter::write_synchronize(); | ||||||||
517 | } else { | ||||||||
518 | write_synchonize_on_visible_epoch(thread); | ||||||||
519 | } | ||||||||
520 | for (size_t node_it = 0; node_it < nd; node_it++) { | ||||||||
521 | del_f(ndel[node_it]->value()); | ||||||||
522 | Node::destroy_node(_context, ndel[node_it]); | ||||||||
523 |       JFR_ONLY(_stats_rate.remove();)
524 |       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
525 | } | ||||||||
526 | cs_context = GlobalCounter::critical_section_begin(thread); | ||||||||
527 | } | ||||||||
528 | GlobalCounter::critical_section_end(thread, cs_context); | ||||||||
529 | } | ||||||||
530 | |||||||||
531 | template <typename CONFIG, MEMFLAGS F> | ||||||||
532 | template <typename LOOKUP_FUNC> | ||||||||
533 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
534 | delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f) | ||||||||
535 | { | ||||||||
536 |   assert(bucket->is_locked(), "Must be locked.");
537 | |||||||||
538 | size_t dels = 0; | ||||||||
539 | Node* ndel[BULK_DELETE_LIMIT]; | ||||||||
540 | Node* const volatile * rem_n_prev = bucket->first_ptr(); | ||||||||
541 | Node* rem_n = bucket->first(); | ||||||||
542 |   while (rem_n != NULL) {
543 | bool is_dead = false; | ||||||||
544 | lookup_f.equals(rem_n->value(), &is_dead); | ||||||||
545 | if (is_dead) { | ||||||||
546 | ndel[dels++] = rem_n; | ||||||||
547 | Node* next_node = rem_n->next(); | ||||||||
548 | bucket->release_assign_node_ptr(rem_n_prev, next_node); | ||||||||
549 | rem_n = next_node; | ||||||||
550 | if (dels == BULK_DELETE_LIMIT) { | ||||||||
551 | break; | ||||||||
552 | } | ||||||||
553 | } else { | ||||||||
554 | rem_n_prev = rem_n->next_ptr(); | ||||||||
555 | rem_n = rem_n->next(); | ||||||||
556 | } | ||||||||
557 | } | ||||||||
558 | if (dels > 0) { | ||||||||
559 | GlobalCounter::write_synchronize(); | ||||||||
560 | for (size_t node_it = 0; node_it < dels; node_it++) { | ||||||||
561 | Node::destroy_node(_context, ndel[node_it]); | ||||||||
562 |       JFR_ONLY(_stats_rate.remove();)
563 |       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
564 | } | ||||||||
565 | } | ||||||||
566 | } | ||||||||
567 | |||||||||
568 | template <typename CONFIG, MEMFLAGS F> | ||||||||
569 | inline typename ConcurrentHashTable<CONFIG, F>::Bucket* | ||||||||
570 | ConcurrentHashTable<CONFIG, F>:: | ||||||||
571 | get_bucket(uintx hash) const | ||||||||
572 | { | ||||||||
573 | InternalTable* table = get_table(); | ||||||||
574 | Bucket* bucket = get_bucket_in(table, hash); | ||||||||
575 | if (bucket->have_redirect()) { | ||||||||
576 | table = get_new_table(); | ||||||||
577 | bucket = get_bucket_in(table, hash); | ||||||||
578 | } | ||||||||
579 | return bucket; | ||||||||
580 | } | ||||||||
581 | |||||||||
582 | template <typename CONFIG, MEMFLAGS F> | ||||||||
583 | inline typename ConcurrentHashTable<CONFIG, F>::Bucket* | ||||||||
584 | ConcurrentHashTable<CONFIG, F>:: | ||||||||
585 | get_bucket_locked(Thread* thread, const uintx hash) | ||||||||
586 | { | ||||||||
587 | Bucket* bucket; | ||||||||
588 | int i = 0; | ||||||||
589 | // SpinYield would be unfair here | ||||||||
590 | while(true) { | ||||||||
591 | { | ||||||||
592 | // We need a critical section to protect the table itself. But if we fail | ||||||||
593 | // we must leave critical section otherwise we would deadlock. | ||||||||
594 | ScopedCS cs(thread, this); | ||||||||
595 | bucket = get_bucket(hash); | ||||||||
596 | if (bucket->trylock()) { | ||||||||
597 | break; /* ends critical section */ | ||||||||
598 | } | ||||||||
599 | } /* ends critical section */ | ||||||||
600 |     if ((++i) == SPINPAUSES_PER_YIELD) {
601 |       // On a contemporary OS, yielding will give the CPU to another runnable
602 |       // thread if there is no free CPU available.
603 | os::naked_yield(); | ||||||||
604 | i = 0; | ||||||||
605 | } else { | ||||||||
606 | SpinPause(); | ||||||||
607 | } | ||||||||
608 | } | ||||||||
609 | return bucket; | ||||||||
610 | } | ||||||||
611 | |||||||||
612 | // Always called within critical section | ||||||||
613 | template <typename CONFIG, MEMFLAGS F> | ||||||||
614 | template <typename LOOKUP_FUNC> | ||||||||
615 | typename ConcurrentHashTable<CONFIG, F>::Node* | ||||||||
616 | ConcurrentHashTable<CONFIG, F>:: | ||||||||
617 | get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f, | ||||||||
618 | bool* have_dead, size_t* loops) const | ||||||||
619 | { | ||||||||
620 | size_t loop_count = 0; | ||||||||
621 | Node* node = bucket->first(); | ||||||||
622 |   while (node != NULL) {
623 | bool is_dead = false; | ||||||||
624 | ++loop_count; | ||||||||
625 | if (lookup_f.equals(node->value(), &is_dead)) { | ||||||||
626 | break; | ||||||||
627 | } | ||||||||
628 | if (is_dead && !(*have_dead)) { | ||||||||
629 | *have_dead = true; | ||||||||
630 | } | ||||||||
631 | node = node->next(); | ||||||||
632 | } | ||||||||
633 |   if (loops != NULL) {
634 | *loops = loop_count; | ||||||||
635 | } | ||||||||
636 | return node; | ||||||||
637 | } | ||||||||
638 | |||||||||
639 | template <typename CONFIG, MEMFLAGS F> | ||||||||
640 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
641 | unzip_bucket(Thread* thread, InternalTable* old_table, | ||||||||
642 | InternalTable* new_table, size_t even_index, size_t odd_index) | ||||||||
643 | { | ||||||||
644 | Node* aux = old_table->get_bucket(even_index)->first(); | ||||||||
645 |   if (aux == NULL) {
646 | // This is an empty bucket and in debug we poison first ptr in bucket. | ||||||||
647 | // Therefore we must make sure no readers are looking at this bucket. | ||||||||
648 | // If we don't do a write_synch here, caller must do it. | ||||||||
649 | return false; | ||||||||
650 | } | ||||||||
651 |   Node* delete_me = NULL;
652 | Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr(); | ||||||||
653 | Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr(); | ||||||||
654 |   while (aux != NULL) {
655 | bool dead_hash = false; | ||||||||
656 | size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash); | ||||||||
657 | Node* aux_next = aux->next(); | ||||||||
658 | if (dead_hash) { | ||||||||
659 | delete_me = aux; | ||||||||
660 |       // This item is dead, move both lists to next
661 | new_table->get_bucket(odd_index)->release_assign_node_ptr(odd, | ||||||||
662 | aux_next); | ||||||||
663 | new_table->get_bucket(even_index)->release_assign_node_ptr(even, | ||||||||
664 | aux_next); | ||||||||
665 | } else { | ||||||||
666 | size_t aux_index = bucket_idx_hash(new_table, aux_hash); | ||||||||
667 | if (aux_index == even_index) { | ||||||||
668 |         // This is an even node, so move odd to aux's next
669 | new_table->get_bucket(odd_index)->release_assign_node_ptr(odd, | ||||||||
670 | aux_next); | ||||||||
671 | // Keep in even list | ||||||||
672 | even = aux->next_ptr(); | ||||||||
673 | } else if (aux_index == odd_index) { | ||||||||
674 |         // This is an odd node, so move even to aux's next
675 | new_table->get_bucket(even_index)->release_assign_node_ptr(even, | ||||||||
676 | aux_next); | ||||||||
677 | // Keep in odd list | ||||||||
678 | odd = aux->next_ptr(); | ||||||||
679 | } else { | ||||||||
680 |         fatal("aux_index does not match even or odd indices");
681 | } | ||||||||
682 | } | ||||||||
683 | aux = aux_next; | ||||||||
684 | |||||||||
685 | // We can only move 1 pointer otherwise a reader might be moved to the wrong | ||||||||
686 | // chain. E.g. looking for even hash value but got moved to the odd bucket | ||||||||
687 | // chain. | ||||||||
688 | write_synchonize_on_visible_epoch(thread); | ||||||||
689 |     if (delete_me != NULL) {
690 |       Node::destroy_node(_context, delete_me);
691 |       delete_me = NULL;
692 | } | ||||||||
693 | } | ||||||||
694 | return true; | ||||||||
695 | } | ||||||||
696 | |||||||||
697 | template <typename CONFIG, MEMFLAGS F> | ||||||||
698 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
699 | internal_shrink_prolog(Thread* thread, size_t log2_size) | ||||||||
700 | { | ||||||||
701 | if (!try_resize_lock(thread)) { | ||||||||
702 | return false; | ||||||||
703 | } | ||||||||
704 |   assert(_resize_lock_owner == thread, "Re-size lock not held");
705 | if (_table->_log2_size == _log2_start_size || | ||||||||
706 | _table->_log2_size <= log2_size) { | ||||||||
707 | unlock_resize_lock(thread); | ||||||||
708 | return false; | ||||||||
709 | } | ||||||||
710 | _new_table = new InternalTable(_table->_log2_size - 1); | ||||||||
711 | return true; | ||||||||
712 | } | ||||||||
713 | |||||||||
714 | template <typename CONFIG, MEMFLAGS F> | ||||||||
715 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
716 | internal_shrink_epilog(Thread* thread) | ||||||||
717 | { | ||||||||
718 |   assert(_resize_lock_owner == thread, "Re-size lock not held");
719 | |||||||||
720 | InternalTable* old_table = set_table_from_new(); | ||||||||
721 | _size_limit_reached = false; | ||||||||
722 | unlock_resize_lock(thread); | ||||||||
723 | #ifdef ASSERT
724 | for (size_t i = 0; i < old_table->_size; i++) { | ||||||||
725 |     assert(old_table->get_bucket(i++)->first() == POISON_PTR,
726 |            "No poison found");
727 | } | ||||||||
728 | #endif | ||||||||
729 | // ABA safe, old_table not visible to any other threads. | ||||||||
730 | delete old_table; | ||||||||
731 | } | ||||||||
732 | |||||||||
733 | template <typename CONFIG, MEMFLAGS F> | ||||||||
734 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
735 | internal_shrink_range(Thread* thread, size_t start, size_t stop) | ||||||||
736 | { | ||||||||
737 | // The state is also copied here. | ||||||||
738 | // Hence all buckets in new table will be locked. | ||||||||
739 | for (size_t bucket_it = start; bucket_it < stop; bucket_it++) { | ||||||||
740 | size_t even_hash_index = bucket_it; // High bit 0 | ||||||||
741 | size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1 | ||||||||
742 | |||||||||
743 | Bucket* b_old_even = _table->get_bucket(even_hash_index); | ||||||||
744 | Bucket* b_old_odd = _table->get_bucket(odd_hash_index); | ||||||||
745 | |||||||||
746 | b_old_even->lock(); | ||||||||
747 | b_old_odd->lock(); | ||||||||
748 | |||||||||
749 | _new_table->get_buckets()[bucket_it] = *b_old_even; | ||||||||
750 | |||||||||
751 | // Put chains together. | ||||||||
752 | _new_table->get_bucket(bucket_it)-> | ||||||||
753 | release_assign_last_node_next(*(b_old_odd->first_ptr())); | ||||||||
754 | |||||||||
755 | b_old_even->redirect(); | ||||||||
756 | b_old_odd->redirect(); | ||||||||
757 | |||||||||
758 | write_synchonize_on_visible_epoch(thread); | ||||||||
759 | |||||||||
760 | // Unlock for writes into new smaller table. | ||||||||
761 | _new_table->get_bucket(bucket_it)->unlock(); | ||||||||
762 | |||||||||
763 |     DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
764 |                                                    (Node*)POISON_PTR);)
765 |     DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
766 |                                                   (Node*)POISON_PTR);)
767 | } | ||||||||
768 | } | ||||||||
769 | |||||||||
770 | template <typename CONFIG, MEMFLAGS F> | ||||||||
771 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
772 | internal_shrink(Thread* thread, size_t log2_size) | ||||||||
773 | { | ||||||||
774 | if (!internal_shrink_prolog(thread, log2_size)) { | ||||||||
775 | assert(_resize_lock_owner != thread, "Re-size lock held");
776 | return false; | ||||||||
777 | } | ||||||||
778 | assert(_resize_lock_owner == thread, "Should be locked by me");
779 | internal_shrink_range(thread, 0, _new_table->_size); | ||||||||
780 | internal_shrink_epilog(thread); | ||||||||
781 | assert(_resize_lock_owner != thread, "Re-size lock held");
782 | return true; | ||||||||
783 | } | ||||||||
784 | |||||||||
785 | template <typename CONFIG, MEMFLAGS F> | ||||||||
786 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
787 | internal_reset(size_t log2_size) | ||||||||
788 | { | ||||||||
789 | assert(_table != NULL, "table failed");
790 | assert(_log2_size_limit >= log2_size, "bad ergo");
791 | |||||||||
792 | delete _table; | ||||||||
793 | // Create and publish a new table | ||||||||
794 | InternalTable* table = new InternalTable(log2_size); | ||||||||
795 | _size_limit_reached = (log2_size == _log2_size_limit); | ||||||||
796 | Atomic::release_store(&_table, table); | ||||||||
797 | } | ||||||||
798 | |||||||||
799 | template <typename CONFIG, MEMFLAGS F> | ||||||||
800 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
801 | internal_grow_prolog(Thread* thread, size_t log2_size) | ||||||||
802 | { | ||||||||
803 | // We only double-check _size_limit_reached/is_max_size_reached() on the
804 | // grow path, since growing implies a high load on the table
805 | // while shrinking implies a low load.
806 | if (is_max_size_reached()) { | ||||||||
807 | return false; | ||||||||
808 | } | ||||||||
809 | if (!try_resize_lock(thread)) { | ||||||||
810 | // Either we have an ongoing resize or an operation which doesn't want us | ||||||||
811 | // to resize now. | ||||||||
812 | return false; | ||||||||
813 | } | ||||||||
814 | if (is_max_size_reached() || _table->_log2_size >= log2_size) { | ||||||||
815 | unlock_resize_lock(thread); | ||||||||
816 | return false; | ||||||||
817 | } | ||||||||
818 | |||||||||
819 | _new_table = new InternalTable(_table->_log2_size + 1); | ||||||||
820 | _size_limit_reached = _new_table->_log2_size == _log2_size_limit; | ||||||||
821 | |||||||||
822 | return true; | ||||||||
823 | } | ||||||||
824 | |||||||||
825 | template <typename CONFIG, MEMFLAGS F> | ||||||||
826 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
827 | internal_grow_epilog(Thread* thread) | ||||||||
828 | { | ||||||||
829 | assert(_resize_lock_owner == thread, "Should be locked");
830 | |||||||||
831 | InternalTable* old_table = set_table_from_new(); | ||||||||
832 | unlock_resize_lock(thread); | ||||||||
833 | #ifdef ASSERT
834 | for (size_t i = 0; i < old_table->_size; i++) { | ||||||||
835 | assert(old_table->get_bucket(i++)->first() == POISON_PTR,
836 |        "No poison found");
837 | } | ||||||||
838 | #endif | ||||||||
839 | // ABA safe, old_table not visible to any other threads. | ||||||||
840 | delete old_table; | ||||||||
841 | } | ||||||||
842 | |||||||||
843 | template <typename CONFIG, MEMFLAGS F> | ||||||||
844 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
845 | internal_grow(Thread* thread, size_t log2_size) | ||||||||
846 | { | ||||||||
847 | if (!internal_grow_prolog(thread, log2_size)) { | ||||||||
848 | assert(_resize_lock_owner != thread, "Re-size lock held");
849 | return false; | ||||||||
850 | } | ||||||||
851 | assert(_resize_lock_owner == thread, "Should be locked by me");
852 | internal_grow_range(thread, 0, _table->_size); | ||||||||
853 | internal_grow_epilog(thread); | ||||||||
854 | assert(_resize_lock_owner != thread, "Re-size lock held");
855 | return true; | ||||||||
856 | } | ||||||||
857 | |||||||||
858 | // Always called within critical section | ||||||||
859 | template <typename CONFIG, MEMFLAGS F> | ||||||||
860 | template <typename LOOKUP_FUNC> | ||||||||
861 | inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>:: | ||||||||
862 | internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint) | ||||||||
863 | { | ||||||||
864 | bool clean = false; | ||||||||
865 | size_t loops = 0; | ||||||||
866 | VALUE* ret = NULL;
867 | |||||||||
868 | const Bucket* bucket = get_bucket(lookup_f.get_hash()); | ||||||||
869 | Node* node = get_node(bucket, lookup_f, &clean, &loops); | ||||||||
870 | if (node != NULL) {
871 | ret = node->value(); | ||||||||
872 | } | ||||||||
873 | if (grow_hint != NULL) {
874 | *grow_hint = loops > _grow_hint; | ||||||||
875 | } | ||||||||
876 | |||||||||
877 | return ret; | ||||||||
878 | } | ||||||||
879 | |||||||||
880 | template <typename CONFIG, MEMFLAGS F> | ||||||||
881 | template <typename LOOKUP_FUNC, typename FOUND_FUNC> | ||||||||
882 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
883 | internal_insert_get(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value, | ||||||||
884 | FOUND_FUNC& foundf, bool* grow_hint, bool* clean_hint) | ||||||||
885 | { | ||||||||
886 | bool ret = false; | ||||||||
887 | bool clean = false; | ||||||||
888 | bool locked; | ||||||||
889 | size_t loops = 0; | ||||||||
890 | size_t i = 0; | ||||||||
891 | uintx hash = lookup_f.get_hash(); | ||||||||
892 | Node* new_node = Node::create_node(_context, value, NULL);
893 | |||||||||
894 | while (true) { | ||||||||
895 | { | ||||||||
896 | ScopedCS cs(thread, this); /* protects the table/bucket */
897 | Bucket* bucket = get_bucket(hash); | ||||||||
898 | Node* first_at_start = bucket->first(); | ||||||||
899 | Node* old = get_node(bucket, lookup_f, &clean, &loops); | ||||||||
900 | if (old == NULL) {
901 | new_node->set_next(first_at_start); | ||||||||
902 | if (bucket->cas_first(new_node, first_at_start)) { | ||||||||
903 | foundf(new_node->value()); | ||||||||
904 | JFR_ONLY(_stats_rate.add();)
905 | new_node = NULL;
906 | ret = true; | ||||||||
907 | break; /* leave critical section */ | ||||||||
908 | } | ||||||||
909 | // The CAS failed; we must leave the critical section and retry.
910 | locked = bucket->is_locked(); | ||||||||
911 | } else { | ||||||||
912 | // There is a duplicate. | ||||||||
913 | foundf(old->value()); | ||||||||
914 | break; /* leave critical section */ | ||||||||
915 | } | ||||||||
916 | } /* leave critical section */ | ||||||||
917 | i++; | ||||||||
918 | if (locked) { | ||||||||
919 | os::naked_yield(); | ||||||||
920 | } else { | ||||||||
921 | SpinPause(); | ||||||||
922 | } | ||||||||
923 | } | ||||||||
924 | |||||||||
925 | if (new_node != NULL) {
926 |   // The CAS failed and a duplicate was inserted, so we must free this node.
927 | Node::destroy_node(_context, new_node); | ||||||||
928 | } else if (i == 0 && clean) { | ||||||||
929 | // We only do cleaning on fast inserts. | ||||||||
930 | Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash()); | ||||||||
931 | delete_in_bucket(thread, bucket, lookup_f); | ||||||||
932 | bucket->unlock(); | ||||||||
933 | clean = false; | ||||||||
934 | } | ||||||||
935 | |||||||||
936 | if (grow_hint != NULL) {
937 | *grow_hint = loops > _grow_hint; | ||||||||
938 | } | ||||||||
939 | |||||||||
940 | if (clean_hint != NULL) {
941 | *clean_hint = clean; | ||||||||
942 | } | ||||||||
943 | |||||||||
944 | return ret; | ||||||||
945 | } | ||||||||
946 | |||||||||
947 | template <typename CONFIG, MEMFLAGS F> | ||||||||
948 | template <typename FUNC> | ||||||||
949 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
950 | visit_nodes(Bucket* bucket, FUNC& visitor_f) | ||||||||
951 | { | ||||||||
952 | Node* current_node = bucket->first(); | ||||||||
953 | while (current_node != NULL) {
954 | Prefetch::read(current_node->next(), 0); | ||||||||
955 | if (!visitor_f(current_node->value())) { | ||||||||
956 | return false; | ||||||||
957 | } | ||||||||
958 | current_node = current_node->next(); | ||||||||
959 | } | ||||||||
960 | return true; | ||||||||
961 | } | ||||||||
962 | |||||||||
963 | template <typename CONFIG, MEMFLAGS F> | ||||||||
964 | template <typename FUNC> | ||||||||
965 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
966 | do_scan_locked(Thread* thread, FUNC& scan_f) | ||||||||
967 | { | ||||||||
968 | assert(_resize_lock_owner == thread, "Re-size lock not held");
969 | // We can do a critical section over the entire loop but that would block | ||||||||
970 | // updates for a long time. Instead we choose to block resizes. | ||||||||
971 | InternalTable* table = get_table(); | ||||||||
972 | for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { | ||||||||
973 | ScopedCS cs(thread, this); | ||||||||
974 | if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) { | ||||||||
975 | break; /* ends critical section */ | ||||||||
976 | } | ||||||||
977 | } /* ends critical section */ | ||||||||
978 | } | ||||||||
979 | |||||||||
980 | template <typename CONFIG, MEMFLAGS F> | ||||||||
981 | template <typename EVALUATE_FUNC> | ||||||||
982 | inline size_t ConcurrentHashTable<CONFIG, F>:: | ||||||||
983 | delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f, | ||||||||
984 | size_t num_del, Node** ndel) | ||||||||
985 | { | ||||||||
986 | size_t dels = 0; | ||||||||
987 | Node* const volatile * rem_n_prev = bucket->first_ptr(); | ||||||||
988 | Node* rem_n = bucket->first(); | ||||||||
989 | while (rem_n != NULL) {
990 | if (eval_f(rem_n->value())) { | ||||||||
991 | ndel[dels++] = rem_n; | ||||||||
992 | Node* next_node = rem_n->next(); | ||||||||
993 | bucket->release_assign_node_ptr(rem_n_prev, next_node); | ||||||||
994 | rem_n = next_node; | ||||||||
995 | if (dels == num_del) { | ||||||||
996 | break; | ||||||||
997 | } | ||||||||
998 | } else { | ||||||||
999 | rem_n_prev = rem_n->next_ptr(); | ||||||||
1000 | rem_n = rem_n->next(); | ||||||||
1001 | } | ||||||||
1002 | } | ||||||||
1003 | return dels; | ||||||||
1004 | } | ||||||||
1005 | |||||||||
1006 | // Constructor | ||||||||
1007 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1008 | inline ConcurrentHashTable<CONFIG, F>:: | ||||||||
1009 | ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, void* context) | ||||||||
1010 | : _context(context), _new_table(NULL), _log2_size_limit(log2size_limit),
1011 | _log2_start_size(log2size), _grow_hint(grow_hint), | ||||||||
1012 | _size_limit_reached(false), _resize_lock_owner(NULL),
1013 | _invisible_epoch(0) | ||||||||
1014 | { | ||||||||
1015 | _stats_rate = TableRateStatistics(); | ||||||||
1016 | _resize_lock = | ||||||||
1017 | new Mutex(Mutex::nosafepoint-2, "ConcurrentHashTableResize_lock"); | ||||||||
1018 | _table = new InternalTable(log2size); | ||||||||
1019 | assert(log2size_limit >= log2size, "bad ergo");
1020 | _size_limit_reached = _table->_log2_size == _log2_size_limit; | ||||||||
1021 | } | ||||||||
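// Illustrative sketch (not part of this file): instantiating the table through a
// hypothetical CONFIG class. Of the CONFIG contract, only get_hash(value, &dead)
// is visible in this file; MyTableConfig and MyValue are assumed names. The table
// below starts with 2^9 buckets, may grow to 2^16 buckets, and reports a grow
// hint once a chain walk exceeds 4 nodes.
//
//   typedef ConcurrentHashTable<MyTableConfig, mtInternal> MyTable;
//   static MyTable* _my_table = new MyTable(9  /* log2size */,
//                                           16 /* log2size_limit */,
//                                           4  /* grow_hint */,
//                                           NULL /* context */);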
1022 | |||||||||
1023 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1024 | inline ConcurrentHashTable<CONFIG, F>:: | ||||||||
1025 | ~ConcurrentHashTable() | ||||||||
1026 | { | ||||||||
1027 | delete _resize_lock; | ||||||||
1028 | free_nodes(); | ||||||||
1029 | delete _table; | ||||||||
1030 | } | ||||||||
1031 | |||||||||
1032 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1033 | inline size_t ConcurrentHashTable<CONFIG, F>:: | ||||||||
1034 | get_mem_size(Thread* thread) | ||||||||
1035 | { | ||||||||
1036 | ScopedCS cs(thread, this); | ||||||||
1037 | return sizeof(*this) + _table->get_mem_size(); | ||||||||
1038 | } | ||||||||
1039 | |||||||||
1040 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1041 | inline size_t ConcurrentHashTable<CONFIG, F>:: | ||||||||
1042 | get_size_log2(Thread* thread) | ||||||||
1043 | { | ||||||||
1044 | ScopedCS cs(thread, this); | ||||||||
1045 | return _table->_log2_size; | ||||||||
1046 | } | ||||||||
1047 | |||||||||
1048 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1049 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
1050 | shrink(Thread* thread, size_t size_limit_log2) | ||||||||
1051 | { | ||||||||
1052 | size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2; | ||||||||
1053 | bool ret = internal_shrink(thread, tmp); | ||||||||
1054 | return ret; | ||||||||
1055 | } | ||||||||
1056 | |||||||||
1057 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1058 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
1059 | unsafe_reset(size_t size_log2) | ||||||||
1060 | { | ||||||||
1061 | size_t tmp = size_log2 == 0 ? _log2_start_size : size_log2; | ||||||||
1062 | internal_reset(tmp); | ||||||||
1063 | } | ||||||||
1064 | |||||||||
1065 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1066 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
1067 | grow(Thread* thread, size_t size_limit_log2) | ||||||||
1068 | { | ||||||||
1069 | size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2; | ||||||||
1070 | return internal_grow(thread, tmp); | ||||||||
1071 | } | ||||||||
1072 | |||||||||
1073 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1074 | template <typename LOOKUP_FUNC, typename FOUND_FUNC> | ||||||||
1075 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
1076 | get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint) | ||||||||
1077 | { | ||||||||
1078 | bool ret = false; | ||||||||
1079 | ScopedCS cs(thread, this); | ||||||||
1080 | VALUE* val = internal_get(thread, lookup_f, grow_hint); | ||||||||
1081 | if (val != NULL) {
1082 | found_f(val); | ||||||||
1083 | ret = true; | ||||||||
1084 | } | ||||||||
1085 | return ret; | ||||||||
1086 | } | ||||||||
1087 | |||||||||
1088 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1089 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
1090 | unsafe_insert(const VALUE& value) { | ||||||||
1091 | bool dead_hash = false; | ||||||||
1092 | size_t hash = CONFIG::get_hash(value, &dead_hash); | ||||||||
1093 | if (dead_hash) { | ||||||||
1094 | return false; | ||||||||
1095 | } | ||||||||
1096 | // This is an unsafe operation. | ||||||||
1097 | InternalTable* table = get_table(); | ||||||||
1098 | Bucket* bucket = get_bucket_in(table, hash); | ||||||||
1099 | assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
1100 | Node* new_node = Node::create_node(_context, value, bucket->first()); | ||||||||
1101 | if (!bucket->cas_first(new_node, bucket->first())) { | ||||||||
1102 | assert(false, "bad");
1103 | } | ||||||||
1104 | JFR_ONLY(_stats_rate.add();)
1105 | return true; | ||||||||
1106 | } | ||||||||
1107 | |||||||||
1108 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1109 | template <typename SCAN_FUNC> | ||||||||
1110 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
1111 | try_scan(Thread* thread, SCAN_FUNC& scan_f) | ||||||||
1112 | { | ||||||||
1113 | if (!try_resize_lock(thread)) { | ||||||||
1114 | return false; | ||||||||
1115 | } | ||||||||
1116 | do_scan_locked(thread, scan_f); | ||||||||
1117 | unlock_resize_lock(thread); | ||||||||
1118 | return true; | ||||||||
1119 | } | ||||||||
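// Illustrative sketch (not part of this file): a SCAN_FUNC as consumed by
// visit_nodes() above. The functor receives a pointer to each stored value and
// returns true to continue the scan or false to stop early. MyValue and
// _my_table are hypothetical names.
//
//   struct CountingScan {
//     size_t _count;
//     CountingScan() : _count(0) {}
//     bool operator()(MyValue* value) {
//       _count++;          // inspect *value here; scanning never unlinks nodes
//       return true;       // returning false would end the scan early
//     }
//   };
//
//   CountingScan scan;
//   if (!_my_table->try_scan(Thread::current(), scan)) {
//     // The resize lock was unavailable (e.g. an ongoing resize); no scan was done.
//   }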
1120 | |||||||||
1121 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1122 | template <typename SCAN_FUNC> | ||||||||
1123 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
1124 | do_scan(Thread* thread, SCAN_FUNC& scan_f) | ||||||||
1125 | { | ||||||||
1126 | assert(!SafepointSynchronize::is_at_safepoint(),
1127 |        "must be outside a safepoint");
1128 | assert(_resize_lock_owner != thread, "Re-size lock held");
1129 | lock_resize_lock(thread); | ||||||||
1130 | do_scan_locked(thread, scan_f); | ||||||||
1131 | unlock_resize_lock(thread); | ||||||||
1132 | assert(_resize_lock_owner != thread, "Re-size lock held");
1133 | } | ||||||||
1134 | |||||||||
1135 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1136 | template <typename SCAN_FUNC> | ||||||||
1137 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
1138 | do_safepoint_scan(SCAN_FUNC& scan_f) | ||||||||
1139 | { | ||||||||
1140 | // We only allow this method to be used during a safepoint. | ||||||||
1141 | assert(SafepointSynchronize::is_at_safepoint(),
1142 |        "must only be called in a safepoint");
1143 | |||||||||
1144 | // Here we can skip the critical-section protection, since at a safepoint
1145 | // no other thread can be using this table at the same time.
1146 | InternalTable* table = get_table(); | ||||||||
1147 | for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { | ||||||||
1148 | Bucket* bucket = table->get_bucket(bucket_it); | ||||||||
1149 | // If the bucket has a redirect, the items will be in the new table.
1150 | // We must visit them there since the new table will contain any | ||||||||
1151 | // concurrent inserts done after this bucket was resized. | ||||||||
1152 | // If the bucket does not have the redirect flag, all items are in this table.
1153 | if (!bucket->have_redirect()) { | ||||||||
1154 | if(!visit_nodes(bucket, scan_f)) { | ||||||||
1155 | return; | ||||||||
1156 | } | ||||||||
1157 | } else { | ||||||||
1158 | assert(bucket->is_locked(), "Bucket must be locked.");
1159 | } | ||||||||
1160 | } | ||||||||
1161 | // If there is a paused resize we also need to visit the already resized items. | ||||||||
1162 | table = get_new_table(); | ||||||||
1163 | if (table == NULL) {
1164 | return; | ||||||||
1165 | } | ||||||||
1166 | DEBUG_ONLY(if (table == POISON_PTR) { return; })
1167 | for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { | ||||||||
1168 | Bucket* bucket = table->get_bucket(bucket_it); | ||||||||
1169 | assert(!bucket->is_locked(), "Bucket must be unlocked.");
1170 | if (!visit_nodes(bucket, scan_f)) { | ||||||||
1171 | return; | ||||||||
1172 | } | ||||||||
1173 | } | ||||||||
1174 | } | ||||||||
1175 | |||||||||
1176 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1177 | template <typename EVALUATE_FUNC, typename DELETE_FUNC> | ||||||||
1178 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
1179 | try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) | ||||||||
1180 | { | ||||||||
1181 | if (!try_resize_lock(thread)) { | ||||||||
1182 | return false; | ||||||||
1183 | } | ||||||||
1184 | do_bulk_delete_locked(thread, eval_f, del_f); | ||||||||
1185 | unlock_resize_lock(thread); | ||||||||
1186 | assert(_resize_lock_owner != thread, "Re-size lock held");
1187 | return true; | ||||||||
1188 | } | ||||||||
1189 | |||||||||
1190 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1191 | template <typename EVALUATE_FUNC, typename DELETE_FUNC> | ||||||||
1192 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
1193 | bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) | ||||||||
1194 | { | ||||||||
1195 | assert(!SafepointSynchronize::is_at_safepoint(),
1196 |        "must be outside a safepoint");
1197 | lock_resize_lock(thread); | ||||||||
1198 | do_bulk_delete_locked(thread, eval_f, del_f); | ||||||||
1199 | unlock_resize_lock(thread); | ||||||||
1200 | } | ||||||||
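// Illustrative sketch (not part of this file): the functor pair for bulk_delete().
// delete_check_nodes() above shows that EVALUATE_FUNC receives a pointer to a
// stored value and returns true when that entry should be unlinked. DELETE_FUNC
// is consumed by do_bulk_delete_locked() (defined elsewhere); it is assumed here
// to be called with a pointer to each removed value. All names are hypothetical.
//
//   struct IsDeadEval {
//     bool operator()(MyValue* value) { return value->is_dead(); }   // assumed accessor
//   };
//   struct ReleaseDelete {
//     void operator()(MyValue* value) { /* release resources owned by *value */ }
//   };
//
//   IsDeadEval eval;
//   ReleaseDelete del;
//   _my_table->bulk_delete(Thread::current(), eval, del);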
1201 | |||||||||
1202 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1203 | template <typename VALUE_SIZE_FUNC> | ||||||||
1204 | inline TableStatistics ConcurrentHashTable<CONFIG, F>:: | ||||||||
1205 | statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f) | ||||||||
1206 | { | ||||||||
1207 | NumberSeq summary; | ||||||||
1208 | size_t literal_bytes = 0; | ||||||||
1209 | InternalTable* table = get_table(); | ||||||||
1210 | for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { | ||||||||
1211 | ScopedCS cs(thread, this); | ||||||||
1212 | size_t count = 0; | ||||||||
1213 | Bucket* bucket = table->get_bucket(bucket_it); | ||||||||
1214 | if (bucket->have_redirect() || bucket->is_locked()) { | ||||||||
1215 | continue; | ||||||||
1216 | } | ||||||||
1217 | Node* current_node = bucket->first(); | ||||||||
1218 | while (current_node != NULL) {
1219 | ++count; | ||||||||
1220 | literal_bytes += vs_f(current_node->value()); | ||||||||
1221 | current_node = current_node->next(); | ||||||||
1222 | } | ||||||||
1223 | summary.add((double)count); | ||||||||
1224 | } | ||||||||
1225 | |||||||||
1226 | return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node)); | ||||||||
1227 | } | ||||||||
1228 | |||||||||
1229 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1230 | template <typename VALUE_SIZE_FUNC> | ||||||||
1231 | inline TableStatistics ConcurrentHashTable<CONFIG, F>:: | ||||||||
1232 | statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old) | ||||||||
1233 | { | ||||||||
1234 | if (!try_resize_lock(thread)) { | ||||||||
1235 | return old; | ||||||||
1236 | } | ||||||||
1237 | |||||||||
1238 | TableStatistics ts = statistics_calculate(thread, vs_f); | ||||||||
1239 | unlock_resize_lock(thread); | ||||||||
1240 | |||||||||
1241 | return ts; | ||||||||
1242 | } | ||||||||
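// Illustrative sketch (not part of this file): a VALUE_SIZE_FUNC for the
// statistics functions. statistics_calculate() above calls it with a pointer to
// each stored value and accumulates the returned byte count as literal bytes.
// MyValue, byte_size() and _my_table are hypothetical names.
//
//   struct MyValueSize {
//     size_t operator()(MyValue* value) { return value->byte_size(); }
//   };
//
//   MyValueSize vs;
//   TableStatistics prev;   // previously returned statistics (assumed default-constructible)
//   TableStatistics ts = _my_table->statistics_get(Thread::current(), vs, prev);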
1243 | |||||||||
1244 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1245 | template <typename VALUE_SIZE_FUNC> | ||||||||
1246 | inline void ConcurrentHashTable<CONFIG, F>:: | ||||||||
1247 | statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, | ||||||||
1248 | outputStream* st, const char* table_name) | ||||||||
1249 | { | ||||||||
1250 | if (!try_resize_lock(thread)) { | ||||||||
1251 | st->print_cr("statistics unavailable at this moment"); | ||||||||
1252 | return; | ||||||||
1253 | } | ||||||||
1254 | |||||||||
1255 | TableStatistics ts = statistics_calculate(thread, vs_f); | ||||||||
1256 | unlock_resize_lock(thread); | ||||||||
1257 | |||||||||
1258 | ts.print(st, table_name); | ||||||||
1259 | } | ||||||||
1260 | |||||||||
1261 | template <typename CONFIG, MEMFLAGS F> | ||||||||
1262 | inline bool ConcurrentHashTable<CONFIG, F>:: | ||||||||
1263 | try_move_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht) | ||||||||
1264 | { | ||||||||
1265 | if (!try_resize_lock(thread)) { | ||||||||
1266 | return false; | ||||||||
1267 | } | ||||||||
1268 | assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
1269 | for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) { | ||||||||
1270 | Bucket* bucket = _table->get_bucket(bucket_it); | ||||||||
1271 | assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
1272 | while (bucket->first() != NULL) {
1273 | Node* move_node = bucket->first(); | ||||||||
1274 | bool ok = bucket->cas_first(move_node->next(), move_node); | ||||||||
1275 | assert(ok, "Uncontended cas must work");
1276 | bool dead_hash = false; | ||||||||
1277 | size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash); | ||||||||
1278 | if (!dead_hash) { | ||||||||
1279 | Bucket* insert_bucket = to_cht->get_bucket(insert_hash); | ||||||||
1280 | assert(!bucket->have_redirect() && !bucket->is_locked(), "Not bit should be present");
1281 | move_node->set_next(insert_bucket->first()); | ||||||||
1282 | ok = insert_bucket->cas_first(move_node, insert_bucket->first()); | ||||||||
1283 | assert(ok, "Uncontended cas must work");
1284 | } | ||||||||
1285 | } | ||||||||
1286 | } | ||||||||
1287 | unlock_resize_lock(thread); | ||||||||
1288 | return true; | ||||||||
1289 | } | ||||||||
1290 | |||||||||
1291 | #endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP

File: | jdk/src/hotspot/share/runtime/atomic.hpp |
1 | /* |
2 | * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_RUNTIME_ATOMIC_HPP |
26 | #define SHARE_RUNTIME_ATOMIC_HPP |
27 | |
28 | #include "memory/allocation.hpp" |
29 | #include "metaprogramming/conditional.hpp" |
30 | #include "metaprogramming/enableIf.hpp" |
31 | #include "metaprogramming/isIntegral.hpp" |
32 | #include "metaprogramming/isPointer.hpp" |
33 | #include "metaprogramming/isSame.hpp" |
34 | #include "metaprogramming/primitiveConversions.hpp" |
35 | #include "metaprogramming/removeCV.hpp" |
36 | #include "metaprogramming/removePointer.hpp" |
37 | #include "runtime/orderAccess.hpp" |
38 | #include "utilities/align.hpp" |
39 | #include "utilities/bytes.hpp" |
40 | #include "utilities/macros.hpp" |
41 | #include <type_traits> |
42 | |
43 | enum atomic_memory_order { |
44 | // The modes that align with C++11 are intended to |
45 | // follow the same semantics. |
46 | memory_order_relaxed = 0, |
47 | memory_order_acquire = 2, |
48 | memory_order_release = 3, |
49 | memory_order_acq_rel = 4, |
50 | memory_order_seq_cst = 5, |
51 | // Strong two-way memory barrier. |
52 | memory_order_conservative = 8 |
53 | }; |
54 | |
55 | enum ScopedFenceType { |
56 | X_ACQUIRE |
57 | , RELEASE_X |
58 | , RELEASE_X_FENCE |
59 | }; |
60 | |
61 | class Atomic : AllStatic { |
62 | public: |
63 | // Atomic operations on int64 types are not available on all 32-bit |
64 | // platforms. If atomic ops on int64 are defined here they must only |
65 | // be used from code that verifies they are available at runtime and |
66 | // can provide an alternative action if not - see supports_cx8() for |
67 | // a means to test availability. |
68 | |
69 | // The memory operations that are mentioned with each of the atomic |
70 | // function families come from src/share/vm/runtime/orderAccess.hpp, |
71 | // e.g., <fence> is described in that file and is implemented by the |
72 | // OrderAccess::fence() function. See that file for the gory details |
73 | // on the Memory Access Ordering Model. |
74 | |
75 | // All of the atomic operations that imply a read-modify-write action |
76 | // guarantee a two-way memory barrier across that operation. Historically |
77 | // these semantics reflect the strength of atomic operations that are |
78 | // provided on SPARC/X86. We assume that strength is necessary unless |
79 | // we can prove that a weaker form is sufficiently safe. |
80 | |
81 | // Atomically store to a location |
82 | // The type T must be either a pointer type convertible to or equal |
83 | // to D, an integral/enum type equal to D, or a type equal to D that |
84 | // is primitive convertible using PrimitiveConversions. |
85 | template<typename D, typename T> |
86 | inline static void store(volatile D* dest, T store_value); |
87 | |
88 | template <typename D, typename T> |
89 | inline static void release_store(volatile D* dest, T store_value); |
90 | |
91 | template <typename D, typename T> |
92 | inline static void release_store_fence(volatile D* dest, T store_value); |
93 | |
94 | // Atomically load from a location |
95 | // The type T must be either a pointer type, an integral/enum type, |
96 | // or a type that is primitive convertible using PrimitiveConversions. |
97 | template<typename T> |
98 | inline static T load(const volatile T* dest); |
99 | |
100 | template <typename T> |
101 | inline static T load_acquire(const volatile T* dest); |
102 | |
103 | // Atomically add to a location. *add*() provide: |
104 | // <fence> add-value-to-dest <membar StoreLoad|StoreStore> |
105 | |
106 | // Returns updated value. |
107 | template<typename D, typename I> |
108 | inline static D add(D volatile* dest, I add_value, |
109 | atomic_memory_order order = memory_order_conservative); |
110 | |
111 | // Returns previous value. |
112 | template<typename D, typename I> |
113 | inline static D fetch_and_add(D volatile* dest, I add_value, |
114 | atomic_memory_order order = memory_order_conservative); |
115 | |
116 | template<typename D, typename I> |
117 | inline static D sub(D volatile* dest, I sub_value, |
118 | atomic_memory_order order = memory_order_conservative); |
119 | |
120 | // Atomically increment location. inc() provide: |
121 | // <fence> increment-dest <membar StoreLoad|StoreStore> |
122 | // The type D may be either a pointer type, or an integral |
123 | // type. If it is a pointer type, then the increment is |
124 | // scaled to the size of the type pointed to by the pointer. |
125 | template<typename D> |
126 | inline static void inc(D volatile* dest, |
127 | atomic_memory_order order = memory_order_conservative); |
128 | |
129 | // Atomically decrement a location. dec() provide: |
130 | // <fence> decrement-dest <membar StoreLoad|StoreStore> |
131 | // The type D may be either a pointer type, or an integral |
132 | // type. If it is a pointer type, then the decrement is |
133 | // scaled to the size of the type pointed to by the pointer. |
134 | template<typename D> |
135 | inline static void dec(D volatile* dest, |
136 | atomic_memory_order order = memory_order_conservative); |
137 | |
138 | // Performs an atomic exchange of *dest with exchange_value. Returns the
139 | // prior value of *dest. xchg*() provides:
140 | // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore> |
141 | // The type T must be either a pointer type convertible to or equal |
142 | // to D, an integral/enum type equal to D, or a type equal to D that |
143 | // is primitive convertible using PrimitiveConversions. |
144 | template<typename D, typename T> |
145 | inline static D xchg(volatile D* dest, T exchange_value, |
146 | atomic_memory_order order = memory_order_conservative); |
147 | |
148 | // Performs atomic compare of *dest and compare_value, and exchanges |
149 | // *dest with exchange_value if the comparison succeeded. Returns prior |
150 | // value of *dest. cmpxchg*() provide: |
151 | // <fence> compare-and-exchange <membar StoreLoad|StoreStore> |
152 | |
153 | template<typename D, typename U, typename T> |
154 | inline static D cmpxchg(D volatile* dest, |
155 | U compare_value, |
156 | T exchange_value, |
157 | atomic_memory_order order = memory_order_conservative); |
158 | |
159 | // Performs atomic compare of *dest and NULL, and replaces *dest |
160 | // with exchange_value if the comparison succeeded. Returns true if |
161 | // the comparison succeeded and the exchange occurred. This is |
162 | // often used as part of lazy initialization, as a lock-free |
163 | // alternative to the Double-Checked Locking Pattern. |
164 | template<typename D, typename T> |
165 | inline static bool replace_if_null(D* volatile* dest, T* value, |
166 | atomic_memory_order order = memory_order_conservative); |
167 | |
168 | private: |
169 | WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private |
170 | // Test whether From is implicitly convertible to To. |
171 | // From and To must be pointer types. |
172 | // Note: Provides the limited subset of C++11 std::is_convertible |
173 | // that is needed here. |
174 | template<typename From, typename To> struct IsPointerConvertible; |
175 | |
176 | protected: |
177 | // Dispatch handler for store. Provides type-based validity |
178 | // checking and limited conversions around calls to the platform- |
179 | // specific implementation layer provided by PlatformOp. |
180 | template<typename D, typename T, typename PlatformOp, typename Enable = void> |
181 | struct StoreImpl; |
182 | |
183 | // Platform-specific implementation of store. Support for sizes |
184 | // of 1, 2, 4, and (if different) pointer size bytes are required. |
185 | // The class is a function object that must be default constructable, |
186 | // with these requirements: |
187 | // |
188 | // either: |
189 | // - dest is of type D*, an integral, enum or pointer type. |
190 | // - new_value is of type T, an integral, enum or pointer type D or
191 | // pointer type convertible to D. |
192 | // or: |
193 | // - T and D are the same and are primitive convertible using PrimitiveConversions |
194 | // and either way: |
195 | // - platform_store is an object of type PlatformStore<sizeof(T)>. |
196 | // |
197 | // Then |
198 | // platform_store(new_value, dest) |
199 | // must be a valid expression. |
200 | // |
201 | // The default implementation is a volatile store. If a platform |
202 | // requires more for e.g. 64 bit stores, a specialization is required |
203 | template<size_t byte_size> struct PlatformStore; |
204 | |
205 | // Dispatch handler for load. Provides type-based validity |
206 | // checking and limited conversions around calls to the platform- |
207 | // specific implementation layer provided by PlatformOp. |
208 | template<typename T, typename PlatformOp, typename Enable = void> |
209 | struct LoadImpl; |
210 | |
211 | // Platform-specific implementation of load. Support for sizes of |
212 | // 1, 2, 4 bytes and (if different) pointer size bytes are required. |
213 | // The class is a function object that must be default |
214 | // constructable, with these requirements: |
215 | // |
216 | // - dest is of type T*, an integral, enum or pointer type, or |
217 | // T is convertible to a primitive type using PrimitiveConversions |
218 | // - platform_load is an object of type PlatformLoad<sizeof(T)>. |
219 | // |
220 | // Then |
221 | // platform_load(src) |
222 | // must be a valid expression, returning a result convertible to T. |
223 | // |
224 | // The default implementation is a volatile load. If a platform |
225 | // requires more for e.g. 64 bit loads, a specialization is required |
226 | template<size_t byte_size> struct PlatformLoad; |
227 | |
228 | // Give platforms a variation point to specialize. |
229 | template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore; |
230 | template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad; |
231 | |
232 | private: |
233 | // Dispatch handler for add. Provides type-based validity checking |
234 | // and limited conversions around calls to the platform-specific |
235 | // implementation layer provided by PlatformAdd. |
236 | template<typename D, typename I, typename Enable = void> |
237 | struct AddImpl; |
238 | |
239 | // Platform-specific implementation of add. Support for sizes of 4 |
240 | // bytes and (if different) pointer size bytes are required. The |
241 | // class must be default constructable, with these requirements: |
242 | // |
243 | // - dest is of type D*, an integral or pointer type. |
244 | // - add_value is of type I, an integral type. |
245 | // - sizeof(I) == sizeof(D). |
246 | // - if D is an integral type, I == D. |
247 | // - order is of type atomic_memory_order. |
248 | // - platform_add is an object of type PlatformAdd<sizeof(D)>. |
249 | // |
250 | // Then both |
251 | // platform_add.add_and_fetch(dest, add_value, order) |
252 | // platform_add.fetch_and_add(dest, add_value, order) |
253 | // must be valid expressions returning a result convertible to D. |
254 | // |
255 | // add_and_fetch atomically adds add_value to the value of dest, |
256 | // returning the new value. |
257 | // |
258 | // fetch_and_add atomically adds add_value to the value of dest, |
259 | // returning the old value. |
260 | // |
261 | // When D is a pointer type P*, both add_and_fetch and fetch_and_add |
262 | // treat it as if it were an uintptr_t; they do not perform any |
263 | // scaling of add_value, as that has already been done by the caller. |
264 | // |
265 | // No definition is provided; all platforms must explicitly define |
266 | // this class and any needed specializations. |
267 | template<size_t byte_size> struct PlatformAdd; |
268 | |
269 | // Support for platforms that implement some variants of add using a |
270 | // (typically out of line) non-template helper function. The |
271 | // generic arguments passed to PlatformAdd need to be translated to |
272 | // the appropriate type for the helper function, the helper function |
273 | // invoked on the translated arguments, and the result translated |
274 | // back. Type is the parameter / return type of the helper |
275 | // function. No scaling of add_value is performed when D is a pointer |
276 | // type, so this function can be used to implement the support function |
277 | // required by AddAndFetch. |
278 | template<typename Type, typename Fn, typename D, typename I> |
279 | static D add_using_helper(Fn fn, D volatile* dest, I add_value); |
280 | |
281 | // Dispatch handler for cmpxchg. Provides type-based validity |
282 | // checking and limited conversions around calls to the |
283 | // platform-specific implementation layer provided by |
284 | // PlatformCmpxchg. |
285 | template<typename D, typename U, typename T, typename Enable = void> |
286 | struct CmpxchgImpl; |
287 | |
288 | // Platform-specific implementation of cmpxchg. Support for sizes |
289 | // of 1, 4, and 8 are required. The class is a function object that |
290 | // must be default constructable, with these requirements: |
291 | // |
292 | // - dest is of type T*. |
293 | // - exchange_value and compare_value are of type T. |
294 | // - order is of type atomic_memory_order. |
295 | // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>. |
296 | // |
297 | // Then |
298 | // platform_cmpxchg(dest, compare_value, exchange_value, order) |
299 | // must be a valid expression, returning a result convertible to T. |
300 | // |
301 | // A default definition is provided, which declares a function template |
302 | // T operator()(T volatile*, T, T, atomic_memory_order) const |
303 | // |
304 | // For each required size, a platform must either provide an |
305 | // appropriate definition of that function, or must entirely |
306 | // specialize the class template for that size. |
307 | template<size_t byte_size> struct PlatformCmpxchg; |
308 | |
309 | // Support for platforms that implement some variants of cmpxchg |
310 | // using a (typically out of line) non-template helper function. |
311 | // The generic arguments passed to PlatformCmpxchg need to be |
312 | // translated to the appropriate type for the helper function, the |
313 | // helper invoked on the translated arguments, and the result |
314 | // translated back. Type is the parameter / return type of the |
315 | // helper function. |
316 | template<typename Type, typename Fn, typename T> |
317 | static T cmpxchg_using_helper(Fn fn, |
318 | T volatile* dest, |
319 | T compare_value, |
320 | T exchange_value); |
321 | |
322 | // Support platforms that do not provide Read-Modify-Write |
323 | // byte-level atomic access. To use, derive PlatformCmpxchg<1> from |
324 | // this class. |
325 | public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11. |
326 | struct CmpxchgByteUsingInt; |
327 | private: |
328 | |
329 | // Dispatch handler for xchg. Provides type-based validity |
330 | // checking and limited conversions around calls to the |
331 | // platform-specific implementation layer provided by |
332 | // PlatformXchg. |
333 | template<typename D, typename T, typename Enable = void> |
334 | struct XchgImpl; |
335 | |
336 | // Platform-specific implementation of xchg. Support for sizes |
337 | // of 4, and sizeof(intptr_t) are required. The class is a function |
338 | // object that must be default constructable, with these requirements: |
339 | // |
340 | // - dest is of type T*. |
341 | // - exchange_value is of type T. |
342 | // - platform_xchg is an object of type PlatformXchg<sizeof(T)>. |
343 | // |
344 | // Then |
345 | // platform_xchg(dest, exchange_value) |
346 | // must be a valid expression, returning a result convertible to T. |
347 | // |
348 | // A default definition is provided, which declares a function template |
349 | // T operator()(T volatile*, T, atomic_memory_order) const |
350 | // |
351 | // For each required size, a platform must either provide an |
352 | // appropriate definition of that function, or must entirely |
353 | // specialize the class template for that size. |
354 | template<size_t byte_size> struct PlatformXchg; |
355 | |
356 | // Support for platforms that implement some variants of xchg |
357 | // using a (typically out of line) non-template helper function. |
358 | // The generic arguments passed to PlatformXchg need to be |
359 | // translated to the appropriate type for the helper function, the |
360 | // helper invoked on the translated arguments, and the result |
361 | // translated back. Type is the parameter / return type of the |
362 | // helper function. |
363 | template<typename Type, typename Fn, typename T> |
364 | static T xchg_using_helper(Fn fn, |
365 | T volatile* dest, |
366 | T exchange_value); |
367 | }; |
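// Illustrative sketch (not part of this file): typical uses of the operations
// declared above, on file-local variables. Foo, compute_sample() and the
// variable names are hypothetical; the calls follow the signatures above.
//
//   static volatile size_t _counter = 0;
//   static volatile size_t _max_sample = 0;
//   static Foo* volatile _singleton = NULL;
//
//   void example() {
//     Atomic::inc(&_counter);                              // full two-way barrier
//     size_t observed = Atomic::load_acquire(&_counter);   // load with acquire semantics
//     Atomic::add(&_counter, (size_t)4, memory_order_relaxed);
//
//     // Classic cmpxchg retry loop: record a running maximum.
//     size_t sample = compute_sample();
//     size_t cur = Atomic::load(&_max_sample);
//     while (sample > cur) {
//       size_t prev = Atomic::cmpxchg(&_max_sample, cur, sample);
//       if (prev == cur) break;   // we installed the larger value
//       cur = prev;               // lost the race; re-check against the new value
//     }
//
//     // Lock-free lazy initialization, as described for replace_if_null() above.
//     if (Atomic::load_acquire(&_singleton) == NULL) {
//       Foo* candidate = new Foo();
//       if (!Atomic::replace_if_null(&_singleton, candidate)) {
//         delete candidate;       // another thread won the race
//       }
//     }
//   }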
368 | |
369 | template<typename From, typename To> |
370 | struct Atomic::IsPointerConvertible<From*, To*> : AllStatic { |
371 | // Determine whether From* is implicitly convertible to To*, using |
372 | // the "sizeof trick". |
373 | typedef char yes; |
374 | typedef char (&no)[2]; |
375 | |
376 | static yes test(To*); |
377 | static no test(...); |
378 | static From* test_value; |
379 | |
380 | static const bool value = (sizeof(yes) == sizeof(test(test_value))); |
381 | }; |
382 | |
383 | // Handle load for pointer, integral and enum types. |
384 | template<typename T, typename PlatformOp> |
385 | struct Atomic::LoadImpl< |
386 | T, |
387 | PlatformOp, |
388 | typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value || IsPointer<T>::value>::type> |
389 | { |
390 | T operator()(T const volatile* dest) const { |
391 | // Forward to the platform handler for the size of T. |
392 | return PlatformOp()(dest); |
393 | } |
394 | }; |
395 | |
396 | // Handle load for types that have a translator. |
397 | // |
398 | // All the involved types must be identical. |
399 | // |
400 | // This translates the original call into a call on the decayed |
401 | // arguments, and returns the recovered result of that translated |
402 | // call. |
403 | template<typename T, typename PlatformOp> |
404 | struct Atomic::LoadImpl< |
405 | T, |
406 | PlatformOp, |
407 | typename EnableIf<PrimitiveConversions::Translate<T>::value>::type> |
408 | { |
409 | T operator()(T const volatile* dest) const { |
410 | typedef PrimitiveConversions::Translate<T> Translator; |
411 | typedef typename Translator::Decayed Decayed; |
412 | STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
413 | Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest)); |
414 | return Translator::recover(result); |
415 | } |
416 | }; |
417 | |
418 | // Default implementation of atomic load if a specific platform |
419 | // does not provide a specialization for a certain size class. |
420 | // For increased safety, the default implementation only allows |
421 | // load types that are pointer sized or smaller. If a platform still |
422 | // supports wide atomics, then it has to use specialization |
423 | // of Atomic::PlatformLoad for that wider size class. |
424 | template<size_t byte_size> |
425 | struct Atomic::PlatformLoad { |
426 | template<typename T> |
427 | T operator()(T const volatile* dest) const { |
428 | STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
429 | return *dest; |
430 | } |
431 | }; |
432 | |
433 | // Handle store for integral and enum types. |
434 | // |
435 | // All the involved types must be identical. |
436 | template<typename T, typename PlatformOp> |
437 | struct Atomic::StoreImpl< |
438 | T, T, |
439 | PlatformOp, |
440 | typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type> |
441 | { |
442 | void operator()(T volatile* dest, T new_value) const { |
443 | // Forward to the platform handler for the size of T. |
444 | PlatformOp()(dest, new_value); |
445 | } |
446 | }; |
447 | |
448 | // Handle store for pointer types. |
449 | // |
450 | // The new_value must be implicitly convertible to the |
451 | // destination's type; it must be type-correct to store the |
452 | // new_value in the destination. |
453 | template<typename D, typename T, typename PlatformOp> |
454 | struct Atomic::StoreImpl< |
455 | D*, T*, |
456 | PlatformOp, |
457 | typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type> |
458 | { |
459 | void operator()(D* volatile* dest, T* new_value) const { |
460 | // Allow derived to base conversion, and adding cv-qualifiers. |
461 | D* value = new_value; |
462 | PlatformOp()(dest, value); |
463 | } |
464 | }; |
465 | |
466 | // Handle store for types that have a translator. |
467 | // |
468 | // All the involved types must be identical. |
469 | // |
470 | // This translates the original call into a call on the decayed |
471 | // arguments. |
472 | template<typename T, typename PlatformOp> |
473 | struct Atomic::StoreImpl< |
474 | T, T, |
475 | PlatformOp, |
476 | typename EnableIf<PrimitiveConversions::Translate<T>::value>::type> |
477 | { |
478 | void operator()(T volatile* dest, T new_value) const { |
479 | typedef PrimitiveConversions::Translate<T> Translator; |
480 | typedef typename Translator::Decayed Decayed; |
481 | STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
482 | PlatformOp()(reinterpret_cast<Decayed volatile*>(dest), |
483 | Translator::decay(new_value)); |
484 | } |
485 | }; |
486 | |
487 | // Default implementation of atomic store if a specific platform |
488 | // does not provide a specialization for a certain size class. |
489 | // For increased safety, the default implementation only allows |
490 | // storing types that are pointer sized or smaller. If a platform still |
491 | // supports wide atomics, then it has to use specialization |
492 | // of Atomic::PlatformStore for that wider size class. |
493 | template<size_t byte_size> |
494 | struct Atomic::PlatformStore { |
495 | template<typename T> |
496 | void operator()(T volatile* dest, |
497 | T new_value) const { |
498 | STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
499 | (void)const_cast<T&>(*dest = new_value); |
500 | } |
501 | }; |
502 | |
503 | template<typename D> |
504 | inline void Atomic::inc(D volatile* dest, atomic_memory_order order) { |
505 | STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
506 | typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I; |
507 | Atomic::add(dest, I(1), order); |
508 | } |
509 | |
510 | template<typename D> |
511 | inline void Atomic::dec(D volatile* dest, atomic_memory_order order) { |
512 | STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value); |
513 | typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I; |
514 | // Assumes two's complement integer representation. |
515 | #pragma warning(suppress: 4146) |
516 | Atomic::add(dest, I(-1), order); |
517 | } |
518 | |
519 | template<typename D, typename I> |
520 | inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) { |
521 | STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value); |
522 | STATIC_ASSERT(IsIntegral<I>::value); |
523 | // If D is a pointer type, use [u]intptr_t as the addend type, |
524 | // matching signedness of I. Otherwise, use D as the addend type. |
525 | typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI; |
526 | typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType; |
527 | // Only allow conversions that can't change the value. |
528 | STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value); |
529 | STATIC_ASSERT(sizeof(I) <= sizeof(AddendType)); |
530 | AddendType addend = sub_value; |
531 | // Assumes two's complement integer representation. |
532 | #pragma warning(suppress: 4146) // In case AddendType is not signed. |
533 | return Atomic::add(dest, -addend, order); |
534 | } |
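// [Illustrative usage sketch; '_live_count' is a hypothetical field, not part
//  of this header.]  inc/dec/sub are thin wrappers around Atomic::add:
//
//   static volatile size_t _live_count = 0;
//
//   Atomic::inc(&_live_count);                          // += 1, no return value
//   Atomic::dec(&_live_count);                          // -= 1, no return value
//   size_t n = Atomic::sub(&_live_count, (size_t)2);    // returns the updated value
//
// For pointer destinations the addend is scaled by the pointee size; see the
// pointer specialization of AddImpl below.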
535 | |
536 | // Define the class before including platform file, which may specialize |
537 | // the operator definition. No generic definition of specializations |
538 | // of the operator template is provided, nor are there any generic |
539 | // specializations of the class. The platform file is responsible for |
540 | // providing those. |
541 | template<size_t byte_size> |
542 | struct Atomic::PlatformCmpxchg { |
543 | template<typename T> |
544 | T operator()(T volatile* dest, |
545 | T compare_value, |
546 | T exchange_value, |
547 | atomic_memory_order order) const; |
548 | }; |
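// [Illustrative sketch; not part of this header.]  The platform header has to
// supply the operator bodies, for example a 4-byte specialization.  On a
// gcc/clang toolchain one could in principle express it with the __atomic
// builtins (this sketch ignores 'order' and always uses seq_cst; the actual
// per-platform headers typically use inline assembly or their own mapping):
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
//                                                   T compare_value,
//                                                   T exchange_value,
//                                                   atomic_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     T expected = compare_value;
//     __atomic_compare_exchange_n(dest, &expected, exchange_value,
//                                 false /*weak*/,
//                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
//     return expected;  // == compare_value on success, observed value on failure
//   }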
549 | |
550 | // Define the class before including platform file, which may use this |
551 | // as a base class, requiring it be complete. The definition is later |
552 | // in this file, near the other definitions related to cmpxchg. |
553 | struct Atomic::CmpxchgByteUsingInt { |
554 | static uint8_t get_byte_in_int(uint32_t n, uint32_t idx); |
555 | static uint32_t set_byte_in_int(uint32_t n, uint8_t b, uint32_t idx); |
556 | template<typename T> |
557 | T operator()(T volatile* dest, |
558 | T compare_value, |
559 | T exchange_value, |
560 | atomic_memory_order order) const; |
561 | }; |
562 | |
563 | // Define the class before including platform file, which may specialize |
564 | // the operator definition. No generic definition of specializations |
565 | // of the operator template is provided, nor are there any generic |
566 | // specializations of the class. The platform file is responsible for |
567 | // providing those. |
568 | template<size_t byte_size> |
569 | struct Atomic::PlatformXchg { |
570 | template<typename T> |
571 | T operator()(T volatile* dest, |
572 | T exchange_value, |
573 | atomic_memory_order order) const; |
574 | }; |
575 | |
576 | template <ScopedFenceType T> |
577 | class ScopedFenceGeneral: public StackObj { |
578 | public: |
579 | void prefix() {} |
580 | void postfix() {} |
581 | }; |
582 | |
583 | // The following methods can be specialized using simple template specialization |
584 | // in the platform specific files for optimization purposes. Otherwise the |
585 | // generalized variant is used. |
586 | |
587 | template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() { OrderAccess::acquire(); } |
588 | template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() { OrderAccess::release(); } |
589 | template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix() { OrderAccess::release(); } |
590 | template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); } |
591 | |
592 | template <ScopedFenceType T> |
593 | class ScopedFence : public ScopedFenceGeneral<T> { |
594 | void *const _field; |
595 | public: |
596 | ScopedFence(void *const field) : _field(field) { prefix(); } |
597 | ~ScopedFence() { postfix(); } |
598 | void prefix() { ScopedFenceGeneral<T>::prefix(); } |
599 | void postfix() { ScopedFenceGeneral<T>::postfix(); } |
600 | }; |
601 | |
602 | // platform specific in-line definitions - must come before shared definitions |
603 | |
604 | #include OS_CPU_HEADER(atomic) |
605 | |
606 | // shared in-line definitions |
607 | |
608 | // size_t casts... |
609 | #if (SIZE_MAX != UINTPTR_MAX) |
610 | #error size_t is not WORD_SIZE, interesting platform, but missing implementation here |
611 | #endif |
612 | |
613 | template<typename T> |
614 | inline T Atomic::load(const volatile T* dest) { |
615 | return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest); |
616 | } |
617 | |
618 | template<size_t byte_size, ScopedFenceType type> |
619 | struct Atomic::PlatformOrderedLoad { |
620 | template <typename T> |
621 | T operator()(const volatile T* p) const { |
622 | ScopedFence<type> f((void*)p); |
623 | return Atomic::load(p); |
624 | } |
625 | }; |
626 | |
627 | template <typename T> |
628 | inline T Atomic::load_acquire(const volatile T* p) { |
629 | return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p); |
630 | } |
631 | |
632 | template<typename D, typename T> |
633 | inline void Atomic::store(volatile D* dest, T store_value) { |
634 | StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value); |
635 | } |
636 | |
637 | template<size_t byte_size, ScopedFenceType type> |
638 | struct Atomic::PlatformOrderedStore { |
639 | template <typename T> |
640 | void operator()(volatile T* p, T v) const { |
641 | ScopedFence<type> f((void*)p); |
642 | Atomic::store(p, v); |
643 | } |
644 | }; |
645 | |
646 | template <typename D, typename T> |
647 | inline void Atomic::release_store(volatile D* p, T v) { |
648 | StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v); |
649 | } |
650 | |
651 | template <typename D, typename T> |
652 | inline void Atomic::release_store_fence(volatile D* p, T v) { |
653 | StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v); |
654 | } |
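// [Illustrative usage sketch; '_ready' and '_payload' are hypothetical
//  fields, not part of this header.]  load_acquire / release_store form a
// publication pair:
//
//   volatile bool _ready = false;
//   int _payload = 0;
//
//   // writer: fill in the data, then publish the flag with release semantics
//   _payload = 42;
//   Atomic::release_store(&_ready, true);
//
//   // reader: once the flag is observed with acquire semantics,
//   // the writes preceding the release_store are visible
//   if (Atomic::load_acquire(&_ready)) {
//     int v = _payload;   // sees 42
//   }
//
// release_store_fence additionally issues a full fence after the store for
// code that also needs store-load ordering.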
655 | |
656 | template<typename D, typename I> |
657 | inline D Atomic::add(D volatile* dest, I add_value, |
658 | atomic_memory_order order) { |
659 | return AddImpl<D, I>::add_and_fetch(dest, add_value, order); |
660 | } |
661 | |
662 | template<typename D, typename I> |
663 | inline D Atomic::fetch_and_add(D volatile* dest, I add_value, |
664 | atomic_memory_order order) { |
665 | return AddImpl<D, I>::fetch_and_add(dest, add_value, order); |
666 | } |
667 | |
668 | template<typename D, typename I> |
669 | struct Atomic::AddImpl< |
670 | D, I, |
671 | typename EnableIf<IsIntegral<I>::value && |
672 | IsIntegral<D>::value && |
673 | (sizeof(I) <= sizeof(D)) && |
674 | (IsSigned<I>::value == IsSigned<D>::value)>::type> |
675 | { |
676 | static D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) { |
677 | D addend = add_value; |
678 | return PlatformAdd<sizeof(D)>().add_and_fetch(dest, addend, order); |
679 | } |
680 | static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) { |
681 | D addend = add_value; |
682 | return PlatformAdd<sizeof(D)>().fetch_and_add(dest, addend, order); |
683 | } |
684 | }; |
685 | |
686 | template<typename P, typename I> |
687 | struct Atomic::AddImpl< |
688 | P*, I, |
689 | typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type> |
690 | { |
691 | STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*)); |
692 | STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*)); |
693 | typedef typename Conditional<IsSigned<I>::value, |
694 | intptr_t, |
695 | uintptr_t>::type CI; |
696 | |
697 | static CI scale_addend(CI add_value) { |
698 | return add_value * sizeof(P); |
699 | } |
700 | |
701 | static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) { |
702 | CI addend = add_value; |
703 | return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order); |
704 | } |
705 | static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) { |
706 | CI addend = add_value; |
707 | return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order); |
708 | } |
709 | }; |
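// [Illustrative usage sketch; '_top' is a hypothetical bump-pointer field,
//  not part of this header.]  Because of scale_addend(), adding an integer to
// a pointer destination follows ordinary pointer arithmetic:
//
//   HeapWord* volatile _top;
//
//   HeapWord* new_top = Atomic::add(&_top, (intptr_t)8);
//   // the underlying word-sized add receives 8 * sizeof(HeapWord), so _top
//   // advances by eight HeapWords and new_top is the updated value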
710 | |
711 | template<typename Type, typename Fn, typename D, typename I> |
712 | inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) { |
713 | return PrimitiveConversions::cast<D>( |
714 | fn(PrimitiveConversions::cast<Type>(add_value), |
715 | reinterpret_cast<Type volatile*>(dest))); |
716 | } |
717 | |
718 | template<typename D, typename U, typename T> |
719 | inline D Atomic::cmpxchg(D volatile* dest, |
720 | U compare_value, |
721 | T exchange_value, |
722 | atomic_memory_order order) { |
723 | return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order); |
724 | } |
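// [Illustrative usage sketch; '_max_seen' is a hypothetical field, not part
//  of this header.]  cmpxchg returns the value that was in 'dest' before the
// operation, so the conventional retry loop compares that result against the
// expected value:
//
//   volatile int _max_seen = 0;
//
//   void record(int sample) {
//     int cur = Atomic::load(&_max_seen);
//     while (sample > cur) {
//       int prev = Atomic::cmpxchg(&_max_seen, cur, sample);
//       if (prev == cur) break;   // our value was installed
//       cur = prev;               // lost the race; re-test against the new value
//     }
//   }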
725 | |
726 | template<typename D, typename T> |
727 | inline bool Atomic::replace_if_null(D* volatile* dest, T* value, |
728 | atomic_memory_order order) { |
729 | // Presently using a trivial implementation in terms of cmpxchg. |
730 | // Consider adding platform support, to permit the use of compiler |
731 | // intrinsics like gcc's __sync_bool_compare_and_swap. |
732 | D* expected_null = NULL; |
733 | return expected_null == cmpxchg(dest, expected_null, value, order); |
734 | } |
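// [Illustrative usage sketch; 'Foo' and '_singleton' are hypothetical, not
//  part of this header.]  replace_if_null succeeds for exactly one thread,
// which makes it suitable for racy lazy initialization of a pointer slot:
//
//   static Foo* volatile _singleton = NULL;
//
//   Foo* candidate = new Foo();
//   if (!Atomic::replace_if_null(&_singleton, candidate)) {
//     delete candidate;   // another thread installed its object first
//   }
//   // _singleton is non-NULL from here on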
735 | |
736 | // Handle cmpxchg for integral and enum types. |
737 | // |
738 | // All the involved types must be identical. |
739 | template<typename T> |
740 | struct Atomic::CmpxchgImpl< |
741 | T, T, T, |
742 | typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type> |
743 | { |
744 | T operator()(T volatile* dest, T compare_value, T exchange_value, |
745 | atomic_memory_order order) const { |
746 | // Forward to the platform handler for the size of T. |
747 | return PlatformCmpxchg<sizeof(T)>()(dest, |
748 | compare_value, |
749 | exchange_value, |
750 | order); |
751 | } |
752 | }; |
753 | |
754 | // Handle cmpxchg for pointer types. |
755 | // |
756 | // The destination's type and the compare_value type must be the same, |
757 | // ignoring cv-qualifiers; we don't care about the cv-qualifiers of |
758 | // the compare_value. |
759 | // |
760 | // The exchange_value must be implicitly convertible to the |
761 | // destination's type; it must be type-correct to store the |
762 | // exchange_value in the destination. |
763 | template<typename D, typename U, typename T> |
764 | struct Atomic::CmpxchgImpl< |
765 | D*, U*, T*, |
766 | typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value && |
767 | IsSame<typename RemoveCV<D>::type, |
768 | typename RemoveCV<U>::type>::value>::type> |
769 | { |
770 | D* operator()(D* volatile* dest, U* compare_value, T* exchange_value, |
771 | atomic_memory_order order) const { |
772 | // Allow derived to base conversion, and adding cv-qualifiers. |
773 | D* new_value = exchange_value; |
774 | // Don't care what the CV qualifiers for compare_value are, |
775 | // but we need to match D* when calling platform support. |
776 | D* old_value = const_cast<D*>(compare_value); |
777 | return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order); |
778 | } |
779 | }; |
780 | |
781 | // Handle cmpxchg for types that have a translator. |
782 | // |
783 | // All the involved types must be identical. |
784 | // |
785 | // This translates the original call into a call on the decayed |
786 | // arguments, and returns the recovered result of that translated |
787 | // call. |
788 | template<typename T> |
789 | struct Atomic::CmpxchgImpl< |
790 | T, T, T, |
791 | typename EnableIf<PrimitiveConversions::Translate<T>::value>::type> |
792 | { |
793 | T operator()(T volatile* dest, T compare_value, T exchange_value, |
794 | atomic_memory_order order) const { |
795 | typedef PrimitiveConversions::Translate<T> Translator; |
796 | typedef typename Translator::Decayed Decayed; |
797 | STATIC_ASSERT(sizeof(T) == sizeof(Decayed)); |
798 | return Translator::recover( |
799 | cmpxchg(reinterpret_cast<Decayed volatile*>(dest), |
800 | Translator::decay(compare_value), |
801 | Translator::decay(exchange_value), |
802 | order)); |
803 | } |
804 | }; |
805 | |
806 | template<typename Type, typename Fn, typename T> |
807 | inline T Atomic::cmpxchg_using_helper(Fn fn, |
808 | T volatile* dest, |
809 | T compare_value, |
810 | T exchange_value) { |
811 | STATIC_ASSERT(sizeof(Type) == sizeof(T)); |
812 | return PrimitiveConversions::cast<T>( |
813 | fn(PrimitiveConversions::cast<Type>(exchange_value), |
814 | reinterpret_cast<Type volatile*>(dest), |
815 | PrimitiveConversions::cast<Type>(compare_value))); |
816 | } |
817 | |
818 | inline uint32_t Atomic::CmpxchgByteUsingInt::set_byte_in_int(uint32_t n, |
819 | uint8_t b, |
820 | uint32_t idx) { |
821 | int bitsIdx = BitsPerByte * idx; |
822 | return (n & ~(static_cast<uint32_t>(0xff) << bitsIdx)) |
823 | | (static_cast<uint32_t>(b) << bitsIdx); |
824 | } |
825 | |
826 | inline uint8_t Atomic::CmpxchgByteUsingInt::get_byte_in_int(uint32_t n, |
827 | uint32_t idx) { |
828 | int bitsIdx = BitsPerByte * idx; |
829 | return (uint8_t)(n >> bitsIdx); |
830 | } |
831 | |
832 | template<typename T> |
833 | inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest, |
834 | T compare_value, |
835 | T exchange_value, |
836 | atomic_memory_order order) const { |
837 | STATIC_ASSERT(sizeof(T) == sizeof(uint8_t)); |
838 | uint8_t canon_exchange_value = exchange_value; |
839 | uint8_t canon_compare_value = compare_value; |
840 | volatile uint32_t* aligned_dest |
841 | = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t))); |
842 | size_t offset = pointer_delta(dest, aligned_dest, 1); |
843 | |
844 | uint32_t idx = (Endian::NATIVE == Endian::BIG) |
845 | ? (sizeof(uint32_t) - 1 - offset) |
846 | : offset; |
847 | |
848 | // current value may not be what we are looking for, so force it |
849 | // to that value so the initial cmpxchg will fail if it is different |
850 | uint32_t cur = set_byte_in_int(Atomic::load(aligned_dest), canon_compare_value, idx); |
851 | |
852 | // always execute a real cmpxchg so that we get the required memory |
853 | // barriers even on initial failure |
854 | do { |
855 | // value to swap in matches current value |
856 | // except for the one byte we want to update |
857 | uint32_t new_value = set_byte_in_int(cur, canon_exchange_value, idx); |
858 | |
859 | uint32_t res = cmpxchg(aligned_dest, cur, new_value, order); |
860 | if (res == cur) break; // success |
861 | |
862 | // at least one byte in the int changed value, so update |
863 | // our view of the current int |
864 | cur = res; |
865 | // if our byte is still as cur we loop and try again |
866 | } while (get_byte_in_int(cur, idx) == canon_compare_value); |
867 | |
868 | return PrimitiveConversions::cast<T>(get_byte_in_int(cur, idx)); |
869 | } |
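// [Illustrative walk-through, not part of this header.]  For a one-byte
// location at address 0x1003 on a little-endian machine: aligned_dest is
// 0x1000, offset is 3, idx is 3, and set_byte_in_int/get_byte_in_int work on
// bits [24..31] of the enclosing 32-bit word (on a big-endian machine idx
// would be sizeof(uint32_t) - 1 - 3 == 0).  The loop keeps retrying the
// word-sized cmpxchg while only *other* bytes of that word change underneath
// it, and exits once our byte has either been swapped in or been observed to
// differ from compare_value.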
870 | |
871 | // Handle xchg for integral and enum types. |
872 | // |
873 | // All the involved types must be identical. |
874 | template<typename T> |
875 | struct Atomic::XchgImpl< |
876 | T, T, |
877 | typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type> |
878 | { |
879 | T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const { |
880 | // Forward to the platform handler for the size of T. |
881 | return PlatformXchg<sizeof(T)>()(dest, exchange_value, order); |
882 | } |
883 | }; |
884 | |
885 | // Handle xchg for pointer types. |
886 | // |
887 | // The exchange_value must be implicitly convertible to the |
888 | // destination's type; it must be type-correct to store the |
889 | // exchange_value in the destination. |
890 | template<typename D, typename T> |
891 | struct Atomic::XchgImpl< |
892 | D*, T*, |
893 | typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type> |
894 | { |
895 | D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const { |
896 | // Allow derived to base conversion, and adding cv-qualifiers. |
897 | D* new_value = exchange_value; |
898 | return PlatformXchg<sizeof(D*)>()(dest, new_value, order); |
899 | } |
900 | }; |
901 | |
902 | // Handle xchg for types that have a translator. |
903 | // |
904 | // All the involved types must be identical. |
905 | // |
906 | // This translates the original call into a call on the decayed |
907 | // arguments, and returns the recovered result of that translated |
908 | // call. |
909 | template<typename T> |
910 | struct Atomic::XchgImpl< |
911 | T, T, |
912 | typename EnableIf<PrimitiveConversions::Translate<T>::value>::type> |
913 | { |
914 | T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const { |
915 | typedef PrimitiveConversions::Translate<T> Translator; |
916 | typedef typename Translator::Decayed Decayed; |
917 | STATIC_ASSERT(sizeof(T) == sizeof(Decayed)); |
918 | return Translator::recover( |
919 | xchg(reinterpret_cast<Decayed volatile*>(dest), |
920 | Translator::decay(exchange_value), |
921 | order)); |
922 | } |
923 | }; |
924 | |
925 | template<typename Type, typename Fn, typename T> |
926 | inline T Atomic::xchg_using_helper(Fn fn, |
927 | T volatile* dest, |
928 | T exchange_value) { |
929 | STATIC_ASSERT(sizeof(Type) == sizeof(T)); |
930 | // Notice the swapped order of arguments. Change when/if stubs are rewritten. |
931 | return PrimitiveConversions::cast<T>( |
932 | fn(PrimitiveConversions::cast<Type>(exchange_value), |
933 | reinterpret_cast<Type volatile*>(dest))); |
934 | } |
935 | |
936 | template<typename D, typename T> |
937 | inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) { |
938 | return XchgImpl<D, T>()(dest, exchange_value, order); |
939 | } |
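// [Illustrative usage sketch; 'Node' and '_pending' are hypothetical, not
//  part of this header.]  xchg unconditionally installs the new value and
// returns the previous one, which is handy for detaching an entire lock-free
// list in a single step:
//
//   struct Node { Node* next; };
//   static Node* volatile _pending = NULL;   // producers push onto this list
//
//   Node* claimed = Atomic::xchg(&_pending, (Node*)NULL);
//   // 'claimed' now points at the detached chain, which the claiming thread
//   // owns and can walk without further atomics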
940 | |
941 | #endif // SHARE_RUNTIME_ATOMIC_HPP |