File: jdk/src/hotspot/share/services/heapDumperCompression.cpp
Static-analyzer warning: line 481, column 19 — dereference of null pointer
(apparently the WriteWork* returned by _finished.remove_first() in
CompressionBackend::finish_work()).
| 1 | /* | |||
| 2 | * Copyright (c) 2020 SAP SE. All rights reserved. | |||
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |||
| 4 | * | |||
| 5 | * This code is free software; you can redistribute it and/or modify it | |||
| 6 | * under the terms of the GNU General Public License version 2 only, as | |||
| 7 | * published by the Free Software Foundation. | |||
| 8 | * | |||
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT | |||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |||
| 12 | * version 2 for more details (a copy is included in the LICENSE file that | |||
| 13 | * accompanied this code). | |||
| 14 | * | |||
| 15 | * You should have received a copy of the GNU General Public License version | |||
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, | |||
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |||
| 18 | * | |||
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |||
| 20 | * or visit www.oracle.com if you need additional information or have any | |||
| 21 | * questions. | |||
| 22 | * | |||
| 23 | */ | |||
| 24 | ||||
| 25 | #include "precompiled.hpp" | |||
| 26 | #include "jvm.h" | |||
| 27 | #include "runtime/arguments.hpp" | |||
| 28 | #include "runtime/mutexLocker.hpp" | |||
| 29 | #include "runtime/os.hpp" | |||
| 30 | #include "runtime/thread.inline.hpp" | |||
| 31 | #include "services/heapDumperCompression.hpp" | |||
| 32 | ||||
| 33 | ||||
| 34 | char const* FileWriter::open_writer() { | |||
| 35 | assert(_fd < 0, "Must not already be open")do { if (!(_fd < 0)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/services/heapDumperCompression.cpp" , 35, "assert(" "_fd < 0" ") failed", "Must not already be open" ); ::breakpoint(); } } while (0); | |||
| 36 | ||||
| 37 | _fd = os::create_binary_file(_path, _overwrite); | |||
| 38 | ||||
| 39 | if (_fd < 0) { | |||
| 40 | return os::strerror(errno(*__errno_location ())); | |||
| 41 | } | |||
| 42 | ||||
| 43 | return NULL__null; | |||
| 44 | } | |||
| 45 | ||||
| 46 | FileWriter::~FileWriter() { | |||
| 47 | if (_fd >= 0) { | |||
| 48 | os::close(_fd); | |||
| 49 | _fd = -1; | |||
| 50 | } | |||
| 51 | } | |||
| 52 | ||||
| 53 | char const* FileWriter::write_buf(char* buf, ssize_t size) { | |||
| 54 | assert(_fd >= 0, "Must be open")do { if (!(_fd >= 0)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/services/heapDumperCompression.cpp" , 54, "assert(" "_fd >= 0" ") failed", "Must be open"); :: breakpoint(); } } while (0); | |||
| 55 | assert(size > 0, "Must write at least one byte")do { if (!(size > 0)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/services/heapDumperCompression.cpp" , 55, "assert(" "size > 0" ") failed", "Must write at least one byte" ); ::breakpoint(); } } while (0); | |||
| 56 | ||||
| 57 | ssize_t n = (ssize_t) os::write(_fd, buf, (uint) size); | |||
| 58 | ||||
| 59 | if (n <= 0) { | |||
| 60 | return os::strerror(errno(*__errno_location ())); | |||
| 61 | } | |||
| 62 | ||||
| 63 | return NULL__null; | |||
| 64 | } | |||
| 65 | ||||
| 66 | ||||
// Signatures of the gzip entry points resolved at runtime from the
// JDK's zip library (see load_gzip_func()).
typedef char const* (*GzipInitFunc)(size_t, size_t*, size_t*, int);
typedef size_t(*GzipCompressFunc)(char*, size_t, char*, size_t, char*, size_t,
                                  int, char*, char const**);

// Lazily resolved function pointers. NOTE(review): the lookup itself is
// done under Zip_lock, but the assignment in GZipCompressor::init() is
// not — presumably init() is only reached single-threaded; confirm.
static GzipInitFunc gzip_init_func;
static GzipCompressFunc gzip_compress_func;
| 73 | ||||
| 74 | void* GZipCompressor::load_gzip_func(char const* name) { | |||
| 75 | char path[JVM_MAXPATHLEN4096 + 1]; | |||
| 76 | char ebuf[1024]; | |||
| 77 | void* handle; | |||
| 78 | MutexLocker locker(Zip_lock, Monitor::_no_safepoint_check_flag); | |||
| 79 | ||||
| 80 | if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "zip")) { | |||
| 81 | handle = os::dll_load(path, ebuf, sizeof ebuf); | |||
| 82 | ||||
| 83 | if (handle != NULL__null) { | |||
| 84 | return os::dll_lookup(handle, name); | |||
| 85 | } | |||
| 86 | } | |||
| 87 | ||||
| 88 | return NULL__null; | |||
| 89 | } | |||
| 90 | ||||
| 91 | char const* GZipCompressor::init(size_t block_size, size_t* needed_out_size, | |||
| 92 | size_t* needed_tmp_size) { | |||
| 93 | _block_size = block_size; | |||
| 94 | _is_first = true; | |||
| 95 | ||||
| 96 | if (gzip_compress_func == NULL__null) { | |||
| 97 | gzip_compress_func = (GzipCompressFunc) load_gzip_func("ZIP_GZip_Fully"); | |||
| 98 | ||||
| 99 | if (gzip_compress_func == NULL__null) { | |||
| 100 | return "Cannot get ZIP_GZip_Fully function"; | |||
| 101 | } | |||
| 102 | } | |||
| 103 | ||||
| 104 | if (gzip_init_func == NULL__null) { | |||
| 105 | gzip_init_func = (GzipInitFunc) load_gzip_func("ZIP_GZip_InitParams"); | |||
| 106 | ||||
| 107 | if (gzip_init_func == NULL__null) { | |||
| 108 | return "Cannot get ZIP_GZip_InitParams function"; | |||
| 109 | } | |||
| 110 | } | |||
| 111 | ||||
| 112 | char const* result = gzip_init_func(block_size, needed_out_size, | |||
| 113 | needed_tmp_size, _level); | |||
| 114 | *needed_out_size += 1024; // Add extra space for the comment in the first chunk. | |||
| 115 | ||||
| 116 | return result; | |||
| 117 | } | |||
| 118 | ||||
| 119 | char const* GZipCompressor::compress(char* in, size_t in_size, char* out, size_t out_size, | |||
| 120 | char* tmp, size_t tmp_size, size_t* compressed_size) { | |||
| 121 | char const* msg = NULL__null; | |||
| 122 | ||||
| 123 | if (_is_first) { | |||
| 124 | char buf[128]; | |||
| 125 | // Write the block size used as a comment in the first gzip chunk, so the | |||
| 126 | // code used to read it later can make a good choice of the buffer sizes it uses. | |||
| 127 | jio_snprintf(buf, sizeof(buf), "HPROF BLOCKSIZE=" SIZE_FORMAT"%" "l" "u", _block_size); | |||
| 128 | *compressed_size = gzip_compress_func(in, in_size, out, out_size, tmp, tmp_size, _level, | |||
| 129 | buf, &msg); | |||
| 130 | _is_first = false; | |||
| 131 | } else { | |||
| 132 | *compressed_size = gzip_compress_func(in, in_size, out, out_size, tmp, tmp_size, _level, | |||
| 133 | NULL__null, &msg); | |||
| 134 | } | |||
| 135 | ||||
| 136 | return msg; | |||
| 137 | } | |||
| 138 | ||||
| 139 | WorkList::WorkList() { | |||
| 140 | _head._next = &_head; | |||
| 141 | _head._prev = &_head; | |||
| 142 | } | |||
| 143 | ||||
| 144 | void WorkList::insert(WriteWork* before, WriteWork* work) { | |||
| 145 | work->_prev = before; | |||
| 146 | work->_next = before->_next; | |||
| 147 | before->_next = work; | |||
| 148 | work->_next->_prev = work; | |||
| 149 | } | |||
| 150 | ||||
| 151 | WriteWork* WorkList::remove(WriteWork* work) { | |||
| 152 | if (work != NULL__null) { | |||
| 153 | assert(work->_next != work, "Invalid next")do { if (!(work->_next != work)) { (*g_assert_poison) = 'X' ;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/services/heapDumperCompression.cpp" , 153, "assert(" "work->_next != work" ") failed", "Invalid next" ); ::breakpoint(); } } while (0); | |||
| 154 | assert(work->_prev != work, "Invalid prev")do { if (!(work->_prev != work)) { (*g_assert_poison) = 'X' ;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/services/heapDumperCompression.cpp" , 154, "assert(" "work->_prev != work" ") failed", "Invalid prev" ); ::breakpoint(); } } while (0); | |||
| 155 | work->_prev->_next = work->_next;; | |||
| 156 | work->_next->_prev = work->_prev; | |||
| 157 | work->_next = NULL__null; | |||
| 158 | work->_prev = NULL__null; | |||
| 159 | } | |||
| 160 | ||||
| 161 | return work; | |||
| 162 | } | |||
| 163 | ||||
| 164 | void WorkList::add_by_id(WriteWork* work) { | |||
| 165 | if (is_empty()) { | |||
| 166 | add_first(work); | |||
| 167 | } else { | |||
| 168 | WriteWork* last_curr = &_head; | |||
| 169 | WriteWork* curr = _head._next; | |||
| 170 | ||||
| 171 | while (curr->_id < work->_id) { | |||
| 172 | last_curr = curr; | |||
| 173 | curr = curr->_next; | |||
| 174 | ||||
| 175 | if (curr == &_head) { | |||
| 176 | add_last(work); | |||
| 177 | return; | |||
| 178 | } | |||
| 179 | } | |||
| 180 | ||||
| 181 | insert(last_curr, work); | |||
| 182 | } | |||
| 183 | } | |||
| 184 | ||||
| 185 | ||||
| 186 | ||||
// Sets up the backend: opens the writer, initializes the optional
// compressor (which reports the out/tmp buffer sizes it needs) and
// allocates the first work buffer. Any failure records the first error
// via set_error() and leaves the backend inactive.
CompressionBackend::CompressionBackend(AbstractWriter* writer,
     AbstractCompressor* compressor, size_t block_size, size_t max_waste) :
  _active(false),
  _err(NULL),
  _nr_of_threads(0),
  _works_created(0),
  _work_creation_failed(false),
  _id_to_write(0),
  _next_id(0),
  _in_size(block_size),
  _max_waste(max_waste),
  _out_size(0),
  _tmp_size(0),
  _written(0),
  _writer(writer),
  _compressor(compressor),
  _lock(new (std::nothrow) PaddedMonitor(Mutex::nosafepoint, "HProfCompressionBackend_lock")) {
  if (_writer == NULL) {
    set_error("Could not allocate writer");
  } else if (_lock == NULL) {
    set_error("Could not allocate lock");
  } else {
    set_error(_writer->open_writer());
  }

  // Ask the compressor for the buffer sizes it needs; must happen
  // before the first allocate_work() call below.
  if (_compressor != NULL) {
    set_error(_compressor->init(_in_size, &_out_size, &_tmp_size));
  }

  _current = allocate_work(_in_size, _out_size, _tmp_size);

  if (_current == NULL) {
    set_error("Could not allocate memory for buffer");
  }

  // Only become active when the whole setup succeeded (set_error keeps
  // the first error it was given).
  _active = (_err == NULL);
}
| 224 | ||||
// Tears down the backend. deactivate() must have run first: no worker
// threads may be alive and all queued work must have been written.
CompressionBackend::~CompressionBackend() {
  assert(!_active, "Must not be active by now");
  assert(_nr_of_threads == 0, "Must have no active threads");
  assert(_to_compress.is_empty() && _finished.is_empty(), "Still work to do");

  // Only the unused list and the current buffer can still hold works.
  free_work_list(&_unused);
  free_work(_current);
  assert(_works_created == 0, "All work must have been freed");

  delete _compressor;
  delete _writer;
  delete _lock;
}
| 238 | ||||
| 239 | void CompressionBackend::flush_buffer(MonitorLocker* ml) { | |||
| 240 | ||||
| 241 | // Make sure we write the last partially filled buffer. | |||
| 242 | if ((_current != NULL__null) && (_current->_in_used > 0)) { | |||
| 243 | _current->_id = _next_id++; | |||
| 244 | _to_compress.add_last(_current); | |||
| 245 | _current = NULL__null; | |||
| 246 | ml->notify_all(); | |||
| 247 | } | |||
| 248 | ||||
| 249 | // Wait for the threads to drain the compression work list and do some work yourself. | |||
| 250 | while (!_to_compress.is_empty()) { | |||
| 251 | do_foreground_work(); | |||
| 252 | } | |||
| 253 | } | |||
| 254 | ||||
| 255 | void CompressionBackend::flush_buffer() { | |||
| 256 | assert(_active, "Must be active")do { if (!(_active)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/services/heapDumperCompression.cpp" , 256, "assert(" "_active" ") failed", "Must be active"); ::breakpoint (); } } while (0); | |||
| 257 | ||||
| 258 | MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag); | |||
| 259 | flush_buffer(&ml); | |||
| 260 | } | |||
| 261 | ||||
| 262 | void CompressionBackend::deactivate() { | |||
| 263 | assert(_active, "Must be active")do { if (!(_active)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/services/heapDumperCompression.cpp" , 263, "assert(" "_active" ") failed", "Must be active"); ::breakpoint (); } } while (0); | |||
| 264 | ||||
| 265 | MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag); | |||
| 266 | flush_buffer(&ml); | |||
| 267 | ||||
| 268 | _active = false; | |||
| 269 | ml.notify_all(); | |||
| 270 | } | |||
| 271 | ||||
| 272 | void CompressionBackend::thread_loop() { | |||
| 273 | { | |||
| 274 | MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag); | |||
| 275 | _nr_of_threads++; | |||
| 276 | } | |||
| 277 | ||||
| 278 | WriteWork* work; | |||
| 279 | while ((work = get_work()) != NULL__null) { | |||
| 280 | do_compress(work); | |||
| 281 | finish_work(work); | |||
| 282 | } | |||
| 283 | ||||
| 284 | MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag); | |||
| 285 | _nr_of_threads--; | |||
| 286 | assert(_nr_of_threads >= 0, "Too many threads finished")do { if (!(_nr_of_threads >= 0)) { (*g_assert_poison) = 'X' ;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/services/heapDumperCompression.cpp" , 286, "assert(" "_nr_of_threads >= 0" ") failed", "Too many threads finished" ); ::breakpoint(); } } while (0); | |||
| 287 | } | |||
| 288 | ||||
| 289 | void CompressionBackend::set_error(char const* new_error) { | |||
| 290 | if ((new_error != NULL__null) && (_err == NULL__null)) { | |||
| 291 | _err = new_error; | |||
| 292 | } | |||
| 293 | } | |||
| 294 | ||||
// Allocates a WriteWork with an input buffer of 'in_size' bytes plus
// optional output ('out_size' > 0) and temporary ('tmp_size' > 0)
// buffers. Returns NULL on any allocation failure and sets
// _work_creation_failed so callers stop retrying.
WriteWork* CompressionBackend::allocate_work(size_t in_size, size_t out_size,
                                             size_t tmp_size) {
  WriteWork* result = (WriteWork*) os::malloc(sizeof(WriteWork), mtInternal);

  if (result == NULL) {
    _work_creation_failed = true;
    return NULL;
  }

  _works_created++;
  result->_in = (char*) os::malloc(in_size, mtInternal);
  result->_in_max = in_size;
  result->_in_used = 0;
  // Clear the optional buffer pointers up front so free_work() is safe
  // on the partial-failure paths below.
  result->_out = NULL;
  result->_tmp = NULL;

  if (result->_in == NULL) {
    goto fail;
  }

  // An output buffer is only needed when a compressor is configured.
  if (out_size > 0) {
    result->_out = (char*) os::malloc(out_size, mtInternal);
    result->_out_used = 0;
    result->_out_max = out_size;

    if (result->_out == NULL) {
      goto fail;
    }
  }

  if (tmp_size > 0) {
    result->_tmp = (char*) os::malloc(tmp_size, mtInternal);
    result->_tmp_max = tmp_size;

    if (result->_tmp == NULL) {
      goto fail;
    }
  }

  return result;

fail:
  // Releases whatever buffers were already allocated (and the work itself).
  free_work(result);
  _work_creation_failed = true;
  return NULL;
}
| 341 | ||||
| 342 | void CompressionBackend::free_work(WriteWork* work) { | |||
| 343 | if (work != NULL__null) { | |||
| 344 | os::free(work->_in); | |||
| 345 | os::free(work->_out); | |||
| 346 | os::free(work->_tmp); | |||
| 347 | os::free(work); | |||
| 348 | --_works_created; | |||
| 349 | } | |||
| 350 | } | |||
| 351 | ||||
| 352 | void CompressionBackend::free_work_list(WorkList* list) { | |||
| 353 | while (!list->is_empty()) { | |||
| 354 | free_work(list->remove_first()); | |||
| 355 | } | |||
| 356 | } | |||
| 357 | ||||
// Takes one item off the compression queue and processes it on the
// calling thread. The backend lock is dropped for the duration of the
// compression and write so worker threads are not blocked (finish_work
// re-acquires it internally).
void CompressionBackend::do_foreground_work() {
  assert(!_to_compress.is_empty(), "Must have work to do");
  assert(_lock->owned_by_self(), "Must have the lock");

  WriteWork* work = _to_compress.remove_first();
  MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
  do_compress(work);
  finish_work(work);
}
| 367 | ||||
| 368 | WriteWork* CompressionBackend::get_work() { | |||
| 369 | MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag); | |||
| 370 | ||||
| 371 | while (_active && _to_compress.is_empty()) { | |||
| 372 | ml.wait(); | |||
| 373 | } | |||
| 374 | ||||
| 375 | return _to_compress.remove_first(); | |||
| 376 | } | |||
| 377 | ||||
// Copies an externally owned buffer into the backend's current input
// buffer, first flushing the current buffer if it already holds data.
void CompressionBackend::flush_external_buffer(char* buffer, size_t used, size_t max) {
  assert(buffer != NULL && used != 0 && max != 0, "Invalid data send to compression backend");
  assert(_active == true, "Backend must be active when flushing external buffer");
  char* buf;
  size_t tmp_used = 0;
  size_t tmp_max = 0;

  MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
  // First try current buffer. Use it if empty.
  if (_current->_in_used == 0) {
    buf = _current->_in;
  } else {
    // If current buffer is not clean, flush it.
    // NOTE(review): get_new_buffer() can output buf == NULL and leave
    // _current == NULL when buffer allocation failed; the assert and
    // memcpy below would then dereference NULL — confirm the error
    // paths guarantee this cannot happen here.
    MutexUnlocker ml(_lock, Mutex::_no_safepoint_check_flag);
    get_new_buffer(&buf, &tmp_used, &tmp_max, true);
  }
  assert(_current->_in != NULL && _current->_in_max >= max &&
         _current->_in_used == 0, "Invalid buffer from compression backend");
  // Copy data to backend buffer.
  memcpy(buf, buffer, used);

  assert(_current->_in == buf, "Must be current");
  _current->_in_used += used;
}
| 402 | ||||
// Hands out the next input buffer to fill. When the caller returns
// data (*used > 0) or 'force_reset' is set, the current buffer is
// first queued for compression — unless the unused tail is larger than
// _max_waste, in which case the tail itself is handed back. Outputs
// NULL/0/0 when the backend is inactive or no buffer can be obtained.
// NOTE(review): some conditions on this path were truncated in the
// analyzer rendering and are reconstructed — verify against upstream.
void CompressionBackend::get_new_buffer(char** buffer, size_t* used, size_t* max, bool force_reset) {
  if (_active) {
    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
    if (*used > 0 || force_reset) {
      _current->_in_used += *used;
      // Check if we do not waste more than _max_waste. If yes, write the buffer.
      // Otherwise return the rest of the buffer as the new buffer.
      if (_current->_in_max - _current->_in_used <= _max_waste || force_reset) {
        _current->_id = _next_id++;
        _to_compress.add_last(_current);
        _current = NULL;
        ml.notify_all();
      } else {
        // Hand back the unused tail of the current buffer.
        *buffer = _current->_in + _current->_in_used;
        *used = 0;
        *max = _current->_in_max - _current->_in_used;
        return;
      }
    }

    // Wait until a reusable work object is available, creating new ones
    // or helping with foreground compression while we wait.
    while ((_current == NULL) && _unused.is_empty()) {
      // Add more work objects if needed.
      if (!_work_creation_failed && (_works_created <= _nr_of_threads)) {
        WriteWork* work = allocate_work(_in_size, _out_size, _tmp_size);

        if (work != NULL) {
          _unused.add_first(work);
        }
      } else if (!_to_compress.is_empty() && (_nr_of_threads == 0)) {
        // No worker threads: compress queued work ourselves so a
        // finished buffer eventually lands back on _unused.
        do_foreground_work();
      } else {
        ml.wait();
      }
    }

    if (_current == NULL) {
      _current = _unused.remove_first();
    }

    if (_current != NULL) {
      // Reset the recycled buffer before handing it out.
      _current->_in_used = 0;
      _current->_out_used = 0;
      *buffer = _current->_in;
      *used = 0;
      *max = _current->_in_max;

      return;
    }
  }

  // Inactive backend or no buffer obtainable: signal "no buffer".
  *buffer = NULL;
  *used = 0;
  *max = 0;

  return;
}
| 459 | ||||
| 460 | void CompressionBackend::do_compress(WriteWork* work) { | |||
| 461 | if (_compressor != NULL__null) { | |||
| 462 | char const* msg = _compressor->compress(work->_in, work->_in_used, work->_out, | |||
| 463 | work->_out_max, | |||
| 464 | work->_tmp, _tmp_size, &work->_out_used); | |||
| 465 | ||||
| 466 | if (msg != NULL__null) { | |||
| 467 | MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag); | |||
| 468 | set_error(msg); | |||
| 469 | } | |||
| 470 | } | |||
| 471 | } | |||
| 472 | ||||
| 473 | void CompressionBackend::finish_work(WriteWork* work) { | |||
| 474 | MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag); | |||
| 475 | ||||
| 476 | _finished.add_by_id(work); | |||
| 477 | ||||
| 478 | // Write all finished works as far as we can. | |||
| 479 | while (!_finished.is_empty() && (_finished.first()->_id == _id_to_write)) { | |||
| 480 | WriteWork* to_write = _finished.remove_first(); | |||
| 481 | size_t size = _compressor
| |||
| ||||
| 482 | char* p = _compressor == NULL__null ? to_write->_in : to_write->_out; | |||
| 483 | char const* msg = NULL__null; | |||
| 484 | ||||
| 485 | if (_err == NULL__null) { | |||
| 486 | _written += size; | |||
| 487 | MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag); | |||
| 488 | msg = _writer->write_buf(p, (ssize_t) size); | |||
| 489 | } | |||
| 490 | ||||
| 491 | set_error(msg); | |||
| 492 | _unused.add_first(to_write); | |||
| 493 | _id_to_write++; | |||
| 494 | } | |||
| 495 | ||||
| 496 | ml.notify_all(); | |||
| 497 | } |