1 /*
2 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/classListParser.hpp"
29 #include "classfile/classLoaderExt.hpp"
30 #include "classfile/dictionary.hpp"
31 #include "classfile/loaderConstraints.hpp"
32 #include "classfile/javaClasses.inline.hpp"
33 #include "classfile/placeholders.hpp"
34 #include "classfile/symbolTable.hpp"
35 #include "classfile/stringTable.hpp"
36 #include "classfile/systemDictionary.hpp"
37 #include "classfile/systemDictionaryShared.hpp"
38 #include "code/codeCache.hpp"
39 #include "gc/shared/softRefPolicy.hpp"
40 #include "interpreter/bytecodeStream.hpp"
41 #include "interpreter/bytecodes.hpp"
42 #include "logging/log.hpp"
43 #include "logging/logMessage.hpp"
44 #include "memory/archiveUtils.inline.hpp"
45 #include "memory/dynamicArchive.hpp"
46 #include "memory/filemap.hpp"
47 #include "memory/heapShared.inline.hpp"
48 #include "memory/metaspace.hpp"
49 #include "memory/metaspaceClosure.hpp"
50 #include "memory/metaspaceShared.hpp"
51 #include "memory/resourceArea.hpp"
52 #include "memory/universe.hpp"
53 #include "oops/compressedOops.inline.hpp"
54 #include "oops/flatArrayKlass.hpp"
55 #include "oops/inlineKlass.hpp"
56 #include "oops/instanceClassLoaderKlass.hpp"
57 #include "oops/instanceMirrorKlass.hpp"
58 #include "oops/instanceRefKlass.hpp"
59 #include "oops/methodData.hpp"
60 #include "oops/objArrayKlass.hpp"
61 #include "oops/objArrayOop.hpp"
62 #include "oops/oop.inline.hpp"
63 #include "oops/typeArrayKlass.hpp"
64 #include "prims/jvmtiRedefineClasses.hpp"
65 #include "runtime/handles.inline.hpp"
66 #include "runtime/os.hpp"
67 #include "runtime/safepointVerifiers.hpp"
68 #include "runtime/signature.hpp"
69 #include "runtime/timerTrace.hpp"
70 #include "runtime/vmThread.hpp"
71 #include "runtime/vmOperations.hpp"
72 #include "utilities/align.hpp"
73 #include "utilities/bitMap.inline.hpp"
74 #include "utilities/ostream.hpp"
75 #include "utilities/defaultStream.hpp"
76 #include "utilities/hashtable.inline.hpp"
77 #if INCLUDE_G1GC
78 #include "gc/g1/g1CollectedHeap.hpp"
79 #endif
80
// Definitions of MetaspaceShared's static state (declared in metaspaceShared.hpp).
ReservedSpace MetaspaceShared::_shared_rs;            // dump-time reservation for the archive regions
VirtualSpace MetaspaceShared::_shared_vs;             // committed portion of _shared_rs
ReservedSpace MetaspaceShared::_symbol_rs;            // separate dump-time space for Symbols
VirtualSpace MetaspaceShared::_symbol_vs;             // committed portion of _symbol_rs
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
intx MetaspaceShared::_relocation_delta;
char* MetaspaceShared::_requested_base_address;
bool MetaspaceShared::_use_optimized_module_handling = true;
95
96 // The CDS archive is divided into the following regions:
97 // mc - misc code (the method entry trampolines, c++ vtables)
98 // rw - read-write metadata
99 // ro - read-only metadata and read-only tables
100 //
101 // ca0 - closed archive heap space #0
102 // ca1 - closed archive heap space #1 (may be empty)
103 // oa0 - open archive heap space #0
104 // oa1 - open archive heap space #1 (may be empty)
105 //
106 // The mc, rw, and ro regions are linearly allocated, starting from
107 // SharedBaseAddress, in the order of mc->rw->ro. The size of these 3 regions
108 // are page-aligned, and there's no gap between any consecutive regions.
109 //
110 // These 3 regions are populated in the following steps:
111 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
112 // temporarily allocated outside of the shared regions. Only the method entry
113 // trampolines are written into the mc region.
114 // [2] C++ vtables are copied into the mc region.
115 // [3] ArchiveCompactor copies RW metadata into the rw region.
116 // [4] ArchiveCompactor copies RO metadata into the ro region.
117 // [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
118 // are copied into the ro region as read-only tables.
119 //
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the mc/rw/ro regions.
122
// Grow this region's top to 'newtop', committing additional memory as needed
// via MetaspaceShared::commit_to(). Exits the VM if the region is exhausted
// or if the resulting offset would exceed the archive's encodable delta range.
char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    // Out of reserved space for this region: report and abort the dump.
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  if (_rs == MetaspaceShared::shared_rs()) {
    // Only the main shared space is subject to the MAX_SHARED_DELTA limit;
    // the delta is computed differently for static vs. dynamic dumps.
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects and would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}
151
152 char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
153 char* p = (char*)align_up(_top, alignment);
154 char* newtop = p + align_up(num_bytes, alignment);
155 expand_top_to(newtop);
156 memset(p, 0, newtop - p);
157 return p;
158 }
159
160 void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
161 assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
162 intptr_t *p = (intptr_t*)_top;
163 char* newtop = _top + sizeof(intptr_t);
164 expand_top_to(newtop);
165 *p = n;
166 if (need_to_mark) {
167 ArchivePtrMarker::mark_pointer(p);
168 }
169 }
170
// Log this region's usage (debug level) as a fraction of 'total_bytes'.
// The printed base address is relocated by final_delta() so it reflects the
// address the region will occupy at run time.
void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}

// Print diagnostics when a dump-time allocation fails. The extra "required"
// line is shown only for the region that actually ran out of space.
void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}
184
// Attach this region to the given reserved space. Nothing is committed here;
// memory is committed lazily as the region grows (see expand_top_to()).
void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

// Seal this region: align its end up to the reservation alignment and, if
// 'next' is non-NULL, start the next region immediately after this one
// within the same reserved space.
void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}
208
// The dump-time regions (see the layout comment at the top of this file),
// a separate region for Symbols, and running totals for the archived heap.
static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

// Attach the first dump region to the main shared reserved space.
void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) {
  first_space->init(&_shared_rs, &_shared_vs);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}

DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}

DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}

// Seal 'current' and start 'next' right after it. 'rs' is unused here and is
// kept only for interface compatibility with callers.
void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}

// Allocation entry points into the individual dump regions.
char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) {
  return _symbol_region.allocate(num_bytes);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

// Archive spaces are reserved at OS allocation granularity.
size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); }
246
// Returns true if 'shared_base' is usable as the archive base address. On
// 64-bit it must also be a valid base for compressed Klass pointer encoding;
// 32-bit has no such constraint.
static bool shared_base_valid(char* shared_base) {
#ifdef _LP64
  return CompressedKlassPointers::is_valid_base((address)shared_base);
#else
  return true;
#endif
}
254
255 static bool shared_base_too_high(char* shared_base, size_t cds_total) {
256 if (SharedBaseAddress != 0 && shared_base < (char*)SharedBaseAddress) {
257 // SharedBaseAddress is very high (e.g., 0xffffffffffffff00) so
258 // align_up(SharedBaseAddress, MetaspaceShared::reserved_space_alignment()) has wrapped around.
259 return true;
260 }
261 if (max_uintx - uintx(shared_base) < uintx(cds_total)) {
262 // The end of the archive will wrap around
263 return true;
264 }
265
266 return false;
267 }
268
// Compute the base address at which to attempt reserving the archive,
// starting from SharedBaseAddress. If that address is unusable (would wrap,
// or is invalid for this platform), revert to the platform default base.
static char* compute_shared_base(size_t cds_total) {
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  const char* err = NULL;
  if (shared_base_too_high(shared_base, cds_total)) {
    err = "too high";
  } else if (!shared_base_valid(shared_base)) {
    err = "invalid for this platform";
  }
  if (err) {
    log_warning(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is %s. Reverted to " INTPTR_FORMAT,
                     p2i((void*)SharedBaseAddress), err,
                     p2i((void*)Arguments::default_SharedBaseAddress()));
    // Fall back to the default and re-align; the assert below re-validates.
    SharedBaseAddress = Arguments::default_SharedBaseAddress();
    shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  }
  assert(!shared_base_too_high(shared_base, cds_total) && shared_base_valid(shared_base), "Sanity");
  return shared_base;
}
287
// Reserve the dump-time address space: one contiguous range that holds the
// archive regions and (on 64-bit with UseCompressedClassPointers) the
// temporary compressed class space. Also reserves a separate space for
// Symbols. On failure to reserve, the VM exits.
void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");

  const size_t reserve_alignment = MetaspaceShared::reserved_space_alignment();

#ifdef _LP64
  // On 64-bit VM we reserve a 4G range and, if UseCompressedClassPointers=1,
  // will use that to house both the archives and the ccs. See below for
  // details.
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited
  // virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  char* shared_base = compute_shared_base(cds_total);
  _requested_base_address = shared_base;

  // Whether to use SharedBaseAddress as attach address.
  bool use_requested_base = true;

  if (shared_base == NULL) {
    use_requested_base = false;
  }

  if (ArchiveRelocationMode == 1) {
    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
    use_requested_base = false;
  }

  // First try to reserve the space at the specified SharedBaseAddress.
  assert(!_shared_rs.is_reserved(), "must be");
  if (use_requested_base) {
    _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                               false /* large */, (char*)shared_base);
    if (_shared_rs.is_reserved()) {
      assert(_shared_rs.base() == shared_base, "should match");
    } else {
      log_info(cds)("dumptime space reservation: failed to map at "
                    "SharedBaseAddress " PTR_FORMAT, p2i(shared_base));
    }
  }
  if (!_shared_rs.is_reserved()) {
    // Get a reserved space anywhere if attaching at the SharedBaseAddress
    // fails:
    if (UseCompressedClassPointers) {
      // If we need to reserve class space as well, let the platform handle
      // the reservation.
      LP64_ONLY(_shared_rs =
                Metaspace::reserve_address_space_for_compressed_classes(cds_total);)
      NOT_LP64(ShouldNotReachHere();)
    } else {
      // anywhere is fine.
      _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                                 false /* large */, (char*)NULL);
    }
  }

  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64

  if (UseCompressedClassPointers) {

    assert(CompressedKlassPointers::is_valid_base((address)_shared_rs.base()), "Sanity");

    // On 64-bit VM, if UseCompressedClassPointers=1, the compressed class space
    // must be allocated near the cds such as that the compressed Klass pointer
    // encoding can be used to en/decode pointers from both cds and ccs. Since
    // Metaspace cannot do this (it knows nothing about cds), we do it for
    // Metaspace here and pass it the space to use for ccs.
    //
    // We do this by reserving space for the ccs behind the archives. Note
    // however that ccs follows a different alignment
    // (Metaspace::reserve_alignment), so there may be a gap between ccs and
    // cds.
    // We use a similar layout at runtime, see reserve_address_space_for_archives().
    //
    //                              +-- SharedBaseAddress (default = 0x800000000)
    //                              v
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |    Heap    | Archive |     | MC | RW | RO | [gap]  |    class space  |
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB -->|
    //
    // Note: ccs must follow the archives, and the archives must start at the
    // encoding base. However, the exact placement of ccs does not matter as
    // long as it resides in the encoding range of CompressedKlassPointers
    // and comes after the archive.
    //
    // We do this by splitting up the allocated 4G into 3G of archive space,
    // followed by 1G for the ccs:
    // + The upper 1 GB is used as the "temporary compressed class space"
    //   -- preload_classes() will store Klasses into this space.
    // + The lower 3 GB is used for the archive -- when preload_classes()
    //   is done, ArchiveCompactor will copy the class metadata into this
    //   space, first the RW parts, then the RO parts.

    // Starting address of ccs must be aligned to Metaspace::reserve_alignment()...
    size_t class_space_size = align_down(_shared_rs.size() / 4, Metaspace::reserve_alignment());
    address class_space_start = (address)align_down(_shared_rs.end() - class_space_size, Metaspace::reserve_alignment());
    size_t archive_size = class_space_start - (address)_shared_rs.base();

    // Split the reservation: lower part keeps the archive, upper part becomes ccs.
    ReservedSpace tmp_class_space = _shared_rs.last_part(archive_size);
    _shared_rs = _shared_rs.first_part(archive_size);

    // ... as does the size of ccs.
    tmp_class_space = tmp_class_space.first_part(class_space_size);
    CompressedClassSpaceSize = class_space_size;

    // Let Metaspace initialize ccs
    Metaspace::initialize_class_space(tmp_class_space);

    // and set up CompressedKlassPointers encoding.
    CompressedKlassPointers::initialize((address)_shared_rs.base(), cds_total);

    log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                  p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());

    log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                  CompressedClassSpaceSize, p2i(tmp_class_space.base()));

    assert(_shared_rs.end() == tmp_class_space.base() &&
           is_aligned(_shared_rs.base(), MetaspaceShared::reserved_space_alignment()) &&
           is_aligned(tmp_class_space.base(), Metaspace::reserve_alignment()) &&
           is_aligned(tmp_class_space.size(), Metaspace::reserve_alignment()), "Sanity");
  }

#endif

  init_shared_dump_space(&_mc_region);
  SharedBaseAddress = (size_t)_shared_rs.base();
  log_info(cds)("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));

  // We don't want any valid object to be at the very bottom of the archive.
  // See ArchivePtrMarker::mark_pointer().
  MetaspaceShared::misc_code_space_alloc(16);

  // Reserve a (lazily committed) separate space for Symbols created during dumping.
  size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
  _symbol_rs = ReservedSpace(symbol_rs_size);
  if (!_symbol_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for symbols",
                                  err_msg(SIZE_FORMAT " bytes.", symbol_rs_size));
  }
  _symbol_region.init(&_symbol_rs, &_symbol_vs);
}
440
441 // Called by universe_post_init()
442 void MetaspaceShared::post_initialize(TRAPS) {
443 if (UseSharedSpaces) {
444 int size = FileMapInfo::get_number_of_shared_paths();
445 if (size > 0) {
446 SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
447 if (!DynamicDumpSharedSpaces) {
448 FileMapInfo* info;
449 if (FileMapInfo::dynamic_info() == NULL) {
450 info = FileMapInfo::current_info();
451 } else {
452 info = FileMapInfo::dynamic_info();
453 }
454 ClassLoaderExt::init_paths_start_index(info->app_class_paths_start_index());
455 ClassLoaderExt::init_app_module_paths_start_index(info->app_module_paths_start_index());
456 }
457 }
458 }
459 }
460
// Holds strong references to the extra interned strings so they survive GC
// until they are archived.
static GrowableArrayCHeap<Handle, mtClassShared>* _extra_interned_strings = NULL;

// Read the "extra" symbols/strings file (-XX:SharedArchiveConfigFile) and
// pre-populate the SymbolTable / StringTable with its entries so they can be
// archived. Interned strings are kept alive via _extra_interned_strings;
// oversized (humongous, under G1) strings are deliberately dropped.
void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new GrowableArrayCHeap<Handle, mtClassShared>(10000);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len (utf8_length + 1) would overflow a 32-bit int.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer);
    } else{
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                   reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to it, so let's
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
      }
    }
  }
}
515
516 void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) {
517 Arguments::assert_is_dumping_archive();
518 char* base = rs->base();
519 size_t need_committed_size = newtop - base;
520 size_t has_committed_size = vs->committed_size();
521 if (need_committed_size < has_committed_size) {
522 return;
523 }
524
525 size_t min_bytes = need_committed_size - has_committed_size;
526 size_t preferred_bytes = 1 * M;
527 size_t uncommitted = vs->reserved_size() - has_committed_size;
528
529 size_t commit =MAX2(min_bytes, preferred_bytes);
530 commit = MIN2(commit, uncommitted);
531 assert(commit <= uncommitted, "sanity");
532
533 bool result = vs->expand_by(commit, false);
534 if (rs == &_shared_rs) {
535 ArchivePtrMarker::expand_ptr_end((address*)vs->high());
536 }
537
538 if (!result) {
539 vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
540 need_committed_size));
541 }
542
543 assert(rs == &_shared_rs || rs == &_symbol_rs, "must be");
544 const char* which = (rs == &_shared_rs) ? "shared" : "symbol";
545 log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
546 which, commit, vs->actual_committed_size(), vs->high());
547 }
548
// Initialize the bitmap that records which words in the shared space hold
// pointers needing relocation at run time.
void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
}
552
// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.
//
// The same routine runs at both dump time (writing) and run time (reading and
// verifying), so the tag/value order here IS the archive's serialization
// layout and must not be reordered.
void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  JavaClasses::serialize_offsets(soc);
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  InstanceMirrorKlass::serialize_offsets(soc);

  // Dump/restore well known classes (pointers)
  SystemDictionaryShared::serialize_well_known_klasses(soc);
  soc->do_tag(--tag);

  serialize_cloned_cpp_vtptrs(soc);
  soc->do_tag(--tag);

  // End-of-stream marker.
  soc->do_tag(666);
}
596
597 address MetaspaceShared::i2i_entry_code_buffers(size_t total_size) {
598 if (DumpSharedSpaces) {
599 if (_i2i_entry_code_buffers == NULL) {
600 _i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
601 _i2i_entry_code_buffers_size = total_size;
602 }
603 } else if (UseSharedSpaces) {
604 assert(_i2i_entry_code_buffers != NULL, "must already been initialized");
605 } else {
606 return NULL;
607 }
608
609 assert(_i2i_entry_code_buffers_size == total_size, "must not change");
610 return _i2i_entry_code_buffers;
611 }
612
613 uintx MetaspaceShared::object_delta_uintx(void* obj) {
614 Arguments::assert_is_dumping_archive();
615 if (DumpSharedSpaces) {
616 assert(shared_rs()->contains(obj), "must be");
617 } else {
618 assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
619 }
620 address base_address = address(SharedBaseAddress);
621 uintx deltax = address(obj) - base_address;
622 return deltax;
623 }
624
// Global object for holding classes that have been loaded. Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

// Comparator ordering Klass pointers by their names' fast_compare().
static int global_klass_compare(Klass** a, Klass **b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

// Add 'k' to _global_klass_objects (if not already present) and recurse into
// all of its higher-dimension array classes.
static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}
648
// KlassClosure that collects every loaded Klass into _global_klass_objects,
// skipping instance classes excluded by SystemDictionaryShared, and including
// all dimensions of array classes.
class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};
667
// Global object for holding symbols created during class loading. See SymbolTable::new_symbol
static GrowableArray<Symbol*>* _global_symbol_objects = NULL;

// Comparator ordering Symbol pointers by address. Duplicates are unexpected
// and logged (but still treated as equal).
static int compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else if (a[0] == b[0]) {
    ResourceMark rm;
    log_warning(cds)("Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 0;
  } else {
    return 1;
  }
}

// Record a newly created Symbol for later archiving. Thread-safe; the backing
// array is allocated lazily on first use.
void MetaspaceShared::add_symbol(Symbol* sym) {
  MutexLocker ml(CDSAddSymbol_lock, Mutex::_no_safepoint_check_flag);
  if (_global_symbol_objects == NULL) {
    _global_symbol_objects = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Symbol*>(2048, mtSymbol);
  }
  _global_symbol_objects->append(sym);
}

GrowableArray<Symbol*>* MetaspaceShared::collected_symbols() {
  return _global_symbol_objects;
}
694
695 static void remove_unshareable_in_classes() {
696 for (int i = 0; i < _global_klass_objects->length(); i++) {
697 Klass* k = _global_klass_objects->at(i);
698 if (!k->is_objArray_klass()) {
699 // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
700 // on their array classes.
701 assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
702 k->remove_unshareable_info();
703 }
704 }
705 }
706
707 static void remove_java_mirror_in_classes() {
708 for (int i = 0; i < _global_klass_objects->length(); i++) {
709 Klass* k = _global_klass_objects->at(i);
710 if (!k->is_objArray_klass()) {
711 // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
712 // on their array classes.
713 assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
714 k->remove_java_mirror();
715 }
716 }
717 }
718
719 static void clear_basic_type_mirrors() {
720 assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity");
721 Universe::set_int_mirror(NULL);
722 Universe::set_float_mirror(NULL);
723 Universe::set_double_mirror(NULL);
724 Universe::set_byte_mirror(NULL);
725 Universe::set_bool_mirror(NULL);
726 Universe::set_char_mirror(NULL);
727 Universe::set_long_mirror(NULL);
728 Universe::set_short_mirror(NULL);
729 Universe::set_void_mirror(NULL);
730 }
731
// Replace rewritable bytecodes in 'method' with their _nofast variants so the
// interpreter will not rewrite them at run time. Note that only the non-wide
// form of iload is rewritten.
static void rewrite_nofast_bytecode(const methodHandle& method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield:  *bcs.bcp() = Bytecodes::_nofast_getfield;  break;
    case Bytecodes::_putfield:  *bcs.bcp() = Bytecodes::_nofast_putfield;  break;
    case Bytecodes::_aload_0:   *bcs.bcp() = Bytecodes::_nofast_aload_0;   break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        // wide iload has a different encoding and is left untouched.
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}
750
// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread) {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      // Only instance classes have methods.
      InstanceKlass* ik = InstanceKlass::cast(k);
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(thread, ik);
    }
  }
}
765
766 void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik) {
767 for (int i = 0; i < ik->methods()->length(); i++) {
768 methodHandle m(thread, ik->methods()->at(i));
769 rewrite_nofast_bytecode(m);
770 Fingerprinter fp(m);
771 // The side effect of this call sets method's fingerprint field.
772 fp.fingerprint();
773 }
774 }
775
776 // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
777 // (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
778 //
779 // Addresses of the vtables and the methods may be different across JVM runs,
780 // if libjvm.so is dynamically loaded at a different base address.
781 //
782 // To ensure that the Metadata objects in the CDS archive always have the correct vtable:
783 //
784 // + at dump time: we redirect the _vptr to point to our own vtables inside
785 // the CDS image
786 // + at run time: we clone the actual contents of the vtables from libjvm.so
787 // into our own tables.
788
// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceClassLoaderKlass) \
  f(InstanceKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass) \
  f(FlatArrayKlass) \
  f(InlineKlass)

// A cloned C++ vtable as stored in the archive: the entry count followed by
// the entries themselves. The struct is variable-length; _cloned_vtable is
// declared with one element but actually holds vtable_size() entries.
class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  // Total intptr_t slots needed for a vtable of 'vtable_size' entries.
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size;
  }
  int vtable_size() { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable() { return &_cloned_vtable[0]; }
  void zero() { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};
819
820 static inline intptr_t* vtable_of(Metadata* m) {
821 return *((intptr_t**)m);
822 }
823
// Clones the C++ vtable of one Metadata subtype T into the CDS archive.
// All state is static; there is one instantiation of this template for each
// type listed in CPP_VTABLE_PATCH_TYPES_DO.
template <class T> class CppVtableCloner : public T {
  static CppVtableInfo* _info;  // the cloned vtable for T inside the archive

  // Determines the number of virtual-method slots in T's vtable
  // (see the CppVtableTesterA/B trick below).
  static int get_vtable_length(const char* name);

public:
  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
  static intptr_t* allocate(const char* name);

  // Clone the vtable to ...
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  // Zeroes the cloned slots so process-local addresses are not written to the archive file.
  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  // Returns true iff obj's vptr is the cloned vtable, i.e., obj is an archived object.
  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }

  // Records T's original (in-process) vtable pointer in _orig_cpp_vtptrs[kind].
  static void init_orig_cpp_vtptr(int kind);
};
848
// NULL until set by allocate() (dump time) or clone_vtable() (run time).
template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;
850
// Dump time: measures T's vtable, allocates a CppVtableInfo for it in the MC
// region, and fills it with a copy of the live vtable contents.
// Returns the address of the cloned slots.
template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_mc_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_mc_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  // clone_vtable must have filled exactly up to the region's current top.
  assert((char*)p == _mc_region.top(), "must be");

  return _info->cloned_vtable();
}
863
864 template <class T>
865 intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
866 if (!DumpSharedSpaces) {
867 assert(_info == 0, "_info is initialized only at dump time");
868 _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
869 }
870 T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
871 int n = info->vtable_size();
872 intptr_t* srcvtable = vtable_of(&tmp);
873 intptr_t* dstvtable = info->cloned_vtable();
874
875 // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
876 // safe to do memcpy.
877 log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
878 memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
879 return dstvtable + n;
880 }
881
882 // To determine the size of the vtable for each type, we use the following
883 // trick by declaring 2 subclasses:
884 //
// class CppVtableTesterB: public InstanceKlass {virtual int last_virtual_method() {return 1;} };
// class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
887 //
888 // CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
889 // - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
891 // - Their last entry is different.
892 //
893 // So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
894 // and find the first entry that's different.
895 //
896 // This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
897 // esoteric compilers.
898
// Adds exactly one virtual method to T; its last vtable entry differs from
// CppVtableTesterA's last entry (see get_vtable_length() below).
template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};
903
// Counterpart of CppVtableTesterB: same extra slot position, different entry.
template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};
912
913 template <class T>
914 int CppVtableCloner<T>::get_vtable_length(const char* name) {
915 CppVtableTesterA<T> a;
916 CppVtableTesterB<T> b;
917
918 intptr_t* avtable = vtable_of(&a);
919 intptr_t* bvtable = vtable_of(&b);
920
921 // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
922 int vtable_len = 1;
923 for (; ; vtable_len++) {
924 if (avtable[vtable_len] != bvtable[vtable_len]) {
925 break;
926 }
927 }
928 log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name);
929
930 return vtable_len;
931 }
932
// Allocates the cloned vtable for type c in the MC region and records its
// address in _cloned_cpp_vtptrs (marked so the pointer gets relocated).
#define ALLOC_CPP_VTABLE_CLONE(c) \
  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);

// Copies the live vtable contents for type c into the clone at p, then
// advances p past the copied slots to the next CppVtableInfo.
#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

// Zeroes type c's cloned vtable slots (used just before writing the archive file).
#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

// Records type c's original (in-process) vtable pointer.
#define INIT_ORIG_CPP_VTPTRS(c) \
  CppVtableCloner<c>::init_orig_cpp_vtptr(c##_Kind);

// Expands to an enum constant such as ConstantPool_Kind.
#define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind,
947
// One constant per cloned vtable type, in CPP_VTABLE_PATCH_TYPES_DO order.
enum ClonedVtableKind {
  // E.g., ConstantPool_Kind == 0, InstanceKlass_Kind == 1, etc.
  CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
  _num_cloned_vtable_kinds
};
953
// This is a map of all the original vtptrs. E.g., for
//     ConstantPool *cp = new (...) ConstantPool(...) ; // a dynamically allocated constant pool
// the following holds true:
//     _orig_cpp_vtptrs[ConstantPool_Kind] == ((intptr_t**)cp)[0]
static intptr_t* _orig_cpp_vtptrs[_num_cloned_vtable_kinds];
// Lazily initialized on the first call to get_archived_cpp_vtable().
static bool _orig_cpp_vtptrs_inited = false;
960
// Records T's original (runtime) vtable pointer into _orig_cpp_vtptrs[kind],
// so that get_archived_cpp_vtable() can later identify objects of type T by
// their vptr.
template <class T>
void CppVtableCloner<T>::init_orig_cpp_vtptr(int kind) {
  assert(kind < _num_cloned_vtable_kinds, "sanity");
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  intptr_t* srcvtable = vtable_of(&tmp);
  _orig_cpp_vtptrs[kind] = srcvtable;
}
968
// This is the index of all the cloned vtables. E.g., for
//     ConstantPool* cp = ....; // an archived constant pool
//     InstanceKlass* ik = ....;// an archived class
// the following holds true:
//     _cloned_cpp_vtptrs[ConstantPool_Kind]  == ((intptr_t**)cp)[0]
//     _cloned_cpp_vtptrs[InstanceKlass_Kind] == ((intptr_t**)ik)[0]
// Allocated in the MC region at dump time (allocate_cloned_cpp_vtptrs());
// restored at run time via serialize_cloned_cpp_vtptrs().
static intptr_t** _cloned_cpp_vtptrs = NULL;
976
// Reserves space in the MC region for the table of cloned vtable pointers,
// one entry per ClonedVtableKind. Dump-time only.
void MetaspaceShared::allocate_cloned_cpp_vtptrs() {
  assert(DumpSharedSpaces, "must");
  size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*);
  _cloned_cpp_vtptrs = (intptr_t**)_mc_region.allocate(vtptrs_bytes, sizeof(intptr_t*));
}
982
// Writes (dump time) or reads (run time) the _cloned_cpp_vtptrs table pointer.
void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
  soc->do_ptr((void**)&_cloned_cpp_vtptrs);
}
986
// Returns the cloned vtable pointer that should be installed into the archived
// copy of obj, or NULL if objects of this msotype carry no C++ vtable.
// The object's type is identified by matching its current vptr against the
// recorded original vtable pointers. Dump-time only.
intptr_t* MetaspaceShared::get_archived_cpp_vtable(MetaspaceObj::Type msotype, address obj) {
  if (!_orig_cpp_vtptrs_inited) {
    CPP_VTABLE_PATCH_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
    _orig_cpp_vtptrs_inited = true;
  }

  Arguments::assert_is_dumping_archive();
  int kind = -1;  // stays -1 for vtable-less types
  switch (msotype) {
  case MetaspaceObj::SymbolType:
  case MetaspaceObj::TypeArrayU1Type:
  case MetaspaceObj::TypeArrayU2Type:
  case MetaspaceObj::TypeArrayU4Type:
  case MetaspaceObj::TypeArrayU8Type:
  case MetaspaceObj::TypeArrayOtherType:
  case MetaspaceObj::ConstMethodType:
  case MetaspaceObj::ConstantPoolCacheType:
  case MetaspaceObj::AnnotationsType:
  case MetaspaceObj::MethodCountersType:
  case MetaspaceObj::RecordComponentType:
    // These have no vtables.
    break;
  case MetaspaceObj::MethodDataType:
    // We don't archive MethodData <-- should have been removed in removed_unsharable_info
    ShouldNotReachHere();
    break;
  default:
    // Identify the object's type by its vptr.
    for (kind = 0; kind < _num_cloned_vtable_kinds; kind ++) {
      if (vtable_of((Metadata*)obj) == _orig_cpp_vtptrs[kind]) {
        break;
      }
    }
    if (kind >= _num_cloned_vtable_kinds) {
      fatal("Cannot find C++ vtable for " INTPTR_FORMAT " -- you probably added"
            " a new subtype of Klass or MetaData without updating CPP_VTABLE_PATCH_TYPES_DO",
            p2i(obj));
    }
  }

  if (kind >= 0) {
    assert(kind < _num_cloned_vtable_kinds, "must be");
    return _cloned_cpp_vtptrs[kind];
  } else {
    return NULL;
  }
}
1033
// This can be called at both dump time and run time:
// - clone the contents of the c++ vtables into the space
//   allocated by allocate_cpp_vtable_clones()
// p points at the first CppVtableInfo; each CLONE_CPP_VTABLE expansion
// advances p past the vtable it fills.
void MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
}
1041
// Zeroes all cloned vtable slots so that process-local code addresses are not
// written into the archive file. Dump-time only.
void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}
1046
// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
// Returns the start of the cloned vtables in the MC region.
char* MetaspaceShared::allocate_cpp_vtable_clones() {
  char* cloned_vtables = _mc_region.top(); // This is the beginning of all the cloned vtables

  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is a intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);

  return cloned_vtables;
}
1063
// Returns true iff m's vptr is the cloned Method vtable, i.e., m is an
// archived Method. m must already be known to live in shared metaspace.
bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}
1068
// Appends one oop to the dump region: NULL is written as 0; a non-NULL oop is
// written as its compressed (narrow) encoding, which requires heap-object
// archiving to be enabled.
void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}
1079
1080 void WriteClosure::do_region(u_char* start, size_t size) {
1081 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
1082 assert(size % sizeof(intptr_t) == 0, "bad size");
1083 do_tag((int)size);
1084 while (size > 0) {
1085 _dump_region->append_intptr_t(*(intptr_t*)start, true);
1086 start += sizeof(intptr_t);
1087 size -= sizeof(intptr_t);
1088 }
1089 }
1090
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  // Returns a human-readable name for the given type (used by print_stats()).
  static const char * type_name(Type type) {
    switch(type) {
      SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  // Per-region (RO/RW) object counts and byte totals, indexed by Type.
  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  };

  // Tallies one object of the given type/size in the RO or RW bucket.
  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  // Tallies bytes allocated outside of ArchiveCompactor::allocate()
  // (see ArchiveCompactor::OtherROAllocMark).
  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all);
};
1144
// Prints a per-type table of RO/RW counts, bytes and percentages to the cds
// log, and asserts that the tallies add up to the given region totals.
// ro_all/rw_all/mc_all are the total byte sizes of the RO, RW and MC regions.
void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all;
  rw_all += mc_all; // mc is mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     %  |   rw_cnt   rw_bytes     %  |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
  msg.debug("%s", hdr);
  msg.debug("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc = percent_of(bytes, ro_all + rw_all);

    msg.debug(fmt_stats, name,
                         ro_count, ro_bytes, ro_perc,
                         rw_count, rw_bytes, rw_perc,
                         count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc = percent_of(all_bytes, ro_all + rw_all);

  msg.debug("%s", sep);
  msg.debug(fmt_stats, "Total",
                       all_ro_count, all_ro_bytes, all_ro_perc,
                       all_rw_count, all_rw_bytes, all_rw_perc,
                       all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}
1232
1233 // Populate the shared space.
1234
// VM operation that copies all collected metadata (and, when enabled, heap
// objects) into the shared regions and writes the archive file. Executed in
// the VM thread (see Thread* THREAD = VMThread::vm_thread() in doit()).
class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_class_stats();
  void print_region_stats(FileMapInfo* map_info);
  void print_bitmap_region_stats(size_t size, size_t total_size);
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, size_t total_size);
  void relocate_to_requested_base_address(CHeapBitMap* ptrmap);

public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace
1262
// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static const int INITIAL_TABLE_SIZE = 8087;
  static const int MAX_TABLE_SIZE = 1000000;

  static DumpAllocStats* _alloc_stats;

  // Maps the original address of each copied MetaspaceObj to the address of
  // its copy inside the shared regions.
  typedef KVHashtable<address, address, mtInternal> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };

  // Shallow-copies ref's object into the RO or RW region, installs the cloned
  // C++ vtable pointer (if the type has one), and records the old->new address
  // mapping in _new_loc_table.
  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      if (ref->msotype() == MetaspaceObj::ClassType) {
        // Save a pointer immediate in front of an InstanceKlass, so
        // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
        // without building another hashtable. See RunTimeSharedClassInfo::get_for()
        // in systemDictionaryShared.cpp.
        Klass* klass = (Klass*)obj;
        if (klass->is_instance_klass()) {
          SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
          _rw_region.allocate(sizeof(address), BytesPerWord);
        }
      }
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);

    // p's first word is still the original vptr (just memcpy'ed), so it can be
    // used to identify the object's type; replace it with the archived vtable.
    intptr_t* archived_vtable = MetaspaceShared::get_archived_cpp_vtable(ref->msotype(), (address)p);
    if (archived_vtable != NULL) {
      *(address*)p = (address)archived_vtable;
      ArchivePtrMarker::mark_pointer((address*)p);
    }

    assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
    _new_loc_table->add(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
    }
    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  }

  // Returns the relocated (in-archive) address of ref's object; the object
  // must have been copied by allocate() already.
  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->lookup(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;   // copy only objects whose read_only flag matches this
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
      return true; // recurse into ref.obj()
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
      return true; // recurse into ref.obj()
    }
    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
      assert_valid(type);

      address obj = ref->obj();
      address new_obj = get_new_loc(ref);
      size_t offset = pointer_delta(p, obj, sizeof(u1));
      intptr_t* new_p = (intptr_t*)(new_obj + offset);
      switch (type) {
      case _method_entry_ref:
        assert(*p == *new_p, "must be a copy");
        break;
      case _internal_pointer_ref:
        {
          // Fix up an interior pointer so that it points to the same offset
          // within the object's archived copy.
          size_t off = pointer_delta(*((address*)p), obj, sizeof(u1));
          assert(0 <= intx(off) && intx(off) < ref->size() * BytesPerWord, "must point to internal address");
          *((address*)new_p) = new_obj + off;
        }
        break;
      default:
        ShouldNotReachHere();
      }
      ArchivePtrMarker::mark_pointer((address*)new_p);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
        ArchivePtrMarker::mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  // Verifies that every reachable metaspace pointer now points into the archive.
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  // Copies all reachable MetaspaceObjs into the RW/RO regions (in that order),
  // then relocates all pointers among them and fixes symbol identity hashes.
  static void copy_and_compact() {
    ResourceMark rm;

    log_info(cds)("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      log_info(cds)("Allocating RW objects ... ");
      _mc_region.pack(&_rw_region);

      ResourceMark rm;
      ShallowCopier rw_copier(false);
      iterate_roots(&rw_copier);
    }
    {
      // allocate and shallow-copy of RO object, immediately following the RW region
      log_info(cds)("Allocating RO objects ... ");
      _rw_region.pack(&_ro_region);

      ResourceMark rm;
      ShallowCopier ro_copier(true);
      iterate_roots(&ro_copier);
    }
    {
      log_info(cds)("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc;
      iterate_roots(&emb_reloc);
    }
    {
      log_info(cds)("Relocating external roots ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }
    {
      log_info(cds)("Fixing symbol identity hash ... ");
      // Fixed seed so that archived identity hashes are deterministic.
      os::init_random(0x12345678);
      GrowableArray<Symbol*>* all_symbols = MetaspaceShared::collected_symbols();
      all_symbols->sort(compare_symbols_by_address);
      for (int i = 0; i < all_symbols->length(); i++) {
        assert(all_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
        all_symbols->at(i)->update_identity_hash();
      }
    }
#ifdef ASSERT
    {
      log_info(cds)("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif
  }

  // We must relocate SystemDictionary::_well_known_klasses only after we have copied the
  // java objects during dump_java_heap_objects(): during the object copy, we operate on
  // old objects which assert that their klass is the original klass.
  static void relocate_well_known_klasses() {
    {
      log_info(cds)("Relocating SystemDictionary::_well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the
    // shared strings) because their headers no longer point to valid Klasses.
  }

  static void iterate_roots(MetaspaceClosure* it) {
    // To ensure deterministic contents in the archive, we just need to ensure that
    // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
    // the MetaspaceObjs are located originally, as they are copied sequentially into
    // the archive during the iteration.
    //
    // The only issue here is that the symbol table and the system dictionaries may be
    // randomly ordered, so we copy the symbols and klasses into two arrays and sort
    // them deterministically.
    //
    // During -Xshare:dump, the order of Symbol creation is strictly determined by
    // the SharedClassListFile (class loading is done in a single thread and the JIT
    // is disabled). Also, Symbols are allocated in monotonically increasing addresses
    // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
    // ascending address order, we ensure that all Symbols are copied into deterministic
    // locations in the archive.
    GrowableArray<Symbol*>* symbols = _global_symbol_objects;
    for (int i = 0; i < symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
        it->push(_global_klass_objects->adr_at(i));
      }
    }
    FileMapInfo::metaspace_pointers_do(it, false);
    SystemDictionaryShared::dumptime_classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);

    it->finish();
  }

  // Returns the archived copy of orig_klass; it must have been copied already.
  static Klass* get_relocated_klass(Klass* orig_klass) {
    assert(DumpSharedSpaces, "dump time only");
    address* pp = _new_loc_table->lookup((address)orig_klass);
    assert(pp != NULL, "must be");
    Klass* klass = (Klass*)(*pp);
    assert(klass->is_klass(), "must be");
    return klass;
  }
};
1545
// Definitions of ArchiveCompactor's static members (set in initialize()).
DumpAllocStats* ArchiveCompactor::_alloc_stats;
ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
1548
// Writes the (already relocated) symbol table into the archive.
void VM_PopulateDumpSharedSpace::dump_symbols() {
  log_info(cds)("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}
1555
// Writes the misc read-only tables (shared dictionary, serialized data, heap
// oopmaps) into the RO region. Returns the start of the serialized data, which
// is recorded in the archive header. RO bytes written here are tallied via
// OtherROAllocMark.
char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  ArchiveCompactor::OtherROAllocMark mark;

  log_info(cds)("Removing java_mirror ... ");
  if (!HeapShared::is_heap_object_archiving_allowed()) {
    clear_basic_type_mirrors();
  }
  remove_java_mirror_in_classes();
  log_info(cds)("done. ");

  SystemDictionaryShared::write_to_archive();

  // Write the other data to the output array.
  char* start = _ro_region.top();
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);

  // Write the bitmaps for patching the archive heap regions
  _closed_archive_heap_oopmaps = NULL;
  _open_archive_heap_oopmaps = NULL;
  dump_archive_heap_oopmaps();

  return start;
}
1580
1581 void VM_PopulateDumpSharedSpace::print_class_stats() {
1582 log_info(cds)("Number of classes %d", _global_klass_objects->length());
1583 {
1584 int num_type_array = 0, num_obj_array = 0, num_inst = 0;
1585 for (int i = 0; i < _global_klass_objects->length(); i++) {
1586 Klass* k = _global_klass_objects->at(i);
1587 if (k->is_instance_klass()) {
1588 num_inst ++;
1589 } else if (k->is_objArray_klass()) {
1590 num_obj_array ++;
1591 } else {
1592 assert(k->is_typeArray_klass(), "sanity");
1593 num_type_array ++;
1594 }
1595 }
1596 log_info(cds)(" instance classes = %5d", num_inst);
1597 log_info(cds)(" obj array classes = %5d", num_obj_array);
1598 log_info(cds)(" type array classes = %5d", num_type_array);
1599 }
1600 }
1601
// Relocates every marked pointer in the dumped regions so that the archive can
// be mapped at MetaspaceShared::requested_base_address() without runtime
// relocation, then compacts the pointer bitmap. If the archive was already
// dumped at the requested address (delta == 0), only the compaction is needed.
void VM_PopulateDumpSharedSpace::relocate_to_requested_base_address(CHeapBitMap* ptrmap) {
  intx addr_delta = MetaspaceShared::final_delta();
  if (addr_delta == 0) {
    ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_ro_region.top());
  } else {
    // We are not able to reserve space at MetaspaceShared::requested_base_address() (due to ASLR).
    // This means that the current content of the archive is based on a random
    // address. Let's relocate all the pointers, so that it can be mapped to
    // MetaspaceShared::requested_base_address() without runtime relocation.
    //
    // Note: both the base and dynamic archive are written with
    // FileMapHeader::_requested_base_address == MetaspaceShared::requested_base_address()

    // Patch all pointers that are marked by ptrmap within this region,
    // where we have just dumped all the metaspace data.
    address patch_base = (address)SharedBaseAddress;
    address patch_end  = (address)_ro_region.top();
    size_t size = patch_end - patch_base;

    // the current value of the pointers to be patched must be within this
    // range (i.e., must point to valid metaspace objects)
    address valid_old_base = patch_base;
    address valid_old_end  = patch_end;

    // after patching, the pointers must point inside this range
    // (the requested location of the archive, as mapped at runtime).
    address valid_new_base = (address)MetaspaceShared::requested_base_address();
    address valid_new_end  = valid_new_base + size;

    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
                   p2i(valid_new_base), p2i(valid_new_end));

    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
                                      valid_new_base, valid_new_end, addr_delta, ptrmap);
    ptrmap->iterate(&patcher);
    ArchivePtrMarker::compact(patcher.max_non_null_offset());
  }
}
1641
1642 void VM_PopulateDumpSharedSpace::doit() {
1643 HeapShared::run_full_gc_in_vm_thread();
1644 CHeapBitMap ptrmap;
1645 MetaspaceShared::initialize_ptr_marker(&ptrmap);
1646
1647 // We should no longer allocate anything from the metaspace, so that:
1648 //
1649 // (1) Metaspace::allocate might trigger GC if we have run out of
1650 // committed metaspace, but we can't GC because we're running
1651 // in the VM thread.
1652 // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
1653 Metaspace::freeze();
1654 DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
1655
1656 Thread* THREAD = VMThread::vm_thread();
1657
1658 FileMapInfo::check_nonempty_dir_in_shared_path_table();
1659
1660 NOT_PRODUCT(SystemDictionary::verify();)
1661 // The following guarantee is meant to ensure that no loader constraints
1662 // exist yet, since the constraints table is not shared. This becomes
1663 // more important now that we don't re-initialize vtables/itables for
1664 // shared classes at runtime, where constraints were previously created.
1665 guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
1666 "loader constraints are not saved");
1667 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
1668 "placeholders are not saved");
1669
1670 // At this point, many classes have been loaded.
1671 // Gather systemDictionary classes in a global array and do everything to
1672 // that so we don't have to walk the SystemDictionary again.
1673 SystemDictionaryShared::check_excluded_classes();
1674 _global_klass_objects = new GrowableArray<Klass*>(1000);
1675 CollectClassesClosure collect_classes;
1676 ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
1677 _global_klass_objects->sort(global_klass_compare);
1678
1679 print_class_stats();
1680
1681 // Ensure the ConstMethods won't be modified at run-time
1682 log_info(cds)("Updating ConstMethods ... ");
1683 rewrite_nofast_bytecodes_and_calculate_fingerprints(THREAD);
1684 log_info(cds)("done. ");
1685
1686 // Remove all references outside the metadata
1687 log_info(cds)("Removing unshareable information ... ");
1688 remove_unshareable_in_classes();
1689 log_info(cds)("done. ");
1690
1691 MetaspaceShared::allocate_cloned_cpp_vtptrs();
1692 char* cloned_vtables = _mc_region.top();
1693 MetaspaceShared::allocate_cpp_vtable_clones();
1694
1695 ArchiveCompactor::initialize();
1696 ArchiveCompactor::copy_and_compact();
1697
1698 dump_symbols();
1699
1700 // Dump supported java heap objects
1701 _closed_archive_heap_regions = NULL;
1702 _open_archive_heap_regions = NULL;
1703 dump_java_heap_objects();
1704
1705 ArchiveCompactor::relocate_well_known_klasses();
1706
1707 char* serialized_data = dump_read_only_tables();
1708 _ro_region.pack();
1709
1710 // The vtable clones contain addresses of the current process.
1711 // We don't want to write these addresses into the archive. Same for i2i buffer.
1712 MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1713 memset(MetaspaceShared::i2i_entry_code_buffers(), 0,
1714 MetaspaceShared::i2i_entry_code_buffers_size());
1715
1716 // relocate the data so that it can be mapped to MetaspaceShared::requested_base_address()
1717 // without runtime relocation.
1718 relocate_to_requested_base_address(&ptrmap);
1719
1720 // Create and write the archive file that maps the shared spaces.
1721
1722 FileMapInfo* mapinfo = new FileMapInfo(true);
1723 mapinfo->populate_header(os::vm_allocation_granularity());
1724 mapinfo->set_serialized_data(serialized_data);
1725 mapinfo->set_cloned_vtables(cloned_vtables);
1726 mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(),
1727 MetaspaceShared::i2i_entry_code_buffers_size());
1728 mapinfo->open_for_write();
1729 MetaspaceShared::write_core_archive_regions(mapinfo, _closed_archive_heap_oopmaps, _open_archive_heap_oopmaps);
1730 _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
1731 _closed_archive_heap_regions,
1732 _closed_archive_heap_oopmaps,
1733 MetaspaceShared::first_closed_archive_heap_region,
1734 MetaspaceShared::max_closed_archive_heap_region);
1735 _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
1736 _open_archive_heap_regions,
1737 _open_archive_heap_oopmaps,
1738 MetaspaceShared::first_open_archive_heap_region,
1739 MetaspaceShared::max_open_archive_heap_region);
1740
1741 mapinfo->set_final_requested_base((char*)MetaspaceShared::requested_base_address());
1742 mapinfo->set_header_crc(mapinfo->compute_header_crc());
1743 mapinfo->write_header();
1744 print_region_stats(mapinfo);
1745 mapinfo->close();
1746
1747 if (log_is_enabled(Info, cds)) {
1748 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1749 int(_mc_region.used()));
1750 }
1751
1752 if (PrintSystemDictionaryAtExit) {
1753 SystemDictionary::print();
1754 }
1755
1756 if (AllowArchivingWithJavaAgent) {
1757 warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
1758 "for testing purposes only and should not be used in a production environment");
1759 }
1760
1761 // There may be other pending VM operations that operate on the InstanceKlasses,
1762 // which will fail because InstanceKlasses::remove_unshareable_info()
1763 // has been called. Forget these operations and exit the VM directly.
1764 vm_direct_exit(0);
1765 }
1766
// Log per-region size statistics for the just-written archive, each as a
// percentage of the total reserved archive size. Called after all regions
// have been packed, so the sizes are final.
void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) {
  // Print statistics of all the regions
  const size_t bitmap_used = map_info->space_at(MetaspaceShared::bm)->used();
  const size_t bitmap_reserved = map_info->space_at(MetaspaceShared::bm)->used_aligned();
  // The heap regions contribute the same value to both totals: they are
  // written fully packed, so used == reserved for them.
  const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
                                _mc_region.reserved() +
                                bitmap_reserved +
                                _total_closed_archive_region_size +
                                _total_open_archive_region_size;
  const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
                             _mc_region.used() +
                             bitmap_used +
                             _total_closed_archive_region_size +
                             _total_open_archive_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  print_bitmap_region_stats(bitmap_used, total_reserved);
  print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

  log_debug(cds)("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                 total_bytes, total_reserved, total_u_perc);
}
1793
1794 void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
1795 log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
1796 size, size/double(total_size)*100.0, size);
1797 }
1798
1799 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
1800 const char *name, size_t total_size) {
1801 int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
1802 for (int i = 0; i < arr_len; i++) {
1803 char* start = (char*)heap_mem->at(i).start();
1804 size_t size = heap_mem->at(i).byte_size();
1805 char* top = start + size;
1806 log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
1807 name, i, size, size/double(total_size)*100.0, size, p2i(start));
1808
1809 }
1810 }
1811
// Write the three core metadata regions (mc, rw, ro) and the bitmap region
// to the archive file. The bitmap region carries the pointer-relocation
// bitmap together with the closed/open heap-region oopmaps.
void MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo,
                                                 GrowableArray<ArchiveHeapOopmapInfo>* closed_oopmaps,
                                                 GrowableArray<ArchiveHeapOopmapInfo>* open_oopmaps) {
  // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
  // MetaspaceShared::n_regions (internal to hotspot).
  assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");

  // mc contains the trampoline code for method entries, which are patched at run time,
  // so it needs to be read/write.
  write_region(mapinfo, mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
  write_region(mapinfo, rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
  write_region(mapinfo, ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
  mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_oopmaps, open_oopmaps);
}
1826
// Helper: write a single dump region's used bytes [base, base + used) into
// the archive file, recording the requested protection attributes.
void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}
1830
// Update a Java object to point its Klass* to the new location after
// shared archive has been compacted.
// Dump time only: rewrites o's klass pointer to the klass's post-compaction
// address as recorded by ArchiveCompactor.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}
1838
1839 Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
1840 assert(DumpSharedSpaces, "sanity");
1841 k = ArchiveCompactor::get_relocated_klass(k);
1842 if (is_final) {
1843 k = (Klass*)(address(k) + final_delta());
1844 }
1845 return k;
1846 }
1847
// Klass closure that links each visited InstanceKlass (rewriting bytecodes
// and creating the cpCache). It records whether any class's state changed,
// so the caller can re-run the closure to a fixpoint, since linking may
// cause additional classes to be loaded.
class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;        // named THREAD so HAS_PENDING_EXCEPTION etc. work here
  bool    _made_progress;
 public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  // Clear the progress flag before another iteration over all classes.
  void reset()               { _made_progress = false; }
  // True if any class changed state during the last iteration.
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      // For dynamic CDS dump, only link classes loaded by the builtin class loaders.
      bool do_linking = DumpSharedSpaces ? true : !ik->is_shared_unregistered_class();
      if (do_linking) {
        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. Class verification is done according
        // to -Xverify setting.
        _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

        if (DumpSharedSpaces) {
          // The following function is used to resolve all Strings in the statically
          // dumped classes to archive all the Strings. The archive heap is not supported
          // for the dynamic archive.
          ik->constants()->resolve_class_constants(THREAD);
        }
      }
    }
  }
};
1879
// Link every loaded class, repeating the pass until no class changes state
// (a fixpoint), because linking/verification can pull in more classes.
void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // We need to iterate because verification may cause additional classes
  // to be loaded.
  LinkSharedClassesClosure link_closure(THREAD);
  do {
    link_closure.reset();
    ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
  } while (link_closure.made_progress());
}
1890
// One-time setup before dumping: reject system properties that are
// unsupported while dumping, and initialize the shared class path.
void MetaspaceShared::prepare_for_dumping() {
  Arguments::check_unsupported_dumping_properties();
  ClassLoader::initialize_shared_path();
}
1895
// Preload classes from a list, populate the shared spaces and dump to a
// file.
// The class list comes from -XX:SharedClassListFile if given, otherwise
// from the default "classlist" file next to the JDK's lib directory.
// After loading and linking, the actual dump runs as a VM operation
// (VM_PopulateDumpSharedSpace) which exits the VM when done.
void MetaspaceShared::preload_and_dump(TRAPS) {
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
    ResourceMark rm(THREAD);
    char class_list_path_str[JVM_MAXPATHLEN];
    // Preload classes to be shared.
    const char* class_list_path;
    if (SharedClassListFile == NULL) {
      // Construct the path to the class list (in jre/lib)
      // Walk up two directories from the location of the VM and
      // optionally tack on "lib" (depending on platform)
      os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
      // Strip the last three path components (e.g. lib/server/libjvm.so).
      for (int i = 0; i < 3; i++) {
        char *end = strrchr(class_list_path_str, *os::file_separator());
        if (end != NULL) *end = '\0';
      }
      int class_list_path_len = (int)strlen(class_list_path_str);
      if (class_list_path_len >= 3) {
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
          if (class_list_path_len < JVM_MAXPATHLEN - 4) {
            // Append "<sep>lib" (4 characters) in place.
            jio_snprintf(class_list_path_str + class_list_path_len,
                         sizeof(class_list_path_str) - class_list_path_len,
                         "%slib", os::file_separator());
            class_list_path_len += 4;
          }
        }
      }
      if (class_list_path_len < JVM_MAXPATHLEN - 10) {
        // Append "<sep>classlist" to form the default list location.
        jio_snprintf(class_list_path_str + class_list_path_len,
                     sizeof(class_list_path_str) - class_list_path_len,
                     "%sclasslist", os::file_separator());
      }
      class_list_path = class_list_path_str;
    } else {
      class_list_path = SharedClassListFile;
    }

    log_info(cds)("Loading classes to share ...");
    _has_error_classes = false;
    int class_count = preload_classes(class_list_path, THREAD);
    if (ExtraSharedClassListFile) {
      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
    }
    log_info(cds)("Loading classes to share: done.");

    log_info(cds)("Shared spaces: preloaded %d classes", class_count);

    if (SharedArchiveConfigFile) {
      log_info(cds)("Reading extra data from %s ...", SharedArchiveConfigFile);
      read_extra_data(SharedArchiveConfigFile, THREAD);
    }
    log_info(cds)("Reading extra data: done.");

    HeapShared::init_subgraph_entry_fields(THREAD);

    // Rewrite and link classes
    log_info(cds)("Rewriting and linking classes ...");

    // Link any classes which got missed. This would happen if we have loaded classes that
    // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
    // fails verification, all other interfaces that were not specified in the classlist but
    // are implemented by K are not verified.
    link_and_cleanup_shared_classes(CATCH);
    log_info(cds)("Rewriting and linking classes: done");

    VM_PopulateDumpSharedSpace op;
    MutexLocker ml(THREAD, HeapShared::is_heap_object_archiving_allowed() ?
                   Heap_lock : NULL);     // needed by HeapShared::run_gc()
    VMThread::execute(&op);
  }
}
1968
1969
// Parse the classlist file at class_list_path, load and link every listed
// class, and return the number of classes successfully loaded.
// A ClassNotFoundException is logged as a warning; any other loading
// exception is cleared without a message and the class is skipped.
int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
  ClassListParser parser(class_list_path);
  int class_count = 0;

  while (parser.parse_one_line()) {
    Klass* klass = parser.load_current_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      if (klass == NULL &&
          (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
        // print a warning only when the pending exception is class not found
        log_warning(cds)("Preload Warning: Cannot find %s", parser.current_class_name());
      }
      CLEAR_PENDING_EXCEPTION;
    }
    if (klass != NULL) {
      if (log_is_enabled(Trace, cds)) {
        ResourceMark rm(THREAD);
        log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
      }

      if (klass->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(klass);

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. The linking is done as soon as classes
        // are loaded in order that the related data structures (klass and
        // cpCache) are located together.
        try_link_class(ik, THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
      }

      class_count++;
    }
  }

  return class_count;
}
2007
// Returns true if the class's status has changed
// Attempt to link ik unless it is already linked or has previously failed
// verification. A verification failure is recorded via
// SystemDictionaryShared::set_class_has_failed_verification() and the
// pending exception is cleared, so no exception propagates to the caller.
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
  Arguments::assert_is_dumping_archive();
  if (ik->init_state() < InstanceKlass::linked &&
      !SystemDictionaryShared::has_class_failed_verification(ik)) {
    // Save the flag so it can be restored after linking, even if we
    // temporarily override it below.
    bool saved = BytecodeVerificationLocal;
    if (ik->is_shared_unregistered_class() && ik->class_loader() == NULL) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes. Since we are using the NULL classloader
      // to load non-system classes for customized class loaders during dumping,
      // we need to temporarily change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note this can cause the parent system
      // classes also being verified. The extra overhead is acceptable during
      // dumping.
      BytecodeVerificationLocal = BytecodeVerificationRemote;
    }
    ik->link_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      ResourceMark rm(THREAD);
      log_warning(cds)("Preload Warning: Verification failed for %s",
                    ik->external_name());
      CLEAR_PENDING_EXCEPTION;
      SystemDictionaryShared::set_class_has_failed_verification(ik);
      _has_error_classes = true;
    }
    BytecodeVerificationLocal = saved;
    return true;
  } else {
    return false;
  }
}
2039
2040 #if INCLUDE_CDS_JAVA_HEAP
// Archive the supported java heap objects into the closed and open archive
// heap regions, then write the subgraph info table (allocated in the ro
// region via OtherROAllocMark).
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  // The closed and open archive heap space has maximum two regions.
  // See FileMapInfo::write_archive_heap_regions() for details.
  _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
  _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
  HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
                                        _open_archive_heap_regions);
  ArchiveCompactor::OtherROAllocMark mark;
  HeapShared::write_subgraph_info_table();
}
2051
2052 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
2053 if (HeapShared::is_heap_object_archiving_allowed()) {
2054 _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
2055 dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);
2056
2057 _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
2058 dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
2059 }
2060 }
2061
// For each heap region, compute a bitmap of its oop locations (via
// HeapShared::calculate_oopmap), copy it into a C-heap buffer, and append
// the buffer plus its bit/byte sizes to oopmaps. The buffers are not freed
// here; the dump ends with vm_direct_exit(), so their lifetime is the
// remainder of the dumping VM.
void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                                           GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
  for (int i=0; i<regions->length(); i++) {
    ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
    size_t size_in_bits = oopmap.size();
    size_t size_in_bytes = oopmap.size_in_bytes();
    // Copy out of the resource-allocated bitmap into C heap, since the
    // oopmap must outlive the current ResourceMark scope.
    uintptr_t* buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
    oopmap.write_to(buffer, size_in_bytes);
    log_info(cds, heap)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
                        INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
                        p2i(buffer), size_in_bytes,
                        p2i(regions->at(i).start()), regions->at(i).byte_size());

    ArchiveHeapOopmapInfo info;
    info._oopmap = (address)buffer;
    info._oopmap_size_in_bits = size_in_bits;
    info._oopmap_size_in_bytes = size_in_bytes;
    oopmaps->append(info);
  }
}
2082 #endif // INCLUDE_CDS_JAVA_HEAP
2083
// Restore one pointer slot from the serialized data stream.
void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previous initialized pointer.");
  intptr_t obj = nextPtr();
  // Tags are encoded as small negative values (see do_tag); a real pointer
  // must not look like one. NOTE(review): the (intptr_t) casts are
  // redundant -- obj is already an intptr_t.
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}
2091
2092 void ReadClosure::do_u4(u4* p) {
2093 intptr_t obj = nextPtr();
2094 *p = (u4)(uintx(obj));
2095 }
2096
2097 void ReadClosure::do_bool(bool* p) {
2098 intptr_t obj = nextPtr();
2099 *p = (bool)(uintx(obj));
2100 }
2101
// Read a tag from the stream and verify it matches the expected value.
// This catches layout mismatches between the archive writer and reader:
// assert_mark() also checks in product builds.
void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}
2109
2110 void ReadClosure::do_oop(oop *p) {
2111 narrowOop o = (narrowOop)nextPtr();
2112 if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
2113 p = NULL;
2114 } else {
2115 assert(HeapShared::is_heap_object_archiving_allowed(),
2116 "Archived heap object is not allowed");
2117 assert(HeapShared::open_archive_heap_region_mapped(),
2118 "Open archive heap region is not mapped");
2119 *p = HeapShared::decode_from_archive(o);
2120 }
2121 }
2122
2123 void ReadClosure::do_region(u_char* start, size_t size) {
2124 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
2125 assert(size % sizeof(intptr_t) == 0, "bad size");
2126 do_tag((int)size);
2127 while (size > 0) {
2128 *(intptr_t*)start = nextPtr();
2129 start += sizeof(intptr_t);
2130 size -= sizeof(intptr_t);
2131 }
2132 }
2133
// Record the [base, top) address range of the mapped shared metaspace.
// static_top marks the boundary between the static (base) archive and the
// dynamic (top) archive; it equals top when no dynamic archive is mapped.
void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
  assert(base <= static_top && static_top <= top, "must be");
  _shared_metaspace_static_top = static_top;
  MetaspaceObj::set_shared_metaspace_range(base, top);
}
2139
// Return true if the given address is inside the mapped shared region with
// index idx; always false when shared spaces are not in use.
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
}
2144
2145 bool MetaspaceShared::is_in_trampoline_frame(address addr) {
2146 if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
2147 return true;
2148 }
2149 return false;
2150 }
2151
2152 bool MetaspaceShared::is_shared_dynamic(void* p) {
2153 if ((p < MetaspaceObj::shared_metaspace_top()) &&
2154 (p >= _shared_metaspace_static_top)) {
2155 return true;
2156 } else {
2157 return false;
2158 }
2159 }
2160
// Map the static (and, if present, dynamic) archive into the process at
// startup. Mapping is tried first at the requested base address and, on
// mmap failure (e.g. ASLR), again at an OS-chosen address. On total
// failure, CDS is turned off (UseSharedSpaces = false) and startup
// continues without shared spaces.
void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;

  FileMapInfo* static_mapinfo = open_static_archive();
  FileMapInfo* dynamic_mapinfo = NULL;

  if (static_mapinfo != NULL) {
    // The dynamic archive is only considered on top of a valid static archive.
    dynamic_mapinfo = open_dynamic_archive();

    // First try to map at the requested address
    result = map_archives(static_mapinfo, dynamic_mapinfo, true);
    if (result == MAP_ARCHIVE_MMAP_FAILURE) {
      // Mapping has failed (probably due to ASLR). Let's map at an address chosen
      // by the OS.
      log_info(cds)("Try to map archive(s) at an alternative address");
      result = map_archives(static_mapinfo, dynamic_mapinfo, false);
    }
  }

  if (result == MAP_ARCHIVE_SUCCESS) {
    bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
    char* cds_base = static_mapinfo->mapped_base();
    char* cds_end = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
    set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
    _relocation_delta = static_mapinfo->relocation_delta();
    // The dynamic archive, when mapped, carries the authoritative shared
    // path table; otherwise use the static archive's.
    if (dynamic_mapped) {
      FileMapInfo::set_shared_path_table(dynamic_mapinfo);
    } else {
      FileMapInfo::set_shared_path_table(static_mapinfo);
    }
    _requested_base_address = static_mapinfo->requested_base_address();
  } else {
    set_shared_metaspace_range(NULL, NULL, NULL);
    UseSharedSpaces = false;
    FileMapInfo::fail_continue("Unable to map shared spaces");
    if (PrintSharedArchiveAndExit) {
      vm_exit_during_initialization("Unable to use shared archive.");
    }
  }

  // Free the FileMapInfo objects for archives that did not end up mapped.
  if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
    delete static_mapinfo;
  }
  if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
    delete dynamic_mapinfo;
  }
}
2209
2210 FileMapInfo* MetaspaceShared::open_static_archive() {
2211 FileMapInfo* mapinfo = new FileMapInfo(true);
2212 if (!mapinfo->initialize()) {
2213 delete(mapinfo);
2214 return NULL;
2215 }
2216 return mapinfo;
2217 }
2218
2219 FileMapInfo* MetaspaceShared::open_dynamic_archive() {
2220 if (DynamicDumpSharedSpaces) {
2221 return NULL;
2222 }
2223 if (Arguments::GetSharedDynamicArchivePath() == NULL) {
2224 return NULL;
2225 }
2226
2227 FileMapInfo* mapinfo = new FileMapInfo(false);
2228 if (!mapinfo->initialize()) {
2229 delete(mapinfo);
2230 return NULL;
2231 }
2232 return mapinfo;
2233 }
2234
2235 // use_requested_addr:
2236 // true = map at FileMapHeader::_requested_base_address
2237 // false = map at an alternative address picked by OS.
2238 MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
2239 bool use_requested_addr) {
2240 if (use_requested_addr && static_mapinfo->requested_base_address() == NULL) {
2241 log_info(cds)("Archive(s) were created with -XX:SharedBaseAddress=0. Always map at os-selected address.");
2242 return MAP_ARCHIVE_MMAP_FAILURE;
2243 }
2244
2245 PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
2246 // For product build only -- this is for benchmarking the cost of doing relocation.
2247 // For debug builds, the check is done below, after reserving the space, for better test coverage
2248 // (see comment below).
2249 log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
2250 return MAP_ARCHIVE_MMAP_FAILURE;
2251 });
2252
2253 if (ArchiveRelocationMode == 2 && !use_requested_addr) {
2254 log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address");
2255 return MAP_ARCHIVE_MMAP_FAILURE;
2256 };
2257
2258 if (dynamic_mapinfo != NULL) {
2259 // Ensure that the OS won't be able to allocate new memory spaces between the two
2260 // archives, or else it would mess up the simple comparision in MetaspaceObj::is_shared().
2261 assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
2262 }
2263
2264 ReservedSpace archive_space_rs, class_space_rs;
2265 MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
2266 char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
2267 use_requested_addr, archive_space_rs,
2268 class_space_rs);
2269 if (mapped_base_address == NULL) {
2270 result = MAP_ARCHIVE_MMAP_FAILURE;
2271 log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
2272 } else {
2273
2274 #ifdef ASSERT
2275 // Some sanity checks after reserving address spaces for archives
2276 // and class space.
2277 assert(archive_space_rs.is_reserved(), "Sanity");
2278 if (Metaspace::using_class_space()) {
2279 // Class space must closely follow the archive space. Both spaces
2280 // must be aligned correctly.
2281 assert(class_space_rs.is_reserved(),
2282 "A class space should have been reserved");
2283 assert(class_space_rs.base() >= archive_space_rs.end(),
2284 "class space should follow the cds archive space");
2285 assert(is_aligned(archive_space_rs.base(),
2286 MetaspaceShared::reserved_space_alignment()),
2287 "Archive space misaligned");
2288 assert(is_aligned(class_space_rs.base(),
2289 Metaspace::reserve_alignment()),
2290 "class space misaligned");
2291 }
2292 #endif // ASSERT
2293
2294 log_debug(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
2295 p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
2296 log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
2297 p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
2298
2299 if (MetaspaceShared::use_windows_memory_mapping()) {
2300 // We have now reserved address space for the archives, and will map in
2301 // the archive files into this space.
2302 //
2303 // Special handling for Windows: on Windows we cannot map a file view
2304 // into an existing memory mapping. So, we unmap the address range we
2305 // just reserved again, which will make it available for mapping the
2306 // archives.
2307 // Reserving this range has not been for naught however since it makes
2308 // us reasonably sure the address range is available.
2309 //
2310 // But still it may fail, since between unmapping the range and mapping
2311 // in the archive someone else may grab the address space. Therefore
2312 // there is a fallback in FileMap::map_region() where we just read in
2313 // the archive files sequentially instead of mapping it in. We couple
2314 // this with use_requested_addr, since we're going to patch all the
2315 // pointers anyway so there's no benefit to mmap.
2316 if (use_requested_addr) {
2317 log_info(cds)("Windows mmap workaround: releasing archive space.");
2318 archive_space_rs.release();
2319 }
2320 }
2321 MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
2322 MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
2323 map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
2324
2325 DEBUG_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
2326 // This is for simulating mmap failures at the requested address. In
2327 // debug builds, we do it here (after all archives have possibly been
2328 // mapped), so we can thoroughly test the code for failure handling
2329 // (releasing all allocated resource, etc).
2330 log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
2331 if (static_result == MAP_ARCHIVE_SUCCESS) {
2332 static_result = MAP_ARCHIVE_MMAP_FAILURE;
2333 }
2334 if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
2335 dynamic_result = MAP_ARCHIVE_MMAP_FAILURE;
2336 }
2337 });
2338
2339 if (static_result == MAP_ARCHIVE_SUCCESS) {
2340 if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
2341 result = MAP_ARCHIVE_SUCCESS;
2342 } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
2343 assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
2344 // No need to retry mapping the dynamic archive again, as it will never succeed
2345 // (bad file, etc) -- just keep the base archive.
2346 log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
2347 dynamic_mapinfo->full_path());
2348 result = MAP_ARCHIVE_SUCCESS;
2349 // TODO, we can give the unused space for the dynamic archive to class_space_rs, but there's no
2350 // easy API to do that right now.
2351 } else {
2352 result = MAP_ARCHIVE_MMAP_FAILURE;
2353 }
2354 } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) {
2355 result = MAP_ARCHIVE_OTHER_FAILURE;
2356 } else {
2357 result = MAP_ARCHIVE_MMAP_FAILURE;
2358 }
2359 }
2360
2361 if (result == MAP_ARCHIVE_SUCCESS) {
2362 SharedBaseAddress = (size_t)mapped_base_address;
2363 LP64_ONLY({
2364 if (Metaspace::using_class_space()) {
2365 // Set up ccs in metaspace.
2366 Metaspace::initialize_class_space(class_space_rs);
2367
2368 // Set up compressed Klass pointer encoding: the encoding range must
2369 // cover both archive and class space.
2370 address cds_base = (address)static_mapinfo->mapped_base();
2371 address ccs_end = (address)class_space_rs.end();
2372 CompressedKlassPointers::initialize(cds_base, ccs_end - cds_base);
2373
2374 // map_heap_regions() compares the current narrow oop and klass encodings
2375 // with the archived ones, so it must be done after all encodings are determined.
2376 static_mapinfo->map_heap_regions();
2377 }
2378 });
2379 log_info(cds)("Using optimized module handling %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled");
2380 } else {
2381 unmap_archive(static_mapinfo);
2382 unmap_archive(dynamic_mapinfo);
2383 release_reserved_spaces(archive_space_rs, class_space_rs);
2384 }
2385
2386 return result;
2387 }
2388
2389
// This will reserve two address spaces suitable to house Klass structures, one
//  for the cds archives (static archive and optionally dynamic archive) and
//  optionally one more for ccs.
2393 //
2394 // Since both spaces must fall within the compressed class pointer encoding
2395 // range, they are allocated close to each other.
2396 //
2397 // Space for archives will be reserved first, followed by a potential gap,
2398 // followed by the space for ccs:
2399 //
2400 // +-- Base address A B End
2401 // | | | |
2402 // v v v v
2403 // +-------------+--------------+ +----------------------+
2404 // | static arc | [dyn. arch] | [gap] | compr. class space |
2405 // +-------------+--------------+ +----------------------+
2406 //
2407 // (The gap may result from different alignment requirements between metaspace
2408 // and CDS)
2409 //
2410 // If UseCompressedClassPointers is disabled, only one address space will be
2411 // reserved:
2412 //
2413 // +-- Base address End
2414 // | |
2415 // v v
2416 // +-------------+--------------+
2417 // | static arc | [dyn. arch] |
2418 // +-------------+--------------+
2419 //
2420 // Base address: If use_archive_base_addr address is true, the Base address is
2421 // determined by the address stored in the static archive. If
2422 // use_archive_base_addr address is false, this base address is determined
2423 // by the platform.
2424 //
// If UseCompressedClassPointers=1, the range encompassing both spaces will be
//  suitable to en/decode narrow Klass pointers: the base will be valid for
//  encoding, and the range [Base, End) must not surpass KlassEncodingMetaspaceMax.
2428 //
2429 // Return:
2430 //
2431 // - On success:
2432 // - archive_space_rs will be reserved and large enough to host static and
2433 // if needed dynamic archive: [Base, A).
2434 // archive_space_rs.base and size will be aligned to CDS reserve
2435 // granularity.
2436 // - class_space_rs: If UseCompressedClassPointers=1, class_space_rs will
2437 // be reserved. Its start address will be aligned to metaspace reserve
2438 // alignment, which may differ from CDS alignment. It will follow the cds
2439 // archive space, close enough such that narrow class pointer encoding
2440 // covers both spaces.
2441 // If UseCompressedClassPointers=0, class_space_rs remains unreserved.
2442 // - On error: NULL is returned and the spaces remain unreserved.
char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
                                                          FileMapInfo* dynamic_mapinfo,
                                                          bool use_archive_base_addr,
                                                          ReservedSpace& archive_space_rs,
                                                          ReservedSpace& class_space_rs) {

  // Either attach at the base address recorded in the static archive, or let
  // the platform choose an address (base_address == NULL means "anywhere").
  address const base_address = (address) (use_archive_base_addr ? static_mapinfo->requested_base_address() : NULL);
  const size_t archive_space_alignment = MetaspaceShared::reserved_space_alignment();

  // Size and requested location of the archive_space_rs (for both static and dynamic archives).
  // The static archive is mapped at offset 0; the combined size is the end offset
  // of the last archive present (dynamic if it exists, else static).
  assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
  size_t archive_end_offset = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
  size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);

  // If a base address is given, it must have valid alignment and be suitable as encoding base.
  if (base_address != NULL) {
    assert(is_aligned(base_address, archive_space_alignment),
           "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
    if (Metaspace::using_class_space()) {
      assert(CompressedKlassPointers::is_valid_base(base_address),
             "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
    }
  }

  if (!Metaspace::using_class_space()) {
    // Get the simple case out of the way first:
    // no compressed class space, simple allocation.
    archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
                                     false /* bool large */, (char*)base_address);
    if (archive_space_rs.is_reserved()) {
      assert(base_address == NULL ||
             (address)archive_space_rs.base() == base_address, "Sanity");
      // Register archive space with NMT.
      MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
      return archive_space_rs.base();
    }
    return NULL;
  }

#ifdef _LP64

  // Complex case: two spaces adjacent to each other, both to be addressable
  // with narrow class pointers.
  // We reserve the whole range spanning both spaces, then split that range up.

  const size_t class_space_alignment = Metaspace::reserve_alignment();

  // To simplify matters, lets assume that metaspace alignment will always be
  // equal or a multiple of archive alignment.
  assert(is_power_of_2(class_space_alignment) &&
         is_power_of_2(archive_space_alignment) &&
         class_space_alignment >= archive_space_alignment,
         "Sanity");

  const size_t class_space_size = CompressedClassSpaceSize;
  assert(CompressedClassSpaceSize > 0 &&
         is_aligned(CompressedClassSpaceSize, class_space_alignment),
         "CompressedClassSpaceSize malformed: "
         SIZE_FORMAT, CompressedClassSpaceSize);

  // The class space starts at the first class-space-aligned address at or
  // after the end of the archive space; the difference (possibly zero) is
  // the alignment gap described in the comment above.
  const size_t ccs_begin_offset = align_up(base_address + archive_space_size,
                                           class_space_alignment) - base_address;
  const size_t gap_size = ccs_begin_offset - archive_space_size;

  const size_t total_range_size =
      align_up(archive_space_size + gap_size + class_space_size,
               os::vm_allocation_granularity());

  ReservedSpace total_rs;
  if (base_address != NULL) {
    // Reserve at the given archive base address, or not at all.
    total_rs = ReservedSpace(total_range_size, archive_space_alignment,
                             false /* bool large */, (char*) base_address);
  } else {
    // Reserve at any address, but leave it up to the platform to choose a good one.
    total_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size);
  }

  if (!total_rs.is_reserved()) {
    return NULL;
  }

  // Paranoid checks:
  assert(base_address == NULL || (address)total_rs.base() == base_address,
         "Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_rs.base()));
  assert(is_aligned(total_rs.base(), archive_space_alignment), "Sanity");
  assert(total_rs.size() == total_range_size, "Sanity");
  assert(CompressedKlassPointers::is_valid_base((address)total_rs.base()), "Sanity");

  // Now split up the space into ccs and cds archive. For simplicity, just leave
  // the gap reserved at the end of the archive space.
  archive_space_rs = total_rs.first_part(ccs_begin_offset,
                                         (size_t)os::vm_allocation_granularity(),
                                         /*split=*/true);
  class_space_rs = total_rs.last_part(ccs_begin_offset);

  assert(is_aligned(archive_space_rs.base(), archive_space_alignment), "Sanity");
  assert(is_aligned(archive_space_rs.size(), archive_space_alignment), "Sanity");
  assert(is_aligned(class_space_rs.base(), class_space_alignment), "Sanity");
  assert(is_aligned(class_space_rs.size(), class_space_alignment), "Sanity");

  // NMT: fix up the space tags
  MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
  MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);

  return archive_space_rs.base();

#else
  // Compressed class space is a 64-bit-only feature; we should never get
  // here on 32-bit platforms (using_class_space() returned true above).
  ShouldNotReachHere();
  return NULL;
#endif

}
2556
2557 void MetaspaceShared::release_reserved_spaces(ReservedSpace& archive_space_rs,
2558 ReservedSpace& class_space_rs) {
2559 if (archive_space_rs.is_reserved()) {
2560 log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
2561 archive_space_rs.release();
2562 }
2563 if (class_space_rs.is_reserved()) {
2564 log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
2565 class_space_rs.release();
2566 }
2567 }
2568
2569 static int archive_regions[] = {MetaspaceShared::mc,
2570 MetaspaceShared::rw,
2571 MetaspaceShared::ro};
2572 static int archive_regions_count = 3;
2573
2574 MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
2575 assert(UseSharedSpaces, "must be runtime");
2576 if (mapinfo == NULL) {
2577 return MAP_ARCHIVE_SUCCESS; // The dynamic archive has not been specified. No error has happened -- trivially succeeded.
2578 }
2579
2580 mapinfo->set_is_mapped(false);
2581
2582 if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) {
2583 log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT
2584 " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity());
2585 return MAP_ARCHIVE_OTHER_FAILURE;
2586 }
2587
2588 MapArchiveResult result =
2589 mapinfo->map_regions(archive_regions, archive_regions_count, mapped_base_address, rs);
2590
2591 if (result != MAP_ARCHIVE_SUCCESS) {
2592 unmap_archive(mapinfo);
2593 return result;
2594 }
2595
2596 if (!mapinfo->validate_shared_path_table()) {
2597 unmap_archive(mapinfo);
2598 return MAP_ARCHIVE_OTHER_FAILURE;
2599 }
2600
2601 mapinfo->set_is_mapped(true);
2602 return MAP_ARCHIVE_SUCCESS;
2603 }
2604
2605 void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
2606 assert(UseSharedSpaces, "must be runtime");
2607 if (mapinfo != NULL) {
2608 mapinfo->unmap_regions(archive_regions, archive_regions_count);
2609 mapinfo->set_is_mapped(false);
2610 }
2611 }
2612
// Read the miscellaneous data from the shared file, and
// serialize it out to its various destinations.

void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *static_mapinfo = FileMapInfo::current_info();
  // Publish the location and size of the archived i2i entry code buffers.
  _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
  _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
  // Set up the cloned C++ vtables recorded in the archive (see clone_cpp_vtables).
  char* buffer = static_mapinfo->cloned_vtables();
  clone_cpp_vtables((intptr_t*)buffer);

  // Verify various attributes of the archive, plus initialize the
  // shared string/symbol tables
  buffer = static_mapinfo->serialized_data();
  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);

  // Initialize the run-time symbol table.
  SymbolTable::create_table();

  static_mapinfo->patch_archived_heap_embedded_pointers();

  // Close the mapinfo file
  static_mapinfo->close();

  // Unmap the bm region; it is only read during the steps above.
  static_mapinfo->unmap_region(MetaspaceShared::bm);

  // If a dynamic archive is also mapped, deserialize its symbol table and
  // dictionary headers, then close its file as well.
  FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
  if (dynamic_mapinfo != NULL) {
    intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
    ReadClosure rc(&buffer);
    SymbolTable::serialize_shared_table_header(&rc, false);
    SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
    dynamic_mapinfo->close();
  }

  // -XX:+PrintSharedArchiveAndExit: report whether the archive is valid
  // (optionally dumping the shared dictionary) and terminate the VM.
  if (PrintSharedArchiveAndExit) {
    if (PrintSharedDictionary) {
      tty->print_cr("\nShared classes:\n");
      SystemDictionaryShared::print_on(tty);
    }
    if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
      tty->print_cr("archive is invalid");
      vm_exit(1);
    } else {
      tty->print_cr("archive is valid");
      vm_exit(0);
    }
  }
}
2663
2664 // JVM/TI RedefineClasses() support:
2665 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
2666 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2667
2668 if (UseSharedSpaces) {
2669 // remap the shared readonly space to shared readwrite, private
2670 FileMapInfo* mapinfo = FileMapInfo::current_info();
2671 if (!mapinfo->remap_shared_readonly_as_readwrite()) {
2672 return false;
2673 }
2674 if (FileMapInfo::dynamic_info() != NULL) {
2675 mapinfo = FileMapInfo::dynamic_info();
2676 if (!mapinfo->remap_shared_readonly_as_readwrite()) {
2677 return false;
2678 }
2679 }
2680 _remapped_readwrite = true;
2681 }
2682 return true;
2683 }
2684
// Report that dump region 'name' could not satisfy an allocation of
// 'needed_bytes', then abort VM initialization.
void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
  // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
  // or so.
  // Print the usage of every dump region so the log shows the full picture,
  // not just the region that overflowed.
  _mc_region.print_out_of_space_msg(name, needed_bytes);
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}
2696
2697 // This is used to relocate the pointers so that the base archive can be mapped at
2698 // MetaspaceShared::requested_base_address() without runtime relocation.
2699 intx MetaspaceShared::final_delta() {
2700 return intx(MetaspaceShared::requested_base_address()) // We want the base archive to be mapped to here at runtime
2701 - intx(SharedBaseAddress); // .. but the base archive is mapped at here at dump time
2702 }
2703
2704 void MetaspaceShared::print_on(outputStream* st) {
2705 if (UseSharedSpaces || DumpSharedSpaces) {
2706 st->print("CDS archive(s) mapped at: ");
2707 address base;
2708 address top;
2709 if (UseSharedSpaces) { // Runtime
2710 base = (address)MetaspaceObj::shared_metaspace_base();
2711 address static_top = (address)_shared_metaspace_static_top;
2712 top = (address)MetaspaceObj::shared_metaspace_top();
2713 st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top));
2714 } else if (DumpSharedSpaces) { // Dump Time
2715 base = (address)_shared_rs.base();
2716 top = (address)_shared_rs.end();
2717 st->print("[" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(top));
2718 }
2719 st->print("size " SIZE_FORMAT ", ", top - base);
2720 st->print("SharedBaseAddress: " PTR_FORMAT ", ArchiveRelocationMode: %d.", SharedBaseAddress, (int)ArchiveRelocationMode);
2721 } else {
2722 st->print("CDS disabled.");
2723 }
2724 st->cr();
2725 }
2726
2727
2728
2729
2730