Path: blob/master/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;

jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  trace_actual_reserved_page_size(reserved_heap_size, heap_rs);

  initialize_reserved_region(heap_rs);

  PSCardTable* card_table = new PSCardTable(heap_rs.region());
  card_table->initialize();
  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Make up the generations
  assert(MinOldSize <= OldSize && OldSize <= MaxOldSize, "Parameter check");
  assert(MinNewSize <= NewSize && NewSize <= MaxNewSize, "Parameter check");

  // Layout the reserved space for the generations.
  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");
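
  // Added note: heap_rs.first_part() carves the old gen out of the low end
  // of the reservation and heap_rs.last_part() gives the young gen the high
  // end, so the two generations meet at a single boundary address. The
  // "Boundaries must meet" assert further down relies on this split.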

  // Set up WorkGang
  _workers.initialize_workers();

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize,
      "old", 1);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  if (!PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  ParallelInitLogger::print();

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);

}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  PSParallelCompact::post_initialize();
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}
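
// Added note: the to-space is excluded from max_capacity() below because one
// survivor space is always kept empty as the copy reserve for scavenges.
// With UseAdaptiveSizePolicy the subtraction uses the policy's upper bound
// on survivor size rather than the current to-space capacity.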

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
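
// Illustrative sketch of the two levels (added comment, not upstream text):
//
//   mutator thread                          VM thread (at a safepoint)
//   --------------                          --------------------------
//   mem_allocate(size)                      failed_mem_allocate(size)
//     young_gen()->allocate(size) fails       scavenge and/or full GC,
//     VM_ParallelGCFailedAllocation op;       then retry young-gen and
//     VMThread::execute(&op);  ----------->   old-gen allocation
//     loop until satisfied, stalled on
//     the GCLocker, or out of memory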

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return a NULL ignoring the contents of
        // op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses). Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}
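
// Added note: once a death march has started, _death_march_count stays
// non-zero and mem_allocate_old_gen() (below) short-circuits up to 63
// consecutive allocations straight into the old gen instead of forcing yet
// another full GC; after that the counter resets and the normal
// GC-before-allocation path resumes.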

HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = old_gen()->allocate(size);
  if (res != NULL) {
    _size_policy->tenured_allocation(size * HeapWordSize);
  }
  return res;
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return allocate_old_gen_and_record(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return allocate_old_gen_and_record(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  // The do_full_collection() parameter clear_all_soft_refs
  // is interpreted here as maximum_compaction which will
  // cause SoftRefs to be cleared.
  bool maximum_compaction = clear_all_soft_refs;
  PSParallelCompact::invoke(maximum_compaction);
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc collection
// time over limit here, that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
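
// Added summary of the escalation implemented below:
//   1. scavenge (which may itself be promoted to a full GC), then young gen
//   2. full GC without maximum compaction (if not already done), young gen
//   3. old-gen allocation
//   4. full GC with maximum compaction (clears soft refs), then young gen
//   5. old-gen allocation; returning NULL here means we are out of memory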

HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL) {
    result = allocate_old_gen_and_record(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = allocate_old_gen_and_record(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = total_collections();
    full_gc_count = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count)) {
    return;
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
// The eden and survivor spaces are treated as single blocks as it is hard to divide
// these spaces.
// The old space is divided into fixed-size blocks.
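
// Added note on the claim-index layout used below:
//   index 0            -> eden
//   index 1            -> both survivor spaces (from and to)
//   index 2 .. N + 1   -> old-gen blocks 0 .. N - 1, where N is
//                         old_gen()->num_iterable_blocks(); the
//                         NumNonOldGenClaims offset is subtracted before
//                         calling object_iterate_block().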

class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }
  // Claim the block and get the block index.
  size_t claim_and_get_block() {
    size_t block_index = Atomic::fetch_and_add(&_claimed_index, 1u);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};

void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}

class PSScavengeParallelObjectIterator : public ParallelObjectIterator {
private:
  ParallelScavengeHeap* _heap;
  HeapBlockClaimer      _claimer;

public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

ParallelObjectIterator* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}
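
// Added note: the iterator above embeds a single HeapBlockClaimer that all
// worker threads share, so Atomic::fetch_and_add() hands out each block
// index exactly once across the workers.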

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::is_error_reported()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
      (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  if (young_gen() != NULL) {
    young_gen()->print_on(st);
  }
  if (old_gen() != NULL) {
    old_gen()->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  st->cr();
  PSParallelCompact::print_on_error(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
  // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
  if (log_is_enabled(Info, pagesize)) {
    const size_t page_size = rs.page_size();
    os::trace_page_sizes("Heap",
                         MinHeapSize,
                         reserved_heap_size,
                         page_size,
                         rs.base(),
                         rs.size());
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void ParallelScavengeHeap::flush_nmethod(nmethod* nm) {
  // nothing particular
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}