Path: blob/master/src/hotspot/share/gc/serial/defNewGeneration.cpp
41152 views
/*1* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "gc/serial/defNewGeneration.inline.hpp"26#include "gc/serial/serialGcRefProcProxyTask.hpp"27#include "gc/serial/serialHeap.inline.hpp"28#include "gc/serial/tenuredGeneration.hpp"29#include "gc/shared/adaptiveSizePolicy.hpp"30#include "gc/shared/ageTable.inline.hpp"31#include "gc/shared/cardTableRS.hpp"32#include "gc/shared/collectorCounters.hpp"33#include "gc/shared/gcArguments.hpp"34#include "gc/shared/gcHeapSummary.hpp"35#include "gc/shared/gcLocker.hpp"36#include "gc/shared/gcPolicyCounters.hpp"37#include "gc/shared/gcTimer.hpp"38#include "gc/shared/gcTrace.hpp"39#include "gc/shared/gcTraceTime.inline.hpp"40#include "gc/shared/generationSpec.hpp"41#include "gc/shared/genOopClosures.inline.hpp"42#include "gc/shared/preservedMarks.inline.hpp"43#include "gc/shared/referencePolicy.hpp"44#include "gc/shared/referenceProcessorPhaseTimes.hpp"45#include "gc/shared/space.inline.hpp"46#include "gc/shared/spaceDecorator.inline.hpp"47#include "gc/shared/strongRootsScope.hpp"48#include "gc/shared/weakProcessor.hpp"49#include "logging/log.hpp"50#include "memory/iterator.inline.hpp"51#include "memory/resourceArea.hpp"52#include "oops/instanceRefKlass.hpp"53#include "oops/oop.inline.hpp"54#include "runtime/java.hpp"55#include "runtime/prefetch.inline.hpp"56#include "runtime/thread.inline.hpp"57#include "utilities/align.hpp"58#include "utilities/copy.hpp"59#include "utilities/globalDefinitions.hpp"60#include "utilities/stack.inline.hpp"6162//63// DefNewGeneration functions.6465// Methods of protected closure types.6667DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {68assert(_young_gen->kind() == Generation::DefNew, "Expected the young generation here");69}7071bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {72return cast_from_oop<HeapWord*>(p) >= _young_gen->reserved().end() || p->is_forwarded();73}7475DefNewGeneration::KeepAliveClosure::76KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {77_rs = GenCollectedHeap::heap()->rem_set();78}7980void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }81void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }828384DefNewGeneration::FastKeepAliveClosure::85FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :86DefNewGeneration::KeepAliveClosure(cl) {87_boundary = g->reserved().end();88}8990void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }91void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }9293DefNewGeneration::FastEvacuateFollowersClosure::94FastEvacuateFollowersClosure(SerialHeap* heap,95DefNewScanClosure* cur,96DefNewYoungerGenClosure* older) :97_heap(heap), _scan_cur_or_nonheap(cur), _scan_older(older)98{99}100101void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {102do {103_heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap, _scan_older);104} while (!_heap->no_allocs_since_save_marks());105guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");106}107108void CLDScanClosure::do_cld(ClassLoaderData* cld) {109NOT_PRODUCT(ResourceMark rm);110log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",111p2i(cld),112cld->loader_name_and_id(),113cld->has_modified_oops() ? "true" : "false");114115// If the cld has not been dirtied we know that there's116// no references into the young gen and we can skip it.117if (cld->has_modified_oops()) {118119// Tell the closure which CLD is being scanned so that it can be dirtied120// if oops are left pointing into the young gen.121_scavenge_closure->set_scanned_cld(cld);122123// Clean the cld since we're going to scavenge all the metadata.124cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);125126_scavenge_closure->set_scanned_cld(NULL);127}128}129130ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :131_g(g)132{133_boundary = _g->reserved().end();134}135136DefNewGeneration::DefNewGeneration(ReservedSpace rs,137size_t initial_size,138size_t min_size,139size_t max_size,140const char* policy)141: Generation(rs, initial_size),142_preserved_marks_set(false /* in_c_heap */),143_promo_failure_drain_in_progress(false),144_should_allocate_from_space(false)145{146MemRegion cmr((HeapWord*)_virtual_space.low(),147(HeapWord*)_virtual_space.high());148GenCollectedHeap* gch = GenCollectedHeap::heap();149150gch->rem_set()->resize_covered_region(cmr);151152_eden_space = new ContiguousSpace();153_from_space = new ContiguousSpace();154_to_space = new ContiguousSpace();155156// Compute the maximum eden and survivor space sizes. These sizes157// are computed assuming the entire reserved space is committed.158// These values are exported as performance counters.159uintx size = _virtual_space.reserved_size();160_max_survivor_size = compute_survivor_size(size, SpaceAlignment);161_max_eden_size = size - (2*_max_survivor_size);162163// allocate the performance counters164165// Generation counters -- generation 0, 3 subspaces166_gen_counters = new GenerationCounters("new", 0, 3,167min_size, max_size, &_virtual_space);168_gc_counters = new CollectorCounters(policy, 0);169170_eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,171_gen_counters);172_from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,173_gen_counters);174_to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,175_gen_counters);176177compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);178update_counters();179_old_gen = NULL;180_tenuring_threshold = MaxTenuringThreshold;181_pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;182183_gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();184}185186void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,187bool clear_space,188bool mangle_space) {189// If the spaces are being cleared (only done at heap initialization190// currently), the survivor spaces need not be empty.191// Otherwise, no care is taken for used areas in the survivor spaces192// so check.193assert(clear_space || (to()->is_empty() && from()->is_empty()),194"Initialization of the survivor spaces assumes these are empty");195196// Compute sizes197uintx size = _virtual_space.committed_size();198uintx survivor_size = compute_survivor_size(size, SpaceAlignment);199uintx eden_size = size - (2*survivor_size);200assert(eden_size > 0 && survivor_size <= eden_size, "just checking");201202if (eden_size < minimum_eden_size) {203// May happen due to 64Kb rounding, if so adjust eden size back up204minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);205uintx maximum_survivor_size = (size - minimum_eden_size) / 2;206uintx unaligned_survivor_size =207align_down(maximum_survivor_size, SpaceAlignment);208survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);209eden_size = size - (2*survivor_size);210assert(eden_size > 0 && survivor_size <= eden_size, "just checking");211assert(eden_size >= minimum_eden_size, "just checking");212}213214char *eden_start = _virtual_space.low();215char *from_start = eden_start + eden_size;216char *to_start = from_start + survivor_size;217char *to_end = to_start + survivor_size;218219assert(to_end == _virtual_space.high(), "just checking");220assert(Space::is_aligned(eden_start), "checking alignment");221assert(Space::is_aligned(from_start), "checking alignment");222assert(Space::is_aligned(to_start), "checking alignment");223224MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);225MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);226MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);227228// A minimum eden size implies that there is a part of eden that229// is being used and that affects the initialization of any230// newly formed eden.231bool live_in_eden = minimum_eden_size > 0;232233// If not clearing the spaces, do some checking to verify that234// the space are already mangled.235if (!clear_space) {236// Must check mangling before the spaces are reshaped. Otherwise,237// the bottom or end of one space may have moved into another238// a failure of the check may not correctly indicate which space239// is not properly mangled.240if (ZapUnusedHeapArea) {241HeapWord* limit = (HeapWord*) _virtual_space.high();242eden()->check_mangled_unused_area(limit);243from()->check_mangled_unused_area(limit);244to()->check_mangled_unused_area(limit);245}246}247248// Reset the spaces for their new regions.249eden()->initialize(edenMR,250clear_space && !live_in_eden,251SpaceDecorator::Mangle);252// If clear_space and live_in_eden, we will not have cleared any253// portion of eden above its top. This can cause newly254// expanded space not to be mangled if using ZapUnusedHeapArea.255// We explicitly do such mangling here.256if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {257eden()->mangle_unused_area();258}259from()->initialize(fromMR, clear_space, mangle_space);260to()->initialize(toMR, clear_space, mangle_space);261262// Set next compaction spaces.263eden()->set_next_compaction_space(from());264// The to-space is normally empty before a compaction so need265// not be considered. The exception is during promotion266// failure handling when to-space can contain live objects.267from()->set_next_compaction_space(NULL);268}269270void DefNewGeneration::swap_spaces() {271ContiguousSpace* s = from();272_from_space = to();273_to_space = s;274eden()->set_next_compaction_space(from());275// The to-space is normally empty before a compaction so need276// not be considered. The exception is during promotion277// failure handling when to-space can contain live objects.278from()->set_next_compaction_space(NULL);279280if (UsePerfData) {281CSpaceCounters* c = _from_counters;282_from_counters = _to_counters;283_to_counters = c;284}285}286287bool DefNewGeneration::expand(size_t bytes) {288MutexLocker x(ExpandHeap_lock);289HeapWord* prev_high = (HeapWord*) _virtual_space.high();290bool success = _virtual_space.expand_by(bytes);291if (success && ZapUnusedHeapArea) {292// Mangle newly committed space immediately because it293// can be done here more simply that after the new294// spaces have been computed.295HeapWord* new_high = (HeapWord*) _virtual_space.high();296MemRegion mangle_region(prev_high, new_high);297SpaceMangler::mangle_region(mangle_region);298}299300// Do not attempt an expand-to-the reserve size. The301// request should properly observe the maximum size of302// the generation so an expand-to-reserve should be303// unnecessary. Also a second call to expand-to-reserve304// value potentially can cause an undue expansion.305// For example if the first expand fail for unknown reasons,306// but the second succeeds and expands the heap to its maximum307// value.308if (GCLocker::is_active()) {309log_debug(gc)("Garbage collection disabled, expanded heap instead");310}311312return success;313}314315size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,316size_t new_size_before,317size_t alignment) const {318size_t desired_new_size = new_size_before;319320if (NewSizeThreadIncrease > 0) {321int threads_count;322size_t thread_increase_size = 0;323324// 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'.325threads_count = Threads::number_of_non_daemon_threads();326if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {327thread_increase_size = threads_count * NewSizeThreadIncrease;328329// 2. Check an overflow at 'new_size_candidate + thread_increase_size'.330if (new_size_candidate <= max_uintx - thread_increase_size) {331new_size_candidate += thread_increase_size;332333// 3. Check an overflow at 'align_up'.334size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));335if (new_size_candidate <= aligned_max) {336desired_new_size = align_up(new_size_candidate, alignment);337}338}339}340}341342return desired_new_size;343}344345void DefNewGeneration::compute_new_size() {346// This is called after a GC that includes the old generation, so from-space347// will normally be empty.348// Note that we check both spaces, since if scavenge failed they revert roles.349// If not we bail out (otherwise we would have to relocate the objects).350if (!from()->is_empty() || !to()->is_empty()) {351return;352}353354GenCollectedHeap* gch = GenCollectedHeap::heap();355356size_t old_size = gch->old_gen()->capacity();357size_t new_size_before = _virtual_space.committed_size();358size_t min_new_size = initial_size();359size_t max_new_size = reserved().byte_size();360assert(min_new_size <= new_size_before &&361new_size_before <= max_new_size,362"just checking");363// All space sizes must be multiples of Generation::GenGrain.364size_t alignment = Generation::GenGrain;365366int threads_count = 0;367size_t thread_increase_size = 0;368369size_t new_size_candidate = old_size / NewRatio;370// Compute desired new generation size based on NewRatio and NewSizeThreadIncrease371// and reverts to previous value if any overflow happens372size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);373374// Adjust new generation size375desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);376assert(desired_new_size <= max_new_size, "just checking");377378bool changed = false;379if (desired_new_size > new_size_before) {380size_t change = desired_new_size - new_size_before;381assert(change % alignment == 0, "just checking");382if (expand(change)) {383changed = true;384}385// If the heap failed to expand to the desired size,386// "changed" will be false. If the expansion failed387// (and at this point it was expected to succeed),388// ignore the failure (leaving "changed" as false).389}390if (desired_new_size < new_size_before && eden()->is_empty()) {391// bail out of shrinking if objects in eden392size_t change = new_size_before - desired_new_size;393assert(change % alignment == 0, "just checking");394_virtual_space.shrink_by(change);395changed = true;396}397if (changed) {398// The spaces have already been mangled at this point but399// may not have been cleared (set top = bottom) and should be.400// Mangling was done when the heap was being expanded.401compute_space_boundaries(eden()->used(),402SpaceDecorator::Clear,403SpaceDecorator::DontMangle);404MemRegion cmr((HeapWord*)_virtual_space.low(),405(HeapWord*)_virtual_space.high());406gch->rem_set()->resize_covered_region(cmr);407408log_debug(gc, ergo, heap)(409"New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",410new_size_before/K, _virtual_space.committed_size()/K,411eden()->capacity()/K, from()->capacity()/K);412log_trace(gc, ergo, heap)(413" [allowed " SIZE_FORMAT "K extra for %d threads]",414thread_increase_size/K, threads_count);415}416}417418419size_t DefNewGeneration::capacity() const {420return eden()->capacity()421+ from()->capacity(); // to() is only used during scavenge422}423424425size_t DefNewGeneration::used() const {426return eden()->used()427+ from()->used(); // to() is only used during scavenge428}429430431size_t DefNewGeneration::free() const {432return eden()->free()433+ from()->free(); // to() is only used during scavenge434}435436size_t DefNewGeneration::max_capacity() const {437const size_t reserved_bytes = reserved().byte_size();438return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);439}440441size_t DefNewGeneration::unsafe_max_alloc_nogc() const {442return eden()->free();443}444445size_t DefNewGeneration::capacity_before_gc() const {446return eden()->capacity();447}448449size_t DefNewGeneration::contiguous_available() const {450return eden()->free();451}452453454HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }455HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }456457void DefNewGeneration::object_iterate(ObjectClosure* blk) {458eden()->object_iterate(blk);459from()->object_iterate(blk);460}461462463void DefNewGeneration::space_iterate(SpaceClosure* blk,464bool usedOnly) {465blk->do_space(eden());466blk->do_space(from());467blk->do_space(to());468}469470// The last collection bailed out, we are running out of heap space,471// so we try to allocate the from-space, too.472HeapWord* DefNewGeneration::allocate_from_space(size_t size) {473bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();474475// If the Heap_lock is not locked by this thread, this will be called476// again later with the Heap_lock held.477bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));478479HeapWord* result = NULL;480if (do_alloc) {481result = from()->allocate(size);482}483484log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s",485size,486GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?487"true" : "false",488Heap_lock->is_locked() ? "locked" : "unlocked",489from()->free(),490should_try_alloc ? "" : " should_allocate_from_space: NOT",491do_alloc ? " Heap_lock is not owned by self" : "",492result == NULL ? "NULL" : "object");493494return result;495}496497HeapWord* DefNewGeneration::expand_and_allocate(size_t size,498bool is_tlab,499bool parallel) {500// We don't attempt to expand the young generation (but perhaps we should.)501return allocate(size, is_tlab);502}503504void DefNewGeneration::adjust_desired_tenuring_threshold() {505// Set the desired survivor size to half the real survivor space506size_t const survivor_capacity = to()->capacity() / HeapWordSize;507size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);508509_tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);510511if (UsePerfData) {512GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();513gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);514gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);515}516517age_table()->print_age_table(_tenuring_threshold);518}519520void DefNewGeneration::collect(bool full,521bool clear_all_soft_refs,522size_t size,523bool is_tlab) {524assert(full || size > 0, "otherwise we don't want to collect");525526SerialHeap* heap = SerialHeap::heap();527528_gc_timer->register_gc_start();529DefNewTracer gc_tracer;530gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer->gc_start());531532_old_gen = heap->old_gen();533534// If the next generation is too full to accommodate promotion535// from this generation, pass on collection; let the next generation536// do it.537if (!collection_attempt_is_safe()) {538log_trace(gc)(":: Collection attempt not safe ::");539heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one540return;541}542assert(to()->is_empty(), "Else not collection_attempt_is_safe");543544init_assuming_no_promotion_failure();545546GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause());547548heap->trace_heap_before_gc(&gc_tracer);549550// These can be shared for all code paths551IsAliveClosure is_alive(this);552ScanWeakRefClosure scan_weak_ref(this);553554age_table()->clear();555to()->clear(SpaceDecorator::Mangle);556// The preserved marks should be empty at the start of the GC.557_preserved_marks_set.init(1);558559assert(heap->no_allocs_since_save_marks(),560"save marks have not been newly set.");561562DefNewScanClosure scan_closure(this);563DefNewYoungerGenClosure younger_gen_closure(this, _old_gen);564565CLDScanClosure cld_scan_closure(&scan_closure);566567set_promo_failure_scan_stack_closure(&scan_closure);568FastEvacuateFollowersClosure evacuate_followers(heap,569&scan_closure,570&younger_gen_closure);571572assert(heap->no_allocs_since_save_marks(),573"save marks have not been newly set.");574575{576StrongRootsScope srs(0);577578heap->young_process_roots(&scan_closure,579&younger_gen_closure,580&cld_scan_closure);581}582583// "evacuate followers".584evacuate_followers.do_void();585586FastKeepAliveClosure keep_alive(this, &scan_weak_ref);587ReferenceProcessor* rp = ref_processor();588rp->setup_policy(clear_all_soft_refs);589ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());590SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);591const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);592gc_tracer.report_gc_reference_stats(stats);593gc_tracer.report_tenuring_threshold(tenuring_threshold());594pt.print_all_references();595596assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");597598WeakProcessor::weak_oops_do(&is_alive, &keep_alive);599600// Verify that the usage of keep_alive didn't copy any objects.601assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");602603if (!_promotion_failed) {604// Swap the survivor spaces.605eden()->clear(SpaceDecorator::Mangle);606from()->clear(SpaceDecorator::Mangle);607if (ZapUnusedHeapArea) {608// This is now done here because of the piece-meal mangling which609// can check for valid mangling at intermediate points in the610// collection(s). When a young collection fails to collect611// sufficient space resizing of the young generation can occur612// an redistribute the spaces in the young generation. Mangle613// here so that unzapped regions don't get distributed to614// other spaces.615to()->mangle_unused_area();616}617swap_spaces();618619assert(to()->is_empty(), "to space should be empty now");620621adjust_desired_tenuring_threshold();622623// A successful scavenge should restart the GC time limit count which is624// for full GC's.625AdaptiveSizePolicy* size_policy = heap->size_policy();626size_policy->reset_gc_overhead_limit_count();627assert(!heap->incremental_collection_failed(), "Should be clear");628} else {629assert(_promo_failure_scan_stack.is_empty(), "post condition");630_promo_failure_scan_stack.clear(true); // Clear cached segments.631632remove_forwarding_pointers();633log_info(gc, promotion)("Promotion failed");634// Add to-space to the list of space to compact635// when a promotion failure has occurred. In that636// case there can be live objects in to-space637// as a result of a partial evacuation of eden638// and from-space.639swap_spaces(); // For uniformity wrt ParNewGeneration.640from()->set_next_compaction_space(to());641heap->set_incremental_collection_failed();642643// Inform the next generation that a promotion failure occurred.644_old_gen->promotion_failure_occurred();645gc_tracer.report_promotion_failed(_promotion_failed_info);646647// Reset the PromotionFailureALot counters.648NOT_PRODUCT(heap->reset_promotion_should_fail();)649}650// We should have processed and cleared all the preserved marks.651_preserved_marks_set.reclaim();652653heap->trace_heap_after_gc(&gc_tracer);654655_gc_timer->register_gc_end();656657gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());658}659660void DefNewGeneration::init_assuming_no_promotion_failure() {661_promotion_failed = false;662_promotion_failed_info.reset();663from()->set_next_compaction_space(NULL);664}665666void DefNewGeneration::remove_forwarding_pointers() {667RemoveForwardedPointerClosure rspc;668eden()->object_iterate(&rspc);669from()->object_iterate(&rspc);670restore_preserved_marks();671}672673void DefNewGeneration::restore_preserved_marks() {674_preserved_marks_set.restore(NULL);675}676677void DefNewGeneration::handle_promotion_failure(oop old) {678log_debug(gc, promotion)("Promotion failure size = %d) ", old->size());679680_promotion_failed = true;681_promotion_failed_info.register_copy_failure(old->size());682_preserved_marks_set.get()->push_if_necessary(old, old->mark());683// forward to self684old->forward_to(old);685686_promo_failure_scan_stack.push(old);687688if (!_promo_failure_drain_in_progress) {689// prevent recursion in copy_to_survivor_space()690_promo_failure_drain_in_progress = true;691drain_promo_failure_scan_stack();692_promo_failure_drain_in_progress = false;693}694}695696oop DefNewGeneration::copy_to_survivor_space(oop old) {697assert(is_in_reserved(old) && !old->is_forwarded(),698"shouldn't be scavenging this oop");699size_t s = old->size();700oop obj = NULL;701702// Try allocating obj in to-space (unless too old)703if (old->age() < tenuring_threshold()) {704obj = cast_to_oop(to()->allocate(s));705}706707// Otherwise try allocating obj tenured708if (obj == NULL) {709obj = _old_gen->promote(old, s);710if (obj == NULL) {711handle_promotion_failure(old);712return old;713}714} else {715// Prefetch beyond obj716const intx interval = PrefetchCopyIntervalInBytes;717Prefetch::write(obj, interval);718719// Copy obj720Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);721722// Increment age if obj still in new generation723obj->incr_age();724age_table()->add(obj, s);725}726727// Done, insert forward pointer to obj in this header728old->forward_to(obj);729730return obj;731}732733void DefNewGeneration::drain_promo_failure_scan_stack() {734while (!_promo_failure_scan_stack.is_empty()) {735oop obj = _promo_failure_scan_stack.pop();736obj->oop_iterate(_promo_failure_scan_stack_closure);737}738}739740void DefNewGeneration::save_marks() {741eden()->set_saved_mark();742to()->set_saved_mark();743from()->set_saved_mark();744}745746747void DefNewGeneration::reset_saved_marks() {748eden()->reset_saved_mark();749to()->reset_saved_mark();750from()->reset_saved_mark();751}752753754bool DefNewGeneration::no_allocs_since_save_marks() {755assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");756assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");757return to()->saved_mark_at_top();758}759760void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,761size_t max_alloc_words) {762if (requestor == this || _promotion_failed) {763return;764}765assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");766767/* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.768if (to_space->top() > to_space->bottom()) {769trace("to_space not empty when contribute_scratch called");770}771*/772773ContiguousSpace* to_space = to();774assert(to_space->end() >= to_space->top(), "pointers out of order");775size_t free_words = pointer_delta(to_space->end(), to_space->top());776if (free_words >= MinFreeScratchWords) {777ScratchBlock* sb = (ScratchBlock*)to_space->top();778sb->num_words = free_words;779sb->next = list;780list = sb;781}782}783784void DefNewGeneration::reset_scratch() {785// If contributing scratch in to_space, mangle all of786// to_space if ZapUnusedHeapArea. This is needed because787// top is not maintained while using to-space as scratch.788if (ZapUnusedHeapArea) {789to()->mangle_unused_area_complete();790}791}792793bool DefNewGeneration::collection_attempt_is_safe() {794if (!to()->is_empty()) {795log_trace(gc)(":: to is not empty ::");796return false;797}798if (_old_gen == NULL) {799GenCollectedHeap* gch = GenCollectedHeap::heap();800_old_gen = gch->old_gen();801}802return _old_gen->promotion_attempt_is_safe(used());803}804805void DefNewGeneration::gc_epilogue(bool full) {806DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)807808assert(!GCLocker::is_active(), "We should not be executing here");809// Check if the heap is approaching full after a collection has810// been done. Generally the young generation is empty at811// a minimum at the end of a collection. If it is not, then812// the heap is approaching full.813GenCollectedHeap* gch = GenCollectedHeap::heap();814if (full) {815DEBUG_ONLY(seen_incremental_collection_failed = false;)816if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {817log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",818GCCause::to_string(gch->gc_cause()));819gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state820set_should_allocate_from_space(); // we seem to be running out of space821} else {822log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",823GCCause::to_string(gch->gc_cause()));824gch->clear_incremental_collection_failed(); // We just did a full collection825clear_should_allocate_from_space(); // if set826}827} else {828#ifdef ASSERT829// It is possible that incremental_collection_failed() == true830// here, because an attempted scavenge did not succeed. The policy831// is normally expected to cause a full collection which should832// clear that condition, so we should not be here twice in a row833// with incremental_collection_failed() == true without having done834// a full collection in between.835if (!seen_incremental_collection_failed &&836gch->incremental_collection_failed()) {837log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",838GCCause::to_string(gch->gc_cause()));839seen_incremental_collection_failed = true;840} else if (seen_incremental_collection_failed) {841log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",842GCCause::to_string(gch->gc_cause()));843assert(gch->gc_cause() == GCCause::_scavenge_alot ||844!gch->incremental_collection_failed(),845"Twice in a row");846seen_incremental_collection_failed = false;847}848#endif // ASSERT849}850851if (ZapUnusedHeapArea) {852eden()->check_mangled_unused_area_complete();853from()->check_mangled_unused_area_complete();854to()->check_mangled_unused_area_complete();855}856857// update the generation and space performance counters858update_counters();859gch->counters()->update_counters();860}861862void DefNewGeneration::record_spaces_top() {863assert(ZapUnusedHeapArea, "Not mangling unused space");864eden()->set_top_for_allocations();865to()->set_top_for_allocations();866from()->set_top_for_allocations();867}868869void DefNewGeneration::ref_processor_init() {870Generation::ref_processor_init();871}872873874void DefNewGeneration::update_counters() {875if (UsePerfData) {876_eden_counters->update_all();877_from_counters->update_all();878_to_counters->update_all();879_gen_counters->update_all();880}881}882883void DefNewGeneration::verify() {884eden()->verify();885from()->verify();886to()->verify();887}888889void DefNewGeneration::print_on(outputStream* st) const {890Generation::print_on(st);891st->print(" eden");892eden()->print_on(st);893st->print(" from");894from()->print_on(st);895st->print(" to ");896to()->print_on(st);897}898899900const char* DefNewGeneration::name() const {901return "def new generation";902}903904// Moved from inline file as they are not called inline905CompactibleSpace* DefNewGeneration::first_compaction_space() const {906return eden();907}908909HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {910// This is the slow-path allocation for the DefNewGeneration.911// Most allocations are fast-path in compiled code.912// We try to allocate from the eden. If that works, we are happy.913// Note that since DefNewGeneration supports lock-free allocation, we914// have to use it here, as well.915HeapWord* result = eden()->par_allocate(word_size);916if (result == NULL) {917// If the eden is full and the last collection bailed out, we are running918// out of heap space, and we try to allocate the from-space, too.919// allocate_from_space can't be inlined because that would introduce a920// circular dependency at compile time.921result = allocate_from_space(word_size);922}923return result;924}925926HeapWord* DefNewGeneration::par_allocate(size_t word_size,927bool is_tlab) {928return eden()->par_allocate(word_size);929}930931size_t DefNewGeneration::tlab_capacity() const {932return eden()->capacity();933}934935size_t DefNewGeneration::tlab_used() const {936return eden()->used();937}938939size_t DefNewGeneration::unsafe_max_tlab_alloc() const {940return unsafe_max_alloc_nogc();941}942943944