Path: blob/master/src/hotspot/share/gc/parallel/psOldGen.cpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
                   size_t max_size, const char* perf_data_name, int level):
  _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, initial_size, GenAlignment, perf_data_name, level);
}

void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, initial_size, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to max_gen_size().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= max_gen_size(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs,
                                        size_t initial_size,
                                        size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(max_gen_size()));
  assert(limit_reserved.byte_size() == max_gen_size(),
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSCardTable* ct = heap->card_table();
  ct->resize_covered_region(cmr);

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());
  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle,
                             MutableSpace::SetupPages,
                             &ParallelScavengeHeap::heap()->workers());

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, min_gen_size(),
                                           max_gen_size(), virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

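// Support for iterating over the objects in this generation in fixed-size
// blocks, so the work can be split across worker threads.
// num_iterable_blocks() reports how many IterateBlockSize-byte blocks cover
// the used part of the space; object_iterate_block() visits every object that
// starts inside one such block.  A caller is expected to do, roughly:
//
//   for (size_t i = 0; i < old_gen->num_iterable_blocks(); i++) {
//     old_gen->object_iterate_block(cl, i);  // block indices can be claimed in parallel
//   }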
size_t PSOldGen::num_iterable_blocks() const {
  return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize;
}

void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
  size_t block_word_size = IterateBlockSize / HeapWordSize;
  assert((block_word_size % (ObjectStartArray::block_size)) == 0,
         "Block size not a multiple of start_array block");

  MutableSpace *space = object_space();

  HeapWord* begin = space->bottom() + block_index * block_word_size;
  HeapWord* end = MIN2(space->top(), begin + block_word_size);

  if (!start_array()->object_starts_in_range(begin, end)) {
    return;
  }

  // Get object starting at or reaching into this block.
  HeapWord* start = start_array()->object_start(begin);
  if (start < begin) {
    start += cast_to_oop(start)->size();
  }
  assert(start >= begin,
         "Object address" PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT,
         p2i(start), p2i(begin));
  // Iterate all objects until the end.
  for (HeapWord* p = start; p < end; p += cast_to_oop(p)->size()) {
    cl->do_object(cast_to_oop(p));
  }
}

bool PSOldGen::expand_for_allocate(size_t word_size) {
  assert(word_size > 0, "allocating zero words?");
  bool result = true;
  {
    MutexLocker x(ExpandHeap_lock);
    // Avoid "expand storms" by rechecking available space after obtaining
    // the lock, because another thread may have already made sufficient
    // space available.  If insufficient space available, that will remain
    // true until we expand, since we have the lock.  Other threads may take
    // the space we need before we can allocate it, regardless of whether we
    // expand.  That's okay, we'll just try expanding again.
    if (object_space()->needs_expand(word_size)) {
      result = expand(word_size*HeapWordSize);
    }
  }
  if (GCExpandToAllocateDelayMillis > 0) {
    os::naked_sleep(GCExpandToAllocateDelayMillis);
  }
  return result;
}

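// Grow the committed part of the generation.  Expansion is attempted in
// stages: first by the minimum step (MinHeapDeltaBytes, or at least one page
// per lgroup when UseNUMA is set) if that is larger than the request, then by
// the requested number of bytes, and finally by whatever remains uncommitted
// in the reserved space.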
bool PSOldGen::expand(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  assert(bytes > 0, "precondition");
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_up(bytes, alignment);
  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  A call to expand
    // implies a best effort to expand by "bytes" but not a guarantee.  Align
    // down to give a best effort.  This is likely the most that the generation
    // can expand since it has some capacity to start with.
    aligned_bytes = align_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
  return success;
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  assert(bytes > 0, "precondition");
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area.  The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize();
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = false;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
  }
  return result;
}

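// Give back committed memory.  The request is aligned down to the virtual
// space alignment, so a request smaller than one alignment unit is ignored.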
void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t size = align_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    assert_lock_strong(ExpandHeap_lock);
    virtual_space()->shrink_by(bytes);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = max_gen_size();
  }
  // Adjust according to our min and max
  new_size = clamp(new_size, min_gen_size(), max_gen_size());

  assert(max_gen_size() >= reserved().byte_size(), "max new size problem?");
  new_size = align_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
                      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
                      " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
                      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
                      desired_free_space, used_in_bytes(), new_size, current_size,
                      max_gen_size(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    MutexLocker x(ExpandHeap_lock);
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

// NOTE! We need to be careful about resizing.  During a GC, multiple
// allocators may be active during heap expansion.  If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);

  WorkGang* workers = Thread::current()->is_VM_thread() ?
                      &ParallelScavengeHeap::heap()->workers() : NULL;

  // The update of the space's end is done by this call.  As that
  // makes the new space available for concurrent allocation, this
  // must be the last step when expanding.
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle,
                             MutableSpace::SetupPages,
                             workers);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

void PSOldGen::print() const { print_on(tty);}
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(virtual_space()->low_boundary()),
               p2i(virtual_space()->high()),
               p2i(virtual_space()->high_boundary()));

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

void PSOldGen::verify() {
  object_space()->verify();
}

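// Closure used by verify_object_start_array(): for every object in the space,
// check that the ObjectStartArray can map an interior address back to the
// object's start and that the object's block is marked as allocated.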
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + 1;
    guarantee(_start_array->object_start(test_addr) == cast_from_oop<HeapWord*>(obj), "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated(cast_from_oop<HeapWord*>(obj)), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check( this, &_start_array );
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif