Path: src/hotspot/share/gc/epsilon/epsilonHeap.cpp
/*
 * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonInitLogger.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size  = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}
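
// For context: Epsilon is an experimental collector, so the initialization
// path above is typically reached with a command line such as:
//
//   java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xms128m -Xmx1g ...
//
// -Xmx sets MaxHeapSize (the address space reserved above), and -Xms sets
// InitialHeapSize (the portion committed up front); allocate_work() below
// commits the remainder on demand as allocation proceeds.
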
void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return max allocatable TLAB size, and let allocation path figure out
  // the actual allocation size. Note: result should be in bytes.
  return _max_tlab_size * HeapWordSize;
}

EpsilonHeap* EpsilonHeap::heap() {
  return named_heap<EpsilonHeap>(CollectedHeap::Epsilon);
}

HeapWord* EpsilonHeap::allocate_work(size_t size) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = NULL;
  while (true) {
    // Try to allocate, assume space is available
    res = _space->par_allocate(size);
    if (res != NULL) {
      break;
    }

    // Allocation failed, attempt expansion, and retry:
    {
      MutexLocker ml(Heap_lock);

      // Try to allocate under the lock, assume another thread was able to expand
      res = _space->par_allocate(size);
      if (res != NULL) {
        break;
      }

      // Expand and loop back if space is available
      size_t space_left = max_capacity() - capacity();
      size_t want_space = MAX2(size, EpsilonMinHeapExpand);

      if (want_space < space_left) {
        // Enough space to expand in bulk:
        bool expand = _virtual_space.expand_by(want_space);
        assert(expand, "Should be able to expand");
      } else if (size < space_left) {
        // No space to expand in bulk, and this allocation is still possible,
        // take all the remaining space:
        bool expand = _virtual_space.expand_by(space_left);
        assert(expand, "Should be able to expand");
      } else {
        // No space left:
        return NULL;
      }

      _space->set_end((HeapWord *) _virtual_space.high());
    }
  }

  size_t used = _space->used();

  // Allocation successful, update counters
  {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}
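
// Note on the two cmpxchg blocks above: when several threads cross a step
// boundary at once, only the thread that successfully advances
// _last_counter_update (or _last_heap_print) from the value it read does the
// reporting work; the losers of the race skip it. With the default
// EpsilonUpdateCountersStep of 1M (assumed here; epsilon_globals.hpp is
// authoritative), counters are refreshed at most about once per megabyte of
// newly allocated heap, keeping this off the allocation fast path.
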
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had initial burst of allocations,
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = clamp(size, min_size, _max_tlab_size);

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}
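
// Worked example for the elastic TLAB path in allocate_new_tlab, assuming the
// defaults EpsilonTLABElasticity=1.1 and EpsilonTLABDecayTime=1000 (ms); see
// epsilon_globals.hpp for the authoritative values. A fresh thread starts with
// ergo_tlab = 0, so its first request never "fits": the computed size
// 0 * 1.1 = 0 is clamped up to min_size. Each successful allocation that did
// not fit records the issued size as the new ergonomic size, so repeated
// misses grow the TLAB geometrically (about 1.1x per step) up to
// _max_tlab_size, while a thread idle longer than the decay time is
// retracted back to ergo_tlab = 0 and starts the climb over.
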
void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  if (_space != NULL) {
    st->print_cr("Allocation space:");
    _space->print_on(st);
  }

  MetaspaceUtils::print_on(st);
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 committed * 100.0 / reserved,
                 byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
                 used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics();
  size_t reserved  = stats.reserved();
  size_t committed = stats.committed();
  size_t used      = stats.used();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            committed * 100.0 / reserved,
                            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
                            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}
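
// With the default -Xlog decorators, the two printers above emit unified
// logging lines shaped like the following (values illustrative only):
//
//   [2.100s][info][gc]           Heap: 1024M reserved, 128M (12.50%) committed, 64M (6.25%) used
//   [2.100s][info][gc,metaspace] Metaspace: 64M reserved, 8M (12.50%) committed, 6M (9.38%) used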