// File: src/hotspot/cpu/x86/c1_LinearScan_x86.hpp
/*1* Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#ifndef CPU_X86_C1_LINEARSCAN_X86_HPP25#define CPU_X86_C1_LINEARSCAN_X86_HPP2627inline bool LinearScan::is_processed_reg_num(int reg_num) {28#ifndef _LP6429// rsp and rbp (numbers 6 ancd 7) are ignored30assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");31assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");32assert(reg_num >= 0, "invalid reg_num");33#else34// rsp and rbp, r10, r15 (numbers [12,15]) are ignored35// r12 (number 11) is conditional on compressed oops.36assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below");37assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");38assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");39assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");40assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");41assert(reg_num >= 0, "invalid reg_num");42#endif // _LP6443return 
reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;44}4546inline int LinearScan::num_physical_regs(BasicType type) {47// Intel requires two cpu registers for long,48// but requires only one fpu register for double49if (LP64_ONLY(false &&) type == T_LONG) {50return 2;51}52return 1;53}545556inline bool LinearScan::requires_adjacent_regs(BasicType type) {57return false;58}5960inline bool LinearScan::is_caller_save(int assigned_reg) {61assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");62return true; // no callee-saved registers on Intel6364}656667inline void LinearScan::pd_add_temps(LIR_Op* op) {68switch (op->code()) {69case lir_tan: {70// The slow path for these functions may need to save and71// restore all live registers but we don't want to save and72// restore everything all the time, so mark the xmms as being73// killed. If the slow path were explicit or we could propagate74// live register masks down to the assembly we could do better75// but we don't have any easy way to do that right now. 
We76// could also consider not killing all xmm registers if we77// assume that slow paths are uncommon but it's not clear that78// would be a good idea.79if (UseSSE > 0) {80#ifdef ASSERT81if (TraceLinearScanLevel >= 2) {82tty->print_cr("killing XMMs for trig");83}84#endif85int num_caller_save_xmm_regs = FrameMap::get_num_caller_save_xmms();86int op_id = op->id();87for (int xmm = 0; xmm < num_caller_save_xmm_regs; xmm++) {88LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(xmm);89add_temp(reg_num(opr), op_id, noUse, T_ILLEGAL);90}91}92break;93}94default:95break;96}97}9899100// Implementation of LinearScanWalker101102inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {103int last_xmm_reg = pd_last_xmm_reg;104#ifdef _LP64105if (UseAVX < 3) {106last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;107}108#endif109if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {110assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");111_first_reg = pd_first_byte_reg;112_last_reg = FrameMap::last_byte_reg();113return true;114} else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {115_first_reg = pd_first_xmm_reg;116_last_reg = last_xmm_reg;117return true;118}119120return false;121}122123124class FpuStackAllocator {125private:126Compilation* _compilation;127LinearScan* _allocator;128129LIR_OpVisitState visitor;130131LIR_List* _lir;132int _pos;133FpuStackSim _sim;134FpuStackSim _temp_sim;135136bool _debug_information_computed;137138LinearScan* allocator() { return _allocator; }139Compilation* compilation() const { return _compilation; }140141// unified bailout support142void bailout(const char* msg) const { compilation()->bailout(msg); }143bool bailed_out() const { return compilation()->bailed_out(); }144145int pos() { return _pos; }146void set_pos(int pos) { _pos = pos; }147LIR_Op* cur_op() { return lir()->instructions_list()->at(pos()); }148LIR_List* lir() { 
return _lir; }149void set_lir(LIR_List* lir) { _lir = lir; }150FpuStackSim* sim() { return &_sim; }151FpuStackSim* temp_sim() { return &_temp_sim; }152153int fpu_num(LIR_Opr opr);154int tos_offset(LIR_Opr opr);155LIR_Opr to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset = false);156157// Helper functions for handling operations158void insert_op(LIR_Op* op);159void insert_exchange(int offset);160void insert_exchange(LIR_Opr opr);161void insert_free(int offset);162void insert_free_if_dead(LIR_Opr opr);163void insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore);164void insert_copy(LIR_Opr from, LIR_Opr to);165void do_rename(LIR_Opr from, LIR_Opr to);166void do_push(LIR_Opr opr);167void pop_if_last_use(LIR_Op* op, LIR_Opr opr);168void pop_always(LIR_Op* op, LIR_Opr opr);169void clear_fpu_stack(LIR_Opr preserve);170void handle_op1(LIR_Op1* op1);171void handle_op2(LIR_Op2* op2);172void handle_opCall(LIR_OpCall* opCall);173void compute_debug_information(LIR_Op* op);174void allocate_exception_handler(XHandler* xhandler);175void allocate_block(BlockBegin* block);176177#ifndef PRODUCT178void check_invalid_lir_op(LIR_Op* op);179#endif180181// Helper functions for merging of fpu stacks182void merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg);183void merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot);184void merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim);185bool merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot);186void merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim);187void merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs);188bool merge_fpu_stack_with_successors(BlockBegin* block);189190public:191LIR_Opr to_fpu_stack(LIR_Opr opr); // used by LinearScan for creation of debug information192193FpuStackAllocator(Compilation* compilation, LinearScan* allocator);194void allocate();195};196197#endif // 
CPU_X86_C1_LINEARSCAN_X86_HPP198199200