diff --git a/make/hotspot/lib/JvmFlags.gmk b/make/hotspot/lib/JvmFlags.gmk index 1a91eb007..109f40f15 100644 --- a/make/hotspot/lib/JvmFlags.gmk +++ b/make/hotspot/lib/JvmFlags.gmk @@ -41,6 +41,12 @@ JVM_SRC_DIRS += $(call uniq, $(wildcard $(foreach d, $(JVM_SRC_ROOTS), \ $(JVM_VARIANT_OUTPUTDIR)/gensrc # +JVM_ACC_PLUGIN_DIR := $(TOPDIR)/src/java.base/share/native/libjplugin +JVM_ACC_PLUGIN_SRC := $(JVM_ACC_PLUGIN_DIR)/feature +ifeq ($(wildcard $(JVM_ACC_PLUGIN_SRC)), $(JVM_ACC_PLUGIN_SRC)) + JVM_SRC_DIRS += $(JVM_ACC_PLUGIN_SRC) +endif + JVM_CFLAGS_INCLUDES += \ $(patsubst %,-I%,$(JVM_SRC_DIRS)) \ -I$(TOPDIR)/src/hotspot/share/precompiled \ diff --git a/make/hotspot/lib/JvmMapfile.gmk b/make/hotspot/lib/JvmMapfile.gmk index ba44e5798..92e76b707 100644 --- a/make/hotspot/lib/JvmMapfile.gmk +++ b/make/hotspot/lib/JvmMapfile.gmk @@ -60,6 +60,12 @@ ifeq ($(call isTargetOs, solaris), true) endif endif +JVM_ACC_PLUGIN_DIR := $(TOPDIR)/src/java.base/share/native/libjplugin +JVM_ACC_PLUGIN_SYMBOLS_SRC := $(JVM_ACC_PLUGIN_DIR)/make/hotspot-symbols +ifeq ($(wildcard $(JVM_ACC_PLUGIN_SYMBOLS_SRC)), $(JVM_ACC_PLUGIN_SYMBOLS_SRC)) + SYMBOLS_SRC += $(JVM_ACC_PLUGIN_SYMBOLS_SRC)/symbols-plugin +endif + ################################################################################ # Create a dynamic list of symbols from the built object files. This is highly # platform dependent. 
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index 2306a8d60..0acc6a57f 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -5665,6 +5665,46 @@ void os::Linux::numa_init() { } } +#if INCLUDE_JBOLT +os::Linux::jboltHeap_init_t os::Linux::_jboltHeap_init; +os::Linux::jboltLog_precalc_t os::Linux::_jboltLog_precalc; +os::Linux::jboltLog_do_t os::Linux::_jboltLog_do; +os::Linux::jboltMerge_judge_t os::Linux::_jboltMerge_judge; +#endif // INCLUDE_JBOLT + +void os::Linux::load_plugin_library() { +#if INCLUDE_JBOLT + _jboltHeap_init = CAST_TO_FN_PTR(jboltHeap_init_t, dlsym(RTLD_DEFAULT, "JBoltHeap_Init")); + _jboltLog_precalc = CAST_TO_FN_PTR(jboltLog_precalc_t, dlsym(RTLD_DEFAULT, "JBoltLog_PreCalc")); + _jboltLog_do = CAST_TO_FN_PTR(jboltLog_do_t, dlsym(RTLD_DEFAULT, "JBoltLog_DO")); + _jboltMerge_judge = CAST_TO_FN_PTR(jboltMerge_judge_t, dlsym(RTLD_DEFAULT, "JBoltMerge_Judge")); +#endif // INCLUDE_JBOLT + + char path[JVM_MAXPATHLEN]; + char ebuf[1024]; + void* handle = NULL; + if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "jvm11_Acc") || + os::dll_locate_lib(path, sizeof(path), "/usr/lib64", "jvm11_Acc")) { + handle = dlopen(path, RTLD_LAZY); + } + if (handle != NULL) { +#if INCLUDE_JBOLT + if (_jboltHeap_init == NULL) { + _jboltHeap_init = CAST_TO_FN_PTR(jboltHeap_init_t, dlsym(handle, "JBoltHeap_Init")); + } + if (_jboltLog_precalc == NULL) { + _jboltLog_precalc = CAST_TO_FN_PTR(jboltLog_precalc_t, dlsym(handle, "JBoltLog_PreCalc")); + } + if (_jboltLog_do == NULL) { + _jboltLog_do = CAST_TO_FN_PTR(jboltLog_do_t, dlsym(handle, "JBoltLog_DO")); + } + if (_jboltMerge_judge == NULL) { + _jboltMerge_judge = CAST_TO_FN_PTR(jboltMerge_judge_t, dlsym(handle, "JBoltMerge_Judge")); + } +#endif // INCLUDE_JBOLT + } +} + // this is called _after_ the global arguments have been parsed jint os::init_2(void) { @@ -5712,6 +5752,8 @@ jint os::init_2(void) { 
init_adjust_stacksize_for_guard_pages(); #endif + Linux::load_plugin_library(); + if (UseNUMA) { Linux::numa_init(); } diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp index 2965fd606..fac136dfe 100644 --- a/src/hotspot/os/linux/os_linux.hpp +++ b/src/hotspot/os/linux/os_linux.hpp @@ -188,6 +188,7 @@ class Linux { static const char *libc_version() { return _libc_version; } static const char *libpthread_version() { return _libpthread_version; } + static void load_plugin_library(); static void libpthread_init(); static void sched_getcpu_init(); static bool libnuma_init(); @@ -271,7 +272,16 @@ class Linux { typedef void (*numa_set_bind_policy_func_t)(int policy); typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n); typedef int (*numa_distance_func_t)(int node1, int node2); - +#if INCLUDE_JBOLT + typedef void (*jboltHeap_init_t)(uintptr_t related_data[], address rs, address non_nmethod_space, address profiled_space, address non_profiled_space, address jbolt_hot_space, address jbolt_tmp_space); + typedef void (*jboltLog_precalc_t)(unsigned int topFrameIndex, unsigned int &max_frames, unsigned int framesCount); + typedef bool (*jboltLog_do_t)(uintptr_t related_data[], address stacktrace, unsigned int i, int comp_level, address new_func, address *tempfunc); + typedef int (*jboltMerge_judge_t)(uintptr_t related_data[], int candidate, address clusters, address merged, address cluster); + static jboltHeap_init_t _jboltHeap_init; + static jboltLog_precalc_t _jboltLog_precalc; + static jboltLog_do_t _jboltLog_do; + static jboltMerge_judge_t _jboltMerge_judge; +#endif static sched_getcpu_func_t _sched_getcpu; static numa_node_to_cpus_func_t _numa_node_to_cpus; static numa_node_to_cpus_v2_func_t _numa_node_to_cpus_v2; @@ -466,6 +476,33 @@ class Linux { return false; } } + +#if INCLUDE_JBOLT + static bool jboltHeap_init(uintptr_t related_data[], address rs, address non_nmethod_space, address profiled_space, address 
non_profiled_space, address jbolt_hot_space, address jbolt_tmp_space) { + if (_jboltHeap_init != NULL) { + _jboltHeap_init(related_data, rs, non_nmethod_space, profiled_space, non_profiled_space, jbolt_hot_space, jbolt_tmp_space); + return true; + } + return false; + } + static void jboltLog_precalc(unsigned int topFrameIndex, unsigned int &max_frames, unsigned int framesCount) { + if (_jboltLog_precalc != NULL) { + _jboltLog_precalc(topFrameIndex, max_frames, framesCount); + } + } + static bool jboltLog_do(uintptr_t related_data[], address stacktrace, unsigned int i, int comp_level, address new_func, address *tempfunc) { + if (_jboltLog_do != NULL) { + return _jboltLog_do(related_data, stacktrace, i, comp_level, new_func, tempfunc); + } + return false; + } + static int jboltMerge_judge(uintptr_t related_data[], int candidate, address clusters, address merged, address cluster) { + if (_jboltMerge_judge != NULL) { + return _jboltMerge_judge(related_data, candidate, clusters, merged, cluster); + } + return -1; + } +#endif // INCLUDE_JBOLT }; #endif // OS_LINUX_VM_OS_LINUX_HPP diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp index e7e3dc187..f66926600 100644 --- a/src/hotspot/share/ci/ciEnv.cpp +++ b/src/hotspot/share/ci/ciEnv.cpp @@ -69,6 +69,9 @@ #ifdef COMPILER2 #include "opto/runtime.hpp" #endif +#if INCLUDE_JBOLT +#include "jbolt/jBoltManager.hpp" +#endif // ciEnv // @@ -1033,15 +1036,33 @@ void ciEnv::register_method(ciMethod* target, assert(offsets->value(CodeOffsets::Deopt) != -1, "must have deopt entry"); assert(offsets->value(CodeOffsets::Exceptions) != -1, "must have exception entry"); - nm = nmethod::new_nmethod(method, - compile_id(), - entry_bci, - offsets, - orig_pc_offset, - debug_info(), dependencies(), code_buffer, - frame_words, oop_map_set, - handler_table, inc_table, - compiler, task()->comp_level()); +#if INCLUDE_JBOLT + if (UseJBolt && JBoltManager::reorder_phase_is_collecting_or_reordering()) { + int code_blob_type = 
JBoltManager::calc_code_blob_type(method(), task(), THREAD); + nm = nmethod::new_nmethod(method, + compile_id(), + entry_bci, + offsets, + orig_pc_offset, + debug_info(), dependencies(), code_buffer, + frame_words, oop_map_set, + handler_table, inc_table, + compiler, task()->comp_level(), + NULL, NULL, + code_blob_type); + } else +#endif // INCLUDE_JBOLT + { + nm = nmethod::new_nmethod(method, + compile_id(), + entry_bci, + offsets, + orig_pc_offset, + debug_info(), dependencies(), code_buffer, + frame_words, oop_map_set, + handler_table, inc_table, + compiler, task()->comp_level()); + } // Free codeBlobs code_buffer->free_blob(); diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp index 82b01d096..a14abe4e4 100644 --- a/src/hotspot/share/code/codeBlob.hpp +++ b/src/hotspot/share/code/codeBlob.hpp @@ -39,10 +39,12 @@ struct CodeBlobType { enum { MethodNonProfiled = 0, // Execution level 1 and 4 (non-profiled) nmethods (including native nmethods) MethodProfiled = 1, // Execution level 2 and 3 (profiled) nmethods - NonNMethod = 2, // Non-nmethods like Buffers, Adapters and Runtime Stubs - All = 3, // All types (No code cache segmentation) - AOT = 4, // AOT methods - NumTypes = 5 // Number of CodeBlobTypes + MethodJBoltHot = 2, // Hot methods (determined by JBolt) of level 1 and 4 nmethods + MethodJBoltTmp = 3, // Temporary storage of JBolt hot methods + NonNMethod = 4, // Non-nmethods like Buffers, Adapters and Runtime Stubs + All = 5, // All types (No code cache segmentation) + AOT = 6, // AOT methods + NumTypes = 7 // Number of CodeBlobTypes }; }; diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp index f95fbcce2..cd3e376d4 100644 --- a/src/hotspot/share/code/codeCache.cpp +++ b/src/hotspot/share/code/codeCache.cpp @@ -66,6 +66,9 @@ #include "opto/compile.hpp" #include "opto/node.hpp" #endif +#if INCLUDE_JBOLT +#include "jbolt/jBoltManager.hpp" +#endif // INCLUDE_JBOLT // Helper class for 
printing in CodeCache class CodeBlob_sizes { @@ -292,6 +295,16 @@ void CodeCache::initialize_heaps() { non_nmethod_size = align_up(non_nmethod_size, alignment); profiled_size = align_down(profiled_size, alignment); +#if INCLUDE_JBOLT + if (UseJBolt && !JBoltDumpMode) { + // We replace the original add-heap logic with the JBolt one. manual dump mode doesn't need that + JBoltManager::init_code_heaps(non_nmethod_size, profiled_size, non_profiled_size, cache_size, alignment); + return; + } + // The following add-heap logic will not be executed if JBolt load mode is on. + // If the following logic is modified, remember to modify the JBolt logic accordingly. +#endif // INCLUDE_JBOLT + // Reserve one continuous chunk of memory for CodeHeaps and split it into // parts for the individual heaps. The memory layout looks like this: // ---------- high ----------- @@ -345,6 +358,12 @@ ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) { // Heaps available for allocation bool CodeCache::heap_available(int code_blob_type) { + if (code_blob_type == CodeBlobType::MethodJBoltHot) { + return JBOLT_ONLY(UseJBolt && !JBoltDumpMode) NOT_JBOLT(false); + } else if (code_blob_type == CodeBlobType::MethodJBoltTmp) { + return JBOLT_ONLY(UseJBolt && !JBoltDumpMode) NOT_JBOLT(false); + } + if (!SegmentedCodeCache) { // No segmentation: use a single code heap return (code_blob_type == CodeBlobType::All); @@ -372,6 +391,12 @@ const char* CodeCache::get_code_heap_flag_name(int code_blob_type) { case CodeBlobType::MethodProfiled: return "ProfiledCodeHeapSize"; break; + case CodeBlobType::MethodJBoltHot: + return "JBoltHotCodeHeapSize"; + break; + case CodeBlobType::MethodJBoltTmp: + return "JBoltTmpCodeHeapSize"; + break; } ShouldNotReachHere(); return NULL; @@ -522,6 +547,17 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_t type = CodeBlobType::MethodNonProfiled; } break; +#if INCLUDE_JBOLT + case CodeBlobType::MethodJBoltHot: + case 
CodeBlobType::MethodJBoltTmp: + if (JBoltLoadMode) { + type = CodeBlobType::MethodNonProfiled; + break; + } + // [jbolt]: JBoltCodeCache is too full to contain all ordered methods, but the hotter ones should have been recompiled. + JBoltManager::handle_full_jbolt_code_cache(); + return NULL; +#endif // INCLUDE_JBOLT } if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) { if (PrintCodeCacheExtension) { diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp index 3ca988c92..37edfa6e0 100644 --- a/src/hotspot/share/code/codeCache.hpp +++ b/src/hotspot/share/code/codeCache.hpp @@ -47,6 +47,10 @@ // executed at level 2 or 3 // - Non-Profiled nmethods: nmethods that are not profiled, i.e., those // executed at level 1 or 4 and native methods +// - JBolt nmethods: sorted non-profiled nmethods that are judged to be hot +// by JBolt +// - JBolt tmp nmethods: non-profiled nmethods that are judged to be hot by +// JBolt but not sorted yet // - All: Used for code of all types if code cache segmentation is disabled. // // In the rare case of the non-nmethod code heap getting full, non-nmethod code @@ -84,6 +88,9 @@ class CodeCache : AllStatic { #if INCLUDE_SHENANDOAHGC friend class ShenandoahParallelCodeHeapIterator; #endif +#if INCLUDE_JBOLT + friend class JBoltManager; +#endif // INCLUDE_JBOLT private: // CodeHeaps of the cache static GrowableArray* _heaps; @@ -242,13 +249,17 @@ class CodeCache : AllStatic { } static bool code_blob_type_accepts_compiled(int type) { - bool result = type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled; + // Modified `type <= CodeBlobType::MethodProfiled` to `type < CodeBlobType::NonNMethod` + // after adding the JBolt heap. The two logics are still equivalent even without JBolt. 
+ bool result = type == CodeBlobType::All || type < CodeBlobType::NonNMethod; AOT_ONLY( result = result || type == CodeBlobType::AOT; ) return result; } static bool code_blob_type_accepts_nmethod(int type) { - return type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled; + // Modified `type <= CodeBlobType::MethodProfiled` to `type < CodeBlobType::NonNMethod` + // after adding the JBolt heap. The two logics are still equivalent even without JBolt. + return type == CodeBlobType::All || type < CodeBlobType::NonNMethod; } static bool code_blob_type_accepts_allocable(int type) { diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index 6bc63116b..ae02db085 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -67,6 +67,9 @@ #if INCLUDE_JVMCI #include "jvmci/jvmciJavaClasses.hpp" #endif +#if INCLUDE_JBOLT +#include "jbolt/jBoltManager.hpp" +#endif #ifdef DTRACE_ENABLED @@ -481,6 +484,9 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, , jweak installed_code, jweak speculationLog #endif +#if INCLUDE_JBOLT + , int code_blob_type // for jbolt +#endif // INCLUDE_JBOLT ) { assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); @@ -496,7 +502,11 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, + align_up(nul_chk_table->size_in_bytes() , oopSize) + align_up(debug_info->data_size() , oopSize); +#if INCLUDE_JBOLT + nm = new (nmethod_size, comp_level, code_blob_type) +#else // INCLUDE_JBOLT nm = new (nmethod_size, comp_level) +#endif // INCLUDE_JBOLT nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets, orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, oop_maps, @@ -641,6 +651,15 @@ void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level)); } +#if INCLUDE_JBOLT +void* nmethod::operator new(size_t size, 
int nmethod_size, int comp_level, int code_blob_type) throw () { + if (code_blob_type < CodeBlobType::All) { + return CodeCache::allocate(nmethod_size, code_blob_type); + } + return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level)); +} +#endif // INCLUDE_JBOLT + nmethod::nmethod( Method* method, CompilerType type, diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index b5018dcf8..6e33af573 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -214,6 +214,11 @@ class nmethod : public CompiledMethod { // helper methods void* operator new(size_t size, int nmethod_size, int comp_level) throw(); +#if INCLUDE_JBOLT + // For JBolt. So the code can be allocated in code segments defined by JBolt. + void* operator new(size_t size, int nmethod_size, int comp_level, int code_blob_type) throw (); +#endif // INCLUDE_JBOLT + const char* reloc_string_for(u_char* begin, u_char* end); // Returns true if this thread changed the state of the nmethod or // false if another thread performed the transition. 
@@ -253,6 +258,9 @@ class nmethod : public CompiledMethod { , jweak installed_code = NULL, jweak speculation_log = NULL #endif +#if INCLUDE_JBOLT + , int code_blob_type = CodeBlobType::All // for jbolt +#endif // INCLUDE_JBOLT ); static nmethod* new_native_nmethod(const methodHandle& method, diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index c3fae3df2..5d806966d 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -75,6 +75,9 @@ #ifdef COMPILER2 #include "opto/c2compiler.hpp" #endif +#if INCLUDE_JBOLT +#include "jbolt/jBoltManager.hpp" +#endif // INCLUDE_JBOLT #ifdef DTRACE_ENABLED @@ -1882,6 +1885,12 @@ void CompileBroker::compiler_thread_loop() { } } +#if INCLUDE_JBOLT + if (UseJBolt && JBoltLoadMode) { + JBoltManager::check_start_reordering(thread); + } +#endif // INCLUDE_JBOLT + if (UseDynamicNumberOfCompilerThreads) { possibly_add_compiler_threads(); } diff --git a/src/hotspot/share/compiler/compileBroker.hpp b/src/hotspot/share/compiler/compileBroker.hpp index 53e496cd3..2c8ecb5ce 100644 --- a/src/hotspot/share/compiler/compileBroker.hpp +++ b/src/hotspot/share/compiler/compileBroker.hpp @@ -139,6 +139,9 @@ public: class CompileBroker: AllStatic { friend class Threads; friend class CompileTaskWrapper; +#if INCLUDE_JBOLT + friend class JBoltManager; +#endif // INCLUDE_JBOLT public: enum { diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp index 2029defdc..61f3af955 100644 --- a/src/hotspot/share/compiler/compileTask.hpp +++ b/src/hotspot/share/compiler/compileTask.hpp @@ -55,6 +55,9 @@ class CompileTask : public CHeapObj { Reason_Whitebox, // Whitebox API Reason_MustBeCompiled, // Java callHelper, LinkResolver Reason_Bootstrap, // JVMCI bootstrap +#if INCLUDE_JBOLT + Reason_Reorder, // JBolt reorder +#endif Reason_Count }; @@ -69,6 +72,9 @@ class CompileTask : public CHeapObj { "whitebox", 
"must_be_compiled", "bootstrap" +#if INCLUDE_JBOLT + , "reorder" +#endif }; return reason_names[compile_reason]; } @@ -225,6 +231,12 @@ public: print_inlining_inner(tty, method, inline_level, bci, msg); } static void print_inlining_ul(ciMethod* method, int inline_level, int bci, const char* msg = NULL); + +#if INCLUDE_JBOLT + CompileReason compile_reason() { return _compile_reason; } + int hot_count() { return _hot_count; } + const char* failure_reason() { return _failure_reason; } +#endif // INCLUDE_JBOLT }; #endif // SHARE_VM_COMPILER_COMPILETASK_HPP diff --git a/src/hotspot/share/compiler/compilerDefinitions.hpp b/src/hotspot/share/compiler/compilerDefinitions.hpp index 12589e11c..cf2c2b3b7 100644 --- a/src/hotspot/share/compiler/compilerDefinitions.hpp +++ b/src/hotspot/share/compiler/compilerDefinitions.hpp @@ -26,6 +26,11 @@ #define SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP #include "memory/allocation.hpp" +#include "runtime/arguments.hpp" + +#if INCLUDE_JVMCI +#include "jvmci/jvmci_globals.hpp" +#endif // The (closed set) of concrete compiler classes. enum CompilerType { @@ -128,6 +133,27 @@ public: static void ergo_initialize(); + static bool has_c1() { return COMPILER1_PRESENT(true) NOT_COMPILER1(false); } + static bool has_c2() { return COMPILER2_PRESENT(true) NOT_COMPILER2(false); } + static bool has_jvmci() { return JVMCI_ONLY(true) NOT_JVMCI(false); } + + static bool is_jvmci_compiler() { return JVMCI_ONLY(has_jvmci() && UseJVMCICompiler) NOT_JVMCI(false); } + static bool is_interpreter_only() { return Arguments::is_interpreter_only() || TieredStopAtLevel == CompLevel_none; } + + // Is the JVM in a configuration that permits only c1-compiled methods (level 1,2,3)? 
+ static bool is_c1_only() { + if (!is_interpreter_only() && has_c1()) { + const bool c1_only = !has_c2() && !is_jvmci_compiler(); + const bool tiered_degraded_to_c1_only = TieredCompilation && TieredStopAtLevel >= CompLevel_simple && TieredStopAtLevel < CompLevel_full_optimization; + return c1_only || tiered_degraded_to_c1_only; + } + return false; + } + + static bool is_c2_enabled() { + return has_c2() && !is_interpreter_only() && !is_c1_only() && !is_jvmci_compiler(); + } + private: static void set_tiered_flags(); }; diff --git a/src/hotspot/share/jbolt/jBoltCallGraph.cpp b/src/hotspot/share/jbolt/jBoltCallGraph.cpp new file mode 100644 index 000000000..c2a3b51f5 --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltCallGraph.cpp @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "jbolt/jBoltCallGraph.hpp" +#include "jfr/utilities/jfrAllocation.hpp" +#include "logging/log.hpp" +#include "logging/logStream.hpp" +#include "oops/method.inline.hpp" +#include "utilities/defaultStream.hpp" + +#define PAGE_SIZE os::vm_page_size() + +static GrowableArray* _clusters = NULL; +static GrowableArray* _calls = NULL; +static GrowableArray* _funcs = NULL; + +// (JBolt hfsort optional)sort final clusters by density +static const bool _jbolt_density_sort = false; +// (JBolt hfsort optional)freeze merging while exceeding pagesize +static const bool _jbolt_merge_frozen = false; + +void JBoltCallGraph::initialize() { + ::_clusters = JBoltCallGraph::callgraph_instance().callgraph_clusters(); + ::_calls = JBoltCallGraph::callgraph_instance().callgraph_calls(); + ::_funcs = JBoltCallGraph::callgraph_instance().callgraph_funcs(); +} + +void JBoltCallGraph::deinitialize() { + ::_clusters = NULL; + ::_calls = NULL; + ::_funcs = NULL; +} + +int JBoltCallGraph::clear_instance() { + delete _clusters; + delete _calls; + delete _funcs; + + // Reinit default cluster start id + _init_cluster_id = 0; + + // Re-allocate + _clusters = create_growable_array(); + _calls = create_growable_array(); + _funcs = create_growable_array(); + + // Re-initialize + initialize(); + + return 0; +} + +static GrowableArray* clusters_copy() { + GrowableArray* copy = create_growable_array(_clusters->length()); + copy->appendAll(_clusters); + return copy; +} + +static GrowableArray* funcs_copy() { + GrowableArray* copy = create_growable_array(_funcs->length()); + copy->appendAll(_funcs); + return copy; +} + +static int find_func_index(const JBoltFunc* func) { + for (int i = 0; i < _funcs->length(); ++i) { + JBoltFunc& existing = _funcs->at(i); + if (existing == (*func)) { + return i; + } + } + return -1; +} + +// Searching for a cluster with corresponding func or creating a new one if doesn't exist +static JBoltCluster* find_cluster(JBoltFunc* func) { 
+ for (int i = 0; i < _clusters->length(); ++i) { + JBoltCluster& cluster = _clusters->at(i); + int index = cluster.func_indexes()->at(0); + if (_funcs->at(index) == (*func)) { + return &cluster; + } + } + _funcs->append(*func); + _clusters->append(JBoltCluster(*func)); + JBoltCluster& cluster = _clusters->at(_clusters->length() - 1); + _funcs->at(_funcs->length() - 1).set_cluster_id(cluster.id()); + return &cluster; +} + +// Creating a new call in graph or updating the weight if exists +static void add_call_to_calls(GrowableArray* calls, const JBoltCall* call) { + for (int i = 0; i < calls->length(); ++i) { + JBoltCall& existing_call = calls->at(i); + if (existing_call == *call) { + if (existing_call.stacktrace_id() == call->stacktrace_id()) { + assert(call->call_count() >= existing_call.call_count(), "invariant"); + existing_call.callee().add_heat(call->call_count() - existing_call.call_count()); + existing_call.set_call_count(call->call_count()); + } + else { + existing_call.callee().add_heat(call->call_count()); + existing_call.set_call_count(existing_call.call_count() + call->call_count()); + } + return; + } + } + + calls->append(*call); + call->callee().add_heat(call->call_count()); + call->callee().append_call_index(calls->length() - 1); +} + +// Getting final funcs order from an array of processed clusters +static GrowableArray* clusters_to_funcs_order(GrowableArray* clusters) { + log_debug(jbolt)( "sorted clusters:\n"); + for (int i = 0; i < clusters->length(); ++i) { + log_debug(jbolt)( "cluster id: %d heats: %ld size: %dB density: %f\n", clusters->at(i).id(), clusters->at(i).heats(), clusters->at(i).size(), clusters->at(i).density()); + for (int j = 0; j < clusters->at(i).get_funcs_count(); ++j) { + JBoltFunc& func = _funcs->at(clusters->at(i).func_indexes()->at(j)); + const Method* const method = func.method(); + if (method != NULL) { + log_debug(jbolt)( "%d: method signature:%s heat: %ld size: %dB\n", + j, method->external_name(), func.heat(), 
func.size()); + } + } + } + + GrowableArray* order = create_growable_array(_funcs->length()); + // used to separate distinct clusters, klass = NULL + JBoltFunc seperator_func; + order->append(seperator_func); + for (int i = 0; i < clusters->length(); ++i) { + JBoltCluster& cluster = clusters->at(i); + GrowableArray* func_indexes = cluster.func_indexes(); + + for (int j = 0; j < func_indexes->length(); ++j) { + int index = func_indexes->at(j); + order->append(_funcs->at(index)); + } + + order->append(seperator_func); + } + return order; +} + +template +static int fast_compare(T val1, T val2) { + return (val1 < val2) ? 1 : ((val1 == val2) ? 0 : -1); +} + +// Comparing function needed to sort an array of funcs by their weights (in decreasing order) +static int func_comparator(JBoltFunc* func1, JBoltFunc* func2) { + return _jbolt_density_sort ? fast_compare(func1->heat() * func2->size(), func2->heat() * func1->size()) : fast_compare(func1->heat(), func2->heat()); +} + +// Comparing cluster needed to sort an array of clusters by their densities (in decreasing order) +static int cluster_comparator(JBoltCluster* cluster1, JBoltCluster* cluster2) { + return _jbolt_density_sort ?
fast_compare(cluster1->density(), cluster2->density()) : fast_compare(cluster1->heats(), cluster2->heats()); +} + +// Comparing call indexes needed to sort an array of call indexes by their call counts (in decreasing order) +static int func_call_indexes_comparator(int* index1, int* index2) { + return fast_compare(_calls->at(*index1).call_count(), _calls->at(*index2).call_count()); +} + +JBoltCallGraph& JBoltCallGraph::callgraph_instance() { + static JBoltCallGraph _call_graph; + return _call_graph; +} + +void JBoltCallGraph::add_func(JBoltFunc* func) { + if (!(UseJBolt && JBoltManager::reorder_phase_is_profiling_or_waiting())) return; + JBoltCluster* cluster = find_cluster(func); + assert(cluster != NULL, "invariant"); +} + +void JBoltCallGraph::add_call(JBoltCall* call) { + if (!(UseJBolt && JBoltManager::reorder_phase_is_profiling_or_waiting())) return; + // Self-recursion is not helpful for the call, skip it directly + if (call->caller() == call->callee()) return; + add_call_to_calls(_calls, call); +} + +uintptr_t related_data_jbolt_merge_judge[] = { + (uintptr_t)in_bytes(JBoltCluster::id_offset()), + (uintptr_t)in_bytes(JBoltCluster::heats_offset()), + (uintptr_t)in_bytes(JBoltCluster::frozen_offset()), + (uintptr_t)in_bytes(JBoltCluster::size_offset()), + (uintptr_t)in_bytes(JBoltCluster::density_offset()), + (uintptr_t)in_bytes(JBoltCluster::func_indexes_offset()), + + (uintptr_t)in_bytes(GrowableArray
::data_offset()), + + (uintptr_t)JBoltCluster::find_cluster_by_id, + (uintptr_t)_jbolt_merge_frozen +}; + +static void deal_with_each_func(GrowableArray* clusters, GrowableArray* funcs, GrowableArray* merged) { + for (int i = 0; i < funcs->length(); ++i) { + JBoltFunc& func = funcs->at(i); + + JBoltCluster* cluster = JBoltCluster::find_cluster_by_id(clusters, func.cluster_id()); + + // for cluster size larger than page size, should be frozen and don't merge with any cluster + if (_jbolt_merge_frozen && cluster->frozen()) continue; + + // find best predecessor + func.call_indexes()->sort(&func_call_indexes_comparator); + + int bestPred = -1; + + for (int j = 0; j < func.call_indexes()->length(); ++j) { + const JBoltCall& call = _calls->at(func.call_indexes()->at(j)); + + bestPred = os::Linux::jboltMerge_judge(related_data_jbolt_merge_judge, call.caller().cluster_id(), (address)clusters, (address)merged, (address)cluster); + + if (bestPred == -1) continue; + + break; + } + + // not merge -- no suitable caller nodes + if (bestPred == -1) { + continue; + } + + JBoltCluster* predCluster = JBoltCluster::find_cluster_by_id(clusters, bestPred); + + // merge callee cluster to caller cluster + for (int j = 0; j < cluster->func_indexes()->length(); ++j) { + int index = cluster->func_indexes()->at(j); + predCluster->append_func_index(index); + } + predCluster->add_heat(cluster->heats()); + predCluster->add_size(cluster->size()); + predCluster->update_density(); + merged->at(cluster->id()) = bestPred; + cluster->clear(); + } +} + +// Every node is a cluster with funcs +// Initially each cluster has only one func inside +GrowableArray* JBoltCallGraph::hfsort() { + if (!(UseJBolt && (JBoltDumpMode || JBoltManager::auto_mode()))) return NULL; + log_debug(jbolt)( "hfsort begin...\n"); + // Copies are needed for saving initial graph in memory + GrowableArray* clusters = clusters_copy(); + GrowableArray* funcs = funcs_copy(); + + // store a map for finding head of merge chain + 
GrowableArray* merged = create_growable_array(clusters->length()); + for (int i = 0; i < clusters->length(); ++i) { + merged->append(-1); + } + + // sorted by func(initially a node) weight(now just as 'heat') + funcs->sort(&func_comparator); + + // Process each function, and consider merging its cluster with the + // one containing its most likely predecessor. + deal_with_each_func(clusters, funcs, merged); + + // the set of clusters that are left + GrowableArray* sortedClusters = create_growable_array(); + for (int i = 0; i < clusters->length(); ++i) { + if (clusters->at(i).id() != -1) { + sortedClusters->append(clusters->at(i)); + } + } + + sortedClusters->sort(&cluster_comparator); + + GrowableArray* order = clusters_to_funcs_order(sortedClusters); + + delete clusters; + delete funcs; + delete merged; + delete sortedClusters; + log_debug(jbolt)( "hfsort over...\n"); + + return order; +} + +JBoltFunc::JBoltFunc() : + _method(NULL), + _method_id(0), + _heat(0), + _size(0), + _cluster_id(-1), + _method_key(), + _call_indexes(create_growable_array()) {} + +JBoltFunc::JBoltFunc(const JBoltFunc& func) : + _method(func._method), + _method_id(func._method_id), + _heat(func._heat), + _size(func._size), + _cluster_id(func._cluster_id), + _method_key(func._method_key), + _call_indexes(create_growable_array(func.get_calls_count())) { + GrowableArray* array = func.call_indexes(); + _call_indexes->appendAll(array); + } + +JBoltFunc::JBoltFunc(const Method* method, traceid method_id, int size, JBoltMethodKey method_key) : + _method(method), + _method_id(method_id), + _heat(0), + _size(size), + _cluster_id(-1), + _method_key(method_key), + _call_indexes(create_growable_array()) { + // not new_symbol, need to inc reference cnt + _method_key.klass()->increment_refcount(); + _method_key.name()->increment_refcount(); + _method_key.sig()->increment_refcount(); + } + +void JBoltFunc::add_heat(int64_t heat) { + _heat += heat; + assert(_cluster_id != -1, "invariant"); + 
_clusters->at(_cluster_id).add_heat(heat); + _clusters->at(_cluster_id).update_density(); +} + +void JBoltFunc::set_heat(int64_t heat) { + int64_t diff = heat - _heat; + _heat = heat; + assert(_cluster_id != -1, "invariant"); + _clusters->at(_cluster_id).add_heat(diff); + _clusters->at(_cluster_id).update_density(); +} + +void JBoltFunc::set_cluster_id(int cluster_id) { _cluster_id = cluster_id; } + +void JBoltFunc::append_call_index(int index) { _call_indexes->append(index); } + +JBoltFunc* JBoltFunc::constructor(const Method* method, traceid method_id, int size, JBoltMethodKey method_key) { + JBoltFunc *ret = new JBoltFunc(method, method_id, size, method_key); + return ret; +} + +JBoltFunc* JBoltFunc::copy_constructor(const JBoltFunc* func) { + JBoltFunc *ret = new JBoltFunc(*func); + return ret; +} + +JBoltCluster::JBoltCluster() : + _id(-1), + _heats(0), + _frozen(false), + _size(0), + _density(0.0), + _func_indexes(create_growable_array()) {} + +JBoltCluster::JBoltCluster(const JBoltFunc& func) : + _id(_init_cluster_id++), + _heats(func.heat()), + _frozen(false), + _size(func.size()), + _density(0.0), + _func_indexes(create_growable_array()) { + if (_size >= PAGE_SIZE) + freeze(); + + update_density(); + + int func_idx = find_func_index(&func); + assert(func_idx != -1, "invariant"); + _func_indexes->append(func_idx); + } + +JBoltCluster::JBoltCluster(const JBoltCluster& cluster) : + _id(cluster.id()), + _heats(cluster.heats()), + _frozen(cluster.frozen()), + _size(cluster.size()), + _density(cluster.density()), + _func_indexes(create_growable_array(cluster.get_funcs_count())) { + GrowableArray* array = cluster.func_indexes(); + _func_indexes->appendAll(array); + } + +void JBoltCluster::add_heat(int64_t heat) { _heats += heat; } + +void JBoltCluster::freeze() { _frozen = true; } + +void JBoltCluster::add_size(int size) { _size += size; } + +void JBoltCluster::update_density() { _density = (double)_heats / (double)_size; } + +void 
JBoltCluster::append_func_index(int index) { _func_indexes->append(index); } + +void JBoltCluster::clear() { + _id = -1; + _heats = 0; + _frozen = false; + _size = 0; + _density = 0.0; + _func_indexes->clear(); +} + +// Searching for a cluster by its id +JBoltCluster* JBoltCluster::find_cluster_by_id(GrowableArray* clusters, u4 id) { + if (id >= (u4)clusters->length()) return NULL; + + return &(clusters->at(id)); +} + +JBoltCluster* JBoltCluster::constructor(const JBoltFunc* func) { + JBoltCluster *ret = new JBoltCluster(*func); + return ret; +} + +JBoltCluster* JBoltCluster::copy_constructor(const JBoltCluster* cluster) { + JBoltCluster *ret = new JBoltCluster(*cluster); + return ret; +} + +JBoltCall::JBoltCall() : + _caller_index(-1), + _callee_index(-1), + _call_count(0), + _stacktrace_id(0) {} + +JBoltCall::JBoltCall(const JBoltCall& call) : + _caller_index(call._caller_index), + _callee_index(call._callee_index), + _call_count(call._call_count), + _stacktrace_id(call._stacktrace_id) {} + +JBoltCall::JBoltCall(const JBoltFunc& caller_func, const JBoltFunc& callee_func, u4 call_count, traceid stacktrace_id) : + _call_count(call_count), + _stacktrace_id(stacktrace_id) { + _caller_index = find_func_index(&caller_func); + _callee_index = find_func_index(&callee_func); + assert(_caller_index != -1, "invariant"); + assert(_callee_index != -1, "invariant"); + } + +JBoltFunc& JBoltCall::caller() const { return _funcs->at(_caller_index); } + +JBoltFunc& JBoltCall::callee() const { return _funcs->at(_callee_index); } + +void JBoltCall::set_caller_index(int index) { _caller_index = index; } + +void JBoltCall::set_callee_index(int index) { _callee_index = index; } + +void JBoltCall::set_call_count(u4 call_count) { _call_count = call_count; } + +JBoltCall* JBoltCall::constructor(const JBoltFunc* caller_func, const JBoltFunc* callee_func, u4 call_count, traceid stacktrace_id) { + JBoltCall *ret = new JBoltCall(*caller_func, *callee_func, call_count, stacktrace_id); + return 
ret; +} + +JBoltCall* JBoltCall::copy_constructor(const JBoltCall* call) { + JBoltCall *ret = new JBoltCall(*call); + return ret; +} \ No newline at end of file diff --git a/src/hotspot/share/jbolt/jBoltCallGraph.hpp b/src/hotspot/share/jbolt/jBoltCallGraph.hpp new file mode 100644 index 000000000..1bfbcabcc --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltCallGraph.hpp @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_JBOLT_JBOLTCALLGRAPH_HPP +#define SHARE_JBOLT_JBOLTCALLGRAPH_HPP + +#include "jbolt/jbolt_globals.hpp" +#include "jbolt/jBoltManager.hpp" +#include "jfr/utilities/jfrTypes.hpp" +#include "utilities/growableArray.hpp" + +class JBoltFunc; +class JBoltCall; +class JBoltCluster; + +template +static GrowableArray* create_growable_array(int size = 1) { + GrowableArray* array = new (ResourceObj::C_HEAP, mtTracing) GrowableArray(size, mtTracing); + assert(array != NULL, "invariant"); + return array; +} + +// initial cluster id +static u4 _init_cluster_id = 0; + +class JBoltCallGraph : public CHeapObj { + private: + GrowableArray* _clusters = NULL; + GrowableArray* _calls = NULL; + GrowableArray* _funcs = NULL; + + JBoltCallGraph() { + _clusters = create_growable_array(); + _calls = create_growable_array(); + _funcs = create_growable_array(); + } + + // for constructing CG + void add_func(JBoltFunc* func); // Node + void add_call(JBoltCall* call); // Edge + + public: + static JBoltCallGraph& callgraph_instance(); + // these two funcs initialize and deinitialize homonymous static array pointers in global + static void initialize(); + static void deinitialize(); + + GrowableArray* callgraph_clusters() { return _clusters; } + GrowableArray* callgraph_calls() { return _calls; } + GrowableArray* callgraph_funcs() { return _funcs; } + + static void static_add_func(JBoltFunc* func) { callgraph_instance().add_func(func); } + static void static_add_call(JBoltCall* call) { callgraph_instance().add_call(call); } + + // for dealing with CG + GrowableArray* hfsort(); + + int clear_instance(); + + virtual ~JBoltCallGraph() { + delete _clusters; + delete _calls; + delete _funcs; + + _clusters = NULL; + _calls = NULL; + _funcs = NULL; + } +}; + +class JBoltFunc : public CHeapObj { + private: + const Method* _method; + traceid _method_id; + int64_t _heat; + int _size; + int _cluster_id; + JBoltMethodKey _method_key; + GrowableArray* _call_indexes; + + public: + 
JBoltFunc(); + JBoltFunc(const JBoltFunc& func); + JBoltFunc(const Method* method, traceid method_id, int size, JBoltMethodKey method_key); + + virtual ~JBoltFunc() { + delete _call_indexes; + } + + bool operator==(const JBoltFunc& func) const { return (_method == func._method && _method_id == func._method_id) || (_method_key.equals(func._method_key)); } + bool operator!=(const JBoltFunc& func) const { return (_method != func._method || _method_id != func._method_id) && !(_method_key.equals(func._method_key)); } + + JBoltFunc& operator=(const JBoltFunc& func) { + _method = func._method; + _method_id = func._method_id; + _heat = func._heat; + _size = func._size; + _cluster_id = func._cluster_id; + _method_key = func._method_key; + if (_call_indexes != NULL) { + delete _call_indexes; + } + _call_indexes = create_growable_array(func.get_calls_count()); + _call_indexes->appendAll(func.call_indexes()); + + return *this; + } + + const Method* method() const { return _method; } + const traceid method_id() const { return _method_id; } + const int64_t heat() const { return _heat; } + const int size() const { return _size; } + const int cluster_id() const { return _cluster_id; } + JBoltMethodKey method_key() const { return _method_key; } + GrowableArray* call_indexes() const { return _call_indexes; } + int get_calls_count() const { return _call_indexes->length(); } + + void add_heat(int64_t heat); + void set_heat(int64_t heat); + void set_cluster_id(int cluster_id); + void append_call_index(int index); + + static ByteSize method_offset() { return byte_offset_of(JBoltFunc, _method); } + static ByteSize method_id_offset() { return byte_offset_of(JBoltFunc, _method_id); } + static ByteSize heat_offset() { return byte_offset_of(JBoltFunc, _heat); } + static ByteSize size_offset() { return byte_offset_of(JBoltFunc, _size); } + static ByteSize cluster_id_offset() { return byte_offset_of(JBoltFunc, _cluster_id); } + static ByteSize call_indexes_offset() { return 
byte_offset_of(JBoltFunc, _call_indexes); } + + static JBoltFunc* constructor(const Method* method, traceid method_id, int size, JBoltMethodKey method_key); + static JBoltFunc* copy_constructor(const JBoltFunc* func); +}; + +class JBoltCluster : public CHeapObj { + private: + int _id; + int64_t _heats; + bool _frozen; + int _size; + double _density; + GrowableArray* _func_indexes; + + public: + JBoltCluster(); + JBoltCluster(const JBoltFunc& func); + JBoltCluster(const JBoltCluster& cluster); + + bool operator==(const JBoltCluster& cluster) const { + if (_id != cluster.id()) return false; + + int count = get_funcs_count(); + if (count != cluster.get_funcs_count()) + return false; + + for (int i = 0; i < count; ++i) { + if (_func_indexes->at(i) != cluster._func_indexes->at(i)) { + return false; + } + } + + return true; + } + + JBoltCluster& operator=(const JBoltCluster& cluster) { + _id = cluster.id(); + _heats = cluster.heats(); + _frozen = cluster.frozen(); + _size = cluster.size(); + _density = cluster.density(); + if (_func_indexes != NULL) { + delete _func_indexes; + } + _func_indexes = create_growable_array(cluster.get_funcs_count()); + _func_indexes->appendAll(cluster.func_indexes()); + return *this; + } + + virtual ~JBoltCluster() { delete _func_indexes; } + + int id() const { return _id; } + int64_t heats() const { return _heats; } + bool frozen() const { return _frozen; } + int size() const { return _size; } + double density() const { return _density; } + GrowableArray* func_indexes() const { return _func_indexes; } + int get_funcs_count() const { return _func_indexes->length(); } + + void add_heat(int64_t heat); + void freeze(); + void add_size(int size); + void update_density(); + void append_func_index(int index); + void clear(); + + static JBoltCluster* find_cluster_by_id(GrowableArray* clusters, u4 id); + + static ByteSize id_offset() { return byte_offset_of(JBoltCluster, _id); } + static ByteSize heats_offset() { return byte_offset_of(JBoltCluster, 
_heats); } + static ByteSize frozen_offset() { return byte_offset_of(JBoltCluster, _frozen); } + static ByteSize size_offset() { return byte_offset_of(JBoltCluster, _size); } + static ByteSize density_offset() { return byte_offset_of(JBoltCluster, _density); } + static ByteSize func_indexes_offset() { return byte_offset_of(JBoltCluster, _func_indexes); } + + static JBoltCluster* constructor(const JBoltFunc* func); + static JBoltCluster* copy_constructor(const JBoltCluster* cluster); +}; + +class JBoltCall : public CHeapObj { + private: + int _caller_index; + int _callee_index; + u4 _call_count; + traceid _stacktrace_id; + + public: + JBoltCall(); + JBoltCall(const JBoltCall& call); + JBoltCall(const JBoltFunc& caller_func, const JBoltFunc& callee_func, u4 call_count, traceid stacktrace_id); + + bool operator==(const JBoltCall& call) const { + return _caller_index == call._caller_index && _callee_index == call._callee_index; + } + + JBoltCall& operator=(const JBoltCall& call) { + _caller_index = call._caller_index; + _callee_index = call._callee_index; + _call_count = call._call_count; + _stacktrace_id = call._stacktrace_id; + return *this; + } + + virtual ~JBoltCall() {} + + int caller_index() const { return _caller_index; } + int callee_index() const { return _callee_index; } + u4 call_count() const { return _call_count; } + traceid stacktrace_id() const { return _stacktrace_id; } + + JBoltFunc& caller() const; + JBoltFunc& callee() const; + void set_caller_index(int index); + void set_callee_index(int index); + void set_call_count(u4 count); + + static ByteSize caller_offset() { return byte_offset_of(JBoltCall, _caller_index); } + static ByteSize callee_offset() { return byte_offset_of(JBoltCall, _caller_index); } + static ByteSize call_count_offset() { return byte_offset_of(JBoltCall, _call_count); } + static ByteSize stacktrace_id_offset() { return byte_offset_of(JBoltCall, _stacktrace_id); } + + static JBoltCall* constructor(const JBoltFunc* caller_func, const 
JBoltFunc* callee_func, u4 call_count, traceid stacktrace_id); + static JBoltCall* copy_constructor(const JBoltCall* call); +}; + +#endif // SHARE_JBOLT_JBOLTCALLGRAPH_HPP diff --git a/src/hotspot/share/jbolt/jBoltControlThread.cpp b/src/hotspot/share/jbolt/jBoltControlThread.cpp new file mode 100644 index 000000000..d813a94f9 --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltControlThread.cpp @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +#include + +#include "classfile/javaClasses.inline.hpp" +#include "classfile/vmSymbols.hpp" +#include "jbolt/jBoltControlThread.hpp" +#include "jbolt/jBoltManager.hpp" +#include "logging/log.hpp" +#include "logging/logStream.hpp" +#include "runtime/atomic.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/javaCalls.hpp" +#include "runtime/jniHandles.inline.hpp" +#include "runtime/thread.inline.hpp" +#include "runtime/sweeper.hpp" + +JavaThread* volatile JBoltControlThread::_the_java_thread = NULL; +Monitor* JBoltControlThread::_control_wait_monitor = NULL; +Monitor* JBoltControlThread::_sample_wait_monitor = NULL; +jobject JBoltControlThread::_thread_obj = NULL; +int volatile JBoltControlThread::_signal = JBoltControlThread::SIG_NULL; +bool volatile JBoltControlThread::_abort = false; +intx volatile JBoltControlThread::_interval = 0; + +static bool not_first = false; + +void JBoltControlThread::init(TRAPS) { + Handle string = java_lang_String::create_from_str("JBolt Control", CATCH); + Handle thread_group(THREAD, Universe::system_thread_group()); + Handle thread_oop = JavaCalls::construct_new_instance( + SystemDictionary::Thread_klass(), + vmSymbols::threadgroup_string_void_signature(), + thread_group, + string, + CATCH); + _thread_obj = JNIHandles::make_global(thread_oop); + _control_wait_monitor = new Monitor(Mutex::nonleaf, "JBoltControlMonitor"); + _sample_wait_monitor = new Monitor(Mutex::nonleaf, "JBoltSampleMonitor"); + OrderAccess::release_store(&_interval, JBoltSampleInterval); +} + +void JBoltControlThread::start_thread(TRAPS) { + guarantee(OrderAccess::load_acquire(&_the_java_thread) == NULL, "sanity"); + JavaThread* new_thread = new JavaThread(&thread_entry); + if (new_thread->osthread() == NULL) { + fatal("Failed to create JBoltControlThread as no os thread!"); + return; + } + + Handle thread_oop(THREAD, JNIHandles::resolve_non_null(_thread_obj)); + 
{ + MutexLocker mu(Threads_lock, THREAD); + java_lang_Thread::set_thread(thread_oop(), new_thread); + java_lang_Thread::set_priority(thread_oop(), MinPriority); + java_lang_Thread::set_daemon(thread_oop()); + new_thread->set_threadObj(thread_oop()); + Threads::add(new_thread); + Thread::start(new_thread); + } + guarantee(Atomic::cmpxchg(new_thread, &_the_java_thread, (JavaThread*) NULL) == NULL, "sanity"); +} + +intx JBoltControlThread::sample_interval() { + return OrderAccess::load_acquire(&_interval); +} + +// Work to do before restarting a control schedule, twice and after only +bool JBoltControlThread::prev_control_schdule(TRAPS) { + guarantee(JBoltManager::auto_mode(), "sanity"); + // Clear obsolete data structures + if (JBoltManager::clear_last_sample_datas() != 0) { + log_error(jbolt)("Something wrong happened in data clean, not going on..."); + return false; + } + + // Restart JFR + bufferedStream output; + DCmd::parse_and_execute(DCmd_Source_Internal, &output, "JFR.start name=jbolt-jfr", ' ', THREAD); + if (HAS_PENDING_EXCEPTION) { + ResourceMark rm; + log_warning(jbolt)("unable to start jfr jbolt-jfr"); + log_warning(jbolt)("exception type: %s", PENDING_EXCEPTION->klass()->external_name()); + // don't unwind this exception + CLEAR_PENDING_EXCEPTION; + } + + return true; +} + +void JBoltControlThread::control_schdule(TRAPS) { + guarantee(JBoltManager::auto_mode(), "sanity"); + + { MonitorLocker locker(_sample_wait_monitor); + // Perform time wait + log_info(jbolt)("JBolt Starting Sample for %lds!!!", sample_interval()); + const jlong interval = (jlong) sample_interval(); + jlong cur_time = os::javaTimeMillis(); + const jlong end_time = cur_time + (interval * 1000); + while ((end_time > cur_time) && OrderAccess::load_acquire(&_signal) != SIG_STOP_PROFILING) { + int64_t timeout = (int64_t) (end_time - cur_time); + locker.wait(timeout); + cur_time = os::javaTimeMillis(); + } + } + // Close JFR + guarantee(JBoltManager::reorder_phase_profiling_to_waiting(), 
"sanity"); + bufferedStream output; + DCmd::parse_and_execute(DCmd_Source_Internal, &output, "JFR.stop name=jbolt-jfr", ' ', THREAD); + if (HAS_PENDING_EXCEPTION) { + ResourceMark rm; + // JFR.stop maybe failed if a jfr recording is already stopped + // but it's nothing worry, jbolt should continue to work normally + log_warning(jbolt)("unable to stop jfr jbolt-jfr"); + log_warning(jbolt)("exception type: %s", PENDING_EXCEPTION->klass()->external_name()); + // don't unwind this exception + CLEAR_PENDING_EXCEPTION; + } + if (Atomic::cmpxchg(false, &_abort, true) == /* should abort */ true) { + return; + } + + size_t total_nmethod_size = 0; + // Init structures for load phase + JBoltManager::init_auto_transition(&total_nmethod_size, CATCH); + + if (total_nmethod_size > JBoltCodeHeapSize) { + log_warning(jbolt)("JBolt reordering not complete because JBolt CodeHeap is too small to place all ordered methods. Please use -XX:JBoltCodeHeapSize to enlarge"); + log_warning(jbolt)("JBoltCodeHeapSize=" UINTX_FORMAT " B ( need " UINTX_FORMAT " B).", JBoltCodeHeapSize, total_nmethod_size); + } + + if (not_first) { + // Exchange Hot Segment primary and secondary relationships + JBoltManager::swap_semi_jbolt_segs(); + } + + if (!not_first && EnableDumpGraph) { + // When EnableDumpGraph, dump initial code heaps for compared + JBoltManager::dump_code_heaps_with_count(); + } + + guarantee(JBoltManager::reorder_phase_waiting_to_reordering(), "sanity"); + OrderAccess::release_store(&_signal, SIG_NULL); + + // Start reorder + JBoltManager::reorder_all_methods(CATCH); +} + +// Work to do after reordering, twice and after only +void JBoltControlThread::post_control_schdule(TRAPS) { + JBoltManager::clear_secondary_hot_seg(THREAD); +} + +struct tm JBoltControlThread::next_trigger_time(struct tm* localtime) { + struct tm target_tm = *localtime; + GrowableArray* rescheduling_time = JBoltManager::rescheduling_time(); + for (int i = 0; i < rescheduling_time->length(); ++i) { + char* target_time 
= rescheduling_time->at(i); + int target_hour = (target_time[0] - '0') * 10 + (target_time[1] - '0'); + int target_minute = (target_time[3] - '0') * 10 + (target_time[4] - '0'); + if (target_hour > localtime->tm_hour || (target_hour == localtime->tm_hour && target_minute > localtime->tm_min)) { + target_tm.tm_hour = target_hour; + target_tm.tm_min = target_minute; + target_tm.tm_sec = 0; + break; + } + if (i == rescheduling_time->length() - 1) { + target_time = rescheduling_time->at(0); + target_hour = (target_time[0] - '0') * 10 + (target_time[1] - '0'); + target_minute = (target_time[3] - '0') * 10 + (target_time[4] - '0'); + target_tm.tm_mday += 1; + target_tm.tm_hour = target_hour; + target_tm.tm_min = target_minute; + target_tm.tm_sec = 0; + mktime(&target_tm); + } + } + + return target_tm; +} + +void JBoltControlThread::wait_for_next_trigger(TRAPS) { + MonitorLocker locker(_control_wait_monitor); + time_t current_time; + struct tm p; + time(¤t_time); + localtime_r(¤t_time, &p); + if (JBoltManager::rescheduling_time() != NULL && JBoltManager::rescheduling_time()->length() > 0) { + struct tm target_tm = next_trigger_time(&p); + log_info(jbolt)("next trigger is at %d.%d.%d.%02d:%02d:%02d",1900+target_tm.tm_year,1+target_tm.tm_mon,target_tm.tm_mday,target_tm.tm_hour,target_tm.tm_min,target_tm.tm_sec); + while (OrderAccess::load_acquire(&_signal) != SIG_START_PROFILING) { + long time_wait = mktime(&target_tm) - current_time; + if (time_wait <= 0) { + log_info(jbolt)("successfully trigger at %02d:%02d",target_tm.tm_hour,target_tm.tm_min); + break; + } + locker.wait(time_wait * 1000); + time(¤t_time); + } + } + else { + while (OrderAccess::load_acquire(&_signal) != SIG_START_PROFILING) { + locker.wait(60 * 1000); + } + } +} + +void JBoltControlThread::thread_run_auto_loop(TRAPS) { + do { + OrderAccess::release_store(&_signal, SIG_NULL); + if (not_first && !prev_control_schdule(THREAD)) continue; + guarantee(JBoltManager::reorder_phase_available_to_profiling(), 
"sanity"); + control_schdule(THREAD); + if (!JBoltManager::reorder_phase_reordering_to_available()) { + // abort logic + guarantee(JBoltManager::reorder_phase_waiting_to_available(), "sanity"); + guarantee(Atomic::cmpxchg(SIG_NULL, &_signal, SIG_STOP_PROFILING) == SIG_STOP_PROFILING, "sanity"); + } + else if (not_first) { + post_control_schdule(THREAD); + } + not_first = true; + wait_for_next_trigger(THREAD); + JBoltManager::clear_structures(); + } while(true); +} + +void JBoltControlThread::thread_run(TRAPS) { + if (JBoltManager::auto_mode()) { + thread_run_auto_loop(THREAD); + } else { + guarantee(JBoltManager::can_reorder_now(), "sanity"); + guarantee(JBoltManager::reorder_phase_collecting_to_reordering(), "sanity"); + JBoltManager::reorder_all_methods(CATCH); + JBoltManager::clear_structures(); + guarantee(JBoltManager::reorder_phase_reordering_to_end(), "sanity"); + assert(JBoltLoadMode, "Only manual JBoltLoadMode can reach here"); + } +} + +bool JBoltControlThread::notify_sample_wait(bool abort) { + int old_sig = Atomic::cmpxchg(SIG_STOP_PROFILING, &_signal, SIG_NULL); + if (old_sig == SIG_NULL) { + MonitorLocker locker(_sample_wait_monitor); + // abort implementation maybe not in order in extreme cases + // add fence? or delete abort() if not so useful. 
+ OrderAccess::release_store(&_abort, abort); + locker.notify(); + return true; + } + return false; +} + +bool JBoltControlThread::notify_control_wait(intx interval) { + int old_sig = Atomic::cmpxchg(SIG_START_PROFILING, &_signal, SIG_NULL); + if (old_sig == SIG_NULL) { + // this lock will be grabbed by ControlThread until it's waiting + MonitorLocker locker(_control_wait_monitor); + OrderAccess::release_store(&_interval, interval); + locker.notify(); + return true; + } + return false; +} + +JavaThread* JBoltControlThread::get_thread() { + return OrderAccess::load_acquire(&_the_java_thread); +} \ No newline at end of file diff --git a/src/hotspot/share/jbolt/jBoltControlThread.hpp b/src/hotspot/share/jbolt/jBoltControlThread.hpp new file mode 100644 index 000000000..946a61960 --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltControlThread.hpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_JBOLT_JBOLTCONTROLTHREAD_HPP +#define SHARE_JBOLT_JBOLTCONTROLTHREAD_HPP + +#include "runtime/thread.hpp" + +/** + * Control JBolt how to run in this thread. + */ +class JBoltControlThread: public AllStatic { +public: + static const int SIG_NULL = 0; + static const int SIG_START_PROFILING = 1; + static const int SIG_STOP_PROFILING = 2; + +private: + static JavaThread* volatile _the_java_thread; + // Can be notified by jcmd JBolt.start, restart a control schedule + static Monitor* _control_wait_monitor; + // Can be notified by jcmd JBolt.stop/abort, stop a running JFR + static Monitor* _sample_wait_monitor; + static jobject _thread_obj; + static int volatile _signal; + static bool volatile _abort; + static intx volatile _interval; + + static void thread_entry(JavaThread* thread, TRAPS) { thread_run(thread); } + static void thread_run(TRAPS); + static void thread_run_auto_loop(TRAPS); + + static intx sample_interval(); + static bool prev_control_schdule(TRAPS); + static void control_schdule(TRAPS); + static void post_control_schdule(TRAPS); + static void wait_for_next_trigger(TRAPS); + + static struct tm next_trigger_time(struct tm* localtime); + +public: + static void init(TRAPS); + + static void start_thread(TRAPS); + + static bool notify_sample_wait(bool abort = false); + + static bool notify_control_wait(intx interval); + + static JavaThread* get_thread(); +}; + +#endif // SHARE_JBOLT_JBOLTCONTROLTHREAD_HPP \ No newline at end of file diff --git a/src/hotspot/share/jbolt/jBoltDcmds.cpp b/src/hotspot/share/jbolt/jBoltDcmds.cpp new file mode 100644 index 000000000..249a98001 --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltDcmds.cpp @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "jbolt/jBoltDcmds.hpp" +#include "jbolt/jBoltControlThread.hpp" +#include "jbolt/jBoltManager.hpp" + +bool register_jbolt_dcmds() { + uint32_t full_export = DCmd_Source_Internal | DCmd_Source_AttachAPI | DCmd_Source_MBean; + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + if (EnableDumpGraph) DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + return true; +} + +JBoltStartDCmd::JBoltStartDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap), + _duration("duration", "Duration of time(second) in this sample.", "INT", false, "600") { + _dcmdparser.add_dcmd_option(&_duration); +} + +int JBoltStartDCmd::num_arguments() { + ResourceMark rm; + JBoltStartDCmd* dcmd = new JBoltStartDCmd(NULL, false); + if (dcmd != NULL) { + 
DCmdMark mark(dcmd); + return dcmd->_dcmdparser.num_arguments(); + } else { + return 0; + } +} + +void JBoltStartDCmd::execute(DCmdSource source, TRAPS) { + if (!UseJBolt) { + output()->print_cr("Unable to execute because \"UseJBolt\" is disabled."); + return; + } + + if (!JBoltManager::auto_mode()) { + output()->print_cr("JBolt JCMD can only be used in auto mode."); + return; + } + + if (!JBoltManager::reorder_phase_is_available()) { + output()->print_cr("Unable to start because it's working now. Stop it first."); + return; + } + + intx interval = _duration.is_set() ? _duration.value() : JBoltSampleInterval; + + if (interval < 0) { + output()->print_cr("duration is set to %ld which is above range, should be in [0, %d]", interval, max_jint); + return; + } + + if (JBoltControlThread::notify_control_wait(interval)) { + output()->print_cr("OK. Start a new JBolt schedule, duration=%lds.", interval); + } + else { + output()->print_cr("It's busy now. Please try again later..."); + } +} + +void JBoltStartDCmd::print_help(const char* name) const { + output()->print_cr( + "Syntax : %s [options]\n" + "\n" + "Options:\n" + "\n" + " duration (Optional) Duration of time(second) in this sample. (INT, default value=600)\n" + "\n" + "Options must be specified using the or = syntax.\n" + "\n" + "Example usage:\n" + " $ jcmd JBolt.start\n" + " $ jcmd JBolt.start duration=900", name); +} + +void JBoltStopDCmd::execute(DCmdSource source, TRAPS) { + if (!UseJBolt) { + output()->print_cr("Unable to execute because \"UseJBolt\" is disabled."); + return; + } + + if (!JBoltManager::auto_mode()) { + output()->print_cr("JBolt JCMD can only be used in auto mode."); + return; + } + + if (!JBoltManager::reorder_phase_is_profiling()) { + output()->print_cr("Unable to stop because it's not sampling now."); + return; + } + + if (JBoltControlThread::notify_sample_wait()) { + output()->print_cr("OK.\"jbolt-jfr\" would be stopped and turn to reorder."); + } else { + output()->print_cr("It's busy now. 
Please try again later..."); + } +} + +void JBoltStopDCmd::print_help(const char* name) const { + output()->print_cr( + "Syntax : %s\n" + "\n" + "Example usage:\n" + " $ jcmd JBolt.stop", name); +} + +void JBoltAbortDCmd::execute(DCmdSource source, TRAPS) { + if (!UseJBolt) { + output()->print_cr("Unable to execute because \"UseJBolt\" is disabled."); + return; + } + + if (!JBoltManager::auto_mode()) { + output()->print_cr("JBolt JCMD can only be used in auto mode."); + return; + } + + if (!JBoltManager::reorder_phase_is_profiling()) { + output()->print_cr("Unable to abort because it's not sampling now."); + return; + } + + if (JBoltControlThread::notify_sample_wait(true)) { + output()->print_cr("OK.\"jbolt-jfr\" would be aborted."); + } else { + output()->print_cr("It's busy now. Please try again later..."); + } +} + +void JBoltAbortDCmd::print_help(const char* name) const { + output()->print_cr( + "Syntax : %s\n" + "\n" + "Example usage:\n" + " $ jcmd JBolt.abort", name); +} + +JBoltDumpDCmd::JBoltDumpDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap), + _filename("filename", "Name of the file to which the flight recording data is dumped", "STRING", true, NULL) { + _dcmdparser.add_dcmd_option(&_filename); +} + +int JBoltDumpDCmd::num_arguments() { + ResourceMark rm; + JBoltDumpDCmd* dcmd = new JBoltDumpDCmd(NULL, false); + if (dcmd != NULL) { + DCmdMark mark(dcmd); + return dcmd->_dcmdparser.num_arguments(); + } else { + return 0; + } +} + +void JBoltDumpDCmd::execute(DCmdSource source, TRAPS) { + if (!UseJBolt) { + output()->print_cr("Unable to execute because \"UseJBolt\" is disabled."); + return; + } + + if (!JBoltManager::auto_mode()) { + output()->print_cr("JBolt JCMD can only be used in auto mode."); + return; + } + + const char* path = _filename.value(); + char buffer[PATH_MAX]; + char* rp = NULL; + + JBoltErrorCode ec = JBoltManager::dump_order_in_jcmd(path); + switch (ec) { + case JBoltOrderNULL: + output()->print_cr("Failed: No order 
applied by JBolt now."); + break; + case JBoltOpenFileError: + output()->print_cr("Failed: File open error or NULL: %s", path); + break; + case JBoltOK: + rp = realpath(path, buffer); + output()->print_cr("Successful: Dump to %s", buffer); + break; + default: + ShouldNotReachHere(); + } +} + +void JBoltDumpDCmd::print_help(const char* name) const { + output()->print_cr( + "Syntax : %s [options]\n" + "\n" + "Options:\n" + "\n" + " filename Name of the file to which the flight recording data is dumped. (STRING, no default value)\n" + "\n" + "Options must be specified using the or = syntax.\n" + "\n" + "Example usage:\n" + " $ jcmd JBolt.dump filename=order.log", name); +} + +void JBoltDumpGraphDCmd::execute(DCmdSource source, TRAPS) { + if (!UseJBolt) { + output()->print_cr("Unable to execute because \"UseJBolt\" is disabled."); + return; + } + + if (!JBoltManager::auto_mode()) { + output()->print_cr("JBolt JCMD can only be used in auto mode."); + return; + } + + if (JBoltManager::reorder_phase_is_profiling()) { + output()->print_cr("Unable to dump because it's sampling now. Stop it first"); + return; + } + + JBoltManager::dump_code_heaps_with_count(); +} + +void JBoltDumpGraphDCmd::print_help(const char* name) const { + output()->print_cr( + "Syntax : %s\n" + "\n" + "Example usage:\n" + " $ jcmd JBolt.dumpgraph", name); +} \ No newline at end of file diff --git a/src/hotspot/share/jbolt/jBoltDcmds.hpp b/src/hotspot/share/jbolt/jBoltDcmds.hpp new file mode 100644 index 000000000..478a2043a --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltDcmds.hpp @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_JBOLT_JBOLTDCMDS_HPP +#define SHARE_JBOLT_JBOLTDCMDS_HPP + +#include "services/diagnosticCommand.hpp" + +class JBoltStartDCmd : public DCmdWithParser { + protected: + DCmdArgument _duration; + public: + JBoltStartDCmd(outputStream* output, bool heap); + + static const char* name() { + return "JBolt.start"; + } + static const char* description() { + return "Starts a new JBolt sample schedule(fail if sampling)"; + } + static const char* impact() { + return "Medium: Depending on JFR that JBolt rely on, the impact can range from low to high."; + } + static const JavaPermission permission() { + JavaPermission p = {"java.lang.management.ManagementPermission", "control", NULL}; + return p; + } + static int num_arguments(); + virtual void execute(DCmdSource source, TRAPS); + virtual void print_help(const char* name) const; +}; + +class JBoltStopDCmd : public DCmd { + public: + JBoltStopDCmd(outputStream* output, bool heap) : DCmd(output, heap) {} + + static const char* name() { + return "JBolt.stop"; + } + static const char* description() { + return "Stop a running JBolt sample schedule and reorder immediately(fail if not sampling)"; + } + static const char* impact() { + return "Low"; + } + static const JavaPermission permission() { + 
JavaPermission p = {"java.lang.management.ManagementPermission", "control", NULL}; + return p; + } + static int num_arguments() { + return 0; + } + + virtual void execute(DCmdSource source, TRAPS); + virtual void print_help(const char* name) const; +}; + +class JBoltAbortDCmd : public DCmd { + public: + JBoltAbortDCmd(outputStream* output, bool heap) : DCmd(output, heap) {} + + static const char* name() { + return "JBolt.abort"; + } + static const char* description() { + return "Stop a running JBolt sample schedule but don't reorder(fail if not sampling)"; + } + static const char* impact() { + return "Low"; + } + static const JavaPermission permission() { + JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL}; + return p; + } + static int num_arguments() { + return 0; + } + + virtual void execute(DCmdSource source, TRAPS); + virtual void print_help(const char* name) const; +}; + +class JBoltDumpDCmd : public DCmdWithParser { + protected: + DCmdArgument _filename; + public: + JBoltDumpDCmd(outputStream* output, bool heap); + + static const char* name() { + return "JBolt.dump"; + } + static const char* description() { + return "dump an effective order to file(fail if no order)"; + } + static const char* impact() { + return "Low"; + } + static const JavaPermission permission() { + JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL}; + return p; + } + static int num_arguments(); + virtual void execute(DCmdSource source, TRAPS); + virtual void print_help(const char* name) const; +}; + +class JBoltDumpGraphDCmd : public DCmd { + public: + JBoltDumpGraphDCmd(outputStream* output, bool heap) : DCmd(output, heap) {} + + static const char* name() { + return "JBolt.dumpgraph"; + } + static const char* description() { + return "Dump count files to provide data for drawing a heap heat graph"; + } + static const char* impact() { + return "Low"; + } + static const JavaPermission permission() { + JavaPermission p = 
{"java.lang.management.ManagementPermission", "monitor", NULL}; + return p; + } + static int num_arguments() { + return 0; + } + + virtual void execute(DCmdSource source, TRAPS); + virtual void print_help(const char* name) const; +}; + +bool register_jbolt_dcmds(); + +#endif // SHARE_JBOLT_JBOLTDCMDS_HPP \ No newline at end of file diff --git a/src/hotspot/share/jbolt/jBoltManager.cpp b/src/hotspot/share/jbolt/jBoltManager.cpp new file mode 100644 index 000000000..13d8dec55 --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltManager.cpp @@ -0,0 +1,1682 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include +#include +#include +#include + +#include "classfile/javaClasses.inline.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/vmSymbols.hpp" +#include "code/codeBlob.hpp" +#include "code/codeCache.hpp" +#include "compiler/compileBroker.hpp" +#include "jbolt/jBoltCallGraph.hpp" +#include "jbolt/jBoltControlThread.hpp" +#include "jbolt/jBoltManager.hpp" +#include "jbolt/jBoltUtils.inline.hpp" +#include "jfr/jfr.hpp" +#include "logging/log.hpp" +#include "logging/logStream.hpp" +#include "memory/resourceArea.hpp" +#include "oops/klass.inline.hpp" +#include "oops/method.inline.hpp" +#include "runtime/arguments.hpp" +#include "runtime/atomic.hpp" +#include "runtime/globals_extension.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/jniHandles.hpp" +#include "runtime/os.hpp" +#include "runtime/safepointVerifiers.hpp" +#include "runtime/sweeper.hpp" +#include "utilities/formatBuffer.hpp" + +#define LINE_BUF_SIZE 8192 // used to parse JBolt order file +#define MIN_FRAMESCOUNT 2 // used as default stacktrace depth +#define ILL_NM_STATE -2 // used to present nmethod illegal state +#define PATH_LENGTH 256 // used to store path + +#define B_TF(b) (b ? 
"V" : "X") + +GrowableArray* JBoltManager::_hot_methods_sorted = NULL; +JBoltManager::MethodKeyMap* JBoltManager::_hot_methods_vis = NULL; +int JBoltManager::_reorder_method_threshold_cnt = 0; + +volatile int JBoltManager::_reorder_phase = JBoltReorderPhase::Available; +volatile int JBoltManager::_reorderable_method_cnt = 0; +Method* volatile JBoltManager::_cur_reordering_method = NULL; + +Thread* JBoltManager::_start_reordering_thread = NULL; + +JBoltManager::StackFrameKeyMap* JBoltManager::_sampled_methods_refs = NULL; +JBoltManager::MethodHotCountMap* JBoltManager::_sampled_methods_hotcount_stored = NULL; + +bool JBoltManager::_auto_mode = false; + +// swap between MethodJBoltHot and MethodJBoltTmp +volatile int JBoltManager::_primary_hot_seg = CodeBlobType::MethodJBoltHot; +volatile int JBoltManager::_secondary_hot_seg = CodeBlobType::MethodJBoltTmp; + +// used in Reordering phase, reset to ##false after swapping the hot codecache +volatile bool JBoltManager::_hot_codecache_full = false; +volatile bool JBoltManager::_force_sweep = false; + +GrowableArray* JBoltManager::_rescheduling_time = NULL; +GrowableArray* _order_stored = NULL; + +// This is a tmp obj used only in initialization phases. +// We cannot alloc Symbol in phase 1 so we have to parses the order file again +// in phase 2. +// This obj will be freed after initialization. 
+static FILE* _order_fp = NULL; + +static bool read_line(FILE* fp, char* buf, int buf_len, int* res_len) { + if (fgets(buf, buf_len, fp) == NULL) { + return false; + } + int len = (int) strcspn(buf, "\r\n"); + buf[len] = '\0'; + *res_len = len; + return true; +} + +static bool read_a_size(char* buf, size_t* res) { + char* t = strchr(buf, ' '); + if (t == NULL) return false; + *t = '\0'; + julong v; + if (!Arguments::atojulong(buf, &v)) { + *t = ' '; + return false; + } + *t = ' '; + *res = (size_t) v; + return true; +} + +static void replace_all(char* s, char from, char to) { + char* begin = s; + while (true) { + char* t = strchr(begin, from); + if (t == NULL) { + break; + } + *t = to; + begin = t + 1; + } +} + +JBoltMethodValue::~JBoltMethodValue() { + if (_comp_info != NULL) delete get_comp_info(); +} + +CompileTaskInfo* JBoltMethodValue::get_comp_info() { + return OrderAccess::load_acquire(&_comp_info); +} + +bool JBoltMethodValue::set_comp_info(CompileTaskInfo* info) { + return Atomic::cmpxchg(info, &_comp_info, (CompileTaskInfo*) NULL) == NULL; +} + +void JBoltMethodValue::clear_comp_info_but_not_release() { + OrderAccess::release_store(&_comp_info, (CompileTaskInfo*) NULL); +} + +JBoltStackFrameValue::~JBoltStackFrameValue() { + if (_method_holder != NULL) { + if (JNIHandles::is_weak_global_handle(_method_holder)) { + JNIHandles::destroy_weak_global(_method_holder); + } else { + JNIHandles::destroy_global(_method_holder); + } + } +} + +jobject JBoltStackFrameValue::get_method_holder() { return _method_holder; } + +void JBoltStackFrameValue::clear_method_holder_but_not_release() { _method_holder = NULL; } + +CompileTaskInfo::CompileTaskInfo(Method* method, int osr_bci, int comp_level, int comp_reason, Method* hot_method, int hot_cnt): + _method(method), _osr_bci(osr_bci), _comp_level(comp_level), _comp_reason(comp_reason), _hot_method(hot_method), _hot_count(hot_cnt) { + Thread* thread = Thread::current(); + + assert(_method != NULL, "sanity"); + // 
_method_holder can be null for boot loader (the null loader) + _method_holder = JNIHandles::make_weak_global(Handle(thread, _method->method_holder()->klass_holder())); + + if (_hot_method != NULL && _hot_method != _method) { + _hot_method_holder = JNIHandles::make_weak_global(Handle(thread, _hot_method->method_holder()->klass_holder())); + } else { + _hot_method_holder = NULL; + } +} + +CompileTaskInfo::~CompileTaskInfo() { + if (_method_holder != NULL) { + if (JNIHandles::is_weak_global_handle(_method_holder)) { + JNIHandles::destroy_weak_global(_method_holder); + } else { + JNIHandles::destroy_global(_method_holder); + } + } + if (_hot_method_holder != NULL) { + if (JNIHandles::is_weak_global_handle(_hot_method_holder)) { + JNIHandles::destroy_weak_global(_hot_method_holder); + } else { + JNIHandles::destroy_global(_hot_method_holder); + } + } +} + +/** + * Set the weak reference to strong reference if the method is not unloaded. + * It seems that the life cycle of Method is consistent with that of the Klass and CLD. 
+ * @see CompileTask::select_for_compilation() + */ +bool CompileTaskInfo::try_select() { + NoSafepointVerifier nsv; + Thread* thread = Thread::current(); + // is unloaded + if (_method_holder != NULL && JNIHandles::is_weak_global_handle(_method_holder) && JNIHandles::is_global_weak_cleared(_method_holder)) { + if (log_is_enabled(Debug, jbolt)) { + log_debug(jbolt)("Some method has been unloaded so skip reordering for it: p=%p.", _method); + } + return false; + } + + assert(_method->method_holder()->is_loader_alive(), "should be alive"); + Handle method_holder(thread, _method->method_holder()->klass_holder()); + JNIHandles::destroy_weak_global(_method_holder); + _method_holder = JNIHandles::make_global(method_holder); + + if (_hot_method_holder != NULL) { + Handle hot_method_holder(thread, _hot_method->method_holder()->klass_holder()); + JNIHandles::destroy_weak_global(_hot_method_holder); + _hot_method_holder = JNIHandles::make_global(Handle(thread, _hot_method->method_holder()->klass_holder())); + } + return true; +} + +static const char *method_type_to_string(u1 type) { + switch (type) { + case JfrStackFrame::FRAME_INTERPRETER: + return "Interpreted"; + case JfrStackFrame::FRAME_JIT: + return "JIT compiled"; + case JfrStackFrame::FRAME_INLINE: + return "Inlined"; + case JfrStackFrame::FRAME_NATIVE: + return "Native"; + default: + ShouldNotReachHere(); + return "Unknown"; + } +} + +uintptr_t related_data_jbolt_log_do[] = { + (uintptr_t)in_bytes(JfrStackTrace::hash_offset()), + (uintptr_t)in_bytes(JfrStackTrace::id_offset()), + (uintptr_t)in_bytes(JfrStackTrace::hotcount_offset()), + (uintptr_t)in_bytes(JfrStackTrace::frames_offset()), + (uintptr_t)in_bytes(JfrStackTrace::frames_count_offset()), + + (uintptr_t)in_bytes(JfrStackFrame::method_offset()), + (uintptr_t)in_bytes(JfrStackFrame::methodid_offset()), + (uintptr_t)in_bytes(JfrStackFrame::bci_offset()), + (uintptr_t)in_bytes(JfrStackFrame::type_offset()), + + (uintptr_t)JBoltFunc::constructor, + 
(uintptr_t)JBoltFunc::copy_constructor, + (uintptr_t)JBoltCall::constructor, + (uintptr_t)JBoltCall::copy_constructor, + (uintptr_t)JBoltCallGraph::static_add_func, + (uintptr_t)JBoltCallGraph::static_add_call +}; + +/** + * Invoked in JfrStackTraceRepository::add_jbolt(). + * Each time JFR record a valid stacktrace, + * we log a weak ptr of each unique method in _sampled_methods_refs. + */ +void JBoltManager::log_stacktrace(const JfrStackTrace& stacktrace) { + Thread* thread = Thread::current(); + HandleMark hm(thread); + + const JfrStackFrame* frames = stacktrace.get_frames(); + unsigned int framesCount = stacktrace.get_framesCount(); + + for (u4 i = 0; i < framesCount; ++i) { + const JfrStackFrame& frame = frames[i]; + + JBoltStackFrameKey stackframe_key(const_cast(frame.get_method()), frame.get_methodId()); + + if (!_sampled_methods_refs->contains(stackframe_key)) { + jobject method_holder = JNIHandles::make_weak_global(Handle(thread, frame.get_method()->method_holder()->klass_holder())); + JBoltStackFrameValue stackframe_value(method_holder); + _sampled_methods_refs->put(stackframe_key, stackframe_value); + // put() transmits method_holder ownership to element in map + // set the method_holder to NULL in temp variable stackframe_value, to avoid double free + stackframe_value.clear_method_holder_but_not_release(); + } + } +} + +methodHandle JBoltManager::lookup_method(Method* method, traceid method_id) { + Thread* thread = Thread::current(); + JBoltStackFrameKey stackframe_key(method, method_id); + JBoltStackFrameValue* stackframe_value = _sampled_methods_refs->get(stackframe_key); + if (stackframe_value == NULL) { + return methodHandle(); + } + + jobject method_holder = stackframe_value->get_method_holder(); + if (method_holder != NULL && JNIHandles::is_weak_global_handle(method_holder) && JNIHandles::is_global_weak_cleared(method_holder)) { + log_debug(jbolt)("method at %p is unloaded", (void*)method); + return methodHandle(); + } + + const Method* const 
lookup_method = method; + if (lookup_method == NULL) { + // stacktrace obsolete + return methodHandle(); + } + assert(lookup_method != NULL, "invariant"); + methodHandle method_handle(thread, const_cast(lookup_method)); + + return method_handle; +} + +void JBoltManager::construct_stacktrace(const JfrStackTrace& stacktrace) { + NoSafepointVerifier nsv; + if (stacktrace.get_framesCount() < MIN_FRAMESCOUNT) + return; + + u4 topFrameIndex = 0; + u4 max_frames = 0; + + const JfrStackFrame* frames = stacktrace.get_frames(); + unsigned int framesCount = stacktrace.get_framesCount(); + + // Native method subsidence + while (topFrameIndex < framesCount) { + const JfrStackFrame& frame = frames[topFrameIndex]; + + if (strcmp(method_type_to_string(frame.get_type()), "Native") != 0) { + break; + } + + topFrameIndex++; + } + + if (framesCount - topFrameIndex < MIN_FRAMESCOUNT) { + return; + } + + os::Linux::jboltLog_precalc(topFrameIndex, max_frames, framesCount); + + JBoltFunc **tempfunc = NULL; + + for (u4 i = 0; i < max_frames; ++i) { + const JfrStackFrame& frame = frames[topFrameIndex + i]; + + methodHandle method = lookup_method(const_cast(frame.get_method()), frame.get_methodId()); + if (method.is_null()) { + break; + } + + if (i == 0) { + int hotcount = stacktrace.hotcount(); + int* exist_hotcount = _sampled_methods_hotcount_stored->get(method()); + if (exist_hotcount != NULL) { + hotcount += *exist_hotcount; + } + _sampled_methods_hotcount_stored->put(method(), hotcount); + } + + const CompiledMethod* const compiled = method->code(); + + log_trace(jbolt)( + "Method id - %lu\n\tBytecode index - %d\n\tSignature - %s\n\tType - %s\n\tCompiler - %s\n\tCompile Level - %d\n\tSize - %dB\n", + frame.get_methodId(), + frame.get_byteCodeIndex(), + method->external_name(), + method_type_to_string(frame.get_type()), + compiled != NULL ? compiled->compiler_name() : "None", + compiled != NULL ? compiled->comp_level() : -1, + compiled != NULL ? 
compiled->size() : 0); + + if (compiled == NULL) continue; + + JBoltMethodKey method_key(method->constants()->pool_holder()->name(), method->name(), method->signature()); + JBoltFunc* func = JBoltFunc::constructor(frame.get_method(), frame.get_methodId(), compiled->size(), method_key); + + if (!os::Linux::jboltLog_do(related_data_jbolt_log_do, (address)(const_cast(&stacktrace)), i, compiled->comp_level(), (address)func, (address*)&tempfunc)) { + delete func; + func = NULL; + continue; + } + } + + log_trace(jbolt)( + "StackTrace hash - %u hotcount - %u\n==============================\n", stacktrace.hash(), stacktrace.hotcount()); +} + +/** + * Invoked in JfrStackTraceRepository::write(). + * Each time JfrChunkWrite do write and clear stacktrace table, + * we update the CG by invoke construct_stacktrace(). + */ +void JBoltManager::construct_cg_once() { + guarantee((UseJBolt && JBoltManager::reorder_phase_is_profiling_or_waiting()), "sanity"); + + GrowableArray* traces = create_growable_array(); + + { + MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag); + const JfrStackTraceRepository& repository = JfrStackTraceRepository::instance(); + + if (repository.get_entries_count_jbolt() == 0) { + return; + } + + const JfrStackTrace* const * table = repository.get_stacktrace_table_jbolt(); + for (uint i = 0; i < repository.TABLE_SIZE; ++i) { + for (const JfrStackTrace* trace = table[i]; trace != NULL; trace = trace->next()) { + traces->append(const_cast(trace)); + } + } + } + + for (int i = 0; i < traces->length(); ++i) { + construct_stacktrace(*(traces->at(i))); + } + + log_trace(jbolt)( + "+++++++ one time log over ++++++\n\n"); + delete traces; +} + +static void write_order(const GrowableArray* order, fileStream& fs) { + assert(order != NULL, "sanity"); + const char* methodFlag = "M"; + const char* segmentor = "C\n"; + + log_debug(jbolt)("+============================+\n\t\t\tORDER\n"); + + for (int i = 0; i < order->length(); ++i) { + const JBoltFunc& 
func = order->at(i); + if (func.method() == NULL) { + fs.write(segmentor, strlen(segmentor)); + continue; + } + + char* holder_name = func.method_key().klass()->as_C_string(); + char* name = func.method_key().name()->as_C_string(); + char* signature = func.method_key().sig()->as_C_string(); + char size[LINE_BUF_SIZE] = {0}; + snprintf(size, sizeof(size), "%d", func.size()); + + log_debug(jbolt)("order %d --- Method - %s %s %s\n", i, holder_name, name, signature); + + fs.write(methodFlag, strlen(methodFlag)); + fs.write(" ", 1); + fs.write(size, strlen(size)); + fs.write(" ", 1); + fs.write(holder_name, strlen(holder_name)); + fs.write(" ", 1); + fs.write(name, strlen(name)); + fs.write(" ", 1); + fs.write(signature, strlen(signature)); + fs.write("\n", 1); + } +} + +/** + * Invoked in before_exit(). + * + * Dump the order to JBoltOrderFile before vm exit. + */ +void JBoltManager::dump_order_in_manual() { + guarantee((UseJBolt && JBoltDumpMode), "sanity"); + guarantee(reorder_phase_profiling_to_waiting(), "sanity"); + NoSafepointVerifier nsv; + ResourceMark rm; + GrowableArray* order = JBoltCallGraph::callgraph_instance().hfsort(); + + fileStream order_file(JBoltOrderFile, "w+"); + + if (JBoltOrderFile == NULL || !order_file.is_open()) { + log_error(jbolt)("JBoltOrderFile open error"); + vm_exit_during_initialization("JBoltOrderFile open error"); + } + + write_order(order, order_file); + + log_info(jbolt)("order generate successful !!"); + log_debug(jbolt)("+============================+\n"); + delete order; + delete _sampled_methods_refs; + _sampled_methods_refs = NULL; + JBoltCallGraph::deinitialize(); +} + +JBoltErrorCode JBoltManager::dump_order_in_jcmd(const char* filename) { + guarantee(UseJBolt, "sanity"); + NoSafepointVerifier nsv; + ResourceMark rm; + + if (_order_stored == NULL) return JBoltOrderNULL; + + fileStream order_file(filename, "w+"); + + if (filename == NULL || !order_file.is_open()) return JBoltOpenFileError; + + write_order(_order_stored, 
order_file); + + return JBoltOK; +} + +#define check_arg_not_set(flag) \ +do { \ + if (FLAG_IS_CMDLINE(flag)) { \ + vm_exit_during_initialization(err_msg("Do not set VM option " #flag " without UseJBolt enabled.")); \ + } \ +} while(0) + +/** + * Do not set the JBolt-related flags manually if UseJBolt is not enabled. + */ +void JBoltManager::check_arguments_not_set() { + if (UseJBolt) return; + + check_arg_not_set(JBoltDumpMode); + check_arg_not_set(JBoltLoadMode); + check_arg_not_set(JBoltOrderFile); + check_arg_not_set(JBoltSampleInterval); + check_arg_not_set(JBoltCodeHeapSize); + check_arg_not_set(JBoltRescheduling); + check_arg_not_set(JBoltReorderThreshold); + check_arg_not_set(EnableDumpGraph); +} + +/** + * Check which mode is JBolt in. + * If JBoltDumpMode or JBoltLoadMode is set manually then do nothing, else it will be fully auto sched by JBolt itself. + */ +void JBoltManager::check_mode() { + if (!(JBoltDumpMode || JBoltLoadMode)) { + _auto_mode = true; + return; + } + + if (!FLAG_IS_DEFAULT(JBoltSampleInterval)) { + log_warning(jbolt)("JBoltSampleInterval is ignored because it is not in auto mode."); + } + + if (JBoltDumpMode && JBoltLoadMode) { + vm_exit_during_initialization("Do not set both JBoltDumpMode and JBoltLoadMode!"); + } + + guarantee((JBoltDumpMode ^ JBoltLoadMode), "Must set either JBoltDumpMode or JBoltLoadMode!"); +} + +/** + * If in auto mode, JBoltOrderFile will be ignored + * If in any manual mode, then JBoltOrderFile will be necessary. + * Check whether the order file exists or is accessable. 
+ */ +void JBoltManager::check_order_file() { + if (auto_mode()) { + if (JBoltOrderFile != NULL) log_warning(jbolt)("JBoltOrderFile is ignored because it is in auto mode."); + return; + } + + if (JBoltOrderFile == NULL) { + vm_exit_during_initialization("JBoltOrderFile is not set!"); + } + + bool file_exist = (::access(JBoltOrderFile, F_OK) == 0); + if (file_exist) { + if (JBoltDumpMode) { + log_warning(jbolt)("JBoltOrderFile to dump already exists and will be overwritten: file=%s.", JBoltOrderFile); + ::remove(JBoltOrderFile); + } + } else { + if (JBoltLoadMode) { + vm_exit_during_initialization(err_msg("JBoltOrderFile does not exist or cannot be accessed! file=\"%s\".", JBoltOrderFile)); + } + } +} + +void JBoltManager::check_dependency() { + if (FLAG_IS_CMDLINE(FlightRecorder) ? !FlightRecorder : false) { + vm_exit_during_initialization("JBolt depends on JFR!"); + } + + if (!CompilerConfig::is_c2_enabled()) { + vm_exit_during_initialization("JBolt depends on C2!"); + } + + if (!SegmentedCodeCache) { + vm_exit_during_initialization("JBolt depends on SegmentedCodeCache!"); + } +} + +size_t JBoltManager::calc_nmethod_size_with_padding(size_t nmethod_size) { + return align_up(nmethod_size, (size_t) CodeCacheSegmentSize); +} + +size_t JBoltManager::calc_segment_size_with_padding(size_t segment_size) { + size_t page_size = CodeCache::page_size(); + if (segment_size < page_size) return page_size; + return align_down(segment_size, page_size); +} + +/** + * We have to parse the file twice because SymbolTable is not inited in phase 1... + */ +void JBoltManager::load_order_file_phase1(int* method_cnt, size_t* segment_size) { + assert(JBoltOrderFile != NULL, "sanity"); + + _order_fp = os::fopen(JBoltOrderFile, "r"); + if (_order_fp == NULL) { + vm_exit_during_initialization(err_msg("Cannot open file JBoltOrderFile! 
file=\"%s\".", JBoltOrderFile)); + } + + int mth_cnt = 0; + size_t seg_size = 0; + + char line[LINE_BUF_SIZE]; + int len = -1; + while (read_line(_order_fp, line, sizeof(line), &len)) { + if (len <= 2) continue; + if (line[0] != 'M' || line[1] != ' ') continue; + char* left_start = line + 2; + + // parse nmethod size + size_t nmethod_size; + if (!read_a_size(left_start, &nmethod_size)) { + vm_exit_during_initialization(err_msg("Wrong format of JBolt order line! line=\"%s\".", line)); + } + ++mth_cnt; + seg_size += calc_nmethod_size_with_padding(nmethod_size); + } + + *method_cnt = mth_cnt; + *segment_size = seg_size; + log_trace(jbolt)("Read order file method_cnt=%d, estimated_segment_size=" SIZE_FORMAT ".", mth_cnt, seg_size); +} + +bool JBoltManager::parse_method_line_phase2(char* const line, const int len, TRAPS) { + // Skip "M ". + char* left_start = line + 2; + + // Skip nmethod size (has parsed in phase1). + { + char* t = strchr(left_start, ' '); + if (t == NULL) return false; + left_start = t + 1; + } + + // Modify "java.lang.Obj" to "java/lang/Obj". + replace_all(left_start, '.', '/'); + + // Parse the three symbols: class name, method name, signature. + Symbol* three_symbols[3]; + for (int i = 0; i < 2; ++i) { + char* t = strchr(left_start, ' '); + if (t == NULL) return false; + Symbol* sym = SymbolTable::new_symbol(left_start, t - left_start, THREAD); + three_symbols[i] = sym; + left_start = t + 1; + } + Symbol* sym = SymbolTable::new_symbol(left_start, line + len - left_start, THREAD); + three_symbols[2] = sym; + if (log_is_enabled(Trace, jbolt)) { + log_trace(jbolt)("HotMethod init: key={%s %s %s}", + three_symbols[0]->as_C_string(), + three_symbols[1]->as_C_string(), + three_symbols[2]->as_C_string()); + } + + // Add to data structure. 
+ JBoltMethodKey method_key(three_symbols[0], three_symbols[1], three_symbols[2]); + _hot_methods_sorted->append(method_key); + JBoltMethodValue method_value; + bool put = _hot_methods_vis->put(method_key, method_value); + if (!put) { + vm_exit_during_initialization(err_msg("Duplicated method: {%s %s %s}!", + three_symbols[0]->as_C_string(), + three_symbols[1]->as_C_string(), + three_symbols[2]->as_C_string())); + } + + return true; +} + +bool JBoltManager::parse_connected_component_line_phase2(char* const line, const int len) { return true; } + +void JBoltManager::load_order_file_phase2(TRAPS) { + guarantee(_order_fp != NULL, "sanity"); + + // re-scan + fseek(_order_fp, 0, SEEK_SET); + + char line[LINE_BUF_SIZE]; + int len = -1; + while (read_line(_order_fp, line, sizeof(line), &len)) { + if (len <= 0) continue; + bool success = false; + switch (line[0]) { + case '#': success = true; break; // ignore comments + case 'M': success = parse_method_line_phase2(line, len, THREAD); break; + case 'C': success = parse_connected_component_line_phase2(line, len); break; + default: break; + } + if (!success) { + vm_exit_during_initialization(err_msg("Wrong format of JBolt order line! line=\"%s\".", line)); + } + } + fclose(_order_fp); + _order_fp = NULL; +} + +void JBoltManager::init_load_mode_phase1() { + if (!(auto_mode() || JBoltLoadMode)) return; + + if (auto_mode()) { + // auto mode has no order now, initialize as default. 
+ _hot_methods_sorted = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray(1, mtCompiler); + _hot_methods_vis = new (ResourceObj::C_HEAP, mtCompiler) MethodKeyMap(); + log_info(jbolt)("Default set JBoltCodeHeapSize=" UINTX_FORMAT " B (" UINTX_FORMAT " MB).", JBoltCodeHeapSize, JBoltCodeHeapSize / 1024 / 1024); + return; + } + guarantee(reorder_phase_available_to_collecting(), "sanity"); + size_t total_nmethod_size = 0; + int method_cnt = 0; + load_order_file_phase1(&method_cnt, &total_nmethod_size); + + _hot_methods_sorted = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray(method_cnt, mtCompiler); + _hot_methods_vis = new (ResourceObj::C_HEAP, mtCompiler) MethodKeyMap(); + + if (FLAG_IS_DEFAULT(JBoltCodeHeapSize)) { + FLAG_SET_ERGO(uintx, JBoltCodeHeapSize, calc_segment_size_with_padding(total_nmethod_size)); + log_info(jbolt)("Auto set JBoltCodeHeapSize=" UINTX_FORMAT " B (" UINTX_FORMAT " MB).", JBoltCodeHeapSize, JBoltCodeHeapSize / 1024 / 1024); + } +} + +void JBoltManager::init_load_mode_phase2(TRAPS) { + // Only manual load mode need load phase2 + if (!JBoltLoadMode) return; + + load_order_file_phase2(CHECK); + _reorderable_method_cnt = 0; + _reorder_method_threshold_cnt = _hot_methods_sorted->length() * JBoltReorderThreshold; +} + +void JBoltManager::init_dump_mode_phase2(TRAPS) { + if (!(auto_mode() || JBoltDumpMode)) return; + + JBoltCallGraph::initialize(); + _sampled_methods_refs = new (ResourceObj::C_HEAP, mtTracing) StackFrameKeyMap(); + _sampled_methods_hotcount_stored = new (ResourceObj::C_HEAP, mtTracing) MethodHotCountMap(); + + // JBolt will create a JFR by itself + // In auto mode, will stop in JBoltControlThread::start_thread() after JBoltSampleInterval. + // In manual dump mode, won't stop until program exit. 
+ log_info(jbolt)("JBolt in dump mode now, start a JFR recording named \"jbolt-jfr\"."); + bufferedStream output; + DCmd::parse_and_execute(DCmd_Source_Internal, &output, "JFR.start name=jbolt-jfr", ' ', THREAD); + if (HAS_PENDING_EXCEPTION) { + ResourceMark rm; + log_warning(jbolt)("unable to start jfr jbolt-jfr"); + log_warning(jbolt)("exception type: %s", PENDING_EXCEPTION->klass()->external_name()); + // don't unwind this exception + CLEAR_PENDING_EXCEPTION; + } +} + +static void update_stored_order(const GrowableArray* order) { + if (_order_stored != NULL) { + // use a tmp for releasing space to provent _order_stored from being a wild pointer + GrowableArray* tmp = _order_stored; + _order_stored = NULL; + delete tmp; + } + _order_stored = new (ResourceObj::C_HEAP, mtTracing) GrowableArray(order->length(), mtTracing); + _order_stored->appendAll(order); +} + +static CompileTaskInfo* create_compile_task_info(const methodHandle& method) { + CompiledMethod* compiled = method->code(); + if (compiled == NULL) { + log_trace(jbolt)("Recompilation Task init failed because of null nmethod. func: %s.", method->external_name()); + return NULL; + } + int osr_bci = compiled->is_osr_method() ? compiled->osr_entry_bci() : InvocationEntryBci; + int comp_level = compiled->comp_level(); + // comp_level adaptation for deoptmization + if (comp_level > CompLevel_simple && comp_level <= CompLevel_full_optimization) comp_level = CompLevel_full_optimization; + CompileTask::CompileReason comp_reason = CompileTask::Reason_Reorder; + CompileTaskInfo* ret = new CompileTaskInfo(method(), osr_bci, comp_level, (int)comp_reason, + NULL, 0); + return ret; +} + +/** + * This function is invoked by JBoltControlThread. + * Do initialization for converting dump mode to load mode. 
+ */ +void JBoltManager::init_auto_transition(size_t* segment_size, TRAPS) { + guarantee(UseJBolt && auto_mode(), "sanity"); + NoSafepointVerifier nsv; + ResourceMark rm; + + GrowableArray* order = JBoltCallGraph::callgraph_instance().hfsort(); + update_stored_order(order); + + size_t seg_size = 0; + for (int i = 0; i < order->length(); ++i) { + const JBoltFunc& func = order->at(i); + if (func.method() == NULL) { + continue; + } + + methodHandle method = lookup_method(const_cast(func.method()), func.method_id()); + if (method.is_null()) { + continue; + } + + CompileTaskInfo* cti = create_compile_task_info(method); + if (cti == NULL) { + continue; + } + + JBoltMethodKey method_key = func.method_key(); + JBoltMethodValue method_value; + if (!method_value.set_comp_info(cti)) { + delete cti; + continue; + } + + seg_size += calc_nmethod_size_with_padding(func.size()); + _hot_methods_sorted->append(method_key); + bool put = _hot_methods_vis->put(method_key, method_value); + if (!put) { + vm_exit_during_initialization(err_msg("Duplicated method: {%s %s %s}!", + method_key.klass()->as_C_string(), + method_key.name()->as_C_string(), + method_key.sig()->as_C_string())); + } + method_value.clear_comp_info_but_not_release(); + } + log_info(jbolt)("order generate successful !!"); + *segment_size = calc_segment_size_with_padding(seg_size); + delete order; +} + +/** + * This function must be invoked after CompilerConfig::ergo_initialize() in Arguments::apply_ergo(). + * This function must be invoked before CodeCache::initialize_heaps() in codeCache_init() in init_globals(). + * Thread and SymbolTable is not inited now! 
+ */ +void JBoltManager::init_phase1() { + if (!UseJBolt) return; + check_mode(); + check_dependency(); + check_order_file(); + parse_rescheduling(); + + /* dump mode has nothing to do in phase1 */ + init_load_mode_phase1(); +} + +void JBoltManager::init_phase2(TRAPS) { + if (!UseJBolt) return; + + ResourceMark rm(THREAD); + init_dump_mode_phase2(CHECK); + init_load_mode_phase2(CHECK); + + // Manual dump mode doesn't need JBoltControlThread, directly go to profiling phase + if (JBoltDumpMode) { + guarantee(JBoltManager::reorder_phase_available_to_profiling(), "sanity"); + return; + } + + JBoltControlThread::init(CHECK); + // Auto mode will start control thread earlier. + // Manual load mode start later in check_start_reordering() + if (auto_mode()) { + JBoltControlThread::start_thread(CHECK_AND_CLEAR); + } +} + +/** + * Code heaps are initialized between init phase 1 and init phase 2. + */ +void JBoltManager::init_code_heaps(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, size_t alignment) { + assert(UseJBolt && !JBoltDumpMode, "sanity"); + if(!is_aligned(JBoltCodeHeapSize, alignment)) { + vm_exit_during_initialization(err_msg("JBoltCodeHeapSize should be %ld aligned, please adjust", alignment)); + } + + size_t jbolt_hot_size = JBoltCodeHeapSize; + size_t jbolt_tmp_size = JBoltCodeHeapSize; + size_t jbolt_total_size = jbolt_hot_size + jbolt_tmp_size; + if (non_profiled_size <= jbolt_total_size) { + vm_exit_during_initialization(err_msg( + "Not enough space in non-profiled code heap to split out JBolt heap(s): " SIZE_FORMAT "K <= " SIZE_FORMAT "K", + non_profiled_size/K, jbolt_total_size/K)); + } + non_profiled_size -= jbolt_total_size; + non_profiled_size = align_down(non_profiled_size, alignment); + FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size); + + ReservedCodeSpace rs = CodeCache::reserve_heap_memory(cache_size); + ReservedSpace non_nmethod_space, profiled_space, non_profiled_space, jbolt_hot_space, 
jbolt_tmp_space; + + uintptr_t related_data_jbolt_heap_init[] = { + (uintptr_t)non_nmethod_size, + (uintptr_t)profiled_size, + (uintptr_t)non_profiled_size, + (uintptr_t)jbolt_hot_size, + (uintptr_t)jbolt_tmp_size, + + (uintptr_t)ReservedSpace::static_first_part, + (uintptr_t)ReservedSpace::static_last_part + }; + + if (!os::Linux::jboltHeap_init(related_data_jbolt_heap_init, (address)&rs, (address)&non_nmethod_space, (address)&profiled_space, (address)&non_profiled_space, (address)&jbolt_hot_space, (address)&jbolt_tmp_space)) { + jbolt_hot_size = CodeCache::page_size(); + jbolt_tmp_size = CodeCache::page_size(); + non_profiled_size += (jbolt_total_size - 2 * CodeCache::page_size()); + // Reserve one continuous chunk of memory for CodeHeaps and split it into + // parts for the individual heaps. The memory layout looks like this: + // ---------- high ----------- + // Non-profiled nmethods + // JBolt tmp nmethods + // JBolt hot nmethods + // Profiled nmethods + // Non-nmethods + // ---------- low ------------ + non_nmethod_space = rs.first_part(non_nmethod_size); + ReservedSpace r1 = rs.last_part(non_nmethod_size); + profiled_space = r1.first_part(profiled_size); + ReservedSpace r2 = r1.last_part(profiled_size); + jbolt_hot_space = r2.first_part(jbolt_hot_size); + ReservedSpace r3 = r2.last_part(jbolt_hot_size); + jbolt_tmp_space = r3.first_part(jbolt_tmp_size); + non_profiled_space = r3.last_part(jbolt_tmp_size); + } + + CodeCache::add_heap(non_nmethod_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod); + CodeCache::add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled); + CodeCache::add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled); + const char* no_space = NULL; + CodeCache::add_heap(jbolt_hot_space, "CodeHeap 'jbolt hot nmethods'", CodeBlobType::MethodJBoltHot); + if (jbolt_hot_size != jbolt_hot_space.size()) { + no_space = "hot"; + } + CodeCache::add_heap(jbolt_tmp_space, 
"CodeHeap 'jbolt tmp nmethods'", CodeBlobType::MethodJBoltTmp); + if (jbolt_tmp_size != jbolt_tmp_space.size()) { + no_space = "tmp"; + } + if (no_space != NULL) { + vm_exit_during_initialization(FormatBuffer<1024>( + "No enough space for JBolt %s heap: \n" + "Expect: cache_size=" SIZE_FORMAT "K, profiled_size=" SIZE_FORMAT "K, non_nmethod_size=" SIZE_FORMAT "K, jbolt_hot_size=" SIZE_FORMAT "K, non_profiled_size=" SIZE_FORMAT "K, jbolt_tmp_size=" SIZE_FORMAT "K\n" + "Actual: cache_size=" SIZE_FORMAT "K, profiled_size=" SIZE_FORMAT "K, non_nmethod_size=" SIZE_FORMAT "K, jbolt_hot_size=" SIZE_FORMAT "K, non_profiled_size=" SIZE_FORMAT "K, jbolt_tmp_size=" SIZE_FORMAT "K\n" + "alignment=" SIZE_FORMAT, + no_space, + cache_size/K, profiled_size/K, non_nmethod_size/K, jbolt_hot_size/K, non_profiled_size/K, jbolt_tmp_size/K, + rs.size()/K, profiled_space.size()/K, non_nmethod_space.size()/K, jbolt_hot_space.size()/K, non_profiled_space.size()/K, jbolt_tmp_space.size()/K, + alignment)); + } +} + +int JBoltManager::reorder_phase() { + return OrderAccess::load_acquire(&_reorder_phase); +} + +bool JBoltManager::reorder_phase_available_to_collecting() { + assert(!auto_mode(), "two-phase only"); + return Atomic::cmpxchg(JBoltReorderPhase::Collecting, &_reorder_phase, JBoltReorderPhase::Available) == JBoltReorderPhase::Available; +} + +bool JBoltManager::reorder_phase_collecting_to_reordering() { + assert(!auto_mode(), "two-phase only"); + return Atomic::cmpxchg(JBoltReorderPhase::Reordering, &_reorder_phase, JBoltReorderPhase::Collecting) == JBoltReorderPhase::Collecting; +} + +bool JBoltManager::reorder_phase_available_to_profiling() { + return Atomic::cmpxchg(JBoltReorderPhase::Profiling, &_reorder_phase, JBoltReorderPhase::Available) == JBoltReorderPhase::Available; +} + +bool JBoltManager::reorder_phase_profiling_to_reordering() { + assert(auto_mode(), "one-phase only"); + return Atomic::cmpxchg(JBoltReorderPhase::Reordering, &_reorder_phase, JBoltReorderPhase::Profiling) 
== JBoltReorderPhase::Profiling; +} + +bool JBoltManager::reorder_phase_reordering_to_available() { + assert(auto_mode(), "one-phase only"); + return Atomic::cmpxchg(JBoltReorderPhase::Available, &_reorder_phase, JBoltReorderPhase::Reordering) == JBoltReorderPhase::Reordering; +} + +bool JBoltManager::reorder_phase_profiling_to_available() { + assert(auto_mode(), "one-phase only"); + return Atomic::cmpxchg(JBoltReorderPhase::Available, &_reorder_phase, JBoltReorderPhase::Profiling) == JBoltReorderPhase::Profiling; +} + +bool JBoltManager::reorder_phase_profiling_to_waiting() { + return Atomic::cmpxchg(JBoltReorderPhase::Waiting, &_reorder_phase, JBoltReorderPhase::Profiling) == JBoltReorderPhase::Profiling; +} + +bool JBoltManager::reorder_phase_waiting_to_reordering() { + assert(auto_mode(), "one-phase only"); + return Atomic::cmpxchg(JBoltReorderPhase::Reordering, &_reorder_phase, JBoltReorderPhase::Waiting) == JBoltReorderPhase::Waiting; +} + +bool JBoltManager::reorder_phase_waiting_to_available() { + assert(auto_mode(), "one-phase only"); + return Atomic::cmpxchg(JBoltReorderPhase::Available, &_reorder_phase, JBoltReorderPhase::Waiting) == JBoltReorderPhase::Waiting; +} + +bool JBoltManager::reorder_phase_reordering_to_end() { + return Atomic::cmpxchg(JBoltReorderPhase::End, &_reorder_phase, JBoltReorderPhase::Reordering) == JBoltReorderPhase::Reordering; +} + +bool JBoltManager::reorder_phase_is_waiting() { + return OrderAccess::load_acquire(&_reorder_phase) == JBoltReorderPhase::Waiting; +} + +bool JBoltManager::reorder_phase_is_available() { + bool res = (OrderAccess::load_acquire(&_reorder_phase) == JBoltReorderPhase::Available); + assert(!res || auto_mode(), "one-phase only"); + return res; +} + +bool JBoltManager::reorder_phase_is_collecting() { + bool res = (OrderAccess::load_acquire(&_reorder_phase) == JBoltReorderPhase::Collecting); + assert(!res || !auto_mode(), "two-phase only"); + return res; +} + +bool JBoltManager::reorder_phase_is_profiling() { 
+ bool res = (OrderAccess::load_acquire(&_reorder_phase) == JBoltReorderPhase::Profiling); + return res; +} + +bool JBoltManager::reorder_phase_is_reordering() { + return OrderAccess::load_acquire(&_reorder_phase) == JBoltReorderPhase::Reordering; +} + +bool JBoltManager::reorder_phase_is_profiling_or_waiting() { + int p = OrderAccess::load_acquire(&_reorder_phase); + return p == JBoltReorderPhase::Profiling || p == JBoltReorderPhase::Waiting; +} + +bool JBoltManager::reorder_phase_is_collecting_or_reordering() { + int p = OrderAccess::load_acquire(&_reorder_phase); + assert(p != JBoltReorderPhase::Collecting || !auto_mode(), "two-phase only"); + return p == JBoltReorderPhase::Collecting || p == JBoltReorderPhase::Reordering; +} + +Method* JBoltManager::cur_reordering_method() { + return OrderAccess::load_acquire(&_cur_reordering_method); +} + +void JBoltManager::set_cur_reordering_method(Method* method) { + OrderAccess::release_store(&_cur_reordering_method, method); +} + +int JBoltManager::inc_reorderable_method_cnt() { + return Atomic::add(+1, &_reorderable_method_cnt); +} + +bool JBoltManager::can_reorder_now() { + return OrderAccess::load_acquire(&_reorderable_method_cnt) >= _reorder_method_threshold_cnt; +} + +bool JBoltManager::should_reorder_now() { + return OrderAccess::load_acquire(&_reorderable_method_cnt) == _reorder_method_threshold_cnt; +} + +int JBoltManager::primary_hot_seg() { + return OrderAccess::load_acquire(&_primary_hot_seg); +} + +int JBoltManager::secondary_hot_seg() { + return OrderAccess::load_acquire(&_secondary_hot_seg); +} + +bool JBoltManager::force_sweep() { + return OrderAccess::load_acquire(&_force_sweep); +} + +static bool is_valid_time(const char* timeStr) { + // hh:mm + if (strlen(timeStr) != 5) return false; + + if (timeStr[2] != ':') return false; + + if (timeStr[0] < '0' || timeStr[0] > '2') return false; + if (timeStr[1] < '0' || timeStr[1] > '9') return false; + if (timeStr[3] < '0' || timeStr[3] > '5') return false; + if 
(timeStr[4] < '0' || timeStr[4] > '9') return false; + + int hour = (timeStr[0] - '0') * 10 + (timeStr[1] - '0'); + int minute = (timeStr[3] - '0') * 10 + (timeStr[4] - '0'); + + if (hour < 0 || hour > 23 || minute < 0 || minute > 59) return false; + + return true; +} + +void JBoltManager::remove_duplicate_time(GrowableArray* times) { + for (int i = 0; i < times->length(); ++i) { + char* time = times->at(i); + bool exists = false; + for (int j = 0; j < _rescheduling_time->length(); ++j) { + char* uniqueTime = _rescheduling_time->at(j); + if (strcmp(time, uniqueTime) == 0) { + exists = true; + log_warning(jbolt)("time %s is duplicated in JBoltRescheduling", time); + break; + } + } + if (!exists) { + if (_rescheduling_time->length() >= 10) { + // support max 10 time to reschedule + log_warning(jbolt)("JBoltRescheduling support up to 10 time settings, any excess will be ignored."); + return; + } + log_trace(jbolt)("Set time trigger at %s", time); + _rescheduling_time->append(time); + } + } +} + +static int time_comparator(char** time1, char** time2) { + int hour1 = ((*time1)[0] - '0') * 10 + ((*time1)[1] - '0'); + int minute1 = ((*time1)[3] - '0') * 10 + ((*time1)[4] - '0'); + int hour2 = ((*time2)[0] - '0') * 10 + ((*time2)[1] - '0'); + int minute2 = ((*time2)[3] - '0') * 10 + ((*time2)[4] - '0'); + + if (hour1 == hour2) { + return (minute1 > minute2) ? 1 : ((minute1 == minute2) ? 0 : -1); + } + return (hour1 > hour2) ? 1 : ((hour1 == hour2) ? 
0 : -1); +} + +void JBoltManager::parse_rescheduling() { + if (!FLAG_IS_CMDLINE(JBoltRescheduling)) return; + + if (JBoltRescheduling == NULL || strlen(JBoltRescheduling) == 0) { + vm_exit_during_initialization("JBoltRescheduling is set but is null"); + } + + const int buflen = 1024; + if (strlen(JBoltRescheduling) >= buflen) { + vm_exit_during_initialization("JBoltRescheduling is too long"); + } + + if (!auto_mode()) { + log_warning(jbolt)("JBoltRescheduling is ignored because it is not in auto mode."); + return; + } + + ResourceMark rm; + _rescheduling_time = new (ResourceObj::C_HEAP, mtTracing) GrowableArray(1, mtTracing); + GrowableArray* tmp_time = new (ResourceObj::C_HEAP, mtTracing) GrowableArray(1, mtTracing); + + const char* rescheduling_str = JBoltRescheduling; + const char* start = rescheduling_str; + const char* end = strchr(rescheduling_str, ','); + char timeStr[buflen] = {0}; + + while (end != NULL) { + size_t len = (size_t)(end - start); + strncpy(timeStr, start, buflen); + timeStr[len] = '\0'; + + if (is_valid_time(timeStr)) { + tmp_time->append(strdup(timeStr)); + } + else { + vm_exit_during_initialization(err_msg("Invalid time %s in JBoltRescheduling", timeStr)); + } + + start = end + 1; + end = strchr(start, ','); + } + + if (*start != '\0') { + strncpy(timeStr, start, buflen); + timeStr[strlen(start)] = '\0'; + + if (is_valid_time(timeStr)) { + tmp_time->append(strdup(timeStr)); + } + else { + vm_exit_during_initialization(err_msg("Invalid time %s in JBoltRescheduling", timeStr)); + } + } + + remove_duplicate_time(tmp_time); + _rescheduling_time->sort(&time_comparator); + + delete tmp_time; +} + +GrowableArray* JBoltManager::rescheduling_time() { + return _rescheduling_time; +} + +int JBoltManager::clear_manager() { + /* _hot_methods_sorted, _hot_methods_vis and _sampled_methods_refs have been cleared in other pos, don't delete again */ + guarantee(_hot_methods_sorted == NULL, "sanity"); + guarantee(_hot_methods_vis == NULL, "sanity"); + 
guarantee(_sampled_methods_refs == NULL, "sanity"); + // Re-allocate them + _hot_methods_sorted = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray(1, mtCompiler); + _hot_methods_vis = new (ResourceObj::C_HEAP, mtCompiler) MethodKeyMap(); + _sampled_methods_refs = new (ResourceObj::C_HEAP, mtTracing) StackFrameKeyMap(); + + if (_sampled_methods_hotcount_stored != NULL) { + MethodHotCountMap* tmp = _sampled_methods_hotcount_stored; + _sampled_methods_hotcount_stored = NULL; + delete tmp; + } + _sampled_methods_hotcount_stored = new (ResourceObj::C_HEAP, mtTracing) MethodHotCountMap(); + + return 0; +} + +/** + * Invoked in JBoltControlThread::prev_control_schedule(). + * Expect to only execute in auto mode while JBolt.start triggered. + * Clear JBolt related data structures to restore a initial env same as sample never happening. +*/ +int JBoltManager::clear_last_sample_datas() { + int ret = 0; + // Clear _table_jbolt in JfrStackTraceRepository + ret = JfrStackTraceRepository::clear_jbolt(); + // Clear JBoltCallGraph + ret = JBoltCallGraph::callgraph_instance().clear_instance(); + // Clear JBoltManager + ret = clear_manager(); + + return ret; +} + +/** + * Invoked in JBoltControlThread::prev_control_schedule(). + * Swap primary hot segment with secondary hot segment + */ +void JBoltManager::swap_semi_jbolt_segs() { + guarantee(reorder_phase_is_waiting(), "swap must happen in reorder phase Profiling."); + int tmp = Atomic::xchg(OrderAccess::load_acquire(&_primary_hot_seg), &_secondary_hot_seg); + Atomic::xchg(tmp, &_primary_hot_seg); + OrderAccess::release_store(&_hot_codecache_full, false); +} + +/** + * Invoked in JBoltControlThread::post_control_schdule(). + * Free scondary hot segment space for next reorder. 
+ */ +void JBoltManager::clear_secondary_hot_seg(TRAPS) { + guarantee(reorder_phase_is_available(), "secondary clear must happen in reorder phase Available."); + // scan secondary hot seg and recompile alive nmethods to non-profiled + ResourceMark rm(THREAD); + // We cannot alloc weak handle within CodeCache_lock because of the mutex rank check. + // So instead we keep the methods alive only within the scope of this method. + JBoltUtils::MetaDataKeepAliveMark mdm(THREAD); + const GrowableArray& to_recompile = mdm.kept(); + + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + CodeHeap* sec_hot = CodeCache::get_code_heap(secondary_hot_seg()); + for (CodeBlob* cb = (CodeBlob*) sec_hot->first(); cb != NULL; cb = (CodeBlob*) sec_hot->next(cb)) { + nmethod* nm = cb->as_nmethod_or_null(); + Method* m = (nm != NULL) ? nm->method() : NULL; + if (nm && nm->get_state() == CompiledMethod::in_use && m != NULL) { + mdm.add(m); + } + } + } + + for (int i = 0; i < to_recompile.length(); ++i) { + Method* m = (Method*) to_recompile.at(i); + methodHandle method(THREAD, m); + CompileTaskInfo* cti = create_compile_task_info(method); + if (cti == NULL) continue; + guarantee(cti->try_select(), "method is on stack, should be ok"); + assert(cti->hot_method() == NULL, "sanity"); + methodHandle hot_method; + + bool recompile_result = enqueue_recompile_task(cti, method, hot_method, THREAD); + if(recompile_result) { + check_compiled_result(method(), CodeBlobType::MethodNonProfiled, THREAD); + } + delete cti; + } + + OrderAccess::release_store(&_force_sweep, true); + // need 2 cleaning passes before not_entrant converting to zombie, @see nmethod::mark_as_seen_on_stack + NMethodSweeper::force_sweep(); + NMethodSweeper::force_sweep(); + // this time sweep converting to zombie + NMethodSweeper::force_sweep(); + // this time sweep cleaning zombie + NMethodSweeper::force_sweep(); + OrderAccess::release_store(&_force_sweep, false); + log_info(jbolt)("Sweep secondary codecache: %s", 
CodeCache::get_code_heap_name(JBoltManager::secondary_hot_seg())); + print_code_heaps(); +} + +/** + * Invoked in ciEnv::register_method() in CompilerThread. + * Controls where the new nmethod should be allocated. + * + * Returns CodeBlobType::All if it is not determined by JBolt logic. + */ +int JBoltManager::calc_code_blob_type(Method* method, CompileTask* task, TRAPS) { + assert(UseJBolt && reorder_phase_is_collecting_or_reordering(), "sanity"); + const int not_care = CodeBlobType::All; + + // Only cares about non-profiled segment. + int lvl = task->comp_level(); + if (lvl != CompLevel_full_optimization && lvl != CompLevel_simple) { + return not_care; + } + + // Ignore on-stack-replacement. + if (task->osr_bci() != InvocationEntryBci) { + return not_care; + } + + int cur_reorder_phase = reorder_phase(); + // Do nothing after reordering. + if (cur_reorder_phase != JBoltReorderPhase::Collecting && cur_reorder_phase != JBoltReorderPhase::Reordering) { + return not_care; + } + // Only cares about the current reordering method. + if (cur_reorder_phase == JBoltReorderPhase::Reordering) { + if (cur_reordering_method() == method) { + log_trace(jbolt)("Compiling to JBolt heap: method=%s.", method->name_and_sig_as_C_string()); + return primary_hot_seg(); + } + return not_care; + } + guarantee(cur_reorder_phase == JBoltReorderPhase::Collecting, "sanity"); + assert(!auto_mode(), "sanity"); + + JBoltMethodKey method_key(method); + JBoltMethodValue* method_value = _hot_methods_vis->get(method_key); + if (method_value == NULL) { + return not_care; + } + + // Register the method and the compile task. 
+ if (method_value->get_comp_info() == NULL) { + CompileTaskInfo* cti = new CompileTaskInfo(method, task->osr_bci(), task->comp_level(), (int) task->compile_reason(), + task->hot_method(), task->hot_count()); + if (method_value->set_comp_info(cti)) { + int cnt = inc_reorderable_method_cnt(); + log_trace(jbolt)("Reorderable method found: cnt=%d, lvl=%d, p=%p, method=%s.", + cnt, task->comp_level(), method, method->name_and_sig_as_C_string()); + if (is_power_of_2(_reorder_method_threshold_cnt - cnt)) { + log_info(jbolt)("Reorderable cnt: %d/%d/%d", cnt, _reorder_method_threshold_cnt, _hot_methods_sorted->length()); + } + if (cnt == _reorder_method_threshold_cnt) { + log_info(jbolt)("Time to reorder: %d/%d/%d", cnt, _reorder_method_threshold_cnt, _hot_methods_sorted->length()); + _start_reordering_thread = THREAD; + } + } else { + delete cti; + } + } + + return secondary_hot_seg(); +} + +/* + * Invoked in CodeCache::allocate() + * set _hot_codecache_full to stop recompilation early + */ +void JBoltManager::handle_full_jbolt_code_cache() { + log_warning(jbolt)("%s is full, will stop recompilation", CodeCache::get_code_heap_name(primary_hot_seg())); + OrderAccess::release_store(&_hot_codecache_full, true); +} + +/** + * Check if reordering should start. + * The reordering should only start once (for now). + * We don't do this check in "if (cnt == _reorder_method_threshold_cnt)" in calc_code_blob_type() + * because it will cause an assert error: "Possible safepoint reached by thread that does not allow it". + */ +void JBoltManager::check_start_reordering(TRAPS) { + // _start_reordering_thread is set and tested in the same thread. No need to be atomic. 
+ if (_start_reordering_thread == THREAD) { + _start_reordering_thread = NULL; + if (JBoltControlThread::get_thread() == NULL) { + assert(can_reorder_now(), "sanity"); + log_info(jbolt)("Starting JBoltControlThread to reorder."); + JBoltControlThread::start_thread(CHECK_AND_CLEAR); + } + } +} + +/** + * The task will be added to the compile queue and be compiled just like other tasks. + */ +CompileTask* JBoltManager::create_a_task_instance(CompileTaskInfo* cti, const methodHandle& method, const methodHandle& hot_method, TRAPS) { + int osr_bci = cti->osr_bci(); + int comp_level = cti->comp_level(); + CompileTask::CompileReason comp_reason = (CompileTask::CompileReason) cti->comp_reason(); + int hot_count = cti->hot_count(); + bool is_blocking = true; + + // init a task (@see CompileBroker::create_compile_task()) + CompileTask* task = CompileTask::allocate(); + int compile_id = CompileBroker::assign_compile_id(method, osr_bci); + task->initialize(compile_id, method, osr_bci, comp_level, + hot_method, hot_count, comp_reason, + is_blocking); + return task; +} + +/** + * Print the failure reason if something is wrong in recompilation. + */ +bool JBoltManager::check_compiled_result(Method* method, int check_blob_type, TRAPS) { + CompiledMethod* cm = method->code(); + if (cm == NULL) { + log_trace(jbolt)("Recompilation failed because of null nmethod. method=%s", method->name_and_sig_as_C_string()); + return false; + } + nmethod* nm = cm->as_nmethod_or_null(); + if (nm == NULL) { + log_trace(jbolt)("Recompilation failed because the code is not a nmethod. method=%s", method->name_and_sig_as_C_string()); + return false; + } + int code_blob_type = CodeCache::get_code_blob_type(nm); + if (code_blob_type != check_blob_type) { + log_trace(jbolt)("Recompilation failed because the nmethod is not in heap [%s]: it's in [%s]. 
method=%s", + CodeCache::get_code_heap_name(check_blob_type), CodeCache::get_code_heap_name(code_blob_type), method->name_and_sig_as_C_string()); + return false; + } + log_trace(jbolt)("Recompilation good: code=%p, size=%d, method=%s, heap=%s.", + nm, nm->size(), method->name_and_sig_as_C_string(), CodeCache::get_code_heap_name(check_blob_type)); + return true; +} + +/** + * Create the compile task instance and enqueue into compile queue + */ +bool JBoltManager::enqueue_recompile_task(CompileTaskInfo* cti, const methodHandle& method, const methodHandle& hot_method, TRAPS) { + CompileTask* task = NULL; + CompileQueue* queue = CompileBroker::compile_queue(cti->comp_level()); + { MutexLocker locker(MethodCompileQueue_lock, THREAD); + if (CompileBroker::compilation_is_in_queue(method)) { + log_trace(jbolt)("JBOLT won't compile as \"compilation is in queue\": method=%s.", method->name_and_sig_as_C_string()); + return false; + } + + task = create_a_task_instance(cti, method, hot_method, CHECK_AND_CLEAR_false); + if (task == NULL) { + log_trace(jbolt)("JBOLT won't compile as \"task instance is NULL\": method=%s.", method->name_and_sig_as_C_string()); + return false; + } + queue->add(task); + } + + // Same waiting logic as CompileBroker::wait_for_completion(). + { MonitorLocker ml(task->lock(), THREAD); + while (!task->is_complete() && !CompileBroker::is_compilation_disabled_forever()) { + ml.wait(); + } + } + + CompileBroker::wait_for_completion(task); + task = NULL; // freed + return true; +} + +/** + * Recompilation is to move the nmethod to _primary_hot_seg. 
+ */ +bool JBoltManager::recompile_one(CompileTaskInfo* cti, const methodHandle& method, const methodHandle& hot_method, TRAPS) { + ResourceMark rm(THREAD); + + if (cti->osr_bci() != InvocationEntryBci) { + log_trace(jbolt)("We don't handle on-stack-replacement nmethods: method=%s.", method->name_and_sig_as_C_string()); + return false; + } + + if (log_is_enabled(Trace, jbolt)) { + const char* heap_name = NULL; + CompiledMethod* cm = method->code(); + if (cm == NULL) heap_name = ""; + else if (!cm->is_nmethod()) heap_name = ""; + else heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(cm)); + log_trace(jbolt)("Start to recompile & reorder: heap=%s, method=%s.", heap_name, method->name_and_sig_as_C_string()); + } + + // Add a compilation task. + set_cur_reordering_method(method()); + bool ret = enqueue_recompile_task(cti, method, hot_method, CHECK_AND_CLEAR_false); + ret = ret && check_compiled_result(method(), primary_hot_seg(), CHECK_AND_CLEAR_false); + return ret; +} + +/** + * This method is invoked in a new thread JBoltControlThread. + * Recompiles the methods in the order list one by one (serially) based on the hot order. + * The methods to recompile were almost all in MethodJBoltTmp, and will be installed in + * MethodJBoltHot after recompilation. 
+ */ +void JBoltManager::reorder_all_methods(TRAPS) { + guarantee(UseJBolt && reorder_phase_is_reordering(), "sanity"); + log_info(jbolt)("Start to reorder!"); + print_code_heaps(); + + ResourceMark rm(THREAD); + for (int i = 0; i < _hot_methods_sorted->length(); ++i) { + JBoltMethodKey k = _hot_methods_sorted->at(i); + JBoltMethodValue* v = _hot_methods_vis->get(k); + if (v == NULL) continue; + CompileTaskInfo* cti = v->get_comp_info(); + if (cti == NULL) continue; + if (!cti->try_select()) continue; + + methodHandle method(THREAD, cti->method()); + methodHandle hot_method(THREAD, cti->hot_method()); + + if (!recompile_one(cti, method, hot_method, THREAD) && OrderAccess::load_acquire(&_hot_codecache_full)) { + // JBolt codecache is full, stop early + break; + } + if (HAS_PENDING_EXCEPTION) { + Handle ex(THREAD, PENDING_EXCEPTION); + CLEAR_PENDING_EXCEPTION; + LogTarget(Warning, jbolt) lt; + if (lt.is_enabled()) { + LogStream ls(lt); + ls.print("Failed to recompile the method: %s.", method->name_and_sig_as_C_string()); + java_lang_Throwable::print(ex(), &ls); + } + } + } + + log_info(jbolt)("JBolt reordering succeeds."); + print_code_heaps(); + +} + +void JBoltManager::clear_structures() { + delete _sampled_methods_refs; + _sampled_methods_refs = NULL; + JBoltCallGraph::deinitialize(); + set_cur_reordering_method(NULL); + delete _hot_methods_sorted; + _hot_methods_sorted = NULL; + delete _hot_methods_vis; + _hot_methods_vis = NULL; +} + +void JBoltManager::print_code_heap(outputStream& ls, CodeHeap* heap, const char* name) { + for (CodeBlob* cb = (CodeBlob*) heap->first(); cb != NULL; cb = (CodeBlob*) heap->next(cb)) { + nmethod* nm = cb->as_nmethod_or_null(); + Method* m = nm != NULL ? nm->method() : NULL; + ls.print_cr("%s %p %d alive=%s, zombie=%s, nmethod=%s, entrant=%s, name=[%s %s %s]", + name, + cb, cb->size(), + B_TF(cb->is_alive()), + B_TF(cb->is_zombie()), + B_TF(cb->is_nmethod()), + nm ? B_TF(!nm->is_not_entrant()) : "?", + m ? 
m->method_holder()->name()->as_C_string() : cb->name(), + m ? m->name()->as_C_string() : NULL, + m ? m->signature()->as_C_string() : NULL); + } +} + +void JBoltManager::print_code_heaps() { + { + LogTarget(Debug, jbolt) lt; + if (!lt.is_enabled()) return; + LogStream ls(lt); + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + CodeCache::print_summary(&ls, true); + } + + { + LogTarget(Trace, jbolt) lt; + if (!lt.is_enabled()) return; + LogStream ls(lt); + CodeHeap* hot_heap = CodeCache::get_code_heap(CodeBlobType::MethodJBoltHot); + CodeHeap* tmp_heap = CodeCache::get_code_heap(CodeBlobType::MethodJBoltTmp); + + ResourceMark rm; + if (hot_heap == NULL) { + ls.print_cr("The jbolt hot heap is null."); + } else { + print_code_heap(ls, hot_heap, "hot"); + } + if (tmp_heap == NULL) { + ls.print_cr("The jbolt tmp heap is null."); + } else { + print_code_heap(ls, tmp_heap, "tmp"); + } + } +} + +void JBoltManager::dump_nmethod_count(fileStream& file, nmethod* nm, CodeBlob* cb) { + int hotcount = 0; + if (cb->is_alive() && !nm->is_not_entrant() && _sampled_methods_hotcount_stored->get(nm->method()) != NULL) { + hotcount = *(_sampled_methods_hotcount_stored->get(nm->method())); + } + file.print_cr(" sample count: %d", hotcount); +} + +void JBoltManager::dump_code_heap_with_count(const char* filename, CodeHeap* heap) { + if (heap == NULL) return; + + fileStream invocation_count_file(filename, "w+"); + uint64_t total = 0; + if (invocation_count_file.is_open()) { + invocation_count_file.print("%s:", heap->name()); + invocation_count_file.print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT + "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", + (size_t)(heap->high_boundary() - heap->low_boundary())/K, (size_t)(heap->high_boundary() - heap->low_boundary() - heap->unallocated_capacity())/K, + heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K); + invocation_count_file.print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", 
+ p2i(heap->low_boundary()), + p2i(heap->high()), + p2i(heap->high_boundary())); + for (CodeBlob* cb = (CodeBlob*) heap->first(); cb != NULL; cb = (CodeBlob*) heap->next(cb)) { + nmethod* nm = cb->as_nmethod_or_null(); + invocation_count_file.print_cr("###%lu %s size=%dB %p %p %p state=%d name=%s alive=%s nmethod=%s use=%s entrant=%s zombie=%s level=%d code=%p", + total++, + "np", + cb->size(), + cb, cb->code_begin(), cb->data_end(), + nm ? nm->get_state() : ILL_NM_STATE, + (nm && nm->method()) ? nm->method()->name_and_sig_as_C_string() : "NULL", + B_TF(cb->is_alive()), + B_TF(cb->is_nmethod()), + nm ? B_TF(nm->is_in_use()) : "?", + nm ? B_TF(!nm->is_not_entrant()) : "?", + nm ? B_TF(nm->is_zombie()) : "?", + nm ? nm->comp_level() : -1, + (nm && nm->method()) ? nm->method()->code() : 0); + if (nm && nm->method()) { + dump_nmethod_count(invocation_count_file, nm, cb); + } + } + } + else { + log_info(jbolt)("%s open error\n", filename); + } +} + +void JBoltManager::dump_code_heaps_with_count() { + if (!EnableDumpGraph) { + ShouldNotReachHere(); + return; + } + + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + CodeHeap* np_heap = CodeCache::get_code_heap(CodeBlobType::MethodNonProfiled); + CodeHeap* hot_heap = (UseJBolt && !JBoltDumpMode) ? CodeCache::get_code_heap(CodeBlobType::MethodJBoltHot) : NULL; + CodeHeap* tmp_heap = (UseJBolt && !JBoltDumpMode) ? 
CodeCache::get_code_heap(CodeBlobType::MethodJBoltTmp) : NULL; + + ResourceMark rm; + time_t current_time; + struct tm p; + char oldpath[PATH_LENGTH]; + char dirname[PATH_LENGTH]; + + time(&current_time); + localtime_r(&current_time, &p); + sprintf(dirname, "JBOLT.%d.%d.%d.%02d:%02d:%02d",1900+p.tm_year,1+p.tm_mon,p.tm_mday,p.tm_hour,p.tm_min,p.tm_sec); + + mkdir(dirname, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH); + if (getcwd(oldpath, PATH_LENGTH) != NULL) { + if (chdir(dirname) == OS_ERR) { + warning("Can't change to directory %s", dirname); + return; + } + dump_code_heap_with_count("count_np.txt", np_heap); + dump_code_heap_with_count("count_hot.txt", hot_heap); + dump_code_heap_with_count("count_tmp.txt", tmp_heap); + if (chdir(oldpath) == OS_ERR) { + warning("Can't change to directory %s", oldpath); + } + } +} + +#undef B_TF \ No newline at end of file diff --git a/src/hotspot/share/jbolt/jBoltManager.hpp b/src/hotspot/share/jbolt/jBoltManager.hpp new file mode 100644 index 000000000..aeb3e1f4f --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltManager.hpp @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_JBOLT_JBOLTMANAGER_HPP +#define SHARE_JBOLT_JBOLTMANAGER_HPP + +#include "compiler/compileTask.hpp" +#include "jbolt/jbolt_globals.hpp" +#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" +#include "jfr/dcmd/jfrDcmds.hpp" +#include "memory/allocation.hpp" +#include "memory/heap.hpp" +#include "oops/method.hpp" +#include "oops/symbol.hpp" +#include "runtime/handles.hpp" +#include "runtime/thread.hpp" +#include "utilities/growableArray.hpp" +#include "utilities/resourceHash.hpp" + +enum JBoltErrorCode { + JBoltOK = 0, + JBoltOrderNULL = 1, + JBoltOpenFileError = 2 +}; + +struct JBoltReorderPhase { + static const int Waiting = -1; // JBolt logic is waiting for something to be done. + static const int Available = 0; // JBolt logic is not working or is done (can be reordered again now). + static const int Collecting = 1; // Collecting methods in the order file (this phase is for two-phase only). + static const int Profiling = 2; // JFR is working (this phase is for one-phase only). + static const int Reordering = 3; // Recompiling and re-laying. + static const int End = 4; // JBolt is not available anymore (for two-phase, or error happened on one-phase). 
+}; + +class CompileTaskInfo : public CHeapObj { + Method* const _method; + jobject _method_holder; + const int _osr_bci; + const int _comp_level; + const int _comp_reason; + Method* const _hot_method; + jobject _hot_method_holder; + const int _hot_count; + +public: + CompileTaskInfo(Method* method, int osr_bci, int comp_level, int comp_reason, Method* hot_method, int hot_cnt); + ~CompileTaskInfo(); + + bool try_select(); + + Method* method() const { return _method; } + int osr_bci() const { return _osr_bci; } + int comp_level() const { return _comp_level; } + int comp_reason() const { return _comp_reason; } + Method* hot_method() const { return _hot_method; } + int hot_count() const { return _hot_count; } +}; + +class JBoltMethodKey : public StackObj { + Symbol* _klass; + Symbol* _name; + Symbol* _sig; + + void inc_ref_cnt() { + Symbol* arr[] = { _klass, _name, _sig }; + for (int i = 0; i < (int) (sizeof(arr) / sizeof(arr[0])); ++i) { + if (arr[i] != NULL) arr[i]->increment_refcount(); + } + } + + void dec_ref_cnt() { + Symbol* arr[] = { _klass, _name, _sig }; + for (int i = 0; i < (int) (sizeof(arr) / sizeof(arr[0])); ++i) { + if (arr[i] != NULL) arr[i]->decrement_refcount(); + } + } +public: + + JBoltMethodKey(Symbol* klass, Symbol* name, Symbol* sig): _klass(klass), _name(name), _sig(sig) { /* no inc_ref_cnt() here for SymbolTable::new_symbol() */ } + JBoltMethodKey(Method* method): _klass(method->method_holder()->name()), _name(method->name()), _sig(method->signature()) { inc_ref_cnt(); } + JBoltMethodKey(const JBoltMethodKey& other): _klass(other._klass), _name(other._name), _sig(other._sig) { inc_ref_cnt(); } + JBoltMethodKey(): _klass(NULL), _name(NULL), _sig(NULL) {} + ~JBoltMethodKey() { dec_ref_cnt(); } + + JBoltMethodKey& operator = (const JBoltMethodKey& other) { + dec_ref_cnt(); + _klass = other._klass; + _name = other._name; + _sig = other._sig; + inc_ref_cnt(); + return *this; + } + + unsigned hash() const { + unsigned v = primitive_hash(_klass); + 
v = v * 31 + primitive_hash(_name); + v = v * 31 + primitive_hash(_sig); + return v; + } + bool equals(const JBoltMethodKey& other) const { + return _klass == other._klass && _name == other._name && _sig == other._sig; + } + + static unsigned calc_hash(const JBoltMethodKey& k) { + return k.hash(); + } + static bool calc_equals(const JBoltMethodKey& k1, const JBoltMethodKey& k2) { + return k1.equals(k2); + } + + Symbol* klass() const { return _klass; } + Symbol* name() const { return _name; } + Symbol* sig() const { return _sig; } +}; + +class JBoltMethodValue : public StackObj { +private: + CompileTaskInfo* volatile _comp_info; + +public: + JBoltMethodValue(): _comp_info(NULL) {} + ~JBoltMethodValue(); + + CompileTaskInfo* get_comp_info(); + bool set_comp_info(CompileTaskInfo* info); + void clear_comp_info_but_not_release(); +}; + +class JBoltStackFrameKey : public StackObj { + Method* _method; + traceid _methodid; + +public: + JBoltStackFrameKey(Method* method, traceid methodid): _method(method), _methodid(methodid) {} + JBoltStackFrameKey(const JBoltStackFrameKey& other): _method(other._method), _methodid(other._methodid) {} + JBoltStackFrameKey(): _method(NULL), _methodid(0) {} + ~JBoltStackFrameKey() { /* nothing to do as _method is a softcopy of JfrStackFrame::_method */ } + + + JBoltStackFrameKey& operator = (const JBoltStackFrameKey& other) { + _method = other._method; + _methodid = other._methodid; + return *this; + } + + unsigned hash() const { + unsigned v = primitive_hash(_method); + v = v * 31 + primitive_hash(_methodid); + return v; + } + + bool equals(const JBoltStackFrameKey& other) const { + return _method == other._method && _methodid == other._methodid; + } + + static unsigned calc_hash(const JBoltStackFrameKey& k) { + return k.hash(); + } + + static bool calc_equals(const JBoltStackFrameKey& k1, const JBoltStackFrameKey& k2) { + return k1.equals(k2); + } +}; + +class JBoltStackFrameValue : public StackObj { +private: + jobject _method_holder; + 
+public: + JBoltStackFrameValue(jobject method_holder): _method_holder(method_holder) {} + ~JBoltStackFrameValue(); + + jobject get_method_holder(); + void clear_method_holder_but_not_release(); +}; + +class JBoltManager : public AllStatic { + friend class JBoltControlThread; + + typedef ResourceHashtable MethodKeyMap; + + typedef ResourceHashtable StackFrameKeyMap; + + typedef ResourceHashtable, primitive_equals, + 15889, ResourceObj::C_HEAP, mtTracing> MethodHotCountMap; + + static GrowableArray* _hot_methods_sorted; + static MethodKeyMap* _hot_methods_vis; + static int _reorder_method_threshold_cnt; + + static volatile int _reorder_phase; + static volatile int _reorderable_method_cnt; + static Method* volatile _cur_reordering_method; + + // the CompilerThread to start the new JBoltControlThread + static Thread* _start_reordering_thread; + + static StackFrameKeyMap* _sampled_methods_refs; + static MethodHotCountMap* _sampled_methods_hotcount_stored; + + // when not set JBoltDumpMode or JBoltLoadMode, JBolt will be in one-step auto mode. + static bool _auto_mode; + + // use MethodJBoltHot and MethodJBoltTmp as two semi hot space. + // each time restart a schedule, we exchange primary and secondary + static volatile int _primary_hot_seg; + static volatile int _secondary_hot_seg; + + // when primary hot codecache is full, we stop recompiling. + static volatile bool _hot_codecache_full; + // JBolt force sweep codecache. + static volatile bool _force_sweep; + + // timelist to trigger JBolt rescheduling(format: hh:mm) + static GrowableArray* _rescheduling_time; + +private: + // Used in dump mode. + static methodHandle lookup_method(Method* method, traceid method_id); + static void construct_stacktrace(const JfrStackTrace &stacktrace); + + // Used in init phase 1. 
+ static void check_mode(); + static void check_order_file(); + static void check_dependency(); + static size_t calc_nmethod_size_with_padding(size_t nmethod_size); + static size_t calc_segment_size_with_padding(size_t segment_size); + static void load_order_file_phase1(int* method_cnt , size_t* total_nmethod_size); + static void init_load_mode_phase1(); + + // Used in init phase 2. + static bool parse_method_line_phase2(char* const line, const int len, TRAPS); + static bool parse_connected_component_line_phase2(char* const line, const int len); + static void load_order_file_phase2(TRAPS); + static void init_load_mode_phase2(TRAPS); + static void init_dump_mode_phase2(TRAPS); + + // Used in auto mode. + static int primary_hot_seg(); + static int secondary_hot_seg(); + static void remove_duplicate_time(GrowableArray* times); + static void parse_rescheduling(); + static GrowableArray* rescheduling_time(); + + // Used in auto mode prev_control_schedule + static int clear_last_sample_datas(); + static void swap_semi_jbolt_segs(); + static int clear_manager(); + + // Used in auto mode control_schedule + static void init_auto_transition(size_t* segment_size, TRAPS); + + // Used in auto mode post_control_schedule + static void clear_secondary_hot_seg(TRAPS); + + // JBolt phases + static int reorder_phase(); + + static bool reorder_phase_available_to_collecting(); + static bool reorder_phase_collecting_to_reordering(); + + static bool reorder_phase_available_to_profiling(); + static bool reorder_phase_profiling_to_reordering(); + static bool reorder_phase_reordering_to_available(); + static bool reorder_phase_profiling_to_available(); + static bool reorder_phase_profiling_to_waiting(); + static bool reorder_phase_waiting_to_reordering(); + static bool reorder_phase_waiting_to_available(); + + static bool reorder_phase_reordering_to_end(); + + static Method* cur_reordering_method(); + static void set_cur_reordering_method(Method* method); + static int 
inc_reorderable_method_cnt(); + + // Used in reordering phase. + static CompileTask* create_a_task_instance(CompileTaskInfo* cti, const methodHandle& method, const methodHandle& hot_method, TRAPS); + static bool check_compiled_result(Method* method, int check_blob_type, TRAPS); + static bool enqueue_recompile_task(CompileTaskInfo* cti, const methodHandle& method, const methodHandle& hot_method, TRAPS); + static bool recompile_one(CompileTaskInfo* cti, const methodHandle& method, const methodHandle& hot_method, TRAPS); + + static void print_code_heap(outputStream& ls, CodeHeap* heap, const char* name); + static void dump_nmethod_count(fileStream& file, nmethod* nm, CodeBlob* cb); + static void dump_code_heap_with_count(const char* filename, CodeHeap* heap); +public: + static void log_stacktrace(const JfrStackTrace &stacktrace); + static void construct_cg_once(); + static void dump_order_in_manual(); + static JBoltErrorCode dump_order_in_jcmd(const char* filename); + + static void check_arguments_not_set(); + static void init_phase1(); + static void init_phase2(TRAPS); + static void init_code_heaps(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, size_t alignment); + + static bool auto_mode() { return _auto_mode; } + + static bool reorder_phase_is_waiting(); + static bool reorder_phase_is_available(); + static bool reorder_phase_is_collecting(); + static bool reorder_phase_is_profiling(); + static bool reorder_phase_is_reordering(); + static bool reorder_phase_is_profiling_or_waiting(); + static bool reorder_phase_is_collecting_or_reordering(); + + static bool can_reorder_now(); + static bool should_reorder_now(); + + static int calc_code_blob_type(Method* method, CompileTask* task, TRAPS); + + static void handle_full_jbolt_code_cache(); + static bool force_sweep(); + + static void check_start_reordering(TRAPS); + static void reorder_all_methods(TRAPS); + static void clear_structures(); + + static void print_code_heaps(); + 
static void dump_code_heaps_with_count(); +}; + +#endif // SHARE_JBOLT_JBOLTMANAGER_HPP diff --git a/src/hotspot/share/jbolt/jBoltUtils.cpp b/src/hotspot/share/jbolt/jBoltUtils.cpp new file mode 100644 index 000000000..6db5c9095 --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltUtils.cpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "jbolt/jBoltUtils.hpp" + +JBoltUtils::MetaDataKeepAliveMark::MetaDataKeepAliveMark(Thread* thread) : _thread(thread), _kept() { + assert(thread == Thread::current(), "Must be current thread"); + assert(_thread->is_in_stack((address)this), "not on stack?"); +} + +JBoltUtils::MetaDataKeepAliveMark::~MetaDataKeepAliveMark() { + for (int i = _kept.length() - 1; i >= 0; --i) { + Metadata* md = _kept.at(i); + int idx = _thread->metadata_handles()->find_from_end(md); + assert(idx != -1, "not in metadata_handles list"); + _thread->metadata_handles()->remove_at(idx); + } +} \ No newline at end of file diff --git a/src/hotspot/share/jbolt/jBoltUtils.hpp b/src/hotspot/share/jbolt/jBoltUtils.hpp new file mode 100644 index 000000000..8258b7125 --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltUtils.hpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_JBOLT_JBOLTUTILS_HPP +#define SHARE_JBOLT_JBOLTUTILS_HPP + +#include "memory/allocation.hpp" +#include "oops/metadata.hpp" +#include "runtime/thread.hpp" +#include "utilities/growableArray.hpp" + +class JBoltUtils : public AllStatic { +public: + /** + * Keep the metadata alive. + * + * @see KeepAliveRegistrar + * @see methodHandle + */ + class MetaDataKeepAliveMark : public StackObj { + private: + Thread* _thread; + GrowableArray _kept; + + public: + MetaDataKeepAliveMark(Thread* thread); + ~MetaDataKeepAliveMark(); + + void add(Metadata* md); + + const GrowableArray& kept() { return _kept; } + }; +}; + +#endif // SHARE_JBOLT_JBOLTUTILS_HPP \ No newline at end of file diff --git a/src/hotspot/share/jbolt/jBoltUtils.inline.hpp b/src/hotspot/share/jbolt/jBoltUtils.inline.hpp new file mode 100644 index 000000000..972983df0 --- /dev/null +++ b/src/hotspot/share/jbolt/jBoltUtils.inline.hpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_JBOLT_JBOLTUTILS_INLINE_HPP +#define SHARE_JBOLT_JBOLTUTILS_INLINE_HPP + +#include "jbolt/jBoltUtils.hpp" + +// Register a metadata as 'in-use' by the thread. It's fine to register a +// metadata multiple times (though perhaps inefficient). +inline void JBoltUtils::MetaDataKeepAliveMark::add(Metadata* md) { + assert(md->is_valid(), "obj is valid"); + assert(_thread == Thread::current(), "thread must be current"); + _kept.push(md); + _thread->metadata_handles()->push(md); +} + +#endif // SHARE_JBOLT_JBOLTUTILS_INLINE_HPP \ No newline at end of file diff --git a/src/hotspot/share/jbolt/jbolt_globals.cpp b/src/hotspot/share/jbolt/jbolt_globals.cpp new file mode 100644 index 000000000..aee3feef1 --- /dev/null +++ b/src/hotspot/share/jbolt/jbolt_globals.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "jbolt/jbolt_globals.hpp" +#include "runtime/globals_extension.hpp" + +JBOLT_FLAGS(MATERIALIZE_DEVELOPER_FLAG, \ + MATERIALIZE_PD_DEVELOPER_FLAG, \ + MATERIALIZE_PRODUCT_FLAG, \ + MATERIALIZE_PD_PRODUCT_FLAG, \ + MATERIALIZE_DIAGNOSTIC_FLAG, \ + MATERIALIZE_PD_DIAGNOSTIC_FLAG, \ + MATERIALIZE_EXPERIMENTAL_FLAG, \ + MATERIALIZE_NOTPRODUCT_FLAG, + IGNORE_RANGE, \ + IGNORE_CONSTRAINT) diff --git a/src/hotspot/share/jbolt/jbolt_globals.hpp b/src/hotspot/share/jbolt/jbolt_globals.hpp new file mode 100644 index 000000000..8ebc4fb7a --- /dev/null +++ b/src/hotspot/share/jbolt/jbolt_globals.hpp @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_JBOLT_JBOLT_GLOBALS_HPP +#define SHARE_JBOLT_JBOLT_GLOBALS_HPP + +#include "runtime/globals.hpp" + +#define JBOLT_FLAGS(develop, \ + develop_pd, \ + product, \ + product_pd, \ + diagnostic, \ + diagnostic_pd, \ + experimental, \ + notproduct, \ + range, \ + constraint) \ + \ + experimental(bool, UseJBolt, false, \ + "Enable JBolt feature.") \ + \ + experimental(bool, JBoltDumpMode, false, \ + "Trial run of JBolt. Collect profiling and dump it.") \ + \ + experimental(bool, JBoltLoadMode, false, \ + "Second run of JBolt. Load the profiling and reorder nmethods.") \ + \ + experimental(intx, JBoltSampleInterval, 600, \ + "Sample interval(second) of JBolt dump mode" \ + "only useful in auto mode.") \ + range(0, 36000) \ + \ + experimental(ccstr, JBoltOrderFile, NULL, \ + "The JBolt method order file to dump or load.") \ + \ + diagnostic(double, JBoltReorderThreshold, 0.86, \ + "The threshold to trigger JBolt reorder in load mode.") \ + range(0.1, 0.9) \ + \ + experimental(uintx, JBoltCodeHeapSize, 8*M , \ + "Code heap size of MethodJBoltHot and MethodJBoltTmp heaps.") \ + \ + experimental(ccstr, JBoltRescheduling, NULL, \ + "Trigger rescheduling at a fixed time every day.") \ + \ + diagnostic(bool, EnableDumpGraph, false, \ + "Enable JBolt.dumpgraph to produce source data files") \ + \ + +// end of JBOLT_FLAGS + +JBOLT_FLAGS(DECLARE_DEVELOPER_FLAG, \ + DECLARE_PD_DEVELOPER_FLAG, \ + DECLARE_PRODUCT_FLAG, \ + DECLARE_PD_PRODUCT_FLAG, \ + DECLARE_DIAGNOSTIC_FLAG, \ + DECLARE_PD_DIAGNOSTIC_FLAG, \ + DECLARE_EXPERIMENTAL_FLAG, \ + DECLARE_NOTPRODUCT_FLAG, \ + IGNORE_RANGE, \ + IGNORE_CONSTRAINT) + +#endif // SHARE_JBOLT_JBOLT_GLOBALS_HPP diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml index 2ae21bf0c..d24455569 100644 --- a/src/hotspot/share/jfr/metadata/metadata.xml +++ b/src/hotspot/share/jfr/metadata/metadata.xml @@ -779,6 +779,8 @@ + + diff --git a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp 
b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp index 59bbd0c2d..d9580e57e 100644 --- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp +++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp @@ -66,6 +66,9 @@ #if INCLUDE_SHENANDOAHGC #include "gc/shenandoah/shenandoahJfrSupport.hpp" #endif +#if INCLUDE_JBOLT +#include "jbolt/jbolt_globals.hpp" +#endif // INCLUDE_JBOLT /** * JfrPeriodic class @@ -554,6 +557,8 @@ TRACE_REQUEST_FUNC(CodeCacheConfiguration) { event.set_nonNMethodSize(NonNMethodCodeHeapSize); event.set_profiledSize(ProfiledCodeHeapSize); event.set_nonProfiledSize(NonProfiledCodeHeapSize); + event.set_jboltHotSize(JBoltCodeHeapSize); + event.set_jboltTmpSize(JBoltCodeHeapSize); event.set_expansionSize(CodeCacheExpansionSize); event.set_minBlockLength(CodeCacheMinBlockLength); event.set_startAddress((u8)CodeCache::low_bound()); diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp index 261e605dd..a88353346 100644 --- a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp +++ b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp @@ -37,6 +37,9 @@ #include "runtime/semaphore.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.hpp" +#if INCLUDE_JBOLT +#include "jbolt/jBoltManager.hpp" +#endif enum JfrSampleType { NO_SAMPLE = 0, @@ -256,7 +259,13 @@ bool JfrThreadSampleClosure::sample_thread_in_java(JavaThread* thread, JfrStackF return false; } EventExecutionSample *event = &_events[_added_java - 1]; - traceid id = JfrStackTraceRepository::add(sampler.stacktrace()); + traceid id = 0; +#if INCLUDE_JBOLT + if (UseJBolt && JBoltManager::reorder_phase_is_profiling()) { + id = JfrStackTraceRepository::add_jbolt(sampler.stacktrace()); + } else +#endif + id = JfrStackTraceRepository::add(sampler.stacktrace()); assert(id != 0, "Stacktrace id should not be 0"); event->set_stackTrace(id); return true; diff --git 
a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp index 630116b0d..91efd5459 100644 --- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp +++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp @@ -58,7 +58,11 @@ JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames_ownership(false), _reached_root(false), _lineno(false), - _written(false) {} + _written(false) +#if INCLUDE_JBOLT + , _hotcount(1) +#endif + {} JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next) : _next(next), @@ -70,7 +74,11 @@ JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrSt _frames_ownership(true), _reached_root(trace._reached_root), _lineno(trace._lineno), - _written(false) { + _written(false) +#if INCLUDE_JBOLT + , _hotcount(trace._hotcount) +#endif +{ copy_frames(&_frames, trace._nr_of_frames, trace._frames); } diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp index 314ac8128..7486e5bff 100644 --- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp +++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp @@ -53,6 +53,17 @@ class JfrStackFrame { void write(JfrCheckpointWriter& cpw) const; void resolve_lineno() const; +#if INCLUDE_JBOLT + const Method* get_method() const { return _method; } + traceid get_methodId() const { return _methodid; } + int get_byteCodeIndex() const { return _bci; } + u1 get_type() const { return _type; } + + static ByteSize method_offset() { return byte_offset_of(JfrStackFrame, _method ); } + static ByteSize methodid_offset() { return byte_offset_of(JfrStackFrame, _methodid ); } + static ByteSize bci_offset() { return byte_offset_of(JfrStackFrame, _bci ); } + static ByteSize type_offset() { return byte_offset_of(JfrStackFrame, _type ); } +#endif enum { FRAME_INTERPRETER = 0, 
FRAME_JIT, @@ -69,6 +80,9 @@ class JfrStackTrace : public JfrCHeapObj { friend class ObjectSampler; friend class OSThreadSampler; friend class StackTraceResolver; +#if INCLUDE_JBOLT + friend class JBoltManager; +#endif private: const JfrStackTrace* _next; JfrStackFrame* _frames; @@ -80,6 +94,9 @@ class JfrStackTrace : public JfrCHeapObj { bool _reached_root; mutable bool _lineno; mutable bool _written; +#if INCLUDE_JBOLT + u4 _hotcount; +#endif const JfrStackTrace* next() const { return _next; } @@ -107,6 +124,17 @@ class JfrStackTrace : public JfrCHeapObj { public: unsigned int hash() const { return _hash; } traceid id() const { return _id; } +#if INCLUDE_JBOLT + u4 hotcount() const { return _hotcount; } + const JfrStackFrame* get_frames() const { return _frames; } + u4 get_framesCount() const { return _nr_of_frames; } + + static ByteSize hash_offset() { return byte_offset_of(JfrStackTrace, _hash ); } + static ByteSize id_offset() { return byte_offset_of(JfrStackTrace, _id ); } + static ByteSize hotcount_offset() { return byte_offset_of(JfrStackTrace, _hotcount ); } + static ByteSize frames_offset() { return byte_offset_of(JfrStackTrace, _frames ); } + static ByteSize frames_count_offset() { return byte_offset_of(JfrStackTrace, _nr_of_frames ); } +#endif }; #endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp index d873651f2..07502c767 100644 --- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp +++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp @@ -29,6 +29,9 @@ #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" #include "jfr/support/jfrThreadLocal.hpp" #include "runtime/mutexLocker.hpp" +#if INCLUDE_JBOLT +#include "jbolt/jBoltManager.hpp" +#endif /* * There are two separate repository instances. 
@@ -51,9 +54,16 @@ static JfrStackTraceRepository& leak_profiler_instance() { return *_leak_profiler_instance; } +#if INCLUDE_JBOLT +JfrStackTraceRepository::JfrStackTraceRepository() : _last_entries(0), _entries(0), _last_entries_jbolt(0), _entries_jbolt(0) { + memset(_table, 0, sizeof(_table)); + memset(_table_jbolt, 0, sizeof(_table_jbolt)); +} +#else JfrStackTraceRepository::JfrStackTraceRepository() : _last_entries(0), _entries(0) { memset(_table, 0, sizeof(_table)); } +#endif JfrStackTraceRepository* JfrStackTraceRepository::create() { assert(_instance == NULL, "invariant"); @@ -98,11 +108,16 @@ bool JfrStackTraceRepository::is_modified() const { } size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) { +#if INCLUDE_JBOLT + if (clear && (UseJBolt && JBoltManager::reorder_phase_is_profiling_or_waiting())) { + JBoltManager::construct_cg_once(); + } +#endif + MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag); if (_entries == 0) { return 0; } - MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag); - assert(_entries > 0, "invariant"); + int count = 0; for (u4 i = 0; i < TABLE_SIZE; ++i) { JfrStackTrace* stacktrace = _table[i]; @@ -121,6 +136,21 @@ size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) { if (clear) { memset(_table, 0, sizeof(_table)); _entries = 0; +#if INCLUDE_JBOLT + for (u4 i = 0; i < TABLE_SIZE; ++i) { + JfrStackTrace* stacktrace = _table_jbolt[i]; + while (stacktrace != NULL) { + JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next()); + delete stacktrace; + stacktrace = next; + } + } + memset(_table_jbolt, 0, sizeof(_table_jbolt)); + _entries_jbolt = 0; + } + _last_entries_jbolt = _entries_jbolt; + { +#endif } _last_entries = _entries; return count; @@ -143,6 +173,21 @@ size_t JfrStackTraceRepository::clear(JfrStackTraceRepository& repo) { const size_t processed = repo._entries; repo._entries = 0; repo._last_entries = 0; +#if INCLUDE_JBOLT + if (repo._entries_jbolt != 0) { + 
for (u4 i = 0; i < TABLE_SIZE; ++i) { + JfrStackTrace* stacktrace = repo._table_jbolt[i]; + while (stacktrace != NULL) { + JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next()); + delete stacktrace; + stacktrace = next; + } + } + memset(repo._table_jbolt, 0, sizeof(repo._table_jbolt)); + repo._entries_jbolt = 0; + repo._last_entries_jbolt = 0; + } +#endif return processed; } @@ -232,6 +277,75 @@ const JfrStackTrace* JfrStackTraceRepository::lookup_for_leak_profiler(unsigned return trace; } +#if INCLUDE_JBOLT +size_t JfrStackTraceRepository::clear_jbolt(JfrStackTraceRepository& repo) { + MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag); + if (repo._entries_jbolt == 0) { + return 0; + } + + for (u4 i = 0; i < TABLE_SIZE; ++i) { + JfrStackTrace* stacktrace = repo._table_jbolt[i]; + while (stacktrace != NULL) { + JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next()); + delete stacktrace; + stacktrace = next; + } + } + memset(repo._table_jbolt, 0, sizeof(repo._table_jbolt)); + const size_t processed = repo._entries_jbolt; + repo._entries_jbolt = 0; + repo._last_entries_jbolt = 0; + + return processed; +} + +size_t JfrStackTraceRepository::clear_jbolt() { + clear_jbolt(leak_profiler_instance()); + return clear_jbolt(instance()); +} + +traceid JfrStackTraceRepository::add_jbolt(JfrStackTraceRepository& repo, const JfrStackTrace& stacktrace) { + traceid tid = repo.add_trace_jbolt(stacktrace); + if (tid == 0) { + stacktrace.resolve_linenos(); + tid = repo.add_trace_jbolt(stacktrace); + } + assert(tid != 0, "invariant"); + return tid; +} + +traceid JfrStackTraceRepository::add_jbolt(const JfrStackTrace& stacktrace) { + JBoltManager::log_stacktrace(stacktrace); + return add_jbolt(instance(), stacktrace); +} + +traceid JfrStackTraceRepository::add_trace_jbolt(const JfrStackTrace& stacktrace) { + traceid id = add_trace(stacktrace); + MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag); + const size_t index = stacktrace._hash % TABLE_SIZE; + + 
if (UseJBolt && JBoltManager::reorder_phase_is_profiling()) { + const JfrStackTrace* table_jbolt_entry = _table_jbolt[index]; + while (table_jbolt_entry != NULL) { + if (table_jbolt_entry->equals(stacktrace)) { + // [jbolt]: each time add an old trace, inc its hotcount + const_cast<JfrStackTrace*>(table_jbolt_entry)->_hotcount++; + return table_jbolt_entry->id(); + } + table_jbolt_entry = table_jbolt_entry->next(); + } + } + + if (id != 0 && UseJBolt && JBoltManager::reorder_phase_is_profiling()) { + _table_jbolt[index] = new JfrStackTrace(id, stacktrace, _table_jbolt[index]); + ++_entries_jbolt; + } + + return id; +} +#endif + void JfrStackTraceRepository::clear_leak_profiler() { clear(leak_profiler_instance()); } diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp index bf32df99f..e8868a467 100644 --- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp +++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp @@ -42,6 +42,9 @@ class JfrStackTraceRepository : public JfrCHeapObj { friend class RecordStackTrace; friend class StackTraceBlobInstaller; friend class WriteStackTraceRepository; +#if INCLUDE_JBOLT + friend class JBoltManager; +#endif private: static const u4 TABLE_SIZE = 2053; @@ -49,6 +52,19 @@ class JfrStackTraceRepository : public JfrCHeapObj { u4 _last_entries; u4 _entries; +#if INCLUDE_JBOLT + // [jbolt]: an exclusive table for jbolt. 
It should be a subset of _table + JfrStackTrace* _table_jbolt[TABLE_SIZE]; + u4 _last_entries_jbolt; + u4 _entries_jbolt; + + static size_t clear_jbolt(); + static size_t clear_jbolt(JfrStackTraceRepository& repo); + traceid add_trace_jbolt(const JfrStackTrace& stacktrace); + static traceid add_jbolt(JfrStackTraceRepository& repo, const JfrStackTrace& stacktrace); + static traceid add_jbolt(const JfrStackTrace& stacktrace); +#endif + JfrStackTraceRepository(); static JfrStackTraceRepository& instance(); static JfrStackTraceRepository* create(); @@ -71,6 +87,13 @@ class JfrStackTraceRepository : public JfrCHeapObj { public: static traceid record(Thread* thread, int skip = 0); +#if INCLUDE_JBOLT + const JfrStackTrace* const * get_stacktrace_table() const { return _table; } + u4 get_entries_count() const { return _entries; } + + const JfrStackTrace* const * get_stacktrace_table_jbolt() const { return _table_jbolt; } + u4 get_entries_count_jbolt() const { return _entries_jbolt; } +#endif }; #endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp index 0ec37b2f6..4e117530a 100644 --- a/src/hotspot/share/logging/logTag.hpp +++ b/src/hotspot/share/logging/logTag.hpp @@ -84,6 +84,7 @@ LOG_TAG(inlining) \ LOG_TAG(interpreter) \ LOG_TAG(itables) \ + JBOLT_ONLY(LOG_TAG(jbolt)) \ LOG_TAG(jfr) \ LOG_TAG(jit) \ LOG_TAG(jni) \ diff --git a/src/hotspot/share/memory/virtualspace.hpp b/src/hotspot/share/memory/virtualspace.hpp index 4dace9d88..4cdddcf0b 100644 --- a/src/hotspot/share/memory/virtualspace.hpp +++ b/src/hotspot/share/memory/virtualspace.hpp @@ -82,6 +82,16 @@ class ReservedSpace { bool split = false, bool realloc = true); inline ReservedSpace last_part (size_t partition_size); +#if INCLUDE_JBOLT + static ReservedSpace static_first_part(ReservedSpace rs, size_t partition_size) { + return rs.first_part(partition_size); + } + + static ReservedSpace 
static_last_part(ReservedSpace rs, size_t partition_size) { + return rs.last_part(partition_size); + } +#endif + // Alignment static size_t page_align_size_up(size_t size); static size_t page_align_size_down(size_t size); diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp index 346526487..392f9b6e1 100644 --- a/src/hotspot/share/oops/method.hpp +++ b/src/hotspot/share/oops/method.hpp @@ -99,6 +99,7 @@ class Method : public Metadata { #ifndef PRODUCT int64_t _compiled_invocation_count; #endif + // Entry point for calling both from and to the interpreter. address _i2i_entry; // All-args-on-stack calling convention // Entry point for calling from compiled code, to compiled code if it exists diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp index d8bfcff2d..943c110fc 100644 --- a/src/hotspot/share/opto/doCall.cpp +++ b/src/hotspot/share/opto/doCall.cpp @@ -1035,8 +1035,8 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { #ifndef PRODUCT void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) { - if( CountCompiledCalls ) { - if( at_method_entry ) { + if(CountCompiledCalls) { + if(at_method_entry) { // bump invocation counter if top method (for statistics) if (CountCompiledCalls && depth() == 1) { const TypePtr* addr_type = TypeMetadataPtr::make(method()); @@ -1067,7 +1067,6 @@ void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) { } #endif //PRODUCT - ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass, ciKlass* holder, ciMethod* callee, const TypeOopPtr* receiver_type, bool is_virtual, diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp index c1a7b2833..e16299178 100644 --- a/src/hotspot/share/opto/parse1.cpp +++ b/src/hotspot/share/opto/parse1.cpp @@ -1185,7 +1185,7 @@ void Parse::do_method_entry() { set_parse_bci(InvocationEntryBci); // Pseudo-BCP set_sp(0); // Java Stack Pointer - 
NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); ) + NOT_PRODUCT(count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/);) if (C->env()->dtrace_method_probes()) { make_dtrace_method_entry(method()); diff --git a/src/hotspot/share/runtime/flags/jvmFlag.cpp b/src/hotspot/share/runtime/flags/jvmFlag.cpp index 630c8becd..36d122464 100644 --- a/src/hotspot/share/runtime/flags/jvmFlag.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlag.cpp @@ -33,6 +33,9 @@ #include "runtime/globals_extension.hpp" #include "utilities/defaultStream.hpp" #include "utilities/stringUtils.hpp" +#if INCLUDE_JBOLT +#include "jbolt/jbolt_globals.hpp" +#endif #define DEFAULT_RANGE_STR_CHUNK_SIZE 64 static char* create_range_str(const char *fmt, ...) { @@ -786,6 +789,15 @@ const char* JVMFlag::flag_error_str(JVMFlag::Error error) { #define JVMCI_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_DEVELOP | JVMFlag::KIND_PLATFORM_DEPENDENT) }, #define JVMCI_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_NOT_PRODUCT) }, +#define JBOLT_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JBOLT | JVMFlag::KIND_PRODUCT) }, +#define JBOLT_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JBOLT | JVMFlag::KIND_PRODUCT | JVMFlag::KIND_PLATFORM_DEPENDENT) }, +#define JBOLT_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JBOLT | JVMFlag::KIND_DIAGNOSTIC) }, +#define JBOLT_PD_DIAGNOSTIC_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, 
NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JBOLT | JVMFlag::KIND_DIAGNOSTIC | JVMFlag::KIND_PLATFORM_DEPENDENT) }, +#define JBOLT_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JBOLT | JVMFlag::KIND_EXPERIMENTAL) }, +#define JBOLT_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JBOLT | JVMFlag::KIND_DEVELOP) }, +#define JBOLT_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JBOLT | JVMFlag::KIND_DEVELOP | JVMFlag::KIND_PLATFORM_DEPENDENT) }, +#define JBOLT_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JBOLT | JVMFlag::KIND_NOT_PRODUCT) }, + #ifdef _LP64 #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_LP64_PRODUCT) }, #else @@ -854,6 +866,18 @@ static JVMFlag flagTable[] = { IGNORE_CONSTRAINT, \ IGNORE_WRITEABLE) #endif // INCLUDE_JVMCI +#if INCLUDE_JBOLT + JBOLT_FLAGS(JBOLT_DEVELOP_FLAG_STRUCT, \ + JBOLT_PD_DEVELOP_FLAG_STRUCT, \ + JBOLT_PRODUCT_FLAG_STRUCT, \ + JBOLT_PD_PRODUCT_FLAG_STRUCT, \ + JBOLT_DIAGNOSTIC_FLAG_STRUCT, \ + JBOLT_PD_DIAGNOSTIC_FLAG_STRUCT, \ + JBOLT_EXPERIMENTAL_FLAG_STRUCT, \ + JBOLT_NOTPRODUCT_FLAG_STRUCT, \ + IGNORE_RANGE, \ + IGNORE_CONSTRAINT) +#endif // INCLUDE_JBOLT #ifdef COMPILER1 C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, \ C1_PD_DEVELOP_FLAG_STRUCT, \ diff --git a/src/hotspot/share/runtime/flags/jvmFlag.hpp b/src/hotspot/share/runtime/flags/jvmFlag.hpp index c0854b33c..439249f25 100644 --- a/src/hotspot/share/runtime/flags/jvmFlag.hpp +++ b/src/hotspot/share/runtime/flags/jvmFlag.hpp @@ -62,9 +62,10 @@ 
struct JVMFlag { KIND_ARCH = 1 << 14, KIND_LP64_PRODUCT = 1 << 15, KIND_JVMCI = 1 << 16, + KIND_JBOLT = 1 << 17, // set this bit if the flag was set on the command line - ORIG_COMMAND_LINE = 1 << 17, + ORIG_COMMAND_LINE = 1 << 18, KIND_MASK = ~(VALUE_ORIGIN_MASK | ORIG_COMMAND_LINE) }; diff --git a/src/hotspot/share/runtime/flags/jvmFlagRangeList.cpp b/src/hotspot/share/runtime/flags/jvmFlagRangeList.cpp index f947baf53..e5154f1f0 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagRangeList.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlagRangeList.cpp @@ -365,6 +365,19 @@ void JVMFlagRangeList::init(void) { IGNORE_WRITEABLE)); #endif // INCLUDE_JVMCI +#if INCLUDE_JBOLT + emit_range_no(NULL JBOLT_FLAGS(EMIT_RANGE_DEVELOPER_FLAG, \ + EMIT_RANGE_PD_DEVELOPER_FLAG, \ + EMIT_RANGE_PRODUCT_FLAG, \ + EMIT_RANGE_PD_PRODUCT_FLAG, \ + EMIT_RANGE_DIAGNOSTIC_FLAG, \ + EMIT_RANGE_PD_DIAGNOSTIC_FLAG, \ + EMIT_RANGE_EXPERIMENTAL_FLAG, \ + EMIT_RANGE_NOTPRODUCT_FLAG, \ + EMIT_RANGE_CHECK, \ + IGNORE_CONSTRAINT)); +#endif // INCLUDE_JBOLT + #ifdef COMPILER1 emit_range_no(NULL C1_FLAGS(EMIT_RANGE_DEVELOPER_FLAG, EMIT_RANGE_PD_DEVELOPER_FLAG, diff --git a/src/hotspot/share/runtime/globals_extension.hpp b/src/hotspot/share/runtime/globals_extension.hpp index 02491f6c7..c6adc45b0 100644 --- a/src/hotspot/share/runtime/globals_extension.hpp +++ b/src/hotspot/share/runtime/globals_extension.hpp @@ -36,6 +36,9 @@ #ifdef COMPILER2 #include "opto/c2_globals.hpp" #endif +#if INCLUDE_JBOLT +#include "jbolt/jbolt_globals.hpp" +#endif // Construct enum of Flag_ constants. 
@@ -62,6 +65,15 @@ #define JVMCI_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), #define JVMCI_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), +#define JBOLT_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), +#define JBOLT_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name), +#define JBOLT_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), +#define JBOLT_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name), +#define JBOLT_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), +#define JBOLT_PD_DIAGNOSTIC_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name), +#define JBOLT_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), +#define JBOLT_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), + #ifdef _LP64 #define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), #else @@ -130,6 +142,18 @@ typedef enum { IGNORE_CONSTRAINT, \ IGNORE_WRITEABLE) #endif // INCLUDE_JVMCI +#if INCLUDE_JBOLT + JBOLT_FLAGS(JBOLT_DEVELOP_FLAG_MEMBER, \ + JBOLT_PD_DEVELOP_FLAG_MEMBER, \ + JBOLT_PRODUCT_FLAG_MEMBER, \ + JBOLT_PD_PRODUCT_FLAG_MEMBER, \ + JBOLT_DIAGNOSTIC_FLAG_MEMBER, \ + JBOLT_PD_DIAGNOSTIC_FLAG_MEMBER, \ + JBOLT_EXPERIMENTAL_FLAG_MEMBER, \ + JBOLT_NOTPRODUCT_FLAG_MEMBER, \ + IGNORE_RANGE, \ + IGNORE_CONSTRAINT) +#endif #ifdef COMPILER1 C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, \ C1_PD_DEVELOP_FLAG_MEMBER, \ @@ -191,6 +215,15 @@ typedef enum { #define JVMCI_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define JVMCI_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), +#define JBOLT_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), +#define JBOLT_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type), +#define JBOLT_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), 
+#define JBOLT_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type), +#define JBOLT_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), +#define JBOLT_PD_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type), +#define JBOLT_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), +#define JBOLT_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), + #define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), @@ -259,6 +292,18 @@ typedef enum { IGNORE_CONSTRAINT, IGNORE_WRITEABLE) #endif // INCLUDE_JVMCI +#if INCLUDE_JBOLT + JBOLT_FLAGS(JBOLT_DEVELOP_FLAG_MEMBER_WITH_TYPE, + JBOLT_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, + JBOLT_PRODUCT_FLAG_MEMBER_WITH_TYPE, + JBOLT_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, + JBOLT_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, + JBOLT_PD_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, + JBOLT_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE, + JBOLT_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE, + IGNORE_RANGE, + IGNORE_CONSTRAINT) +#endif // INCLUDE_JBOLT #ifdef COMPILER1 C1_FLAGS(C1_DEVELOP_FLAG_MEMBER_WITH_TYPE, C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index 84123b29e..2f61f3055 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -88,6 +88,9 @@ #if INCLUDE_JFR #include "jfr/jfr.hpp" #endif +#if INCLUDE_JBOLT +#include "jbolt/jBoltManager.hpp" +#endif GrowableArray<Method*>* collected_profiled_methods; @@ -540,6 +543,12 @@ void before_exit(JavaThread* thread) { // Note: we don't wait until it actually dies. 
os::terminate_signal_thread(); +#if INCLUDE_JBOLT + if (UseJBolt && JBoltDumpMode) { + JBoltManager::dump_order_in_manual(); + } +#endif + print_statistics(); Universe::heap()->print_tracing_info(); diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp index f0747079b..cfb14dd37 100644 --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -295,8 +295,8 @@ void mutex_init() { // lower the JNI oopstorage lock ranks to make them super-special. def(JNIGlobalAlloc_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never); def(JNIGlobalActive_lock , PaddedMutex , nonleaf-1, true, Monitor::_safepoint_check_never); - def(JNIWeakAlloc_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never); - def(JNIWeakActive_lock , PaddedMutex , nonleaf-1, true, Monitor::_safepoint_check_never); + def(JNIWeakAlloc_lock , PaddedMutex , vmweak, true, Monitor::_safepoint_check_never); + def(JNIWeakActive_lock , PaddedMutex , vmweak-1, true, Monitor::_safepoint_check_never); def(JNICritical_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always); // used for JNI critical regions def(AdapterHandlerLibrary_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always); diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp index 721b3e072..99112cb34 100644 --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -181,7 +181,7 @@ void print_owned_locks_on_error(outputStream* st); char *lock_name(Mutex *mutex); class MutexLocker: StackObj { - private: + protected: Monitor * _mutex; public: MutexLocker(Monitor * mutex) { @@ -205,6 +205,38 @@ class MutexLocker: StackObj { }; +class MonitorLocker: public MutexLocker { + protected: + Monitor* as_monitor() const { + return static_cast<Monitor*>(_mutex); + } + + public: + MonitorLocker(Monitor* monitor) : + MutexLocker(monitor) { + // 
Superclass constructor did locking + assert(monitor != NULL, "NULL monitor not allowed"); + } + + MonitorLocker(Monitor* monitor, Thread* thread) : + MutexLocker(monitor, thread) { + // Superclass constructor did locking + assert(monitor != NULL, "NULL monitor not allowed"); + } + + bool wait(long timeout = 0) { + return as_monitor()->wait(!Monitor::_no_safepoint_check_flag, timeout, !Monitor::_as_suspend_equivalent_flag); + } + + void notify_all() { + as_monitor()->notify_all(); + } + + void notify() { + as_monitor()->notify(); + } +}; + // for debugging: check that we're already owning this lock (or are at a safepoint) #ifdef ASSERT void assert_locked_or_safepoint(const Monitor * lock); diff --git a/src/hotspot/share/runtime/sweeper.cpp b/src/hotspot/share/runtime/sweeper.cpp index e92682b6e..82f25c50b 100644 --- a/src/hotspot/share/runtime/sweeper.cpp +++ b/src/hotspot/share/runtime/sweeper.cpp @@ -46,6 +46,9 @@ #include "runtime/vmThread.hpp" #include "utilities/events.hpp" #include "utilities/xmlstream.hpp" +#if INCLUDE_JBOLT +#include "jbolt/jBoltManager.hpp" +#endif #ifdef ASSERT @@ -375,7 +378,7 @@ void NMethodSweeper::possibly_sweep() { // allocations go to the non-profiled heap and we must be make sure that there is // enough space. 
double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100; - if (free_percent <= StartAggressiveSweepingAt) { + if (free_percent <= StartAggressiveSweepingAt || (UseJBolt && JBoltManager::force_sweep())) { do_stack_scanning(); } diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index d843651a4..be2ecb437 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -138,6 +138,10 @@ #if INCLUDE_JFR #include "jfr/jfr.hpp" #endif +#if INCLUDE_JBOLT +#include "jbolt/jBoltDcmds.hpp" +#include "jbolt/jBoltManager.hpp" +#endif // INCLUDE_JBOLT // Initialization after module runtime initialization void universe_post_module_init(); // must happen after call_initPhase2 @@ -3844,6 +3848,14 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { // Initialize Java-Level synchronization subsystem ObjectMonitor::Initialize(); +#if INCLUDE_JBOLT + if (UseJBolt) { + JBoltManager::init_phase1(); + } else { + JBoltManager::check_arguments_not_set(); + } +#endif // INCLUDE_JBOLT + // Initialize global modules jint status = init_globals(); if (status != JNI_OK) { @@ -4089,6 +4101,13 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { ShouldNotReachHere(); } +#if INCLUDE_JBOLT + register_jbolt_dcmds(); + if (UseJBolt) { + JBoltManager::init_phase2(CATCH); + } +#endif // INCLUDE_JBOLT + return JNI_OK; } diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp index 7f2873457..dee43669c 100644 --- a/src/hotspot/share/utilities/growableArray.hpp +++ b/src/hotspot/share/utilities/growableArray.hpp @@ -30,6 +30,9 @@ #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/ostream.hpp" +#if INCLUDE_JBOLT +#include "utilities/sizes.hpp" +#endif // INCLUDE_JBOLT // A growable array. 
@@ -211,6 +214,10 @@ template <class E> class GrowableArray : public GenericGrowableArray { // Does nothing for resource and arena objects ~GrowableArray() { if (on_C_heap()) clear_and_deallocate(); } +#if INCLUDE_JBOLT + static ByteSize data_offset() { return byte_offset_of(GrowableArray, _data); } +#endif + void clear() { _len = 0; } int length() const { return _len; } int max_length() const { return _max; } diff --git a/src/hotspot/share/utilities/macros.hpp b/src/hotspot/share/utilities/macros.hpp index 6605ab367..6dd187652 100644 --- a/src/hotspot/share/utilities/macros.hpp +++ b/src/hotspot/share/utilities/macros.hpp @@ -119,6 +119,18 @@ #define NOT_CDS_RETURN_(code) { return code; } #endif // INCLUDE_CDS +#ifndef INCLUDE_JBOLT +#define INCLUDE_JBOLT 1 +#endif + +#if INCLUDE_JBOLT +#define JBOLT_ONLY(x) x +#define NOT_JBOLT(x) +#else +#define JBOLT_ONLY(x) +#define NOT_JBOLT(x) x +#endif // INCLUDE_JBOLT + #ifndef INCLUDE_MANAGEMENT #define INCLUDE_MANAGEMENT 1 #endif // INCLUDE_MANAGEMENT diff --git a/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheCLITestCase.java b/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheCLITestCase.java index 1c28a1fea..b5252cf63 100644 --- a/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheCLITestCase.java +++ b/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheCLITestCase.java @@ -73,7 +73,7 @@ public class CodeCacheCLITestCase { * Verifies that with disabled SegmentedCodeCache PrintCodeCache output * contains only CodeCache's entry. */ - NON_SEGMENTED(options -> !options.segmented, EnumSet.of(BlobType.All), + NON_SEGMENTED(options -> !options.segmented, EnumSet.copyOf(CodeCacheOptions.NON_SEGMENTED_HEAPS), CommandLineOptionTest.prepareBooleanFlag(SEGMENTED_CODE_CACHE, false)), /** @@ -82,7 +82,7 @@ public class CodeCacheCLITestCase { * profiled-nmethods heap and non-segmented CodeCache. 
*/ NON_TIERED(ONLY_SEGMENTED, - EnumSet.of(BlobType.NonNMethod, BlobType.MethodNonProfiled), + EnumSet.copyOf(CodeCacheOptions.SEGMENTED_HEAPS_WO_PROFILED), CommandLineOptionTest.prepareBooleanFlag(TIERED_COMPILATION, false)), /** @@ -91,7 +91,7 @@ public class CodeCacheCLITestCase { * heaps only. */ TIERED_LEVEL_0(SEGMENTED_SERVER, - EnumSet.of(BlobType.NonNMethod, BlobType.MethodNonProfiled), + EnumSet.copyOf(CodeCacheOptions.SEGMENTED_HEAPS_WO_PROFILED), CommandLineOptionTest.prepareBooleanFlag(TIERED_COMPILATION, true), CommandLineOptionTest.prepareNumericFlag(TIERED_STOP_AT, 0)), @@ -101,7 +101,7 @@ public class CodeCacheCLITestCase { * heaps only. */ TIERED_LEVEL_1(SEGMENTED_SERVER, - EnumSet.of(BlobType.NonNMethod, BlobType.MethodNonProfiled), + EnumSet.copyOf(CodeCacheOptions.SEGMENTED_HEAPS_WO_PROFILED), CommandLineOptionTest.prepareBooleanFlag(TIERED_COMPILATION, true), CommandLineOptionTest.prepareNumericFlag(TIERED_STOP_AT, 1)), @@ -110,7 +110,7 @@ public class CodeCacheCLITestCase { * contain information about all three code heaps. 
*/ TIERED_LEVEL_4(SEGMENTED_SERVER, - EnumSet.complementOf(EnumSet.of(BlobType.All)), + EnumSet.copyOf(CodeCacheOptions.ALL_SEGMENTED_HEAPS), CommandLineOptionTest.prepareBooleanFlag(TIERED_COMPILATION, true), CommandLineOptionTest.prepareNumericFlag(TIERED_STOP_AT, 4)); diff --git a/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheOptions.java b/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheOptions.java index d5e2f16c8..868d2796d 100644 --- a/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheOptions.java +++ b/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheOptions.java @@ -33,20 +33,27 @@ import java.util.List; public class CodeCacheOptions { public static final String SEGMENTED_CODE_CACHE = "SegmentedCodeCache"; - private static final EnumSet<BlobType> NON_SEGMENTED_HEAPS + public static final EnumSet<BlobType> NON_SEGMENTED_HEAPS = EnumSet.of(BlobType.All); - private static final EnumSet<BlobType> ALL_SEGMENTED_HEAPS - = EnumSet.complementOf(NON_SEGMENTED_HEAPS); - private static final EnumSet<BlobType> SEGMENTED_HEAPS_WO_PROFILED + public static final EnumSet<BlobType> JBOLT_HEAPS + = EnumSet.of(BlobType.MethodJBoltHot, BlobType.MethodJBoltTmp); + public static final EnumSet<BlobType> ALL_SEGMENTED_HEAPS + = EnumSet.complementOf(union(NON_SEGMENTED_HEAPS, JBOLT_HEAPS)); + public static final EnumSet<BlobType> ALL_SEGMENTED_HEAPS_WITH_JBOLT + = union(ALL_SEGMENTED_HEAPS, JBOLT_HEAPS); + public static final EnumSet<BlobType> SEGMENTED_HEAPS_WO_PROFILED = EnumSet.of(BlobType.NonNMethod, BlobType.MethodNonProfiled); - private static final EnumSet<BlobType> ONLY_NON_METHODS_HEAP + public static final EnumSet<BlobType> ONLY_NON_METHODS_HEAP = EnumSet.of(BlobType.NonNMethod); public final long reserved; public final long nonNmethods; public final long nonProfiled; public final long profiled; + public final long jboltHot; + public final long jboltTmp; public final boolean segmented; + public final boolean useJBolt; public static long mB(long val) { return CodeCacheOptions.kB(val) * 1024L; @@ -56,12 +63,21 @@ public class 
CodeCacheOptions { return val * 1024L; } + public static <E extends Enum<E>> EnumSet<E> union(EnumSet<E> e1, EnumSet<E> e2) { + EnumSet<E> res = EnumSet.copyOf(e1); + res.addAll(e2); + return res; + } + public CodeCacheOptions(long reserved) { this.reserved = reserved; this.nonNmethods = 0; this.nonProfiled = 0; this.profiled = 0; + this.jboltHot = 0; + this.jboltTmp = 0; this.segmented = false; + this.useJBolt = false; } public CodeCacheOptions(long reserved, long nonNmethods, long nonProfiled, @@ -70,7 +86,25 @@ public class CodeCacheOptions { this.nonNmethods = nonNmethods; this.nonProfiled = nonProfiled; this.profiled = profiled; + this.jboltHot = 0; + this.jboltTmp = 0; + this.segmented = true; + this.useJBolt = false; + } + + /** + * No tests for JBolt yet as the related VM options are experimental now. + */ + public CodeCacheOptions(long reserved, long nonNmethods, long nonProfiled, + long profiled, long jboltHot, long jboltTmp) { + this.reserved = reserved; + this.nonNmethods = nonNmethods; + this.nonProfiled = nonProfiled; + this.profiled = profiled; + this.jboltHot = jboltHot; + this.jboltTmp = jboltTmp; this.segmented = true; + this.useJBolt = true; } public long sizeForHeap(BlobType heap) { @@ -83,6 +117,10 @@ public class CodeCacheOptions { return this.nonProfiled; case MethodProfiled: return this.profiled; + case MethodJBoltHot: + return this.jboltHot; + case MethodJBoltTmp: + return this.jboltTmp; default: throw new Error("Unknown heap: " + heap.name()); } @@ -107,14 +145,26 @@ public class CodeCacheOptions { CommandLineOptionTest.prepareNumericFlag( BlobType.MethodProfiled.sizeOptionName, profiled)); } + + if (useJBolt) { + Collections.addAll(options, + CommandLineOptionTest.prepareNumericFlag( + BlobType.MethodJBoltHot.sizeOptionName, jboltHot), + CommandLineOptionTest.prepareNumericFlag( + BlobType.MethodJBoltTmp.sizeOptionName, jboltTmp)); + } + return options.toArray(new String[options.size()]); } public CodeCacheOptions mapOptions(EnumSet<BlobType> involvedCodeHeaps) { if 
(involvedCodeHeaps.isEmpty() || involvedCodeHeaps.equals(NON_SEGMENTED_HEAPS) - || involvedCodeHeaps.equals(ALL_SEGMENTED_HEAPS)) { + || involvedCodeHeaps.equals(ALL_SEGMENTED_HEAPS_WITH_JBOLT)) { return this; + } else if (involvedCodeHeaps.equals(ALL_SEGMENTED_HEAPS)) { + return new CodeCacheOptions(reserved, nonNmethods, + nonProfiled + jboltHot + jboltTmp, profiled); } else if (involvedCodeHeaps.equals(SEGMENTED_HEAPS_WO_PROFILED)) { return new CodeCacheOptions(reserved, nonNmethods, profiled + nonProfiled, 0L); diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltDumpModeTest.java b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltDumpModeTest.java new file mode 100644 index 000000000..f85c86542 --- /dev/null +++ b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltDumpModeTest.java @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test JBolt dump mode functions. 
+ * @library /test/lib + * @requires vm.flagless + * + * @run driver compiler.codecache.jbolt.JBoltDumpModeTest + */ + +package compiler.codecache.jbolt; + +import java.io.File; +import java.io.IOException; +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.Utils; + +public class JBoltDumpModeTest { + public static final String SRC_DIR = Utils.TEST_SRC; + public static final String ORDER_FILE = SRC_DIR + "/order.log"; + + private static void createOrderFile() { + try { + File order = new File(ORDER_FILE); + if (!order.exists()) { + order.createNewFile(); + } + } + catch (IOException e) { + e.printStackTrace(); + } + } + + private static void clearOrderFile() { + File order = new File(ORDER_FILE); + if (order.exists()) { + order.delete(); + } + } + + private static void OrderFileShouldExist() throws Exception { + File order = new File(ORDER_FILE); + if (order.exists()) { + order.delete(); + } + else { + throw new RuntimeException(ORDER_FILE + " doesn't exist as expect."); + } + } + + private static void OrderFileShouldNotExist() throws Exception { + File order = new File(ORDER_FILE); + if (order.exists()) { + throw new RuntimeException(ORDER_FILE + " exists while expect not."); + } + } + + private static void testNormalUse() throws Exception { + ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltOrderFile=" + ORDER_FILE, + "-XX:+JBoltDumpMode", + "-Xlog:jbolt*=trace", + "--version" + ); + + ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltOrderFile=" + ORDER_FILE, + "-XX:+JBoltDumpMode", + "-XX:StartFlightRecording", + "-Xlog:jbolt*=trace", + "--version" + ); + + ProcessBuilder pb3 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltOrderFile=" + ORDER_FILE, + "-XX:+JBoltDumpMode", + 
"-Xlog:jbolt*=trace", + "--version" + ); + + clearOrderFile(); + + String stdout; + + OutputAnalyzer out1 = new OutputAnalyzer(pb1.start()); + stdout = out1.getStdout(); + if (!stdout.contains("JBolt in dump mode now, start a JFR recording named \"jbolt-jfr\".")) { + throw new RuntimeException(stdout); + } + out1.shouldHaveExitValue(0); + OrderFileShouldExist(); + + OutputAnalyzer out2 = new OutputAnalyzer(pb2.start()); + stdout = out2.getStdout(); + if (!stdout.contains("JBolt in dump mode now, start a JFR recording named \"jbolt-jfr\".")) { + throw new RuntimeException(stdout); + } + out2.shouldHaveExitValue(0); + OrderFileShouldExist(); + + createOrderFile(); + OutputAnalyzer out3 = new OutputAnalyzer(pb3.start()); + stdout = out3.getStdout(); + if (!stdout.contains("JBoltOrderFile to dump already exists and will be overwritten:")) { + throw new RuntimeException(stdout); + } + out3.shouldHaveExitValue(0); + OrderFileShouldExist(); + } + + private static void testUnabletoRun() throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltOrderFile=" + ORDER_FILE, + "-XX:+JBoltDumpMode", + "-XX:-FlightRecorder", + "-Xlog:jbolt*=trace", + "--version" + ); + + clearOrderFile(); + + String stdout; + OutputAnalyzer out = new OutputAnalyzer(pb.start()); + + stdout = out.getStdout(); + if (!stdout.contains("JBolt depends on JFR!")) { + throw new RuntimeException(stdout); + } + OrderFileShouldNotExist(); + } + + private static void testFatalError() throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltOrderFile=" + ORDER_FILE, + "-XX:+JBoltDumpMode", + "-XX:foo", + "-Xlog:jbolt*=trace", + "--version" + ); + + clearOrderFile(); + + OutputAnalyzer out = new OutputAnalyzer(pb.start()); + + out.stderrShouldContain("Could not create the Java Virtual Machine"); + OrderFileShouldNotExist(); + } + + 
public static void main(String[] args) throws Exception { + testNormalUse(); + testUnabletoRun(); + testFatalError(); + } +} \ No newline at end of file diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltReschedulingTest.java b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltReschedulingTest.java new file mode 100644 index 000000000..549ae3122 --- /dev/null +++ b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltReschedulingTest.java @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test JBolt timing rescheduling functions. 
+ * @library /test/lib + * @requires vm.flagless + * + * @run driver compiler.codecache.jbolt.JBoltReschedulingTest + */ + +package compiler.codecache.jbolt; + +import java.io.File; +import java.io.IOException; +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.Utils; + +public class JBoltReschedulingTest { + public static final int LONG_LENGTH = 1025; + + private static void testNormalUse() throws Exception { + ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltRescheduling=07:30,14:30,21:30", + "-Xlog:jbolt*=trace", + "--version" + ); + + ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltRescheduling=00:30,01:30,02:30,03:30,03:30,04:30,05:30,06:30,07:30,08:30,09:30,10:30", + "-Xlog:jbolt*=trace", + "--version" + ); + + String stdout; + + OutputAnalyzer out1 = new OutputAnalyzer(pb1.start()); + stdout = out1.getStdout(); + if (!stdout.contains("Set time trigger at 07:30") && !stdout.contains("Set time trigger at 14:30") && !stdout.contains("Set time trigger at 21:30")) { + throw new RuntimeException(stdout); + } + out1.shouldHaveExitValue(0); + + OutputAnalyzer out2 = new OutputAnalyzer(pb2.start()); + stdout = out2.getStdout(); + // 03:30 is duplicate and 10:30 above max time length(10) + if (!stdout.contains("Set time trigger at 09:30") || stdout.contains("Set time trigger at 10:30")) { + throw new RuntimeException(stdout); + } + out2.shouldHaveExitValue(0); + } + + private static void testErrorCases() throws Exception { + ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:JBoltRescheduling=07:30,14:30,21:30", + "-Xlog:jbolt*=trace", + "--version" + ); + + StringBuilder sb = new StringBuilder(LONG_LENGTH); + for (int i = 0; i < LONG_LENGTH; ++i) { + sb.append('a'); + } + String long_str = 
sb.toString(); + ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltRescheduling=" + long_str, + "-Xlog:jbolt*=trace", + "--version" + ); + + ProcessBuilder pb3 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltRescheduling=12:303", + "-Xlog:jbolt*=trace", + "--version" + ); + + ProcessBuilder pb4 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltRescheduling=1:30", + "-Xlog:jbolt*=trace", + "--version" + ); + + ProcessBuilder pb5 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltRescheduling=12.30", + "-Xlog:jbolt*=trace", + "--version" + ); + + ProcessBuilder pb6 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltRescheduling=24:61", + "-Xlog:jbolt*=trace", + "--version" + ); + + OutputAnalyzer out1 = new OutputAnalyzer(pb1.start()); + + out1.stdoutShouldContain("Do not set VM option JBoltRescheduling without UseJBolt enabled."); + out1.shouldHaveExitValue(1); + + OutputAnalyzer out2 = new OutputAnalyzer(pb2.start()); + + out2.stdoutShouldContain("JBoltRescheduling is too long"); + out2.shouldHaveExitValue(1); + + OutputAnalyzer out3 = new OutputAnalyzer(pb3.start()); + + out3.stdoutShouldContain("Invalid time 12:303 in JBoltRescheduling"); + out3.shouldHaveExitValue(1); + + OutputAnalyzer out4 = new OutputAnalyzer(pb4.start()); + + out4.stdoutShouldContain("Invalid time 1:30 in JBoltRescheduling"); + out4.shouldHaveExitValue(1); + + OutputAnalyzer out5 = new OutputAnalyzer(pb5.start()); + + out5.stdoutShouldContain("Invalid time 12.30 in JBoltRescheduling"); + out5.shouldHaveExitValue(1); + + OutputAnalyzer out6 = new OutputAnalyzer(pb6.start()); + + out6.stdoutShouldContain("Invalid time 24:61 in JBoltRescheduling"); + out6.shouldHaveExitValue(1); + 
} + + public static void main(String[] args) throws Exception { + testNormalUse(); + testErrorCases(); + } +} \ No newline at end of file diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltVMOptionsTest.java b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltVMOptionsTest.java new file mode 100644 index 000000000..d8fddf9bf --- /dev/null +++ b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltVMOptionsTest.java @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test JBolt VM options. 
+ * @library /test/lib + * @requires vm.flagless + * + * @run driver compiler.codecache.jbolt.JBoltVMOptionsTest + */ + +package compiler.codecache.jbolt; + +import java.io.File; +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.process.ProcessTools; +import jdk.test.lib.Utils; + +public class JBoltVMOptionsTest { + public static final String SRC_DIR = Utils.TEST_SRC; + public static final String TEMP_FILE = SRC_DIR + "/tmp.log"; + + public static void main(String[] args) throws Exception { + test1(); + test2(); + test3(); + test4(); + } + + private static void clearTmpFile() { + File tmp = new File(TEMP_FILE); + if (tmp.exists()) { + tmp.delete(); + } + } + + private static void test1() throws Exception { + ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltDumpMode", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltLoadMode", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pb3 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltLoadMode", + "-XX:+JBoltDumpMode", + "-XX:JBoltOrderFile=" + SRC_DIR + "/o1.log", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pb4 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:JBoltOrderFile=" + TEMP_FILE, + "-Xlog:jbolt*=trace", + "--version" + ); + + OutputAnalyzer out1 = new OutputAnalyzer(pb1.start()); + OutputAnalyzer out2 = new OutputAnalyzer(pb2.start()); + OutputAnalyzer out3 = new OutputAnalyzer(pb3.start()); + OutputAnalyzer out4 = new OutputAnalyzer(pb4.start()); + + String stdout; + + stdout = out1.getStdout(); + if (!stdout.contains("JBoltOrderFile is not set!")) { + throw new RuntimeException(stdout); + } + + stdout = out2.getStdout(); + if 
(!stdout.contains("JBoltOrderFile is not set!")) { + throw new RuntimeException(stdout); + } + + stdout = out3.getStdout(); + if (!stdout.contains("Do not set both JBoltDumpMode and JBoltLoadMode!")) { + throw new RuntimeException(stdout); + } + + stdout = out4.getStdout(); + if (!stdout.contains("JBoltOrderFile is ignored because it is in auto mode.")) { + throw new RuntimeException(stdout); + } + } + + private static void test2() throws Exception { + ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+PrintFlagsFinal", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltDumpMode", + "-XX:JBoltOrderFile=" + TEMP_FILE, + "-XX:+PrintFlagsFinal", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pb3 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltLoadMode", + "-XX:JBoltOrderFile=" + SRC_DIR + "/o1.log", + "-XX:+PrintFlagsFinal", + "-Xlog:jbolt*=trace", + "--version" + ); + + OutputAnalyzer out1 = new OutputAnalyzer(pb1.start()); + OutputAnalyzer out2 = new OutputAnalyzer(pb2.start()); + OutputAnalyzer out3 = new OutputAnalyzer(pb3.start()); + + String stdout; + + stdout = out1.getStdout().replaceAll(" +", ""); + if (!stdout.contains("JBoltDumpMode=false") || !stdout.contains("JBoltLoadMode=false")) { + throw new RuntimeException(stdout); + } + + stdout = out2.getStdout().replaceAll(" +", ""); + if (!stdout.contains("JBoltDumpMode=true") || !stdout.contains("JBoltLoadMode=false")) { + throw new RuntimeException(stdout); + } + + clearTmpFile(); + + stdout = out3.getStdout().replaceAll(" +", ""); + if (!stdout.contains("JBoltDumpMode=false") || !stdout.contains("JBoltLoadMode=true")) { + throw new RuntimeException(stdout); + } + } + + private static void test3() throws Exception { + ProcessBuilder pbF0 = 
ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltLoadMode", + "-XX:JBoltOrderFile=" + TEMP_FILE, + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pbF1 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltLoadMode", + "-XX:JBoltOrderFile=" + SRC_DIR + "/o1.log", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pbF2 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltLoadMode", + "-XX:JBoltOrderFile=" + SRC_DIR + "/o2.log", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pbF3 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltLoadMode", + "-XX:JBoltOrderFile=" + SRC_DIR + "/o3.log", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pbF4 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseJBolt", + "-XX:+JBoltLoadMode", + "-XX:JBoltOrderFile=" + SRC_DIR + "/o4.log", + "-Xlog:jbolt*=trace", + "--version" + ); + + OutputAnalyzer outF0 = new OutputAnalyzer(pbF0.start()); + OutputAnalyzer outF1 = new OutputAnalyzer(pbF1.start()); + OutputAnalyzer outF2 = new OutputAnalyzer(pbF2.start()); + OutputAnalyzer outF3 = new OutputAnalyzer(pbF3.start()); + OutputAnalyzer outF4 = new OutputAnalyzer(pbF4.start()); + + String stdout; + + stdout = outF0.getStdout(); + if (!stdout.contains("JBoltOrderFile does not exist or cannot be accessed!")) { + throw new RuntimeException(stdout); + } + + stdout = outF1.getStdout(); + if (!stdout.contains("Wrong format of JBolt order line! line=\"X 123 aa bb cc\".")) { + throw new RuntimeException(stdout); + } + + stdout = outF2.getStdout(); + if (!stdout.contains("Wrong format of JBolt order line! 
line=\"M aa/bb/C dd ()V\".")) { + throw new RuntimeException(stdout); + } + + stdout = outF3.getStdout(); + if (!stdout.contains("Duplicated method: {aa/bb/CC dd ()V}!")) { + throw new RuntimeException(stdout); + } + + stdout = outF4.getStdout(); + if (stdout.contains("Error occurred during initialization of VM")) { + throw new RuntimeException(stdout); + } + outF4.shouldHaveExitValue(0); + + clearTmpFile(); + } + + private static void test4() throws Exception { + ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+JBoltDumpMode", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:+JBoltLoadMode", + "-Xlog:jbolt*=trace", + "--version" + ); + ProcessBuilder pb3 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:JBoltOrderFile=" + TEMP_FILE, + "-Xlog:jbolt*=trace", + "--version" + ); + + OutputAnalyzer out1 = new OutputAnalyzer(pb1.start()); + OutputAnalyzer out2 = new OutputAnalyzer(pb2.start()); + OutputAnalyzer out3 = new OutputAnalyzer(pb3.start()); + + String stdout; + + stdout = out1.getStdout(); + if (!stdout.contains("Do not set VM option JBoltDumpMode without UseJBolt enabled.")) { + throw new RuntimeException(stdout); + } + + stdout = out2.getStdout(); + if (!stdout.contains("Do not set VM option JBoltLoadMode without UseJBolt enabled.")) { + throw new RuntimeException(stdout); + } + + stdout = out3.getStdout(); + if (!stdout.contains("Do not set VM option JBoltOrderFile without UseJBolt enabled.")) { + throw new RuntimeException(stdout); + } + + clearTmpFile(); + } +} \ No newline at end of file diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/o1.log b/test/hotspot/jtreg/compiler/codecache/jbolt/o1.log new file mode 100644 index 000000000..9d7e8fb8e --- /dev/null +++ b/test/hotspot/jtreg/compiler/codecache/jbolt/o1.log @@ -0,0 +1,2 @@ +M 123 aa/bb/C dd ()V +X 123 
aa bb cc \ No newline at end of file diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/o2.log b/test/hotspot/jtreg/compiler/codecache/jbolt/o2.log new file mode 100644 index 000000000..ef348a6ab --- /dev/null +++ b/test/hotspot/jtreg/compiler/codecache/jbolt/o2.log @@ -0,0 +1,2 @@ +M aa/bb/C dd ()V +M 123 aa/bb/CC dd ()V \ No newline at end of file diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/o3.log b/test/hotspot/jtreg/compiler/codecache/jbolt/o3.log new file mode 100644 index 000000000..fe6906b47 --- /dev/null +++ b/test/hotspot/jtreg/compiler/codecache/jbolt/o3.log @@ -0,0 +1,4 @@ +# this is a comment +C +M 123 aa/bb/CC dd ()V +M 123 aa/bb/CC dd ()V \ No newline at end of file diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/o4.log b/test/hotspot/jtreg/compiler/codecache/jbolt/o4.log new file mode 100644 index 000000000..13e96dbab --- /dev/null +++ b/test/hotspot/jtreg/compiler/codecache/jbolt/o4.log @@ -0,0 +1,12 @@ +M 123 aa/bb/CC dd ()V +# asdfadsfadfs +C +M 456 aa/bb/CC ddd ()V +M 456 aa/bb/CCC dd ()V + +C + + + + +M 456 aa/bb/CCCCCC ddddddd ()V diff --git a/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java b/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java index 499297ad2..fd37b73d4 100644 --- a/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java +++ b/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java @@ -94,7 +94,7 @@ public class MemoryTest { expectedMaxNumPools[HEAP] = expectedNumPools; expectedMinNumPools[NONHEAP] = 2; - expectedMaxNumPools[NONHEAP] = 5; + expectedMaxNumPools[NONHEAP] = 7; checkMemoryPools(); checkMemoryManagers(); diff --git a/test/lib/sun/hotspot/code/BlobType.java b/test/lib/sun/hotspot/code/BlobType.java index 4b5a1e11d..4a7c87334 100644 --- a/test/lib/sun/hotspot/code/BlobType.java +++ b/test/lib/sun/hotspot/code/BlobType.java @@ -46,8 +46,24 @@ public enum BlobType { || type == BlobType.MethodNonProfiled; } }, + // Execution hot non-profiled nmethods + MethodJBoltHot(2, 
"CodeHeap 'jbolt hot nmethods'", "JBoltCodeHeapSize") { + @Override + public boolean allowTypeWhenOverflow(BlobType type) { + return super.allowTypeWhenOverflow(type) + || type == BlobType.MethodNonProfiled; + } + }, + // Execution tmp non-profiled nmethods + MethodJBoltTmp(3, "CodeHeap 'jbolt tmp nmethods'", "JBoltCodeHeapSize") { + @Override + public boolean allowTypeWhenOverflow(BlobType type) { + return super.allowTypeWhenOverflow(type) + || type == BlobType.MethodNonProfiled; + } + }, // Non-nmethods like Buffers, Adapters and Runtime Stubs - NonNMethod(2, "CodeHeap 'non-nmethods'", "NonNMethodCodeHeapSize") { + NonNMethod(4, "CodeHeap 'non-nmethods'", "NonNMethodCodeHeapSize") { @Override public boolean allowTypeWhenOverflow(BlobType type) { return super.allowTypeWhenOverflow(type) @@ -56,7 +72,7 @@ public enum BlobType { } }, // All types (No code cache segmentation) - All(3, "CodeCache", "ReservedCodeCacheSize"); + All(5, "CodeCache", "ReservedCodeCacheSize"); public final int id; public final String sizeOptionName; @@ -99,6 +115,10 @@ public enum BlobType { // there is no MethodProfiled in non tiered world or pure C1 result.remove(MethodProfiled); } + if (!whiteBox.getBooleanVMFlag("UseJBolt") || whiteBox.getBooleanVMFlag("JBoltDumpMode")) { + result.remove(MethodJBoltHot); + result.remove(MethodJBoltTmp); + } return result; } -- 2.47.1