Merge pull request #1891 from SAP/pr-jdk-25+4
Merge to tag jdk-25+4
RealCLanger authored Jan 3, 2025
2 parents a90f7ca + a87bc7e commit 05e57bb
Showing 106 changed files with 6,034 additions and 783 deletions.
12 changes: 6 additions & 6 deletions src/hotspot/cpu/s390/vm_version_s390.cpp
@@ -308,6 +308,12 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
}

// The OptoScheduling information is not maintained in s390.ad.
if (OptoScheduling) {
warning("OptoScheduling is not supported on this CPU.");
FLAG_SET_DEFAULT(OptoScheduling, false);
}
#endif
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
FLAG_SET_DEFAULT(UsePopCountInstruction, true);
@@ -323,12 +329,6 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
}

// The OptoScheduling information is not maintained in s390.ad.
if (OptoScheduling) {
warning("OptoScheduling is not supported on this CPU.");
FLAG_SET_DEFAULT(OptoScheduling, false);
}
}


34 changes: 26 additions & 8 deletions src/hotspot/os/linux/os_linux.cpp
@@ -3331,6 +3331,8 @@ bool os::Linux::libnuma_init() {
libnuma_dlsym(handle, "numa_set_bind_policy")));
set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
libnuma_dlsym(handle, "numa_bitmask_isbitset")));
set_numa_bitmask_equal(CAST_TO_FN_PTR(numa_bitmask_equal_func_t,
libnuma_dlsym(handle, "numa_bitmask_equal")));
set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
libnuma_dlsym(handle, "numa_distance")));
set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
@@ -3341,13 +3343,16 @@ bool os::Linux::libnuma_init() {
libnuma_dlsym(handle, "numa_move_pages")));
set_numa_set_preferred(CAST_TO_FN_PTR(numa_set_preferred_func_t,
libnuma_dlsym(handle, "numa_set_preferred")));
set_numa_get_run_node_mask(CAST_TO_FN_PTR(numa_get_run_node_mask_func_t,
libnuma_v2_dlsym(handle, "numa_get_run_node_mask")));

if (numa_available() != -1) {
set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
set_numa_interleave_bitmask(_numa_get_interleave_mask());
set_numa_membind_bitmask(_numa_get_membind());
set_numa_cpunodebind_bitmask(_numa_get_run_node_mask());
// Create an index -> node mapping, since nodes are not always consecutive
_nindex_to_node = new (mtInternal) GrowableArray<int>(0, mtInternal);
rebuild_nindex_to_node_map();
@@ -3524,9 +3529,11 @@ os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_bitmask_equal_func_t os::Linux::_numa_bitmask_equal;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
os::Linux::numa_get_run_node_mask_func_t os::Linux::_numa_get_run_node_mask;
os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred;
os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
@@ -3535,6 +3542,7 @@ struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
struct bitmask* os::Linux::_numa_interleave_bitmask;
struct bitmask* os::Linux::_numa_membind_bitmask;
struct bitmask* os::Linux::_numa_cpunodebind_bitmask;

bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
@@ -4559,19 +4567,19 @@ void os::Linux::numa_init() {
// bitmask when externally configured to run on all or fewer nodes.

if (!Linux::libnuma_init()) {
FLAG_SET_ERGO(UseNUMA, false);
FLAG_SET_ERGO(UseNUMAInterleaving, false); // Also depends on libnuma.
disable_numa("Failed to initialize libnuma");
} else {
if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) {
// If there's only one node (they start from 0) or if the process
// is bound explicitly to a single node using membind, disable NUMA
UseNUMA = false;
Linux::set_configured_numa_policy(Linux::identify_numa_policy());
if (Linux::numa_max_node() < 1) {
disable_numa("Only a single NUMA node is available");
} else if (Linux::is_bound_to_single_mem_node()) {
disable_numa("The process is bound to a single NUMA node");
} else if (Linux::mem_and_cpu_node_mismatch()) {
disable_numa("The process memory and cpu node configuration does not match");
} else {
LogTarget(Info,os) log;
LogStream ls(log);

Linux::set_configured_numa_policy(Linux::identify_numa_policy());

struct bitmask* bmp = Linux::_numa_membind_bitmask;
const char* numa_mode = "membind";

Expand Down Expand Up @@ -4609,6 +4617,16 @@ void os::Linux::numa_init() {
}
}

void os::Linux::disable_numa(const char* reason) {
if ((UseNUMA && FLAG_IS_CMDLINE(UseNUMA)) ||
(UseNUMAInterleaving && FLAG_IS_CMDLINE(UseNUMAInterleaving))) {
// Only issue a warning if the user explicitly asked for NUMA support
log_warning(os)("NUMA support disabled: %s", reason);
}
FLAG_SET_ERGO(UseNUMA, false);
FLAG_SET_ERGO(UseNUMAInterleaving, false);
}

#if defined(IA32) && !defined(ZERO)
/*
* Work-around (execute code at a high address) for broken NX emulation using CS limit,
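The libnuma_init() hunk above resolves two additional libnuma entry points (numa_bitmask_equal and numa_get_run_node_mask) at runtime through HotSpot's libnuma_dlsym/libnuma_v2_dlsym and CAST_TO_FN_PTR helpers. As a minimal standalone sketch of that lookup pattern using plain dlopen/dlsym rather than the HotSpot wrappers (the library name and the -ldl link flag are assumptions about the host):

// Sketch only: resolve an optional libnuma symbol at runtime and null-check it,
// mirroring the runtime lookup performed in libnuma_init() above.
// Build (Linux): g++ resolve_numa.cpp -ldl   (-ldl is only needed on older glibc)
#include <dlfcn.h>
#include <cstdio>

struct bitmask;  // opaque type owned by libnuma
typedef int (*numa_bitmask_equal_func_t)(struct bitmask*, struct bitmask*);

int main() {
  void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
  if (handle == nullptr) {
    std::printf("libnuma not present: %s\n", dlerror());
    return 1;
  }
  // dlsym() returns nullptr if the symbol is not exported; callers must check
  // before use, since older libnuma builds may lack newer entry points.
  numa_bitmask_equal_func_t bitmask_equal =
      reinterpret_cast<numa_bitmask_equal_func_t>(dlsym(handle, "numa_bitmask_equal"));
  std::printf("numa_bitmask_equal: %s\n", bitmask_equal != nullptr ? "resolved" : "missing");
  dlclose(handle);
  return 0;
}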
37 changes: 32 additions & 5 deletions src/hotspot/os/linux/os_linux.hpp
@@ -195,6 +195,7 @@ class os::Linux {
private:
static void numa_init();

static void disable_numa(const char* reason);
typedef int (*sched_getcpu_func_t)(void);
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
typedef int (*numa_node_to_cpus_v2_func_t)(int node, void *mask);
@@ -206,10 +207,12 @@ class os::Linux {
typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
typedef struct bitmask* (*numa_get_membind_func_t)(void);
typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void);
typedef struct bitmask* (*numa_get_run_node_mask_func_t)(void);
typedef long (*numa_move_pages_func_t)(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags);
typedef void (*numa_set_preferred_func_t)(int node);
typedef void (*numa_set_bind_policy_func_t)(int policy);
typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
typedef int (*numa_bitmask_equal_func_t)(struct bitmask *bmp1, struct bitmask *bmp2);
typedef int (*numa_distance_func_t)(int node1, int node2);

static sched_getcpu_func_t _sched_getcpu;
@@ -223,8 +226,10 @@ class os::Linux {
static numa_interleave_memory_v2_func_t _numa_interleave_memory_v2;
static numa_set_bind_policy_func_t _numa_set_bind_policy;
static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset;
static numa_bitmask_equal_func_t _numa_bitmask_equal;
static numa_distance_func_t _numa_distance;
static numa_get_membind_func_t _numa_get_membind;
static numa_get_run_node_mask_func_t _numa_get_run_node_mask;
static numa_get_interleave_mask_func_t _numa_get_interleave_mask;
static numa_move_pages_func_t _numa_move_pages;
static numa_set_preferred_func_t _numa_set_preferred;
Expand All @@ -233,6 +238,7 @@ class os::Linux {
static struct bitmask* _numa_nodes_ptr;
static struct bitmask* _numa_interleave_bitmask;
static struct bitmask* _numa_membind_bitmask;
static struct bitmask* _numa_cpunodebind_bitmask;

static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
@@ -245,8 +251,10 @@ class os::Linux {
static void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; }
static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; }
static void set_numa_bitmask_equal(numa_bitmask_equal_func_t func) { _numa_bitmask_equal = func; }
static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; }
static void set_numa_get_run_node_mask(numa_get_run_node_mask_func_t func) { _numa_get_run_node_mask = func; }
static void set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; }
static void set_numa_move_pages(numa_move_pages_func_t func) { _numa_move_pages = func; }
static void set_numa_set_preferred(numa_set_preferred_func_t func) { _numa_set_preferred = func; }
@@ -255,6 +263,7 @@ class os::Linux {
static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == nullptr ? nullptr : *ptr); }
static void set_numa_interleave_bitmask(struct bitmask* ptr) { _numa_interleave_bitmask = ptr ; }
static void set_numa_membind_bitmask(struct bitmask* ptr) { _numa_membind_bitmask = ptr ; }
static void set_numa_cpunodebind_bitmask(struct bitmask* ptr) { _numa_cpunodebind_bitmask = ptr ; }
static int sched_getcpu_syscall(void);

enum NumaAllocationPolicy{
@@ -360,21 +369,26 @@ class os::Linux {
}
return false;
}
// Check if bound to only one numa node.
// Returns true if bound to a single numa node, otherwise returns false.
static bool is_bound_to_single_node() {
// Check if memory is bound to only one numa node.
// Returns true if memory is bound to a single numa node, otherwise returns false.
static bool is_bound_to_single_mem_node() {
int nodes = 0;
unsigned int node = 0;
unsigned int highest_node_number = 0;

if (_numa_membind_bitmask != nullptr && _numa_max_node != nullptr && _numa_bitmask_isbitset != nullptr) {
struct bitmask* mem_nodes_bitmask = Linux::_numa_membind_bitmask;
if (Linux::is_running_in_interleave_mode()) {
mem_nodes_bitmask = Linux::_numa_interleave_bitmask;
}

if (mem_nodes_bitmask != nullptr && _numa_max_node != nullptr && _numa_bitmask_isbitset != nullptr) {
highest_node_number = _numa_max_node();
} else {
return false;
}

for (node = 0; node <= highest_node_number; node++) {
if (_numa_bitmask_isbitset(_numa_membind_bitmask, node)) {
if (_numa_bitmask_isbitset(mem_nodes_bitmask, node)) {
nodes++;
}
}
Expand All @@ -385,6 +399,19 @@ class os::Linux {
return false;
}
}
// Check if cpu and memory nodes are aligned, returns true if nodes misalign
static bool mem_and_cpu_node_mismatch() {
struct bitmask* mem_nodes_bitmask = Linux::_numa_membind_bitmask;
if (Linux::is_running_in_interleave_mode()) {
mem_nodes_bitmask = Linux::_numa_interleave_bitmask;
}

if (mem_nodes_bitmask == nullptr || Linux::_numa_cpunodebind_bitmask == nullptr) {
return false;
}

return !_numa_bitmask_equal(mem_nodes_bitmask, Linux::_numa_cpunodebind_bitmask);
}

static const GrowableArray<int>* numa_nindex_to_node() {
return _nindex_to_node;
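The new is_bound_to_single_mem_node() and mem_and_cpu_node_mismatch() helpers above compare the process's memory-node binding (or its interleave mask) with the set of nodes it is allowed to run on. The same comparison can be expressed against the public libnuma API; the following is an illustration, not the HotSpot code, assuming libnuma is installed, the program is linked with -lnuma, and ignoring the interleave-mode case handled above:

// Illustration: does the memory binding match the cpu-node binding?
// Build (Linux): g++ numa_mismatch.cpp -lnuma
#include <numa.h>
#include <cstdio>

int main() {
  if (numa_available() == -1) {
    std::printf("libnuma is not available on this system\n");
    return 1;
  }
  struct bitmask* mem_nodes = numa_get_membind();        // nodes memory may be allocated on
  struct bitmask* cpu_nodes = numa_get_run_node_mask();  // nodes the task may run on
  if (numa_bitmask_equal(mem_nodes, cpu_nodes)) {
    std::printf("memory and cpu node bindings match\n");
  } else {
    std::printf("memory and cpu node bindings differ\n");
  }
  return 0;
}

On a multi-node machine a mismatch can be provoked with, for example, numactl --cpunodebind=0 --membind=1 <command>; with this change numa_init() detects that configuration and disables the JVM's NUMA support rather than proceeding with inconsistent node sets.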
8 changes: 8 additions & 0 deletions src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
@@ -198,6 +198,14 @@ size_t G1HeapSizingPolicy::young_collection_expansion_amount() {
}

static size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) {
assert(free_ratio <= 100, "precondition");
if (free_ratio == 100) {
// If 100 then below calculations will divide by zero and return min of
// resulting infinity and MaxHeapSize. Avoid issues of UB vs is_iec559
// and ubsan warnings, and just immediately return MaxHeapSize.
return MaxHeapSize;
}

const double desired_free_percentage = (double) free_ratio / 100.0;
const double desired_used_percentage = 1.0 - desired_free_percentage;

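The early return exists because, per the comment, the remainder of target_heap_capacity() divides used_bytes by desired_used_percentage and clamps the result against MaxHeapSize, so free_ratio == 100 would otherwise divide by zero. A self-contained sketch of that arithmetic with hypothetical names (target_capacity, max_heap), not the actual G1 function:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for the guarded calculation:
// target = used / (1 - free_ratio/100), clamped to the maximum heap size.
static size_t target_capacity(size_t used_bytes, unsigned free_ratio, size_t max_heap) {
  if (free_ratio >= 100) {
    return max_heap;  // 1 - free_ratio/100 is 0 here, so skip the division below
  }
  const double desired_used_percentage = 1.0 - (double)free_ratio / 100.0;
  return std::min((size_t)((double)used_bytes / desired_used_percentage), max_heap);
}

int main() {
  const size_t max_heap = 2UL * 1024 * 1024 * 1024;                  // 2 GiB
  std::printf("%zu\n", target_capacity(1UL << 30, 30, max_heap));    // ~1.43 GiB: 1 GiB / 0.70
  std::printf("%zu\n", target_capacity(1UL << 30, 100, max_heap));   // 2 GiB via the guard
  return 0;
}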