diff --git a/gc/base/AtomicOperations.hpp b/gc/base/AtomicOperations.hpp
index 01c1c9a8928..53bc788782e 100644
--- a/gc/base/AtomicOperations.hpp
+++ b/gc/base/AtomicOperations.hpp
@@ -171,9 +171,17 @@ class MM_AtomicOperations
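+ * @param lock lock word used to serialize the compare-and-swap when OMR_ENV_64BIT_CAPABLE is not defined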
* @return the value at memory location address BEFORE the store was attempted
*/
MMINLINE_DEBUG static uint64_t
- lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue)
+ lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::lockCompareExchangeU64(address, oldValue, newValue);
+ return VM_AtomicSupport::lockCompareExchangeU64(address, oldValue, newValue
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -219,9 +227,17 @@ class MM_AtomicOperations
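+ * @param lock lock word used to serialize the read-modify-write when OMR_ENV_64BIT_CAPABLE is not defined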
* @return The value at memory location address
*/
MMINLINE_DEBUG static uint64_t
- addU64(volatile uint64_t *address, uint64_t addend)
+ addU64(volatile uint64_t *address, uint64_t addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::addU64(address, addend);
+ return VM_AtomicSupport::addU64(address, addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -235,9 +251,17 @@ class MM_AtomicOperations
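+ * @param lock lock word used to serialize the read-modify-write when OMR_ENV_64BIT_CAPABLE is not defined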
* @return The value at memory location address
*/
MMINLINE_DEBUG static double
- addDouble(volatile double *address, double addend)
+ addDouble(volatile double *address, double addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::addDouble(address, addend);
+ return VM_AtomicSupport::addDouble(address, addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -283,9 +307,17 @@ class MM_AtomicOperations
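+ * @param lock lock word used to serialize the read-modify-write when OMR_ENV_64BIT_CAPABLE is not defined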
* @return The value at memory location address
*/
MMINLINE_DEBUG static uint64_t
- subtractU64(volatile uint64_t *address, uint64_t value)
+ subtractU64(volatile uint64_t *address, uint64_t value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::subtractU64(address, value);
+ return VM_AtomicSupport::subtractU64(address, value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -313,9 +345,17 @@ class MM_AtomicOperations
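+ * @param lock lock word used to serialize the store when OMR_ENV_64BIT_CAPABLE is not defined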
* @note This method can spin indefinitely while attempting to write the new value.
*/
MMINLINE_DEBUG static void
- setU64(volatile uint64_t *address, uint64_t value)
+ setU64(volatile uint64_t *address, uint64_t value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- VM_AtomicSupport::setU64(address, value);
+ VM_AtomicSupport::setU64(address, value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -327,9 +367,17 @@ class MM_AtomicOperations
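+ * @param lock lock word used to serialize the read when OMR_ENV_64BIT_CAPABLE is not defined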
* @return the value stored at the address.
*/
MMINLINE_DEBUG static uint64_t
- getU64(volatile uint64_t *address)
+ getU64(volatile uint64_t *address
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::getU64(address);
+ return VM_AtomicSupport::getU64(address
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
diff --git a/gc/stats/ScavengerCopyScanRatio.hpp b/gc/stats/ScavengerCopyScanRatio.hpp
index 9b0628f0f9b..499795e99d1 100644
--- a/gc/stats/ScavengerCopyScanRatio.hpp
+++ b/gc/stats/ScavengerCopyScanRatio.hpp
@@ -115,6 +115,9 @@ class MM_ScavengerCopyScanRatio
 volatile uint64_t _accumulatingSamples; /**< accumulator for aggregating per thread wait/copy/scan updates -- these are periodically latched into _accumulatedSamples and reset */
volatile uint64_t _accumulatedSamples; /**< most recent aggregate wait/copy/scan counts from SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE samples */
volatile uintptr_t _majorUpdateThreadEnv; /**< a token for the thread that has claimed a major update and owns the critical region wherein the update is effected */
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ uint32_t _atomicLockWord; /**< lock word used to serialize 64-bit atomic operations on platforms where OMR_ENV_64BIT_CAPABLE is not defined */
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
uintptr_t _scalingUpdateCount; /**< the number of times _accumulatingSamples was latched into _accumulatedSamples */
uintptr_t _overflowCount; /**< the number of times _accumulatingSamples overflowed one or more counters */
uint64_t _resetTimestamp; /**< timestamp at reset() */
@@ -134,6 +137,9 @@ class MM_ScavengerCopyScanRatio
_accumulatingSamples(0)
,_accumulatedSamples(SCAVENGER_COUNTER_DEFAULT_ACCUMULATOR)
,_majorUpdateThreadEnv(0)
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ ,_atomicLockWord(0)
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
,_scalingUpdateCount(0)
,_overflowCount(0)
,_resetTimestamp(0)
@@ -152,7 +158,11 @@ class MM_ScavengerCopyScanRatio
MMINLINE double
getScalingFactor(MM_EnvironmentBase* env)
{
- uint64_t accumulatedSamples = MM_AtomicOperations::getU64(&_accumulatedSamples);
+ uint64_t accumulatedSamples = MM_AtomicOperations::getU64(&_accumulatedSamples
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , _atomicLockWord
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
return getScalingFactor(env, _threadCount, waits(accumulatedSamples), copied(accumulatedSamples), scanned(accumulatedSamples), updates(accumulatedSamples));
}
@@ -232,7 +242,11 @@ class MM_ScavengerCopyScanRatio
majorUpdate(MM_EnvironmentBase* env, uint64_t updateResult, uintptr_t nonEmptyScanLists, uintptr_t cachesQueued) {
if (0 == (SCAVENGER_COUNTER_OVERFLOW & updateResult)) {
/* no overflow so latch updateResult into _accumulatedSamples and record the update */
- MM_AtomicOperations::setU64(&_accumulatedSamples, updateResult);
+ MM_AtomicOperations::setU64(&_accumulatedSamples, updateResult
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , _atomicLockWord
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
_scalingUpdateCount += 1;
_threadCount = record(env, nonEmptyScanLists, cachesQueued);
} else {
@@ -363,11 +377,19 @@ class MM_ScavengerCopyScanRatio
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = &_accumulatingSamples;
uint64_t oldValue = *localAddr;
- if (oldValue == MM_AtomicOperations::lockCompareExchangeU64(localAddr, oldValue, oldValue + threadUpdate)) {
+ if (oldValue == MM_AtomicOperations::lockCompareExchangeU64(localAddr, oldValue, oldValue + threadUpdate
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , _atomicLockWord
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )) {
newValue = oldValue + threadUpdate;
uint64_t updateCount = updates(newValue);
if (SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE <= updateCount) {
- MM_AtomicOperations::setU64(&_accumulatingSamples, 0);
+ MM_AtomicOperations::setU64(&_accumulatingSamples, 0
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , _atomicLockWord
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
if (SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE < updateCount) {
newValue = 0;
}
diff --git a/include_core/AtomicSupport.hpp b/include_core/AtomicSupport.hpp
index b15de456d49..f9571215f7d 100644
--- a/include_core/AtomicSupport.hpp
+++ b/include_core/AtomicSupport.hpp
@@ -352,8 +352,8 @@ class VM_AtomicSupport
}
#endif /* defined(ATOMIC_ALLOW_PRE_READ) */
#if defined(OMRZTPF)
- cs((cs_t *)&oldValue, (cs_t *)address, (cs_t)newValue);
- return oldValue;
+ cs((cs_t *)&oldValue, (cs_t *)address, (cs_t)newValue);
+ return oldValue;
#elif defined(__GNUC__) /* defined(OMRZTPF) */
/* Assume GCC >= 4.2 */
return __sync_val_compare_and_swap(address, oldValue, newValue);
@@ -388,7 +388,12 @@ class VM_AtomicSupport
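+ * @param lock lock word used to serialize the compare-and-swap when OMR_ENV_64BIT_CAPABLE is not defined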
* @return the value at memory location address BEFORE the store was attempted
*/
VMINLINE static uint64_t
- lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue, bool readBeforeCAS = false)
+ lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue,
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ uint32_t &lock,
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ bool readBeforeCAS = false
+ )
{
#if defined(ATOMIC_SUPPORT_STUB)
return 0;
@@ -406,7 +411,22 @@ class VM_AtomicSupport
}
}
#endif /* defined(ATOMIC_ALLOW_PRE_READ) */
-#if defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) /* defined(ATOMIC_SUPPORT_STUB) */
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ {
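+ /* No native 64-bit compare-and-swap on this platform: acquire the caller-supplied
+  * 32-bit spinlock word, perform the compare and conditional store under the lock,
+  * then release the lock after a write barrier publishes the update. */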
+ uint64_t currentValue;
+ while (lockCompareExchangeU32(&lock, 0, 1) != 0) {}
+#if defined(__xlC__)
+ readBarrier();
+#endif /* defined(__xlC__) */
+ currentValue = *address;
+ if (currentValue == oldValue) {
+ *address = newValue;
+ }
+ writeBarrier();
+ lock = 0;
+ return currentValue;
+ }
+#elif defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) /* !defined(OMR_ENV_64BIT_CAPABLE) */
return OMRCAS8Helper(address, ((uint32_t*)&oldValue)[1], ((uint32_t*)&oldValue)[0], ((uint32_t*)&newValue)[1], ((uint32_t*)&newValue)[0]);
#elif defined(OMRZTPF) /* defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) */
csg((csg_t *)&oldValue, (csg_t *)address, (csg_t)newValue);
@@ -591,14 +611,22 @@ class VM_AtomicSupport
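+ * @param lock lock word used to serialize the read-modify-write when OMR_ENV_64BIT_CAPABLE is not defined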
* @return The value at memory location address
*/
VMINLINE static uint64_t
- addU64(volatile uint64_t *address, uint64_t addend)
+ addU64(volatile uint64_t *address, uint64_t addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;
oldValue = (uint64_t)*localAddr;
- while ((lockCompareExchangeU64(localAddr, oldValue, oldValue + addend)) != oldValue) {
+ while ((lockCompareExchangeU64(localAddr, oldValue, oldValue + addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue + addend;
@@ -615,7 +643,11 @@ class VM_AtomicSupport
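+ * @param lock lock word used to serialize the read-modify-write when OMR_ENV_64BIT_CAPABLE is not defined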
* @return The value at memory location address
*/
VMINLINE static double
- addDouble(volatile double *address, double addend)
+ addDouble(volatile double *address, double addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
/* double is stored as 64bit */
/* Stop compiler optimizing away load of oldValue */
@@ -624,7 +656,11 @@ class VM_AtomicSupport
double oldValue = *address;
double newValue = oldValue + addend;
- while (lockCompareExchangeU64(localAddr, *(uint64_t *)&oldValue, *(uint64_t *)&newValue) != *(uint64_t *)&oldValue) {
+ while (lockCompareExchangeU64(localAddr, *(uint64_t *)&oldValue, *(uint64_t *)&newValue
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ ) != *(uint64_t *)&oldValue) {
oldValue = *address;
newValue = oldValue + addend;
}
@@ -666,14 +702,22 @@ class VM_AtomicSupport
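+ * @param lock lock word used to serialize the read-modify-write when OMR_ENV_64BIT_CAPABLE is not defined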
* @return The value at memory location address
*/
VMINLINE static uint64_t
- subtractU64(volatile uint64_t *address, uint64_t value)
+ subtractU64(volatile uint64_t *address, uint64_t value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;
oldValue = (uint64_t)*localAddr;
- while ((lockCompareExchangeU64(localAddr, oldValue, oldValue - value)) != oldValue) {
+ while ((lockCompareExchangeU64(localAddr, oldValue, oldValue - value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue - value;
@@ -740,14 +784,22 @@ class VM_AtomicSupport
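+ * @param lock lock word used to serialize the store when OMR_ENV_64BIT_CAPABLE is not defined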
* @note This method can spin indefinitely while attempting to write the new value.
*/
VMINLINE static uint64_t
- setU64(volatile uint64_t *address, uint64_t value)
+ setU64(volatile uint64_t *address, uint64_t value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;
oldValue = (uint64_t)*localAddr;
- while ((lockCompareExchangeU64(localAddr, oldValue, value)) != oldValue) {
+ while ((lockCompareExchangeU64(localAddr, oldValue, value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue;
@@ -762,13 +814,21 @@ class VM_AtomicSupport
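+ * @param lock lock word used to serialize the read when OMR_ENV_64BIT_CAPABLE is not defined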
* @return the value stored at the address.
*/
VMINLINE static uint64_t
- getU64(volatile uint64_t *address)
+ getU64(volatile uint64_t *address
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
uint64_t value = *address;
#if !defined(OMR_ENV_DATA64)
/* this is necessary to ensure atomic read of 64-bit value on 32/31-bit platforms */
- value = lockCompareExchangeU64(address, value, value);
+ value = lockCompareExchangeU64(address, value, value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
#endif /* !defined(OMR_ENV_DATA64) */
return value;