Add routines for 64-bit CAS on 32-bit PPC CPUs
Signed-off-by: Younes Manton <[email protected]>
ymanton committed Sep 14, 2018
1 parent 1ebd8d7 commit 0b769dc
Showing 3 changed files with 160 additions and 30 deletions.
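The change that runs through all three files is a lock-word fallback for 64-bit atomics on CPUs that cannot issue a native 64-bit compare-and-swap, i.e. builds where OMR_ENV_64BIT_CAPABLE is not defined, such as 32-bit PPC. A minimal sketch of the idea follows; it is illustrative rather than the exact OMR implementation, and it assumes a native 32-bit CAS primitive (here called casU32) and a writeBarrier() helper. Every 64-bit access is serialized through a shared 32-bit spin lock, and the 64-bit compare-and-swap itself is performed non-atomically while the lock is held.

#include <stdint.h>

/* Assumed primitives: a native 32-bit CAS and a store barrier. */
uint32_t casU32(volatile uint32_t *addr, uint32_t oldVal, uint32_t newVal);
void writeBarrier(void);

/* Emulate a 64-bit CAS by spinning on a 32-bit lock word. Every access to
 * *address must go through the same lock word for this to be correct.
 */
uint64_t
emulatedCasU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue, uint32_t &lock)
{
	/* Acquire: spin until the lock word transitions from 0 to 1. */
	while (0 != casU32(&lock, 0, 1)) {}

	uint64_t currentValue = *address;
	if (currentValue == oldValue) {
		*address = newValue;
	}

	/* Publish the store before the lock is released. */
	writeBarrier();
	lock = 0;
	return currentValue;
}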
72 changes: 60 additions & 12 deletions gc/base/AtomicOperations.hpp
@@ -171,9 +171,17 @@ class MM_AtomicOperations
* @return the value at memory location <b>address</b> BEFORE the store was attempted
*/
MMINLINE_DEBUG static uint64_t
lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue)
lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
return VM_AtomicSupport::lockCompareExchangeU64(address, oldValue, newValue);
return VM_AtomicSupport::lockCompareExchangeU64(address, oldValue, newValue
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
@@ -219,9 +227,17 @@ class MM_AtomicOperations
* @return The value at memory location <b>address</b>
*/
MMINLINE_DEBUG static uint64_t
addU64(volatile uint64_t *address, uint64_t addend)
addU64(volatile uint64_t *address, uint64_t addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
return VM_AtomicSupport::addU64(address, addend);
return VM_AtomicSupport::addU64(address, addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
@@ -235,9 +251,17 @@ class MM_AtomicOperations
* @return The value at memory location <b>address</b>
*/
MMINLINE_DEBUG static double
addDouble(volatile double *address, double addend)
addDouble(volatile double *address, double addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
return VM_AtomicSupport::addDouble(address, addend);
return VM_AtomicSupport::addDouble(address, addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
@@ -283,9 +307,17 @@ class MM_AtomicOperations
* @return The value at memory location <b>address</b>
*/
MMINLINE_DEBUG static uint64_t
subtractU64(volatile uint64_t *address, uint64_t value)
subtractU64(volatile uint64_t *address, uint64_t value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
return VM_AtomicSupport::subtractU64(address, value);
return VM_AtomicSupport::subtractU64(address, value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
@@ -313,9 +345,17 @@ class MM_AtomicOperations
* @note This method can spin indefinitely while attempting to write the new value.
*/
MMINLINE_DEBUG static void
setU64(volatile uint64_t *address, uint64_t value)
setU64(volatile uint64_t *address, uint64_t value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
VM_AtomicSupport::setU64(address, value);
VM_AtomicSupport::setU64(address, value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
@@ -327,9 +367,17 @@ class MM_AtomicOperations
* @return the value stored at the address.
*/
MMINLINE_DEBUG static uint64_t
getU64(volatile uint64_t *address)
getU64(volatile uint64_t *address
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif
)
{
return VM_AtomicSupport::getU64(address);
return VM_AtomicSupport::getU64(address
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif
);
}

/**
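Usage note (a hedged sketch, not code from this commit): on builds where OMR_ENV_64BIT_CAPABLE is not defined, each 64-bit MM_AtomicOperations wrapper above now takes an extra uint32_t &lock argument, and the caller must supply a lock word that is shared by all accesses to the same 64-bit location. The counter and lock names below are illustrative.

#include <stdint.h>
#include "AtomicOperations.hpp" /* MM_AtomicOperations */

volatile uint64_t bytesCopied = 0;
#if !defined(OMR_ENV_64BIT_CAPABLE)
uint32_t bytesCopiedLock = 0;
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */

void
recordCopy(uint64_t size)
{
	MM_AtomicOperations::addU64(&bytesCopied, size
#if !defined(OMR_ENV_64BIT_CAPABLE)
		, bytesCopiedLock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
	);
}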
30 changes: 26 additions & 4 deletions gc/stats/ScavengerCopyScanRatio.hpp
@@ -115,6 +115,9 @@ class MM_ScavengerCopyScanRatio
volatile uint64_t _accumulatingSamples; /**< accumulator for aggregating per thread wait/copy/scan counts -- these are periodically latched into _accumulatedSamples and reset */
volatile uint64_t _accumulatedSamples; /**< most recent aggregate wait/copy/scan counts from SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE samples */
volatile uintptr_t _majorUpdateThreadEnv; /**< a token for the thread that has claimed a major update and owns the critical region wherein the update is effected */
#if !defined(OMR_ENV_64BIT_CAPABLE)
uint32_t _atomicLockWord; /**< lock word serializing 64-bit atomic accesses to the sample accumulators on platforms that are not 64-bit capable */
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
uintptr_t _scalingUpdateCount; /**< the number of times _accumulatingSamples was latched into _accumulatedSamples */
uintptr_t _overflowCount; /**< the number of times _accumulatingSamples overflowed one or more counters */
uint64_t _resetTimestamp; /**< timestamp at reset() */
@@ -134,6 +137,9 @@ class MM_ScavengerCopyScanRatio
_accumulatingSamples(0)
,_accumulatedSamples(SCAVENGER_COUNTER_DEFAULT_ACCUMULATOR)
,_majorUpdateThreadEnv(0)
#if !defined(OMR_ENV_64BIT_CAPABLE)
,_atomicLockWord(0)
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
,_scalingUpdateCount(0)
,_overflowCount(0)
,_resetTimestamp(0)
@@ -152,7 +158,11 @@
MMINLINE double
getScalingFactor(MM_EnvironmentBase* env)
{
uint64_t accumulatedSamples = MM_AtomicOperations::getU64(&_accumulatedSamples);
uint64_t accumulatedSamples = MM_AtomicOperations::getU64(&_accumulatedSamples
#if !defined(OMR_ENV_64BIT_CAPABLE)
, _atomicLockWord
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
return getScalingFactor(env, _threadCount, waits(accumulatedSamples), copied(accumulatedSamples), scanned(accumulatedSamples), updates(accumulatedSamples));
}

@@ -232,7 +242,11 @@ class MM_ScavengerCopyScanRatio
majorUpdate(MM_EnvironmentBase* env, uint64_t updateResult, uintptr_t nonEmptyScanLists, uintptr_t cachesQueued) {
if (0 == (SCAVENGER_COUNTER_OVERFLOW & updateResult)) {
/* no overflow so latch updateResult into _accumulatedSamples and record the update */
MM_AtomicOperations::setU64(&_accumulatedSamples, updateResult);
MM_AtomicOperations::setU64(&_accumulatedSamples, updateResult
#if !defined(OMR_ENV_64BIT_CAPABLE)
, _atomicLockWord
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
_scalingUpdateCount += 1;
_threadCount = record(env, nonEmptyScanLists, cachesQueued);
} else {
@@ -363,11 +377,19 @@ class MM_ScavengerCopyScanRatio
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = &_accumulatingSamples;
uint64_t oldValue = *localAddr;
if (oldValue == MM_AtomicOperations::lockCompareExchangeU64(localAddr, oldValue, oldValue + threadUpdate)) {
if (oldValue == MM_AtomicOperations::lockCompareExchangeU64(localAddr, oldValue, oldValue + threadUpdate
#if !defined(OMR_ENV_64BIT_CAPABLE)
, _atomicLockWord
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)) {
newValue = oldValue + threadUpdate;
uint64_t updateCount = updates(newValue);
if (SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE <= updateCount) {
MM_AtomicOperations::setU64(&_accumulatingSamples, 0);
MM_AtomicOperations::setU64(&_accumulatingSamples, 0
#if !defined(OMR_ENV_64BIT_CAPABLE)
, _atomicLockWord
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
if (SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE < updateCount) {
newValue = 0;
}
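The pattern above generalizes: a class that owns 64-bit counters embeds a single 32-bit lock word and threads it through every 64-bit atomic on those counters, so all accesses to one instance serialize through the same lock. A reduced sketch, with illustrative names mirroring ScavengerCopyScanRatio, follows.

#include <stdint.h>
#include "AtomicOperations.hpp" /* MM_AtomicOperations */

class SampleAccumulator
{
private:
	volatile uint64_t _accumulatedSamples;
#if !defined(OMR_ENV_64BIT_CAPABLE)
	uint32_t _atomicLockWord; /* guards every 64-bit atomic on this instance */
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */

public:
	SampleAccumulator()
		: _accumulatedSamples(0)
#if !defined(OMR_ENV_64BIT_CAPABLE)
		, _atomicLockWord(0)
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
	{}

	uint64_t
	read()
	{
		return MM_AtomicOperations::getU64(&_accumulatedSamples
#if !defined(OMR_ENV_64BIT_CAPABLE)
			, _atomicLockWord
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
		);
	}
};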
88 changes: 74 additions & 14 deletions include_core/AtomicSupport.hpp
@@ -352,8 +352,8 @@ class VM_AtomicSupport
}
#endif /* defined(ATOMIC_ALLOW_PRE_READ) */
#if defined(OMRZTPF)
cs((cs_t *)&oldValue, (cs_t *)address, (cs_t)newValue);
return oldValue;
cs((cs_t *)&oldValue, (cs_t *)address, (cs_t)newValue);
return oldValue;
#elif defined(__GNUC__) /* defined(OMRZTPF) */
/* Assume GCC >= 4.2 */
return __sync_val_compare_and_swap(address, oldValue, newValue);
@@ -388,7 +388,12 @@
* @return the value at memory location <b>address</b> BEFORE the store was attempted
*/
VMINLINE static uint64_t
lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue, bool readBeforeCAS = false)
lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue,
#if !defined(OMR_ENV_64BIT_CAPABLE)
uint32_t &lock,
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
bool readBeforeCAS = false
)
{
#if defined(ATOMIC_SUPPORT_STUB)
return 0;
Expand All @@ -406,7 +411,22 @@ class VM_AtomicSupport
}
}
#endif /* defined(ATOMIC_ALLOW_PRE_READ) */
#if defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) /* defined(ATOMIC_SUPPORT_STUB) */
#if !defined(OMR_ENV_64BIT_CAPABLE)
{
uint64_t currentValue;
while (lockCompareExchangeU32(&lock, 0, 1) != 0) {}
#if defined(__xlC__)
readBarrier();
#endif /* defined(__xlC__) */
currentValue = *address;
if (currentValue == oldValue) {
*address = newValue;
}
writeBarrier();
lock = 0;
return currentValue;
}
#elif defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) /* !defined(OMR_ENV_64BIT_CAPABLE) */
return OMRCAS8Helper(address, ((uint32_t*)&oldValue)[1], ((uint32_t*)&oldValue)[0], ((uint32_t*)&newValue)[1], ((uint32_t*)&newValue)[0]);
#elif defined(OMRZTPF) /* defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) */
csg((csg_t *)&oldValue, (csg_t *)address, (csg_t)newValue);
@@ -591,14 +611,22 @@
* @return The value at memory location <b>address</b>
*/
VMINLINE static uint64_t
addU64(volatile uint64_t *address, uint64_t addend)
addU64(volatile uint64_t *address, uint64_t addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;

oldValue = (uint64_t)*localAddr;
while ((lockCompareExchangeU64(localAddr, oldValue, oldValue + addend)) != oldValue) {
while ((lockCompareExchangeU64(localAddr, oldValue, oldValue + addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue + addend;
Expand All @@ -615,7 +643,11 @@ class VM_AtomicSupport
* @return The value at memory location <b>address</b>
*/
VMINLINE static double
addDouble(volatile double *address, double addend)
addDouble(volatile double *address, double addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
/* double is stored as 64bit */
/* Stop compiler optimizing away load of oldValue */
Expand All @@ -624,7 +656,11 @@ class VM_AtomicSupport
double oldValue = *address;
double newValue = oldValue + addend;

while (lockCompareExchangeU64(localAddr, *(uint64_t *)&oldValue, *(uint64_t *)&newValue) != *(uint64_t *)&oldValue) {
while (lockCompareExchangeU64(localAddr, *(uint64_t *)&oldValue, *(uint64_t *)&newValue
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
) != *(uint64_t *)&oldValue) {
oldValue = *address;
newValue = oldValue + addend;
}
@@ -666,14 +702,22 @@
* @return The value at memory location <b>address</b>
*/
VMINLINE static uint64_t
subtractU64(volatile uint64_t *address, uint64_t value)
subtractU64(volatile uint64_t *address, uint64_t value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;

oldValue = (uint64_t)*localAddr;
while ((lockCompareExchangeU64(localAddr, oldValue, oldValue - value)) != oldValue) {
while ((lockCompareExchangeU64(localAddr, oldValue, oldValue - value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue - value;
@@ -740,14 +784,22 @@
* @note This method can spin indefinitely while attempting to write the new value.
*/
VMINLINE static uint64_t
setU64(volatile uint64_t *address, uint64_t value)
setU64(volatile uint64_t *address, uint64_t value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;

oldValue = (uint64_t)*localAddr;
while ((lockCompareExchangeU64(localAddr, oldValue, value)) != oldValue) {
while ((lockCompareExchangeU64(localAddr, oldValue, value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue;
Expand All @@ -762,13 +814,21 @@ class VM_AtomicSupport
* @return the value stored at the address.
*/
VMINLINE static uint64_t
getU64(volatile uint64_t *address)
getU64(volatile uint64_t *address
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif
)
{
uint64_t value = *address;

#if !defined(OMR_ENV_DATA64)
/* this is necessary to ensure atomic read of 64-bit value on 32/31-bit platforms */
value = lockCompareExchangeU64(address, value, value);
value = lockCompareExchangeU64(address, value, value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
#endif /* !defined(OMR_ENV_DATA64) */

return value;
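One point worth calling out about the read-modify-write helpers (addU64, subtractU64, setU64): on non-64-bit-capable builds they keep their CAS-retry loops, so each retry acquires and releases the spin lock inside lockCompareExchangeU64. The initial 64-bit load of oldValue can tear on a 32-bit CPU, but a torn value simply fails the compare under the lock and the loop tries again. An illustrative expansion (not OMR code) of addU64 in terms of the emulatedCasU64 sketch above:

#include <stdint.h>

uint64_t emulatedCasU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue, uint32_t &lock);

uint64_t
addU64Emulated(volatile uint64_t *address, uint64_t addend, uint32_t &lock)
{
	uint64_t oldValue = *address; /* may tear; the locked compare below catches that */
	for (;;) {
		uint64_t observed = emulatedCasU64(address, oldValue, oldValue + addend, lock);
		if (observed == oldValue) {
			return oldValue + addend;
		}
		oldValue = observed; /* retry with the value actually seen under the lock */
	}
}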
