Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

WIP: Support for ppc32 #2930

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
72 changes: 60 additions & 12 deletions gc/base/AtomicOperations.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -171,9 +171,17 @@ class MM_AtomicOperations
* @return the value at memory location <b>address</b> BEFORE the store was attempted
*/
MMINLINE_DEBUG static uint64_t
lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue)
lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
return VM_AtomicSupport::lockCompareExchangeU64(address, oldValue, newValue);
return VM_AtomicSupport::lockCompareExchangeU64(address, oldValue, newValue
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
Expand Down Expand Up @@ -219,9 +227,17 @@ class MM_AtomicOperations
* @return The value at memory location <b>address</b>
*/
MMINLINE_DEBUG static uint64_t
addU64(volatile uint64_t *address, uint64_t addend)
addU64(volatile uint64_t *address, uint64_t addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
return VM_AtomicSupport::addU64(address, addend);
return VM_AtomicSupport::addU64(address, addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
Expand All @@ -235,9 +251,17 @@ class MM_AtomicOperations
* @return The value at memory location <b>address</b>
*/
MMINLINE_DEBUG static double
addDouble(volatile double *address, double addend)
addDouble(volatile double *address, double addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
return VM_AtomicSupport::addDouble(address, addend);
return VM_AtomicSupport::addDouble(address, addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
Expand Down Expand Up @@ -283,9 +307,17 @@ class MM_AtomicOperations
* @return The value at memory location <b>address</b>
*/
MMINLINE_DEBUG static uint64_t
subtractU64(volatile uint64_t *address, uint64_t value)
subtractU64(volatile uint64_t *address, uint64_t value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
return VM_AtomicSupport::subtractU64(address, value);
return VM_AtomicSupport::subtractU64(address, value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
Expand Down Expand Up @@ -313,9 +345,17 @@ class MM_AtomicOperations
* @note This method can spin indefinitely while attempting to write the new value.
*/
MMINLINE_DEBUG static void
setU64(volatile uint64_t *address, uint64_t value)
setU64(volatile uint64_t *address, uint64_t value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
VM_AtomicSupport::setU64(address, value);
VM_AtomicSupport::setU64(address, value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
}

/**
Expand All @@ -327,9 +367,17 @@ class MM_AtomicOperations
* @return the value stored at the address.
*/
MMINLINE_DEBUG static uint64_t
getU64(volatile uint64_t *address)
getU64(volatile uint64_t *address
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif
)
{
return VM_AtomicSupport::getU64(address);
return VM_AtomicSupport::getU64(address
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif
);
}

/**
Expand Down
30 changes: 26 additions & 4 deletions gc/stats/ScavengerCopyScanRatio.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,9 @@ class MM_ScavengerCopyScanRatio
volatile uint64_t _accumulatingSamples; /**< accumulator for aggregating per thread wait/copy/scan counts -- these are periodically latched into _accumulatedSamples and reset */
volatile uint64_t _accumulatedSamples; /**< most recent aggregate wait/copy/scan counts from SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE samples */
volatile uintptr_t _majorUpdateThreadEnv; /**< a token for the thread that has claimed a major update and owns the critical region wherein the update is effected */
#if !defined(OMR_ENV_64BIT_CAPABLE)
uint32_t _atomicLockWord;
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
uintptr_t _scalingUpdateCount; /**< the number of times _accumulatingSamples was latched into _accumulatedSamples */
uintptr_t _overflowCount; /**< the number of times _accumulatingSamples overflowed one or more counters */
uint64_t _resetTimestamp; /**< timestamp at reset() */
Expand All @@ -134,6 +137,9 @@ class MM_ScavengerCopyScanRatio
_accumulatingSamples(0)
,_accumulatedSamples(SCAVENGER_COUNTER_DEFAULT_ACCUMULATOR)
,_majorUpdateThreadEnv(0)
#if !defined(OMR_ENV_64BIT_CAPABLE)
,_atomicLockWord(0)
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
,_scalingUpdateCount(0)
,_overflowCount(0)
,_resetTimestamp(0)
Expand All @@ -152,7 +158,11 @@ class MM_ScavengerCopyScanRatio
MMINLINE double
getScalingFactor(MM_EnvironmentBase* env)
{
uint64_t accumulatedSamples = MM_AtomicOperations::getU64(&_accumulatedSamples);
uint64_t accumulatedSamples = MM_AtomicOperations::getU64(&_accumulatedSamples
#if !defined(OMR_ENV_64BIT_CAPABLE)
, _atomicLockWord
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
return getScalingFactor(env, _threadCount, waits(accumulatedSamples), copied(accumulatedSamples), scanned(accumulatedSamples), updates(accumulatedSamples));
}

Expand Down Expand Up @@ -232,7 +242,11 @@ class MM_ScavengerCopyScanRatio
majorUpdate(MM_EnvironmentBase* env, uint64_t updateResult, uintptr_t nonEmptyScanLists, uintptr_t cachesQueued) {
if (0 == (SCAVENGER_COUNTER_OVERFLOW & updateResult)) {
/* no overflow so latch updateResult into _accumulatedSamples and record the update */
MM_AtomicOperations::setU64(&_accumulatedSamples, updateResult);
MM_AtomicOperations::setU64(&_accumulatedSamples, updateResult
#if !defined(OMR_ENV_64BIT_CAPABLE)
, _atomicLockWord
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
_scalingUpdateCount += 1;
_threadCount = record(env, nonEmptyScanLists, cachesQueued);
} else {
Expand Down Expand Up @@ -363,11 +377,19 @@ class MM_ScavengerCopyScanRatio
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = &_accumulatingSamples;
uint64_t oldValue = *localAddr;
if (oldValue == MM_AtomicOperations::lockCompareExchangeU64(localAddr, oldValue, oldValue + threadUpdate)) {
if (oldValue == MM_AtomicOperations::lockCompareExchangeU64(localAddr, oldValue, oldValue + threadUpdate
#if !defined(OMR_ENV_64BIT_CAPABLE)
, _atomicLockWord
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)) {
newValue = oldValue + threadUpdate;
uint64_t updateCount = updates(newValue);
if (SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE <= updateCount) {
MM_AtomicOperations::setU64(&_accumulatingSamples, 0);
MM_AtomicOperations::setU64(&_accumulatingSamples, 0
#if !defined(OMR_ENV_64BIT_CAPABLE)
, _atomicLockWord
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
if (SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE < updateCount) {
newValue = 0;
}
Expand Down
90 changes: 75 additions & 15 deletions include_core/AtomicSupport.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -352,8 +352,8 @@ class VM_AtomicSupport
}
#endif /* defined(ATOMIC_ALLOW_PRE_READ) */
#if defined(OMRZTPF)
cs((cs_t *)&oldValue, (cs_t *)address, (cs_t)newValue);
return oldValue;
cs((cs_t *)&oldValue, (cs_t *)address, (cs_t)newValue);
return oldValue;
#elif defined(__GNUC__) /* defined(OMRZTPF) */
/* Assume GCC >= 4.2 */
return __sync_val_compare_and_swap(address, oldValue, newValue);
Expand Down Expand Up @@ -388,7 +388,12 @@ class VM_AtomicSupport
* @return the value at memory location <b>address</b> BEFORE the store was attempted
*/
VMINLINE static uint64_t
lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue, bool readBeforeCAS = false)
lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue,
#if !defined(OMR_ENV_64BIT_CAPABLE)
uint32_t &lock,
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
bool readBeforeCAS = false
)
{
#if defined(ATOMIC_SUPPORT_STUB)
return 0;
Expand All @@ -406,8 +411,23 @@ class VM_AtomicSupport
}
}
#endif /* defined(ATOMIC_ALLOW_PRE_READ) */
#if defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) /* defined(ATOMIC_SUPPORT_STUB) */
return J9CAS8Helper(address, ((uint32_t*)&oldValue)[1], ((uint32_t*)&oldValue)[0], ((uint32_t*)&newValue)[1], ((uint32_t*)&newValue)[0]);
#if !defined(OMR_ENV_64BIT_CAPABLE)
{
uint64_t currentValue;
while (lockCompareExchangeU32(&lock, 0, 1) != 0) {}
#if defined(__xlC__)
readBarrier();
#endif /* defined(__xlC__) */
currentValue = *address;
if (currentValue == oldValue) {
*address = newValue;
}
writeBarrier();
lock = 0;
return currentValue;
}
#elif defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) /* !defined(OMR_ENV_64BIT_CAPABLE) */
return OMRCAS8Helper(address, ((uint32_t*)&oldValue)[1], ((uint32_t*)&oldValue)[0], ((uint32_t*)&newValue)[1], ((uint32_t*)&newValue)[0]);
#elif defined(OMRZTPF) /* defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) */
csg((csg_t *)&oldValue, (csg_t *)address, (csg_t)newValue);
return oldValue;
Expand Down Expand Up @@ -591,14 +611,22 @@ class VM_AtomicSupport
* @return The value at memory location <b>address</b>
*/
VMINLINE static uint64_t
addU64(volatile uint64_t *address, uint64_t addend)
addU64(volatile uint64_t *address, uint64_t addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;

oldValue = (uint64_t)*localAddr;
while ((lockCompareExchangeU64(localAddr, oldValue, oldValue + addend)) != oldValue) {
while ((lockCompareExchangeU64(localAddr, oldValue, oldValue + addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue + addend;
Expand All @@ -615,7 +643,11 @@ class VM_AtomicSupport
* @return The value at memory location <b>address</b>
*/
VMINLINE static double
addDouble(volatile double *address, double addend)
addDouble(volatile double *address, double addend
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
/* double is stored as 64bit */
/* Stop compiler optimizing away load of oldValue */
Expand All @@ -624,7 +656,11 @@ class VM_AtomicSupport
double oldValue = *address;
double newValue = oldValue + addend;

while (lockCompareExchangeU64(localAddr, *(uint64_t *)&oldValue, *(uint64_t *)&newValue) != *(uint64_t *)&oldValue) {
while (lockCompareExchangeU64(localAddr, *(uint64_t *)&oldValue, *(uint64_t *)&newValue
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
) != *(uint64_t *)&oldValue) {
oldValue = *address;
newValue = oldValue + addend;
}
Expand Down Expand Up @@ -666,14 +702,22 @@ class VM_AtomicSupport
* @return The value at memory location <b>address</b>
*/
VMINLINE static uint64_t
subtractU64(volatile uint64_t *address, uint64_t value)
subtractU64(volatile uint64_t *address, uint64_t value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;

oldValue = (uint64_t)*localAddr;
while ((lockCompareExchangeU64(localAddr, oldValue, oldValue - value)) != oldValue) {
while ((lockCompareExchangeU64(localAddr, oldValue, oldValue - value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue - value;
Expand Down Expand Up @@ -740,14 +784,22 @@ class VM_AtomicSupport
* @note This method can spin indefinitely while attempting to write the new value.
*/
VMINLINE static uint64_t
setU64(volatile uint64_t *address, uint64_t value)
setU64(volatile uint64_t *address, uint64_t value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;

oldValue = (uint64_t)*localAddr;
while ((lockCompareExchangeU64(localAddr, oldValue, value)) != oldValue) {
while ((lockCompareExchangeU64(localAddr, oldValue, value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
)) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue;
Expand All @@ -762,13 +814,21 @@ class VM_AtomicSupport
* @return the value stored at the address.
*/
VMINLINE static uint64_t
getU64(volatile uint64_t *address)
getU64(volatile uint64_t *address
#if !defined(OMR_ENV_64BIT_CAPABLE)
, uint32_t &lock
#endif
)
{
uint64_t value = *address;

#if !defined(OMR_ENV_DATA64)
/* this is necessary to ensure atomic read of 64-bit value on 32/31-bit platforms */
value = lockCompareExchangeU64(address, value, value);
value = lockCompareExchangeU64(address, value, value
#if !defined(OMR_ENV_64BIT_CAPABLE)
, lock
#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
);
#endif /* !defined(OMR_ENV_DATA64) */

return value;
Expand Down
2 changes: 1 addition & 1 deletion include_core/omrutilbase.h
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ uintptr_t setAtomic(volatile uintptr_t *address, uintptr_t value);
* @return The old value read from addr
*/
uint64_t
J9CAS8Helper(volatile uint64_t *addr, uint32_t compareLo, uint32_t compareHi, uint32_t swapLo, uint32_t swapHi);
OMRCAS8Helper(volatile uint64_t *addr, uint32_t compareLo, uint32_t compareHi, uint32_t swapLo, uint32_t swapHi);

#endif /* !OMR_ENV_DATA64 && (AIXPPC || LINUXPPC) */

Expand Down
13 changes: 5 additions & 8 deletions omrmakefiles/rules.linux.mk
Original file line number Diff line number Diff line change
Expand Up @@ -76,9 +76,8 @@ endif
## Position Independent compile flag
ifeq (gcc,$(OMR_TOOLCHAIN))
ifeq (ppc,$(OMR_HOST_ARCH))
# Used for GOT's under 4k, should we just go -fPIC for everyone?
GLOBAL_CFLAGS+=-fpic
GLOBAL_CXXFLAGS+=-fpic
GLOBAL_CFLAGS+=-fPIC
GLOBAL_CXXFLAGS+=-fPIC
else
ifeq (x86,$(OMR_HOST_ARCH))
ifeq (1,$(OMR_ENV_DATA64))
Expand Down Expand Up @@ -244,11 +243,9 @@ else
endif

ifneq (,$(findstring executable,$(ARTIFACT_TYPE)))
ifeq (x86,$(OMR_HOST_ARCH))
ifeq (1,$(OMR_ENV_DATA64))
else
GLOBAL_LDFLAGS+=-m32
endif
ifeq (1,$(OMR_ENV_DATA64))
else
GLOBAL_LDFLAGS+=-m32
endif

## Default Libraries
Expand Down
Loading