diff --git a/gc/base/AtomicOperations.hpp b/gc/base/AtomicOperations.hpp
index 01c1c9a8928..53bc788782e 100644
--- a/gc/base/AtomicOperations.hpp
+++ b/gc/base/AtomicOperations.hpp
@@ -171,9 +171,17 @@ class MM_AtomicOperations
* @return the value at memory location address BEFORE the store was attempted
*/
MMINLINE_DEBUG static uint64_t
- lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue)
+ lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::lockCompareExchangeU64(address, oldValue, newValue);
+ return VM_AtomicSupport::lockCompareExchangeU64(address, oldValue, newValue
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -219,9 +227,17 @@ class MM_AtomicOperations
* @return The value at memory location address
*/
MMINLINE_DEBUG static uint64_t
- addU64(volatile uint64_t *address, uint64_t addend)
+ addU64(volatile uint64_t *address, uint64_t addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::addU64(address, addend);
+ return VM_AtomicSupport::addU64(address, addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -235,9 +251,17 @@ class MM_AtomicOperations
* @return The value at memory location address
*/
MMINLINE_DEBUG static double
- addDouble(volatile double *address, double addend)
+ addDouble(volatile double *address, double addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::addDouble(address, addend);
+ return VM_AtomicSupport::addDouble(address, addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -283,9 +307,17 @@ class MM_AtomicOperations
* @return The value at memory location address
*/
MMINLINE_DEBUG static uint64_t
- subtractU64(volatile uint64_t *address, uint64_t value)
+ subtractU64(volatile uint64_t *address, uint64_t value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::subtractU64(address, value);
+ return VM_AtomicSupport::subtractU64(address, value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -313,9 +345,17 @@ class MM_AtomicOperations
* @note This method can spin indefinitely while attempting to write the new value.
*/
MMINLINE_DEBUG static void
- setU64(volatile uint64_t *address, uint64_t value)
+ setU64(volatile uint64_t *address, uint64_t value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- VM_AtomicSupport::setU64(address, value);
+ VM_AtomicSupport::setU64(address, value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
@@ -327,9 +367,17 @@ class MM_AtomicOperations
* @return the value stored at the address.
*/
MMINLINE_DEBUG static uint64_t
- getU64(volatile uint64_t *address)
+ getU64(volatile uint64_t *address
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
- return VM_AtomicSupport::getU64(address);
+ return VM_AtomicSupport::getU64(address
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
}
/**
diff --git a/gc/stats/ScavengerCopyScanRatio.hpp b/gc/stats/ScavengerCopyScanRatio.hpp
index 9b0628f0f9b..499795e99d1 100644
--- a/gc/stats/ScavengerCopyScanRatio.hpp
+++ b/gc/stats/ScavengerCopyScanRatio.hpp
@@ -115,6 +115,9 @@ class MM_ScavengerCopyScanRatio
 	volatile uint64_t _accumulatingSamples; /**< accumulator for aggregating per thread wait/copy/scan updates -- these are periodically latched into _accumulatedSamples and reset */
volatile uint64_t _accumulatedSamples; /**< most recent aggregate wait/copy/scan counts from SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE samples */
volatile uintptr_t _majorUpdateThreadEnv; /**< a token for the thread that has claimed a major update and owns the critical region wherein the update is effected */
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+	uint32_t _atomicLockWord; /**< lock word passed to the 64-bit atomic operations to serialize them on platforms without native 64-bit atomic support */
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
uintptr_t _scalingUpdateCount; /**< the number of times _accumulatingSamples was latched into _accumulatedSamples */
uintptr_t _overflowCount; /**< the number of times _accumulatingSamples overflowed one or more counters */
uint64_t _resetTimestamp; /**< timestamp at reset() */
@@ -134,6 +137,9 @@ class MM_ScavengerCopyScanRatio
_accumulatingSamples(0)
,_accumulatedSamples(SCAVENGER_COUNTER_DEFAULT_ACCUMULATOR)
,_majorUpdateThreadEnv(0)
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ ,_atomicLockWord(0)
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
,_scalingUpdateCount(0)
,_overflowCount(0)
,_resetTimestamp(0)
@@ -152,7 +158,11 @@ class MM_ScavengerCopyScanRatio
MMINLINE double
getScalingFactor(MM_EnvironmentBase* env)
{
- uint64_t accumulatedSamples = MM_AtomicOperations::getU64(&_accumulatedSamples);
+ uint64_t accumulatedSamples = MM_AtomicOperations::getU64(&_accumulatedSamples
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , _atomicLockWord
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
return getScalingFactor(env, _threadCount, waits(accumulatedSamples), copied(accumulatedSamples), scanned(accumulatedSamples), updates(accumulatedSamples));
}
@@ -232,7 +242,11 @@ class MM_ScavengerCopyScanRatio
majorUpdate(MM_EnvironmentBase* env, uint64_t updateResult, uintptr_t nonEmptyScanLists, uintptr_t cachesQueued) {
if (0 == (SCAVENGER_COUNTER_OVERFLOW & updateResult)) {
/* no overflow so latch updateResult into _accumulatedSamples and record the update */
- MM_AtomicOperations::setU64(&_accumulatedSamples, updateResult);
+ MM_AtomicOperations::setU64(&_accumulatedSamples, updateResult
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , _atomicLockWord
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
_scalingUpdateCount += 1;
_threadCount = record(env, nonEmptyScanLists, cachesQueued);
} else {
@@ -363,11 +377,19 @@ class MM_ScavengerCopyScanRatio
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = &_accumulatingSamples;
uint64_t oldValue = *localAddr;
- if (oldValue == MM_AtomicOperations::lockCompareExchangeU64(localAddr, oldValue, oldValue + threadUpdate)) {
+ if (oldValue == MM_AtomicOperations::lockCompareExchangeU64(localAddr, oldValue, oldValue + threadUpdate
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , _atomicLockWord
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )) {
newValue = oldValue + threadUpdate;
uint64_t updateCount = updates(newValue);
if (SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE <= updateCount) {
- MM_AtomicOperations::setU64(&_accumulatingSamples, 0);
+ MM_AtomicOperations::setU64(&_accumulatingSamples, 0
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , _atomicLockWord
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
if (SCAVENGER_THREAD_UPDATES_PER_MAJOR_UPDATE < updateCount) {
newValue = 0;
}
diff --git a/include_core/AtomicSupport.hpp b/include_core/AtomicSupport.hpp
index 50c26cb0578..f9571215f7d 100644
--- a/include_core/AtomicSupport.hpp
+++ b/include_core/AtomicSupport.hpp
@@ -352,8 +352,8 @@ class VM_AtomicSupport
}
#endif /* defined(ATOMIC_ALLOW_PRE_READ) */
#if defined(OMRZTPF)
- cs((cs_t *)&oldValue, (cs_t *)address, (cs_t)newValue);
- return oldValue;
+ cs((cs_t *)&oldValue, (cs_t *)address, (cs_t)newValue);
+ return oldValue;
#elif defined(__GNUC__) /* defined(OMRZTPF) */
/* Assume GCC >= 4.2 */
return __sync_val_compare_and_swap(address, oldValue, newValue);
@@ -388,7 +388,12 @@ class VM_AtomicSupport
* @return the value at memory location address BEFORE the store was attempted
*/
VMINLINE static uint64_t
- lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue, bool readBeforeCAS = false)
+ lockCompareExchangeU64(volatile uint64_t *address, uint64_t oldValue, uint64_t newValue,
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ uint32_t &lock,
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ bool readBeforeCAS = false
+ )
{
#if defined(ATOMIC_SUPPORT_STUB)
return 0;
@@ -406,8 +411,23 @@ class VM_AtomicSupport
}
}
#endif /* defined(ATOMIC_ALLOW_PRE_READ) */
-#if defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) /* defined(ATOMIC_SUPPORT_STUB) */
- return J9CAS8Helper(address, ((uint32_t*)&oldValue)[1], ((uint32_t*)&oldValue)[0], ((uint32_t*)&newValue)[1], ((uint32_t*)&newValue)[0]);
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ {
+ uint64_t currentValue;
+ while (lockCompareExchangeU32(&lock, 0, 1) != 0) {}
+#if defined(__xlC__)
+ readBarrier();
+#endif /* defined(__xlC__) */
+ currentValue = *address;
+ if (currentValue == oldValue) {
+ *address = newValue;
+ }
+ writeBarrier();
+ lock = 0;
+ return currentValue;
+ }
+#elif defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ return OMRCAS8Helper(address, ((uint32_t*)&oldValue)[1], ((uint32_t*)&oldValue)[0], ((uint32_t*)&newValue)[1], ((uint32_t*)&newValue)[0]);
#elif defined(OMRZTPF) /* defined(OMR_ARCH_POWER) && !defined(OMR_ENV_DATA64) */
csg((csg_t *)&oldValue, (csg_t *)address, (csg_t)newValue);
return oldValue;
@@ -591,14 +611,22 @@ class VM_AtomicSupport
* @return The value at memory location address
*/
VMINLINE static uint64_t
- addU64(volatile uint64_t *address, uint64_t addend)
+ addU64(volatile uint64_t *address, uint64_t addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;
oldValue = (uint64_t)*localAddr;
- while ((lockCompareExchangeU64(localAddr, oldValue, oldValue + addend)) != oldValue) {
+ while ((lockCompareExchangeU64(localAddr, oldValue, oldValue + addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue + addend;
@@ -615,7 +643,11 @@ class VM_AtomicSupport
* @return The value at memory location address
*/
VMINLINE static double
- addDouble(volatile double *address, double addend)
+ addDouble(volatile double *address, double addend
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
/* double is stored as 64bit */
/* Stop compiler optimizing away load of oldValue */
@@ -624,7 +656,11 @@ class VM_AtomicSupport
double oldValue = *address;
double newValue = oldValue + addend;
- while (lockCompareExchangeU64(localAddr, *(uint64_t *)&oldValue, *(uint64_t *)&newValue) != *(uint64_t *)&oldValue) {
+ while (lockCompareExchangeU64(localAddr, *(uint64_t *)&oldValue, *(uint64_t *)&newValue
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ ) != *(uint64_t *)&oldValue) {
oldValue = *address;
newValue = oldValue + addend;
}
@@ -666,14 +702,22 @@ class VM_AtomicSupport
* @return The value at memory location address
*/
VMINLINE static uint64_t
- subtractU64(volatile uint64_t *address, uint64_t value)
+ subtractU64(volatile uint64_t *address, uint64_t value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;
oldValue = (uint64_t)*localAddr;
- while ((lockCompareExchangeU64(localAddr, oldValue, oldValue - value)) != oldValue) {
+ while ((lockCompareExchangeU64(localAddr, oldValue, oldValue - value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue - value;
@@ -740,14 +784,22 @@ class VM_AtomicSupport
* @note This method can spin indefinitely while attempting to write the new value.
*/
VMINLINE static uint64_t
- setU64(volatile uint64_t *address, uint64_t value)
+ setU64(volatile uint64_t *address, uint64_t value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
/* Stop compiler optimizing away load of oldValue */
volatile uint64_t *localAddr = address;
uint64_t oldValue;
oldValue = (uint64_t)*localAddr;
- while ((lockCompareExchangeU64(localAddr, oldValue, value)) != oldValue) {
+ while ((lockCompareExchangeU64(localAddr, oldValue, value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )) != oldValue) {
oldValue = (uint64_t)*localAddr;
}
return oldValue;
@@ -762,13 +814,21 @@ class VM_AtomicSupport
* @return the value stored at the address.
*/
VMINLINE static uint64_t
- getU64(volatile uint64_t *address)
+ getU64(volatile uint64_t *address
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , uint32_t &lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ )
{
uint64_t value = *address;
#if !defined(OMR_ENV_DATA64)
/* this is necessary to ensure atomic read of 64-bit value on 32/31-bit platforms */
- value = lockCompareExchangeU64(address, value, value);
+ value = lockCompareExchangeU64(address, value, value
+#if !defined(OMR_ENV_64BIT_CAPABLE)
+ , lock
+#endif /* !defined(OMR_ENV_64BIT_CAPABLE) */
+ );
#endif /* !defined(OMR_ENV_DATA64) */
return value;
diff --git a/include_core/omrutilbase.h b/include_core/omrutilbase.h
index 317c32dd27f..d535754b7c3 100644
--- a/include_core/omrutilbase.h
+++ b/include_core/omrutilbase.h
@@ -154,7 +154,7 @@ uintptr_t setAtomic(volatile uintptr_t *address, uintptr_t value);
* @return The old value read from addr
*/
uint64_t
-J9CAS8Helper(volatile uint64_t *addr, uint32_t compareLo, uint32_t compareHi, uint32_t swapLo, uint32_t swapHi);
+OMRCAS8Helper(volatile uint64_t *addr, uint32_t compareLo, uint32_t compareHi, uint32_t swapLo, uint32_t swapHi);
#endif /* !OMR_ENV_DATA64 && (AIXPPC || LINUXPPC) */
diff --git a/omrmakefiles/rules.linux.mk b/omrmakefiles/rules.linux.mk
index 8568ffe7815..55007d08c68 100644
--- a/omrmakefiles/rules.linux.mk
+++ b/omrmakefiles/rules.linux.mk
@@ -76,9 +76,8 @@ endif
## Position Independent compile flag
ifeq (gcc,$(OMR_TOOLCHAIN))
ifeq (ppc,$(OMR_HOST_ARCH))
- # Used for GOT's under 4k, should we just go -fPIC for everyone?
- GLOBAL_CFLAGS+=-fpic
- GLOBAL_CXXFLAGS+=-fpic
+ GLOBAL_CFLAGS+=-fPIC
+ GLOBAL_CXXFLAGS+=-fPIC
else
ifeq (x86,$(OMR_HOST_ARCH))
ifeq (1,$(OMR_ENV_DATA64))
@@ -244,11 +243,9 @@ else
endif
ifneq (,$(findstring executable,$(ARTIFACT_TYPE)))
- ifeq (x86,$(OMR_HOST_ARCH))
- ifeq (1,$(OMR_ENV_DATA64))
- else
- GLOBAL_LDFLAGS+=-m32
- endif
+ ifeq (1,$(OMR_ENV_DATA64))
+ else
+ GLOBAL_LDFLAGS+=-m32
endif
## Default Libraries
diff --git a/tools/configure b/tools/configure
index 8df7aeec5f2..6c8ae53e7c1 100755
--- a/tools/configure
+++ b/tools/configure
@@ -663,7 +663,6 @@ infodir
docdir
oldincludedir
includedir
-runstatedir
localstatedir
sharedstatedir
sysconfdir
@@ -737,7 +736,6 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
-runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
@@ -990,15 +988,6 @@ do
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;
- -runstatedir | --runstatedir | --runstatedi | --runstated \
- | --runstate | --runstat | --runsta | --runst | --runs \
- | --run | --ru | --r)
- ac_prev=runstatedir ;;
- -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
- | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
- | --run=* | --ru=* | --r=*)
- runstatedir=$ac_optarg ;;
-
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
ac_prev=sbindir ;;
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1136,7 +1125,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
- libdir localedir mandir runstatedir
+ libdir localedir mandir
do
eval ac_val=\$$ac_var
# Remove trailing slashes.
@@ -1289,7 +1278,6 @@ Fine tuning of the installation directories:
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
- --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
--libdir=DIR object code libraries [EPREFIX/lib]
--includedir=DIR C header files [PREFIX/include]
--oldincludedir=DIR C header files for non-gcc [/usr/include]
@@ -2131,7 +2119,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
###############################################################################
-# Copyright (c) 2015, 2016 IBM Corp. and others
+# Copyright (c) 2015, 2018 IBM Corp. and others
#
# This program and the accompanying materials are made available under
# the terms of the Eclipse Public License 2.0 which accompanies this
@@ -3787,10 +3775,12 @@ _ACEOF
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking OMR_BUILD_DATASIZE" >&5
$as_echo_n "checking OMR_BUILD_DATASIZE... " >&6; }
- if test "$ac_cv_sizeof_void_p" = 8; then :
+  if test -z "$OMR_BUILD_DATASIZE"; then :
+ if test "$ac_cv_sizeof_void_p" = 8; then :
OMR_BUILD_DATASIZE=64
else
OMR_BUILD_DATASIZE=32
+fi
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $OMR_BUILD_DATASIZE" >&5
$as_echo "$OMR_BUILD_DATASIZE" >&6; }
diff --git a/tools/configure.ac b/tools/configure.ac
index 8b83de58867..8e86a53cde5 100644
--- a/tools/configure.ac
+++ b/tools/configure.ac
@@ -50,9 +50,11 @@ then
AC_CHECK_SIZEOF([void *])
AC_MSG_CHECKING([OMR_BUILD_DATASIZE])
- AS_IF([test "$ac_cv_sizeof_void_p" = 8],
- [OMR_BUILD_DATASIZE=64],
- [OMR_BUILD_DATASIZE=32])
+	AS_IF([test -z "$OMR_BUILD_DATASIZE"],
+ [AS_IF([test "$ac_cv_sizeof_void_p" = 8],
+ [OMR_BUILD_DATASIZE=64],
+ [OMR_BUILD_DATASIZE=32])],
+ [])
AC_MSG_RESULT([$OMR_BUILD_DATASIZE])
diff --git a/util/omrutil/unix/aix/32/cas8help.s b/util/omrutil/unix/aix/32/cas8help.s
index a80aab62ee3..dfcfa96b8a7 100644
--- a/util/omrutil/unix/aix/32/cas8help.s
+++ b/util/omrutil/unix/aix/32/cas8help.s
@@ -53,21 +53,22 @@
.set r29,29
.set r30,30
.set r31,31
+ .set cr0,0
.toc
TOC.static: .tc .static[tc],_static[ro]
.csect _static[ro]
- .globl .J9CAS8Helper[pr]
- .globl J9CAS8Helper[ds]
- .globl .J9CAS8Helper
+ .globl .OMRCAS8Helper[pr]
+ .globl OMRCAS8Helper[ds]
+ .globl .OMRCAS8Helper
.toc
-TOC.J9CAS8Helper: .tc .J9CAS8Helper[tc],J9CAS8Helper[ds]
- .csect J9CAS8Helper[ds]
- .long .J9CAS8Helper[pr]
+TOC.OMRCAS8Helper: .tc .OMRCAS8Helper[tc],OMRCAS8Helper[ds]
+ .csect OMRCAS8Helper[ds]
+ .long .OMRCAS8Helper[pr]
.long TOC[tc0]
.long 0
- .csect .J9CAS8Helper[pr]
- .function .J9CAS8Helper[pr],startproc.J9CAS8Helper,16,0,(endproc.J9CAS8Helper-startproc.J9CAS8Helper)
- startproc.J9CAS8Helper:
+ .csect .OMRCAS8Helper[pr]
+ .function .OMRCAS8Helper[pr],startproc.OMRCAS8Helper,16,0,(endproc.OMRCAS8Helper-startproc.OMRCAS8Helper)
+ startproc.OMRCAS8Helper:
# in:
#
# r3 = the address of the 8-aligned memory address
@@ -80,23 +81,19 @@ TOC.J9CAS8Helper: .tc .J9CAS8Helper[tc],J9CAS8Helper[ds]
#
# r3 = high part of read value
# r4 = low part of read value
- ori r12, r3, 0
- ori r8, r4, 0
+ .machine "push"
+ .machine "ppc64"
+ rldimi r4, r5, 32, 0
+ rldimi r6, r7, 32, 0
loop:
- .long 0x7d2060a8 # ldarx r9, 0, r12
- .long 0x79230022 # srdi r3, r9, 32
- ori r4, r9, 0
- ori r10, r8, 0
- ori r11, r6, 0
- .long 0x78aa000e # rldimi r10, r5, 32, 0
- .long 0x78eb000e # rldimi r11, r7, 32, 0
- .long 0x7c295040 # cmpl 0, 1, r9, r10
- bne fail
- .long 0x7d6061ad # stdcx. r11, 0, r12
- bne loop
- blr
+ ldarx r8, 0, r3
+ cmpld cr0, r8, r4
+ bne- fail
+ stdcx. r6, 0, r3
+ bne- loop
fail:
- .long 0x7d2061ad # stdcx. r9, 0, r12
- bne loop
+ mr r4, r8
+ srdi r3, r8, 32
blr
- endproc.J9CAS8Helper:
+ .machine "pop"
+ endproc.OMRCAS8Helper:
diff --git a/util/omrutil/unix/linux/ppc/32/cas8help.s b/util/omrutil/unix/linux/ppc/32/cas8help.s
index 82d84e57086..f376b05cf41 100644
--- a/util/omrutil/unix/linux/ppc/32/cas8help.s
+++ b/util/omrutil/unix/linux/ppc/32/cas8help.s
@@ -52,12 +52,13 @@
.set r29,29
.set r30,30
.set r31,31
+ .set cr0,0
.section ".rodata"
- .global J9CAS8Helper
- .type J9CAS8Helper@function
+ .global OMRCAS8Helper
+ .type OMRCAS8Helper@function
.section ".text"
.align 2
-J9CAS8Helper:
+OMRCAS8Helper:
# in:
#
# r3 = the address of the 8-aligned memory address
@@ -70,22 +71,15 @@ J9CAS8Helper:
#
# r3 = high part of read value
# r4 = low part of read value
- ori r12, r3, 0
- ori r8, r4, 0
-loop:
- ldarx r9, 0, r12
- srdi r3, r9, 32
- ori r4, r9, 0
- ori r10, r8, 0
- ori r11, r6, 0
- rldimi r10, r5, 32, 0
- rldimi r11, r7, 32, 0
- cmpl cr0, 1, r9, r10
- bne fail
- stdcx. r11, 0, r12
- bne loop
- blr
-fail:
- stdcx. r9, 0, r12
- bne loop
+ rldimi r4, r5, 32, 0
+ rldimi r6, r7, 32, 0
+0:
+ ldarx r8, 0, r3
+ cmpld cr0, r8, r4
+ bne- 1f
+ stdcx. r6, 0, r3
+ bne- 0b
+1:
+ mr r4, r8
+ srdi r3, r8, 32
blr