Commit 9ee79bf

Use the new {.align.} pragma to work around nim-lang/Nim#12720
Rename WV_CacheLineSize to WV_CacheLinePadding

Unfortunately nim-lang/Nim#12722 broke Nim devel in the past 3 days; commit nim-lang/Nim@1b2c1bc is known good.

C proc signatures also changed to csize_t (nim-lang/Nim#12497).
mratsim committed Nov 24, 2019
1 parent e3a1f29 commit 9ee79bf
Showing 8 changed files with 27 additions and 32 deletions.
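
The recurring change across these files: hand-computed padding arrays are replaced by the {.align.} field pragma, so the compiler handles field offsets instead of error-prone size arithmetic. A minimal before/after sketch of the pattern (type and field names are illustrative, not from the diff):

    const WV_CacheLinePadding {.intDefine.} = 128

    type
      # Before: padding sized by hand; breaks when a field's size is
      # platform-dependent, e.g. Lock (see nim-lang/Nim#12720).
      PaddedByHand = object
        pad0: array[WV_CacheLinePadding - sizeof(int32), byte]
        counter: int32

      # After: the compiler aligns the field itself.
      PaddedByPragma = object
        counter {.align: WV_CacheLinePadding.}: int32
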
9 changes: 3 additions & 6 deletions unused/channels/channels_mpsc_bounded_lock.nim
@@ -47,14 +47,11 @@ type
## - Messages are guaranteed to be delivered
## - Messages will be delivered exactly once
## - Linearizability
- pad0: array[WV_CacheLineSize - 3*sizeof(int32), byte]
- backLock: Lock # Padding? - pthread_lock is 40 bytes on Linux, unknown on windows.
+ backLock {.align:WV_CacheLinePadding.}: Lock
capacity: int32
buffer: ptr UncheckedArray[T]
- pad1: array[WV_CacheLineSize - sizeof(int32), byte]
- front: Atomic[int32]
- pad2: array[WV_CacheLineSize - sizeof(int32), byte]
- back: Atomic[int32]
+ front {.align:WV_CacheLinePadding.}: Atomic[int32]
+ back {.align:WV_CacheLinePadding.}: Atomic[int32]

# Private aliases
Channel[T] = ChannelMpscBounded[T]
6 changes: 3 additions & 3 deletions weave/channels/channels_legacy.nim
@@ -128,7 +128,7 @@ proc channel_cache_alloc(
return false
p = p.next

- p = cast[ptr ChannelCacheObj](c_malloc(sizeof(ChannelCacheObj)))
+ p = cast[ptr ChannelCacheObj](c_malloc(csize_t sizeof(ChannelCacheObj)))
if p.isNil:
raise newException(OutOfMemError, "Could not allocate memory")

@@ -188,12 +188,12 @@ proc channel_alloc*(size, n: int32, impl: ChannelImplKind): ChannelRaw =
break
p = p.next

- result = cast[ChannelRaw](c_malloc(sizeof(ChannelObj)))
+ result = cast[ChannelRaw](c_malloc(csize_t sizeof(ChannelObj)))
if result.isNil:
raise newException(OutOfMemError, "Could not allocate memory")

# To buffer n items, we allocate for n+1
- result.buffer = cast[ptr UncheckedArray[byte]](c_malloc((n+1) * size))
+ result.buffer = cast[ptr UncheckedArray[byte]](c_malloc(csize_t (n+1)*size))
if result.buffer.isNil:
raise newException(OutOfMemError, "Could not allocate memory")

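The csize_t conversions track nim-lang/Nim#12497, which changed C interop signatures to use the unsigned csize_t (C's size_t) rather than a signed Nim integer. Since sizeof returns an int, call sites now convert explicitly. A sketch of the pattern with an assumed c_malloc binding (weave's actual declaration may differ):

    # Assumed binding: the size parameter is csize_t after Nim#12497.
    proc c_malloc(size: csize_t): pointer {.importc: "malloc", header: "<stdlib.h>".}
    proc c_free(p: pointer) {.importc: "free", header: "<stdlib.h>".}

    let p = c_malloc(csize_t sizeof(int))  # explicit int -> csize_t conversion
    c_free(p)
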
15 changes: 8 additions & 7 deletions weave/channels/channels_mpsc_unbounded.nim
@@ -1,5 +1,5 @@
import
- std/atomics,
+ std/atomics, std/macros,
../config,
../primitives/compiler_optimization_hints, # for prefetch
../instrumentation/[contracts, loggers]
@@ -24,12 +24,13 @@ type
# to make sure there are no bugs
# on arch with relaxed memory models

- count: Atomic[int]
- pad0: array[WV_CacheLineSize - sizeof(pointer), byte]
- front: T
- pad1: array[WV_CacheLineSize - sizeof(int), byte]
- back: Atomic[pointer] # Workaround generic atomics bug: https://github.com/nim-lang/Nim/issues/12695
+ # Accessed by all
+ count{.align: WV_CacheLinePadding.}: Atomic[int]
+ # Consumer only
+ front{.align: WV_CacheLinePadding.}: T
+ # Producers and consumer slow-path
+ back{.align: WV_CacheLinePadding.}: Atomic[pointer] # Workaround generic atomics bug: https://github.com/nim-lang/Nim/issues/12695
+ dummy: typeof(default(T)[]) # Deref the pointer type

template checkInvariants(): untyped =
ascertain: not(chan.front.isNil)
@@ -213,7 +214,7 @@ when isMainModule:
when defined(debugNimalloc):
createShared(ValObj)
else:
- cast[Val](c_malloc(sizeof(ValObj)))
+ cast[Val](c_malloc(csize_t sizeof(ValObj)))

proc valFree(val: Val) =
## Note: it seems like freeing memory
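The new dummy field relies on a type-level trick: typeof(default(T)[]) names the type obtained by dereferencing a T, which appears to keep the element type available alongside the type-erased Atomic[pointer]. Nothing is dereferenced at runtime. A minimal sketch (Keeper and Node are illustrative names, not from the diff):

    type
      Node = object
        value: int

      Keeper[T] = object
        # default(T) is a nil ptr used only for its type;
        # [] dereferences it at the type level inside typeof.
        dummy: typeof(default(T)[])

    var k: Keeper[ptr Node]
    doAssert k.dummy is Node  # dummy has the element type, not the ptr type
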
4 changes: 1 addition & 3 deletions weave/channels/channels_spsc_single_object.nim
@@ -42,9 +42,7 @@ type
## - Messages are guaranteed to be delivered
## - Messages will be delivered exactly once
## - Linearizability
- pad0: array[WV_CacheLineSize, byte] # If used in a sequence of channels
- buffer: T
- pad1: array[WV_CacheLineSize - sizeof(T), byte]
+ buffer {.align:WV_CacheLinePadding.}: T # Ensure proper padding if used in sequence of channels
full: Atomic[bool]

proc `=`[T](
16 changes: 8 additions & 8 deletions weave/channels/channels_spsc_single_ptr.nim
@@ -8,7 +8,8 @@
import
std/atomics,
../config,
- ../instrumentation/contracts
+ ../instrumentation/contracts,
+ ../primitives/compiler_optimization_hints


type
@@ -50,8 +51,7 @@ type
## - Messages will be delivered exactly once
## - Linearizability
ChannelRaw = object
- pad0: array[WV_CacheLineSize - sizeof(pointer), byte] # If used in a sequence of channels
- buffer: Atomic[pointer]
+ buffer {.align:WV_CacheLinePadding.}: Atomic[pointer] # Ensure proper padding if used in sequence of channels

# Internal type-erased implementation
# ---------------------------------------------------------------
@@ -128,18 +128,18 @@ func isEmpty*[T](chan: var ChannelSpscSinglePtr[T]): bool {.inline.} =
# Sanity checks
# ------------------------------------------------------------------------------
when isMainModule:
- import strutils
+ import strutils, ../memory/allocs

when not compileOption("threads"):
{.error: "This requires --threads:on compilation flag".}

- template sendLoop[T](chan: var Channel[T],
+ template sendLoop[T](chan: var ChannelSpscSinglePtr[T],
data: sink T,
body: untyped): untyped =
while not chan.trySend(data):
body

- template recvLoop[T](chan: var Channel[T],
+ template recvLoop[T](chan: var ChannelSpscSinglePtr[T],
data: var T,
body: untyped): untyped =
while not chan.tryRecv(data):
@@ -148,7 +148,7 @@ when isMainModule:
type
ThreadArgs = object
ID: WorkerKind
- chan: ptr Channel[ptr int]
+ chan: ptr ChannelSpscSinglePtr[ptr int]

WorkerKind = enum
Sender
@@ -198,7 +198,7 @@ when isMainModule:
echo "Testing if 2 threads can send data"
echo "-----------------------------------"
var threads: array[2, Thread[ThreadArgs]]
- let chan = wv_alloc(Channel[ptr int])
+ let chan = wv_alloc(ChannelSpscSinglePtr[ptr int])
chan[].initialize()

createThread(threads[0], thread_func, ThreadArgs(ID: Receiver, chan: chan))
2 changes: 1 addition & 1 deletion weave/config.nim
@@ -19,7 +19,7 @@ const WV_MaxWorkers* {.intDefine.} = 255

# WV_Profile: turn on profiling

- const WV_CacheLineSize* {.intDefine.} = 128
+ const WV_CacheLinePadding* {.intDefine.} = 128
## Datastructures that are accessed from multiple threads
## are padded by this value to avoid
## false sharing / cache thrashing / cache ping-pong
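The renamed constant keeps the {.intDefine.} pragma, so the padding remains tunable per build without editing the source. A small sketch of how such a define behaves (flag usage illustrative):

    # Override at build time with: nim c -d:WV_CacheLinePadding=64 app.nim
    const WV_CacheLinePadding {.intDefine.} = 128  # default when no -d flag is given

    static:
      echo "cache line padding: ", WV_CacheLinePadding  # printed during compilation
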
4 changes: 2 additions & 2 deletions weave/memory/allocs.nim
@@ -35,7 +35,7 @@ proc wv_alloc*(T: typedesc): ptr T {.inline.}=
when defined(WV_useNimAlloc):
createSharedU(T)
else:
- cast[ptr T](c_malloc(sizeof(T)))
+ cast[ptr T](c_malloc(csize_t sizeof(T)))

proc wv_allocPtr*(T: typedesc[ptr], zero: static bool = false): T {.inline.}=
## Default allocator for the Picasso library
@@ -60,7 +60,7 @@ proc wv_alloc*(T: typedesc, len: SomeInteger): ptr UncheckedArray[T] {.inline.}
when defined(WV_useNimAlloc):
cast[type result](createSharedU(T, len))
else:
- cast[type result](c_malloc(len * sizeof(T)))
+ cast[type result](c_malloc(csize_t len*sizeof(T)))

proc wv_free*[T: ptr](p: T) {.inline.} =
when defined(WV_useNimAlloc):
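Taken together, the two wv_alloc overloads above form weave's typed malloc wrapper. A hedged usage sketch, relying only on the signatures visible in the hunk headers (assumes import of weave/memory/allocs):

    let x = wv_alloc(int)        # ptr int
    x[] = 1
    let buf = wv_alloc(int, 16)  # ptr UncheckedArray[int] with 16 elements
    buf[0] = 42                  # UncheckedArray pointers index directly
    wv_free(buf)
    wv_free(x)
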
3 changes: 1 addition & 2 deletions weave/memory/persistacks.nim
@@ -48,8 +48,7 @@ type
# Parents directly enqueue special actions like shutting down.
# So persistacks are in a global array
# and we need to avoid cache line conflict between workers
- pad: array[WV_CacheLineSize - N*sizeof(ptr T) - sizeof(pointer) - sizeof(int8), byte]
- stack: array[N, ptr T]
+ stack{.align:WV_CacheLinePadding.}: array[N, ptr T]
rawMem: ptr array[N, T]
len*: int8

