diff --git a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go
index ccacc50fe1a4f..ae856a37d690f 100644
--- a/misc/cgo/test/cgo_test.go
+++ b/misc/cgo/test/cgo_test.go
@@ -92,6 +92,7 @@ func Test25143(t *testing.T) { test25143(t) }
 func Test23356(t *testing.T) { test23356(t) }
 func Test26066(t *testing.T) { test26066(t) }
 func Test26213(t *testing.T) { test26213(t) }
+func Test27660(t *testing.T) { test27660(t) }
 
 func BenchmarkCgoCall(b *testing.B)  { benchCgoCall(b) }
 func BenchmarkGoString(b *testing.B) { benchGoString(b) }
diff --git a/misc/cgo/test/test27660.go b/misc/cgo/test/test27660.go
new file mode 100644
index 0000000000000..8c23b7dc58f23
--- /dev/null
+++ b/misc/cgo/test/test27660.go
@@ -0,0 +1,54 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Stress the interaction between the race detector and cgo in an
+// attempt to reproduce the memory corruption described in #27660.
+// The bug was very timing sensitive; at the time of writing this
+// test would only trigger the bug about once out of every five runs.
+
+package cgotest
+
+// #include <unistd.h>
+import "C"
+
+import (
+	"context"
+	"math/rand"
+	"runtime"
+	"sync"
+	"testing"
+	"time"
+)
+
+func test27660(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	ints := make([]int, 100)
+	locks := make([]sync.Mutex, 100)
+	// Slowly create threads so that ThreadSanitizer is forced to
+	// frequently resize its SyncClocks.
+	for i := 0; i < 100; i++ {
+		go func() {
+			for ctx.Err() == nil {
+				// Sleep in C for long enough that it is likely that the runtime
+				// will retake this goroutine's currently wired P.
+				C.usleep(1000 /* 1ms */)
+				runtime.Gosched() // avoid starvation (see #28701)
+			}
+		}()
+		go func() {
+			// Trigger lots of synchronization and memory reads/writes to
+			// increase the likelihood that the race described in #27660
+			// results in corruption of ThreadSanitizer's internal state
+			// and thus an assertion failure or segfault.
+			for ctx.Err() == nil {
+				j := rand.Intn(100)
+				locks[j].Lock()
+				ints[j]++
+				locks[j].Unlock()
+			}
+		}()
+		time.Sleep(time.Millisecond)
+	}
+}
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 86bd2fb01c096..ca31408b50726 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -130,12 +130,19 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
 	mp.incgo = true
 	errno := asmcgocall(fn, arg)
 
-	// Call endcgo before exitsyscall because exitsyscall may
+	// Update accounting before exitsyscall because exitsyscall may
 	// reschedule us on to a different M.
-	endcgo(mp)
+	mp.incgo = false
+	mp.ncgo--
 
 	exitsyscall()
 
+	// Note that raceacquire must be called only after exitsyscall has
+	// wired this M to a P.
+	if raceenabled {
+		raceacquire(unsafe.Pointer(&racecgosync))
+	}
+
 	// From the garbage collector's perspective, time can move
 	// backwards in the sequence above. If there's a callback into
 	// Go code, GC will see this function at the call to
@@ -153,16 +160,6 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
 	return errno
 }
 
-//go:nosplit
-func endcgo(mp *m) {
-	mp.incgo = false
-	mp.ncgo--
-
-	if raceenabled {
-		raceacquire(unsafe.Pointer(&racecgosync))
-	}
-}
-
 // Call from C back to Go.
 //go:nosplit
 func cgocallbackg(ctxt uintptr) {
@@ -347,13 +344,14 @@ func unwindm(restore *bool) {
 		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
 	}
 
-	// Call endcgo to do the accounting that cgocall will not have a
-	// chance to do during an unwind.
+	// Do the accounting that cgocall will not have a chance to do
+	// during an unwind.
 	//
 	// In the case where a Go call originates from C, ncgo is 0
 	// and there is no matching cgocall to end.
 	if mp.ncgo > 0 {
-		endcgo(mp)
+		mp.incgo = false
+		mp.ncgo--
 	}
 
 	releasem(mp)
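
Context for the runtime change above (not part of the patch): cgocall pairs a racereleasemerge on the global racecgosync variable before every C call with the raceacquire shown above after it, so the race detector conservatively treats all cgo calls as synchronizing with one another. That is what keeps synchronization done purely inside C from producing false race reports. Below is a minimal sketch of a program that relies on this pairing under -race; publish and consume are hypothetical helper names invented for the illustration.

package main

/*
#include <pthread.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static int ready;

// Hand off a flag using only C-side locking, which the Go race
// detector cannot observe directly.
void publish(void) {
	pthread_mutex_lock(&mu);
	ready = 1;
	pthread_mutex_unlock(&mu);
}

int consume(void) {
	int r;
	pthread_mutex_lock(&mu);
	r = ready;
	pthread_mutex_unlock(&mu);
	return r;
}
*/
import "C"

// data lives in Go memory, so the race detector instruments it.
var data int

func main() {
	go func() {
		data = 42   // write Go memory...
		C.publish() // ...then publish it using only the C mutex
	}()
	// Spin until the C side reports that the write is visible.
	for C.consume() == 0 {
	}
	// The detector cannot see the pthread mutex; this read is accepted
	// only because every cgo call releases/acquires racecgosync, which
	// orders the goroutine's C.publish before our final C.consume.
	println(data)
}

The patch does not change that pairing; it only delays the acquire side until exitsyscall has wired the M to a P, which, per the new comment in cgocall, is a precondition for calling raceacquire safely.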