
Commit

Revert to add missing events.
doujiang24 committed Jul 28, 2022
1 parent 69830d4 commit 7bf9b52
Showing 5 changed files with 40 additions and 89 deletions.
3 changes: 2 additions & 1 deletion src/cmd/trace/trace.go
@@ -1136,7 +1136,8 @@ func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int {
 func isSystemGoroutine(entryFn string) bool {
 	// This mimics runtime.isSystemGoroutine as closely as
 	// possible.
-	return entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")
+	// Also, locked g in extra M (with empty entryFn) is system goroutine.
+	return entryFn == "" || entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")
 }

// firstTimestamp returns the timestamp of the first event record.
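Note: with this change an empty entry function is classified as a system goroutine, because the locked g of an extra M is created with no start pc (traceGoCreate(gp, 0) below), so its entryFn comes back empty. A standalone sketch of the predicate's behavior, for illustration only:

package main

import (
	"fmt"
	"strings"
)

// Copy of the predicate above. Note && binds tighter than ||, so this reads:
// entryFn == "" || (entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")).
func isSystemGoroutine(entryFn string) bool {
	return entryFn == "" || entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")
}

func main() {
	fmt.Println(isSystemGoroutine(""))                       // true: locked g in extra M
	fmt.Println(isSystemGoroutine("runtime.main"))           // false: the main goroutine
	fmt.Println(isSystemGoroutine("runtime.gcBgMarkWorker")) // true: runtime-internal
	fmt.Println(isSystemGoroutine("main.main"))              // false: user goroutine
}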
16 changes: 0 additions & 16 deletions src/runtime/cgocall.go
@@ -226,22 +226,7 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
 	savedsp := unsafe.Pointer(gp.syscallsp)
 	savedpc := gp.syscallpc
 	exitsyscall() // coming out of cgo call
 
-	if gp.m.isextra && gp.m.cgolevel == 0 {
-		// need a new goid, since the goroutine is reused from dead status.
-		gp.m.curg.goid = newgoid(gp.m.p.ptr())
-
-		if trace.enabled {
-			// delay emit trace events here, after getting a P.
-			systemstack(func() {
-				traceGoCreate(gp, 0) // no start pc for locked g in extra M
-				traceGoStart()
-			})
-		}
-	}
-
 	gp.m.incgo = false
-	gp.m.cgolevel++
 
 	osPreemptExtExit(gp.m)
 
@@ -251,7 +236,6 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
 	// The following code must not change to a different m.
 	// This is enforced by checking incgo in the schedule function.
 
-	gp.m.cgolevel--
 	gp.m.incgo = true
 
 	if gp.m != checkm {
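Note: cgocallbackg only runs when C calls into Go, and the deleted branch only fired when that happened on a thread the Go runtime did not create. A minimal two-file cgo sketch that exercises needm/oneNewExtraM and this callback path (hypothetical program, POSIX threads assumed):

// file: main.go
package main

/*
#cgo LDFLAGS: -lpthread
#include <pthread.h>

extern void GoCallback(); // implemented in Go, see callback.go

static void *threadFn(void *arg) {
	GoCallback(); // enters Go on a thread the runtime did not create
	return NULL;
}

static void startCThread(void) {
	pthread_t t;
	pthread_create(&t, NULL, threadFn, NULL);
	pthread_join(t, NULL);
}
*/
import "C"

func main() {
	C.startCThread()
}

// file: callback.go (separate file: a file that uses //export may only
// have declarations, not definitions, in its C preamble)
package main

import "C"
import "fmt"

//export GoCallback
func GoCallback() {
	// Runs on the locked g of an extra M, via cgocallbackg above.
	fmt.Println("hello from a C thread")
}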
96 changes: 29 additions & 67 deletions src/runtime/proc.go
@@ -1913,9 +1913,15 @@ func oneNewExtraM() {
 	mp.lockedg.set(gp)
 	mp.isextra = true
 	gp.lockedm.set(mp)
+	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
 	if raceenabled {
 		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
 	}
+	if trace.enabled {
+		traceGoCreate(gp, 0) // no start pc
+		gp.traceseq++
+		traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
+	}
 	// put on allg for garbage collector
 	allgadd(gp)
 
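Note: an extra M may not own a P at setup time, so the restored line takes one id at a time straight from the shared sched.goidgen counter rather than the P-local cache that newproc1 uses. A standalone model of that allocation (names assumed, not runtime code):

package main

import (
	"fmt"
	"sync/atomic"
)

var goidgen uint64 // models sched.goidgen: the last allocated goroutine id

// newExtraGoid models gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)):
// a single atomic increment, no P required.
func newExtraGoid() int64 {
	return int64(atomic.AddUint64(&goidgen, 1))
}

func main() {
	fmt.Println(newExtraGoid()) // 1
	fmt.Println(newExtraGoid()) // 2
}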
@@ -2509,15 +2515,12 @@ func execute(gp *g, inheritTime bool) {
 	}
 
 	if trace.enabled {
-		// delay emit trace events when entering go from c thread at the first level.
-		if !_g_.m.isextra || _g_.m.cgolevel != 0 {
-			if gp.syscallsp != 0 && gp.sysblocktraced {
-				// GoSysExit has to happen when we have a P, but before GoStart.
-				// So we emit it here.
-				traceGoSysExit(gp.sysexitticks)
-			}
-			traceGoStart()
-		}
+		// GoSysExit has to happen when we have a P, but before GoStart.
+		// So we emit it here.
+		if gp.syscallsp != 0 && gp.sysblocktraced {
+			traceGoSysExit(gp.sysexitticks)
+		}
+		traceGoStart()
 	}
 
 	gogo(&gp.sched)
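Note: the restored comment states an ordering rule: GoSysExit can only be emitted once the goroutine holds a P again, but it must appear before that goroutine's GoStart. A toy model of the resume sequence (illustration only, not the tracer's API):

package main

import "fmt"

// resumeEvents models the order execute() emits events in when a goroutine
// that was blocked in a syscall is scheduled again.
func resumeEvents(wasInSyscall bool) []string {
	var evs []string
	if wasInSyscall {
		evs = append(evs, "GoSysExit") // needs a P; must precede GoStart
	}
	return append(evs, "GoStart")
}

func main() {
	fmt.Println(resumeEvents(true))  // [GoSysExit GoStart]
	fmt.Println(resumeEvents(false)) // [GoStart]
}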
@@ -3611,18 +3614,8 @@ func reentersyscall(pc, sp uintptr) {
 		})
 	}
 
-	// the goroutine is finished when it's the locked g from extra M,
-	// and it's returning back to c thread.
-	backToCThread := _g_.m.isextra && _g_.m.cgolevel == 0
 	if trace.enabled {
-		if backToCThread {
-			systemstack(func() {
-				traceGoEnd()
-				traceProcStop(_g_.m.p.ptr())
-			})
-		} else {
-			systemstack(traceGoSysCall)
-		}
+		systemstack(traceGoSysCall)
 		// systemstack itself clobbers g.sched.{pc,sp} and we might
 		// need them later when the G is genuinely blocked in a
 		// syscall
@@ -3644,28 +3637,12 @@
 	_g_.sysblocktraced = true
 	pp := _g_.m.p.ptr()
 	pp.m = 0
+	_g_.m.oldp.set(pp)
 	_g_.m.p = 0
 
-	if backToCThread {
-		// For a real syscall, we assume it will back to go after a very short time,
-		// it's reasonable in most cases. So, keep the P in _Psyscall is a better choice.
-		// But in the c to go scene, we can not assume there will be a short time
-		// for the next c to go call, to reuse the oldp in exitsyscall.
-		// And the Psyscall status P can only be retake by sysmon after 20us ~ 10ms,
-		// it means wasting P.
-		// So, it's better to handoffp here.
-		atomic.Store(&pp.status, _Pidle)
-		systemstack(func() {
-			handoffp(pp)
-		})
-	} else {
-		_g_.m.oldp.set(pp)
-		atomic.Store(&pp.status, _Psyscall)
-		if sched.gcwaiting != 0 {
-			systemstack(entersyscall_gcwait)
-			save(pc, sp)
-		}
-	}
+	atomic.Store(&pp.status, _Psyscall)
+	if sched.gcwaiting != 0 {
+		systemstack(entersyscall_gcwait)
+		save(pc, sp)
+	}
 
 	_g_.m.locks--
@@ -3784,11 +3761,8 @@ func exitsyscall() {
 	_g_.m.oldp = 0
 	if exitsyscallfast(oldp) {
 		if trace.enabled {
-			// delay emit trace events when entering go from c thread at the first level.
-			if !_g_.m.isextra || _g_.m.cgolevel != 0 {
-				if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
-					systemstack(traceGoStart)
-				}
-			}
+			if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
+				systemstack(traceGoStart)
+			}
 		}
 		// There's a cpu for us, so we can run.
@@ -3877,10 +3851,7 @@ func exitsyscallfast(oldp *p) bool {
 					osyield()
 				}
 			}
-			// delay emit trace events when entering go from c thread at the first level.
-			if !_g_.m.isextra || _g_.m.cgolevel != 0 {
-				traceGoSysExit(0)
-			}
+			traceGoSysExit(0)
 		}
 	})
 	if ok {
@@ -3897,10 +3868,6 @@
 //go:nosplit
 func exitsyscallfast_reacquired() {
 	_g_ := getg()
-	// there is no oldp when entering go from c thread at the first level.
-	if _g_.m.isextra && _g_.m.cgolevel == 0 {
-		panic("oldp should not existing")
-	}
 	if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
 		if trace.enabled {
 			// The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed).
@@ -4103,20 +4070,6 @@ func newproc(fn *funcval) {
 	})
 }
 
-func newgoid(p *p) int64 {
-	if p.goidcache == p.goidcacheend {
-		// Sched.goidgen is the last allocated id,
-		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
-		// At startup sched.goidgen=0, so main goroutine receives goid=1.
-		p.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
-		p.goidcache -= _GoidCacheBatch - 1
-		p.goidcacheend = p.goidcache + _GoidCacheBatch
-	}
-	id := int64(p.goidcache)
-	p.goidcache++
-	return id
-}
-
 // Create a new g in state _Grunnable, starting at fn. callerpc is the
 // address of the go statement that created this. The caller is responsible
 // for adding the new g to the scheduler.
@@ -4180,7 +4133,16 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
 	casgstatus(newg, _Gdead, _Grunnable)
 	gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo))
 
-	newg.goid = newgoid(_p_)
+	if _p_.goidcache == _p_.goidcacheend {
+		// Sched.goidgen is the last allocated id,
+		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
+		// At startup sched.goidgen=0, so main goroutine receives goid=1.
+		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
+		_p_.goidcache -= _GoidCacheBatch - 1
+		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
+	}
+	newg.goid = int64(_p_.goidcache)
+	_p_.goidcache++
 	if raceenabled {
 		newg.racectx = racegostart(callerpc)
 	}
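Note: removing newgoid inlines the id batching back into newproc1: each P reserves a window of _GoidCacheBatch ids with one atomic add on sched.goidgen and then hands them out without further synchronization. A self-contained model (assuming the runtime's batch size of 16):

package main

import (
	"fmt"
	"sync/atomic"
)

const goidCacheBatch = 16 // models _GoidCacheBatch

var goidgen uint64 // models sched.goidgen: the last allocated id

type p struct {
	goidcache, goidcacheend uint64
}

func (pp *p) newgoid() int64 {
	if pp.goidcache == pp.goidcacheend {
		// Reserve the batch [goidgen+1, goidgen+goidCacheBatch] in one
		// atomic op; later ids in the batch are P-local and contention-free.
		pp.goidcache = atomic.AddUint64(&goidgen, goidCacheBatch)
		pp.goidcache -= goidCacheBatch - 1
		pp.goidcacheend = pp.goidcache + goidCacheBatch
	}
	id := int64(pp.goidcache)
	pp.goidcache++
	return id
}

func main() {
	var pp p
	for i := 0; i < 3; i++ {
		fmt.Println(pp.newgoid()) // 1, 2, 3
	}
}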
5 changes: 2 additions & 3 deletions src/runtime/runtime2.go
@@ -540,13 +540,12 @@ type m struct {
 	printlock     int8
 	incgo         bool   // m is executing a cgo call
 	isextra       bool   // m is an extra m
-	cgolevel      int32  // level of cgo(c to go) calls currently in progress
 	freeWait      uint32 // if == 0, safe to free g0 and delete m (atomic)
 	fastrand      uint64
 	needextram    bool
 	traceback     uint8
-	ncgocall      uint64 // number of cgo(go to c) calls in total
-	ncgo          int32  // number of cgo(go to c) calls currently in progress
+	ncgocall      uint64      // number of cgo calls in total
+	ncgo          int32       // number of cgo calls currently in progress
 	cgoCallersUse uint32      // if non-zero, cgoCallers in use temporarily
 	cgoCallers    *cgoCallers // cgo traceback if crashing in cgo call
 	park          note
9 changes: 7 additions & 2 deletions src/runtime/trace.go
@@ -225,12 +225,17 @@ func StartTrace() error {
 	// World is stopped, no need to lock.
 	forEachGRace(func(gp *g) {
 		status := readgstatus(gp)
-		if status != _Gdead {
+		if status != _Gdead || (gp.m != nil && gp.m.isextra) {
 			gp.traceseq = 0
 			gp.tracelastp = getg().m.p
 			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
 			id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum})
 			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
+
+			if status == _Gdead {
+				gp.traceseq++
+				traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
+			}
 		}
 		if status == _Gwaiting {
 			// traceEvGoWaiting is implied to have seq=1.
@@ -240,7 +245,7 @@
 		if status == _Gsyscall {
 			gp.traceseq++
 			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
-		} else {
+		} else if gp.m == nil || !gp.m.isextra {
 			gp.sysblocktraced = false
 		}
 	})
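Note: StartTrace must announce the locked gs of extra Ms even though they sit in _Gdead between cgo callbacks; otherwise a later GoSysExit emitted on the callback path would name a goroutine the trace has never seen. A small model of the per-goroutine decision (fields simplified, not runtime code):

package main

import "fmt"

type gstatus int

const (
	gDead gstatus = iota
	gRunnable
	gWaiting
	gSyscall
)

type g struct {
	goid    int64
	status  gstatus
	isExtra bool // models gp.m != nil && gp.m.isextra
}

// startTraceEvents models which events StartTrace emits for one goroutine.
func startTraceEvents(gp g) []string {
	var evs []string
	if gp.status != gDead || gp.isExtra {
		evs = append(evs, "GoCreate")
		if gp.status == gDead {
			// A dead-but-extra g is accounted as "in syscall", so a future
			// cgocallbackg can emit GoSysExit against it.
			evs = append(evs, "GoInSyscall")
		}
	}
	if gp.status == gWaiting {
		evs = append(evs, "GoWaiting")
	}
	if gp.status == gSyscall {
		evs = append(evs, "GoInSyscall")
	}
	return evs
}

func main() {
	fmt.Println(startTraceEvents(g{goid: 17, status: gDead, isExtra: true})) // [GoCreate GoInSyscall]
	fmt.Println(startTraceEvents(g{goid: 1, status: gWaiting}))              // [GoCreate GoWaiting]
}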
