From 7bf9b52472603534432509c2c94e233a486abf83 Mon Sep 17 00:00:00 2001 From: doujiang24 Date: Thu, 28 Jul 2022 11:14:14 +0800 Subject: [PATCH] Revert to add missing events. --- src/cmd/trace/trace.go | 3 +- src/runtime/cgocall.go | 16 ------- src/runtime/proc.go | 96 +++++++++++++---------------------------- src/runtime/runtime2.go | 5 +-- src/runtime/trace.go | 9 +++- 5 files changed, 40 insertions(+), 89 deletions(-) diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go index 0139639dae5c1e..7dcb17d388a26c 100644 --- a/src/cmd/trace/trace.go +++ b/src/cmd/trace/trace.go @@ -1136,7 +1136,8 @@ func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int { func isSystemGoroutine(entryFn string) bool { // This mimics runtime.isSystemGoroutine as closely as // possible. - return entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.") + // Also, locked g in extra M (with empty entryFn) is system goroutine. + return entryFn == "" || entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.") } // firstTimestamp returns the timestamp of the first event record. diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index 9c0dfb9cbcd1e0..a0c9560fd0f655 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -226,22 +226,7 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { savedsp := unsafe.Pointer(gp.syscallsp) savedpc := gp.syscallpc exitsyscall() // coming out of cgo call - - if gp.m.isextra && gp.m.cgolevel == 0 { - // need a new goid, since the goroutine is reused from dead status. - gp.m.curg.goid = newgoid(gp.m.p.ptr()) - - if trace.enabled { - // delay emit trace events here, after getting a P. - systemstack(func() { - traceGoCreate(gp, 0) // no start pc for locked g in extra M - traceGoStart() - }) - } - } - gp.m.incgo = false - gp.m.cgolevel++ osPreemptExtExit(gp.m) @@ -251,7 +236,6 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { // The following code must not change to a different m. // This is enforced by checking incgo in the schedule function. - gp.m.cgolevel-- gp.m.incgo = true if gp.m != checkm { diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 5a7261f61378be..95988b7eba033d 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -1913,9 +1913,15 @@ func oneNewExtraM() { mp.lockedg.set(gp) mp.isextra = true gp.lockedm.set(mp) + gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) if raceenabled { gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum) } + if trace.enabled { + traceGoCreate(gp, 0) // no start pc + gp.traceseq++ + traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid)) + } // put on allg for garbage collector allgadd(gp) @@ -2509,15 +2515,12 @@ func execute(gp *g, inheritTime bool) { } if trace.enabled { - // delay emit trace events when entering go from c thread at the first level. - if !_g_.m.isextra || _g_.m.cgolevel != 0 { - if gp.syscallsp != 0 && gp.sysblocktraced { - // GoSysExit has to happen when we have a P, but before GoStart. - // So we emit it here. - traceGoSysExit(gp.sysexitticks) - } - traceGoStart() + // GoSysExit has to happen when we have a P, but before GoStart. + // So we emit it here. + if gp.syscallsp != 0 && gp.sysblocktraced { + traceGoSysExit(gp.sysexitticks) } + traceGoStart() } gogo(&gp.sched) @@ -3611,18 +3614,8 @@ func reentersyscall(pc, sp uintptr) { }) } - // the goroutine is finished when it's the locked g from extra M, - // and it's returning back to c thread. 
- backToCThread := _g_.m.isextra && _g_.m.cgolevel == 0 if trace.enabled { - if backToCThread { - systemstack(func() { - traceGoEnd() - traceProcStop(_g_.m.p.ptr()) - }) - } else { - systemstack(traceGoSysCall) - } + systemstack(traceGoSysCall) // systemstack itself clobbers g.sched.{pc,sp} and we might // need them later when the G is genuinely blocked in a // syscall @@ -3644,28 +3637,12 @@ func reentersyscall(pc, sp uintptr) { _g_.sysblocktraced = true pp := _g_.m.p.ptr() pp.m = 0 + _g_.m.oldp.set(pp) _g_.m.p = 0 - - if backToCThread { - // For a real syscall, we assume it will back to go after a very short time, - // it's reasonable in most cases. So, keep the P in _Psyscall is a better choice. - // But in the c to go scene, we can not assume there will be a short time - // for the next c to go call, to reuse the oldp in exitsyscall. - // And the Psyscall status P can only be retake by sysmon after 20us ~ 10ms, - // it means wasting P. - // So, it's better to handoffp here. - atomic.Store(&pp.status, _Pidle) - systemstack(func() { - handoffp(pp) - }) + atomic.Store(&pp.status, _Psyscall) + if sched.gcwaiting != 0 { + systemstack(entersyscall_gcwait) save(pc, sp) - } else { - _g_.m.oldp.set(pp) - atomic.Store(&pp.status, _Psyscall) - if sched.gcwaiting != 0 { - systemstack(entersyscall_gcwait) - save(pc, sp) - } } _g_.m.locks-- @@ -3784,11 +3761,8 @@ func exitsyscall() { _g_.m.oldp = 0 if exitsyscallfast(oldp) { if trace.enabled { - // delay emit trace events when entering go from c thread at the first level. - if !_g_.m.isextra || _g_.m.cgolevel != 0 { - if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { - systemstack(traceGoStart) - } + if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { + systemstack(traceGoStart) } } // There's a cpu for us, so we can run. @@ -3877,10 +3851,7 @@ func exitsyscallfast(oldp *p) bool { osyield() } } - // delay emit trace events when entering go from c thread at the first level. - if !_g_.m.isextra || _g_.m.cgolevel != 0 { - traceGoSysExit(0) - } + traceGoSysExit(0) } }) if ok { @@ -3897,10 +3868,6 @@ func exitsyscallfast(oldp *p) bool { //go:nosplit func exitsyscallfast_reacquired() { _g_ := getg() - // there is no oldp when entering go from c thread at the first level. - if _g_.m.isextra && _g_.m.cgolevel == 0 { - panic("oldp should not existing") - } if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { if trace.enabled { // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). @@ -4103,20 +4070,6 @@ func newproc(fn *funcval) { }) } -func newgoid(p *p) int64 { - if p.goidcache == p.goidcacheend { - // Sched.goidgen is the last allocated id, - // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. - // At startup sched.goidgen=0, so main goroutine receives goid=1. - p.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) - p.goidcache -= _GoidCacheBatch - 1 - p.goidcacheend = p.goidcache + _GoidCacheBatch - } - id := int64(p.goidcache) - p.goidcache++ - return id -} - // Create a new g in state _Grunnable, starting at fn. callerpc is the // address of the go statement that created this. The caller is responsible // for adding the new g to the scheduler. 
@@ -4180,7 +4133,16 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g { casgstatus(newg, _Gdead, _Grunnable) gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo)) - newg.goid = newgoid(_p_) + if _p_.goidcache == _p_.goidcacheend { + // Sched.goidgen is the last allocated id, + // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. + // At startup sched.goidgen=0, so main goroutine receives goid=1. + _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) + _p_.goidcache -= _GoidCacheBatch - 1 + _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch + } + newg.goid = int64(_p_.goidcache) + _p_.goidcache++ if raceenabled { newg.racectx = racegostart(callerpc) } diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 6fee67caa8e8e0..d308e455e7d814 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -540,13 +540,12 @@ type m struct { printlock int8 incgo bool // m is executing a cgo call isextra bool // m is an extra m - cgolevel int32 // level of cgo(c to go) calls currently in progress freeWait uint32 // if == 0, safe to free g0 and delete m (atomic) fastrand uint64 needextram bool traceback uint8 - ncgocall uint64 // number of cgo(go to c) calls in total - ncgo int32 // number of cgo(go to c) calls currently in progress + ncgocall uint64 // number of cgo calls in total + ncgo int32 // number of cgo calls currently in progress cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily cgoCallers *cgoCallers // cgo traceback if crashing in cgo call park note diff --git a/src/runtime/trace.go b/src/runtime/trace.go index 5cbc59c9702bc4..12200a5063da08 100644 --- a/src/runtime/trace.go +++ b/src/runtime/trace.go @@ -225,12 +225,17 @@ func StartTrace() error { // World is stopped, no need to lock. forEachGRace(func(gp *g) { status := readgstatus(gp) - if status != _Gdead { + if status != _Gdead || (gp.m != nil && gp.m.isextra) { gp.traceseq = 0 gp.tracelastp = getg().m.p // +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum. id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum}) traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID) + + if status == _Gdead { + gp.traceseq++ + traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid)) + } } if status == _Gwaiting { // traceEvGoWaiting is implied to have seq=1. @@ -240,7 +245,7 @@ func StartTrace() error { if status == _Gsyscall { gp.traceseq++ traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid)) - } else { + } else if gp.m == nil || !gp.m.isextra { gp.sysblocktraced = false } })
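
For context on the goid handling this patch restores in newproc1: the sketch below models the per-P goid batch cache in isolation, for readers unfamiliar with it. It is a simplified illustration only, assuming stand-in names (goidgen, goidCacheBatch, pCache) for sched.goidgen, _GoidCacheBatch, and the per-P cache fields; the real runtime uses its internal atomic package and hands ids out while holding the P.

    package main

    import (
            "fmt"
            "sync/atomic"
    )

    // goidCacheBatch plays the role of _GoidCacheBatch: how many ids a P
    // reserves from the global counter in one atomic operation.
    const goidCacheBatch = 16

    // goidgen plays the role of sched.goidgen, the last id allocated globally.
    var goidgen uint64

    // pCache models the goidcache/goidcacheend fields on a P.
    type pCache struct {
            goidcache    uint64
            goidcacheend uint64
    }

    func (p *pCache) newGoid() int64 {
            if p.goidcache == p.goidcacheend {
                    // Reserve the batch [goidgen+1, goidgen+goidCacheBatch].
                    // At startup goidgen == 0, so the first goroutine gets goid 1.
                    p.goidcache = atomic.AddUint64(&goidgen, goidCacheBatch)
                    p.goidcache -= goidCacheBatch - 1
                    p.goidcacheend = p.goidcache + goidCacheBatch
            }
            id := int64(p.goidcache)
            p.goidcache++
            return id
    }

    func main() {
            var p pCache
            for i := 0; i < 3; i++ {
                    fmt.Println(p.newGoid()) // 1, 2, 3
            }
    }

Seen this way, the revert moves extra-M goroutines back onto the same id space as every other goroutine: oneNewExtraM now takes a single id straight from sched.goidgen (and emits GoCreate/GoInSyscall immediately), instead of deferring id assignment to a newgoid call on the first cgo callback.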