Improve stacktraces (#721)
* service/rpccommon: fixed typo

* proc: test parseG while target is in runtime.deferreturn

  runtime.deferreturn will change the value of curg._defer.fn in such a way that, if the target is stopped at just the right instruction, it may crash an incorrect implementation of parseG.

* proc/stack: handle stack barriers correctly

  Correctly handle stack barriers inserted during garbage collection.
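The parseG change guards against a one-instruction window inside runtime.deferreturn where curg._defer is still non-nil but curg._defer.fn has already been set to nil. Below is a minimal, self-contained sketch of that defensive read; the types (funcval, gdefer, gstruct) and the deferredPC helper are simplified stand-ins invented for illustration, not Delve's or the Go runtime's actual definitions.

package main

import "fmt"

// Simplified stand-ins for the runtime structures a debugger decodes.
type funcval struct{ entry uint64 }

type gdefer struct {
	fn   *funcval // nil for one instruction while runtime.deferreturn runs
	link *gdefer
}

type gstruct struct{ _defer *gdefer }

// deferredPC returns the entry PC of the topmost deferred call, or 0 when
// there is none or the record is in the transient nil-fn state.
func deferredPC(gp *gstruct) uint64 {
	d := gp._defer
	if d == nil || d.fn == nil { // tolerate the nil fn instead of panicking
		return 0
	}
	return d.fn.entry
}

func main() {
	// Target stopped mid runtime.deferreturn: _defer is non-nil, fn is nil.
	gp := &gstruct{_defer: &gdefer{}}
	fmt.Println(deferredPC(gp)) // prints 0 instead of causing a nil dereference
}

The TestNextInDeferReturn test added below exercises exactly this window by breaking on runtime.deferreturn and stepping through it.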
parent 8c96e275d0
commit e77595ce31
_fixtures/binarytrees.go (new file, 96 lines)
@@ -0,0 +1,96 @@
/* The Computer Language Benchmarks Game
 * http://benchmarksgame.alioth.debian.org/
 *
 * based on Go program by The Go Authors.
 * based on C program by Kevin Carson
 * flag.Arg hack by Isaac Gouy
 * modified by Jamil Djadala to use goroutines
 * modified by Chai Shushan
 *
 */

package main

import (
	"flag"
	"fmt"
	"runtime"
	"strconv"
	"sync"
)

var minDepth = 4
var n = 20

func main() {
	runtime.GOMAXPROCS(runtime.NumCPU() * 2)

	flag.Parse()
	if flag.NArg() > 0 {
		n, _ = strconv.Atoi(flag.Arg(0))
	}

	maxDepth := n
	if minDepth+2 > n {
		maxDepth = minDepth + 2
	}
	stretchDepth := maxDepth + 1

	check_l := bottomUpTree(0, stretchDepth).ItemCheck()
	fmt.Printf("stretch tree of depth %d\t check: %d\n", stretchDepth, check_l)

	longLivedTree := bottomUpTree(0, maxDepth)

	result_trees := make([]int, maxDepth+1)
	result_check := make([]int, maxDepth+1)

	var wg sync.WaitGroup
	for depth_l := minDepth; depth_l <= maxDepth; depth_l += 2 {
		wg.Add(1)
		go func(depth int) {
			iterations := 1 << uint(maxDepth-depth+minDepth)
			check := 0

			for i := 1; i <= iterations; i++ {
				check += bottomUpTree(i, depth).ItemCheck()
				check += bottomUpTree(-i, depth).ItemCheck()
			}
			result_trees[depth] = iterations * 2
			result_check[depth] = check

			wg.Done()
		}(depth_l)
	}
	wg.Wait()

	for depth := minDepth; depth <= maxDepth; depth += 2 {
		fmt.Printf("%d\t trees of depth %d\t check: %d\n",
			result_trees[depth], depth, result_check[depth],
		)
	}
	fmt.Printf("long lived tree of depth %d\t check: %d\n",
		maxDepth, longLivedTree.ItemCheck(),
	)
}

func bottomUpTree(item, depth int) *Node {
	if depth <= 0 {
		return &Node{item, nil, nil}
	}
	return &Node{item,
		bottomUpTree(2*item-1, depth-1),
		bottomUpTree(2*item, depth-1),
	}
}

type Node struct {
	item        int
	left, right *Node
}

func (self *Node) ItemCheck() int {
	if self.left == nil {
		return self.item
	}
	return self.item + self.left.ItemCheck() - self.right.ItemCheck()
}
@@ -2402,3 +2402,111 @@ func BenchmarkTrace(b *testing.B) {
		b.StopTimer()
	})
}

func TestNextInDeferReturn(t *testing.T) {
	// runtime.deferreturn updates the G struct in a way that, for one
	// instruction, leaves the curg._defer field non-nil but with the
	// curg._defer.fn field being nil.
	// We need to deal with this without panicking.
	withTestProcess("defercall", t, func(p *Process, fixture protest.Fixture) {
		_, err := setFunctionBreakpoint(p, "runtime.deferreturn")
		assertNoError(err, t, "setFunctionBreakpoint()")
		assertNoError(p.Continue(), t, "First Continue()")
		for i := 0; i < 20; i++ {
			assertNoError(p.Next(), t, fmt.Sprintf("Next() %d", i))
		}
	})
}

func getg(goid int, gs []*G) *G {
	for _, g := range gs {
		if g.ID == goid {
			return g
		}
	}
	return nil
}

func TestStacktraceWithBarriers(t *testing.T) {
	// Go's garbage collector will insert stack barriers into stacks.
	// A stack barrier is inserted by overwriting the return address of a
	// stack frame with the address of runtime.stackBarrier.
	// The original return address is saved into the stkbar slice inside the G
	// struct.
	withTestProcess("binarytrees", t, func(p *Process, fixture protest.Fixture) {
		// We want to get a user goroutine with a stack barrier; to get that we execute the program until runtime.gcInstallStackBarrier is executed AND the goroutine it was executed on contains a call to main.bottomUpTree
		_, err := setFunctionBreakpoint(p, "runtime.gcInstallStackBarrier")
		assertNoError(err, t, "setFunctionBreakpoint()")
		stackBarrierGoids := []int{}
		for len(stackBarrierGoids) == 0 {
			assertNoError(p.Continue(), t, "Continue()")
			gs, err := p.GoroutinesInfo()
			assertNoError(err, t, "GoroutinesInfo()")
			for _, th := range p.Threads {
				if th.CurrentBreakpoint == nil {
					continue
				}

				goidVar, err := evalVariable(p, "gp.goid")
				assertNoError(err, t, "evalVariable")
				goid, _ := constant.Int64Val(goidVar.Value)

				if g := getg(int(goid), gs); g != nil {
					stack, err := g.Stacktrace(50)
					assertNoError(err, t, fmt.Sprintf("Stacktrace(goroutine = %d)", goid))
					for _, frame := range stack {
						if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.bottomUpTree" {
							stackBarrierGoids = append(stackBarrierGoids, int(goid))
							break
						}
					}
				}
			}
		}

		if len(stackBarrierGoids) == 0 {
			t.Fatalf("Could not find a goroutine with stack barriers")
		}

		t.Logf("stack barrier goids: %v\n", stackBarrierGoids)

		assertNoError(p.StepOut(), t, "StepOut()")

		gs, err := p.GoroutinesInfo()
		assertNoError(err, t, "GoroutinesInfo()")

		for _, goid := range stackBarrierGoids {
			g := getg(goid, gs)

			stack, err := g.Stacktrace(200)
			assertNoError(err, t, "Stacktrace()")

			// Check that either main.main or main.main.func1 appears in the
			// stacktrace of this goroutine. If we failed to resolve stack barriers
			// correctly the stacktrace will be truncated and neither main.main nor
			// main.main.func1 will appear.
			found := false
			for _, frame := range stack {
				if frame.Current.Fn == nil {
					continue
				}
				if name := frame.Current.Fn.Name; name == "main.main" || name == "main.main.func1" {
					found = true
				}
			}

			t.Logf("Stacktrace for %d:\n", goid)
			for _, frame := range stack {
				name := "<>"
				if frame.Current.Fn != nil {
					name = frame.Current.Fn.Name
				}
				t.Logf("\t%s [CFA: %x Ret: %x] at %s:%d", name, frame.CFA, frame.Ret, frame.Current.File, frame.Current.Line)
			}

			if !found {
				t.Logf("Truncated stacktrace for %d\n", goid)
			}
		}
	})
}
@@ -7,6 +7,11 @@ import (
	"github.com/derekparker/delve/dwarf/frame"
)

// This code is partly adapted from runtime.gentraceback in
// $GOROOT/src/runtime/traceback.go

const runtimeStackBarrier = "runtime.stackBarrier"

// NoReturnAddr is returned when return address
// could not be found during stack trace.
type NoReturnAddr struct {
@@ -29,6 +34,8 @@ type Stackframe struct {
	FDE *frame.FrameDescriptionEntry
	// Return address for this stack frame (as read from the stack frame itself).
	Ret uint64
	// Address to the memory location containing the return address
	addrret uint64
}

// Scope returns a new EvalScope using this frame.
@@ -49,18 +56,18 @@ func (t *Thread) ReturnAddress() (uint64, error) {
	return locations[1].Current.PC, nil
}

-func (t *Thread) stackIterator() (*stackIterator, error) {
+func (t *Thread) stackIterator(stkbar []savedLR, stkbarPos int) (*stackIterator, error) {
	regs, err := t.Registers(false)
	if err != nil {
		return nil, err
	}
-	return newStackIterator(t.dbp, regs.PC(), regs.SP()), nil
+	return newStackIterator(t.dbp, regs.PC(), regs.SP(), stkbar, stkbarPos), nil
}

// Stacktrace returns the stack trace for thread.
// Note the locations in the array are return addresses not call addresses.
func (t *Thread) Stacktrace(depth int) ([]Stackframe, error) {
-	it, err := t.stackIterator()
+	it, err := t.stackIterator(nil, -1)
	if err != nil {
		return nil, err
	}
@@ -68,10 +75,14 @@ func (t *Thread) Stacktrace(depth int) ([]Stackframe, error) {
}

func (g *G) stackIterator() (*stackIterator, error) {
-	if g.thread != nil {
-		return g.thread.stackIterator()
+	stkbar, err := g.stkbar()
+	if err != nil {
+		return nil, err
	}
-	return newStackIterator(g.dbp, g.PC, g.SP), nil
+	if g.thread != nil {
+		return g.thread.stackIterator(stkbar, g.stkbarPos)
+	}
+	return newStackIterator(g.dbp, g.PC, g.SP, stkbar, g.stkbarPos), nil
}

// Stacktrace returns the stack trace for a goroutine.
@@ -108,10 +119,35 @@ type stackIterator struct {
	frame Stackframe
	dbp   *Process
	err   error
+
+	stackBarrierPC uint64
+	stkbar         []savedLR
}

-func newStackIterator(dbp *Process, pc, sp uint64) *stackIterator {
-	return &stackIterator{pc: pc, sp: sp, top: true, dbp: dbp, err: nil, atend: false}
+type savedLR struct {
+	ptr uint64
+	val uint64
+}
+
+func newStackIterator(dbp *Process, pc, sp uint64, stkbar []savedLR, stkbarPos int) *stackIterator {
+	stackBarrierPC := dbp.goSymTable.LookupFunc(runtimeStackBarrier).Entry
+	if stkbar != nil {
+		fn := dbp.goSymTable.PCToFunc(pc)
+		if fn != nil && fn.Name == runtimeStackBarrier {
+			// We caught the goroutine as it's executing the stack barrier; we must
+			// determine whether or not g.stkbarPos has already been incremented.
+			if len(stkbar) > 0 && stkbar[stkbarPos].ptr < sp {
+				// runtime.stackBarrier has not incremented stkbarPos.
+			} else if stkbarPos > 0 && stkbar[stkbarPos-1].ptr < sp {
+				// runtime.stackBarrier has incremented stkbarPos.
+				stkbarPos--
+			} else {
+				return &stackIterator{err: fmt.Errorf("failed to unwind through stackBarrier at SP %x", sp)}
+			}
+		}
+		stkbar = stkbar[stkbarPos:]
+	}
+	return &stackIterator{pc: pc, sp: sp, top: true, dbp: dbp, err: nil, atend: false, stackBarrierPC: stackBarrierPC, stkbar: stkbar}
}

// Next points the iterator to the next stack frame.
@@ -141,6 +177,13 @@ func (it *stackIterator) Next() bool {
		it.atend = true
		return true
	}

	if it.stkbar != nil && it.frame.Ret == it.stackBarrierPC && it.frame.addrret == it.stkbar[0].ptr {
		// Skip stack barrier frames
		it.frame.Ret = it.stkbar[0].val
		it.stkbar = it.stkbar[1:]
	}

	// Look for "top of stack" functions.
	if it.frame.Current.Fn.Name == "runtime.goexit" || it.frame.Current.Fn.Name == "runtime.rt0_go" || it.frame.Current.Fn.Name == "runtime.mcall" {
		it.atend = true
@@ -183,7 +226,7 @@ func (dbp *Process) frameInfo(pc, sp uint64, top bool) (Stackframe, error) {
	if err != nil {
		return Stackframe{}, err
	}
-	r := Stackframe{Current: Location{PC: pc, File: f, Line: l, Fn: fn}, CFA: cfa, FDE: fde, Ret: binary.LittleEndian.Uint64(data)}
+	r := Stackframe{Current: Location{PC: pc, File: f, Line: l, Fn: fn}, CFA: cfa, FDE: fde, Ret: binary.LittleEndian.Uint64(data), addrret: uint64(retaddr)}
	if !top {
		r.Call.File, r.Call.Line, r.Call.Fn = dbp.PCToLine(pc - 1)
		r.Call.PC, _, _ = dbp.goSymTable.LineToPC(r.Call.File, r.Call.Line)
@@ -122,6 +122,8 @@ type G struct {
	GoPC       uint64 // PC of 'go' statement that created this goroutine.
	WaitReason string // Reason for goroutine being parked.
	Status     uint64
	stkbarVar  *Variable // stkbar field of g struct
	stkbarPos  int       // stkbarPos field of g struct

	// Information on goroutine location
	CurrentLoc Location
@@ -394,6 +396,8 @@ func (gvar *Variable) parseG() (*G, error) {
	id, _ := constant.Int64Val(gvar.fieldVariable("goid").Value)
	gopc, _ := constant.Int64Val(gvar.fieldVariable("gopc").Value)
	waitReason := constant.StringVal(gvar.fieldVariable("waitreason").Value)
	stkbarVar, _ := gvar.structMember("stkbar")
	stkbarPos, _ := constant.Int64Val(gvar.fieldVariable("stkbarPos").Value)
	status, _ := constant.Int64Val(gvar.fieldVariable("atomicstatus").Value)
	f, l, fn := gvar.dbp.goSymTable.PCToLine(uint64(pc))
	g := &G{
@@ -405,6 +409,8 @@ func (gvar *Variable) parseG() (*G, error) {
		Status:     uint64(status),
		CurrentLoc: Location{PC: uint64(pc), File: f, Line: l, Fn: fn},
		variable:   gvar,
		stkbarVar:  stkbarVar,
		stkbarPos:  int(stkbarPos),
		dbp:        gvar.dbp,
	}
	return g, nil
@@ -490,6 +496,28 @@ func (g *G) Go() Location {
	return Location{PC: g.GoPC, File: f, Line: l, Fn: fn}
}

// Returns the list of saved return addresses used by stack barriers
func (g *G) stkbar() ([]savedLR, error) {
	g.stkbarVar.loadValue(LoadConfig{false, 1, 0, int(g.stkbarVar.Len), 3})
	if g.stkbarVar.Unreadable != nil {
		return nil, fmt.Errorf("unreadable stkbar: %v\n", g.stkbarVar.Unreadable)
	}
	r := make([]savedLR, len(g.stkbarVar.Children))
	for i, child := range g.stkbarVar.Children {
		for _, field := range child.Children {
			switch field.Name {
			case "savedLRPtr":
				ptr, _ := constant.Int64Val(field.Value)
				r[i].ptr = uint64(ptr)
			case "savedLRVal":
				val, _ := constant.Int64Val(field.Value)
				r[i].val = uint64(val)
			}
		}
	}
	return r, nil
}

// EvalVariable returns the value of the given expression (backwards compatibility).
func (scope *EvalScope) EvalVariable(name string, cfg LoadConfig) (*Variable, error) {
	return scope.EvalExpression(name, cfg)
@@ -403,7 +403,7 @@ func (err *internalError) Error() string {
	var out bytes.Buffer
	fmt.Fprintf(&out, "Internal debugger error: %v\n", err.Err)
	for _, frame := range err.Stack {
-		fmt.Fprintf(&out, "%s (%#x)\n\t%s%d\n", frame.Func, frame.Pc, frame.File, frame.Line)
+		fmt.Fprintf(&out, "%s (%#x)\n\t%s:%d\n", frame.Func, frame.Pc, frame.File, frame.Line)
	}
	return out.String()
}