proc: read context from sigtrampgo, fixes TestCgoStacktrace2 on 1.21 (#3401)

* logflags,proc: flag to log stacktrace execution

Add a log flag that makes the stacktracer log what it is doing (example invocation below).

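For example (the target package path here is just a placeholder), the new output can be selected with the existing --log/--log-output flags, and --log-dest can redirect it to a file:

    dlv debug --log --log-output=stack ./mypackage
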
* proc: read context from sigtrampgo, fixes TestCgoStacktrace2 on 1.21

Changes stacktrace code to read the signal context from the arguments
of sigtrampgo.
Also changes the automatic fatalthrow breakpoint for Go 1.21.
In combination these two changes fix TestCgoStacktrace2 on Go 1.21 on
various platforms.
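To illustrate what "read the signal context from the arguments of sigtrampgo" means: when the unwinder reaches a frame for runtime.sigtrampgo it looks up the signal context argument (ctx on unix-like systems, ep on Windows), reads the OS-saved register set out of the ucontext/CONTEXT structure in target memory, and resumes unwinding from those registers. The real code is in pkg/proc/stack.go and the new pkg/proc/stack_sigtramp.go below; what follows is only a minimal, self-contained sketch for linux/amd64, with the names memoryReader, readSigContextLinuxAMD64 and fakeMem invented for the example.

package main

import (
	"encoding/binary"
	"fmt"
)

// memoryReader is a hypothetical stand-in for delve's target-memory reader.
type memoryReader interface {
	ReadMemory(buf []byte, addr uint64) (int, error)
}

// sigContextRegs holds the registers recovered from a linux/amd64 ucontext_t.
type sigContextRegs struct {
	PC, SP, BP uint64
}

// readSigContextLinuxAMD64 reads rip/rsp/rbp from the mcontext embedded in a
// linux/amd64 ucontext_t. The offsets follow from the struct layout shown in
// pkg/proc/stack_sigtramp.go: uc_mcontext starts 40 bytes into ucontext_t and
// rbp, rsp, rip are the 11th, 16th and 17th 8-byte fields of mcontext.
func readSigContextLinuxAMD64(mem memoryReader, ucontextAddr uint64) (*sigContextRegs, error) {
	const (
		mcontextOff = 40
		rbpOff      = mcontextOff + 10*8
		rspOff      = mcontextOff + 15*8
		ripOff      = mcontextOff + 16*8
	)
	buf := make([]byte, ripOff+8)
	if _, err := mem.ReadMemory(buf, ucontextAddr); err != nil {
		return nil, fmt.Errorf("could not read ucontext: %v", err)
	}
	le := binary.LittleEndian
	return &sigContextRegs{
		PC: le.Uint64(buf[ripOff:]),
		SP: le.Uint64(buf[rspOff:]),
		BP: le.Uint64(buf[rbpOff:]),
	}, nil
}

// fakeMem is an in-memory buffer used only to exercise the sketch.
type fakeMem []byte

func (m fakeMem) ReadMemory(buf []byte, addr uint64) (int, error) {
	return copy(buf, m[addr:]), nil
}

func main() {
	mem := make(fakeMem, 256)
	binary.LittleEndian.PutUint64(mem[40+16*8:], 0x401000) // pretend rip saved by the signal
	regs, err := readSigContextLinuxAMD64(mem, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("resume unwinding at PC %#x\n", regs.PC)
}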
Alessandro Arzilli 2023-06-27 18:33:07 +02:00 committed by GitHub
parent 1647fa6b5e
commit d963eb1057
20 changed files with 1004 additions and 148 deletions

@ -1,14 +1,12 @@
Tests skipped by each supported backend:
* 386 skipped = 7
* 1 broken
* 386 skipped = 6
* 3 broken - cgo stacktraces
* 3 not implemented
* arm64 skipped = 2
* 1 broken
* arm64 skipped = 1
* 1 broken - global variable symbolication
* darwin/arm64 skipped = 1
* 1 broken - cgo stacktraces
* darwin/arm64 skipped = 2
* 2 broken - cgo stacktraces
* darwin/lldb skipped = 1
* 1 upstream issue
* freebsd skipped = 4
@ -17,11 +15,9 @@ Tests skipped by each supported backend:
* 1 broken
* pie skipped = 2
* 2 upstream issue - https://github.com/golang/go/issues/29322
* windows skipped = 5
* windows skipped = 4
* 1 broken
* 3 see https://github.com/go-delve/delve/issues/2768
* 1 upstream issue
* windows/arm64 skipped = 5
* windows/arm64 skipped = 4
* 3 broken
* 1 broken - cgo stacktraces
* 1 broken - step concurrent

@ -19,6 +19,7 @@ names selected from this list:
dap Log all DAP messages
fncall Log function call protocol
minidump Log minidump loading
stack Log stacktracer
Additionally --log-dest can be used to specify where the logs should be
written.

@ -412,6 +412,7 @@ names selected from this list:
dap Log all DAP messages
fncall Log function call protocol
minidump Log minidump loading
stack Log stacktracer
Additionally --log-dest can be used to specify where the logs should be
written.

@ -26,6 +26,7 @@ var rpc = false
var dap = false
var fnCall = false
var minidump = false
var stack = false
var logOut io.WriteCloser
@ -131,6 +132,15 @@ func MinidumpLogger() Logger {
return makeLogger(minidump, Fields{"layer": "core", "kind": "minidump"})
}
// Stack returns true if the stacktracer should be logged.
func Stack() bool {
return stack
}
func StackLogger() Logger {
return makeLogger(stack, Fields{"layer": "core", "kind": "stack"})
}
// WriteDAPListeningMessage writes the "DAP server listening" message in dap mode.
func WriteDAPListeningMessage(addr net.Addr) {
writeListeningMessage("DAP", addr)
@ -215,6 +225,8 @@ func Setup(logFlag bool, logstr, logDest string) error {
fnCall = true
case "minidump":
minidump = true
case "stack":
stack = true
default:
fmt.Fprintf(os.Stderr, "Warning: unknown log output value %q, run 'dlv help log' for usage.\n", logcmd)
}

@ -225,25 +225,16 @@ func amd64SwitchStack(it *stackIterator, _ *op.DwarfRegisters) bool {
it.switchToGoroutineStack()
return true
default:
if it.systemstack && it.top && it.g != nil && strings.HasPrefix(it.frame.Current.Fn.Name, "runtime.") && it.frame.Current.Fn.Name != "runtime.throw" && it.frame.Current.Fn.Name != "runtime.fatalthrow" {
// The runtime switches to the system stack in multiple places.
// This usually happens through a call to runtime.systemstack but there
// are functions that switch to the system stack manually (for example
// runtime.morestack).
// Since we are only interested in printing the system stack for cgo
// calls we switch directly to the goroutine stack if we detect that the
// function at the top of the stack is a runtime function.
//
// The function "runtime.throw" is deliberately excluded from this
// because it can end up in the stack during a cgo call and switching to
// the goroutine stack will exclude all the C functions from the stack
// trace.
case "runtime.newstack", "runtime.systemstack":
if it.systemstack && it.g != nil {
it.switchToGoroutineStack()
return true
}
return false
default:
return false
}
}

@ -5,7 +5,6 @@ import (
"encoding/binary"
"fmt"
"runtime"
"strings"
"github.com/go-delve/delve/pkg/dwarf/frame"
"github.com/go-delve/delve/pkg/dwarf/op"
@ -269,15 +268,9 @@ func arm64SwitchStack(it *stackIterator, callFrameRegs *op.DwarfRegisters) bool
it.switchToGoroutineStack()
return true
}
default:
if it.systemstack && it.top && it.g != nil && strings.HasPrefix(it.frame.Current.Fn.Name, "runtime.") && it.frame.Current.Fn.Name != "runtime.throw" && it.frame.Current.Fn.Name != "runtime.fatalthrow" {
// The runtime switches to the system stack in multiple places.
// This usually happens through a call to runtime.systemstack but there
// are functions that switch to the system stack manually (for example
// runtime.morestack).
// Since we are only interested in printing the system stack for cgo
// calls we switch directly to the goroutine stack if we detect that the
// function at the top of the stack is a runtime function.
case "runtime.newstack", "runtime.systemstack":
if it.systemstack && it.g != nil {
it.switchToGoroutineStack()
return true
}

@ -285,7 +285,7 @@ func (bpstate *BreakpointState) checkCond(tgt *Target, breaklet *Breaklet, threa
nextDeferOk := true
if breaklet.Kind&NextDeferBreakpoint != 0 {
var err error
frames, err := ThreadStacktrace(thread, 2)
frames, err := ThreadStacktrace(tgt, thread, 2)
if err == nil {
nextDeferOk, _ = isPanicCall(frames)
if !nextDeferOk {
@ -297,7 +297,7 @@ func (bpstate *BreakpointState) checkCond(tgt *Target, breaklet *Breaklet, threa
case WatchOutOfScopeBreakpoint:
if breaklet.checkPanicCall {
frames, err := ThreadStacktrace(thread, 2)
frames, err := ThreadStacktrace(tgt, thread, 2)
if err == nil {
ipc, _ := isPanicCall(frames)
active = active && ipc

@ -245,8 +245,8 @@ func logRegisters(t *testing.T, regs proc.Registers, arch *proc.Arch) {
}
func TestCore(t *testing.T) {
if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
return
if runtime.GOOS != "linux" || runtime.GOARCH == "386" {
t.Skip("unsupported")
}
if runtime.GOOS == "linux" && os.Getenv("CI") == "true" && buildMode == "pie" {
t.Skip("disabled on linux, Github Actions, with PIE buildmode")
@ -268,7 +268,7 @@ func TestCore(t *testing.T) {
var panickingStack []proc.Stackframe
for _, g := range gs {
t.Logf("Goroutine %d", g.ID)
stack, err := g.Stacktrace(10, 0)
stack, err := proc.GoroutineStacktrace(p, g, 10, 0)
if err != nil {
t.Errorf("Stacktrace() on goroutine %v = %v", g, err)
}
@ -315,8 +315,11 @@ func TestCore(t *testing.T) {
}
func TestCoreFpRegisters(t *testing.T) {
if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
return
if runtime.GOOS != "linux" || runtime.GOARCH == "386" {
t.Skip("unsupported")
}
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64")
}
// in go1.10 the crash is executed on a different thread and registers are
// no longer available in the core dump.
@ -334,7 +337,7 @@ func TestCoreFpRegisters(t *testing.T) {
var regs proc.Registers
for _, thread := range p.ThreadList() {
frames, err := proc.ThreadStacktrace(thread, 10)
frames, err := proc.ThreadStacktrace(p, thread, 10)
if err != nil {
t.Errorf("ThreadStacktrace for %x = %v", thread.ThreadID(), err)
continue
@ -402,8 +405,8 @@ func TestCoreFpRegisters(t *testing.T) {
}
func TestCoreWithEmptyString(t *testing.T) {
if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
return
if runtime.GOOS != "linux" || runtime.GOARCH == "386" {
t.Skip("unsupported")
}
if runtime.GOOS == "linux" && os.Getenv("CI") == "true" && buildMode == "pie" {
t.Skip("disabled on linux, Github Actions, with PIE buildmode")
@ -417,7 +420,7 @@ func TestCoreWithEmptyString(t *testing.T) {
var mainFrame *proc.Stackframe
mainSearch:
for _, g := range gs {
stack, err := g.Stacktrace(10, 0)
stack, err := proc.GoroutineStacktrace(p, g, 10, 0)
assertNoError(err, t, "Stacktrace()")
for _, frame := range stack {
if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.main" {
@ -466,7 +469,7 @@ func TestMinidump(t *testing.T) {
t.Logf("%d goroutines", len(gs))
foundMain, foundTime := false, false
for _, g := range gs {
stack, err := g.Stacktrace(10, 0)
stack, err := proc.GoroutineStacktrace(p, g, 10, 0)
if err != nil {
t.Errorf("Stacktrace() on goroutine %v = %v", g, err)
}

@ -90,9 +90,9 @@ func ConvertEvalScope(dbp *Target, gid int64, frame, deferCall int) (*EvalScope,
var locs []Stackframe
if g != nil {
locs, err = g.Stacktrace(frame+1, opts)
locs, err = GoroutineStacktrace(dbp, g, frame+1, opts)
} else {
locs, err = ThreadStacktrace(ct, frame+1)
locs, err = ThreadStacktrace(dbp, ct, frame+1)
}
if err != nil {
return nil, err
@ -145,7 +145,7 @@ func FrameToScope(t *Target, thread MemoryReadWriter, g *G, frames ...Stackframe
// ThreadScope returns an EvalScope for the given thread.
func ThreadScope(t *Target, thread Thread) (*EvalScope, error) {
locations, err := ThreadStacktrace(thread, 1)
locations, err := ThreadStacktrace(t, thread, 1)
if err != nil {
return nil, err
}
@ -157,7 +157,7 @@ func ThreadScope(t *Target, thread Thread) (*EvalScope, error) {
// GoroutineScope returns an EvalScope for the goroutine running on the given thread.
func GoroutineScope(t *Target, thread Thread) (*EvalScope, error) {
locations, err := ThreadStacktrace(thread, 1)
locations, err := ThreadStacktrace(t, thread, 1)
if err != nil {
return nil, err
}

@ -152,25 +152,16 @@ func i386SwitchStack(it *stackIterator, _ *op.DwarfRegisters) bool {
it.switchToGoroutineStack()
return true
default:
if it.systemstack && it.top && it.g != nil && strings.HasPrefix(it.frame.Current.Fn.Name, "runtime.") && it.frame.Current.Fn.Name != "runtime.throw" && it.frame.Current.Fn.Name != "runtime.fatalthrow" {
// The runtime switches to the system stack in multiple places.
// This usually happens through a call to runtime.systemstack but there
// are functions that switch to the system stack manually (for example
// runtime.morestack).
// Since we are only interested in printing the system stack for cgo
// calls we switch directly to the goroutine stack if we detect that the
// function at the top of the stack is a runtime function.
//
// The function "runtime.throw" is deliberately excluded from this
// because it can end up in the stack during a cgo call and switching to
// the goroutine stack will exclude all the C functions from the stack
// trace.
case "runtime.newstack", "runtime.systemstack":
if it.systemstack && it.g != nil {
it.switchToGoroutineStack()
return true
}
return false
default:
return false
}
}

@ -778,8 +778,8 @@ func TestRuntimeBreakpoint(t *testing.T) {
})
}
func returnAddress(thread proc.Thread) (uint64, error) {
locations, err := proc.ThreadStacktrace(thread, 2)
func returnAddress(tgt *proc.Target, thread proc.Thread) (uint64, error) {
locations, err := proc.ThreadStacktrace(tgt, thread, 2)
if err != nil {
return 0, err
}
@ -797,7 +797,7 @@ func TestFindReturnAddress(t *testing.T) {
if err != nil {
t.Fatal(err)
}
addr, err := returnAddress(p.CurrentThread())
addr, err := returnAddress(p, p.CurrentThread())
if err != nil {
t.Fatal(err)
}
@ -816,7 +816,7 @@ func TestFindReturnAddressTopOfStackFn(t *testing.T) {
if err := grp.Continue(); err != nil {
t.Fatal(err)
}
if _, err := returnAddress(p.CurrentThread()); err == nil {
if _, err := returnAddress(p, p.CurrentThread()); err == nil {
t.Fatal("expected error to be returned")
}
})
@ -913,7 +913,7 @@ func TestStacktrace(t *testing.T) {
for i := range stacks {
assertNoError(grp.Continue(), t, "Continue()")
locations, err := proc.ThreadStacktrace(p.CurrentThread(), 40)
locations, err := proc.ThreadStacktrace(p, p.CurrentThread(), 40)
assertNoError(err, t, "Stacktrace()")
if len(locations) != len(stacks[i])+2 {
@ -941,7 +941,7 @@ func TestStacktrace2(t *testing.T) {
withTestProcess("retstack", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
assertNoError(grp.Continue(), t, "Continue()")
locations, err := proc.ThreadStacktrace(p.CurrentThread(), 40)
locations, err := proc.ThreadStacktrace(p, p.CurrentThread(), 40)
assertNoError(err, t, "Stacktrace()")
if !stackMatch([]loc{{-1, "main.f"}, {16, "main.main"}}, locations, false) {
for i := range locations {
@ -951,7 +951,7 @@ func TestStacktrace2(t *testing.T) {
}
assertNoError(grp.Continue(), t, "Continue()")
locations, err = proc.ThreadStacktrace(p.CurrentThread(), 40)
locations, err = proc.ThreadStacktrace(p, p.CurrentThread(), 40)
assertNoError(err, t, "Stacktrace()")
if !stackMatch([]loc{{-1, "main.g"}, {17, "main.main"}}, locations, false) {
for i := range locations {
@ -1016,7 +1016,7 @@ func TestStacktraceGoroutine(t *testing.T) {
mainCount := 0
for i, g := range gs {
locations, err := g.Stacktrace(40, 0)
locations, err := proc.GoroutineStacktrace(p, g, 40, 0)
if err != nil {
// On windows we do not have frame information for goroutines doing system calls.
t.Logf("Could not retrieve goroutine stack for goid=%d: %v", g.ID, err)
@ -1174,7 +1174,7 @@ func TestIssue239(t *testing.T) {
}
func findFirstNonRuntimeFrame(p *proc.Target) (proc.Stackframe, error) {
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 10)
frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 10)
if err != nil {
return proc.Stackframe{}, err
}
@ -1328,7 +1328,7 @@ func TestFrameEvaluation(t *testing.T) {
found := make([]bool, 10)
for _, g := range gs {
frame := -1
frames, err := g.Stacktrace(40, 0)
frames, err := proc.GoroutineStacktrace(p, g, 40, 0)
if err != nil {
t.Logf("could not stacktrace goroutine %d: %v\n", g.ID, err)
continue
@ -1375,7 +1375,7 @@ func TestFrameEvaluation(t *testing.T) {
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG()")
frames, err := g.Stacktrace(40, 0)
frames, err := proc.GoroutineStacktrace(p, g, 40, 0)
assertNoError(err, t, "Stacktrace()")
t.Logf("Goroutine %d %#v", g.ID, g.Thread)
logStacktrace(t, p, frames)
@ -1916,7 +1916,7 @@ func TestIssue332_Part1(t *testing.T) {
setFileBreakpoint(p, t, fixture.Source, 8)
assertNoError(grp.Continue(), t, "Continue()")
assertNoError(grp.Next(), t, "first Next()")
locations, err := proc.ThreadStacktrace(p.CurrentThread(), 2)
locations, err := proc.ThreadStacktrace(p, p.CurrentThread(), 2)
assertNoError(err, t, "Stacktrace()")
if locations[0].Call.Fn == nil {
t.Fatalf("Not on a function")
@ -1943,7 +1943,7 @@ func TestIssue332_Part2(t *testing.T) {
// step until we enter changeMe
for {
assertNoError(grp.Step(), t, "Step()")
locations, err := proc.ThreadStacktrace(p.CurrentThread(), 2)
locations, err := proc.ThreadStacktrace(p, p.CurrentThread(), 2)
assertNoError(err, t, "Stacktrace()")
if locations[0].Call.Fn == nil {
t.Fatalf("Not on a function")
@ -2117,7 +2117,7 @@ func TestIssue462(t *testing.T) {
}()
assertNoError(grp.Continue(), t, "Continue()")
_, err := proc.ThreadStacktrace(p.CurrentThread(), 40)
_, err := proc.ThreadStacktrace(p, p.CurrentThread(), 40)
assertNoError(err, t, "Stacktrace()")
})
}
@ -2148,7 +2148,7 @@ func TestNextParked(t *testing.T) {
if g.Thread != nil {
continue
}
frames, _ := g.Stacktrace(5, 0)
frames, _ := proc.GoroutineStacktrace(p, g, 5, 0)
for _, frame := range frames {
// line 11 is the line where wg.Done is called
if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.sayhi" && frame.Current.Line < 11 {
@ -2200,7 +2200,7 @@ func TestStepParked(t *testing.T) {
}
t.Logf("Parked g is: %v\n", parkedg)
frames, _ := parkedg.Stacktrace(20, 0)
frames, _ := proc.GoroutineStacktrace(p, parkedg, 20, 0)
for _, frame := range frames {
name := ""
if frame.Call.Fn != nil {
@ -2464,7 +2464,7 @@ func TestStepConcurrentDirect(t *testing.T) {
// loop exited
break
}
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 20)
frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 20)
if err != nil {
t.Errorf("Could not get stacktrace of goroutine %d\n", p.SelectedGoroutine().ID)
} else {
@ -3307,9 +3307,10 @@ func TestCgoStacktrace(t *testing.T) {
t.Skip("disabled on macOS with go before version 1.8")
}
}
skipOn(t, "broken - cgo stacktraces", "386")
skipOn(t, "broken - cgo stacktraces", "windows", "arm64")
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 21) {
skipOn(t, "broken - cgo stacktraces", "windows", "arm64")
}
protest.MustHaveCgo(t)
// Tests that:
@ -3351,7 +3352,7 @@ func TestCgoStacktrace(t *testing.T) {
}
}
frames, err := g.Stacktrace(100, 0)
frames, err := proc.GoroutineStacktrace(p, g, 100, 0)
assertNoError(err, t, fmt.Sprintf("Stacktrace at iteration step %d", itidx))
logStacktrace(t, p, frames)
@ -3381,7 +3382,7 @@ func TestCgoStacktrace(t *testing.T) {
}
// also check that ThreadStacktrace produces the same list of frames
threadFrames, err := proc.ThreadStacktrace(p.CurrentThread(), 100)
threadFrames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 100)
assertNoError(err, t, fmt.Sprintf("ThreadStacktrace at iteration step %d", itidx))
if len(threadFrames) != len(frames) {
@ -3443,7 +3444,7 @@ func TestSystemstackStacktrace(t *testing.T) {
assertNoError(grp.Continue(), t, "second continue")
g, err := proc.GetG(p.CurrentThread())
assertNoError(err, t, "GetG")
frames, err := g.Stacktrace(100, 0)
frames, err := proc.GoroutineStacktrace(p, g, 100, 0)
assertNoError(err, t, "stacktrace")
logStacktrace(t, p, frames)
m := stacktraceCheck(t, []string{"!runtime.startpanic_m", "runtime.gopanic", "main.main"}, frames)
@ -3476,7 +3477,7 @@ func TestSystemstackOnRuntimeNewstack(t *testing.T) {
break
}
}
frames, err := g.Stacktrace(100, 0)
frames, err := proc.GoroutineStacktrace(p, g, 100, 0)
assertNoError(err, t, "stacktrace")
logStacktrace(t, p, frames)
m := stacktraceCheck(t, []string{"!runtime.newstack", "main.main"}, frames)
@ -3495,7 +3496,7 @@ func TestIssue1034(t *testing.T) {
withTestProcess("cgostacktest/", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(grp.Continue(), t, "Continue()")
frames, err := p.SelectedGoroutine().Stacktrace(10, 0)
frames, err := proc.GoroutineStacktrace(p, p.SelectedGoroutine(), 10, 0)
assertNoError(err, t, "Stacktrace")
scope := proc.FrameToScope(p, p.Memory(), nil, frames[2:]...)
args, _ := scope.FunctionArguments(normalLoadConfig)
@ -3819,7 +3820,7 @@ func TestInlinedStacktraceAndVariables(t *testing.T) {
// first inlined call
assertNoError(grp.Continue(), t, "Continue")
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 20)
frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 20)
assertNoError(err, t, "ThreadStacktrace")
t.Logf("Stacktrace:\n")
for i := range frames {
@ -3846,7 +3847,7 @@ func TestInlinedStacktraceAndVariables(t *testing.T) {
// second inlined call
assertNoError(grp.Continue(), t, "Continue")
frames, err = proc.ThreadStacktrace(p.CurrentThread(), 20)
frames, err = proc.ThreadStacktrace(p, p.CurrentThread(), 20)
assertNoError(err, t, "ThreadStacktrace (2)")
t.Logf("Stacktrace 2:\n")
for i := range frames {
@ -4161,7 +4162,7 @@ func TestIssue1264(t *testing.T) {
func TestReadDefer(t *testing.T) {
withTestProcess("deferstack", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
assertNoError(grp.Continue(), t, "Continue")
frames, err := p.SelectedGoroutine().Stacktrace(10, proc.StacktraceReadDefers)
frames, err := proc.GoroutineStacktrace(p, p.SelectedGoroutine(), 10, proc.StacktraceReadDefers)
assertNoError(err, t, "Stacktrace")
logStacktrace(t, p, frames)
@ -4372,7 +4373,7 @@ func TestIssue1469(t *testing.T) {
t.Logf("too many threads running goroutine %d", gid)
for _, thread := range gid2thread[gid] {
t.Logf("\tThread %d", thread.ThreadID())
frames, err := proc.ThreadStacktrace(thread, 20)
frames, err := proc.ThreadStacktrace(p, thread, 20)
if err != nil {
t.Logf("\t\tcould not get stacktrace %v", err)
}
@ -4603,9 +4604,12 @@ func TestIssue1615(t *testing.T) {
}
func TestCgoStacktrace2(t *testing.T) {
skipOn(t, "upstream issue", "windows")
skipOn(t, "broken", "386")
skipOn(t, "broken", "arm64")
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 21) {
skipOn(t, "upstream issue", "windows")
skipOn(t, "broken", "arm64")
skipOn(t, "broken", "386")
}
skipOn(t, "broken - cgo stacktraces", "darwin", "arm64")
protest.MustHaveCgo(t)
// If a panic happens during cgo execution the stacktrace should show the C
// function that caused the problem.
@ -4614,7 +4618,7 @@ func TestCgoStacktrace2(t *testing.T) {
if _, exited := err.(proc.ErrProcessExited); exited {
t.Fatal("process exited")
}
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 100)
frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 100)
assertNoError(err, t, "Stacktrace()")
logStacktrace(t, p, frames)
m := stacktraceCheck(t, []string{"C.sigsegv", "C.testfn", "main.main"}, frames)
@ -4725,7 +4729,7 @@ func TestIssue1795(t *testing.T) {
assertNoError(grp.Continue(), t, "Continue()")
assertLineNumber(p, t, 12, "wrong line number after Continue (1),")
assertNoError(grp.Continue(), t, "Continue()")
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 40)
frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 40)
assertNoError(err, t, "ThreadStacktrace()")
logStacktrace(t, p, frames)
if err := checkFrame(frames[0], "regexp.(*Regexp).doExecute", "", 0, false); err != nil {
@ -5066,7 +5070,7 @@ func TestStepOutPreservesGoroutine(t *testing.T) {
bestg := []*proc.G{}
for _, g := range gs {
t.Logf("stacktracing goroutine %d (%v)\n", g.ID, g.CurrentLoc)
frames, err := g.Stacktrace(20, 0)
frames, err := proc.GoroutineStacktrace(p, g, 20, 0)
assertNoError(err, t, "Stacktrace")
for _, frame := range frames {
if frame.Call.Fn != nil && frame.Call.Fn.Name == "main.coroutine" {
@ -5237,9 +5241,9 @@ func TestDump(t *testing.T) {
t.Errorf("Goroutine mismatch\nlive:\t%s\ncore:\t%s", convertGoroutine(gos[i]), convertGoroutine(cgos[i]))
}
frames, err := gos[i].Stacktrace(20, 0)
frames, err := proc.GoroutineStacktrace(p, gos[i], 20, 0)
assertNoError(err, t, fmt.Sprintf("Stacktrace for goroutine %d - live process", gos[i].ID))
cframes, err := cgos[i].Stacktrace(20, 0)
cframes, err := proc.GoroutineStacktrace(c, cgos[i], 20, 0)
assertNoError(err, t, fmt.Sprintf("Stacktrace for goroutine %d - core dump", gos[i].ID))
if len(frames) != len(cframes) {
@ -5912,7 +5916,7 @@ func TestStacktraceExtlinkMac(t *testing.T) {
withTestProcess("issue3194", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(grp.Continue(), t, "First Continue()")
frames, err := proc.ThreadStacktrace(p.CurrentThread(), 10)
frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 10)
assertNoError(err, t, "ThreadStacktrace")
logStacktrace(t, p, frames)
if len(frames) < 2 || frames[0].Call.Fn.Name != "main.main" || frames[1].Call.Fn.Name != "runtime.main" {

@ -6,10 +6,12 @@ import (
"fmt"
"go/constant"
"reflect"
"strings"
"github.com/go-delve/delve/pkg/dwarf/frame"
"github.com/go-delve/delve/pkg/dwarf/op"
"github.com/go-delve/delve/pkg/dwarf/reader"
"github.com/go-delve/delve/pkg/logflags"
)
// This code is partly adapted from runtime.gentraceback in
@ -91,7 +93,7 @@ func (frame *Stackframe) FramePointerOffset() int64 {
// ThreadStacktrace returns the stack trace for thread.
// Note the locations in the array are return addresses not call addresses.
func ThreadStacktrace(thread Thread, depth int) ([]Stackframe, error) {
func ThreadStacktrace(tgt *Target, thread Thread, depth int) ([]Stackframe, error) {
g, _ := GetG(thread)
if g == nil {
regs, err := thread.Registers()
@ -101,13 +103,13 @@ func ThreadStacktrace(thread Thread, depth int) ([]Stackframe, error) {
so := thread.BinInfo().PCToImage(regs.PC())
dwarfRegs := *(thread.BinInfo().Arch.RegistersToDwarfRegisters(so.StaticBase, regs))
dwarfRegs.ChangeFunc = thread.SetReg
it := newStackIterator(thread.BinInfo(), thread.ProcessMemory(), dwarfRegs, 0, nil, 0)
it := newStackIterator(tgt, thread.BinInfo(), thread.ProcessMemory(), dwarfRegs, 0, nil, 0)
return it.stacktrace(depth)
}
return g.Stacktrace(depth, 0)
return GoroutineStacktrace(tgt, g, depth, 0)
}
func (g *G) stackIterator(opts StacktraceOptions) (*stackIterator, error) {
func goroutineStackIterator(tgt *Target, g *G, opts StacktraceOptions) (*stackIterator, error) {
bi := g.variable.bi
if g.Thread != nil {
regs, err := g.Thread.Registers()
@ -118,13 +120,13 @@ func (g *G) stackIterator(opts StacktraceOptions) (*stackIterator, error) {
dwarfRegs := *(bi.Arch.RegistersToDwarfRegisters(so.StaticBase, regs))
dwarfRegs.ChangeFunc = g.Thread.SetReg
return newStackIterator(
bi, g.variable.mem,
tgt, bi, g.variable.mem,
dwarfRegs,
g.stack.hi, g, opts), nil
}
so := g.variable.bi.PCToImage(g.PC)
return newStackIterator(
bi, g.variable.mem,
tgt, bi, g.variable.mem,
bi.Arch.addrAndStackRegsToDwarfRegisters(so.StaticBase, g.PC, g.SP, g.BP, g.LR),
g.stack.hi, g, opts), nil
}
@ -145,10 +147,10 @@ const (
StacktraceG
)
// Stacktrace returns the stack trace for a goroutine.
// GoroutineStacktrace returns the stack trace for a goroutine.
// Note the locations in the array are return addresses not call addresses.
func (g *G) Stacktrace(depth int, opts StacktraceOptions) ([]Stackframe, error) {
it, err := g.stackIterator(opts)
func GoroutineStacktrace(tgt *Target, g *G, depth int, opts StacktraceOptions) ([]Stackframe, error) {
it, err := goroutineStackIterator(tgt, g, opts)
if err != nil {
return nil, err
}
@ -173,13 +175,14 @@ func (n NullAddrError) Error() string {
// required to iterate and walk the program
// stack.
type stackIterator struct {
pc uint64
top bool
atend bool
frame Stackframe
bi *BinaryInfo
mem MemoryReadWriter
err error
pc uint64
top bool
atend bool
frame Stackframe
target *Target
bi *BinaryInfo
mem MemoryReadWriter
err error
stackhi uint64
systemstack bool
@ -194,12 +197,12 @@ type stackIterator struct {
opts StacktraceOptions
}
func newStackIterator(bi *BinaryInfo, mem MemoryReadWriter, regs op.DwarfRegisters, stackhi uint64, g *G, opts StacktraceOptions) *stackIterator {
func newStackIterator(tgt *Target, bi *BinaryInfo, mem MemoryReadWriter, regs op.DwarfRegisters, stackhi uint64, g *G, opts StacktraceOptions) *stackIterator {
systemstack := true
if g != nil {
systemstack = g.SystemStack
}
return &stackIterator{pc: regs.PC(), regs: regs, top: true, bi: bi, mem: mem, err: nil, atend: false, stackhi: stackhi, systemstack: systemstack, g: g, opts: opts}
return &stackIterator{pc: regs.PC(), regs: regs, top: true, target: tgt, bi: bi, mem: mem, err: nil, atend: false, stackhi: stackhi, systemstack: systemstack, g: g, opts: opts}
}
// Next points the iterator to the next stack frame.
@ -208,11 +211,54 @@ func (it *stackIterator) Next() bool {
return false
}
if logflags.Stack() {
logger := logflags.StackLogger()
w := &strings.Builder{}
fmt.Fprintf(w, "current pc = %#x CFA = %#x FrameBase = %#x ", it.pc, it.regs.CFA, it.regs.FrameBase)
for i := 0; i < it.regs.CurrentSize(); i++ {
reg := it.regs.Reg(uint64(i))
if reg == nil {
continue
}
name, _, _ := it.bi.Arch.DwarfRegisterToString(i, reg)
fmt.Fprintf(w, " %s = %#x", name, reg.Uint64Val)
}
logger.Debugf("%s", w.String())
}
callFrameRegs, ret, retaddr := it.advanceRegs()
it.frame = it.newStackframe(ret, retaddr)
if logflags.Stack() {
logger := logflags.StackLogger()
fnname := "?"
if it.frame.Call.Fn != nil {
fnname = it.frame.Call.Fn.Name
}
logger.Debugf("new frame %#x %s:%d at %s", it.frame.Call.PC, it.frame.Call.File, it.frame.Call.Line, fnname)
}
if it.frame.Current.Fn != nil && it.frame.Current.Fn.Name == "runtime.sigtrampgo" && it.target != nil {
regs, err := it.readSigtrampgoContext()
if err != nil {
logflags.DebuggerLogger().Errorf("could not read runtime.sigtrampgo context: %v", err)
} else {
so := it.bi.PCToImage(regs.PC())
regs.StaticBase = so.StaticBase
it.pc = regs.PC()
it.regs = *regs
it.top = false
if it.g != nil && it.g.ID != 0 {
it.systemstack = !(uint64(it.regs.SP()) >= it.g.stack.lo && uint64(it.regs.SP()) < it.g.stack.hi)
}
logflags.StackLogger().Debugf("sigtramp context read")
return true
}
}
if it.opts&StacktraceSimple == 0 {
if it.bi.Arch.switchStack(it, &callFrameRegs) {
logflags.StackLogger().Debugf("stack switched")
return true
}
}
@ -398,11 +444,18 @@ func (it *stackIterator) advanceRegs() (callFrameRegs op.DwarfRegisters, ret uin
framectx = it.bi.Arch.fixFrameUnwindContext(fde.EstablishFrame(it.pc), it.pc, it.bi)
}
logger := logflags.StackLogger()
logger.Debugf("advanceRegs at %#x", it.pc)
cfareg, err := it.executeFrameRegRule(0, framectx.CFA, 0)
if cfareg == nil {
it.err = fmt.Errorf("CFA becomes undefined at PC %#x: %v", it.pc, err)
return op.DwarfRegisters{}, 0, 0
}
if logflags.Stack() {
logger.Debugf("\tCFA rule %s -> %#x", ruleString(&framectx.CFA, it.bi.Arch.RegnumToString), cfareg.Uint64Val)
}
it.regs.CFA = int64(cfareg.Uint64Val)
callimage := it.bi.PCToImage(it.pc)
@ -426,7 +479,16 @@ func (it *stackIterator) advanceRegs() (callFrameRegs op.DwarfRegisters, ret uin
callFrameRegs.AddReg(callFrameRegs.SPRegNum, cfareg)
for i, regRule := range framectx.Regs {
if logflags.Stack() {
logger.Debugf("\t%s rule %s ", it.bi.Arch.RegnumToString(i), ruleString(&regRule, it.bi.Arch.RegnumToString))
}
reg, err := it.executeFrameRegRule(i, regRule, it.regs.CFA)
if reg != nil {
logger.Debugf("\t\t-> %#x", reg.Uint64Val)
} else {
logger.Debugf("\t\t-> nothing (%v)", err)
}
callFrameRegs.AddReg(i, reg)
if i == framectx.RetAddrReg {
if reg == nil {
@ -701,3 +763,34 @@ func (d *Defer) DeferredFunc(p *Target) (file string, line int, fn *Function) {
file, line = bi.EntryLineForFunc(fn)
return file, line, fn
}
func ruleString(rule *frame.DWRule, regnumToString func(uint64) string) string {
switch rule.Rule {
case frame.RuleUndefined:
return "undefined"
case frame.RuleSameVal:
return "sameval"
case frame.RuleOffset:
return fmt.Sprintf("[cfa+%d]", rule.Offset)
case frame.RuleValOffset:
return fmt.Sprintf("cfa+%d", rule.Offset)
case frame.RuleRegister:
return fmt.Sprintf("R(%d)", rule.Reg)
case frame.RuleExpression:
w := &strings.Builder{}
op.PrettyPrint(w, rule.Expression, regnumToString)
return fmt.Sprintf("[expr(%s)]", w.String())
case frame.RuleValExpression:
w := &strings.Builder{}
op.PrettyPrint(w, rule.Expression, regnumToString)
return fmt.Sprintf("expr(%s)", w.String())
case frame.RuleArchitectural:
return "architectural"
case frame.RuleCFA:
return fmt.Sprintf("R(%d)+%d", rule.Reg, rule.Offset)
case frame.RuleFramePointer:
return fmt.Sprintf("[R(%d)] framepointer", rule.Reg)
default:
return fmt.Sprintf("unknown_rule(%d)", rule.Rule)
}
}

pkg/proc/stack_sigtramp.go (new file, 773 lines)

@ -0,0 +1,773 @@
package proc
import (
"encoding/binary"
"errors"
"fmt"
"unsafe"
"github.com/go-delve/delve/pkg/dwarf/op"
"github.com/go-delve/delve/pkg/dwarf/regnum"
"github.com/go-delve/delve/pkg/logflags"
)
// readSigtrampgoContext reads runtime.sigtrampgo context at the specified address
func (it *stackIterator) readSigtrampgoContext() (*op.DwarfRegisters, error) {
logger := logflags.DebuggerLogger()
scope := FrameToScope(it.target, it.mem, it.g, it.frame)
bi := it.bi
findvar := func(name string) *Variable {
vars, _ := scope.Locals(0)
for i := range vars {
if vars[i].Name == name {
return vars[i]
}
}
return nil
}
deref := func(v *Variable) (uint64, error) {
v.loadValue(loadSingleValue)
if v.Unreadable != nil {
return 0, fmt.Errorf("could not dereference %s: %v", v.Name, v.Unreadable)
}
if len(v.Children) < 1 {
return 0, fmt.Errorf("could not dereference %s (no children?)", v.Name)
}
logger.Debugf("%s address is %#x", v.Name, v.Children[0].Addr)
return v.Children[0].Addr, nil
}
getctxaddr := func() (uint64, error) {
ctxvar := findvar("ctx")
if ctxvar == nil {
return 0, errors.New("ctx variable not found")
}
addr, err := deref(ctxvar)
if err != nil {
return 0, err
}
return addr, nil
}
switch bi.GOOS {
case "windows":
epvar := findvar("ep")
if epvar == nil {
return nil, errors.New("ep variable not found")
}
epaddr, err := deref(epvar)
if err != nil {
return nil, err
}
switch bi.Arch.Name {
case "amd64":
return sigtrampContextWindowsAMD64(it.mem, epaddr)
case "arm64":
return sigtrampContextWindowsARM64(it.mem, epaddr)
default:
return nil, errors.New("not implemented")
}
case "linux":
addr, err := getctxaddr()
if err != nil {
return nil, err
}
switch bi.Arch.Name {
case "386":
return sigtrampContextLinux386(it.mem, addr)
case "amd64":
return sigtrampContextLinuxAMD64(it.mem, addr)
case "arm64":
return sigtrampContextLinuxARM64(it.mem, addr)
default:
return nil, errors.New("not implemented")
}
case "freebsd":
addr, err := getctxaddr()
if err != nil {
return nil, err
}
return sigtrampContextFreebsdAMD64(it.mem, addr)
case "darwin":
addr, err := getctxaddr()
if err != nil {
return nil, err
}
switch bi.Arch.Name {
case "amd64":
return sigtrampContextDarwinAMD64(it.mem, addr)
case "arm64":
return sigtrampContextDarwinARM64(it.mem, addr)
default:
return nil, errors.New("not implemented")
}
default:
return nil, errors.New("not implemented")
}
}
func sigtrampContextLinuxAMD64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
type stackt struct {
ss_sp uint64
ss_flags int32
pad_cgo_0 [4]byte
ss_size uintptr
}
type mcontext struct {
r8 uint64
r9 uint64
r10 uint64
r11 uint64
r12 uint64
r13 uint64
r14 uint64
r15 uint64
rdi uint64
rsi uint64
rbp uint64
rbx uint64
rdx uint64
rax uint64
rcx uint64
rsp uint64
rip uint64
eflags uint64
cs uint16
gs uint16
fs uint16
__pad0 uint16
err uint64
trapno uint64
oldmask uint64
cr2 uint64
fpstate uint64 // pointer
__reserved1 [8]uint64
}
type fpxreg struct {
significand [4]uint16
exponent uint16
padding [3]uint16
}
type fpstate struct {
cwd uint16
swd uint16
ftw uint16
fop uint16
rip uint64
rdp uint64
mxcsr uint32
mxcr_mask uint32
_st [8]fpxreg
_xmm [16][4]uint32
padding [24]uint32
}
type ucontext struct {
uc_flags uint64
uc_link uint64
uc_stack stackt
uc_mcontext mcontext
uc_sigmask [16]uint64
__fpregs_mem fpstate
}
buf := make([]byte, unsafe.Sizeof(ucontext{}))
_, err := mem.ReadMemory(buf, addr)
if err != nil {
return nil, err
}
regs := &(((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext)
dregs := make([]*op.DwarfRegister, regnum.AMD64MaxRegNum()+1)
dregs[regnum.AMD64_R8] = op.DwarfRegisterFromUint64(regs.r8)
dregs[regnum.AMD64_R9] = op.DwarfRegisterFromUint64(regs.r9)
dregs[regnum.AMD64_R10] = op.DwarfRegisterFromUint64(regs.r10)
dregs[regnum.AMD64_R11] = op.DwarfRegisterFromUint64(regs.r11)
dregs[regnum.AMD64_R12] = op.DwarfRegisterFromUint64(regs.r12)
dregs[regnum.AMD64_R13] = op.DwarfRegisterFromUint64(regs.r13)
dregs[regnum.AMD64_R14] = op.DwarfRegisterFromUint64(regs.r14)
dregs[regnum.AMD64_R15] = op.DwarfRegisterFromUint64(regs.r15)
dregs[regnum.AMD64_Rdi] = op.DwarfRegisterFromUint64(regs.rdi)
dregs[regnum.AMD64_Rsi] = op.DwarfRegisterFromUint64(regs.rsi)
dregs[regnum.AMD64_Rbp] = op.DwarfRegisterFromUint64(regs.rbp)
dregs[regnum.AMD64_Rbx] = op.DwarfRegisterFromUint64(regs.rbx)
dregs[regnum.AMD64_Rdx] = op.DwarfRegisterFromUint64(regs.rdx)
dregs[regnum.AMD64_Rax] = op.DwarfRegisterFromUint64(regs.rax)
dregs[regnum.AMD64_Rcx] = op.DwarfRegisterFromUint64(regs.rcx)
dregs[regnum.AMD64_Rsp] = op.DwarfRegisterFromUint64(regs.rsp)
dregs[regnum.AMD64_Rip] = op.DwarfRegisterFromUint64(regs.rip)
dregs[regnum.AMD64_Rflags] = op.DwarfRegisterFromUint64(regs.eflags)
dregs[regnum.AMD64_Cs] = op.DwarfRegisterFromUint64(uint64(regs.cs))
dregs[regnum.AMD64_Gs] = op.DwarfRegisterFromUint64(uint64(regs.gs))
dregs[regnum.AMD64_Fs] = op.DwarfRegisterFromUint64(uint64(regs.fs))
return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0), nil
}
func sigtrampContextLinux386(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
type sigcontext struct {
gs uint16
__gsh uint16
fs uint16
__fsh uint16
es uint16
__esh uint16
ds uint16
__dsh uint16
edi uint32
esi uint32
ebp uint32
esp uint32
ebx uint32
edx uint32
ecx uint32
eax uint32
trapno uint32
err uint32
eip uint32
cs uint16
__csh uint16
eflags uint32
esp_at_signal uint32
ss uint16
__ssh uint16
fpstate uint32 // pointer
oldmask uint32
cr2 uint32
}
type stackt struct {
ss_sp uint32 // pointer
ss_flags int32
ss_size uint32
}
type ucontext struct {
uc_flags uint32
uc_link uint32 // pointer
uc_stack stackt
uc_mcontext sigcontext
uc_sigmask uint32
}
buf := make([]byte, unsafe.Sizeof(ucontext{}))
_, err := mem.ReadMemory(buf, addr)
if err != nil {
return nil, err
}
regs := &(((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext)
dregs := make([]*op.DwarfRegister, regnum.I386MaxRegNum()+1)
dregs[regnum.I386_Gs] = op.DwarfRegisterFromUint64(uint64(regs.gs))
dregs[regnum.I386_Fs] = op.DwarfRegisterFromUint64(uint64(regs.fs))
dregs[regnum.I386_Es] = op.DwarfRegisterFromUint64(uint64(regs.es))
dregs[regnum.I386_Ds] = op.DwarfRegisterFromUint64(uint64(regs.ds))
dregs[regnum.I386_Edi] = op.DwarfRegisterFromUint64(uint64(regs.edi))
dregs[regnum.I386_Esi] = op.DwarfRegisterFromUint64(uint64(regs.esi))
dregs[regnum.I386_Ebp] = op.DwarfRegisterFromUint64(uint64(regs.ebp))
dregs[regnum.I386_Esp] = op.DwarfRegisterFromUint64(uint64(regs.esp))
dregs[regnum.I386_Ebx] = op.DwarfRegisterFromUint64(uint64(regs.ebx))
dregs[regnum.I386_Edx] = op.DwarfRegisterFromUint64(uint64(regs.edx))
dregs[regnum.I386_Ecx] = op.DwarfRegisterFromUint64(uint64(regs.ecx))
dregs[regnum.I386_Eax] = op.DwarfRegisterFromUint64(uint64(regs.eax))
dregs[regnum.I386_Eip] = op.DwarfRegisterFromUint64(uint64(regs.eip))
dregs[regnum.I386_Cs] = op.DwarfRegisterFromUint64(uint64(regs.cs))
dregs[regnum.I386_Ss] = op.DwarfRegisterFromUint64(uint64(regs.ss))
return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.I386_Eip, regnum.I386_Esp, regnum.I386_Ebp, 0), nil
}
func sigtrampContextLinuxARM64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
type sigcontext struct {
fault_address uint64
regs [31]uint64
sp uint64
pc uint64
pstate uint64
_pad [8]byte
__reserved [4096]byte
}
type stackt struct {
ss_sp uint64 // pointer
ss_flags int32
pad_cgo_0 [4]byte
ss_size uint64
}
type ucontext struct {
uc_flags uint64
uc_link uint64 // pointer
uc_stack stackt
uc_sigmask uint64
_pad [(1024 - 64) / 8]byte
_pad2 [8]byte
uc_mcontext sigcontext
}
buf := make([]byte, unsafe.Sizeof(ucontext{}))
_, err := mem.ReadMemory(buf, addr)
if err != nil {
return nil, err
}
regs := &(((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext)
dregs := make([]*op.DwarfRegister, regnum.ARM64MaxRegNum()+1)
for i := range regs.regs {
dregs[regnum.ARM64_X0+i] = op.DwarfRegisterFromUint64(regs.regs[i])
}
dregs[regnum.ARM64_SP] = op.DwarfRegisterFromUint64(regs.sp)
dregs[regnum.ARM64_PC] = op.DwarfRegisterFromUint64(regs.pc)
return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.ARM64_PC, regnum.ARM64_SP, regnum.ARM64_BP, regnum.ARM64_LR), nil
}
func sigtrampContextFreebsdAMD64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
type mcontext struct {
mc_onstack uint64
mc_rdi uint64
mc_rsi uint64
mc_rdx uint64
mc_rcx uint64
mc_r8 uint64
mc_r9 uint64
mc_rax uint64
mc_rbx uint64
mc_rbp uint64
mc_r10 uint64
mc_r11 uint64
mc_r12 uint64
mc_r13 uint64
mc_r14 uint64
mc_r15 uint64
mc_trapno uint32
mc_fs uint16
mc_gs uint16
mc_addr uint64
mc_flags uint32
mc_es uint16
mc_ds uint16
mc_err uint64
mc_rip uint64
mc_cs uint64
mc_rflags uint64
mc_rsp uint64
mc_ss uint64
mc_len uint64
mc_fpformat uint64
mc_ownedfp uint64
mc_fpstate [64]uint64
mc_fsbase uint64
mc_gsbase uint64
mc_xfpustate uint64
mc_xfpustate_len uint64
mc_spare [4]uint64
}
type ucontext struct {
uc_sigmask struct {
__bits [4]uint32
}
uc_mcontext mcontext
uc_link uint64 // pointer
uc_stack struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}
uc_flags int32
__spare__ [4]int32
pad_cgo_0 [12]byte
}
buf := make([]byte, unsafe.Sizeof(ucontext{}))
_, err := mem.ReadMemory(buf, addr)
if err != nil {
return nil, err
}
mctxt := ((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext
dregs := make([]*op.DwarfRegister, regnum.AMD64MaxRegNum()+1)
dregs[regnum.AMD64_Rdi] = op.DwarfRegisterFromUint64(mctxt.mc_rdi)
dregs[regnum.AMD64_Rsi] = op.DwarfRegisterFromUint64(mctxt.mc_rsi)
dregs[regnum.AMD64_Rdx] = op.DwarfRegisterFromUint64(mctxt.mc_rdx)
dregs[regnum.AMD64_Rcx] = op.DwarfRegisterFromUint64(mctxt.mc_rcx)
dregs[regnum.AMD64_R8] = op.DwarfRegisterFromUint64(mctxt.mc_r8)
dregs[regnum.AMD64_R9] = op.DwarfRegisterFromUint64(mctxt.mc_r9)
dregs[regnum.AMD64_Rax] = op.DwarfRegisterFromUint64(mctxt.mc_rax)
dregs[regnum.AMD64_Rbx] = op.DwarfRegisterFromUint64(mctxt.mc_rbx)
dregs[regnum.AMD64_Rbp] = op.DwarfRegisterFromUint64(mctxt.mc_rbp)
dregs[regnum.AMD64_R10] = op.DwarfRegisterFromUint64(mctxt.mc_r10)
dregs[regnum.AMD64_R11] = op.DwarfRegisterFromUint64(mctxt.mc_r11)
dregs[regnum.AMD64_R12] = op.DwarfRegisterFromUint64(mctxt.mc_r12)
dregs[regnum.AMD64_R13] = op.DwarfRegisterFromUint64(mctxt.mc_r13)
dregs[regnum.AMD64_R14] = op.DwarfRegisterFromUint64(mctxt.mc_r14)
dregs[regnum.AMD64_R15] = op.DwarfRegisterFromUint64(mctxt.mc_r15)
dregs[regnum.AMD64_Fs] = op.DwarfRegisterFromUint64(uint64(mctxt.mc_fs))
dregs[regnum.AMD64_Gs] = op.DwarfRegisterFromUint64(uint64(mctxt.mc_gs))
dregs[regnum.AMD64_Es] = op.DwarfRegisterFromUint64(uint64(mctxt.mc_es))
dregs[regnum.AMD64_Ds] = op.DwarfRegisterFromUint64(uint64(mctxt.mc_ds))
dregs[regnum.AMD64_Rip] = op.DwarfRegisterFromUint64(mctxt.mc_rip)
dregs[regnum.AMD64_Cs] = op.DwarfRegisterFromUint64(mctxt.mc_cs)
dregs[regnum.AMD64_Rflags] = op.DwarfRegisterFromUint64(mctxt.mc_rflags)
dregs[regnum.AMD64_Rsp] = op.DwarfRegisterFromUint64(mctxt.mc_rsp)
dregs[regnum.AMD64_Ss] = op.DwarfRegisterFromUint64(mctxt.mc_ss)
return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0), nil
}
func sigtrampContextFromExceptionPointers(mem MemoryReader, addr uint64) (uint64, error) {
type exceptionpointers struct {
record uint64 // pointer
context uint64 // pointer
}
buf := make([]byte, unsafe.Sizeof(exceptionpointers{}))
_, err := mem.ReadMemory(buf, addr)
if err != nil {
return 0, err
}
return ((*exceptionpointers)(unsafe.Pointer(&buf[0]))).context, nil
}
func sigtrampContextWindowsAMD64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
type context struct {
p1home uint64
p2home uint64
p3home uint64
p4home uint64
p5home uint64
p6home uint64
contextflags uint32
mxcsr uint32
segcs uint16
segds uint16
seges uint16
segfs uint16
seggs uint16
segss uint16
eflags uint32
dr0 uint64
dr1 uint64
dr2 uint64
dr3 uint64
dr6 uint64
dr7 uint64
rax uint64
rcx uint64
rdx uint64
rbx uint64
rsp uint64
rbp uint64
rsi uint64
rdi uint64
r8 uint64
r9 uint64
r10 uint64
r11 uint64
r12 uint64
r13 uint64
r14 uint64
r15 uint64
rip uint64
anon0 [512]byte
vectorregister [26]struct {
low uint64
high int64
}
vectorcontrol uint64
debugcontrol uint64
lastbranchtorip uint64
lastbranchfromrip uint64
lastexceptiontorip uint64
lastexceptionfromrip uint64
}
ctxtaddr, err := sigtrampContextFromExceptionPointers(mem, addr)
if err != nil {
return nil, err
}
buf := make([]byte, unsafe.Sizeof(context{}))
_, err = mem.ReadMemory(buf, ctxtaddr)
if err != nil {
return nil, fmt.Errorf("could not read context: %v", err)
}
ctxt := (*context)(unsafe.Pointer(&buf[0]))
dregs := make([]*op.DwarfRegister, regnum.AMD64MaxRegNum()+1)
dregs[regnum.AMD64_Cs] = op.DwarfRegisterFromUint64(uint64(ctxt.segcs))
dregs[regnum.AMD64_Ds] = op.DwarfRegisterFromUint64(uint64(ctxt.segds))
dregs[regnum.AMD64_Es] = op.DwarfRegisterFromUint64(uint64(ctxt.seges))
dregs[regnum.AMD64_Fs] = op.DwarfRegisterFromUint64(uint64(ctxt.segfs))
dregs[regnum.AMD64_Gs] = op.DwarfRegisterFromUint64(uint64(ctxt.seggs))
dregs[regnum.AMD64_Ss] = op.DwarfRegisterFromUint64(uint64(ctxt.segss))
dregs[regnum.AMD64_Rflags] = op.DwarfRegisterFromUint64(uint64(ctxt.eflags))
dregs[regnum.AMD64_Rax] = op.DwarfRegisterFromUint64(ctxt.rax)
dregs[regnum.AMD64_Rcx] = op.DwarfRegisterFromUint64(ctxt.rcx)
dregs[regnum.AMD64_Rdx] = op.DwarfRegisterFromUint64(ctxt.rdx)
dregs[regnum.AMD64_Rbx] = op.DwarfRegisterFromUint64(ctxt.rbx)
dregs[regnum.AMD64_Rsp] = op.DwarfRegisterFromUint64(ctxt.rsp)
dregs[regnum.AMD64_Rbp] = op.DwarfRegisterFromUint64(ctxt.rbp)
dregs[regnum.AMD64_Rsi] = op.DwarfRegisterFromUint64(ctxt.rsi)
dregs[regnum.AMD64_Rdi] = op.DwarfRegisterFromUint64(ctxt.rdi)
dregs[regnum.AMD64_R8] = op.DwarfRegisterFromUint64(ctxt.r8)
dregs[regnum.AMD64_R9] = op.DwarfRegisterFromUint64(ctxt.r9)
dregs[regnum.AMD64_R10] = op.DwarfRegisterFromUint64(ctxt.r10)
dregs[regnum.AMD64_R11] = op.DwarfRegisterFromUint64(ctxt.r11)
dregs[regnum.AMD64_R12] = op.DwarfRegisterFromUint64(ctxt.r12)
dregs[regnum.AMD64_R13] = op.DwarfRegisterFromUint64(ctxt.r13)
dregs[regnum.AMD64_R14] = op.DwarfRegisterFromUint64(ctxt.r14)
dregs[regnum.AMD64_R15] = op.DwarfRegisterFromUint64(ctxt.r15)
dregs[regnum.AMD64_Rip] = op.DwarfRegisterFromUint64(ctxt.rip)
return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0), nil
}
func sigtrampContextWindowsARM64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
type context struct {
contextflags uint32
cpsr uint32
x [31]uint64 // fp is x[29], lr is x[30]
xsp uint64
pc uint64
v [32]struct {
low uint64
high int64
}
fpcr uint32
fpsr uint32
bcr [8]uint32
bvr [8]uint64
wcr [2]uint32
wvr [2]uint64
}
ctxtaddr, err := sigtrampContextFromExceptionPointers(mem, addr)
if err != nil {
return nil, err
}
buf := make([]byte, unsafe.Sizeof(context{}))
_, err = mem.ReadMemory(buf, ctxtaddr)
if err != nil {
return nil, fmt.Errorf("could not read context: %v", err)
}
ctxt := (*context)(unsafe.Pointer(&buf[0]))
dregs := make([]*op.DwarfRegister, regnum.ARM64MaxRegNum()+1)
for i := range ctxt.x {
dregs[regnum.ARM64_X0+i] = op.DwarfRegisterFromUint64(ctxt.x[i])
}
dregs[regnum.ARM64_SP] = op.DwarfRegisterFromUint64(ctxt.xsp)
dregs[regnum.ARM64_PC] = op.DwarfRegisterFromUint64(ctxt.pc)
return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.ARM64_PC, regnum.ARM64_SP, regnum.ARM64_BP, regnum.ARM64_LR), nil
}
func sigtrampContextDarwinAMD64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
type ucontext struct {
uc_onstack int32
uc_sigmask uint32
uc_stack struct {
ss_sp uint64 // pointer
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}
uc_link uint64 // pointer
uc_mcsize uint64
uc_mcontext uint64 // pointer
}
type regmmst struct {
mmst_reg [10]int8
mmst_rsrv [6]int8
}
type regxmm struct {
xmm_reg [16]int8
}
type floatstate64 struct {
fpu_reserved [2]int32
fpu_fcw [2]byte
fpu_fsw [2]byte
fpu_ftw uint8
fpu_rsrv1 uint8
fpu_fop uint16
fpu_ip uint32
fpu_cs uint16
fpu_rsrv2 uint16
fpu_dp uint32
fpu_ds uint16
fpu_rsrv3 uint16
fpu_mxcsr uint32
fpu_mxcsrmask uint32
fpu_stmm0 regmmst
fpu_stmm1 regmmst
fpu_stmm2 regmmst
fpu_stmm3 regmmst
fpu_stmm4 regmmst
fpu_stmm5 regmmst
fpu_stmm6 regmmst
fpu_stmm7 regmmst
fpu_xmm0 regxmm
fpu_xmm1 regxmm
fpu_xmm2 regxmm
fpu_xmm3 regxmm
fpu_xmm4 regxmm
fpu_xmm5 regxmm
fpu_xmm6 regxmm
fpu_xmm7 regxmm
fpu_xmm8 regxmm
fpu_xmm9 regxmm
fpu_xmm10 regxmm
fpu_xmm11 regxmm
fpu_xmm12 regxmm
fpu_xmm13 regxmm
fpu_xmm14 regxmm
fpu_xmm15 regxmm
fpu_rsrv4 [96]int8
fpu_reserved1 int32
}
type regs64 struct {
rax uint64
rbx uint64
rcx uint64
rdx uint64
rdi uint64
rsi uint64
rbp uint64
rsp uint64
r8 uint64
r9 uint64
r10 uint64
r11 uint64
r12 uint64
r13 uint64
r14 uint64
r15 uint64
rip uint64
rflags uint64
cs uint64
fs uint64
gs uint64
}
type mcontext64 struct {
es struct {
trapno uint16
cpu uint16
err uint32
faultvaddr uint64
}
ss regs64
fs floatstate64
pad_cgo_0 [4]byte
}
buf := make([]byte, unsafe.Sizeof(ucontext{}))
_, err := mem.ReadMemory(buf, addr)
if err != nil {
return nil, err
}
mctxtaddr := ((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext
buf = make([]byte, unsafe.Sizeof(mcontext64{}))
_, err = mem.ReadMemory(buf, mctxtaddr)
if err != nil {
return nil, err
}
ss := ((*mcontext64)(unsafe.Pointer(&buf[0]))).ss
dregs := make([]*op.DwarfRegister, regnum.AMD64MaxRegNum()+1)
dregs[regnum.AMD64_Rax] = op.DwarfRegisterFromUint64(ss.rax)
dregs[regnum.AMD64_Rbx] = op.DwarfRegisterFromUint64(ss.rbx)
dregs[regnum.AMD64_Rcx] = op.DwarfRegisterFromUint64(ss.rcx)
dregs[regnum.AMD64_Rdx] = op.DwarfRegisterFromUint64(ss.rdx)
dregs[regnum.AMD64_Rdi] = op.DwarfRegisterFromUint64(ss.rdi)
dregs[regnum.AMD64_Rsi] = op.DwarfRegisterFromUint64(ss.rsi)
dregs[regnum.AMD64_Rbp] = op.DwarfRegisterFromUint64(ss.rbp)
dregs[regnum.AMD64_Rsp] = op.DwarfRegisterFromUint64(ss.rsp)
dregs[regnum.AMD64_R8] = op.DwarfRegisterFromUint64(ss.r8)
dregs[regnum.AMD64_R9] = op.DwarfRegisterFromUint64(ss.r9)
dregs[regnum.AMD64_R10] = op.DwarfRegisterFromUint64(ss.r10)
dregs[regnum.AMD64_R11] = op.DwarfRegisterFromUint64(ss.r11)
dregs[regnum.AMD64_R12] = op.DwarfRegisterFromUint64(ss.r12)
dregs[regnum.AMD64_R13] = op.DwarfRegisterFromUint64(ss.r13)
dregs[regnum.AMD64_R14] = op.DwarfRegisterFromUint64(ss.r14)
dregs[regnum.AMD64_R15] = op.DwarfRegisterFromUint64(ss.r15)
dregs[regnum.AMD64_Rip] = op.DwarfRegisterFromUint64(ss.rip)
dregs[regnum.AMD64_Rflags] = op.DwarfRegisterFromUint64(ss.rflags)
dregs[regnum.AMD64_Cs] = op.DwarfRegisterFromUint64(ss.cs)
dregs[regnum.AMD64_Fs] = op.DwarfRegisterFromUint64(ss.fs)
dregs[regnum.AMD64_Gs] = op.DwarfRegisterFromUint64(ss.gs)
return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0), nil
}
func sigtrampContextDarwinARM64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
type ucontext struct {
uc_onstack int32
uc_sigmask uint32
uc_stack struct {
ss_sp uint64 // pointer
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}
uc_link uint64 // pointer
uc_mcsize uint64
uc_mcontext uint64 // pointer
}
type regs64 struct {
x [29]uint64 // registers x0 to x28
fp uint64 // frame register, x29
lr uint64 // link register, x30
sp uint64 // stack pointer, x31
pc uint64 // program counter
cpsr uint32 // current program status register
__pad uint32
}
type mcontext64 struct {
es struct {
far uint64 // virtual fault addr
esr uint32 // exception syndrome
exc uint32 // number of arm exception taken
}
ss regs64
ns struct {
v [64]uint64 // actually [32]uint128
fpsr uint32
fpcr uint32
}
}
buf := make([]byte, unsafe.Sizeof(ucontext{}))
_, err := mem.ReadMemory(buf, addr)
if err != nil {
return nil, err
}
mctxtaddr := ((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext
buf = make([]byte, unsafe.Sizeof(mcontext64{}))
_, err = mem.ReadMemory(buf, mctxtaddr)
if err != nil {
return nil, err
}
ss := ((*mcontext64)(unsafe.Pointer(&buf[0]))).ss
dregs := make([]*op.DwarfRegister, regnum.ARM64MaxRegNum()+1)
for i := range ss.x {
dregs[regnum.ARM64_X0+i] = op.DwarfRegisterFromUint64(ss.x[i])
}
dregs[regnum.ARM64_BP] = op.DwarfRegisterFromUint64(ss.fp)
dregs[regnum.ARM64_LR] = op.DwarfRegisterFromUint64(ss.lr)
dregs[regnum.ARM64_SP] = op.DwarfRegisterFromUint64(ss.sp)
dregs[regnum.ARM64_PC] = op.DwarfRegisterFromUint64(ss.pc)
return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.ARM64_PC, regnum.ARM64_SP, regnum.ARM64_BP, regnum.ARM64_LR), nil
}

@ -33,7 +33,7 @@ func (t *Target) setStackWatchBreakpoints(scope *EvalScope, watchpoint *Breakpoi
return true, nil
}
topframe, retframe, err := topframe(scope.g, nil)
topframe, retframe, err := topframe(t, scope.g, nil)
if err != nil {
return err
}

@ -396,20 +396,18 @@ func (t *Target) createUnrecoveredPanicBreakpoint() {
// createFatalThrowBreakpoint creates a breakpoint at runtime.fatalthrow.
func (t *Target) createFatalThrowBreakpoint() {
fatalpcs, err := FindFunctionLocation(t.Process, "runtime.throw", 0)
if err == nil {
bp, err := t.SetBreakpoint(fatalThrowID, fatalpcs[0], UserBreakpoint, nil)
setFatalThrow := func(pcs []uint64, err error) {
if err == nil {
bp.Logical.Name = FatalThrow
}
}
fatalpcs, err = FindFunctionLocation(t.Process, "runtime.fatal", 0)
if err == nil {
bp, err := t.SetBreakpoint(fatalThrowID, fatalpcs[0], UserBreakpoint, nil)
if err == nil {
bp.Logical.Name = FatalThrow
bp, err := t.SetBreakpoint(fatalThrowID, pcs[0], UserBreakpoint, nil)
if err == nil {
bp.Logical.Name = FatalThrow
}
}
}
setFatalThrow(FindFunctionLocation(t.Process, "runtime.throw", 0))
setFatalThrow(FindFunctionLocation(t.Process, "runtime.fatal", 0))
setFatalThrow(FindFunctionLocation(t.Process, "runtime.winthrow", 0))
setFatalThrow(FindFunctionLocation(t.Process, "runtime.fatalsignal", 0))
}
// createPluginOpenBreakpoint creates a breakpoint at the return instruction

@ -469,7 +469,7 @@ func (grp *TargetGroup) StepOut() error {
selg := dbp.SelectedGoroutine()
curthread := dbp.CurrentThread()
topframe, retframe, err := topframe(selg, curthread)
topframe, retframe, err := topframe(dbp, selg, curthread)
if err != nil {
return err
}
@ -511,7 +511,7 @@ func (grp *TargetGroup) StepOut() error {
}
if topframe.Ret != 0 {
topframe, retframe := skipAutogeneratedWrappersOut(selg, curthread, &topframe, &retframe)
topframe, retframe := skipAutogeneratedWrappersOut(grp.Selected, selg, curthread, &topframe, &retframe)
retFrameCond := astutil.And(sameGCond, frameoffCondition(retframe))
bp, err := allowDuplicateBreakpoint(dbp.SetBreakpoint(0, retframe.Current.PC, NextBreakpoint, retFrameCond))
if err != nil {
@ -600,7 +600,7 @@ func next(dbp *Target, stepInto, inlinedStepOut bool) error {
backward := dbp.recman.GetDirection() == Backward
selg := dbp.SelectedGoroutine()
curthread := dbp.CurrentThread()
topframe, retframe, err := topframe(selg, curthread)
topframe, retframe, err := topframe(dbp, selg, curthread)
if err != nil {
return err
}
@ -748,7 +748,7 @@ func next(dbp *Target, stepInto, inlinedStepOut bool) error {
}
if !topframe.Inlined {
topframe, retframe := skipAutogeneratedWrappersOut(selg, curthread, &topframe, &retframe)
topframe, retframe := skipAutogeneratedWrappersOut(dbp, selg, curthread, &topframe, &retframe)
retFrameCond := astutil.And(sameGCond, frameoffCondition(retframe))
// Add a breakpoint on the return address for the current frame.
@ -1036,7 +1036,7 @@ func skipAutogeneratedWrappersIn(p Process, startfn *Function, startpc uint64) (
// step out breakpoint.
// See genwrapper in: $GOROOT/src/cmd/compile/internal/gc/subr.go
// It also skips runtime.deferreturn frames (which are only ever on the stack on Go 1.18 or later)
func skipAutogeneratedWrappersOut(g *G, thread Thread, startTopframe, startRetframe *Stackframe) (topframe, retframe *Stackframe) {
func skipAutogeneratedWrappersOut(tgt *Target, g *G, thread Thread, startTopframe, startRetframe *Stackframe) (topframe, retframe *Stackframe) {
topframe, retframe = startTopframe, startRetframe
if startTopframe.Ret == 0 {
return
@ -1054,9 +1054,9 @@ func skipAutogeneratedWrappersOut(g *G, thread Thread, startTopframe, startRetfr
var err error
var frames []Stackframe
if g == nil {
frames, err = ThreadStacktrace(thread, maxSkipAutogeneratedWrappers)
frames, err = ThreadStacktrace(tgt, thread, maxSkipAutogeneratedWrappers)
} else {
frames, err = g.Stacktrace(maxSkipAutogeneratedWrappers, 0)
frames, err = GoroutineStacktrace(tgt, g, maxSkipAutogeneratedWrappers, 0)
}
if err != nil {
return
@ -1152,9 +1152,9 @@ func stepOutReverse(p *Target, topframe, retframe Stackframe, sameGCond ast.Expr
var frames []Stackframe
if selg == nil {
frames, err = ThreadStacktrace(curthread, 3)
frames, err = ThreadStacktrace(p, curthread, 3)
} else {
frames, err = selg.Stacktrace(3, 0)
frames, err = GoroutineStacktrace(p, selg, 3, 0)
}
if err != nil {
return err

@ -67,14 +67,14 @@ func (t *CommonThread) ReturnValues(cfg LoadConfig) []*Variable {
}
// topframe returns the two topmost frames of g, or thread if g is nil.
func topframe(g *G, thread Thread) (Stackframe, Stackframe, error) {
func topframe(tgt *Target, g *G, thread Thread) (Stackframe, Stackframe, error) {
var frames []Stackframe
var err error
if g == nil {
frames, err = ThreadStacktrace(thread, 1)
frames, err = ThreadStacktrace(tgt, thread, 1)
} else {
frames, err = g.Stacktrace(1, StacktraceReadDefers)
frames, err = GoroutineStacktrace(tgt, g, 1, StacktraceReadDefers)
}
if err != nil {
return Stackframe{}, Stackframe{}, err

@ -497,7 +497,7 @@ func (g *G) Defer() *Defer {
// UserCurrent returns the location the user's code is at,
// or was at before entering a runtime function.
func (g *G) UserCurrent() Location {
it, err := g.stackIterator(0)
it, err := goroutineStackIterator(nil, g, 0)
if err != nil {
return g.CurrentLoc
}

@ -127,7 +127,7 @@ func doFuzzEvalExpressionSetup(f *testing.F) {
// 3. Run all the test cases on the core file, register which memory addresses are read
frames, err := c.SelectedGoroutine().Stacktrace(2, 0)
frames, err := proc.GoroutineStacktrace(c, c.SelectedGoroutine(), 2, 0)
assertNoError(err, f, "Stacktrace")
mem := c.Memory()

@ -1331,7 +1331,7 @@ func (d *Debugger) collectBreakpointInformation(apiThread *api.Thread, thread pr
}
if bp.Stacktrace > 0 {
rawlocs, err := proc.ThreadStacktrace(thread, bp.Stacktrace)
rawlocs, err := proc.ThreadStacktrace(tgt, thread, bp.Stacktrace)
if err != nil {
return err
}
@ -1757,9 +1757,9 @@ func (d *Debugger) Stacktrace(goroutineID int64, depth int, opts api.StacktraceO
}
if g == nil {
return proc.ThreadStacktrace(d.target.Selected.CurrentThread(), depth)
return proc.ThreadStacktrace(d.target.Selected, d.target.Selected.CurrentThread(), depth)
} else {
return g.Stacktrace(depth, proc.StacktraceOptions(opts))
return proc.GoroutineStacktrace(d.target.Selected, g, depth, proc.StacktraceOptions(opts))
}
}