2015-06-12 19:49:23 +00:00
|
|
|
package proc
|
2015-04-19 22:11:33 +00:00
|
|
|
|
2015-08-10 13:55:57 +00:00
|
|
|
import (
|
2017-08-24 07:46:47 +00:00
|
|
|
"debug/dwarf"
|
2016-02-02 11:26:29 +00:00
|
|
|
"errors"
|
2015-08-10 13:55:57 +00:00
|
|
|
"fmt"
|
2018-07-06 07:37:31 +00:00
|
|
|
"go/constant"
|
2017-02-08 00:23:47 +00:00
|
|
|
|
2019-01-04 18:39:25 +00:00
|
|
|
"github.com/go-delve/delve/pkg/dwarf/frame"
|
|
|
|
"github.com/go-delve/delve/pkg/dwarf/op"
|
|
|
|
"github.com/go-delve/delve/pkg/dwarf/reader"
|
2015-08-10 13:55:57 +00:00
|
|
|
)
|
2015-04-19 22:11:33 +00:00
|
|
|
|
2018-03-20 10:05:35 +00:00
|
|
|
// This code is partly adapted from runtime.gentraceback in
|
2017-02-08 13:14:57 +00:00
|
|
|
// $GOROOT/src/runtime/traceback.go
|
|
|
|
|
2016-01-10 08:57:52 +00:00
|
|
|
// Stackframe represents a frame in a system stack.
//
// Each stack frame has two locations Current and Call.
//
// For the topmost stackframe Current and Call are the same location.
//
// For stackframes after the first Current is the location corresponding to
// the return address and Call is the location of the CALL instruction that
// was last executed on the frame. Note however that Call.PC is always equal
// to Current.PC, because finding the correct value for Call.PC would
// require disassembling each function in the stacktrace.
//
// For synthetic stackframes generated for inlined function calls Current.Fn
// is the function containing the inlining and Call.Fn in the inlined
// function.
type Stackframe struct {
	Current, Call Location

	// Regs holds the frame registers for this frame.
	Regs op.DwarfRegisters
	// stackHi is the high address of the stack.
	stackHi uint64
	// Ret is the return address for this stack frame (as read from the stack frame itself).
	Ret uint64
	// addrret is the address of the memory location containing the return address.
	addrret uint64
	// Err is set if an error occurred during stacktrace
	Err error
	// SystemStack is true if this frame belongs to a system stack.
	SystemStack bool
	// Inlined is true if this frame is actually an inlined call.
	Inlined bool
	// Bottom is true if this is the bottom of the stack
	Bottom bool

	// lastpc is a memory address guaranteed to belong to the last instruction
	// executed in this stack frame.
	// For the topmost stack frame this will be the same as Current.PC and
	// Call.PC, for other stack frames it will usually be Current.PC-1, but
	// could be different when inlined calls are involved in the stacktrace.
	// Note that this address isn't guaranteed to belong to the start of an
	// instruction and, for this reason, should not be propagated outside of
	// pkg/proc.
	// Use this value to determine active lexical scopes for the stackframe.
	lastpc uint64

	// TopmostDefer is the defer that would be at the top of the stack when a
	// panic unwind would get to this call frame, in other words it's the first
	// deferred function that will be called if the runtime unwinds past this
	// call frame.
	TopmostDefer *Defer

	// Defers is the list of functions deferred by this stack frame (so far).
	Defers []*Defer
}
|
|
|
|
|
|
|
|
// FrameOffset returns the address of the stack frame, absolute for system
|
|
|
|
// stack frames or as an offset from stackhi for goroutine stacks (a
|
|
|
|
// negative value).
|
|
|
|
func (frame *Stackframe) FrameOffset() int64 {
|
|
|
|
if frame.SystemStack {
|
|
|
|
return frame.Regs.CFA
|
|
|
|
}
|
|
|
|
return frame.Regs.CFA - int64(frame.stackHi)
|
|
|
|
}
|
|
|
|
|
|
|
|
// FramePointerOffset returns the value of the frame pointer, absolute for
|
|
|
|
// system stack frames or as an offset from stackhi for goroutine stacks (a
|
|
|
|
// negative value).
|
|
|
|
func (frame *Stackframe) FramePointerOffset() int64 {
|
|
|
|
if frame.SystemStack {
|
|
|
|
return int64(frame.Regs.BP())
|
|
|
|
}
|
|
|
|
return int64(frame.Regs.BP()) - int64(frame.stackHi)
|
2015-09-17 08:42:34 +00:00
|
|
|
}
|
|
|
|
|
2017-07-26 22:57:47 +00:00
|
|
|
// ThreadStacktrace returns the stack trace for thread.
|
2015-07-01 03:16:52 +00:00
|
|
|
// Note the locations in the array are return addresses not call addresses.
|
2017-04-21 07:50:38 +00:00
|
|
|
func ThreadStacktrace(thread Thread, depth int) ([]Stackframe, error) {
|
2017-09-01 13:34:13 +00:00
|
|
|
g, _ := GetG(thread)
|
|
|
|
if g == nil {
|
|
|
|
regs, err := thread.Registers(true)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-08-08 03:15:20 +00:00
|
|
|
so := thread.BinInfo().PCToImage(regs.PC())
|
2019-09-25 17:21:20 +00:00
|
|
|
it := newStackIterator(thread.BinInfo(), thread, thread.BinInfo().Arch.RegistersToDwarfRegisters(so.StaticBase, regs), 0, nil, -1, nil, 0)
|
2017-09-01 13:34:13 +00:00
|
|
|
return it.stacktrace(depth)
|
2015-04-19 22:11:33 +00:00
|
|
|
}
|
2019-09-25 17:21:20 +00:00
|
|
|
return g.Stacktrace(depth, 0)
|
2015-06-17 17:11:57 +00:00
|
|
|
}
|
|
|
|
|
2019-09-25 17:21:20 +00:00
|
|
|
func (g *G) stackIterator(opts StacktraceOptions) (*stackIterator, error) {
|
2017-02-08 13:14:57 +00:00
|
|
|
stkbar, err := g.stkbar()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-09-01 13:34:13 +00:00
|
|
|
|
2020-03-10 16:34:40 +00:00
|
|
|
bi := g.variable.bi
|
2017-04-21 06:55:53 +00:00
|
|
|
if g.Thread != nil {
|
2017-10-05 07:26:19 +00:00
|
|
|
regs, err := g.Thread.Registers(true)
|
2017-02-15 13:41:03 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-03-10 16:34:40 +00:00
|
|
|
so := bi.PCToImage(regs.PC())
|
2019-08-08 03:44:37 +00:00
|
|
|
return newStackIterator(
|
2020-03-10 16:34:40 +00:00
|
|
|
bi, g.Thread,
|
|
|
|
bi.Arch.RegistersToDwarfRegisters(so.StaticBase, regs),
|
2020-04-22 14:39:06 +00:00
|
|
|
g.stack.hi, stkbar, g.stkbarPos, g, opts), nil
|
2016-03-18 08:51:48 +00:00
|
|
|
}
|
2019-08-08 03:44:37 +00:00
|
|
|
so := g.variable.bi.PCToImage(g.PC)
|
|
|
|
return newStackIterator(
|
2020-03-10 16:34:40 +00:00
|
|
|
bi, g.variable.mem,
|
2020-03-30 18:03:29 +00:00
|
|
|
bi.Arch.addrAndStackRegsToDwarfRegisters(so.StaticBase, g.PC, g.SP, g.BP, g.LR),
|
2020-04-22 14:39:06 +00:00
|
|
|
g.stack.hi, stkbar, g.stkbarPos, g, opts), nil
|
2016-03-18 08:51:48 +00:00
|
|
|
}
|
|
|
|
|
2019-09-25 17:21:20 +00:00
|
|
|
// StacktraceOptions is a bitmask of options accepted by (*G).Stacktrace
// and related stacktrace functions.
type StacktraceOptions uint16

const (
	// StacktraceReadDefers requests a stacktrace decorated with deferred calls
	// for each frame.
	StacktraceReadDefers StacktraceOptions = 1 << iota

	// StacktraceSimple requests a stacktrace where no stack switches will be
	// attempted.
	StacktraceSimple

	// StacktraceG requests a stacktrace starting with the register
	// values saved in the runtime.g structure.
	StacktraceG
)
|
|
|
|
|
2016-03-18 08:51:48 +00:00
|
|
|
// Stacktrace returns the stack trace for a goroutine.
|
|
|
|
// Note the locations in the array are return addresses not call addresses.
|
2019-09-25 17:21:20 +00:00
|
|
|
func (g *G) Stacktrace(depth int, opts StacktraceOptions) ([]Stackframe, error) {
|
|
|
|
it, err := g.stackIterator(opts)
|
2016-03-18 08:51:48 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2015-06-17 17:11:57 +00:00
|
|
|
}
|
2018-07-06 07:37:31 +00:00
|
|
|
frames, err := it.stacktrace(depth)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-09-25 17:21:20 +00:00
|
|
|
if opts&StacktraceReadDefers != 0 {
|
2018-07-06 07:37:31 +00:00
|
|
|
g.readDefers(frames)
|
|
|
|
}
|
|
|
|
return frames, nil
|
2015-06-17 17:11:57 +00:00
|
|
|
}
|
|
|
|
|
2016-01-10 08:57:52 +00:00
|
|
|
// NullAddrError is an error for a null address.
type NullAddrError struct{}

// Error implements the error interface.
func (n NullAddrError) Error() string {
	return "NULL address"
}
|
|
|
|
|
2016-03-18 08:51:48 +00:00
|
|
|
// stackIterator holds information
// required to iterate and walk the program
// stack.
type stackIterator struct {
	pc    uint64     // program counter of the frame currently being examined
	top   bool       // true while the iterator points at the topmost frame
	atend bool       // true once the bottom of the stack has been reached
	frame Stackframe // frame most recently produced by Next
	bi    *BinaryInfo
	mem   MemoryReadWriter
	err   error // sticky error; once set Next returns false

	stackhi     uint64
	systemstack bool

	// stackBarrierPC is the entry point of runtime.stackBarrier (0 when the
	// target's runtime has no stack barriers, i.e. Go 1.9+).
	stackBarrierPC uint64
	stkbar         []savedLR

	// regs is the register set for the current frame
	regs op.DwarfRegisters

	g                  *G     // the goroutine being stacktraced, nil if we are stacktracing a goroutine-less thread
	g0_sched_sp        uint64 // value of g0.sched.sp (see comments around its use)
	g0_sched_sp_loaded bool   // g0_sched_sp was loaded from g0

	opts StacktraceOptions
}
|
|
|
|
|
|
|
|
// savedLR is a return address saved by a stack barrier: ptr is the stack
// address where the original return address val was stored (see the stack
// barrier handling in Next and newStackIterator).
type savedLR struct {
	ptr uint64
	val uint64
}
|
|
|
|
|
2019-09-25 17:21:20 +00:00
|
|
|
// newStackIterator constructs a stackIterator starting at the register set
// regs. stackhi, stkbar and stkbarPos describe the goroutine stack and its
// stack barriers (Go < 1.9 only); g may be nil when stacktracing a
// goroutine-less thread.
func newStackIterator(bi *BinaryInfo, mem MemoryReadWriter, regs op.DwarfRegisters, stackhi uint64, stkbar []savedLR, stkbarPos int, g *G, opts StacktraceOptions) *stackIterator {
	stackBarrierFunc := bi.LookupFunc["runtime.stackBarrier"] // stack barriers were removed in Go 1.9
	var stackBarrierPC uint64
	if stackBarrierFunc != nil && stkbar != nil {
		stackBarrierPC = stackBarrierFunc.Entry
		fn := bi.PCToFunc(regs.PC())
		if fn != nil && fn.Name == "runtime.stackBarrier" {
			// We caught the goroutine as it's executing the stack barrier, we must
			// determine whether or not g.stackPos has already been incremented or not.
			if len(stkbar) > 0 && stkbar[stkbarPos].ptr < regs.SP() {
				// runtime.stackBarrier has not incremented stkbarPos.
			} else if stkbarPos > 0 && stkbar[stkbarPos-1].ptr < regs.SP() {
				// runtime.stackBarrier has incremented stkbarPos.
				stkbarPos--
			} else {
				return &stackIterator{err: fmt.Errorf("failed to unwind through stackBarrier at SP %x", regs.SP())}
			}
		}
		// Discard the barriers already crossed; it.stkbar[0] is the next one.
		stkbar = stkbar[stkbarPos:]
	}
	// With no goroutine we can only be on a system stack.
	systemstack := true
	if g != nil {
		systemstack = g.SystemStack
	}
	return &stackIterator{pc: regs.PC(), regs: regs, top: true, bi: bi, mem: mem, err: nil, atend: false, stackhi: stackhi, stackBarrierPC: stackBarrierPC, stkbar: stkbar, systemstack: systemstack, g: g, opts: opts}
}
|
|
|
|
|
2016-01-10 08:57:52 +00:00
|
|
|
// Next points the iterator to the next stack frame.
// It returns false when iteration is finished, either because the bottom
// of the stack was reached or because an error occurred.
func (it *stackIterator) Next() bool {
	if it.err != nil || it.atend {
		return false
	}
	callFrameRegs, ret, retaddr := it.advanceRegs()
	it.frame = it.newStackframe(ret, retaddr)

	if it.stkbar != nil && it.frame.Ret == it.stackBarrierPC && it.frame.addrret == it.stkbar[0].ptr {
		// Skip stack barrier frames
		it.frame.Ret = it.stkbar[0].val
		it.stkbar = it.stkbar[1:]
	}

	if it.opts&StacktraceSimple == 0 {
		// Let the architecture-specific code handle transitions between
		// stacks (e.g. systemstack calls); if it did, the frame is ready.
		if it.bi.Arch.switchStack(it, &callFrameRegs) {
			return true
		}
	}

	if it.frame.Ret <= 0 {
		// No valid return address: this is the bottom frame.
		it.atend = true
		return true
	}

	// Move to the caller's frame.
	it.top = false
	it.pc = it.frame.Ret
	it.regs = callFrameRegs
	return true
}
|
|
|
|
|
2019-08-01 23:31:50 +00:00
|
|
|
// switchToGoroutineStack resumes iteration from the PC/SP/BP (and, on
// arm64, LR) values saved in it.g, leaving any system stack behind.
func (it *stackIterator) switchToGoroutineStack() {
	it.systemstack = false
	it.top = false
	it.pc = it.g.PC
	it.regs.Reg(it.regs.SPRegNum).Uint64Val = it.g.SP
	it.regs.AddReg(it.regs.BPRegNum, op.DwarfRegisterFromUint64(it.g.BP))
	if it.bi.Arch.Name == "arm64" {
		// arm64 keeps the return address in the link register.
		it.regs.Reg(it.regs.LRRegNum).Uint64Val = it.g.LR
	}
}
|
|
|
|
|
2016-01-10 08:57:52 +00:00
|
|
|
// Frame returns the frame the iterator is pointing at.
func (it *stackIterator) Frame() Stackframe {
	// Bottom reflects whether iteration has reached the last frame.
	it.frame.Bottom = it.atend
	return it.frame
}
|
|
|
|
|
2016-01-10 08:57:52 +00:00
|
|
|
// Err returns the error encountered during stack iteration.
func (it *stackIterator) Err() error {
	return it.err
}
|
|
|
|
|
2017-08-24 07:46:47 +00:00
|
|
|
// frameBase calculates the frame base pseudo-register for DWARF for fn and
|
|
|
|
// the current frame.
|
|
|
|
func (it *stackIterator) frameBase(fn *Function) int64 {
|
2020-03-20 17:23:10 +00:00
|
|
|
dwarfTree, err := fn.cu.image.getDwarfTree(fn.offset)
|
2017-08-24 07:46:47 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0
|
|
|
|
}
|
2020-03-20 17:23:10 +00:00
|
|
|
fb, _, _, _ := it.bi.Location(dwarfTree.Entry, dwarf.AttrFrameBase, it.pc, it.regs)
|
2017-08-24 07:46:47 +00:00
|
|
|
return fb
|
|
|
|
}
|
|
|
|
|
2017-10-05 07:26:19 +00:00
|
|
|
// newStackframe builds the Stackframe for the current iterator position,
// given the return address ret and the address retaddr where it was read
// from. A zero retaddr sets it.err to NullAddrError and returns an empty
// frame.
func (it *stackIterator) newStackframe(ret, retaddr uint64) Stackframe {
	if retaddr == 0 {
		it.err = NullAddrError{}
		return Stackframe{}
	}
	f, l, fn := it.bi.PCToLine(it.pc)
	if fn == nil {
		// PC doesn't belong to any known function.
		f = "?"
		l = -1
	} else {
		it.regs.FrameBase = it.frameBase(fn)
	}
	r := Stackframe{Current: Location{PC: it.pc, File: f, Line: l, Fn: fn}, Regs: it.regs, Ret: ret, addrret: retaddr, stackHi: it.stackhi, SystemStack: it.systemstack, lastpc: it.pc}
	r.Call = r.Current
	if !it.top && r.Current.Fn != nil && it.pc != r.Current.Fn.Entry {
		// if the return address is the entry point of the function that
		// contains it then this is some kind of fake return frame (for example
		// runtime.sigreturn) that didn't actually call the current frame,
		// attempting to get the location of the CALL instruction would just
		// obfuscate what's going on, since there is no CALL instruction.
		switch r.Current.Fn.Name {
		case "runtime.mstart", "runtime.systemstack_switch":
			// these frames are inserted by runtime.systemstack and there is no CALL
			// instruction to look for at pc - 1
		default:
			// pc-1 is guaranteed to fall inside the CALL instruction that
			// produced this return address.
			r.lastpc = it.pc - 1
			r.Call.File, r.Call.Line = r.Current.Fn.cu.lineInfo.PCToLine(r.Current.Fn.Entry, it.pc-1)
		}
	}
	return r
}
|
|
|
|
|
2016-03-18 08:51:48 +00:00
|
|
|
func (it *stackIterator) stacktrace(depth int) ([]Stackframe, error) {
|
2016-02-02 11:26:29 +00:00
|
|
|
if depth < 0 {
|
|
|
|
return nil, errors.New("negative maximum stack depth")
|
|
|
|
}
|
2019-09-25 17:21:20 +00:00
|
|
|
if it.opts&StacktraceG != 0 && it.g != nil {
|
|
|
|
it.switchToGoroutineStack()
|
|
|
|
it.top = true
|
|
|
|
}
|
2015-08-28 20:06:29 +00:00
|
|
|
frames := make([]Stackframe, 0, depth+1)
|
2015-10-16 06:42:02 +00:00
|
|
|
for it.Next() {
|
proc: support inlining
Go 1.10 added inlined calls to debug_info, this commit adds support
for DW_TAG_inlined_call to delve, both for stack traces (where
inlined calls will appear as normal stack frames) and to correct
the behavior of next, step and stepout.
The calls to Next and Frame of stackIterator continue to work
unchanged and only return real stack frames, after reading each line
appendInlinedCalls is called to unpacked all the inlined calls that
involve the current PC.
The fake stack frames produced by appendInlinedCalls are
distinguished from real stack frames by having the Inlined attribute
set to true. Also their Current and Call locations are treated
differently. The Call location will be changed to represent the
position inside the inlined call, while the Current location will
always reference the real stack frame. This is done because:
* next, step and stepout need to access the debug_info entry of
the real function they are stepping through
* we are already manipulating Call in different ways while Current
is just what we read from the call stack
The strategy remains mostly the same, we disassemble the function
and we set a breakpoint on each instruction corresponding to a
different file:line. The function in question will be the one
corresponding to the first real (i.e. non-inlined) stack frame.
* If the current function contains inlined calls, 'next' will not
set any breakpoints on instructions that belong to inlined calls. We
do not do this for 'step'.
* If we are inside an inlined call that makes other inlined
functions, 'next' will not set any breakpoints that belong to
inlined calls that are children of the current inlined call.
* If the current function is inlined the breakpoint on the return
address won't be set, because inlined frames don't have a return
address.
* The code we use for stepout doesn't work at all if we are inside
an inlined call, instead we call 'next' but instruct it to remove
all PCs belonging to the current inlined call.
2017-11-13 15:54:08 +00:00
|
|
|
frames = it.appendInlineCalls(frames, it.Frame())
|
2015-10-16 06:42:02 +00:00
|
|
|
if len(frames) >= depth+1 {
|
2015-06-17 17:11:57 +00:00
|
|
|
break
|
|
|
|
}
|
2015-10-16 06:42:02 +00:00
|
|
|
}
|
|
|
|
if err := it.Err(); err != nil {
|
2017-06-23 11:31:05 +00:00
|
|
|
if len(frames) == 0 {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
frames = append(frames, Stackframe{Err: err})
|
2015-04-19 22:11:33 +00:00
|
|
|
}
|
2015-08-28 20:06:29 +00:00
|
|
|
return frames, nil
|
2015-04-19 22:11:33 +00:00
|
|
|
}
|
2017-10-05 07:26:19 +00:00
|
|
|
|
proc: support inlining
Go 1.10 added inlined calls to debug_info, this commit adds support
for DW_TAG_inlined_call to delve, both for stack traces (where
inlined calls will appear as normal stack frames) and to correct
the behavior of next, step and stepout.
The calls to Next and Frame of stackIterator continue to work
unchanged and only return real stack frames, after reading each line
appendInlinedCalls is called to unpacked all the inlined calls that
involve the current PC.
The fake stack frames produced by appendInlinedCalls are
distinguished from real stack frames by having the Inlined attribute
set to true. Also their Current and Call locations are treated
differently. The Call location will be changed to represent the
position inside the inlined call, while the Current location will
always reference the real stack frame. This is done because:
* next, step and stepout need to access the debug_info entry of
the real function they are stepping through
* we are already manipulating Call in different ways while Current
is just what we read from the call stack
The strategy remains mostly the same, we disassemble the function
and we set a breakpoint on each instruction corresponding to a
different file:line. The function in question will be the one
corresponding to the first real (i.e. non-inlined) stack frame.
* If the current function contains inlined calls, 'next' will not
set any breakpoints on instructions that belong to inlined calls. We
do not do this for 'step'.
* If we are inside an inlined call that makes other inlined
functions, 'next' will not set any breakpoints that belong to
inlined calls that are children of the current inlined call.
* If the current function is inlined the breakpoint on the return
address won't be set, because inlined frames don't have a return
address.
* The code we use for stepout doesn't work at all if we are inside
an inlined call, instead we call 'next' but instruct it to remove
all PCs belonging to the current inlined call.
2017-11-13 15:54:08 +00:00
|
|
|
func (it *stackIterator) appendInlineCalls(frames []Stackframe, frame Stackframe) []Stackframe {
|
|
|
|
if frame.Call.Fn == nil {
|
|
|
|
return append(frames, frame)
|
|
|
|
}
|
|
|
|
if frame.Call.Fn.cu.lineInfo == nil {
|
|
|
|
return append(frames, frame)
|
|
|
|
}
|
|
|
|
|
|
|
|
callpc := frame.Call.PC
|
|
|
|
if len(frames) > 0 {
|
|
|
|
callpc--
|
|
|
|
}
|
|
|
|
|
2020-03-20 17:23:10 +00:00
|
|
|
dwarfTree, err := frame.Call.Fn.cu.image.getDwarfTree(frame.Call.Fn.offset)
|
|
|
|
if err != nil {
|
|
|
|
return append(frames, frame)
|
|
|
|
}
|
proc: support inlining
Go 1.10 added inlined calls to debug_info, this commit adds support
for DW_TAG_inlined_call to delve, both for stack traces (where
inlined calls will appear as normal stack frames) and to correct
the behavior of next, step and stepout.
The calls to Next and Frame of stackIterator continue to work
unchanged and only return real stack frames, after reading each line
appendInlinedCalls is called to unpacked all the inlined calls that
involve the current PC.
The fake stack frames produced by appendInlinedCalls are
distinguished from real stack frames by having the Inlined attribute
set to true. Also their Current and Call locations are treated
differently. The Call location will be changed to represent the
position inside the inlined call, while the Current location will
always reference the real stack frame. This is done because:
* next, step and stepout need to access the debug_info entry of
the real function they are stepping through
* we are already manipulating Call in different ways while Current
is just what we read from the call stack
The strategy remains mostly the same, we disassemble the function
and we set a breakpoint on each instruction corresponding to a
different file:line. The function in question will be the one
corresponding to the first real (i.e. non-inlined) stack frame.
* If the current function contains inlined calls, 'next' will not
set any breakpoints on instructions that belong to inlined calls. We
do not do this for 'step'.
* If we are inside an inlined call that makes other inlined
functions, 'next' will not set any breakpoints that belong to
inlined calls that are children of the current inlined call.
* If the current function is inlined the breakpoint on the return
address won't be set, because inlined frames don't have a return
address.
* The code we use for stepout doesn't work at all if we are inside
an inlined call, instead we call 'next' but instruct it to remove
all PCs belonging to the current inlined call.
2017-11-13 15:54:08 +00:00
|
|
|
|
2020-03-20 17:23:10 +00:00
|
|
|
for _, entry := range reader.InlineStack(dwarfTree, callpc) {
|
proc: support inlining
Go 1.10 added inlined calls to debug_info, this commit adds support
for DW_TAG_inlined_call to delve, both for stack traces (where
inlined calls will appear as normal stack frames) and to correct
the behavior of next, step and stepout.
The calls to Next and Frame of stackIterator continue to work
unchanged and only return real stack frames, after reading each line
appendInlinedCalls is called to unpacked all the inlined calls that
involve the current PC.
The fake stack frames produced by appendInlinedCalls are
distinguished from real stack frames by having the Inlined attribute
set to true. Also their Current and Call locations are treated
differently. The Call location will be changed to represent the
position inside the inlined call, while the Current location will
always reference the real stack frame. This is done because:
* next, step and stepout need to access the debug_info entry of
the real function they are stepping through
* we are already manipulating Call in different ways while Current
is just what we read from the call stack
The strategy remains mostly the same, we disassemble the function
and we set a breakpoint on each instruction corresponding to a
different file:line. The function in question will be the one
corresponding to the first real (i.e. non-inlined) stack frame.
* If the current function contains inlined calls, 'next' will not
set any breakpoints on instructions that belong to inlined calls. We
do not do this for 'step'.
* If we are inside an inlined call that makes other inlined
functions, 'next' will not set any breakpoints that belong to
inlined calls that are children of the current inlined call.
* If the current function is inlined the breakpoint on the return
address won't be set, because inlined frames don't have a return
address.
* The code we use for stepout doesn't work at all if we are inside
an inlined call, instead we call 'next' but instruct it to remove
all PCs belonging to the current inlined call.
2017-11-13 15:54:08 +00:00
|
|
|
fnname, okname := entry.Val(dwarf.AttrName).(string)
|
|
|
|
fileidx, okfileidx := entry.Val(dwarf.AttrCallFile).(int64)
|
|
|
|
line, okline := entry.Val(dwarf.AttrCallLine).(int64)
|
|
|
|
|
|
|
|
if !okname || !okfileidx || !okline {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if fileidx-1 < 0 || fileidx-1 >= int64(len(frame.Current.Fn.cu.lineInfo.FileNames)) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
2020-03-20 17:23:10 +00:00
|
|
|
inlfn := &Function{Name: fnname, Entry: frame.Call.Fn.Entry, End: frame.Call.Fn.End, offset: entry.Offset, cu: frame.Call.Fn.cu}
|
proc: support inlining
Go 1.10 added inlined calls to debug_info, this commit adds support
for DW_TAG_inlined_call to delve, both for stack traces (where
inlined calls will appear as normal stack frames) and to correct
the behavior of next, step and stepout.
The calls to Next and Frame of stackIterator continue to work
unchanged and only return real stack frames, after reading each line
appendInlinedCalls is called to unpacked all the inlined calls that
involve the current PC.
The fake stack frames produced by appendInlinedCalls are
distinguished from real stack frames by having the Inlined attribute
set to true. Also their Current and Call locations are treated
differently. The Call location will be changed to represent the
position inside the inlined call, while the Current location will
always reference the real stack frame. This is done because:
* next, step and stepout need to access the debug_info entry of
the real function they are stepping through
* we are already manipulating Call in different ways while Current
is just what we read from the call stack
The strategy remains mostly the same, we disassemble the function
and we set a breakpoint on each instruction corresponding to a
different file:line. The function in question will be the one
corresponding to the first real (i.e. non-inlined) stack frame.
* If the current function contains inlined calls, 'next' will not
set any breakpoints on instructions that belong to inlined calls. We
do not do this for 'step'.
* If we are inside an inlined call that makes other inlined
functions, 'next' will not set any breakpoints that belong to
inlined calls that are children of the current inlined call.
* If the current function is inlined the breakpoint on the return
address won't be set, because inlined frames don't have a return
address.
* The code we use for stepout doesn't work at all if we are inside
an inlined call, instead we call 'next' but instruct it to remove
all PCs belonging to the current inlined call.
2017-11-13 15:54:08 +00:00
|
|
|
frames = append(frames, Stackframe{
|
|
|
|
Current: frame.Current,
|
|
|
|
Call: Location{
|
|
|
|
frame.Call.PC,
|
|
|
|
frame.Call.File,
|
|
|
|
frame.Call.Line,
|
|
|
|
inlfn,
|
|
|
|
},
|
|
|
|
Regs: frame.Regs,
|
|
|
|
stackHi: frame.stackHi,
|
|
|
|
Ret: frame.Ret,
|
|
|
|
addrret: frame.addrret,
|
|
|
|
Err: frame.Err,
|
|
|
|
SystemStack: frame.SystemStack,
|
|
|
|
Inlined: true,
|
|
|
|
lastpc: frame.lastpc,
|
|
|
|
})
|
|
|
|
|
|
|
|
frame.Call.File = frame.Current.Fn.cu.lineInfo.FileNames[fileidx-1].Path
|
|
|
|
frame.Call.Line = int(line)
|
|
|
|
}
|
|
|
|
|
|
|
|
return append(frames, frame)
|
|
|
|
}
|
|
|
|
|
2017-10-05 07:26:19 +00:00
|
|
|
// advanceRegs calculates it.callFrameRegs using it.regs and the frame
// descriptor entry for the current stack frame.
// it.regs.CallFrameCFA is updated.
func (it *stackIterator) advanceRegs() (callFrameRegs op.DwarfRegisters, ret uint64, retaddr uint64) {
	fde, err := it.bi.frameEntries.FDEForPC(it.pc)
	var framectx *frame.FrameContext
	if _, nofde := err.(*frame.ErrNoFDEForPC); nofde {
		// No FDE covers this PC: let the architecture synthesize a default
		// unwind context instead.
		framectx = it.bi.Arch.fixFrameUnwindContext(nil, it.pc, it.bi)
	} else {
		framectx = it.bi.Arch.fixFrameUnwindContext(fde.EstablishFrame(it.pc), it.pc, it.bi)
	}

	// Compute the Canonical Frame Address first; every other register rule
	// is evaluated relative to it.
	cfareg, err := it.executeFrameRegRule(0, framectx.CFA, 0)
	if cfareg == nil {
		it.err = fmt.Errorf("CFA becomes undefined at PC %#x", it.pc)
		return op.DwarfRegisters{}, 0, 0
	}
	it.regs.CFA = int64(cfareg.Uint64Val)

	callimage := it.bi.PCToImage(it.pc)

	callFrameRegs = op.DwarfRegisters{StaticBase: callimage.StaticBase, ByteOrder: it.regs.ByteOrder, PCRegNum: it.regs.PCRegNum, SPRegNum: it.regs.SPRegNum, BPRegNum: it.regs.BPRegNum, LRRegNum: it.regs.LRRegNum}

	// According to the standard the compiler should be responsible for emitting
	// rules for the RSP register so that it can then be used to calculate CFA,
	// however neither Go nor GCC do this.
	// In the following line we copy GDB's behaviour by assuming this is
	// implicit.
	// See also the comment in dwarf2_frame_default_init in
	// $GDB_SOURCE/dwarf2-frame.c
	callFrameRegs.AddReg(callFrameRegs.SPRegNum, cfareg)

	for i, regRule := range framectx.Regs {
		reg, err := it.executeFrameRegRule(i, regRule, it.regs.CFA)
		callFrameRegs.AddReg(i, reg)
		if i == framectx.RetAddrReg {
			// Rule-evaluation errors are only reported for the
			// return-address register; other registers simply stay
			// undefined in callFrameRegs.
			if reg == nil {
				if err == nil {
					err = fmt.Errorf("Undefined return address at %#x", it.pc)
				}
				it.err = err
			} else {
				ret = reg.Uint64Val
			}
			retaddr = uint64(it.regs.CFA + regRule.Offset)
		}
	}

	if it.bi.Arch.Name == "arm64" {
		// On arm64 the return address of a leaf function may live in the LR
		// register rather than on the stack.
		if ret == 0 && it.regs.Regs[it.regs.LRRegNum] != nil {
			ret = it.regs.Regs[it.regs.LRRegNum].Uint64Val
		}
	}

	return callFrameRegs, ret, retaddr
}
|
|
|
|
|
|
|
|
func (it *stackIterator) executeFrameRegRule(regnum uint64, rule frame.DWRule, cfa int64) (*op.DwarfRegister, error) {
|
|
|
|
switch rule.Rule {
|
|
|
|
default:
|
|
|
|
fallthrough
|
|
|
|
case frame.RuleUndefined:
|
|
|
|
return nil, nil
|
|
|
|
case frame.RuleSameVal:
|
2020-01-21 17:11:20 +00:00
|
|
|
if it.regs.Reg(regnum) == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
2017-09-01 13:34:13 +00:00
|
|
|
reg := *it.regs.Reg(regnum)
|
|
|
|
return ®, nil
|
2017-10-05 07:26:19 +00:00
|
|
|
case frame.RuleOffset:
|
|
|
|
return it.readRegisterAt(regnum, uint64(cfa+rule.Offset))
|
|
|
|
case frame.RuleValOffset:
|
|
|
|
return op.DwarfRegisterFromUint64(uint64(cfa + rule.Offset)), nil
|
|
|
|
case frame.RuleRegister:
|
|
|
|
return it.regs.Reg(rule.Reg), nil
|
|
|
|
case frame.RuleExpression:
|
2020-03-10 16:34:40 +00:00
|
|
|
v, _, err := op.ExecuteStackProgram(it.regs, rule.Expression, it.bi.Arch.PtrSize())
|
2017-10-05 07:26:19 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return it.readRegisterAt(regnum, uint64(v))
|
|
|
|
case frame.RuleValExpression:
|
2020-03-10 16:34:40 +00:00
|
|
|
v, _, err := op.ExecuteStackProgram(it.regs, rule.Expression, it.bi.Arch.PtrSize())
|
2017-10-05 07:26:19 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return op.DwarfRegisterFromUint64(uint64(v)), nil
|
|
|
|
case frame.RuleArchitectural:
|
|
|
|
return nil, errors.New("architectural frame rules are unsupported")
|
|
|
|
case frame.RuleCFA:
|
|
|
|
if it.regs.Reg(rule.Reg) == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
return op.DwarfRegisterFromUint64(uint64(int64(it.regs.Uint64Val(rule.Reg)) + rule.Offset)), nil
|
2017-09-01 13:34:13 +00:00
|
|
|
case frame.RuleFramePointer:
|
|
|
|
curReg := it.regs.Reg(rule.Reg)
|
|
|
|
if curReg == nil {
|
2017-10-05 07:26:19 +00:00
|
|
|
return nil, nil
|
|
|
|
}
|
2017-09-01 13:34:13 +00:00
|
|
|
if curReg.Uint64Val <= uint64(cfa) {
|
|
|
|
return it.readRegisterAt(regnum, curReg.Uint64Val)
|
|
|
|
}
|
2018-08-31 18:08:18 +00:00
|
|
|
newReg := *curReg
|
|
|
|
return &newReg, nil
|
2017-10-05 07:26:19 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *stackIterator) readRegisterAt(regnum uint64, addr uint64) (*op.DwarfRegister, error) {
|
2020-03-30 18:03:29 +00:00
|
|
|
buf := make([]byte, it.bi.Arch.regSize(regnum))
|
2017-10-05 07:26:19 +00:00
|
|
|
_, err := it.mem.ReadMemory(buf, uintptr(addr))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return op.DwarfRegisterFromBytes(buf), nil
|
|
|
|
}
|
2018-07-06 07:37:31 +00:00
|
|
|
|
2020-02-13 17:12:59 +00:00
|
|
|
func (it *stackIterator) loadG0SchedSP() {
|
|
|
|
if it.g0_sched_sp_loaded {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
it.g0_sched_sp_loaded = true
|
|
|
|
if it.g != nil {
|
2020-02-17 17:27:56 +00:00
|
|
|
mvar, _ := it.g.variable.structMember("m")
|
|
|
|
if mvar != nil {
|
|
|
|
g0var, _ := mvar.structMember("g0")
|
|
|
|
if g0var != nil {
|
|
|
|
g0, _ := g0var.parseG()
|
|
|
|
if g0 != nil {
|
|
|
|
it.g0_sched_sp = g0.SP
|
|
|
|
}
|
2020-02-13 17:12:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-06 07:37:31 +00:00
|
|
|
// Defer represents one deferred call
type Defer struct {
	DeferredPC uint64 // Value of field _defer.fn.fn, the deferred function
	DeferPC    uint64 // PC address of instruction that added this defer
	SP         uint64 // Value of SP register when this function was deferred (this field gets adjusted when the stack is moved to match the new stack space)
	link       *Defer // Next deferred function

	argSz int64 // Size of the deferred call's argument frame (_defer.siz)

	variable   *Variable // The runtime._defer struct this Defer was read from
	Unreadable error     // Set when this defer list entry could not be read
}
|
|
|
|
|
|
|
|
// readDefers decorates the frames with the function deferred at each stack frame.
|
|
|
|
func (g *G) readDefers(frames []Stackframe) {
|
|
|
|
curdefer := g.Defer()
|
|
|
|
i := 0
|
|
|
|
|
|
|
|
// scan simultaneously frames and the curdefer linked list, assigning
|
|
|
|
// defers to their associated frames.
|
|
|
|
for {
|
|
|
|
if curdefer == nil || i >= len(frames) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if curdefer.Unreadable != nil {
|
|
|
|
// Current defer is unreadable, stick it into the first available frame
|
|
|
|
// (so that it can be reported to the user) and exit
|
|
|
|
frames[i].Defers = append(frames[i].Defers, curdefer)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if frames[i].Err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if frames[i].TopmostDefer == nil {
|
|
|
|
frames[i].TopmostDefer = curdefer
|
|
|
|
}
|
|
|
|
|
|
|
|
if frames[i].SystemStack || curdefer.SP >= uint64(frames[i].Regs.CFA) {
|
|
|
|
// frames[i].Regs.CFA is the value that SP had before the function of
|
|
|
|
// frames[i] was called.
|
|
|
|
// This means that when curdefer.SP == frames[i].Regs.CFA then curdefer
|
|
|
|
// was added by the previous frame.
|
|
|
|
//
|
|
|
|
// curdefer.SP < frames[i].Regs.CFA means curdefer was added by a
|
|
|
|
// function further down the stack.
|
|
|
|
//
|
|
|
|
// SystemStack frames live on a different physical stack and can't be
|
|
|
|
// compared with deferred frames.
|
|
|
|
i++
|
|
|
|
} else {
|
|
|
|
frames[i].Defers = append(frames[i].Defers, curdefer)
|
|
|
|
curdefer = curdefer.Next()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// load reads the contents of the runtime._defer struct backing this Defer
// from target memory. Fields that fail to convert are silently left at their
// zero value; only a fully unreadable struct sets d.Unreadable.
func (d *Defer) load() {
	d.variable.loadValue(LoadConfig{false, 1, 0, 0, -1, 0})
	if d.variable.Unreadable != nil {
		d.Unreadable = d.variable.Unreadable
		return
	}

	// _defer.fn points at a funcval; dereference it and read its 'fn' field
	// to obtain the entry point of the deferred function.
	fnvar := d.variable.fieldVariable("fn").maybeDereference()
	if fnvar.Addr != 0 {
		fnvar = fnvar.loadFieldNamed("fn")
		if fnvar.Unreadable == nil {
			d.DeferredPC, _ = constant.Uint64Val(fnvar.Value)
		}
	}

	d.DeferPC, _ = constant.Uint64Val(d.variable.fieldVariable("pc").Value)
	d.SP, _ = constant.Uint64Val(d.variable.fieldVariable("sp").Value)
	d.argSz, _ = constant.Int64Val(d.variable.fieldVariable("siz").Value)

	// A non-zero link pointer means there is a next entry in the defer list;
	// it is loaded lazily by (*Defer).Next.
	linkvar := d.variable.fieldVariable("link").maybeDereference()
	if linkvar.Addr != 0 {
		d.link = &Defer{variable: linkvar}
	}
}
|
|
|
|
|
2018-08-31 18:08:18 +00:00
|
|
|
// errSPDecreased is used when (*Defer).Next detects a corrupted linked
// list, specifically when after following a link pointer the value of SP
// decreases rather than increasing or staying the same (the defer list is a
// FIFO list, nodes further down the list have been added by function calls
// further down the call stack and therefore the SP should always increase).
var errSPDecreased = errors.New("corrupted defer list: SP decreased")
|
2018-07-06 07:37:31 +00:00
|
|
|
|
|
|
|
// Next returns the next defer in the linked list
|
|
|
|
func (d *Defer) Next() *Defer {
|
|
|
|
if d.link == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
d.link.load()
|
|
|
|
if d.link.SP < d.SP {
|
2018-08-31 18:08:18 +00:00
|
|
|
d.link.Unreadable = errSPDecreased
|
2018-07-06 07:37:31 +00:00
|
|
|
}
|
|
|
|
return d.link
|
|
|
|
}
|
2018-07-10 10:15:11 +00:00
|
|
|
|
|
|
|
// EvalScope returns an EvalScope relative to the argument frame of this deferred call.
// The argument frame of a deferred call is stored in memory immediately
// after the deferred header.
func (d *Defer) EvalScope(thread Thread) (*EvalScope, error) {
	scope, err := GoroutineScope(thread)
	if err != nil {
		return nil, fmt.Errorf("could not get scope: %v", err)
	}

	// Point the scope at the deferred function instead of the goroutine's
	// current location.
	bi := thread.BinInfo()
	scope.PC = d.DeferredPC
	scope.File, scope.Line, scope.Fn = bi.PCToLine(d.DeferredPC)

	if scope.Fn == nil {
		return nil, fmt.Errorf("could not find function at %#x", d.DeferredPC)
	}

	// The arguments are stored immediately after the defer header struct, i.e.
	// addr+sizeof(_defer). Since CFA in go is always the address of the first
	// argument, that's what we use for the value of CFA.
	// For SP we use CFA minus the size of one pointer because that would be
	// the space occupied by pushing the return address on the stack during the
	// CALL.
	scope.Regs.CFA = (int64(d.variable.Addr) + d.variable.RealType.Common().ByteSize)
	scope.Regs.Regs[scope.Regs.SPRegNum].Uint64Val = uint64(scope.Regs.CFA - int64(bi.Arch.PtrSize()))

	// Re-read the function's DWARF entry to evaluate its frame-base rule at
	// the deferred PC.
	rdr := scope.Fn.cu.image.dwarfReader
	rdr.Seek(scope.Fn.offset)
	e, err := rdr.Next()
	if err != nil {
		return nil, fmt.Errorf("could not read DWARF function entry: %v", err)
	}
	scope.Regs.FrameBase, _, _, _ = bi.Location(e, dwarf.AttrFrameBase, scope.PC, scope.Regs)
	// Cache the argument frame so repeated reads hit memory only once.
	scope.Mem = cacheMemory(scope.Mem, uintptr(scope.Regs.CFA), int(d.argSz))

	return scope, nil
}
|